aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/networking/phy.txt288
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/Space.c6
-rw-r--r--drivers/net/bonding/bond_alb.c17
-rw-r--r--drivers/net/forcedeth.c577
-rw-r--r--drivers/net/hamradio/baycom_epp.c3
-rw-r--r--drivers/net/hamradio/baycom_par.c3
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c3
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c3
-rw-r--r--drivers/net/hamradio/mkiss.c3
-rw-r--r--drivers/net/ixgb/ixgb.h2
-rw-r--r--drivers/net/ixgb/ixgb_ee.c170
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c59
-rw-r--r--drivers/net/ixgb/ixgb_hw.h9
-rw-r--r--drivers/net/ixgb/ixgb_main.c53
-rw-r--r--drivers/net/loopback.c22
-rw-r--r--drivers/net/pci-skeleton.c6
-rw-r--r--drivers/net/phy/Kconfig49
-rw-r--r--drivers/net/phy/Makefile9
-rw-r--r--drivers/net/phy/cicada.c134
-rw-r--r--drivers/net/phy/davicom.c195
-rw-r--r--drivers/net/phy/lxt.c179
-rw-r--r--drivers/net/phy/marvell.c140
-rw-r--r--drivers/net/phy/mdio_bus.c99
-rw-r--r--drivers/net/phy/phy.c690
-rw-r--r--drivers/net/phy/phy_device.c572
-rw-r--r--drivers/net/phy/qsemi.c143
-rw-r--r--drivers/net/s2io-regs.h87
-rw-r--r--drivers/net/s2io.c3090
-rw-r--r--drivers/net/s2io.h364
-rw-r--r--drivers/net/skge.c63
-rw-r--r--drivers/net/skge.h19
-rw-r--r--drivers/net/tokenring/abyss.c2
-rw-r--r--drivers/net/tokenring/proteon.c104
-rw-r--r--drivers/net/tokenring/skisa.c104
-rw-r--r--drivers/net/tokenring/tms380tr.c37
-rw-r--r--drivers/net/tokenring/tms380tr.h8
-rw-r--r--drivers/net/tokenring/tmspci.c4
-rw-r--r--drivers/net/wireless/orinoco.c78
-rw-r--r--include/linux/ethtool.h4
-rw-r--r--include/linux/mii.h9
-rw-r--r--include/linux/phy.h360
43 files changed, 5815 insertions, 1955 deletions
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
new file mode 100644
index 000000000000..29ccae409031
--- /dev/null
+++ b/Documentation/networking/phy.txt
@@ -0,0 +1,288 @@
1
2-------
3PHY Abstraction Layer
4(Updated 2005-07-21)
5
6Purpose
7
 8 Most network devices consist of a set of registers which provide an interface
9 to a MAC layer, which communicates with the physical connection through a
10 PHY. The PHY concerns itself with negotiating link parameters with the link
11 partner on the other side of the network connection (typically, an ethernet
12 cable), and provides a register interface to allow drivers to determine what
13 settings were chosen, and to configure what settings are allowed.
14
15 While these devices are distinct from the network devices, and conform to a
16 standard layout for the registers, it has been common practice to integrate
17 the PHY management code with the network driver. This has resulted in large
18 amounts of redundant code. Also, on embedded systems with multiple (and
19 sometimes quite different) ethernet controllers connected to the same
20 management bus, it is difficult to ensure safe use of the bus.
21
22 Since the PHYs are devices, and the management busses through which they are
23 accessed are, in fact, busses, the PHY Abstraction Layer treats them as such.
24 In doing so, it has these goals:
25
26 1) Increase code-reuse
27 2) Increase overall code-maintainability
28 3) Speed development time for new network drivers, and for new systems
29
30 Basically, this layer is meant to provide an interface to PHY devices which
31 allows network driver writers to write as little code as possible, while
32 still providing a full feature set.
33
34The MDIO bus
35
36 Most network devices are connected to a PHY by means of a management bus.
37 Different devices use different busses (though some share common interfaces).
38 In order to take advantage of the PAL, each bus interface needs to be
39 registered as a distinct device.
40
41 1) read and write functions must be implemented. Their prototypes are:
42
43 int write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
44 int read(struct mii_bus *bus, int mii_id, int regnum);
45
46 mii_id is the address on the bus for the PHY, and regnum is the register
47 number. These functions are guaranteed not to be called from interrupt
48 time, so it is safe for them to block, waiting for an interrupt to signal
 49 the operation is complete.
50
51 2) A reset function is necessary. This is used to return the bus to an
52 initialized state.
53
54 3) A probe function is needed. This function should set up anything the bus
55 driver needs, setup the mii_bus structure, and register with the PAL using
56 mdiobus_register. Similarly, there's a remove function to undo all of
57 that (use mdiobus_unregister).
58
59 4) Like any driver, the device_driver structure must be configured, and init
60 exit functions are used to register the driver.
61
62 5) The bus must also be declared somewhere as a device, and registered.
63
64 As an example for how one driver implemented an mdio bus driver, see
65 drivers/net/gianfar_mii.c and arch/ppc/syslib/mpc85xx_devices.c
66
67Connecting to a PHY
68
69 Sometime during startup, the network driver needs to establish a connection
70 between the PHY device, and the network device. At this time, the PHY's bus
71 and drivers need to all have been loaded, so it is ready for the connection.
72 At this point, there are several ways to connect to the PHY:
73
74 1) The PAL handles everything, and only calls the network driver when
75 the link state changes, so it can react.
76
77 2) The PAL handles everything except interrupts (usually because the
78 controller has the interrupt registers).
79
80 3) The PAL handles everything, but checks in with the driver every second,
81 allowing the network driver to react first to any changes before the PAL
82 does.
83
84 4) The PAL serves only as a library of functions, with the network device
85 manually calling functions to update status, and configure the PHY
86
87
88Letting the PHY Abstraction Layer do Everything
89
90 If you choose option 1 (The hope is that every driver can, but to still be
91 useful to drivers that can't), connecting to the PHY is simple:
92
93 First, you need a function to react to changes in the link state. This
94 function follows this protocol:
95
96 static void adjust_link(struct net_device *dev);
97
98 Next, you need to know the device name of the PHY connected to this device.
 99 The name will look something like "phy0:0", where the first number is the
100 bus id, and the second is the PHY's address on that bus.
101
102 Now, to connect, just call this function:
103
104 phydev = phy_connect(dev, phy_name, &adjust_link, flags);
105
106 phydev is a pointer to the phy_device structure which represents the PHY. If
107 phy_connect is successful, it will return the pointer. dev, here, is the
108 pointer to your net_device. Once done, this function will have started the
109 PHY's software state machine, and registered for the PHY's interrupt, if it
110 has one. The phydev structure will be populated with information about the
111 current state, though the PHY will not yet be truly operational at this
112 point.
113
114 flags is a u32 which can optionally contain phy-specific flags.
115 This is useful if the system has put hardware restrictions on
116 the PHY/controller, of which the PHY needs to be aware.
117
118 Now just make sure that phydev->supported and phydev->advertising have any
119 values pruned from them which don't make sense for your controller (a 10/100
120 controller may be connected to a gigabit capable PHY, so you would need to
121 mask off SUPPORTED_1000baseT*). See include/linux/ethtool.h for definitions
122 for these bitfields. Note that you should not SET any bits, or the PHY may
123 get put into an unsupported state.
124
125 Lastly, once the controller is ready to handle network traffic, you call
126 phy_start(phydev). This tells the PAL that you are ready, and configures the
127 PHY to connect to the network. If you want to handle your own interrupts,
128 just set phydev->irq to PHY_IGNORE_INTERRUPT before you call phy_start.
129 Similarly, if you don't want to use interrupts, set phydev->irq to PHY_POLL.
130
131 When you want to disconnect from the network (even if just briefly), you call
132 phy_stop(phydev).
133
134Keeping Close Tabs on the PAL
135
136 It is possible that the PAL's built-in state machine needs a little help to
137 keep your network device and the PHY properly in sync. If so, you can
138 register a helper function when connecting to the PHY, which will be called
139 every second before the state machine reacts to any changes. To do this, you
140 need to manually call phy_attach() and phy_prepare_link(), and then call
141 phy_start_machine() with the second argument set to point to your special
142 handler.
143
144 Currently there are no examples of how to use this functionality, and testing
145 on it has been limited because the author does not have any drivers which use
146 it (they all use option 1). So Caveat Emptor.
147
148Doing it all yourself
149
150 There's a remote chance that the PAL's built-in state machine cannot track
151 the complex interactions between the PHY and your network device. If this is
152 so, you can simply call phy_attach(), and not call phy_start_machine or
153 phy_prepare_link(). This will mean that phydev->state is entirely yours to
154 handle (phy_start and phy_stop toggle between some of the states, so you
155 might need to avoid them).
156
157 An effort has been made to make sure that useful functionality can be
158 accessed without the state-machine running, and most of these functions are
159 descended from functions which did not interact with a complex state-machine.
160 However, again, no effort has been made so far to test running without the
161 state machine, so let those who try it beware.
162
163 Here is a brief rundown of the functions:
164
165 int phy_read(struct phy_device *phydev, u16 regnum);
166 int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
167
168 Simple read/write primitives. They invoke the bus's read/write function
169 pointers.
170
171 void phy_print_status(struct phy_device *phydev);
172
173 A convenience function to print out the PHY status neatly.
174
175 int phy_clear_interrupt(struct phy_device *phydev);
176 int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
177
178 Clear the PHY's interrupt, and configure which ones are allowed,
179 respectively. Currently only supports all on, or all off.
180
181 int phy_enable_interrupts(struct phy_device *phydev);
182 int phy_disable_interrupts(struct phy_device *phydev);
183
184 Functions which enable/disable PHY interrupts, clearing them
185 before and after, respectively.
186
187 int phy_start_interrupts(struct phy_device *phydev);
188 int phy_stop_interrupts(struct phy_device *phydev);
189
190 Requests the IRQ for the PHY interrupts, then enables them for
191 start, or disables then frees them for stop.
192
193 struct phy_device * phy_attach(struct net_device *dev, const char *phy_id,
194 u32 flags);
195
196 Attaches a network device to a particular PHY, binding the PHY to a generic
197 driver if none was found during bus initialization. Passes in
198 any phy-specific flags as needed.
199
200 int phy_start_aneg(struct phy_device *phydev);
201
202 Using variables inside the phydev structure, either configures advertising
203 and resets autonegotiation, or disables autonegotiation, and configures
204 forced settings.
205
206 static inline int phy_read_status(struct phy_device *phydev);
207
208 Fills the phydev structure with up-to-date information about the current
209 settings in the PHY.
210
211 void phy_sanitize_settings(struct phy_device *phydev)
212
213 Resolves differences between currently desired settings, and
214 supported settings for the given PHY device. Does not make
215 the changes in the hardware, though.
216
217 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
218 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
219
220 Ethtool convenience functions.
221
222 int phy_mii_ioctl(struct phy_device *phydev,
223 struct mii_ioctl_data *mii_data, int cmd);
224
225 The MII ioctl. Note that this function will completely screw up the state
226 machine if you write registers like BMCR, BMSR, ADVERTISE, etc. Best to
227 use this only to write registers which are not standard, and don't set off
228 a renegotiation.
229
230
231PHY Device Drivers
232
233 With the PHY Abstraction Layer, adding support for new PHYs is
234 quite easy. In some cases, no work is required at all! However,
235 many PHYs require a little hand-holding to get up-and-running.
236
237Generic PHY driver
238
239 If the desired PHY doesn't have any errata, quirks, or special
240 features you want to support, then it may be best to not add
241 support, and let the PHY Abstraction Layer's Generic PHY Driver
242 do all of the work.
243
244Writing a PHY driver
245
246 If you do need to write a PHY driver, the first thing to do is
247 make sure it can be matched with an appropriate PHY device.
248 This is done during bus initialization by reading the device's
249 UID (stored in registers 2 and 3), then comparing it to each
250 driver's phy_id field by ANDing it with each driver's
251 phy_id_mask field. Also, it needs a name. Here's an example:
252
253 static struct phy_driver dm9161_driver = {
254 .phy_id = 0x0181b880,
255 .name = "Davicom DM9161E",
256 .phy_id_mask = 0x0ffffff0,
257 ...
258 }
259
260 Next, you need to specify what features (speed, duplex, autoneg,
261 etc) your PHY device and driver support. Most PHYs support
262 PHY_BASIC_FEATURES, but you can look in include/linux/mii.h for other
263 features.
264
265 Each driver consists of a number of function pointers:
266
267 config_init: configures PHY into a sane state after a reset.
268 For instance, a Davicom PHY requires descrambling disabled.
269 probe: Does any setup needed by the driver
270 suspend/resume: power management
271 config_aneg: Changes the speed/duplex/negotiation settings
272 read_status: Reads the current speed/duplex/negotiation settings
273 ack_interrupt: Clear a pending interrupt
274 config_intr: Enable or disable interrupts
275 remove: Does any driver take-down
276
277 Of these, only config_aneg and read_status are required to be
278 assigned by the driver code. The rest are optional. Also, it is
279 preferred to use the generic phy driver's versions of these two
280 functions if at all possible: genphy_read_status and
281 genphy_config_aneg. If this is not possible, it is likely that
282 you only need to perform some actions before and after invoking
283 these functions, and so your functions will wrap the generic
284 ones.
285
286 Feel free to look at the Marvell, Cicada, and Davicom drivers in
287 drivers/net/phy/ for examples (the lxt and qsemi drivers have
288 not been tested as of this writing)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8a835eb58808..1e50b8e32add 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -131,6 +131,8 @@ config NET_SB1000
131 131
132 source "drivers/net/arcnet/Kconfig" 132 source "drivers/net/arcnet/Kconfig"
133 133
134source "drivers/net/phy/Kconfig"
135
134# 136#
135# Ethernet 137# Ethernet
136# 138#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 63c6d1e6d4d9..a369ae284a9a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
65# 65#
66 66
67obj-$(CONFIG_MII) += mii.o 67obj-$(CONFIG_MII) += mii.o
68obj-$(CONFIG_PHYLIB) += phy/
68 69
69obj-$(CONFIG_SUNDANCE) += sundance.o 70obj-$(CONFIG_SUNDANCE) += sundance.o
70obj-$(CONFIG_HAMACHI) += hamachi.o 71obj-$(CONFIG_HAMACHI) += hamachi.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 3707df6b0cfa..11c44becc08f 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -323,12 +323,6 @@ extern struct net_device *proteon_probe(int unit);
323extern struct net_device *smctr_probe(int unit); 323extern struct net_device *smctr_probe(int unit);
324 324
325static struct devprobe2 tr_probes2[] __initdata = { 325static struct devprobe2 tr_probes2[] __initdata = {
326#ifdef CONFIG_SKISA
327 {sk_isa_probe, 0},
328#endif
329#ifdef CONFIG_PROTEON
330 {proteon_probe, 0},
331#endif
332#ifdef CONFIG_SMCTR 326#ifdef CONFIG_SMCTR
333 {smctr_probe, 0}, 327 {smctr_probe, 0},
334#endif 328#endif
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 5ce606d9dc03..19e829b567d0 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1106,18 +1106,13 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
1106 } 1106 }
1107 } 1107 }
1108 1108
1109 if (found) { 1109 if (!found)
1110 /* a slave was found that is using the mac address 1110 return 0;
1111 * of the new slave
1112 */
1113 printk(KERN_ERR DRV_NAME
1114 ": Error: the hw address of slave %s is not "
1115 "unique - cannot enslave it!",
1116 slave->dev->name);
1117 return -EINVAL;
1118 }
1119 1111
1120 return 0; 1112 /* Try setting slave mac to bond address and fall-through
1113 to code handling that situation below... */
1114 alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
1115 bond->alb_info.rlb_enabled);
1121 } 1116 }
1122 1117
1123 /* The slave's address is equal to the address of the bond. 1118 /* The slave's address is equal to the address of the bond.
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 64f0f697c958..f165ae973985 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -85,6 +85,14 @@
85 * 0.33: 16 May 2005: Support for MCP51 added. 85 * 0.33: 16 May 2005: Support for MCP51 added.
86 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics. 86 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
87 * 0.35: 26 Jun 2005: Support for MCP55 added. 87 * 0.35: 26 Jun 2005: Support for MCP55 added.
88 * 0.36: 28 Jun 2005: Add jumbo frame support.
89 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
90 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
91 * per-packet flags.
92 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
93 * 0.40: 19 Jul 2005: Add support for mac address change.
94 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
95 * of nv_remove
88 * 96 *
89 * Known bugs: 97 * Known bugs:
90 * We suspect that on some hardware no TX done interrupts are generated. 98 * We suspect that on some hardware no TX done interrupts are generated.
@@ -96,7 +104,7 @@
96 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 104 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
97 * superfluous timer interrupts from the nic. 105 * superfluous timer interrupts from the nic.
98 */ 106 */
99#define FORCEDETH_VERSION "0.35" 107#define FORCEDETH_VERSION "0.41"
100#define DRV_NAME "forcedeth" 108#define DRV_NAME "forcedeth"
101 109
102#include <linux/module.h> 110#include <linux/module.h>
@@ -131,11 +139,10 @@
131 * Hardware access: 139 * Hardware access:
132 */ 140 */
133 141
134#define DEV_NEED_LASTPACKET1 0x0001 /* set LASTPACKET1 in tx flags */ 142#define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
135#define DEV_IRQMASK_1 0x0002 /* use NVREG_IRQMASK_WANTED_1 for irq mask */ 143#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
136#define DEV_IRQMASK_2 0x0004 /* use NVREG_IRQMASK_WANTED_2 for irq mask */ 144#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
137#define DEV_NEED_TIMERIRQ 0x0008 /* set the timer irq flag in the irq mask */ 145#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
138#define DEV_NEED_LINKTIMER 0x0010 /* poll link settings. Relies on the timer irq */
139 146
140enum { 147enum {
141 NvRegIrqStatus = 0x000, 148 NvRegIrqStatus = 0x000,
@@ -146,13 +153,16 @@ enum {
146#define NVREG_IRQ_RX 0x0002 153#define NVREG_IRQ_RX 0x0002
147#define NVREG_IRQ_RX_NOBUF 0x0004 154#define NVREG_IRQ_RX_NOBUF 0x0004
148#define NVREG_IRQ_TX_ERR 0x0008 155#define NVREG_IRQ_TX_ERR 0x0008
149#define NVREG_IRQ_TX2 0x0010 156#define NVREG_IRQ_TX_OK 0x0010
150#define NVREG_IRQ_TIMER 0x0020 157#define NVREG_IRQ_TIMER 0x0020
151#define NVREG_IRQ_LINK 0x0040 158#define NVREG_IRQ_LINK 0x0040
159#define NVREG_IRQ_TX_ERROR 0x0080
152#define NVREG_IRQ_TX1 0x0100 160#define NVREG_IRQ_TX1 0x0100
153#define NVREG_IRQMASK_WANTED_1 0x005f 161#define NVREG_IRQMASK_WANTED 0x00df
154#define NVREG_IRQMASK_WANTED_2 0x0147 162
155#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1)) 163#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
164 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
165 NVREG_IRQ_TX1))
156 166
157 NvRegUnknownSetupReg6 = 0x008, 167 NvRegUnknownSetupReg6 = 0x008,
158#define NVREG_UNKSETUP6_VAL 3 168#define NVREG_UNKSETUP6_VAL 3
@@ -286,6 +296,18 @@ struct ring_desc {
286 u32 FlagLen; 296 u32 FlagLen;
287}; 297};
288 298
299struct ring_desc_ex {
300 u32 PacketBufferHigh;
301 u32 PacketBufferLow;
302 u32 Reserved;
303 u32 FlagLen;
304};
305
306typedef union _ring_type {
307 struct ring_desc* orig;
308 struct ring_desc_ex* ex;
309} ring_type;
310
289#define FLAG_MASK_V1 0xffff0000 311#define FLAG_MASK_V1 0xffff0000
290#define FLAG_MASK_V2 0xffffc000 312#define FLAG_MASK_V2 0xffffc000
291#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1) 313#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
@@ -293,7 +315,7 @@ struct ring_desc {
293 315
294#define NV_TX_LASTPACKET (1<<16) 316#define NV_TX_LASTPACKET (1<<16)
295#define NV_TX_RETRYERROR (1<<19) 317#define NV_TX_RETRYERROR (1<<19)
296#define NV_TX_LASTPACKET1 (1<<24) 318#define NV_TX_FORCED_INTERRUPT (1<<24)
297#define NV_TX_DEFERRED (1<<26) 319#define NV_TX_DEFERRED (1<<26)
298#define NV_TX_CARRIERLOST (1<<27) 320#define NV_TX_CARRIERLOST (1<<27)
299#define NV_TX_LATECOLLISION (1<<28) 321#define NV_TX_LATECOLLISION (1<<28)
@@ -303,7 +325,7 @@ struct ring_desc {
303 325
304#define NV_TX2_LASTPACKET (1<<29) 326#define NV_TX2_LASTPACKET (1<<29)
305#define NV_TX2_RETRYERROR (1<<18) 327#define NV_TX2_RETRYERROR (1<<18)
306#define NV_TX2_LASTPACKET1 (1<<23) 328#define NV_TX2_FORCED_INTERRUPT (1<<30)
307#define NV_TX2_DEFERRED (1<<25) 329#define NV_TX2_DEFERRED (1<<25)
308#define NV_TX2_CARRIERLOST (1<<26) 330#define NV_TX2_CARRIERLOST (1<<26)
309#define NV_TX2_LATECOLLISION (1<<27) 331#define NV_TX2_LATECOLLISION (1<<27)
@@ -379,9 +401,13 @@ struct ring_desc {
379#define TX_LIMIT_START 62 401#define TX_LIMIT_START 62
380 402
381/* rx/tx mac addr + type + vlan + align + slack*/ 403/* rx/tx mac addr + type + vlan + align + slack*/
382#define RX_NIC_BUFSIZE (ETH_DATA_LEN + 64) 404#define NV_RX_HEADERS (64)
383/* even more slack */ 405/* even more slack. */
384#define RX_ALLOC_BUFSIZE (ETH_DATA_LEN + 128) 406#define NV_RX_ALLOC_PAD (64)
407
408/* maximum mtu size */
409#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
410#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */
385 411
386#define OOM_REFILL (1+HZ/20) 412#define OOM_REFILL (1+HZ/20)
387#define POLL_WAIT (1+HZ/100) 413#define POLL_WAIT (1+HZ/100)
@@ -396,6 +422,7 @@ struct ring_desc {
396 */ 422 */
397#define DESC_VER_1 0x0 423#define DESC_VER_1 0x0
398#define DESC_VER_2 (0x02100|NVREG_TXRXCTL_RXCHECK) 424#define DESC_VER_2 (0x02100|NVREG_TXRXCTL_RXCHECK)
425#define DESC_VER_3 (0x02200|NVREG_TXRXCTL_RXCHECK)
399 426
400/* PHY defines */ 427/* PHY defines */
401#define PHY_OUI_MARVELL 0x5043 428#define PHY_OUI_MARVELL 0x5043
@@ -468,11 +495,12 @@ struct fe_priv {
468 /* rx specific fields. 495 /* rx specific fields.
469 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); 496 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
470 */ 497 */
471 struct ring_desc *rx_ring; 498 ring_type rx_ring;
472 unsigned int cur_rx, refill_rx; 499 unsigned int cur_rx, refill_rx;
473 struct sk_buff *rx_skbuff[RX_RING]; 500 struct sk_buff *rx_skbuff[RX_RING];
474 dma_addr_t rx_dma[RX_RING]; 501 dma_addr_t rx_dma[RX_RING];
475 unsigned int rx_buf_sz; 502 unsigned int rx_buf_sz;
503 unsigned int pkt_limit;
476 struct timer_list oom_kick; 504 struct timer_list oom_kick;
477 struct timer_list nic_poll; 505 struct timer_list nic_poll;
478 506
@@ -484,7 +512,7 @@ struct fe_priv {
484 /* 512 /*
485 * tx specific fields. 513 * tx specific fields.
486 */ 514 */
487 struct ring_desc *tx_ring; 515 ring_type tx_ring;
488 unsigned int next_tx, nic_tx; 516 unsigned int next_tx, nic_tx;
489 struct sk_buff *tx_skbuff[TX_RING]; 517 struct sk_buff *tx_skbuff[TX_RING];
490 dma_addr_t tx_dma[TX_RING]; 518 dma_addr_t tx_dma[TX_RING];
@@ -519,6 +547,11 @@ static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
519 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); 547 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
520} 548}
521 549
550static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
551{
552 return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
553}
554
522static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, 555static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
523 int delay, int delaymax, const char *msg) 556 int delay, int delaymax, const char *msg)
524{ 557{
@@ -792,7 +825,7 @@ static int nv_alloc_rx(struct net_device *dev)
792 nr = refill_rx % RX_RING; 825 nr = refill_rx % RX_RING;
793 if (np->rx_skbuff[nr] == NULL) { 826 if (np->rx_skbuff[nr] == NULL) {
794 827
795 skb = dev_alloc_skb(RX_ALLOC_BUFSIZE); 828 skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
796 if (!skb) 829 if (!skb)
797 break; 830 break;
798 831
@@ -803,9 +836,16 @@ static int nv_alloc_rx(struct net_device *dev)
803 } 836 }
804 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len, 837 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
805 PCI_DMA_FROMDEVICE); 838 PCI_DMA_FROMDEVICE);
806 np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]); 839 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
807 wmb(); 840 np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
808 np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL); 841 wmb();
842 np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
843 } else {
844 np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
845 np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
846 wmb();
847 np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
848 }
809 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n", 849 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
810 dev->name, refill_rx); 850 dev->name, refill_rx);
811 refill_rx++; 851 refill_rx++;
@@ -831,19 +871,37 @@ static void nv_do_rx_refill(unsigned long data)
831 enable_irq(dev->irq); 871 enable_irq(dev->irq);
832} 872}
833 873
834static int nv_init_ring(struct net_device *dev) 874static void nv_init_rx(struct net_device *dev)
835{ 875{
836 struct fe_priv *np = get_nvpriv(dev); 876 struct fe_priv *np = get_nvpriv(dev);
837 int i; 877 int i;
838 878
839 np->next_tx = np->nic_tx = 0;
840 for (i = 0; i < TX_RING; i++)
841 np->tx_ring[i].FlagLen = 0;
842
843 np->cur_rx = RX_RING; 879 np->cur_rx = RX_RING;
844 np->refill_rx = 0; 880 np->refill_rx = 0;
845 for (i = 0; i < RX_RING; i++) 881 for (i = 0; i < RX_RING; i++)
846 np->rx_ring[i].FlagLen = 0; 882 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
883 np->rx_ring.orig[i].FlagLen = 0;
884 else
885 np->rx_ring.ex[i].FlagLen = 0;
886}
887
888static void nv_init_tx(struct net_device *dev)
889{
890 struct fe_priv *np = get_nvpriv(dev);
891 int i;
892
893 np->next_tx = np->nic_tx = 0;
894 for (i = 0; i < TX_RING; i++)
895 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
896 np->tx_ring.orig[i].FlagLen = 0;
897 else
898 np->tx_ring.ex[i].FlagLen = 0;
899}
900
901static int nv_init_ring(struct net_device *dev)
902{
903 nv_init_tx(dev);
904 nv_init_rx(dev);
847 return nv_alloc_rx(dev); 905 return nv_alloc_rx(dev);
848} 906}
849 907
@@ -852,7 +910,10 @@ static void nv_drain_tx(struct net_device *dev)
852 struct fe_priv *np = get_nvpriv(dev); 910 struct fe_priv *np = get_nvpriv(dev);
853 int i; 911 int i;
854 for (i = 0; i < TX_RING; i++) { 912 for (i = 0; i < TX_RING; i++) {
855 np->tx_ring[i].FlagLen = 0; 913 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
914 np->tx_ring.orig[i].FlagLen = 0;
915 else
916 np->tx_ring.ex[i].FlagLen = 0;
856 if (np->tx_skbuff[i]) { 917 if (np->tx_skbuff[i]) {
857 pci_unmap_single(np->pci_dev, np->tx_dma[i], 918 pci_unmap_single(np->pci_dev, np->tx_dma[i],
858 np->tx_skbuff[i]->len, 919 np->tx_skbuff[i]->len,
@@ -869,7 +930,10 @@ static void nv_drain_rx(struct net_device *dev)
869 struct fe_priv *np = get_nvpriv(dev); 930 struct fe_priv *np = get_nvpriv(dev);
870 int i; 931 int i;
871 for (i = 0; i < RX_RING; i++) { 932 for (i = 0; i < RX_RING; i++) {
872 np->rx_ring[i].FlagLen = 0; 933 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
934 np->rx_ring.orig[i].FlagLen = 0;
935 else
936 np->rx_ring.ex[i].FlagLen = 0;
873 wmb(); 937 wmb();
874 if (np->rx_skbuff[i]) { 938 if (np->rx_skbuff[i]) {
875 pci_unmap_single(np->pci_dev, np->rx_dma[i], 939 pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -900,11 +964,19 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
900 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len, 964 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len,
901 PCI_DMA_TODEVICE); 965 PCI_DMA_TODEVICE);
902 966
903 np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); 967 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
968 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
969 else {
970 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
971 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
972 }
904 973
905 spin_lock_irq(&np->lock); 974 spin_lock_irq(&np->lock);
906 wmb(); 975 wmb();
907 np->tx_ring[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags ); 976 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
977 np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
978 else
979 np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
908 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n", 980 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n",
909 dev->name, np->next_tx); 981 dev->name, np->next_tx);
910 { 982 {
@@ -942,7 +1014,10 @@ static void nv_tx_done(struct net_device *dev)
942 while (np->nic_tx != np->next_tx) { 1014 while (np->nic_tx != np->next_tx) {
943 i = np->nic_tx % TX_RING; 1015 i = np->nic_tx % TX_RING;
944 1016
945 Flags = le32_to_cpu(np->tx_ring[i].FlagLen); 1017 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1018 Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
1019 else
1020 Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
946 1021
947 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n", 1022 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
948 dev->name, np->nic_tx, Flags); 1023 dev->name, np->nic_tx, Flags);
@@ -993,9 +1068,56 @@ static void nv_tx_timeout(struct net_device *dev)
993 struct fe_priv *np = get_nvpriv(dev); 1068 struct fe_priv *np = get_nvpriv(dev);
994 u8 __iomem *base = get_hwbase(dev); 1069 u8 __iomem *base = get_hwbase(dev);
995 1070
996 dprintk(KERN_DEBUG "%s: Got tx_timeout. irq: %08x\n", dev->name, 1071 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
997 readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK); 1072 readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
998 1073
1074 {
1075 int i;
1076
1077 printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
1078 dev->name, (unsigned long)np->ring_addr,
1079 np->next_tx, np->nic_tx);
1080 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
1081 for (i=0;i<0x400;i+= 32) {
1082 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
1083 i,
1084 readl(base + i + 0), readl(base + i + 4),
1085 readl(base + i + 8), readl(base + i + 12),
1086 readl(base + i + 16), readl(base + i + 20),
1087 readl(base + i + 24), readl(base + i + 28));
1088 }
1089 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
1090 for (i=0;i<TX_RING;i+= 4) {
1091 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1092 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
1093 i,
1094 le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
1095 le32_to_cpu(np->tx_ring.orig[i].FlagLen),
1096 le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
1097 le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
1098 le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
1099 le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
1100 le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
1101 le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
1102 } else {
1103 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
1104 i,
1105 le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
1106 le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
1107 le32_to_cpu(np->tx_ring.ex[i].FlagLen),
1108 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
1109 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
1110 le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
1111 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
1112 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
1113 le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
1114 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
1115 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
1116 le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
1117 }
1118 }
1119 }
1120
999 spin_lock_irq(&np->lock); 1121 spin_lock_irq(&np->lock);
1000 1122
1001 /* 1) stop tx engine */ 1123 /* 1) stop tx engine */
@@ -1009,7 +1131,10 @@ static void nv_tx_timeout(struct net_device *dev)
1009 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 1131 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
1010 nv_drain_tx(dev); 1132 nv_drain_tx(dev);
1011 np->next_tx = np->nic_tx = 0; 1133 np->next_tx = np->nic_tx = 0;
1012 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); 1134 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1135 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1136 else
1137 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1013 netif_wake_queue(dev); 1138 netif_wake_queue(dev);
1014 } 1139 }
1015 1140
@@ -1084,8 +1209,13 @@ static void nv_rx_process(struct net_device *dev)
1084 break; /* we scanned the whole ring - do not continue */ 1209 break; /* we scanned the whole ring - do not continue */
1085 1210
1086 i = np->cur_rx % RX_RING; 1211 i = np->cur_rx % RX_RING;
1087 Flags = le32_to_cpu(np->rx_ring[i].FlagLen); 1212 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1088 len = nv_descr_getlength(&np->rx_ring[i], np->desc_ver); 1213 Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
1214 len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
1215 } else {
1216 Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
1217 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
1218 }
1089 1219
1090 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", 1220 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
1091 dev->name, np->cur_rx, Flags); 1221 dev->name, np->cur_rx, Flags);
@@ -1207,15 +1337,133 @@ next_pkt:
1207 } 1337 }
1208} 1338}
1209 1339
1340static void set_bufsize(struct net_device *dev)
1341{
1342 struct fe_priv *np = netdev_priv(dev);
1343
1344 if (dev->mtu <= ETH_DATA_LEN)
1345 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
1346 else
1347 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
1348}
1349
1210/* 1350/*
1211 * nv_change_mtu: dev->change_mtu function 1351 * nv_change_mtu: dev->change_mtu function
1212 * Called with dev_base_lock held for read. 1352 * Called with dev_base_lock held for read.
1213 */ 1353 */
1214static int nv_change_mtu(struct net_device *dev, int new_mtu) 1354static int nv_change_mtu(struct net_device *dev, int new_mtu)
1215{ 1355{
1216 if (new_mtu > ETH_DATA_LEN) 1356 struct fe_priv *np = get_nvpriv(dev);
1357 int old_mtu;
1358
1359 if (new_mtu < 64 || new_mtu > np->pkt_limit)
1217 return -EINVAL; 1360 return -EINVAL;
1361
1362 old_mtu = dev->mtu;
1218 dev->mtu = new_mtu; 1363 dev->mtu = new_mtu;
1364
1365 /* return early if the buffer sizes will not change */
1366 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
1367 return 0;
1368 if (old_mtu == new_mtu)
1369 return 0;
1370
1371 /* synchronized against open : rtnl_lock() held by caller */
1372 if (netif_running(dev)) {
1373 u8 *base = get_hwbase(dev);
1374 /*
1375 * It seems that the nic preloads valid ring entries into an
1376 * internal buffer. The procedure for flushing everything is
1377 * guessed, there is probably a simpler approach.
1378 * Changing the MTU is a rare event, it shouldn't matter.
1379 */
1380 disable_irq(dev->irq);
1381 spin_lock_bh(&dev->xmit_lock);
1382 spin_lock(&np->lock);
1383 /* stop engines */
1384 nv_stop_rx(dev);
1385 nv_stop_tx(dev);
1386 nv_txrx_reset(dev);
1387 /* drain rx queue */
1388 nv_drain_rx(dev);
1389 nv_drain_tx(dev);
1390 /* reinit driver view of the rx queue */
1391 nv_init_rx(dev);
1392 nv_init_tx(dev);
1393 /* alloc new rx buffers */
1394 set_bufsize(dev);
1395 if (nv_alloc_rx(dev)) {
1396 if (!np->in_shutdown)
1397 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1398 }
1399 /* reinit nic view of the rx queue */
1400 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
1401 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
1402 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1403 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1404 else
1405 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1406 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1407 base + NvRegRingSizes);
1408 pci_push(base);
1409 writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
1410 pci_push(base);
1411
1412 /* restart rx engine */
1413 nv_start_rx(dev);
1414 nv_start_tx(dev);
1415 spin_unlock(&np->lock);
1416 spin_unlock_bh(&dev->xmit_lock);
1417 enable_irq(dev->irq);
1418 }
1419 return 0;
1420}
1421
1422static void nv_copy_mac_to_hw(struct net_device *dev)
1423{
1424 u8 *base = get_hwbase(dev);
1425 u32 mac[2];
1426
1427 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
1428 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
1429 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
1430
1431 writel(mac[0], base + NvRegMacAddrA);
1432 writel(mac[1], base + NvRegMacAddrB);
1433}
1434
1435/*
1436 * nv_set_mac_address: dev->set_mac_address function
1437 * Called with rtnl_lock() held.
1438 */
1439static int nv_set_mac_address(struct net_device *dev, void *addr)
1440{
1441 struct fe_priv *np = get_nvpriv(dev);
1442 struct sockaddr *macaddr = (struct sockaddr*)addr;
1443
1444 if(!is_valid_ether_addr(macaddr->sa_data))
1445 return -EADDRNOTAVAIL;
1446
1447 /* synchronized against open : rtnl_lock() held by caller */
1448 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
1449
1450 if (netif_running(dev)) {
1451 spin_lock_bh(&dev->xmit_lock);
1452 spin_lock_irq(&np->lock);
1453
1454 /* stop rx engine */
1455 nv_stop_rx(dev);
1456
1457 /* set mac address */
1458 nv_copy_mac_to_hw(dev);
1459
1460 /* restart rx engine */
1461 nv_start_rx(dev);
1462 spin_unlock_irq(&np->lock);
1463 spin_unlock_bh(&dev->xmit_lock);
1464 } else {
1465 nv_copy_mac_to_hw(dev);
1466 }
1219 return 0; 1467 return 0;
1220} 1468}
1221 1469
@@ -1470,7 +1718,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
1470 if (!(events & np->irqmask)) 1718 if (!(events & np->irqmask))
1471 break; 1719 break;
1472 1720
1473 if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX2|NVREG_IRQ_TX_ERR)) { 1721 if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_ERROR|NVREG_IRQ_TX_ERR)) {
1474 spin_lock(&np->lock); 1722 spin_lock(&np->lock);
1475 nv_tx_done(dev); 1723 nv_tx_done(dev);
1476 spin_unlock(&np->lock); 1724 spin_unlock(&np->lock);
@@ -1761,6 +2009,50 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1761 return 0; 2009 return 0;
1762} 2010}
1763 2011
2012#define FORCEDETH_REGS_VER 1
2013#define FORCEDETH_REGS_SIZE 0x400 /* 256 32-bit registers */
2014
2015static int nv_get_regs_len(struct net_device *dev)
2016{
2017 return FORCEDETH_REGS_SIZE;
2018}
2019
2020static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
2021{
2022 struct fe_priv *np = get_nvpriv(dev);
2023 u8 __iomem *base = get_hwbase(dev);
2024 u32 *rbuf = buf;
2025 int i;
2026
2027 regs->version = FORCEDETH_REGS_VER;
2028 spin_lock_irq(&np->lock);
2029 for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
2030 rbuf[i] = readl(base + i*sizeof(u32));
2031 spin_unlock_irq(&np->lock);
2032}
2033
2034static int nv_nway_reset(struct net_device *dev)
2035{
2036 struct fe_priv *np = get_nvpriv(dev);
2037 int ret;
2038
2039 spin_lock_irq(&np->lock);
2040 if (np->autoneg) {
2041 int bmcr;
2042
2043 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
2044 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
2045 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
2046
2047 ret = 0;
2048 } else {
2049 ret = -EINVAL;
2050 }
2051 spin_unlock_irq(&np->lock);
2052
2053 return ret;
2054}
2055
1764static struct ethtool_ops ops = { 2056static struct ethtool_ops ops = {
1765 .get_drvinfo = nv_get_drvinfo, 2057 .get_drvinfo = nv_get_drvinfo,
1766 .get_link = ethtool_op_get_link, 2058 .get_link = ethtool_op_get_link,
@@ -1768,6 +2060,9 @@ static struct ethtool_ops ops = {
1768 .set_wol = nv_set_wol, 2060 .set_wol = nv_set_wol,
1769 .get_settings = nv_get_settings, 2061 .get_settings = nv_get_settings,
1770 .set_settings = nv_set_settings, 2062 .set_settings = nv_set_settings,
2063 .get_regs_len = nv_get_regs_len,
2064 .get_regs = nv_get_regs,
2065 .nway_reset = nv_nway_reset,
1771}; 2066};
1772 2067
1773static int nv_open(struct net_device *dev) 2068static int nv_open(struct net_device *dev)
@@ -1792,6 +2087,7 @@ static int nv_open(struct net_device *dev)
1792 writel(0, base + NvRegAdapterControl); 2087 writel(0, base + NvRegAdapterControl);
1793 2088
1794 /* 2) initialize descriptor rings */ 2089 /* 2) initialize descriptor rings */
2090 set_bufsize(dev);
1795 oom = nv_init_ring(dev); 2091 oom = nv_init_ring(dev);
1796 2092
1797 writel(0, base + NvRegLinkSpeed); 2093 writel(0, base + NvRegLinkSpeed);
@@ -1802,20 +2098,14 @@ static int nv_open(struct net_device *dev)
1802 np->in_shutdown = 0; 2098 np->in_shutdown = 0;
1803 2099
1804 /* 3) set mac address */ 2100 /* 3) set mac address */
1805 { 2101 nv_copy_mac_to_hw(dev);
1806 u32 mac[2];
1807
1808 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
1809 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
1810 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
1811
1812 writel(mac[0], base + NvRegMacAddrA);
1813 writel(mac[1], base + NvRegMacAddrB);
1814 }
1815 2102
1816 /* 4) give hw rings */ 2103 /* 4) give hw rings */
1817 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr); 2104 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
1818 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); 2105 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2106 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
2107 else
2108 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1819 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), 2109 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1820 base + NvRegRingSizes); 2110 base + NvRegRingSizes);
1821 2111
@@ -1837,7 +2127,7 @@ static int nv_open(struct net_device *dev)
1837 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 2127 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
1838 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 2128 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
1839 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 2129 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
1840 writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig); 2130 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
1841 2131
1842 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 2132 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
1843 get_random_bytes(&i, sizeof(i)); 2133 get_random_bytes(&i, sizeof(i));
@@ -1942,6 +2232,12 @@ static int nv_close(struct net_device *dev)
1942 if (np->wolenabled) 2232 if (np->wolenabled)
1943 nv_start_rx(dev); 2233 nv_start_rx(dev);
1944 2234
2235 /* special op: write back the misordered MAC address - otherwise
2236 * the next nv_probe would see a wrong address.
2237 */
2238 writel(np->orig_mac[0], base + NvRegMacAddrA);
2239 writel(np->orig_mac[1], base + NvRegMacAddrB);
2240
1945 /* FIXME: power down nic */ 2241 /* FIXME: power down nic */
1946 2242
1947 return 0; 2243 return 0;
@@ -2006,32 +2302,55 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2006 } 2302 }
2007 2303
2008 /* handle different descriptor versions */ 2304 /* handle different descriptor versions */
2009 if (pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_1 || 2305 if (id->driver_data & DEV_HAS_HIGH_DMA) {
2010 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_2 || 2306 /* packet format 3: supports 40-bit addressing */
2011 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_3 || 2307 np->desc_ver = DESC_VER_3;
2012 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || 2308 if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
2013 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) 2309 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
2014 np->desc_ver = DESC_VER_1; 2310 pci_name(pci_dev));
2015 else 2311 }
2312 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
2313 /* packet format 2: supports jumbo frames */
2016 np->desc_ver = DESC_VER_2; 2314 np->desc_ver = DESC_VER_2;
2315 } else {
2316 /* original packet format */
2317 np->desc_ver = DESC_VER_1;
2318 }
2319
2320 np->pkt_limit = NV_PKTLIMIT_1;
2321 if (id->driver_data & DEV_HAS_LARGEDESC)
2322 np->pkt_limit = NV_PKTLIMIT_2;
2017 2323
2018 err = -ENOMEM; 2324 err = -ENOMEM;
2019 np->base = ioremap(addr, NV_PCI_REGSZ); 2325 np->base = ioremap(addr, NV_PCI_REGSZ);
2020 if (!np->base) 2326 if (!np->base)
2021 goto out_relreg; 2327 goto out_relreg;
2022 dev->base_addr = (unsigned long)np->base; 2328 dev->base_addr = (unsigned long)np->base;
2329
2023 dev->irq = pci_dev->irq; 2330 dev->irq = pci_dev->irq;
2024 np->rx_ring = pci_alloc_consistent(pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), 2331
2025 &np->ring_addr); 2332 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2026 if (!np->rx_ring) 2333 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
2027 goto out_unmap; 2334 sizeof(struct ring_desc) * (RX_RING + TX_RING),
2028 np->tx_ring = &np->rx_ring[RX_RING]; 2335 &np->ring_addr);
2336 if (!np->rx_ring.orig)
2337 goto out_unmap;
2338 np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
2339 } else {
2340 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
2341 sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
2342 &np->ring_addr);
2343 if (!np->rx_ring.ex)
2344 goto out_unmap;
2345 np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
2346 }
2029 2347
2030 dev->open = nv_open; 2348 dev->open = nv_open;
2031 dev->stop = nv_close; 2349 dev->stop = nv_close;
2032 dev->hard_start_xmit = nv_start_xmit; 2350 dev->hard_start_xmit = nv_start_xmit;
2033 dev->get_stats = nv_get_stats; 2351 dev->get_stats = nv_get_stats;
2034 dev->change_mtu = nv_change_mtu; 2352 dev->change_mtu = nv_change_mtu;
2353 dev->set_mac_address = nv_set_mac_address;
2035 dev->set_multicast_list = nv_set_multicast; 2354 dev->set_multicast_list = nv_set_multicast;
2036#ifdef CONFIG_NET_POLL_CONTROLLER 2355#ifdef CONFIG_NET_POLL_CONTROLLER
2037 dev->poll_controller = nv_poll_controller; 2356 dev->poll_controller = nv_poll_controller;
@@ -2080,17 +2399,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2080 2399
2081 if (np->desc_ver == DESC_VER_1) { 2400 if (np->desc_ver == DESC_VER_1) {
2082 np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID; 2401 np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID;
2083 if (id->driver_data & DEV_NEED_LASTPACKET1)
2084 np->tx_flags |= NV_TX_LASTPACKET1;
2085 } else { 2402 } else {
2086 np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID; 2403 np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID;
2087 if (id->driver_data & DEV_NEED_LASTPACKET1)
2088 np->tx_flags |= NV_TX2_LASTPACKET1;
2089 } 2404 }
2090 if (id->driver_data & DEV_IRQMASK_1) 2405 np->irqmask = NVREG_IRQMASK_WANTED;
2091 np->irqmask = NVREG_IRQMASK_WANTED_1;
2092 if (id->driver_data & DEV_IRQMASK_2)
2093 np->irqmask = NVREG_IRQMASK_WANTED_2;
2094 if (id->driver_data & DEV_NEED_TIMERIRQ) 2406 if (id->driver_data & DEV_NEED_TIMERIRQ)
2095 np->irqmask |= NVREG_IRQ_TIMER; 2407 np->irqmask |= NVREG_IRQ_TIMER;
2096 if (id->driver_data & DEV_NEED_LINKTIMER) { 2408 if (id->driver_data & DEV_NEED_LINKTIMER) {
@@ -2155,8 +2467,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2155 return 0; 2467 return 0;
2156 2468
2157out_freering: 2469out_freering:
2158 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), 2470 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2159 np->rx_ring, np->ring_addr); 2471 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
2472 np->rx_ring.orig, np->ring_addr);
2473 else
2474 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
2475 np->rx_ring.ex, np->ring_addr);
2160 pci_set_drvdata(pci_dev, NULL); 2476 pci_set_drvdata(pci_dev, NULL);
2161out_unmap: 2477out_unmap:
2162 iounmap(get_hwbase(dev)); 2478 iounmap(get_hwbase(dev));
@@ -2174,18 +2490,14 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
2174{ 2490{
2175 struct net_device *dev = pci_get_drvdata(pci_dev); 2491 struct net_device *dev = pci_get_drvdata(pci_dev);
2176 struct fe_priv *np = get_nvpriv(dev); 2492 struct fe_priv *np = get_nvpriv(dev);
2177 u8 __iomem *base = get_hwbase(dev);
2178 2493
2179 unregister_netdev(dev); 2494 unregister_netdev(dev);
2180 2495
2181 /* special op: write back the misordered MAC address - otherwise
2182 * the next nv_probe would see a wrong address.
2183 */
2184 writel(np->orig_mac[0], base + NvRegMacAddrA);
2185 writel(np->orig_mac[1], base + NvRegMacAddrB);
2186
2187 /* free all structures */ 2496 /* free all structures */
2188 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring, np->ring_addr); 2497 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2498 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
2499 else
2500 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
2189 iounmap(get_hwbase(dev)); 2501 iounmap(get_hwbase(dev));
2190 pci_release_regions(pci_dev); 2502 pci_release_regions(pci_dev);
2191 pci_disable_device(pci_dev); 2503 pci_disable_device(pci_dev);
@@ -2195,109 +2507,64 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
2195 2507
2196static struct pci_device_id pci_tbl[] = { 2508static struct pci_device_id pci_tbl[] = {
2197 { /* nForce Ethernet Controller */ 2509 { /* nForce Ethernet Controller */
2198 .vendor = PCI_VENDOR_ID_NVIDIA, 2510 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
2199 .device = PCI_DEVICE_ID_NVIDIA_NVENET_1, 2511 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2200 .subvendor = PCI_ANY_ID,
2201 .subdevice = PCI_ANY_ID,
2202 .driver_data = DEV_IRQMASK_1|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2203 }, 2512 },
2204 { /* nForce2 Ethernet Controller */ 2513 { /* nForce2 Ethernet Controller */
2205 .vendor = PCI_VENDOR_ID_NVIDIA, 2514 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
2206 .device = PCI_DEVICE_ID_NVIDIA_NVENET_2, 2515 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2207 .subvendor = PCI_ANY_ID,
2208 .subdevice = PCI_ANY_ID,
2209 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2210 }, 2516 },
2211 { /* nForce3 Ethernet Controller */ 2517 { /* nForce3 Ethernet Controller */
2212 .vendor = PCI_VENDOR_ID_NVIDIA, 2518 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
2213 .device = PCI_DEVICE_ID_NVIDIA_NVENET_3, 2519 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2214 .subvendor = PCI_ANY_ID,
2215 .subdevice = PCI_ANY_ID,
2216 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2217 }, 2520 },
2218 { /* nForce3 Ethernet Controller */ 2521 { /* nForce3 Ethernet Controller */
2219 .vendor = PCI_VENDOR_ID_NVIDIA, 2522 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
2220 .device = PCI_DEVICE_ID_NVIDIA_NVENET_4, 2523 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
2221 .subvendor = PCI_ANY_ID,
2222 .subdevice = PCI_ANY_ID,
2223 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2224 }, 2524 },
2225 { /* nForce3 Ethernet Controller */ 2525 { /* nForce3 Ethernet Controller */
2226 .vendor = PCI_VENDOR_ID_NVIDIA, 2526 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
2227 .device = PCI_DEVICE_ID_NVIDIA_NVENET_5, 2527 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
2228 .subvendor = PCI_ANY_ID,
2229 .subdevice = PCI_ANY_ID,
2230 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2231 }, 2528 },
2232 { /* nForce3 Ethernet Controller */ 2529 { /* nForce3 Ethernet Controller */
2233 .vendor = PCI_VENDOR_ID_NVIDIA, 2530 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
2234 .device = PCI_DEVICE_ID_NVIDIA_NVENET_6, 2531 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
2235 .subvendor = PCI_ANY_ID,
2236 .subdevice = PCI_ANY_ID,
2237 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2238 }, 2532 },
2239 { /* nForce3 Ethernet Controller */ 2533 { /* nForce3 Ethernet Controller */
2240 .vendor = PCI_VENDOR_ID_NVIDIA, 2534 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
2241 .device = PCI_DEVICE_ID_NVIDIA_NVENET_7, 2535 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
2242 .subvendor = PCI_ANY_ID,
2243 .subdevice = PCI_ANY_ID,
2244 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2245 }, 2536 },
2246 { /* CK804 Ethernet Controller */ 2537 { /* CK804 Ethernet Controller */
2247 .vendor = PCI_VENDOR_ID_NVIDIA, 2538 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
2248 .device = PCI_DEVICE_ID_NVIDIA_NVENET_8, 2539 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2249 .subvendor = PCI_ANY_ID,
2250 .subdevice = PCI_ANY_ID,
2251 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2252 }, 2540 },
2253 { /* CK804 Ethernet Controller */ 2541 { /* CK804 Ethernet Controller */
2254 .vendor = PCI_VENDOR_ID_NVIDIA, 2542 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
2255 .device = PCI_DEVICE_ID_NVIDIA_NVENET_9, 2543 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2256 .subvendor = PCI_ANY_ID,
2257 .subdevice = PCI_ANY_ID,
2258 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2259 }, 2544 },
2260 { /* MCP04 Ethernet Controller */ 2545 { /* MCP04 Ethernet Controller */
2261 .vendor = PCI_VENDOR_ID_NVIDIA, 2546 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
2262 .device = PCI_DEVICE_ID_NVIDIA_NVENET_10, 2547 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2263 .subvendor = PCI_ANY_ID,
2264 .subdevice = PCI_ANY_ID,
2265 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2266 }, 2548 },
2267 { /* MCP04 Ethernet Controller */ 2549 { /* MCP04 Ethernet Controller */
2268 .vendor = PCI_VENDOR_ID_NVIDIA, 2550 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
2269 .device = PCI_DEVICE_ID_NVIDIA_NVENET_11, 2551 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2270 .subvendor = PCI_ANY_ID,
2271 .subdevice = PCI_ANY_ID,
2272 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2273 }, 2552 },
2274 { /* MCP51 Ethernet Controller */ 2553 { /* MCP51 Ethernet Controller */
2275 .vendor = PCI_VENDOR_ID_NVIDIA, 2554 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
2276 .device = PCI_DEVICE_ID_NVIDIA_NVENET_12, 2555 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
2277 .subvendor = PCI_ANY_ID,
2278 .subdevice = PCI_ANY_ID,
2279 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2280 }, 2556 },
2281 { /* MCP51 Ethernet Controller */ 2557 { /* MCP51 Ethernet Controller */
2282 .vendor = PCI_VENDOR_ID_NVIDIA, 2558 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
2283 .device = PCI_DEVICE_ID_NVIDIA_NVENET_13, 2559 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
2284 .subvendor = PCI_ANY_ID,
2285 .subdevice = PCI_ANY_ID,
2286 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2287 }, 2560 },
2288 { /* MCP55 Ethernet Controller */ 2561 { /* MCP55 Ethernet Controller */
2289 .vendor = PCI_VENDOR_ID_NVIDIA, 2562 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
2290 .device = PCI_DEVICE_ID_NVIDIA_NVENET_14, 2563 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2291 .subvendor = PCI_ANY_ID,
2292 .subdevice = PCI_ANY_ID,
2293 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2294 }, 2564 },
2295 { /* MCP55 Ethernet Controller */ 2565 { /* MCP55 Ethernet Controller */
2296 .vendor = PCI_VENDOR_ID_NVIDIA, 2566 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
2297 .device = PCI_DEVICE_ID_NVIDIA_NVENET_15, 2567 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2298 .subvendor = PCI_ANY_ID,
2299 .subdevice = PCI_ANY_ID,
2300 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2301 }, 2568 },
2302 {0,}, 2569 {0,},
2303}; 2570};
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index a7f15d9f13e5..5298096afbdb 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -54,6 +54,7 @@
54#include <linux/kmod.h> 54#include <linux/kmod.h>
55#include <linux/hdlcdrv.h> 55#include <linux/hdlcdrv.h>
56#include <linux/baycom.h> 56#include <linux/baycom.h>
57#include <linux/jiffies.h>
57#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 58#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
58/* prototypes for ax25_encapsulate and ax25_rebuild_header */ 59/* prototypes for ax25_encapsulate and ax25_rebuild_header */
59#include <net/ax25.h> 60#include <net/ax25.h>
@@ -287,7 +288,7 @@ static inline void baycom_int_freq(struct baycom_state *bc)
287 * measure the interrupt frequency 288 * measure the interrupt frequency
288 */ 289 */
289 bc->debug_vals.cur_intcnt++; 290 bc->debug_vals.cur_intcnt++;
290 if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) { 291 if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
291 bc->debug_vals.last_jiffies = cur_jiffies; 292 bc->debug_vals.last_jiffies = cur_jiffies;
292 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; 293 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
293 bc->debug_vals.cur_intcnt = 0; 294 bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 612ad452bee0..3b1bef1ee215 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -84,6 +84,7 @@
84#include <linux/baycom.h> 84#include <linux/baycom.h>
85#include <linux/parport.h> 85#include <linux/parport.h>
86#include <linux/bitops.h> 86#include <linux/bitops.h>
87#include <linux/jiffies.h>
87 88
88#include <asm/bug.h> 89#include <asm/bug.h>
89#include <asm/system.h> 90#include <asm/system.h>
@@ -165,7 +166,7 @@ static void __inline__ baycom_int_freq(struct baycom_state *bc)
165 * measure the interrupt frequency 166 * measure the interrupt frequency
166 */ 167 */
167 bc->debug_vals.cur_intcnt++; 168 bc->debug_vals.cur_intcnt++;
168 if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) { 169 if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
169 bc->debug_vals.last_jiffies = cur_jiffies; 170 bc->debug_vals.last_jiffies = cur_jiffies;
170 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; 171 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
171 bc->debug_vals.cur_intcnt = 0; 172 bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 25f270b05378..232793d2ce6b 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -79,6 +79,7 @@
79#include <asm/io.h> 79#include <asm/io.h>
80#include <linux/hdlcdrv.h> 80#include <linux/hdlcdrv.h>
81#include <linux/baycom.h> 81#include <linux/baycom.h>
82#include <linux/jiffies.h>
82 83
83/* --------------------------------------------------------------------- */ 84/* --------------------------------------------------------------------- */
84 85
@@ -159,7 +160,7 @@ static inline void baycom_int_freq(struct baycom_state *bc)
159 * measure the interrupt frequency 160 * measure the interrupt frequency
160 */ 161 */
161 bc->debug_vals.cur_intcnt++; 162 bc->debug_vals.cur_intcnt++;
162 if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) { 163 if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
163 bc->debug_vals.last_jiffies = cur_jiffies; 164 bc->debug_vals.last_jiffies = cur_jiffies;
164 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; 165 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
165 bc->debug_vals.cur_intcnt = 0; 166 bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index eead85d00962..be596a3eb3fd 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -69,6 +69,7 @@
69#include <asm/io.h> 69#include <asm/io.h>
70#include <linux/hdlcdrv.h> 70#include <linux/hdlcdrv.h>
71#include <linux/baycom.h> 71#include <linux/baycom.h>
72#include <linux/jiffies.h>
72 73
73/* --------------------------------------------------------------------- */ 74/* --------------------------------------------------------------------- */
74 75
@@ -150,7 +151,7 @@ static inline void baycom_int_freq(struct baycom_state *bc)
150 * measure the interrupt frequency 151 * measure the interrupt frequency
151 */ 152 */
152 bc->debug_vals.cur_intcnt++; 153 bc->debug_vals.cur_intcnt++;
153 if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) { 154 if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
154 bc->debug_vals.last_jiffies = cur_jiffies; 155 bc->debug_vals.last_jiffies = cur_jiffies;
155 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; 156 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
156 bc->debug_vals.cur_intcnt = 0; 157 bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 3035422f5ad8..e94952e799fe 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -46,6 +46,7 @@
46#include <linux/etherdevice.h> 46#include <linux/etherdevice.h>
47#include <linux/skbuff.h> 47#include <linux/skbuff.h>
48#include <linux/if_arp.h> 48#include <linux/if_arp.h>
49#include <linux/jiffies.h>
49 50
50#include <net/ax25.h> 51#include <net/ax25.h>
51 52
@@ -429,7 +430,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
429 * May be we must check transmitter timeout here ? 430 * May be we must check transmitter timeout here ?
430 * 14 Oct 1994 Dmitry Gorodchanin. 431 * 14 Oct 1994 Dmitry Gorodchanin.
431 */ 432 */
432 if (jiffies - dev->trans_start < 20 * HZ) { 433 if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
433 /* 20 sec timeout not reached */ 434 /* 20 sec timeout not reached */
434 return 1; 435 return 1;
435 } 436 }
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index f8d3385c7842..c83271b38621 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -119,7 +119,7 @@ struct ixgb_adapter;
119 * so a DMA handle can be stored along with the buffer */ 119 * so a DMA handle can be stored along with the buffer */
120struct ixgb_buffer { 120struct ixgb_buffer {
121 struct sk_buff *skb; 121 struct sk_buff *skb;
122 uint64_t dma; 122 dma_addr_t dma;
123 unsigned long time_stamp; 123 unsigned long time_stamp;
124 uint16_t length; 124 uint16_t length;
125 uint16_t next_to_watch; 125 uint16_t next_to_watch;
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 3aae110c5560..661a46b95a61 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -565,24 +565,6 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
565 } 565 }
566} 566}
567 567
568/******************************************************************************
569 * return the compatibility flags from EEPROM
570 *
571 * hw - Struct containing variables accessed by shared code
572 *
573 * Returns:
574 * compatibility flags if EEPROM contents are valid, 0 otherwise
575 ******************************************************************************/
576uint16_t
577ixgb_get_ee_compatibility(struct ixgb_hw *hw)
578{
579 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
580
581 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
582 return (le16_to_cpu(ee_map->compatibility));
583
584 return(0);
585}
586 568
587/****************************************************************************** 569/******************************************************************************
588 * return the Printed Board Assembly number from EEPROM 570 * return the Printed Board Assembly number from EEPROM
@@ -602,81 +584,6 @@ ixgb_get_ee_pba_number(struct ixgb_hw *hw)
602 return(0); 584 return(0);
603} 585}
604 586
605/******************************************************************************
606 * return the Initialization Control Word 1 from EEPROM
607 *
608 * hw - Struct containing variables accessed by shared code
609 *
610 * Returns:
611 * Initialization Control Word 1 if EEPROM contents are valid, 0 otherwise
612 ******************************************************************************/
613uint16_t
614ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw)
615{
616 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
617
618 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
619 return (le16_to_cpu(ee_map->init_ctrl_reg_1));
620
621 return(0);
622}
623
624/******************************************************************************
625 * return the Initialization Control Word 2 from EEPROM
626 *
627 * hw - Struct containing variables accessed by shared code
628 *
629 * Returns:
630 * Initialization Control Word 2 if EEPROM contents are valid, 0 otherwise
631 ******************************************************************************/
632uint16_t
633ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw)
634{
635 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
636
637 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
638 return (le16_to_cpu(ee_map->init_ctrl_reg_2));
639
640 return(0);
641}
642
643/******************************************************************************
644 * return the Subsystem Id from EEPROM
645 *
646 * hw - Struct containing variables accessed by shared code
647 *
648 * Returns:
649 * Subsystem Id if EEPROM contents are valid, 0 otherwise
650 ******************************************************************************/
651uint16_t
652ixgb_get_ee_subsystem_id(struct ixgb_hw *hw)
653{
654 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
655
656 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
657 return (le16_to_cpu(ee_map->subsystem_id));
658
659 return(0);
660}
661
662/******************************************************************************
663 * return the Sub Vendor Id from EEPROM
664 *
665 * hw - Struct containing variables accessed by shared code
666 *
667 * Returns:
668 * Sub Vendor Id if EEPROM contents are valid, 0 otherwise
669 ******************************************************************************/
670uint16_t
671ixgb_get_ee_subvendor_id(struct ixgb_hw *hw)
672{
673 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
674
675 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
676 return (le16_to_cpu(ee_map->subvendor_id));
677
678 return(0);
679}
680 587
681/****************************************************************************** 588/******************************************************************************
682 * return the Device Id from EEPROM 589 * return the Device Id from EEPROM
@@ -694,81 +601,6 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
694 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 601 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
695 return (le16_to_cpu(ee_map->device_id)); 602 return (le16_to_cpu(ee_map->device_id));
696 603
697 return(0); 604 return (0);
698}
699
700/******************************************************************************
701 * return the Vendor Id from EEPROM
702 *
703 * hw - Struct containing variables accessed by shared code
704 *
705 * Returns:
706 * Device Id if EEPROM contents are valid, 0 otherwise
707 ******************************************************************************/
708uint16_t
709ixgb_get_ee_vendor_id(struct ixgb_hw *hw)
710{
711 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
712
713 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
714 return (le16_to_cpu(ee_map->vendor_id));
715
716 return(0);
717}
718
719/******************************************************************************
720 * return the Software Defined Pins Register from EEPROM
721 *
722 * hw - Struct containing variables accessed by shared code
723 *
724 * Returns:
725 * SDP Register if EEPROM contents are valid, 0 otherwise
726 ******************************************************************************/
727uint16_t
728ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw)
729{
730 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
731
732 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
733 return (le16_to_cpu(ee_map->swdpins_reg));
734
735 return(0);
736} 605}
737 606
738/******************************************************************************
739 * return the D3 Power Management Bits from EEPROM
740 *
741 * hw - Struct containing variables accessed by shared code
742 *
743 * Returns:
744 * D3 Power Management Bits if EEPROM contents are valid, 0 otherwise
745 ******************************************************************************/
746uint8_t
747ixgb_get_ee_d3_power(struct ixgb_hw *hw)
748{
749 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
750
751 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
752 return (le16_to_cpu(ee_map->d3_power));
753
754 return(0);
755}
756
757/******************************************************************************
758 * return the D0 Power Management Bits from EEPROM
759 *
760 * hw - Struct containing variables accessed by shared code
761 *
762 * Returns:
763 * D0 Power Management Bits if EEPROM contents are valid, 0 otherwise
764 ******************************************************************************/
765uint8_t
766ixgb_get_ee_d0_power(struct ixgb_hw *hw)
767{
768 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
769
770 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
771 return (le16_to_cpu(ee_map->d0_power));
772
773 return(0);
774}
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 3fa113854eeb..9d026ed77ddd 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -98,10 +98,10 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
98static int 98static int
99ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 99ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
100{ 100{
101 struct ixgb_adapter *adapter = netdev->priv; 101 struct ixgb_adapter *adapter = netdev_priv(netdev);
102 102
103 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 103 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
104 ecmd->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 104 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
105 ecmd->port = PORT_FIBRE; 105 ecmd->port = PORT_FIBRE;
106 ecmd->transceiver = XCVR_EXTERNAL; 106 ecmd->transceiver = XCVR_EXTERNAL;
107 107
@@ -120,7 +120,7 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
120static int 120static int
121ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 121ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
122{ 122{
123 struct ixgb_adapter *adapter = netdev->priv; 123 struct ixgb_adapter *adapter = netdev_priv(netdev);
124 124
125 if(ecmd->autoneg == AUTONEG_ENABLE || 125 if(ecmd->autoneg == AUTONEG_ENABLE ||
126 ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL) 126 ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
@@ -130,6 +130,12 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
130 ixgb_down(adapter, TRUE); 130 ixgb_down(adapter, TRUE);
131 ixgb_reset(adapter); 131 ixgb_reset(adapter);
132 ixgb_up(adapter); 132 ixgb_up(adapter);
133 /* be optimistic about our link, since we were up before */
134 adapter->link_speed = 10000;
135 adapter->link_duplex = FULL_DUPLEX;
136 netif_carrier_on(netdev);
137 netif_wake_queue(netdev);
138
133 } else 139 } else
134 ixgb_reset(adapter); 140 ixgb_reset(adapter);
135 141
@@ -140,7 +146,7 @@ static void
140ixgb_get_pauseparam(struct net_device *netdev, 146ixgb_get_pauseparam(struct net_device *netdev,
141 struct ethtool_pauseparam *pause) 147 struct ethtool_pauseparam *pause)
142{ 148{
143 struct ixgb_adapter *adapter = netdev->priv; 149 struct ixgb_adapter *adapter = netdev_priv(netdev);
144 struct ixgb_hw *hw = &adapter->hw; 150 struct ixgb_hw *hw = &adapter->hw;
145 151
146 pause->autoneg = AUTONEG_DISABLE; 152 pause->autoneg = AUTONEG_DISABLE;
@@ -159,7 +165,7 @@ static int
159ixgb_set_pauseparam(struct net_device *netdev, 165ixgb_set_pauseparam(struct net_device *netdev,
160 struct ethtool_pauseparam *pause) 166 struct ethtool_pauseparam *pause)
161{ 167{
162 struct ixgb_adapter *adapter = netdev->priv; 168 struct ixgb_adapter *adapter = netdev_priv(netdev);
163 struct ixgb_hw *hw = &adapter->hw; 169 struct ixgb_hw *hw = &adapter->hw;
164 170
165 if(pause->autoneg == AUTONEG_ENABLE) 171 if(pause->autoneg == AUTONEG_ENABLE)
@@ -177,6 +183,11 @@ ixgb_set_pauseparam(struct net_device *netdev,
177 if(netif_running(adapter->netdev)) { 183 if(netif_running(adapter->netdev)) {
178 ixgb_down(adapter, TRUE); 184 ixgb_down(adapter, TRUE);
179 ixgb_up(adapter); 185 ixgb_up(adapter);
186 /* be optimistic about our link, since we were up before */
187 adapter->link_speed = 10000;
188 adapter->link_duplex = FULL_DUPLEX;
189 netif_carrier_on(netdev);
190 netif_wake_queue(netdev);
180 } else 191 } else
181 ixgb_reset(adapter); 192 ixgb_reset(adapter);
182 193
@@ -186,19 +197,26 @@ ixgb_set_pauseparam(struct net_device *netdev,
186static uint32_t 197static uint32_t
187ixgb_get_rx_csum(struct net_device *netdev) 198ixgb_get_rx_csum(struct net_device *netdev)
188{ 199{
189 struct ixgb_adapter *adapter = netdev->priv; 200 struct ixgb_adapter *adapter = netdev_priv(netdev);
201
190 return adapter->rx_csum; 202 return adapter->rx_csum;
191} 203}
192 204
193static int 205static int
194ixgb_set_rx_csum(struct net_device *netdev, uint32_t data) 206ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
195{ 207{
196 struct ixgb_adapter *adapter = netdev->priv; 208 struct ixgb_adapter *adapter = netdev_priv(netdev);
209
197 adapter->rx_csum = data; 210 adapter->rx_csum = data;
198 211
199 if(netif_running(netdev)) { 212 if(netif_running(netdev)) {
200 ixgb_down(adapter,TRUE); 213 ixgb_down(adapter,TRUE);
201 ixgb_up(adapter); 214 ixgb_up(adapter);
215 /* be optimistic about our link, since we were up before */
216 adapter->link_speed = 10000;
217 adapter->link_duplex = FULL_DUPLEX;
218 netif_carrier_on(netdev);
219 netif_wake_queue(netdev);
202 } else 220 } else
203 ixgb_reset(adapter); 221 ixgb_reset(adapter);
204 return 0; 222 return 0;
@@ -246,14 +264,15 @@ static void
246ixgb_get_regs(struct net_device *netdev, 264ixgb_get_regs(struct net_device *netdev,
247 struct ethtool_regs *regs, void *p) 265 struct ethtool_regs *regs, void *p)
248{ 266{
249 struct ixgb_adapter *adapter = netdev->priv; 267 struct ixgb_adapter *adapter = netdev_priv(netdev);
250 struct ixgb_hw *hw = &adapter->hw; 268 struct ixgb_hw *hw = &adapter->hw;
251 uint32_t *reg = p; 269 uint32_t *reg = p;
252 uint32_t *reg_start = reg; 270 uint32_t *reg_start = reg;
253 uint8_t i; 271 uint8_t i;
254 272
255 /* the 1 (one) below indicates an attempt at versioning, if the 273 /* the 1 (one) below indicates an attempt at versioning, if the
256 * interface in ethtool or the driver this 1 should be incremented */ 274 * interface in ethtool or the driver changes, this 1 should be
275 * incremented */
257 regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id; 276 regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
258 277
259 /* General Registers */ 278 /* General Registers */
@@ -283,7 +302,8 @@ ixgb_get_regs(struct net_device *netdev,
283 *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */ 302 *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */
284 *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */ 303 *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
285 304
286 for (i = 0; i < IXGB_RAR_ENTRIES; i++) { 305 /* there are 16 RAR entries in hardware, we only use 3 */
306 for(i = 0; i < 16; i++) {
287 *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */ 307 *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
288 *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */ 308 *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
289 } 309 }
@@ -391,7 +411,7 @@ static int
391ixgb_get_eeprom(struct net_device *netdev, 411ixgb_get_eeprom(struct net_device *netdev,
392 struct ethtool_eeprom *eeprom, uint8_t *bytes) 412 struct ethtool_eeprom *eeprom, uint8_t *bytes)
393{ 413{
394 struct ixgb_adapter *adapter = netdev->priv; 414 struct ixgb_adapter *adapter = netdev_priv(netdev);
395 struct ixgb_hw *hw = &adapter->hw; 415 struct ixgb_hw *hw = &adapter->hw;
396 uint16_t *eeprom_buff; 416 uint16_t *eeprom_buff;
397 int i, max_len, first_word, last_word; 417 int i, max_len, first_word, last_word;
@@ -439,7 +459,7 @@ static int
439ixgb_set_eeprom(struct net_device *netdev, 459ixgb_set_eeprom(struct net_device *netdev,
440 struct ethtool_eeprom *eeprom, uint8_t *bytes) 460 struct ethtool_eeprom *eeprom, uint8_t *bytes)
441{ 461{
442 struct ixgb_adapter *adapter = netdev->priv; 462 struct ixgb_adapter *adapter = netdev_priv(netdev);
443 struct ixgb_hw *hw = &adapter->hw; 463 struct ixgb_hw *hw = &adapter->hw;
444 uint16_t *eeprom_buff; 464 uint16_t *eeprom_buff;
445 void *ptr; 465 void *ptr;
@@ -497,7 +517,7 @@ static void
497ixgb_get_drvinfo(struct net_device *netdev, 517ixgb_get_drvinfo(struct net_device *netdev,
498 struct ethtool_drvinfo *drvinfo) 518 struct ethtool_drvinfo *drvinfo)
499{ 519{
500 struct ixgb_adapter *adapter = netdev->priv; 520 struct ixgb_adapter *adapter = netdev_priv(netdev);
501 521
502 strncpy(drvinfo->driver, ixgb_driver_name, 32); 522 strncpy(drvinfo->driver, ixgb_driver_name, 32);
503 strncpy(drvinfo->version, ixgb_driver_version, 32); 523 strncpy(drvinfo->version, ixgb_driver_version, 32);
@@ -512,7 +532,7 @@ static void
512ixgb_get_ringparam(struct net_device *netdev, 532ixgb_get_ringparam(struct net_device *netdev,
513 struct ethtool_ringparam *ring) 533 struct ethtool_ringparam *ring)
514{ 534{
515 struct ixgb_adapter *adapter = netdev->priv; 535 struct ixgb_adapter *adapter = netdev_priv(netdev);
516 struct ixgb_desc_ring *txdr = &adapter->tx_ring; 536 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
517 struct ixgb_desc_ring *rxdr = &adapter->rx_ring; 537 struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
518 538
@@ -530,7 +550,7 @@ static int
530ixgb_set_ringparam(struct net_device *netdev, 550ixgb_set_ringparam(struct net_device *netdev,
531 struct ethtool_ringparam *ring) 551 struct ethtool_ringparam *ring)
532{ 552{
533 struct ixgb_adapter *adapter = netdev->priv; 553 struct ixgb_adapter *adapter = netdev_priv(netdev);
534 struct ixgb_desc_ring *txdr = &adapter->tx_ring; 554 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
535 struct ixgb_desc_ring *rxdr = &adapter->rx_ring; 555 struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
536 struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new; 556 struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new;
@@ -573,6 +593,11 @@ ixgb_set_ringparam(struct net_device *netdev,
573 adapter->tx_ring = tx_new; 593 adapter->tx_ring = tx_new;
574 if((err = ixgb_up(adapter))) 594 if((err = ixgb_up(adapter)))
575 return err; 595 return err;
596 /* be optimistic about our link, since we were up before */
597 adapter->link_speed = 10000;
598 adapter->link_duplex = FULL_DUPLEX;
599 netif_carrier_on(netdev);
600 netif_wake_queue(netdev);
576 } 601 }
577 602
578 return 0; 603 return 0;
@@ -607,7 +632,7 @@ ixgb_led_blink_callback(unsigned long data)
607static int 632static int
608ixgb_phys_id(struct net_device *netdev, uint32_t data) 633ixgb_phys_id(struct net_device *netdev, uint32_t data)
609{ 634{
610 struct ixgb_adapter *adapter = netdev->priv; 635 struct ixgb_adapter *adapter = netdev_priv(netdev);
611 636
612 if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ)) 637 if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
613 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ); 638 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
@@ -643,7 +668,7 @@ static void
643ixgb_get_ethtool_stats(struct net_device *netdev, 668ixgb_get_ethtool_stats(struct net_device *netdev,
644 struct ethtool_stats *stats, uint64_t *data) 669 struct ethtool_stats *stats, uint64_t *data)
645{ 670{
646 struct ixgb_adapter *adapter = netdev->priv; 671 struct ixgb_adapter *adapter = netdev_priv(netdev);
647 int i; 672 int i;
648 673
649 ixgb_update_stats(adapter); 674 ixgb_update_stats(adapter);
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index 97898efe7cc8..8bcf31ed10c2 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -822,17 +822,8 @@ extern void ixgb_clear_vfta(struct ixgb_hw *hw);
822 822
823/* Access functions to eeprom data */ 823/* Access functions to eeprom data */
824void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr); 824void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr);
825uint16_t ixgb_get_ee_compatibility(struct ixgb_hw *hw);
826uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw); 825uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw);
827uint16_t ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw);
828uint16_t ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw);
829uint16_t ixgb_get_ee_subsystem_id(struct ixgb_hw *hw);
830uint16_t ixgb_get_ee_subvendor_id(struct ixgb_hw *hw);
831uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw); 826uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw);
832uint16_t ixgb_get_ee_vendor_id(struct ixgb_hw *hw);
833uint16_t ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw);
834uint8_t ixgb_get_ee_d3_power(struct ixgb_hw *hw);
835uint8_t ixgb_get_ee_d0_power(struct ixgb_hw *hw);
836boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw); 827boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw);
837uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index); 828uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index);
838 829
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 097b90ccf575..5c555373adbe 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -29,6 +29,11 @@
29#include "ixgb.h" 29#include "ixgb.h"
30 30
31/* Change Log 31/* Change Log
32 * 1.0.96 04/19/05
33 * - Make needlessly global code static -- bunk@stusta.de
34 * - ethtool cleanup -- shemminger@osdl.org
35 * - Support for MODULE_VERSION -- linville@tuxdriver.com
36 * - add skb_header_cloned check to the tso path -- herbert@apana.org.au
32 * 1.0.88 01/05/05 37 * 1.0.88 01/05/05
33 * - include fix to the condition that determines when to quit NAPI - Robert Olsson 38 * - include fix to the condition that determines when to quit NAPI - Robert Olsson
34 * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down 39 * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
@@ -47,10 +52,9 @@ char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
47#else 52#else
48#define DRIVERNAPI "-NAPI" 53#define DRIVERNAPI "-NAPI"
49#endif 54#endif
50 55#define DRV_VERSION "1.0.100-k2"DRIVERNAPI
51#define DRV_VERSION "1.0.95-k2"DRIVERNAPI
52char ixgb_driver_version[] = DRV_VERSION; 56char ixgb_driver_version[] = DRV_VERSION;
53char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; 57static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
54 58
55/* ixgb_pci_tbl - PCI Device ID Table 59/* ixgb_pci_tbl - PCI Device ID Table
56 * 60 *
@@ -145,10 +149,12 @@ MODULE_LICENSE("GPL");
145MODULE_VERSION(DRV_VERSION); 149MODULE_VERSION(DRV_VERSION);
146 150
147/* some defines for controlling descriptor fetches in h/w */ 151/* some defines for controlling descriptor fetches in h/w */
148#define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefech below this */
149#define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is
150 pushed this many descriptors from head */
151#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */ 152#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
153#define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefech below
154 * this */
155#define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail
156 * is pushed this many descriptors
157 * from head */
152 158
153/** 159/**
154 * ixgb_init_module - Driver Registration Routine 160 * ixgb_init_module - Driver Registration Routine
@@ -376,7 +382,7 @@ ixgb_probe(struct pci_dev *pdev,
376 SET_NETDEV_DEV(netdev, &pdev->dev); 382 SET_NETDEV_DEV(netdev, &pdev->dev);
377 383
378 pci_set_drvdata(pdev, netdev); 384 pci_set_drvdata(pdev, netdev);
379 adapter = netdev->priv; 385 adapter = netdev_priv(netdev);
380 adapter->netdev = netdev; 386 adapter->netdev = netdev;
381 adapter->pdev = pdev; 387 adapter->pdev = pdev;
382 adapter->hw.back = adapter; 388 adapter->hw.back = adapter;
@@ -512,7 +518,7 @@ static void __devexit
512ixgb_remove(struct pci_dev *pdev) 518ixgb_remove(struct pci_dev *pdev)
513{ 519{
514 struct net_device *netdev = pci_get_drvdata(pdev); 520 struct net_device *netdev = pci_get_drvdata(pdev);
515 struct ixgb_adapter *adapter = netdev->priv; 521 struct ixgb_adapter *adapter = netdev_priv(netdev);
516 522
517 unregister_netdev(netdev); 523 unregister_netdev(netdev);
518 524
@@ -583,7 +589,7 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
583static int 589static int
584ixgb_open(struct net_device *netdev) 590ixgb_open(struct net_device *netdev)
585{ 591{
586 struct ixgb_adapter *adapter = netdev->priv; 592 struct ixgb_adapter *adapter = netdev_priv(netdev);
587 int err; 593 int err;
588 594
589 /* allocate transmit descriptors */ 595 /* allocate transmit descriptors */
@@ -626,7 +632,7 @@ err_setup_tx:
626static int 632static int
627ixgb_close(struct net_device *netdev) 633ixgb_close(struct net_device *netdev)
628{ 634{
629 struct ixgb_adapter *adapter = netdev->priv; 635 struct ixgb_adapter *adapter = netdev_priv(netdev);
630 636
631 ixgb_down(adapter, TRUE); 637 ixgb_down(adapter, TRUE);
632 638
@@ -1017,7 +1023,7 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
1017static int 1023static int
1018ixgb_set_mac(struct net_device *netdev, void *p) 1024ixgb_set_mac(struct net_device *netdev, void *p)
1019{ 1025{
1020 struct ixgb_adapter *adapter = netdev->priv; 1026 struct ixgb_adapter *adapter = netdev_priv(netdev);
1021 struct sockaddr *addr = p; 1027 struct sockaddr *addr = p;
1022 1028
1023 if(!is_valid_ether_addr(addr->sa_data)) 1029 if(!is_valid_ether_addr(addr->sa_data))
@@ -1043,7 +1049,7 @@ ixgb_set_mac(struct net_device *netdev, void *p)
1043static void 1049static void
1044ixgb_set_multi(struct net_device *netdev) 1050ixgb_set_multi(struct net_device *netdev)
1045{ 1051{
1046 struct ixgb_adapter *adapter = netdev->priv; 1052 struct ixgb_adapter *adapter = netdev_priv(netdev);
1047 struct ixgb_hw *hw = &adapter->hw; 1053 struct ixgb_hw *hw = &adapter->hw;
1048 struct dev_mc_list *mc_ptr; 1054 struct dev_mc_list *mc_ptr;
1049 uint32_t rctl; 1055 uint32_t rctl;
@@ -1371,7 +1377,7 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1371static int 1377static int
1372ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1378ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1373{ 1379{
1374 struct ixgb_adapter *adapter = netdev->priv; 1380 struct ixgb_adapter *adapter = netdev_priv(netdev);
1375 unsigned int first; 1381 unsigned int first;
1376 unsigned int tx_flags = 0; 1382 unsigned int tx_flags = 0;
1377 unsigned long flags; 1383 unsigned long flags;
@@ -1425,7 +1431,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1425static void 1431static void
1426ixgb_tx_timeout(struct net_device *netdev) 1432ixgb_tx_timeout(struct net_device *netdev)
1427{ 1433{
1428 struct ixgb_adapter *adapter = netdev->priv; 1434 struct ixgb_adapter *adapter = netdev_priv(netdev);
1429 1435
1430 /* Do the reset outside of interrupt context */ 1436 /* Do the reset outside of interrupt context */
1431 schedule_work(&adapter->tx_timeout_task); 1437 schedule_work(&adapter->tx_timeout_task);
@@ -1434,7 +1440,7 @@ ixgb_tx_timeout(struct net_device *netdev)
1434static void 1440static void
1435ixgb_tx_timeout_task(struct net_device *netdev) 1441ixgb_tx_timeout_task(struct net_device *netdev)
1436{ 1442{
1437 struct ixgb_adapter *adapter = netdev->priv; 1443 struct ixgb_adapter *adapter = netdev_priv(netdev);
1438 1444
1439 ixgb_down(adapter, TRUE); 1445 ixgb_down(adapter, TRUE);
1440 ixgb_up(adapter); 1446 ixgb_up(adapter);
@@ -1451,7 +1457,7 @@ ixgb_tx_timeout_task(struct net_device *netdev)
1451static struct net_device_stats * 1457static struct net_device_stats *
1452ixgb_get_stats(struct net_device *netdev) 1458ixgb_get_stats(struct net_device *netdev)
1453{ 1459{
1454 struct ixgb_adapter *adapter = netdev->priv; 1460 struct ixgb_adapter *adapter = netdev_priv(netdev);
1455 1461
1456 return &adapter->net_stats; 1462 return &adapter->net_stats;
1457} 1463}
@@ -1467,7 +1473,7 @@ ixgb_get_stats(struct net_device *netdev)
1467static int 1473static int
1468ixgb_change_mtu(struct net_device *netdev, int new_mtu) 1474ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1469{ 1475{
1470 struct ixgb_adapter *adapter = netdev->priv; 1476 struct ixgb_adapter *adapter = netdev_priv(netdev);
1471 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; 1477 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1472 int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; 1478 int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1473 1479
@@ -1522,7 +1528,8 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1522 1528
1523 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); 1529 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1524 /* fix up multicast stats by removing broadcasts */ 1530 /* fix up multicast stats by removing broadcasts */
1525 multi -= bcast; 1531 if(multi >= bcast)
1532 multi -= bcast;
1526 1533
1527 adapter->stats.mprcl += (multi & 0xFFFFFFFF); 1534 adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1528 adapter->stats.mprch += (multi >> 32); 1535 adapter->stats.mprch += (multi >> 32);
@@ -1641,7 +1648,7 @@ static irqreturn_t
1641ixgb_intr(int irq, void *data, struct pt_regs *regs) 1648ixgb_intr(int irq, void *data, struct pt_regs *regs)
1642{ 1649{
1643 struct net_device *netdev = data; 1650 struct net_device *netdev = data;
1644 struct ixgb_adapter *adapter = netdev->priv; 1651 struct ixgb_adapter *adapter = netdev_priv(netdev);
1645 struct ixgb_hw *hw = &adapter->hw; 1652 struct ixgb_hw *hw = &adapter->hw;
1646 uint32_t icr = IXGB_READ_REG(hw, ICR); 1653 uint32_t icr = IXGB_READ_REG(hw, ICR);
1647#ifndef CONFIG_IXGB_NAPI 1654#ifndef CONFIG_IXGB_NAPI
@@ -1688,7 +1695,7 @@ ixgb_intr(int irq, void *data, struct pt_regs *regs)
1688static int 1695static int
1689ixgb_clean(struct net_device *netdev, int *budget) 1696ixgb_clean(struct net_device *netdev, int *budget)
1690{ 1697{
1691 struct ixgb_adapter *adapter = netdev->priv; 1698 struct ixgb_adapter *adapter = netdev_priv(netdev);
1692 int work_to_do = min(*budget, netdev->quota); 1699 int work_to_do = min(*budget, netdev->quota);
1693 int tx_cleaned; 1700 int tx_cleaned;
1694 int work_done = 0; 1701 int work_done = 0;
@@ -2017,7 +2024,7 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
2017static void 2024static void
2018ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 2025ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2019{ 2026{
2020 struct ixgb_adapter *adapter = netdev->priv; 2027 struct ixgb_adapter *adapter = netdev_priv(netdev);
2021 uint32_t ctrl, rctl; 2028 uint32_t ctrl, rctl;
2022 2029
2023 ixgb_irq_disable(adapter); 2030 ixgb_irq_disable(adapter);
@@ -2055,7 +2062,7 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2055static void 2062static void
2056ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) 2063ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2057{ 2064{
2058 struct ixgb_adapter *adapter = netdev->priv; 2065 struct ixgb_adapter *adapter = netdev_priv(netdev);
2059 uint32_t vfta, index; 2066 uint32_t vfta, index;
2060 2067
2061 /* add VID to filter table */ 2068 /* add VID to filter table */
@@ -2069,7 +2076,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2069static void 2076static void
2070ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) 2077ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2071{ 2078{
2072 struct ixgb_adapter *adapter = netdev->priv; 2079 struct ixgb_adapter *adapter = netdev_priv(netdev);
2073 uint32_t vfta, index; 2080 uint32_t vfta, index;
2074 2081
2075 ixgb_irq_disable(adapter); 2082 ixgb_irq_disable(adapter);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 1f61f0cc95d8..690a1aae0b34 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -68,6 +68,7 @@ static DEFINE_PER_CPU(struct net_device_stats, loopback_stats);
68 * of largesending device modulo TCP checksum, which is ignored for loopback. 68 * of largesending device modulo TCP checksum, which is ignored for loopback.
69 */ 69 */
70 70
71#ifdef LOOPBACK_TSO
71static void emulate_large_send_offload(struct sk_buff *skb) 72static void emulate_large_send_offload(struct sk_buff *skb)
72{ 73{
73 struct iphdr *iph = skb->nh.iph; 74 struct iphdr *iph = skb->nh.iph;
@@ -119,6 +120,7 @@ static void emulate_large_send_offload(struct sk_buff *skb)
119 120
120 dev_kfree_skb(skb); 121 dev_kfree_skb(skb);
121} 122}
123#endif /* LOOPBACK_TSO */
122 124
123/* 125/*
124 * The higher levels take care of making this non-reentrant (it's 126 * The higher levels take care of making this non-reentrant (it's
@@ -130,12 +132,13 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
130 132
131 skb_orphan(skb); 133 skb_orphan(skb);
132 134
133 skb->protocol=eth_type_trans(skb,dev); 135 skb->protocol = eth_type_trans(skb,dev);
134 skb->dev=dev; 136 skb->dev = dev;
135#ifndef LOOPBACK_MUST_CHECKSUM 137#ifndef LOOPBACK_MUST_CHECKSUM
136 skb->ip_summed = CHECKSUM_UNNECESSARY; 138 skb->ip_summed = CHECKSUM_UNNECESSARY;
137#endif 139#endif
138 140
141#ifdef LOOPBACK_TSO
139 if (skb_shinfo(skb)->tso_size) { 142 if (skb_shinfo(skb)->tso_size) {
140 BUG_ON(skb->protocol != htons(ETH_P_IP)); 143 BUG_ON(skb->protocol != htons(ETH_P_IP));
141 BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); 144 BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
@@ -143,14 +146,14 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
143 emulate_large_send_offload(skb); 146 emulate_large_send_offload(skb);
144 return 0; 147 return 0;
145 } 148 }
146 149#endif
147 dev->last_rx = jiffies; 150 dev->last_rx = jiffies;
148 151
149 lb_stats = &per_cpu(loopback_stats, get_cpu()); 152 lb_stats = &per_cpu(loopback_stats, get_cpu());
150 lb_stats->rx_bytes += skb->len; 153 lb_stats->rx_bytes += skb->len;
151 lb_stats->tx_bytes += skb->len; 154 lb_stats->tx_bytes = lb_stats->rx_bytes;
152 lb_stats->rx_packets++; 155 lb_stats->rx_packets++;
153 lb_stats->tx_packets++; 156 lb_stats->tx_packets = lb_stats->rx_packets;
154 put_cpu(); 157 put_cpu();
155 158
156 netif_rx(skb); 159 netif_rx(skb);
@@ -208,9 +211,12 @@ struct net_device loopback_dev = {
208 .type = ARPHRD_LOOPBACK, /* 0x0001*/ 211 .type = ARPHRD_LOOPBACK, /* 0x0001*/
209 .rebuild_header = eth_rebuild_header, 212 .rebuild_header = eth_rebuild_header,
210 .flags = IFF_LOOPBACK, 213 .flags = IFF_LOOPBACK,
211 .features = NETIF_F_SG|NETIF_F_FRAGLIST 214 .features = NETIF_F_SG | NETIF_F_FRAGLIST
212 |NETIF_F_NO_CSUM|NETIF_F_HIGHDMA 215#ifdef LOOPBACK_TSO
213 |NETIF_F_LLTX, 216 | NETIF_F_TSO
217#endif
218 | NETIF_F_NO_CSUM | NETIF_F_HIGHDMA
219 | NETIF_F_LLTX,
214 .ethtool_ops = &loopback_ethtool_ops, 220 .ethtool_ops = &loopback_ethtool_ops,
215}; 221};
216 222
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 4a391ea0f58a..a1ac4bd1696e 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -486,9 +486,9 @@ struct netdrv_private {
486MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>"); 486MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
487MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver"); 487MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver");
488MODULE_LICENSE("GPL"); 488MODULE_LICENSE("GPL");
489MODULE_PARM (multicast_filter_limit, "i"); 489module_param(multicast_filter_limit, int, 0);
490MODULE_PARM (max_interrupt_work, "i"); 490module_param(max_interrupt_work, int, 0);
491MODULE_PARM (media, "1-" __MODULE_STRING(8) "i"); 491module_param_array(media, int, NULL, 0);
492MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses"); 492MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses");
493MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt"); 493MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt");
494MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex"); 494MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex");
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
new file mode 100644
index 000000000000..6450bd71deb4
--- /dev/null
+++ b/drivers/net/phy/Kconfig
@@ -0,0 +1,49 @@
1#
2# PHY Layer Configuration
3#
4
5menu "PHY device support"
6
7config PHYLIB
8 tristate "PHY Device support and infrastructure"
9 depends on NET_ETHERNET
10 help
11 Ethernet controllers are usually attached to PHY
12 devices. This option provides infrastructure for
13 managing PHY devices.
14
15comment "MII PHY device drivers"
16 depends on PHYLIB
17
18config MARVELL_PHY
19 tristate "Drivers for Marvell PHYs"
20 depends on PHYLIB
21 ---help---
22 Currently has a driver for the 88E1011S
23
24config DAVICOM_PHY
25 tristate "Drivers for Davicom PHYs"
26 depends on PHYLIB
27 ---help---
28 Currently supports dm9161e and dm9131
29
30config QSEMI_PHY
31 tristate "Drivers for Quality Semiconductor PHYs"
32 depends on PHYLIB
33 ---help---
34 Currently supports the qs6612
35
36config LXT_PHY
37 tristate "Drivers for the Intel LXT PHYs"
38 depends on PHYLIB
39 ---help---
40 Currently supports the lxt970, lxt971
41
42config CICADA_PHY
43 tristate "Drivers for the Cicada PHYs"
44 depends on PHYLIB
45 ---help---
46 Currently supports the cis8204
47
48endmenu
49
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
new file mode 100644
index 000000000000..fb7cb385a659
--- /dev/null
+++ b/drivers/net/phy/Makefile
@@ -0,0 +1,9 @@
1# Makefile for Linux PHY drivers
2
3libphy-objs := phy.o phy_device.o mdio_bus.o
4
5obj-$(CONFIG_MARVELL_PHY) += libphy.o marvell.o
6obj-$(CONFIG_DAVICOM_PHY) += libphy.o davicom.o
7obj-$(CONFIG_CICADA_PHY) += libphy.o cicada.o
8obj-$(CONFIG_LXT_PHY) += libphy.o lxt.o
9obj-$(CONFIG_QSEMI_PHY) += libphy.o qsemi.o
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
new file mode 100644
index 000000000000..c47fb2ecd147
--- /dev/null
+++ b/drivers/net/phy/cicada.c
@@ -0,0 +1,134 @@
1/*
2 * drivers/net/phy/cicada.c
3 *
4 * Driver for Cicada PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41/* Cicada Extended Control Register 1 */
42#define MII_CIS8201_EXT_CON1 0x17
43#define MII_CIS8201_EXTCON1_INIT 0x0000
44
45/* Cicada Interrupt Mask Register */
46#define MII_CIS8201_IMASK 0x19
47#define MII_CIS8201_IMASK_IEN 0x8000
48#define MII_CIS8201_IMASK_SPEED 0x4000
49#define MII_CIS8201_IMASK_LINK 0x2000
50#define MII_CIS8201_IMASK_DUPLEX 0x1000
51#define MII_CIS8201_IMASK_MASK 0xf000
52
53/* Cicada Interrupt Status Register */
54#define MII_CIS8201_ISTAT 0x1a
55#define MII_CIS8201_ISTAT_STATUS 0x8000
56#define MII_CIS8201_ISTAT_SPEED 0x4000
57#define MII_CIS8201_ISTAT_LINK 0x2000
58#define MII_CIS8201_ISTAT_DUPLEX 0x1000
59
60/* Cicada Auxiliary Control/Status Register */
61#define MII_CIS8201_AUX_CONSTAT 0x1c
62#define MII_CIS8201_AUXCONSTAT_INIT 0x0004
63#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020
64#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018
65#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010
66#define MII_CIS8201_AUXCONSTAT_100 0x0008
67
68MODULE_DESCRIPTION("Cicadia PHY driver");
69MODULE_AUTHOR("Andy Fleming");
70MODULE_LICENSE("GPL");
71
72static int cis820x_config_init(struct phy_device *phydev)
73{
74 int err;
75
76 err = phy_write(phydev, MII_CIS8201_AUX_CONSTAT,
77 MII_CIS8201_AUXCONSTAT_INIT);
78
79 if (err < 0)
80 return err;
81
82 err = phy_write(phydev, MII_CIS8201_EXT_CON1,
83 MII_CIS8201_EXTCON1_INIT);
84
85 return err;
86}
87
88static int cis820x_ack_interrupt(struct phy_device *phydev)
89{
90 int err = phy_read(phydev, MII_CIS8201_ISTAT);
91
92 return (err < 0) ? err : 0;
93}
94
95static int cis820x_config_intr(struct phy_device *phydev)
96{
97 int err;
98
99 if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
100 err = phy_write(phydev, MII_CIS8201_IMASK,
101 MII_CIS8201_IMASK_MASK);
102 else
103 err = phy_write(phydev, MII_CIS8201_IMASK, 0);
104
105 return err;
106}
107
108/* Cicada 820x */
109static struct phy_driver cis8204_driver = {
110 .phy_id = 0x000fc440,
111 .name = "Cicada Cis8204",
112 .phy_id_mask = 0x000fffc0,
113 .features = PHY_GBIT_FEATURES,
114 .flags = PHY_HAS_INTERRUPT,
115 .config_init = &cis820x_config_init,
116 .config_aneg = &genphy_config_aneg,
117 .read_status = &genphy_read_status,
118 .ack_interrupt = &cis820x_ack_interrupt,
119 .config_intr = &cis820x_config_intr,
120 .driver = { .owner = THIS_MODULE,},
121};
122
123static int __init cis8204_init(void)
124{
125 return phy_driver_register(&cis8204_driver);
126}
127
128static void __exit cis8204_exit(void)
129{
130 phy_driver_unregister(&cis8204_driver);
131}
132
133module_init(cis8204_init);
134module_exit(cis8204_exit);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
new file mode 100644
index 000000000000..6caf499fae32
--- /dev/null
+++ b/drivers/net/phy/davicom.c
@@ -0,0 +1,195 @@
1/*
2 * drivers/net/phy/davicom.c
3 *
4 * Driver for Davicom PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#define MII_DM9161_SCR 0x10
42#define MII_DM9161_SCR_INIT 0x0610
43
44/* DM9161 Interrupt Register */
45#define MII_DM9161_INTR 0x15
46#define MII_DM9161_INTR_PEND 0x8000
47#define MII_DM9161_INTR_DPLX_MASK 0x0800
48#define MII_DM9161_INTR_SPD_MASK 0x0400
49#define MII_DM9161_INTR_LINK_MASK 0x0200
50#define MII_DM9161_INTR_MASK 0x0100
51#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
52#define MII_DM9161_INTR_SPD_CHANGE 0x0008
53#define MII_DM9161_INTR_LINK_CHANGE 0x0004
54#define MII_DM9161_INTR_INIT 0x0000
55#define MII_DM9161_INTR_STOP \
56(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
57 | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
58
59/* DM9161 10BT Configuration/Status */
60#define MII_DM9161_10BTCSR 0x12
61#define MII_DM9161_10BTCSR_INIT 0x7800
62
63MODULE_DESCRIPTION("Davicom PHY driver");
64MODULE_AUTHOR("Andy Fleming");
65MODULE_LICENSE("GPL");
66
67
68#define DM9161_DELAY 1
69static int dm9161_config_intr(struct phy_device *phydev)
70{
71 int temp;
72
73 temp = phy_read(phydev, MII_DM9161_INTR);
74
75 if (temp < 0)
76 return temp;
77
78 if(PHY_INTERRUPT_ENABLED == phydev->interrupts )
79 temp &= ~(MII_DM9161_INTR_STOP);
80 else
81 temp |= MII_DM9161_INTR_STOP;
82
83 temp = phy_write(phydev, MII_DM9161_INTR, temp);
84
85 return temp;
86}
87
88static int dm9161_config_aneg(struct phy_device *phydev)
89{
90 int err;
91
92 /* Isolate the PHY */
93 err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);
94
95 if (err < 0)
96 return err;
97
98 /* Configure the new settings */
99 err = genphy_config_aneg(phydev);
100
101 if (err < 0)
102 return err;
103
104 return 0;
105}
106
107static int dm9161_config_init(struct phy_device *phydev)
108{
109 int err;
110
111 /* Isolate the PHY */
112 err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);
113
114 if (err < 0)
115 return err;
116
117 /* Do not bypass the scrambler/descrambler */
118 err = phy_write(phydev, MII_DM9161_SCR, MII_DM9161_SCR_INIT);
119
120 if (err < 0)
121 return err;
122
123 /* Clear 10BTCSR to default */
124 err = phy_write(phydev, MII_DM9161_10BTCSR, MII_DM9161_10BTCSR_INIT);
125
126 if (err < 0)
127 return err;
128
129 /* Reconnect the PHY, and enable Autonegotiation */
130 err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
131
132 if (err < 0)
133 return err;
134
135 return 0;
136}
137
138static int dm9161_ack_interrupt(struct phy_device *phydev)
139{
140 int err = phy_read(phydev, MII_DM9161_INTR);
141
142 return (err < 0) ? err : 0;
143}
144
145static struct phy_driver dm9161_driver = {
146 .phy_id = 0x0181b880,
147 .name = "Davicom DM9161E",
148 .phy_id_mask = 0x0ffffff0,
149 .features = PHY_BASIC_FEATURES,
150 .config_init = dm9161_config_init,
151 .config_aneg = dm9161_config_aneg,
152 .read_status = genphy_read_status,
153 .driver = { .owner = THIS_MODULE,},
154};
155
156static struct phy_driver dm9131_driver = {
157 .phy_id = 0x00181b80,
158 .name = "Davicom DM9131",
159 .phy_id_mask = 0x0ffffff0,
160 .features = PHY_BASIC_FEATURES,
161 .flags = PHY_HAS_INTERRUPT,
162 .config_aneg = genphy_config_aneg,
163 .read_status = genphy_read_status,
164 .ack_interrupt = dm9161_ack_interrupt,
165 .config_intr = dm9161_config_intr,
166 .driver = { .owner = THIS_MODULE,},
167};
168
169static int __init davicom_init(void)
170{
171 int ret;
172
173 ret = phy_driver_register(&dm9161_driver);
174 if (ret)
175 goto err1;
176
177 ret = phy_driver_register(&dm9131_driver);
178 if (ret)
179 goto err2;
180 return 0;
181
182 err2:
183 phy_driver_unregister(&dm9161_driver);
184 err1:
185 return ret;
186}
187
188static void __exit davicom_exit(void)
189{
190 phy_driver_unregister(&dm9161_driver);
191 phy_driver_unregister(&dm9131_driver);
192}
193
194module_init(davicom_init);
195module_exit(davicom_exit);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
new file mode 100644
index 000000000000..4c840448ec86
--- /dev/null
+++ b/drivers/net/phy/lxt.c
@@ -0,0 +1,179 @@
1/*
2 * drivers/net/phy/lxt.c
3 *
4 * Driver for Intel LXT PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41/* The Level one LXT970 is used by many boards */
42
43#define MII_LXT970_IER 17 /* Interrupt Enable Register */
44
45#define MII_LXT970_IER_IEN 0x0002
46
47#define MII_LXT970_ISR 18 /* Interrupt Status Register */
48
49#define MII_LXT970_CONFIG 19 /* Configuration Register */
50
51/* ------------------------------------------------------------------------- */
52/* The Level one LXT971 is used on some of my custom boards */
53
54/* register definitions for the 971 */
55#define MII_LXT971_IER 18 /* Interrupt Enable Register */
56#define MII_LXT971_IER_IEN 0x00f2
57
58#define MII_LXT971_ISR 19 /* Interrupt Status Register */
59
60
61MODULE_DESCRIPTION("Intel LXT PHY driver");
62MODULE_AUTHOR("Andy Fleming");
63MODULE_LICENSE("GPL");
64
65static int lxt970_ack_interrupt(struct phy_device *phydev)
66{
67 int err;
68
69 err = phy_read(phydev, MII_BMSR);
70
71 if (err < 0)
72 return err;
73
74 err = phy_read(phydev, MII_LXT970_ISR);
75
76 if (err < 0)
77 return err;
78
79 return 0;
80}
81
82static int lxt970_config_intr(struct phy_device *phydev)
83{
84 int err;
85
86 if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
87 err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
88 else
89 err = phy_write(phydev, MII_LXT970_IER, 0);
90
91 return err;
92}
93
94static int lxt970_config_init(struct phy_device *phydev)
95{
96 int err;
97
98 err = phy_write(phydev, MII_LXT970_CONFIG, 0);
99
100 return err;
101}
102
103
104static int lxt971_ack_interrupt(struct phy_device *phydev)
105{
106 int err = phy_read(phydev, MII_LXT971_ISR);
107
108 if (err < 0)
109 return err;
110
111 return 0;
112}
113
114static int lxt971_config_intr(struct phy_device *phydev)
115{
116 int err;
117
118 if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
119 err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
120 else
121 err = phy_write(phydev, MII_LXT971_IER, 0);
122
123 return err;
124}
125
126static struct phy_driver lxt970_driver = {
127 .phy_id = 0x07810000,
128 .name = "LXT970",
129 .phy_id_mask = 0x0fffffff,
130 .features = PHY_BASIC_FEATURES,
131 .flags = PHY_HAS_INTERRUPT,
132 .config_init = lxt970_config_init,
133 .config_aneg = genphy_config_aneg,
134 .read_status = genphy_read_status,
135 .ack_interrupt = lxt970_ack_interrupt,
136 .config_intr = lxt970_config_intr,
137 .driver = { .owner = THIS_MODULE,},
138};
139
140static struct phy_driver lxt971_driver = {
141 .phy_id = 0x0001378e,
142 .name = "LXT971",
143 .phy_id_mask = 0x0fffffff,
144 .features = PHY_BASIC_FEATURES,
145 .flags = PHY_HAS_INTERRUPT,
146 .config_aneg = genphy_config_aneg,
147 .read_status = genphy_read_status,
148 .ack_interrupt = lxt971_ack_interrupt,
149 .config_intr = lxt971_config_intr,
150 .driver = { .owner = THIS_MODULE,},
151};
152
153static int __init lxt_init(void)
154{
155 int ret;
156
157 ret = phy_driver_register(&lxt970_driver);
158 if (ret)
159 goto err1;
160
161 ret = phy_driver_register(&lxt971_driver);
162 if (ret)
163 goto err2;
164 return 0;
165
166 err2:
167 phy_driver_unregister(&lxt970_driver);
168 err1:
169 return ret;
170}
171
172static void __exit lxt_exit(void)
173{
174 phy_driver_unregister(&lxt970_driver);
175 phy_driver_unregister(&lxt971_driver);
176}
177
178module_init(lxt_init);
179module_exit(lxt_exit);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
new file mode 100644
index 000000000000..4a72b025006b
--- /dev/null
+++ b/drivers/net/phy/marvell.c
@@ -0,0 +1,140 @@
1/*
2 * drivers/net/phy/marvell.c
3 *
4 * Driver for Marvell PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#define MII_M1011_IEVENT 0x13
42#define MII_M1011_IEVENT_CLEAR 0x0000
43
44#define MII_M1011_IMASK 0x12
45#define MII_M1011_IMASK_INIT 0x6400
46#define MII_M1011_IMASK_CLEAR 0x0000
47
48MODULE_DESCRIPTION("Marvell PHY driver");
49MODULE_AUTHOR("Andy Fleming");
50MODULE_LICENSE("GPL");
51
52static int marvell_ack_interrupt(struct phy_device *phydev)
53{
54 int err;
55
56 /* Clear the interrupts by reading the reg */
57 err = phy_read(phydev, MII_M1011_IEVENT);
58
59 if (err < 0)
60 return err;
61
62 return 0;
63}
64
65static int marvell_config_intr(struct phy_device *phydev)
66{
67 int err;
68
69 if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
70 err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
71 else
72 err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
73
74 return err;
75}
76
77static int marvell_config_aneg(struct phy_device *phydev)
78{
79 int err;
80
81 /* The Marvell PHY has an errata which requires
82 * that certain registers get written in order
83 * to restart autonegotiation */
84 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
85
86 if (err < 0)
87 return err;
88
89 err = phy_write(phydev, 0x1d, 0x1f);
90 if (err < 0)
91 return err;
92
93 err = phy_write(phydev, 0x1e, 0x200c);
94 if (err < 0)
95 return err;
96
97 err = phy_write(phydev, 0x1d, 0x5);
98 if (err < 0)
99 return err;
100
101 err = phy_write(phydev, 0x1e, 0);
102 if (err < 0)
103 return err;
104
105 err = phy_write(phydev, 0x1e, 0x100);
106 if (err < 0)
107 return err;
108
109
110 err = genphy_config_aneg(phydev);
111
112 return err;
113}
114
115
116static struct phy_driver m88e1101_driver = {
117 .phy_id = 0x01410c00,
118 .phy_id_mask = 0xffffff00,
119 .name = "Marvell 88E1101",
120 .features = PHY_GBIT_FEATURES,
121 .flags = PHY_HAS_INTERRUPT,
122 .config_aneg = &marvell_config_aneg,
123 .read_status = &genphy_read_status,
124 .ack_interrupt = &marvell_ack_interrupt,
125 .config_intr = &marvell_config_intr,
126 .driver = { .owner = THIS_MODULE,},
127};
128
129static int __init marvell_init(void)
130{
131 return phy_driver_register(&m88e1101_driver);
132}
133
134static void __exit marvell_exit(void)
135{
136 phy_driver_unregister(&m88e1101_driver);
137}
138
139module_init(marvell_init);
140module_exit(marvell_exit);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
new file mode 100644
index 000000000000..d5a05be28818
--- /dev/null
+++ b/drivers/net/phy/mdio_bus.c
@@ -0,0 +1,99 @@
1/*
2 * drivers/net/phy/mdio_bus.c
3 *
4 * MDIO Bus interface
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41/* mdio_bus_match
42 *
43 * description: Given a PHY device, and a PHY driver, return 1 if
44 * the driver supports the device. Otherwise, return 0
45 */
46static int mdio_bus_match(struct device *dev, struct device_driver *drv)
47{
48 struct phy_device *phydev = to_phy_device(dev);
49 struct phy_driver *phydrv = to_phy_driver(drv);
50
51 return (phydrv->phy_id == (phydev->phy_id & phydrv->phy_id_mask));
52}
53
54/* Suspend and resume. Copied from platform_suspend and
55 * platform_resume
56 */
57static int mdio_bus_suspend(struct device * dev, u32 state)
58{
59 int ret = 0;
60 struct device_driver *drv = dev->driver;
61
62 if (drv && drv->suspend) {
63 ret = drv->suspend(dev, state, SUSPEND_DISABLE);
64 if (ret == 0)
65 ret = drv->suspend(dev, state, SUSPEND_SAVE_STATE);
66 if (ret == 0)
67 ret = drv->suspend(dev, state, SUSPEND_POWER_DOWN);
68 }
69 return ret;
70}
71
72static int mdio_bus_resume(struct device * dev)
73{
74 int ret = 0;
75 struct device_driver *drv = dev->driver;
76
77 if (drv && drv->resume) {
78 ret = drv->resume(dev, RESUME_POWER_ON);
79 if (ret == 0)
80 ret = drv->resume(dev, RESUME_RESTORE_STATE);
81 if (ret == 0)
82 ret = drv->resume(dev, RESUME_ENABLE);
83 }
84 return ret;
85}
86
87struct bus_type mdio_bus_type = {
88 .name = "mdio_bus",
89 .match = mdio_bus_match,
90 .suspend = mdio_bus_suspend,
91 .resume = mdio_bus_resume,
92};
93
94int __init mdio_bus_init(void)
95{
96 return bus_register(&mdio_bus_type);
97}
98
99
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
new file mode 100644
index 000000000000..d3e43631b89b
--- /dev/null
+++ b/drivers/net/phy/phy.c
@@ -0,0 +1,690 @@
1/*
2 * drivers/net/phy/phy.c
3 *
4 * Framework for configuring and reading PHY devices
5 * Based on code in sungem_phy.c and gianfar_phy.c
6 *
7 * Author: Andy Fleming
8 *
9 * Copyright (c) 2004 Freescale Semiconductor, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 */
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/unistd.h>
23#include <linux/slab.h>
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/spinlock.h>
31#include <linux/mm.h>
32#include <linux/module.h>
33#include <linux/version.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/phy.h>
37
38#include <asm/io.h>
39#include <asm/irq.h>
40#include <asm/uaccess.h>
41
42static void phy_timer(unsigned long data);
43static int phy_disable_interrupts(struct phy_device *phydev);
44static void phy_sanitize_settings(struct phy_device *phydev);
45static int phy_stop_interrupts(struct phy_device *phydev);
46
47
48/* Convenience functions for reading/writing a given PHY
49 * register. They MUST NOT be called from interrupt context,
50 * because the bus read/write functions may wait for an interrupt
51 * to conclude the operation. */
52int phy_read(struct phy_device *phydev, u16 regnum)
53{
54 int retval;
55 struct mii_bus *bus = phydev->bus;
56
57 spin_lock_bh(&bus->mdio_lock);
58 retval = bus->read(bus, phydev->addr, regnum);
59 spin_unlock_bh(&bus->mdio_lock);
60
61 return retval;
62}
63EXPORT_SYMBOL(phy_read);
64
65int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
66{
67 int err;
68 struct mii_bus *bus = phydev->bus;
69
70 spin_lock_bh(&bus->mdio_lock);
71 err = bus->write(bus, phydev->addr, regnum, val);
72 spin_unlock_bh(&bus->mdio_lock);
73
74 return err;
75}
76EXPORT_SYMBOL(phy_write);
77
78
79int phy_clear_interrupt(struct phy_device *phydev)
80{
81 int err = 0;
82
83 if (phydev->drv->ack_interrupt)
84 err = phydev->drv->ack_interrupt(phydev);
85
86 return err;
87}
88
89
90int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
91{
92 int err = 0;
93
94 phydev->interrupts = interrupts;
95 if (phydev->drv->config_intr)
96 err = phydev->drv->config_intr(phydev);
97
98 return err;
99}
100
101
102/* phy_aneg_done
103 *
104 * description: Reads the status register and returns 0 either if
105 * auto-negotiation is incomplete, or if there was an error.
106 * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
107 */
108static inline int phy_aneg_done(struct phy_device *phydev)
109{
110 int retval;
111
112 retval = phy_read(phydev, MII_BMSR);
113
114 return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
115}
116
117/* phy_start_aneg
118 *
119 * description: Calls the PHY driver's config_aneg, and then
120 * sets the PHY state to PHY_AN if auto-negotiation is enabled,
121 * and to PHY_FORCING if auto-negotiation is disabled. Unless
122 * the PHY is currently HALTED.
123 */
124static int phy_start_aneg(struct phy_device *phydev)
125{
126 int err;
127
128 spin_lock(&phydev->lock);
129
130 if (AUTONEG_DISABLE == phydev->autoneg)
131 phy_sanitize_settings(phydev);
132
133 err = phydev->drv->config_aneg(phydev);
134
135 if (err < 0)
136 goto out_unlock;
137
138 if (phydev->state != PHY_HALTED) {
139 if (AUTONEG_ENABLE == phydev->autoneg) {
140 phydev->state = PHY_AN;
141 phydev->link_timeout = PHY_AN_TIMEOUT;
142 } else {
143 phydev->state = PHY_FORCING;
144 phydev->link_timeout = PHY_FORCE_TIMEOUT;
145 }
146 }
147
148out_unlock:
149 spin_unlock(&phydev->lock);
150 return err;
151}
152
153/* A structure for mapping a particular speed and duplex
154 * combination to a particular SUPPORTED and ADVERTISED value */
155struct phy_setting {
156 int speed;
157 int duplex;
158 u32 setting;
159};
160
161/* A mapping of all SUPPORTED settings to speed/duplex */
162static struct phy_setting settings[] = {
163 {
164 .speed = 10000,
165 .duplex = DUPLEX_FULL,
166 .setting = SUPPORTED_10000baseT_Full,
167 },
168 {
169 .speed = SPEED_1000,
170 .duplex = DUPLEX_FULL,
171 .setting = SUPPORTED_1000baseT_Full,
172 },
173 {
174 .speed = SPEED_1000,
175 .duplex = DUPLEX_HALF,
176 .setting = SUPPORTED_1000baseT_Half,
177 },
178 {
179 .speed = SPEED_100,
180 .duplex = DUPLEX_FULL,
181 .setting = SUPPORTED_100baseT_Full,
182 },
183 {
184 .speed = SPEED_100,
185 .duplex = DUPLEX_HALF,
186 .setting = SUPPORTED_100baseT_Half,
187 },
188 {
189 .speed = SPEED_10,
190 .duplex = DUPLEX_FULL,
191 .setting = SUPPORTED_10baseT_Full,
192 },
193 {
194 .speed = SPEED_10,
195 .duplex = DUPLEX_HALF,
196 .setting = SUPPORTED_10baseT_Half,
197 },
198};
199
200#define MAX_NUM_SETTINGS (sizeof(settings)/sizeof(struct phy_setting))
201
202/* phy_find_setting
203 *
204 * description: Searches the settings array for the setting which
205 * matches the desired speed and duplex, and returns the index
206 * of that setting. Returns the index of the last setting if
207 * none of the others match.
208 */
209static inline int phy_find_setting(int speed, int duplex)
210{
211 int idx = 0;
212
213 while (idx < ARRAY_SIZE(settings) &&
214 (settings[idx].speed != speed ||
215 settings[idx].duplex != duplex))
216 idx++;
217
218 return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
219}
220
221/* phy_find_valid
222 * idx: The first index in settings[] to search
223 * features: A mask of the valid settings
224 *
225 * description: Returns the index of the first valid setting less
226 * than or equal to the one pointed to by idx, as determined by
227 * the mask in features. Returns the index of the last setting
228 * if nothing else matches.
229 */
230static inline int phy_find_valid(int idx, u32 features)
231{
232 while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
233 idx++;
234
235 return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
236}
237
238/* phy_sanitize_settings
239 *
240 * description: Make sure the PHY is set to supported speeds and
241 * duplexes. Drop down by one in this order: 1000/FULL,
242 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF
243 */
244static void phy_sanitize_settings(struct phy_device *phydev)
245{
246 u32 features = phydev->supported;
247 int idx;
248
249 /* Sanitize settings based on PHY capabilities */
250 if ((features & SUPPORTED_Autoneg) == 0)
251 phydev->autoneg = 0;
252
253 idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
254 features);
255
256 phydev->speed = settings[idx].speed;
257 phydev->duplex = settings[idx].duplex;
258}
259
260/* phy_force_reduction
261 *
262 * description: Reduces the speed/duplex settings by
263 * one notch. The order is so:
264 * 1000/FULL, 1000/HALF, 100/FULL, 100/HALF,
265 * 10/FULL, 10/HALF. The function bottoms out at 10/HALF.
266 */
267static void phy_force_reduction(struct phy_device *phydev)
268{
269 int idx;
270
271 idx = phy_find_setting(phydev->speed, phydev->duplex);
272
273 idx++;
274
275 idx = phy_find_valid(idx, phydev->supported);
276
277 phydev->speed = settings[idx].speed;
278 phydev->duplex = settings[idx].duplex;
279
280 pr_info("Trying %d/%s\n", phydev->speed,
281 DUPLEX_FULL == phydev->duplex ?
282 "FULL" : "HALF");
283}
284
285/* phy_ethtool_sset:
286 * A generic ethtool sset function. Handles all the details
287 *
288 * A few notes about parameter checking:
289 * - We don't set port or transceiver, so we don't care what they
290 * were set to.
291 * - phy_start_aneg() will make sure forced settings are sane, and
292 * choose the next best ones from the ones selected, so we don't
293 * care if ethtool tries to give us bad values
294 */
295int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
296{
297 if (cmd->phy_address != phydev->addr)
298 return -EINVAL;
299
300 /* We make sure that we don't pass unsupported
301 * values in to the PHY */
302 cmd->advertising &= phydev->supported;
303
304 /* Verify the settings we care about. */
305 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
306 return -EINVAL;
307
308 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
309 return -EINVAL;
310
311 if (cmd->autoneg == AUTONEG_DISABLE
312 && ((cmd->speed != SPEED_1000
313 && cmd->speed != SPEED_100
314 && cmd->speed != SPEED_10)
315 || (cmd->duplex != DUPLEX_HALF
316 && cmd->duplex != DUPLEX_FULL)))
317 return -EINVAL;
318
319 phydev->autoneg = cmd->autoneg;
320
321 phydev->speed = cmd->speed;
322
323 phydev->advertising = cmd->advertising;
324
325 if (AUTONEG_ENABLE == cmd->autoneg)
326 phydev->advertising |= ADVERTISED_Autoneg;
327 else
328 phydev->advertising &= ~ADVERTISED_Autoneg;
329
330 phydev->duplex = cmd->duplex;
331
332 /* Restart the PHY */
333 phy_start_aneg(phydev);
334
335 return 0;
336}
337
338int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
339{
340 cmd->supported = phydev->supported;
341
342 cmd->advertising = phydev->advertising;
343
344 cmd->speed = phydev->speed;
345 cmd->duplex = phydev->duplex;
346 cmd->port = PORT_MII;
347 cmd->phy_address = phydev->addr;
348 cmd->transceiver = XCVR_EXTERNAL;
349 cmd->autoneg = phydev->autoneg;
350
351 return 0;
352}
353
354
355/* Note that this function is currently incompatible with the
356 * PHYCONTROL layer. It changes registers without regard to
357 * current state. Use at own risk
358 */
359int phy_mii_ioctl(struct phy_device *phydev,
360 struct mii_ioctl_data *mii_data, int cmd)
361{
362 u16 val = mii_data->val_in;
363
364 switch (cmd) {
365 case SIOCGMIIPHY:
366 mii_data->phy_id = phydev->addr;
367 break;
368 case SIOCGMIIREG:
369 mii_data->val_out = phy_read(phydev, mii_data->reg_num);
370 break;
371
372 case SIOCSMIIREG:
373 if (!capable(CAP_NET_ADMIN))
374 return -EPERM;
375
376 if (mii_data->phy_id == phydev->addr) {
377 switch(mii_data->reg_num) {
378 case MII_BMCR:
379 if (val & (BMCR_RESET|BMCR_ANENABLE))
380 phydev->autoneg = AUTONEG_DISABLE;
381 else
382 phydev->autoneg = AUTONEG_ENABLE;
383 if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
384 phydev->duplex = DUPLEX_FULL;
385 else
386 phydev->duplex = DUPLEX_HALF;
387 break;
388 case MII_ADVERTISE:
389 phydev->advertising = val;
390 break;
391 default:
392 /* do nothing */
393 break;
394 }
395 }
396
397 phy_write(phydev, mii_data->reg_num, val);
398
399 if (mii_data->reg_num == MII_BMCR
400 && val & BMCR_RESET
401 && phydev->drv->config_init)
402 phydev->drv->config_init(phydev);
403 break;
404 }
405
406 return 0;
407}
408
409/* phy_start_machine:
410 *
411 * description: The PHY infrastructure can run a state machine
412 * which tracks whether the PHY is starting up, negotiating,
413 * etc. This function starts the timer which tracks the state
414 * of the PHY. If you want to be notified when the state
415 * changes, pass in the callback, otherwise, pass NULL. If you
416 * want to maintain your own state machine, do not call this
417 * function. */
418void phy_start_machine(struct phy_device *phydev,
419 void (*handler)(struct net_device *))
420{
421 phydev->adjust_state = handler;
422
423 init_timer(&phydev->phy_timer);
424 phydev->phy_timer.function = &phy_timer;
425 phydev->phy_timer.data = (unsigned long) phydev;
426 mod_timer(&phydev->phy_timer, jiffies + HZ);
427}
428
429/* phy_stop_machine
430 *
431 * description: Stops the state machine timer, sets the state to
432 * UP (unless it wasn't up yet), and then frees the interrupt,
433 * if it is in use. This function must be called BEFORE
434 * phy_detach.
435 */
436void phy_stop_machine(struct phy_device *phydev)
437{
438 del_timer_sync(&phydev->phy_timer);
439
440 spin_lock(&phydev->lock);
441 if (phydev->state > PHY_UP)
442 phydev->state = PHY_UP;
443 spin_unlock(&phydev->lock);
444
445 if (phydev->irq != PHY_POLL)
446 phy_stop_interrupts(phydev);
447
448 phydev->adjust_state = NULL;
449}
450
451/* phy_error:
452 *
453 * Moves the PHY to the HALTED state in response to a read
454 * or write error, and tells the controller the link is down.
455 * Must not be called from interrupt context, or while the
456 * phydev->lock is held.
457 */
458void phy_error(struct phy_device *phydev)
459{
460 spin_lock(&phydev->lock);
461 phydev->state = PHY_HALTED;
462 spin_unlock(&phydev->lock);
463}
464
465static int phy_stop_interrupts(struct phy_device *phydev)
466{
467 int err;
468
469 err = phy_disable_interrupts(phydev);
470
471 if (err)
472 phy_error(phydev);
473
474 free_irq(phydev->irq, phydev);
475
476 return err;
477}
478
479/* Disable the PHY interrupts from the PHY side */
480static int phy_disable_interrupts(struct phy_device *phydev)
481{
482 int err;
483
484 /* Disable PHY interrupts */
485 err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
486
487 if (err)
488 goto phy_err;
489
490 /* Clear the interrupt */
491 err = phy_clear_interrupt(phydev);
492
493 if (err)
494 goto phy_err;
495
496 return 0;
497
498phy_err:
499 phy_error(phydev);
500
501 return err;
502}
503
504/* PHY timer which handles the state machine */
505static void phy_timer(unsigned long data)
506{
507 struct phy_device *phydev = (struct phy_device *)data;
508 int needs_aneg = 0;
509 int err = 0;
510
511 spin_lock(&phydev->lock);
512
513 if (phydev->adjust_state)
514 phydev->adjust_state(phydev->attached_dev);
515
516 switch(phydev->state) {
517 case PHY_DOWN:
518 case PHY_STARTING:
519 case PHY_READY:
520 case PHY_PENDING:
521 break;
522 case PHY_UP:
523 needs_aneg = 1;
524
525 phydev->link_timeout = PHY_AN_TIMEOUT;
526
527 break;
528 case PHY_AN:
529 /* Check if negotiation is done. Break
530 * if there's an error */
531 err = phy_aneg_done(phydev);
532 if (err < 0)
533 break;
534
535 /* If auto-negotiation is done, we change to
536 * either RUNNING, or NOLINK */
537 if (err > 0) {
538 err = phy_read_status(phydev);
539
540 if (err)
541 break;
542
543 if (phydev->link) {
544 phydev->state = PHY_RUNNING;
545 netif_carrier_on(phydev->attached_dev);
546 } else {
547 phydev->state = PHY_NOLINK;
548 netif_carrier_off(phydev->attached_dev);
549 }
550
551 phydev->adjust_link(phydev->attached_dev);
552
553 } else if (0 == phydev->link_timeout--) {
554 /* The counter expired, so either we
555 * switch to forced mode, or the
556 * magic_aneg bit exists, and we try aneg
557 * again */
558 if (!(phydev->drv->flags & PHY_HAS_MAGICANEG)) {
559 int idx;
560
561 /* We'll start from the
562 * fastest speed, and work
563 * our way down */
564 idx = phy_find_valid(0,
565 phydev->supported);
566
567 phydev->speed = settings[idx].speed;
568 phydev->duplex = settings[idx].duplex;
569
570 phydev->autoneg = AUTONEG_DISABLE;
571 phydev->state = PHY_FORCING;
572 phydev->link_timeout =
573 PHY_FORCE_TIMEOUT;
574
575 pr_info("Trying %d/%s\n",
576 phydev->speed,
577 DUPLEX_FULL ==
578 phydev->duplex ?
579 "FULL" : "HALF");
580 }
581
582 needs_aneg = 1;
583 }
584 break;
585 case PHY_NOLINK:
586 err = phy_read_status(phydev);
587
588 if (err)
589 break;
590
591 if (phydev->link) {
592 phydev->state = PHY_RUNNING;
593 netif_carrier_on(phydev->attached_dev);
594 phydev->adjust_link(phydev->attached_dev);
595 }
596 break;
597 case PHY_FORCING:
598 err = phy_read_status(phydev);
599
600 if (err)
601 break;
602
603 if (phydev->link) {
604 phydev->state = PHY_RUNNING;
605 netif_carrier_on(phydev->attached_dev);
606 } else {
607 if (0 == phydev->link_timeout--) {
608 phy_force_reduction(phydev);
609 needs_aneg = 1;
610 }
611 }
612
613 phydev->adjust_link(phydev->attached_dev);
614 break;
615 case PHY_RUNNING:
616 /* Only register a CHANGE if we are
617 * polling */
618 if (PHY_POLL == phydev->irq)
619 phydev->state = PHY_CHANGELINK;
620 break;
621 case PHY_CHANGELINK:
622 err = phy_read_status(phydev);
623
624 if (err)
625 break;
626
627 if (phydev->link) {
628 phydev->state = PHY_RUNNING;
629 netif_carrier_on(phydev->attached_dev);
630 } else {
631 phydev->state = PHY_NOLINK;
632 netif_carrier_off(phydev->attached_dev);
633 }
634
635 phydev->adjust_link(phydev->attached_dev);
636
637 if (PHY_POLL != phydev->irq)
638 err = phy_config_interrupt(phydev,
639 PHY_INTERRUPT_ENABLED);
640 break;
641 case PHY_HALTED:
642 if (phydev->link) {
643 phydev->link = 0;
644 netif_carrier_off(phydev->attached_dev);
645 phydev->adjust_link(phydev->attached_dev);
646 }
647 break;
648 case PHY_RESUMING:
649
650 err = phy_clear_interrupt(phydev);
651
652 if (err)
653 break;
654
655 err = phy_config_interrupt(phydev,
656 PHY_INTERRUPT_ENABLED);
657
658 if (err)
659 break;
660
661 if (AUTONEG_ENABLE == phydev->autoneg) {
662 err = phy_aneg_done(phydev);
663 if (err < 0)
664 break;
665
666 /* err > 0 if AN is done.
667 * Otherwise, it's 0, and we're
668 * still waiting for AN */
669 if (err > 0) {
670 phydev->state = PHY_RUNNING;
671 } else {
672 phydev->state = PHY_AN;
673 phydev->link_timeout = PHY_AN_TIMEOUT;
674 }
675 } else
676 phydev->state = PHY_RUNNING;
677 break;
678 }
679
680 spin_unlock(&phydev->lock);
681
682 if (needs_aneg)
683 err = phy_start_aneg(phydev);
684
685 if (err < 0)
686 phy_error(phydev);
687
688 mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
689}
690
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
new file mode 100644
index 000000000000..c44d54f6310a
--- /dev/null
+++ b/drivers/net/phy/phy_device.c
@@ -0,0 +1,572 @@
1/*
2 * drivers/net/phy/phy_device.c
3 *
4 * Framework for finding and configuring PHYs.
5 * Also contains generic PHY driver
6 *
7 * Author: Andy Fleming
8 *
9 * Copyright (c) 2004 Freescale Semiconductor, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 */
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/unistd.h>
23#include <linux/slab.h>
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/spinlock.h>
31#include <linux/mm.h>
32#include <linux/module.h>
33#include <linux/version.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/phy.h>
37
38#include <asm/io.h>
39#include <asm/irq.h>
40#include <asm/uaccess.h>
41
42static int genphy_config_init(struct phy_device *phydev);
43
44static struct phy_driver genphy_driver = {
45 .phy_id = 0xffffffff,
46 .phy_id_mask = 0xffffffff,
47 .name = "Generic PHY",
48 .config_init = genphy_config_init,
49 .features = 0,
50 .config_aneg = genphy_config_aneg,
51 .read_status = genphy_read_status,
52 .driver = {.owner = THIS_MODULE, },
53};
54
55/* get_phy_device
56 *
57 * description: Reads the ID registers of the PHY at addr on the
58 * bus, then allocates and returns the phy_device to
59 * represent it.
60 */
61struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
62{
63 int phy_reg;
64 u32 phy_id;
65 struct phy_device *dev = NULL;
66
67 /* Grab the bits from PHYIR1, and put them
68 * in the upper half */
69 phy_reg = bus->read(bus, addr, MII_PHYSID1);
70
71 if (phy_reg < 0)
72 return ERR_PTR(phy_reg);
73
74 phy_id = (phy_reg & 0xffff) << 16;
75
76 /* Grab the bits from PHYIR2, and put them in the lower half */
77 phy_reg = bus->read(bus, addr, MII_PHYSID2);
78
79 if (phy_reg < 0)
80 return ERR_PTR(phy_reg);
81
82 phy_id |= (phy_reg & 0xffff);
83
84 /* If the phy_id is all Fs, there is no device there */
85 if (0xffffffff == phy_id)
86 return NULL;
87
88 /* Otherwise, we allocate the device, and initialize the
89 * default values */
90 dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
91
92 if (NULL == dev)
93 return ERR_PTR(-ENOMEM);
94
95 dev->speed = 0;
96 dev->duplex = -1;
97 dev->pause = dev->asym_pause = 0;
98 dev->link = 1;
99
100 dev->autoneg = AUTONEG_ENABLE;
101
102 dev->addr = addr;
103 dev->phy_id = phy_id;
104 dev->bus = bus;
105
106 dev->state = PHY_DOWN;
107
108 spin_lock_init(&dev->lock);
109
110 return dev;
111}
112
113/* phy_prepare_link:
114 *
115 * description: Tells the PHY infrastructure to handle the
116 * gory details on monitoring link status (whether through
117 * polling or an interrupt), and to call back to the
118 * connected device driver when the link status changes.
119 * If you want to monitor your own link state, don't call
120 * this function */
121void phy_prepare_link(struct phy_device *phydev,
122 void (*handler)(struct net_device *))
123{
124 phydev->adjust_link = handler;
125}
126
127/* Generic PHY support and helper functions */
128
129/* genphy_config_advert
130 *
131 * description: Writes MII_ADVERTISE with the appropriate values,
132 * after sanitizing the values to make sure we only advertise
133 * what is supported
134 */
135static int genphy_config_advert(struct phy_device *phydev)
136{
137 u32 advertise;
138 int adv;
139 int err;
140
141 /* Only allow advertising what
142 * this PHY supports */
143 phydev->advertising &= phydev->supported;
144 advertise = phydev->advertising;
145
146 /* Setup standard advertisement */
147 adv = phy_read(phydev, MII_ADVERTISE);
148
149 if (adv < 0)
150 return adv;
151
152 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
153 ADVERTISE_PAUSE_ASYM);
154 if (advertise & ADVERTISED_10baseT_Half)
155 adv |= ADVERTISE_10HALF;
156 if (advertise & ADVERTISED_10baseT_Full)
157 adv |= ADVERTISE_10FULL;
158 if (advertise & ADVERTISED_100baseT_Half)
159 adv |= ADVERTISE_100HALF;
160 if (advertise & ADVERTISED_100baseT_Full)
161 adv |= ADVERTISE_100FULL;
162 if (advertise & ADVERTISED_Pause)
163 adv |= ADVERTISE_PAUSE_CAP;
164 if (advertise & ADVERTISED_Asym_Pause)
165 adv |= ADVERTISE_PAUSE_ASYM;
166
167 err = phy_write(phydev, MII_ADVERTISE, adv);
168
169 if (err < 0)
170 return err;
171
172 /* Configure gigabit if it's supported */
173 if (phydev->supported & (SUPPORTED_1000baseT_Half |
174 SUPPORTED_1000baseT_Full)) {
175 adv = phy_read(phydev, MII_CTRL1000);
176
177 if (adv < 0)
178 return adv;
179
180 adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
181 if (advertise & SUPPORTED_1000baseT_Half)
182 adv |= ADVERTISE_1000HALF;
183 if (advertise & SUPPORTED_1000baseT_Full)
184 adv |= ADVERTISE_1000FULL;
185 err = phy_write(phydev, MII_CTRL1000, adv);
186
187 if (err < 0)
188 return err;
189 }
190
191 return adv;
192}
193
194/* genphy_setup_forced
195 *
196 * description: Configures MII_BMCR to force speed/duplex
197 * to the values in phydev. Assumes that the values are valid.
198 * Please see phy_sanitize_settings() */
199int genphy_setup_forced(struct phy_device *phydev)
200{
201 int ctl = BMCR_RESET;
202
203 phydev->pause = phydev->asym_pause = 0;
204
205 if (SPEED_1000 == phydev->speed)
206 ctl |= BMCR_SPEED1000;
207 else if (SPEED_100 == phydev->speed)
208 ctl |= BMCR_SPEED100;
209
210 if (DUPLEX_FULL == phydev->duplex)
211 ctl |= BMCR_FULLDPLX;
212
213 ctl = phy_write(phydev, MII_BMCR, ctl);
214
215 if (ctl < 0)
216 return ctl;
217
218 /* We just reset the device, so we'd better configure any
219 * settings the PHY requires to operate */
220 if (phydev->drv->config_init)
221 ctl = phydev->drv->config_init(phydev);
222
223 return ctl;
224}
225
226
227/* Enable and Restart Autonegotiation */
228int genphy_restart_aneg(struct phy_device *phydev)
229{
230 int ctl;
231
232 ctl = phy_read(phydev, MII_BMCR);
233
234 if (ctl < 0)
235 return ctl;
236
237 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
238
239 /* Don't isolate the PHY if we're negotiating */
240 ctl &= ~(BMCR_ISOLATE);
241
242 ctl = phy_write(phydev, MII_BMCR, ctl);
243
244 return ctl;
245}
246
247
248/* genphy_config_aneg
249 *
250 * description: If auto-negotiation is enabled, we configure the
251 * advertising, and then restart auto-negotiation. If it is not
252 * enabled, then we write the BMCR
253 */
254int genphy_config_aneg(struct phy_device *phydev)
255{
256 int err = 0;
257
258 if (AUTONEG_ENABLE == phydev->autoneg) {
259 err = genphy_config_advert(phydev);
260
261 if (err < 0)
262 return err;
263
264 err = genphy_restart_aneg(phydev);
265 } else
266 err = genphy_setup_forced(phydev);
267
268 return err;
269}
270EXPORT_SYMBOL(genphy_config_aneg);
271
272/* genphy_update_link
273 *
274 * description: Update the value in phydev->link to reflect the
275 * current link value. In order to do this, we need to read
276 * the status register twice, keeping the second value
277 */
278int genphy_update_link(struct phy_device *phydev)
279{
280 int status;
281
282 /* Do a fake read */
283 status = phy_read(phydev, MII_BMSR);
284
285 if (status < 0)
286 return status;
287
288 /* Read link and autonegotiation status */
289 status = phy_read(phydev, MII_BMSR);
290
291 if (status < 0)
292 return status;
293
294 if ((status & BMSR_LSTATUS) == 0)
295 phydev->link = 0;
296 else
297 phydev->link = 1;
298
299 return 0;
300}
301
302/* genphy_read_status
303 *
304 * description: Check the link, then figure out the current state
305 * by comparing what we advertise with what the link partner
306 * advertises. Start by checking the gigabit possibilities,
307 * then move on to 10/100.
308 */
309int genphy_read_status(struct phy_device *phydev)
310{
311 int adv;
312 int err;
313 int lpa;
314 int lpagb = 0;
315
316 /* Update the link, but return if there
317 * was an error */
318 err = genphy_update_link(phydev);
319 if (err)
320 return err;
321
322 if (AUTONEG_ENABLE == phydev->autoneg) {
323 if (phydev->supported & (SUPPORTED_1000baseT_Half
324 | SUPPORTED_1000baseT_Full)) {
325 lpagb = phy_read(phydev, MII_STAT1000);
326
327 if (lpagb < 0)
328 return lpagb;
329
330 adv = phy_read(phydev, MII_CTRL1000);
331
332 if (adv < 0)
333 return adv;
334
335 lpagb &= adv << 2;
336 }
337
338 lpa = phy_read(phydev, MII_LPA);
339
340 if (lpa < 0)
341 return lpa;
342
343 adv = phy_read(phydev, MII_ADVERTISE);
344
345 if (adv < 0)
346 return adv;
347
348 lpa &= adv;
349
350 phydev->speed = SPEED_10;
351 phydev->duplex = DUPLEX_HALF;
352 phydev->pause = phydev->asym_pause = 0;
353
354 if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
355 phydev->speed = SPEED_1000;
356
357 if (lpagb & LPA_1000FULL)
358 phydev->duplex = DUPLEX_FULL;
359 } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
360 phydev->speed = SPEED_100;
361
362 if (lpa & LPA_100FULL)
363 phydev->duplex = DUPLEX_FULL;
364 } else
365 if (lpa & LPA_10FULL)
366 phydev->duplex = DUPLEX_FULL;
367
368 if (phydev->duplex == DUPLEX_FULL){
369 phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
370 phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
371 }
372 } else {
373 int bmcr = phy_read(phydev, MII_BMCR);
374 if (bmcr < 0)
375 return bmcr;
376
377 if (bmcr & BMCR_FULLDPLX)
378 phydev->duplex = DUPLEX_FULL;
379 else
380 phydev->duplex = DUPLEX_HALF;
381
382 if (bmcr & BMCR_SPEED1000)
383 phydev->speed = SPEED_1000;
384 else if (bmcr & BMCR_SPEED100)
385 phydev->speed = SPEED_100;
386 else
387 phydev->speed = SPEED_10;
388
389 phydev->pause = phydev->asym_pause = 0;
390 }
391
392 return 0;
393}
394EXPORT_SYMBOL(genphy_read_status);
395
396static int genphy_config_init(struct phy_device *phydev)
397{
398 u32 val;
399 u32 features;
400
401 /* For now, I'll claim that the generic driver supports
402 * all possible port types */
403 features = (SUPPORTED_TP | SUPPORTED_MII
404 | SUPPORTED_AUI | SUPPORTED_FIBRE |
405 SUPPORTED_BNC);
406
407 /* Do we support autonegotiation? */
408 val = phy_read(phydev, MII_BMSR);
409
410 if (val < 0)
411 return val;
412
413 if (val & BMSR_ANEGCAPABLE)
414 features |= SUPPORTED_Autoneg;
415
416 if (val & BMSR_100FULL)
417 features |= SUPPORTED_100baseT_Full;
418 if (val & BMSR_100HALF)
419 features |= SUPPORTED_100baseT_Half;
420 if (val & BMSR_10FULL)
421 features |= SUPPORTED_10baseT_Full;
422 if (val & BMSR_10HALF)
423 features |= SUPPORTED_10baseT_Half;
424
425 if (val & BMSR_ESTATEN) {
426 val = phy_read(phydev, MII_ESTATUS);
427
428 if (val < 0)
429 return val;
430
431 if (val & ESTATUS_1000_TFULL)
432 features |= SUPPORTED_1000baseT_Full;
433 if (val & ESTATUS_1000_THALF)
434 features |= SUPPORTED_1000baseT_Half;
435 }
436
437 phydev->supported = features;
438 phydev->advertising = features;
439
440 return 0;
441}
442
443
444/* phy_probe
445 *
446 * description: Take care of setting up the phy_device structure,
447 * set the state to READY (the driver's init function should
448 * set it to STARTING if needed).
449 */
450static int phy_probe(struct device *dev)
451{
452 struct phy_device *phydev;
453 struct phy_driver *phydrv;
454 struct device_driver *drv;
455 int err = 0;
456
457 phydev = to_phy_device(dev);
458
459 /* Make sure the driver is held.
460 * XXX -- Is this correct? */
461 drv = get_driver(phydev->dev.driver);
462 phydrv = to_phy_driver(drv);
463 phydev->drv = phydrv;
464
465 /* Disable the interrupt if the PHY doesn't support it */
466 if (!(phydrv->flags & PHY_HAS_INTERRUPT))
467 phydev->irq = PHY_POLL;
468
469 spin_lock(&phydev->lock);
470
471 /* Start out supporting everything. Eventually,
472 * a controller will attach, and may modify one
473 * or both of these values */
474 phydev->supported = phydrv->features;
475 phydev->advertising = phydrv->features;
476
477 /* Set the state to READY by default */
478 phydev->state = PHY_READY;
479
480 if (phydev->drv->probe)
481 err = phydev->drv->probe(phydev);
482
483 spin_unlock(&phydev->lock);
484
485 if (err < 0)
486 return err;
487
488 if (phydev->drv->config_init)
489 err = phydev->drv->config_init(phydev);
490
491 return err;
492}
493
494static int phy_remove(struct device *dev)
495{
496 struct phy_device *phydev;
497
498 phydev = to_phy_device(dev);
499
500 spin_lock(&phydev->lock);
501 phydev->state = PHY_DOWN;
502 spin_unlock(&phydev->lock);
503
504 if (phydev->drv->remove)
505 phydev->drv->remove(phydev);
506
507 put_driver(dev->driver);
508 phydev->drv = NULL;
509
510 return 0;
511}
512
513int phy_driver_register(struct phy_driver *new_driver)
514{
515 int retval;
516
517 memset(&new_driver->driver, 0, sizeof(new_driver->driver));
518 new_driver->driver.name = new_driver->name;
519 new_driver->driver.bus = &mdio_bus_type;
520 new_driver->driver.probe = phy_probe;
521 new_driver->driver.remove = phy_remove;
522
523 retval = driver_register(&new_driver->driver);
524
525 if (retval) {
526 printk(KERN_ERR "%s: Error %d in registering driver\n",
527 new_driver->name, retval);
528
529 return retval;
530 }
531
532 pr_info("%s: Registered new driver\n", new_driver->name);
533
534 return 0;
535}
536EXPORT_SYMBOL(phy_driver_register);
537
538void phy_driver_unregister(struct phy_driver *drv)
539{
540 driver_unregister(&drv->driver);
541}
542EXPORT_SYMBOL(phy_driver_unregister);
543
544
545static int __init phy_init(void)
546{
547 int rc;
548 extern int mdio_bus_init(void);
549
550 rc = phy_driver_register(&genphy_driver);
551 if (rc)
552 goto out;
553
554 rc = mdio_bus_init();
555 if (rc)
556 goto out_unreg;
557
558 return 0;
559
560out_unreg:
561 phy_driver_unregister(&genphy_driver);
562out:
563 return rc;
564}
565
566static void __exit phy_exit(void)
567{
568 phy_driver_unregister(&genphy_driver);
569}
570
571module_init(phy_init);
572module_exit(phy_exit);
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
new file mode 100644
index 000000000000..d461ba457631
--- /dev/null
+++ b/drivers/net/phy/qsemi.c
@@ -0,0 +1,143 @@
1/*
2 * drivers/net/phy/qsemi.c
3 *
4 * Driver for Quality Semiconductor PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41/* ------------------------------------------------------------------------- */
42/* The Quality Semiconductor QS6612 is used on the RPX CLLF */
43
44/* register definitions */
45
46#define MII_QS6612_MCR 17 /* Mode Control Register */
47#define MII_QS6612_FTR 27 /* Factory Test Register */
48#define MII_QS6612_MCO 28 /* Misc. Control Register */
49#define MII_QS6612_ISR 29 /* Interrupt Source Register */
50#define MII_QS6612_IMR 30 /* Interrupt Mask Register */
51#define MII_QS6612_IMR_INIT 0x003a
52#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */
53
54#define QS6612_PCR_AN_COMPLETE 0x1000
55#define QS6612_PCR_RLBEN 0x0200
56#define QS6612_PCR_DCREN 0x0100
57#define QS6612_PCR_4B5BEN 0x0040
58#define QS6612_PCR_TX_ISOLATE 0x0020
59#define QS6612_PCR_MLT3_DIS 0x0002
60#define QS6612_PCR_SCRM_DESCRM 0x0001
61
62MODULE_DESCRIPTION("Quality Semiconductor PHY driver");
63MODULE_AUTHOR("Andy Fleming");
64MODULE_LICENSE("GPL");
65
66/* Returns 0, unless there's a write error */
67static int qs6612_config_init(struct phy_device *phydev)
68{
69 /* The PHY powers up isolated on the RPX,
70 * so send a command to allow operation.
71 * XXX - My docs indicate this should be 0x0940
72 * ...or something. The current value sets three
73 * reserved bits, bit 11, which specifies it should be
74 * set to one, bit 10, which specifies it should be set
75 * to 0, and bit 7, which doesn't specify. However, my
76 * docs are preliminary, and I will leave it like this
77 * until someone more knowledgable corrects me or it.
78 * -- Andy Fleming
79 */
80 return phy_write(phydev, MII_QS6612_PCR, 0x0dc0);
81}
82
83static int qs6612_ack_interrupt(struct phy_device *phydev)
84{
85 int err;
86
87 err = phy_read(phydev, MII_QS6612_ISR);
88
89 if (err < 0)
90 return err;
91
92 err = phy_read(phydev, MII_BMSR);
93
94 if (err < 0)
95 return err;
96
97 err = phy_read(phydev, MII_EXPANSION);
98
99 if (err < 0)
100 return err;
101
102 return 0;
103}
104
105static int qs6612_config_intr(struct phy_device *phydev)
106{
107 int err;
108 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
109 err = phy_write(phydev, MII_QS6612_IMR,
110 MII_QS6612_IMR_INIT);
111 else
112 err = phy_write(phydev, MII_QS6612_IMR, 0);
113
114 return err;
115
116}
117
118static struct phy_driver qs6612_driver = {
119 .phy_id = 0x00181440,
120 .name = "QS6612",
121 .phy_id_mask = 0xfffffff0,
122 .features = PHY_BASIC_FEATURES,
123 .flags = PHY_HAS_INTERRUPT,
124 .config_init = qs6612_config_init,
125 .config_aneg = genphy_config_aneg,
126 .read_status = genphy_read_status,
127 .ack_interrupt = qs6612_ack_interrupt,
128 .config_intr = qs6612_config_intr,
129 .driver = { .owner = THIS_MODULE,},
130};
131
132static int __init qs6612_init(void)
133{
134 return phy_driver_register(&qs6612_driver);
135}
136
137static void __exit qs6612_exit(void)
138{
139 phy_driver_unregister(&qs6612_driver);
140}
141
142module_init(qs6612_init);
143module_exit(qs6612_exit);
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 7092ca6b277e..2234a8f05eb2 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -62,6 +62,7 @@ typedef struct _XENA_dev_config {
62#define ADAPTER_STATUS_RMAC_REMOTE_FAULT BIT(6) 62#define ADAPTER_STATUS_RMAC_REMOTE_FAULT BIT(6)
63#define ADAPTER_STATUS_RMAC_LOCAL_FAULT BIT(7) 63#define ADAPTER_STATUS_RMAC_LOCAL_FAULT BIT(7)
64#define ADAPTER_STATUS_RMAC_PCC_IDLE vBIT(0xFF,8,8) 64#define ADAPTER_STATUS_RMAC_PCC_IDLE vBIT(0xFF,8,8)
65#define ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE vBIT(0x0F,8,8)
65#define ADAPTER_STATUS_RC_PRC_QUIESCENT vBIT(0xFF,16,8) 66#define ADAPTER_STATUS_RC_PRC_QUIESCENT vBIT(0xFF,16,8)
66#define ADAPTER_STATUS_MC_DRAM_READY BIT(24) 67#define ADAPTER_STATUS_MC_DRAM_READY BIT(24)
67#define ADAPTER_STATUS_MC_QUEUES_READY BIT(25) 68#define ADAPTER_STATUS_MC_QUEUES_READY BIT(25)
@@ -77,21 +78,34 @@ typedef struct _XENA_dev_config {
77#define ADAPTER_ECC_EN BIT(55) 78#define ADAPTER_ECC_EN BIT(55)
78 79
79 u64 serr_source; 80 u64 serr_source;
80#define SERR_SOURCE_PIC BIT(0) 81#define SERR_SOURCE_PIC BIT(0)
81#define SERR_SOURCE_TXDMA BIT(1) 82#define SERR_SOURCE_TXDMA BIT(1)
82#define SERR_SOURCE_RXDMA BIT(2) 83#define SERR_SOURCE_RXDMA BIT(2)
83#define SERR_SOURCE_MAC BIT(3) 84#define SERR_SOURCE_MAC BIT(3)
84#define SERR_SOURCE_MC BIT(4) 85#define SERR_SOURCE_MC BIT(4)
85#define SERR_SOURCE_XGXS BIT(5) 86#define SERR_SOURCE_XGXS BIT(5)
86#define SERR_SOURCE_ANY (SERR_SOURCE_PIC | \ 87#define SERR_SOURCE_ANY (SERR_SOURCE_PIC | \
87 SERR_SOURCE_TXDMA | \ 88 SERR_SOURCE_TXDMA | \
88 SERR_SOURCE_RXDMA | \ 89 SERR_SOURCE_RXDMA | \
89 SERR_SOURCE_MAC | \ 90 SERR_SOURCE_MAC | \
90 SERR_SOURCE_MC | \ 91 SERR_SOURCE_MC | \
91 SERR_SOURCE_XGXS) 92 SERR_SOURCE_XGXS)
92 93
93 94 u64 pci_mode;
94 u8 unused_0[0x800 - 0x120]; 95#define GET_PCI_MODE(val) ((val & vBIT(0xF, 0, 4)) >> 60)
96#define PCI_MODE_PCI_33 0
97#define PCI_MODE_PCI_66 0x1
98#define PCI_MODE_PCIX_M1_66 0x2
99#define PCI_MODE_PCIX_M1_100 0x3
100#define PCI_MODE_PCIX_M1_133 0x4
101#define PCI_MODE_PCIX_M2_66 0x5
102#define PCI_MODE_PCIX_M2_100 0x6
103#define PCI_MODE_PCIX_M2_133 0x7
104#define PCI_MODE_UNSUPPORTED BIT(0)
105#define PCI_MODE_32_BITS BIT(8)
106#define PCI_MODE_UNKNOWN_MODE BIT(9)
107
108 u8 unused_0[0x800 - 0x128];
95 109
96/* PCI-X Controller registers */ 110/* PCI-X Controller registers */
97 u64 pic_int_status; 111 u64 pic_int_status;
@@ -153,7 +167,11 @@ typedef struct _XENA_dev_config {
153 u8 unused4[0x08]; 167 u8 unused4[0x08];
154 168
155 u64 gpio_int_reg; 169 u64 gpio_int_reg;
170#define GPIO_INT_REG_LINK_DOWN BIT(1)
171#define GPIO_INT_REG_LINK_UP BIT(2)
156 u64 gpio_int_mask; 172 u64 gpio_int_mask;
173#define GPIO_INT_MASK_LINK_DOWN BIT(1)
174#define GPIO_INT_MASK_LINK_UP BIT(2)
157 u64 gpio_alarms; 175 u64 gpio_alarms;
158 176
159 u8 unused5[0x38]; 177 u8 unused5[0x38];
@@ -223,19 +241,16 @@ typedef struct _XENA_dev_config {
223 u64 xmsi_data; 241 u64 xmsi_data;
224 242
225 u64 rx_mat; 243 u64 rx_mat;
244#define RX_MAT_SET(ring, msi) vBIT(msi, (8 * ring), 8)
226 245
227 u8 unused6[0x8]; 246 u8 unused6[0x8];
228 247
229 u64 tx_mat0_7; 248 u64 tx_mat0_n[0x8];
230 u64 tx_mat8_15; 249#define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8)
231 u64 tx_mat16_23;
232 u64 tx_mat24_31;
233 u64 tx_mat32_39;
234 u64 tx_mat40_47;
235 u64 tx_mat48_55;
236 u64 tx_mat56_63;
237 250
238 u8 unused_1[0x10]; 251 u8 unused_1[0x8];
252 u64 stat_byte_cnt;
253#define STAT_BC(n) vBIT(n,4,12)
239 254
240 /* Automated statistics collection */ 255 /* Automated statistics collection */
241 u64 stat_cfg; 256 u64 stat_cfg;
@@ -246,6 +261,7 @@ typedef struct _XENA_dev_config {
246#define STAT_TRSF_PER(n) TBD 261#define STAT_TRSF_PER(n) TBD
247#define PER_SEC 0x208d5 262#define PER_SEC 0x208d5
248#define SET_UPDT_PERIOD(n) vBIT((PER_SEC*n),32,32) 263#define SET_UPDT_PERIOD(n) vBIT((PER_SEC*n),32,32)
264#define SET_UPDT_CLICKS(val) vBIT(val, 32, 32)
249 265
250 u64 stat_addr; 266 u64 stat_addr;
251 267
@@ -267,8 +283,15 @@ typedef struct _XENA_dev_config {
267 283
268 u64 gpio_control; 284 u64 gpio_control;
269#define GPIO_CTRL_GPIO_0 BIT(8) 285#define GPIO_CTRL_GPIO_0 BIT(8)
286 u64 misc_control;
287#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3)
288
289 u8 unused7_1[0x240 - 0x208];
290
291 u64 wreq_split_mask;
292#define WREQ_SPLIT_MASK_SET_MASK(val) vBIT(val, 52, 12)
270 293
271 u8 unused7[0x600]; 294 u8 unused7_2[0x800 - 0x248];
272 295
273/* TxDMA registers */ 296/* TxDMA registers */
274 u64 txdma_int_status; 297 u64 txdma_int_status;
@@ -290,6 +313,7 @@ typedef struct _XENA_dev_config {
290 313
291 u64 pcc_err_reg; 314 u64 pcc_err_reg;
292#define PCC_FB_ECC_DB_ERR vBIT(0xFF, 16, 8) 315#define PCC_FB_ECC_DB_ERR vBIT(0xFF, 16, 8)
316#define PCC_ENABLE_FOUR vBIT(0x0F,0,8)
293 317
294 u64 pcc_err_mask; 318 u64 pcc_err_mask;
295 u64 pcc_err_alarm; 319 u64 pcc_err_alarm;
@@ -468,6 +492,7 @@ typedef struct _XENA_dev_config {
468#define PRC_CTRL_NO_SNOOP (BIT(22)|BIT(23)) 492#define PRC_CTRL_NO_SNOOP (BIT(22)|BIT(23))
469#define PRC_CTRL_NO_SNOOP_DESC BIT(22) 493#define PRC_CTRL_NO_SNOOP_DESC BIT(22)
470#define PRC_CTRL_NO_SNOOP_BUFF BIT(23) 494#define PRC_CTRL_NO_SNOOP_BUFF BIT(23)
495#define PRC_CTRL_BIMODAL_INTERRUPT BIT(37)
471#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24) 496#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24)
472 497
473 u64 prc_alarm_action; 498 u64 prc_alarm_action;
@@ -691,6 +716,10 @@ typedef struct _XENA_dev_config {
691#define MC_ERR_REG_MIRI_CRI_ERR_0 BIT(22) 716#define MC_ERR_REG_MIRI_CRI_ERR_0 BIT(22)
692#define MC_ERR_REG_MIRI_CRI_ERR_1 BIT(23) 717#define MC_ERR_REG_MIRI_CRI_ERR_1 BIT(23)
693#define MC_ERR_REG_SM_ERR BIT(31) 718#define MC_ERR_REG_SM_ERR BIT(31)
719#define MC_ERR_REG_ECC_ALL_SNG (BIT(6) | \
720 BIT(7) | BIT(17) | BIT(19))
721#define MC_ERR_REG_ECC_ALL_DBL (BIT(14) | \
722 BIT(15) | BIT(18) | BIT(20))
694 u64 mc_err_mask; 723 u64 mc_err_mask;
695 u64 mc_err_alarm; 724 u64 mc_err_alarm;
696 725
@@ -736,7 +765,19 @@ typedef struct _XENA_dev_config {
736 u64 mc_rldram_test_d1; 765 u64 mc_rldram_test_d1;
737 u8 unused24[0x300 - 0x288]; 766 u8 unused24[0x300 - 0x288];
738 u64 mc_rldram_test_d2; 767 u64 mc_rldram_test_d2;
739 u8 unused25[0x700 - 0x308]; 768
769 u8 unused24_1[0x360 - 0x308];
770 u64 mc_rldram_ctrl;
771#define MC_RLDRAM_ENABLE_ODT BIT(7)
772
773 u8 unused24_2[0x640 - 0x368];
774 u64 mc_rldram_ref_per_herc;
775#define MC_RLDRAM_SET_REF_PERIOD(val) vBIT(val, 0, 16)
776
777 u8 unused24_3[0x660 - 0x648];
778 u64 mc_rldram_mrs_herc;
779
780 u8 unused25[0x700 - 0x668];
740 u64 mc_debug_ctrl; 781 u64 mc_debug_ctrl;
741 782
742 u8 unused26[0x3000 - 0x2f08]; 783 u8 unused26[0x3000 - 0x2f08];
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ea638b162d3f..e083351e3f42 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -11,29 +11,28 @@
11 * See the file COPYING in this distribution for more information. 11 * See the file COPYING in this distribution for more information.
12 * 12 *
13 * Credits: 13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition 14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some 15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for 16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable 17 * patiently answering all those innumerable
18 * questions regaring the 2.6 porting issues. 18 * questions regaring the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some 19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel. 20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were 21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments. 22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture 23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code. 24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver. 25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 * 26 *
27 * The module loadable parameters that are supported by the driver and a brief 27 * The module loadable parameters that are supported by the driver and a brief
28 * explaination of all the variables. 28 * explaination of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used 29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver. 30 * in the driver.
31 * rx_ring_len: This defines the number of descriptors each ring can have. This 31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8. 32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver. 33 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of 34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO. 35 * Tx descriptors that can be associated with each corresponding FIFO.
36 * in PCI Configuration space.
37 ************************************************************************/ 36 ************************************************************************/
38 37
39#include <linux/config.h> 38#include <linux/config.h>
@@ -56,27 +55,39 @@
56#include <linux/ethtool.h> 55#include <linux/ethtool.h>
57#include <linux/version.h> 56#include <linux/version.h>
58#include <linux/workqueue.h> 57#include <linux/workqueue.h>
58#include <linux/if_vlan.h>
59 59
60#include <asm/io.h>
61#include <asm/system.h> 60#include <asm/system.h>
62#include <asm/uaccess.h> 61#include <asm/uaccess.h>
62#include <asm/io.h>
63 63
64/* local include */ 64/* local include */
65#include "s2io.h" 65#include "s2io.h"
66#include "s2io-regs.h" 66#include "s2io-regs.h"
67 67
68/* S2io Driver name & version. */ 68/* S2io Driver name & version. */
69static char s2io_driver_name[] = "s2io"; 69static char s2io_driver_name[] = "Neterion";
70static char s2io_driver_version[] = "Version 1.7.7.1"; 70static char s2io_driver_version[] = "Version 2.0.2.1";
71
72static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73{
74 int ret;
75
76 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
71 78
72/* 79 return ret;
80}
81
82/*
73 * Cards with following subsystem_id have a link state indication 83 * Cards with following subsystem_id have a link state indication
74 * problem, 600B, 600C, 600D, 640B, 640C and 640D. 84 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
75 * macro below identifies these cards given the subsystem_id. 85 * macro below identifies these cards given the subsystem_id.
76 */ 86 */
77#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \ 87#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
78 (((subid >= 0x600B) && (subid <= 0x600D)) || \ 88 (dev_type == XFRAME_I_DEVICE) ? \
79 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0 89 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
90 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
80 91
81#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \ 92#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
82 ADAPTER_STATUS_RMAC_LOCAL_FAULT))) 93 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
@@ -86,9 +97,12 @@ static char s2io_driver_version[] = "Version 1.7.7.1";
86static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring) 97static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
87{ 98{
88 int level = 0; 99 int level = 0;
89 if ((sp->pkt_cnt[ring] - rxb_size) > 16) { 100 mac_info_t *mac_control;
101
102 mac_control = &sp->mac_control;
103 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
90 level = LOW; 104 level = LOW;
91 if ((sp->pkt_cnt[ring] - rxb_size) < MAX_RXDS_PER_BLOCK) { 105 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
92 level = PANIC; 106 level = PANIC;
93 } 107 }
94 } 108 }
@@ -145,6 +159,9 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
145 {"rmac_pause_cnt"}, 159 {"rmac_pause_cnt"},
146 {"rmac_accepted_ip"}, 160 {"rmac_accepted_ip"},
147 {"rmac_err_tcp"}, 161 {"rmac_err_tcp"},
162 {"\n DRIVER STATISTICS"},
163 {"single_bit_ecc_errs"},
164 {"double_bit_ecc_errs"},
148}; 165};
149 166
150#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN 167#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
@@ -153,8 +170,37 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
153#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN 170#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
154#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN 171#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
155 172
173#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
174 init_timer(&timer); \
175 timer.function = handle; \
176 timer.data = (unsigned long) arg; \
177 mod_timer(&timer, (jiffies + exp)) \
178
179/* Add the vlan */
180static void s2io_vlan_rx_register(struct net_device *dev,
181 struct vlan_group *grp)
182{
183 nic_t *nic = dev->priv;
184 unsigned long flags;
185
186 spin_lock_irqsave(&nic->tx_lock, flags);
187 nic->vlgrp = grp;
188 spin_unlock_irqrestore(&nic->tx_lock, flags);
189}
190
191/* Unregister the vlan */
192static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
193{
194 nic_t *nic = dev->priv;
195 unsigned long flags;
196
197 spin_lock_irqsave(&nic->tx_lock, flags);
198 if (nic->vlgrp)
199 nic->vlgrp->vlan_devices[vid] = NULL;
200 spin_unlock_irqrestore(&nic->tx_lock, flags);
201}
156 202
157/* 203/*
158 * Constants to be programmed into the Xena's registers, to configure 204 * Constants to be programmed into the Xena's registers, to configure
159 * the XAUI. 205 * the XAUI.
160 */ 206 */
@@ -162,7 +208,24 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
162#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL 208#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
163#define END_SIGN 0x0 209#define END_SIGN 0x0
164 210
165static u64 default_mdio_cfg[] = { 211static u64 herc_act_dtx_cfg[] = {
212 /* Set address */
213 0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
214 /* Write data */
215 0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
216 /* Set address */
217 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
218 /* Write data */
219 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
220 /* Set address */
221 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
222 /* Write data */
223 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
224 /* Done */
225 END_SIGN
226};
227
228static u64 xena_mdio_cfg[] = {
166 /* Reset PMA PLL */ 229 /* Reset PMA PLL */
167 0xC001010000000000ULL, 0xC0010100000000E0ULL, 230 0xC001010000000000ULL, 0xC0010100000000E0ULL,
168 0xC0010100008000E4ULL, 231 0xC0010100008000E4ULL,
@@ -172,7 +235,7 @@ static u64 default_mdio_cfg[] = {
172 END_SIGN 235 END_SIGN
173}; 236};
174 237
175static u64 default_dtx_cfg[] = { 238static u64 xena_dtx_cfg[] = {
176 0x8000051500000000ULL, 0x80000515000000E0ULL, 239 0x8000051500000000ULL, 0x80000515000000E0ULL,
177 0x80000515D93500E4ULL, 0x8001051500000000ULL, 240 0x80000515D93500E4ULL, 0x8001051500000000ULL,
178 0x80010515000000E0ULL, 0x80010515001E00E4ULL, 241 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
@@ -196,8 +259,7 @@ static u64 default_dtx_cfg[] = {
196 END_SIGN 259 END_SIGN
197}; 260};
198 261
199 262/*
200/*
201 * Constants for Fixing the MacAddress problem seen mostly on 263 * Constants for Fixing the MacAddress problem seen mostly on
202 * Alpha machines. 264 * Alpha machines.
203 */ 265 */
@@ -226,20 +288,25 @@ static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
226static unsigned int rx_ring_num = 1; 288static unsigned int rx_ring_num = 1;
227static unsigned int rx_ring_sz[MAX_RX_RINGS] = 289static unsigned int rx_ring_sz[MAX_RX_RINGS] =
228 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 290 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
229static unsigned int Stats_refresh_time = 4; 291static unsigned int rts_frm_len[MAX_RX_RINGS] =
292 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
293static unsigned int use_continuous_tx_intrs = 1;
230static unsigned int rmac_pause_time = 65535; 294static unsigned int rmac_pause_time = 65535;
231static unsigned int mc_pause_threshold_q0q3 = 187; 295static unsigned int mc_pause_threshold_q0q3 = 187;
232static unsigned int mc_pause_threshold_q4q7 = 187; 296static unsigned int mc_pause_threshold_q4q7 = 187;
233static unsigned int shared_splits; 297static unsigned int shared_splits;
234static unsigned int tmac_util_period = 5; 298static unsigned int tmac_util_period = 5;
235static unsigned int rmac_util_period = 5; 299static unsigned int rmac_util_period = 5;
300static unsigned int bimodal = 0;
236#ifndef CONFIG_S2IO_NAPI 301#ifndef CONFIG_S2IO_NAPI
237static unsigned int indicate_max_pkts; 302static unsigned int indicate_max_pkts;
238#endif 303#endif
304/* Frequency of Rx desc syncs expressed as power of 2 */
305static unsigned int rxsync_frequency = 3;
239 306
240/* 307/*
241 * S2IO device table. 308 * S2IO device table.
242 * This table lists all the devices that this driver supports. 309 * This table lists all the devices that this driver supports.
243 */ 310 */
244static struct pci_device_id s2io_tbl[] __devinitdata = { 311static struct pci_device_id s2io_tbl[] __devinitdata = {
245 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN, 312 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
@@ -247,9 +314,9 @@ static struct pci_device_id s2io_tbl[] __devinitdata = {
247 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI, 314 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
248 PCI_ANY_ID, PCI_ANY_ID}, 315 PCI_ANY_ID, PCI_ANY_ID},
249 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN, 316 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
250 PCI_ANY_ID, PCI_ANY_ID}, 317 PCI_ANY_ID, PCI_ANY_ID},
251 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI, 318 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
252 PCI_ANY_ID, PCI_ANY_ID}, 319 PCI_ANY_ID, PCI_ANY_ID},
253 {0,} 320 {0,}
254}; 321};
255 322
@@ -268,8 +335,8 @@ static struct pci_driver s2io_driver = {
268/** 335/**
269 * init_shared_mem - Allocation and Initialization of Memory 336 * init_shared_mem - Allocation and Initialization of Memory
270 * @nic: Device private variable. 337 * @nic: Device private variable.
271 * Description: The function allocates all the memory areas shared 338 * Description: The function allocates all the memory areas shared
272 * between the NIC and the driver. This includes Tx descriptors, 339 * between the NIC and the driver. This includes Tx descriptors,
273 * Rx descriptors and the statistics block. 340 * Rx descriptors and the statistics block.
274 */ 341 */
275 342
@@ -279,11 +346,11 @@ static int init_shared_mem(struct s2io_nic *nic)
279 void *tmp_v_addr, *tmp_v_addr_next; 346 void *tmp_v_addr, *tmp_v_addr_next;
280 dma_addr_t tmp_p_addr, tmp_p_addr_next; 347 dma_addr_t tmp_p_addr, tmp_p_addr_next;
281 RxD_block_t *pre_rxd_blk = NULL; 348 RxD_block_t *pre_rxd_blk = NULL;
282 int i, j, blk_cnt; 349 int i, j, blk_cnt, rx_sz, tx_sz;
283 int lst_size, lst_per_page; 350 int lst_size, lst_per_page;
284 struct net_device *dev = nic->dev; 351 struct net_device *dev = nic->dev;
285#ifdef CONFIG_2BUFF_MODE 352#ifdef CONFIG_2BUFF_MODE
286 unsigned long tmp; 353 u64 tmp;
287 buffAdd_t *ba; 354 buffAdd_t *ba;
288#endif 355#endif
289 356
@@ -300,36 +367,41 @@ static int init_shared_mem(struct s2io_nic *nic)
300 size += config->tx_cfg[i].fifo_len; 367 size += config->tx_cfg[i].fifo_len;
301 } 368 }
302 if (size > MAX_AVAILABLE_TXDS) { 369 if (size > MAX_AVAILABLE_TXDS) {
303 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ", 370 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
304 dev->name); 371 __FUNCTION__);
305 DBG_PRINT(ERR_DBG, "exceeds the maximum value "); 372 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
306 DBG_PRINT(ERR_DBG, "that can be used\n");
307 return FAILURE; 373 return FAILURE;
308 } 374 }
309 375
310 lst_size = (sizeof(TxD_t) * config->max_txds); 376 lst_size = (sizeof(TxD_t) * config->max_txds);
377 tx_sz = lst_size * size;
311 lst_per_page = PAGE_SIZE / lst_size; 378 lst_per_page = PAGE_SIZE / lst_size;
312 379
313 for (i = 0; i < config->tx_fifo_num; i++) { 380 for (i = 0; i < config->tx_fifo_num; i++) {
314 int fifo_len = config->tx_cfg[i].fifo_len; 381 int fifo_len = config->tx_cfg[i].fifo_len;
315 int list_holder_size = fifo_len * sizeof(list_info_hold_t); 382 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
316 nic->list_info[i] = kmalloc(list_holder_size, GFP_KERNEL); 383 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
317 if (!nic->list_info[i]) { 384 GFP_KERNEL);
385 if (!mac_control->fifos[i].list_info) {
318 DBG_PRINT(ERR_DBG, 386 DBG_PRINT(ERR_DBG,
319 "Malloc failed for list_info\n"); 387 "Malloc failed for list_info\n");
320 return -ENOMEM; 388 return -ENOMEM;
321 } 389 }
322 memset(nic->list_info[i], 0, list_holder_size); 390 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
323 } 391 }
324 for (i = 0; i < config->tx_fifo_num; i++) { 392 for (i = 0; i < config->tx_fifo_num; i++) {
325 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len, 393 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
326 lst_per_page); 394 lst_per_page);
327 mac_control->tx_curr_put_info[i].offset = 0; 395 mac_control->fifos[i].tx_curr_put_info.offset = 0;
328 mac_control->tx_curr_put_info[i].fifo_len = 396 mac_control->fifos[i].tx_curr_put_info.fifo_len =
329 config->tx_cfg[i].fifo_len - 1; 397 config->tx_cfg[i].fifo_len - 1;
330 mac_control->tx_curr_get_info[i].offset = 0; 398 mac_control->fifos[i].tx_curr_get_info.offset = 0;
331 mac_control->tx_curr_get_info[i].fifo_len = 399 mac_control->fifos[i].tx_curr_get_info.fifo_len =
332 config->tx_cfg[i].fifo_len - 1; 400 config->tx_cfg[i].fifo_len - 1;
401 mac_control->fifos[i].fifo_no = i;
402 mac_control->fifos[i].nic = nic;
403 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
404
333 for (j = 0; j < page_num; j++) { 405 for (j = 0; j < page_num; j++) {
334 int k = 0; 406 int k = 0;
335 dma_addr_t tmp_p; 407 dma_addr_t tmp_p;
@@ -345,16 +417,15 @@ static int init_shared_mem(struct s2io_nic *nic)
345 while (k < lst_per_page) { 417 while (k < lst_per_page) {
346 int l = (j * lst_per_page) + k; 418 int l = (j * lst_per_page) + k;
347 if (l == config->tx_cfg[i].fifo_len) 419 if (l == config->tx_cfg[i].fifo_len)
348 goto end_txd_alloc; 420 break;
349 nic->list_info[i][l].list_virt_addr = 421 mac_control->fifos[i].list_info[l].list_virt_addr =
350 tmp_v + (k * lst_size); 422 tmp_v + (k * lst_size);
351 nic->list_info[i][l].list_phy_addr = 423 mac_control->fifos[i].list_info[l].list_phy_addr =
352 tmp_p + (k * lst_size); 424 tmp_p + (k * lst_size);
353 k++; 425 k++;
354 } 426 }
355 } 427 }
356 } 428 }
357 end_txd_alloc:
358 429
359 /* Allocation and initialization of RXDs in Rings */ 430 /* Allocation and initialization of RXDs in Rings */
360 size = 0; 431 size = 0;
@@ -367,21 +438,26 @@ static int init_shared_mem(struct s2io_nic *nic)
367 return FAILURE; 438 return FAILURE;
368 } 439 }
369 size += config->rx_cfg[i].num_rxd; 440 size += config->rx_cfg[i].num_rxd;
370 nic->block_count[i] = 441 mac_control->rings[i].block_count =
371 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 442 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
372 nic->pkt_cnt[i] = 443 mac_control->rings[i].pkt_cnt =
373 config->rx_cfg[i].num_rxd - nic->block_count[i]; 444 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
374 } 445 }
446 size = (size * (sizeof(RxD_t)));
447 rx_sz = size;
375 448
376 for (i = 0; i < config->rx_ring_num; i++) { 449 for (i = 0; i < config->rx_ring_num; i++) {
377 mac_control->rx_curr_get_info[i].block_index = 0; 450 mac_control->rings[i].rx_curr_get_info.block_index = 0;
378 mac_control->rx_curr_get_info[i].offset = 0; 451 mac_control->rings[i].rx_curr_get_info.offset = 0;
379 mac_control->rx_curr_get_info[i].ring_len = 452 mac_control->rings[i].rx_curr_get_info.ring_len =
380 config->rx_cfg[i].num_rxd - 1; 453 config->rx_cfg[i].num_rxd - 1;
381 mac_control->rx_curr_put_info[i].block_index = 0; 454 mac_control->rings[i].rx_curr_put_info.block_index = 0;
382 mac_control->rx_curr_put_info[i].offset = 0; 455 mac_control->rings[i].rx_curr_put_info.offset = 0;
383 mac_control->rx_curr_put_info[i].ring_len = 456 mac_control->rings[i].rx_curr_put_info.ring_len =
384 config->rx_cfg[i].num_rxd - 1; 457 config->rx_cfg[i].num_rxd - 1;
458 mac_control->rings[i].nic = nic;
459 mac_control->rings[i].ring_no = i;
460
385 blk_cnt = 461 blk_cnt =
386 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 462 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
387 /* Allocating all the Rx blocks */ 463 /* Allocating all the Rx blocks */
@@ -395,32 +471,36 @@ static int init_shared_mem(struct s2io_nic *nic)
395 &tmp_p_addr); 471 &tmp_p_addr);
396 if (tmp_v_addr == NULL) { 472 if (tmp_v_addr == NULL) {
397 /* 473 /*
398 * In case of failure, free_shared_mem() 474 * In case of failure, free_shared_mem()
399 * is called, which should free any 475 * is called, which should free any
400 * memory that was alloced till the 476 * memory that was alloced till the
401 * failure happened. 477 * failure happened.
402 */ 478 */
403 nic->rx_blocks[i][j].block_virt_addr = 479 mac_control->rings[i].rx_blocks[j].block_virt_addr =
404 tmp_v_addr; 480 tmp_v_addr;
405 return -ENOMEM; 481 return -ENOMEM;
406 } 482 }
407 memset(tmp_v_addr, 0, size); 483 memset(tmp_v_addr, 0, size);
408 nic->rx_blocks[i][j].block_virt_addr = tmp_v_addr; 484 mac_control->rings[i].rx_blocks[j].block_virt_addr =
409 nic->rx_blocks[i][j].block_dma_addr = tmp_p_addr; 485 tmp_v_addr;
486 mac_control->rings[i].rx_blocks[j].block_dma_addr =
487 tmp_p_addr;
410 } 488 }
411 /* Interlinking all Rx Blocks */ 489 /* Interlinking all Rx Blocks */
412 for (j = 0; j < blk_cnt; j++) { 490 for (j = 0; j < blk_cnt; j++) {
413 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr; 491 tmp_v_addr =
492 mac_control->rings[i].rx_blocks[j].block_virt_addr;
414 tmp_v_addr_next = 493 tmp_v_addr_next =
415 nic->rx_blocks[i][(j + 1) % 494 mac_control->rings[i].rx_blocks[(j + 1) %
416 blk_cnt].block_virt_addr; 495 blk_cnt].block_virt_addr;
417 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr; 496 tmp_p_addr =
497 mac_control->rings[i].rx_blocks[j].block_dma_addr;
418 tmp_p_addr_next = 498 tmp_p_addr_next =
419 nic->rx_blocks[i][(j + 1) % 499 mac_control->rings[i].rx_blocks[(j + 1) %
420 blk_cnt].block_dma_addr; 500 blk_cnt].block_dma_addr;
421 501
422 pre_rxd_blk = (RxD_block_t *) tmp_v_addr; 502 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
423 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD 503 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
424 * marker. 504 * marker.
425 */ 505 */
426#ifndef CONFIG_2BUFF_MODE 506#ifndef CONFIG_2BUFF_MODE
@@ -433,43 +513,43 @@ static int init_shared_mem(struct s2io_nic *nic)
433 } 513 }
434 514
435#ifdef CONFIG_2BUFF_MODE 515#ifdef CONFIG_2BUFF_MODE
436 /* 516 /*
437 * Allocation of Storages for buffer addresses in 2BUFF mode 517 * Allocation of Storages for buffer addresses in 2BUFF mode
438 * and the buffers as well. 518 * and the buffers as well.
439 */ 519 */
440 for (i = 0; i < config->rx_ring_num; i++) { 520 for (i = 0; i < config->rx_ring_num; i++) {
441 blk_cnt = 521 blk_cnt =
442 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 522 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
443 nic->ba[i] = kmalloc((sizeof(buffAdd_t *) * blk_cnt), 523 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
444 GFP_KERNEL); 524 GFP_KERNEL);
445 if (!nic->ba[i]) 525 if (!mac_control->rings[i].ba)
446 return -ENOMEM; 526 return -ENOMEM;
447 for (j = 0; j < blk_cnt; j++) { 527 for (j = 0; j < blk_cnt; j++) {
448 int k = 0; 528 int k = 0;
449 nic->ba[i][j] = kmalloc((sizeof(buffAdd_t) * 529 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
450 (MAX_RXDS_PER_BLOCK + 1)), 530 (MAX_RXDS_PER_BLOCK + 1)),
451 GFP_KERNEL); 531 GFP_KERNEL);
452 if (!nic->ba[i][j]) 532 if (!mac_control->rings[i].ba[j])
453 return -ENOMEM; 533 return -ENOMEM;
454 while (k != MAX_RXDS_PER_BLOCK) { 534 while (k != MAX_RXDS_PER_BLOCK) {
455 ba = &nic->ba[i][j][k]; 535 ba = &mac_control->rings[i].ba[j][k];
456 536
457 ba->ba_0_org = kmalloc 537 ba->ba_0_org = (void *) kmalloc
458 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL); 538 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
459 if (!ba->ba_0_org) 539 if (!ba->ba_0_org)
460 return -ENOMEM; 540 return -ENOMEM;
461 tmp = (unsigned long) ba->ba_0_org; 541 tmp = (u64) ba->ba_0_org;
462 tmp += ALIGN_SIZE; 542 tmp += ALIGN_SIZE;
463 tmp &= ~((unsigned long) ALIGN_SIZE); 543 tmp &= ~((u64) ALIGN_SIZE);
464 ba->ba_0 = (void *) tmp; 544 ba->ba_0 = (void *) tmp;
465 545
466 ba->ba_1_org = kmalloc 546 ba->ba_1_org = (void *) kmalloc
467 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL); 547 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
468 if (!ba->ba_1_org) 548 if (!ba->ba_1_org)
469 return -ENOMEM; 549 return -ENOMEM;
470 tmp = (unsigned long) ba->ba_1_org; 550 tmp = (u64) ba->ba_1_org;
471 tmp += ALIGN_SIZE; 551 tmp += ALIGN_SIZE;
472 tmp &= ~((unsigned long) ALIGN_SIZE); 552 tmp &= ~((u64) ALIGN_SIZE);
473 ba->ba_1 = (void *) tmp; 553 ba->ba_1 = (void *) tmp;
474 k++; 554 k++;
475 } 555 }
@@ -483,9 +563,9 @@ static int init_shared_mem(struct s2io_nic *nic)
483 (nic->pdev, size, &mac_control->stats_mem_phy); 563 (nic->pdev, size, &mac_control->stats_mem_phy);
484 564
485 if (!mac_control->stats_mem) { 565 if (!mac_control->stats_mem) {
486 /* 566 /*
487 * In case of failure, free_shared_mem() is called, which 567 * In case of failure, free_shared_mem() is called, which
488 * should free any memory that was alloced till the 568 * should free any memory that was alloced till the
489 * failure happened. 569 * failure happened.
490 */ 570 */
491 return -ENOMEM; 571 return -ENOMEM;
@@ -495,15 +575,14 @@ static int init_shared_mem(struct s2io_nic *nic)
495 tmp_v_addr = mac_control->stats_mem; 575 tmp_v_addr = mac_control->stats_mem;
496 mac_control->stats_info = (StatInfo_t *) tmp_v_addr; 576 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
497 memset(tmp_v_addr, 0, size); 577 memset(tmp_v_addr, 0, size);
498
499 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name, 578 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
500 (unsigned long long) tmp_p_addr); 579 (unsigned long long) tmp_p_addr);
501 580
502 return SUCCESS; 581 return SUCCESS;
503} 582}
504 583
505/** 584/**
506 * free_shared_mem - Free the allocated Memory 585 * free_shared_mem - Free the allocated Memory
507 * @nic: Device private variable. 586 * @nic: Device private variable.
508 * Description: This function is to free all memory locations allocated by 587 * Description: This function is to free all memory locations allocated by
509 * the init_shared_mem() function and return it to the kernel. 588 * the init_shared_mem() function and return it to the kernel.
@@ -533,15 +612,19 @@ static void free_shared_mem(struct s2io_nic *nic)
533 lst_per_page); 612 lst_per_page);
534 for (j = 0; j < page_num; j++) { 613 for (j = 0; j < page_num; j++) {
535 int mem_blks = (j * lst_per_page); 614 int mem_blks = (j * lst_per_page);
536 if (!nic->list_info[i][mem_blks].list_virt_addr) 615 if ((!mac_control->fifos[i].list_info) ||
616 (!mac_control->fifos[i].list_info[mem_blks].
617 list_virt_addr))
537 break; 618 break;
538 pci_free_consistent(nic->pdev, PAGE_SIZE, 619 pci_free_consistent(nic->pdev, PAGE_SIZE,
539 nic->list_info[i][mem_blks]. 620 mac_control->fifos[i].
621 list_info[mem_blks].
540 list_virt_addr, 622 list_virt_addr,
541 nic->list_info[i][mem_blks]. 623 mac_control->fifos[i].
624 list_info[mem_blks].
542 list_phy_addr); 625 list_phy_addr);
543 } 626 }
544 kfree(nic->list_info[i]); 627 kfree(mac_control->fifos[i].list_info);
545 } 628 }
546 629
547#ifndef CONFIG_2BUFF_MODE 630#ifndef CONFIG_2BUFF_MODE
@@ -550,10 +633,12 @@ static void free_shared_mem(struct s2io_nic *nic)
550 size = SIZE_OF_BLOCK; 633 size = SIZE_OF_BLOCK;
551#endif 634#endif
552 for (i = 0; i < config->rx_ring_num; i++) { 635 for (i = 0; i < config->rx_ring_num; i++) {
553 blk_cnt = nic->block_count[i]; 636 blk_cnt = mac_control->rings[i].block_count;
554 for (j = 0; j < blk_cnt; j++) { 637 for (j = 0; j < blk_cnt; j++) {
555 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr; 638 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
556 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr; 639 block_virt_addr;
640 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
641 block_dma_addr;
557 if (tmp_v_addr == NULL) 642 if (tmp_v_addr == NULL)
558 break; 643 break;
559 pci_free_consistent(nic->pdev, size, 644 pci_free_consistent(nic->pdev, size,
@@ -566,35 +651,21 @@ static void free_shared_mem(struct s2io_nic *nic)
566 for (i = 0; i < config->rx_ring_num; i++) { 651 for (i = 0; i < config->rx_ring_num; i++) {
567 blk_cnt = 652 blk_cnt =
568 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 653 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
569 if (!nic->ba[i])
570 goto end_free;
571 for (j = 0; j < blk_cnt; j++) { 654 for (j = 0; j < blk_cnt; j++) {
572 int k = 0; 655 int k = 0;
573 if (!nic->ba[i][j]) { 656 if (!mac_control->rings[i].ba[j])
574 kfree(nic->ba[i]); 657 continue;
575 goto end_free;
576 }
577 while (k != MAX_RXDS_PER_BLOCK) { 658 while (k != MAX_RXDS_PER_BLOCK) {
578 buffAdd_t *ba = &nic->ba[i][j][k]; 659 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
579 if (!ba || !ba->ba_0_org || !ba->ba_1_org)
580 {
581 kfree(nic->ba[i]);
582 kfree(nic->ba[i][j]);
583 if(ba->ba_0_org)
584 kfree(ba->ba_0_org);
585 if(ba->ba_1_org)
586 kfree(ba->ba_1_org);
587 goto end_free;
588 }
589 kfree(ba->ba_0_org); 660 kfree(ba->ba_0_org);
590 kfree(ba->ba_1_org); 661 kfree(ba->ba_1_org);
591 k++; 662 k++;
592 } 663 }
593 kfree(nic->ba[i][j]); 664 kfree(mac_control->rings[i].ba[j]);
594 } 665 }
595 kfree(nic->ba[i]); 666 if (mac_control->rings[i].ba)
667 kfree(mac_control->rings[i].ba);
596 } 668 }
597end_free:
598#endif 669#endif
599 670
600 if (mac_control->stats_mem) { 671 if (mac_control->stats_mem) {
@@ -605,12 +676,93 @@ end_free:
605 } 676 }
606} 677}
607 678
608/** 679/**
609 * init_nic - Initialization of hardware 680 * s2io_verify_pci_mode -
681 */
682
683static int s2io_verify_pci_mode(nic_t *nic)
684{
685 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
686 register u64 val64 = 0;
687 int mode;
688
689 val64 = readq(&bar0->pci_mode);
690 mode = (u8)GET_PCI_MODE(val64);
691
692 if ( val64 & PCI_MODE_UNKNOWN_MODE)
693 return -1; /* Unknown PCI mode */
694 return mode;
695}
696
697
698/**
699 * s2io_print_pci_mode -
700 */
701static int s2io_print_pci_mode(nic_t *nic)
702{
703 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
704 register u64 val64 = 0;
705 int mode;
706 struct config_param *config = &nic->config;
707
708 val64 = readq(&bar0->pci_mode);
709 mode = (u8)GET_PCI_MODE(val64);
710
711 if ( val64 & PCI_MODE_UNKNOWN_MODE)
712 return -1; /* Unknown PCI mode */
713
714 if (val64 & PCI_MODE_32_BITS) {
715 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
716 } else {
717 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
718 }
719
720 switch(mode) {
721 case PCI_MODE_PCI_33:
722 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
723 config->bus_speed = 33;
724 break;
725 case PCI_MODE_PCI_66:
726 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
727 config->bus_speed = 133;
728 break;
729 case PCI_MODE_PCIX_M1_66:
730 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
731 config->bus_speed = 133; /* Herc doubles the clock rate */
732 break;
733 case PCI_MODE_PCIX_M1_100:
734 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
735 config->bus_speed = 200;
736 break;
737 case PCI_MODE_PCIX_M1_133:
738 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
739 config->bus_speed = 266;
740 break;
741 case PCI_MODE_PCIX_M2_66:
742 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
743 config->bus_speed = 133;
744 break;
745 case PCI_MODE_PCIX_M2_100:
746 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
747 config->bus_speed = 200;
748 break;
749 case PCI_MODE_PCIX_M2_133:
750 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
751 config->bus_speed = 266;
752 break;
753 default:
754 return -1; /* Unsupported bus speed */
755 }
756
757 return mode;
758}
759
760/**
761 * init_nic - Initialization of hardware
610 * @nic: device peivate variable 762 * @nic: device peivate variable
611 * Description: The function sequentially configures every block 763 * Description: The function sequentially configures every block
612 * of the H/W from their reset values. 764 * of the H/W from their reset values.
613 * Return Value: SUCCESS on success and 765 * Return Value: SUCCESS on success and
614 * '-1' on failure (endian settings incorrect). 766 * '-1' on failure (endian settings incorrect).
615 */ 767 */
616 768
@@ -626,21 +778,32 @@ static int init_nic(struct s2io_nic *nic)
626 struct config_param *config; 778 struct config_param *config;
627 int mdio_cnt = 0, dtx_cnt = 0; 779 int mdio_cnt = 0, dtx_cnt = 0;
628 unsigned long long mem_share; 780 unsigned long long mem_share;
781 int mem_size;
629 782
630 mac_control = &nic->mac_control; 783 mac_control = &nic->mac_control;
631 config = &nic->config; 784 config = &nic->config;
632 785
633 /* Initialize swapper control register */ 786 /* to set the swapper controle on the card */
634 if (s2io_set_swapper(nic)) { 787 if(s2io_set_swapper(nic)) {
635 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n"); 788 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
636 return -1; 789 return -1;
637 } 790 }
638 791
792 /*
793 * Herc requires EOI to be removed from reset before XGXS, so..
794 */
795 if (nic->device_type & XFRAME_II_DEVICE) {
796 val64 = 0xA500000000ULL;
797 writeq(val64, &bar0->sw_reset);
798 msleep(500);
799 val64 = readq(&bar0->sw_reset);
800 }
801
639 /* Remove XGXS from reset state */ 802 /* Remove XGXS from reset state */
640 val64 = 0; 803 val64 = 0;
641 writeq(val64, &bar0->sw_reset); 804 writeq(val64, &bar0->sw_reset);
642 val64 = readq(&bar0->sw_reset);
643 msleep(500); 805 msleep(500);
806 val64 = readq(&bar0->sw_reset);
644 807
645 /* Enable Receiving broadcasts */ 808 /* Enable Receiving broadcasts */
646 add = &bar0->mac_cfg; 809 add = &bar0->mac_cfg;
@@ -660,48 +823,58 @@ static int init_nic(struct s2io_nic *nic)
660 val64 = dev->mtu; 823 val64 = dev->mtu;
661 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); 824 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
662 825
663 /* 826 /*
664 * Configuring the XAUI Interface of Xena. 827 * Configuring the XAUI Interface of Xena.
665 * *************************************** 828 * ***************************************
666 * To Configure the Xena's XAUI, one has to write a series 829 * To Configure the Xena's XAUI, one has to write a series
667 * of 64 bit values into two registers in a particular 830 * of 64 bit values into two registers in a particular
668 * sequence. Hence a macro 'SWITCH_SIGN' has been defined 831 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
669 * which will be defined in the array of configuration values 832 * which will be defined in the array of configuration values
670 * (default_dtx_cfg & default_mdio_cfg) at appropriate places 833 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
671 * to switch writing from one regsiter to another. We continue 834 * to switch writing from one regsiter to another. We continue
672 * writing these values until we encounter the 'END_SIGN' macro. 835 * writing these values until we encounter the 'END_SIGN' macro.
673 * For example, After making a series of 21 writes into 836 * For example, After making a series of 21 writes into
674 * dtx_control register the 'SWITCH_SIGN' appears and hence we 837 * dtx_control register the 'SWITCH_SIGN' appears and hence we
675 * start writing into mdio_control until we encounter END_SIGN. 838 * start writing into mdio_control until we encounter END_SIGN.
676 */ 839 */
677 while (1) { 840 if (nic->device_type & XFRAME_II_DEVICE) {
678 dtx_cfg: 841 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
679 while (default_dtx_cfg[dtx_cnt] != END_SIGN) { 842 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
680 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
681 dtx_cnt++;
682 goto mdio_cfg;
683 }
684 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
685 &bar0->dtx_control, UF); 843 &bar0->dtx_control, UF);
686 val64 = readq(&bar0->dtx_control); 844 if (dtx_cnt & 0x1)
845 msleep(1); /* Necessary!! */
687 dtx_cnt++; 846 dtx_cnt++;
688 } 847 }
689 mdio_cfg: 848 } else {
690 while (default_mdio_cfg[mdio_cnt] != END_SIGN) { 849 while (1) {
691 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) { 850 dtx_cfg:
851 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
852 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
853 dtx_cnt++;
854 goto mdio_cfg;
855 }
856 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
857 &bar0->dtx_control, UF);
858 val64 = readq(&bar0->dtx_control);
859 dtx_cnt++;
860 }
861 mdio_cfg:
862 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
863 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
864 mdio_cnt++;
865 goto dtx_cfg;
866 }
867 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
868 &bar0->mdio_control, UF);
869 val64 = readq(&bar0->mdio_control);
692 mdio_cnt++; 870 mdio_cnt++;
871 }
872 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
873 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
874 break;
875 } else {
693 goto dtx_cfg; 876 goto dtx_cfg;
694 } 877 }
695 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
696 &bar0->mdio_control, UF);
697 val64 = readq(&bar0->mdio_control);
698 mdio_cnt++;
699 }
700 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
701 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
702 break;
703 } else {
704 goto dtx_cfg;
705 } 878 }
706 } 879 }
707 880
@@ -748,12 +921,20 @@ static int init_nic(struct s2io_nic *nic)
748 val64 |= BIT(0); /* To enable the FIFO partition. */ 921 val64 |= BIT(0); /* To enable the FIFO partition. */
749 writeq(val64, &bar0->tx_fifo_partition_0); 922 writeq(val64, &bar0->tx_fifo_partition_0);
750 923
924 /*
925 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
926 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
927 */
928 if ((nic->device_type == XFRAME_I_DEVICE) &&
929 (get_xena_rev_id(nic->pdev) < 4))
930 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
931
751 val64 = readq(&bar0->tx_fifo_partition_0); 932 val64 = readq(&bar0->tx_fifo_partition_0);
752 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n", 933 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
753 &bar0->tx_fifo_partition_0, (unsigned long long) val64); 934 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
754 935
755 /* 936 /*
756 * Initialization of Tx_PA_CONFIG register to ignore packet 937 * Initialization of Tx_PA_CONFIG register to ignore packet
757 * integrity checking. 938 * integrity checking.
758 */ 939 */
759 val64 = readq(&bar0->tx_pa_cfg); 940 val64 = readq(&bar0->tx_pa_cfg);
@@ -770,85 +951,304 @@ static int init_nic(struct s2io_nic *nic)
770 } 951 }
771 writeq(val64, &bar0->rx_queue_priority); 952 writeq(val64, &bar0->rx_queue_priority);
772 953
773 /* 954 /*
774 * Allocating equal share of memory to all the 955 * Allocating equal share of memory to all the
775 * configured Rings. 956 * configured Rings.
776 */ 957 */
777 val64 = 0; 958 val64 = 0;
959 if (nic->device_type & XFRAME_II_DEVICE)
960 mem_size = 32;
961 else
962 mem_size = 64;
963
778 for (i = 0; i < config->rx_ring_num; i++) { 964 for (i = 0; i < config->rx_ring_num; i++) {
779 switch (i) { 965 switch (i) {
780 case 0: 966 case 0:
781 mem_share = (64 / config->rx_ring_num + 967 mem_share = (mem_size / config->rx_ring_num +
782 64 % config->rx_ring_num); 968 mem_size % config->rx_ring_num);
783 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share); 969 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
784 continue; 970 continue;
785 case 1: 971 case 1:
786 mem_share = (64 / config->rx_ring_num); 972 mem_share = (mem_size / config->rx_ring_num);
787 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share); 973 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
788 continue; 974 continue;
789 case 2: 975 case 2:
790 mem_share = (64 / config->rx_ring_num); 976 mem_share = (mem_size / config->rx_ring_num);
791 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share); 977 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
792 continue; 978 continue;
793 case 3: 979 case 3:
794 mem_share = (64 / config->rx_ring_num); 980 mem_share = (mem_size / config->rx_ring_num);
795 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share); 981 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
796 continue; 982 continue;
797 case 4: 983 case 4:
798 mem_share = (64 / config->rx_ring_num); 984 mem_share = (mem_size / config->rx_ring_num);
799 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share); 985 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
800 continue; 986 continue;
801 case 5: 987 case 5:
802 mem_share = (64 / config->rx_ring_num); 988 mem_share = (mem_size / config->rx_ring_num);
803 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share); 989 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
804 continue; 990 continue;
805 case 6: 991 case 6:
806 mem_share = (64 / config->rx_ring_num); 992 mem_share = (mem_size / config->rx_ring_num);
807 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share); 993 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
808 continue; 994 continue;
809 case 7: 995 case 7:
810 mem_share = (64 / config->rx_ring_num); 996 mem_share = (mem_size / config->rx_ring_num);
811 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share); 997 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
812 continue; 998 continue;
813 } 999 }
814 } 1000 }
815 writeq(val64, &bar0->rx_queue_cfg); 1001 writeq(val64, &bar0->rx_queue_cfg);
816 1002
817 /* 1003 /*
818 * Initializing the Tx round robin registers to 0. 1004 * Filling Tx round robin registers
819 * Filling Tx and Rx round robin registers as per the 1005 * as per the number of FIFOs
820 * number of FIFOs and Rings is still TODO.
821 */
822 writeq(0, &bar0->tx_w_round_robin_0);
823 writeq(0, &bar0->tx_w_round_robin_1);
824 writeq(0, &bar0->tx_w_round_robin_2);
825 writeq(0, &bar0->tx_w_round_robin_3);
826 writeq(0, &bar0->tx_w_round_robin_4);
827
828 /*
829 * TODO
830 * Disable Rx steering. Hard coding all packets be steered to
831 * Queue 0 for now.
832 */ 1006 */
833 val64 = 0x8080808080808080ULL; 1007 switch (config->tx_fifo_num) {
834 writeq(val64, &bar0->rts_qos_steering); 1008 case 1:
1009 val64 = 0x0000000000000000ULL;
1010 writeq(val64, &bar0->tx_w_round_robin_0);
1011 writeq(val64, &bar0->tx_w_round_robin_1);
1012 writeq(val64, &bar0->tx_w_round_robin_2);
1013 writeq(val64, &bar0->tx_w_round_robin_3);
1014 writeq(val64, &bar0->tx_w_round_robin_4);
1015 break;
1016 case 2:
1017 val64 = 0x0000010000010000ULL;
1018 writeq(val64, &bar0->tx_w_round_robin_0);
1019 val64 = 0x0100000100000100ULL;
1020 writeq(val64, &bar0->tx_w_round_robin_1);
1021 val64 = 0x0001000001000001ULL;
1022 writeq(val64, &bar0->tx_w_round_robin_2);
1023 val64 = 0x0000010000010000ULL;
1024 writeq(val64, &bar0->tx_w_round_robin_3);
1025 val64 = 0x0100000000000000ULL;
1026 writeq(val64, &bar0->tx_w_round_robin_4);
1027 break;
1028 case 3:
1029 val64 = 0x0001000102000001ULL;
1030 writeq(val64, &bar0->tx_w_round_robin_0);
1031 val64 = 0x0001020000010001ULL;
1032 writeq(val64, &bar0->tx_w_round_robin_1);
1033 val64 = 0x0200000100010200ULL;
1034 writeq(val64, &bar0->tx_w_round_robin_2);
1035 val64 = 0x0001000102000001ULL;
1036 writeq(val64, &bar0->tx_w_round_robin_3);
1037 val64 = 0x0001020000000000ULL;
1038 writeq(val64, &bar0->tx_w_round_robin_4);
1039 break;
1040 case 4:
1041 val64 = 0x0001020300010200ULL;
1042 writeq(val64, &bar0->tx_w_round_robin_0);
1043 val64 = 0x0100000102030001ULL;
1044 writeq(val64, &bar0->tx_w_round_robin_1);
1045 val64 = 0x0200010000010203ULL;
1046 writeq(val64, &bar0->tx_w_round_robin_2);
1047 val64 = 0x0001020001000001ULL;
1048 writeq(val64, &bar0->tx_w_round_robin_3);
1049 val64 = 0x0203000100000000ULL;
1050 writeq(val64, &bar0->tx_w_round_robin_4);
1051 break;
1052 case 5:
1053 val64 = 0x0001000203000102ULL;
1054 writeq(val64, &bar0->tx_w_round_robin_0);
1055 val64 = 0x0001020001030004ULL;
1056 writeq(val64, &bar0->tx_w_round_robin_1);
1057 val64 = 0x0001000203000102ULL;
1058 writeq(val64, &bar0->tx_w_round_robin_2);
1059 val64 = 0x0001020001030004ULL;
1060 writeq(val64, &bar0->tx_w_round_robin_3);
1061 val64 = 0x0001000000000000ULL;
1062 writeq(val64, &bar0->tx_w_round_robin_4);
1063 break;
1064 case 6:
1065 val64 = 0x0001020304000102ULL;
1066 writeq(val64, &bar0->tx_w_round_robin_0);
1067 val64 = 0x0304050001020001ULL;
1068 writeq(val64, &bar0->tx_w_round_robin_1);
1069 val64 = 0x0203000100000102ULL;
1070 writeq(val64, &bar0->tx_w_round_robin_2);
1071 val64 = 0x0304000102030405ULL;
1072 writeq(val64, &bar0->tx_w_round_robin_3);
1073 val64 = 0x0001000200000000ULL;
1074 writeq(val64, &bar0->tx_w_round_robin_4);
1075 break;
1076 case 7:
1077 val64 = 0x0001020001020300ULL;
1078 writeq(val64, &bar0->tx_w_round_robin_0);
1079 val64 = 0x0102030400010203ULL;
1080 writeq(val64, &bar0->tx_w_round_robin_1);
1081 val64 = 0x0405060001020001ULL;
1082 writeq(val64, &bar0->tx_w_round_robin_2);
1083 val64 = 0x0304050000010200ULL;
1084 writeq(val64, &bar0->tx_w_round_robin_3);
1085 val64 = 0x0102030000000000ULL;
1086 writeq(val64, &bar0->tx_w_round_robin_4);
1087 break;
1088 case 8:
1089 val64 = 0x0001020300040105ULL;
1090 writeq(val64, &bar0->tx_w_round_robin_0);
1091 val64 = 0x0200030106000204ULL;
1092 writeq(val64, &bar0->tx_w_round_robin_1);
1093 val64 = 0x0103000502010007ULL;
1094 writeq(val64, &bar0->tx_w_round_robin_2);
1095 val64 = 0x0304010002060500ULL;
1096 writeq(val64, &bar0->tx_w_round_robin_3);
1097 val64 = 0x0103020400000000ULL;
1098 writeq(val64, &bar0->tx_w_round_robin_4);
1099 break;
1100 }
1101
1102 /* Filling the Rx round robin registers as per the
1103 * number of Rings and steering based on QoS.
1104 */
1105 switch (config->rx_ring_num) {
1106 case 1:
1107 val64 = 0x8080808080808080ULL;
1108 writeq(val64, &bar0->rts_qos_steering);
1109 break;
1110 case 2:
1111 val64 = 0x0000010000010000ULL;
1112 writeq(val64, &bar0->rx_w_round_robin_0);
1113 val64 = 0x0100000100000100ULL;
1114 writeq(val64, &bar0->rx_w_round_robin_1);
1115 val64 = 0x0001000001000001ULL;
1116 writeq(val64, &bar0->rx_w_round_robin_2);
1117 val64 = 0x0000010000010000ULL;
1118 writeq(val64, &bar0->rx_w_round_robin_3);
1119 val64 = 0x0100000000000000ULL;
1120 writeq(val64, &bar0->rx_w_round_robin_4);
1121
1122 val64 = 0x8080808040404040ULL;
1123 writeq(val64, &bar0->rts_qos_steering);
1124 break;
1125 case 3:
1126 val64 = 0x0001000102000001ULL;
1127 writeq(val64, &bar0->rx_w_round_robin_0);
1128 val64 = 0x0001020000010001ULL;
1129 writeq(val64, &bar0->rx_w_round_robin_1);
1130 val64 = 0x0200000100010200ULL;
1131 writeq(val64, &bar0->rx_w_round_robin_2);
1132 val64 = 0x0001000102000001ULL;
1133 writeq(val64, &bar0->rx_w_round_robin_3);
1134 val64 = 0x0001020000000000ULL;
1135 writeq(val64, &bar0->rx_w_round_robin_4);
1136
1137 val64 = 0x8080804040402020ULL;
1138 writeq(val64, &bar0->rts_qos_steering);
1139 break;
1140 case 4:
1141 val64 = 0x0001020300010200ULL;
1142 writeq(val64, &bar0->rx_w_round_robin_0);
1143 val64 = 0x0100000102030001ULL;
1144 writeq(val64, &bar0->rx_w_round_robin_1);
1145 val64 = 0x0200010000010203ULL;
1146 writeq(val64, &bar0->rx_w_round_robin_2);
1147 val64 = 0x0001020001000001ULL;
1148 writeq(val64, &bar0->rx_w_round_robin_3);
1149 val64 = 0x0203000100000000ULL;
1150 writeq(val64, &bar0->rx_w_round_robin_4);
1151
1152 val64 = 0x8080404020201010ULL;
1153 writeq(val64, &bar0->rts_qos_steering);
1154 break;
1155 case 5:
1156 val64 = 0x0001000203000102ULL;
1157 writeq(val64, &bar0->rx_w_round_robin_0);
1158 val64 = 0x0001020001030004ULL;
1159 writeq(val64, &bar0->rx_w_round_robin_1);
1160 val64 = 0x0001000203000102ULL;
1161 writeq(val64, &bar0->rx_w_round_robin_2);
1162 val64 = 0x0001020001030004ULL;
1163 writeq(val64, &bar0->rx_w_round_robin_3);
1164 val64 = 0x0001000000000000ULL;
1165 writeq(val64, &bar0->rx_w_round_robin_4);
1166
1167 val64 = 0x8080404020201008ULL;
1168 writeq(val64, &bar0->rts_qos_steering);
1169 break;
1170 case 6:
1171 val64 = 0x0001020304000102ULL;
1172 writeq(val64, &bar0->rx_w_round_robin_0);
1173 val64 = 0x0304050001020001ULL;
1174 writeq(val64, &bar0->rx_w_round_robin_1);
1175 val64 = 0x0203000100000102ULL;
1176 writeq(val64, &bar0->rx_w_round_robin_2);
1177 val64 = 0x0304000102030405ULL;
1178 writeq(val64, &bar0->rx_w_round_robin_3);
1179 val64 = 0x0001000200000000ULL;
1180 writeq(val64, &bar0->rx_w_round_robin_4);
1181
1182 val64 = 0x8080404020100804ULL;
1183 writeq(val64, &bar0->rts_qos_steering);
1184 break;
1185 case 7:
1186 val64 = 0x0001020001020300ULL;
1187 writeq(val64, &bar0->rx_w_round_robin_0);
1188 val64 = 0x0102030400010203ULL;
1189 writeq(val64, &bar0->rx_w_round_robin_1);
1190 val64 = 0x0405060001020001ULL;
1191 writeq(val64, &bar0->rx_w_round_robin_2);
1192 val64 = 0x0304050000010200ULL;
1193 writeq(val64, &bar0->rx_w_round_robin_3);
1194 val64 = 0x0102030000000000ULL;
1195 writeq(val64, &bar0->rx_w_round_robin_4);
1196
1197 val64 = 0x8080402010080402ULL;
1198 writeq(val64, &bar0->rts_qos_steering);
1199 break;
1200 case 8:
1201 val64 = 0x0001020300040105ULL;
1202 writeq(val64, &bar0->rx_w_round_robin_0);
1203 val64 = 0x0200030106000204ULL;
1204 writeq(val64, &bar0->rx_w_round_robin_1);
1205 val64 = 0x0103000502010007ULL;
1206 writeq(val64, &bar0->rx_w_round_robin_2);
1207 val64 = 0x0304010002060500ULL;
1208 writeq(val64, &bar0->rx_w_round_robin_3);
1209 val64 = 0x0103020400000000ULL;
1210 writeq(val64, &bar0->rx_w_round_robin_4);
1211
1212 val64 = 0x8040201008040201ULL;
1213 writeq(val64, &bar0->rts_qos_steering);
1214 break;
1215 }
835 1216
836 /* UDP Fix */ 1217 /* UDP Fix */
837 val64 = 0; 1218 val64 = 0;
838 for (i = 1; i < 8; i++) 1219 for (i = 0; i < 8; i++)
1220 writeq(val64, &bar0->rts_frm_len_n[i]);
1221
1222 /* Set the default rts frame length for the rings configured */
1223 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1224 for (i = 0 ; i < config->rx_ring_num ; i++)
839 writeq(val64, &bar0->rts_frm_len_n[i]); 1225 writeq(val64, &bar0->rts_frm_len_n[i]);
840 1226
841 /* Set rts_frm_len register for fifo 0 */ 1227 /* Set the frame length for the configured rings
842 writeq(MAC_RTS_FRM_LEN_SET(dev->mtu + 22), 1228 * desired by the user
843 &bar0->rts_frm_len_n[0]); 1229 */
1230 for (i = 0; i < config->rx_ring_num; i++) {
1231 /* If rts_frm_len[i] == 0 then it is assumed that user not
1232 * specified frame length steering.
1233 * If the user provides the frame length then program
1234 * the rts_frm_len register for those values or else
1235 * leave it as it is.
1236 */
1237 if (rts_frm_len[i] != 0) {
1238 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1239 &bar0->rts_frm_len_n[i]);
1240 }
1241 }
844 1242
845 /* Enable statistics */ 1243 /* Program statistics memory */
846 writeq(mac_control->stats_mem_phy, &bar0->stat_addr); 1244 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
847 val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
848 STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
849 writeq(val64, &bar0->stat_cfg);
850 1245
851 /* 1246 if (nic->device_type == XFRAME_II_DEVICE) {
1247 val64 = STAT_BC(0x320);
1248 writeq(val64, &bar0->stat_byte_cnt);
1249 }
1250
1251 /*
852 * Initializing the sampling rate for the device to calculate the 1252 * Initializing the sampling rate for the device to calculate the
853 * bandwidth utilization. 1253 * bandwidth utilization.
854 */ 1254 */
@@ -857,30 +1257,38 @@ static int init_nic(struct s2io_nic *nic)
857 writeq(val64, &bar0->mac_link_util); 1257 writeq(val64, &bar0->mac_link_util);
858 1258
859 1259
860 /* 1260 /*
861 * Initializing the Transmit and Receive Traffic Interrupt 1261 * Initializing the Transmit and Receive Traffic Interrupt
862 * Scheme. 1262 * Scheme.
863 */ 1263 */
864 /* TTI Initialization. Default Tx timer gets us about 1264 /*
1265 * TTI Initialization. Default Tx timer gets us about
865 * 250 interrupts per sec. Continuous interrupts are enabled 1266 * 250 interrupts per sec. Continuous interrupts are enabled
866 * by default. 1267 * by default.
867 */ 1268 */
868 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) | 1269 if (nic->device_type == XFRAME_II_DEVICE) {
869 TTI_DATA1_MEM_TX_URNG_A(0xA) | 1270 int count = (nic->config.bus_speed * 125)/2;
1271 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1272 } else {
1273
1274 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1275 }
1276 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
870 TTI_DATA1_MEM_TX_URNG_B(0x10) | 1277 TTI_DATA1_MEM_TX_URNG_B(0x10) |
871 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN | 1278 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
872 TTI_DATA1_MEM_TX_TIMER_CI_EN; 1279 if (use_continuous_tx_intrs)
1280 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
873 writeq(val64, &bar0->tti_data1_mem); 1281 writeq(val64, &bar0->tti_data1_mem);
874 1282
875 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1283 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
876 TTI_DATA2_MEM_TX_UFC_B(0x20) | 1284 TTI_DATA2_MEM_TX_UFC_B(0x20) |
877 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80); 1285 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
878 writeq(val64, &bar0->tti_data2_mem); 1286 writeq(val64, &bar0->tti_data2_mem);
879 1287
880 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD; 1288 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
881 writeq(val64, &bar0->tti_command_mem); 1289 writeq(val64, &bar0->tti_command_mem);
882 1290
883 /* 1291 /*
884 * Once the operation completes, the Strobe bit of the command 1292 * Once the operation completes, the Strobe bit of the command
885 * register will be reset. We poll for this particular condition 1293 * register will be reset. We poll for this particular condition
886 * We wait for a maximum of 500ms for the operation to complete, 1294 * We wait for a maximum of 500ms for the operation to complete,
@@ -901,52 +1309,97 @@ static int init_nic(struct s2io_nic *nic)
901 time++; 1309 time++;
902 } 1310 }
903 1311
904 /* RTI Initialization */ 1312 if (nic->config.bimodal) {
905 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) | 1313 int k = 0;
906 RTI_DATA1_MEM_RX_URNG_A(0xA) | 1314 for (k = 0; k < config->rx_ring_num; k++) {
907 RTI_DATA1_MEM_RX_URNG_B(0x10) | 1315 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
908 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN; 1316 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1317 writeq(val64, &bar0->tti_command_mem);
1318
1319 /*
1320 * Once the operation completes, the Strobe bit of the command
1321 * register will be reset. We poll for this particular condition
1322 * We wait for a maximum of 500ms for the operation to complete,
1323 * if it's not complete by then we return error.
1324 */
1325 time = 0;
1326 while (TRUE) {
1327 val64 = readq(&bar0->tti_command_mem);
1328 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1329 break;
1330 }
1331 if (time > 10) {
1332 DBG_PRINT(ERR_DBG,
1333 "%s: TTI init Failed\n",
1334 dev->name);
1335 return -1;
1336 }
1337 time++;
1338 msleep(50);
1339 }
1340 }
1341 } else {
909 1342
910 writeq(val64, &bar0->rti_data1_mem); 1343 /* RTI Initialization */
1344 if (nic->device_type == XFRAME_II_DEVICE) {
1345 /*
1346 * Programmed to generate Apprx 500 Intrs per
1347 * second
1348 */
1349 int count = (nic->config.bus_speed * 125)/4;
1350 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1351 } else {
1352 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1353 }
1354 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1355 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1356 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
911 1357
912 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) | 1358 writeq(val64, &bar0->rti_data1_mem);
913 RTI_DATA2_MEM_RX_UFC_B(0x2) |
914 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
915 writeq(val64, &bar0->rti_data2_mem);
916 1359
917 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD; 1360 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
918 writeq(val64, &bar0->rti_command_mem); 1361 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1362 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1363 writeq(val64, &bar0->rti_data2_mem);
919 1364
920 /* 1365 for (i = 0; i < config->rx_ring_num; i++) {
921 * Once the operation completes, the Strobe bit of the command 1366 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
922 * register will be reset. We poll for this particular condition 1367 | RTI_CMD_MEM_OFFSET(i);
923 * We wait for a maximum of 500ms for the operation to complete, 1368 writeq(val64, &bar0->rti_command_mem);
924 * if it's not complete by then we return error. 1369
925 */ 1370 /*
926 time = 0; 1371 * Once the operation completes, the Strobe bit of the
927 while (TRUE) { 1372 * command register will be reset. We poll for this
928 val64 = readq(&bar0->rti_command_mem); 1373 * particular condition. We wait for a maximum of 500ms
929 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) { 1374 * for the operation to complete, if it's not complete
930 break; 1375 * by then we return error.
931 } 1376 */
932 if (time > 10) { 1377 time = 0;
933 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n", 1378 while (TRUE) {
934 dev->name); 1379 val64 = readq(&bar0->rti_command_mem);
935 return -1; 1380 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1381 break;
1382 }
1383 if (time > 10) {
1384 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1385 dev->name);
1386 return -1;
1387 }
1388 time++;
1389 msleep(50);
1390 }
936 } 1391 }
937 time++;
938 msleep(50);
939 } 1392 }
940 1393
941 /* 1394 /*
942 * Initializing proper values as Pause threshold into all 1395 * Initializing proper values as Pause threshold into all
943 * the 8 Queues on Rx side. 1396 * the 8 Queues on Rx side.
944 */ 1397 */
945 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3); 1398 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
946 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7); 1399 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
947 1400
948 /* Disable RMAC PAD STRIPPING */ 1401 /* Disable RMAC PAD STRIPPING */
949 add = &bar0->mac_cfg; 1402 add = (void *) &bar0->mac_cfg;
950 val64 = readq(&bar0->mac_cfg); 1403 val64 = readq(&bar0->mac_cfg);
951 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD); 1404 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
952 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 1405 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
@@ -955,8 +1408,8 @@ static int init_nic(struct s2io_nic *nic)
955 writel((u32) (val64 >> 32), (add + 4)); 1408 writel((u32) (val64 >> 32), (add + 4));
956 val64 = readq(&bar0->mac_cfg); 1409 val64 = readq(&bar0->mac_cfg);
957 1410
958 /* 1411 /*
959 * Set the time value to be inserted in the pause frame 1412 * Set the time value to be inserted in the pause frame
960 * generated by xena. 1413 * generated by xena.
961 */ 1414 */
962 val64 = readq(&bar0->rmac_pause_cfg); 1415 val64 = readq(&bar0->rmac_pause_cfg);
@@ -964,7 +1417,7 @@ static int init_nic(struct s2io_nic *nic)
964 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time); 1417 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
965 writeq(val64, &bar0->rmac_pause_cfg); 1418 writeq(val64, &bar0->rmac_pause_cfg);
966 1419
967 /* 1420 /*
968 * Set the Threshold Limit for Generating the pause frame 1421 * Set the Threshold Limit for Generating the pause frame
969 * If the amount of data in any Queue exceeds ratio of 1422 * If the amount of data in any Queue exceeds ratio of
970 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256 1423 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
@@ -988,25 +1441,54 @@ static int init_nic(struct s2io_nic *nic)
988 } 1441 }
989 writeq(val64, &bar0->mc_pause_thresh_q4q7); 1442 writeq(val64, &bar0->mc_pause_thresh_q4q7);
990 1443
991 /* 1444 /*
992 * TxDMA will stop Read request if the number of read split has 1445 * TxDMA will stop Read request if the number of read split has
993 * exceeded the limit pointed by shared_splits 1446 * exceeded the limit pointed by shared_splits
994 */ 1447 */
995 val64 = readq(&bar0->pic_control); 1448 val64 = readq(&bar0->pic_control);
996 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits); 1449 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
997 writeq(val64, &bar0->pic_control); 1450 writeq(val64, &bar0->pic_control);
998 1451
1452 /*
1453 * Programming the Herc to split every write transaction
1454 * that does not start on an ADB to reduce disconnects.
1455 */
1456 if (nic->device_type == XFRAME_II_DEVICE) {
1457 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1458 writeq(val64, &bar0->wreq_split_mask);
1459 }
1460
1461 /* Setting Link stability period to 64 ms */
1462 if (nic->device_type == XFRAME_II_DEVICE) {
1463 val64 = MISC_LINK_STABILITY_PRD(3);
1464 writeq(val64, &bar0->misc_control);
1465 }
1466
999 return SUCCESS; 1467 return SUCCESS;
1000} 1468}
1469#define LINK_UP_DOWN_INTERRUPT 1
1470#define MAC_RMAC_ERR_TIMER 2
1001 1471
1002/** 1472#if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1003 * en_dis_able_nic_intrs - Enable or Disable the interrupts 1473#define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
1474#else
1475int s2io_link_fault_indication(nic_t *nic)
1476{
1477 if (nic->device_type == XFRAME_II_DEVICE)
1478 return LINK_UP_DOWN_INTERRUPT;
1479 else
1480 return MAC_RMAC_ERR_TIMER;
1481}
1482#endif
1483
1484/**
1485 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1004 * @nic: device private variable, 1486 * @nic: device private variable,
1005 * @mask: A mask indicating which Intr block must be modified and, 1487 * @mask: A mask indicating which Intr block must be modified and,
1006 * @flag: A flag indicating whether to enable or disable the Intrs. 1488 * @flag: A flag indicating whether to enable or disable the Intrs.
1007 * Description: This function will either disable or enable the interrupts 1489 * Description: This function will either disable or enable the interrupts
1008 * depending on the flag argument. The mask argument can be used to 1490 * depending on the flag argument. The mask argument can be used to
1009 * enable/disable any Intr block. 1491 * enable/disable any Intr block.
1010 * Return Value: NONE. 1492 * Return Value: NONE.
1011 */ 1493 */
1012 1494
@@ -1024,20 +1506,31 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1024 temp64 = readq(&bar0->general_int_mask); 1506 temp64 = readq(&bar0->general_int_mask);
1025 temp64 &= ~((u64) val64); 1507 temp64 &= ~((u64) val64);
1026 writeq(temp64, &bar0->general_int_mask); 1508 writeq(temp64, &bar0->general_int_mask);
1027 /* 1509 /*
1028 * Disabled all PCIX, Flash, MDIO, IIC and GPIO 1510 * If Hercules adapter enable GPIO otherwise
1029 * interrupts for now. 1511 * disabled all PCIX, Flash, MDIO, IIC and GPIO
1030 * TODO 1512 * interrupts for now.
1513 * TODO
1031 */ 1514 */
1032 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); 1515 if (s2io_link_fault_indication(nic) ==
1033 /* 1516 LINK_UP_DOWN_INTERRUPT ) {
1517 temp64 = readq(&bar0->pic_int_mask);
1518 temp64 &= ~((u64) PIC_INT_GPIO);
1519 writeq(temp64, &bar0->pic_int_mask);
1520 temp64 = readq(&bar0->gpio_int_mask);
1521 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1522 writeq(temp64, &bar0->gpio_int_mask);
1523 } else {
1524 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1525 }
1526 /*
1034 * No MSI Support is available presently, so TTI and 1527 * No MSI Support is available presently, so TTI and
1035 * RTI interrupts are also disabled. 1528 * RTI interrupts are also disabled.
1036 */ 1529 */
1037 } else if (flag == DISABLE_INTRS) { 1530 } else if (flag == DISABLE_INTRS) {
1038 /* 1531 /*
1039 * Disable PIC Intrs in the general 1532 * Disable PIC Intrs in the general
1040 * intr mask register 1533 * intr mask register
1041 */ 1534 */
1042 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); 1535 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1043 temp64 = readq(&bar0->general_int_mask); 1536 temp64 = readq(&bar0->general_int_mask);
@@ -1055,27 +1548,27 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1055 temp64 = readq(&bar0->general_int_mask); 1548 temp64 = readq(&bar0->general_int_mask);
1056 temp64 &= ~((u64) val64); 1549 temp64 &= ~((u64) val64);
1057 writeq(temp64, &bar0->general_int_mask); 1550 writeq(temp64, &bar0->general_int_mask);
1058 /* 1551 /*
1059 * Keep all interrupts other than PFC interrupt 1552 * Keep all interrupts other than PFC interrupt
1060 * and PCC interrupt disabled in DMA level. 1553 * and PCC interrupt disabled in DMA level.
1061 */ 1554 */
1062 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M | 1555 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1063 TXDMA_PCC_INT_M); 1556 TXDMA_PCC_INT_M);
1064 writeq(val64, &bar0->txdma_int_mask); 1557 writeq(val64, &bar0->txdma_int_mask);
1065 /* 1558 /*
1066 * Enable only the MISC error 1 interrupt in PFC block 1559 * Enable only the MISC error 1 interrupt in PFC block
1067 */ 1560 */
1068 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1); 1561 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1069 writeq(val64, &bar0->pfc_err_mask); 1562 writeq(val64, &bar0->pfc_err_mask);
1070 /* 1563 /*
1071 * Enable only the FB_ECC error interrupt in PCC block 1564 * Enable only the FB_ECC error interrupt in PCC block
1072 */ 1565 */
1073 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR); 1566 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1074 writeq(val64, &bar0->pcc_err_mask); 1567 writeq(val64, &bar0->pcc_err_mask);
1075 } else if (flag == DISABLE_INTRS) { 1568 } else if (flag == DISABLE_INTRS) {
1076 /* 1569 /*
1077 * Disable TxDMA Intrs in the general intr mask 1570 * Disable TxDMA Intrs in the general intr mask
1078 * register 1571 * register
1079 */ 1572 */
1080 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask); 1573 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1081 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask); 1574 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
@@ -1093,15 +1586,15 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1093 temp64 = readq(&bar0->general_int_mask); 1586 temp64 = readq(&bar0->general_int_mask);
1094 temp64 &= ~((u64) val64); 1587 temp64 &= ~((u64) val64);
1095 writeq(temp64, &bar0->general_int_mask); 1588 writeq(temp64, &bar0->general_int_mask);
1096 /* 1589 /*
1097 * All RxDMA block interrupts are disabled for now 1590 * All RxDMA block interrupts are disabled for now
1098 * TODO 1591 * TODO
1099 */ 1592 */
1100 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask); 1593 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1101 } else if (flag == DISABLE_INTRS) { 1594 } else if (flag == DISABLE_INTRS) {
1102 /* 1595 /*
1103 * Disable RxDMA Intrs in the general intr mask 1596 * Disable RxDMA Intrs in the general intr mask
1104 * register 1597 * register
1105 */ 1598 */
1106 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask); 1599 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1107 temp64 = readq(&bar0->general_int_mask); 1600 temp64 = readq(&bar0->general_int_mask);
@@ -1118,22 +1611,13 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1118 temp64 = readq(&bar0->general_int_mask); 1611 temp64 = readq(&bar0->general_int_mask);
1119 temp64 &= ~((u64) val64); 1612 temp64 &= ~((u64) val64);
1120 writeq(temp64, &bar0->general_int_mask); 1613 writeq(temp64, &bar0->general_int_mask);
1121 /* 1614 /*
1122 * All MAC block error interrupts are disabled for now 1615 * All MAC block error interrupts are disabled for now
1123 * except the link status change interrupt.
1124 * TODO 1616 * TODO
1125 */ 1617 */
1126 val64 = MAC_INT_STATUS_RMAC_INT;
1127 temp64 = readq(&bar0->mac_int_mask);
1128 temp64 &= ~((u64) val64);
1129 writeq(temp64, &bar0->mac_int_mask);
1130
1131 val64 = readq(&bar0->mac_rmac_err_mask);
1132 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1133 writeq(val64, &bar0->mac_rmac_err_mask);
1134 } else if (flag == DISABLE_INTRS) { 1618 } else if (flag == DISABLE_INTRS) {
1135 /* 1619 /*
1136 * Disable MAC Intrs in the general intr mask register 1620 * Disable MAC Intrs in the general intr mask register
1137 */ 1621 */
1138 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask); 1622 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1139 writeq(DISABLE_ALL_INTRS, 1623 writeq(DISABLE_ALL_INTRS,
@@ -1152,14 +1636,14 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1152 temp64 = readq(&bar0->general_int_mask); 1636 temp64 = readq(&bar0->general_int_mask);
1153 temp64 &= ~((u64) val64); 1637 temp64 &= ~((u64) val64);
1154 writeq(temp64, &bar0->general_int_mask); 1638 writeq(temp64, &bar0->general_int_mask);
1155 /* 1639 /*
1156 * All XGXS block error interrupts are disabled for now 1640 * All XGXS block error interrupts are disabled for now
1157 * TODO 1641 * TODO
1158 */ 1642 */
1159 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask); 1643 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1160 } else if (flag == DISABLE_INTRS) { 1644 } else if (flag == DISABLE_INTRS) {
1161 /* 1645 /*
1162 * Disable MC Intrs in the general intr mask register 1646 * Disable MC Intrs in the general intr mask register
1163 */ 1647 */
1164 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask); 1648 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1165 temp64 = readq(&bar0->general_int_mask); 1649 temp64 = readq(&bar0->general_int_mask);
@@ -1175,11 +1659,11 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1175 temp64 = readq(&bar0->general_int_mask); 1659 temp64 = readq(&bar0->general_int_mask);
1176 temp64 &= ~((u64) val64); 1660 temp64 &= ~((u64) val64);
1177 writeq(temp64, &bar0->general_int_mask); 1661 writeq(temp64, &bar0->general_int_mask);
1178 /* 1662 /*
1179 * All MC block error interrupts are disabled for now 1663 * Enable all MC Intrs.
1180 * TODO
1181 */ 1664 */
1182 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask); 1665 writeq(0x0, &bar0->mc_int_mask);
1666 writeq(0x0, &bar0->mc_err_mask);
1183 } else if (flag == DISABLE_INTRS) { 1667 } else if (flag == DISABLE_INTRS) {
1184 /* 1668 /*
1185 * Disable MC Intrs in the general intr mask register 1669 * Disable MC Intrs in the general intr mask register
@@ -1199,14 +1683,14 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1199 temp64 = readq(&bar0->general_int_mask); 1683 temp64 = readq(&bar0->general_int_mask);
1200 temp64 &= ~((u64) val64); 1684 temp64 &= ~((u64) val64);
1201 writeq(temp64, &bar0->general_int_mask); 1685 writeq(temp64, &bar0->general_int_mask);
1202 /* 1686 /*
1203 * Enable all the Tx side interrupts 1687 * Enable all the Tx side interrupts
1204 * writing 0 Enables all 64 TX interrupt levels 1688 * writing 0 Enables all 64 TX interrupt levels
1205 */ 1689 */
1206 writeq(0x0, &bar0->tx_traffic_mask); 1690 writeq(0x0, &bar0->tx_traffic_mask);
1207 } else if (flag == DISABLE_INTRS) { 1691 } else if (flag == DISABLE_INTRS) {
1208 /* 1692 /*
1209 * Disable Tx Traffic Intrs in the general intr mask 1693 * Disable Tx Traffic Intrs in the general intr mask
1210 * register. 1694 * register.
1211 */ 1695 */
1212 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask); 1696 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
@@ -1226,8 +1710,8 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1226 /* writing 0 Enables all 8 RX interrupt levels */ 1710 /* writing 0 Enables all 8 RX interrupt levels */
1227 writeq(0x0, &bar0->rx_traffic_mask); 1711 writeq(0x0, &bar0->rx_traffic_mask);
1228 } else if (flag == DISABLE_INTRS) { 1712 } else if (flag == DISABLE_INTRS) {
1229 /* 1713 /*
1230 * Disable Rx Traffic Intrs in the general intr mask 1714 * Disable Rx Traffic Intrs in the general intr mask
1231 * register. 1715 * register.
1232 */ 1716 */
1233 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask); 1717 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
@@ -1238,24 +1722,66 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1238 } 1722 }
1239} 1723}
1240 1724
1241/** 1725static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1242 * verify_xena_quiescence - Checks whether the H/W is ready 1726{
1727 int ret = 0;
1728
1729 if (flag == FALSE) {
1730 if ((!herc && (rev_id >= 4)) || herc) {
1731 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1732 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1733 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1734 ret = 1;
1735 }
1736 }else {
1737 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1738 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1739 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1740 ret = 1;
1741 }
1742 }
1743 } else {
1744 if ((!herc && (rev_id >= 4)) || herc) {
1745 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1746 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1747 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1748 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1749 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1750 ret = 1;
1751 }
1752 } else {
1753 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1754 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1755 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1756 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1757 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1758 ret = 1;
1759 }
1760 }
1761 }
1762
1763 return ret;
1764}
1765/**
1766 * verify_xena_quiescence - Checks whether the H/W is ready
1243 * @val64 : Value read from adapter status register. 1767 * @val64 : Value read from adapter status register.
1244 * @flag : indicates if the adapter enable bit was ever written once 1768 * @flag : indicates if the adapter enable bit was ever written once
1245 * before. 1769 * before.
1246 * Description: Returns whether the H/W is ready to go or not. Depending 1770 * Description: Returns whether the H/W is ready to go or not. Depending
1247 * on whether adapter enable bit was written or not the comparison 1771 * on whether adapter enable bit was written or not the comparison
1248 * differs and the calling function passes the input argument flag to 1772 * differs and the calling function passes the input argument flag to
1249 * indicate this. 1773 * indicate this.
1250 * Return: 1 If xena is quiescence 1774 * Return: 1 If xena is quiescence
1251 * 0 If Xena is not quiescence 1775 * 0 If Xena is not quiescence
1252 */ 1776 */
1253 1777
1254static int verify_xena_quiescence(u64 val64, int flag) 1778static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1255{ 1779{
1256 int ret = 0; 1780 int ret = 0, herc;
1257 u64 tmp64 = ~((u64) val64); 1781 u64 tmp64 = ~((u64) val64);
1782 int rev_id = get_xena_rev_id(sp->pdev);
1258 1783
1784 herc = (sp->device_type == XFRAME_II_DEVICE);
1259 if (! 1785 if (!
1260 (tmp64 & 1786 (tmp64 &
1261 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY | 1787 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
@@ -1263,25 +1789,7 @@ static int verify_xena_quiescence(u64 val64, int flag)
1263 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY | 1789 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1264 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK | 1790 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1265 ADAPTER_STATUS_P_PLL_LOCK))) { 1791 ADAPTER_STATUS_P_PLL_LOCK))) {
1266 if (flag == FALSE) { 1792 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1267 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1268 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1269 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1270
1271 ret = 1;
1272
1273 }
1274 } else {
1275 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1276 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1277 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1278 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1279 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1280
1281 ret = 1;
1282
1283 }
1284 }
1285 } 1793 }
1286 1794
1287 return ret; 1795 return ret;
@@ -1290,12 +1798,12 @@ static int verify_xena_quiescence(u64 val64, int flag)
1290/** 1798/**
1291 * fix_mac_address - Fix for Mac addr problem on Alpha platforms 1799 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1292 * @sp: Pointer to device specifc structure 1800 * @sp: Pointer to device specifc structure
1293 * Description : 1801 * Description :
1294 * New procedure to clear mac address reading problems on Alpha platforms 1802 * New procedure to clear mac address reading problems on Alpha platforms
1295 * 1803 *
1296 */ 1804 */
1297 1805
1298static void fix_mac_address(nic_t * sp) 1806void fix_mac_address(nic_t * sp)
1299{ 1807{
1300 XENA_dev_config_t __iomem *bar0 = sp->bar0; 1808 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1301 u64 val64; 1809 u64 val64;
@@ -1303,20 +1811,21 @@ static void fix_mac_address(nic_t * sp)
1303 1811
1304 while (fix_mac[i] != END_SIGN) { 1812 while (fix_mac[i] != END_SIGN) {
1305 writeq(fix_mac[i++], &bar0->gpio_control); 1813 writeq(fix_mac[i++], &bar0->gpio_control);
1814 udelay(10);
1306 val64 = readq(&bar0->gpio_control); 1815 val64 = readq(&bar0->gpio_control);
1307 } 1816 }
1308} 1817}
1309 1818
1310/** 1819/**
1311 * start_nic - Turns the device on 1820 * start_nic - Turns the device on
1312 * @nic : device private variable. 1821 * @nic : device private variable.
1313 * Description: 1822 * Description:
1314 * This function actually turns the device on. Before this function is 1823 * This function actually turns the device on. Before this function is
1315 * called,all Registers are configured from their reset states 1824 * called,all Registers are configured from their reset states
1316 * and shared memory is allocated but the NIC is still quiescent. On 1825 * and shared memory is allocated but the NIC is still quiescent. On
1317 * calling this function, the device interrupts are cleared and the NIC is 1826 * calling this function, the device interrupts are cleared and the NIC is
1318 * literally switched on by writing into the adapter control register. 1827 * literally switched on by writing into the adapter control register.
1319 * Return Value: 1828 * Return Value:
1320 * SUCCESS on success and -1 on failure. 1829 * SUCCESS on success and -1 on failure.
1321 */ 1830 */
1322 1831
@@ -1325,8 +1834,8 @@ static int start_nic(struct s2io_nic *nic)
1325 XENA_dev_config_t __iomem *bar0 = nic->bar0; 1834 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1326 struct net_device *dev = nic->dev; 1835 struct net_device *dev = nic->dev;
1327 register u64 val64 = 0; 1836 register u64 val64 = 0;
1328 u16 interruptible, i; 1837 u16 interruptible;
1329 u16 subid; 1838 u16 subid, i;
1330 mac_info_t *mac_control; 1839 mac_info_t *mac_control;
1331 struct config_param *config; 1840 struct config_param *config;
1332 1841
@@ -1335,10 +1844,12 @@ static int start_nic(struct s2io_nic *nic)
1335 1844
1336 /* PRC Initialization and configuration */ 1845 /* PRC Initialization and configuration */
1337 for (i = 0; i < config->rx_ring_num; i++) { 1846 for (i = 0; i < config->rx_ring_num; i++) {
1338 writeq((u64) nic->rx_blocks[i][0].block_dma_addr, 1847 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1339 &bar0->prc_rxd0_n[i]); 1848 &bar0->prc_rxd0_n[i]);
1340 1849
1341 val64 = readq(&bar0->prc_ctrl_n[i]); 1850 val64 = readq(&bar0->prc_ctrl_n[i]);
1851 if (nic->config.bimodal)
1852 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1342#ifndef CONFIG_2BUFF_MODE 1853#ifndef CONFIG_2BUFF_MODE
1343 val64 |= PRC_CTRL_RC_ENABLED; 1854 val64 |= PRC_CTRL_RC_ENABLED;
1344#else 1855#else
@@ -1354,7 +1865,7 @@ static int start_nic(struct s2io_nic *nic)
1354 writeq(val64, &bar0->rx_pa_cfg); 1865 writeq(val64, &bar0->rx_pa_cfg);
1355#endif 1866#endif
1356 1867
1357 /* 1868 /*
1358 * Enabling MC-RLDRAM. After enabling the device, we timeout 1869 * Enabling MC-RLDRAM. After enabling the device, we timeout
1359 * for around 100ms, which is approximately the time required 1870 * for around 100ms, which is approximately the time required
1360 * for the device to be ready for operation. 1871 * for the device to be ready for operation.
@@ -1364,27 +1875,27 @@ static int start_nic(struct s2io_nic *nic)
1364 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); 1875 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1365 val64 = readq(&bar0->mc_rldram_mrs); 1876 val64 = readq(&bar0->mc_rldram_mrs);
1366 1877
1367 msleep(100); /* Delay by around 100 ms. */ 1878 msleep(100); /* Delay by around 100 ms. */
1368 1879
1369 /* Enabling ECC Protection. */ 1880 /* Enabling ECC Protection. */
1370 val64 = readq(&bar0->adapter_control); 1881 val64 = readq(&bar0->adapter_control);
1371 val64 &= ~ADAPTER_ECC_EN; 1882 val64 &= ~ADAPTER_ECC_EN;
1372 writeq(val64, &bar0->adapter_control); 1883 writeq(val64, &bar0->adapter_control);
1373 1884
1374 /* 1885 /*
1375 * Clearing any possible Link state change interrupts that 1886 * Clearing any possible Link state change interrupts that
1376 * could have popped up just before Enabling the card. 1887 * could have popped up just before Enabling the card.
1377 */ 1888 */
1378 val64 = readq(&bar0->mac_rmac_err_reg); 1889 val64 = readq(&bar0->mac_rmac_err_reg);
1379 if (val64) 1890 if (val64)
1380 writeq(val64, &bar0->mac_rmac_err_reg); 1891 writeq(val64, &bar0->mac_rmac_err_reg);
1381 1892
1382 /* 1893 /*
1383 * Verify if the device is ready to be enabled, if so enable 1894 * Verify if the device is ready to be enabled, if so enable
1384 * it. 1895 * it.
1385 */ 1896 */
1386 val64 = readq(&bar0->adapter_status); 1897 val64 = readq(&bar0->adapter_status);
1387 if (!verify_xena_quiescence(val64, nic->device_enabled_once)) { 1898 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1388 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name); 1899 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1389 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n", 1900 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1390 (unsigned long long) val64); 1901 (unsigned long long) val64);
@@ -1392,16 +1903,18 @@ static int start_nic(struct s2io_nic *nic)
1392 } 1903 }
1393 1904
1394 /* Enable select interrupts */ 1905 /* Enable select interrupts */
1395 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR | 1906 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
1396 RX_MAC_INTR; 1907 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1908 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1909
1397 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS); 1910 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1398 1911
1399 /* 1912 /*
1400 * With some switches, link might be already up at this point. 1913 * With some switches, link might be already up at this point.
1401 * Because of this weird behavior, when we enable laser, 1914 * Because of this weird behavior, when we enable laser,
1402 * we may not get link. We need to handle this. We cannot 1915 * we may not get link. We need to handle this. We cannot
1403 * figure out which switch is misbehaving. So we are forced to 1916 * figure out which switch is misbehaving. So we are forced to
1404 * make a global change. 1917 * make a global change.
1405 */ 1918 */
1406 1919
1407 /* Enabling Laser. */ 1920 /* Enabling Laser. */
@@ -1411,44 +1924,30 @@ static int start_nic(struct s2io_nic *nic)
1411 1924
1412 /* SXE-002: Initialize link and activity LED */ 1925 /* SXE-002: Initialize link and activity LED */
1413 subid = nic->pdev->subsystem_device; 1926 subid = nic->pdev->subsystem_device;
1414 if ((subid & 0xFF) >= 0x07) { 1927 if (((subid & 0xFF) >= 0x07) &&
1928 (nic->device_type == XFRAME_I_DEVICE)) {
1415 val64 = readq(&bar0->gpio_control); 1929 val64 = readq(&bar0->gpio_control);
1416 val64 |= 0x0000800000000000ULL; 1930 val64 |= 0x0000800000000000ULL;
1417 writeq(val64, &bar0->gpio_control); 1931 writeq(val64, &bar0->gpio_control);
1418 val64 = 0x0411040400000000ULL; 1932 val64 = 0x0411040400000000ULL;
1419 writeq(val64, (void __iomem *) bar0 + 0x2700); 1933 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1420 } 1934 }
1421 1935
1422 /* 1936 /*
1423 * Don't see link state interrupts on certain switches, so 1937 * Don't see link state interrupts on certain switches, so
1424 * directly scheduling a link state task from here. 1938 * directly scheduling a link state task from here.
1425 */ 1939 */
1426 schedule_work(&nic->set_link_task); 1940 schedule_work(&nic->set_link_task);
1427 1941
1428 /*
1429 * Here we are performing soft reset on XGXS to
1430 * force link down. Since link is already up, we will get
1431 * link state change interrupt after this reset
1432 */
1433 SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
1434 val64 = readq(&bar0->dtx_control);
1435 udelay(50);
1436 SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
1437 val64 = readq(&bar0->dtx_control);
1438 udelay(50);
1439 SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
1440 val64 = readq(&bar0->dtx_control);
1441 udelay(50);
1442
1443 return SUCCESS; 1942 return SUCCESS;
1444} 1943}
1445 1944
1446/** 1945/**
1447 * free_tx_buffers - Free all queued Tx buffers 1946 * free_tx_buffers - Free all queued Tx buffers
1448 * @nic : device private variable. 1947 * @nic : device private variable.
1449 * Description: 1948 * Description:
1450 * Free all queued Tx buffers. 1949 * Free all queued Tx buffers.
1451 * Return Value: void 1950 * Return Value: void
1452*/ 1951*/
1453 1952
1454static void free_tx_buffers(struct s2io_nic *nic) 1953static void free_tx_buffers(struct s2io_nic *nic)
@@ -1459,39 +1958,61 @@ static void free_tx_buffers(struct s2io_nic *nic)
1459 int i, j; 1958 int i, j;
1460 mac_info_t *mac_control; 1959 mac_info_t *mac_control;
1461 struct config_param *config; 1960 struct config_param *config;
1462 int cnt = 0; 1961 int cnt = 0, frg_cnt;
1463 1962
1464 mac_control = &nic->mac_control; 1963 mac_control = &nic->mac_control;
1465 config = &nic->config; 1964 config = &nic->config;
1466 1965
1467 for (i = 0; i < config->tx_fifo_num; i++) { 1966 for (i = 0; i < config->tx_fifo_num; i++) {
1468 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) { 1967 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1469 txdp = (TxD_t *) nic->list_info[i][j]. 1968 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1470 list_virt_addr; 1969 list_virt_addr;
1471 skb = 1970 skb =
1472 (struct sk_buff *) ((unsigned long) txdp-> 1971 (struct sk_buff *) ((unsigned long) txdp->
1473 Host_Control); 1972 Host_Control);
1474 if (skb == NULL) { 1973 if (skb == NULL) {
1475 memset(txdp, 0, sizeof(TxD_t)); 1974 memset(txdp, 0, sizeof(TxD_t) *
1975 config->max_txds);
1476 continue; 1976 continue;
1477 } 1977 }
1978 frg_cnt = skb_shinfo(skb)->nr_frags;
1979 pci_unmap_single(nic->pdev, (dma_addr_t)
1980 txdp->Buffer_Pointer,
1981 skb->len - skb->data_len,
1982 PCI_DMA_TODEVICE);
1983 if (frg_cnt) {
1984 TxD_t *temp;
1985 temp = txdp;
1986 txdp++;
1987 for (j = 0; j < frg_cnt; j++, txdp++) {
1988 skb_frag_t *frag =
1989 &skb_shinfo(skb)->frags[j];
1990 pci_unmap_page(nic->pdev,
1991 (dma_addr_t)
1992 txdp->
1993 Buffer_Pointer,
1994 frag->size,
1995 PCI_DMA_TODEVICE);
1996 }
1997 txdp = temp;
1998 }
1478 dev_kfree_skb(skb); 1999 dev_kfree_skb(skb);
1479 memset(txdp, 0, sizeof(TxD_t)); 2000 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1480 cnt++; 2001 cnt++;
1481 } 2002 }
1482 DBG_PRINT(INTR_DBG, 2003 DBG_PRINT(INTR_DBG,
1483 "%s:forcibly freeing %d skbs on FIFO%d\n", 2004 "%s:forcibly freeing %d skbs on FIFO%d\n",
1484 dev->name, cnt, i); 2005 dev->name, cnt, i);
1485 mac_control->tx_curr_get_info[i].offset = 0; 2006 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1486 mac_control->tx_curr_put_info[i].offset = 0; 2007 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1487 } 2008 }
1488} 2009}
1489 2010
1490/** 2011/**
1491 * stop_nic - To stop the nic 2012 * stop_nic - To stop the nic
1492 * @nic ; device private variable. 2013 * @nic ; device private variable.
1493 * Description: 2014 * Description:
1494 * This function does exactly the opposite of what the start_nic() 2015 * This function does exactly the opposite of what the start_nic()
1495 * function does. This function is called to stop the device. 2016 * function does. This function is called to stop the device.
1496 * Return Value: 2017 * Return Value:
1497 * void. 2018 * void.
@@ -1509,8 +2030,9 @@ static void stop_nic(struct s2io_nic *nic)
1509 config = &nic->config; 2030 config = &nic->config;
1510 2031
1511 /* Disable all interrupts */ 2032 /* Disable all interrupts */
1512 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR | 2033 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
1513 RX_MAC_INTR; 2034 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2035 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1514 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS); 2036 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1515 2037
1516 /* Disable PRCs */ 2038 /* Disable PRCs */
@@ -1521,11 +2043,11 @@ static void stop_nic(struct s2io_nic *nic)
1521 } 2043 }
1522} 2044}
1523 2045
1524/** 2046/**
1525 * fill_rx_buffers - Allocates the Rx side skbs 2047 * fill_rx_buffers - Allocates the Rx side skbs
1526 * @nic: device private variable 2048 * @nic: device private variable
1527 * @ring_no: ring number 2049 * @ring_no: ring number
1528 * Description: 2050 * Description:
1529 * The function allocates Rx side skbs and puts the physical 2051 * The function allocates Rx side skbs and puts the physical
1530 * address of these buffers into the RxD buffer pointers, so that the NIC 2052 * address of these buffers into the RxD buffer pointers, so that the NIC
1531 * can DMA the received frame into these locations. 2053 * can DMA the received frame into these locations.
@@ -1533,8 +2055,8 @@ static void stop_nic(struct s2io_nic *nic)
1533 * 1. single buffer, 2055 * 1. single buffer,
1534 * 2. three buffer and 2056 * 2. three buffer and
1535 * 3. Five buffer modes. 2057 * 3. Five buffer modes.
1536 * Each mode defines how many fragments the received frame will be split 2058 * Each mode defines how many fragments the received frame will be split
1537 * up into by the NIC. The frame is split into L3 header, L4 Header, 2059 * up into by the NIC. The frame is split into L3 header, L4 Header,
1538 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself 2060 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1539 * is split into 3 fragments. As of now only single buffer mode is 2061 * is split into 3 fragments. As of now only single buffer mode is
1540 * supported. 2062 * supported.
@@ -1542,7 +2064,7 @@ static void stop_nic(struct s2io_nic *nic)
1542 * SUCCESS on success or an appropriate -ve value on failure. 2064 * SUCCESS on success or an appropriate -ve value on failure.
1543 */ 2065 */
1544 2066
1545static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) 2067int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1546{ 2068{
1547 struct net_device *dev = nic->dev; 2069 struct net_device *dev = nic->dev;
1548 struct sk_buff *skb; 2070 struct sk_buff *skb;
@@ -1550,34 +2072,35 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1550 int off, off1, size, block_no, block_no1; 2072 int off, off1, size, block_no, block_no1;
1551 int offset, offset1; 2073 int offset, offset1;
1552 u32 alloc_tab = 0; 2074 u32 alloc_tab = 0;
1553 u32 alloc_cnt = nic->pkt_cnt[ring_no] - 2075 u32 alloc_cnt;
1554 atomic_read(&nic->rx_bufs_left[ring_no]);
1555 mac_info_t *mac_control; 2076 mac_info_t *mac_control;
1556 struct config_param *config; 2077 struct config_param *config;
1557#ifdef CONFIG_2BUFF_MODE 2078#ifdef CONFIG_2BUFF_MODE
1558 RxD_t *rxdpnext; 2079 RxD_t *rxdpnext;
1559 int nextblk; 2080 int nextblk;
1560 unsigned long tmp; 2081 u64 tmp;
1561 buffAdd_t *ba; 2082 buffAdd_t *ba;
1562 dma_addr_t rxdpphys; 2083 dma_addr_t rxdpphys;
1563#endif 2084#endif
1564#ifndef CONFIG_S2IO_NAPI 2085#ifndef CONFIG_S2IO_NAPI
1565 unsigned long flags; 2086 unsigned long flags;
1566#endif 2087#endif
2088 RxD_t *first_rxdp = NULL;
1567 2089
1568 mac_control = &nic->mac_control; 2090 mac_control = &nic->mac_control;
1569 config = &nic->config; 2091 config = &nic->config;
1570 2092 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2093 atomic_read(&nic->rx_bufs_left[ring_no]);
1571 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + 2094 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1572 HEADER_802_2_SIZE + HEADER_SNAP_SIZE; 2095 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1573 2096
1574 while (alloc_tab < alloc_cnt) { 2097 while (alloc_tab < alloc_cnt) {
1575 block_no = mac_control->rx_curr_put_info[ring_no]. 2098 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1576 block_index; 2099 block_index;
1577 block_no1 = mac_control->rx_curr_get_info[ring_no]. 2100 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1578 block_index; 2101 block_index;
1579 off = mac_control->rx_curr_put_info[ring_no].offset; 2102 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1580 off1 = mac_control->rx_curr_get_info[ring_no].offset; 2103 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1581#ifndef CONFIG_2BUFF_MODE 2104#ifndef CONFIG_2BUFF_MODE
1582 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off; 2105 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1583 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1; 2106 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
@@ -1586,7 +2109,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1586 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1; 2109 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1587#endif 2110#endif
1588 2111
1589 rxdp = nic->rx_blocks[ring_no][block_no]. 2112 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1590 block_virt_addr + off; 2113 block_virt_addr + off;
1591 if ((offset == offset1) && (rxdp->Host_Control)) { 2114 if ((offset == offset1) && (rxdp->Host_Control)) {
1592 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name); 2115 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
@@ -1595,15 +2118,15 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1595 } 2118 }
1596#ifndef CONFIG_2BUFF_MODE 2119#ifndef CONFIG_2BUFF_MODE
1597 if (rxdp->Control_1 == END_OF_BLOCK) { 2120 if (rxdp->Control_1 == END_OF_BLOCK) {
1598 mac_control->rx_curr_put_info[ring_no]. 2121 mac_control->rings[ring_no].rx_curr_put_info.
1599 block_index++; 2122 block_index++;
1600 mac_control->rx_curr_put_info[ring_no]. 2123 mac_control->rings[ring_no].rx_curr_put_info.
1601 block_index %= nic->block_count[ring_no]; 2124 block_index %= mac_control->rings[ring_no].block_count;
1602 block_no = mac_control->rx_curr_put_info 2125 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1603 [ring_no].block_index; 2126 block_index;
1604 off++; 2127 off++;
1605 off %= (MAX_RXDS_PER_BLOCK + 1); 2128 off %= (MAX_RXDS_PER_BLOCK + 1);
1606 mac_control->rx_curr_put_info[ring_no].offset = 2129 mac_control->rings[ring_no].rx_curr_put_info.offset =
1607 off; 2130 off;
1608 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2); 2131 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1609 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2132 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
@@ -1611,30 +2134,30 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1611 } 2134 }
1612#ifndef CONFIG_S2IO_NAPI 2135#ifndef CONFIG_S2IO_NAPI
1613 spin_lock_irqsave(&nic->put_lock, flags); 2136 spin_lock_irqsave(&nic->put_lock, flags);
1614 nic->put_pos[ring_no] = 2137 mac_control->rings[ring_no].put_pos =
1615 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off; 2138 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1616 spin_unlock_irqrestore(&nic->put_lock, flags); 2139 spin_unlock_irqrestore(&nic->put_lock, flags);
1617#endif 2140#endif
1618#else 2141#else
1619 if (rxdp->Host_Control == END_OF_BLOCK) { 2142 if (rxdp->Host_Control == END_OF_BLOCK) {
1620 mac_control->rx_curr_put_info[ring_no]. 2143 mac_control->rings[ring_no].rx_curr_put_info.
1621 block_index++; 2144 block_index++;
1622 mac_control->rx_curr_put_info[ring_no]. 2145 mac_control->rings[ring_no].rx_curr_put_info.block_index
1623 block_index %= nic->block_count[ring_no]; 2146 %= mac_control->rings[ring_no].block_count;
1624 block_no = mac_control->rx_curr_put_info 2147 block_no = mac_control->rings[ring_no].rx_curr_put_info
1625 [ring_no].block_index; 2148 .block_index;
1626 off = 0; 2149 off = 0;
1627 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n", 2150 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1628 dev->name, block_no, 2151 dev->name, block_no,
1629 (unsigned long long) rxdp->Control_1); 2152 (unsigned long long) rxdp->Control_1);
1630 mac_control->rx_curr_put_info[ring_no].offset = 2153 mac_control->rings[ring_no].rx_curr_put_info.offset =
1631 off; 2154 off;
1632 rxdp = nic->rx_blocks[ring_no][block_no]. 2155 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1633 block_virt_addr; 2156 block_virt_addr;
1634 } 2157 }
1635#ifndef CONFIG_S2IO_NAPI 2158#ifndef CONFIG_S2IO_NAPI
1636 spin_lock_irqsave(&nic->put_lock, flags); 2159 spin_lock_irqsave(&nic->put_lock, flags);
1637 nic->put_pos[ring_no] = (block_no * 2160 mac_control->rings[ring_no].put_pos = (block_no *
1638 (MAX_RXDS_PER_BLOCK + 1)) + off; 2161 (MAX_RXDS_PER_BLOCK + 1)) + off;
1639 spin_unlock_irqrestore(&nic->put_lock, flags); 2162 spin_unlock_irqrestore(&nic->put_lock, flags);
1640#endif 2163#endif
@@ -1646,27 +2169,27 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1646 if (rxdp->Control_2 & BIT(0)) 2169 if (rxdp->Control_2 & BIT(0))
1647#endif 2170#endif
1648 { 2171 {
1649 mac_control->rx_curr_put_info[ring_no]. 2172 mac_control->rings[ring_no].rx_curr_put_info.
1650 offset = off; 2173 offset = off;
1651 goto end; 2174 goto end;
1652 } 2175 }
1653#ifdef CONFIG_2BUFF_MODE 2176#ifdef CONFIG_2BUFF_MODE
1654 /* 2177 /*
1655 * RxDs Spanning cache lines will be replenished only 2178 * RxDs Spanning cache lines will be replenished only
1656 * if the succeeding RxD is also owned by Host. It 2179 * if the succeeding RxD is also owned by Host. It
1657 * will always be the ((8*i)+3) and ((8*i)+6) 2180 * will always be the ((8*i)+3) and ((8*i)+6)
1658 * descriptors for the 48 byte descriptor. The offending 2181 * descriptors for the 48 byte descriptor. The offending
1659 * decsriptor is of-course the 3rd descriptor. 2182 * decsriptor is of-course the 3rd descriptor.
1660 */ 2183 */
1661 rxdpphys = nic->rx_blocks[ring_no][block_no]. 2184 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1662 block_dma_addr + (off * sizeof(RxD_t)); 2185 block_dma_addr + (off * sizeof(RxD_t));
1663 if (((u64) (rxdpphys)) % 128 > 80) { 2186 if (((u64) (rxdpphys)) % 128 > 80) {
1664 rxdpnext = nic->rx_blocks[ring_no][block_no]. 2187 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1665 block_virt_addr + (off + 1); 2188 block_virt_addr + (off + 1);
1666 if (rxdpnext->Host_Control == END_OF_BLOCK) { 2189 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1667 nextblk = (block_no + 1) % 2190 nextblk = (block_no + 1) %
1668 (nic->block_count[ring_no]); 2191 (mac_control->rings[ring_no].block_count);
1669 rxdpnext = nic->rx_blocks[ring_no] 2192 rxdpnext = mac_control->rings[ring_no].rx_blocks
1670 [nextblk].block_virt_addr; 2193 [nextblk].block_virt_addr;
1671 } 2194 }
1672 if (rxdpnext->Control_2 & BIT(0)) 2195 if (rxdpnext->Control_2 & BIT(0))
@@ -1682,6 +2205,10 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1682 if (!skb) { 2205 if (!skb) {
1683 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name); 2206 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1684 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n"); 2207 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2208 if (first_rxdp) {
2209 wmb();
2210 first_rxdp->Control_1 |= RXD_OWN_XENA;
2211 }
1685 return -ENOMEM; 2212 return -ENOMEM;
1686 } 2213 }
1687#ifndef CONFIG_2BUFF_MODE 2214#ifndef CONFIG_2BUFF_MODE
@@ -1692,12 +2219,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1692 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE); 2219 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1693 rxdp->Control_2 |= SET_BUFFER0_SIZE(size); 2220 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1694 rxdp->Host_Control = (unsigned long) (skb); 2221 rxdp->Host_Control = (unsigned long) (skb);
1695 rxdp->Control_1 |= RXD_OWN_XENA; 2222 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2223 rxdp->Control_1 |= RXD_OWN_XENA;
1696 off++; 2224 off++;
1697 off %= (MAX_RXDS_PER_BLOCK + 1); 2225 off %= (MAX_RXDS_PER_BLOCK + 1);
1698 mac_control->rx_curr_put_info[ring_no].offset = off; 2226 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1699#else 2227#else
1700 ba = &nic->ba[ring_no][block_no][off]; 2228 ba = &mac_control->rings[ring_no].ba[block_no][off];
1701 skb_reserve(skb, BUF0_LEN); 2229 skb_reserve(skb, BUF0_LEN);
1702 tmp = ((unsigned long) skb->data & ALIGN_SIZE); 2230 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1703 if (tmp) 2231 if (tmp)
@@ -1719,22 +2247,41 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1719 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */ 2247 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1720 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */ 2248 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1721 rxdp->Host_Control = (u64) ((unsigned long) (skb)); 2249 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1722 rxdp->Control_1 |= RXD_OWN_XENA; 2250 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2251 rxdp->Control_1 |= RXD_OWN_XENA;
1723 off++; 2252 off++;
1724 mac_control->rx_curr_put_info[ring_no].offset = off; 2253 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1725#endif 2254#endif
2255 rxdp->Control_2 |= SET_RXD_MARKER;
2256
2257 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2258 if (first_rxdp) {
2259 wmb();
2260 first_rxdp->Control_1 |= RXD_OWN_XENA;
2261 }
2262 first_rxdp = rxdp;
2263 }
1726 atomic_inc(&nic->rx_bufs_left[ring_no]); 2264 atomic_inc(&nic->rx_bufs_left[ring_no]);
1727 alloc_tab++; 2265 alloc_tab++;
1728 } 2266 }
1729 2267
1730 end: 2268 end:
2269 /* Transfer ownership of first descriptor to adapter just before
2270 * exiting. Before that, use memory barrier so that ownership
2271 * and other fields are seen by adapter correctly.
2272 */
2273 if (first_rxdp) {
2274 wmb();
2275 first_rxdp->Control_1 |= RXD_OWN_XENA;
2276 }
2277
1731 return SUCCESS; 2278 return SUCCESS;
1732} 2279}
1733 2280
1734/** 2281/**
1735 * free_rx_buffers - Frees all Rx buffers 2282 * free_rx_buffers - Frees all Rx buffers
1736 * @sp: device private variable. 2283 * @sp: device private variable.
1737 * Description: 2284 * Description:
1738 * This function will free all Rx buffers allocated by host. 2285 * This function will free all Rx buffers allocated by host.
1739 * Return Value: 2286 * Return Value:
1740 * NONE. 2287 * NONE.
@@ -1758,7 +2305,8 @@ static void free_rx_buffers(struct s2io_nic *sp)
1758 for (i = 0; i < config->rx_ring_num; i++) { 2305 for (i = 0; i < config->rx_ring_num; i++) {
1759 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) { 2306 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
1760 off = j % (MAX_RXDS_PER_BLOCK + 1); 2307 off = j % (MAX_RXDS_PER_BLOCK + 1);
1761 rxdp = sp->rx_blocks[i][blk].block_virt_addr + off; 2308 rxdp = mac_control->rings[i].rx_blocks[blk].
2309 block_virt_addr + off;
1762 2310
1763#ifndef CONFIG_2BUFF_MODE 2311#ifndef CONFIG_2BUFF_MODE
1764 if (rxdp->Control_1 == END_OF_BLOCK) { 2312 if (rxdp->Control_1 == END_OF_BLOCK) {
@@ -1793,7 +2341,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
1793 HEADER_SNAP_SIZE, 2341 HEADER_SNAP_SIZE,
1794 PCI_DMA_FROMDEVICE); 2342 PCI_DMA_FROMDEVICE);
1795#else 2343#else
1796 ba = &sp->ba[i][blk][off]; 2344 ba = &mac_control->rings[i].ba[blk][off];
1797 pci_unmap_single(sp->pdev, (dma_addr_t) 2345 pci_unmap_single(sp->pdev, (dma_addr_t)
1798 rxdp->Buffer0_ptr, 2346 rxdp->Buffer0_ptr,
1799 BUF0_LEN, 2347 BUF0_LEN,
@@ -1813,10 +2361,10 @@ static void free_rx_buffers(struct s2io_nic *sp)
1813 } 2361 }
1814 memset(rxdp, 0, sizeof(RxD_t)); 2362 memset(rxdp, 0, sizeof(RxD_t));
1815 } 2363 }
1816 mac_control->rx_curr_put_info[i].block_index = 0; 2364 mac_control->rings[i].rx_curr_put_info.block_index = 0;
1817 mac_control->rx_curr_get_info[i].block_index = 0; 2365 mac_control->rings[i].rx_curr_get_info.block_index = 0;
1818 mac_control->rx_curr_put_info[i].offset = 0; 2366 mac_control->rings[i].rx_curr_put_info.offset = 0;
1819 mac_control->rx_curr_get_info[i].offset = 0; 2367 mac_control->rings[i].rx_curr_get_info.offset = 0;
1820 atomic_set(&sp->rx_bufs_left[i], 0); 2368 atomic_set(&sp->rx_bufs_left[i], 0);
1821 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n", 2369 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
1822 dev->name, buf_cnt, i); 2370 dev->name, buf_cnt, i);
@@ -1826,7 +2374,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
1826/** 2374/**
1827 * s2io_poll - Rx interrupt handler for NAPI support 2375 * s2io_poll - Rx interrupt handler for NAPI support
1828 * @dev : pointer to the device structure. 2376 * @dev : pointer to the device structure.
1829 * @budget : The number of packets that were budgeted to be processed 2377 * @budget : The number of packets that were budgeted to be processed
1830 * during one pass through the 'Poll" function. 2378 * during one pass through the 'Poll" function.
1831 * Description: 2379 * Description:
1832 * Comes into picture only if NAPI support has been incorporated. It does 2380 * Comes into picture only if NAPI support has been incorporated. It does
@@ -1836,160 +2384,36 @@ static void free_rx_buffers(struct s2io_nic *sp)
1836 * 0 on success and 1 if there are No Rx packets to be processed. 2384 * 0 on success and 1 if there are No Rx packets to be processed.
1837 */ 2385 */
1838 2386
1839#ifdef CONFIG_S2IO_NAPI 2387#if defined(CONFIG_S2IO_NAPI)
1840static int s2io_poll(struct net_device *dev, int *budget) 2388static int s2io_poll(struct net_device *dev, int *budget)
1841{ 2389{
1842 nic_t *nic = dev->priv; 2390 nic_t *nic = dev->priv;
1843 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2391 int pkt_cnt = 0, org_pkts_to_process;
1844 int pkts_to_process = *budget, pkt_cnt = 0;
1845 register u64 val64 = 0;
1846 rx_curr_get_info_t get_info, put_info;
1847 int i, get_block, put_block, get_offset, put_offset, ring_bufs;
1848#ifndef CONFIG_2BUFF_MODE
1849 u16 val16, cksum;
1850#endif
1851 struct sk_buff *skb;
1852 RxD_t *rxdp;
1853 mac_info_t *mac_control; 2392 mac_info_t *mac_control;
1854 struct config_param *config; 2393 struct config_param *config;
1855#ifdef CONFIG_2BUFF_MODE 2394 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
1856 buffAdd_t *ba; 2395 u64 val64;
1857#endif 2396 int i;
1858 2397
2398 atomic_inc(&nic->isr_cnt);
1859 mac_control = &nic->mac_control; 2399 mac_control = &nic->mac_control;
1860 config = &nic->config; 2400 config = &nic->config;
1861 2401
1862 if (pkts_to_process > dev->quota) 2402 nic->pkts_to_process = *budget;
1863 pkts_to_process = dev->quota; 2403 if (nic->pkts_to_process > dev->quota)
2404 nic->pkts_to_process = dev->quota;
2405 org_pkts_to_process = nic->pkts_to_process;
1864 2406
1865 val64 = readq(&bar0->rx_traffic_int); 2407 val64 = readq(&bar0->rx_traffic_int);
1866 writeq(val64, &bar0->rx_traffic_int); 2408 writeq(val64, &bar0->rx_traffic_int);
1867 2409
1868 for (i = 0; i < config->rx_ring_num; i++) { 2410 for (i = 0; i < config->rx_ring_num; i++) {
1869 get_info = mac_control->rx_curr_get_info[i]; 2411 rx_intr_handler(&mac_control->rings[i]);
1870 get_block = get_info.block_index; 2412 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
1871 put_info = mac_control->rx_curr_put_info[i]; 2413 if (!nic->pkts_to_process) {
1872 put_block = put_info.block_index; 2414 /* Quota for the current iteration has been met */
1873 ring_bufs = config->rx_cfg[i].num_rxd; 2415 goto no_rx;
1874 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
1875 get_info.offset;
1876#ifndef CONFIG_2BUFF_MODE
1877 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1878 get_info.offset;
1879 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1880 put_info.offset;
1881 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1882 (((get_offset + 1) % ring_bufs) != put_offset)) {
1883 if (--pkts_to_process < 0) {
1884 goto no_rx;
1885 }
1886 if (rxdp->Control_1 == END_OF_BLOCK) {
1887 rxdp =
1888 (RxD_t *) ((unsigned long) rxdp->
1889 Control_2);
1890 get_info.offset++;
1891 get_info.offset %=
1892 (MAX_RXDS_PER_BLOCK + 1);
1893 get_block++;
1894 get_block %= nic->block_count[i];
1895 mac_control->rx_curr_get_info[i].
1896 offset = get_info.offset;
1897 mac_control->rx_curr_get_info[i].
1898 block_index = get_block;
1899 continue;
1900 }
1901 get_offset =
1902 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1903 get_info.offset;
1904 skb =
1905 (struct sk_buff *) ((unsigned long) rxdp->
1906 Host_Control);
1907 if (skb == NULL) {
1908 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1909 dev->name);
1910 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1911 goto no_rx;
1912 }
1913 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
1914 val16 = (u16) (val64 >> 48);
1915 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
1916 pci_unmap_single(nic->pdev, (dma_addr_t)
1917 rxdp->Buffer0_ptr,
1918 dev->mtu +
1919 HEADER_ETHERNET_II_802_3_SIZE +
1920 HEADER_802_2_SIZE +
1921 HEADER_SNAP_SIZE,
1922 PCI_DMA_FROMDEVICE);
1923 rx_osm_handler(nic, val16, rxdp, i);
1924 pkt_cnt++;
1925 get_info.offset++;
1926 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
1927 rxdp =
1928 nic->rx_blocks[i][get_block].block_virt_addr +
1929 get_info.offset;
1930 mac_control->rx_curr_get_info[i].offset =
1931 get_info.offset;
1932 } 2416 }
1933#else
1934 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1935 get_info.offset;
1936 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1937 put_info.offset;
1938 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1939 !(rxdp->Control_2 & BIT(0))) &&
1940 (((get_offset + 1) % ring_bufs) != put_offset)) {
1941 if (--pkts_to_process < 0) {
1942 goto no_rx;
1943 }
1944 skb = (struct sk_buff *) ((unsigned long)
1945 rxdp->Host_Control);
1946 if (skb == NULL) {
1947 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1948 dev->name);
1949 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1950 goto no_rx;
1951 }
1952
1953 pci_unmap_single(nic->pdev, (dma_addr_t)
1954 rxdp->Buffer0_ptr,
1955 BUF0_LEN, PCI_DMA_FROMDEVICE);
1956 pci_unmap_single(nic->pdev, (dma_addr_t)
1957 rxdp->Buffer1_ptr,
1958 BUF1_LEN, PCI_DMA_FROMDEVICE);
1959 pci_unmap_single(nic->pdev, (dma_addr_t)
1960 rxdp->Buffer2_ptr,
1961 dev->mtu + BUF0_LEN + 4,
1962 PCI_DMA_FROMDEVICE);
1963 ba = &nic->ba[i][get_block][get_info.offset];
1964
1965 rx_osm_handler(nic, rxdp, i, ba);
1966
1967 get_info.offset++;
1968 mac_control->rx_curr_get_info[i].offset =
1969 get_info.offset;
1970 rxdp =
1971 nic->rx_blocks[i][get_block].block_virt_addr +
1972 get_info.offset;
1973
1974 if (get_info.offset &&
1975 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
1976 get_info.offset = 0;
1977 mac_control->rx_curr_get_info[i].
1978 offset = get_info.offset;
1979 get_block++;
1980 get_block %= nic->block_count[i];
1981 mac_control->rx_curr_get_info[i].
1982 block_index = get_block;
1983 rxdp =
1984 nic->rx_blocks[i][get_block].
1985 block_virt_addr;
1986 }
1987 get_offset =
1988 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1989 get_info.offset;
1990 pkt_cnt++;
1991 }
1992#endif
1993 } 2417 }
1994 if (!pkt_cnt) 2418 if (!pkt_cnt)
1995 pkt_cnt = 1; 2419 pkt_cnt = 1;
@@ -2007,9 +2431,10 @@ static int s2io_poll(struct net_device *dev, int *budget)
2007 } 2431 }
2008 /* Re enable the Rx interrupts. */ 2432 /* Re enable the Rx interrupts. */
2009 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS); 2433 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2434 atomic_dec(&nic->isr_cnt);
2010 return 0; 2435 return 0;
2011 2436
2012 no_rx: 2437no_rx:
2013 dev->quota -= pkt_cnt; 2438 dev->quota -= pkt_cnt;
2014 *budget -= pkt_cnt; 2439 *budget -= pkt_cnt;
2015 2440
@@ -2020,279 +2445,204 @@ static int s2io_poll(struct net_device *dev, int *budget)
2020 break; 2445 break;
2021 } 2446 }
2022 } 2447 }
2448 atomic_dec(&nic->isr_cnt);
2023 return 1; 2449 return 1;
2024} 2450}
2025#else 2451#endif
2026/** 2452
2453/**
2027 * rx_intr_handler - Rx interrupt handler 2454 * rx_intr_handler - Rx interrupt handler
2028 * @nic: device private variable. 2455 * @nic: device private variable.
2029 * Description: 2456 * Description:
2030 * If the interrupt is because of a received frame or if the 2457 * If the interrupt is because of a received frame or if the
2031 * receive ring contains fresh as yet un-processed frames,this function is 2458 * receive ring contains fresh as yet un-processed frames,this function is
2032 * called. It picks out the RxD at which place the last Rx processing had 2459 * called. It picks out the RxD at which place the last Rx processing had
2033 * stopped and sends the skb to the OSM's Rx handler and then increments 2460 * stopped and sends the skb to the OSM's Rx handler and then increments
2034 * the offset. 2461 * the offset.
2035 * Return Value: 2462 * Return Value:
2036 * NONE. 2463 * NONE.
2037 */ 2464 */
2038 2465static void rx_intr_handler(ring_info_t *ring_data)
2039static void rx_intr_handler(struct s2io_nic *nic)
2040{ 2466{
2467 nic_t *nic = ring_data->nic;
2041 struct net_device *dev = (struct net_device *) nic->dev; 2468 struct net_device *dev = (struct net_device *) nic->dev;
2042 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 2469 int get_block, get_offset, put_block, put_offset, ring_bufs;
2043 rx_curr_get_info_t get_info, put_info; 2470 rx_curr_get_info_t get_info, put_info;
2044 RxD_t *rxdp; 2471 RxD_t *rxdp;
2045 struct sk_buff *skb; 2472 struct sk_buff *skb;
2046#ifndef CONFIG_2BUFF_MODE 2473#ifndef CONFIG_S2IO_NAPI
2047 u16 val16, cksum; 2474 int pkt_cnt = 0;
2048#endif
2049 register u64 val64 = 0;
2050 int get_block, get_offset, put_block, put_offset, ring_bufs;
2051 int i, pkt_cnt = 0;
2052 mac_info_t *mac_control;
2053 struct config_param *config;
2054#ifdef CONFIG_2BUFF_MODE
2055 buffAdd_t *ba;
2056#endif 2475#endif
2476 spin_lock(&nic->rx_lock);
2477 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2478 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2479 __FUNCTION__, dev->name);
2480 spin_unlock(&nic->rx_lock);
2481 }
2057 2482
2058 mac_control = &nic->mac_control; 2483 get_info = ring_data->rx_curr_get_info;
2059 config = &nic->config; 2484 get_block = get_info.block_index;
2060 2485 put_info = ring_data->rx_curr_put_info;
2061 /* 2486 put_block = put_info.block_index;
2062 * rx_traffic_int reg is an R1 register, hence we read and write back 2487 ring_bufs = get_info.ring_len+1;
2063 * the samevalue in the register to clear it. 2488 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2064 */
2065 val64 = readq(&bar0->rx_traffic_int);
2066 writeq(val64, &bar0->rx_traffic_int);
2067
2068 for (i = 0; i < config->rx_ring_num; i++) {
2069 get_info = mac_control->rx_curr_get_info[i];
2070 get_block = get_info.block_index;
2071 put_info = mac_control->rx_curr_put_info[i];
2072 put_block = put_info.block_index;
2073 ring_bufs = config->rx_cfg[i].num_rxd;
2074 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
2075 get_info.offset;
2076#ifndef CONFIG_2BUFF_MODE
2077 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2078 get_info.offset; 2489 get_info.offset;
2079 spin_lock(&nic->put_lock); 2490 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2080 put_offset = nic->put_pos[i]; 2491 get_info.offset;
2081 spin_unlock(&nic->put_lock); 2492#ifndef CONFIG_S2IO_NAPI
2082 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) && 2493 spin_lock(&nic->put_lock);
2083 (((get_offset + 1) % ring_bufs) != put_offset)) { 2494 put_offset = ring_data->put_pos;
2084 if (rxdp->Control_1 == END_OF_BLOCK) { 2495 spin_unlock(&nic->put_lock);
2085 rxdp = (RxD_t *) ((unsigned long) 2496#else
2086 rxdp->Control_2); 2497 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2087 get_info.offset++; 2498 put_info.offset;
2088 get_info.offset %= 2499#endif
2089 (MAX_RXDS_PER_BLOCK + 1); 2500 while (RXD_IS_UP2DT(rxdp) &&
2090 get_block++; 2501 (((get_offset + 1) % ring_bufs) != put_offset)) {
2091 get_block %= nic->block_count[i]; 2502 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2092 mac_control->rx_curr_get_info[i]. 2503 if (skb == NULL) {
2093 offset = get_info.offset; 2504 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2094 mac_control->rx_curr_get_info[i]. 2505 dev->name);
2095 block_index = get_block; 2506 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2096 continue; 2507 spin_unlock(&nic->rx_lock);
2097 } 2508 return;
2098 get_offset =
2099 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2100 get_info.offset;
2101 skb = (struct sk_buff *) ((unsigned long)
2102 rxdp->Host_Control);
2103 if (skb == NULL) {
2104 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2105 dev->name);
2106 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2107 return;
2108 }
2109 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
2110 val16 = (u16) (val64 >> 48);
2111 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
2112 pci_unmap_single(nic->pdev, (dma_addr_t)
2113 rxdp->Buffer0_ptr,
2114 dev->mtu +
2115 HEADER_ETHERNET_II_802_3_SIZE +
2116 HEADER_802_2_SIZE +
2117 HEADER_SNAP_SIZE,
2118 PCI_DMA_FROMDEVICE);
2119 rx_osm_handler(nic, val16, rxdp, i);
2120 get_info.offset++;
2121 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
2122 rxdp =
2123 nic->rx_blocks[i][get_block].block_virt_addr +
2124 get_info.offset;
2125 mac_control->rx_curr_get_info[i].offset =
2126 get_info.offset;
2127 pkt_cnt++;
2128 if ((indicate_max_pkts)
2129 && (pkt_cnt > indicate_max_pkts))
2130 break;
2131 } 2509 }
2510#ifndef CONFIG_2BUFF_MODE
2511 pci_unmap_single(nic->pdev, (dma_addr_t)
2512 rxdp->Buffer0_ptr,
2513 dev->mtu +
2514 HEADER_ETHERNET_II_802_3_SIZE +
2515 HEADER_802_2_SIZE +
2516 HEADER_SNAP_SIZE,
2517 PCI_DMA_FROMDEVICE);
2132#else 2518#else
2133 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) + 2519 pci_unmap_single(nic->pdev, (dma_addr_t)
2520 rxdp->Buffer0_ptr,
2521 BUF0_LEN, PCI_DMA_FROMDEVICE);
2522 pci_unmap_single(nic->pdev, (dma_addr_t)
2523 rxdp->Buffer1_ptr,
2524 BUF1_LEN, PCI_DMA_FROMDEVICE);
2525 pci_unmap_single(nic->pdev, (dma_addr_t)
2526 rxdp->Buffer2_ptr,
2527 dev->mtu + BUF0_LEN + 4,
2528 PCI_DMA_FROMDEVICE);
2529#endif
2530 rx_osm_handler(ring_data, rxdp);
2531 get_info.offset++;
2532 ring_data->rx_curr_get_info.offset =
2134 get_info.offset; 2533 get_info.offset;
2135 spin_lock(&nic->put_lock); 2534 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2136 put_offset = nic->put_pos[i]; 2535 get_info.offset;
2137 spin_unlock(&nic->put_lock); 2536 if (get_info.offset &&
2138 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) && 2537 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2139 !(rxdp->Control_2 & BIT(0))) && 2538 get_info.offset = 0;
2140 (((get_offset + 1) % ring_bufs) != put_offset)) { 2539 ring_data->rx_curr_get_info.offset
2141 skb = (struct sk_buff *) ((unsigned long) 2540 = get_info.offset;
2142 rxdp->Host_Control); 2541 get_block++;
2143 if (skb == NULL) { 2542 get_block %= ring_data->block_count;
2144 DBG_PRINT(ERR_DBG, "%s: The skb is ", 2543 ring_data->rx_curr_get_info.block_index
2145 dev->name); 2544 = get_block;
2146 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); 2545 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2147 return; 2546 }
2148 }
2149
2150 pci_unmap_single(nic->pdev, (dma_addr_t)
2151 rxdp->Buffer0_ptr,
2152 BUF0_LEN, PCI_DMA_FROMDEVICE);
2153 pci_unmap_single(nic->pdev, (dma_addr_t)
2154 rxdp->Buffer1_ptr,
2155 BUF1_LEN, PCI_DMA_FROMDEVICE);
2156 pci_unmap_single(nic->pdev, (dma_addr_t)
2157 rxdp->Buffer2_ptr,
2158 dev->mtu + BUF0_LEN + 4,
2159 PCI_DMA_FROMDEVICE);
2160 ba = &nic->ba[i][get_block][get_info.offset];
2161
2162 rx_osm_handler(nic, rxdp, i, ba);
2163
2164 get_info.offset++;
2165 mac_control->rx_curr_get_info[i].offset =
2166 get_info.offset;
2167 rxdp =
2168 nic->rx_blocks[i][get_block].block_virt_addr +
2169 get_info.offset;
2170 2547
2171 if (get_info.offset && 2548 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2172 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2173 get_info.offset = 0;
2174 mac_control->rx_curr_get_info[i].
2175 offset = get_info.offset;
2176 get_block++;
2177 get_block %= nic->block_count[i];
2178 mac_control->rx_curr_get_info[i].
2179 block_index = get_block;
2180 rxdp =
2181 nic->rx_blocks[i][get_block].
2182 block_virt_addr;
2183 }
2184 get_offset =
2185 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2186 get_info.offset; 2549 get_info.offset;
2187 pkt_cnt++; 2550#ifdef CONFIG_S2IO_NAPI
2188 if ((indicate_max_pkts) 2551 nic->pkts_to_process -= 1;
2189 && (pkt_cnt > indicate_max_pkts)) 2552 if (!nic->pkts_to_process)
2190 break; 2553 break;
2191 } 2554#else
2192#endif 2555 pkt_cnt++;
2193 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) 2556 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2194 break; 2557 break;
2558#endif
2195 } 2559 }
2560 spin_unlock(&nic->rx_lock);
2196} 2561}
2197#endif 2562
2198/** 2563/**
2199 * tx_intr_handler - Transmit interrupt handler 2564 * tx_intr_handler - Transmit interrupt handler
2200 * @nic : device private variable 2565 * @nic : device private variable
2201 * Description: 2566 * Description:
2202 * If an interrupt was raised to indicate DMA complete of the 2567 * If an interrupt was raised to indicate DMA complete of the
2203 * Tx packet, this function is called. It identifies the last TxD 2568 * Tx packet, this function is called. It identifies the last TxD
2204 * whose buffer was freed and frees all skbs whose data have already 2569 * whose buffer was freed and frees all skbs whose data have already
2205 * DMA'ed into the NICs internal memory. 2570 * DMA'ed into the NICs internal memory.
2206 * Return Value: 2571 * Return Value:
2207 * NONE 2572 * NONE
2208 */ 2573 */
2209 2574
2210static void tx_intr_handler(struct s2io_nic *nic) 2575static void tx_intr_handler(fifo_info_t *fifo_data)
2211{ 2576{
2212 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2577 nic_t *nic = fifo_data->nic;
2213 struct net_device *dev = (struct net_device *) nic->dev; 2578 struct net_device *dev = (struct net_device *) nic->dev;
2214 tx_curr_get_info_t get_info, put_info; 2579 tx_curr_get_info_t get_info, put_info;
2215 struct sk_buff *skb; 2580 struct sk_buff *skb;
2216 TxD_t *txdlp; 2581 TxD_t *txdlp;
2217 register u64 val64 = 0;
2218 int i;
2219 u16 j, frg_cnt; 2582 u16 j, frg_cnt;
2220 mac_info_t *mac_control;
2221 struct config_param *config;
2222 2583
2223 mac_control = &nic->mac_control; 2584 get_info = fifo_data->tx_curr_get_info;
2224 config = &nic->config; 2585 put_info = fifo_data->tx_curr_put_info;
2225 2586 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2226 /* 2587 list_virt_addr;
2227 * tx_traffic_int reg is an R1 register, hence we read and write 2588 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2228 * back the samevalue in the register to clear it. 2589 (get_info.offset != put_info.offset) &&
2229 */ 2590 (txdlp->Host_Control)) {
2230 val64 = readq(&bar0->tx_traffic_int); 2591 /* Check for TxD errors */
2231 writeq(val64, &bar0->tx_traffic_int); 2592 if (txdlp->Control_1 & TXD_T_CODE) {
2232 2593 unsigned long long err;
2233 for (i = 0; i < config->tx_fifo_num; i++) { 2594 err = txdlp->Control_1 & TXD_T_CODE;
2234 get_info = mac_control->tx_curr_get_info[i]; 2595 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2235 put_info = mac_control->tx_curr_put_info[i]; 2596 err);
2236 txdlp = (TxD_t *) nic->list_info[i][get_info.offset]. 2597 }
2237 list_virt_addr;
2238 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2239 (get_info.offset != put_info.offset) &&
2240 (txdlp->Host_Control)) {
2241 /* Check for TxD errors */
2242 if (txdlp->Control_1 & TXD_T_CODE) {
2243 unsigned long long err;
2244 err = txdlp->Control_1 & TXD_T_CODE;
2245 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2246 err);
2247 }
2248
2249 skb = (struct sk_buff *) ((unsigned long)
2250 txdlp->Host_Control);
2251 if (skb == NULL) {
2252 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2253 dev->name);
2254 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2255 return;
2256 }
2257 nic->tx_pkt_count++;
2258 2598
2259 frg_cnt = skb_shinfo(skb)->nr_frags; 2599 skb = (struct sk_buff *) ((unsigned long)
2600 txdlp->Host_Control);
2601 if (skb == NULL) {
2602 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2603 __FUNCTION__);
2604 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2605 return;
2606 }
2260 2607
2261 /* For unfragmented skb */ 2608 frg_cnt = skb_shinfo(skb)->nr_frags;
2262 pci_unmap_single(nic->pdev, (dma_addr_t) 2609 nic->tx_pkt_count++;
2263 txdlp->Buffer_Pointer, 2610
2264 skb->len - skb->data_len, 2611 pci_unmap_single(nic->pdev, (dma_addr_t)
2265 PCI_DMA_TODEVICE); 2612 txdlp->Buffer_Pointer,
2266 if (frg_cnt) { 2613 skb->len - skb->data_len,
2267 TxD_t *temp = txdlp; 2614 PCI_DMA_TODEVICE);
2268 txdlp++; 2615 if (frg_cnt) {
2269 for (j = 0; j < frg_cnt; j++, txdlp++) { 2616 TxD_t *temp;
2270 skb_frag_t *frag = 2617 temp = txdlp;
2271 &skb_shinfo(skb)->frags[j]; 2618 txdlp++;
2272 pci_unmap_page(nic->pdev, 2619 for (j = 0; j < frg_cnt; j++, txdlp++) {
2273 (dma_addr_t) 2620 skb_frag_t *frag =
2274 txdlp-> 2621 &skb_shinfo(skb)->frags[j];
2275 Buffer_Pointer, 2622 if (!txdlp->Buffer_Pointer)
2276 frag->size, 2623 break;
2277 PCI_DMA_TODEVICE); 2624 pci_unmap_page(nic->pdev,
2278 } 2625 (dma_addr_t)
2279 txdlp = temp; 2626 txdlp->
2627 Buffer_Pointer,
2628 frag->size,
2629 PCI_DMA_TODEVICE);
2280 } 2630 }
2281 memset(txdlp, 0, 2631 txdlp = temp;
2282 (sizeof(TxD_t) * config->max_txds));
2283
2284 /* Updating the statistics block */
2285 nic->stats.tx_packets++;
2286 nic->stats.tx_bytes += skb->len;
2287 dev_kfree_skb_irq(skb);
2288
2289 get_info.offset++;
2290 get_info.offset %= get_info.fifo_len + 1;
2291 txdlp = (TxD_t *) nic->list_info[i]
2292 [get_info.offset].list_virt_addr;
2293 mac_control->tx_curr_get_info[i].offset =
2294 get_info.offset;
2295 } 2632 }
2633 memset(txdlp, 0,
2634 (sizeof(TxD_t) * fifo_data->max_txds));
2635
2636 /* Updating the statistics block */
2637 nic->stats.tx_bytes += skb->len;
2638 dev_kfree_skb_irq(skb);
2639
2640 get_info.offset++;
2641 get_info.offset %= get_info.fifo_len + 1;
2642 txdlp = (TxD_t *) fifo_data->list_info
2643 [get_info.offset].list_virt_addr;
2644 fifo_data->tx_curr_get_info.offset =
2645 get_info.offset;
2296 } 2646 }
2297 2647
2298 spin_lock(&nic->tx_lock); 2648 spin_lock(&nic->tx_lock);
@@ -2301,13 +2651,13 @@ static void tx_intr_handler(struct s2io_nic *nic)
2301 spin_unlock(&nic->tx_lock); 2651 spin_unlock(&nic->tx_lock);
2302} 2652}
2303 2653
2304/** 2654/**
2305 * alarm_intr_handler - Alarm Interrrupt handler 2655 * alarm_intr_handler - Alarm Interrrupt handler
2306 * @nic: device private variable 2656 * @nic: device private variable
2307 * Description: If the interrupt was neither because of Rx packet or Tx 2657 * Description: If the interrupt was neither because of Rx packet or Tx
2308 * complete, this function is called. If the interrupt was to indicate 2658 * complete, this function is called. If the interrupt was to indicate
2309 * a loss of link, the OSM link status handler is invoked for any other 2659 * a loss of link, the OSM link status handler is invoked for any other
2310 * alarm interrupt the block that raised the interrupt is displayed 2660 * alarm interrupt the block that raised the interrupt is displayed
2311 * and a H/W reset is issued. 2661 * and a H/W reset is issued.
2312 * Return Value: 2662 * Return Value:
2313 * NONE 2663 * NONE
@@ -2320,10 +2670,30 @@ static void alarm_intr_handler(struct s2io_nic *nic)
2320 register u64 val64 = 0, err_reg = 0; 2670 register u64 val64 = 0, err_reg = 0;
2321 2671
2322 /* Handling link status change error Intr */ 2672 /* Handling link status change error Intr */
2323 err_reg = readq(&bar0->mac_rmac_err_reg); 2673 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2324 writeq(err_reg, &bar0->mac_rmac_err_reg); 2674 err_reg = readq(&bar0->mac_rmac_err_reg);
2325 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) { 2675 writeq(err_reg, &bar0->mac_rmac_err_reg);
2326 schedule_work(&nic->set_link_task); 2676 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2677 schedule_work(&nic->set_link_task);
2678 }
2679 }
2680
2681 /* Handling Ecc errors */
2682 val64 = readq(&bar0->mc_err_reg);
2683 writeq(val64, &bar0->mc_err_reg);
2684 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2685 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2686 nic->mac_control.stats_info->sw_stat.
2687 double_ecc_errs++;
2688 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2689 dev->name);
2690 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2691 netif_stop_queue(dev);
2692 schedule_work(&nic->rst_timer_task);
2693 } else {
2694 nic->mac_control.stats_info->sw_stat.
2695 single_ecc_errs++;
2696 }
2327 } 2697 }
2328 2698
2329 /* In case of a serious error, the device will be Reset. */ 2699 /* In case of a serious error, the device will be Reset. */
@@ -2338,7 +2708,7 @@ static void alarm_intr_handler(struct s2io_nic *nic)
2338 /* 2708 /*
2339 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC 2709 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2340 * Error occurs, the adapter will be recycled by disabling the 2710 * Error occurs, the adapter will be recycled by disabling the
2341 * adapter enable bit and enabling it again after the device 2711 * adapter enable bit and enabling it again after the device
2342 * becomes Quiescent. 2712 * becomes Quiescent.
2343 */ 2713 */
2344 val64 = readq(&bar0->pcc_err_reg); 2714 val64 = readq(&bar0->pcc_err_reg);
@@ -2354,18 +2724,18 @@ static void alarm_intr_handler(struct s2io_nic *nic)
2354 /* Other type of interrupts are not being handled now, TODO */ 2724 /* Other type of interrupts are not being handled now, TODO */
2355} 2725}
2356 2726
2357/** 2727/**
2358 * wait_for_cmd_complete - waits for a command to complete. 2728 * wait_for_cmd_complete - waits for a command to complete.
2359 * @sp : private member of the device structure, which is a pointer to the 2729 * @sp : private member of the device structure, which is a pointer to the
2360 * s2io_nic structure. 2730 * s2io_nic structure.
2361 * Description: Function that waits for a command to Write into RMAC 2731 * Description: Function that waits for a command to Write into RMAC
2362 * ADDR DATA registers to be completed and returns either success or 2732 * ADDR DATA registers to be completed and returns either success or
2363 * error depending on whether the command was complete or not. 2733 * error depending on whether the command was complete or not.
2364 * Return value: 2734 * Return value:
2365 * SUCCESS on success and FAILURE on failure. 2735 * SUCCESS on success and FAILURE on failure.
2366 */ 2736 */
2367 2737
2368static int wait_for_cmd_complete(nic_t * sp) 2738int wait_for_cmd_complete(nic_t * sp)
2369{ 2739{
2370 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2740 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2371 int ret = FAILURE, cnt = 0; 2741 int ret = FAILURE, cnt = 0;
@@ -2385,29 +2755,33 @@ static int wait_for_cmd_complete(nic_t * sp)
2385 return ret; 2755 return ret;
2386} 2756}
2387 2757
2388/** 2758/**
2389 * s2io_reset - Resets the card. 2759 * s2io_reset - Resets the card.
2390 * @sp : private member of the device structure. 2760 * @sp : private member of the device structure.
2391 * Description: Function to Reset the card. This function then also 2761 * Description: Function to Reset the card. This function then also
2392 * restores the previously saved PCI configuration space registers as 2762 * restores the previously saved PCI configuration space registers as
2393 * the card reset also resets the configuration space. 2763 * the card reset also resets the configuration space.
2394 * Return value: 2764 * Return value:
2395 * void. 2765 * void.
2396 */ 2766 */
2397 2767
2398static void s2io_reset(nic_t * sp) 2768void s2io_reset(nic_t * sp)
2399{ 2769{
2400 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2770 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2401 u64 val64; 2771 u64 val64;
2402 u16 subid; 2772 u16 subid, pci_cmd;
2773
2774 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
2775 if (sp->device_type == XFRAME_I_DEVICE)
2776 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
2403 2777
2404 val64 = SW_RESET_ALL; 2778 val64 = SW_RESET_ALL;
2405 writeq(val64, &bar0->sw_reset); 2779 writeq(val64, &bar0->sw_reset);
2406 2780
2407 /* 2781 /*
2408 * At this stage, if the PCI write is indeed completed, the 2782 * At this stage, if the PCI write is indeed completed, the
2409 * card is reset and so is the PCI Config space of the device. 2783 * card is reset and so is the PCI Config space of the device.
2410 * So a read cannot be issued at this stage on any of the 2784 * So a read cannot be issued at this stage on any of the
2411 * registers to ensure the write into "sw_reset" register 2785 * registers to ensure the write into "sw_reset" register
2412 * has gone through. 2786 * has gone through.
2413 * Question: Is there any system call that will explicitly force 2787 * Question: Is there any system call that will explicitly force
@@ -2418,42 +2792,76 @@ static void s2io_reset(nic_t * sp)
2418 */ 2792 */
2419 msleep(250); 2793 msleep(250);
2420 2794
2421 /* Restore the PCI state saved during initializarion. */ 2795 if (!(sp->device_type & XFRAME_II_DEVICE)) {
2422 pci_restore_state(sp->pdev); 2796 /* Restore the PCI state saved during initializarion. */
2797 pci_restore_state(sp->pdev);
2798 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
2799 pci_cmd);
2800 } else {
2801 pci_set_master(sp->pdev);
2802 }
2423 s2io_init_pci(sp); 2803 s2io_init_pci(sp);
2424 2804
2425 msleep(250); 2805 msleep(250);
2426 2806
2807 /* Set swapper to enable I/O register access */
2808 s2io_set_swapper(sp);
2809
2810 /* Clear certain PCI/PCI-X fields after reset */
2811 if (sp->device_type == XFRAME_II_DEVICE) {
2812 /* Clear parity err detect bit */
2813 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
2814
2815 /* Clearing PCIX Ecc status register */
2816 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
2817
2818 /* Clearing PCI_STATUS error reflected here */
2819 writeq(BIT(62), &bar0->txpic_int_reg);
2820 }
2821
2822 /* Reset device statistics maintained by OS */
2823 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2824
2427 /* SXE-002: Configure link and activity LED to turn it off */ 2825 /* SXE-002: Configure link and activity LED to turn it off */
2428 subid = sp->pdev->subsystem_device; 2826 subid = sp->pdev->subsystem_device;
2429 if ((subid & 0xFF) >= 0x07) { 2827 if (((subid & 0xFF) >= 0x07) &&
2828 (sp->device_type == XFRAME_I_DEVICE)) {
2430 val64 = readq(&bar0->gpio_control); 2829 val64 = readq(&bar0->gpio_control);
2431 val64 |= 0x0000800000000000ULL; 2830 val64 |= 0x0000800000000000ULL;
2432 writeq(val64, &bar0->gpio_control); 2831 writeq(val64, &bar0->gpio_control);
2433 val64 = 0x0411040400000000ULL; 2832 val64 = 0x0411040400000000ULL;
2434 writeq(val64, (void __iomem *) bar0 + 0x2700); 2833 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2834 }
2835
2836 /*
2837 * Clear spurious ECC interrupts that would have occured on
2838 * XFRAME II cards after reset.
2839 */
2840 if (sp->device_type == XFRAME_II_DEVICE) {
2841 val64 = readq(&bar0->pcc_err_reg);
2842 writeq(val64, &bar0->pcc_err_reg);
2435 } 2843 }
2436 2844
2437 sp->device_enabled_once = FALSE; 2845 sp->device_enabled_once = FALSE;
2438} 2846}
2439 2847
2440/** 2848/**
2441 * s2io_set_swapper - to set the swapper controle on the card 2849 * s2io_set_swapper - to set the swapper controle on the card
2442 * @sp : private member of the device structure, 2850 * @sp : private member of the device structure,
2443 * pointer to the s2io_nic structure. 2851 * pointer to the s2io_nic structure.
2444 * Description: Function to set the swapper control on the card 2852 * Description: Function to set the swapper control on the card
2445 * correctly depending on the 'endianness' of the system. 2853 * correctly depending on the 'endianness' of the system.
2446 * Return value: 2854 * Return value:
2447 * SUCCESS on success and FAILURE on failure. 2855 * SUCCESS on success and FAILURE on failure.
2448 */ 2856 */
2449 2857
2450static int s2io_set_swapper(nic_t * sp) 2858int s2io_set_swapper(nic_t * sp)
2451{ 2859{
2452 struct net_device *dev = sp->dev; 2860 struct net_device *dev = sp->dev;
2453 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2861 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2454 u64 val64, valt, valr; 2862 u64 val64, valt, valr;
2455 2863
2456 /* 2864 /*
2457 * Set proper endian settings and verify the same by reading 2865 * Set proper endian settings and verify the same by reading
2458 * the PIF Feed-back register. 2866 * the PIF Feed-back register.
2459 */ 2867 */
@@ -2505,8 +2913,9 @@ static int s2io_set_swapper(nic_t * sp)
2505 i++; 2913 i++;
2506 } 2914 }
2507 if(i == 4) { 2915 if(i == 4) {
2916 unsigned long long x = val64;
2508 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr "); 2917 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2509 DBG_PRINT(ERR_DBG, "reads:0x%llx\n",val64); 2918 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2510 return FAILURE; 2919 return FAILURE;
2511 } 2920 }
2512 } 2921 }
@@ -2514,8 +2923,8 @@ static int s2io_set_swapper(nic_t * sp)
2514 val64 &= 0xFFFF000000000000ULL; 2923 val64 &= 0xFFFF000000000000ULL;
2515 2924
2516#ifdef __BIG_ENDIAN 2925#ifdef __BIG_ENDIAN
2517 /* 2926 /*
2518 * The device by default set to a big endian format, so a 2927 * The device by default set to a big endian format, so a
2519 * big endian driver need not set anything. 2928 * big endian driver need not set anything.
2520 */ 2929 */
2521 val64 |= (SWAPPER_CTRL_TXP_FE | 2930 val64 |= (SWAPPER_CTRL_TXP_FE |
@@ -2531,9 +2940,9 @@ static int s2io_set_swapper(nic_t * sp)
2531 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE); 2940 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2532 writeq(val64, &bar0->swapper_ctrl); 2941 writeq(val64, &bar0->swapper_ctrl);
2533#else 2942#else
2534 /* 2943 /*
2535 * Initially we enable all bits to make it accessible by the 2944 * Initially we enable all bits to make it accessible by the
2536 * driver, then we selectively enable only those bits that 2945 * driver, then we selectively enable only those bits that
2537 * we want to set. 2946 * we want to set.
2538 */ 2947 */
2539 val64 |= (SWAPPER_CTRL_TXP_FE | 2948 val64 |= (SWAPPER_CTRL_TXP_FE |
@@ -2555,8 +2964,8 @@ static int s2io_set_swapper(nic_t * sp)
2555#endif 2964#endif
2556 val64 = readq(&bar0->swapper_ctrl); 2965 val64 = readq(&bar0->swapper_ctrl);
2557 2966
2558 /* 2967 /*
2559 * Verifying if endian settings are accurate by reading a 2968 * Verifying if endian settings are accurate by reading a
2560 * feedback register. 2969 * feedback register.
2561 */ 2970 */
2562 val64 = readq(&bar0->pif_rd_swapper_fb); 2971 val64 = readq(&bar0->pif_rd_swapper_fb);
@@ -2576,55 +2985,63 @@ static int s2io_set_swapper(nic_t * sp)
2576 * Functions defined below concern the OS part of the driver * 2985 * Functions defined below concern the OS part of the driver *
2577 * ********************************************************* */ 2986 * ********************************************************* */
2578 2987
2579/** 2988/**
2580 * s2io_open - open entry point of the driver 2989 * s2io_open - open entry point of the driver
2581 * @dev : pointer to the device structure. 2990 * @dev : pointer to the device structure.
2582 * Description: 2991 * Description:
2583 * This function is the open entry point of the driver. It mainly calls a 2992 * This function is the open entry point of the driver. It mainly calls a
2584 * function to allocate Rx buffers and inserts them into the buffer 2993 * function to allocate Rx buffers and inserts them into the buffer
2585 * descriptors and then enables the Rx part of the NIC. 2994 * descriptors and then enables the Rx part of the NIC.
2586 * Return value: 2995 * Return value:
2587 * 0 on success and an appropriate (-)ve integer as defined in errno.h 2996 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2588 * file on failure. 2997 * file on failure.
2589 */ 2998 */
2590 2999
2591static int s2io_open(struct net_device *dev) 3000int s2io_open(struct net_device *dev)
2592{ 3001{
2593 nic_t *sp = dev->priv; 3002 nic_t *sp = dev->priv;
2594 int err = 0; 3003 int err = 0;
2595 3004
2596 /* 3005 /*
2597 * Make sure you have link off by default every time 3006 * Make sure you have link off by default every time
2598 * Nic is initialized 3007 * Nic is initialized
2599 */ 3008 */
2600 netif_carrier_off(dev); 3009 netif_carrier_off(dev);
2601 sp->last_link_state = LINK_DOWN; 3010 sp->last_link_state = 0;
2602 3011
2603 /* Initialize H/W and enable interrupts */ 3012 /* Initialize H/W and enable interrupts */
2604 if (s2io_card_up(sp)) { 3013 if (s2io_card_up(sp)) {
2605 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", 3014 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2606 dev->name); 3015 dev->name);
2607 return -ENODEV; 3016 err = -ENODEV;
3017 goto hw_init_failed;
2608 } 3018 }
2609 3019
2610 /* After proper initialization of H/W, register ISR */ 3020 /* After proper initialization of H/W, register ISR */
2611 err = request_irq((int) sp->irq, s2io_isr, SA_SHIRQ, 3021 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2612 sp->name, dev); 3022 sp->name, dev);
2613 if (err) { 3023 if (err) {
2614 s2io_reset(sp);
2615 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", 3024 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2616 dev->name); 3025 dev->name);
2617 return err; 3026 goto isr_registration_failed;
2618 } 3027 }
2619 3028
2620 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) { 3029 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2621 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n"); 3030 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2622 s2io_reset(sp); 3031 err = -ENODEV;
2623 return -ENODEV; 3032 goto setting_mac_address_failed;
2624 } 3033 }
2625 3034
2626 netif_start_queue(dev); 3035 netif_start_queue(dev);
2627 return 0; 3036 return 0;
3037
3038setting_mac_address_failed:
3039 free_irq(sp->pdev->irq, dev);
3040isr_registration_failed:
3041 del_timer_sync(&sp->alarm_timer);
3042 s2io_reset(sp);
3043hw_init_failed:
3044 return err;
2628} 3045}
2629 3046
2630/** 3047/**
@@ -2640,16 +3057,15 @@ static int s2io_open(struct net_device *dev)
2640 * file on failure. 3057 * file on failure.
2641 */ 3058 */
2642 3059
2643static int s2io_close(struct net_device *dev) 3060int s2io_close(struct net_device *dev)
2644{ 3061{
2645 nic_t *sp = dev->priv; 3062 nic_t *sp = dev->priv;
2646
2647 flush_scheduled_work(); 3063 flush_scheduled_work();
2648 netif_stop_queue(dev); 3064 netif_stop_queue(dev);
2649 /* Reset card, kill tasklet and free Tx and Rx buffers. */ 3065 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2650 s2io_card_down(sp); 3066 s2io_card_down(sp);
2651 3067
2652 free_irq(dev->irq, dev); 3068 free_irq(sp->pdev->irq, dev);
2653 sp->device_close_flag = TRUE; /* Device is shut down. */ 3069 sp->device_close_flag = TRUE; /* Device is shut down. */
2654 return 0; 3070 return 0;
2655} 3071}
@@ -2667,7 +3083,7 @@ static int s2io_close(struct net_device *dev)
2667 * 0 on success & 1 on failure. 3083 * 0 on success & 1 on failure.
2668 */ 3084 */
2669 3085
2670static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) 3086int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2671{ 3087{
2672 nic_t *sp = dev->priv; 3088 nic_t *sp = dev->priv;
2673 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off; 3089 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
@@ -2678,29 +3094,39 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2678#ifdef NETIF_F_TSO 3094#ifdef NETIF_F_TSO
2679 int mss; 3095 int mss;
2680#endif 3096#endif
3097 u16 vlan_tag = 0;
3098 int vlan_priority = 0;
2681 mac_info_t *mac_control; 3099 mac_info_t *mac_control;
2682 struct config_param *config; 3100 struct config_param *config;
2683 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2684 3101
2685 mac_control = &sp->mac_control; 3102 mac_control = &sp->mac_control;
2686 config = &sp->config; 3103 config = &sp->config;
2687 3104
2688 DBG_PRINT(TX_DBG, "%s: In S2IO Tx routine\n", dev->name); 3105 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2689 spin_lock_irqsave(&sp->tx_lock, flags); 3106 spin_lock_irqsave(&sp->tx_lock, flags);
2690
2691 if (atomic_read(&sp->card_state) == CARD_DOWN) { 3107 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2692 DBG_PRINT(ERR_DBG, "%s: Card going down for reset\n", 3108 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2693 dev->name); 3109 dev->name);
2694 spin_unlock_irqrestore(&sp->tx_lock, flags); 3110 spin_unlock_irqrestore(&sp->tx_lock, flags);
2695 return 1; 3111 dev_kfree_skb(skb);
3112 return 0;
2696 } 3113 }
2697 3114
2698 queue = 0; 3115 queue = 0;
2699 put_off = (u16) mac_control->tx_curr_put_info[queue].offset;
2700 get_off = (u16) mac_control->tx_curr_get_info[queue].offset;
2701 txdp = (TxD_t *) sp->list_info[queue][put_off].list_virt_addr;
2702 3116
2703 queue_len = mac_control->tx_curr_put_info[queue].fifo_len + 1; 3117 /* Get Fifo number to Transmit based on vlan priority */
3118 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3119 vlan_tag = vlan_tx_tag_get(skb);
3120 vlan_priority = vlan_tag >> 13;
3121 queue = config->fifo_mapping[vlan_priority];
3122 }
3123
3124 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3125 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3126 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3127 list_virt_addr;
3128
3129 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2704 /* Avoid "put" pointer going beyond "get" pointer */ 3130 /* Avoid "put" pointer going beyond "get" pointer */
2705 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) { 3131 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2706 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n"); 3132 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
@@ -2709,6 +3135,15 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2709 spin_unlock_irqrestore(&sp->tx_lock, flags); 3135 spin_unlock_irqrestore(&sp->tx_lock, flags);
2710 return 0; 3136 return 0;
2711 } 3137 }
3138
3139 /* A buffer with no data will be dropped */
3140 if (!skb->len) {
3141 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3142 dev_kfree_skb(skb);
3143 spin_unlock_irqrestore(&sp->tx_lock, flags);
3144 return 0;
3145 }
3146
2712#ifdef NETIF_F_TSO 3147#ifdef NETIF_F_TSO
2713 mss = skb_shinfo(skb)->tso_size; 3148 mss = skb_shinfo(skb)->tso_size;
2714 if (mss) { 3149 if (mss) {
@@ -2720,9 +3155,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2720 frg_cnt = skb_shinfo(skb)->nr_frags; 3155 frg_cnt = skb_shinfo(skb)->nr_frags;
2721 frg_len = skb->len - skb->data_len; 3156 frg_len = skb->len - skb->data_len;
2722 3157
2723 txdp->Host_Control = (unsigned long) skb;
2724 txdp->Buffer_Pointer = pci_map_single 3158 txdp->Buffer_Pointer = pci_map_single
2725 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); 3159 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3160 txdp->Host_Control = (unsigned long) skb;
2726 if (skb->ip_summed == CHECKSUM_HW) { 3161 if (skb->ip_summed == CHECKSUM_HW) {
2727 txdp->Control_2 |= 3162 txdp->Control_2 |=
2728 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN | 3163 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
@@ -2731,6 +3166,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2731 3166
2732 txdp->Control_2 |= config->tx_intr_type; 3167 txdp->Control_2 |= config->tx_intr_type;
2733 3168
3169 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3170 txdp->Control_2 |= TXD_VLAN_ENABLE;
3171 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3172 }
3173
2734 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) | 3174 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2735 TXD_GATHER_CODE_FIRST); 3175 TXD_GATHER_CODE_FIRST);
2736 txdp->Control_1 |= TXD_LIST_OWN_XENA; 3176 txdp->Control_1 |= TXD_LIST_OWN_XENA;
@@ -2738,6 +3178,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2738 /* For fragmented SKB. */ 3178 /* For fragmented SKB. */
2739 for (i = 0; i < frg_cnt; i++) { 3179 for (i = 0; i < frg_cnt; i++) {
2740 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3180 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3181 /* A '0' length fragment will be ignored */
3182 if (!frag->size)
3183 continue;
2741 txdp++; 3184 txdp++;
2742 txdp->Buffer_Pointer = (u64) pci_map_page 3185 txdp->Buffer_Pointer = (u64) pci_map_page
2743 (sp->pdev, frag->page, frag->page_offset, 3186 (sp->pdev, frag->page, frag->page_offset,
@@ -2747,23 +3190,23 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2747 txdp->Control_1 |= TXD_GATHER_CODE_LAST; 3190 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2748 3191
2749 tx_fifo = mac_control->tx_FIFO_start[queue]; 3192 tx_fifo = mac_control->tx_FIFO_start[queue];
2750 val64 = sp->list_info[queue][put_off].list_phy_addr; 3193 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2751 writeq(val64, &tx_fifo->TxDL_Pointer); 3194 writeq(val64, &tx_fifo->TxDL_Pointer);
2752 3195
2753 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | 3196 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2754 TX_FIFO_LAST_LIST); 3197 TX_FIFO_LAST_LIST);
3198
2755#ifdef NETIF_F_TSO 3199#ifdef NETIF_F_TSO
2756 if (mss) 3200 if (mss)
2757 val64 |= TX_FIFO_SPECIAL_FUNC; 3201 val64 |= TX_FIFO_SPECIAL_FUNC;
2758#endif 3202#endif
2759 writeq(val64, &tx_fifo->List_Control); 3203 writeq(val64, &tx_fifo->List_Control);
2760 3204
2761 /* Perform a PCI read to flush previous writes */ 3205 mmiowb();
2762 val64 = readq(&bar0->general_int_status);
2763 3206
2764 put_off++; 3207 put_off++;
2765 put_off %= mac_control->tx_curr_put_info[queue].fifo_len + 1; 3208 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2766 mac_control->tx_curr_put_info[queue].offset = put_off; 3209 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2767 3210
2768 /* Avoid "put" pointer going beyond "get" pointer */ 3211 /* Avoid "put" pointer going beyond "get" pointer */
2769 if (((put_off + 1) % queue_len) == get_off) { 3212 if (((put_off + 1) % queue_len) == get_off) {
@@ -2779,18 +3222,74 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2779 return 0; 3222 return 0;
2780} 3223}
2781 3224
3225static void
3226s2io_alarm_handle(unsigned long data)
3227{
3228 nic_t *sp = (nic_t *)data;
3229
3230 alarm_intr_handler(sp);
3231 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3232}
3233
3234static void s2io_txpic_intr_handle(nic_t *sp)
3235{
3236 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) sp->bar0;
3237 u64 val64;
3238
3239 val64 = readq(&bar0->pic_int_status);
3240 if (val64 & PIC_INT_GPIO) {
3241 val64 = readq(&bar0->gpio_int_reg);
3242 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3243 (val64 & GPIO_INT_REG_LINK_UP)) {
3244 val64 |= GPIO_INT_REG_LINK_DOWN;
3245 val64 |= GPIO_INT_REG_LINK_UP;
3246 writeq(val64, &bar0->gpio_int_reg);
3247 goto masking;
3248 }
3249
3250 if (((sp->last_link_state == LINK_UP) &&
3251 (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3252 ((sp->last_link_state == LINK_DOWN) &&
3253 (val64 & GPIO_INT_REG_LINK_UP))) {
3254 val64 = readq(&bar0->gpio_int_mask);
3255 val64 |= GPIO_INT_MASK_LINK_DOWN;
3256 val64 |= GPIO_INT_MASK_LINK_UP;
3257 writeq(val64, &bar0->gpio_int_mask);
3258 s2io_set_link((unsigned long)sp);
3259 }
3260masking:
3261 if (sp->last_link_state == LINK_UP) {
3262 /*enable down interrupt */
3263 val64 = readq(&bar0->gpio_int_mask);
3264 /* unmasks link down intr */
3265 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
3266 /* masks link up intr */
3267 val64 |= GPIO_INT_MASK_LINK_UP;
3268 writeq(val64, &bar0->gpio_int_mask);
3269 } else {
3270 /*enable UP Interrupt */
3271 val64 = readq(&bar0->gpio_int_mask);
3272 /* unmasks link up interrupt */
3273 val64 &= ~GPIO_INT_MASK_LINK_UP;
3274 /* masks link down interrupt */
3275 val64 |= GPIO_INT_MASK_LINK_DOWN;
3276 writeq(val64, &bar0->gpio_int_mask);
3277 }
3278 }
3279}
3280
2782/** 3281/**
2783 * s2io_isr - ISR handler of the device . 3282 * s2io_isr - ISR handler of the device .
2784 * @irq: the irq of the device. 3283 * @irq: the irq of the device.
2785 * @dev_id: a void pointer to the dev structure of the NIC. 3284 * @dev_id: a void pointer to the dev structure of the NIC.
2786 * @pt_regs: pointer to the registers pushed on the stack. 3285 * @pt_regs: pointer to the registers pushed on the stack.
2787 * Description: This function is the ISR handler of the device. It 3286 * Description: This function is the ISR handler of the device. It
2788 * identifies the reason for the interrupt and calls the relevant 3287 * identifies the reason for the interrupt and calls the relevant
2789 * service routines. As a contongency measure, this ISR allocates the 3288 * service routines. As a contongency measure, this ISR allocates the
2790 * recv buffers, if their numbers are below the panic value which is 3289 * recv buffers, if their numbers are below the panic value which is
2791 * presently set to 25% of the original number of rcv buffers allocated. 3290 * presently set to 25% of the original number of rcv buffers allocated.
2792 * Return value: 3291 * Return value:
2793 * IRQ_HANDLED: will be returned if IRQ was handled by this routine 3292 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2794 * IRQ_NONE: will be returned if interrupt is not from our device 3293 * IRQ_NONE: will be returned if interrupt is not from our device
2795 */ 3294 */
2796static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs) 3295static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2798,40 +3297,31 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2798 struct net_device *dev = (struct net_device *) dev_id; 3297 struct net_device *dev = (struct net_device *) dev_id;
2799 nic_t *sp = dev->priv; 3298 nic_t *sp = dev->priv;
2800 XENA_dev_config_t __iomem *bar0 = sp->bar0; 3299 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2801#ifndef CONFIG_S2IO_NAPI 3300 int i;
2802 int i, ret; 3301 u64 reason = 0, val64;
2803#endif
2804 u64 reason = 0;
2805 mac_info_t *mac_control; 3302 mac_info_t *mac_control;
2806 struct config_param *config; 3303 struct config_param *config;
2807 3304
3305 atomic_inc(&sp->isr_cnt);
2808 mac_control = &sp->mac_control; 3306 mac_control = &sp->mac_control;
2809 config = &sp->config; 3307 config = &sp->config;
2810 3308
2811 /* 3309 /*
2812 * Identify the cause for interrupt and call the appropriate 3310 * Identify the cause for interrupt and call the appropriate
2813 * interrupt handler. Causes for the interrupt could be; 3311 * interrupt handler. Causes for the interrupt could be;
2814 * 1. Rx of packet. 3312 * 1. Rx of packet.
2815 * 2. Tx complete. 3313 * 2. Tx complete.
2816 * 3. Link down. 3314 * 3. Link down.
2817 * 4. Error in any functional blocks of the NIC. 3315 * 4. Error in any functional blocks of the NIC.
2818 */ 3316 */
2819 reason = readq(&bar0->general_int_status); 3317 reason = readq(&bar0->general_int_status);
2820 3318
2821 if (!reason) { 3319 if (!reason) {
2822 /* The interrupt was not raised by Xena. */ 3320 /* The interrupt was not raised by Xena. */
3321 atomic_dec(&sp->isr_cnt);
2823 return IRQ_NONE; 3322 return IRQ_NONE;
2824 } 3323 }
2825 3324
2826 /* If Intr is because of Tx Traffic */
2827 if (reason & GEN_INTR_TXTRAFFIC) {
2828 tx_intr_handler(sp);
2829 }
2830
2831 /* If Intr is because of an error */
2832 if (reason & (GEN_ERROR_INTR))
2833 alarm_intr_handler(sp);
2834
2835#ifdef CONFIG_S2IO_NAPI 3325#ifdef CONFIG_S2IO_NAPI
2836 if (reason & GEN_INTR_RXTRAFFIC) { 3326 if (reason & GEN_INTR_RXTRAFFIC) {
2837 if (netif_rx_schedule_prep(dev)) { 3327 if (netif_rx_schedule_prep(dev)) {
@@ -2843,17 +3333,43 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2843#else 3333#else
2844 /* If Intr is because of Rx Traffic */ 3334 /* If Intr is because of Rx Traffic */
2845 if (reason & GEN_INTR_RXTRAFFIC) { 3335 if (reason & GEN_INTR_RXTRAFFIC) {
2846 rx_intr_handler(sp); 3336 /*
3337 * rx_traffic_int reg is an R1 register, writing all 1's
3338 * will ensure that the actual interrupt causing bit get's
3339 * cleared and hence a read can be avoided.
3340 */
3341 val64 = 0xFFFFFFFFFFFFFFFFULL;
3342 writeq(val64, &bar0->rx_traffic_int);
3343 for (i = 0; i < config->rx_ring_num; i++) {
3344 rx_intr_handler(&mac_control->rings[i]);
3345 }
2847 } 3346 }
2848#endif 3347#endif
2849 3348
2850 /* 3349 /* If Intr is because of Tx Traffic */
2851 * If the Rx buffer count is below the panic threshold then 3350 if (reason & GEN_INTR_TXTRAFFIC) {
2852 * reallocate the buffers from the interrupt handler itself, 3351 /*
3352 * tx_traffic_int reg is an R1 register, writing all 1's
3353 * will ensure that the actual interrupt causing bit get's
3354 * cleared and hence a read can be avoided.
3355 */
3356 val64 = 0xFFFFFFFFFFFFFFFFULL;
3357 writeq(val64, &bar0->tx_traffic_int);
3358
3359 for (i = 0; i < config->tx_fifo_num; i++)
3360 tx_intr_handler(&mac_control->fifos[i]);
3361 }
3362
3363 if (reason & GEN_INTR_TXPIC)
3364 s2io_txpic_intr_handle(sp);
3365 /*
3366 * If the Rx buffer count is below the panic threshold then
3367 * reallocate the buffers from the interrupt handler itself,
2853 * else schedule a tasklet to reallocate the buffers. 3368 * else schedule a tasklet to reallocate the buffers.
2854 */ 3369 */
2855#ifndef CONFIG_S2IO_NAPI 3370#ifndef CONFIG_S2IO_NAPI
2856 for (i = 0; i < config->rx_ring_num; i++) { 3371 for (i = 0; i < config->rx_ring_num; i++) {
3372 int ret;
2857 int rxb_size = atomic_read(&sp->rx_bufs_left[i]); 3373 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2858 int level = rx_buffer_level(sp, rxb_size, i); 3374 int level = rx_buffer_level(sp, rxb_size, i);
2859 3375
@@ -2865,6 +3381,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2865 dev->name); 3381 dev->name);
2866 DBG_PRINT(ERR_DBG, " in ISR!!\n"); 3382 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2867 clear_bit(0, (&sp->tasklet_status)); 3383 clear_bit(0, (&sp->tasklet_status));
3384 atomic_dec(&sp->isr_cnt);
2868 return IRQ_HANDLED; 3385 return IRQ_HANDLED;
2869 } 3386 }
2870 clear_bit(0, (&sp->tasklet_status)); 3387 clear_bit(0, (&sp->tasklet_status));
@@ -2874,33 +3391,69 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2874 } 3391 }
2875#endif 3392#endif
2876 3393
3394 atomic_dec(&sp->isr_cnt);
2877 return IRQ_HANDLED; 3395 return IRQ_HANDLED;
2878} 3396}
2879 3397
2880/** 3398/**
2881 * s2io_get_stats - Updates the device statistics structure. 3399 * s2io_updt_stats -
3400 */
3401static void s2io_updt_stats(nic_t *sp)
3402{
3403 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3404 u64 val64;
3405 int cnt = 0;
3406
3407 if (atomic_read(&sp->card_state) == CARD_UP) {
3408 /* Apprx 30us on a 133 MHz bus */
3409 val64 = SET_UPDT_CLICKS(10) |
3410 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3411 writeq(val64, &bar0->stat_cfg);
3412 do {
3413 udelay(100);
3414 val64 = readq(&bar0->stat_cfg);
3415 if (!(val64 & BIT(0)))
3416 break;
3417 cnt++;
3418 if (cnt == 5)
3419 break; /* Updt failed */
3420 } while(1);
3421 }
3422}
3423
3424/**
3425 * s2io_get_stats - Updates the device statistics structure.
2882 * @dev : pointer to the device structure. 3426 * @dev : pointer to the device structure.
2883 * Description: 3427 * Description:
2884 * This function updates the device statistics structure in the s2io_nic 3428 * This function updates the device statistics structure in the s2io_nic
2885 * structure and returns a pointer to the same. 3429 * structure and returns a pointer to the same.
2886 * Return value: 3430 * Return value:
2887 * pointer to the updated net_device_stats structure. 3431 * pointer to the updated net_device_stats structure.
2888 */ 3432 */
2889 3433
2890static struct net_device_stats *s2io_get_stats(struct net_device *dev) 3434struct net_device_stats *s2io_get_stats(struct net_device *dev)
2891{ 3435{
2892 nic_t *sp = dev->priv; 3436 nic_t *sp = dev->priv;
2893 mac_info_t *mac_control; 3437 mac_info_t *mac_control;
2894 struct config_param *config; 3438 struct config_param *config;
2895 3439
3440
2896 mac_control = &sp->mac_control; 3441 mac_control = &sp->mac_control;
2897 config = &sp->config; 3442 config = &sp->config;
2898 3443
2899 sp->stats.tx_errors = mac_control->stats_info->tmac_any_err_frms; 3444 /* Configure Stats for immediate updt */
2900 sp->stats.rx_errors = mac_control->stats_info->rmac_drop_frms; 3445 s2io_updt_stats(sp);
2901 sp->stats.multicast = mac_control->stats_info->rmac_vld_mcst_frms; 3446
3447 sp->stats.tx_packets =
3448 le32_to_cpu(mac_control->stats_info->tmac_frms);
3449 sp->stats.tx_errors =
3450 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3451 sp->stats.rx_errors =
3452 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3453 sp->stats.multicast =
3454 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
2902 sp->stats.rx_length_errors = 3455 sp->stats.rx_length_errors =
2903 mac_control->stats_info->rmac_long_frms; 3456 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
2904 3457
2905 return (&sp->stats); 3458 return (&sp->stats);
2906} 3459}
@@ -2909,8 +3462,8 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
2909 * s2io_set_multicast - entry point for multicast address enable/disable. 3462 * s2io_set_multicast - entry point for multicast address enable/disable.
2910 * @dev : pointer to the device structure 3463 * @dev : pointer to the device structure
2911 * Description: 3464 * Description:
2912 * This function is a driver entry point which gets called by the kernel 3465 * This function is a driver entry point which gets called by the kernel
2913 * whenever multicast addresses must be enabled/disabled. This also gets 3466 * whenever multicast addresses must be enabled/disabled. This also gets
2914 * called to set/reset promiscuous mode. Depending on the deivce flag, we 3467 * called to set/reset promiscuous mode. Depending on the deivce flag, we
2915 * determine, if multicast address must be enabled or if promiscuous mode 3468 * determine, if multicast address must be enabled or if promiscuous mode
2916 * is to be disabled etc. 3469 * is to be disabled etc.
@@ -2948,6 +3501,8 @@ static void s2io_set_multicast(struct net_device *dev)
2948 /* Disable all Multicast addresses */ 3501 /* Disable all Multicast addresses */
2949 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr), 3502 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2950 &bar0->rmac_addr_data0_mem); 3503 &bar0->rmac_addr_data0_mem);
3504 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3505 &bar0->rmac_addr_data1_mem);
2951 val64 = RMAC_ADDR_CMD_MEM_WE | 3506 val64 = RMAC_ADDR_CMD_MEM_WE |
2952 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 3507 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2953 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos); 3508 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
@@ -3010,7 +3565,7 @@ static void s2io_set_multicast(struct net_device *dev)
3010 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr), 3565 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3011 &bar0->rmac_addr_data0_mem); 3566 &bar0->rmac_addr_data0_mem);
3012 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), 3567 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3013 &bar0->rmac_addr_data1_mem); 3568 &bar0->rmac_addr_data1_mem);
3014 val64 = RMAC_ADDR_CMD_MEM_WE | 3569 val64 = RMAC_ADDR_CMD_MEM_WE |
3015 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 3570 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3016 RMAC_ADDR_CMD_MEM_OFFSET 3571 RMAC_ADDR_CMD_MEM_OFFSET
@@ -3039,8 +3594,7 @@ static void s2io_set_multicast(struct net_device *dev)
3039 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr), 3594 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3040 &bar0->rmac_addr_data0_mem); 3595 &bar0->rmac_addr_data0_mem);
3041 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), 3596 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3042 &bar0->rmac_addr_data1_mem); 3597 &bar0->rmac_addr_data1_mem);
3043
3044 val64 = RMAC_ADDR_CMD_MEM_WE | 3598 val64 = RMAC_ADDR_CMD_MEM_WE |
3045 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 3599 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3046 RMAC_ADDR_CMD_MEM_OFFSET 3600 RMAC_ADDR_CMD_MEM_OFFSET
@@ -3059,12 +3613,12 @@ static void s2io_set_multicast(struct net_device *dev)
3059} 3613}
3060 3614
3061/** 3615/**
3062 * s2io_set_mac_addr - Programs the Xframe mac address 3616 * s2io_set_mac_addr - Programs the Xframe mac address
3063 * @dev : pointer to the device structure. 3617 * @dev : pointer to the device structure.
3064 * @addr: a uchar pointer to the new mac address which is to be set. 3618 * @addr: a uchar pointer to the new mac address which is to be set.
3065 * Description : This procedure will program the Xframe to receive 3619 * Description : This procedure will program the Xframe to receive
3066 * frames with new Mac Address 3620 * frames with new Mac Address
3067 * Return value: SUCCESS on success and an appropriate (-)ve integer 3621 * Return value: SUCCESS on success and an appropriate (-)ve integer
3068 * as defined in errno.h file on failure. 3622 * as defined in errno.h file on failure.
3069 */ 3623 */
3070 3624
@@ -3075,10 +3629,10 @@ int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3075 register u64 val64, mac_addr = 0; 3629 register u64 val64, mac_addr = 0;
3076 int i; 3630 int i;
3077 3631
3078 /* 3632 /*
3079 * Set the new MAC address as the new unicast filter and reflect this 3633 * Set the new MAC address as the new unicast filter and reflect this
3080 * change on the device address registered with the OS. It will be 3634 * change on the device address registered with the OS. It will be
3081 * at offset 0. 3635 * at offset 0.
3082 */ 3636 */
3083 for (i = 0; i < ETH_ALEN; i++) { 3637 for (i = 0; i < ETH_ALEN; i++) {
3084 mac_addr <<= 8; 3638 mac_addr <<= 8;
@@ -3102,12 +3656,12 @@ int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3102} 3656}
3103 3657
3104/** 3658/**
3105 * s2io_ethtool_sset - Sets different link parameters. 3659 * s2io_ethtool_sset - Sets different link parameters.
3106 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure. 3660 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3107 * @info: pointer to the structure with parameters given by ethtool to set 3661 * @info: pointer to the structure with parameters given by ethtool to set
3108 * link information. 3662 * link information.
3109 * Description: 3663 * Description:
3110 * The function sets different link parameters provided by the user onto 3664 * The function sets different link parameters provided by the user onto
3111 * the NIC. 3665 * the NIC.
3112 * Return value: 3666 * Return value:
3113 * 0 on success. 3667 * 0 on success.
@@ -3129,7 +3683,7 @@ static int s2io_ethtool_sset(struct net_device *dev,
3129} 3683}
3130 3684
3131/** 3685/**
3132 * s2io_ethtol_gset - Return link specific information. 3686 * s2io_ethtol_gset - Return link specific information.
3133 * @sp : private member of the device structure, pointer to the 3687 * @sp : private member of the device structure, pointer to the
3134 * s2io_nic structure. 3688 * s2io_nic structure.
3135 * @info : pointer to the structure with parameters given by ethtool 3689 * @info : pointer to the structure with parameters given by ethtool
@@ -3161,8 +3715,8 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3161} 3715}
3162 3716
3163/** 3717/**
3164 * s2io_ethtool_gdrvinfo - Returns driver specific information. 3718 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3165 * @sp : private member of the device structure, which is a pointer to the 3719 * @sp : private member of the device structure, which is a pointer to the
3166 * s2io_nic structure. 3720 * s2io_nic structure.
3167 * @info : pointer to the structure with parameters given by ethtool to 3721 * @info : pointer to the structure with parameters given by ethtool to
3168 * return driver information. 3722 * return driver information.
@@ -3190,9 +3744,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3190 3744
3191/** 3745/**
3192 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer. 3746 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
3193 * @sp: private member of the device structure, which is a pointer to the 3747 * @sp: private member of the device structure, which is a pointer to the
3194 * s2io_nic structure. 3748 * s2io_nic structure.
3195 * @regs : pointer to the structure with parameters given by ethtool for 3749 * @regs : pointer to the structure with parameters given by ethtool for
3196 * dumping the registers. 3750 * dumping the registers.
3197 * @reg_space: The input argumnet into which all the registers are dumped. 3751 * @reg_space: The input argumnet into which all the registers are dumped.
3198 * Description: 3752 * Description:
@@ -3221,11 +3775,11 @@ static void s2io_ethtool_gregs(struct net_device *dev,
3221 3775
3222/** 3776/**
3223 * s2io_phy_id - timer function that alternates adapter LED. 3777 * s2io_phy_id - timer function that alternates adapter LED.
3224 * @data : address of the private member of the device structure, which 3778 * @data : address of the private member of the device structure, which
3225 * is a pointer to the s2io_nic structure, provided as an u32. 3779 * is a pointer to the s2io_nic structure, provided as an u32.
3226 * Description: This is actually the timer function that alternates the 3780 * Description: This is actually the timer function that alternates the
3227 * adapter LED bit of the adapter control bit to set/reset every time on 3781 * adapter LED bit of the adapter control bit to set/reset every time on
3228 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks 3782 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
3229 * once every second. 3783 * once every second.
3230*/ 3784*/
3231static void s2io_phy_id(unsigned long data) 3785static void s2io_phy_id(unsigned long data)
@@ -3236,7 +3790,8 @@ static void s2io_phy_id(unsigned long data)
3236 u16 subid; 3790 u16 subid;
3237 3791
3238 subid = sp->pdev->subsystem_device; 3792 subid = sp->pdev->subsystem_device;
3239 if ((subid & 0xFF) >= 0x07) { 3793 if ((sp->device_type == XFRAME_II_DEVICE) ||
3794 ((subid & 0xFF) >= 0x07)) {
3240 val64 = readq(&bar0->gpio_control); 3795 val64 = readq(&bar0->gpio_control);
3241 val64 ^= GPIO_CTRL_GPIO_0; 3796 val64 ^= GPIO_CTRL_GPIO_0;
3242 writeq(val64, &bar0->gpio_control); 3797 writeq(val64, &bar0->gpio_control);
@@ -3253,12 +3808,12 @@ static void s2io_phy_id(unsigned long data)
3253 * s2io_ethtool_idnic - To physically identify the nic on the system. 3808 * s2io_ethtool_idnic - To physically identify the nic on the system.
3254 * @sp : private member of the device structure, which is a pointer to the 3809 * @sp : private member of the device structure, which is a pointer to the
3255 * s2io_nic structure. 3810 * s2io_nic structure.
3256 * @id : pointer to the structure with identification parameters given by 3811 * @id : pointer to the structure with identification parameters given by
3257 * ethtool. 3812 * ethtool.
3258 * Description: Used to physically identify the NIC on the system. 3813 * Description: Used to physically identify the NIC on the system.
3259 * The Link LED will blink for a time specified by the user for 3814 * The Link LED will blink for a time specified by the user for
3260 * identification. 3815 * identification.
3261 * NOTE: The Link has to be Up to be able to blink the LED. Hence 3816 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3262 * identification is possible only if it's link is up. 3817 * identification is possible only if it's link is up.
3263 * Return value: 3818 * Return value:
3264 * int , returns 0 on success 3819 * int , returns 0 on success
@@ -3273,7 +3828,8 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3273 3828
3274 subid = sp->pdev->subsystem_device; 3829 subid = sp->pdev->subsystem_device;
3275 last_gpio_ctrl_val = readq(&bar0->gpio_control); 3830 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3276 if ((subid & 0xFF) < 0x07) { 3831 if ((sp->device_type == XFRAME_I_DEVICE) &&
3832 ((subid & 0xFF) < 0x07)) {
3277 val64 = readq(&bar0->adapter_control); 3833 val64 = readq(&bar0->adapter_control);
3278 if (!(val64 & ADAPTER_CNTL_EN)) { 3834 if (!(val64 & ADAPTER_CNTL_EN)) {
3279 printk(KERN_ERR 3835 printk(KERN_ERR
@@ -3288,12 +3844,12 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3288 } 3844 }
3289 mod_timer(&sp->id_timer, jiffies); 3845 mod_timer(&sp->id_timer, jiffies);
3290 if (data) 3846 if (data)
3291 msleep(data * 1000); 3847 msleep_interruptible(data * HZ);
3292 else 3848 else
3293 msleep(0xFFFFFFFF); 3849 msleep_interruptible(MAX_FLICKER_TIME);
3294 del_timer_sync(&sp->id_timer); 3850 del_timer_sync(&sp->id_timer);
3295 3851
3296 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) { 3852 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
3297 writeq(last_gpio_ctrl_val, &bar0->gpio_control); 3853 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3298 last_gpio_ctrl_val = readq(&bar0->gpio_control); 3854 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3299 } 3855 }
@@ -3303,7 +3859,8 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3303 3859
3304/** 3860/**
3305 * s2io_ethtool_getpause_data -Pause frame frame generation and reception. 3861 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
3306 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure. 3862 * @sp : private member of the device structure, which is a pointer to the
3863 * s2io_nic structure.
3307 * @ep : pointer to the structure with pause parameters given by ethtool. 3864 * @ep : pointer to the structure with pause parameters given by ethtool.
3308 * Description: 3865 * Description:
3309 * Returns the Pause frame generation and reception capability of the NIC. 3866 * Returns the Pause frame generation and reception capability of the NIC.
@@ -3327,7 +3884,7 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
3327 3884
3328/** 3885/**
3329 * s2io_ethtool_setpause_data - set/reset pause frame generation. 3886 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3330 * @sp : private member of the device structure, which is a pointer to the 3887 * @sp : private member of the device structure, which is a pointer to the
3331 * s2io_nic structure. 3888 * s2io_nic structure.
3332 * @ep : pointer to the structure with pause parameters given by ethtool. 3889 * @ep : pointer to the structure with pause parameters given by ethtool.
3333 * Description: 3890 * Description:
@@ -3338,7 +3895,7 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
3338 */ 3895 */
3339 3896
3340static int s2io_ethtool_setpause_data(struct net_device *dev, 3897static int s2io_ethtool_setpause_data(struct net_device *dev,
3341 struct ethtool_pauseparam *ep) 3898 struct ethtool_pauseparam *ep)
3342{ 3899{
3343 u64 val64; 3900 u64 val64;
3344 nic_t *sp = dev->priv; 3901 nic_t *sp = dev->priv;
@@ -3359,13 +3916,13 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
3359 3916
3360/** 3917/**
3361 * read_eeprom - reads 4 bytes of data from user given offset. 3918 * read_eeprom - reads 4 bytes of data from user given offset.
3362 * @sp : private member of the device structure, which is a pointer to the 3919 * @sp : private member of the device structure, which is a pointer to the
3363 * s2io_nic structure. 3920 * s2io_nic structure.
3364 * @off : offset at which the data must be written 3921 * @off : offset at which the data must be written
3365 * @data : Its an output parameter where the data read at the given 3922 * @data : Its an output parameter where the data read at the given
3366 * offset is stored. 3923 * offset is stored.
3367 * Description: 3924 * Description:
3368 * Will read 4 bytes of data from the user given offset and return the 3925 * Will read 4 bytes of data from the user given offset and return the
3369 * read data. 3926 * read data.
3370 * NOTE: Will allow to read only part of the EEPROM visible through the 3927 * NOTE: Will allow to read only part of the EEPROM visible through the
3371 * I2C bus. 3928 * I2C bus.
@@ -3406,7 +3963,7 @@ static int read_eeprom(nic_t * sp, int off, u32 * data)
3406 * s2io_nic structure. 3963 * s2io_nic structure.
3407 * @off : offset at which the data must be written 3964 * @off : offset at which the data must be written
3408 * @data : The data that is to be written 3965 * @data : The data that is to be written
3409 * @cnt : Number of bytes of the data that are actually to be written into 3966 * @cnt : Number of bytes of the data that are actually to be written into
3410 * the Eeprom. (max of 3) 3967 * the Eeprom. (max of 3)
3411 * Description: 3968 * Description:
3412 * Actually writes the relevant part of the data value into the Eeprom 3969 * Actually writes the relevant part of the data value into the Eeprom
@@ -3443,7 +4000,7 @@ static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3443/** 4000/**
3444 * s2io_ethtool_geeprom - reads the value stored in the Eeprom. 4001 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3445 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure. 4002 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3446 * @eeprom : pointer to the user level structure provided by ethtool, 4003 * @eeprom : pointer to the user level structure provided by ethtool,
3447 * containing all relevant information. 4004 * containing all relevant information.
3448 * @data_buf : user defined value to be written into Eeprom. 4005 * @data_buf : user defined value to be written into Eeprom.
3449 * Description: Reads the values stored in the Eeprom at given offset 4006 * Description: Reads the values stored in the Eeprom at given offset
@@ -3454,7 +4011,7 @@ static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3454 */ 4011 */
3455 4012
3456static int s2io_ethtool_geeprom(struct net_device *dev, 4013static int s2io_ethtool_geeprom(struct net_device *dev,
3457 struct ethtool_eeprom *eeprom, u8 * data_buf) 4014 struct ethtool_eeprom *eeprom, u8 * data_buf)
3458{ 4015{
3459 u32 data, i, valid; 4016 u32 data, i, valid;
3460 nic_t *sp = dev->priv; 4017 nic_t *sp = dev->priv;
@@ -3479,7 +4036,7 @@ static int s2io_ethtool_geeprom(struct net_device *dev,
3479 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom 4036 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3480 * @sp : private member of the device structure, which is a pointer to the 4037 * @sp : private member of the device structure, which is a pointer to the
3481 * s2io_nic structure. 4038 * s2io_nic structure.
3482 * @eeprom : pointer to the user level structure provided by ethtool, 4039 * @eeprom : pointer to the user level structure provided by ethtool,
3483 * containing all relevant information. 4040 * containing all relevant information.
3484 * @data_buf ; user defined value to be written into Eeprom. 4041 * @data_buf ; user defined value to be written into Eeprom.
3485 * Description: 4042 * Description:
@@ -3527,8 +4084,8 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
3527} 4084}
3528 4085
3529/** 4086/**
3530 * s2io_register_test - reads and writes into all clock domains. 4087 * s2io_register_test - reads and writes into all clock domains.
3531 * @sp : private member of the device structure, which is a pointer to the 4088 * @sp : private member of the device structure, which is a pointer to the
3532 * s2io_nic structure. 4089 * s2io_nic structure.
3533 * @data : variable that returns the result of each of the test conducted b 4090 * @data : variable that returns the result of each of the test conducted b
3534 * by the driver. 4091 * by the driver.
@@ -3545,8 +4102,8 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
3545 u64 val64 = 0; 4102 u64 val64 = 0;
3546 int fail = 0; 4103 int fail = 0;
3547 4104
3548 val64 = readq(&bar0->pcc_enable); 4105 val64 = readq(&bar0->pif_rd_swapper_fb);
3549 if (val64 != 0xff00000000000000ULL) { 4106 if (val64 != 0x123456789abcdefULL) {
3550 fail = 1; 4107 fail = 1;
3551 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n"); 4108 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3552 } 4109 }
@@ -3590,13 +4147,13 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
3590} 4147}
3591 4148
3592/** 4149/**
3593 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed. 4150 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3594 * @sp : private member of the device structure, which is a pointer to the 4151 * @sp : private member of the device structure, which is a pointer to the
3595 * s2io_nic structure. 4152 * s2io_nic structure.
3596 * @data:variable that returns the result of each of the test conducted by 4153 * @data:variable that returns the result of each of the test conducted by
3597 * the driver. 4154 * the driver.
3598 * Description: 4155 * Description:
3599 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL 4156 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3600 * register. 4157 * register.
3601 * Return value: 4158 * Return value:
3602 * 0 on success. 4159 * 0 on success.
@@ -3661,14 +4218,14 @@ static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3661 4218
3662/** 4219/**
3663 * s2io_bist_test - invokes the MemBist test of the card . 4220 * s2io_bist_test - invokes the MemBist test of the card .
3664 * @sp : private member of the device structure, which is a pointer to the 4221 * @sp : private member of the device structure, which is a pointer to the
3665 * s2io_nic structure. 4222 * s2io_nic structure.
3666 * @data:variable that returns the result of each of the test conducted by 4223 * @data:variable that returns the result of each of the test conducted by
3667 * the driver. 4224 * the driver.
3668 * Description: 4225 * Description:
3669 * This invokes the MemBist test of the card. We give around 4226 * This invokes the MemBist test of the card. We give around
3670 * 2 secs time for the Test to complete. If it's still not complete 4227 * 2 secs time for the Test to complete. If it's still not complete
3671 * within this peiod, we consider that the test failed. 4228 * within this peiod, we consider that the test failed.
3672 * Return value: 4229 * Return value:
3673 * 0 on success and -1 on failure. 4230 * 0 on success and -1 on failure.
3674 */ 4231 */
@@ -3697,13 +4254,13 @@ static int s2io_bist_test(nic_t * sp, uint64_t * data)
3697} 4254}
3698 4255
3699/** 4256/**
3700 * s2io-link_test - verifies the link state of the nic 4257 * s2io-link_test - verifies the link state of the nic
3701 * @sp ; private member of the device structure, which is a pointer to the 4258 * @sp ; private member of the device structure, which is a pointer to the
3702 * s2io_nic structure. 4259 * s2io_nic structure.
3703 * @data: variable that returns the result of each of the test conducted by 4260 * @data: variable that returns the result of each of the test conducted by
3704 * the driver. 4261 * the driver.
3705 * Description: 4262 * Description:
3706 * The function verifies the link state of the NIC and updates the input 4263 * The function verifies the link state of the NIC and updates the input
3707 * argument 'data' appropriately. 4264 * argument 'data' appropriately.
3708 * Return value: 4265 * Return value:
3709 * 0 on success. 4266 * 0 on success.
@@ -3722,13 +4279,13 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
3722} 4279}
3723 4280
3724/** 4281/**
3725 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC 4282 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3726 * @sp - private member of the device structure, which is a pointer to the 4283 * @sp - private member of the device structure, which is a pointer to the
3727 * s2io_nic structure. 4284 * s2io_nic structure.
3728 * @data - variable that returns the result of each of the test 4285 * @data - variable that returns the result of each of the test
3729 * conducted by the driver. 4286 * conducted by the driver.
3730 * Description: 4287 * Description:
3731 * This is one of the offline test that tests the read and write 4288 * This is one of the offline test that tests the read and write
3732 * access to the RldRam chip on the NIC. 4289 * access to the RldRam chip on the NIC.
3733 * Return value: 4290 * Return value:
3734 * 0 on success. 4291 * 0 on success.
@@ -3833,7 +4390,7 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3833 * s2io_nic structure. 4390 * s2io_nic structure.
3834 * @ethtest : pointer to a ethtool command specific structure that will be 4391 * @ethtest : pointer to a ethtool command specific structure that will be
3835 * returned to the user. 4392 * returned to the user.
3836 * @data : variable that returns the result of each of the test 4393 * @data : variable that returns the result of each of the test
3837 * conducted by the driver. 4394 * conducted by the driver.
3838 * Description: 4395 * Description:
3839 * This function conducts 6 tests ( 4 offline and 2 online) to determine 4396 * This function conducts 6 tests ( 4 offline and 2 online) to determine
@@ -3851,23 +4408,18 @@ static void s2io_ethtool_test(struct net_device *dev,
3851 4408
3852 if (ethtest->flags == ETH_TEST_FL_OFFLINE) { 4409 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
3853 /* Offline Tests. */ 4410 /* Offline Tests. */
3854 if (orig_state) { 4411 if (orig_state)
3855 s2io_close(sp->dev); 4412 s2io_close(sp->dev);
3856 s2io_set_swapper(sp);
3857 } else
3858 s2io_set_swapper(sp);
3859 4413
3860 if (s2io_register_test(sp, &data[0])) 4414 if (s2io_register_test(sp, &data[0]))
3861 ethtest->flags |= ETH_TEST_FL_FAILED; 4415 ethtest->flags |= ETH_TEST_FL_FAILED;
3862 4416
3863 s2io_reset(sp); 4417 s2io_reset(sp);
3864 s2io_set_swapper(sp);
3865 4418
3866 if (s2io_rldram_test(sp, &data[3])) 4419 if (s2io_rldram_test(sp, &data[3]))
3867 ethtest->flags |= ETH_TEST_FL_FAILED; 4420 ethtest->flags |= ETH_TEST_FL_FAILED;
3868 4421
3869 s2io_reset(sp); 4422 s2io_reset(sp);
3870 s2io_set_swapper(sp);
3871 4423
3872 if (s2io_eeprom_test(sp, &data[1])) 4424 if (s2io_eeprom_test(sp, &data[1]))
3873 ethtest->flags |= ETH_TEST_FL_FAILED; 4425 ethtest->flags |= ETH_TEST_FL_FAILED;
@@ -3910,61 +4462,111 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
3910 nic_t *sp = dev->priv; 4462 nic_t *sp = dev->priv;
3911 StatInfo_t *stat_info = sp->mac_control.stats_info; 4463 StatInfo_t *stat_info = sp->mac_control.stats_info;
3912 4464
3913 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms); 4465 s2io_updt_stats(sp);
3914 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets); 4466 tmp_stats[i++] =
4467 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
4468 le32_to_cpu(stat_info->tmac_frms);
4469 tmp_stats[i++] =
4470 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4471 le32_to_cpu(stat_info->tmac_data_octets);
3915 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms); 4472 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
3916 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms); 4473 tmp_stats[i++] =
3917 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms); 4474 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4475 le32_to_cpu(stat_info->tmac_mcst_frms);
4476 tmp_stats[i++] =
4477 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4478 le32_to_cpu(stat_info->tmac_bcst_frms);
3918 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms); 4479 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
3919 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms); 4480 tmp_stats[i++] =
4481 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4482 le32_to_cpu(stat_info->tmac_any_err_frms);
3920 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets); 4483 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
3921 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip); 4484 tmp_stats[i++] =
3922 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip); 4485 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
3923 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp); 4486 le32_to_cpu(stat_info->tmac_vld_ip);
3924 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp); 4487 tmp_stats[i++] =
4488 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4489 le32_to_cpu(stat_info->tmac_drop_ip);
4490 tmp_stats[i++] =
4491 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4492 le32_to_cpu(stat_info->tmac_icmp);
4493 tmp_stats[i++] =
4494 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4495 le32_to_cpu(stat_info->tmac_rst_tcp);
3925 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp); 4496 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
3926 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp); 4497 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
3927 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms); 4498 le32_to_cpu(stat_info->tmac_udp);
3928 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets); 4499 tmp_stats[i++] =
4500 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4501 le32_to_cpu(stat_info->rmac_vld_frms);
4502 tmp_stats[i++] =
4503 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4504 le32_to_cpu(stat_info->rmac_data_octets);
3929 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms); 4505 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
3930 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms); 4506 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
3931 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms); 4507 tmp_stats[i++] =
3932 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms); 4508 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4509 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4510 tmp_stats[i++] =
4511 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4512 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
3933 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms); 4513 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
3934 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms); 4514 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
3935 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms); 4515 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
3936 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms); 4516 tmp_stats[i++] =
3937 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms); 4517 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
3938 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms); 4518 le32_to_cpu(stat_info->rmac_discarded_frms);
3939 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms); 4519 tmp_stats[i++] =
3940 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms); 4520 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
3941 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip); 4521 le32_to_cpu(stat_info->rmac_usized_frms);
4522 tmp_stats[i++] =
4523 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4524 le32_to_cpu(stat_info->rmac_osized_frms);
4525 tmp_stats[i++] =
4526 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4527 le32_to_cpu(stat_info->rmac_frag_frms);
4528 tmp_stats[i++] =
4529 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4530 le32_to_cpu(stat_info->rmac_jabber_frms);
4531 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4532 le32_to_cpu(stat_info->rmac_ip);
3942 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets); 4533 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
3943 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip); 4534 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
3944 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip); 4535 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
3945 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp); 4536 le32_to_cpu(stat_info->rmac_drop_ip);
4537 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4538 le32_to_cpu(stat_info->rmac_icmp);
3946 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp); 4539 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
3947 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp); 4540 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
3948 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp); 4541 le32_to_cpu(stat_info->rmac_udp);
3949 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt); 4542 tmp_stats[i++] =
3950 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip); 4543 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4544 le32_to_cpu(stat_info->rmac_err_drp_udp);
4545 tmp_stats[i++] =
4546 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4547 le32_to_cpu(stat_info->rmac_pause_cnt);
4548 tmp_stats[i++] =
4549 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4550 le32_to_cpu(stat_info->rmac_accepted_ip);
3951 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp); 4551 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
4552 tmp_stats[i++] = 0;
4553 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4554 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
3952} 4555}
3953 4556
3954static int s2io_ethtool_get_regs_len(struct net_device *dev) 4557int s2io_ethtool_get_regs_len(struct net_device *dev)
3955{ 4558{
3956 return (XENA_REG_SPACE); 4559 return (XENA_REG_SPACE);
3957} 4560}
3958 4561
3959 4562
3960static u32 s2io_ethtool_get_rx_csum(struct net_device * dev) 4563u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
3961{ 4564{
3962 nic_t *sp = dev->priv; 4565 nic_t *sp = dev->priv;
3963 4566
3964 return (sp->rx_csum); 4567 return (sp->rx_csum);
3965} 4568}
3966 4569int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3967static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3968{ 4570{
3969 nic_t *sp = dev->priv; 4571 nic_t *sp = dev->priv;
3970 4572
@@ -3975,19 +4577,17 @@ static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3975 4577
3976 return 0; 4578 return 0;
3977} 4579}
3978 4580int s2io_get_eeprom_len(struct net_device *dev)
3979static int s2io_get_eeprom_len(struct net_device *dev)
3980{ 4581{
3981 return (XENA_EEPROM_SPACE); 4582 return (XENA_EEPROM_SPACE);
3982} 4583}
3983 4584
3984static int s2io_ethtool_self_test_count(struct net_device *dev) 4585int s2io_ethtool_self_test_count(struct net_device *dev)
3985{ 4586{
3986 return (S2IO_TEST_LEN); 4587 return (S2IO_TEST_LEN);
3987} 4588}
3988 4589void s2io_ethtool_get_strings(struct net_device *dev,
3989static void s2io_ethtool_get_strings(struct net_device *dev, 4590 u32 stringset, u8 * data)
3990 u32 stringset, u8 * data)
3991{ 4591{
3992 switch (stringset) { 4592 switch (stringset) {
3993 case ETH_SS_TEST: 4593 case ETH_SS_TEST:
@@ -3998,13 +4598,12 @@ static void s2io_ethtool_get_strings(struct net_device *dev,
3998 sizeof(ethtool_stats_keys)); 4598 sizeof(ethtool_stats_keys));
3999 } 4599 }
4000} 4600}
4001
4002static int s2io_ethtool_get_stats_count(struct net_device *dev) 4601static int s2io_ethtool_get_stats_count(struct net_device *dev)
4003{ 4602{
4004 return (S2IO_STAT_LEN); 4603 return (S2IO_STAT_LEN);
4005} 4604}
4006 4605
4007static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) 4606int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4008{ 4607{
4009 if (data) 4608 if (data)
4010 dev->features |= NETIF_F_IP_CSUM; 4609 dev->features |= NETIF_F_IP_CSUM;
@@ -4046,21 +4645,18 @@ static struct ethtool_ops netdev_ethtool_ops = {
4046}; 4645};
4047 4646
4048/** 4647/**
4049 * s2io_ioctl - Entry point for the Ioctl 4648 * s2io_ioctl - Entry point for the Ioctl
4050 * @dev : Device pointer. 4649 * @dev : Device pointer.
4051 * @ifr : An IOCTL specefic structure, that can contain a pointer to 4650 * @ifr : An IOCTL specefic structure, that can contain a pointer to
4052 * a proprietary structure used to pass information to the driver. 4651 * a proprietary structure used to pass information to the driver.
4053 * @cmd : This is used to distinguish between the different commands that 4652 * @cmd : This is used to distinguish between the different commands that
4054 * can be passed to the IOCTL functions. 4653 * can be passed to the IOCTL functions.
4055 * Description: 4654 * Description:
4056 * This function has support for ethtool, adding multiple MAC addresses on 4655 * Currently there are no special functionality supported in IOCTL, hence
4057 * the NIC and some DBG commands for the util tool. 4656 * function always return EOPNOTSUPPORTED
4058 * Return value:
4059 * Currently the IOCTL supports no operations, hence by default this
4060 * function returns OP NOT SUPPORTED value.
4061 */ 4657 */
4062 4658
4063static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 4659int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4064{ 4660{
4065 return -EOPNOTSUPP; 4661 return -EOPNOTSUPP;
4066} 4662}
@@ -4076,17 +4672,9 @@ static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4076 * file on failure. 4672 * file on failure.
4077 */ 4673 */
4078 4674
4079static int s2io_change_mtu(struct net_device *dev, int new_mtu) 4675int s2io_change_mtu(struct net_device *dev, int new_mtu)
4080{ 4676{
4081 nic_t *sp = dev->priv; 4677 nic_t *sp = dev->priv;
4082 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4083 register u64 val64;
4084
4085 if (netif_running(dev)) {
4086 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4087 DBG_PRINT(ERR_DBG, "change its MTU \n");
4088 return -EBUSY;
4089 }
4090 4678
4091 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) { 4679 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4092 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", 4680 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
@@ -4094,11 +4682,22 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
4094 return -EPERM; 4682 return -EPERM;
4095 } 4683 }
4096 4684
4097 /* Set the new MTU into the PYLD register of the NIC */
4098 val64 = new_mtu;
4099 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4100
4101 dev->mtu = new_mtu; 4685 dev->mtu = new_mtu;
4686 if (netif_running(dev)) {
4687 s2io_card_down(sp);
4688 netif_stop_queue(dev);
4689 if (s2io_card_up(sp)) {
4690 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4691 __FUNCTION__);
4692 }
4693 if (netif_queue_stopped(dev))
4694 netif_wake_queue(dev);
4695 } else { /* Device is down */
4696 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4697 u64 val64 = new_mtu;
4698
4699 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4700 }
4102 4701
4103 return 0; 4702 return 0;
4104} 4703}
@@ -4108,9 +4707,9 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
4108 * @dev_adr : address of the device structure in dma_addr_t format. 4707 * @dev_adr : address of the device structure in dma_addr_t format.
4109 * Description: 4708 * Description:
4110 * This is the tasklet or the bottom half of the ISR. This is 4709 * This is the tasklet or the bottom half of the ISR. This is
4111 * an extension of the ISR which is scheduled by the scheduler to be run 4710 * an extension of the ISR which is scheduled by the scheduler to be run
4112 * when the load on the CPU is low. All low priority tasks of the ISR can 4711 * when the load on the CPU is low. All low priority tasks of the ISR can
4113 * be pushed into the tasklet. For now the tasklet is used only to 4712 * be pushed into the tasklet. For now the tasklet is used only to
4114 * replenish the Rx buffers in the Rx buffer descriptors. 4713 * replenish the Rx buffers in the Rx buffer descriptors.
4115 * Return value: 4714 * Return value:
4116 * void. 4715 * void.
@@ -4166,19 +4765,22 @@ static void s2io_set_link(unsigned long data)
4166 } 4765 }
4167 4766
4168 subid = nic->pdev->subsystem_device; 4767 subid = nic->pdev->subsystem_device;
4169 /* 4768 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
4170 * Allow a small delay for the NICs self initiated 4769 /*
4171 * cleanup to complete. 4770 * Allow a small delay for the NICs self initiated
4172 */ 4771 * cleanup to complete.
4173 msleep(100); 4772 */
4773 msleep(100);
4774 }
4174 4775
4175 val64 = readq(&bar0->adapter_status); 4776 val64 = readq(&bar0->adapter_status);
4176 if (verify_xena_quiescence(val64, nic->device_enabled_once)) { 4777 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4177 if (LINK_IS_UP(val64)) { 4778 if (LINK_IS_UP(val64)) {
4178 val64 = readq(&bar0->adapter_control); 4779 val64 = readq(&bar0->adapter_control);
4179 val64 |= ADAPTER_CNTL_EN; 4780 val64 |= ADAPTER_CNTL_EN;
4180 writeq(val64, &bar0->adapter_control); 4781 writeq(val64, &bar0->adapter_control);
4181 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) { 4782 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4783 subid)) {
4182 val64 = readq(&bar0->gpio_control); 4784 val64 = readq(&bar0->gpio_control);
4183 val64 |= GPIO_CTRL_GPIO_0; 4785 val64 |= GPIO_CTRL_GPIO_0;
4184 writeq(val64, &bar0->gpio_control); 4786 writeq(val64, &bar0->gpio_control);
@@ -4187,20 +4789,24 @@ static void s2io_set_link(unsigned long data)
4187 val64 |= ADAPTER_LED_ON; 4789 val64 |= ADAPTER_LED_ON;
4188 writeq(val64, &bar0->adapter_control); 4790 writeq(val64, &bar0->adapter_control);
4189 } 4791 }
4190 val64 = readq(&bar0->adapter_status); 4792 if (s2io_link_fault_indication(nic) ==
4191 if (!LINK_IS_UP(val64)) { 4793 MAC_RMAC_ERR_TIMER) {
4192 DBG_PRINT(ERR_DBG, "%s:", dev->name); 4794 val64 = readq(&bar0->adapter_status);
4193 DBG_PRINT(ERR_DBG, " Link down"); 4795 if (!LINK_IS_UP(val64)) {
4194 DBG_PRINT(ERR_DBG, "after "); 4796 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4195 DBG_PRINT(ERR_DBG, "enabling "); 4797 DBG_PRINT(ERR_DBG, " Link down");
4196 DBG_PRINT(ERR_DBG, "device \n"); 4798 DBG_PRINT(ERR_DBG, "after ");
4799 DBG_PRINT(ERR_DBG, "enabling ");
4800 DBG_PRINT(ERR_DBG, "device \n");
4801 }
4197 } 4802 }
4198 if (nic->device_enabled_once == FALSE) { 4803 if (nic->device_enabled_once == FALSE) {
4199 nic->device_enabled_once = TRUE; 4804 nic->device_enabled_once = TRUE;
4200 } 4805 }
4201 s2io_link(nic, LINK_UP); 4806 s2io_link(nic, LINK_UP);
4202 } else { 4807 } else {
4203 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) { 4808 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4809 subid)) {
4204 val64 = readq(&bar0->gpio_control); 4810 val64 = readq(&bar0->gpio_control);
4205 val64 &= ~GPIO_CTRL_GPIO_0; 4811 val64 &= ~GPIO_CTRL_GPIO_0;
4206 writeq(val64, &bar0->gpio_control); 4812 writeq(val64, &bar0->gpio_control);
@@ -4223,9 +4829,11 @@ static void s2io_card_down(nic_t * sp)
4223 unsigned long flags; 4829 unsigned long flags;
4224 register u64 val64 = 0; 4830 register u64 val64 = 0;
4225 4831
4832 del_timer_sync(&sp->alarm_timer);
4226 /* If s2io_set_link task is executing, wait till it completes. */ 4833 /* If s2io_set_link task is executing, wait till it completes. */
4227 while (test_and_set_bit(0, &(sp->link_state))) 4834 while (test_and_set_bit(0, &(sp->link_state))) {
4228 msleep(50); 4835 msleep(50);
4836 }
4229 atomic_set(&sp->card_state, CARD_DOWN); 4837 atomic_set(&sp->card_state, CARD_DOWN);
4230 4838
4231 /* disable Tx and Rx traffic on the NIC */ 4839 /* disable Tx and Rx traffic on the NIC */
@@ -4237,7 +4845,7 @@ static void s2io_card_down(nic_t * sp)
4237 /* Check if the device is Quiescent and then Reset the NIC */ 4845 /* Check if the device is Quiescent and then Reset the NIC */
4238 do { 4846 do {
4239 val64 = readq(&bar0->adapter_status); 4847 val64 = readq(&bar0->adapter_status);
4240 if (verify_xena_quiescence(val64, sp->device_enabled_once)) { 4848 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4241 break; 4849 break;
4242 } 4850 }
4243 4851
@@ -4251,14 +4859,27 @@ static void s2io_card_down(nic_t * sp)
4251 break; 4859 break;
4252 } 4860 }
4253 } while (1); 4861 } while (1);
4254 spin_lock_irqsave(&sp->tx_lock, flags);
4255 s2io_reset(sp); 4862 s2io_reset(sp);
4256 4863
4257 /* Free all unused Tx and Rx buffers */ 4864 /* Waiting till all Interrupt handlers are complete */
4865 cnt = 0;
4866 do {
4867 msleep(10);
4868 if (!atomic_read(&sp->isr_cnt))
4869 break;
4870 cnt++;
4871 } while(cnt < 5);
4872
4873 spin_lock_irqsave(&sp->tx_lock, flags);
4874 /* Free all Tx buffers */
4258 free_tx_buffers(sp); 4875 free_tx_buffers(sp);
4876 spin_unlock_irqrestore(&sp->tx_lock, flags);
4877
4878 /* Free all Rx buffers */
4879 spin_lock_irqsave(&sp->rx_lock, flags);
4259 free_rx_buffers(sp); 4880 free_rx_buffers(sp);
4881 spin_unlock_irqrestore(&sp->rx_lock, flags);
4260 4882
4261 spin_unlock_irqrestore(&sp->tx_lock, flags);
4262 clear_bit(0, &(sp->link_state)); 4883 clear_bit(0, &(sp->link_state));
4263} 4884}
4264 4885
@@ -4276,8 +4897,8 @@ static int s2io_card_up(nic_t * sp)
4276 return -ENODEV; 4897 return -ENODEV;
4277 } 4898 }
4278 4899
4279 /* 4900 /*
4280 * Initializing the Rx buffers. For now we are considering only 1 4901 * Initializing the Rx buffers. For now we are considering only 1
4281 * Rx ring and initializing buffers into 30 Rx blocks 4902 * Rx ring and initializing buffers into 30 Rx blocks
4282 */ 4903 */
4283 mac_control = &sp->mac_control; 4904 mac_control = &sp->mac_control;
@@ -4311,16 +4932,18 @@ static int s2io_card_up(nic_t * sp)
4311 return -ENODEV; 4932 return -ENODEV;
4312 } 4933 }
4313 4934
4935 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4936
4314 atomic_set(&sp->card_state, CARD_UP); 4937 atomic_set(&sp->card_state, CARD_UP);
4315 return 0; 4938 return 0;
4316} 4939}
4317 4940
4318/** 4941/**
4319 * s2io_restart_nic - Resets the NIC. 4942 * s2io_restart_nic - Resets the NIC.
4320 * @data : long pointer to the device private structure 4943 * @data : long pointer to the device private structure
4321 * Description: 4944 * Description:
4322 * This function is scheduled to be run by the s2io_tx_watchdog 4945 * This function is scheduled to be run by the s2io_tx_watchdog
4323 * function after 0.5 secs to reset the NIC. The idea is to reduce 4946 * function after 0.5 secs to reset the NIC. The idea is to reduce
4324 * the run time of the watch dog routine which is run holding a 4947 * the run time of the watch dog routine which is run holding a
4325 * spin lock. 4948 * spin lock.
4326 */ 4949 */
@@ -4338,10 +4961,11 @@ static void s2io_restart_nic(unsigned long data)
4338 netif_wake_queue(dev); 4961 netif_wake_queue(dev);
4339 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", 4962 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4340 dev->name); 4963 dev->name);
4964
4341} 4965}
4342 4966
4343/** 4967/**
4344 * s2io_tx_watchdog - Watchdog for transmit side. 4968 * s2io_tx_watchdog - Watchdog for transmit side.
4345 * @dev : Pointer to net device structure 4969 * @dev : Pointer to net device structure
4346 * Description: 4970 * Description:
4347 * This function is triggered if the Tx Queue is stopped 4971 * This function is triggered if the Tx Queue is stopped
@@ -4369,7 +4993,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
4369 * @len : length of the packet 4993 * @len : length of the packet
4370 * @cksum : FCS checksum of the frame. 4994 * @cksum : FCS checksum of the frame.
4371 * @ring_no : the ring from which this RxD was extracted. 4995 * @ring_no : the ring from which this RxD was extracted.
4372 * Description: 4996 * Description:
4373 * This function is called by the Tx interrupt serivce routine to perform 4997 * This function is called by the Tx interrupt serivce routine to perform
4374 * some OS related operations on the SKB before passing it to the upper 4998 * some OS related operations on the SKB before passing it to the upper
4375 * layers. It mainly checks if the checksum is OK, if so adds it to the 4999 * layers. It mainly checks if the checksum is OK, if so adds it to the
@@ -4379,35 +5003,68 @@ static void s2io_tx_watchdog(struct net_device *dev)
4379 * Return value: 5003 * Return value:
4380 * SUCCESS on success and -1 on failure. 5004 * SUCCESS on success and -1 on failure.
4381 */ 5005 */
4382#ifndef CONFIG_2BUFF_MODE 5006static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4383static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no)
4384#else
4385static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4386 buffAdd_t * ba)
4387#endif
4388{ 5007{
5008 nic_t *sp = ring_data->nic;
4389 struct net_device *dev = (struct net_device *) sp->dev; 5009 struct net_device *dev = (struct net_device *) sp->dev;
4390 struct sk_buff *skb = 5010 struct sk_buff *skb = (struct sk_buff *)
4391 (struct sk_buff *) ((unsigned long) rxdp->Host_Control); 5011 ((unsigned long) rxdp->Host_Control);
5012 int ring_no = ring_data->ring_no;
4392 u16 l3_csum, l4_csum; 5013 u16 l3_csum, l4_csum;
4393#ifdef CONFIG_2BUFF_MODE 5014#ifdef CONFIG_2BUFF_MODE
4394 int buf0_len, buf2_len; 5015 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5016 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5017 int get_block = ring_data->rx_curr_get_info.block_index;
5018 int get_off = ring_data->rx_curr_get_info.offset;
5019 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4395 unsigned char *buff; 5020 unsigned char *buff;
5021#else
5022 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
4396#endif 5023#endif
5024 skb->dev = dev;
5025 if (rxdp->Control_1 & RXD_T_CODE) {
5026 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5027 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5028 dev->name, err);
5029 dev_kfree_skb(skb);
5030 sp->stats.rx_crc_errors++;
5031 atomic_dec(&sp->rx_bufs_left[ring_no]);
5032 rxdp->Host_Control = 0;
5033 return 0;
5034 }
4397 5035
4398 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); 5036 /* Updating statistics */
4399 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && (sp->rx_csum)) { 5037 rxdp->Host_Control = 0;
5038 sp->rx_pkt_count++;
5039 sp->stats.rx_packets++;
5040#ifndef CONFIG_2BUFF_MODE
5041 sp->stats.rx_bytes += len;
5042#else
5043 sp->stats.rx_bytes += buf0_len + buf2_len;
5044#endif
5045
5046#ifndef CONFIG_2BUFF_MODE
5047 skb_put(skb, len);
5048#else
5049 buff = skb_push(skb, buf0_len);
5050 memcpy(buff, ba->ba_0, buf0_len);
5051 skb_put(skb, buf2_len);
5052#endif
5053
5054 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5055 (sp->rx_csum)) {
5056 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4400 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); 5057 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4401 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) { 5058 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4402 /* 5059 /*
4403 * NIC verifies if the Checksum of the received 5060 * NIC verifies if the Checksum of the received
4404 * frame is Ok or not and accordingly returns 5061 * frame is Ok or not and accordingly returns
4405 * a flag in the RxD. 5062 * a flag in the RxD.
4406 */ 5063 */
4407 skb->ip_summed = CHECKSUM_UNNECESSARY; 5064 skb->ip_summed = CHECKSUM_UNNECESSARY;
4408 } else { 5065 } else {
4409 /* 5066 /*
4410 * Packet with erroneous checksum, let the 5067 * Packet with erroneous checksum, let the
4411 * upper layers deal with it. 5068 * upper layers deal with it.
4412 */ 5069 */
4413 skb->ip_summed = CHECKSUM_NONE; 5070 skb->ip_summed = CHECKSUM_NONE;
@@ -4416,44 +5073,26 @@ static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4416 skb->ip_summed = CHECKSUM_NONE; 5073 skb->ip_summed = CHECKSUM_NONE;
4417 } 5074 }
4418 5075
4419 if (rxdp->Control_1 & RXD_T_CODE) {
4420 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4421 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4422 dev->name, err);
4423 }
4424#ifdef CONFIG_2BUFF_MODE
4425 buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4426 buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4427#endif
4428
4429 skb->dev = dev;
4430#ifndef CONFIG_2BUFF_MODE
4431 skb_put(skb, len);
4432 skb->protocol = eth_type_trans(skb, dev); 5076 skb->protocol = eth_type_trans(skb, dev);
4433#else
4434 buff = skb_push(skb, buf0_len);
4435 memcpy(buff, ba->ba_0, buf0_len);
4436 skb_put(skb, buf2_len);
4437 skb->protocol = eth_type_trans(skb, dev);
4438#endif
4439
4440#ifdef CONFIG_S2IO_NAPI 5077#ifdef CONFIG_S2IO_NAPI
4441 netif_receive_skb(skb); 5078 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5079 /* Queueing the vlan frame to the upper layer */
5080 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5081 RXD_GET_VLAN_TAG(rxdp->Control_2));
5082 } else {
5083 netif_receive_skb(skb);
5084 }
4442#else 5085#else
4443 netif_rx(skb); 5086 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5087 /* Queueing the vlan frame to the upper layer */
5088 vlan_hwaccel_rx(skb, sp->vlgrp,
5089 RXD_GET_VLAN_TAG(rxdp->Control_2));
5090 } else {
5091 netif_rx(skb);
5092 }
4444#endif 5093#endif
4445
4446 dev->last_rx = jiffies; 5094 dev->last_rx = jiffies;
4447 sp->rx_pkt_count++;
4448 sp->stats.rx_packets++;
4449#ifndef CONFIG_2BUFF_MODE
4450 sp->stats.rx_bytes += len;
4451#else
4452 sp->stats.rx_bytes += buf0_len + buf2_len;
4453#endif
4454
4455 atomic_dec(&sp->rx_bufs_left[ring_no]); 5095 atomic_dec(&sp->rx_bufs_left[ring_no]);
4456 rxdp->Host_Control = 0;
4457 return SUCCESS; 5096 return SUCCESS;
4458} 5097}
4459 5098
@@ -4464,13 +5103,13 @@ static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4464 * @link : inidicates whether link is UP/DOWN. 5103 * @link : inidicates whether link is UP/DOWN.
4465 * Description: 5104 * Description:
4466 * This function stops/starts the Tx queue depending on whether the link 5105 * This function stops/starts the Tx queue depending on whether the link
4467 * status of the NIC is is down or up. This is called by the Alarm 5106 * status of the NIC is is down or up. This is called by the Alarm
4468 * interrupt handler whenever a link change interrupt comes up. 5107 * interrupt handler whenever a link change interrupt comes up.
4469 * Return value: 5108 * Return value:
4470 * void. 5109 * void.
4471 */ 5110 */
4472 5111
4473static void s2io_link(nic_t * sp, int link) 5112void s2io_link(nic_t * sp, int link)
4474{ 5113{
4475 struct net_device *dev = (struct net_device *) sp->dev; 5114 struct net_device *dev = (struct net_device *) sp->dev;
4476 5115
@@ -4487,8 +5126,25 @@ static void s2io_link(nic_t * sp, int link)
4487} 5126}
4488 5127
4489/** 5128/**
4490 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers . 5129 * get_xena_rev_id - to identify revision ID of xena.
4491 * @sp : private member of the device structure, which is a pointer to the 5130 * @pdev : PCI Dev structure
5131 * Description:
5132 * Function to identify the Revision ID of xena.
5133 * Return value:
5134 * returns the revision ID of the device.
5135 */
5136
5137int get_xena_rev_id(struct pci_dev *pdev)
5138{
5139 u8 id = 0;
5140 int ret;
5141 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5142 return id;
5143}
5144
5145/**
5146 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5147 * @sp : private member of the device structure, which is a pointer to the
4492 * s2io_nic structure. 5148 * s2io_nic structure.
4493 * Description: 5149 * Description:
4494 * This function initializes a few of the PCI and PCI-X configuration registers 5150 * This function initializes a few of the PCI and PCI-X configuration registers
@@ -4499,15 +5155,15 @@ static void s2io_link(nic_t * sp, int link)
4499 5155
4500static void s2io_init_pci(nic_t * sp) 5156static void s2io_init_pci(nic_t * sp)
4501{ 5157{
4502 u16 pci_cmd = 0; 5158 u16 pci_cmd = 0, pcix_cmd = 0;
4503 5159
4504 /* Enable Data Parity Error Recovery in PCI-X command register. */ 5160 /* Enable Data Parity Error Recovery in PCI-X command register. */
4505 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5161 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4506 &(sp->pcix_cmd)); 5162 &(pcix_cmd));
4507 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5163 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4508 (sp->pcix_cmd | 1)); 5164 (pcix_cmd | 1));
4509 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5165 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4510 &(sp->pcix_cmd)); 5166 &(pcix_cmd));
4511 5167
4512 /* Set the PErr Response bit in PCI command register. */ 5168 /* Set the PErr Response bit in PCI command register. */
4513 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 5169 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
@@ -4515,53 +5171,43 @@ static void s2io_init_pci(nic_t * sp)
4515 (pci_cmd | PCI_COMMAND_PARITY)); 5171 (pci_cmd | PCI_COMMAND_PARITY));
4516 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 5172 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4517 5173
4518 /* Set MMRB count to 1024 in PCI-X Command register. */
4519 sp->pcix_cmd &= 0xFFF3;
4520 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, (sp->pcix_cmd | (0x1 << 2))); /* MMRBC 1K */
4521 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4522 &(sp->pcix_cmd));
4523
4524 /* Setting Maximum outstanding splits based on system type. */
4525 sp->pcix_cmd &= 0xFF8F;
4526
4527 sp->pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1); /* 2 splits. */
4528 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4529 sp->pcix_cmd);
4530 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4531 &(sp->pcix_cmd));
4532 /* Forcibly disabling relaxed ordering capability of the card. */ 5174 /* Forcibly disabling relaxed ordering capability of the card. */
4533 sp->pcix_cmd &= 0xfffd; 5175 pcix_cmd &= 0xfffd;
4534 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5176 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4535 sp->pcix_cmd); 5177 pcix_cmd);
4536 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5178 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4537 &(sp->pcix_cmd)); 5179 &(pcix_cmd));
4538} 5180}
4539 5181
4540MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); 5182MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4541MODULE_LICENSE("GPL"); 5183MODULE_LICENSE("GPL");
4542module_param(tx_fifo_num, int, 0); 5184module_param(tx_fifo_num, int, 0);
4543module_param_array(tx_fifo_len, int, NULL, 0);
4544module_param(rx_ring_num, int, 0); 5185module_param(rx_ring_num, int, 0);
4545module_param_array(rx_ring_sz, int, NULL, 0); 5186module_param_array(tx_fifo_len, uint, NULL, 0);
4546module_param(Stats_refresh_time, int, 0); 5187module_param_array(rx_ring_sz, uint, NULL, 0);
5188module_param_array(rts_frm_len, uint, NULL, 0);
5189module_param(use_continuous_tx_intrs, int, 1);
4547module_param(rmac_pause_time, int, 0); 5190module_param(rmac_pause_time, int, 0);
4548module_param(mc_pause_threshold_q0q3, int, 0); 5191module_param(mc_pause_threshold_q0q3, int, 0);
4549module_param(mc_pause_threshold_q4q7, int, 0); 5192module_param(mc_pause_threshold_q4q7, int, 0);
4550module_param(shared_splits, int, 0); 5193module_param(shared_splits, int, 0);
4551module_param(tmac_util_period, int, 0); 5194module_param(tmac_util_period, int, 0);
4552module_param(rmac_util_period, int, 0); 5195module_param(rmac_util_period, int, 0);
5196module_param(bimodal, bool, 0);
4553#ifndef CONFIG_S2IO_NAPI 5197#ifndef CONFIG_S2IO_NAPI
4554module_param(indicate_max_pkts, int, 0); 5198module_param(indicate_max_pkts, int, 0);
4555#endif 5199#endif
5200module_param(rxsync_frequency, int, 0);
5201
4556/** 5202/**
4557 * s2io_init_nic - Initialization of the adapter . 5203 * s2io_init_nic - Initialization of the adapter .
4558 * @pdev : structure containing the PCI related information of the device. 5204 * @pdev : structure containing the PCI related information of the device.
4559 * @pre: List of PCI devices supported by the driver listed in s2io_tbl. 5205 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4560 * Description: 5206 * Description:
4561 * The function initializes an adapter identified by the pci_dec structure. 5207 * The function initializes an adapter identified by the pci_dec structure.
4562 * All OS related initialization including memory and device structure and 5208 * All OS related initialization including memory and device structure and
4563 * initlaization of the device private variable is done. Also the swapper 5209 * initlaization of the device private variable is done. Also the swapper
4564 * control register is initialized to enable read and write into the I/O 5210 * control register is initialized to enable read and write into the I/O
4565 * registers of the device. 5211 * registers of the device.
4566 * Return value: 5212 * Return value:
4567 * returns 0 on success and negative on failure. 5213 * returns 0 on success and negative on failure.
@@ -4572,7 +5218,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4572{ 5218{
4573 nic_t *sp; 5219 nic_t *sp;
4574 struct net_device *dev; 5220 struct net_device *dev;
4575 char *dev_name = "S2IO 10GE NIC";
4576 int i, j, ret; 5221 int i, j, ret;
4577 int dma_flag = FALSE; 5222 int dma_flag = FALSE;
4578 u32 mac_up, mac_down; 5223 u32 mac_up, mac_down;
@@ -4581,10 +5226,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4581 u16 subid; 5226 u16 subid;
4582 mac_info_t *mac_control; 5227 mac_info_t *mac_control;
4583 struct config_param *config; 5228 struct config_param *config;
5229 int mode;
4584 5230
4585 5231#ifdef CONFIG_S2IO_NAPI
4586 DBG_PRINT(ERR_DBG, "Loading S2IO driver with %s\n", 5232 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
4587 s2io_driver_version); 5233#endif
4588 5234
4589 if ((ret = pci_enable_device(pdev))) { 5235 if ((ret = pci_enable_device(pdev))) {
4590 DBG_PRINT(ERR_DBG, 5236 DBG_PRINT(ERR_DBG,
@@ -4595,7 +5241,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4595 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 5241 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4596 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n"); 5242 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4597 dma_flag = TRUE; 5243 dma_flag = TRUE;
4598
4599 if (pci_set_consistent_dma_mask 5244 if (pci_set_consistent_dma_mask
4600 (pdev, DMA_64BIT_MASK)) { 5245 (pdev, DMA_64BIT_MASK)) {
4601 DBG_PRINT(ERR_DBG, 5246 DBG_PRINT(ERR_DBG,
@@ -4635,34 +5280,41 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4635 memset(sp, 0, sizeof(nic_t)); 5280 memset(sp, 0, sizeof(nic_t));
4636 sp->dev = dev; 5281 sp->dev = dev;
4637 sp->pdev = pdev; 5282 sp->pdev = pdev;
4638 sp->vendor_id = pdev->vendor;
4639 sp->device_id = pdev->device;
4640 sp->high_dma_flag = dma_flag; 5283 sp->high_dma_flag = dma_flag;
4641 sp->irq = pdev->irq;
4642 sp->device_enabled_once = FALSE; 5284 sp->device_enabled_once = FALSE;
4643 strcpy(sp->name, dev_name); 5285
5286 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5287 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5288 sp->device_type = XFRAME_II_DEVICE;
5289 else
5290 sp->device_type = XFRAME_I_DEVICE;
4644 5291
4645 /* Initialize some PCI/PCI-X fields of the NIC. */ 5292 /* Initialize some PCI/PCI-X fields of the NIC. */
4646 s2io_init_pci(sp); 5293 s2io_init_pci(sp);
4647 5294
4648 /* 5295 /*
4649 * Setting the device configuration parameters. 5296 * Setting the device configuration parameters.
4650 * Most of these parameters can be specified by the user during 5297 * Most of these parameters can be specified by the user during
4651 * module insertion as they are module loadable parameters. If 5298 * module insertion as they are module loadable parameters. If
4652 * these parameters are not not specified during load time, they 5299 * these parameters are not not specified during load time, they
4653 * are initialized with default values. 5300 * are initialized with default values.
4654 */ 5301 */
4655 mac_control = &sp->mac_control; 5302 mac_control = &sp->mac_control;
4656 config = &sp->config; 5303 config = &sp->config;
4657 5304
4658 /* Tx side parameters. */ 5305 /* Tx side parameters. */
4659 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */ 5306 if (tx_fifo_len[0] == 0)
5307 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4660 config->tx_fifo_num = tx_fifo_num; 5308 config->tx_fifo_num = tx_fifo_num;
4661 for (i = 0; i < MAX_TX_FIFOS; i++) { 5309 for (i = 0; i < MAX_TX_FIFOS; i++) {
4662 config->tx_cfg[i].fifo_len = tx_fifo_len[i]; 5310 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4663 config->tx_cfg[i].fifo_priority = i; 5311 config->tx_cfg[i].fifo_priority = i;
4664 } 5312 }
4665 5313
5314 /* mapping the QoS priority to the configured fifos */
5315 for (i = 0; i < MAX_TX_FIFOS; i++)
5316 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5317
4666 config->tx_intr_type = TXD_INT_TYPE_UTILZ; 5318 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4667 for (i = 0; i < config->tx_fifo_num; i++) { 5319 for (i = 0; i < config->tx_fifo_num; i++) {
4668 config->tx_cfg[i].f_no_snoop = 5320 config->tx_cfg[i].f_no_snoop =
@@ -4675,7 +5327,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4675 config->max_txds = MAX_SKB_FRAGS; 5327 config->max_txds = MAX_SKB_FRAGS;
4676 5328
4677 /* Rx side parameters. */ 5329 /* Rx side parameters. */
4678 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */ 5330 if (rx_ring_sz[0] == 0)
5331 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4679 config->rx_ring_num = rx_ring_num; 5332 config->rx_ring_num = rx_ring_num;
4680 for (i = 0; i < MAX_RX_RINGS; i++) { 5333 for (i = 0; i < MAX_RX_RINGS; i++) {
4681 config->rx_cfg[i].num_rxd = rx_ring_sz[i] * 5334 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
@@ -4699,10 +5352,13 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4699 for (i = 0; i < config->rx_ring_num; i++) 5352 for (i = 0; i < config->rx_ring_num; i++)
4700 atomic_set(&sp->rx_bufs_left[i], 0); 5353 atomic_set(&sp->rx_bufs_left[i], 0);
4701 5354
5355 /* Initialize the number of ISRs currently running */
5356 atomic_set(&sp->isr_cnt, 0);
5357
4702 /* initialize the shared memory used by the NIC and the host */ 5358 /* initialize the shared memory used by the NIC and the host */
4703 if (init_shared_mem(sp)) { 5359 if (init_shared_mem(sp)) {
4704 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", 5360 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4705 dev->name); 5361 __FUNCTION__);
4706 ret = -ENOMEM; 5362 ret = -ENOMEM;
4707 goto mem_alloc_failed; 5363 goto mem_alloc_failed;
4708 } 5364 }
@@ -4743,13 +5399,17 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4743 dev->do_ioctl = &s2io_ioctl; 5399 dev->do_ioctl = &s2io_ioctl;
4744 dev->change_mtu = &s2io_change_mtu; 5400 dev->change_mtu = &s2io_change_mtu;
4745 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 5401 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5402 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5403 dev->vlan_rx_register = s2io_vlan_rx_register;
5404 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5405
4746 /* 5406 /*
4747 * will use eth_mac_addr() for dev->set_mac_address 5407 * will use eth_mac_addr() for dev->set_mac_address
4748 * mac address will be set every time dev->open() is called 5408 * mac address will be set every time dev->open() is called
4749 */ 5409 */
4750#ifdef CONFIG_S2IO_NAPI 5410#if defined(CONFIG_S2IO_NAPI)
4751 dev->poll = s2io_poll; 5411 dev->poll = s2io_poll;
4752 dev->weight = 90; 5412 dev->weight = 32;
4753#endif 5413#endif
4754 5414
4755 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 5415 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
@@ -4766,7 +5426,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4766 INIT_WORK(&sp->set_link_task, 5426 INIT_WORK(&sp->set_link_task,
4767 (void (*)(void *)) s2io_set_link, sp); 5427 (void (*)(void *)) s2io_set_link, sp);
4768 5428
4769 pci_save_state(sp->pdev); 5429 if (!(sp->device_type & XFRAME_II_DEVICE)) {
5430 pci_save_state(sp->pdev);
5431 }
4770 5432
4771 /* Setting swapper control on the NIC, for proper reset operation */ 5433 /* Setting swapper control on the NIC, for proper reset operation */
4772 if (s2io_set_swapper(sp)) { 5434 if (s2io_set_swapper(sp)) {
@@ -4776,22 +5438,28 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4776 goto set_swap_failed; 5438 goto set_swap_failed;
4777 } 5439 }
4778 5440
4779 /* Fix for all "FFs" MAC address problems observed on Alpha platforms */ 5441 /* Verify if the Herc works on the slot its placed into */
4780 fix_mac_address(sp); 5442 if (sp->device_type & XFRAME_II_DEVICE) {
4781 s2io_reset(sp); 5443 mode = s2io_verify_pci_mode(sp);
5444 if (mode < 0) {
5445 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5446 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5447 ret = -EBADSLT;
5448 goto set_swap_failed;
5449 }
5450 }
4782 5451
4783 /* 5452 /* Not needed for Herc */
4784 * Setting swapper control on the NIC, so the MAC address can be read. 5453 if (sp->device_type & XFRAME_I_DEVICE) {
4785 */ 5454 /*
4786 if (s2io_set_swapper(sp)) { 5455 * Fix for all "FFs" MAC address problems observed on
4787 DBG_PRINT(ERR_DBG, 5456 * Alpha platforms
4788 "%s: S2IO: swapper settings are wrong\n", 5457 */
4789 dev->name); 5458 fix_mac_address(sp);
4790 ret = -EAGAIN; 5459 s2io_reset(sp);
4791 goto set_swap_failed;
4792 } 5460 }
4793 5461
4794 /* 5462 /*
4795 * MAC address initialization. 5463 * MAC address initialization.
4796 * For now only one mac address will be read and used. 5464 * For now only one mac address will be read and used.
4797 */ 5465 */
@@ -4814,37 +5482,28 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4814 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16); 5482 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4815 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24); 5483 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4816 5484
4817 DBG_PRINT(INIT_DBG,
4818 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4819 sp->def_mac_addr[0].mac_addr[0],
4820 sp->def_mac_addr[0].mac_addr[1],
4821 sp->def_mac_addr[0].mac_addr[2],
4822 sp->def_mac_addr[0].mac_addr[3],
4823 sp->def_mac_addr[0].mac_addr[4],
4824 sp->def_mac_addr[0].mac_addr[5]);
4825
4826 /* Set the factory defined MAC address initially */ 5485 /* Set the factory defined MAC address initially */
4827 dev->addr_len = ETH_ALEN; 5486 dev->addr_len = ETH_ALEN;
4828 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); 5487 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4829 5488
4830 /* 5489 /*
4831 * Initialize the tasklet status and link state flags 5490 * Initialize the tasklet status and link state flags
4832 * and the card statte parameter 5491 * and the card state parameter
4833 */ 5492 */
4834 atomic_set(&(sp->card_state), 0); 5493 atomic_set(&(sp->card_state), 0);
4835 sp->tasklet_status = 0; 5494 sp->tasklet_status = 0;
4836 sp->link_state = 0; 5495 sp->link_state = 0;
4837 5496
4838
4839 /* Initialize spinlocks */ 5497 /* Initialize spinlocks */
4840 spin_lock_init(&sp->tx_lock); 5498 spin_lock_init(&sp->tx_lock);
4841#ifndef CONFIG_S2IO_NAPI 5499#ifndef CONFIG_S2IO_NAPI
4842 spin_lock_init(&sp->put_lock); 5500 spin_lock_init(&sp->put_lock);
4843#endif 5501#endif
5502 spin_lock_init(&sp->rx_lock);
4844 5503
4845 /* 5504 /*
4846 * SXE-002: Configure link and activity LED to init state 5505 * SXE-002: Configure link and activity LED to init state
4847 * on driver load. 5506 * on driver load.
4848 */ 5507 */
4849 subid = sp->pdev->subsystem_device; 5508 subid = sp->pdev->subsystem_device;
4850 if ((subid & 0xFF) >= 0x07) { 5509 if ((subid & 0xFF) >= 0x07) {
@@ -4864,13 +5523,61 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4864 goto register_failed; 5523 goto register_failed;
4865 } 5524 }
4866 5525
4867 /* 5526 if (sp->device_type & XFRAME_II_DEVICE) {
4868 * Make Link state as off at this point, when the Link change 5527 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
4869 * interrupt comes the state will be automatically changed to 5528 dev->name);
5529 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5530 get_xena_rev_id(sp->pdev),
5531 s2io_driver_version);
5532 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5533 sp->def_mac_addr[0].mac_addr[0],
5534 sp->def_mac_addr[0].mac_addr[1],
5535 sp->def_mac_addr[0].mac_addr[2],
5536 sp->def_mac_addr[0].mac_addr[3],
5537 sp->def_mac_addr[0].mac_addr[4],
5538 sp->def_mac_addr[0].mac_addr[5]);
5539 mode = s2io_print_pci_mode(sp);
5540 if (mode < 0) {
5541 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
5542 ret = -EBADSLT;
5543 goto set_swap_failed;
5544 }
5545 } else {
5546 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5547 dev->name);
5548 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5549 get_xena_rev_id(sp->pdev),
5550 s2io_driver_version);
5551 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5552 sp->def_mac_addr[0].mac_addr[0],
5553 sp->def_mac_addr[0].mac_addr[1],
5554 sp->def_mac_addr[0].mac_addr[2],
5555 sp->def_mac_addr[0].mac_addr[3],
5556 sp->def_mac_addr[0].mac_addr[4],
5557 sp->def_mac_addr[0].mac_addr[5]);
5558 }
5559
5560 /* Initialize device name */
5561 strcpy(sp->name, dev->name);
5562 if (sp->device_type & XFRAME_II_DEVICE)
5563 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
5564 else
5565 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5566
5567 /* Initialize bimodal Interrupts */
5568 sp->config.bimodal = bimodal;
5569 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
5570 sp->config.bimodal = 0;
5571 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
5572 dev->name);
5573 }
5574
5575 /*
5576 * Make Link state as off at this point, when the Link change
5577 * interrupt comes the state will be automatically changed to
4870 * the right state. 5578 * the right state.
4871 */ 5579 */
4872 netif_carrier_off(dev); 5580 netif_carrier_off(dev);
4873 sp->last_link_state = LINK_DOWN;
4874 5581
4875 return 0; 5582 return 0;
4876 5583
@@ -4891,11 +5598,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4891} 5598}
4892 5599
4893/** 5600/**
4894 * s2io_rem_nic - Free the PCI device 5601 * s2io_rem_nic - Free the PCI device
4895 * @pdev: structure containing the PCI related information of the device. 5602 * @pdev: structure containing the PCI related information of the device.
4896 * Description: This function is called by the Pci subsystem to release a 5603 * Description: This function is called by the Pci subsystem to release a
4897 * PCI device and free up all resource held up by the device. This could 5604 * PCI device and free up all resource held up by the device. This could
4898 * be in response to a Hot plug event or when the driver is to be removed 5605 * be in response to a Hot plug event or when the driver is to be removed
4899 * from memory. 5606 * from memory.
4900 */ 5607 */
4901 5608
@@ -4919,7 +5626,6 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
4919 pci_disable_device(pdev); 5626 pci_disable_device(pdev);
4920 pci_release_regions(pdev); 5627 pci_release_regions(pdev);
4921 pci_set_drvdata(pdev, NULL); 5628 pci_set_drvdata(pdev, NULL);
4922
4923 free_netdev(dev); 5629 free_netdev(dev);
4924} 5630}
4925 5631
@@ -4935,11 +5641,11 @@ int __init s2io_starter(void)
4935} 5641}
4936 5642
4937/** 5643/**
4938 * s2io_closer - Cleanup routine for the driver 5644 * s2io_closer - Cleanup routine for the driver
4939 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver. 5645 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
4940 */ 5646 */
4941 5647
4942static void s2io_closer(void) 5648void s2io_closer(void)
4943{ 5649{
4944 pci_unregister_driver(&s2io_driver); 5650 pci_unregister_driver(&s2io_driver);
4945 DBG_PRINT(INIT_DBG, "cleanup done\n"); 5651 DBG_PRINT(INIT_DBG, "cleanup done\n");
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 1711c8c3dc99..5d9270730ca2 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -31,6 +31,9 @@
31#define SUCCESS 0 31#define SUCCESS 0
32#define FAILURE -1 32#define FAILURE -1
33 33
34/* Maximum time to flicker LED when asked to identify NIC using ethtool */
35#define MAX_FLICKER_TIME 60000 /* 60 Secs */
36
34/* Maximum outstanding splits to be configured into xena. */ 37/* Maximum outstanding splits to be configured into xena. */
35typedef enum xena_max_outstanding_splits { 38typedef enum xena_max_outstanding_splits {
36 XENA_ONE_SPLIT_TRANSACTION = 0, 39 XENA_ONE_SPLIT_TRANSACTION = 0,
@@ -45,10 +48,10 @@ typedef enum xena_max_outstanding_splits {
45#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4) 48#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
46 49
47/* OS concerned variables and constants */ 50/* OS concerned variables and constants */
48#define WATCH_DOG_TIMEOUT 5*HZ 51#define WATCH_DOG_TIMEOUT 15*HZ
49#define EFILL 0x1234 52#define EFILL 0x1234
50#define ALIGN_SIZE 127 53#define ALIGN_SIZE 127
51#define PCIX_COMMAND_REGISTER 0x62 54#define PCIX_COMMAND_REGISTER 0x62
52 55
53/* 56/*
54 * Debug related variables. 57 * Debug related variables.
@@ -61,7 +64,7 @@ typedef enum xena_max_outstanding_splits {
61#define INTR_DBG 4 64#define INTR_DBG 4
62 65
63/* Global variable that defines the present debug level of the driver. */ 66/* Global variable that defines the present debug level of the driver. */
64static int debug_level = ERR_DBG; /* Default level. */ 67int debug_level = ERR_DBG; /* Default level. */
65 68
66/* DEBUG message print. */ 69/* DEBUG message print. */
67#define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args) 70#define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args)
@@ -71,6 +74,12 @@ static int debug_level = ERR_DBG; /* Default level. */
71#define L4_CKSUM_OK 0xFFFF 74#define L4_CKSUM_OK 0xFFFF
72#define S2IO_JUMBO_SIZE 9600 75#define S2IO_JUMBO_SIZE 9600
73 76
77/* Driver statistics maintained by driver */
78typedef struct {
79 unsigned long long single_ecc_errs;
80 unsigned long long double_ecc_errs;
81} swStat_t;
82
74/* The statistics block of Xena */ 83/* The statistics block of Xena */
75typedef struct stat_block { 84typedef struct stat_block {
76/* Tx MAC statistics counters. */ 85/* Tx MAC statistics counters. */
@@ -186,12 +195,90 @@ typedef struct stat_block {
186 u32 rxd_rd_cnt; 195 u32 rxd_rd_cnt;
187 u32 rxf_wr_cnt; 196 u32 rxf_wr_cnt;
188 u32 txf_rd_cnt; 197 u32 txf_rd_cnt;
198
199/* Tx MAC statistics overflow counters. */
200 u32 tmac_data_octets_oflow;
201 u32 tmac_frms_oflow;
202 u32 tmac_bcst_frms_oflow;
203 u32 tmac_mcst_frms_oflow;
204 u32 tmac_ucst_frms_oflow;
205 u32 tmac_ttl_octets_oflow;
206 u32 tmac_any_err_frms_oflow;
207 u32 tmac_nucst_frms_oflow;
208 u64 tmac_vlan_frms;
209 u32 tmac_drop_ip_oflow;
210 u32 tmac_vld_ip_oflow;
211 u32 tmac_rst_tcp_oflow;
212 u32 tmac_icmp_oflow;
213 u32 tpa_unknown_protocol;
214 u32 tmac_udp_oflow;
215 u32 reserved_10;
216 u32 tpa_parse_failure;
217
218/* Rx MAC Statistics overflow counters. */
219 u32 rmac_data_octets_oflow;
220 u32 rmac_vld_frms_oflow;
221 u32 rmac_vld_bcst_frms_oflow;
222 u32 rmac_vld_mcst_frms_oflow;
223 u32 rmac_accepted_ucst_frms_oflow;
224 u32 rmac_ttl_octets_oflow;
225 u32 rmac_discarded_frms_oflow;
226 u32 rmac_accepted_nucst_frms_oflow;
227 u32 rmac_usized_frms_oflow;
228 u32 rmac_drop_events_oflow;
229 u32 rmac_frag_frms_oflow;
230 u32 rmac_osized_frms_oflow;
231 u32 rmac_ip_oflow;
232 u32 rmac_jabber_frms_oflow;
233 u32 rmac_icmp_oflow;
234 u32 rmac_drop_ip_oflow;
235 u32 rmac_err_drp_udp_oflow;
236 u32 rmac_udp_oflow;
237 u32 reserved_11;
238 u32 rmac_pause_cnt_oflow;
239 u64 rmac_ttl_1519_4095_frms;
240 u64 rmac_ttl_4096_8191_frms;
241 u64 rmac_ttl_8192_max_frms;
242 u64 rmac_ttl_gt_max_frms;
243 u64 rmac_osized_alt_frms;
244 u64 rmac_jabber_alt_frms;
245 u64 rmac_gt_max_alt_frms;
246 u64 rmac_vlan_frms;
247 u32 rmac_len_discard;
248 u32 rmac_fcs_discard;
249 u32 rmac_pf_discard;
250 u32 rmac_da_discard;
251 u32 rmac_red_discard;
252 u32 rmac_rts_discard;
253 u32 reserved_12;
254 u32 rmac_ingm_full_discard;
255 u32 reserved_13;
256 u32 rmac_accepted_ip_oflow;
257 u32 reserved_14;
258 u32 link_fault_cnt;
259 swStat_t sw_stat;
189} StatInfo_t; 260} StatInfo_t;
190 261
191/* Structures representing different init time configuration 262/*
263 * Structures representing different init time configuration
192 * parameters of the NIC. 264 * parameters of the NIC.
193 */ 265 */
194 266
267#define MAX_TX_FIFOS 8
268#define MAX_RX_RINGS 8
269
270/* FIFO mappings for all possible number of fifos configured */
271int fifo_map[][MAX_TX_FIFOS] = {
272 {0, 0, 0, 0, 0, 0, 0, 0},
273 {0, 0, 0, 0, 1, 1, 1, 1},
274 {0, 0, 0, 1, 1, 1, 2, 2},
275 {0, 0, 1, 1, 2, 2, 3, 3},
276 {0, 0, 1, 1, 2, 2, 3, 4},
277 {0, 0, 1, 1, 2, 3, 4, 5},
278 {0, 0, 1, 2, 3, 4, 5, 6},
279 {0, 1, 2, 3, 4, 5, 6, 7},
280};
281
195/* Maintains Per FIFO related information. */ 282/* Maintains Per FIFO related information. */
196typedef struct tx_fifo_config { 283typedef struct tx_fifo_config {
197#define MAX_AVAILABLE_TXDS 8192 284#define MAX_AVAILABLE_TXDS 8192
@@ -237,14 +324,14 @@ typedef struct rx_ring_config {
237#define NO_SNOOP_RXD_BUFFER 0x02 324#define NO_SNOOP_RXD_BUFFER 0x02
238} rx_ring_config_t; 325} rx_ring_config_t;
239 326
240/* This structure provides contains values of the tunable parameters 327/* This structure provides contains values of the tunable parameters
241 * of the H/W 328 * of the H/W
242 */ 329 */
243struct config_param { 330struct config_param {
244/* Tx Side */ 331/* Tx Side */
245 u32 tx_fifo_num; /*Number of Tx FIFOs */ 332 u32 tx_fifo_num; /*Number of Tx FIFOs */
246#define MAX_TX_FIFOS 8
247 333
334 u8 fifo_mapping[MAX_TX_FIFOS];
248 tx_fifo_config_t tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */ 335 tx_fifo_config_t tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
249 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */ 336 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
250 u64 tx_intr_type; 337 u64 tx_intr_type;
@@ -252,10 +339,10 @@ struct config_param {
252 339
253/* Rx Side */ 340/* Rx Side */
254 u32 rx_ring_num; /*Number of receive rings */ 341 u32 rx_ring_num; /*Number of receive rings */
255#define MAX_RX_RINGS 8
256#define MAX_RX_BLOCKS_PER_RING 150 342#define MAX_RX_BLOCKS_PER_RING 150
257 343
258 rx_ring_config_t rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */ 344 rx_ring_config_t rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
345 u8 bimodal; /*Flag for setting bimodal interrupts*/
259 346
260#define HEADER_ETHERNET_II_802_3_SIZE 14 347#define HEADER_ETHERNET_II_802_3_SIZE 14
261#define HEADER_802_2_SIZE 3 348#define HEADER_802_2_SIZE 3
@@ -269,6 +356,7 @@ struct config_param {
269#define MAX_PYLD_JUMBO 9600 356#define MAX_PYLD_JUMBO 9600
270#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18) 357#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18)
271#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22) 358#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22)
359 u16 bus_speed;
272}; 360};
273 361
274/* Structure representing MAC Addrs */ 362/* Structure representing MAC Addrs */
@@ -277,7 +365,7 @@ typedef struct mac_addr {
277} macaddr_t; 365} macaddr_t;
278 366
279/* Structure that represent every FIFO element in the BAR1 367/* Structure that represent every FIFO element in the BAR1
280 * Address location. 368 * Address location.
281 */ 369 */
282typedef struct _TxFIFO_element { 370typedef struct _TxFIFO_element {
283 u64 TxDL_Pointer; 371 u64 TxDL_Pointer;
@@ -339,6 +427,7 @@ typedef struct _RxD_t {
339#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8) 427#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8)
340#define RXD_FRAME_PROTO_IPV4 BIT(27) 428#define RXD_FRAME_PROTO_IPV4 BIT(27)
341#define RXD_FRAME_PROTO_IPV6 BIT(28) 429#define RXD_FRAME_PROTO_IPV6 BIT(28)
430#define RXD_FRAME_IP_FRAG BIT(29)
342#define RXD_FRAME_PROTO_TCP BIT(30) 431#define RXD_FRAME_PROTO_TCP BIT(30)
343#define RXD_FRAME_PROTO_UDP BIT(31) 432#define RXD_FRAME_PROTO_UDP BIT(31)
344#define TCP_OR_UDP_FRAME (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP) 433#define TCP_OR_UDP_FRAME (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP)
@@ -346,11 +435,15 @@ typedef struct _RxD_t {
346#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF) 435#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF)
347 436
348 u64 Control_2; 437 u64 Control_2;
438#define THE_RXD_MARK 0x3
439#define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2)
440#define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62)
441
349#ifndef CONFIG_2BUFF_MODE 442#ifndef CONFIG_2BUFF_MODE
350#define MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16) 443#define MASK_BUFFER0_SIZE vBIT(0x3FFF,2,14)
351#define SET_BUFFER0_SIZE(val) vBIT(val,0,16) 444#define SET_BUFFER0_SIZE(val) vBIT(val,2,14)
352#else 445#else
353#define MASK_BUFFER0_SIZE vBIT(0xFF,0,16) 446#define MASK_BUFFER0_SIZE vBIT(0xFF,2,14)
354#define MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16) 447#define MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
355#define MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16) 448#define MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
356#define SET_BUFFER0_SIZE(val) vBIT(val,8,8) 449#define SET_BUFFER0_SIZE(val) vBIT(val,8,8)
@@ -363,7 +456,7 @@ typedef struct _RxD_t {
363#define SET_NUM_TAG(val) vBIT(val,16,32) 456#define SET_NUM_TAG(val) vBIT(val,16,32)
364 457
365#ifndef CONFIG_2BUFF_MODE 458#ifndef CONFIG_2BUFF_MODE
366#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0xFFFF,0,16))) 459#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0x3FFF,2,14)))
367#else 460#else
368#define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \ 461#define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \
369 >> 48) 462 >> 48)
@@ -382,7 +475,7 @@ typedef struct _RxD_t {
382#endif 475#endif
383} RxD_t; 476} RxD_t;
384 477
385/* Structure that represents the Rx descriptor block which contains 478/* Structure that represents the Rx descriptor block which contains
386 * 128 Rx descriptors. 479 * 128 Rx descriptors.
387 */ 480 */
388#ifndef CONFIG_2BUFF_MODE 481#ifndef CONFIG_2BUFF_MODE
@@ -392,11 +485,11 @@ typedef struct _RxD_block {
392 485
393 u64 reserved_0; 486 u64 reserved_0;
394#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL 487#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
395 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last 488 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last
396 * Rxd in this blk */ 489 * Rxd in this blk */
397 u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */ 490 u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */
398 u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch 491 u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch
399 * the upper 32 bits should 492 * the upper 32 bits should
400 * be 0 */ 493 * be 0 */
401} RxD_block_t; 494} RxD_block_t;
402#else 495#else
@@ -405,13 +498,13 @@ typedef struct _RxD_block {
405 RxD_t rxd[MAX_RXDS_PER_BLOCK]; 498 RxD_t rxd[MAX_RXDS_PER_BLOCK];
406 499
407#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL 500#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
408 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd 501 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd
409 * in this blk */ 502 * in this blk */
410 u64 pNext_RxD_Blk_physical; /* Phy ponter to next blk. */ 503 u64 pNext_RxD_Blk_physical; /* Phy ponter to next blk. */
411} RxD_block_t; 504} RxD_block_t;
412#define SIZE_OF_BLOCK 4096 505#define SIZE_OF_BLOCK 4096
413 506
414/* Structure to hold virtual addresses of Buf0 and Buf1 in 507/* Structure to hold virtual addresses of Buf0 and Buf1 in
415 * 2buf mode. */ 508 * 2buf mode. */
416typedef struct bufAdd { 509typedef struct bufAdd {
417 void *ba_0_org; 510 void *ba_0_org;
@@ -423,8 +516,8 @@ typedef struct bufAdd {
423 516
424/* Structure which stores all the MAC control parameters */ 517/* Structure which stores all the MAC control parameters */
425 518
426/* This structure stores the offset of the RxD in the ring 519/* This structure stores the offset of the RxD in the ring
427 * from which the Rx Interrupt processor can start picking 520 * from which the Rx Interrupt processor can start picking
428 * up the RxDs for processing. 521 * up the RxDs for processing.
429 */ 522 */
430typedef struct _rx_curr_get_info_t { 523typedef struct _rx_curr_get_info_t {
@@ -436,7 +529,7 @@ typedef struct _rx_curr_get_info_t {
436typedef rx_curr_get_info_t rx_curr_put_info_t; 529typedef rx_curr_get_info_t rx_curr_put_info_t;
437 530
438/* This structure stores the offset of the TxDl in the FIFO 531/* This structure stores the offset of the TxDl in the FIFO
439 * from which the Tx Interrupt processor can start picking 532 * from which the Tx Interrupt processor can start picking
440 * up the TxDLs for send complete interrupt processing. 533 * up the TxDLs for send complete interrupt processing.
441 */ 534 */
442typedef struct { 535typedef struct {
@@ -446,32 +539,96 @@ typedef struct {
446 539
447typedef tx_curr_get_info_t tx_curr_put_info_t; 540typedef tx_curr_get_info_t tx_curr_put_info_t;
448 541
449/* Infomation related to the Tx and Rx FIFOs and Rings of Xena 542/* Structure that holds the Phy and virt addresses of the Blocks */
450 * is maintained in this structure. 543typedef struct rx_block_info {
451 */ 544 RxD_t *block_virt_addr;
452typedef struct mac_info { 545 dma_addr_t block_dma_addr;
453/* rx side stuff */ 546} rx_block_info_t;
454 /* Put pointer info which indictes which RxD has to be replenished 547
548/* pre declaration of the nic structure */
549typedef struct s2io_nic nic_t;
550
551/* Ring specific structure */
552typedef struct ring_info {
553 /* The ring number */
554 int ring_no;
555
556 /*
557 * Place holders for the virtual and physical addresses of
558 * all the Rx Blocks
559 */
560 rx_block_info_t rx_blocks[MAX_RX_BLOCKS_PER_RING];
561 int block_count;
562 int pkt_cnt;
563
564 /*
565 * Put pointer info which indictes which RxD has to be replenished
455 * with a new buffer. 566 * with a new buffer.
456 */ 567 */
457 rx_curr_put_info_t rx_curr_put_info[MAX_RX_RINGS]; 568 rx_curr_put_info_t rx_curr_put_info;
458 569
459 /* Get pointer info which indictes which is the last RxD that was 570 /*
571 * Get pointer info which indictes which is the last RxD that was
460 * processed by the driver. 572 * processed by the driver.
461 */ 573 */
462 rx_curr_get_info_t rx_curr_get_info[MAX_RX_RINGS]; 574 rx_curr_get_info_t rx_curr_get_info;
463 575
464 u16 rmac_pause_time; 576#ifndef CONFIG_S2IO_NAPI
465 u16 mc_pause_threshold_q0q3; 577 /* Index to the absolute position of the put pointer of Rx ring */
466 u16 mc_pause_threshold_q4q7; 578 int put_pos;
579#endif
580
581#ifdef CONFIG_2BUFF_MODE
582 /* Buffer Address store. */
583 buffAdd_t **ba;
584#endif
585 nic_t *nic;
586} ring_info_t;
467 587
588/* Fifo specific structure */
589typedef struct fifo_info {
590 /* FIFO number */
591 int fifo_no;
592
593 /* Maximum TxDs per TxDL */
594 int max_txds;
595
596 /* Place holder of all the TX List's Phy and Virt addresses. */
597 list_info_hold_t *list_info;
598
599 /*
600 * Current offset within the tx FIFO where driver would write
601 * new Tx frame
602 */
603 tx_curr_put_info_t tx_curr_put_info;
604
605 /*
606 * Current offset within tx FIFO from where the driver would start freeing
607 * the buffers
608 */
609 tx_curr_get_info_t tx_curr_get_info;
610
611 nic_t *nic;
612}fifo_info_t;
613
614/* Infomation related to the Tx and Rx FIFOs and Rings of Xena
615 * is maintained in this structure.
616 */
617typedef struct mac_info {
468/* tx side stuff */ 618/* tx side stuff */
469 /* logical pointer of start of each Tx FIFO */ 619 /* logical pointer of start of each Tx FIFO */
470 TxFIFO_element_t __iomem *tx_FIFO_start[MAX_TX_FIFOS]; 620 TxFIFO_element_t __iomem *tx_FIFO_start[MAX_TX_FIFOS];
471 621
472/* Current offset within tx_FIFO_start, where driver would write new Tx frame*/ 622 /* Fifo specific structure */
473 tx_curr_put_info_t tx_curr_put_info[MAX_TX_FIFOS]; 623 fifo_info_t fifos[MAX_TX_FIFOS];
474 tx_curr_get_info_t tx_curr_get_info[MAX_TX_FIFOS]; 624
625/* rx side stuff */
626 /* Ring specific structure */
627 ring_info_t rings[MAX_RX_RINGS];
628
629 u16 rmac_pause_time;
630 u16 mc_pause_threshold_q0q3;
631 u16 mc_pause_threshold_q4q7;
475 632
476 void *stats_mem; /* orignal pointer to allocated mem */ 633 void *stats_mem; /* orignal pointer to allocated mem */
477 dma_addr_t stats_mem_phy; /* Physical address of the stat block */ 634 dma_addr_t stats_mem_phy; /* Physical address of the stat block */
@@ -485,12 +642,6 @@ typedef struct {
485 int usage_cnt; 642 int usage_cnt;
486} usr_addr_t; 643} usr_addr_t;
487 644
488/* Structure that holds the Phy and virt addresses of the Blocks */
489typedef struct rx_block_info {
490 RxD_t *block_virt_addr;
491 dma_addr_t block_dma_addr;
492} rx_block_info_t;
493
494/* Default Tunable parameters of the NIC. */ 645/* Default Tunable parameters of the NIC. */
495#define DEFAULT_FIFO_LEN 4096 646#define DEFAULT_FIFO_LEN 4096
496#define SMALL_RXD_CNT 30 * (MAX_RXDS_PER_BLOCK+1) 647#define SMALL_RXD_CNT 30 * (MAX_RXDS_PER_BLOCK+1)
@@ -499,7 +650,20 @@ typedef struct rx_block_info {
499#define LARGE_BLK_CNT 100 650#define LARGE_BLK_CNT 100
500 651
501/* Structure representing one instance of the NIC */ 652/* Structure representing one instance of the NIC */
502typedef struct s2io_nic { 653struct s2io_nic {
654#ifdef CONFIG_S2IO_NAPI
655 /*
656 * Count of packets to be processed in a given iteration, it will be indicated
657 * by the quota field of the device structure when NAPI is enabled.
658 */
659 int pkts_to_process;
660#endif
661 struct net_device *dev;
662 mac_info_t mac_control;
663 struct config_param config;
664 struct pci_dev *pdev;
665 void __iomem *bar0;
666 void __iomem *bar1;
503#define MAX_MAC_SUPPORTED 16 667#define MAX_MAC_SUPPORTED 16
504#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED 668#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
505 669
@@ -507,33 +671,20 @@ typedef struct s2io_nic {
507 macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED]; 671 macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED];
508 672
509 struct net_device_stats stats; 673 struct net_device_stats stats;
510 void __iomem *bar0;
511 void __iomem *bar1;
512 struct config_param config;
513 mac_info_t mac_control;
514 int high_dma_flag; 674 int high_dma_flag;
515 int device_close_flag; 675 int device_close_flag;
516 int device_enabled_once; 676 int device_enabled_once;
517 677
518 char name[32]; 678 char name[50];
519 struct tasklet_struct task; 679 struct tasklet_struct task;
520 volatile unsigned long tasklet_status; 680 volatile unsigned long tasklet_status;
521 struct timer_list timer;
522 struct net_device *dev;
523 struct pci_dev *pdev;
524 681
525 u16 vendor_id; 682 /* Timer that handles I/O errors/exceptions */
526 u16 device_id; 683 struct timer_list alarm_timer;
527 u16 ccmd; 684
528 u32 cbar0_1; 685 /* Space to back up the PCI config space */
529 u32 cbar0_2; 686 u32 config_space[256 / sizeof(u32)];
530 u32 cbar1_1; 687
531 u32 cbar1_2;
532 u32 cirq;
533 u8 cache_line;
534 u32 rom_expansion;
535 u16 pcix_cmd;
536 u32 irq;
537 atomic_t rx_bufs_left[MAX_RX_RINGS]; 688 atomic_t rx_bufs_left[MAX_RX_RINGS];
538 689
539 spinlock_t tx_lock; 690 spinlock_t tx_lock;
@@ -558,27 +709,11 @@ typedef struct s2io_nic {
558 u16 tx_err_count; 709 u16 tx_err_count;
559 u16 rx_err_count; 710 u16 rx_err_count;
560 711
561#ifndef CONFIG_S2IO_NAPI
562 /* Index to the absolute position of the put pointer of Rx ring. */
563 int put_pos[MAX_RX_RINGS];
564#endif
565
566 /*
567 * Place holders for the virtual and physical addresses of
568 * all the Rx Blocks
569 */
570 rx_block_info_t rx_blocks[MAX_RX_RINGS][MAX_RX_BLOCKS_PER_RING];
571 int block_count[MAX_RX_RINGS];
572 int pkt_cnt[MAX_RX_RINGS];
573
574 /* Place holder of all the TX List's Phy and Virt addresses. */
575 list_info_hold_t *list_info[MAX_TX_FIFOS];
576
577 /* Id timer, used to blink NIC to physically identify NIC. */ 712 /* Id timer, used to blink NIC to physically identify NIC. */
578 struct timer_list id_timer; 713 struct timer_list id_timer;
579 714
580 /* Restart timer, used to restart NIC if the device is stuck and 715 /* Restart timer, used to restart NIC if the device is stuck and
581 * a schedule task that will set the correct Link state once the 716 * a schedule task that will set the correct Link state once the
582 * NIC's PHY has stabilized after a state change. 717 * NIC's PHY has stabilized after a state change.
583 */ 718 */
584#ifdef INIT_TQUEUE 719#ifdef INIT_TQUEUE
@@ -589,12 +724,12 @@ typedef struct s2io_nic {
589 struct work_struct set_link_task; 724 struct work_struct set_link_task;
590#endif 725#endif
591 726
592 /* Flag that can be used to turn on or turn off the Rx checksum 727 /* Flag that can be used to turn on or turn off the Rx checksum
593 * offload feature. 728 * offload feature.
594 */ 729 */
595 int rx_csum; 730 int rx_csum;
596 731
597 /* after blink, the adapter must be restored with original 732 /* after blink, the adapter must be restored with original
598 * values. 733 * values.
599 */ 734 */
600 u64 adapt_ctrl_org; 735 u64 adapt_ctrl_org;
@@ -604,16 +739,19 @@ typedef struct s2io_nic {
604#define LINK_DOWN 1 739#define LINK_DOWN 1
605#define LINK_UP 2 740#define LINK_UP 2
606 741
607#ifdef CONFIG_2BUFF_MODE
608 /* Buffer Address store. */
609 buffAdd_t **ba[MAX_RX_RINGS];
610#endif
611 int task_flag; 742 int task_flag;
612#define CARD_DOWN 1 743#define CARD_DOWN 1
613#define CARD_UP 2 744#define CARD_UP 2
614 atomic_t card_state; 745 atomic_t card_state;
615 volatile unsigned long link_state; 746 volatile unsigned long link_state;
616} nic_t; 747 struct vlan_group *vlgrp;
748#define XFRAME_I_DEVICE 1
749#define XFRAME_II_DEVICE 2
750 u8 device_type;
751
752 spinlock_t rx_lock;
753 atomic_t isr_cnt;
754};
617 755
618#define RESET_ERROR 1; 756#define RESET_ERROR 1;
619#define CMD_ERROR 2; 757#define CMD_ERROR 2;
@@ -622,9 +760,10 @@ typedef struct s2io_nic {
622#ifndef readq 760#ifndef readq
623static inline u64 readq(void __iomem *addr) 761static inline u64 readq(void __iomem *addr)
624{ 762{
625 u64 ret = readl(addr + 4); 763 u64 ret = 0;
626 ret <<= 32; 764 ret = readl(addr + 4);
627 ret |= readl(addr); 765 (u64) ret <<= 32;
766 (u64) ret |= readl(addr);
628 767
629 return ret; 768 return ret;
630} 769}
@@ -637,10 +776,10 @@ static inline void writeq(u64 val, void __iomem *addr)
637 writel((u32) (val >> 32), (addr + 4)); 776 writel((u32) (val >> 32), (addr + 4));
638} 777}
639 778
640/* In 32 bit modes, some registers have to be written in a 779/* In 32 bit modes, some registers have to be written in a
641 * particular order to expect correct hardware operation. The 780 * particular order to expect correct hardware operation. The
642 * macro SPECIAL_REG_WRITE is used to perform such ordered 781 * macro SPECIAL_REG_WRITE is used to perform such ordered
643 * writes. Defines UF (Upper First) and LF (Lower First) will 782 * writes. Defines UF (Upper First) and LF (Lower First) will
644 * be used to specify the required write order. 783 * be used to specify the required write order.
645 */ 784 */
646#define UF 1 785#define UF 1
@@ -716,6 +855,7 @@ static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
716#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate 855#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate
717 PCC_FB_ECC Error. */ 856 PCC_FB_ECC Error. */
718 857
858#define RXD_GET_VLAN_TAG(Control_2) (u16)(Control_2 & MASK_VLAN_TAG)
719/* 859/*
720 * Prototype declaration. 860 * Prototype declaration.
721 */ 861 */
@@ -725,36 +865,30 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev);
725static int init_shared_mem(struct s2io_nic *sp); 865static int init_shared_mem(struct s2io_nic *sp);
726static void free_shared_mem(struct s2io_nic *sp); 866static void free_shared_mem(struct s2io_nic *sp);
727static int init_nic(struct s2io_nic *nic); 867static int init_nic(struct s2io_nic *nic);
728#ifndef CONFIG_S2IO_NAPI 868static void rx_intr_handler(ring_info_t *ring_data);
729static void rx_intr_handler(struct s2io_nic *sp); 869static void tx_intr_handler(fifo_info_t *fifo_data);
730#endif
731static void tx_intr_handler(struct s2io_nic *sp);
732static void alarm_intr_handler(struct s2io_nic *sp); 870static void alarm_intr_handler(struct s2io_nic *sp);
733 871
734static int s2io_starter(void); 872static int s2io_starter(void);
735static void s2io_closer(void); 873void s2io_closer(void);
736static void s2io_tx_watchdog(struct net_device *dev); 874static void s2io_tx_watchdog(struct net_device *dev);
737static void s2io_tasklet(unsigned long dev_addr); 875static void s2io_tasklet(unsigned long dev_addr);
738static void s2io_set_multicast(struct net_device *dev); 876static void s2io_set_multicast(struct net_device *dev);
739#ifndef CONFIG_2BUFF_MODE 877static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp);
740static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no); 878void s2io_link(nic_t * sp, int link);
741#else 879void s2io_reset(nic_t * sp);
742static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no, 880#if defined(CONFIG_S2IO_NAPI)
743 buffAdd_t * ba);
744#endif
745static void s2io_link(nic_t * sp, int link);
746static void s2io_reset(nic_t * sp);
747#ifdef CONFIG_S2IO_NAPI
748static int s2io_poll(struct net_device *dev, int *budget); 881static int s2io_poll(struct net_device *dev, int *budget);
749#endif 882#endif
750static void s2io_init_pci(nic_t * sp); 883static void s2io_init_pci(nic_t * sp);
751static int s2io_set_mac_addr(struct net_device *dev, u8 * addr); 884int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
885static void s2io_alarm_handle(unsigned long data);
752static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs); 886static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs);
753static int verify_xena_quiescence(u64 val64, int flag); 887static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
754static struct ethtool_ops netdev_ethtool_ops; 888static struct ethtool_ops netdev_ethtool_ops;
755static void s2io_set_link(unsigned long data); 889static void s2io_set_link(unsigned long data);
756static int s2io_set_swapper(nic_t * sp); 890int s2io_set_swapper(nic_t * sp);
757static void s2io_card_down(nic_t * nic); 891static void s2io_card_down(nic_t *nic);
758static int s2io_card_up(nic_t * nic); 892static int s2io_card_up(nic_t *nic);
759 893int get_xena_rev_id(struct pci_dev *pdev);
760#endif /* _S2IO_H */ 894#endif /* _S2IO_H */
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index f15739481d62..48a43b84ea5f 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
42#include "skge.h" 42#include "skge.h"
43 43
44#define DRV_NAME "skge" 44#define DRV_NAME "skge"
45#define DRV_VERSION "0.8" 45#define DRV_VERSION "0.9"
46#define PFX DRV_NAME " " 46#define PFX DRV_NAME " "
47 47
48#define DEFAULT_TX_RING_SIZE 128 48#define DEFAULT_TX_RING_SIZE 128
@@ -189,7 +189,7 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
189{ 189{
190 u32 supported; 190 u32 supported;
191 191
192 if (iscopper(hw)) { 192 if (hw->copper) {
193 supported = SUPPORTED_10baseT_Half 193 supported = SUPPORTED_10baseT_Half
194 | SUPPORTED_10baseT_Full 194 | SUPPORTED_10baseT_Full
195 | SUPPORTED_100baseT_Half 195 | SUPPORTED_100baseT_Half
@@ -222,7 +222,7 @@ static int skge_get_settings(struct net_device *dev,
222 ecmd->transceiver = XCVR_INTERNAL; 222 ecmd->transceiver = XCVR_INTERNAL;
223 ecmd->supported = skge_supported_modes(hw); 223 ecmd->supported = skge_supported_modes(hw);
224 224
225 if (iscopper(hw)) { 225 if (hw->copper) {
226 ecmd->port = PORT_TP; 226 ecmd->port = PORT_TP;
227 ecmd->phy_address = hw->phy_addr; 227 ecmd->phy_address = hw->phy_addr;
228 } else 228 } else
@@ -876,6 +876,9 @@ static int skge_rx_fill(struct skge_port *skge)
876 876
877static void skge_link_up(struct skge_port *skge) 877static void skge_link_up(struct skge_port *skge)
878{ 878{
879 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
880 LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
881
879 netif_carrier_on(skge->netdev); 882 netif_carrier_on(skge->netdev);
880 if (skge->tx_avail > MAX_SKB_FRAGS + 1) 883 if (skge->tx_avail > MAX_SKB_FRAGS + 1)
881 netif_wake_queue(skge->netdev); 884 netif_wake_queue(skge->netdev);
@@ -894,6 +897,7 @@ static void skge_link_up(struct skge_port *skge)
894 897
895static void skge_link_down(struct skge_port *skge) 898static void skge_link_down(struct skge_port *skge)
896{ 899{
900 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
897 netif_carrier_off(skge->netdev); 901 netif_carrier_off(skge->netdev);
898 netif_stop_queue(skge->netdev); 902 netif_stop_queue(skge->netdev);
899 903
@@ -1599,7 +1603,7 @@ static void yukon_init(struct skge_hw *hw, int port)
1599 adv = PHY_AN_CSMA; 1603 adv = PHY_AN_CSMA;
1600 1604
1601 if (skge->autoneg == AUTONEG_ENABLE) { 1605 if (skge->autoneg == AUTONEG_ENABLE) {
1602 if (iscopper(hw)) { 1606 if (hw->copper) {
1603 if (skge->advertising & ADVERTISED_1000baseT_Full) 1607 if (skge->advertising & ADVERTISED_1000baseT_Full)
1604 ct1000 |= PHY_M_1000C_AFD; 1608 ct1000 |= PHY_M_1000C_AFD;
1605 if (skge->advertising & ADVERTISED_1000baseT_Half) 1609 if (skge->advertising & ADVERTISED_1000baseT_Half)
@@ -1691,7 +1695,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1691 /* Set hardware config mode */ 1695 /* Set hardware config mode */
1692 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | 1696 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
1693 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE; 1697 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
1694 reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; 1698 reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
1695 1699
1696 /* Clear GMC reset */ 1700 /* Clear GMC reset */
1697 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); 1701 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
@@ -1780,7 +1784,12 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1780 reg &= ~GMF_RX_F_FL_ON; 1784 reg &= ~GMF_RX_F_FL_ON;
1781 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 1785 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1782 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); 1786 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
1783 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF); 1787 /*
1788 * because Pause Packet Truncation in GMAC is not working
1789 * we have to increase the Flush Threshold to 64 bytes
1790 * in order to flush pause packets in Rx FIFO on Yukon-1
1791 */
1792 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
1784 1793
1785 /* Configure Tx MAC FIFO */ 1794 /* Configure Tx MAC FIFO */
1786 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 1795 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
@@ -2670,18 +2679,6 @@ static void skge_error_irq(struct skge_hw *hw)
2670 /* Timestamp (unused) overflow */ 2679 /* Timestamp (unused) overflow */
2671 if (hwstatus & IS_IRQ_TIST_OV) 2680 if (hwstatus & IS_IRQ_TIST_OV)
2672 skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 2681 skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2673
2674 if (hwstatus & IS_IRQ_SENSOR) {
2675 /* no sensors on 32-bit Yukon */
2676 if (!(skge_read16(hw, B0_CTST) & CS_BUS_SLOT_SZ)) {
2677 printk(KERN_ERR PFX "ignoring bogus sensor interrups\n");
2678 skge_write32(hw, B0_HWE_IMSK,
2679 IS_ERR_MSK & ~IS_IRQ_SENSOR);
2680 } else
2681 printk(KERN_WARNING PFX "sensor interrupt\n");
2682 }
2683
2684
2685 } 2682 }
2686 2683
2687 if (hwstatus & IS_RAM_RD_PAR) { 2684 if (hwstatus & IS_RAM_RD_PAR) {
@@ -2712,9 +2709,10 @@ static void skge_error_irq(struct skge_hw *hw)
2712 2709
2713 skge_pci_clear(hw); 2710 skge_pci_clear(hw);
2714 2711
2712 /* if error still set then just ignore it */
2715 hwstatus = skge_read32(hw, B0_HWE_ISRC); 2713 hwstatus = skge_read32(hw, B0_HWE_ISRC);
2716 if (hwstatus & IS_IRQ_STAT) { 2714 if (hwstatus & IS_IRQ_STAT) {
2717 printk(KERN_WARNING PFX "IRQ status %x: still set ignoring hardware errors\n", 2715 pr_debug("IRQ status %x: still set ignoring hardware errors\n",
2718 hwstatus); 2716 hwstatus);
2719 hw->intr_mask &= ~IS_HW_ERR; 2717 hw->intr_mask &= ~IS_HW_ERR;
2720 } 2718 }
@@ -2876,7 +2874,7 @@ static const char *skge_board_name(const struct skge_hw *hw)
2876static int skge_reset(struct skge_hw *hw) 2874static int skge_reset(struct skge_hw *hw)
2877{ 2875{
2878 u16 ctst; 2876 u16 ctst;
2879 u8 t8, mac_cfg; 2877 u8 t8, mac_cfg, pmd_type, phy_type;
2880 int i; 2878 int i;
2881 2879
2882 ctst = skge_read16(hw, B0_CTST); 2880 ctst = skge_read16(hw, B0_CTST);
@@ -2895,18 +2893,19 @@ static int skge_reset(struct skge_hw *hw)
2895 ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA)); 2893 ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
2896 2894
2897 hw->chip_id = skge_read8(hw, B2_CHIP_ID); 2895 hw->chip_id = skge_read8(hw, B2_CHIP_ID);
2898 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; 2896 phy_type = skge_read8(hw, B2_E_1) & 0xf;
2899 hw->pmd_type = skge_read8(hw, B2_PMD_TYP); 2897 pmd_type = skge_read8(hw, B2_PMD_TYP);
2898 hw->copper = (pmd_type == 'T' || pmd_type == '1');
2900 2899
2901 switch (hw->chip_id) { 2900 switch (hw->chip_id) {
2902 case CHIP_ID_GENESIS: 2901 case CHIP_ID_GENESIS:
2903 switch (hw->phy_type) { 2902 switch (phy_type) {
2904 case SK_PHY_BCOM: 2903 case SK_PHY_BCOM:
2905 hw->phy_addr = PHY_ADDR_BCOM; 2904 hw->phy_addr = PHY_ADDR_BCOM;
2906 break; 2905 break;
2907 default: 2906 default:
2908 printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n", 2907 printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
2909 pci_name(hw->pdev), hw->phy_type); 2908 pci_name(hw->pdev), phy_type);
2910 return -EOPNOTSUPP; 2909 return -EOPNOTSUPP;
2911 } 2910 }
2912 break; 2911 break;
@@ -2914,13 +2913,10 @@ static int skge_reset(struct skge_hw *hw)
2914 case CHIP_ID_YUKON: 2913 case CHIP_ID_YUKON:
2915 case CHIP_ID_YUKON_LITE: 2914 case CHIP_ID_YUKON_LITE:
2916 case CHIP_ID_YUKON_LP: 2915 case CHIP_ID_YUKON_LP:
2917 if (hw->phy_type < SK_PHY_MARV_COPPER && hw->pmd_type != 'S') 2916 if (phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
2918 hw->phy_type = SK_PHY_MARV_COPPER; 2917 hw->copper = 1;
2919 2918
2920 hw->phy_addr = PHY_ADDR_MARV; 2919 hw->phy_addr = PHY_ADDR_MARV;
2921 if (!iscopper(hw))
2922 hw->phy_type = SK_PHY_MARV_FIBER;
2923
2924 break; 2920 break;
2925 2921
2926 default: 2922 default:
@@ -2948,12 +2944,20 @@ static int skge_reset(struct skge_hw *hw)
2948 else 2944 else
2949 hw->ram_size = t8 * 4096; 2945 hw->ram_size = t8 * 4096;
2950 2946
2947 hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
2951 if (hw->chip_id == CHIP_ID_GENESIS) 2948 if (hw->chip_id == CHIP_ID_GENESIS)
2952 genesis_init(hw); 2949 genesis_init(hw);
2953 else { 2950 else {
2954 /* switch power to VCC (WA for VAUX problem) */ 2951 /* switch power to VCC (WA for VAUX problem) */
2955 skge_write8(hw, B0_POWER_CTRL, 2952 skge_write8(hw, B0_POWER_CTRL,
2956 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 2953 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
2954 /* avoid boards with stuck Hardware error bits */
2955 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
2956 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
2957 printk(KERN_WARNING PFX "stuck hardware sensor bit\n");
2958 hw->intr_mask &= ~IS_HW_ERR;
2959 }
2960
2957 for (i = 0; i < hw->ports; i++) { 2961 for (i = 0; i < hw->ports; i++) {
2958 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 2962 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2959 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); 2963 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
@@ -2994,7 +2998,6 @@ static int skge_reset(struct skge_hw *hw)
2994 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); 2998 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
2995 skge_write32(hw, B2_IRQM_CTRL, TIM_START); 2999 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
2996 3000
2997 hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
2998 skge_write32(hw, B0_IMSK, hw->intr_mask); 3001 skge_write32(hw, B0_IMSK, hw->intr_mask);
2999 3002
3000 if (hw->chip_id != CHIP_ID_GENESIS) 3003 if (hw->chip_id != CHIP_ID_GENESIS)
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index b432f1bb8168..f1680beb8e68 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -214,8 +214,6 @@ enum {
214 214
215/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ 215/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
216enum { 216enum {
217 IS_ERR_MSK = 0x00003fff,/* All Error bits */
218
219 IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */ 217 IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
220 IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */ 218 IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
221 IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */ 219 IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
@@ -230,6 +228,12 @@ enum {
230 IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */ 228 IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
231 IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */ 229 IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
232 IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */ 230 IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
231
232 IS_ERR_MSK = IS_IRQ_MST_ERR | IS_IRQ_STAT
233 | IS_NO_STAT_M1 | IS_NO_STAT_M2
234 | IS_RAM_RD_PAR | IS_RAM_WR_PAR
235 | IS_M1_PAR_ERR | IS_M2_PAR_ERR
236 | IS_R1_PAR_ERR | IS_R2_PAR_ERR,
233}; 237};
234 238
235/* B2_TST_CTRL1 8 bit Test Control Register 1 */ 239/* B2_TST_CTRL1 8 bit Test Control Register 1 */
@@ -2456,24 +2460,17 @@ struct skge_hw {
2456 2460
2457 u8 chip_id; 2461 u8 chip_id;
2458 u8 chip_rev; 2462 u8 chip_rev;
2459 u8 phy_type; 2463 u8 copper;
2460 u8 pmd_type;
2461 u16 phy_addr;
2462 u8 ports; 2464 u8 ports;
2463 2465
2464 u32 ram_size; 2466 u32 ram_size;
2465 u32 ram_offset; 2467 u32 ram_offset;
2468 u16 phy_addr;
2466 2469
2467 struct tasklet_struct ext_tasklet; 2470 struct tasklet_struct ext_tasklet;
2468 spinlock_t phy_lock; 2471 spinlock_t phy_lock;
2469}; 2472};
2470 2473
2471
2472static inline int iscopper(const struct skge_hw *hw)
2473{
2474 return (hw->pmd_type == 'T');
2475}
2476
2477enum { 2474enum {
2478 FLOW_MODE_NONE = 0, /* No Flow-Control */ 2475 FLOW_MODE_NONE = 0, /* No Flow-Control */
2479 FLOW_MODE_LOC_SEND = 1, /* Local station sends PAUSE */ 2476 FLOW_MODE_LOC_SEND = 1, /* Local station sends PAUSE */
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index 87103c400999..f1e4ef1188e4 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -139,7 +139,7 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_
139 */ 139 */
140 dev->base_addr += 0x10; 140 dev->base_addr += 0x10;
141 141
142 ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev); 142 ret = tmsdev_init(dev, PCI_MAX_ADDRESS, &pdev->dev);
143 if (ret) { 143 if (ret) {
144 printk("%s: unable to get memory for dev->priv.\n", 144 printk("%s: unable to get memory for dev->priv.\n",
145 dev->name); 145 dev->name);
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index 40ad0fde28af..0a9597738d6c 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -62,8 +62,7 @@ static int dmalist[] __initdata = {
62}; 62};
63 63
64static char cardname[] = "Proteon 1392\0"; 64static char cardname[] = "Proteon 1392\0";
65 65static u64 dma_mask = ISA_MAX_ADDRESS;
66struct net_device *proteon_probe(int unit);
67static int proteon_open(struct net_device *dev); 66static int proteon_open(struct net_device *dev);
68static void proteon_read_eeprom(struct net_device *dev); 67static void proteon_read_eeprom(struct net_device *dev);
69static unsigned short proteon_setnselout_pins(struct net_device *dev); 68static unsigned short proteon_setnselout_pins(struct net_device *dev);
@@ -116,7 +115,7 @@ nodev:
116 return -ENODEV; 115 return -ENODEV;
117} 116}
118 117
119static int __init setup_card(struct net_device *dev) 118static int __init setup_card(struct net_device *dev, struct device *pdev)
120{ 119{
121 struct net_local *tp; 120 struct net_local *tp;
122 static int versionprinted; 121 static int versionprinted;
@@ -137,7 +136,7 @@ static int __init setup_card(struct net_device *dev)
137 } 136 }
138 } 137 }
139 if (err) 138 if (err)
140 goto out4; 139 goto out5;
141 140
142 /* At this point we have found a valid card. */ 141 /* At this point we have found a valid card. */
143 142
@@ -145,14 +144,15 @@ static int __init setup_card(struct net_device *dev)
145 printk(KERN_DEBUG "%s", version); 144 printk(KERN_DEBUG "%s", version);
146 145
147 err = -EIO; 146 err = -EIO;
148 if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL)) 147 pdev->dma_mask = &dma_mask;
148 if (tmsdev_init(dev, ISA_MAX_ADDRESS, pdev))
149 goto out4; 149 goto out4;
150 150
151 dev->base_addr &= ~3; 151 dev->base_addr &= ~3;
152 152
153 proteon_read_eeprom(dev); 153 proteon_read_eeprom(dev);
154 154
155 printk(KERN_DEBUG "%s: Ring Station Address: ", dev->name); 155 printk(KERN_DEBUG "proteon.c: Ring Station Address: ");
156 printk("%2.2x", dev->dev_addr[0]); 156 printk("%2.2x", dev->dev_addr[0]);
157 for (j = 1; j < 6; j++) 157 for (j = 1; j < 6; j++)
158 printk(":%2.2x", dev->dev_addr[j]); 158 printk(":%2.2x", dev->dev_addr[j]);
@@ -185,7 +185,7 @@ static int __init setup_card(struct net_device *dev)
185 185
186 if(irqlist[j] == 0) 186 if(irqlist[j] == 0)
187 { 187 {
188 printk(KERN_INFO "%s: AutoSelect no IRQ available\n", dev->name); 188 printk(KERN_INFO "proteon.c: AutoSelect no IRQ available\n");
189 goto out3; 189 goto out3;
190 } 190 }
191 } 191 }
@@ -196,15 +196,15 @@ static int __init setup_card(struct net_device *dev)
196 break; 196 break;
197 if (irqlist[j] == 0) 197 if (irqlist[j] == 0)
198 { 198 {
199 printk(KERN_INFO "%s: Illegal IRQ %d specified\n", 199 printk(KERN_INFO "proteon.c: Illegal IRQ %d specified\n",
200 dev->name, dev->irq); 200 dev->irq);
201 goto out3; 201 goto out3;
202 } 202 }
203 if (request_irq(dev->irq, tms380tr_interrupt, 0, 203 if (request_irq(dev->irq, tms380tr_interrupt, 0,
204 cardname, dev)) 204 cardname, dev))
205 { 205 {
206 printk(KERN_INFO "%s: Selected IRQ %d not available\n", 206 printk(KERN_INFO "proteon.c: Selected IRQ %d not available\n",
207 dev->name, dev->irq); 207 dev->irq);
208 goto out3; 208 goto out3;
209 } 209 }
210 } 210 }
@@ -220,7 +220,7 @@ static int __init setup_card(struct net_device *dev)
220 220
221 if(dmalist[j] == 0) 221 if(dmalist[j] == 0)
222 { 222 {
223 printk(KERN_INFO "%s: AutoSelect no DMA available\n", dev->name); 223 printk(KERN_INFO "proteon.c: AutoSelect no DMA available\n");
224 goto out2; 224 goto out2;
225 } 225 }
226 } 226 }
@@ -231,25 +231,25 @@ static int __init setup_card(struct net_device *dev)
231 break; 231 break;
232 if (dmalist[j] == 0) 232 if (dmalist[j] == 0)
233 { 233 {
234 printk(KERN_INFO "%s: Illegal DMA %d specified\n", 234 printk(KERN_INFO "proteon.c: Illegal DMA %d specified\n",
235 dev->name, dev->dma); 235 dev->dma);
236 goto out2; 236 goto out2;
237 } 237 }
238 if (request_dma(dev->dma, cardname)) 238 if (request_dma(dev->dma, cardname))
239 { 239 {
240 printk(KERN_INFO "%s: Selected DMA %d not available\n", 240 printk(KERN_INFO "proteon.c: Selected DMA %d not available\n",
241 dev->name, dev->dma); 241 dev->dma);
242 goto out2; 242 goto out2;
243 } 243 }
244 } 244 }
245 245
246 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
247 dev->name, dev->base_addr, dev->irq, dev->dma);
248
249 err = register_netdev(dev); 246 err = register_netdev(dev);
250 if (err) 247 if (err)
251 goto out; 248 goto out;
252 249
250 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
251 dev->name, dev->base_addr, dev->irq, dev->dma);
252
253 return 0; 253 return 0;
254out: 254out:
255 free_dma(dev->dma); 255 free_dma(dev->dma);
@@ -258,34 +258,11 @@ out2:
258out3: 258out3:
259 tmsdev_term(dev); 259 tmsdev_term(dev);
260out4: 260out4:
261 release_region(dev->base_addr, PROTEON_IO_EXTENT); 261 release_region(dev->base_addr, PROTEON_IO_EXTENT);
262out5:
262 return err; 263 return err;
263} 264}
264 265
265struct net_device * __init proteon_probe(int unit)
266{
267 struct net_device *dev = alloc_trdev(sizeof(struct net_local));
268 int err = 0;
269
270 if (!dev)
271 return ERR_PTR(-ENOMEM);
272
273 if (unit >= 0) {
274 sprintf(dev->name, "tr%d", unit);
275 netdev_boot_setup_check(dev);
276 }
277
278 err = setup_card(dev);
279 if (err)
280 goto out;
281
282 return dev;
283
284out:
285 free_netdev(dev);
286 return ERR_PTR(err);
287}
288
289/* 266/*
290 * Reads MAC address from adapter RAM, which should've read it from 267 * Reads MAC address from adapter RAM, which should've read it from
291 * the onboard ROM. 268 * the onboard ROM.
@@ -352,8 +329,6 @@ static int proteon_open(struct net_device *dev)
352 return tms380tr_open(dev); 329 return tms380tr_open(dev);
353} 330}
354 331
355#ifdef MODULE
356
357#define ISATR_MAX_ADAPTERS 3 332#define ISATR_MAX_ADAPTERS 3
358 333
359static int io[ISATR_MAX_ADAPTERS]; 334static int io[ISATR_MAX_ADAPTERS];
@@ -366,13 +341,23 @@ module_param_array(io, int, NULL, 0);
366module_param_array(irq, int, NULL, 0); 341module_param_array(irq, int, NULL, 0);
367module_param_array(dma, int, NULL, 0); 342module_param_array(dma, int, NULL, 0);
368 343
369static struct net_device *proteon_dev[ISATR_MAX_ADAPTERS]; 344static struct platform_device *proteon_dev[ISATR_MAX_ADAPTERS];
345
346static struct device_driver proteon_driver = {
347 .name = "proteon",
348 .bus = &platform_bus_type,
349};
370 350
371int init_module(void) 351static int __init proteon_init(void)
372{ 352{
373 struct net_device *dev; 353 struct net_device *dev;
354 struct platform_device *pdev;
374 int i, num = 0, err = 0; 355 int i, num = 0, err = 0;
375 356
357 err = driver_register(&proteon_driver);
358 if (err)
359 return err;
360
376 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { 361 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
377 dev = alloc_trdev(sizeof(struct net_local)); 362 dev = alloc_trdev(sizeof(struct net_local));
378 if (!dev) 363 if (!dev)
@@ -381,11 +366,15 @@ int init_module(void)
381 dev->base_addr = io[i]; 366 dev->base_addr = io[i];
382 dev->irq = irq[i]; 367 dev->irq = irq[i];
383 dev->dma = dma[i]; 368 dev->dma = dma[i];
384 err = setup_card(dev); 369 pdev = platform_device_register_simple("proteon",
370 i, NULL, 0);
371 err = setup_card(dev, &pdev->dev);
385 if (!err) { 372 if (!err) {
386 proteon_dev[i] = dev; 373 proteon_dev[i] = pdev;
374 dev_set_drvdata(&pdev->dev, dev);
387 ++num; 375 ++num;
388 } else { 376 } else {
377 platform_device_unregister(pdev);
389 free_netdev(dev); 378 free_netdev(dev);
390 } 379 }
391 } 380 }
@@ -399,23 +388,28 @@ int init_module(void)
399 return (0); 388 return (0);
400} 389}
401 390
402void cleanup_module(void) 391static void __exit proteon_cleanup(void)
403{ 392{
393 struct net_device *dev;
404 int i; 394 int i;
405 395
406 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { 396 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
407 struct net_device *dev = proteon_dev[i]; 397 struct platform_device *pdev = proteon_dev[i];
408 398
409 if (!dev) 399 if (!pdev)
410 continue; 400 continue;
411 401 dev = dev_get_drvdata(&pdev->dev);
412 unregister_netdev(dev); 402 unregister_netdev(dev);
413 release_region(dev->base_addr, PROTEON_IO_EXTENT); 403 release_region(dev->base_addr, PROTEON_IO_EXTENT);
414 free_irq(dev->irq, dev); 404 free_irq(dev->irq, dev);
415 free_dma(dev->dma); 405 free_dma(dev->dma);
416 tmsdev_term(dev); 406 tmsdev_term(dev);
417 free_netdev(dev); 407 free_netdev(dev);
408 dev_set_drvdata(&pdev->dev, NULL);
409 platform_device_unregister(pdev);
418 } 410 }
411 driver_unregister(&proteon_driver);
419} 412}
420#endif /* MODULE */
421 413
414module_init(proteon_init);
415module_exit(proteon_cleanup);
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
index f26796e2d0e5..03f061941d77 100644
--- a/drivers/net/tokenring/skisa.c
+++ b/drivers/net/tokenring/skisa.c
@@ -68,8 +68,7 @@ static int dmalist[] __initdata = {
68}; 68};
69 69
70static char isa_cardname[] = "SK NET TR 4/16 ISA\0"; 70static char isa_cardname[] = "SK NET TR 4/16 ISA\0";
71 71static u64 dma_mask = ISA_MAX_ADDRESS;
72struct net_device *sk_isa_probe(int unit);
73static int sk_isa_open(struct net_device *dev); 72static int sk_isa_open(struct net_device *dev);
74static void sk_isa_read_eeprom(struct net_device *dev); 73static void sk_isa_read_eeprom(struct net_device *dev);
75static unsigned short sk_isa_setnselout_pins(struct net_device *dev); 74static unsigned short sk_isa_setnselout_pins(struct net_device *dev);
@@ -133,7 +132,7 @@ static int __init sk_isa_probe1(struct net_device *dev, int ioaddr)
133 return 0; 132 return 0;
134} 133}
135 134
136static int __init setup_card(struct net_device *dev) 135static int __init setup_card(struct net_device *dev, struct device *pdev)
137{ 136{
138 struct net_local *tp; 137 struct net_local *tp;
139 static int versionprinted; 138 static int versionprinted;
@@ -154,7 +153,7 @@ static int __init setup_card(struct net_device *dev)
154 } 153 }
155 } 154 }
156 if (err) 155 if (err)
157 goto out4; 156 goto out5;
158 157
159 /* At this point we have found a valid card. */ 158 /* At this point we have found a valid card. */
160 159
@@ -162,14 +161,15 @@ static int __init setup_card(struct net_device *dev)
162 printk(KERN_DEBUG "%s", version); 161 printk(KERN_DEBUG "%s", version);
163 162
164 err = -EIO; 163 err = -EIO;
165 if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL)) 164 pdev->dma_mask = &dma_mask;
165 if (tmsdev_init(dev, ISA_MAX_ADDRESS, pdev))
166 goto out4; 166 goto out4;
167 167
168 dev->base_addr &= ~3; 168 dev->base_addr &= ~3;
169 169
170 sk_isa_read_eeprom(dev); 170 sk_isa_read_eeprom(dev);
171 171
172 printk(KERN_DEBUG "%s: Ring Station Address: ", dev->name); 172 printk(KERN_DEBUG "skisa.c: Ring Station Address: ");
173 printk("%2.2x", dev->dev_addr[0]); 173 printk("%2.2x", dev->dev_addr[0]);
174 for (j = 1; j < 6; j++) 174 for (j = 1; j < 6; j++)
175 printk(":%2.2x", dev->dev_addr[j]); 175 printk(":%2.2x", dev->dev_addr[j]);
@@ -202,7 +202,7 @@ static int __init setup_card(struct net_device *dev)
202 202
203 if(irqlist[j] == 0) 203 if(irqlist[j] == 0)
204 { 204 {
205 printk(KERN_INFO "%s: AutoSelect no IRQ available\n", dev->name); 205 printk(KERN_INFO "skisa.c: AutoSelect no IRQ available\n");
206 goto out3; 206 goto out3;
207 } 207 }
208 } 208 }
@@ -213,15 +213,15 @@ static int __init setup_card(struct net_device *dev)
213 break; 213 break;
214 if (irqlist[j] == 0) 214 if (irqlist[j] == 0)
215 { 215 {
216 printk(KERN_INFO "%s: Illegal IRQ %d specified\n", 216 printk(KERN_INFO "skisa.c: Illegal IRQ %d specified\n",
217 dev->name, dev->irq); 217 dev->irq);
218 goto out3; 218 goto out3;
219 } 219 }
220 if (request_irq(dev->irq, tms380tr_interrupt, 0, 220 if (request_irq(dev->irq, tms380tr_interrupt, 0,
221 isa_cardname, dev)) 221 isa_cardname, dev))
222 { 222 {
223 printk(KERN_INFO "%s: Selected IRQ %d not available\n", 223 printk(KERN_INFO "skisa.c: Selected IRQ %d not available\n",
224 dev->name, dev->irq); 224 dev->irq);
225 goto out3; 225 goto out3;
226 } 226 }
227 } 227 }
@@ -237,7 +237,7 @@ static int __init setup_card(struct net_device *dev)
237 237
238 if(dmalist[j] == 0) 238 if(dmalist[j] == 0)
239 { 239 {
240 printk(KERN_INFO "%s: AutoSelect no DMA available\n", dev->name); 240 printk(KERN_INFO "skisa.c: AutoSelect no DMA available\n");
241 goto out2; 241 goto out2;
242 } 242 }
243 } 243 }
@@ -248,25 +248,25 @@ static int __init setup_card(struct net_device *dev)
248 break; 248 break;
249 if (dmalist[j] == 0) 249 if (dmalist[j] == 0)
250 { 250 {
251 printk(KERN_INFO "%s: Illegal DMA %d specified\n", 251 printk(KERN_INFO "skisa.c: Illegal DMA %d specified\n",
252 dev->name, dev->dma); 252 dev->dma);
253 goto out2; 253 goto out2;
254 } 254 }
255 if (request_dma(dev->dma, isa_cardname)) 255 if (request_dma(dev->dma, isa_cardname))
256 { 256 {
257 printk(KERN_INFO "%s: Selected DMA %d not available\n", 257 printk(KERN_INFO "skisa.c: Selected DMA %d not available\n",
258 dev->name, dev->dma); 258 dev->dma);
259 goto out2; 259 goto out2;
260 } 260 }
261 } 261 }
262 262
263 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
264 dev->name, dev->base_addr, dev->irq, dev->dma);
265
266 err = register_netdev(dev); 263 err = register_netdev(dev);
267 if (err) 264 if (err)
268 goto out; 265 goto out;
269 266
267 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
268 dev->name, dev->base_addr, dev->irq, dev->dma);
269
270 return 0; 270 return 0;
271out: 271out:
272 free_dma(dev->dma); 272 free_dma(dev->dma);
@@ -275,33 +275,11 @@ out2:
275out3: 275out3:
276 tmsdev_term(dev); 276 tmsdev_term(dev);
277out4: 277out4:
278 release_region(dev->base_addr, SK_ISA_IO_EXTENT); 278 release_region(dev->base_addr, SK_ISA_IO_EXTENT);
279out5:
279 return err; 280 return err;
280} 281}
281 282
282struct net_device * __init sk_isa_probe(int unit)
283{
284 struct net_device *dev = alloc_trdev(sizeof(struct net_local));
285 int err = 0;
286
287 if (!dev)
288 return ERR_PTR(-ENOMEM);
289
290 if (unit >= 0) {
291 sprintf(dev->name, "tr%d", unit);
292 netdev_boot_setup_check(dev);
293 }
294
295 err = setup_card(dev);
296 if (err)
297 goto out;
298
299 return dev;
300out:
301 free_netdev(dev);
302 return ERR_PTR(err);
303}
304
305/* 283/*
306 * Reads MAC address from adapter RAM, which should've read it from 284 * Reads MAC address from adapter RAM, which should've read it from
307 * the onboard ROM. 285 * the onboard ROM.
@@ -361,8 +339,6 @@ static int sk_isa_open(struct net_device *dev)
361 return tms380tr_open(dev); 339 return tms380tr_open(dev);
362} 340}
363 341
364#ifdef MODULE
365
366#define ISATR_MAX_ADAPTERS 3 342#define ISATR_MAX_ADAPTERS 3
367 343
368static int io[ISATR_MAX_ADAPTERS]; 344static int io[ISATR_MAX_ADAPTERS];
@@ -375,13 +351,23 @@ module_param_array(io, int, NULL, 0);
375module_param_array(irq, int, NULL, 0); 351module_param_array(irq, int, NULL, 0);
376module_param_array(dma, int, NULL, 0); 352module_param_array(dma, int, NULL, 0);
377 353
378static struct net_device *sk_isa_dev[ISATR_MAX_ADAPTERS]; 354static struct platform_device *sk_isa_dev[ISATR_MAX_ADAPTERS];
379 355
380int init_module(void) 356static struct device_driver sk_isa_driver = {
357 .name = "skisa",
358 .bus = &platform_bus_type,
359};
360
361static int __init sk_isa_init(void)
381{ 362{
382 struct net_device *dev; 363 struct net_device *dev;
364 struct platform_device *pdev;
383 int i, num = 0, err = 0; 365 int i, num = 0, err = 0;
384 366
367 err = driver_register(&sk_isa_driver);
368 if (err)
369 return err;
370
385 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { 371 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
386 dev = alloc_trdev(sizeof(struct net_local)); 372 dev = alloc_trdev(sizeof(struct net_local));
387 if (!dev) 373 if (!dev)
@@ -390,12 +376,15 @@ int init_module(void)
390 dev->base_addr = io[i]; 376 dev->base_addr = io[i];
391 dev->irq = irq[i]; 377 dev->irq = irq[i];
392 dev->dma = dma[i]; 378 dev->dma = dma[i];
393 err = setup_card(dev); 379 pdev = platform_device_register_simple("skisa",
394 380 i, NULL, 0);
381 err = setup_card(dev, &pdev->dev);
395 if (!err) { 382 if (!err) {
396 sk_isa_dev[i] = dev; 383 sk_isa_dev[i] = pdev;
384 dev_set_drvdata(&sk_isa_dev[i]->dev, dev);
397 ++num; 385 ++num;
398 } else { 386 } else {
387 platform_device_unregister(pdev);
399 free_netdev(dev); 388 free_netdev(dev);
400 } 389 }
401 } 390 }
@@ -409,23 +398,28 @@ int init_module(void)
409 return (0); 398 return (0);
410} 399}
411 400
412void cleanup_module(void) 401static void __exit sk_isa_cleanup(void)
413{ 402{
403 struct net_device *dev;
414 int i; 404 int i;
415 405
416 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { 406 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
417 struct net_device *dev = sk_isa_dev[i]; 407 struct platform_device *pdev = sk_isa_dev[i];
418 408
419 if (!dev) 409 if (!pdev)
420 continue; 410 continue;
421 411 dev = dev_get_drvdata(&pdev->dev);
422 unregister_netdev(dev); 412 unregister_netdev(dev);
423 release_region(dev->base_addr, SK_ISA_IO_EXTENT); 413 release_region(dev->base_addr, SK_ISA_IO_EXTENT);
424 free_irq(dev->irq, dev); 414 free_irq(dev->irq, dev);
425 free_dma(dev->dma); 415 free_dma(dev->dma);
426 tmsdev_term(dev); 416 tmsdev_term(dev);
427 free_netdev(dev); 417 free_netdev(dev);
418 dev_set_drvdata(&pdev->dev, NULL);
419 platform_device_unregister(pdev);
428 } 420 }
421 driver_unregister(&sk_isa_driver);
429} 422}
430#endif /* MODULE */
431 423
424module_init(sk_isa_init);
425module_exit(sk_isa_cleanup);
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 5e0b0ce98ed7..9a543fe2d0e6 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -62,6 +62,7 @@
62 * normal operation. 62 * normal operation.
63 * 30-Dec-02 JF Removed incorrect __init from 63 * 30-Dec-02 JF Removed incorrect __init from
64 * tms380tr_init_card. 64 * tms380tr_init_card.
65 * 22-Jul-05 JF Converted to dma-mapping.
65 * 66 *
66 * To do: 67 * To do:
67 * 1. Multi/Broadcast packet handling (this may have fixed itself) 68 * 1. Multi/Broadcast packet handling (this may have fixed itself)
@@ -89,7 +90,7 @@ static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, A
89#include <linux/time.h> 90#include <linux/time.h>
90#include <linux/errno.h> 91#include <linux/errno.h>
91#include <linux/init.h> 92#include <linux/init.h>
92#include <linux/pci.h> 93#include <linux/dma-mapping.h>
93#include <linux/delay.h> 94#include <linux/delay.h>
94#include <linux/netdevice.h> 95#include <linux/netdevice.h>
95#include <linux/etherdevice.h> 96#include <linux/etherdevice.h>
@@ -114,8 +115,6 @@ static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, A
114#endif 115#endif
115static unsigned int tms380tr_debug = TMS380TR_DEBUG; 116static unsigned int tms380tr_debug = TMS380TR_DEBUG;
116 117
117static struct device tms_device;
118
119/* Index to functions, as function prototypes. 118/* Index to functions, as function prototypes.
120 * Alphabetical by function name. 119 * Alphabetical by function name.
121 */ 120 */
@@ -434,7 +433,7 @@ static void tms380tr_init_net_local(struct net_device *dev)
434 skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize); 433 skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize);
435 434
436 /* data unreachable for DMA ? then use local buffer */ 435 /* data unreachable for DMA ? then use local buffer */
437 dmabuf = pci_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, PCI_DMA_FROMDEVICE); 436 dmabuf = dma_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
438 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit)) 437 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
439 { 438 {
440 tp->Rpl[i].SkbStat = SKB_DATA_COPY; 439 tp->Rpl[i].SkbStat = SKB_DATA_COPY;
@@ -638,10 +637,10 @@ static int tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device
638 /* Is buffer reachable for Busmaster-DMA? */ 637 /* Is buffer reachable for Busmaster-DMA? */
639 638
640 length = skb->len; 639 length = skb->len;
641 dmabuf = pci_map_single(tp->pdev, skb->data, length, PCI_DMA_TODEVICE); 640 dmabuf = dma_map_single(tp->pdev, skb->data, length, DMA_TO_DEVICE);
642 if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) { 641 if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) {
643 /* Copy frame to local buffer */ 642 /* Copy frame to local buffer */
644 pci_unmap_single(tp->pdev, dmabuf, length, PCI_DMA_TODEVICE); 643 dma_unmap_single(tp->pdev, dmabuf, length, DMA_TO_DEVICE);
645 dmabuf = 0; 644 dmabuf = 0;
646 i = tp->TplFree->TPLIndex; 645 i = tp->TplFree->TPLIndex;
647 buf = tp->LocalTxBuffers[i]; 646 buf = tp->LocalTxBuffers[i];
@@ -1284,9 +1283,7 @@ static int tms380tr_reset_adapter(struct net_device *dev)
1284 unsigned short count, c, count2; 1283 unsigned short count, c, count2;
1285 const struct firmware *fw_entry = NULL; 1284 const struct firmware *fw_entry = NULL;
1286 1285
1287 strncpy(tms_device.bus_id,dev->name, BUS_ID_SIZE); 1286 if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) {
1288
1289 if (request_firmware(&fw_entry, "tms380tr.bin", &tms_device) != 0) {
1290 printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n", 1287 printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n",
1291 dev->name, "tms380tr.bin"); 1288 dev->name, "tms380tr.bin");
1292 return (-1); 1289 return (-1);
@@ -2021,7 +2018,7 @@ static void tms380tr_cancel_tx_queue(struct net_local* tp)
2021 2018
2022 printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl); 2019 printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl);
2023 if (tpl->DMABuff) 2020 if (tpl->DMABuff)
2024 pci_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, PCI_DMA_TODEVICE); 2021 dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
2025 dev_kfree_skb_any(tpl->Skb); 2022 dev_kfree_skb_any(tpl->Skb);
2026 } 2023 }
2027 2024
@@ -2090,7 +2087,7 @@ static void tms380tr_tx_status_irq(struct net_device *dev)
2090 2087
2091 tp->MacStat.tx_packets++; 2088 tp->MacStat.tx_packets++;
2092 if (tpl->DMABuff) 2089 if (tpl->DMABuff)
2093 pci_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, PCI_DMA_TODEVICE); 2090 dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
2094 dev_kfree_skb_irq(tpl->Skb); 2091 dev_kfree_skb_irq(tpl->Skb);
2095 tpl->BusyFlag = 0; /* "free" TPL */ 2092 tpl->BusyFlag = 0; /* "free" TPL */
2096 } 2093 }
@@ -2209,7 +2206,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
2209 tp->MacStat.rx_errors++; 2206 tp->MacStat.rx_errors++;
2210 } 2207 }
2211 if (rpl->DMABuff) 2208 if (rpl->DMABuff)
2212 pci_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, PCI_DMA_TODEVICE); 2209 dma_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, DMA_TO_DEVICE);
2213 rpl->DMABuff = 0; 2210 rpl->DMABuff = 0;
2214 2211
2215 /* Allocate new skb for rpl */ 2212 /* Allocate new skb for rpl */
@@ -2227,7 +2224,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
2227 skb_put(rpl->Skb, tp->MaxPacketSize); 2224 skb_put(rpl->Skb, tp->MaxPacketSize);
2228 2225
2229 /* Data unreachable for DMA ? then use local buffer */ 2226 /* Data unreachable for DMA ? then use local buffer */
2230 dmabuf = pci_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, PCI_DMA_FROMDEVICE); 2227 dmabuf = dma_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
2231 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit)) 2228 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
2232 { 2229 {
2233 rpl->SkbStat = SKB_DATA_COPY; 2230 rpl->SkbStat = SKB_DATA_COPY;
@@ -2332,12 +2329,12 @@ void tmsdev_term(struct net_device *dev)
2332 struct net_local *tp; 2329 struct net_local *tp;
2333 2330
2334 tp = netdev_priv(dev); 2331 tp = netdev_priv(dev);
2335 pci_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local), 2332 dma_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local),
2336 PCI_DMA_BIDIRECTIONAL); 2333 DMA_BIDIRECTIONAL);
2337} 2334}
2338 2335
2339int tmsdev_init(struct net_device *dev, unsigned long dmalimit, 2336int tmsdev_init(struct net_device *dev, unsigned long dmalimit,
2340 struct pci_dev *pdev) 2337 struct device *pdev)
2341{ 2338{
2342 struct net_local *tms_local; 2339 struct net_local *tms_local;
2343 2340
@@ -2346,8 +2343,8 @@ int tmsdev_init(struct net_device *dev, unsigned long dmalimit,
2346 init_waitqueue_head(&tms_local->wait_for_tok_int); 2343 init_waitqueue_head(&tms_local->wait_for_tok_int);
2347 tms_local->dmalimit = dmalimit; 2344 tms_local->dmalimit = dmalimit;
2348 tms_local->pdev = pdev; 2345 tms_local->pdev = pdev;
2349 tms_local->dmabuffer = pci_map_single(pdev, (void *)tms_local, 2346 tms_local->dmabuffer = dma_map_single(pdev, (void *)tms_local,
2350 sizeof(struct net_local), PCI_DMA_BIDIRECTIONAL); 2347 sizeof(struct net_local), DMA_BIDIRECTIONAL);
2351 if (tms_local->dmabuffer + sizeof(struct net_local) > dmalimit) 2348 if (tms_local->dmabuffer + sizeof(struct net_local) > dmalimit)
2352 { 2349 {
2353 printk(KERN_INFO "%s: Memory not accessible for DMA\n", 2350 printk(KERN_INFO "%s: Memory not accessible for DMA\n",
@@ -2370,8 +2367,6 @@ int tmsdev_init(struct net_device *dev, unsigned long dmalimit,
2370 return 0; 2367 return 0;
2371} 2368}
2372 2369
2373#ifdef MODULE
2374
2375EXPORT_SYMBOL(tms380tr_open); 2370EXPORT_SYMBOL(tms380tr_open);
2376EXPORT_SYMBOL(tms380tr_close); 2371EXPORT_SYMBOL(tms380tr_close);
2377EXPORT_SYMBOL(tms380tr_interrupt); 2372EXPORT_SYMBOL(tms380tr_interrupt);
@@ -2379,6 +2374,8 @@ EXPORT_SYMBOL(tmsdev_init);
2379EXPORT_SYMBOL(tmsdev_term); 2374EXPORT_SYMBOL(tmsdev_term);
2380EXPORT_SYMBOL(tms380tr_wait); 2375EXPORT_SYMBOL(tms380tr_wait);
2381 2376
2377#ifdef MODULE
2378
2382static struct module *TMS380_module = NULL; 2379static struct module *TMS380_module = NULL;
2383 2380
2384int init_module(void) 2381int init_module(void)
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
index f2c5ba0f37a5..077f568d89d1 100644
--- a/drivers/net/tokenring/tms380tr.h
+++ b/drivers/net/tokenring/tms380tr.h
@@ -18,7 +18,7 @@ int tms380tr_open(struct net_device *dev);
18int tms380tr_close(struct net_device *dev); 18int tms380tr_close(struct net_device *dev);
19irqreturn_t tms380tr_interrupt(int irq, void *dev_id, struct pt_regs *regs); 19irqreturn_t tms380tr_interrupt(int irq, void *dev_id, struct pt_regs *regs);
20int tmsdev_init(struct net_device *dev, unsigned long dmalimit, 20int tmsdev_init(struct net_device *dev, unsigned long dmalimit,
21 struct pci_dev *pdev); 21 struct device *pdev);
22void tmsdev_term(struct net_device *dev); 22void tmsdev_term(struct net_device *dev);
23void tms380tr_wait(unsigned long time); 23void tms380tr_wait(unsigned long time);
24 24
@@ -719,7 +719,7 @@ struct s_TPL { /* Transmit Parameter List (align on even word boundaries) */
719 struct sk_buff *Skb; 719 struct sk_buff *Skb;
720 unsigned char TPLIndex; 720 unsigned char TPLIndex;
721 volatile unsigned char BusyFlag;/* Flag: TPL busy? */ 721 volatile unsigned char BusyFlag;/* Flag: TPL busy? */
722 dma_addr_t DMABuff; /* DMA IO bus address from pci_map */ 722 dma_addr_t DMABuff; /* DMA IO bus address from dma_map */
723}; 723};
724 724
725/* ---------------------Receive Functions-------------------------------* 725/* ---------------------Receive Functions-------------------------------*
@@ -1060,7 +1060,7 @@ struct s_RPL { /* Receive Parameter List */
1060 struct sk_buff *Skb; 1060 struct sk_buff *Skb;
1061 SKB_STAT SkbStat; 1061 SKB_STAT SkbStat;
1062 int RPLIndex; 1062 int RPLIndex;
1063 dma_addr_t DMABuff; /* DMA IO bus address from pci_map */ 1063 dma_addr_t DMABuff; /* DMA IO bus address from dma_map */
1064}; 1064};
1065 1065
1066/* Information that need to be kept for each board. */ 1066/* Information that need to be kept for each board. */
@@ -1091,7 +1091,7 @@ typedef struct net_local {
1091 RPL *RplTail; 1091 RPL *RplTail;
1092 unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE]; 1092 unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE];
1093 1093
1094 struct pci_dev *pdev; 1094 struct device *pdev;
1095 int DataRate; 1095 int DataRate;
1096 unsigned char ScbInUse; 1096 unsigned char ScbInUse;
1097 unsigned short CMDqueue; 1097 unsigned short CMDqueue;
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index 2e18c0a46482..0014aef5c744 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -100,7 +100,7 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
100 unsigned int pci_irq_line; 100 unsigned int pci_irq_line;
101 unsigned long pci_ioaddr; 101 unsigned long pci_ioaddr;
102 struct card_info *cardinfo = &card_info_table[ent->driver_data]; 102 struct card_info *cardinfo = &card_info_table[ent->driver_data];
103 103
104 if (versionprinted++ == 0) 104 if (versionprinted++ == 0)
105 printk("%s", version); 105 printk("%s", version);
106 106
@@ -143,7 +143,7 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
143 printk(":%2.2x", dev->dev_addr[i]); 143 printk(":%2.2x", dev->dev_addr[i]);
144 printk("\n"); 144 printk("\n");
145 145
146 ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev); 146 ret = tmsdev_init(dev, PCI_MAX_ADDRESS, &pdev->dev);
147 if (ret) { 147 if (ret) {
148 printk("%s: unable to get memory for dev->priv.\n", dev->name); 148 printk("%s: unable to get memory for dev->priv.\n", dev->name);
149 goto err_out_irq; 149 goto err_out_irq;
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index aabcdc2be05e..9c2d07cde010 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -4322,36 +4322,36 @@ static const struct iw_priv_args orinoco_privtab[] = {
4322 */ 4322 */
4323 4323
4324static const iw_handler orinoco_handler[] = { 4324static const iw_handler orinoco_handler[] = {
4325 [SIOCSIWCOMMIT-SIOCIWFIRST] (iw_handler) orinoco_ioctl_commit, 4325 [SIOCSIWCOMMIT-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_commit,
4326 [SIOCGIWNAME -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getname, 4326 [SIOCGIWNAME -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getname,
4327 [SIOCSIWFREQ -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setfreq, 4327 [SIOCSIWFREQ -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfreq,
4328 [SIOCGIWFREQ -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getfreq, 4328 [SIOCGIWFREQ -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfreq,
4329 [SIOCSIWMODE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setmode, 4329 [SIOCSIWMODE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setmode,
4330 [SIOCGIWMODE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getmode, 4330 [SIOCGIWMODE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getmode,
4331 [SIOCSIWSENS -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setsens, 4331 [SIOCSIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setsens,
4332 [SIOCGIWSENS -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getsens, 4332 [SIOCGIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getsens,
4333 [SIOCGIWRANGE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getiwrange, 4333 [SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwrange,
4334 [SIOCSIWSPY -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setspy, 4334 [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setspy,
4335 [SIOCGIWSPY -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getspy, 4335 [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getspy,
4336 [SIOCSIWAP -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setwap, 4336 [SIOCSIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setwap,
4337 [SIOCGIWAP -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getwap, 4337 [SIOCGIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getwap,
4338 [SIOCSIWSCAN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setscan, 4338 [SIOCSIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setscan,
4339 [SIOCGIWSCAN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getscan, 4339 [SIOCGIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getscan,
4340 [SIOCSIWESSID -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setessid, 4340 [SIOCSIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setessid,
4341 [SIOCGIWESSID -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getessid, 4341 [SIOCGIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getessid,
4342 [SIOCSIWNICKN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setnick, 4342 [SIOCSIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setnick,
4343 [SIOCGIWNICKN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getnick, 4343 [SIOCGIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getnick,
4344 [SIOCSIWRATE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setrate, 4344 [SIOCSIWRATE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrate,
4345 [SIOCGIWRATE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getrate, 4345 [SIOCGIWRATE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrate,
4346 [SIOCSIWRTS -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setrts, 4346 [SIOCSIWRTS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrts,
4347 [SIOCGIWRTS -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getrts, 4347 [SIOCGIWRTS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrts,
4348 [SIOCSIWFRAG -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setfrag, 4348 [SIOCSIWFRAG -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfrag,
4349 [SIOCGIWFRAG -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getfrag, 4349 [SIOCGIWFRAG -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfrag,
4350 [SIOCGIWRETRY -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getretry, 4350 [SIOCGIWRETRY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getretry,
4351 [SIOCSIWENCODE-SIOCIWFIRST] (iw_handler) orinoco_ioctl_setiwencode, 4351 [SIOCSIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setiwencode,
4352 [SIOCGIWENCODE-SIOCIWFIRST] (iw_handler) orinoco_ioctl_getiwencode, 4352 [SIOCGIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwencode,
4353 [SIOCSIWPOWER -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setpower, 4353 [SIOCSIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setpower,
4354 [SIOCGIWPOWER -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getpower, 4354 [SIOCGIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getpower,
4355}; 4355};
4356 4356
4357 4357
@@ -4359,15 +4359,15 @@ static const iw_handler orinoco_handler[] = {
4359 Added typecasting since we no longer use iwreq_data -- Moustafa 4359 Added typecasting since we no longer use iwreq_data -- Moustafa
4360 */ 4360 */
4361static const iw_handler orinoco_private_handler[] = { 4361static const iw_handler orinoco_private_handler[] = {
4362 [0] (iw_handler) orinoco_ioctl_reset, 4362 [0] = (iw_handler) orinoco_ioctl_reset,
4363 [1] (iw_handler) orinoco_ioctl_reset, 4363 [1] = (iw_handler) orinoco_ioctl_reset,
4364 [2] (iw_handler) orinoco_ioctl_setport3, 4364 [2] = (iw_handler) orinoco_ioctl_setport3,
4365 [3] (iw_handler) orinoco_ioctl_getport3, 4365 [3] = (iw_handler) orinoco_ioctl_getport3,
4366 [4] (iw_handler) orinoco_ioctl_setpreamble, 4366 [4] = (iw_handler) orinoco_ioctl_setpreamble,
4367 [5] (iw_handler) orinoco_ioctl_getpreamble, 4367 [5] = (iw_handler) orinoco_ioctl_getpreamble,
4368 [6] (iw_handler) orinoco_ioctl_setibssport, 4368 [6] = (iw_handler) orinoco_ioctl_setibssport,
4369 [7] (iw_handler) orinoco_ioctl_getibssport, 4369 [7] = (iw_handler) orinoco_ioctl_getibssport,
4370 [9] (iw_handler) orinoco_ioctl_getrid, 4370 [9] = (iw_handler) orinoco_ioctl_getrid,
4371}; 4371};
4372 4372
4373static const struct iw_handler_def orinoco_handler_def = { 4373static const struct iw_handler_def orinoco_handler_def = {
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index a0ab26aab450..d7021c391b2b 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -408,6 +408,8 @@ struct ethtool_ops {
408#define SUPPORTED_FIBRE (1 << 10) 408#define SUPPORTED_FIBRE (1 << 10)
409#define SUPPORTED_BNC (1 << 11) 409#define SUPPORTED_BNC (1 << 11)
410#define SUPPORTED_10000baseT_Full (1 << 12) 410#define SUPPORTED_10000baseT_Full (1 << 12)
411#define SUPPORTED_Pause (1 << 13)
412#define SUPPORTED_Asym_Pause (1 << 14)
411 413
412/* Indicates what features are advertised by the interface. */ 414/* Indicates what features are advertised by the interface. */
413#define ADVERTISED_10baseT_Half (1 << 0) 415#define ADVERTISED_10baseT_Half (1 << 0)
@@ -423,6 +425,8 @@ struct ethtool_ops {
423#define ADVERTISED_FIBRE (1 << 10) 425#define ADVERTISED_FIBRE (1 << 10)
424#define ADVERTISED_BNC (1 << 11) 426#define ADVERTISED_BNC (1 << 11)
425#define ADVERTISED_10000baseT_Full (1 << 12) 427#define ADVERTISED_10000baseT_Full (1 << 12)
428#define ADVERTISED_Pause (1 << 13)
429#define ADVERTISED_Asym_Pause (1 << 14)
426 430
427/* The following are all involved in forcing a particular link 431/* The following are all involved in forcing a particular link
428 * mode for the device for setting things. When getting the 432 * mode for the device for setting things. When getting the
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 374b615ea9ea..9b8d0476988a 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -22,6 +22,7 @@
22#define MII_EXPANSION 0x06 /* Expansion register */ 22#define MII_EXPANSION 0x06 /* Expansion register */
23#define MII_CTRL1000 0x09 /* 1000BASE-T control */ 23#define MII_CTRL1000 0x09 /* 1000BASE-T control */
24#define MII_STAT1000 0x0a /* 1000BASE-T status */ 24#define MII_STAT1000 0x0a /* 1000BASE-T status */
25#define MII_ESTATUS 0x0f /* Extended Status */
25#define MII_DCOUNTER 0x12 /* Disconnect counter */ 26#define MII_DCOUNTER 0x12 /* Disconnect counter */
26#define MII_FCSCOUNTER 0x13 /* False carrier counter */ 27#define MII_FCSCOUNTER 0x13 /* False carrier counter */
27#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */ 28#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */
@@ -54,7 +55,10 @@
54#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ 55#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
55#define BMSR_RFAULT 0x0010 /* Remote fault detected */ 56#define BMSR_RFAULT 0x0010 /* Remote fault detected */
56#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */ 57#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */
57#define BMSR_RESV 0x07c0 /* Unused... */ 58#define BMSR_RESV 0x00c0 /* Unused... */
59#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
60#define BMSR_100FULL2 0x0200 /* Can do 100BASE-T2 HDX */
61#define BMSR_100HALF2 0x0400 /* Can do 100BASE-T2 FDX */
58#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ 62#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
59#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ 63#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
60#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ 64#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
@@ -114,6 +118,9 @@
114#define EXPANSION_MFAULTS 0x0010 /* Multiple faults detected */ 118#define EXPANSION_MFAULTS 0x0010 /* Multiple faults detected */
115#define EXPANSION_RESV 0xffe0 /* Unused... */ 119#define EXPANSION_RESV 0xffe0 /* Unused... */
116 120
121#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
122#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
123
117/* N-way test register. */ 124/* N-way test register. */
118#define NWAYTEST_RESV1 0x00ff /* Unused... */ 125#define NWAYTEST_RESV1 0x00ff /* Unused... */
119#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */ 126#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */
diff --git a/include/linux/phy.h b/include/linux/phy.h
new file mode 100644
index 000000000000..4f2b5effc16b
--- /dev/null
+++ b/include/linux/phy.h
@@ -0,0 +1,360 @@
1/*
2 * include/linux/phy.h
3 *
4 * Framework and drivers for configuring and reading different PHYs
5 * Based on code in sungem_phy.c and gianfar_phy.c
6 *
7 * Author: Andy Fleming
8 *
9 * Copyright (c) 2004 Freescale Semiconductor, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 */
17
18#ifndef __PHY_H
19#define __PHY_H
20
21#include <linux/spinlock.h>
22#include <linux/device.h>
23
/* Feature mask for a basic 10/100 PHY: every 10/100 speed/duplex
 * combination, autonegotiation, twisted-pair media and MII access.
 * The values are the SUPPORTED_* bits from linux/ethtool.h. */
#define PHY_BASIC_FEATURES	(SUPPORTED_10baseT_Half | \
				 SUPPORTED_10baseT_Full | \
				 SUPPORTED_100baseT_Half | \
				 SUPPORTED_100baseT_Full | \
				 SUPPORTED_Autoneg | \
				 SUPPORTED_TP | \
				 SUPPORTED_MII)

/* Feature mask for a gigabit PHY: everything a basic PHY supports,
 * plus both 1000BASE-T duplex modes. */
#define PHY_GBIT_FEATURES	(PHY_BASIC_FEATURES | \
				 SUPPORTED_1000baseT_Half | \
				 SUPPORTED_1000baseT_Full)

/* Set phydev->irq to PHY_POLL if interrupts are not supported,
 * or not desired for this PHY.  Set to PHY_IGNORE_INTERRUPT if
 * the attached driver handles the interrupt itself. */
#define PHY_POLL		-1
#define PHY_IGNORE_INTERRUPT	-2

/* Bits for phy_driver.flags */
#define PHY_HAS_INTERRUPT	0x00000001	/* PHY can raise a link interrupt */
#define PHY_HAS_MAGICANEG	0x00000002	/* PHY can retrigger ("magic") aneg */

/* Maximum number of MII buses the mdio_bus layer will register */
#define MII_BUS_MAX		4


/* NOTE(review): units differ between these timeouts -- PHY_INIT_TIMEOUT
 * appears to be a busy-wait loop count, while the others look like
 * state-machine timer ticks; confirm against phy.c before reuse. */
#define PHY_INIT_TIMEOUT	100000
#define PHY_STATE_TIME		1
#define PHY_FORCE_TIMEOUT	10
#define PHY_AN_TIMEOUT		10

/* MDIO addresses are 5 bits wide, so valid PHY addresses are 0-31 */
#define PHY_MAX_ADDR		32
/* The Bus class for PHYs.  Devices which provide access to
 * PHYs (e.g. an ethernet MAC's MDIO controller) should register
 * using this structure. */
struct mii_bus {
	const char *name;	/* human-readable bus name */
	int id;			/* bus number, used to identify this bus */
	void *priv;		/* private data for the bus provider */

	/* Accessors supplied by the bus provider.  NOTE(review):
	 * presumably read returns the register value or a negative
	 * errno, and write/reset return 0 or a negative errno --
	 * confirm against the bus implementations. */
	int (*read)(struct mii_bus *bus, int phy_id, int regnum);
	int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val);
	int (*reset)(struct mii_bus *bus);

	/* A lock to ensure that only one thing can read/write
	 * the MDIO bus at a time */
	spinlock_t mdio_lock;

	/* device instance of the bus-providing hardware */
	struct device *dev;

	/* list of all PHYs on bus, indexed by PHY address */
	struct phy_device *phy_map[PHY_MAX_ADDR];

	/* Pointer to an array of interrupts, each PHY's
	 * interrupt at the index matching its address */
	int *irq;
};
79
/* Values for the phy_device.interrupts field */
#define PHY_INTERRUPT_DISABLED	0x0
#define PHY_INTERRUPT_ENABLED	0x80000000

/* PHY state machine states:
 *
 * DOWN: PHY device and driver are not ready for anything.  probe
 * should be called if and only if the PHY is in this state,
 * given that the PHY device exists.
 * - PHY driver probe function will, depending on the PHY, set
 *   the state to STARTING or READY
 *
 * STARTING: PHY device is coming up, and the ethernet driver is
 * not ready.  PHY drivers may set this in the probe function.
 * If they do, they are responsible for making sure the state is
 * eventually set to indicate whether the PHY is UP or READY,
 * depending on the state when the PHY is done starting up.
 * - PHY driver will set the state to READY
 * - start will set the state to PENDING
 *
 * READY: PHY is ready to send and receive packets, but the
 * controller is not.  By default, PHYs which do not implement
 * probe will be set to this state by phy_probe().  If the PHY
 * driver knows the PHY is ready, and the PHY state is STARTING,
 * then it sets this state.
 * - start will set the state to UP
 *
 * PENDING: PHY device is coming up, but the ethernet driver is
 * ready.  phy_start will set this state if the PHY state is
 * STARTING.
 * - PHY driver will set the state to UP when the PHY is ready
 *
 * UP: The PHY and attached device are ready to do work.
 * Interrupts should be started here.
 * - timer moves to AN
 *
 * AN: The PHY is currently negotiating the link state.  Link is
 * therefore down for now.  phy_timer will set this state when it
 * detects the state is UP.  config_aneg will set this state
 * whenever called with phydev->autoneg set to AUTONEG_ENABLE.
 * - If autonegotiation finishes, but there's no link, it sets
 *   the state to NOLINK.
 * - If aneg finishes with link, it sets the state to RUNNING,
 *   and calls adjust_link
 * - If autonegotiation did not finish after an arbitrary amount
 *   of time, autonegotiation should be tried again if the PHY
 *   supports "magic" autonegotiation (back to AN)
 * - If it didn't finish, and no magic_aneg, move to FORCING.
 *
 * NOLINK: PHY is up, but not currently plugged in.
 * - If the timer notes that the link comes back, we move to RUNNING
 * - config_aneg moves to AN
 * - phy_stop moves to HALTED
 *
 * FORCING: PHY is being configured with forced settings
 * - if link is up, move to RUNNING
 * - If link is down, we drop to the next highest setting, and
 *   retry (FORCING) after a timeout
 * - phy_stop moves to HALTED
 *
 * RUNNING: PHY is currently up, running, and possibly sending
 * and/or receiving packets
 * - timer will set CHANGELINK if we're polling (this ensures the
 *   link state is polled every other cycle of this state machine,
 *   which makes it every other second)
 * - irq will set CHANGELINK
 * - config_aneg will set AN
 * - phy_stop moves to HALTED
 *
 * CHANGELINK: PHY experienced a change in link state
 * - timer moves to RUNNING if link
 * - timer moves to NOLINK if the link is down
 * - phy_stop moves to HALTED
 *
 * HALTED: PHY is up, but no polling or interrupts are done.  Or
 * PHY is in an error state.
 * - phy_start moves to RESUMING
 *
 * RESUMING: PHY was halted, but now wants to run again.
 * - If we are forcing, or aneg is done, timer moves to RUNNING
 * - If aneg is not done, timer moves to AN
 * - phy_stop moves to HALTED
 */
enum phy_state {
	PHY_DOWN = 0,
	PHY_STARTING,
	PHY_READY,
	PHY_PENDING,
	PHY_UP,
	PHY_AN,
	PHY_RUNNING,
	PHY_NOLINK,
	PHY_FORCING,
	PHY_CHANGELINK,
	PHY_HALTED,
	PHY_RESUMING
};
177
/* phy_device: An instance of a PHY
 *
 * drv: Pointer to the driver for this PHY instance
 * bus: Pointer to the bus this PHY is on
 * dev: driver model device structure for this PHY
 * phy_id: UID for this device found during discovery
 * state: state of the PHY for management purposes
 * dev_flags: Device-specific flags used by the PHY driver.
 * addr: Bus address of PHY
 * link_timeout: The number of timer firings to wait before
 * giving up on the current attempt at acquiring a link
 * irq: IRQ number of the PHY's interrupt (-1 if none)
 * phy_timer: The timer for handling the state machine
 * phy_queue: A work_queue for the interrupt
 * attached_dev: The attached enet driver's device instance ptr
 * adjust_link: Callback for the enet controller to respond to
 * changes in the link state.
 * adjust_state: Callback for the enet driver to respond to
 * changes in the state machine.
 *
 * speed, duplex, pause, supported, advertising, and
 * autoneg are used like in mii_if_info
 *
 * interrupts currently only supports enabled or disabled,
 * but could be changed in the future to support enabling
 * and disabling specific interrupts
 *
 * Contains some infrastructure for polling and interrupt
 * handling, as well as handling shifts in PHY hardware state
 */
struct phy_device {
	/* Information about the PHY type */
	/* And management functions */
	struct phy_driver *drv;

	struct mii_bus *bus;

	/* embedded driver-model device; see to_phy_device() */
	struct device dev;

	/* UID read from the PHY's ID registers during discovery */
	u32 phy_id;

	enum phy_state state;

	u32 dev_flags;

	/* Bus address of the PHY (0-31; PHY_MAX_ADDR is the
	 * exclusive upper bound) */
	int addr;

	/* forced speed & duplex (no autoneg)
	 * partner speed & duplex & pause (autoneg)
	 */
	int speed;
	int duplex;
	int pause;
	int asym_pause;

	/* The most recently read link state */
	int link;

	/* Enabled Interrupts (PHY_INTERRUPT_ENABLED/DISABLED) */
	u32 interrupts;

	/* Union of PHY and Attached devices' supported modes */
	/* See mii.h for more info */
	u32 supported;
	u32 advertising;

	/* AUTONEG_ENABLE or AUTONEG_DISABLE, as in mii_if_info */
	int autoneg;

	/* remaining timer ticks before the current link attempt
	 * is abandoned */
	int link_timeout;

	/* Interrupt number for this PHY
	 * -1 means no interrupt */
	int irq;

	/* private data pointer */
	/* For use by PHYs to maintain extra state */
	void *priv;

	/* Interrupt and Polling infrastructure */
	struct work_struct phy_queue;
	struct timer_list phy_timer;

	/* protects this structure's mutable state; NOTE(review):
	 * exact locking scope should be confirmed against phy.c */
	spinlock_t lock;

	struct net_device *attached_dev;

	void (*adjust_link)(struct net_device *dev);

	void (*adjust_state)(struct net_device *dev);
};
/* Recover the phy_device from its embedded struct device */
#define to_phy_device(d) container_of(d, struct phy_device, dev)
270
/* struct phy_driver: Driver structure for a particular PHY type
 *
 * phy_id: The result of reading the UID registers of this PHY
 * type, and ANDing them with the phy_id_mask.  This driver
 * only works for PHYs with IDs which match this field
 * name: The friendly name of this PHY type
 * phy_id_mask: Defines the important bits of the phy_id
 * features: A list of features (speed, duplex, etc) supported
 * by this PHY
 * flags: A bitfield defining certain other features this PHY
 * supports (like interrupts)
 *
 * The drivers must implement config_aneg and read_status.  All
 * other functions are optional.  Note that none of these
 * functions should be called from interrupt time.  The goal is
 * for the bus read/write functions to be able to block when the
 * bus transaction is happening, and be freed up by an interrupt
 * (The MPC85xx has this ability, though it is not currently
 * supported in the driver).
 */
struct phy_driver {
	u32 phy_id;
	char *name;
	unsigned int phy_id_mask;
	u32 features;		/* SUPPORTED_* bits, e.g. PHY_BASIC_FEATURES */
	u32 flags;		/* PHY_HAS_INTERRUPT, PHY_HAS_MAGICANEG, ... */

	/* Called to initialize the PHY,
	 * including after a reset */
	int (*config_init)(struct phy_device *phydev);

	/* Called during discovery.  Used to set
	 * up device-specific structures, if any */
	int (*probe)(struct phy_device *phydev);

	/* PHY Power Management */
	int (*suspend)(struct phy_device *phydev);
	int (*resume)(struct phy_device *phydev);

	/* Configures the advertisement and resets
	 * autonegotiation if phydev->autoneg is on,
	 * forces the speed to the current settings in phydev
	 * if phydev->autoneg is off.  Mandatory. */
	int (*config_aneg)(struct phy_device *phydev);

	/* Determines the negotiated speed and duplex.  Mandatory. */
	int (*read_status)(struct phy_device *phydev);

	/* Clears any pending interrupts */
	int (*ack_interrupt)(struct phy_device *phydev);

	/* Enables or disables interrupts */
	int (*config_intr)(struct phy_device *phydev);

	/* Clears up any memory if needed */
	void (*remove)(struct phy_device *phydev);

	/* embedded driver-model driver; see to_phy_driver() */
	struct device_driver driver;
};
/* Recover the phy_driver from its embedded struct device_driver */
#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
331
/* MDIO register accessors for a PHY device.  NOTE(review):
 * presumably phy_read returns the register value or a negative
 * errno, and phy_write returns 0 or a negative errno, matching
 * the mii_bus read/write contract -- confirm in phy.c. */
int phy_read(struct phy_device *phydev, u16 regnum);
int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
/* Probe the bus at the given address and, if a PHY responds,
 * allocate and return a phy_device for it */
struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
/* Invoke the driver's ack_interrupt / config_intr callbacks */
int phy_clear_interrupt(struct phy_device *phydev);
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
338static inline int phy_read_status(struct phy_device *phydev) {
339 return phydev->drv->read_status(phydev);
340}
341
/* Generic MII implementations usable as-is by most PHY drivers */
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
int genphy_config_aneg(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
int genphy_read_status(struct phy_device *phydev);
/* PHY driver registration with the mdio bus */
void phy_driver_unregister(struct phy_driver *drv);
int phy_driver_register(struct phy_driver *new_driver);
/* State-machine and link management entry points for MAC drivers */
void phy_prepare_link(struct phy_device *phydev,
		void (*adjust_link)(struct net_device *));
void phy_start_machine(struct phy_device *phydev,
		void (*handler)(struct net_device *));
void phy_stop_machine(struct phy_device *phydev);
/* ethtool/ioctl helpers MAC drivers can forward to */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_mii_ioctl(struct phy_device *phydev,
		struct mii_ioctl_data *mii_data, int cmd);

/* Bus type all MDIO-attached PHY devices hang off of */
extern struct bus_type mdio_bus_type;
#endif /* __PHY_H */