author    Linus Torvalds <torvalds@g5.osdl.org>  2005-08-29 13:04:37 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-08-29 13:04:37 -0400
commit    3d963f5bb1949af53a37acf36d3b12e97ca9b1e5 (patch)
tree      9449490978cdb7858a7c713ee88f15ffc26a6d71
parent    5be1d85c208f135fc88f972f91b91a879b702b40 (diff)
parent    e13934563db047043ccead26412f552375cea90c (diff)
Merge refs/heads/upstream from master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
-rw-r--r--  Documentation/networking/phy.txt | 288
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/Space.c | 12
-rw-r--r--  drivers/net/bonding/bond_alb.c | 17
-rw-r--r--  drivers/net/bonding/bond_main.c | 58
-rw-r--r--  drivers/net/bonding/bonding.h | 3
-rw-r--r--  drivers/net/e1000/e1000_main.c | 4
-rw-r--r--  drivers/net/eepro100.c | 8
-rw-r--r--  drivers/net/forcedeth.c | 582
-rw-r--r--  drivers/net/hamradio/Kconfig | 2
-rw-r--r--  drivers/net/hamradio/baycom_epp.c | 3
-rw-r--r--  drivers/net/hamradio/baycom_par.c | 3
-rw-r--r--  drivers/net/hamradio/baycom_ser_fdx.c | 3
-rw-r--r--  drivers/net/hamradio/baycom_ser_hdx.c | 3
-rw-r--r--  drivers/net/hamradio/mkiss.c | 1086
-rw-r--r--  drivers/net/ixgb/ixgb.h | 2
-rw-r--r--  drivers/net/ixgb/ixgb_ee.c | 170
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c | 59
-rw-r--r--  drivers/net/ixgb/ixgb_hw.h | 9
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 53
-rw-r--r--  drivers/net/jazzsonic.c | 186
-rw-r--r--  drivers/net/loopback.c | 22
-rw-r--r--  drivers/net/macsonic.c | 538
-rw-r--r--  drivers/net/mv643xx_eth.c | 29
-rw-r--r--  drivers/net/mv643xx_eth.h | 4
-rw-r--r--  drivers/net/pci-skeleton.c | 6
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 25
-rw-r--r--  drivers/net/phy/Kconfig | 57
-rw-r--r--  drivers/net/phy/Makefile | 10
-rw-r--r--  drivers/net/phy/cicada.c | 134
-rw-r--r--  drivers/net/phy/davicom.c | 195
-rw-r--r--  drivers/net/phy/lxt.c | 179
-rw-r--r--  drivers/net/phy/marvell.c | 140
-rw-r--r--  drivers/net/phy/mdio_bus.c | 176
-rw-r--r--  drivers/net/phy/phy.c | 871
-rw-r--r--  drivers/net/phy/phy_device.c | 696
-rw-r--r--  drivers/net/phy/qsemi.c | 143
-rw-r--r--  drivers/net/r8169.c | 1
-rw-r--r--  drivers/net/s2io-regs.h | 87
-rw-r--r--  drivers/net/s2io.c | 3085
-rw-r--r--  drivers/net/s2io.h | 364
-rw-r--r--  drivers/net/skge.c | 65
-rw-r--r--  drivers/net/skge.h | 19
-rw-r--r--  drivers/net/sonic.c | 676
-rw-r--r--  drivers/net/sonic.h | 460
-rw-r--r--  drivers/net/tokenring/Kconfig | 4
-rw-r--r--  drivers/net/tokenring/abyss.c | 2
-rw-r--r--  drivers/net/tokenring/madgemc.c | 521
-rw-r--r--  drivers/net/tokenring/proteon.c | 104
-rw-r--r--  drivers/net/tokenring/skisa.c | 104
-rw-r--r--  drivers/net/tokenring/tms380tr.c | 46
-rw-r--r--  drivers/net/tokenring/tms380tr.h | 9
-rw-r--r--  drivers/net/tokenring/tmspci.c | 4
-rw-r--r--  drivers/net/wan/cycx_drv.c | 24
-rw-r--r--  drivers/net/wireless/orinoco.c | 78
-rw-r--r--  include/linux/ethtool.h | 4
-rw-r--r--  include/linux/mii.h | 9
-rw-r--r--  include/linux/phy.h | 377
59 files changed, 8103 insertions, 3719 deletions
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
new file mode 100644
index 000000000000..29ccae409031
--- /dev/null
+++ b/Documentation/networking/phy.txt
@@ -0,0 +1,288 @@
1
2-------
3PHY Abstraction Layer
4(Updated 2005-07-21)
5
6Purpose
7
8 Most network devices consist of a set of registers which provide an interface
9 to a MAC layer, which communicates with the physical connection through a
10 PHY. The PHY concerns itself with negotiating link parameters with the link
11 partner on the other side of the network connection (typically, an ethernet
12 cable), and provides a register interface to allow drivers to determine what
13 settings were chosen, and to configure what settings are allowed.
14
15 While these devices are distinct from the network devices, and conform to a
16 standard layout for the registers, it has been common practice to integrate
17 the PHY management code with the network driver. This has resulted in large
18 amounts of redundant code. Also, on embedded systems with multiple (and
19 sometimes quite different) ethernet controllers connected to the same
20 management bus, it is difficult to ensure safe use of the bus.
21
22 Since the PHYs are devices, and the management busses through which they are
23 accessed are, in fact, busses, the PHY Abstraction Layer treats them as such.
24 In doing so, it has these goals:
25
26 1) Increase code-reuse
27 2) Increase overall code-maintainability
28 3) Speed development time for new network drivers, and for new systems
29
30 Basically, this layer is meant to provide an interface to PHY devices which
31 allows network driver writers to write as little code as possible, while
32 still providing a full feature set.
33
34The MDIO bus
35
36 Most network devices are connected to a PHY by means of a management bus.
37 Different devices use different busses (though some share common interfaces).
38 In order to take advantage of the PAL, each bus interface needs to be
39 registered as a distinct device.
40
41 1) read and write functions must be implemented. Their prototypes are:
42
43 int write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
44 int read(struct mii_bus *bus, int mii_id, int regnum);
45
46 mii_id is the address on the bus for the PHY, and regnum is the register
47 number. These functions are guaranteed not to be called from interrupt
48 time, so it is safe for them to block, waiting for an interrupt to signal
49 the operation is complete.
50
51 2) A reset function is necessary. This is used to return the bus to an
52 initialized state.
53
54 3) A probe function is needed. This function should set up anything the bus
55 driver needs, set up the mii_bus structure, and register with the PAL using
56 mdiobus_register. Similarly, there's a remove function to undo all of
57 that (use mdiobus_unregister).
58
59 4) Like any driver, the device_driver structure must be configured, and
60 init/exit functions are used to register the driver.
61
62 5) The bus must also be declared somewhere as a device, and registered.
63
64 For an example of how one driver implemented an mdio bus driver, see
65 drivers/net/gianfar_mii.c and arch/ppc/syslib/mpc85xx_devices.c.
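
 To make the steps above concrete, here is a minimal, hypothetical bus driver
 sketch. Only the read/write/reset callback shapes and the mdiobus_register()/
 mdiobus_unregister() calls come from the PAL as described above; everything
 prefixed foo_ (the register offset, the priv structure, how the probe code is
 reached) is invented for illustration, and the remaining mii_bus fields a
 real driver must fill in are only noted in comments.

   /* Hypothetical MDIO bus driver sketch; foo_* names are made up. */
   #include <linux/kernel.h>
   #include <linux/phy.h>
   #include <asm/io.h>

   #define FOO_MDIO_DATA   0x10            /* made-up register offset */

   struct foo_mdio_priv {
           void __iomem *regs;             /* mapped controller registers */
   };

   static struct foo_mdio_priv foo_priv;

   /* Neither callback is ever called from interrupt time, so a real
    * implementation may sleep until the transaction completes. */
   static int foo_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
   {
           struct foo_mdio_priv *p = bus->priv;

           /* a real driver starts the read for PHY 'mii_id', register
            * 'regnum', waits for completion, then returns the value */
           return readw(p->regs + FOO_MDIO_DATA);
   }

   static int foo_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                             u16 value)
   {
           struct foo_mdio_priv *p = bus->priv;

           writew(value, p->regs + FOO_MDIO_DATA);
           return 0;
   }

   static int foo_mdio_reset(struct mii_bus *bus)
   {
           /* return the bus to a sane, initialized state */
           return 0;
   }

   static struct mii_bus foo_mdio_bus = {
           .name   = "foo MDIO bus",
           .read   = foo_mdio_read,
           .write  = foo_mdio_write,
           .reset  = foo_mdio_reset,
           .priv   = &foo_priv,
           /* a real driver also fills in the bus id, the per-PHY irq
            * table, and the parent device before registering */
   };

   /* Called from the bus device's probe routine (step 3 above), after
    * foo_priv.regs has been ioremap()ed. */
   static int foo_mdio_probe(void)
   {
           return mdiobus_register(&foo_mdio_bus);
   }

   static void foo_mdio_remove(void)
   {
           mdiobus_unregister(&foo_mdio_bus);
   }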
66
67Connecting to a PHY
68
69 Sometime during startup, the network driver needs to establish a connection
70 between the PHY device and the network device. At this time, the PHY's bus
71 and drivers must all have been loaded, so it is ready for the connection.
72 At this point, there are several ways to connect to the PHY:
73
74 1) The PAL handles everything, and only calls the network driver when
75 the link state changes, so it can react.
76
77 2) The PAL handles everything except interrupts (usually because the
78 controller has the interrupt registers).
79
80 3) The PAL handles everything, but checks in with the driver every second,
81 allowing the network driver to react first to any changes before the PAL
82 does.
83
84 4) The PAL serves only as a library of functions, with the network device
85 manually calling functions to update status and configure the PHY.
86
87
88Letting the PHY Abstraction Layer do Everything
89
90 If you choose option 1 (the hope is that every driver can, but the PAL is
91 still useful to drivers that can't), connecting to the PHY is simple:
92
93 First, you need a function to react to changes in the link state. This
94 function has the following prototype:
95
96 static void adjust_link(struct net_device *dev);
97
98 Next, you need to know the device name of the PHY connected to this device.
99 The name will look something like "phy0:0", where the first number is the
100 bus id, and the second is the PHY's address on that bus.
101
102 Now, to connect, just call this function:
103
104 phydev = phy_connect(dev, phy_name, &adjust_link, flags);
105
106 phydev is a pointer to the phy_device structure which represents the PHY. If
107 phy_connect is successful, it will return the pointer. dev, here, is the
108 pointer to your net_device. Once done, this function will have started the
109 PHY's software state machine, and registered for the PHY's interrupt, if it
110 has one. The phydev structure will be populated with information about the
111 current state, though the PHY will not yet be truly operational at this
112 point.
113
114 flags is a u32 which can optionally contain phy-specific flags.
115 This is useful if the system has put hardware restrictions on
116 the PHY/controller, of which the PHY needs to be aware.
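
 For illustration, a hedged sketch of this connect step follows. phy_connect()
 and the adjust_link() prototype are exactly as described above; the "phy0:0"
 name, the foo_private layout, and the IS_ERR() error handling are assumptions
 made for the example.

   #include <linux/err.h>
   #include <linux/netdevice.h>
   #include <linux/phy.h>

   struct foo_private {
           struct phy_device *phydev;
           int old_speed;
           int old_duplex;
           int old_link;
   };

   /* Called by the PAL whenever the link state changes. */
   static void adjust_link(struct net_device *dev)
   {
           struct foo_private *priv = netdev_priv(dev);
           struct phy_device *phydev = priv->phydev;

           if (phydev->link != priv->old_link ||
               phydev->speed != priv->old_speed ||
               phydev->duplex != priv->old_duplex) {
                   /* reprogram the MAC for the new speed/duplex here */
                   priv->old_link = phydev->link;
                   priv->old_speed = phydev->speed;
                   priv->old_duplex = phydev->duplex;
                   phy_print_status(phydev);
           }
   }

   static int foo_connect_phy(struct net_device *dev)
   {
           struct foo_private *priv = netdev_priv(dev);
           struct phy_device *phydev;

           /* "phy0:0" = bus 0, PHY address 0; use what your board has */
           phydev = phy_connect(dev, "phy0:0", &adjust_link, 0);
           if (IS_ERR(phydev))
                   return PTR_ERR(phydev);

           priv->phydev = phydev;
           return 0;
   }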
117
118 Now just make sure that phydev->supported and phydev->advertising have any
119 values pruned from them which don't make sense for your controller (a 10/100
120 controller may be connected to a gigabit-capable PHY, so you would need to
121 mask off SUPPORTED_1000baseT*). See include/linux/ethtool.h for definitions
122 of these bitfields. Note that you should not SET any bits, or the PHY may
123 be put into an unsupported state.
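
 A short example of that pruning for a 10/100-only MAC (the SUPPORTED_* bits
 are the ethtool definitions mentioned above; copying supported into
 advertising afterwards is a common, but not required, choice):

   /* Prune gigabit modes a 10/100 controller cannot use.  Only clear
    * bits here -- never set bits the PHY did not report. */
   phydev->supported &= ~(SUPPORTED_1000baseT_Half |
                          SUPPORTED_1000baseT_Full);
   phydev->advertising = phydev->supported;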
124
125 Lastly, once the controller is ready to handle network traffic, you call
126 phy_start(phydev). This tells the PAL that you are ready, and configures the
127 PHY to connect to the network. If you want to handle your own interrupts,
128 just set phydev->irq to PHY_IGNORE_INTERRUPT before you call phy_start.
129 Similarly, if you don't want to use interrupts, set phydev->irq to PHY_POLL.
130
131 When you want to disconnect from the network (even if just briefly), you call
132 phy_stop(phydev).
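
 Putting the last two paragraphs together, a typical open/stop pair might look
 like the hypothetical sketch below (continuing the foo_private example
 above, wired up as the net_device open and stop methods). The choice of
 PHY_POLL here is just an example of the interrupt handling options.

   static int foo_open(struct net_device *dev)
   {
           struct foo_private *priv = netdev_priv(dev);

           /* no interrupt wired to the PHY on this board: poll instead
            * (or set PHY_IGNORE_INTERRUPT to handle the irq yourself) */
           priv->phydev->irq = PHY_POLL;

           /* MAC and rings are ready at this point: let the PAL bring
            * the link up and start calling adjust_link() */
           phy_start(priv->phydev);

           netif_start_queue(dev);
           return 0;
   }

   static int foo_stop(struct net_device *dev)
   {
           struct foo_private *priv = netdev_priv(dev);

           netif_stop_queue(dev);

           /* disconnect from the network, even if only briefly */
           phy_stop(priv->phydev);
           return 0;
   }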
133
134Keeping Close Tabs on the PAL
135
136 It is possible that the PAL's built-in state machine needs a little help to
137 keep your network device and the PHY properly in sync. If so, you can
138 register a helper function when connecting to the PHY, which will be called
139 every second before the state machine reacts to any changes. To do this, you
140 need to manually call phy_attach() and phy_prepare_link(), and then call
141 phy_start_machine() with the second argument set to point to your special
142 handler.
143
144 Currently there are no examples of how to use this functionality, and testing
145 on it has been limited because the author does not have any drivers which use
146 it (they all use option 1). So Caveat Emptor.
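
 With that warning in mind, a rough, untested sketch of what this might look
 like follows. The argument lists of phy_prepare_link() and phy_start_machine()
 are assumptions based only on the description above (second argument = your
 handler), and phy_check() is an invented name for the per-second helper;
 phy_attach() and its error handling follow the earlier examples.

   /* called once a second, before the PAL's state machine runs: peek
    * at the PHY/MAC and fix up anything the PAL would get wrong */
   static void phy_check(struct net_device *dev)
   {
   }

   static int foo_connect_phy_helper(struct net_device *dev)
   {
           struct foo_private *priv = netdev_priv(dev);
           struct phy_device *phydev;

           phydev = phy_attach(dev, "phy0:0", 0);
           if (IS_ERR(phydev))
                   return PTR_ERR(phydev);

           /* assumed: registers adjust_link() as the link handler */
           phy_prepare_link(phydev, &adjust_link);

           /* second argument: the helper run every second (see above) */
           phy_start_machine(phydev, &phy_check);

           priv->phydev = phydev;
           return 0;
   }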
147
148Doing it all yourself
149
150 There's a remote chance that the PAL's built-in state machine cannot track
151 the complex interactions between the PHY and your network device. If this is
152 so, you can simply call phy_attach(), and not call phy_start_machine or
153 phy_prepare_link(). This will mean that phydev->state is entirely yours to
154 handle (phy_start and phy_stop toggle between some of the states, so you
155 might need to avoid them).
156
157 An effort has been made to make sure that useful functionality can be
158 accessed without the state machine running, and most of these functions are
159 descended from functions which did not interact with a complex state machine.
160 However, again, no effort has been made so far to test running without the
161 state machine, so caveat emptor.
162
163 Here is a brief rundown of the functions:
164
165 int phy_read(struct phy_device *phydev, u16 regnum);
166 int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
167
168 Simple read/write primitives. They invoke the bus's read/write function
169 pointers.
170
171 void phy_print_status(struct phy_device *phydev);
172
173 A convenience function to print out the PHY status neatly.
174
175 int phy_clear_interrupt(struct phy_device *phydev);
176 int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
177
178 Clear the PHY's interrupt, and configure which ones are allowed,
179 respectively. Currently only supports all on, or all off.
180
181 int phy_enable_interrupts(struct phy_device *phydev);
182 int phy_disable_interrupts(struct phy_device *phydev);
183
184 Functions which enable/disable PHY interrupts, clearing them
185 before and after, respectively.
186
187 int phy_start_interrupts(struct phy_device *phydev);
188 int phy_stop_interrupts(struct phy_device *phydev);
189
190 phy_start_interrupts requests the IRQ for the PHY interrupts and enables
191 them; phy_stop_interrupts disables them and frees the IRQ.
192
193 struct phy_device * phy_attach(struct net_device *dev, const char *phy_id,
194 u32 flags);
195
196 Attaches a network device to a particular PHY, binding the PHY to a generic
197 driver if none was found during bus initialization. Passes in
198 any phy-specific flags as needed.
199
200 int phy_start_aneg(struct phy_device *phydev);
201
202 Using variables inside the phydev structure, either configures advertising
203 and resets autonegotiation, or disables autonegotiation, and configures
204 forced settings.
205
206 static inline int phy_read_status(struct phy_device *phydev);
207
208 Fills the phydev structure with up-to-date information about the current
209 settings in the PHY.
210
211 void phy_sanitize_settings(struct phy_device *phydev);
212
213 Resolves differences between currently desired settings, and
214 supported settings for the given PHY device. Does not make
215 the changes in the hardware, though.
216
217 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
218 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
219
220 Ethtool convenience functions.
221
222 int phy_mii_ioctl(struct phy_device *phydev,
223 struct mii_ioctl_data *mii_data, int cmd);
224
225 The MII ioctl. Note that this function will completely screw up the state
226 machine if you write registers like BMCR, BMSR, ADVERTISE, etc. Best to
227 use this only to write registers which are not standard, and don't set off
228 a renegotiation.
229
230
231PHY Device Drivers
232
233 With the PHY Abstraction Layer, adding support for new PHYs is
234 quite easy. In some cases, no work is required at all! However,
235 many PHYs require a little hand-holding to get up-and-running.
236
237Generic PHY driver
238
239 If the desired PHY doesn't have any errata, quirks, or special
240 features you want to support, then it may be best to not add
241 support, and let the PHY Abstraction Layer's Generic PHY Driver
242 do all of the work.
243
244Writing a PHY driver
245
246 If you do need to write a PHY driver, the first thing to do is
247 make sure it can be matched with an appropriate PHY device.
248 This is done during bus initialization by reading the device's
249 UID (stored in registers 2 and 3), then comparing it to each
250 driver's phy_id field by ANDing it with each driver's
251 phy_id_mask field. Also, it needs a name. Here's an example:
252
253 static struct phy_driver dm9161_driver = {
254 .phy_id = 0x0181b880,
255 .name = "Davicom DM9161E",
256 .phy_id_mask = 0x0ffffff0,
257 ...
258 };
259
260 Next, you need to specify what features (speed, duplex, autoneg,
261 etc) your PHY device and driver support. Most PHYs support
262 PHY_BASIC_FEATURES, but you can look in include/linux/mii.h for other
263 features.
264
265 Each driver consists of a number of function pointers:
266
267 config_init: configures PHY into a sane state after a reset.
268 For instance, a Davicom PHY requires descrambling disabled.
269 probe: Does any setup needed by the driver
270 suspend/resume: power management
271 config_aneg: Changes the speed/duplex/negotiation settings
272 read_status: Reads the current speed/duplex/negotiation settings
273 ack_interrupt: Clear a pending interrupt
274 config_intr: Enable or disable interrupts
275 remove: Does any driver take-down
276
277 Of these, only config_aneg and read_status are required to be
278 assigned by the driver code. The rest are optional. Also, it is
279 preferred to use the generic phy driver's versions of these two
280 functions if at all possible: genphy_read_status and
281 genphy_config_aneg. If this is not possible, it is likely that
282 you only need to perform some actions before and after invoking
283 these functions, and so your functions will wrap the generic
284 ones.
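
 As a summary, a hedged skeleton of such a driver is sketched below. The
 phy_id, phy_id_mask, and name values are copied from the Davicom example
 above; the .features field, the embedded .driver owner initializer, and the
 phy_driver_register()/phy_driver_unregister() calls are modelled on the
 in-tree drivers rather than spelled out in this text, and the config_init
 hook is only a placeholder. This is not the in-tree davicom.c, just a shape
 to start from.

   #include <linux/module.h>
   #include <linux/mii.h>
   #include <linux/phy.h>

   static int dm9161_config_init(struct phy_device *phydev)
   {
           /* undo reset defaults the generic code can't know about,
            * e.g. disable the descrambler as noted above */
           return 0;
   }

   static struct phy_driver dm9161_driver = {
           .phy_id         = 0x0181b880,
           .phy_id_mask    = 0x0ffffff0,
           .name           = "Davicom DM9161E",
           .features       = PHY_BASIC_FEATURES,
           .config_init    = dm9161_config_init,
           /* let the generic PHY code do the two required operations */
           .config_aneg    = genphy_config_aneg,
           .read_status    = genphy_read_status,
           .driver         = { .owner = THIS_MODULE },
   };

   static int __init dm9161_init(void)
   {
           return phy_driver_register(&dm9161_driver);
   }

   static void __exit dm9161_exit(void)
   {
           phy_driver_unregister(&dm9161_driver);
   }

   module_init(dm9161_init);
   module_exit(dm9161_exit);
   MODULE_LICENSE("GPL");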
285
286 Feel free to look at the Marvell, Cicada, and Davicom drivers in
287 drivers/net/phy/ for examples (the lxt and qsemi drivers have
288 not been tested as of this writing).
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8edb6936fb9b..79e8aa6f2b9e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -131,6 +131,8 @@ config NET_SB1000
131 131
132 source "drivers/net/arcnet/Kconfig" 132 source "drivers/net/arcnet/Kconfig"
133 133
134source "drivers/net/phy/Kconfig"
135
134# 136#
135# Ethernet 137# Ethernet
136# 138#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 63c6d1e6d4d9..a369ae284a9a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
65# 65#
66 66
67obj-$(CONFIG_MII) += mii.o 67obj-$(CONFIG_MII) += mii.o
68obj-$(CONFIG_PHYLIB) += phy/
68 69
69obj-$(CONFIG_SUNDANCE) += sundance.o 70obj-$(CONFIG_SUNDANCE) += sundance.o
70obj-$(CONFIG_HAMACHI) += hamachi.o 71obj-$(CONFIG_HAMACHI) += hamachi.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 3707df6b0cfa..60304f7e7e5b 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -87,7 +87,6 @@ extern struct net_device *mvme147lance_probe(int unit);
87extern struct net_device *tc515_probe(int unit); 87extern struct net_device *tc515_probe(int unit);
88extern struct net_device *lance_probe(int unit); 88extern struct net_device *lance_probe(int unit);
89extern struct net_device *mace_probe(int unit); 89extern struct net_device *mace_probe(int unit);
90extern struct net_device *macsonic_probe(int unit);
91extern struct net_device *mac8390_probe(int unit); 90extern struct net_device *mac8390_probe(int unit);
92extern struct net_device *mac89x0_probe(int unit); 91extern struct net_device *mac89x0_probe(int unit);
93extern struct net_device *mc32_probe(int unit); 92extern struct net_device *mc32_probe(int unit);
@@ -284,9 +283,6 @@ static struct devprobe2 m68k_probes[] __initdata = {
284#ifdef CONFIG_MACMACE /* Mac 68k Quadra AV builtin Ethernet */ 283#ifdef CONFIG_MACMACE /* Mac 68k Quadra AV builtin Ethernet */
285 {mace_probe, 0}, 284 {mace_probe, 0},
286#endif 285#endif
287#ifdef CONFIG_MACSONIC /* Mac SONIC-based Ethernet of all sorts */
288 {macsonic_probe, 0},
289#endif
290#ifdef CONFIG_MAC8390 /* NuBus NS8390-based cards */ 286#ifdef CONFIG_MAC8390 /* NuBus NS8390-based cards */
291 {mac8390_probe, 0}, 287 {mac8390_probe, 0},
292#endif 288#endif
@@ -318,17 +314,9 @@ static void __init ethif_probe2(int unit)
318#ifdef CONFIG_TR 314#ifdef CONFIG_TR
319/* Token-ring device probe */ 315/* Token-ring device probe */
320extern int ibmtr_probe_card(struct net_device *); 316extern int ibmtr_probe_card(struct net_device *);
321extern struct net_device *sk_isa_probe(int unit);
322extern struct net_device *proteon_probe(int unit);
323extern struct net_device *smctr_probe(int unit); 317extern struct net_device *smctr_probe(int unit);
324 318
325static struct devprobe2 tr_probes2[] __initdata = { 319static struct devprobe2 tr_probes2[] __initdata = {
326#ifdef CONFIG_SKISA
327 {sk_isa_probe, 0},
328#endif
329#ifdef CONFIG_PROTEON
330 {proteon_probe, 0},
331#endif
332#ifdef CONFIG_SMCTR 320#ifdef CONFIG_SMCTR
333 {smctr_probe, 0}, 321 {smctr_probe, 0},
334#endif 322#endif
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 5ce606d9dc03..19e829b567d0 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1106,18 +1106,13 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
1106 } 1106 }
1107 } 1107 }
1108 1108
1109 if (found) { 1109 if (!found)
1110 /* a slave was found that is using the mac address 1110 return 0;
1111 * of the new slave
1112 */
1113 printk(KERN_ERR DRV_NAME
1114 ": Error: the hw address of slave %s is not "
1115 "unique - cannot enslave it!",
1116 slave->dev->name);
1117 return -EINVAL;
1118 }
1119 1111
1120 return 0; 1112 /* Try setting slave mac to bond address and fall-through
1113 to code handling that situation below... */
1114 alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
1115 bond->alb_info.rlb_enabled);
1121 } 1116 }
1122 1117
1123 /* The slave's address is equal to the address of the bond. 1118 /* The slave's address is equal to the address of the bond.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2c930da90a85..94c9f68dd16b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1604,6 +1604,44 @@ static int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_
1604 return 0; 1604 return 0;
1605} 1605}
1606 1606
1607#define BOND_INTERSECT_FEATURES \
1608 (NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)
1609
1610/*
1611 * Compute the features available to the bonding device by
1612 * intersection of all of the slave devices' BOND_INTERSECT_FEATURES.
1613 * Call this after attaching or detaching a slave to update the
1614 * bond's features.
1615 */
1616static int bond_compute_features(struct bonding *bond)
1617{
1618 int i;
1619 struct slave *slave;
1620 struct net_device *bond_dev = bond->dev;
1621 int features = bond->bond_features;
1622
1623 bond_for_each_slave(bond, slave, i) {
1624 struct net_device * slave_dev = slave->dev;
1625 if (i == 0) {
1626 features |= BOND_INTERSECT_FEATURES;
1627 }
1628 features &=
1629 ~(~slave_dev->features & BOND_INTERSECT_FEATURES);
1630 }
1631
1632 /* turn off NETIF_F_SG if we need a csum and h/w can't do it */
1633 if ((features & NETIF_F_SG) &&
1634 !(features & (NETIF_F_IP_CSUM |
1635 NETIF_F_NO_CSUM |
1636 NETIF_F_HW_CSUM))) {
1637 features &= ~NETIF_F_SG;
1638 }
1639
1640 bond_dev->features = features;
1641
1642 return 0;
1643}
1644
1607/* enslave device <slave> to bond device <master> */ 1645/* enslave device <slave> to bond device <master> */
1608static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) 1646static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1609{ 1647{
@@ -1811,6 +1849,8 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1811 new_slave->delay = 0; 1849 new_slave->delay = 0;
1812 new_slave->link_failure_count = 0; 1850 new_slave->link_failure_count = 0;
1813 1851
1852 bond_compute_features(bond);
1853
1814 if (bond->params.miimon && !bond->params.use_carrier) { 1854 if (bond->params.miimon && !bond->params.use_carrier) {
1815 link_reporting = bond_check_dev_link(bond, slave_dev, 1); 1855 link_reporting = bond_check_dev_link(bond, slave_dev, 1);
1816 1856
@@ -2015,7 +2055,7 @@ err_free:
2015 2055
2016err_undo_flags: 2056err_undo_flags:
2017 bond_dev->features = old_features; 2057 bond_dev->features = old_features;
2018 2058
2019 return res; 2059 return res;
2020} 2060}
2021 2061
@@ -2100,6 +2140,8 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
2100 /* release the slave from its bond */ 2140 /* release the slave from its bond */
2101 bond_detach_slave(bond, slave); 2141 bond_detach_slave(bond, slave);
2102 2142
2143 bond_compute_features(bond);
2144
2103 if (bond->primary_slave == slave) { 2145 if (bond->primary_slave == slave) {
2104 bond->primary_slave = NULL; 2146 bond->primary_slave = NULL;
2105 } 2147 }
@@ -2243,6 +2285,8 @@ static int bond_release_all(struct net_device *bond_dev)
2243 bond_alb_deinit_slave(bond, slave); 2285 bond_alb_deinit_slave(bond, slave);
2244 } 2286 }
2245 2287
2288 bond_compute_features(bond);
2289
2246 /* now that the slave is detached, unlock and perform 2290 /* now that the slave is detached, unlock and perform
2247 * all the undo steps that should not be called from 2291 * all the undo steps that should not be called from
2248 * within a lock. 2292 * within a lock.
@@ -3588,6 +3632,7 @@ static int bond_master_netdev_event(unsigned long event, struct net_device *bond
3588static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev) 3632static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev)
3589{ 3633{
3590 struct net_device *bond_dev = slave_dev->master; 3634 struct net_device *bond_dev = slave_dev->master;
3635 struct bonding *bond = bond_dev->priv;
3591 3636
3592 switch (event) { 3637 switch (event) {
3593 case NETDEV_UNREGISTER: 3638 case NETDEV_UNREGISTER:
@@ -3626,6 +3671,9 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave
3626 * TODO: handle changing the primary's name 3671 * TODO: handle changing the primary's name
3627 */ 3672 */
3628 break; 3673 break;
3674 case NETDEV_FEAT_CHANGE:
3675 bond_compute_features(bond);
3676 break;
3629 default: 3677 default:
3630 break; 3678 break;
3631 } 3679 }
@@ -4526,6 +4574,11 @@ static inline void bond_set_mode_ops(struct bonding *bond, int mode)
4526 } 4574 }
4527} 4575}
4528 4576
4577static struct ethtool_ops bond_ethtool_ops = {
4578 .get_tx_csum = ethtool_op_get_tx_csum,
4579 .get_sg = ethtool_op_get_sg,
4580};
4581
4529/* 4582/*
4530 * Does not allocate but creates a /proc entry. 4583 * Does not allocate but creates a /proc entry.
4531 * Allowed to fail. 4584 * Allowed to fail.
@@ -4555,6 +4608,7 @@ static int __init bond_init(struct net_device *bond_dev, struct bond_params *par
4555 bond_dev->stop = bond_close; 4608 bond_dev->stop = bond_close;
4556 bond_dev->get_stats = bond_get_stats; 4609 bond_dev->get_stats = bond_get_stats;
4557 bond_dev->do_ioctl = bond_do_ioctl; 4610 bond_dev->do_ioctl = bond_do_ioctl;
4611 bond_dev->ethtool_ops = &bond_ethtool_ops;
4558 bond_dev->set_multicast_list = bond_set_multicast_list; 4612 bond_dev->set_multicast_list = bond_set_multicast_list;
4559 bond_dev->change_mtu = bond_change_mtu; 4613 bond_dev->change_mtu = bond_change_mtu;
4560 bond_dev->set_mac_address = bond_set_mac_address; 4614 bond_dev->set_mac_address = bond_set_mac_address;
@@ -4591,6 +4645,8 @@ static int __init bond_init(struct net_device *bond_dev, struct bond_params *par
4591 NETIF_F_HW_VLAN_RX | 4645 NETIF_F_HW_VLAN_RX |
4592 NETIF_F_HW_VLAN_FILTER); 4646 NETIF_F_HW_VLAN_FILTER);
4593 4647
4648 bond->bond_features = bond_dev->features;
4649
4594#ifdef CONFIG_PROC_FS 4650#ifdef CONFIG_PROC_FS
4595 bond_create_proc_entry(bond); 4651 bond_create_proc_entry(bond);
4596#endif 4652#endif
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index d27f377b3eeb..388196980862 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -211,6 +211,9 @@ struct bonding {
211 struct bond_params params; 211 struct bond_params params;
212 struct list_head vlan_list; 212 struct list_head vlan_list;
213 struct vlan_group *vlgrp; 213 struct vlan_group *vlgrp;
214 /* the features the bonding device supports, independently
215 * of any slaves */
216 int bond_features;
214}; 217};
215 218
216/** 219/**
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index b82fd15d0891..9b596e0bbf95 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2767,7 +2767,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2767 " next_to_use <%x>\n" 2767 " next_to_use <%x>\n"
2768 " next_to_clean <%x>\n" 2768 " next_to_clean <%x>\n"
2769 "buffer_info[next_to_clean]\n" 2769 "buffer_info[next_to_clean]\n"
2770 " dma <%zx>\n" 2770 " dma <%llx>\n"
2771 " time_stamp <%lx>\n" 2771 " time_stamp <%lx>\n"
2772 " next_to_watch <%x>\n" 2772 " next_to_watch <%x>\n"
2773 " jiffies <%lx>\n" 2773 " jiffies <%lx>\n"
@@ -2776,7 +2776,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2776 E1000_READ_REG(&adapter->hw, TDT), 2776 E1000_READ_REG(&adapter->hw, TDT),
2777 tx_ring->next_to_use, 2777 tx_ring->next_to_use,
2778 i, 2778 i,
2779 tx_ring->buffer_info[i].dma, 2779 (unsigned long long)tx_ring->buffer_info[i].dma,
2780 tx_ring->buffer_info[i].time_stamp, 2780 tx_ring->buffer_info[i].time_stamp,
2781 eop, 2781 eop,
2782 jiffies, 2782 jiffies,
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 1795425f512e..8c62ced2c9b2 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -1263,8 +1263,8 @@ speedo_init_rx_ring(struct net_device *dev)
1263 for (i = 0; i < RX_RING_SIZE; i++) { 1263 for (i = 0; i < RX_RING_SIZE; i++) {
1264 struct sk_buff *skb; 1264 struct sk_buff *skb;
1265 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD)); 1265 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1266 /* XXX: do we really want to call this before the NULL check? --hch */ 1266 if (skb)
1267 rx_align(skb); /* Align IP on 16 byte boundary */ 1267 rx_align(skb); /* Align IP on 16 byte boundary */
1268 sp->rx_skbuff[i] = skb; 1268 sp->rx_skbuff[i] = skb;
1269 if (skb == NULL) 1269 if (skb == NULL)
1270 break; /* OK. Just initially short of Rx bufs. */ 1270 break; /* OK. Just initially short of Rx bufs. */
@@ -1654,8 +1654,8 @@ static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1654 struct sk_buff *skb; 1654 struct sk_buff *skb;
1655 /* Get a fresh skbuff to replace the consumed one. */ 1655 /* Get a fresh skbuff to replace the consumed one. */
1656 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD)); 1656 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1657 /* XXX: do we really want to call this before the NULL check? --hch */ 1657 if (skb)
1658 rx_align(skb); /* Align IP on 16 byte boundary */ 1658 rx_align(skb); /* Align IP on 16 byte boundary */
1659 sp->rx_skbuff[entry] = skb; 1659 sp->rx_skbuff[entry] = skb;
1660 if (skb == NULL) { 1660 if (skb == NULL) {
1661 sp->rx_ringp[entry] = NULL; 1661 sp->rx_ringp[entry] = NULL;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 64f0f697c958..7d93948aec83 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -85,6 +85,16 @@
85 * 0.33: 16 May 2005: Support for MCP51 added. 85 * 0.33: 16 May 2005: Support for MCP51 added.
86 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics. 86 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
87 * 0.35: 26 Jun 2005: Support for MCP55 added. 87 * 0.35: 26 Jun 2005: Support for MCP55 added.
88 * 0.36: 28 Jun 2005: Add jumbo frame support.
89 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
90 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
91 * per-packet flags.
92 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
93 * 0.40: 19 Jul 2005: Add support for mac address change.
94 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
95 * of nv_remove
96 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
97 * in the second (and later) nv_open call
88 * 98 *
89 * Known bugs: 99 * Known bugs:
90 * We suspect that on some hardware no TX done interrupts are generated. 100 * We suspect that on some hardware no TX done interrupts are generated.
@@ -96,7 +106,7 @@
96 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 106 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
97 * superfluous timer interrupts from the nic. 107 * superfluous timer interrupts from the nic.
98 */ 108 */
99#define FORCEDETH_VERSION "0.35" 109#define FORCEDETH_VERSION "0.41"
100#define DRV_NAME "forcedeth" 110#define DRV_NAME "forcedeth"
101 111
102#include <linux/module.h> 112#include <linux/module.h>
@@ -131,11 +141,10 @@
131 * Hardware access: 141 * Hardware access:
132 */ 142 */
133 143
134#define DEV_NEED_LASTPACKET1 0x0001 /* set LASTPACKET1 in tx flags */ 144#define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
135#define DEV_IRQMASK_1 0x0002 /* use NVREG_IRQMASK_WANTED_1 for irq mask */ 145#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
136#define DEV_IRQMASK_2 0x0004 /* use NVREG_IRQMASK_WANTED_2 for irq mask */ 146#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
137#define DEV_NEED_TIMERIRQ 0x0008 /* set the timer irq flag in the irq mask */ 147#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
138#define DEV_NEED_LINKTIMER 0x0010 /* poll link settings. Relies on the timer irq */
139 148
140enum { 149enum {
141 NvRegIrqStatus = 0x000, 150 NvRegIrqStatus = 0x000,
@@ -146,13 +155,16 @@ enum {
146#define NVREG_IRQ_RX 0x0002 155#define NVREG_IRQ_RX 0x0002
147#define NVREG_IRQ_RX_NOBUF 0x0004 156#define NVREG_IRQ_RX_NOBUF 0x0004
148#define NVREG_IRQ_TX_ERR 0x0008 157#define NVREG_IRQ_TX_ERR 0x0008
149#define NVREG_IRQ_TX2 0x0010 158#define NVREG_IRQ_TX_OK 0x0010
150#define NVREG_IRQ_TIMER 0x0020 159#define NVREG_IRQ_TIMER 0x0020
151#define NVREG_IRQ_LINK 0x0040 160#define NVREG_IRQ_LINK 0x0040
161#define NVREG_IRQ_TX_ERROR 0x0080
152#define NVREG_IRQ_TX1 0x0100 162#define NVREG_IRQ_TX1 0x0100
153#define NVREG_IRQMASK_WANTED_1 0x005f 163#define NVREG_IRQMASK_WANTED 0x00df
154#define NVREG_IRQMASK_WANTED_2 0x0147 164
155#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1)) 165#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
166 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
167 NVREG_IRQ_TX1))
156 168
157 NvRegUnknownSetupReg6 = 0x008, 169 NvRegUnknownSetupReg6 = 0x008,
158#define NVREG_UNKSETUP6_VAL 3 170#define NVREG_UNKSETUP6_VAL 3
@@ -286,6 +298,18 @@ struct ring_desc {
286 u32 FlagLen; 298 u32 FlagLen;
287}; 299};
288 300
301struct ring_desc_ex {
302 u32 PacketBufferHigh;
303 u32 PacketBufferLow;
304 u32 Reserved;
305 u32 FlagLen;
306};
307
308typedef union _ring_type {
309 struct ring_desc* orig;
310 struct ring_desc_ex* ex;
311} ring_type;
312
289#define FLAG_MASK_V1 0xffff0000 313#define FLAG_MASK_V1 0xffff0000
290#define FLAG_MASK_V2 0xffffc000 314#define FLAG_MASK_V2 0xffffc000
291#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1) 315#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
@@ -293,7 +317,7 @@ struct ring_desc {
293 317
294#define NV_TX_LASTPACKET (1<<16) 318#define NV_TX_LASTPACKET (1<<16)
295#define NV_TX_RETRYERROR (1<<19) 319#define NV_TX_RETRYERROR (1<<19)
296#define NV_TX_LASTPACKET1 (1<<24) 320#define NV_TX_FORCED_INTERRUPT (1<<24)
297#define NV_TX_DEFERRED (1<<26) 321#define NV_TX_DEFERRED (1<<26)
298#define NV_TX_CARRIERLOST (1<<27) 322#define NV_TX_CARRIERLOST (1<<27)
299#define NV_TX_LATECOLLISION (1<<28) 323#define NV_TX_LATECOLLISION (1<<28)
@@ -303,7 +327,7 @@ struct ring_desc {
303 327
304#define NV_TX2_LASTPACKET (1<<29) 328#define NV_TX2_LASTPACKET (1<<29)
305#define NV_TX2_RETRYERROR (1<<18) 329#define NV_TX2_RETRYERROR (1<<18)
306#define NV_TX2_LASTPACKET1 (1<<23) 330#define NV_TX2_FORCED_INTERRUPT (1<<30)
307#define NV_TX2_DEFERRED (1<<25) 331#define NV_TX2_DEFERRED (1<<25)
308#define NV_TX2_CARRIERLOST (1<<26) 332#define NV_TX2_CARRIERLOST (1<<26)
309#define NV_TX2_LATECOLLISION (1<<27) 333#define NV_TX2_LATECOLLISION (1<<27)
@@ -379,9 +403,13 @@ struct ring_desc {
379#define TX_LIMIT_START 62 403#define TX_LIMIT_START 62
380 404
381/* rx/tx mac addr + type + vlan + align + slack*/ 405/* rx/tx mac addr + type + vlan + align + slack*/
382#define RX_NIC_BUFSIZE (ETH_DATA_LEN + 64) 406#define NV_RX_HEADERS (64)
383/* even more slack */ 407/* even more slack. */
384#define RX_ALLOC_BUFSIZE (ETH_DATA_LEN + 128) 408#define NV_RX_ALLOC_PAD (64)
409
410/* maximum mtu size */
411#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
412#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */
385 413
386#define OOM_REFILL (1+HZ/20) 414#define OOM_REFILL (1+HZ/20)
387#define POLL_WAIT (1+HZ/100) 415#define POLL_WAIT (1+HZ/100)
@@ -396,6 +424,7 @@ struct ring_desc {
396 */ 424 */
397#define DESC_VER_1 0x0 425#define DESC_VER_1 0x0
398#define DESC_VER_2 (0x02100|NVREG_TXRXCTL_RXCHECK) 426#define DESC_VER_2 (0x02100|NVREG_TXRXCTL_RXCHECK)
427#define DESC_VER_3 (0x02200|NVREG_TXRXCTL_RXCHECK)
399 428
400/* PHY defines */ 429/* PHY defines */
401#define PHY_OUI_MARVELL 0x5043 430#define PHY_OUI_MARVELL 0x5043
@@ -468,11 +497,12 @@ struct fe_priv {
468 /* rx specific fields. 497 /* rx specific fields.
469 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); 498 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
470 */ 499 */
471 struct ring_desc *rx_ring; 500 ring_type rx_ring;
472 unsigned int cur_rx, refill_rx; 501 unsigned int cur_rx, refill_rx;
473 struct sk_buff *rx_skbuff[RX_RING]; 502 struct sk_buff *rx_skbuff[RX_RING];
474 dma_addr_t rx_dma[RX_RING]; 503 dma_addr_t rx_dma[RX_RING];
475 unsigned int rx_buf_sz; 504 unsigned int rx_buf_sz;
505 unsigned int pkt_limit;
476 struct timer_list oom_kick; 506 struct timer_list oom_kick;
477 struct timer_list nic_poll; 507 struct timer_list nic_poll;
478 508
@@ -484,7 +514,7 @@ struct fe_priv {
484 /* 514 /*
485 * tx specific fields. 515 * tx specific fields.
486 */ 516 */
487 struct ring_desc *tx_ring; 517 ring_type tx_ring;
488 unsigned int next_tx, nic_tx; 518 unsigned int next_tx, nic_tx;
489 struct sk_buff *tx_skbuff[TX_RING]; 519 struct sk_buff *tx_skbuff[TX_RING];
490 dma_addr_t tx_dma[TX_RING]; 520 dma_addr_t tx_dma[TX_RING];
@@ -519,6 +549,11 @@ static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
519 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); 549 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
520} 550}
521 551
552static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
553{
554 return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
555}
556
522static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, 557static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
523 int delay, int delaymax, const char *msg) 558 int delay, int delaymax, const char *msg)
524{ 559{
@@ -792,7 +827,7 @@ static int nv_alloc_rx(struct net_device *dev)
792 nr = refill_rx % RX_RING; 827 nr = refill_rx % RX_RING;
793 if (np->rx_skbuff[nr] == NULL) { 828 if (np->rx_skbuff[nr] == NULL) {
794 829
795 skb = dev_alloc_skb(RX_ALLOC_BUFSIZE); 830 skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
796 if (!skb) 831 if (!skb)
797 break; 832 break;
798 833
@@ -803,9 +838,16 @@ static int nv_alloc_rx(struct net_device *dev)
803 } 838 }
804 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len, 839 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
805 PCI_DMA_FROMDEVICE); 840 PCI_DMA_FROMDEVICE);
806 np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]); 841 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
807 wmb(); 842 np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
808 np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL); 843 wmb();
844 np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
845 } else {
846 np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
847 np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
848 wmb();
849 np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
850 }
809 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n", 851 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
810 dev->name, refill_rx); 852 dev->name, refill_rx);
811 refill_rx++; 853 refill_rx++;
@@ -831,19 +873,37 @@ static void nv_do_rx_refill(unsigned long data)
831 enable_irq(dev->irq); 873 enable_irq(dev->irq);
832} 874}
833 875
834static int nv_init_ring(struct net_device *dev) 876static void nv_init_rx(struct net_device *dev)
835{ 877{
836 struct fe_priv *np = get_nvpriv(dev); 878 struct fe_priv *np = get_nvpriv(dev);
837 int i; 879 int i;
838 880
839 np->next_tx = np->nic_tx = 0;
840 for (i = 0; i < TX_RING; i++)
841 np->tx_ring[i].FlagLen = 0;
842
843 np->cur_rx = RX_RING; 881 np->cur_rx = RX_RING;
844 np->refill_rx = 0; 882 np->refill_rx = 0;
845 for (i = 0; i < RX_RING; i++) 883 for (i = 0; i < RX_RING; i++)
846 np->rx_ring[i].FlagLen = 0; 884 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
885 np->rx_ring.orig[i].FlagLen = 0;
886 else
887 np->rx_ring.ex[i].FlagLen = 0;
888}
889
890static void nv_init_tx(struct net_device *dev)
891{
892 struct fe_priv *np = get_nvpriv(dev);
893 int i;
894
895 np->next_tx = np->nic_tx = 0;
896 for (i = 0; i < TX_RING; i++)
897 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
898 np->tx_ring.orig[i].FlagLen = 0;
899 else
900 np->tx_ring.ex[i].FlagLen = 0;
901}
902
903static int nv_init_ring(struct net_device *dev)
904{
905 nv_init_tx(dev);
906 nv_init_rx(dev);
847 return nv_alloc_rx(dev); 907 return nv_alloc_rx(dev);
848} 908}
849 909
@@ -852,7 +912,10 @@ static void nv_drain_tx(struct net_device *dev)
852 struct fe_priv *np = get_nvpriv(dev); 912 struct fe_priv *np = get_nvpriv(dev);
853 int i; 913 int i;
854 for (i = 0; i < TX_RING; i++) { 914 for (i = 0; i < TX_RING; i++) {
855 np->tx_ring[i].FlagLen = 0; 915 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
916 np->tx_ring.orig[i].FlagLen = 0;
917 else
918 np->tx_ring.ex[i].FlagLen = 0;
856 if (np->tx_skbuff[i]) { 919 if (np->tx_skbuff[i]) {
857 pci_unmap_single(np->pci_dev, np->tx_dma[i], 920 pci_unmap_single(np->pci_dev, np->tx_dma[i],
858 np->tx_skbuff[i]->len, 921 np->tx_skbuff[i]->len,
@@ -869,7 +932,10 @@ static void nv_drain_rx(struct net_device *dev)
869 struct fe_priv *np = get_nvpriv(dev); 932 struct fe_priv *np = get_nvpriv(dev);
870 int i; 933 int i;
871 for (i = 0; i < RX_RING; i++) { 934 for (i = 0; i < RX_RING; i++) {
872 np->rx_ring[i].FlagLen = 0; 935 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
936 np->rx_ring.orig[i].FlagLen = 0;
937 else
938 np->rx_ring.ex[i].FlagLen = 0;
873 wmb(); 939 wmb();
874 if (np->rx_skbuff[i]) { 940 if (np->rx_skbuff[i]) {
875 pci_unmap_single(np->pci_dev, np->rx_dma[i], 941 pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -900,11 +966,19 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
900 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len, 966 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len,
901 PCI_DMA_TODEVICE); 967 PCI_DMA_TODEVICE);
902 968
903 np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); 969 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
970 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
971 else {
972 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
973 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
974 }
904 975
905 spin_lock_irq(&np->lock); 976 spin_lock_irq(&np->lock);
906 wmb(); 977 wmb();
907 np->tx_ring[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags ); 978 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
979 np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
980 else
981 np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
908 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n", 982 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n",
909 dev->name, np->next_tx); 983 dev->name, np->next_tx);
910 { 984 {
@@ -942,7 +1016,10 @@ static void nv_tx_done(struct net_device *dev)
942 while (np->nic_tx != np->next_tx) { 1016 while (np->nic_tx != np->next_tx) {
943 i = np->nic_tx % TX_RING; 1017 i = np->nic_tx % TX_RING;
944 1018
945 Flags = le32_to_cpu(np->tx_ring[i].FlagLen); 1019 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1020 Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
1021 else
1022 Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
946 1023
947 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n", 1024 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
948 dev->name, np->nic_tx, Flags); 1025 dev->name, np->nic_tx, Flags);
@@ -993,9 +1070,56 @@ static void nv_tx_timeout(struct net_device *dev)
993 struct fe_priv *np = get_nvpriv(dev); 1070 struct fe_priv *np = get_nvpriv(dev);
994 u8 __iomem *base = get_hwbase(dev); 1071 u8 __iomem *base = get_hwbase(dev);
995 1072
996 dprintk(KERN_DEBUG "%s: Got tx_timeout. irq: %08x\n", dev->name, 1073 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
997 readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK); 1074 readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
998 1075
1076 {
1077 int i;
1078
1079 printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
1080 dev->name, (unsigned long)np->ring_addr,
1081 np->next_tx, np->nic_tx);
1082 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
1083 for (i=0;i<0x400;i+= 32) {
1084 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
1085 i,
1086 readl(base + i + 0), readl(base + i + 4),
1087 readl(base + i + 8), readl(base + i + 12),
1088 readl(base + i + 16), readl(base + i + 20),
1089 readl(base + i + 24), readl(base + i + 28));
1090 }
1091 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
1092 for (i=0;i<TX_RING;i+= 4) {
1093 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1094 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
1095 i,
1096 le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
1097 le32_to_cpu(np->tx_ring.orig[i].FlagLen),
1098 le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
1099 le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
1100 le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
1101 le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
1102 le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
1103 le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
1104 } else {
1105 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
1106 i,
1107 le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
1108 le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
1109 le32_to_cpu(np->tx_ring.ex[i].FlagLen),
1110 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
1111 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
1112 le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
1113 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
1114 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
1115 le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
1116 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
1117 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
1118 le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
1119 }
1120 }
1121 }
1122
999 spin_lock_irq(&np->lock); 1123 spin_lock_irq(&np->lock);
1000 1124
1001 /* 1) stop tx engine */ 1125 /* 1) stop tx engine */
@@ -1009,7 +1133,10 @@ static void nv_tx_timeout(struct net_device *dev)
1009 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 1133 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
1010 nv_drain_tx(dev); 1134 nv_drain_tx(dev);
1011 np->next_tx = np->nic_tx = 0; 1135 np->next_tx = np->nic_tx = 0;
1012 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); 1136 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1137 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1138 else
1139 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1013 netif_wake_queue(dev); 1140 netif_wake_queue(dev);
1014 } 1141 }
1015 1142
@@ -1084,8 +1211,13 @@ static void nv_rx_process(struct net_device *dev)
1084 break; /* we scanned the whole ring - do not continue */ 1211 break; /* we scanned the whole ring - do not continue */
1085 1212
1086 i = np->cur_rx % RX_RING; 1213 i = np->cur_rx % RX_RING;
1087 Flags = le32_to_cpu(np->rx_ring[i].FlagLen); 1214 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1088 len = nv_descr_getlength(&np->rx_ring[i], np->desc_ver); 1215 Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
1216 len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
1217 } else {
1218 Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
1219 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
1220 }
1089 1221
1090 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", 1222 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
1091 dev->name, np->cur_rx, Flags); 1223 dev->name, np->cur_rx, Flags);
@@ -1207,15 +1339,133 @@ next_pkt:
1207 } 1339 }
1208} 1340}
1209 1341
1342static void set_bufsize(struct net_device *dev)
1343{
1344 struct fe_priv *np = netdev_priv(dev);
1345
1346 if (dev->mtu <= ETH_DATA_LEN)
1347 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
1348 else
1349 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
1350}
1351
1210/* 1352/*
1211 * nv_change_mtu: dev->change_mtu function 1353 * nv_change_mtu: dev->change_mtu function
1212 * Called with dev_base_lock held for read. 1354 * Called with dev_base_lock held for read.
1213 */ 1355 */
1214static int nv_change_mtu(struct net_device *dev, int new_mtu) 1356static int nv_change_mtu(struct net_device *dev, int new_mtu)
1215{ 1357{
1216 if (new_mtu > ETH_DATA_LEN) 1358 struct fe_priv *np = get_nvpriv(dev);
1359 int old_mtu;
1360
1361 if (new_mtu < 64 || new_mtu > np->pkt_limit)
1217 return -EINVAL; 1362 return -EINVAL;
1363
1364 old_mtu = dev->mtu;
1218 dev->mtu = new_mtu; 1365 dev->mtu = new_mtu;
1366
1367 /* return early if the buffer sizes will not change */
1368 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
1369 return 0;
1370 if (old_mtu == new_mtu)
1371 return 0;
1372
1373 /* synchronized against open : rtnl_lock() held by caller */
1374 if (netif_running(dev)) {
1375 u8 *base = get_hwbase(dev);
1376 /*
1377 * It seems that the nic preloads valid ring entries into an
1378 * internal buffer. The procedure for flushing everything is
1379 * guessed, there is probably a simpler approach.
1380 * Changing the MTU is a rare event, it shouldn't matter.
1381 */
1382 disable_irq(dev->irq);
1383 spin_lock_bh(&dev->xmit_lock);
1384 spin_lock(&np->lock);
1385 /* stop engines */
1386 nv_stop_rx(dev);
1387 nv_stop_tx(dev);
1388 nv_txrx_reset(dev);
1389 /* drain rx queue */
1390 nv_drain_rx(dev);
1391 nv_drain_tx(dev);
1392 /* reinit driver view of the rx queue */
1393 nv_init_rx(dev);
1394 nv_init_tx(dev);
1395 /* alloc new rx buffers */
1396 set_bufsize(dev);
1397 if (nv_alloc_rx(dev)) {
1398 if (!np->in_shutdown)
1399 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1400 }
1401 /* reinit nic view of the rx queue */
1402 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
1403 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
1404 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1405 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1406 else
1407 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1408 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1409 base + NvRegRingSizes);
1410 pci_push(base);
1411 writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
1412 pci_push(base);
1413
1414 /* restart rx engine */
1415 nv_start_rx(dev);
1416 nv_start_tx(dev);
1417 spin_unlock(&np->lock);
1418 spin_unlock_bh(&dev->xmit_lock);
1419 enable_irq(dev->irq);
1420 }
1421 return 0;
1422}
1423
1424static void nv_copy_mac_to_hw(struct net_device *dev)
1425{
1426 u8 *base = get_hwbase(dev);
1427 u32 mac[2];
1428
1429 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
1430 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
1431 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
1432
1433 writel(mac[0], base + NvRegMacAddrA);
1434 writel(mac[1], base + NvRegMacAddrB);
1435}
1436
1437/*
1438 * nv_set_mac_address: dev->set_mac_address function
1439 * Called with rtnl_lock() held.
1440 */
1441static int nv_set_mac_address(struct net_device *dev, void *addr)
1442{
1443 struct fe_priv *np = get_nvpriv(dev);
1444 struct sockaddr *macaddr = (struct sockaddr*)addr;
1445
1446 if(!is_valid_ether_addr(macaddr->sa_data))
1447 return -EADDRNOTAVAIL;
1448
1449 /* synchronized against open : rtnl_lock() held by caller */
1450 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
1451
1452 if (netif_running(dev)) {
1453 spin_lock_bh(&dev->xmit_lock);
1454 spin_lock_irq(&np->lock);
1455
1456 /* stop rx engine */
1457 nv_stop_rx(dev);
1458
1459 /* set mac address */
1460 nv_copy_mac_to_hw(dev);
1461
1462 /* restart rx engine */
1463 nv_start_rx(dev);
1464 spin_unlock_irq(&np->lock);
1465 spin_unlock_bh(&dev->xmit_lock);
1466 } else {
1467 nv_copy_mac_to_hw(dev);
1468 }
1219 return 0; 1469 return 0;
1220} 1470}
1221 1471
@@ -1470,7 +1720,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
1470 if (!(events & np->irqmask)) 1720 if (!(events & np->irqmask))
1471 break; 1721 break;
1472 1722
1473 if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX2|NVREG_IRQ_TX_ERR)) { 1723 if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_ERROR|NVREG_IRQ_TX_ERR)) {
1474 spin_lock(&np->lock); 1724 spin_lock(&np->lock);
1475 nv_tx_done(dev); 1725 nv_tx_done(dev);
1476 spin_unlock(&np->lock); 1726 spin_unlock(&np->lock);
@@ -1761,6 +2011,50 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1761 return 0; 2011 return 0;
1762} 2012}
1763 2013
2014#define FORCEDETH_REGS_VER 1
2015#define FORCEDETH_REGS_SIZE 0x400 /* 256 32-bit registers */
2016
2017static int nv_get_regs_len(struct net_device *dev)
2018{
2019 return FORCEDETH_REGS_SIZE;
2020}
2021
2022static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
2023{
2024 struct fe_priv *np = get_nvpriv(dev);
2025 u8 __iomem *base = get_hwbase(dev);
2026 u32 *rbuf = buf;
2027 int i;
2028
2029 regs->version = FORCEDETH_REGS_VER;
2030 spin_lock_irq(&np->lock);
2031 for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
2032 rbuf[i] = readl(base + i*sizeof(u32));
2033 spin_unlock_irq(&np->lock);
2034}
2035
2036static int nv_nway_reset(struct net_device *dev)
2037{
2038 struct fe_priv *np = get_nvpriv(dev);
2039 int ret;
2040
2041 spin_lock_irq(&np->lock);
2042 if (np->autoneg) {
2043 int bmcr;
2044
2045 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
2046 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
2047 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
2048
2049 ret = 0;
2050 } else {
2051 ret = -EINVAL;
2052 }
2053 spin_unlock_irq(&np->lock);
2054
2055 return ret;
2056}
2057
1764static struct ethtool_ops ops = { 2058static struct ethtool_ops ops = {
1765 .get_drvinfo = nv_get_drvinfo, 2059 .get_drvinfo = nv_get_drvinfo,
1766 .get_link = ethtool_op_get_link, 2060 .get_link = ethtool_op_get_link,
@@ -1768,6 +2062,9 @@ static struct ethtool_ops ops = {
1768 .set_wol = nv_set_wol, 2062 .set_wol = nv_set_wol,
1769 .get_settings = nv_get_settings, 2063 .get_settings = nv_get_settings,
1770 .set_settings = nv_set_settings, 2064 .set_settings = nv_set_settings,
2065 .get_regs_len = nv_get_regs_len,
2066 .get_regs = nv_get_regs,
2067 .nway_reset = nv_nway_reset,
1771}; 2068};
1772 2069
1773static int nv_open(struct net_device *dev) 2070static int nv_open(struct net_device *dev)
@@ -1792,6 +2089,7 @@ static int nv_open(struct net_device *dev)
1792 writel(0, base + NvRegAdapterControl); 2089 writel(0, base + NvRegAdapterControl);
1793 2090
1794 /* 2) initialize descriptor rings */ 2091 /* 2) initialize descriptor rings */
2092 set_bufsize(dev);
1795 oom = nv_init_ring(dev); 2093 oom = nv_init_ring(dev);
1796 2094
1797 writel(0, base + NvRegLinkSpeed); 2095 writel(0, base + NvRegLinkSpeed);
@@ -1802,20 +2100,14 @@ static int nv_open(struct net_device *dev)
1802 np->in_shutdown = 0; 2100 np->in_shutdown = 0;
1803 2101
1804 /* 3) set mac address */ 2102 /* 3) set mac address */
1805 { 2103 nv_copy_mac_to_hw(dev);
1806 u32 mac[2];
1807
1808 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
1809 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
1810 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
1811
1812 writel(mac[0], base + NvRegMacAddrA);
1813 writel(mac[1], base + NvRegMacAddrB);
1814 }
1815 2104
1816 /* 4) give hw rings */ 2105 /* 4) give hw rings */
1817 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr); 2106 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
1818 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); 2107 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2108 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
2109 else
2110 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1819 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), 2111 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1820 base + NvRegRingSizes); 2112 base + NvRegRingSizes);
1821 2113
@@ -1837,7 +2129,7 @@ static int nv_open(struct net_device *dev)
1837 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 2129 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
1838 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 2130 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
1839 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 2131 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
1840 writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig); 2132 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
1841 2133
1842 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 2134 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
1843 get_random_bytes(&i, sizeof(i)); 2135 get_random_bytes(&i, sizeof(i));
@@ -1888,6 +2180,9 @@ static int nv_open(struct net_device *dev)
1888 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 2180 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1889 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); 2181 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
1890 } 2182 }
2183 /* set linkspeed to invalid value, thus force nv_update_linkspeed
2184 * to init hw */
2185 np->linkspeed = 0;
1891 ret = nv_update_linkspeed(dev); 2186 ret = nv_update_linkspeed(dev);
1892 nv_start_rx(dev); 2187 nv_start_rx(dev);
1893 nv_start_tx(dev); 2188 nv_start_tx(dev);
@@ -1942,6 +2237,12 @@ static int nv_close(struct net_device *dev)
1942 if (np->wolenabled) 2237 if (np->wolenabled)
1943 nv_start_rx(dev); 2238 nv_start_rx(dev);
1944 2239
2240 /* special op: write back the misordered MAC address - otherwise
2241 * the next nv_probe would see a wrong address.
2242 */
2243 writel(np->orig_mac[0], base + NvRegMacAddrA);
2244 writel(np->orig_mac[1], base + NvRegMacAddrB);
2245
1945 /* FIXME: power down nic */ 2246 /* FIXME: power down nic */
1946 2247
1947 return 0; 2248 return 0;
@@ -2006,32 +2307,55 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2006 } 2307 }
2007 2308
2008 /* handle different descriptor versions */ 2309 /* handle different descriptor versions */
2009 if (pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_1 || 2310 if (id->driver_data & DEV_HAS_HIGH_DMA) {
2010 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_2 || 2311 /* packet format 3: supports 40-bit addressing */
2011 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_3 || 2312 np->desc_ver = DESC_VER_3;
2012 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || 2313 if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
2013 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) 2314 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
2014 np->desc_ver = DESC_VER_1; 2315 pci_name(pci_dev));
2015 else 2316 }
2317 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
2318 /* packet format 2: supports jumbo frames */
2016 np->desc_ver = DESC_VER_2; 2319 np->desc_ver = DESC_VER_2;
2320 } else {
2321 /* original packet format */
2322 np->desc_ver = DESC_VER_1;
2323 }
2324
2325 np->pkt_limit = NV_PKTLIMIT_1;
2326 if (id->driver_data & DEV_HAS_LARGEDESC)
2327 np->pkt_limit = NV_PKTLIMIT_2;
2017 2328
2018 err = -ENOMEM; 2329 err = -ENOMEM;
2019 np->base = ioremap(addr, NV_PCI_REGSZ); 2330 np->base = ioremap(addr, NV_PCI_REGSZ);
2020 if (!np->base) 2331 if (!np->base)
2021 goto out_relreg; 2332 goto out_relreg;
2022 dev->base_addr = (unsigned long)np->base; 2333 dev->base_addr = (unsigned long)np->base;
2334
2023 dev->irq = pci_dev->irq; 2335 dev->irq = pci_dev->irq;
2024 np->rx_ring = pci_alloc_consistent(pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), 2336
2025 &np->ring_addr); 2337 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2026 if (!np->rx_ring) 2338 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
2027 goto out_unmap; 2339 sizeof(struct ring_desc) * (RX_RING + TX_RING),
2028 np->tx_ring = &np->rx_ring[RX_RING]; 2340 &np->ring_addr);
2341 if (!np->rx_ring.orig)
2342 goto out_unmap;
2343 np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
2344 } else {
2345 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
2346 sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
2347 &np->ring_addr);
2348 if (!np->rx_ring.ex)
2349 goto out_unmap;
2350 np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
2351 }
2029 2352
2030 dev->open = nv_open; 2353 dev->open = nv_open;
2031 dev->stop = nv_close; 2354 dev->stop = nv_close;
2032 dev->hard_start_xmit = nv_start_xmit; 2355 dev->hard_start_xmit = nv_start_xmit;
2033 dev->get_stats = nv_get_stats; 2356 dev->get_stats = nv_get_stats;
2034 dev->change_mtu = nv_change_mtu; 2357 dev->change_mtu = nv_change_mtu;
2358 dev->set_mac_address = nv_set_mac_address;
2035 dev->set_multicast_list = nv_set_multicast; 2359 dev->set_multicast_list = nv_set_multicast;
2036#ifdef CONFIG_NET_POLL_CONTROLLER 2360#ifdef CONFIG_NET_POLL_CONTROLLER
2037 dev->poll_controller = nv_poll_controller; 2361 dev->poll_controller = nv_poll_controller;
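Note on the probe hunk above: the per-device-ID descriptor table is replaced by driver_data flags. DEV_HAS_HIGH_DMA selects the packet-format-3 descriptors and tries a 40-bit DMA mask (falling back to 32-bit addressing on failure), DEV_HAS_LARGEDESC selects format 2 and raises the packet limit, and the shared RX+TX ring allocation is sized for whichever descriptor struct was chosen. The sketch below only illustrates that arithmetic; the struct layouts and ring sizes are assumptions for the example, not the driver's definitions.

/* illustrative sketch, not driver code */
#include <stdio.h>
#include <stdint.h>

struct ring_desc    { uint32_t buf;      uint32_t flaglen; };	/* DESC_VER_1/2, assumed layout */
struct ring_desc_ex { uint32_t bufhigh;  uint32_t buflow;
		      uint32_t reserved; uint32_t flaglen; };	/* DESC_VER_3, assumed layout */

#define RX_RING 128	/* assumed ring sizes */
#define TX_RING 64

int main(void)
{
	uint64_t dma_mask_40bit = 0x0000007fffffffffULL;	/* mask used in the hunk above */

	printf("40-bit DMA mask: 0x%016llx\n", (unsigned long long)dma_mask_40bit);
	printf("ring alloc, orig descriptors:     %zu bytes\n",
	       sizeof(struct ring_desc) * (RX_RING + TX_RING));
	printf("ring alloc, extended descriptors: %zu bytes\n",
	       sizeof(struct ring_desc_ex) * (RX_RING + TX_RING));
	return 0;
}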
@@ -2080,17 +2404,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2080 2404
2081 if (np->desc_ver == DESC_VER_1) { 2405 if (np->desc_ver == DESC_VER_1) {
2082 np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID; 2406 np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID;
2083 if (id->driver_data & DEV_NEED_LASTPACKET1)
2084 np->tx_flags |= NV_TX_LASTPACKET1;
2085 } else { 2407 } else {
2086 np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID; 2408 np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID;
2087 if (id->driver_data & DEV_NEED_LASTPACKET1)
2088 np->tx_flags |= NV_TX2_LASTPACKET1;
2089 } 2409 }
2090 if (id->driver_data & DEV_IRQMASK_1) 2410 np->irqmask = NVREG_IRQMASK_WANTED;
2091 np->irqmask = NVREG_IRQMASK_WANTED_1;
2092 if (id->driver_data & DEV_IRQMASK_2)
2093 np->irqmask = NVREG_IRQMASK_WANTED_2;
2094 if (id->driver_data & DEV_NEED_TIMERIRQ) 2411 if (id->driver_data & DEV_NEED_TIMERIRQ)
2095 np->irqmask |= NVREG_IRQ_TIMER; 2412 np->irqmask |= NVREG_IRQ_TIMER;
2096 if (id->driver_data & DEV_NEED_LINKTIMER) { 2413 if (id->driver_data & DEV_NEED_LINKTIMER) {
@@ -2155,8 +2472,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2155 return 0; 2472 return 0;
2156 2473
2157out_freering: 2474out_freering:
2158 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), 2475 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2159 np->rx_ring, np->ring_addr); 2476 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
2477 np->rx_ring.orig, np->ring_addr);
2478 else
2479 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
2480 np->rx_ring.ex, np->ring_addr);
2160 pci_set_drvdata(pci_dev, NULL); 2481 pci_set_drvdata(pci_dev, NULL);
2161out_unmap: 2482out_unmap:
2162 iounmap(get_hwbase(dev)); 2483 iounmap(get_hwbase(dev));
@@ -2174,18 +2495,14 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
2174{ 2495{
2175 struct net_device *dev = pci_get_drvdata(pci_dev); 2496 struct net_device *dev = pci_get_drvdata(pci_dev);
2176 struct fe_priv *np = get_nvpriv(dev); 2497 struct fe_priv *np = get_nvpriv(dev);
2177 u8 __iomem *base = get_hwbase(dev);
2178 2498
2179 unregister_netdev(dev); 2499 unregister_netdev(dev);
2180 2500
2181 /* special op: write back the misordered MAC address - otherwise
2182 * the next nv_probe would see a wrong address.
2183 */
2184 writel(np->orig_mac[0], base + NvRegMacAddrA);
2185 writel(np->orig_mac[1], base + NvRegMacAddrB);
2186
2187 /* free all structures */ 2501 /* free all structures */
2188 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring, np->ring_addr); 2502 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2503 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
2504 else
2505 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
2189 iounmap(get_hwbase(dev)); 2506 iounmap(get_hwbase(dev));
2190 pci_release_regions(pci_dev); 2507 pci_release_regions(pci_dev);
2191 pci_disable_device(pci_dev); 2508 pci_disable_device(pci_dev);
@@ -2195,109 +2512,64 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
2195 2512
2196static struct pci_device_id pci_tbl[] = { 2513static struct pci_device_id pci_tbl[] = {
2197 { /* nForce Ethernet Controller */ 2514 { /* nForce Ethernet Controller */
2198 .vendor = PCI_VENDOR_ID_NVIDIA, 2515 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
2199 .device = PCI_DEVICE_ID_NVIDIA_NVENET_1, 2516 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2200 .subvendor = PCI_ANY_ID,
2201 .subdevice = PCI_ANY_ID,
2202 .driver_data = DEV_IRQMASK_1|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2203 }, 2517 },
2204 { /* nForce2 Ethernet Controller */ 2518 { /* nForce2 Ethernet Controller */
2205 .vendor = PCI_VENDOR_ID_NVIDIA, 2519 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
2206 .device = PCI_DEVICE_ID_NVIDIA_NVENET_2, 2520 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2207 .subvendor = PCI_ANY_ID,
2208 .subdevice = PCI_ANY_ID,
2209 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2210 }, 2521 },
2211 { /* nForce3 Ethernet Controller */ 2522 { /* nForce3 Ethernet Controller */
2212 .vendor = PCI_VENDOR_ID_NVIDIA, 2523 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
2213 .device = PCI_DEVICE_ID_NVIDIA_NVENET_3, 2524 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2214 .subvendor = PCI_ANY_ID,
2215 .subdevice = PCI_ANY_ID,
2216 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2217 }, 2525 },
2218 { /* nForce3 Ethernet Controller */ 2526 { /* nForce3 Ethernet Controller */
2219 .vendor = PCI_VENDOR_ID_NVIDIA, 2527 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
2220 .device = PCI_DEVICE_ID_NVIDIA_NVENET_4, 2528 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
2221 .subvendor = PCI_ANY_ID,
2222 .subdevice = PCI_ANY_ID,
2223 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2224 }, 2529 },
2225 { /* nForce3 Ethernet Controller */ 2530 { /* nForce3 Ethernet Controller */
2226 .vendor = PCI_VENDOR_ID_NVIDIA, 2531 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
2227 .device = PCI_DEVICE_ID_NVIDIA_NVENET_5, 2532 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
2228 .subvendor = PCI_ANY_ID,
2229 .subdevice = PCI_ANY_ID,
2230 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2231 }, 2533 },
2232 { /* nForce3 Ethernet Controller */ 2534 { /* nForce3 Ethernet Controller */
2233 .vendor = PCI_VENDOR_ID_NVIDIA, 2535 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
2234 .device = PCI_DEVICE_ID_NVIDIA_NVENET_6, 2536 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
2235 .subvendor = PCI_ANY_ID,
2236 .subdevice = PCI_ANY_ID,
2237 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2238 }, 2537 },
2239 { /* nForce3 Ethernet Controller */ 2538 { /* nForce3 Ethernet Controller */
2240 .vendor = PCI_VENDOR_ID_NVIDIA, 2539 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
2241 .device = PCI_DEVICE_ID_NVIDIA_NVENET_7, 2540 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
2242 .subvendor = PCI_ANY_ID,
2243 .subdevice = PCI_ANY_ID,
2244 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2245 }, 2541 },
2246 { /* CK804 Ethernet Controller */ 2542 { /* CK804 Ethernet Controller */
2247 .vendor = PCI_VENDOR_ID_NVIDIA, 2543 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
2248 .device = PCI_DEVICE_ID_NVIDIA_NVENET_8, 2544 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2249 .subvendor = PCI_ANY_ID,
2250 .subdevice = PCI_ANY_ID,
2251 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2252 }, 2545 },
2253 { /* CK804 Ethernet Controller */ 2546 { /* CK804 Ethernet Controller */
2254 .vendor = PCI_VENDOR_ID_NVIDIA, 2547 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
2255 .device = PCI_DEVICE_ID_NVIDIA_NVENET_9, 2548 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2256 .subvendor = PCI_ANY_ID,
2257 .subdevice = PCI_ANY_ID,
2258 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2259 }, 2549 },
2260 { /* MCP04 Ethernet Controller */ 2550 { /* MCP04 Ethernet Controller */
2261 .vendor = PCI_VENDOR_ID_NVIDIA, 2551 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
2262 .device = PCI_DEVICE_ID_NVIDIA_NVENET_10, 2552 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2263 .subvendor = PCI_ANY_ID,
2264 .subdevice = PCI_ANY_ID,
2265 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2266 }, 2553 },
2267 { /* MCP04 Ethernet Controller */ 2554 { /* MCP04 Ethernet Controller */
2268 .vendor = PCI_VENDOR_ID_NVIDIA, 2555 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
2269 .device = PCI_DEVICE_ID_NVIDIA_NVENET_11, 2556 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2270 .subvendor = PCI_ANY_ID,
2271 .subdevice = PCI_ANY_ID,
2272 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2273 }, 2557 },
2274 { /* MCP51 Ethernet Controller */ 2558 { /* MCP51 Ethernet Controller */
2275 .vendor = PCI_VENDOR_ID_NVIDIA, 2559 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
2276 .device = PCI_DEVICE_ID_NVIDIA_NVENET_12, 2560 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
2277 .subvendor = PCI_ANY_ID,
2278 .subdevice = PCI_ANY_ID,
2279 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2280 }, 2561 },
2281 { /* MCP51 Ethernet Controller */ 2562 { /* MCP51 Ethernet Controller */
2282 .vendor = PCI_VENDOR_ID_NVIDIA, 2563 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
2283 .device = PCI_DEVICE_ID_NVIDIA_NVENET_13, 2564 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
2284 .subvendor = PCI_ANY_ID,
2285 .subdevice = PCI_ANY_ID,
2286 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2287 }, 2565 },
2288 { /* MCP55 Ethernet Controller */ 2566 { /* MCP55 Ethernet Controller */
2289 .vendor = PCI_VENDOR_ID_NVIDIA, 2567 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
2290 .device = PCI_DEVICE_ID_NVIDIA_NVENET_14, 2568 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2291 .subvendor = PCI_ANY_ID,
2292 .subdevice = PCI_ANY_ID,
2293 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2294 }, 2569 },
2295 { /* MCP55 Ethernet Controller */ 2570 { /* MCP55 Ethernet Controller */
2296 .vendor = PCI_VENDOR_ID_NVIDIA, 2571 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
2297 .device = PCI_DEVICE_ID_NVIDIA_NVENET_15, 2572 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2298 .subvendor = PCI_ANY_ID,
2299 .subdevice = PCI_ANY_ID,
2300 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2301 }, 2573 },
2302 {0,}, 2574 {0,},
2303}; 2575};
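Note on the rewritten PCI ID table above: PCI_DEVICE() fills in the vendor/device pair and wildcards subvendor/subdevice - exactly the four fields every removed entry spelled out by hand - so each entry shrinks to the macro plus its driver_data flags. The sketch below uses local stand-ins for the kernel macro and types purely for illustration; the NVENET device ID and flag values shown are assumptions, not taken from this patch.

/* self-contained sketch of the PCI_DEVICE() shorthand */
#include <stdio.h>

#define PCI_ANY_ID (~0u)

struct pci_device_id {
	unsigned int vendor, device;
	unsigned int subvendor, subdevice;
	unsigned long driver_data;
};

/* mirrors the spirit of the kernel's PCI_DEVICE() helper */
#define PCI_DEVICE(vend, dev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

#define PCI_VENDOR_ID_NVIDIA	0x10de	/* NVIDIA's PCI vendor ID */
#define NVENET_EXAMPLE_ID	0x01c3	/* example device ID, assumed */

static const struct pci_device_id tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, NVENET_EXAMPLE_ID),
	  .driver_data = 0x3 },	/* stand-in for DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER */
	{ 0, },
};

int main(void)
{
	printf("entry 0: vendor=0x%04x device=0x%04x subvendor=0x%x\n",
	       tbl[0].vendor, tbl[0].device, tbl[0].subvendor);
	return 0;
}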
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 0cd54306e636..de087cd609d9 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -1,6 +1,6 @@
1config MKISS 1config MKISS
2 tristate "Serial port KISS driver" 2 tristate "Serial port KISS driver"
3 depends on AX25 && BROKEN_ON_SMP 3 depends on AX25
4 ---help--- 4 ---help---
5 KISS is a protocol used for the exchange of data between a computer 5 KISS is a protocol used for the exchange of data between a computer
6 and a Terminal Node Controller (a small embedded system commonly 6 and a Terminal Node Controller (a small embedded system commonly
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index a7f15d9f13e5..5298096afbdb 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -54,6 +54,7 @@
54#include <linux/kmod.h> 54#include <linux/kmod.h>
55#include <linux/hdlcdrv.h> 55#include <linux/hdlcdrv.h>
56#include <linux/baycom.h> 56#include <linux/baycom.h>
57#include <linux/jiffies.h>
57#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 58#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
58/* prototypes for ax25_encapsulate and ax25_rebuild_header */ 59/* prototypes for ax25_encapsulate and ax25_rebuild_header */
59#include <net/ax25.h> 60#include <net/ax25.h>
@@ -287,7 +288,7 @@ static inline void baycom_int_freq(struct baycom_state *bc)
287 * measure the interrupt frequency 288 * measure the interrupt frequency
288 */ 289 */
289 bc->debug_vals.cur_intcnt++; 290 bc->debug_vals.cur_intcnt++;
290 if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) { 291 if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
291 bc->debug_vals.last_jiffies = cur_jiffies; 292 bc->debug_vals.last_jiffies = cur_jiffies;
292 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; 293 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
293 bc->debug_vals.cur_intcnt = 0; 294 bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 612ad452bee0..3b1bef1ee215 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -84,6 +84,7 @@
84#include <linux/baycom.h> 84#include <linux/baycom.h>
85#include <linux/parport.h> 85#include <linux/parport.h>
86#include <linux/bitops.h> 86#include <linux/bitops.h>
87#include <linux/jiffies.h>
87 88
88#include <asm/bug.h> 89#include <asm/bug.h>
89#include <asm/system.h> 90#include <asm/system.h>
@@ -165,7 +166,7 @@ static void __inline__ baycom_int_freq(struct baycom_state *bc)
165 * measure the interrupt frequency 166 * measure the interrupt frequency
166 */ 167 */
167 bc->debug_vals.cur_intcnt++; 168 bc->debug_vals.cur_intcnt++;
168 if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) { 169 if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
169 bc->debug_vals.last_jiffies = cur_jiffies; 170 bc->debug_vals.last_jiffies = cur_jiffies;
170 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; 171 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
171 bc->debug_vals.cur_intcnt = 0; 172 bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 25f270b05378..232793d2ce6b 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -79,6 +79,7 @@
79#include <asm/io.h> 79#include <asm/io.h>
80#include <linux/hdlcdrv.h> 80#include <linux/hdlcdrv.h>
81#include <linux/baycom.h> 81#include <linux/baycom.h>
82#include <linux/jiffies.h>
82 83
83/* --------------------------------------------------------------------- */ 84/* --------------------------------------------------------------------- */
84 85
@@ -159,7 +160,7 @@ static inline void baycom_int_freq(struct baycom_state *bc)
159 * measure the interrupt frequency 160 * measure the interrupt frequency
160 */ 161 */
161 bc->debug_vals.cur_intcnt++; 162 bc->debug_vals.cur_intcnt++;
162 if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) { 163 if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
163 bc->debug_vals.last_jiffies = cur_jiffies; 164 bc->debug_vals.last_jiffies = cur_jiffies;
164 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; 165 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
165 bc->debug_vals.cur_intcnt = 0; 166 bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index eead85d00962..be596a3eb3fd 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -69,6 +69,7 @@
69#include <asm/io.h> 69#include <asm/io.h>
70#include <linux/hdlcdrv.h> 70#include <linux/hdlcdrv.h>
71#include <linux/baycom.h> 71#include <linux/baycom.h>
72#include <linux/jiffies.h>
72 73
73/* --------------------------------------------------------------------- */ 74/* --------------------------------------------------------------------- */
74 75
@@ -150,7 +151,7 @@ static inline void baycom_int_freq(struct baycom_state *bc)
150 * measure the interrupt frequency 151 * measure the interrupt frequency
151 */ 152 */
152 bc->debug_vals.cur_intcnt++; 153 bc->debug_vals.cur_intcnt++;
153 if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) { 154 if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
154 bc->debug_vals.last_jiffies = cur_jiffies; 155 bc->debug_vals.last_jiffies = cur_jiffies;
155 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; 156 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
156 bc->debug_vals.cur_intcnt = 0; 157 bc->debug_vals.cur_intcnt = 0;
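Note on the four baycom diffs above: each makes the same conversion, replacing the open-coded jiffies comparison with time_after_eq() from the newly included <linux/jiffies.h> (mkiss's ax_xmit further below gets the matching time_before() treatment). The helpers compare the signed difference of the two counters, which is the standard wrap-safe idiom. A small userspace sketch of why that matters, with an assumed HZ; the macro mirrors the kernel helper:

/* sketch: signed-difference comparison survives a counter wrap */
#include <stdio.h>
#include <limits.h>

#define HZ 1000UL	/* assumed tick rate for the example */
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long last = ULONG_MAX - 500;	/* timestamp taken just before the counter wraps */
	unsigned long deadline = last + HZ;	/* wraps around to a small value */
	unsigned long now = last + 250;		/* only 250 ticks have actually elapsed */

	printf("naive now >= deadline       : %d (wrongly reports expiry)\n",
	       now >= deadline);
	printf("time_after_eq(now, deadline): %d (correctly not yet expired)\n",
	       time_after_eq(now, deadline));
	return 0;
}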
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 3035422f5ad8..63b1a2b86acb 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -1,30 +1,19 @@
1/* 1/*
2 * MKISS Driver 2 * This program is free software; you can distribute it and/or modify it
3 * under the terms of the GNU General Public License (Version 2) as
4 * published by the Free Software Foundation.
3 * 5 *
4 * This module: 6 * This program is distributed in the hope it will be useful, but WITHOUT
5 * This module is free software; you can redistribute it and/or 7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6 * modify it under the terms of the GNU General Public License 8 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
7 * as published by the Free Software Foundation; either version 9 * for more details.
8 * 2 of the License, or (at your option) any later version.
9 * 10 *
10 * This module implements the AX.25 protocol for kernel-based 11 * You should have received a copy of the GNU General Public License along
11 * devices like TTYs. It interfaces between a raw TTY, and the 12 * with this program; if not, write to the Free Software Foundation, Inc.,
12 * kernel's AX.25 protocol layers, just like slip.c. 13 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
13 * AX.25 needs to be separated from slip.c while slip.c is no
14 * longer a static kernel device since it is a module.
15 * This method clears the way to implement other kiss protocols
16 * like mkiss smack g8bpq ..... so far only mkiss is implemented.
17 * 14 *
18 * Hans Alblas <hans@esrac.ele.tue.nl> 15 * Copyright (C) Hans Alblas PE1AYX <hans@esrac.ele.tue.nl>
19 * 16 * Copyright (C) 2004, 05 Ralf Baechle DL5RB <ralf@linux-mips.org>
20 * History
21 * Jonathan (G4KLX) Fixed to match Linux networking changes - 2.1.15.
22 * Matthias (DG2FEF) Added support for FlexNet CRC (on special request)
23 * Fixed bug in ax25_close(): dev_lock_wait() was
24 * called twice, causing a deadlock.
25 * Jeroen (PE1RXQ) Removed old MKISS_MAGIC stuff and calls to
26 * MOD_*_USE_COUNT
27 * Remove cli() and fix rtnl lock usage.
28 */ 17 */
29 18
30#include <linux/config.h> 19#include <linux/config.h>
@@ -46,177 +35,300 @@
46#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
47#include <linux/skbuff.h> 36#include <linux/skbuff.h>
48#include <linux/if_arp.h> 37#include <linux/if_arp.h>
38#include <linux/jiffies.h>
49 39
50#include <net/ax25.h> 40#include <net/ax25.h>
51 41
52#include "mkiss.h"
53
54#ifdef CONFIG_INET 42#ifdef CONFIG_INET
55#include <linux/ip.h> 43#include <linux/ip.h>
56#include <linux/tcp.h> 44#include <linux/tcp.h>
57#endif 45#endif
58 46
59static char banner[] __initdata = KERN_INFO "mkiss: AX.25 Multikiss, Hans Albas PE1AYX\n"; 47#define AX_MTU 236
60 48
61typedef struct ax25_ctrl { 49/* SLIP/KISS protocol characters. */
62 struct ax_disp ctrl; /* */ 50#define END 0300 /* indicates end of frame */
63 struct net_device dev; /* the device */ 51#define ESC 0333 /* indicates byte stuffing */
64} ax25_ctrl_t; 52#define ESC_END 0334 /* ESC ESC_END means END 'data' */
65 53#define ESC_ESC 0335 /* ESC ESC_ESC means ESC 'data' */
66static ax25_ctrl_t **ax25_ctrls; 54
67 55struct mkiss {
68int ax25_maxdev = AX25_MAXDEV; /* Can be overridden with insmod! */ 56 struct tty_struct *tty; /* ptr to TTY structure */
69 57 struct net_device *dev; /* easy for intr handling */
70static struct tty_ldisc ax_ldisc; 58
71 59 /* These are pointers to the malloc()ed frame buffers. */
72static int ax25_init(struct net_device *); 60 spinlock_t buflock;/* lock for rbuf and xbuf */
73static int kiss_esc(unsigned char *, unsigned char *, int); 61 unsigned char *rbuff; /* receiver buffer */
74static int kiss_esc_crc(unsigned char *, unsigned char *, unsigned short, int); 62 int rcount; /* received chars counter */
75static void kiss_unesc(struct ax_disp *, unsigned char); 63 unsigned char *xbuff; /* transmitter buffer */
64 unsigned char *xhead; /* pointer to next byte to XMIT */
65 int xleft; /* bytes left in XMIT queue */
66
67 struct net_device_stats stats;
68
69 /* Detailed SLIP statistics. */
70 int mtu; /* Our mtu (to spot changes!) */
71 int buffsize; /* Max buffers sizes */
72
73 unsigned long flags; /* Flag values/ mode etc */
74 /* long req'd: used by set_bit --RR */
75#define AXF_INUSE 0 /* Channel in use */
76#define AXF_ESCAPE 1 /* ESC received */
77#define AXF_ERROR 2 /* Parity, etc. error */
78#define AXF_KEEPTEST 3 /* Keepalive test flag */
79#define AXF_OUTWAIT 4 /* is outpacket was flag */
80
81 int mode;
82 int crcmode; /* MW: for FlexNet, SMACK etc. */
83#define CRC_MODE_NONE 0
84#define CRC_MODE_FLEX 1
85#define CRC_MODE_SMACK 2
86
87 atomic_t refcnt;
88 struct semaphore dead_sem;
89};
76 90
77/*---------------------------------------------------------------------------*/ 91/*---------------------------------------------------------------------------*/
78 92
79static const unsigned short Crc_flex_table[] = { 93static const unsigned short crc_flex_table[] = {
80 0x0f87, 0x1e0e, 0x2c95, 0x3d1c, 0x49a3, 0x582a, 0x6ab1, 0x7b38, 94 0x0f87, 0x1e0e, 0x2c95, 0x3d1c, 0x49a3, 0x582a, 0x6ab1, 0x7b38,
81 0x83cf, 0x9246, 0xa0dd, 0xb154, 0xc5eb, 0xd462, 0xe6f9, 0xf770, 95 0x83cf, 0x9246, 0xa0dd, 0xb154, 0xc5eb, 0xd462, 0xe6f9, 0xf770,
82 0x1f06, 0x0e8f, 0x3c14, 0x2d9d, 0x5922, 0x48ab, 0x7a30, 0x6bb9, 96 0x1f06, 0x0e8f, 0x3c14, 0x2d9d, 0x5922, 0x48ab, 0x7a30, 0x6bb9,
83 0x934e, 0x82c7, 0xb05c, 0xa1d5, 0xd56a, 0xc4e3, 0xf678, 0xe7f1, 97 0x934e, 0x82c7, 0xb05c, 0xa1d5, 0xd56a, 0xc4e3, 0xf678, 0xe7f1,
84 0x2e85, 0x3f0c, 0x0d97, 0x1c1e, 0x68a1, 0x7928, 0x4bb3, 0x5a3a, 98 0x2e85, 0x3f0c, 0x0d97, 0x1c1e, 0x68a1, 0x7928, 0x4bb3, 0x5a3a,
85 0xa2cd, 0xb344, 0x81df, 0x9056, 0xe4e9, 0xf560, 0xc7fb, 0xd672, 99 0xa2cd, 0xb344, 0x81df, 0x9056, 0xe4e9, 0xf560, 0xc7fb, 0xd672,
86 0x3e04, 0x2f8d, 0x1d16, 0x0c9f, 0x7820, 0x69a9, 0x5b32, 0x4abb, 100 0x3e04, 0x2f8d, 0x1d16, 0x0c9f, 0x7820, 0x69a9, 0x5b32, 0x4abb,
87 0xb24c, 0xa3c5, 0x915e, 0x80d7, 0xf468, 0xe5e1, 0xd77a, 0xc6f3, 101 0xb24c, 0xa3c5, 0x915e, 0x80d7, 0xf468, 0xe5e1, 0xd77a, 0xc6f3,
88 0x4d83, 0x5c0a, 0x6e91, 0x7f18, 0x0ba7, 0x1a2e, 0x28b5, 0x393c, 102 0x4d83, 0x5c0a, 0x6e91, 0x7f18, 0x0ba7, 0x1a2e, 0x28b5, 0x393c,
89 0xc1cb, 0xd042, 0xe2d9, 0xf350, 0x87ef, 0x9666, 0xa4fd, 0xb574, 103 0xc1cb, 0xd042, 0xe2d9, 0xf350, 0x87ef, 0x9666, 0xa4fd, 0xb574,
90 0x5d02, 0x4c8b, 0x7e10, 0x6f99, 0x1b26, 0x0aaf, 0x3834, 0x29bd, 104 0x5d02, 0x4c8b, 0x7e10, 0x6f99, 0x1b26, 0x0aaf, 0x3834, 0x29bd,
91 0xd14a, 0xc0c3, 0xf258, 0xe3d1, 0x976e, 0x86e7, 0xb47c, 0xa5f5, 105 0xd14a, 0xc0c3, 0xf258, 0xe3d1, 0x976e, 0x86e7, 0xb47c, 0xa5f5,
92 0x6c81, 0x7d08, 0x4f93, 0x5e1a, 0x2aa5, 0x3b2c, 0x09b7, 0x183e, 106 0x6c81, 0x7d08, 0x4f93, 0x5e1a, 0x2aa5, 0x3b2c, 0x09b7, 0x183e,
93 0xe0c9, 0xf140, 0xc3db, 0xd252, 0xa6ed, 0xb764, 0x85ff, 0x9476, 107 0xe0c9, 0xf140, 0xc3db, 0xd252, 0xa6ed, 0xb764, 0x85ff, 0x9476,
94 0x7c00, 0x6d89, 0x5f12, 0x4e9b, 0x3a24, 0x2bad, 0x1936, 0x08bf, 108 0x7c00, 0x6d89, 0x5f12, 0x4e9b, 0x3a24, 0x2bad, 0x1936, 0x08bf,
95 0xf048, 0xe1c1, 0xd35a, 0xc2d3, 0xb66c, 0xa7e5, 0x957e, 0x84f7, 109 0xf048, 0xe1c1, 0xd35a, 0xc2d3, 0xb66c, 0xa7e5, 0x957e, 0x84f7,
96 0x8b8f, 0x9a06, 0xa89d, 0xb914, 0xcdab, 0xdc22, 0xeeb9, 0xff30, 110 0x8b8f, 0x9a06, 0xa89d, 0xb914, 0xcdab, 0xdc22, 0xeeb9, 0xff30,
97 0x07c7, 0x164e, 0x24d5, 0x355c, 0x41e3, 0x506a, 0x62f1, 0x7378, 111 0x07c7, 0x164e, 0x24d5, 0x355c, 0x41e3, 0x506a, 0x62f1, 0x7378,
98 0x9b0e, 0x8a87, 0xb81c, 0xa995, 0xdd2a, 0xcca3, 0xfe38, 0xefb1, 112 0x9b0e, 0x8a87, 0xb81c, 0xa995, 0xdd2a, 0xcca3, 0xfe38, 0xefb1,
99 0x1746, 0x06cf, 0x3454, 0x25dd, 0x5162, 0x40eb, 0x7270, 0x63f9, 113 0x1746, 0x06cf, 0x3454, 0x25dd, 0x5162, 0x40eb, 0x7270, 0x63f9,
100 0xaa8d, 0xbb04, 0x899f, 0x9816, 0xeca9, 0xfd20, 0xcfbb, 0xde32, 114 0xaa8d, 0xbb04, 0x899f, 0x9816, 0xeca9, 0xfd20, 0xcfbb, 0xde32,
101 0x26c5, 0x374c, 0x05d7, 0x145e, 0x60e1, 0x7168, 0x43f3, 0x527a, 115 0x26c5, 0x374c, 0x05d7, 0x145e, 0x60e1, 0x7168, 0x43f3, 0x527a,
102 0xba0c, 0xab85, 0x991e, 0x8897, 0xfc28, 0xeda1, 0xdf3a, 0xceb3, 116 0xba0c, 0xab85, 0x991e, 0x8897, 0xfc28, 0xeda1, 0xdf3a, 0xceb3,
103 0x3644, 0x27cd, 0x1556, 0x04df, 0x7060, 0x61e9, 0x5372, 0x42fb, 117 0x3644, 0x27cd, 0x1556, 0x04df, 0x7060, 0x61e9, 0x5372, 0x42fb,
104 0xc98b, 0xd802, 0xea99, 0xfb10, 0x8faf, 0x9e26, 0xacbd, 0xbd34, 118 0xc98b, 0xd802, 0xea99, 0xfb10, 0x8faf, 0x9e26, 0xacbd, 0xbd34,
105 0x45c3, 0x544a, 0x66d1, 0x7758, 0x03e7, 0x126e, 0x20f5, 0x317c, 119 0x45c3, 0x544a, 0x66d1, 0x7758, 0x03e7, 0x126e, 0x20f5, 0x317c,
106 0xd90a, 0xc883, 0xfa18, 0xeb91, 0x9f2e, 0x8ea7, 0xbc3c, 0xadb5, 120 0xd90a, 0xc883, 0xfa18, 0xeb91, 0x9f2e, 0x8ea7, 0xbc3c, 0xadb5,
107 0x5542, 0x44cb, 0x7650, 0x67d9, 0x1366, 0x02ef, 0x3074, 0x21fd, 121 0x5542, 0x44cb, 0x7650, 0x67d9, 0x1366, 0x02ef, 0x3074, 0x21fd,
108 0xe889, 0xf900, 0xcb9b, 0xda12, 0xaead, 0xbf24, 0x8dbf, 0x9c36, 122 0xe889, 0xf900, 0xcb9b, 0xda12, 0xaead, 0xbf24, 0x8dbf, 0x9c36,
109 0x64c1, 0x7548, 0x47d3, 0x565a, 0x22e5, 0x336c, 0x01f7, 0x107e, 123 0x64c1, 0x7548, 0x47d3, 0x565a, 0x22e5, 0x336c, 0x01f7, 0x107e,
110 0xf808, 0xe981, 0xdb1a, 0xca93, 0xbe2c, 0xafa5, 0x9d3e, 0x8cb7, 124 0xf808, 0xe981, 0xdb1a, 0xca93, 0xbe2c, 0xafa5, 0x9d3e, 0x8cb7,
111 0x7440, 0x65c9, 0x5752, 0x46db, 0x3264, 0x23ed, 0x1176, 0x00ff 125 0x7440, 0x65c9, 0x5752, 0x46db, 0x3264, 0x23ed, 0x1176, 0x00ff
112}; 126};
113 127
114/*---------------------------------------------------------------------------*/
115
116static unsigned short calc_crc_flex(unsigned char *cp, int size) 128static unsigned short calc_crc_flex(unsigned char *cp, int size)
117{ 129{
118 unsigned short crc = 0xffff; 130 unsigned short crc = 0xffff;
119
120 while (size--)
121 crc = (crc << 8) ^ Crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
122 131
123 return crc; 132 while (size--)
124} 133 crc = (crc << 8) ^ crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
125 134
126/*---------------------------------------------------------------------------*/ 135 return crc;
136}
127 137
128static int check_crc_flex(unsigned char *cp, int size) 138static int check_crc_flex(unsigned char *cp, int size)
129{ 139{
130 unsigned short crc = 0xffff; 140 unsigned short crc = 0xffff;
131 141
132 if (size < 3) 142 if (size < 3)
133 return -1; 143 return -1;
134 144
135 while (size--) 145 while (size--)
136 crc = (crc << 8) ^ Crc_flex_table[((crc >> 8) ^ *cp++) & 0xff]; 146 crc = (crc << 8) ^ crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
137 147
138 if ((crc & 0xffff) != 0x7070) 148 if ((crc & 0xffff) != 0x7070)
139 return -1; 149 return -1;
140 150
141 return 0; 151 return 0;
142} 152}
143 153
144/*---------------------------------------------------------------------------*/ 154/*
155 * Standard encapsulation
156 */
145 157
146/* Find a free channel, and link in this `tty' line. */ 158static int kiss_esc(unsigned char *s, unsigned char *d, int len)
147static inline struct ax_disp *ax_alloc(void)
148{ 159{
149 ax25_ctrl_t *axp=NULL; 160 unsigned char *ptr = d;
150 int i; 161 unsigned char c;
151 162
152 for (i = 0; i < ax25_maxdev; i++) { 163 /*
153 axp = ax25_ctrls[i]; 164 * Send an initial END character to flush out any data that may have
165 * accumulated in the receiver due to line noise.
166 */
154 167
155 /* Not allocated ? */ 168 *ptr++ = END;
156 if (axp == NULL)
157 break;
158 169
159 /* Not in use ? */ 170 while (len-- > 0) {
160 if (!test_and_set_bit(AXF_INUSE, &axp->ctrl.flags)) 171 switch (c = *s++) {
172 case END:
173 *ptr++ = ESC;
174 *ptr++ = ESC_END;
161 break; 175 break;
176 case ESC:
177 *ptr++ = ESC;
178 *ptr++ = ESC_ESC;
179 break;
180 default:
181 *ptr++ = c;
182 break;
183 }
162 } 184 }
163 185
164 /* Sorry, too many, all slots in use */ 186 *ptr++ = END;
165 if (i >= ax25_maxdev) 187
166 return NULL; 188 return ptr - d;
189}
190
191/*
192 * MW:
193 * OK its ugly, but tell me a better solution without copying the
194 * packet to a temporary buffer :-)
195 */
196static int kiss_esc_crc(unsigned char *s, unsigned char *d, unsigned short crc,
197 int len)
198{
199 unsigned char *ptr = d;
200 unsigned char c=0;
201
202 *ptr++ = END;
203 while (len > 0) {
204 if (len > 2)
205 c = *s++;
206 else if (len > 1)
207 c = crc >> 8;
208 else if (len > 0)
209 c = crc & 0xff;
210
211 len--;
167 212
168 /* If no channels are available, allocate one */ 213 switch (c) {
169 if (axp == NULL && (ax25_ctrls[i] = kmalloc(sizeof(ax25_ctrl_t), GFP_KERNEL)) != NULL) { 214 case END:
170 axp = ax25_ctrls[i]; 215 *ptr++ = ESC;
216 *ptr++ = ESC_END;
217 break;
218 case ESC:
219 *ptr++ = ESC;
220 *ptr++ = ESC_ESC;
221 break;
222 default:
223 *ptr++ = c;
224 break;
225 }
171 } 226 }
172 memset(axp, 0, sizeof(ax25_ctrl_t)); 227 *ptr++ = END;
173 228
174 /* Initialize channel control data */ 229 return ptr - d;
175 set_bit(AXF_INUSE, &axp->ctrl.flags); 230}
176 sprintf(axp->dev.name, "ax%d", i++); 231
177 axp->ctrl.tty = NULL; 232/* Send one completely decapsulated AX.25 packet to the AX.25 layer. */
178 axp->dev.base_addr = i; 233static void ax_bump(struct mkiss *ax)
179 axp->dev.priv = (void *)&axp->ctrl; 234{
180 axp->dev.next = NULL; 235 struct sk_buff *skb;
181 axp->dev.init = ax25_init; 236 int count;
182 237
183 if (axp != NULL) { 238 spin_lock_bh(&ax->buflock);
184 /* 239 if (ax->rbuff[0] > 0x0f) {
185 * register device so that it can be ifconfig'ed 240 if (ax->rbuff[0] & 0x20) {
186 * ax25_init() will be called as a side-effect 241 ax->crcmode = CRC_MODE_FLEX;
187 * SIDE-EFFECT WARNING: ax25_init() CLEARS axp->ctrl ! 242 if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
188 */ 243 ax->stats.rx_errors++;
189 if (register_netdev(&axp->dev) == 0) { 244 return;
190 /* (Re-)Set the INUSE bit. Very Important! */ 245 }
191 set_bit(AXF_INUSE, &axp->ctrl.flags); 246 ax->rcount -= 2;
192 axp->ctrl.dev = &axp->dev; 247 /* dl9sau bugfix: the trailling two bytes flexnet crc
193 axp->dev.priv = (void *) &axp->ctrl; 248 * will not be passed to the kernel. thus we have
194 249 * to correct the kissparm signature, because it
195 return &axp->ctrl; 250 * indicates a crc but there's none
196 } else { 251 */
197 clear_bit(AXF_INUSE,&axp->ctrl.flags); 252 *ax->rbuff &= ~0x20;
198 printk(KERN_ERR "mkiss: ax_alloc() - register_netdev() failure.\n");
199 } 253 }
254 }
255 spin_unlock_bh(&ax->buflock);
256
257 count = ax->rcount;
258
259 if ((skb = dev_alloc_skb(count)) == NULL) {
260 printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n",
261 ax->dev->name);
262 ax->stats.rx_dropped++;
263 return;
200 } 264 }
201 265
202 return NULL; 266 spin_lock_bh(&ax->buflock);
267 memcpy(skb_put(skb,count), ax->rbuff, count);
268 spin_unlock_bh(&ax->buflock);
269 skb->protocol = ax25_type_trans(skb, ax->dev);
270 netif_rx(skb);
271 ax->dev->last_rx = jiffies;
272 ax->stats.rx_packets++;
273 ax->stats.rx_bytes += count;
203} 274}
204 275
205/* Free an AX25 channel. */ 276static void kiss_unesc(struct mkiss *ax, unsigned char s)
206static inline void ax_free(struct ax_disp *ax)
207{ 277{
208 /* Free all AX25 frame buffers. */ 278 switch (s) {
209 if (ax->rbuff) 279 case END:
210 kfree(ax->rbuff); 280 /* drop keeptest bit = VSV */
211 ax->rbuff = NULL; 281 if (test_bit(AXF_KEEPTEST, &ax->flags))
212 if (ax->xbuff) 282 clear_bit(AXF_KEEPTEST, &ax->flags);
213 kfree(ax->xbuff); 283
214 ax->xbuff = NULL; 284 if (!test_and_clear_bit(AXF_ERROR, &ax->flags) && (ax->rcount > 2))
215 if (!test_and_clear_bit(AXF_INUSE, &ax->flags)) 285 ax_bump(ax);
216 printk(KERN_ERR "mkiss: %s: ax_free for already free unit.\n", ax->dev->name); 286
287 clear_bit(AXF_ESCAPE, &ax->flags);
288 ax->rcount = 0;
289 return;
290
291 case ESC:
292 set_bit(AXF_ESCAPE, &ax->flags);
293 return;
294 case ESC_ESC:
295 if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
296 s = ESC;
297 break;
298 case ESC_END:
299 if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
300 s = END;
301 break;
302 }
303
304 spin_lock_bh(&ax->buflock);
305 if (!test_bit(AXF_ERROR, &ax->flags)) {
306 if (ax->rcount < ax->buffsize) {
307 ax->rbuff[ax->rcount++] = s;
308 spin_unlock_bh(&ax->buflock);
309 return;
310 }
311
312 ax->stats.rx_over_errors++;
313 set_bit(AXF_ERROR, &ax->flags);
314 }
315 spin_unlock_bh(&ax->buflock);
316}
317
318static int ax_set_mac_address(struct net_device *dev, void *addr)
319{
320 struct sockaddr_ax25 *sa = addr;
321
322 spin_lock_irq(&dev->xmit_lock);
323 memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
324 spin_unlock_irq(&dev->xmit_lock);
325
326 return 0;
217} 327}
218 328
219static void ax_changedmtu(struct ax_disp *ax) 329/*---------------------------------------------------------------------------*/
330
331static void ax_changedmtu(struct mkiss *ax)
220{ 332{
221 struct net_device *dev = ax->dev; 333 struct net_device *dev = ax->dev;
222 unsigned char *xbuff, *rbuff, *oxbuff, *orbuff; 334 unsigned char *xbuff, *rbuff, *oxbuff, *orbuff;
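Note on the large hunk ending above: the old ax_disp/ax25_ctrl bookkeeping is folded into a single struct mkiss held in the netdev private area (buffers, net_device_stats, flag bits, CRC mode, plus a refcount and dead_sem for teardown), and the KISS framing helpers move to the top of the file. Frames are delimited by END (0300); END and ESC bytes inside the payload are stuffed as ESC ESC_END and ESC ESC_ESC. The standalone sketch below mirrors kiss_esc() as shown in the hunk; it is an illustration, not the driver function.

/* sketch: KISS byte stuffing as done by kiss_esc() */
#include <stdio.h>
#include <stddef.h>

#define END     0300
#define ESC     0333
#define ESC_END 0334
#define ESC_ESC 0335

static size_t kiss_escape(const unsigned char *s, unsigned char *d, size_t len)
{
	unsigned char *p = d;

	*p++ = END;				/* flush any line noise on the receiver */
	while (len--) {
		unsigned char c = *s++;

		if (c == END) {
			*p++ = ESC; *p++ = ESC_END;
		} else if (c == ESC) {
			*p++ = ESC; *p++ = ESC_ESC;
		} else {
			*p++ = c;
		}
	}
	*p++ = END;				/* close the frame */
	return p - d;
}

int main(void)
{
	const unsigned char pkt[] = { 0x00, END, 'h', 'i', ESC };
	unsigned char out[2 * sizeof(pkt) + 2];	/* worst case: every byte escaped */
	size_t i, n = kiss_escape(pkt, out, sizeof(pkt));

	for (i = 0; i < n; i++)
		printf("%03o ", (unsigned)out[i]);
	printf("\n");
	return 0;
}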
@@ -236,7 +348,8 @@ static void ax_changedmtu(struct ax_disp *ax)
236 rbuff = kmalloc(len + 4, GFP_ATOMIC); 348 rbuff = kmalloc(len + 4, GFP_ATOMIC);
237 349
238 if (xbuff == NULL || rbuff == NULL) { 350 if (xbuff == NULL || rbuff == NULL) {
239 printk(KERN_ERR "mkiss: %s: unable to grow ax25 buffers, MTU change cancelled.\n", 351 printk(KERN_ERR "mkiss: %s: unable to grow ax25 buffers, "
352 "MTU change cancelled.\n",
240 ax->dev->name); 353 ax->dev->name);
241 dev->mtu = ax->mtu; 354 dev->mtu = ax->mtu;
242 if (xbuff != NULL) 355 if (xbuff != NULL)
@@ -258,7 +371,7 @@ static void ax_changedmtu(struct ax_disp *ax)
258 memcpy(ax->xbuff, ax->xhead, ax->xleft); 371 memcpy(ax->xbuff, ax->xhead, ax->xleft);
259 } else { 372 } else {
260 ax->xleft = 0; 373 ax->xleft = 0;
261 ax->tx_dropped++; 374 ax->stats.tx_dropped++;
262 } 375 }
263 } 376 }
264 377
@@ -269,7 +382,7 @@ static void ax_changedmtu(struct ax_disp *ax)
269 memcpy(ax->rbuff, orbuff, ax->rcount); 382 memcpy(ax->rbuff, orbuff, ax->rcount);
270 } else { 383 } else {
271 ax->rcount = 0; 384 ax->rcount = 0;
272 ax->rx_over_errors++; 385 ax->stats.rx_over_errors++;
273 set_bit(AXF_ERROR, &ax->flags); 386 set_bit(AXF_ERROR, &ax->flags);
274 } 387 }
275 } 388 }
@@ -279,72 +392,14 @@ static void ax_changedmtu(struct ax_disp *ax)
279 392
280 spin_unlock_bh(&ax->buflock); 393 spin_unlock_bh(&ax->buflock);
281 394
282 if (oxbuff != NULL) 395 kfree(oxbuff);
283 kfree(oxbuff); 396 kfree(orbuff);
284 if (orbuff != NULL)
285 kfree(orbuff);
286}
287
288
289/* Set the "sending" flag. This must be atomic. */
290static inline void ax_lock(struct ax_disp *ax)
291{
292 netif_stop_queue(ax->dev);
293}
294
295
296/* Clear the "sending" flag. This must be atomic. */
297static inline void ax_unlock(struct ax_disp *ax)
298{
299 netif_start_queue(ax->dev);
300}
301
302/* Send one completely decapsulated AX.25 packet to the AX.25 layer. */
303static void ax_bump(struct ax_disp *ax)
304{
305 struct sk_buff *skb;
306 int count;
307
308 spin_lock_bh(&ax->buflock);
309 if (ax->rbuff[0] > 0x0f) {
310 if (ax->rbuff[0] & 0x20) {
311 ax->crcmode = CRC_MODE_FLEX;
312 if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
313 ax->rx_errors++;
314 return;
315 }
316 ax->rcount -= 2;
317 /* dl9sau bugfix: the trailling two bytes flexnet crc
318 * will not be passed to the kernel. thus we have
319 * to correct the kissparm signature, because it
320 * indicates a crc but there's none
321 */
322 *ax->rbuff &= ~0x20;
323 }
324 }
325 spin_unlock_bh(&ax->buflock);
326
327 count = ax->rcount;
328
329 if ((skb = dev_alloc_skb(count)) == NULL) {
330 printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n", ax->dev->name);
331 ax->rx_dropped++;
332 return;
333 }
334
335 spin_lock_bh(&ax->buflock);
336 memcpy(skb_put(skb,count), ax->rbuff, count);
337 spin_unlock_bh(&ax->buflock);
338 skb->protocol = ax25_type_trans(skb, ax->dev);
339 netif_rx(skb);
340 ax->dev->last_rx = jiffies;
341 ax->rx_packets++;
342 ax->rx_bytes+=count;
343} 397}
344 398
345/* Encapsulate one AX.25 packet and stuff into a TTY queue. */ 399/* Encapsulate one AX.25 packet and stuff into a TTY queue. */
346static void ax_encaps(struct ax_disp *ax, unsigned char *icp, int len) 400static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
347{ 401{
402 struct mkiss *ax = netdev_priv(dev);
348 unsigned char *p; 403 unsigned char *p;
349 int actual, count; 404 int actual, count;
350 405
@@ -354,8 +409,8 @@ static void ax_encaps(struct ax_disp *ax, unsigned char *icp, int len)
354 if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */ 409 if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */
355 len = ax->mtu; 410 len = ax->mtu;
356 printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name); 411 printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name);
357 ax->tx_dropped++; 412 ax->stats.tx_dropped++;
358 ax_unlock(ax); 413 netif_start_queue(dev);
359 return; 414 return;
360 } 415 }
361 416
@@ -376,10 +431,11 @@ static void ax_encaps(struct ax_disp *ax, unsigned char *icp, int len)
376 break; 431 break;
377 } 432 }
378 433
379 ax->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); 434 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
380 actual = ax->tty->driver->write(ax->tty, ax->xbuff, count); 435 actual = ax->tty->driver->write(ax->tty, ax->xbuff, count);
381 ax->tx_packets++; 436 ax->stats.tx_packets++;
382 ax->tx_bytes+=actual; 437 ax->stats.tx_bytes += actual;
438
383 ax->dev->trans_start = jiffies; 439 ax->dev->trans_start = jiffies;
384 ax->xleft = count - actual; 440 ax->xleft = count - actual;
385 ax->xhead = ax->xbuff + actual; 441 ax->xhead = ax->xbuff + actual;
@@ -387,37 +443,10 @@ static void ax_encaps(struct ax_disp *ax, unsigned char *icp, int len)
387 spin_unlock_bh(&ax->buflock); 443 spin_unlock_bh(&ax->buflock);
388} 444}
389 445
390/*
391 * Called by the driver when there's room for more data. If we have
392 * more packets to send, we send them here.
393 */
394static void ax25_write_wakeup(struct tty_struct *tty)
395{
396 int actual;
397 struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
398
399 /* First make sure we're connected. */
400 if (ax == NULL || ax->magic != AX25_MAGIC || !netif_running(ax->dev))
401 return;
402 if (ax->xleft <= 0) {
403 /* Now serial buffer is almost free & we can start
404 * transmission of another packet
405 */
406 tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
407
408 netif_wake_queue(ax->dev);
409 return;
410 }
411
412 actual = tty->driver->write(tty, ax->xhead, ax->xleft);
413 ax->xleft -= actual;
414 ax->xhead += actual;
415}
416
417/* Encapsulate an AX.25 packet and kick it into a TTY queue. */ 446/* Encapsulate an AX.25 packet and kick it into a TTY queue. */
418static int ax_xmit(struct sk_buff *skb, struct net_device *dev) 447static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
419{ 448{
420 struct ax_disp *ax = netdev_priv(dev); 449 struct mkiss *ax = netdev_priv(dev);
421 450
422 if (!netif_running(dev)) { 451 if (!netif_running(dev)) {
423 printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name); 452 printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name);
@@ -429,7 +458,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
429 * May be we must check transmitter timeout here ? 458 * May be we must check transmitter timeout here ?
430 * 14 Oct 1994 Dmitry Gorodchanin. 459 * 14 Oct 1994 Dmitry Gorodchanin.
431 */ 460 */
432 if (jiffies - dev->trans_start < 20 * HZ) { 461 if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
433 /* 20 sec timeout not reached */ 462 /* 20 sec timeout not reached */
434 return 1; 463 return 1;
435 } 464 }
@@ -439,20 +468,30 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
439 "bad line quality" : "driver error"); 468 "bad line quality" : "driver error");
440 469
441 ax->xleft = 0; 470 ax->xleft = 0;
442 ax->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 471 clear_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
443 ax_unlock(ax); 472 netif_start_queue(dev);
444 } 473 }
445 474
446 /* We were not busy, so we are now... :-) */ 475 /* We were not busy, so we are now... :-) */
447 if (skb != NULL) { 476 if (skb != NULL) {
448 ax_lock(ax); 477 netif_stop_queue(dev);
449 ax_encaps(ax, skb->data, skb->len); 478 ax_encaps(dev, skb->data, skb->len);
450 kfree_skb(skb); 479 kfree_skb(skb);
451 } 480 }
452 481
453 return 0; 482 return 0;
454} 483}
455 484
485static int ax_open_dev(struct net_device *dev)
486{
487 struct mkiss *ax = netdev_priv(dev);
488
489 if (ax->tty == NULL)
490 return -ENODEV;
491
492 return 0;
493}
494
456#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 495#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
457 496
458/* Return the frame type ID */ 497/* Return the frame type ID */
@@ -481,7 +520,7 @@ static int ax_rebuild_header(struct sk_buff *skb)
481/* Open the low-level part of the AX25 channel. Easy! */ 520/* Open the low-level part of the AX25 channel. Easy! */
482static int ax_open(struct net_device *dev) 521static int ax_open(struct net_device *dev)
483{ 522{
484 struct ax_disp *ax = netdev_priv(dev); 523 struct mkiss *ax = netdev_priv(dev);
485 unsigned long len; 524 unsigned long len;
486 525
487 if (ax->tty == NULL) 526 if (ax->tty == NULL)
@@ -518,7 +557,6 @@ static int ax_open(struct net_device *dev)
518 557
519 spin_lock_init(&ax->buflock); 558 spin_lock_init(&ax->buflock);
520 559
521 netif_start_queue(dev);
522 return 0; 560 return 0;
523 561
524noxbuff: 562noxbuff:
@@ -532,68 +570,100 @@ norbuff:
532/* Close the low-level part of the AX25 channel. Easy! */ 570/* Close the low-level part of the AX25 channel. Easy! */
533static int ax_close(struct net_device *dev) 571static int ax_close(struct net_device *dev)
534{ 572{
535 struct ax_disp *ax = netdev_priv(dev); 573 struct mkiss *ax = netdev_priv(dev);
536 574
537 if (ax->tty == NULL) 575 if (ax->tty)
538 return -EBUSY; 576 clear_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
539
540 ax->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
541 577
542 netif_stop_queue(dev); 578 netif_stop_queue(dev);
543 579
544 return 0; 580 return 0;
545} 581}
546 582
547static int ax25_receive_room(struct tty_struct *tty) 583static struct net_device_stats *ax_get_stats(struct net_device *dev)
548{ 584{
549 return 65536; /* We can handle an infinite amount of data. :-) */ 585 struct mkiss *ax = netdev_priv(dev);
586
587 return &ax->stats;
588}
589
590static void ax_setup(struct net_device *dev)
591{
592 static char ax25_bcast[AX25_ADDR_LEN] =
593 {'Q'<<1,'S'<<1,'T'<<1,' '<<1,' '<<1,' '<<1,'0'<<1};
594 static char ax25_test[AX25_ADDR_LEN] =
595 {'L'<<1,'I'<<1,'N'<<1,'U'<<1,'X'<<1,' '<<1,'1'<<1};
596
597 /* Finish setting up the DEVICE info. */
598 dev->mtu = AX_MTU;
599 dev->hard_start_xmit = ax_xmit;
600 dev->open = ax_open_dev;
601 dev->stop = ax_close;
602 dev->get_stats = ax_get_stats;
603 dev->set_mac_address = ax_set_mac_address;
604 dev->hard_header_len = 0;
605 dev->addr_len = 0;
606 dev->type = ARPHRD_AX25;
607 dev->tx_queue_len = 10;
608 dev->hard_header = ax_header;
609 dev->rebuild_header = ax_rebuild_header;
610
611 memcpy(dev->broadcast, ax25_bcast, AX25_ADDR_LEN);
612 memcpy(dev->dev_addr, ax25_test, AX25_ADDR_LEN);
613
614 dev->flags = IFF_BROADCAST | IFF_MULTICAST;
550} 615}
551 616
552/* 617/*
553 * Handle the 'receiver data ready' interrupt. 618 * We have a potential race on dereferencing tty->disc_data, because the tty
554 * This function is called by the 'tty_io' module in the kernel when 619 * layer provides no locking at all - thus one cpu could be running
555 * a block of data has been received, which can now be decapsulated 620 * sixpack_receive_buf while another calls sixpack_close, which zeroes
556 * and sent on to the AX.25 layer for further processing. 621 * tty->disc_data and frees the memory that sixpack_receive_buf is using. The
622 * best way to fix this is to use a rwlock in the tty struct, but for now we
623 * use a single global rwlock for all ttys in ppp line discipline.
557 */ 624 */
558static void ax25_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) 625static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED;
626
627static struct mkiss *mkiss_get(struct tty_struct *tty)
559{ 628{
560 struct ax_disp *ax = (struct ax_disp *) tty->disc_data; 629 struct mkiss *ax;
561 630
562 if (ax == NULL || ax->magic != AX25_MAGIC || !netif_running(ax->dev)) 631 read_lock(&disc_data_lock);
563 return; 632 ax = tty->disc_data;
633 if (ax)
634 atomic_inc(&ax->refcnt);
635 read_unlock(&disc_data_lock);
564 636
565 /* 637 return ax;
566 * Argh! mtu change time! - costs us the packet part received 638}
567 * at the change
568 */
569 if (ax->mtu != ax->dev->mtu + 73)
570 ax_changedmtu(ax);
571
572 /* Read the characters out of the buffer */
573 while (count--) {
574 if (fp != NULL && *fp++) {
575 if (!test_and_set_bit(AXF_ERROR, &ax->flags))
576 ax->rx_errors++;
577 cp++;
578 continue;
579 }
580 639
581 kiss_unesc(ax, *cp++); 640static void mkiss_put(struct mkiss *ax)
582 } 641{
642 if (atomic_dec_and_test(&ax->refcnt))
643 up(&ax->dead_sem);
583} 644}
584 645
585static int ax25_open(struct tty_struct *tty) 646static int mkiss_open(struct tty_struct *tty)
586{ 647{
587 struct ax_disp *ax = (struct ax_disp *) tty->disc_data; 648 struct net_device *dev;
649 struct mkiss *ax;
588 int err; 650 int err;
589 651
590 /* First make sure we're not already connected. */ 652 if (!capable(CAP_NET_ADMIN))
591 if (ax && ax->magic == AX25_MAGIC) 653 return -EPERM;
592 return -EEXIST;
593 654
594 /* OK. Find a free AX25 channel to use. */ 655 dev = alloc_netdev(sizeof(struct mkiss), "ax%d", ax_setup);
595 if ((ax = ax_alloc()) == NULL) 656 if (!dev) {
596 return -ENFILE; 657 err = -ENOMEM;
658 goto out;
659 }
660
661 ax = netdev_priv(dev);
662 ax->dev = dev;
663
664 spin_lock_init(&ax->buflock);
665 atomic_set(&ax->refcnt, 1);
666 init_MUTEX_LOCKED(&ax->dead_sem);
597 667
598 ax->tty = tty; 668 ax->tty = tty;
599 tty->disc_data = ax; 669 tty->disc_data = ax;
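Note on mkiss_get()/mkiss_put() in the hunk above: tty->disc_data is guarded by a global rwlock plus a per-channel reference count, so every caller that dereferences disc_data takes a reference, and the close path drops the initial reference and sleeps on dead_sem until the last user signals it. The userspace analogue below only illustrates that teardown handshake; POSIX atomics and semaphores stand in for the kernel's atomic_t and semaphore (build with cc -std=c11 -pthread).

/* sketch: refcount + semaphore teardown handshake */
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

struct chan {
	atomic_int refcnt;
	sem_t dead_sem;
};

static void chan_put(struct chan *c)
{
	if (atomic_fetch_sub(&c->refcnt, 1) == 1)	/* we dropped the last reference */
		sem_post(&c->dead_sem);			/* wake the closer */
}

static void chan_close(struct chan *c)
{
	/* drop the initial reference; wait if somebody still holds one */
	if (atomic_fetch_sub(&c->refcnt, 1) != 1)
		sem_wait(&c->dead_sem);
	printf("safe to free channel state\n");
}

int main(void)
{
	struct chan c;

	atomic_init(&c.refcnt, 1);
	sem_init(&c.dead_sem, 0, 0);

	atomic_fetch_add(&c.refcnt, 1);	/* a reader takes a reference (mkiss_get) */
	chan_put(&c);			/* ...and releases it (mkiss_put) */
	chan_close(&c);			/* teardown waits for outstanding users */

	sem_destroy(&c.dead_sem);
	return 0;
}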
@@ -602,283 +672,212 @@ static int ax25_open(struct tty_struct *tty)
602 tty->driver->flush_buffer(tty); 672 tty->driver->flush_buffer(tty);
603 673
604 /* Restore default settings */ 674 /* Restore default settings */
605 ax->dev->type = ARPHRD_AX25; 675 dev->type = ARPHRD_AX25;
606 676
607 /* Perform the low-level AX25 initialization. */ 677 /* Perform the low-level AX25 initialization. */
608 if ((err = ax_open(ax->dev))) 678 if ((err = ax_open(ax->dev))) {
609 return err; 679 goto out_free_netdev;
680 }
610 681
611 /* Done. We have linked the TTY line to a channel. */ 682 if (register_netdev(dev))
612 return ax->dev->base_addr; 683 goto out_free_buffers;
613}
614 684
615static void ax25_close(struct tty_struct *tty) 685 netif_start_queue(dev);
616{
617 struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
618 686
619 /* First make sure we're connected. */ 687 /* Done. We have linked the TTY line to a channel. */
620 if (ax == NULL || ax->magic != AX25_MAGIC) 688 return 0;
621 return;
622 689
623 unregister_netdev(ax->dev); 690out_free_buffers:
691 kfree(ax->rbuff);
692 kfree(ax->xbuff);
624 693
625 tty->disc_data = NULL; 694out_free_netdev:
626 ax->tty = NULL; 695 free_netdev(dev);
627 696
628 ax_free(ax); 697out:
698 return err;
629} 699}
630 700
631 701static void mkiss_close(struct tty_struct *tty)
632static struct net_device_stats *ax_get_stats(struct net_device *dev)
633{ 702{
634 static struct net_device_stats stats; 703 struct mkiss *ax;
635 struct ax_disp *ax = netdev_priv(dev);
636
637 memset(&stats, 0, sizeof(struct net_device_stats));
638
639 stats.rx_packets = ax->rx_packets;
640 stats.tx_packets = ax->tx_packets;
641 stats.rx_bytes = ax->rx_bytes;
642 stats.tx_bytes = ax->tx_bytes;
643 stats.rx_dropped = ax->rx_dropped;
644 stats.tx_dropped = ax->tx_dropped;
645 stats.tx_errors = ax->tx_errors;
646 stats.rx_errors = ax->rx_errors;
647 stats.rx_over_errors = ax->rx_over_errors;
648
649 return &stats;
650}
651 704
705 write_lock(&disc_data_lock);
706 ax = tty->disc_data;
707 tty->disc_data = NULL;
708 write_unlock(&disc_data_lock);
652 709
653/************************************************************************ 710 if (ax == 0)
654 * STANDARD ENCAPSULATION * 711 return;
655 ************************************************************************/
656
657static int kiss_esc(unsigned char *s, unsigned char *d, int len)
658{
659 unsigned char *ptr = d;
660 unsigned char c;
661 712
662 /* 713 /*
663 * Send an initial END character to flush out any 714 * We have now ensured that nobody can start using ap from now on, but
664 * data that may have accumulated in the receiver 715 * we have to wait for all existing users to finish.
665 * due to line noise.
666 */ 716 */
717 if (!atomic_dec_and_test(&ax->refcnt))
718 down(&ax->dead_sem);
667 719
668 *ptr++ = END; 720 unregister_netdev(ax->dev);
669
670 while (len-- > 0) {
671 switch (c = *s++) {
672 case END:
673 *ptr++ = ESC;
674 *ptr++ = ESC_END;
675 break;
676 case ESC:
677 *ptr++ = ESC;
678 *ptr++ = ESC_ESC;
679 break;
680 default:
681 *ptr++ = c;
682 break;
683 }
684 }
685 721
686 *ptr++ = END; 722 /* Free all AX25 frame buffers. */
723 kfree(ax->rbuff);
724 kfree(ax->xbuff);
687 725
688 return ptr - d; 726 ax->tty = NULL;
689} 727}
690 728
691/* 729/* Perform I/O control on an active ax25 channel. */
692 * MW: 730static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
693 * OK its ugly, but tell me a better solution without copying the 731 unsigned int cmd, unsigned long arg)
694 * packet to a temporary buffer :-)
695 */
696static int kiss_esc_crc(unsigned char *s, unsigned char *d, unsigned short crc, int len)
697{ 732{
698 unsigned char *ptr = d; 733 struct mkiss *ax = mkiss_get(tty);
699 unsigned char c=0; 734 struct net_device *dev = ax->dev;
700 735 unsigned int tmp, err;
701 *ptr++ = END;
702 while (len > 0) {
703 if (len > 2)
704 c = *s++;
705 else if (len > 1)
706 c = crc >> 8;
707 else if (len > 0)
708 c = crc & 0xff;
709 736
710 len--; 737 /* First make sure we're connected. */
738 if (ax == NULL)
739 return -ENXIO;
711 740
712 switch (c) { 741 switch (cmd) {
713 case END: 742 case SIOCGIFNAME:
714 *ptr++ = ESC; 743 err = copy_to_user((void __user *) arg, ax->dev->name,
715 *ptr++ = ESC_END; 744 strlen(ax->dev->name) + 1) ? -EFAULT : 0;
716 break; 745 break;
717 case ESC: 746
718 *ptr++ = ESC; 747 case SIOCGIFENCAP:
719 *ptr++ = ESC_ESC; 748 err = put_user(4, (int __user *) arg);
720 break; 749 break;
721 default: 750
722 *ptr++ = c; 751 case SIOCSIFENCAP:
723 break; 752 if (get_user(tmp, (int __user *) arg)) {
753 err = -EFAULT;
754 break;
724 } 755 }
725 }
726 *ptr++ = END;
727 return ptr - d;
728}
729 756
730static void kiss_unesc(struct ax_disp *ax, unsigned char s) 757 ax->mode = tmp;
731{ 758 dev->addr_len = AX25_ADDR_LEN;
732 switch (s) { 759 dev->hard_header_len = AX25_KISS_HEADER_LEN +
733 case END: 760 AX25_MAX_HEADER_LEN + 3;
734 /* drop keeptest bit = VSV */ 761 dev->type = ARPHRD_AX25;
735 if (test_bit(AXF_KEEPTEST, &ax->flags))
736 clear_bit(AXF_KEEPTEST, &ax->flags);
737 762
738 if (!test_and_clear_bit(AXF_ERROR, &ax->flags) && (ax->rcount > 2)) 763 err = 0;
739 ax_bump(ax); 764 break;
740 765
741 clear_bit(AXF_ESCAPE, &ax->flags); 766 case SIOCSIFHWADDR: {
742 ax->rcount = 0; 767 char addr[AX25_ADDR_LEN];
743 return; 768printk(KERN_INFO "In SIOCSIFHWADDR");
744 769
745 case ESC: 770 if (copy_from_user(&addr,
746 set_bit(AXF_ESCAPE, &ax->flags); 771 (void __user *) arg, AX25_ADDR_LEN)) {
747 return; 772 err = -EFAULT;
748 case ESC_ESC:
749 if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
750 s = ESC;
751 break; 773 break;
752 case ESC_END:
753 if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
754 s = END;
755 break;
756 }
757
758 spin_lock_bh(&ax->buflock);
759 if (!test_bit(AXF_ERROR, &ax->flags)) {
760 if (ax->rcount < ax->buffsize) {
761 ax->rbuff[ax->rcount++] = s;
762 spin_unlock_bh(&ax->buflock);
763 return;
764 } 774 }
765 775
766 ax->rx_over_errors++; 776 spin_lock_irq(&dev->xmit_lock);
767 set_bit(AXF_ERROR, &ax->flags); 777 memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
778 spin_unlock_irq(&dev->xmit_lock);
779
780 err = 0;
781 break;
782 }
783 default:
784 err = -ENOIOCTLCMD;
768 } 785 }
769 spin_unlock_bh(&ax->buflock);
770}
771 786
787 mkiss_put(ax);
772 788
773static int ax_set_mac_address(struct net_device *dev, void __user *addr) 789 return err;
774{
775 if (copy_from_user(dev->dev_addr, addr, AX25_ADDR_LEN))
776 return -EFAULT;
777 return 0;
778} 790}
779 791
780static int ax_set_dev_mac_address(struct net_device *dev, void *addr) 792/*
793 * Handle the 'receiver data ready' interrupt.
794 * This function is called by the 'tty_io' module in the kernel when
795 * a block of data has been received, which can now be decapsulated
796 * and sent on to the AX.25 layer for further processing.
797 */
798static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
799 char *fp, int count)
781{ 800{
782 struct sockaddr *sa = addr; 801 struct mkiss *ax = mkiss_get(tty);
783
784 memcpy(dev->dev_addr, sa->sa_data, AX25_ADDR_LEN);
785 802
786 return 0; 803 if (!ax)
787} 804 return;
788
789
790/* Perform I/O control on an active ax25 channel. */
791static int ax25_disp_ioctl(struct tty_struct *tty, void *file, int cmd, void __user *arg)
792{
793 struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
794 unsigned int tmp;
795 805
796 /* First make sure we're connected. */ 806 /*
797 if (ax == NULL || ax->magic != AX25_MAGIC) 807 * Argh! mtu change time! - costs us the packet part received
798 return -EINVAL; 808 * at the change
809 */
810 if (ax->mtu != ax->dev->mtu + 73)
811 ax_changedmtu(ax);
799 812
800 switch (cmd) { 813 /* Read the characters out of the buffer */
801 case SIOCGIFNAME: 814 while (count--) {
802 if (copy_to_user(arg, ax->dev->name, strlen(ax->dev->name) + 1)) 815 if (fp != NULL && *fp++) {
803 return -EFAULT; 816 if (!test_and_set_bit(AXF_ERROR, &ax->flags))
804 return 0; 817 ax->stats.rx_errors++;
805 818 cp++;
806 case SIOCGIFENCAP: 819 continue;
807 return put_user(4, (int __user *)arg); 820 }
808
809 case SIOCSIFENCAP:
810 if (get_user(tmp, (int __user *)arg))
811 return -EFAULT;
812 ax->mode = tmp;
813 ax->dev->addr_len = AX25_ADDR_LEN; /* sizeof an AX.25 addr */
814 ax->dev->hard_header_len = AX25_KISS_HEADER_LEN + AX25_MAX_HEADER_LEN + 3;
815 ax->dev->type = ARPHRD_AX25;
816 return 0;
817
818 case SIOCSIFHWADDR:
819 return ax_set_mac_address(ax->dev, arg);
820 821
821 default: 822 kiss_unesc(ax, *cp++);
822 return -ENOIOCTLCMD;
823 } 823 }
824
825 mkiss_put(ax);
826 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
827 && tty->driver->unthrottle)
828 tty->driver->unthrottle(tty);
824} 829}
825 830
826static int ax_open_dev(struct net_device *dev) 831static int mkiss_receive_room(struct tty_struct *tty)
827{ 832{
828 struct ax_disp *ax = netdev_priv(dev); 833 return 65536; /* We can handle an infinite amount of data. :-) */
829
830 if (ax->tty == NULL)
831 return -ENODEV;
832
833 return 0;
834} 834}
835 835
836 836/*
837/* Initialize the driver. Called by network startup. */ 837 * Called by the driver when there's room for more data. If we have
838static int ax25_init(struct net_device *dev) 838 * more packets to send, we send them here.
839 */
840static void mkiss_write_wakeup(struct tty_struct *tty)
839{ 841{
840 struct ax_disp *ax = netdev_priv(dev); 842 struct mkiss *ax = mkiss_get(tty);
841 843 int actual;
842 static char ax25_bcast[AX25_ADDR_LEN] =
843 {'Q'<<1,'S'<<1,'T'<<1,' '<<1,' '<<1,' '<<1,'0'<<1};
844 static char ax25_test[AX25_ADDR_LEN] =
845 {'L'<<1,'I'<<1,'N'<<1,'U'<<1,'X'<<1,' '<<1,'1'<<1};
846
847 if (ax == NULL) /* Allocation failed ?? */
848 return -ENODEV;
849 844
850 /* Set up the "AX25 Control Block". (And clear statistics) */ 845 if (!ax)
851 memset(ax, 0, sizeof (struct ax_disp)); 846 return;
852 ax->magic = AX25_MAGIC;
853 ax->dev = dev;
854 847
855 /* Finish setting up the DEVICE info. */ 848 if (ax->xleft <= 0) {
856 dev->mtu = AX_MTU; 849 /* Now serial buffer is almost free & we can start
857 dev->hard_start_xmit = ax_xmit; 850 * transmission of another packet
858 dev->open = ax_open_dev; 851 */
859 dev->stop = ax_close; 852 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
860 dev->get_stats = ax_get_stats;
861 dev->set_mac_address = ax_set_dev_mac_address;
862 dev->hard_header_len = 0;
863 dev->addr_len = 0;
864 dev->type = ARPHRD_AX25;
865 dev->tx_queue_len = 10;
866 dev->hard_header = ax_header;
867 dev->rebuild_header = ax_rebuild_header;
868 853
869 memcpy(dev->broadcast, ax25_bcast, AX25_ADDR_LEN); 854 netif_wake_queue(ax->dev);
870 memcpy(dev->dev_addr, ax25_test, AX25_ADDR_LEN); 855 goto out;
856 }
871 857
872 /* New-style flags. */ 858 actual = tty->driver->write(tty, ax->xhead, ax->xleft);
873 dev->flags = IFF_BROADCAST | IFF_MULTICAST; 859 ax->xleft -= actual;
860 ax->xhead += actual;
874 861
875 return 0; 862out:
863 mkiss_put(ax);
876} 864}
877 865
866static struct tty_ldisc ax_ldisc = {
867 .magic = TTY_LDISC_MAGIC,
868 .name = "mkiss",
869 .open = mkiss_open,
870 .close = mkiss_close,
871 .ioctl = mkiss_ioctl,
872 .receive_buf = mkiss_receive_buf,
873 .receive_room = mkiss_receive_room,
874 .write_wakeup = mkiss_write_wakeup
875};
878 876
879/* ******************************************************************** */ 877static char banner[] __initdata = KERN_INFO \
880/* * Init MKISS driver * */ 878 "mkiss: AX.25 Multikiss, Hans Albas PE1AYX\n";
881/* ******************************************************************** */ 879static char msg_regfail[] __initdata = KERN_ERR \
880 "mkiss: can't register line discipline (err = %d)\n";
882 881
883static int __init mkiss_init_driver(void) 882static int __init mkiss_init_driver(void)
884{ 883{
@@ -886,64 +885,27 @@ static int __init mkiss_init_driver(void)
886 885
887 printk(banner); 886 printk(banner);
888 887
889 if (ax25_maxdev < 4) 888 if ((status = tty_register_ldisc(N_AX25, &ax_ldisc)) != 0)
890 ax25_maxdev = 4; /* Sanity */ 889 printk(msg_regfail);
891 890
892 if ((ax25_ctrls = kmalloc(sizeof(void *) * ax25_maxdev, GFP_KERNEL)) == NULL) {
893 printk(KERN_ERR "mkiss: Can't allocate ax25_ctrls[] array!\n");
894 return -ENOMEM;
895 }
896
897 /* Clear the pointer array, we allocate devices when we need them */
898 memset(ax25_ctrls, 0, sizeof(void*) * ax25_maxdev); /* Pointers */
899
900 /* Fill in our line protocol discipline, and register it */
901 ax_ldisc.magic = TTY_LDISC_MAGIC;
902 ax_ldisc.name = "mkiss";
903 ax_ldisc.open = ax25_open;
904 ax_ldisc.close = ax25_close;
905 ax_ldisc.ioctl = (int (*)(struct tty_struct *, struct file *,
906 unsigned int, unsigned long))ax25_disp_ioctl;
907 ax_ldisc.receive_buf = ax25_receive_buf;
908 ax_ldisc.receive_room = ax25_receive_room;
909 ax_ldisc.write_wakeup = ax25_write_wakeup;
910
911 if ((status = tty_register_ldisc(N_AX25, &ax_ldisc)) != 0) {
912 printk(KERN_ERR "mkiss: can't register line discipline (err = %d)\n", status);
913 kfree(ax25_ctrls);
914 }
915 return status; 891 return status;
916} 892}
917 893
894static const char msg_unregfail[] __exitdata = KERN_ERR \
895 "mkiss: can't unregister line discipline (err = %d)\n";
896
918static void __exit mkiss_exit_driver(void) 897static void __exit mkiss_exit_driver(void)
919{ 898{
920 int i; 899 int ret;
921
922 for (i = 0; i < ax25_maxdev; i++) {
923 if (ax25_ctrls[i]) {
924 /*
925 * VSV = if dev->start==0, then device
926 * unregistered while close proc.
927 */
928 if (netif_running(&ax25_ctrls[i]->dev))
929 unregister_netdev(&ax25_ctrls[i]->dev);
930 kfree(ax25_ctrls[i]);
931 }
932 }
933 900
934 kfree(ax25_ctrls); 901 if ((ret = tty_unregister_ldisc(N_AX25)))
935 ax25_ctrls = NULL; 902 printk(msg_unregfail, ret);
936
937 if ((i = tty_unregister_ldisc(N_AX25)))
938 printk(KERN_ERR "mkiss: can't unregister line discipline (err = %d)\n", i);
939} 903}
940 904
941MODULE_AUTHOR("Hans Albas PE1AYX <hans@esrac.ele.tue.nl>"); 905MODULE_AUTHOR("Ralf Baechle DL5RB <ralf@linux-mips.org>");
942MODULE_DESCRIPTION("KISS driver for AX.25 over TTYs"); 906MODULE_DESCRIPTION("KISS driver for AX.25 over TTYs");
943MODULE_PARM(ax25_maxdev, "i");
944MODULE_PARM_DESC(ax25_maxdev, "number of MKISS devices");
945MODULE_LICENSE("GPL"); 907MODULE_LICENSE("GPL");
946MODULE_ALIAS_LDISC(N_AX25); 908MODULE_ALIAS_LDISC(N_AX25);
909
947module_init(mkiss_init_driver); 910module_init(mkiss_init_driver);
948module_exit(mkiss_exit_driver); 911module_exit(mkiss_exit_driver);
949
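
The removed column of the mkiss.c diff above still shows the original kiss_esc()/kiss_unesc() pair, which implement ordinary KISS/SLIP byte stuffing: a frame is delimited by END bytes, and any END or ESC byte inside the payload is replaced by a two-byte escape sequence so it cannot be mistaken for a delimiter. The following is a minimal standalone userspace sketch of that stuffing rule, not the driver code itself; the constant values are the conventional KISS framing bytes and are assumed here, since the driver's own #defines are not visible in this diff.

/* Minimal userspace sketch of KISS byte stuffing (not the driver code).
 * Framing constants are assumed; the driver's #defines are not shown in
 * this diff. */
#include <stdio.h>
#include <stddef.h>

#define KISS_END     0xC0  /* frame delimiter */
#define KISS_ESC     0xDB  /* escape introducer */
#define KISS_ESC_END 0xDC  /* escaped END */
#define KISS_ESC_ESC 0xDD  /* escaped ESC */

/* Worst case output is 2*len + 2 bytes (every byte escaped, plus delimiters). */
static size_t kiss_stuff(const unsigned char *src, size_t len, unsigned char *dst)
{
	unsigned char *p = dst;

	*p++ = KISS_END;               /* leading END flushes line noise */
	while (len--) {
		unsigned char c = *src++;

		switch (c) {
		case KISS_END:
			*p++ = KISS_ESC;
			*p++ = KISS_ESC_END;
			break;
		case KISS_ESC:
			*p++ = KISS_ESC;
			*p++ = KISS_ESC_ESC;
			break;
		default:
			*p++ = c;
		}
	}
	*p++ = KISS_END;               /* closing delimiter */
	return p - dst;
}

int main(void)
{
	const unsigned char frame[] = { 0x00, 0xC0, 0x41, 0xDB };
	unsigned char out[2 * sizeof(frame) + 2];
	size_t n = kiss_stuff(frame, sizeof(frame), out);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", out[i]);
	printf("\n");                  /* prints: c0 00 db dc 41 db dd c0 */
	return 0;
}

Decoding (as in kiss_unesc) simply reverses the table: an ESC followed by ESC_END yields END, and ESC followed by ESC_ESC yields ESC, while a bare END terminates the frame.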
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index f8d3385c7842..c83271b38621 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -119,7 +119,7 @@ struct ixgb_adapter;
119 * so a DMA handle can be stored along with the buffer */ 119 * so a DMA handle can be stored along with the buffer */
120struct ixgb_buffer { 120struct ixgb_buffer {
121 struct sk_buff *skb; 121 struct sk_buff *skb;
122 uint64_t dma; 122 dma_addr_t dma;
123 unsigned long time_stamp; 123 unsigned long time_stamp;
124 uint16_t length; 124 uint16_t length;
125 uint16_t next_to_watch; 125 uint16_t next_to_watch;
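
The ixgb.h hunk above swaps a raw uint64_t for dma_addr_t in the per-buffer bookkeeping. dma_addr_t is the type the DMA mapping API actually returns, sized per architecture, so storing the handle in it avoids silent truncation or needless width. Below is a hedged sketch of that pattern with hypothetical names, written against the present-day two-argument dma_mapping_error(); it is an illustration, not code from this patch.

/* Hedged sketch (hypothetical driver code, not from this patch): keep a
 * streaming DMA mapping in dma_addr_t, the type the DMA API returns. */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

struct rx_slot {
	struct sk_buff *skb;
	dma_addr_t dma;		/* bus address handed to the NIC */
	u16 length;
};

static int map_rx_slot(struct device *dev, struct rx_slot *slot, unsigned int len)
{
	slot->dma = dma_map_single(dev, slot->skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, slot->dma))
		return -ENOMEM;
	slot->length = len;
	return 0;
}

static void unmap_rx_slot(struct device *dev, struct rx_slot *slot)
{
	dma_unmap_single(dev, slot->dma, slot->length, DMA_FROM_DEVICE);
}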
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 3aae110c5560..661a46b95a61 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -565,24 +565,6 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
565 } 565 }
566} 566}
567 567
568/******************************************************************************
569 * return the compatibility flags from EEPROM
570 *
571 * hw - Struct containing variables accessed by shared code
572 *
573 * Returns:
574 * compatibility flags if EEPROM contents are valid, 0 otherwise
575 ******************************************************************************/
576uint16_t
577ixgb_get_ee_compatibility(struct ixgb_hw *hw)
578{
579 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
580
581 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
582 return (le16_to_cpu(ee_map->compatibility));
583
584 return(0);
585}
586 568
587/****************************************************************************** 569/******************************************************************************
588 * return the Printed Board Assembly number from EEPROM 570 * return the Printed Board Assembly number from EEPROM
@@ -602,81 +584,6 @@ ixgb_get_ee_pba_number(struct ixgb_hw *hw)
602 return(0); 584 return(0);
603} 585}
604 586
605/******************************************************************************
606 * return the Initialization Control Word 1 from EEPROM
607 *
608 * hw - Struct containing variables accessed by shared code
609 *
610 * Returns:
611 * Initialization Control Word 1 if EEPROM contents are valid, 0 otherwise
612 ******************************************************************************/
613uint16_t
614ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw)
615{
616 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
617
618 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
619 return (le16_to_cpu(ee_map->init_ctrl_reg_1));
620
621 return(0);
622}
623
624/******************************************************************************
625 * return the Initialization Control Word 2 from EEPROM
626 *
627 * hw - Struct containing variables accessed by shared code
628 *
629 * Returns:
630 * Initialization Control Word 2 if EEPROM contents are valid, 0 otherwise
631 ******************************************************************************/
632uint16_t
633ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw)
634{
635 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
636
637 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
638 return (le16_to_cpu(ee_map->init_ctrl_reg_2));
639
640 return(0);
641}
642
643/******************************************************************************
644 * return the Subsystem Id from EEPROM
645 *
646 * hw - Struct containing variables accessed by shared code
647 *
648 * Returns:
649 * Subsystem Id if EEPROM contents are valid, 0 otherwise
650 ******************************************************************************/
651uint16_t
652ixgb_get_ee_subsystem_id(struct ixgb_hw *hw)
653{
654 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
655
656 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
657 return (le16_to_cpu(ee_map->subsystem_id));
658
659 return(0);
660}
661
662/******************************************************************************
663 * return the Sub Vendor Id from EEPROM
664 *
665 * hw - Struct containing variables accessed by shared code
666 *
667 * Returns:
668 * Sub Vendor Id if EEPROM contents are valid, 0 otherwise
669 ******************************************************************************/
670uint16_t
671ixgb_get_ee_subvendor_id(struct ixgb_hw *hw)
672{
673 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
674
675 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
676 return (le16_to_cpu(ee_map->subvendor_id));
677
678 return(0);
679}
680 587
681/****************************************************************************** 588/******************************************************************************
682 * return the Device Id from EEPROM 589 * return the Device Id from EEPROM
@@ -694,81 +601,6 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
694 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 601 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
695 return (le16_to_cpu(ee_map->device_id)); 602 return (le16_to_cpu(ee_map->device_id));
696 603
697 return(0); 604 return (0);
698}
699
700/******************************************************************************
701 * return the Vendor Id from EEPROM
702 *
703 * hw - Struct containing variables accessed by shared code
704 *
705 * Returns:
706 * Device Id if EEPROM contents are valid, 0 otherwise
707 ******************************************************************************/
708uint16_t
709ixgb_get_ee_vendor_id(struct ixgb_hw *hw)
710{
711 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
712
713 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
714 return (le16_to_cpu(ee_map->vendor_id));
715
716 return(0);
717}
718
719/******************************************************************************
720 * return the Software Defined Pins Register from EEPROM
721 *
722 * hw - Struct containing variables accessed by shared code
723 *
724 * Returns:
725 * SDP Register if EEPROM contents are valid, 0 otherwise
726 ******************************************************************************/
727uint16_t
728ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw)
729{
730 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
731
732 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
733 return (le16_to_cpu(ee_map->swdpins_reg));
734
735 return(0);
736} 605}
737 606
738/******************************************************************************
739 * return the D3 Power Management Bits from EEPROM
740 *
741 * hw - Struct containing variables accessed by shared code
742 *
743 * Returns:
744 * D3 Power Management Bits if EEPROM contents are valid, 0 otherwise
745 ******************************************************************************/
746uint8_t
747ixgb_get_ee_d3_power(struct ixgb_hw *hw)
748{
749 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
750
751 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
752 return (le16_to_cpu(ee_map->d3_power));
753
754 return(0);
755}
756
757/******************************************************************************
758 * return the D0 Power Management Bits from EEPROM
759 *
760 * hw - Struct containing variables accessed by shared code
761 *
762 * Returns:
763 * D0 Power Management Bits if EEPROM contents are valid, 0 otherwise
764 ******************************************************************************/
765uint8_t
766ixgb_get_ee_d0_power(struct ixgb_hw *hw)
767{
768 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
769
770 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
771 return (le16_to_cpu(ee_map->d0_power));
772
773 return(0);
774}
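
The accessors deleted from ixgb_ee.c all followed the same pattern: validate the EEPROM contents, then return le16_to_cpu() of one field of the on-chip map, since the EEPROM words are stored little-endian regardless of host byte order. The standalone snippet below illustrates that conversion from a raw byte buffer; it is a userspace stand-in for le16_to_cpu(), not the kernel helper, and the word value is only an example.

/* Userspace stand-in for reading a little-endian 16-bit EEPROM word and
 * converting it to host byte order (the effect of le16_to_cpu() above). */
#include <stdio.h>
#include <stdint.h>

static uint16_t le16_get(const uint8_t *p)
{
	/* byte 0 is the least significant byte, regardless of host endianness */
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	const uint8_t eeprom_word[2] = { 0x48, 0x10 };	/* example word, LE order */

	printf("word = 0x%04x\n", le16_get(eeprom_word));	/* 0x1048 */
	return 0;
}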
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 3fa113854eeb..9d026ed77ddd 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -98,10 +98,10 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
98static int 98static int
99ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 99ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
100{ 100{
101 struct ixgb_adapter *adapter = netdev->priv; 101 struct ixgb_adapter *adapter = netdev_priv(netdev);
102 102
103 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 103 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
104 ecmd->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 104 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
105 ecmd->port = PORT_FIBRE; 105 ecmd->port = PORT_FIBRE;
106 ecmd->transceiver = XCVR_EXTERNAL; 106 ecmd->transceiver = XCVR_EXTERNAL;
107 107
@@ -120,7 +120,7 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
120static int 120static int
121ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 121ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
122{ 122{
123 struct ixgb_adapter *adapter = netdev->priv; 123 struct ixgb_adapter *adapter = netdev_priv(netdev);
124 124
125 if(ecmd->autoneg == AUTONEG_ENABLE || 125 if(ecmd->autoneg == AUTONEG_ENABLE ||
126 ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL) 126 ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
@@ -130,6 +130,12 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
130 ixgb_down(adapter, TRUE); 130 ixgb_down(adapter, TRUE);
131 ixgb_reset(adapter); 131 ixgb_reset(adapter);
132 ixgb_up(adapter); 132 ixgb_up(adapter);
133 /* be optimistic about our link, since we were up before */
134 adapter->link_speed = 10000;
135 adapter->link_duplex = FULL_DUPLEX;
136 netif_carrier_on(netdev);
137 netif_wake_queue(netdev);
138
133 } else 139 } else
134 ixgb_reset(adapter); 140 ixgb_reset(adapter);
135 141
@@ -140,7 +146,7 @@ static void
140ixgb_get_pauseparam(struct net_device *netdev, 146ixgb_get_pauseparam(struct net_device *netdev,
141 struct ethtool_pauseparam *pause) 147 struct ethtool_pauseparam *pause)
142{ 148{
143 struct ixgb_adapter *adapter = netdev->priv; 149 struct ixgb_adapter *adapter = netdev_priv(netdev);
144 struct ixgb_hw *hw = &adapter->hw; 150 struct ixgb_hw *hw = &adapter->hw;
145 151
146 pause->autoneg = AUTONEG_DISABLE; 152 pause->autoneg = AUTONEG_DISABLE;
@@ -159,7 +165,7 @@ static int
159ixgb_set_pauseparam(struct net_device *netdev, 165ixgb_set_pauseparam(struct net_device *netdev,
160 struct ethtool_pauseparam *pause) 166 struct ethtool_pauseparam *pause)
161{ 167{
162 struct ixgb_adapter *adapter = netdev->priv; 168 struct ixgb_adapter *adapter = netdev_priv(netdev);
163 struct ixgb_hw *hw = &adapter->hw; 169 struct ixgb_hw *hw = &adapter->hw;
164 170
165 if(pause->autoneg == AUTONEG_ENABLE) 171 if(pause->autoneg == AUTONEG_ENABLE)
@@ -177,6 +183,11 @@ ixgb_set_pauseparam(struct net_device *netdev,
177 if(netif_running(adapter->netdev)) { 183 if(netif_running(adapter->netdev)) {
178 ixgb_down(adapter, TRUE); 184 ixgb_down(adapter, TRUE);
179 ixgb_up(adapter); 185 ixgb_up(adapter);
186 /* be optimistic about our link, since we were up before */
187 adapter->link_speed = 10000;
188 adapter->link_duplex = FULL_DUPLEX;
189 netif_carrier_on(netdev);
190 netif_wake_queue(netdev);
180 } else 191 } else
181 ixgb_reset(adapter); 192 ixgb_reset(adapter);
182 193
@@ -186,19 +197,26 @@ ixgb_set_pauseparam(struct net_device *netdev,
186static uint32_t 197static uint32_t
187ixgb_get_rx_csum(struct net_device *netdev) 198ixgb_get_rx_csum(struct net_device *netdev)
188{ 199{
189 struct ixgb_adapter *adapter = netdev->priv; 200 struct ixgb_adapter *adapter = netdev_priv(netdev);
201
190 return adapter->rx_csum; 202 return adapter->rx_csum;
191} 203}
192 204
193static int 205static int
194ixgb_set_rx_csum(struct net_device *netdev, uint32_t data) 206ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
195{ 207{
196 struct ixgb_adapter *adapter = netdev->priv; 208 struct ixgb_adapter *adapter = netdev_priv(netdev);
209
197 adapter->rx_csum = data; 210 adapter->rx_csum = data;
198 211
199 if(netif_running(netdev)) { 212 if(netif_running(netdev)) {
200 ixgb_down(adapter,TRUE); 213 ixgb_down(adapter,TRUE);
201 ixgb_up(adapter); 214 ixgb_up(adapter);
215 /* be optimistic about our link, since we were up before */
216 adapter->link_speed = 10000;
217 adapter->link_duplex = FULL_DUPLEX;
218 netif_carrier_on(netdev);
219 netif_wake_queue(netdev);
202 } else 220 } else
203 ixgb_reset(adapter); 221 ixgb_reset(adapter);
204 return 0; 222 return 0;
@@ -246,14 +264,15 @@ static void
246ixgb_get_regs(struct net_device *netdev, 264ixgb_get_regs(struct net_device *netdev,
247 struct ethtool_regs *regs, void *p) 265 struct ethtool_regs *regs, void *p)
248{ 266{
249 struct ixgb_adapter *adapter = netdev->priv; 267 struct ixgb_adapter *adapter = netdev_priv(netdev);
250 struct ixgb_hw *hw = &adapter->hw; 268 struct ixgb_hw *hw = &adapter->hw;
251 uint32_t *reg = p; 269 uint32_t *reg = p;
252 uint32_t *reg_start = reg; 270 uint32_t *reg_start = reg;
253 uint8_t i; 271 uint8_t i;
254 272
255 /* the 1 (one) below indicates an attempt at versioning, if the 273 /* the 1 (one) below indicates an attempt at versioning, if the
256 * interface in ethtool or the driver this 1 should be incremented */ 274 * interface in ethtool or the driver changes, this 1 should be
275 * incremented */
257 regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id; 276 regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
258 277
259 /* General Registers */ 278 /* General Registers */
@@ -283,7 +302,8 @@ ixgb_get_regs(struct net_device *netdev,
283 *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */ 302 *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */
284 *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */ 303 *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
285 304
286 for (i = 0; i < IXGB_RAR_ENTRIES; i++) { 305 /* there are 16 RAR entries in hardware, we only use 3 */
306 for(i = 0; i < 16; i++) {
287 *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */ 307 *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
288 *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */ 308 *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
289 } 309 }
@@ -391,7 +411,7 @@ static int
391ixgb_get_eeprom(struct net_device *netdev, 411ixgb_get_eeprom(struct net_device *netdev,
392 struct ethtool_eeprom *eeprom, uint8_t *bytes) 412 struct ethtool_eeprom *eeprom, uint8_t *bytes)
393{ 413{
394 struct ixgb_adapter *adapter = netdev->priv; 414 struct ixgb_adapter *adapter = netdev_priv(netdev);
395 struct ixgb_hw *hw = &adapter->hw; 415 struct ixgb_hw *hw = &adapter->hw;
396 uint16_t *eeprom_buff; 416 uint16_t *eeprom_buff;
397 int i, max_len, first_word, last_word; 417 int i, max_len, first_word, last_word;
@@ -439,7 +459,7 @@ static int
439ixgb_set_eeprom(struct net_device *netdev, 459ixgb_set_eeprom(struct net_device *netdev,
440 struct ethtool_eeprom *eeprom, uint8_t *bytes) 460 struct ethtool_eeprom *eeprom, uint8_t *bytes)
441{ 461{
442 struct ixgb_adapter *adapter = netdev->priv; 462 struct ixgb_adapter *adapter = netdev_priv(netdev);
443 struct ixgb_hw *hw = &adapter->hw; 463 struct ixgb_hw *hw = &adapter->hw;
444 uint16_t *eeprom_buff; 464 uint16_t *eeprom_buff;
445 void *ptr; 465 void *ptr;
@@ -497,7 +517,7 @@ static void
497ixgb_get_drvinfo(struct net_device *netdev, 517ixgb_get_drvinfo(struct net_device *netdev,
498 struct ethtool_drvinfo *drvinfo) 518 struct ethtool_drvinfo *drvinfo)
499{ 519{
500 struct ixgb_adapter *adapter = netdev->priv; 520 struct ixgb_adapter *adapter = netdev_priv(netdev);
501 521
502 strncpy(drvinfo->driver, ixgb_driver_name, 32); 522 strncpy(drvinfo->driver, ixgb_driver_name, 32);
503 strncpy(drvinfo->version, ixgb_driver_version, 32); 523 strncpy(drvinfo->version, ixgb_driver_version, 32);
@@ -512,7 +532,7 @@ static void
512ixgb_get_ringparam(struct net_device *netdev, 532ixgb_get_ringparam(struct net_device *netdev,
513 struct ethtool_ringparam *ring) 533 struct ethtool_ringparam *ring)
514{ 534{
515 struct ixgb_adapter *adapter = netdev->priv; 535 struct ixgb_adapter *adapter = netdev_priv(netdev);
516 struct ixgb_desc_ring *txdr = &adapter->tx_ring; 536 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
517 struct ixgb_desc_ring *rxdr = &adapter->rx_ring; 537 struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
518 538
@@ -530,7 +550,7 @@ static int
530ixgb_set_ringparam(struct net_device *netdev, 550ixgb_set_ringparam(struct net_device *netdev,
531 struct ethtool_ringparam *ring) 551 struct ethtool_ringparam *ring)
532{ 552{
533 struct ixgb_adapter *adapter = netdev->priv; 553 struct ixgb_adapter *adapter = netdev_priv(netdev);
534 struct ixgb_desc_ring *txdr = &adapter->tx_ring; 554 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
535 struct ixgb_desc_ring *rxdr = &adapter->rx_ring; 555 struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
536 struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new; 556 struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new;
@@ -573,6 +593,11 @@ ixgb_set_ringparam(struct net_device *netdev,
573 adapter->tx_ring = tx_new; 593 adapter->tx_ring = tx_new;
574 if((err = ixgb_up(adapter))) 594 if((err = ixgb_up(adapter)))
575 return err; 595 return err;
596 /* be optimistic about our link, since we were up before */
597 adapter->link_speed = 10000;
598 adapter->link_duplex = FULL_DUPLEX;
599 netif_carrier_on(netdev);
600 netif_wake_queue(netdev);
576 } 601 }
577 602
578 return 0; 603 return 0;
@@ -607,7 +632,7 @@ ixgb_led_blink_callback(unsigned long data)
607static int 632static int
608ixgb_phys_id(struct net_device *netdev, uint32_t data) 633ixgb_phys_id(struct net_device *netdev, uint32_t data)
609{ 634{
610 struct ixgb_adapter *adapter = netdev->priv; 635 struct ixgb_adapter *adapter = netdev_priv(netdev);
611 636
612 if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ)) 637 if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
613 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ); 638 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
@@ -643,7 +668,7 @@ static void
643ixgb_get_ethtool_stats(struct net_device *netdev, 668ixgb_get_ethtool_stats(struct net_device *netdev,
644 struct ethtool_stats *stats, uint64_t *data) 669 struct ethtool_stats *stats, uint64_t *data)
645{ 670{
646 struct ixgb_adapter *adapter = netdev->priv; 671 struct ixgb_adapter *adapter = netdev_priv(netdev);
647 int i; 672 int i;
648 673
649 ixgb_update_stats(adapter); 674 ixgb_update_stats(adapter);
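
Every ethtool hook in the hunks above replaces a direct netdev->priv dereference with netdev_priv(netdev). When the private area is allocated together with the net_device (alloc_etherdev(sizeof(private struct))), netdev_priv() returns a pointer into that single allocation rather than relying on the priv pointer having been set separately. A hedged sketch of the pattern, using hypothetical names:

/* Hedged sketch (hypothetical names): allocate the private state together
 * with the net_device and reach it via netdev_priv(). */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct example_adapter {
	struct net_device *netdev;
	u32 rx_csum;
};

static struct net_device *example_alloc(void)
{
	struct net_device *netdev;
	struct example_adapter *adapter;

	netdev = alloc_etherdev(sizeof(struct example_adapter));
	if (!netdev)
		return NULL;

	adapter = netdev_priv(netdev);	/* points into the same allocation */
	adapter->netdev = netdev;
	return netdev;
}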
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index 97898efe7cc8..8bcf31ed10c2 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -822,17 +822,8 @@ extern void ixgb_clear_vfta(struct ixgb_hw *hw);
822 822
823/* Access functions to eeprom data */ 823/* Access functions to eeprom data */
824void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr); 824void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr);
825uint16_t ixgb_get_ee_compatibility(struct ixgb_hw *hw);
826uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw); 825uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw);
827uint16_t ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw);
828uint16_t ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw);
829uint16_t ixgb_get_ee_subsystem_id(struct ixgb_hw *hw);
830uint16_t ixgb_get_ee_subvendor_id(struct ixgb_hw *hw);
831uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw); 826uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw);
832uint16_t ixgb_get_ee_vendor_id(struct ixgb_hw *hw);
833uint16_t ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw);
834uint8_t ixgb_get_ee_d3_power(struct ixgb_hw *hw);
835uint8_t ixgb_get_ee_d0_power(struct ixgb_hw *hw);
836boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw); 827boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw);
837uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index); 828uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index);
838 829
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 097b90ccf575..5c555373adbe 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -29,6 +29,11 @@
29#include "ixgb.h" 29#include "ixgb.h"
30 30
31/* Change Log 31/* Change Log
32 * 1.0.96 04/19/05
33 * - Make needlessly global code static -- bunk@stusta.de
34 * - ethtool cleanup -- shemminger@osdl.org
35 * - Support for MODULE_VERSION -- linville@tuxdriver.com
36 * - add skb_header_cloned check to the tso path -- herbert@apana.org.au
32 * 1.0.88 01/05/05 37 * 1.0.88 01/05/05
33 * - include fix to the condition that determines when to quit NAPI - Robert Olsson 38 * - include fix to the condition that determines when to quit NAPI - Robert Olsson
34 * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down 39 * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
@@ -47,10 +52,9 @@ char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
47#else 52#else
48#define DRIVERNAPI "-NAPI" 53#define DRIVERNAPI "-NAPI"
49#endif 54#endif
50 55#define DRV_VERSION "1.0.100-k2"DRIVERNAPI
51#define DRV_VERSION "1.0.95-k2"DRIVERNAPI
52char ixgb_driver_version[] = DRV_VERSION; 56char ixgb_driver_version[] = DRV_VERSION;
53char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; 57static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
54 58
55/* ixgb_pci_tbl - PCI Device ID Table 59/* ixgb_pci_tbl - PCI Device ID Table
56 * 60 *
@@ -145,10 +149,12 @@ MODULE_LICENSE("GPL");
145MODULE_VERSION(DRV_VERSION); 149MODULE_VERSION(DRV_VERSION);
146 150
147/* some defines for controlling descriptor fetches in h/w */ 151/* some defines for controlling descriptor fetches in h/w */
148#define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefech below this */
149#define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is
150 pushed this many descriptors from head */
151#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */ 152#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
153#define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefech below
154 * this */
155#define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail
156 * is pushed this many descriptors
157 * from head */
152 158
153/** 159/**
154 * ixgb_init_module - Driver Registration Routine 160 * ixgb_init_module - Driver Registration Routine
@@ -376,7 +382,7 @@ ixgb_probe(struct pci_dev *pdev,
376 SET_NETDEV_DEV(netdev, &pdev->dev); 382 SET_NETDEV_DEV(netdev, &pdev->dev);
377 383
378 pci_set_drvdata(pdev, netdev); 384 pci_set_drvdata(pdev, netdev);
379 adapter = netdev->priv; 385 adapter = netdev_priv(netdev);
380 adapter->netdev = netdev; 386 adapter->netdev = netdev;
381 adapter->pdev = pdev; 387 adapter->pdev = pdev;
382 adapter->hw.back = adapter; 388 adapter->hw.back = adapter;
@@ -512,7 +518,7 @@ static void __devexit
512ixgb_remove(struct pci_dev *pdev) 518ixgb_remove(struct pci_dev *pdev)
513{ 519{
514 struct net_device *netdev = pci_get_drvdata(pdev); 520 struct net_device *netdev = pci_get_drvdata(pdev);
515 struct ixgb_adapter *adapter = netdev->priv; 521 struct ixgb_adapter *adapter = netdev_priv(netdev);
516 522
517 unregister_netdev(netdev); 523 unregister_netdev(netdev);
518 524
@@ -583,7 +589,7 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
583static int 589static int
584ixgb_open(struct net_device *netdev) 590ixgb_open(struct net_device *netdev)
585{ 591{
586 struct ixgb_adapter *adapter = netdev->priv; 592 struct ixgb_adapter *adapter = netdev_priv(netdev);
587 int err; 593 int err;
588 594
589 /* allocate transmit descriptors */ 595 /* allocate transmit descriptors */
@@ -626,7 +632,7 @@ err_setup_tx:
626static int 632static int
627ixgb_close(struct net_device *netdev) 633ixgb_close(struct net_device *netdev)
628{ 634{
629 struct ixgb_adapter *adapter = netdev->priv; 635 struct ixgb_adapter *adapter = netdev_priv(netdev);
630 636
631 ixgb_down(adapter, TRUE); 637 ixgb_down(adapter, TRUE);
632 638
@@ -1017,7 +1023,7 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
1017static int 1023static int
1018ixgb_set_mac(struct net_device *netdev, void *p) 1024ixgb_set_mac(struct net_device *netdev, void *p)
1019{ 1025{
1020 struct ixgb_adapter *adapter = netdev->priv; 1026 struct ixgb_adapter *adapter = netdev_priv(netdev);
1021 struct sockaddr *addr = p; 1027 struct sockaddr *addr = p;
1022 1028
1023 if(!is_valid_ether_addr(addr->sa_data)) 1029 if(!is_valid_ether_addr(addr->sa_data))
@@ -1043,7 +1049,7 @@ ixgb_set_mac(struct net_device *netdev, void *p)
1043static void 1049static void
1044ixgb_set_multi(struct net_device *netdev) 1050ixgb_set_multi(struct net_device *netdev)
1045{ 1051{
1046 struct ixgb_adapter *adapter = netdev->priv; 1052 struct ixgb_adapter *adapter = netdev_priv(netdev);
1047 struct ixgb_hw *hw = &adapter->hw; 1053 struct ixgb_hw *hw = &adapter->hw;
1048 struct dev_mc_list *mc_ptr; 1054 struct dev_mc_list *mc_ptr;
1049 uint32_t rctl; 1055 uint32_t rctl;
@@ -1371,7 +1377,7 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1371static int 1377static int
1372ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1378ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1373{ 1379{
1374 struct ixgb_adapter *adapter = netdev->priv; 1380 struct ixgb_adapter *adapter = netdev_priv(netdev);
1375 unsigned int first; 1381 unsigned int first;
1376 unsigned int tx_flags = 0; 1382 unsigned int tx_flags = 0;
1377 unsigned long flags; 1383 unsigned long flags;
@@ -1425,7 +1431,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1425static void 1431static void
1426ixgb_tx_timeout(struct net_device *netdev) 1432ixgb_tx_timeout(struct net_device *netdev)
1427{ 1433{
1428 struct ixgb_adapter *adapter = netdev->priv; 1434 struct ixgb_adapter *adapter = netdev_priv(netdev);
1429 1435
1430 /* Do the reset outside of interrupt context */ 1436 /* Do the reset outside of interrupt context */
1431 schedule_work(&adapter->tx_timeout_task); 1437 schedule_work(&adapter->tx_timeout_task);
@@ -1434,7 +1440,7 @@ ixgb_tx_timeout(struct net_device *netdev)
1434static void 1440static void
1435ixgb_tx_timeout_task(struct net_device *netdev) 1441ixgb_tx_timeout_task(struct net_device *netdev)
1436{ 1442{
1437 struct ixgb_adapter *adapter = netdev->priv; 1443 struct ixgb_adapter *adapter = netdev_priv(netdev);
1438 1444
1439 ixgb_down(adapter, TRUE); 1445 ixgb_down(adapter, TRUE);
1440 ixgb_up(adapter); 1446 ixgb_up(adapter);
@@ -1451,7 +1457,7 @@ ixgb_tx_timeout_task(struct net_device *netdev)
1451static struct net_device_stats * 1457static struct net_device_stats *
1452ixgb_get_stats(struct net_device *netdev) 1458ixgb_get_stats(struct net_device *netdev)
1453{ 1459{
1454 struct ixgb_adapter *adapter = netdev->priv; 1460 struct ixgb_adapter *adapter = netdev_priv(netdev);
1455 1461
1456 return &adapter->net_stats; 1462 return &adapter->net_stats;
1457} 1463}
@@ -1467,7 +1473,7 @@ ixgb_get_stats(struct net_device *netdev)
1467static int 1473static int
1468ixgb_change_mtu(struct net_device *netdev, int new_mtu) 1474ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1469{ 1475{
1470 struct ixgb_adapter *adapter = netdev->priv; 1476 struct ixgb_adapter *adapter = netdev_priv(netdev);
1471 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; 1477 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1472 int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; 1478 int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1473 1479
@@ -1522,7 +1528,8 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1522 1528
1523 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); 1529 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1524 /* fix up multicast stats by removing broadcasts */ 1530 /* fix up multicast stats by removing broadcasts */
1525 multi -= bcast; 1531 if(multi >= bcast)
1532 multi -= bcast;
1526 1533
1527 adapter->stats.mprcl += (multi & 0xFFFFFFFF); 1534 adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1528 adapter->stats.mprch += (multi >> 32); 1535 adapter->stats.mprch += (multi >> 32);
@@ -1641,7 +1648,7 @@ static irqreturn_t
1641ixgb_intr(int irq, void *data, struct pt_regs *regs) 1648ixgb_intr(int irq, void *data, struct pt_regs *regs)
1642{ 1649{
1643 struct net_device *netdev = data; 1650 struct net_device *netdev = data;
1644 struct ixgb_adapter *adapter = netdev->priv; 1651 struct ixgb_adapter *adapter = netdev_priv(netdev);
1645 struct ixgb_hw *hw = &adapter->hw; 1652 struct ixgb_hw *hw = &adapter->hw;
1646 uint32_t icr = IXGB_READ_REG(hw, ICR); 1653 uint32_t icr = IXGB_READ_REG(hw, ICR);
1647#ifndef CONFIG_IXGB_NAPI 1654#ifndef CONFIG_IXGB_NAPI
@@ -1688,7 +1695,7 @@ ixgb_intr(int irq, void *data, struct pt_regs *regs)
1688static int 1695static int
1689ixgb_clean(struct net_device *netdev, int *budget) 1696ixgb_clean(struct net_device *netdev, int *budget)
1690{ 1697{
1691 struct ixgb_adapter *adapter = netdev->priv; 1698 struct ixgb_adapter *adapter = netdev_priv(netdev);
1692 int work_to_do = min(*budget, netdev->quota); 1699 int work_to_do = min(*budget, netdev->quota);
1693 int tx_cleaned; 1700 int tx_cleaned;
1694 int work_done = 0; 1701 int work_done = 0;
@@ -2017,7 +2024,7 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
2017static void 2024static void
2018ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 2025ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2019{ 2026{
2020 struct ixgb_adapter *adapter = netdev->priv; 2027 struct ixgb_adapter *adapter = netdev_priv(netdev);
2021 uint32_t ctrl, rctl; 2028 uint32_t ctrl, rctl;
2022 2029
2023 ixgb_irq_disable(adapter); 2030 ixgb_irq_disable(adapter);
@@ -2055,7 +2062,7 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2055static void 2062static void
2056ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) 2063ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2057{ 2064{
2058 struct ixgb_adapter *adapter = netdev->priv; 2065 struct ixgb_adapter *adapter = netdev_priv(netdev);
2059 uint32_t vfta, index; 2066 uint32_t vfta, index;
2060 2067
2061 /* add VID to filter table */ 2068 /* add VID to filter table */
@@ -2069,7 +2076,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2069static void 2076static void
2070ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) 2077ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2071{ 2078{
2072 struct ixgb_adapter *adapter = netdev->priv; 2079 struct ixgb_adapter *adapter = netdev_priv(netdev);
2073 uint32_t vfta, index; 2080 uint32_t vfta, index;
2074 2081
2075 ixgb_irq_disable(adapter); 2082 ixgb_irq_disable(adapter);
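
One ixgb_main.c hunk above guards the multicast-counter fixup with "if(multi >= bcast)" before subtracting the broadcast count. Without the check, a broadcast total larger than the multicast total would wrap the unsigned 64-bit value around to an enormous bogus statistic. A tiny standalone demonstration of the wraparound and the guarded form:

/* Demonstrates why the "multi -= bcast" fixup needs the >= guard:
 * unsigned subtraction wraps around instead of going negative. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint64_t multi = 3, bcast = 5;

	uint64_t unguarded = multi - bcast;			/* wraps to 2^64 - 2 */
	uint64_t guarded = (multi >= bcast) ? multi - bcast : multi;

	printf("unguarded: %" PRIu64 "\n", unguarded);	/* 18446744073709551614 */
	printf("guarded:   %" PRIu64 "\n", guarded);	/* 3 */
	return 0;
}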
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index 7fec613e1675..8423cb6875f0 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -1,5 +1,10 @@
1/* 1/*
2 * sonic.c 2 * jazzsonic.c
3 *
4 * (C) 2005 Finn Thain
5 *
6 * Converted to DMA API, and (from the mac68k project) introduced
7 * dhd's support for 16-bit cards.
3 * 8 *
4 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de) 9 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
5 * 10 *
@@ -28,8 +33,8 @@
28#include <linux/netdevice.h> 33#include <linux/netdevice.h>
29#include <linux/etherdevice.h> 34#include <linux/etherdevice.h>
30#include <linux/skbuff.h> 35#include <linux/skbuff.h>
31#include <linux/bitops.h>
32#include <linux/device.h> 36#include <linux/device.h>
37#include <linux/dma-mapping.h>
33 38
34#include <asm/bootinfo.h> 39#include <asm/bootinfo.h>
35#include <asm/system.h> 40#include <asm/system.h>
@@ -44,22 +49,20 @@ static struct platform_device *jazz_sonic_device;
44 49
45#define SONIC_MEM_SIZE 0x100 50#define SONIC_MEM_SIZE 0x100
46 51
47#define SREGS_PAD(n) u16 n;
48
49#include "sonic.h" 52#include "sonic.h"
50 53
51/* 54/*
52 * Macros to access SONIC registers 55 * Macros to access SONIC registers
53 */ 56 */
54#define SONIC_READ(reg) (*((volatile unsigned int *)base_addr+reg)) 57#define SONIC_READ(reg) (*((volatile unsigned int *)dev->base_addr+reg))
55 58
56#define SONIC_WRITE(reg,val) \ 59#define SONIC_WRITE(reg,val) \
57do { \ 60do { \
58 *((volatile unsigned int *)base_addr+(reg)) = (val); \ 61 *((volatile unsigned int *)dev->base_addr+(reg)) = (val); \
59} while (0) 62} while (0)
60 63
61 64
62/* use 0 for production, 1 for verification, >2 for debug */ 65/* use 0 for production, 1 for verification, >1 for debug */
63#ifdef SONIC_DEBUG 66#ifdef SONIC_DEBUG
64static unsigned int sonic_debug = SONIC_DEBUG; 67static unsigned int sonic_debug = SONIC_DEBUG;
65#else 68#else
@@ -85,18 +88,18 @@ static unsigned short known_revisions[] =
85 0xffff /* end of list */ 88 0xffff /* end of list */
86}; 89};
87 90
88static int __init sonic_probe1(struct net_device *dev, unsigned long base_addr, 91static int __init sonic_probe1(struct net_device *dev)
89 unsigned int irq)
90{ 92{
91 static unsigned version_printed; 93 static unsigned version_printed;
92 unsigned int silicon_revision; 94 unsigned int silicon_revision;
93 unsigned int val; 95 unsigned int val;
94 struct sonic_local *lp; 96 struct sonic_local *lp = netdev_priv(dev);
95 int err = -ENODEV; 97 int err = -ENODEV;
96 int i; 98 int i;
97 99
98 if (!request_mem_region(base_addr, SONIC_MEM_SIZE, jazz_sonic_string)) 100 if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string))
99 return -EBUSY; 101 return -EBUSY;
102
100 /* 103 /*
101 * get the Silicon Revision ID. If this is one of the known 104 * get the Silicon Revision ID. If this is one of the known
102 * one assume that we found a SONIC ethernet controller at 105 * one assume that we found a SONIC ethernet controller at
@@ -120,11 +123,7 @@ static int __init sonic_probe1(struct net_device *dev, unsigned long base_addr,
120 if (sonic_debug && version_printed++ == 0) 123 if (sonic_debug && version_printed++ == 0)
121 printk(version); 124 printk(version);
122 125
123 printk("%s: Sonic ethernet found at 0x%08lx, ", dev->name, base_addr); 126 printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ", lp->device->bus_id, dev->base_addr);
124
125 /* Fill in the 'dev' fields. */
126 dev->base_addr = base_addr;
127 dev->irq = irq;
128 127
129 /* 128 /*
130 * Put the sonic into software reset, then 129 * Put the sonic into software reset, then
@@ -138,84 +137,44 @@ static int __init sonic_probe1(struct net_device *dev, unsigned long base_addr,
138 dev->dev_addr[i*2+1] = val >> 8; 137 dev->dev_addr[i*2+1] = val >> 8;
139 } 138 }
140 139
141 printk("HW Address ");
142 for (i = 0; i < 6; i++) {
143 printk("%2.2x", dev->dev_addr[i]);
144 if (i<5)
145 printk(":");
146 }
147
148 printk(" IRQ %d\n", irq);
149
150 err = -ENOMEM; 140 err = -ENOMEM;
151 141
152 /* Initialize the device structure. */ 142 /* Initialize the device structure. */
153 if (dev->priv == NULL) {
154 /*
155 * the memory be located in the same 64kb segment
156 */
157 lp = NULL;
158 i = 0;
159 do {
160 lp = kmalloc(sizeof(*lp), GFP_KERNEL);
161 if ((unsigned long) lp >> 16
162 != ((unsigned long)lp + sizeof(*lp) ) >> 16) {
163 /* FIXME, free the memory later */
164 kfree(lp);
165 lp = NULL;
166 }
167 } while (lp == NULL && i++ < 20);
168
169 if (lp == NULL) {
170 printk("%s: couldn't allocate memory for descriptors\n",
171 dev->name);
172 goto out;
173 }
174 143
175 memset(lp, 0, sizeof(struct sonic_local)); 144 lp->dma_bitmode = SONIC_BITMODE32;
176
177 /* get the virtual dma address */
178 lp->cda_laddr = vdma_alloc(CPHYSADDR(lp),sizeof(*lp));
179 if (lp->cda_laddr == ~0UL) {
180 printk("%s: couldn't get DMA page entry for "
181 "descriptors\n", dev->name);
182 goto out1;
183 }
184
185 lp->tda_laddr = lp->cda_laddr + sizeof (lp->cda);
186 lp->rra_laddr = lp->tda_laddr + sizeof (lp->tda);
187 lp->rda_laddr = lp->rra_laddr + sizeof (lp->rra);
188
189 /* allocate receive buffer area */
190 /* FIXME, maybe we should use skbs */
191 lp->rba = kmalloc(SONIC_NUM_RRS * SONIC_RBSIZE, GFP_KERNEL);
192 if (!lp->rba) {
193 printk("%s: couldn't allocate receive buffers\n",
194 dev->name);
195 goto out2;
196 }
197 145
198 /* get virtual dma address */ 146 /* Allocate the entire chunk of memory for the descriptors.
199 lp->rba_laddr = vdma_alloc(CPHYSADDR(lp->rba), 147 Note that this cannot cross a 64K boundary. */
200 SONIC_NUM_RRS * SONIC_RBSIZE); 148 if ((lp->descriptors = dma_alloc_coherent(lp->device,
201 if (lp->rba_laddr == ~0UL) { 149 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
202 printk("%s: couldn't get DMA page entry for receive " 150 &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
203 "buffers\n",dev->name); 151 printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", lp->device->bus_id);
204 goto out3; 152 goto out;
205 }
206
207 /* now convert pointer to KSEG1 pointer */
208 lp->rba = (char *)KSEG1ADDR(lp->rba);
209 flush_cache_all();
210 dev->priv = (struct sonic_local *)KSEG1ADDR(lp);
211 } 153 }
212 154
213 lp = (struct sonic_local *)dev->priv; 155 /* Now set up the pointers to point to the appropriate places */
156 lp->cda = lp->descriptors;
157 lp->tda = lp->cda + (SIZEOF_SONIC_CDA
158 * SONIC_BUS_SCALE(lp->dma_bitmode));
159 lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
160 * SONIC_BUS_SCALE(lp->dma_bitmode));
161 lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
162 * SONIC_BUS_SCALE(lp->dma_bitmode));
163
164 lp->cda_laddr = lp->descriptors_laddr;
165 lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
166 * SONIC_BUS_SCALE(lp->dma_bitmode));
167 lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
168 * SONIC_BUS_SCALE(lp->dma_bitmode));
169 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
170 * SONIC_BUS_SCALE(lp->dma_bitmode));
171
214 dev->open = sonic_open; 172 dev->open = sonic_open;
215 dev->stop = sonic_close; 173 dev->stop = sonic_close;
216 dev->hard_start_xmit = sonic_send_packet; 174 dev->hard_start_xmit = sonic_send_packet;
217 dev->get_stats = sonic_get_stats; 175 dev->get_stats = sonic_get_stats;
218 dev->set_multicast_list = &sonic_multicast_list; 176 dev->set_multicast_list = &sonic_multicast_list;
177 dev->tx_timeout = sonic_tx_timeout;
219 dev->watchdog_timeo = TX_TIMEOUT; 178 dev->watchdog_timeo = TX_TIMEOUT;
220 179
221 /* 180 /*
@@ -226,14 +185,8 @@ static int __init sonic_probe1(struct net_device *dev, unsigned long base_addr,
226 SONIC_WRITE(SONIC_MPT,0xffff); 185 SONIC_WRITE(SONIC_MPT,0xffff);
227 186
228 return 0; 187 return 0;
229out3:
230 kfree(lp->rba);
231out2:
232 vdma_free(lp->cda_laddr);
233out1:
234 kfree(lp);
235out: 188out:
236 release_region(base_addr, SONIC_MEM_SIZE); 189 release_region(dev->base_addr, SONIC_MEM_SIZE);
237 return err; 190 return err;
238} 191}
239 192
@@ -245,7 +198,6 @@ static int __init jazz_sonic_probe(struct device *device)
245{ 198{
246 struct net_device *dev; 199 struct net_device *dev;
247 struct sonic_local *lp; 200 struct sonic_local *lp;
248 unsigned long base_addr;
249 int err = 0; 201 int err = 0;
250 int i; 202 int i;
251 203
@@ -255,21 +207,26 @@ static int __init jazz_sonic_probe(struct device *device)
255 if (mips_machgroup != MACH_GROUP_JAZZ) 207 if (mips_machgroup != MACH_GROUP_JAZZ)
256 return -ENODEV; 208 return -ENODEV;
257 209
258 dev = alloc_etherdev(0); 210 dev = alloc_etherdev(sizeof(struct sonic_local));
259 if (!dev) 211 if (!dev)
260 return -ENOMEM; 212 return -ENOMEM;
261 213
214 lp = netdev_priv(dev);
215 lp->device = device;
216 SET_NETDEV_DEV(dev, device);
217 SET_MODULE_OWNER(dev);
218
262 netdev_boot_setup_check(dev); 219 netdev_boot_setup_check(dev);
263 base_addr = dev->base_addr;
264 220
265 if (base_addr >= KSEG0) { /* Check a single specified location. */ 221 if (dev->base_addr >= KSEG0) { /* Check a single specified location. */
266 err = sonic_probe1(dev, base_addr, dev->irq); 222 err = sonic_probe1(dev);
267 } else if (base_addr != 0) { /* Don't probe at all. */ 223 } else if (dev->base_addr != 0) { /* Don't probe at all. */
268 err = -ENXIO; 224 err = -ENXIO;
269 } else { 225 } else {
270 for (i = 0; sonic_portlist[i].port; i++) { 226 for (i = 0; sonic_portlist[i].port; i++) {
271 int io = sonic_portlist[i].port; 227 dev->base_addr = sonic_portlist[i].port;
272 if (sonic_probe1(dev, io, sonic_portlist[i].irq) == 0) 228 dev->irq = sonic_portlist[i].irq;
229 if (sonic_probe1(dev) == 0)
273 break; 230 break;
274 } 231 }
275 if (!sonic_portlist[i].port) 232 if (!sonic_portlist[i].port)
@@ -281,14 +238,17 @@ static int __init jazz_sonic_probe(struct device *device)
281 if (err) 238 if (err)
282 goto out1; 239 goto out1;
283 240
241 printk("%s: MAC ", dev->name);
242 for (i = 0; i < 6; i++) {
243 printk("%2.2x", dev->dev_addr[i]);
244 if (i < 5)
245 printk(":");
246 }
247 printk(" IRQ %d\n", dev->irq);
248
284 return 0; 249 return 0;
285 250
286out1: 251out1:
287 lp = dev->priv;
288 vdma_free(lp->rba_laddr);
289 kfree(lp->rba);
290 vdma_free(lp->cda_laddr);
291 kfree(lp);
292 release_region(dev->base_addr, SONIC_MEM_SIZE); 252 release_region(dev->base_addr, SONIC_MEM_SIZE);
293out: 253out:
294 free_netdev(dev); 254 free_netdev(dev);
@@ -296,21 +256,22 @@ out:
296 return err; 256 return err;
297} 257}
298 258
299/* 259MODULE_DESCRIPTION("Jazz SONIC ethernet driver");
300 * SONIC uses a normal IRQ 260module_param(sonic_debug, int, 0);
301 */ 261MODULE_PARM_DESC(sonic_debug, "jazzsonic debug level (1-4)");
302#define sonic_request_irq request_irq
303#define sonic_free_irq free_irq
304 262
305#define sonic_chiptomem(x) KSEG1ADDR(vdma_log2phys(x)) 263#define SONIC_IRQ_FLAG SA_INTERRUPT
306 264
307#include "sonic.c" 265#include "sonic.c"
308 266
309static int __devexit jazz_sonic_device_remove (struct device *device) 267static int __devexit jazz_sonic_device_remove (struct device *device)
310{ 268{
311 struct net_device *dev = device->driver_data; 269 struct net_device *dev = device->driver_data;
270 struct sonic_local* lp = netdev_priv(dev);
312 271
313 unregister_netdev (dev); 272 unregister_netdev (dev);
273 dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
274 lp->descriptors, lp->descriptors_laddr);
314 release_region (dev->base_addr, SONIC_MEM_SIZE); 275 release_region (dev->base_addr, SONIC_MEM_SIZE);
315 free_netdev (dev); 276 free_netdev (dev);
316 277
@@ -323,7 +284,7 @@ static struct device_driver jazz_sonic_driver = {
323 .probe = jazz_sonic_probe, 284 .probe = jazz_sonic_probe,
324 .remove = __devexit_p(jazz_sonic_device_remove), 285 .remove = __devexit_p(jazz_sonic_device_remove),
325}; 286};
326 287
327static void jazz_sonic_platform_release (struct device *device) 288static void jazz_sonic_platform_release (struct device *device)
328{ 289{
329 struct platform_device *pldev; 290 struct platform_device *pldev;
@@ -336,10 +297,11 @@ static void jazz_sonic_platform_release (struct device *device)
336static int __init jazz_sonic_init_module(void) 297static int __init jazz_sonic_init_module(void)
337{ 298{
338 struct platform_device *pldev; 299 struct platform_device *pldev;
300 int err;
339 301
340 if (driver_register(&jazz_sonic_driver)) { 302 if ((err = driver_register(&jazz_sonic_driver))) {
341 printk(KERN_ERR "Driver registration failed\n"); 303 printk(KERN_ERR "Driver registration failed\n");
342 return -ENOMEM; 304 return err;
343 } 305 }
344 306
345 jazz_sonic_device = NULL; 307 jazz_sonic_device = NULL;
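
The reworked jazzsonic probe above allocates one coherent DMA block for all of the SONIC descriptor rings and then carves it up with fixed offsets scaled by the bus width, instead of juggling several kmalloc()/vdma_alloc() pieces. The sketch below shows that general shape with hypothetical sizes and names; the real SIZEOF_SONIC_* constants and SONIC_BUS_SCALE() live in sonic.h, which is not part of this diff.

/* Hedged sketch (hypothetical sizes/names): one coherent allocation for all
 * descriptor rings, carved up by offset, freed as a single block. */
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

#define RX_RING_BYTES	1024	/* assumed example sizes, not the SONIC ones */
#define TX_RING_BYTES	1024

struct ring_state {
	void *descriptors;		/* CPU address of the whole block */
	dma_addr_t descriptors_laddr;	/* matching bus address */
	void *rx_ring, *tx_ring;
	dma_addr_t rx_laddr, tx_laddr;
};

static int rings_alloc(struct device *dev, struct ring_state *rs)
{
	size_t total = RX_RING_BYTES + TX_RING_BYTES;

	rs->descriptors = dma_alloc_coherent(dev, total,
					     &rs->descriptors_laddr, GFP_KERNEL);
	if (!rs->descriptors)
		return -ENOMEM;

	/* carve the single block: CPU and bus views use the same offsets */
	rs->rx_ring  = rs->descriptors;
	rs->tx_ring  = rs->descriptors + RX_RING_BYTES;
	rs->rx_laddr = rs->descriptors_laddr;
	rs->tx_laddr = rs->descriptors_laddr + RX_RING_BYTES;
	return 0;
}

static void rings_free(struct device *dev, struct ring_state *rs)
{
	dma_free_coherent(dev, RX_RING_BYTES + TX_RING_BYTES,
			  rs->descriptors, rs->descriptors_laddr);
}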
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 1f61f0cc95d8..690a1aae0b34 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -68,6 +68,7 @@ static DEFINE_PER_CPU(struct net_device_stats, loopback_stats);
68 * of largesending device modulo TCP checksum, which is ignored for loopback. 68 * of largesending device modulo TCP checksum, which is ignored for loopback.
69 */ 69 */
70 70
71#ifdef LOOPBACK_TSO
71static void emulate_large_send_offload(struct sk_buff *skb) 72static void emulate_large_send_offload(struct sk_buff *skb)
72{ 73{
73 struct iphdr *iph = skb->nh.iph; 74 struct iphdr *iph = skb->nh.iph;
@@ -119,6 +120,7 @@ static void emulate_large_send_offload(struct sk_buff *skb)
119 120
120 dev_kfree_skb(skb); 121 dev_kfree_skb(skb);
121} 122}
123#endif /* LOOPBACK_TSO */
122 124
123/* 125/*
124 * The higher levels take care of making this non-reentrant (it's 126 * The higher levels take care of making this non-reentrant (it's
@@ -130,12 +132,13 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
130 132
131 skb_orphan(skb); 133 skb_orphan(skb);
132 134
133 skb->protocol=eth_type_trans(skb,dev); 135 skb->protocol = eth_type_trans(skb,dev);
134 skb->dev=dev; 136 skb->dev = dev;
135#ifndef LOOPBACK_MUST_CHECKSUM 137#ifndef LOOPBACK_MUST_CHECKSUM
136 skb->ip_summed = CHECKSUM_UNNECESSARY; 138 skb->ip_summed = CHECKSUM_UNNECESSARY;
137#endif 139#endif
138 140
141#ifdef LOOPBACK_TSO
139 if (skb_shinfo(skb)->tso_size) { 142 if (skb_shinfo(skb)->tso_size) {
140 BUG_ON(skb->protocol != htons(ETH_P_IP)); 143 BUG_ON(skb->protocol != htons(ETH_P_IP));
141 BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); 144 BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
@@ -143,14 +146,14 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
143 emulate_large_send_offload(skb); 146 emulate_large_send_offload(skb);
144 return 0; 147 return 0;
145 } 148 }
146 149#endif
147 dev->last_rx = jiffies; 150 dev->last_rx = jiffies;
148 151
149 lb_stats = &per_cpu(loopback_stats, get_cpu()); 152 lb_stats = &per_cpu(loopback_stats, get_cpu());
150 lb_stats->rx_bytes += skb->len; 153 lb_stats->rx_bytes += skb->len;
151 lb_stats->tx_bytes += skb->len; 154 lb_stats->tx_bytes = lb_stats->rx_bytes;
152 lb_stats->rx_packets++; 155 lb_stats->rx_packets++;
153 lb_stats->tx_packets++; 156 lb_stats->tx_packets = lb_stats->rx_packets;
154 put_cpu(); 157 put_cpu();
155 158
156 netif_rx(skb); 159 netif_rx(skb);
@@ -208,9 +211,12 @@ struct net_device loopback_dev = {
208 .type = ARPHRD_LOOPBACK, /* 0x0001*/ 211 .type = ARPHRD_LOOPBACK, /* 0x0001*/
209 .rebuild_header = eth_rebuild_header, 212 .rebuild_header = eth_rebuild_header,
210 .flags = IFF_LOOPBACK, 213 .flags = IFF_LOOPBACK,
211 .features = NETIF_F_SG|NETIF_F_FRAGLIST 214 .features = NETIF_F_SG | NETIF_F_FRAGLIST
212 |NETIF_F_NO_CSUM|NETIF_F_HIGHDMA 215#ifdef LOOPBACK_TSO
213 |NETIF_F_LLTX, 216 | NETIF_F_TSO
217#endif
218 | NETIF_F_NO_CSUM | NETIF_F_HIGHDMA
219 | NETIF_F_LLTX,
214 .ethtool_ops = &loopback_ethtool_ops, 220 .ethtool_ops = &loopback_ethtool_ops,
215}; 221};
216 222
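
The loopback hunk keeps its counters in a per-CPU structure and, because every packet transmitted on loopback is immediately received, the new code simply mirrors the rx counters into the tx counters while pinned to a CPU with get_cpu()/put_cpu(). A hedged sketch of that accounting pattern, with a hypothetical stats struct rather than the real net_device_stats:

/* Hedged sketch (hypothetical stats struct): per-CPU accounting in the style
 * of the loopback change, where tx counters mirror rx counters. */
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/skbuff.h>

struct lb_stats {
	unsigned long rx_packets, tx_packets;
	unsigned long rx_bytes, tx_bytes;
};

static DEFINE_PER_CPU(struct lb_stats, example_lb_stats);

static void lb_account(struct sk_buff *skb)
{
	struct lb_stats *stats = &per_cpu(example_lb_stats, get_cpu());

	stats->rx_bytes += skb->len;
	stats->tx_bytes = stats->rx_bytes;	/* every tx is also an rx */
	stats->rx_packets++;
	stats->tx_packets = stats->rx_packets;

	put_cpu();
}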
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index be28c65de729..405e18365ede 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -1,6 +1,12 @@
1/* 1/*
2 * macsonic.c 2 * macsonic.c
3 * 3 *
4 * (C) 2005 Finn Thain
5 *
6 * Converted to DMA API, converted to unified driver model, made it work as
7 * a module again, and from the mac68k project, introduced more 32-bit cards
8 * and dhd's support for 16-bit cards.
9 *
4 * (C) 1998 Alan Cox 10 * (C) 1998 Alan Cox
5 * 11 *
6 * Debugging Andreas Ehliar, Michael Schmitz 12 * Debugging Andreas Ehliar, Michael Schmitz
@@ -26,8 +32,8 @@
26 */ 32 */
27 33
28#include <linux/kernel.h> 34#include <linux/kernel.h>
35#include <linux/module.h>
29#include <linux/types.h> 36#include <linux/types.h>
30#include <linux/ctype.h>
31#include <linux/fcntl.h> 37#include <linux/fcntl.h>
32#include <linux/interrupt.h> 38#include <linux/interrupt.h>
33#include <linux/init.h> 39#include <linux/init.h>
@@ -41,8 +47,8 @@
41#include <linux/netdevice.h> 47#include <linux/netdevice.h>
42#include <linux/etherdevice.h> 48#include <linux/etherdevice.h>
43#include <linux/skbuff.h> 49#include <linux/skbuff.h>
44#include <linux/module.h> 50#include <linux/device.h>
45#include <linux/bitops.h> 51#include <linux/dma-mapping.h>
46 52
47#include <asm/bootinfo.h> 53#include <asm/bootinfo.h>
48#include <asm/system.h> 54#include <asm/system.h>
@@ -54,25 +60,28 @@
54#include <asm/macints.h> 60#include <asm/macints.h>
55#include <asm/mac_via.h> 61#include <asm/mac_via.h>
56 62
57#define SREGS_PAD(n) u16 n; 63static char mac_sonic_string[] = "macsonic";
64static struct platform_device *mac_sonic_device;
58 65
59#include "sonic.h" 66#include "sonic.h"
60 67
61#define SONIC_READ(reg) \ 68/* These should basically be bus-size and endian independent (since
62 nubus_readl(base_addr+(reg)) 69 the SONIC is at least smart enough that it uses the same endianness
63#define SONIC_WRITE(reg,val) \ 70 as the host, unlike certain less enlightened Macintosh NICs) */
64 nubus_writel((val), base_addr+(reg)) 71#define SONIC_READ(reg) (nubus_readw(dev->base_addr + (reg * 4) \
65#define sonic_read(dev, reg) \ 72 + lp->reg_offset))
66 nubus_readl((dev)->base_addr+(reg)) 73#define SONIC_WRITE(reg,val) (nubus_writew(val, dev->base_addr + (reg * 4) \
67#define sonic_write(dev, reg, val) \ 74 + lp->reg_offset))
68 nubus_writel((val), (dev)->base_addr+(reg)) 75
69 76/* use 0 for production, 1 for verification, >1 for debug */
77#ifdef SONIC_DEBUG
78static unsigned int sonic_debug = SONIC_DEBUG;
79#else
80static unsigned int sonic_debug = 1;
81#endif
70 82
71static int sonic_debug;
72static int sonic_version_printed; 83static int sonic_version_printed;
73 84
74static int reg_offset;
75
76extern int mac_onboard_sonic_probe(struct net_device* dev); 85extern int mac_onboard_sonic_probe(struct net_device* dev);
77extern int mac_nubus_sonic_probe(struct net_device* dev); 86extern int mac_nubus_sonic_probe(struct net_device* dev);
78 87
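The new SONIC_READ()/SONIC_WRITE() macros above replace the old long-word accessors: the register index is multiplied by 4 and then shifted by lp->reg_offset, so the same word-sized access works for 16-bit parts (offset 0) and 32-bit parts (offset 2). A small sketch of just that address arithmetic, with a made-up base address and register index; the actual register map and nubus_readw()/nubus_writew() are not reproduced here.

#include <stdio.h>
#include <stdint.h>

/* Mirror of "dev->base_addr + (reg * 4) + lp->reg_offset" in the macros. */
static uintptr_t sonic_reg_addr(uintptr_t base_addr, int reg, int reg_offset)
{
	return base_addr + (uintptr_t)(reg * 4) + (uintptr_t)reg_offset;
}

int main(void)
{
	uintptr_t base = 0x50f0a000;	/* made-up base address for the example */
	int reg = 0x28;			/* hypothetical register index */

	printf("16-bit layout (offset 0): %#lx\n",
	       (unsigned long)sonic_reg_addr(base, reg, 0));
	printf("32-bit layout (offset 2): %#lx\n",
	       (unsigned long)sonic_reg_addr(base, reg, 2));
	return 0;
}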
@@ -108,40 +117,6 @@ enum macsonic_type {
108 117
109#define SONIC_READ_PROM(addr) nubus_readb(prom_addr+addr) 118#define SONIC_READ_PROM(addr) nubus_readb(prom_addr+addr)
110 119
111struct net_device * __init macsonic_probe(int unit)
112{
113 struct net_device *dev = alloc_etherdev(0);
114 int err;
115
116 if (!dev)
117 return ERR_PTR(-ENOMEM);
118
119 if (unit >= 0)
120 sprintf(dev->name, "eth%d", unit);
121
122 SET_MODULE_OWNER(dev);
123
124 /* This will catch fatal stuff like -ENOMEM as well as success */
125 err = mac_onboard_sonic_probe(dev);
126 if (err == 0)
127 goto found;
128 if (err != -ENODEV)
129 goto out;
130 err = mac_nubus_sonic_probe(dev);
131 if (err)
132 goto out;
133found:
134 err = register_netdev(dev);
135 if (err)
136 goto out1;
137 return dev;
138out1:
139 kfree(dev->priv);
140out:
141 free_netdev(dev);
142 return ERR_PTR(err);
143}
144
145/* 120/*
146 * For reversing the PROM address 121 * For reversing the PROM address
147 */ 122 */
@@ -160,103 +135,55 @@ static inline void bit_reverse_addr(unsigned char addr[6])
160 135
161int __init macsonic_init(struct net_device* dev) 136int __init macsonic_init(struct net_device* dev)
162{ 137{
163 struct sonic_local* lp = NULL; 138 struct sonic_local* lp = netdev_priv(dev);
164 int i;
165 139
166 /* Allocate the entire chunk of memory for the descriptors. 140 /* Allocate the entire chunk of memory for the descriptors.
167 Note that this cannot cross a 64K boundary. */ 141 Note that this cannot cross a 64K boundary. */
168 for (i = 0; i < 20; i++) { 142 if ((lp->descriptors = dma_alloc_coherent(lp->device,
169 unsigned long desc_base, desc_top; 143 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
170 if((lp = kmalloc(sizeof(struct sonic_local), GFP_KERNEL | GFP_DMA)) == NULL) { 144 &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
171 printk(KERN_ERR "%s: couldn't allocate descriptor buffers\n", dev->name); 145 printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", lp->device->bus_id);
172 return -ENOMEM;
173 }
174
175 desc_base = (unsigned long) lp;
176 desc_top = desc_base + sizeof(struct sonic_local);
177 if ((desc_top & 0xffff) >= (desc_base & 0xffff))
178 break;
179 /* Hmm. try again (FIXME: does this actually work?) */
180 kfree(lp);
181 printk(KERN_DEBUG
182 "%s: didn't get continguous chunk [%08lx - %08lx], trying again\n",
183 dev->name, desc_base, desc_top);
184 }
185
186 if (lp == NULL) {
187 printk(KERN_ERR "%s: tried 20 times to allocate descriptor buffers, giving up.\n",
188 dev->name);
189 return -ENOMEM; 146 return -ENOMEM;
190 } 147 }
191
192 dev->priv = lp;
193
194#if 0
195	/* this code is only here as a curiosity... mainly, where the
196 fuck did SONIC_BUS_SCALE come from, and what was it supposed
197 to do? the normal allocation works great for 32 bit stuffs.. */
198 148
199 /* Now set up the pointers to point to the appropriate places */ 149 /* Now set up the pointers to point to the appropriate places */
200 lp->cda = lp->sonic_desc; 150 lp->cda = lp->descriptors;
201 lp->tda = lp->cda + (SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode)); 151 lp->tda = lp->cda + (SIZEOF_SONIC_CDA
152 * SONIC_BUS_SCALE(lp->dma_bitmode));
202 lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS 153 lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
203 * SONIC_BUS_SCALE(lp->dma_bitmode)); 154 * SONIC_BUS_SCALE(lp->dma_bitmode));
204 lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS 155 lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
205 * SONIC_BUS_SCALE(lp->dma_bitmode)); 156 * SONIC_BUS_SCALE(lp->dma_bitmode));
206 157
207#endif 158 lp->cda_laddr = lp->descriptors_laddr;
208 159 lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
209 memset(lp, 0, sizeof(struct sonic_local)); 160 * SONIC_BUS_SCALE(lp->dma_bitmode));
210 161 lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
211 lp->cda_laddr = (unsigned int)&(lp->cda); 162 * SONIC_BUS_SCALE(lp->dma_bitmode));
212 lp->tda_laddr = (unsigned int)lp->tda; 163 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
213 lp->rra_laddr = (unsigned int)lp->rra; 164 * SONIC_BUS_SCALE(lp->dma_bitmode));
214 lp->rda_laddr = (unsigned int)lp->rda;
215
216 /* FIXME, maybe we should use skbs */
217 if ((lp->rba = (char *)
218 kmalloc(SONIC_NUM_RRS * SONIC_RBSIZE, GFP_KERNEL | GFP_DMA)) == NULL) {
219 printk(KERN_ERR "%s: couldn't allocate receive buffers\n", dev->name);
220 dev->priv = NULL;
221 kfree(lp);
222 return -ENOMEM;
223 }
224
225 lp->rba_laddr = (unsigned int)lp->rba;
226
227 {
228 int rs, ds;
229
230 /* almost always 12*4096, but let's not take chances */
231 rs = ((SONIC_NUM_RRS * SONIC_RBSIZE + 4095) / 4096) * 4096;
232 /* almost always under a page, but let's not take chances */
233 ds = ((sizeof(struct sonic_local) + 4095) / 4096) * 4096;
234 kernel_set_cachemode(lp->rba, rs, IOMAP_NOCACHE_SER);
235 kernel_set_cachemode(lp, ds, IOMAP_NOCACHE_SER);
236 }
237
238#if 0
239 flush_cache_all();
240#endif
241 165
242 dev->open = sonic_open; 166 dev->open = sonic_open;
243 dev->stop = sonic_close; 167 dev->stop = sonic_close;
244 dev->hard_start_xmit = sonic_send_packet; 168 dev->hard_start_xmit = sonic_send_packet;
245 dev->get_stats = sonic_get_stats; 169 dev->get_stats = sonic_get_stats;
246 dev->set_multicast_list = &sonic_multicast_list; 170 dev->set_multicast_list = &sonic_multicast_list;
171 dev->tx_timeout = sonic_tx_timeout;
172 dev->watchdog_timeo = TX_TIMEOUT;
247 173
248 /* 174 /*
249 * clear tally counter 175 * clear tally counter
250 */ 176 */
251 sonic_write(dev, SONIC_CRCT, 0xffff); 177 SONIC_WRITE(SONIC_CRCT, 0xffff);
252 sonic_write(dev, SONIC_FAET, 0xffff); 178 SONIC_WRITE(SONIC_FAET, 0xffff);
253 sonic_write(dev, SONIC_MPT, 0xffff); 179 SONIC_WRITE(SONIC_MPT, 0xffff);
254 180
255 return 0; 181 return 0;
256} 182}
257 183
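The rewritten macsonic_init() above obtains one DMA-coherent block for all descriptors and carves it into the CAM (cda), transmit (tda), receive (rda) and resource (rra) areas, scaling each area by SONIC_BUS_SCALE() for the DMA bit mode, with the CPU pointers and the bus addresses advancing in lockstep. A hedged sketch of that carve-up using stand-in sizes; the real SIZEOF_* and SONIC_NUM_* constants come from sonic.h and are not repeated here.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in sizes for illustration only; the driver takes the real values
 * (SIZEOF_SONIC_CDA, SIZEOF_SONIC_TD, ...) from sonic.h. */
#define CDA_SIZE  100
#define TD_SIZE   10
#define RD_SIZE   7
#define RR_SIZE   4
#define NUM_TDS   16
#define NUM_RDS   16
#define NUM_RRS   16
#define BUS_SCALE(bitmode32) ((bitmode32) ? 4 : 2)  /* bytes per descriptor word */

int main(void)
{
	size_t scale = BUS_SCALE(1);	/* pretend this is a 32-bit card */
	size_t total = (CDA_SIZE + TD_SIZE * NUM_TDS +
			RD_SIZE * NUM_RDS + RR_SIZE * NUM_RRS) * scale;

	/* One contiguous block, as dma_alloc_coherent() returns in the driver. */
	char *desc = malloc(total);
	if (!desc)
		return 1;

	/* Carve it into the four areas in the same order as macsonic_init(). */
	char *cda = desc;
	char *tda = cda + CDA_SIZE * scale;
	char *rda = tda + TD_SIZE * NUM_TDS * scale;
	char *rra = rda + RD_SIZE * NUM_RDS * scale;

	printf("cda +%td, tda +%td, rda +%td, rra +%td, total %zu bytes\n",
	       cda - desc, tda - desc, rda - desc, rra - desc, total);
	free(desc);
	return 0;
}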
258int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev) 184int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
259{ 185{
186 struct sonic_local *lp = netdev_priv(dev);
260 const int prom_addr = ONBOARD_SONIC_PROM_BASE; 187 const int prom_addr = ONBOARD_SONIC_PROM_BASE;
261 int i; 188 int i;
262 189
@@ -270,6 +197,7 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
270 why this is so. */ 197 why this is so. */
271 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) && 198 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
272 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) && 199 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
200 memcmp(dev->dev_addr, "\x00\x80\x19", 3) &&
273 memcmp(dev->dev_addr, "\x00\x05\x02", 3)) 201 memcmp(dev->dev_addr, "\x00\x05\x02", 3))
274 bit_reverse_addr(dev->dev_addr); 202 bit_reverse_addr(dev->dev_addr);
275 else 203 else
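The hunks above and below add "\x00\x80\x19" to the list of vendor prefixes whose onboard PROM bytes are stored in normal order; any other address read from the PROM is assumed to be bit-reversed and is passed through bit_reverse_addr(). A minimal sketch of what that reversal does to each byte; the helper name echoes the driver's, but this is an illustrative reimplementation, not the driver code.

#include <stdio.h>

/* Reverse the bit order within one byte: 0x08 <-> 0x10, 0x80 <-> 0x01, ... */
static unsigned char bit_reverse(unsigned char b)
{
	unsigned char out = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (b & (1 << i))
			out |= 1 << (7 - i);
	return out;
}

int main(void)
{
	/* A bit-reversed PROM image of the Apple prefix 08:00:07 reads back
	 * as 10:00:e0; reversing each byte recovers the real address. */
	unsigned char prom[6] = { 0x10, 0x00, 0xe0, 0x12, 0x34, 0x56 };
	int i;

	for (i = 0; i < 6; i++)
		prom[i] = bit_reverse(prom[i]);

	for (i = 0; i < 6; i++)
		printf("%02x%s", prom[i], i < 5 ? ":" : "\n");
	return 0;
}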
@@ -281,22 +209,23 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
281 the card... */ 209 the card... */
282 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) && 210 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
283 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) && 211 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
212 memcmp(dev->dev_addr, "\x00\x80\x19", 3) &&
284 memcmp(dev->dev_addr, "\x00\x05\x02", 3)) 213 memcmp(dev->dev_addr, "\x00\x05\x02", 3))
285 { 214 {
286 unsigned short val; 215 unsigned short val;
287 216
288 printk(KERN_INFO "macsonic: PROM seems to be wrong, trying CAM entry 15\n"); 217 printk(KERN_INFO "macsonic: PROM seems to be wrong, trying CAM entry 15\n");
289 218
290 sonic_write(dev, SONIC_CMD, SONIC_CR_RST); 219 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
291 sonic_write(dev, SONIC_CEP, 15); 220 SONIC_WRITE(SONIC_CEP, 15);
292 221
293 val = sonic_read(dev, SONIC_CAP2); 222 val = SONIC_READ(SONIC_CAP2);
294 dev->dev_addr[5] = val >> 8; 223 dev->dev_addr[5] = val >> 8;
295 dev->dev_addr[4] = val & 0xff; 224 dev->dev_addr[4] = val & 0xff;
296 val = sonic_read(dev, SONIC_CAP1); 225 val = SONIC_READ(SONIC_CAP1);
297 dev->dev_addr[3] = val >> 8; 226 dev->dev_addr[3] = val >> 8;
298 dev->dev_addr[2] = val & 0xff; 227 dev->dev_addr[2] = val & 0xff;
299 val = sonic_read(dev, SONIC_CAP0); 228 val = SONIC_READ(SONIC_CAP0);
300 dev->dev_addr[1] = val >> 8; 229 dev->dev_addr[1] = val >> 8;
301 dev->dev_addr[0] = val & 0xff; 230 dev->dev_addr[0] = val & 0xff;
302 231
@@ -311,6 +240,7 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
311 240
312 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) && 241 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
313 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) && 242 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
243 memcmp(dev->dev_addr, "\x00\x80\x19", 3) &&
314 memcmp(dev->dev_addr, "\x00\x05\x02", 3)) 244 memcmp(dev->dev_addr, "\x00\x05\x02", 3))
315 { 245 {
316 /* 246 /*
@@ -325,8 +255,9 @@ int __init mac_onboard_sonic_probe(struct net_device* dev)
325{ 255{
326 /* Bwahahaha */ 256 /* Bwahahaha */
327 static int once_is_more_than_enough; 257 static int once_is_more_than_enough;
328 int i; 258 struct sonic_local* lp = netdev_priv(dev);
329 int dma_bitmode; 259 int sr;
260 int commslot = 0;
330 261
331 if (once_is_more_than_enough) 262 if (once_is_more_than_enough)
332 return -ENODEV; 263 return -ENODEV;
@@ -335,20 +266,18 @@ int __init mac_onboard_sonic_probe(struct net_device* dev)
335 if (!MACH_IS_MAC) 266 if (!MACH_IS_MAC)
336 return -ENODEV; 267 return -ENODEV;
337 268
338 printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. ");
339
340 if (macintosh_config->ether_type != MAC_ETHER_SONIC) 269 if (macintosh_config->ether_type != MAC_ETHER_SONIC)
341 {
342 printk("none.\n");
343 return -ENODEV; 270 return -ENODEV;
344 } 271
345 272 printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. ");
273
346 /* Bogus probing, on the models which may or may not have 274 /* Bogus probing, on the models which may or may not have
347 Ethernet (BTW, the Ethernet *is* always at the same 275 Ethernet (BTW, the Ethernet *is* always at the same
348 address, and nothing else lives there, at least if Apple's 276 address, and nothing else lives there, at least if Apple's
349 documentation is to be believed) */ 277 documentation is to be believed) */
350 if (macintosh_config->ident == MAC_MODEL_Q630 || 278 if (macintosh_config->ident == MAC_MODEL_Q630 ||
351 macintosh_config->ident == MAC_MODEL_P588 || 279 macintosh_config->ident == MAC_MODEL_P588 ||
280 macintosh_config->ident == MAC_MODEL_P575 ||
352 macintosh_config->ident == MAC_MODEL_C610) { 281 macintosh_config->ident == MAC_MODEL_C610) {
353 unsigned long flags; 282 unsigned long flags;
354 int card_present; 283 int card_present;
@@ -361,13 +290,13 @@ int __init mac_onboard_sonic_probe(struct net_device* dev)
361 printk("none.\n"); 290 printk("none.\n");
362 return -ENODEV; 291 return -ENODEV;
363 } 292 }
293 commslot = 1;
364 } 294 }
365 295
366 printk("yes\n"); 296 printk("yes\n");
367 297
368 /* Danger! My arms are flailing wildly! You *must* set this 298 /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset
369 before using sonic_read() */ 299 * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */
370
371 dev->base_addr = ONBOARD_SONIC_REGISTERS; 300 dev->base_addr = ONBOARD_SONIC_REGISTERS;
372 if (via_alt_mapping) 301 if (via_alt_mapping)
373 dev->irq = IRQ_AUTO_3; 302 dev->irq = IRQ_AUTO_3;
@@ -379,84 +308,66 @@ int __init mac_onboard_sonic_probe(struct net_device* dev)
379 sonic_version_printed = 1; 308 sonic_version_printed = 1;
380 } 309 }
381 printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n", 310 printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n",
382 dev->name, dev->base_addr); 311 lp->device->bus_id, dev->base_addr);
383
384 /* Now do a song and dance routine in an attempt to determine
385 the bus width */
386 312
387 /* The PowerBook's SONIC is 16 bit always. */ 313 /* The PowerBook's SONIC is 16 bit always. */
388 if (macintosh_config->ident == MAC_MODEL_PB520) { 314 if (macintosh_config->ident == MAC_MODEL_PB520) {
389 reg_offset = 0; 315 lp->reg_offset = 0;
390 dma_bitmode = 0; 316 lp->dma_bitmode = SONIC_BITMODE16;
391 } else if (macintosh_config->ident == MAC_MODEL_C610) { 317 sr = SONIC_READ(SONIC_SR);
392 reg_offset = 0; 318 } else if (commslot) {
393 dma_bitmode = 1;
394 } else {
395 /* Some of the comm-slot cards are 16 bit. But some 319 /* Some of the comm-slot cards are 16 bit. But some
396 of them are not. The 32-bit cards use offset 2 and 320 of them are not. The 32-bit cards use offset 2 and
397 pad with zeroes or sometimes ones (I think...) 321 have known revisions, we try reading the revision
398 Therefore, if we try offset 0 and get a silicon 322 register at offset 2, if we don't get a known revision
399 revision of 0, we assume 16 bit. */ 323 we assume 16 bit at offset 0. */
400 int sr; 324 lp->reg_offset = 2;
401 325 lp->dma_bitmode = SONIC_BITMODE16;
402 /* Technically this is not necessary since we zeroed 326
403 it above */ 327 sr = SONIC_READ(SONIC_SR);
404 reg_offset = 0; 328 if (sr == 0x0004 || sr == 0x0006 || sr == 0x0100 || sr == 0x0101)
405 dma_bitmode = 0; 329 /* 83932 is 0x0004 or 0x0006, 83934 is 0x0100 or 0x0101 */
406 sr = sonic_read(dev, SONIC_SR); 330 lp->dma_bitmode = SONIC_BITMODE32;
407 if (sr == 0 || sr == 0xffff) { 331 else {
408 reg_offset = 2; 332 lp->dma_bitmode = SONIC_BITMODE16;
409 /* 83932 is 0x0004, 83934 is 0x0100 or 0x0101 */ 333 lp->reg_offset = 0;
410 sr = sonic_read(dev, SONIC_SR); 334 sr = SONIC_READ(SONIC_SR);
411 dma_bitmode = 1;
412
413 } 335 }
414 printk(KERN_INFO 336 } else {
415 "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", 337 /* All onboard cards are at offset 2 with 32 bit DMA. */
416 dev->name, sr, dma_bitmode?32:16, reg_offset); 338 lp->reg_offset = 2;
339 lp->dma_bitmode = SONIC_BITMODE32;
340 sr = SONIC_READ(SONIC_SR);
417 } 341 }
418 342 printk(KERN_INFO
343 "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
344 lp->device->bus_id, sr, lp->dma_bitmode?32:16, lp->reg_offset);
419 345
420 /* this carries my sincere apologies -- by the time I got to updating 346#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
421    the driver, support for "reg_offsets" appears nowhere in the sonic 347 printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id,
422    code, going back for over a year. Fortunately, my Mac doesn't seem 348 	SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
423 to use whatever this was. 349#endif
424 350
425 If you know how this is supposed to be implemented, either fix it,
426 or contact me (sammy@oh.verio.com) to explain what it is. --Sam */
427
428 if(reg_offset) {
429 printk("%s: register offset unsupported. please fix this if you know what it is.\n", dev->name);
430 return -ENODEV;
431 }
432
433 /* Software reset, then initialize control registers. */ 351 /* Software reset, then initialize control registers. */
434 sonic_write(dev, SONIC_CMD, SONIC_CR_RST); 352 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
435 sonic_write(dev, SONIC_DCR, SONIC_DCR_BMS | 353
436 SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_EXBUS | 354 SONIC_WRITE(SONIC_DCR, SONIC_DCR_EXBUS | SONIC_DCR_BMS |
437 (dma_bitmode ? SONIC_DCR_DW : 0)); 355 SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
356 (lp->dma_bitmode ? SONIC_DCR_DW : 0));
438 357
439 /* This *must* be written back to in order to restore the 358 /* This *must* be written back to in order to restore the
440 extended programmable output bits */ 359 * extended programmable output bits, as it may not have been
441 sonic_write(dev, SONIC_DCR2, 0); 360 * initialised since the hardware reset. */
361 SONIC_WRITE(SONIC_DCR2, 0);
442 362
443 /* Clear *and* disable interrupts to be on the safe side */ 363 /* Clear *and* disable interrupts to be on the safe side */
444 sonic_write(dev, SONIC_ISR,0x7fff); 364 SONIC_WRITE(SONIC_IMR, 0);
445 sonic_write(dev, SONIC_IMR,0); 365 SONIC_WRITE(SONIC_ISR, 0x7fff);
446 366
447 /* Now look for the MAC address. */ 367 /* Now look for the MAC address. */
448 if (mac_onboard_sonic_ethernet_addr(dev) != 0) 368 if (mac_onboard_sonic_ethernet_addr(dev) != 0)
449 return -ENODEV; 369 return -ENODEV;
450 370
451 printk(KERN_INFO "MAC ");
452 for (i = 0; i < 6; i++) {
453 printk("%2.2x", dev->dev_addr[i]);
454 if (i < 5)
455 printk(":");
456 }
457
458 printk(" IRQ %d\n", dev->irq);
459
460 /* Shared init code */ 371 /* Shared init code */
461 return macsonic_init(dev); 372 return macsonic_init(dev);
462} 373}
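The probe above replaces the old "try offset 0, fall back to offset 2" guesswork: the PowerBook 520 is hard-wired to 16-bit mode, comm-slot cards are probed by reading the silicon revision at offset 2 and dropping back to 16-bit at offset 0 when the value is not a known 83932/83934 revision, and onboard SONICs are always 32-bit at offset 2. A compact sketch of that decision with the hardware read stubbed out; only the revision constants 0x0004/0x0006/0x0100/0x0101 are taken from the hunk, everything else is illustrative.

#include <stdio.h>

enum { BITMODE16 = 0, BITMODE32 = 1 };

/* Stub for SONIC_READ(SONIC_SR): pretend we read this revision value. */
static int read_silicon_revision(int reg_offset)
{
	(void)reg_offset;
	return 0x0101;		/* e.g. an 83934 on a 32-bit comm-slot card */
}

static void pick_bitmode(int is_powerbook_520, int is_commslot,
			 int *reg_offset, int *bitmode)
{
	if (is_powerbook_520) {			/* always 16 bit */
		*reg_offset = 0;
		*bitmode = BITMODE16;
	} else if (is_commslot) {		/* probe the revision at offset 2 */
		int sr;

		*reg_offset = 2;
		sr = read_silicon_revision(*reg_offset);
		if (sr == 0x0004 || sr == 0x0006 || sr == 0x0100 || sr == 0x0101) {
			*bitmode = BITMODE32;	/* known 83932/83934 revision */
		} else {
			*reg_offset = 0;	/* unknown: assume 16 bit */
			*bitmode = BITMODE16;
		}
	} else {				/* onboard: 32 bit at offset 2 */
		*reg_offset = 2;
		*bitmode = BITMODE32;
	}
}

int main(void)
{
	int off, mode;

	pick_bitmode(0, 1, &off, &mode);
	printf("comm-slot card: %d bit DMA, register offset %d\n",
	       mode == BITMODE32 ? 32 : 16, off);
	return 0;
}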
@@ -468,8 +379,10 @@ int __init mac_nubus_sonic_ethernet_addr(struct net_device* dev,
468 int i; 379 int i;
469 for(i = 0; i < 6; i++) 380 for(i = 0; i < 6; i++)
470 dev->dev_addr[i] = SONIC_READ_PROM(i); 381 dev->dev_addr[i] = SONIC_READ_PROM(i);
471 /* For now we are going to assume that they're all bit-reversed */ 382
472 bit_reverse_addr(dev->dev_addr); 383 /* Some of the addresses are bit-reversed */
384 if (id != MACSONIC_DAYNA)
385 bit_reverse_addr(dev->dev_addr);
473 386
474 return 0; 387 return 0;
475} 388}
@@ -487,6 +400,15 @@ int __init macsonic_ident(struct nubus_dev* ndev)
487 else 400 else
488 return MACSONIC_APPLE; 401 return MACSONIC_APPLE;
489 } 402 }
403
404 if (ndev->dr_hw == NUBUS_DRHW_SMC9194 &&
405 ndev->dr_sw == NUBUS_DRSW_DAYNA)
406 return MACSONIC_DAYNA;
407
408 if (ndev->dr_hw == NUBUS_DRHW_SONIC_LC &&
409 ndev->dr_sw == 0) { /* huh? */
410 return MACSONIC_APPLE16;
411 }
490 return -1; 412 return -1;
491} 413}
492 414
@@ -494,12 +416,12 @@ int __init mac_nubus_sonic_probe(struct net_device* dev)
494{ 416{
495 static int slots; 417 static int slots;
496 struct nubus_dev* ndev = NULL; 418 struct nubus_dev* ndev = NULL;
419 struct sonic_local* lp = netdev_priv(dev);
497 unsigned long base_addr, prom_addr; 420 unsigned long base_addr, prom_addr;
498 u16 sonic_dcr; 421 u16 sonic_dcr;
499 int id; 422 int id = -1;
500 int i; 423 int reg_offset, dma_bitmode;
501 int dma_bitmode; 424
502
503 /* Find the first SONIC that hasn't been initialized already */ 425 /* Find the first SONIC that hasn't been initialized already */
504 while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, 426 while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK,
505 NUBUS_TYPE_ETHERNET, ndev)) != NULL) 427 NUBUS_TYPE_ETHERNET, ndev)) != NULL)
@@ -521,51 +443,52 @@ int __init mac_nubus_sonic_probe(struct net_device* dev)
521 case MACSONIC_DUODOCK: 443 case MACSONIC_DUODOCK:
522 base_addr = ndev->board->slot_addr + DUODOCK_SONIC_REGISTERS; 444 base_addr = ndev->board->slot_addr + DUODOCK_SONIC_REGISTERS;
523 prom_addr = ndev->board->slot_addr + DUODOCK_SONIC_PROM_BASE; 445 prom_addr = ndev->board->slot_addr + DUODOCK_SONIC_PROM_BASE;
524 sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1 446 sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1 |
525 | SONIC_DCR_TFT0; 447 SONIC_DCR_TFT0;
526 reg_offset = 2; 448 reg_offset = 2;
527 dma_bitmode = 1; 449 dma_bitmode = SONIC_BITMODE32;
528 break; 450 break;
529 case MACSONIC_APPLE: 451 case MACSONIC_APPLE:
530 base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; 452 base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
531 prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE; 453 prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
532 sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0; 454 sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0;
533 reg_offset = 0; 455 reg_offset = 0;
534 dma_bitmode = 1; 456 dma_bitmode = SONIC_BITMODE32;
535 break; 457 break;
536 case MACSONIC_APPLE16: 458 case MACSONIC_APPLE16:
537 base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; 459 base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
538 prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE; 460 prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
539 sonic_dcr = SONIC_DCR_EXBUS 461 sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
540 | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 462 SONIC_DCR_PO1 | SONIC_DCR_BMS;
541 | SONIC_DCR_PO1 | SONIC_DCR_BMS;
542 reg_offset = 0; 463 reg_offset = 0;
543 dma_bitmode = 0; 464 dma_bitmode = SONIC_BITMODE16;
544 break; 465 break;
545 case MACSONIC_DAYNALINK: 466 case MACSONIC_DAYNALINK:
546 base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; 467 base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
547 prom_addr = ndev->board->slot_addr + DAYNALINK_PROM_BASE; 468 prom_addr = ndev->board->slot_addr + DAYNALINK_PROM_BASE;
548 sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0 469 sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
549 | SONIC_DCR_PO1 | SONIC_DCR_BMS; 470 SONIC_DCR_PO1 | SONIC_DCR_BMS;
550 reg_offset = 0; 471 reg_offset = 0;
551 dma_bitmode = 0; 472 dma_bitmode = SONIC_BITMODE16;
552 break; 473 break;
553 case MACSONIC_DAYNA: 474 case MACSONIC_DAYNA:
554 base_addr = ndev->board->slot_addr + DAYNA_SONIC_REGISTERS; 475 base_addr = ndev->board->slot_addr + DAYNA_SONIC_REGISTERS;
555 prom_addr = ndev->board->slot_addr + DAYNA_SONIC_MAC_ADDR; 476 prom_addr = ndev->board->slot_addr + DAYNA_SONIC_MAC_ADDR;
556 sonic_dcr = SONIC_DCR_BMS 477 sonic_dcr = SONIC_DCR_BMS |
557 | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1; 478 SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1;
558 reg_offset = 0; 479 reg_offset = 0;
559 dma_bitmode = 0; 480 dma_bitmode = SONIC_BITMODE16;
560 break; 481 break;
561 default: 482 default:
562 printk(KERN_ERR "macsonic: WTF, id is %d\n", id); 483 printk(KERN_ERR "macsonic: WTF, id is %d\n", id);
563 return -ENODEV; 484 return -ENODEV;
564 } 485 }
565 486
566 /* Danger! My arms are flailing wildly! You *must* set this 487 /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset
567 before using sonic_read() */ 488 * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */
568 dev->base_addr = base_addr; 489 dev->base_addr = base_addr;
490 lp->reg_offset = reg_offset;
491 lp->dma_bitmode = dma_bitmode;
569 dev->irq = SLOT2IRQ(ndev->board->slot); 492 dev->irq = SLOT2IRQ(ndev->board->slot);
570 493
571 if (!sonic_version_printed) { 494 if (!sonic_version_printed) {
@@ -573,29 +496,66 @@ int __init mac_nubus_sonic_probe(struct net_device* dev)
573 sonic_version_printed = 1; 496 sonic_version_printed = 1;
574 } 497 }
575 printk(KERN_INFO "%s: %s in slot %X\n", 498 printk(KERN_INFO "%s: %s in slot %X\n",
576 dev->name, ndev->board->name, ndev->board->slot); 499 lp->device->bus_id, ndev->board->name, ndev->board->slot);
577 printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", 500 printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
578 dev->name, sonic_read(dev, SONIC_SR), dma_bitmode?32:16, reg_offset); 501 lp->device->bus_id, SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset);
579 502
580 if(reg_offset) { 503#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
581 printk("%s: register offset unsupported. please fix this if you know what it is.\n", dev->name); 504 printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id,
582 return -ENODEV; 505 SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
583 } 506#endif
584 507
585 /* Software reset, then initialize control registers. */ 508 /* Software reset, then initialize control registers. */
586 sonic_write(dev, SONIC_CMD, SONIC_CR_RST); 509 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
587 sonic_write(dev, SONIC_DCR, sonic_dcr 510 SONIC_WRITE(SONIC_DCR, sonic_dcr | (dma_bitmode ? SONIC_DCR_DW : 0));
588 | (dma_bitmode ? SONIC_DCR_DW : 0)); 511 /* This *must* be written back to in order to restore the
512 * extended programmable output bits, since it may not have been
513 * initialised since the hardware reset. */
514 SONIC_WRITE(SONIC_DCR2, 0);
589 515
590 /* Clear *and* disable interrupts to be on the safe side */ 516 /* Clear *and* disable interrupts to be on the safe side */
591 sonic_write(dev, SONIC_ISR,0x7fff); 517 SONIC_WRITE(SONIC_IMR, 0);
592 sonic_write(dev, SONIC_IMR,0); 518 SONIC_WRITE(SONIC_ISR, 0x7fff);
593 519
594 /* Now look for the MAC address. */ 520 /* Now look for the MAC address. */
595 if (mac_nubus_sonic_ethernet_addr(dev, prom_addr, id) != 0) 521 if (mac_nubus_sonic_ethernet_addr(dev, prom_addr, id) != 0)
596 return -ENODEV; 522 return -ENODEV;
597 523
598 printk(KERN_INFO "MAC "); 524 /* Shared init code */
525 return macsonic_init(dev);
526}
527
528static int __init mac_sonic_probe(struct device *device)
529{
530 struct net_device *dev;
531 struct sonic_local *lp;
532 int err;
533 int i;
534
535 dev = alloc_etherdev(sizeof(struct sonic_local));
536 if (!dev)
537 return -ENOMEM;
538
539 lp = netdev_priv(dev);
540 lp->device = device;
541 SET_NETDEV_DEV(dev, device);
542 SET_MODULE_OWNER(dev);
543
544 /* This will catch fatal stuff like -ENOMEM as well as success */
545 err = mac_onboard_sonic_probe(dev);
546 if (err == 0)
547 goto found;
548 if (err != -ENODEV)
549 goto out;
550 err = mac_nubus_sonic_probe(dev);
551 if (err)
552 goto out;
553found:
554 err = register_netdev(dev);
555 if (err)
556 goto out;
557
558 printk("%s: MAC ", dev->name);
599 for (i = 0; i < 6; i++) { 559 for (i = 0; i < 6; i++) {
600 printk("%2.2x", dev->dev_addr[i]); 560 printk("%2.2x", dev->dev_addr[i]);
601 if (i < 5) 561 if (i < 5)
@@ -603,55 +563,95 @@ int __init mac_nubus_sonic_probe(struct net_device* dev)
603 } 563 }
604 printk(" IRQ %d\n", dev->irq); 564 printk(" IRQ %d\n", dev->irq);
605 565
606 /* Shared init code */ 566 return 0;
607 return macsonic_init(dev);
608}
609 567
610#ifdef MODULE 568out:
611static struct net_device *dev_macsonic; 569 free_netdev(dev);
612 570
613MODULE_PARM(sonic_debug, "i"); 571 return err;
572}
573
574MODULE_DESCRIPTION("Macintosh SONIC ethernet driver");
575module_param(sonic_debug, int, 0);
614MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)"); 576MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)");
615 577
616int 578#define SONIC_IRQ_FLAG IRQ_FLG_FAST
617init_module(void) 579
580#include "sonic.c"
581
582static int __devexit mac_sonic_device_remove (struct device *device)
618{ 583{
619 dev_macsonic = macsonic_probe(-1); 584 struct net_device *dev = device->driver_data;
620 if (IS_ERR(dev_macsonic)) { 585 struct sonic_local* lp = netdev_priv(dev);
621 printk(KERN_WARNING "macsonic.c: No card found\n"); 586
622 return PTR_ERR(dev_macsonic); 587 unregister_netdev (dev);
623 } 588 dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
589 lp->descriptors, lp->descriptors_laddr);
590 free_netdev (dev);
591
624 return 0; 592 return 0;
625} 593}
626 594
627void 595static struct device_driver mac_sonic_driver = {
628cleanup_module(void) 596 .name = mac_sonic_string,
597 .bus = &platform_bus_type,
598 .probe = mac_sonic_probe,
599 .remove = __devexit_p(mac_sonic_device_remove),
600};
601
602static void mac_sonic_platform_release(struct device *device)
629{ 603{
630 unregister_netdev(dev_macsonic); 604 struct platform_device *pldev;
631 kfree(dev_macsonic->priv); 605
632 free_netdev(dev_macsonic); 606 /* free device */
607 pldev = to_platform_device (device);
608 kfree (pldev);
633} 609}
634#endif /* MODULE */
635 610
611static int __init mac_sonic_init_module(void)
612{
613 struct platform_device *pldev;
614 int err;
636 615
637#define vdma_alloc(foo, bar) ((u32)foo) 616 if ((err = driver_register(&mac_sonic_driver))) {
638#define vdma_free(baz) 617 printk(KERN_ERR "Driver registration failed\n");
639#define sonic_chiptomem(bat) (bat) 618 return err;
640#define PHYSADDR(quux) (quux) 619 }
641#define CPHYSADDR(quux) (quux)
642 620
643#define sonic_request_irq request_irq 621 mac_sonic_device = NULL;
644#define sonic_free_irq free_irq
645 622
646#include "sonic.c" 623 if (!(pldev = kmalloc (sizeof (*pldev), GFP_KERNEL))) {
624 goto out_unregister;
625 }
647 626
648/* 627 memset(pldev, 0, sizeof (*pldev));
649 * Local variables: 628 pldev->name = mac_sonic_string;
650 * compile-command: "m68k-linux-gcc -D__KERNEL__ -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -ffixed-a2 -DMODULE -DMODVERSIONS -include ../../include/linux/modversions.h -c -o macsonic.o macsonic.c" 629 pldev->id = 0;
651 * version-control: t 630 pldev->dev.release = mac_sonic_platform_release;
652 * kept-new-versions: 5 631 mac_sonic_device = pldev;
653 * c-indent-level: 8 632
654 * tab-width: 8 633 if (platform_device_register (pldev)) {
655 * End: 634 kfree(pldev);
656 * 635 mac_sonic_device = NULL;
657 */ 636 }
637
638 return 0;
639
640out_unregister:
641 platform_device_unregister(pldev);
642
643 return -ENOMEM;
644}
645
646static void __exit mac_sonic_cleanup_module(void)
647{
648 driver_unregister(&mac_sonic_driver);
649
650 if (mac_sonic_device) {
651 platform_device_unregister(mac_sonic_device);
652 mac_sonic_device = NULL;
653 }
654}
655
656module_init(mac_sonic_init_module);
657module_exit(mac_sonic_cleanup_module);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 0405e1f0d3df..fb6b232069d6 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1157,16 +1157,20 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1157 if (!skb_shinfo(skb)->nr_frags) { 1157 if (!skb_shinfo(skb)->nr_frags) {
1158linear: 1158linear:
1159 if (skb->ip_summed != CHECKSUM_HW) { 1159 if (skb->ip_summed != CHECKSUM_HW) {
1160 /* Errata BTS #50, IHL must be 5 if no HW checksum */
1160 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | 1161 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
1161 ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC; 1162 ETH_TX_FIRST_DESC |
1163 ETH_TX_LAST_DESC |
1164 5 << ETH_TX_IHL_SHIFT;
1162 pkt_info.l4i_chk = 0; 1165 pkt_info.l4i_chk = 0;
1163 } else { 1166 } else {
1164 u32 ipheader = skb->nh.iph->ihl << 11;
1165 1167
1166 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | 1168 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
1167 ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC | 1169 ETH_TX_FIRST_DESC |
1168 ETH_GEN_TCP_UDP_CHECKSUM | 1170 ETH_TX_LAST_DESC |
1169 ETH_GEN_IP_V_4_CHECKSUM | ipheader; 1171 ETH_GEN_TCP_UDP_CHECKSUM |
1172 ETH_GEN_IP_V_4_CHECKSUM |
1173 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1170 /* CPU already calculated pseudo header checksum. */ 1174 /* CPU already calculated pseudo header checksum. */
1171 if (skb->nh.iph->protocol == IPPROTO_UDP) { 1175 if (skb->nh.iph->protocol == IPPROTO_UDP) {
1172 pkt_info.cmd_sts |= ETH_UDP_FRAME; 1176 pkt_info.cmd_sts |= ETH_UDP_FRAME;
@@ -1193,7 +1197,6 @@ linear:
1193 stats->tx_bytes += pkt_info.byte_cnt; 1197 stats->tx_bytes += pkt_info.byte_cnt;
1194 } else { 1198 } else {
1195 unsigned int frag; 1199 unsigned int frag;
1196 u32 ipheader;
1197 1200
1198 /* Since hardware can't handle unaligned fragments smaller 1201 /* Since hardware can't handle unaligned fragments smaller
1199 * than 9 bytes, if we find any, we linearize the skb 1202 * than 9 bytes, if we find any, we linearize the skb
@@ -1222,12 +1225,16 @@ linear:
1222 DMA_TO_DEVICE); 1225 DMA_TO_DEVICE);
1223 pkt_info.l4i_chk = 0; 1226 pkt_info.l4i_chk = 0;
1224 pkt_info.return_info = 0; 1227 pkt_info.return_info = 0;
1225 pkt_info.cmd_sts = ETH_TX_FIRST_DESC;
1226 1228
1227 if (skb->ip_summed == CHECKSUM_HW) { 1229 if (skb->ip_summed != CHECKSUM_HW)
1228 ipheader = skb->nh.iph->ihl << 11; 1230 /* Errata BTS #50, IHL must be 5 if no HW checksum */
1229 pkt_info.cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | 1231 pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
1230 ETH_GEN_IP_V_4_CHECKSUM | ipheader; 1232 5 << ETH_TX_IHL_SHIFT;
1233 else {
1234 pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
1235 ETH_GEN_TCP_UDP_CHECKSUM |
1236 ETH_GEN_IP_V_4_CHECKSUM |
1237 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1231 /* CPU already calculated pseudo header checksum. */ 1238 /* CPU already calculated pseudo header checksum. */
1232 if (skb->nh.iph->protocol == IPPROTO_UDP) { 1239 if (skb->nh.iph->protocol == IPPROTO_UDP) {
1233 pkt_info.cmd_sts |= ETH_UDP_FRAME; 1240 pkt_info.cmd_sts |= ETH_UDP_FRAME;
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 57c4f8fbfdb6..7678b59c2952 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -49,7 +49,7 @@
49/* Checksum offload for Tx works for most packets, but 49/* Checksum offload for Tx works for most packets, but
50 * fails if previous packet sent did not use hw csum 50 * fails if previous packet sent did not use hw csum
51 */ 51 */
52#undef MV643XX_CHECKSUM_OFFLOAD_TX 52#define MV643XX_CHECKSUM_OFFLOAD_TX
53#define MV643XX_NAPI 53#define MV643XX_NAPI
54#define MV643XX_TX_FAST_REFILL 54#define MV643XX_TX_FAST_REFILL
55#undef MV643XX_RX_QUEUE_FILL_ON_TASK /* Does not work, yet */ 55#undef MV643XX_RX_QUEUE_FILL_ON_TASK /* Does not work, yet */
@@ -217,6 +217,8 @@
217#define ETH_TX_ENABLE_INTERRUPT (BIT23) 217#define ETH_TX_ENABLE_INTERRUPT (BIT23)
218#define ETH_AUTO_MODE (BIT30) 218#define ETH_AUTO_MODE (BIT30)
219 219
220#define ETH_TX_IHL_SHIFT 11
221
220/* typedefs */ 222/* typedefs */
221 223
222typedef enum _eth_func_ret_status { 224typedef enum _eth_func_ret_status {
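The mv643xx_eth changes above implement the BTS #50 workaround: the IP header length field packed into the transmit descriptor's cmd_sts word at ETH_TX_IHL_SHIFT (11) must be 5 when no hardware checksum is requested, and otherwise carries skb->nh.iph->ihl. A small sketch of how that field is assembled; the flag bit values below are placeholders, only the shift and the "IHL = 5 without HW csum" rule come from the patch.

#include <stdio.h>
#include <stdint.h>

#define ETH_TX_IHL_SHIFT 11		/* from mv643xx_eth.h above */

/* Placeholder bit values for illustration; the real ones are BIT defines
 * in mv643xx_eth.h and are not reproduced here. */
#define TX_FIRST_DESC        (1u << 0)
#define TX_LAST_DESC         (1u << 1)
#define GEN_TCP_UDP_CHECKSUM (1u << 2)
#define GEN_IP_V_4_CHECKSUM  (1u << 3)

static uint32_t build_cmd_sts(int hw_csum, unsigned int ihl)
{
	uint32_t cmd = TX_FIRST_DESC | TX_LAST_DESC;

	if (!hw_csum)
		/* Errata BTS #50: IHL must be 5 if no HW checksum is requested */
		cmd |= 5u << ETH_TX_IHL_SHIFT;
	else
		cmd |= GEN_TCP_UDP_CHECKSUM | GEN_IP_V_4_CHECKSUM |
		       (ihl << ETH_TX_IHL_SHIFT);
	return cmd;
}

int main(void)
{
	printf("no csum: cmd_sts=%#x\n", build_cmd_sts(0, 5));
	printf("hw csum: cmd_sts=%#x (ihl=6, e.g. one IP option word)\n",
	       build_cmd_sts(1, 6));
	return 0;
}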
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 4a391ea0f58a..a1ac4bd1696e 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -486,9 +486,9 @@ struct netdrv_private {
486MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>"); 486MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
487MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver"); 487MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver");
488MODULE_LICENSE("GPL"); 488MODULE_LICENSE("GPL");
489MODULE_PARM (multicast_filter_limit, "i"); 489module_param(multicast_filter_limit, int, 0);
490MODULE_PARM (max_interrupt_work, "i"); 490module_param(max_interrupt_work, int, 0);
491MODULE_PARM (media, "1-" __MODULE_STRING(8) "i"); 491module_param_array(media, int, NULL, 0);
492MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses"); 492MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses");
493MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt"); 493MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt");
494MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex"); 494MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex");
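The pci-skeleton hunk above is the mechanical conversion from the old MODULE_PARM() string descriptors to module_param()/module_param_array(), which take the C type and sysfs permission bits explicitly. A stripped-down module-style sketch of the new declarations, reusing the skeleton's three parameter names; the init/exit bodies are illustrative filler, not part of the skeleton driver.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>

static int multicast_filter_limit = 32;
static int max_interrupt_work = 20;
static int media[8];

module_param(multicast_filter_limit, int, 0);
module_param(max_interrupt_work, int, 0);
module_param_array(media, int, NULL, 0);
MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
MODULE_PARM_DESC(media, "Bits 0-3: media type, bit 17: full duplex");
MODULE_LICENSE("GPL");

static int __init param_demo_init(void)
{
	printk(KERN_INFO "multicast_filter_limit=%d max_interrupt_work=%d media[0]=%d\n",
	       multicast_filter_limit, max_interrupt_work, media[0]);
	return 0;
}

static void __exit param_demo_exit(void)
{
}

module_init(param_demo_init);
module_exit(param_demo_exit);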
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 9d8197bb293a..384a736a0d2f 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -134,7 +134,7 @@ typedef struct local_info_t {
134 u_char mc_filter[8]; 134 u_char mc_filter[8];
135} local_info_t; 135} local_info_t;
136 136
137#define MC_FILTERBREAK 64 137#define MC_FILTERBREAK 8
138 138
139/*====================================================================*/ 139/*====================================================================*/
140/* 140/*
@@ -1012,7 +1012,7 @@ static void fjn_reset(struct net_device *dev)
1012 outb(BANK_1U, ioaddr + CONFIG_1); 1012 outb(BANK_1U, ioaddr + CONFIG_1);
1013 1013
1014 /* set the multicast table to accept none. */ 1014 /* set the multicast table to accept none. */
1015 for (i = 0; i < 6; i++) 1015 for (i = 0; i < 8; i++)
1016 outb(0x00, ioaddr + MAR_ADR + i); 1016 outb(0x00, ioaddr + MAR_ADR + i);
1017 1017
1018 /* Switch to bank 2 (runtime mode) */ 1018 /* Switch to bank 2 (runtime mode) */
@@ -1269,6 +1269,16 @@ static void set_rx_mode(struct net_device *dev)
1269 u_long flags; 1269 u_long flags;
1270 int i; 1270 int i;
1271 1271
1272 int saved_config_0 = inb(ioaddr + CONFIG_0);
1273
1274 local_irq_save(flags);
1275
1276 /* Disable Tx and Rx */
1277 if (sram_config == 0)
1278 outb(CONFIG0_RST, ioaddr + CONFIG_0);
1279 else
1280 outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
1281
1272 if (dev->flags & IFF_PROMISC) { 1282 if (dev->flags & IFF_PROMISC) {
1273 /* Unconditionally log net taps. */ 1283 /* Unconditionally log net taps. */
1274 printk("%s: Promiscuous mode enabled.\n", dev->name); 1284 printk("%s: Promiscuous mode enabled.\n", dev->name);
@@ -1290,20 +1300,23 @@ static void set_rx_mode(struct net_device *dev)
1290 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1300 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1291 i++, mclist = mclist->next) { 1301 i++, mclist = mclist->next) {
1292 unsigned int bit = 1302 unsigned int bit =
1293 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f; 1303 ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
1294 mc_filter[bit >> 3] |= (1 << bit); 1304 mc_filter[bit >> 3] |= (1 << (bit & 7));
1295 } 1305 }
1306 outb(2, ioaddr + RX_MODE); /* Use normal mode. */
1296 } 1307 }
1297 1308
1298 local_irq_save(flags);
1299 if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) { 1309 if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) {
1300 int saved_bank = inb(ioaddr + CONFIG_1); 1310 int saved_bank = inb(ioaddr + CONFIG_1);
1301 /* Switch to bank 1 and set the multicast table. */ 1311 /* Switch to bank 1 and set the multicast table. */
1302 outb(0xe4, ioaddr + CONFIG_1); 1312 outb(0xe4, ioaddr + CONFIG_1);
1303 for (i = 0; i < 8; i++) 1313 for (i = 0; i < 8; i++)
1304 outb(mc_filter[i], ioaddr + 8 + i); 1314 outb(mc_filter[i], ioaddr + MAR_ADR + i);
1305 memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter)); 1315 memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter));
1306 outb(saved_bank, ioaddr + CONFIG_1); 1316 outb(saved_bank, ioaddr + CONFIG_1);
1307 } 1317 }
1318
1319 outb(saved_config_0, ioaddr + CONFIG_0);
1320
1308 local_irq_restore(flags); 1321 local_irq_restore(flags);
1309} 1322}
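The fmvj18x_cs hunk above switches the multicast hash from the low 6 bits of the little-endian CRC to the top 6 bits (crc >> 26) and sets the matching bit in the 8-byte filter with mc_filter[bit >> 3] |= 1 << (bit & 7). A self-contained sketch of that computation; the CRC routine is a bit-at-a-time reimplementation in the style of the kernel's ether_crc_le(), written here only for illustration.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Little-endian Ethernet CRC-32, one bit at a time (illustrative
 * reimplementation of what ether_crc_le() computes in the kernel). */
static unsigned int ether_crc_le(int len, const unsigned char *data)
{
	unsigned int crc = 0xffffffffu;
	int i, bit;

	for (i = 0; i < len; i++) {
		unsigned char byte = data[i];

		for (bit = 0; bit < 8; bit++, byte >>= 1) {
			if ((crc ^ byte) & 1)
				crc = (crc >> 1) ^ 0xedb88320u;
			else
				crc >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	unsigned char mc_filter[8];
	const unsigned char addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int bit;

	memset(mc_filter, 0, sizeof(mc_filter));

	/* Take the top 6 bits of the CRC and set the matching filter bit,
	 * exactly as the patched set_rx_mode() does. */
	bit = ether_crc_le(ETH_ALEN, addr) >> 26;
	mc_filter[bit >> 3] |= 1 << (bit & 7);

	printf("hash bit %u -> mc_filter[%u] = 0x%02x\n",
	       bit, bit >> 3, mc_filter[bit >> 3]);
	return 0;
}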
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
new file mode 100644
index 000000000000..6a2fe3583478
--- /dev/null
+++ b/drivers/net/phy/Kconfig
@@ -0,0 +1,57 @@
1#
2# PHY Layer Configuration
3#
4
5menu "PHY device support"
6
7config PHYLIB
8 tristate "PHY Device support and infrastructure"
9 depends on NET_ETHERNET
10 help
11 Ethernet controllers are usually attached to PHY
12 devices. This option provides infrastructure for
13 managing PHY devices.
14
15config PHYCONTROL
16 bool " Support for automatically handling PHY state changes"
17 depends on PHYLIB
18 help
19 Adds code to perform all the work for keeping PHY link
20 state (speed/duplex/etc) up-to-date. Also handles
21 interrupts.
22
23comment "MII PHY device drivers"
24 depends on PHYLIB
25
26config MARVELL_PHY
27 tristate "Drivers for Marvell PHYs"
28 depends on PHYLIB
29 ---help---
30 Currently has a driver for the 88E1011S
31
32config DAVICOM_PHY
33 tristate "Drivers for Davicom PHYs"
34 depends on PHYLIB
35 ---help---
36 Currently supports dm9161e and dm9131
37
38config QSEMI_PHY
39 tristate "Drivers for Quality Semiconductor PHYs"
40 depends on PHYLIB
41 ---help---
42 Currently supports the qs6612
43
44config LXT_PHY
45 tristate "Drivers for the Intel LXT PHYs"
46 depends on PHYLIB
47 ---help---
48 Currently supports the lxt970, lxt971
49
50config CICADA_PHY
51 tristate "Drivers for the Cicada PHYs"
52 depends on PHYLIB
53 ---help---
54 Currently supports the cis8204
55
56endmenu
57
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
new file mode 100644
index 000000000000..e4116a5fbb4c
--- /dev/null
+++ b/drivers/net/phy/Makefile
@@ -0,0 +1,10 @@
1# Makefile for Linux PHY drivers
2
3libphy-objs := phy.o phy_device.o mdio_bus.o
4
5obj-$(CONFIG_PHYLIB) += libphy.o
6obj-$(CONFIG_MARVELL_PHY) += marvell.o
7obj-$(CONFIG_DAVICOM_PHY) += davicom.o
8obj-$(CONFIG_CICADA_PHY) += cicada.o
9obj-$(CONFIG_LXT_PHY) += lxt.o
10obj-$(CONFIG_QSEMI_PHY) += qsemi.o
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
new file mode 100644
index 000000000000..c47fb2ecd147
--- /dev/null
+++ b/drivers/net/phy/cicada.c
@@ -0,0 +1,134 @@
1/*
2 * drivers/net/phy/cicada.c
3 *
4 * Driver for Cicada PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41/* Cicada Extended Control Register 1 */
42#define MII_CIS8201_EXT_CON1 0x17
43#define MII_CIS8201_EXTCON1_INIT 0x0000
44
45/* Cicada Interrupt Mask Register */
46#define MII_CIS8201_IMASK 0x19
47#define MII_CIS8201_IMASK_IEN 0x8000
48#define MII_CIS8201_IMASK_SPEED 0x4000
49#define MII_CIS8201_IMASK_LINK 0x2000
50#define MII_CIS8201_IMASK_DUPLEX 0x1000
51#define MII_CIS8201_IMASK_MASK 0xf000
52
53/* Cicada Interrupt Status Register */
54#define MII_CIS8201_ISTAT 0x1a
55#define MII_CIS8201_ISTAT_STATUS 0x8000
56#define MII_CIS8201_ISTAT_SPEED 0x4000
57#define MII_CIS8201_ISTAT_LINK 0x2000
58#define MII_CIS8201_ISTAT_DUPLEX 0x1000
59
60/* Cicada Auxiliary Control/Status Register */
61#define MII_CIS8201_AUX_CONSTAT 0x1c
62#define MII_CIS8201_AUXCONSTAT_INIT 0x0004
63#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020
64#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018
65#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010
66#define MII_CIS8201_AUXCONSTAT_100 0x0008
67
68MODULE_DESCRIPTION("Cicada PHY driver");
69MODULE_AUTHOR("Andy Fleming");
70MODULE_LICENSE("GPL");
71
72static int cis820x_config_init(struct phy_device *phydev)
73{
74 int err;
75
76 err = phy_write(phydev, MII_CIS8201_AUX_CONSTAT,
77 MII_CIS8201_AUXCONSTAT_INIT);
78
79 if (err < 0)
80 return err;
81
82 err = phy_write(phydev, MII_CIS8201_EXT_CON1,
83 MII_CIS8201_EXTCON1_INIT);
84
85 return err;
86}
87
88static int cis820x_ack_interrupt(struct phy_device *phydev)
89{
90 int err = phy_read(phydev, MII_CIS8201_ISTAT);
91
92 return (err < 0) ? err : 0;
93}
94
95static int cis820x_config_intr(struct phy_device *phydev)
96{
97 int err;
98
99 if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
100 err = phy_write(phydev, MII_CIS8201_IMASK,
101 MII_CIS8201_IMASK_MASK);
102 else
103 err = phy_write(phydev, MII_CIS8201_IMASK, 0);
104
105 return err;
106}
107
108/* Cicada 820x */
109static struct phy_driver cis8204_driver = {
110 .phy_id = 0x000fc440,
111 .name = "Cicada Cis8204",
112 .phy_id_mask = 0x000fffc0,
113 .features = PHY_GBIT_FEATURES,
114 .flags = PHY_HAS_INTERRUPT,
115 .config_init = &cis820x_config_init,
116 .config_aneg = &genphy_config_aneg,
117 .read_status = &genphy_read_status,
118 .ack_interrupt = &cis820x_ack_interrupt,
119 .config_intr = &cis820x_config_intr,
120 .driver = { .owner = THIS_MODULE,},
121};
122
123static int __init cis8204_init(void)
124{
125 return phy_driver_register(&cis8204_driver);
126}
127
128static void __exit cis8204_exit(void)
129{
130 phy_driver_unregister(&cis8204_driver);
131}
132
133module_init(cis8204_init);
134module_exit(cis8204_exit);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
new file mode 100644
index 000000000000..6caf499fae32
--- /dev/null
+++ b/drivers/net/phy/davicom.c
@@ -0,0 +1,195 @@
1/*
2 * drivers/net/phy/davicom.c
3 *
4 * Driver for Davicom PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#define MII_DM9161_SCR 0x10
42#define MII_DM9161_SCR_INIT 0x0610
43
44/* DM9161 Interrupt Register */
45#define MII_DM9161_INTR 0x15
46#define MII_DM9161_INTR_PEND 0x8000
47#define MII_DM9161_INTR_DPLX_MASK 0x0800
48#define MII_DM9161_INTR_SPD_MASK 0x0400
49#define MII_DM9161_INTR_LINK_MASK 0x0200
50#define MII_DM9161_INTR_MASK 0x0100
51#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
52#define MII_DM9161_INTR_SPD_CHANGE 0x0008
53#define MII_DM9161_INTR_LINK_CHANGE 0x0004
54#define MII_DM9161_INTR_INIT 0x0000
55#define MII_DM9161_INTR_STOP \
56(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
57 | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
58
59/* DM9161 10BT Configuration/Status */
60#define MII_DM9161_10BTCSR 0x12
61#define MII_DM9161_10BTCSR_INIT 0x7800
62
63MODULE_DESCRIPTION("Davicom PHY driver");
64MODULE_AUTHOR("Andy Fleming");
65MODULE_LICENSE("GPL");
66
67
68#define DM9161_DELAY 1
69static int dm9161_config_intr(struct phy_device *phydev)
70{
71 int temp;
72
73 temp = phy_read(phydev, MII_DM9161_INTR);
74
75 if (temp < 0)
76 return temp;
77
78 if(PHY_INTERRUPT_ENABLED == phydev->interrupts )
79 temp &= ~(MII_DM9161_INTR_STOP);
80 else
81 temp |= MII_DM9161_INTR_STOP;
82
83 temp = phy_write(phydev, MII_DM9161_INTR, temp);
84
85 return temp;
86}
87
88static int dm9161_config_aneg(struct phy_device *phydev)
89{
90 int err;
91
92 /* Isolate the PHY */
93 err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);
94
95 if (err < 0)
96 return err;
97
98 /* Configure the new settings */
99 err = genphy_config_aneg(phydev);
100
101 if (err < 0)
102 return err;
103
104 return 0;
105}
106
107static int dm9161_config_init(struct phy_device *phydev)
108{
109 int err;
110
111 /* Isolate the PHY */
112 err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);
113
114 if (err < 0)
115 return err;
116
117 /* Do not bypass the scrambler/descrambler */
118 err = phy_write(phydev, MII_DM9161_SCR, MII_DM9161_SCR_INIT);
119
120 if (err < 0)
121 return err;
122
123 /* Clear 10BTCSR to default */
124 err = phy_write(phydev, MII_DM9161_10BTCSR, MII_DM9161_10BTCSR_INIT);
125
126 if (err < 0)
127 return err;
128
129 /* Reconnect the PHY, and enable Autonegotiation */
130 err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
131
132 if (err < 0)
133 return err;
134
135 return 0;
136}
137
138static int dm9161_ack_interrupt(struct phy_device *phydev)
139{
140 int err = phy_read(phydev, MII_DM9161_INTR);
141
142 return (err < 0) ? err : 0;
143}
144
145static struct phy_driver dm9161_driver = {
146 .phy_id = 0x0181b880,
147 .name = "Davicom DM9161E",
148 .phy_id_mask = 0x0ffffff0,
149 .features = PHY_BASIC_FEATURES,
150 .config_init = dm9161_config_init,
151 .config_aneg = dm9161_config_aneg,
152 .read_status = genphy_read_status,
153 .driver = { .owner = THIS_MODULE,},
154};
155
156static struct phy_driver dm9131_driver = {
157 .phy_id = 0x00181b80,
158 .name = "Davicom DM9131",
159 .phy_id_mask = 0x0ffffff0,
160 .features = PHY_BASIC_FEATURES,
161 .flags = PHY_HAS_INTERRUPT,
162 .config_aneg = genphy_config_aneg,
163 .read_status = genphy_read_status,
164 .ack_interrupt = dm9161_ack_interrupt,
165 .config_intr = dm9161_config_intr,
166 .driver = { .owner = THIS_MODULE,},
167};
168
169static int __init davicom_init(void)
170{
171 int ret;
172
173 ret = phy_driver_register(&dm9161_driver);
174 if (ret)
175 goto err1;
176
177 ret = phy_driver_register(&dm9131_driver);
178 if (ret)
179 goto err2;
180 return 0;
181
182 err2:
183 phy_driver_unregister(&dm9161_driver);
184 err1:
185 return ret;
186}
187
188static void __exit davicom_exit(void)
189{
190 phy_driver_unregister(&dm9161_driver);
191 phy_driver_unregister(&dm9131_driver);
192}
193
194module_init(davicom_init);
195module_exit(davicom_exit);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
new file mode 100644
index 000000000000..4c840448ec86
--- /dev/null
+++ b/drivers/net/phy/lxt.c
@@ -0,0 +1,179 @@
1/*
2 * drivers/net/phy/lxt.c
3 *
4 * Driver for Intel LXT PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41/* The Level one LXT970 is used by many boards */
42
43#define MII_LXT970_IER 17 /* Interrupt Enable Register */
44
45#define MII_LXT970_IER_IEN 0x0002
46
47#define MII_LXT970_ISR 18 /* Interrupt Status Register */
48
49#define MII_LXT970_CONFIG 19 /* Configuration Register */
50
51/* ------------------------------------------------------------------------- */
52/* The Level one LXT971 is used on some of my custom boards */
53
54/* register definitions for the 971 */
55#define MII_LXT971_IER 18 /* Interrupt Enable Register */
56#define MII_LXT971_IER_IEN 0x00f2
57
58#define MII_LXT971_ISR 19 /* Interrupt Status Register */
59
60
61MODULE_DESCRIPTION("Intel LXT PHY driver");
62MODULE_AUTHOR("Andy Fleming");
63MODULE_LICENSE("GPL");
64
65static int lxt970_ack_interrupt(struct phy_device *phydev)
66{
67 int err;
68
69 err = phy_read(phydev, MII_BMSR);
70
71 if (err < 0)
72 return err;
73
74 err = phy_read(phydev, MII_LXT970_ISR);
75
76 if (err < 0)
77 return err;
78
79 return 0;
80}
81
82static int lxt970_config_intr(struct phy_device *phydev)
83{
84 int err;
85
86 if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
87 err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
88 else
89 err = phy_write(phydev, MII_LXT970_IER, 0);
90
91 return err;
92}
93
94static int lxt970_config_init(struct phy_device *phydev)
95{
96 int err;
97
98 err = phy_write(phydev, MII_LXT970_CONFIG, 0);
99
100 return err;
101}
102
103
104static int lxt971_ack_interrupt(struct phy_device *phydev)
105{
106 int err = phy_read(phydev, MII_LXT971_ISR);
107
108 if (err < 0)
109 return err;
110
111 return 0;
112}
113
114static int lxt971_config_intr(struct phy_device *phydev)
115{
116 int err;
117
118 if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
119 err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
120 else
121 err = phy_write(phydev, MII_LXT971_IER, 0);
122
123 return err;
124}
125
126static struct phy_driver lxt970_driver = {
127 .phy_id = 0x07810000,
128 .name = "LXT970",
129 .phy_id_mask = 0x0fffffff,
130 .features = PHY_BASIC_FEATURES,
131 .flags = PHY_HAS_INTERRUPT,
132 .config_init = lxt970_config_init,
133 .config_aneg = genphy_config_aneg,
134 .read_status = genphy_read_status,
135 .ack_interrupt = lxt970_ack_interrupt,
136 .config_intr = lxt970_config_intr,
137 .driver = { .owner = THIS_MODULE,},
138};
139
140static struct phy_driver lxt971_driver = {
141 .phy_id = 0x0001378e,
142 .name = "LXT971",
143 .phy_id_mask = 0x0fffffff,
144 .features = PHY_BASIC_FEATURES,
145 .flags = PHY_HAS_INTERRUPT,
146 .config_aneg = genphy_config_aneg,
147 .read_status = genphy_read_status,
148 .ack_interrupt = lxt971_ack_interrupt,
149 .config_intr = lxt971_config_intr,
150 .driver = { .owner = THIS_MODULE,},
151};
152
153static int __init lxt_init(void)
154{
155 int ret;
156
157 ret = phy_driver_register(&lxt970_driver);
158 if (ret)
159 goto err1;
160
161 ret = phy_driver_register(&lxt971_driver);
162 if (ret)
163 goto err2;
164 return 0;
165
166 err2:
167 phy_driver_unregister(&lxt970_driver);
168 err1:
169 return ret;
170}
171
172static void __exit lxt_exit(void)
173{
174 phy_driver_unregister(&lxt970_driver);
175 phy_driver_unregister(&lxt971_driver);
176}
177
178module_init(lxt_init);
179module_exit(lxt_exit);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
new file mode 100644
index 000000000000..4a72b025006b
--- /dev/null
+++ b/drivers/net/phy/marvell.c
@@ -0,0 +1,140 @@
1/*
2 * drivers/net/phy/marvell.c
3 *
4 * Driver for Marvell PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#define MII_M1011_IEVENT 0x13
42#define MII_M1011_IEVENT_CLEAR 0x0000
43
44#define MII_M1011_IMASK 0x12
45#define MII_M1011_IMASK_INIT 0x6400
46#define MII_M1011_IMASK_CLEAR 0x0000
47
48MODULE_DESCRIPTION("Marvell PHY driver");
49MODULE_AUTHOR("Andy Fleming");
50MODULE_LICENSE("GPL");
51
52static int marvell_ack_interrupt(struct phy_device *phydev)
53{
54 int err;
55
56 /* Clear the interrupts by reading the reg */
57 err = phy_read(phydev, MII_M1011_IEVENT);
58
59 if (err < 0)
60 return err;
61
62 return 0;
63}
64
65static int marvell_config_intr(struct phy_device *phydev)
66{
67 int err;
68
69 if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
70 err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
71 else
72 err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
73
74 return err;
75}
76
77static int marvell_config_aneg(struct phy_device *phydev)
78{
79 int err;
80
81 /* The Marvell PHY has an errata which requires
82 * that certain registers get written in order
83 * to restart autonegotiation */
84 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
85
86 if (err < 0)
87 return err;
88
89 err = phy_write(phydev, 0x1d, 0x1f);
90 if (err < 0)
91 return err;
92
93 err = phy_write(phydev, 0x1e, 0x200c);
94 if (err < 0)
95 return err;
96
97 err = phy_write(phydev, 0x1d, 0x5);
98 if (err < 0)
99 return err;
100
101 err = phy_write(phydev, 0x1e, 0);
102 if (err < 0)
103 return err;
104
105 err = phy_write(phydev, 0x1e, 0x100);
106 if (err < 0)
107 return err;
108
109
110 err = genphy_config_aneg(phydev);
111
112 return err;
113}
114
115
116static struct phy_driver m88e1101_driver = {
117 .phy_id = 0x01410c00,
118 .phy_id_mask = 0xffffff00,
119 .name = "Marvell 88E1101",
120 .features = PHY_GBIT_FEATURES,
121 .flags = PHY_HAS_INTERRUPT,
122 .config_aneg = &marvell_config_aneg,
123 .read_status = &genphy_read_status,
124 .ack_interrupt = &marvell_ack_interrupt,
125 .config_intr = &marvell_config_intr,
126 .driver = { .owner = THIS_MODULE,},
127};
128
129static int __init marvell_init(void)
130{
131 return phy_driver_register(&m88e1101_driver);
132}
133
134static void __exit marvell_exit(void)
135{
136 phy_driver_unregister(&m88e1101_driver);
137}
138
139module_init(marvell_init);
140module_exit(marvell_exit);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
new file mode 100644
index 000000000000..41f62c0c5fcb
--- /dev/null
+++ b/drivers/net/phy/mdio_bus.c
@@ -0,0 +1,176 @@
1/*
2 * drivers/net/phy/mdio_bus.c
3 *
4 * MDIO Bus interface
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41/* mdiobus_register
42 *
43 * description: Called by a bus driver to bring up all the PHYs
44 * on a given bus, and attach them to the bus
45 */
46int mdiobus_register(struct mii_bus *bus)
47{
48 int i;
49 int err = 0;
50
 51	if (NULL == bus || NULL == bus->name ||
 52			NULL == bus->read ||
 53			NULL == bus->write)
 54		return -EINVAL;
 55
 56	spin_lock_init(&bus->mdio_lock);
57
58 if (bus->reset)
59 bus->reset(bus);
60
61 for (i = 0; i < PHY_MAX_ADDR; i++) {
62 struct phy_device *phydev;
63
64 phydev = get_phy_device(bus, i);
65
66 if (IS_ERR(phydev))
67 return PTR_ERR(phydev);
68
69 /* There's a PHY at this address
70 * We need to set:
71 * 1) IRQ
72 * 2) bus_id
73 * 3) parent
74 * 4) bus
75 * 5) mii_bus
76 * And, we need to register it */
77 if (phydev) {
78 phydev->irq = bus->irq[i];
79
80 phydev->dev.parent = bus->dev;
81 phydev->dev.bus = &mdio_bus_type;
82 sprintf(phydev->dev.bus_id, "phy%d:%d", bus->id, i);
83
84 phydev->bus = bus;
85
86 err = device_register(&phydev->dev);
87
88 if (err)
89 printk(KERN_ERR "phy %d failed to register\n",
90 i);
91 }
92
93 bus->phy_map[i] = phydev;
94 }
95
96 pr_info("%s: probed\n", bus->name);
97
98 return err;
99}
100EXPORT_SYMBOL(mdiobus_register);
101
102void mdiobus_unregister(struct mii_bus *bus)
103{
104 int i;
105
106 for (i = 0; i < PHY_MAX_ADDR; i++) {
107 if (bus->phy_map[i]) {
108 device_unregister(&bus->phy_map[i]->dev);
109 kfree(bus->phy_map[i]);
110 }
111 }
112}
113EXPORT_SYMBOL(mdiobus_unregister);
114
115/* mdio_bus_match
116 *
117 * description: Given a PHY device, and a PHY driver, return 1 if
118 * the driver supports the device. Otherwise, return 0
119 */
120static int mdio_bus_match(struct device *dev, struct device_driver *drv)
121{
122 struct phy_device *phydev = to_phy_device(dev);
123 struct phy_driver *phydrv = to_phy_driver(drv);
124
125 return (phydrv->phy_id == (phydev->phy_id & phydrv->phy_id_mask));
126}
127
128/* Suspend and resume. Copied from platform_suspend and
129 * platform_resume
130 */
131static int mdio_bus_suspend(struct device * dev, u32 state)
132{
133 int ret = 0;
134 struct device_driver *drv = dev->driver;
135
136 if (drv && drv->suspend) {
137 ret = drv->suspend(dev, state, SUSPEND_DISABLE);
138 if (ret == 0)
139 ret = drv->suspend(dev, state, SUSPEND_SAVE_STATE);
140 if (ret == 0)
141 ret = drv->suspend(dev, state, SUSPEND_POWER_DOWN);
142 }
143 return ret;
144}
145
146static int mdio_bus_resume(struct device * dev)
147{
148 int ret = 0;
149 struct device_driver *drv = dev->driver;
150
151 if (drv && drv->resume) {
152 ret = drv->resume(dev, RESUME_POWER_ON);
153 if (ret == 0)
154 ret = drv->resume(dev, RESUME_RESTORE_STATE);
155 if (ret == 0)
156 ret = drv->resume(dev, RESUME_ENABLE);
157 }
158 return ret;
159}
160
161struct bus_type mdio_bus_type = {
162 .name = "mdio_bus",
163 .match = mdio_bus_match,
164 .suspend = mdio_bus_suspend,
165 .resume = mdio_bus_resume,
166};
167
168int __init mdio_bus_init(void)
169{
170 return bus_register(&mdio_bus_type);
171}
172
173void __exit mdio_bus_exit(void)
174{
175 bus_unregister(&mdio_bus_type);
176}
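A minimal sketch of how a controller driver might hook into mdiobus_register(): the mymac_* names are invented, and the struct mii_bus field set (name, id, read, write, irq) and the accessor prototypes are assumed from the way mdiobus_register() and get_phy_device() use them, not taken from a real driver.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>

/* Hypothetical MDIO accessors for a MAC called "mymac".  Only the
 * (bus, addr, regnum[, val]) calling convention comes from the code
 * above; the register pokes themselves are placeholders. */
static int mymac_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	/* ... drive the MAC's MII management interface here ... */
	return 0xffff;	/* all ones => get_phy_device() sees no PHY */
}

static int mymac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	/* ... drive the MAC's MII management interface here ... */
	return 0;
}

/* One IRQ slot per PHY address; PHY_POLL where no line is wired up */
static int mymac_mdio_irqs[PHY_MAX_ADDR];

static struct mii_bus mymac_mii_bus = {
	.name	= "mymac_mii",
	.id	= 0,		/* shows up in the "phy%d:%d" bus_id strings */
	.read	= mymac_mdio_read,
	.write	= mymac_mdio_write,
	.irq	= mymac_mdio_irqs,
	/* .dev would normally point at the controller's struct device */
};

static int __init mymac_mdio_init(void)
{
	int i;

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mymac_mdio_irqs[i] = PHY_POLL;

	/* Probes every address and registers a phy_device for each hit */
	return mdiobus_register(&mymac_mii_bus);
}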
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
new file mode 100644
index 000000000000..d9e11f93bf3a
--- /dev/null
+++ b/drivers/net/phy/phy.c
@@ -0,0 +1,871 @@
1/*
2 * drivers/net/phy/phy.c
3 *
4 * Framework for configuring and reading PHY devices
5 * Based on code in sungem_phy.c and gianfar_phy.c
6 *
7 * Author: Andy Fleming
8 *
9 * Copyright (c) 2004 Freescale Semiconductor, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 */
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/unistd.h>
23#include <linux/slab.h>
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/spinlock.h>
31#include <linux/mm.h>
32#include <linux/module.h>
33#include <linux/version.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/phy.h>
37
38#include <asm/io.h>
39#include <asm/irq.h>
40#include <asm/uaccess.h>
41
42/* Convenience function to print out the current phy status
43 */
44void phy_print_status(struct phy_device *phydev)
45{
46 pr_info("%s: Link is %s", phydev->dev.bus_id,
47 phydev->link ? "Up" : "Down");
48 if (phydev->link)
49 printk(" - %d/%s", phydev->speed,
50 DUPLEX_FULL == phydev->duplex ?
51 "Full" : "Half");
52
53 printk("\n");
54}
55EXPORT_SYMBOL(phy_print_status);
56
57
58/* Convenience functions for reading/writing a given PHY
59 * register. They MUST NOT be called from interrupt context,
60 * because the bus read/write functions may wait for an interrupt
61 * to conclude the operation. */
62int phy_read(struct phy_device *phydev, u16 regnum)
63{
64 int retval;
65 struct mii_bus *bus = phydev->bus;
66
67 spin_lock_bh(&bus->mdio_lock);
68 retval = bus->read(bus, phydev->addr, regnum);
69 spin_unlock_bh(&bus->mdio_lock);
70
71 return retval;
72}
73EXPORT_SYMBOL(phy_read);
74
75int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
76{
77 int err;
78 struct mii_bus *bus = phydev->bus;
79
80 spin_lock_bh(&bus->mdio_lock);
81 err = bus->write(bus, phydev->addr, regnum, val);
82 spin_unlock_bh(&bus->mdio_lock);
83
84 return err;
85}
86EXPORT_SYMBOL(phy_write);
87
88
89int phy_clear_interrupt(struct phy_device *phydev)
90{
91 int err = 0;
92
93 if (phydev->drv->ack_interrupt)
94 err = phydev->drv->ack_interrupt(phydev);
95
96 return err;
97}
98
99
100int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
101{
102 int err = 0;
103
104 phydev->interrupts = interrupts;
105 if (phydev->drv->config_intr)
106 err = phydev->drv->config_intr(phydev);
107
108 return err;
109}
110
111
112/* phy_aneg_done
113 *
114 * description: Reads the status register and returns 0 if
115 * auto-negotiation is still in progress, BMSR_ANEGCOMPLETE if
116 * it has finished, or a negative error code if the read fails.
117 */
118static inline int phy_aneg_done(struct phy_device *phydev)
119{
120 int retval;
121
122 retval = phy_read(phydev, MII_BMSR);
123
124 return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
125}
126
127/* A structure for mapping a particular speed and duplex
128 * combination to a particular SUPPORTED and ADVERTISED value */
129struct phy_setting {
130 int speed;
131 int duplex;
132 u32 setting;
133};
134
135/* A mapping of all SUPPORTED settings to speed/duplex */
136static struct phy_setting settings[] = {
137 {
138 .speed = 10000,
139 .duplex = DUPLEX_FULL,
140 .setting = SUPPORTED_10000baseT_Full,
141 },
142 {
143 .speed = SPEED_1000,
144 .duplex = DUPLEX_FULL,
145 .setting = SUPPORTED_1000baseT_Full,
146 },
147 {
148 .speed = SPEED_1000,
149 .duplex = DUPLEX_HALF,
150 .setting = SUPPORTED_1000baseT_Half,
151 },
152 {
153 .speed = SPEED_100,
154 .duplex = DUPLEX_FULL,
155 .setting = SUPPORTED_100baseT_Full,
156 },
157 {
158 .speed = SPEED_100,
159 .duplex = DUPLEX_HALF,
160 .setting = SUPPORTED_100baseT_Half,
161 },
162 {
163 .speed = SPEED_10,
164 .duplex = DUPLEX_FULL,
165 .setting = SUPPORTED_10baseT_Full,
166 },
167 {
168 .speed = SPEED_10,
169 .duplex = DUPLEX_HALF,
170 .setting = SUPPORTED_10baseT_Half,
171 },
172};
173
174#define MAX_NUM_SETTINGS (sizeof(settings)/sizeof(struct phy_setting))
175
176/* phy_find_setting
177 *
178 * description: Searches the settings array for the setting which
179 * matches the desired speed and duplex, and returns the index
180 * of that setting. Returns the index of the last setting if
181 * none of the others match.
182 */
183static inline int phy_find_setting(int speed, int duplex)
184{
185 int idx = 0;
186
187 while (idx < ARRAY_SIZE(settings) &&
188 (settings[idx].speed != speed ||
189 settings[idx].duplex != duplex))
190 idx++;
191
192 return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
193}
194
195/* phy_find_valid
196 * idx: The first index in settings[] to search
197 * features: A mask of the valid settings
198 *
199 * description: Returns the index of the first valid setting less
200 * than or equal to the one pointed to by idx, as determined by
201 * the mask in features. Returns the index of the last setting
202 * if nothing else matches.
203 */
204static inline int phy_find_valid(int idx, u32 features)
205{
206 while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
207 idx++;
208
209 return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
210}
211
212/* phy_sanitize_settings
213 *
214 * description: Make sure the PHY is set to supported speeds and
215 * duplexes. Drop down by one in this order: 1000/FULL,
216 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF
217 */
218void phy_sanitize_settings(struct phy_device *phydev)
219{
220 u32 features = phydev->supported;
221 int idx;
222
223 /* Sanitize settings based on PHY capabilities */
224 if ((features & SUPPORTED_Autoneg) == 0)
225 phydev->autoneg = 0;
226
227 idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
228 features);
229
230 phydev->speed = settings[idx].speed;
231 phydev->duplex = settings[idx].duplex;
232}
233EXPORT_SYMBOL(phy_sanitize_settings);
234
235/* phy_ethtool_sset:
236 * A generic ethtool sset function. Handles all the details
237 *
238 * A few notes about parameter checking:
239 * - We don't set port or transceiver, so we don't care what they
240 * were set to.
241 * - phy_start_aneg() will make sure forced settings are sane, and
242 * choose the next best ones from the ones selected, so we don't
243 * care if ethtool tries to give us bad values
244 *
245 * A note about the PHYCONTROL Layer. If you turn off
246 * CONFIG_PHYCONTROL, you will need to read the PHY status
247 * registers after this function completes, and update your
248 * controller manually.
249 */
250int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
251{
252 if (cmd->phy_address != phydev->addr)
253 return -EINVAL;
254
255 /* We make sure that we don't pass unsupported
256 * values in to the PHY */
257 cmd->advertising &= phydev->supported;
258
259 /* Verify the settings we care about. */
260 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
261 return -EINVAL;
262
263 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
264 return -EINVAL;
265
266 if (cmd->autoneg == AUTONEG_DISABLE
267 && ((cmd->speed != SPEED_1000
268 && cmd->speed != SPEED_100
269 && cmd->speed != SPEED_10)
270 || (cmd->duplex != DUPLEX_HALF
271 && cmd->duplex != DUPLEX_FULL)))
272 return -EINVAL;
273
274 phydev->autoneg = cmd->autoneg;
275
276 phydev->speed = cmd->speed;
277
278 phydev->advertising = cmd->advertising;
279
280 if (AUTONEG_ENABLE == cmd->autoneg)
281 phydev->advertising |= ADVERTISED_Autoneg;
282 else
283 phydev->advertising &= ~ADVERTISED_Autoneg;
284
285 phydev->duplex = cmd->duplex;
286
287 /* Restart the PHY */
288 phy_start_aneg(phydev);
289
290 return 0;
291}
292
293int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
294{
295 cmd->supported = phydev->supported;
296
297 cmd->advertising = phydev->advertising;
298
299 cmd->speed = phydev->speed;
300 cmd->duplex = phydev->duplex;
301 cmd->port = PORT_MII;
302 cmd->phy_address = phydev->addr;
303 cmd->transceiver = XCVR_EXTERNAL;
304 cmd->autoneg = phydev->autoneg;
305
306 return 0;
307}
308
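As a usage sketch (not part of this patch; mymac_priv and the mymac_* names are invented), a MAC driver can forward its ethtool get/set_settings hooks straight to the two helpers above:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct mymac_priv {			/* hypothetical driver private data */
	struct phy_device *phydev;	/* saved at attach/connect time */
};

static int mymac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mymac_priv *priv = netdev_priv(dev);

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int mymac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mymac_priv *priv = netdev_priv(dev);

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static struct ethtool_ops mymac_ethtool_ops = {
	.get_settings	= mymac_get_settings,
	.set_settings	= mymac_set_settings,
};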
309
310/* Note that this function is currently incompatible with the
311 * PHYCONTROL layer. It changes registers without regard to
312 * current state. Use at your own risk.
313 */
314int phy_mii_ioctl(struct phy_device *phydev,
315 struct mii_ioctl_data *mii_data, int cmd)
316{
317 u16 val = mii_data->val_in;
318
319 switch (cmd) {
320 case SIOCGMIIPHY:
321 mii_data->phy_id = phydev->addr;
322 break;
323 case SIOCGMIIREG:
324 mii_data->val_out = phy_read(phydev, mii_data->reg_num);
325 break;
326
327 case SIOCSMIIREG:
328 if (!capable(CAP_NET_ADMIN))
329 return -EPERM;
330
331 if (mii_data->phy_id == phydev->addr) {
332			switch (mii_data->reg_num) {
333 case MII_BMCR:
334 if (val & (BMCR_RESET|BMCR_ANENABLE))
335 phydev->autoneg = AUTONEG_DISABLE;
336 else
337 phydev->autoneg = AUTONEG_ENABLE;
338 if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
339 phydev->duplex = DUPLEX_FULL;
340 else
341 phydev->duplex = DUPLEX_HALF;
342 break;
343 case MII_ADVERTISE:
344 phydev->advertising = val;
345 break;
346 default:
347 /* do nothing */
348 break;
349 }
350 }
351
352 phy_write(phydev, mii_data->reg_num, val);
353
354 if (mii_data->reg_num == MII_BMCR
355 && val & BMCR_RESET
356 && phydev->drv->config_init)
357 phydev->drv->config_init(phydev);
358 break;
359 }
360
361 return 0;
362}
363
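In the same spirit, a sketch of a do_ioctl handler handing the SIOCxMIIxxx requests to phy_mii_ioctl(); the mymac_* names and the use of the if_mii() helper from linux/mii.h are assumptions, not something this patch provides:

#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static int mymac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mymac_priv *priv = netdev_priv(dev);	/* as in the sketch above */

	if (!netif_running(dev) || !priv->phydev)
		return -EINVAL;

	/* if_mii() pulls the struct mii_ioctl_data out of the ifreq */
	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}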
364/* phy_start_aneg
365 *
366 * description: Sanitizes the settings (if we're not
367 * autonegotiating them), and then calls the driver's
368 * config_aneg function. If the PHYCONTROL Layer is operating,
369 * we change the state to reflect the beginning of
370 * Auto-negotiation or forcing.
371 */
372int phy_start_aneg(struct phy_device *phydev)
373{
374 int err;
375
376 spin_lock(&phydev->lock);
377
378 if (AUTONEG_DISABLE == phydev->autoneg)
379 phy_sanitize_settings(phydev);
380
381 err = phydev->drv->config_aneg(phydev);
382
383#ifdef CONFIG_PHYCONTROL
384 if (err < 0)
385 goto out_unlock;
386
387 if (phydev->state != PHY_HALTED) {
388 if (AUTONEG_ENABLE == phydev->autoneg) {
389 phydev->state = PHY_AN;
390 phydev->link_timeout = PHY_AN_TIMEOUT;
391 } else {
392 phydev->state = PHY_FORCING;
393 phydev->link_timeout = PHY_FORCE_TIMEOUT;
394 }
395 }
396
397out_unlock:
398#endif
399 spin_unlock(&phydev->lock);
400 return err;
401}
402EXPORT_SYMBOL(phy_start_aneg);
403
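A short sketch of the forced-mode path through phy_start_aneg(): the caller only sets the phydev fields and lets phy_sanitize_settings() and the driver's config_aneg (genphy_setup_forced() for the generic driver, defined later in this series) do the rest. The helper name is invented:

#include <linux/ethtool.h>
#include <linux/phy.h>

/* Hypothetical helper: pin the link at 100/Full instead of autonegotiating */
static int mymac_force_100full(struct phy_device *phydev)
{
	phydev->autoneg = AUTONEG_DISABLE;
	phydev->speed = SPEED_100;
	phydev->duplex = DUPLEX_FULL;

	/* Clamps the request to phydev->supported, then restarts the link */
	return phy_start_aneg(phydev);
}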
404
405#ifdef CONFIG_PHYCONTROL
406static void phy_change(void *data);
407static void phy_timer(unsigned long data);
408
409/* phy_start_machine:
410 *
411 * description: The PHY infrastructure can run a state machine
412 * which tracks whether the PHY is starting up, negotiating,
413 * etc. This function starts the timer which tracks the state
414 * of the PHY. If you want to be notified when the state
415 * changes, pass in the callback, otherwise, pass NULL. If you
416 * want to maintain your own state machine, do not call this
417 * function. */
418void phy_start_machine(struct phy_device *phydev,
419 void (*handler)(struct net_device *))
420{
421 phydev->adjust_state = handler;
422
423 init_timer(&phydev->phy_timer);
424 phydev->phy_timer.function = &phy_timer;
425 phydev->phy_timer.data = (unsigned long) phydev;
426 mod_timer(&phydev->phy_timer, jiffies + HZ);
427}
428
429/* phy_stop_machine
430 *
431 * description: Stops the state machine timer, sets the state to
432 * UP (unless it wasn't up yet), and then frees the interrupt,
433 * if it is in use. This function must be called BEFORE
434 * phy_detach.
435 */
436void phy_stop_machine(struct phy_device *phydev)
437{
438 del_timer_sync(&phydev->phy_timer);
439
440 spin_lock(&phydev->lock);
441 if (phydev->state > PHY_UP)
442 phydev->state = PHY_UP;
443 spin_unlock(&phydev->lock);
444
445 if (phydev->irq != PHY_POLL)
446 phy_stop_interrupts(phydev);
447
448 phydev->adjust_state = NULL;
449}
450
451/* phy_force_reduction
452 *
453 * description: Reduces the speed/duplex settings by
454 * one notch, in the following order:
455 * 1000/FULL, 1000/HALF, 100/FULL, 100/HALF,
456 * 10/FULL, 10/HALF. The function bottoms out at 10/HALF.
457 */
458static void phy_force_reduction(struct phy_device *phydev)
459{
460 int idx;
461
462 idx = phy_find_setting(phydev->speed, phydev->duplex);
463
464 idx++;
465
466 idx = phy_find_valid(idx, phydev->supported);
467
468 phydev->speed = settings[idx].speed;
469 phydev->duplex = settings[idx].duplex;
470
471 pr_info("Trying %d/%s\n", phydev->speed,
472 DUPLEX_FULL == phydev->duplex ?
473 "FULL" : "HALF");
474}
475
476
477/* phy_error:
478 *
479 * Moves the PHY to the HALTED state in response to a read
480 * or write error, and tells the controller the link is down.
481 * Must not be called from interrupt context, or while the
482 * phydev->lock is held.
483 */
484void phy_error(struct phy_device *phydev)
485{
486 spin_lock(&phydev->lock);
487 phydev->state = PHY_HALTED;
488 spin_unlock(&phydev->lock);
489}
490
491/* phy_interrupt
492 *
493 * description: When a PHY interrupt occurs, the handler disables
494 * interrupts, and schedules a work task to clear the interrupt.
495 */
496static irqreturn_t phy_interrupt(int irq, void *phy_dat, struct pt_regs *regs)
497{
498 struct phy_device *phydev = phy_dat;
499
500 /* The MDIO bus is not allowed to be written in interrupt
501 * context, so we need to disable the irq here. A work
502 * queue will write the PHY to disable and clear the
503 * interrupt, and then reenable the irq line. */
504 disable_irq_nosync(irq);
505
506 schedule_work(&phydev->phy_queue);
507
508 return IRQ_HANDLED;
509}
510
511/* Enable the interrupts from the PHY side */
512int phy_enable_interrupts(struct phy_device *phydev)
513{
514 int err;
515
516 err = phy_clear_interrupt(phydev);
517
518 if (err < 0)
519 return err;
520
521 err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
522
523 return err;
524}
525EXPORT_SYMBOL(phy_enable_interrupts);
526
527/* Disable the PHY interrupts from the PHY side */
528int phy_disable_interrupts(struct phy_device *phydev)
529{
530 int err;
531
532 /* Disable PHY interrupts */
533 err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
534
535 if (err)
536 goto phy_err;
537
538 /* Clear the interrupt */
539 err = phy_clear_interrupt(phydev);
540
541 if (err)
542 goto phy_err;
543
544 return 0;
545
546phy_err:
547 phy_error(phydev);
548
549 return err;
550}
551EXPORT_SYMBOL(phy_disable_interrupts);
552
553/* phy_start_interrupts
554 *
555 * description: Request the interrupt for the given PHY. If
556 * this fails, then we set irq to PHY_POLL.
557 * Otherwise, we enable the interrupts in the PHY.
558 * Returns 0 on success.
559 * This should only be called with a valid IRQ number.
560 */
561int phy_start_interrupts(struct phy_device *phydev)
562{
563 int err = 0;
564
565 INIT_WORK(&phydev->phy_queue, phy_change, phydev);
566
567 if (request_irq(phydev->irq, phy_interrupt,
568 SA_SHIRQ,
569 "phy_interrupt",
570 phydev) < 0) {
571 printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
572 phydev->bus->name,
573 phydev->irq);
574 phydev->irq = PHY_POLL;
575 return 0;
576 }
577
578 err = phy_enable_interrupts(phydev);
579
580 return err;
581}
582EXPORT_SYMBOL(phy_start_interrupts);
583
584int phy_stop_interrupts(struct phy_device *phydev)
585{
586 int err;
587
588 err = phy_disable_interrupts(phydev);
589
590 if (err)
591 phy_error(phydev);
592
593 free_irq(phydev->irq, phydev);
594
595 return err;
596}
597EXPORT_SYMBOL(phy_stop_interrupts);
598
599
600/* Scheduled by the phy_interrupt/timer to handle PHY changes */
601static void phy_change(void *data)
602{
603 int err;
604 struct phy_device *phydev = data;
605
606 err = phy_disable_interrupts(phydev);
607
608 if (err)
609 goto phy_err;
610
611 spin_lock(&phydev->lock);
612 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
613 phydev->state = PHY_CHANGELINK;
614 spin_unlock(&phydev->lock);
615
616 enable_irq(phydev->irq);
617
618 /* Reenable interrupts */
619 err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
620
621 if (err)
622 goto irq_enable_err;
623
624 return;
625
626irq_enable_err:
627 disable_irq(phydev->irq);
628phy_err:
629 phy_error(phydev);
630}
631
632/* Bring down the PHY link, and stop checking the status. */
633void phy_stop(struct phy_device *phydev)
634{
635 spin_lock(&phydev->lock);
636
637 if (PHY_HALTED == phydev->state)
638 goto out_unlock;
639
640 if (phydev->irq != PHY_POLL) {
641 /* Clear any pending interrupts */
642 phy_clear_interrupt(phydev);
643
644 /* Disable PHY Interrupts */
645 phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
646 }
647
648 phydev->state = PHY_HALTED;
649
650out_unlock:
651 spin_unlock(&phydev->lock);
652}
653
654
655/* phy_start
656 *
657 * description: Indicates the attached device's readiness to
658 * handle PHY-related work. Used during startup to start the
659 * PHY, and after a call to phy_stop() to resume operation.
660 * Also used to indicate the MDIO bus has cleared an error
661 * condition.
662 */
663void phy_start(struct phy_device *phydev)
664{
665 spin_lock(&phydev->lock);
666
667 switch (phydev->state) {
668 case PHY_STARTING:
669 phydev->state = PHY_PENDING;
670 break;
671 case PHY_READY:
672 phydev->state = PHY_UP;
673 break;
674 case PHY_HALTED:
675 phydev->state = PHY_RESUMING;
676 default:
677 break;
678 }
679 spin_unlock(&phydev->lock);
680}
681EXPORT_SYMBOL(phy_stop);
682EXPORT_SYMBOL(phy_start);
683
684/* PHY timer which handles the state machine */
685static void phy_timer(unsigned long data)
686{
687 struct phy_device *phydev = (struct phy_device *)data;
688 int needs_aneg = 0;
689 int err = 0;
690
691 spin_lock(&phydev->lock);
692
693 if (phydev->adjust_state)
694 phydev->adjust_state(phydev->attached_dev);
695
696	switch (phydev->state) {
697 case PHY_DOWN:
698 case PHY_STARTING:
699 case PHY_READY:
700 case PHY_PENDING:
701 break;
702 case PHY_UP:
703 needs_aneg = 1;
704
705 phydev->link_timeout = PHY_AN_TIMEOUT;
706
707 break;
708 case PHY_AN:
709 /* Check if negotiation is done. Break
710 * if there's an error */
711 err = phy_aneg_done(phydev);
712 if (err < 0)
713 break;
714
715 /* If auto-negotiation is done, we change to
716 * either RUNNING, or NOLINK */
717 if (err > 0) {
718 err = phy_read_status(phydev);
719
720 if (err)
721 break;
722
723 if (phydev->link) {
724 phydev->state = PHY_RUNNING;
725 netif_carrier_on(phydev->attached_dev);
726 } else {
727 phydev->state = PHY_NOLINK;
728 netif_carrier_off(phydev->attached_dev);
729 }
730
731 phydev->adjust_link(phydev->attached_dev);
732
733 } else if (0 == phydev->link_timeout--) {
734 /* The counter expired, so either we
735 * switch to forced mode, or the
736 * magic_aneg bit exists, and we try aneg
737 * again */
738 if (!(phydev->drv->flags & PHY_HAS_MAGICANEG)) {
739 int idx;
740
741 /* We'll start from the
742 * fastest speed, and work
743 * our way down */
744 idx = phy_find_valid(0,
745 phydev->supported);
746
747 phydev->speed = settings[idx].speed;
748 phydev->duplex = settings[idx].duplex;
749
750 phydev->autoneg = AUTONEG_DISABLE;
751 phydev->state = PHY_FORCING;
752 phydev->link_timeout =
753 PHY_FORCE_TIMEOUT;
754
755 pr_info("Trying %d/%s\n",
756 phydev->speed,
757 DUPLEX_FULL ==
758 phydev->duplex ?
759 "FULL" : "HALF");
760 }
761
762 needs_aneg = 1;
763 }
764 break;
765 case PHY_NOLINK:
766 err = phy_read_status(phydev);
767
768 if (err)
769 break;
770
771 if (phydev->link) {
772 phydev->state = PHY_RUNNING;
773 netif_carrier_on(phydev->attached_dev);
774 phydev->adjust_link(phydev->attached_dev);
775 }
776 break;
777 case PHY_FORCING:
778 err = phy_read_status(phydev);
779
780 if (err)
781 break;
782
783 if (phydev->link) {
784 phydev->state = PHY_RUNNING;
785 netif_carrier_on(phydev->attached_dev);
786 } else {
787 if (0 == phydev->link_timeout--) {
788 phy_force_reduction(phydev);
789 needs_aneg = 1;
790 }
791 }
792
793 phydev->adjust_link(phydev->attached_dev);
794 break;
795 case PHY_RUNNING:
796 /* Only register a CHANGE if we are
797 * polling */
798 if (PHY_POLL == phydev->irq)
799 phydev->state = PHY_CHANGELINK;
800 break;
801 case PHY_CHANGELINK:
802 err = phy_read_status(phydev);
803
804 if (err)
805 break;
806
807 if (phydev->link) {
808 phydev->state = PHY_RUNNING;
809 netif_carrier_on(phydev->attached_dev);
810 } else {
811 phydev->state = PHY_NOLINK;
812 netif_carrier_off(phydev->attached_dev);
813 }
814
815 phydev->adjust_link(phydev->attached_dev);
816
817 if (PHY_POLL != phydev->irq)
818 err = phy_config_interrupt(phydev,
819 PHY_INTERRUPT_ENABLED);
820 break;
821 case PHY_HALTED:
822 if (phydev->link) {
823 phydev->link = 0;
824 netif_carrier_off(phydev->attached_dev);
825 phydev->adjust_link(phydev->attached_dev);
826 }
827 break;
828 case PHY_RESUMING:
829
830 err = phy_clear_interrupt(phydev);
831
832 if (err)
833 break;
834
835 err = phy_config_interrupt(phydev,
836 PHY_INTERRUPT_ENABLED);
837
838 if (err)
839 break;
840
841 if (AUTONEG_ENABLE == phydev->autoneg) {
842 err = phy_aneg_done(phydev);
843 if (err < 0)
844 break;
845
846 /* err > 0 if AN is done.
847 * Otherwise, it's 0, and we're
848 * still waiting for AN */
849 if (err > 0) {
850 phydev->state = PHY_RUNNING;
851 } else {
852 phydev->state = PHY_AN;
853 phydev->link_timeout = PHY_AN_TIMEOUT;
854 }
855 } else
856 phydev->state = PHY_RUNNING;
857 break;
858 }
859
860 spin_unlock(&phydev->lock);
861
862 if (needs_aneg)
863 err = phy_start_aneg(phydev);
864
865 if (err < 0)
866 phy_error(phydev);
867
868 mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
869}
870
871#endif /* CONFIG_PHYCONTROL */
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
new file mode 100644
index 000000000000..33f7bdb5857c
--- /dev/null
+++ b/drivers/net/phy/phy_device.c
@@ -0,0 +1,696 @@
1/*
2 * drivers/net/phy/phy_device.c
3 *
4 * Framework for finding and configuring PHYs.
5 * Also contains generic PHY driver
6 *
7 * Author: Andy Fleming
8 *
9 * Copyright (c) 2004 Freescale Semiconductor, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 */
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/unistd.h>
23#include <linux/slab.h>
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/spinlock.h>
31#include <linux/mm.h>
32#include <linux/module.h>
33#include <linux/version.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/phy.h>
37
38#include <asm/io.h>
39#include <asm/irq.h>
40#include <asm/uaccess.h>
41
42static struct phy_driver genphy_driver;
43extern int mdio_bus_init(void);
44extern void mdio_bus_exit(void);
45
46/* get_phy_device
47 *
48 * description: Reads the ID registers of the PHY at addr on the
49 * bus, then allocates and returns the phy_device to
50 * represent it.
51 */
52struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
53{
54 int phy_reg;
55 u32 phy_id;
56 struct phy_device *dev = NULL;
57
58 /* Grab the bits from PHYIR1, and put them
59 * in the upper half */
60 phy_reg = bus->read(bus, addr, MII_PHYSID1);
61
62 if (phy_reg < 0)
63 return ERR_PTR(phy_reg);
64
65 phy_id = (phy_reg & 0xffff) << 16;
66
67 /* Grab the bits from PHYIR2, and put them in the lower half */
68 phy_reg = bus->read(bus, addr, MII_PHYSID2);
69
70 if (phy_reg < 0)
71 return ERR_PTR(phy_reg);
72
73 phy_id |= (phy_reg & 0xffff);
74
75 /* If the phy_id is all Fs, there is no device there */
76 if (0xffffffff == phy_id)
77 return NULL;
78
79 /* Otherwise, we allocate the device, and initialize the
80 * default values */
81 dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
82
83 if (NULL == dev)
84 return ERR_PTR(-ENOMEM);
85
86 dev->speed = 0;
87 dev->duplex = -1;
88 dev->pause = dev->asym_pause = 0;
89 dev->link = 1;
90
91 dev->autoneg = AUTONEG_ENABLE;
92
93 dev->addr = addr;
94 dev->phy_id = phy_id;
95 dev->bus = bus;
96
97 dev->state = PHY_DOWN;
98
99 spin_lock_init(&dev->lock);
100
101 return dev;
102}
103
104#ifdef CONFIG_PHYCONTROL
105/* phy_prepare_link:
106 *
107 * description: Tells the PHY infrastructure to handle the
108 * gory details on monitoring link status (whether through
109 * polling or an interrupt), and to call back to the
110 * connected device driver when the link status changes.
111 * If you want to monitor your own link state, don't call
112 * this function */
113void phy_prepare_link(struct phy_device *phydev,
114 void (*handler)(struct net_device *))
115{
116 phydev->adjust_link = handler;
117}
118
119/* phy_connect:
120 *
121 * description: Convenience function for connecting ethernet
122 * devices to PHY devices. The default behavior is for
123 * the PHY infrastructure to handle everything, and only notify
124 * the connected driver when the link status changes. If you
125 * don't want, or can't use the provided functionality, you may
126 * choose to call only the subset of functions which provide
127 * the desired functionality.
128 */
129struct phy_device * phy_connect(struct net_device *dev, const char *phy_id,
130 void (*handler)(struct net_device *), u32 flags)
131{
132 struct phy_device *phydev;
133
134 phydev = phy_attach(dev, phy_id, flags);
135
136 if (IS_ERR(phydev))
137 return phydev;
138
139 phy_prepare_link(phydev, handler);
140
141 phy_start_machine(phydev, NULL);
142
143 if (phydev->irq > 0)
144 phy_start_interrupts(phydev);
145
146 return phydev;
147}
148EXPORT_SYMBOL(phy_connect);
149
150void phy_disconnect(struct phy_device *phydev)
151{
152 if (phydev->irq > 0)
153 phy_stop_interrupts(phydev);
154
155 phy_stop_machine(phydev);
156
157 phydev->adjust_link = NULL;
158
159 phy_detach(phydev);
160}
161EXPORT_SYMBOL(phy_disconnect);
162
163#endif /* CONFIG_PHYCONTROL */
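Tying it together, a sketch of the typical PHYCONTROL flow in a MAC driver's open/stop path; every name here (mymac_*, the "phy0:1" bus id, the private struct layout) is illustrative, and only the phy_connect()/phy_start()/phy_stop()/phy_disconnect() calls and the phydev fields come from the code above:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct mymac_priv {			/* hypothetical private data */
	struct phy_device *phydev;
	int link, speed, duplex;	/* last values pushed to the MAC */
};

/* Called back by the PHY state machine whenever link/speed/duplex change */
static void mymac_adjust_link(struct net_device *dev)
{
	struct mymac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	if (priv->link != phydev->link ||
	    priv->speed != phydev->speed ||
	    priv->duplex != phydev->duplex) {
		priv->link = phydev->link;
		priv->speed = phydev->speed;
		priv->duplex = phydev->duplex;

		/* ... reprogram the MAC for the new speed/duplex here ... */
		phy_print_status(phydev);
	}
}

static int mymac_open(struct net_device *dev)
{
	struct mymac_priv *priv = netdev_priv(dev);

	/* "phy0:1" = PHY at address 1 on the mii_bus with id 0, matching
	 * the bus_id format used by mdiobus_register() */
	priv->phydev = phy_connect(dev, "phy0:1", &mymac_adjust_link, 0);
	if (IS_ERR(priv->phydev))
		return PTR_ERR(priv->phydev);

	phy_start(priv->phydev);
	netif_start_queue(dev);

	return 0;
}

static int mymac_stop(struct net_device *dev)
{
	struct mymac_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);

	phy_stop(priv->phydev);
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	return 0;
}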
164
165/* phy_attach:
166 *
167 * description: Called by drivers to attach to a particular PHY
168 * device. The phy_device is found, and properly hooked up
169 * to the phy_driver. If no driver is attached, then the
170 * genphy_driver is used. The phy_device is given a ptr to
171 * the attaching device, and given a callback for link status
172 * change. The phy_device is returned to the attaching
173 * driver.
174 */
175static int phy_compare_id(struct device *dev, void *data)
176{
177 return strcmp((char *)data, dev->bus_id) ? 0 : 1;
178}
179
180struct phy_device *phy_attach(struct net_device *dev,
181 const char *phy_id, u32 flags)
182{
183 struct bus_type *bus = &mdio_bus_type;
184 struct phy_device *phydev;
185 struct device *d;
186
187 /* Search the list of PHY devices on the mdio bus for the
188 * PHY with the requested name */
189 d = bus_find_device(bus, NULL, (void *)phy_id, phy_compare_id);
190
191 if (d) {
192 phydev = to_phy_device(d);
193 } else {
194 printk(KERN_ERR "%s not found\n", phy_id);
195 return ERR_PTR(-ENODEV);
196 }
197
198	/* Assume that if there is no driver, none exists,
199	 * and we should use the genphy driver. */
200 if (NULL == d->driver) {
201 int err;
202 down_write(&d->bus->subsys.rwsem);
203 d->driver = &genphy_driver.driver;
204
205 err = d->driver->probe(d);
206
207 if (err < 0)
208 return ERR_PTR(err);
209
210 device_bind_driver(d);
211 up_write(&d->bus->subsys.rwsem);
212 }
213
214 if (phydev->attached_dev) {
215 printk(KERN_ERR "%s: %s already attached\n",
216 dev->name, phy_id);
217 return ERR_PTR(-EBUSY);
218 }
219
220 phydev->attached_dev = dev;
221
222 phydev->dev_flags = flags;
223
224 return phydev;
225}
226EXPORT_SYMBOL(phy_attach);
227
228void phy_detach(struct phy_device *phydev)
229{
230 phydev->attached_dev = NULL;
231
232 /* If the device had no specific driver before (i.e. - it
233 * was using the generic driver), we unbind the device
234 * from the generic driver so that there's a chance a
235 * real driver could be loaded */
236 if (phydev->dev.driver == &genphy_driver.driver) {
237 down_write(&phydev->dev.bus->subsys.rwsem);
238 device_release_driver(&phydev->dev);
239 up_write(&phydev->dev.bus->subsys.rwsem);
240 }
241}
242EXPORT_SYMBOL(phy_detach);
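For drivers that keep their own link handling, a sketch of the bare phy_attach() path (names invented; a real driver would more likely go through phydev->drv->config_aneg/read_status, the genphy helpers are used here only because they are defined later in this file):

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

/* Hypothetical: attach to the PHY but skip the PHYCONTROL state machine */
static struct phy_device *mymac_manual_attach(struct net_device *dev)
{
	struct phy_device *phydev;

	phydev = phy_attach(dev, "phy0:1", 0);
	if (IS_ERR(phydev))
		return phydev;

	/* Kick off (auto)negotiation once; the caller polls the result */
	if (genphy_config_aneg(phydev) < 0) {
		phy_detach(phydev);
		return ERR_PTR(-EIO);
	}

	return phydev;
}

/* Poll helper: refresh phydev->link/speed/duplex and mirror the carrier */
static void mymac_manual_poll(struct net_device *dev,
		struct phy_device *phydev)
{
	if (genphy_read_status(phydev) < 0)
		return;

	if (phydev->link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}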
243
244
245/* Generic PHY support and helper functions */
246
247/* genphy_config_advert
248 *
249 * description: Writes MII_ADVERTISE with the appropriate values,
250 * after sanitizing the values to make sure we only advertise
251 * what is supported
252 */
253int genphy_config_advert(struct phy_device *phydev)
254{
255 u32 advertise;
256 int adv;
257 int err;
258
259 /* Only allow advertising what
260 * this PHY supports */
261 phydev->advertising &= phydev->supported;
262 advertise = phydev->advertising;
263
264 /* Setup standard advertisement */
265 adv = phy_read(phydev, MII_ADVERTISE);
266
267 if (adv < 0)
268 return adv;
269
270 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
271 ADVERTISE_PAUSE_ASYM);
272 if (advertise & ADVERTISED_10baseT_Half)
273 adv |= ADVERTISE_10HALF;
274 if (advertise & ADVERTISED_10baseT_Full)
275 adv |= ADVERTISE_10FULL;
276 if (advertise & ADVERTISED_100baseT_Half)
277 adv |= ADVERTISE_100HALF;
278 if (advertise & ADVERTISED_100baseT_Full)
279 adv |= ADVERTISE_100FULL;
280 if (advertise & ADVERTISED_Pause)
281 adv |= ADVERTISE_PAUSE_CAP;
282 if (advertise & ADVERTISED_Asym_Pause)
283 adv |= ADVERTISE_PAUSE_ASYM;
284
285 err = phy_write(phydev, MII_ADVERTISE, adv);
286
287 if (err < 0)
288 return err;
289
290 /* Configure gigabit if it's supported */
291 if (phydev->supported & (SUPPORTED_1000baseT_Half |
292 SUPPORTED_1000baseT_Full)) {
293 adv = phy_read(phydev, MII_CTRL1000);
294
295 if (adv < 0)
296 return adv;
297
298 adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
299 if (advertise & SUPPORTED_1000baseT_Half)
300 adv |= ADVERTISE_1000HALF;
301 if (advertise & SUPPORTED_1000baseT_Full)
302 adv |= ADVERTISE_1000FULL;
303 err = phy_write(phydev, MII_CTRL1000, adv);
304
305 if (err < 0)
306 return err;
307 }
308
309 return adv;
310}
311EXPORT_SYMBOL(genphy_config_advert);
312
313/* genphy_setup_forced
314 *
315 * description: Configures MII_BMCR to force speed/duplex
316 * to the values in phydev. Assumes that the values are valid.
317 * Please see phy_sanitize_settings() */
318int genphy_setup_forced(struct phy_device *phydev)
319{
320 int ctl = BMCR_RESET;
321
322 phydev->pause = phydev->asym_pause = 0;
323
324 if (SPEED_1000 == phydev->speed)
325 ctl |= BMCR_SPEED1000;
326 else if (SPEED_100 == phydev->speed)
327 ctl |= BMCR_SPEED100;
328
329 if (DUPLEX_FULL == phydev->duplex)
330 ctl |= BMCR_FULLDPLX;
331
332 ctl = phy_write(phydev, MII_BMCR, ctl);
333
334 if (ctl < 0)
335 return ctl;
336
337 /* We just reset the device, so we'd better configure any
338 * settings the PHY requires to operate */
339 if (phydev->drv->config_init)
340 ctl = phydev->drv->config_init(phydev);
341
342 return ctl;
343}
344
345
346/* Enable and Restart Autonegotiation */
347int genphy_restart_aneg(struct phy_device *phydev)
348{
349 int ctl;
350
351 ctl = phy_read(phydev, MII_BMCR);
352
353 if (ctl < 0)
354 return ctl;
355
356 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
357
358 /* Don't isolate the PHY if we're negotiating */
359 ctl &= ~(BMCR_ISOLATE);
360
361 ctl = phy_write(phydev, MII_BMCR, ctl);
362
363 return ctl;
364}
365
366
367/* genphy_config_aneg
368 *
369 * description: If auto-negotiation is enabled, we configure the
370 * advertising, and then restart auto-negotiation. If it is not
371 * enabled, then we write the BMCR
372 */
373int genphy_config_aneg(struct phy_device *phydev)
374{
375 int err = 0;
376
377 if (AUTONEG_ENABLE == phydev->autoneg) {
378 err = genphy_config_advert(phydev);
379
380 if (err < 0)
381 return err;
382
383 err = genphy_restart_aneg(phydev);
384 } else
385 err = genphy_setup_forced(phydev);
386
387 return err;
388}
389EXPORT_SYMBOL(genphy_config_aneg);
390
391/* genphy_update_link
392 *
393 * description: Update the value in phydev->link to reflect the
394 * current link value.  The link status bit in BMSR latches a
395 * failure until read, so the register is read twice and the
396 * second value is kept.
396 */
397int genphy_update_link(struct phy_device *phydev)
398{
399 int status;
400
401 /* Do a fake read */
402 status = phy_read(phydev, MII_BMSR);
403
404 if (status < 0)
405 return status;
406
407 /* Read link and autonegotiation status */
408 status = phy_read(phydev, MII_BMSR);
409
410 if (status < 0)
411 return status;
412
413 if ((status & BMSR_LSTATUS) == 0)
414 phydev->link = 0;
415 else
416 phydev->link = 1;
417
418 return 0;
419}
420
421/* genphy_read_status
422 *
423 * description: Check the link, then figure out the current state
424 * by comparing what we advertise with what the link partner
425 * advertises. Start by checking the gigabit possibilities,
426 * then move on to 10/100.
427 */
428int genphy_read_status(struct phy_device *phydev)
429{
430 int adv;
431 int err;
432 int lpa;
433 int lpagb = 0;
434
435 /* Update the link, but return if there
436 * was an error */
437 err = genphy_update_link(phydev);
438 if (err)
439 return err;
440
441 if (AUTONEG_ENABLE == phydev->autoneg) {
442 if (phydev->supported & (SUPPORTED_1000baseT_Half
443 | SUPPORTED_1000baseT_Full)) {
444 lpagb = phy_read(phydev, MII_STAT1000);
445
446 if (lpagb < 0)
447 return lpagb;
448
449 adv = phy_read(phydev, MII_CTRL1000);
450
451 if (adv < 0)
452 return adv;
453
454 lpagb &= adv << 2;
455 }
456
457 lpa = phy_read(phydev, MII_LPA);
458
459 if (lpa < 0)
460 return lpa;
461
462 adv = phy_read(phydev, MII_ADVERTISE);
463
464 if (adv < 0)
465 return adv;
466
467 lpa &= adv;
468
469 phydev->speed = SPEED_10;
470 phydev->duplex = DUPLEX_HALF;
471 phydev->pause = phydev->asym_pause = 0;
472
473 if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
474 phydev->speed = SPEED_1000;
475
476 if (lpagb & LPA_1000FULL)
477 phydev->duplex = DUPLEX_FULL;
478 } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
479 phydev->speed = SPEED_100;
480
481 if (lpa & LPA_100FULL)
482 phydev->duplex = DUPLEX_FULL;
483 } else
484 if (lpa & LPA_10FULL)
485 phydev->duplex = DUPLEX_FULL;
486
487 if (phydev->duplex == DUPLEX_FULL){
488 phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
489 phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
490 }
491 } else {
492 int bmcr = phy_read(phydev, MII_BMCR);
493 if (bmcr < 0)
494 return bmcr;
495
496 if (bmcr & BMCR_FULLDPLX)
497 phydev->duplex = DUPLEX_FULL;
498 else
499 phydev->duplex = DUPLEX_HALF;
500
501 if (bmcr & BMCR_SPEED1000)
502 phydev->speed = SPEED_1000;
503 else if (bmcr & BMCR_SPEED100)
504 phydev->speed = SPEED_100;
505 else
506 phydev->speed = SPEED_10;
507
508 phydev->pause = phydev->asym_pause = 0;
509 }
510
511 return 0;
512}
513EXPORT_SYMBOL(genphy_read_status);
514
515static int genphy_config_init(struct phy_device *phydev)
516{
517 u32 val;
518 u32 features;
519
520 /* For now, I'll claim that the generic driver supports
521 * all possible port types */
522 features = (SUPPORTED_TP | SUPPORTED_MII
523 | SUPPORTED_AUI | SUPPORTED_FIBRE |
524 SUPPORTED_BNC);
525
526 /* Do we support autonegotiation? */
527 val = phy_read(phydev, MII_BMSR);
528
529 if (val < 0)
530 return val;
531
532 if (val & BMSR_ANEGCAPABLE)
533 features |= SUPPORTED_Autoneg;
534
535 if (val & BMSR_100FULL)
536 features |= SUPPORTED_100baseT_Full;
537 if (val & BMSR_100HALF)
538 features |= SUPPORTED_100baseT_Half;
539 if (val & BMSR_10FULL)
540 features |= SUPPORTED_10baseT_Full;
541 if (val & BMSR_10HALF)
542 features |= SUPPORTED_10baseT_Half;
543
544 if (val & BMSR_ESTATEN) {
545 val = phy_read(phydev, MII_ESTATUS);
546
547 if (val < 0)
548 return val;
549
550 if (val & ESTATUS_1000_TFULL)
551 features |= SUPPORTED_1000baseT_Full;
552 if (val & ESTATUS_1000_THALF)
553 features |= SUPPORTED_1000baseT_Half;
554 }
555
556 phydev->supported = features;
557 phydev->advertising = features;
558
559 return 0;
560}
561
562
563/* phy_probe
564 *
565 * description: Take care of setting up the phy_device structure,
566 * set the state to READY (the driver's init function should
567 * set it to STARTING if needed).
568 */
569static int phy_probe(struct device *dev)
570{
571 struct phy_device *phydev;
572 struct phy_driver *phydrv;
573 struct device_driver *drv;
574 int err = 0;
575
576 phydev = to_phy_device(dev);
577
578 /* Make sure the driver is held.
579 * XXX -- Is this correct? */
580 drv = get_driver(phydev->dev.driver);
581 phydrv = to_phy_driver(drv);
582 phydev->drv = phydrv;
583
584 /* Disable the interrupt if the PHY doesn't support it */
585 if (!(phydrv->flags & PHY_HAS_INTERRUPT))
586 phydev->irq = PHY_POLL;
587
588 spin_lock(&phydev->lock);
589
590 /* Start out supporting everything. Eventually,
591 * a controller will attach, and may modify one
592 * or both of these values */
593 phydev->supported = phydrv->features;
594 phydev->advertising = phydrv->features;
595
596 /* Set the state to READY by default */
597 phydev->state = PHY_READY;
598
599 if (phydev->drv->probe)
600 err = phydev->drv->probe(phydev);
601
602 spin_unlock(&phydev->lock);
603
604 if (err < 0)
605 return err;
606
607 if (phydev->drv->config_init)
608 err = phydev->drv->config_init(phydev);
609
610 return err;
611}
612
613static int phy_remove(struct device *dev)
614{
615 struct phy_device *phydev;
616
617 phydev = to_phy_device(dev);
618
619 spin_lock(&phydev->lock);
620 phydev->state = PHY_DOWN;
621 spin_unlock(&phydev->lock);
622
623 if (phydev->drv->remove)
624 phydev->drv->remove(phydev);
625
626 put_driver(dev->driver);
627 phydev->drv = NULL;
628
629 return 0;
630}
631
632int phy_driver_register(struct phy_driver *new_driver)
633{
634 int retval;
635
636 memset(&new_driver->driver, 0, sizeof(new_driver->driver));
637 new_driver->driver.name = new_driver->name;
638 new_driver->driver.bus = &mdio_bus_type;
639 new_driver->driver.probe = phy_probe;
640 new_driver->driver.remove = phy_remove;
641
642 retval = driver_register(&new_driver->driver);
643
644 if (retval) {
645 printk(KERN_ERR "%s: Error %d in registering driver\n",
646 new_driver->name, retval);
647
648 return retval;
649 }
650
651 pr_info("%s: Registered new driver\n", new_driver->name);
652
653 return 0;
654}
655EXPORT_SYMBOL(phy_driver_register);
656
657void phy_driver_unregister(struct phy_driver *drv)
658{
659 driver_unregister(&drv->driver);
660}
661EXPORT_SYMBOL(phy_driver_unregister);
662
663static struct phy_driver genphy_driver = {
664 .phy_id = 0xffffffff,
665 .phy_id_mask = 0xffffffff,
666 .name = "Generic PHY",
667 .config_init = genphy_config_init,
668 .features = 0,
669 .config_aneg = genphy_config_aneg,
670 .read_status = genphy_read_status,
671 .driver = {.owner= THIS_MODULE, },
672};
673
674static int __init phy_init(void)
675{
676 int rc;
677
678 rc = mdio_bus_init();
679 if (rc)
680 return rc;
681
682 rc = phy_driver_register(&genphy_driver);
683 if (rc)
684 mdio_bus_exit();
685
686 return rc;
687}
688
689static void __exit phy_exit(void)
690{
691 phy_driver_unregister(&genphy_driver);
692 mdio_bus_exit();
693}
694
695subsys_initcall(phy_init);
696module_exit(phy_exit);
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
new file mode 100644
index 000000000000..d461ba457631
--- /dev/null
+++ b/drivers/net/phy/qsemi.c
@@ -0,0 +1,143 @@
1/*
2 * drivers/net/phy/qsemi.c
3 *
4 * Driver for Quality Semiconductor PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41/* ------------------------------------------------------------------------- */
42/* The Quality Semiconductor QS6612 is used on the RPX CLLF */
43
44/* register definitions */
45
46#define MII_QS6612_MCR 17 /* Mode Control Register */
47#define MII_QS6612_FTR 27 /* Factory Test Register */
48#define MII_QS6612_MCO 28 /* Misc. Control Register */
49#define MII_QS6612_ISR 29 /* Interrupt Source Register */
50#define MII_QS6612_IMR 30 /* Interrupt Mask Register */
51#define MII_QS6612_IMR_INIT 0x003a
52#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */
53
54#define QS6612_PCR_AN_COMPLETE 0x1000
55#define QS6612_PCR_RLBEN 0x0200
56#define QS6612_PCR_DCREN 0x0100
57#define QS6612_PCR_4B5BEN 0x0040
58#define QS6612_PCR_TX_ISOLATE 0x0020
59#define QS6612_PCR_MLT3_DIS 0x0002
60#define QS6612_PCR_SCRM_DESCRM 0x0001
61
62MODULE_DESCRIPTION("Quality Semiconductor PHY driver");
63MODULE_AUTHOR("Andy Fleming");
64MODULE_LICENSE("GPL");
65
66/* Returns 0, unless there's a write error */
67static int qs6612_config_init(struct phy_device *phydev)
68{
69 /* The PHY powers up isolated on the RPX,
70 * so send a command to allow operation.
71 * XXX - My docs indicate this should be 0x0940
72 * ...or something. The current value sets three
 73	 * reserved bits: bit 11, which the docs say should be
 74	 * set to one, bit 10, which should be set to zero, and
 75	 * bit 7, which is left unspecified.  However, my docs
 76	 * are preliminary, and I will leave it like this
 77	 * until someone more knowledgeable corrects me or it.
78 * -- Andy Fleming
79 */
80 return phy_write(phydev, MII_QS6612_PCR, 0x0dc0);
81}
82
83static int qs6612_ack_interrupt(struct phy_device *phydev)
84{
85 int err;
86
87 err = phy_read(phydev, MII_QS6612_ISR);
88
89 if (err < 0)
90 return err;
91
92 err = phy_read(phydev, MII_BMSR);
93
94 if (err < 0)
95 return err;
96
97 err = phy_read(phydev, MII_EXPANSION);
98
99 if (err < 0)
100 return err;
101
102 return 0;
103}
104
105static int qs6612_config_intr(struct phy_device *phydev)
106{
107 int err;
108 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
109 err = phy_write(phydev, MII_QS6612_IMR,
110 MII_QS6612_IMR_INIT);
111 else
112 err = phy_write(phydev, MII_QS6612_IMR, 0);
113
114 return err;
115
116}
117
118static struct phy_driver qs6612_driver = {
119 .phy_id = 0x00181440,
120 .name = "QS6612",
121 .phy_id_mask = 0xfffffff0,
122 .features = PHY_BASIC_FEATURES,
123 .flags = PHY_HAS_INTERRUPT,
124 .config_init = qs6612_config_init,
125 .config_aneg = genphy_config_aneg,
126 .read_status = genphy_read_status,
127 .ack_interrupt = qs6612_ack_interrupt,
128 .config_intr = qs6612_config_intr,
129 .driver = { .owner = THIS_MODULE,},
130};
131
132static int __init qs6612_init(void)
133{
134 return phy_driver_register(&qs6612_driver);
135}
136
137static void __exit qs6612_exit(void)
138{
139 phy_driver_unregister(&qs6612_driver);
140}
141
142module_init(qs6612_init);
143module_exit(qs6612_exit);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d5afe05cd826..f0471d102e3c 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -187,6 +187,7 @@ static struct pci_device_id rtl8169_pci_tbl[] = {
187 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), }, 187 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), },
188 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), }, 188 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), },
189 { PCI_DEVICE(0x16ec, 0x0116), }, 189 { PCI_DEVICE(0x16ec, 0x0116), },
190 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024, },
190 {0,}, 191 {0,},
191}; 192};
192 193
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 7092ca6b277e..2234a8f05eb2 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -62,6 +62,7 @@ typedef struct _XENA_dev_config {
62#define ADAPTER_STATUS_RMAC_REMOTE_FAULT BIT(6) 62#define ADAPTER_STATUS_RMAC_REMOTE_FAULT BIT(6)
63#define ADAPTER_STATUS_RMAC_LOCAL_FAULT BIT(7) 63#define ADAPTER_STATUS_RMAC_LOCAL_FAULT BIT(7)
64#define ADAPTER_STATUS_RMAC_PCC_IDLE vBIT(0xFF,8,8) 64#define ADAPTER_STATUS_RMAC_PCC_IDLE vBIT(0xFF,8,8)
65#define ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE vBIT(0x0F,8,8)
65#define ADAPTER_STATUS_RC_PRC_QUIESCENT vBIT(0xFF,16,8) 66#define ADAPTER_STATUS_RC_PRC_QUIESCENT vBIT(0xFF,16,8)
66#define ADAPTER_STATUS_MC_DRAM_READY BIT(24) 67#define ADAPTER_STATUS_MC_DRAM_READY BIT(24)
67#define ADAPTER_STATUS_MC_QUEUES_READY BIT(25) 68#define ADAPTER_STATUS_MC_QUEUES_READY BIT(25)
@@ -77,21 +78,34 @@ typedef struct _XENA_dev_config {
77#define ADAPTER_ECC_EN BIT(55) 78#define ADAPTER_ECC_EN BIT(55)
78 79
79 u64 serr_source; 80 u64 serr_source;
80#define SERR_SOURCE_PIC BIT(0) 81#define SERR_SOURCE_PIC BIT(0)
81#define SERR_SOURCE_TXDMA BIT(1) 82#define SERR_SOURCE_TXDMA BIT(1)
82#define SERR_SOURCE_RXDMA BIT(2) 83#define SERR_SOURCE_RXDMA BIT(2)
83#define SERR_SOURCE_MAC BIT(3) 84#define SERR_SOURCE_MAC BIT(3)
84#define SERR_SOURCE_MC BIT(4) 85#define SERR_SOURCE_MC BIT(4)
85#define SERR_SOURCE_XGXS BIT(5) 86#define SERR_SOURCE_XGXS BIT(5)
86#define SERR_SOURCE_ANY (SERR_SOURCE_PIC | \ 87#define SERR_SOURCE_ANY (SERR_SOURCE_PIC | \
87 SERR_SOURCE_TXDMA | \ 88 SERR_SOURCE_TXDMA | \
88 SERR_SOURCE_RXDMA | \ 89 SERR_SOURCE_RXDMA | \
89 SERR_SOURCE_MAC | \ 90 SERR_SOURCE_MAC | \
90 SERR_SOURCE_MC | \ 91 SERR_SOURCE_MC | \
91 SERR_SOURCE_XGXS) 92 SERR_SOURCE_XGXS)
92 93
93 94 u64 pci_mode;
94 u8 unused_0[0x800 - 0x120]; 95#define GET_PCI_MODE(val) ((val & vBIT(0xF, 0, 4)) >> 60)
96#define PCI_MODE_PCI_33 0
97#define PCI_MODE_PCI_66 0x1
98#define PCI_MODE_PCIX_M1_66 0x2
99#define PCI_MODE_PCIX_M1_100 0x3
100#define PCI_MODE_PCIX_M1_133 0x4
101#define PCI_MODE_PCIX_M2_66 0x5
102#define PCI_MODE_PCIX_M2_100 0x6
103#define PCI_MODE_PCIX_M2_133 0x7
104#define PCI_MODE_UNSUPPORTED BIT(0)
105#define PCI_MODE_32_BITS BIT(8)
106#define PCI_MODE_UNKNOWN_MODE BIT(9)
107
108 u8 unused_0[0x800 - 0x128];
95 109
96/* PCI-X Controller registers */ 110/* PCI-X Controller registers */
97 u64 pic_int_status; 111 u64 pic_int_status;
@@ -153,7 +167,11 @@ typedef struct _XENA_dev_config {
153 u8 unused4[0x08]; 167 u8 unused4[0x08];
154 168
155 u64 gpio_int_reg; 169 u64 gpio_int_reg;
170#define GPIO_INT_REG_LINK_DOWN BIT(1)
171#define GPIO_INT_REG_LINK_UP BIT(2)
156 u64 gpio_int_mask; 172 u64 gpio_int_mask;
173#define GPIO_INT_MASK_LINK_DOWN BIT(1)
174#define GPIO_INT_MASK_LINK_UP BIT(2)
157 u64 gpio_alarms; 175 u64 gpio_alarms;
158 176
159 u8 unused5[0x38]; 177 u8 unused5[0x38];
@@ -223,19 +241,16 @@ typedef struct _XENA_dev_config {
223 u64 xmsi_data; 241 u64 xmsi_data;
224 242
225 u64 rx_mat; 243 u64 rx_mat;
244#define RX_MAT_SET(ring, msi) vBIT(msi, (8 * ring), 8)
226 245
227 u8 unused6[0x8]; 246 u8 unused6[0x8];
228 247
229 u64 tx_mat0_7; 248 u64 tx_mat0_n[0x8];
230 u64 tx_mat8_15; 249#define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8)
231 u64 tx_mat16_23;
232 u64 tx_mat24_31;
233 u64 tx_mat32_39;
234 u64 tx_mat40_47;
235 u64 tx_mat48_55;
236 u64 tx_mat56_63;
237 250
238 u8 unused_1[0x10]; 251 u8 unused_1[0x8];
252 u64 stat_byte_cnt;
253#define STAT_BC(n) vBIT(n,4,12)
239 254
240 /* Automated statistics collection */ 255 /* Automated statistics collection */
241 u64 stat_cfg; 256 u64 stat_cfg;
@@ -246,6 +261,7 @@ typedef struct _XENA_dev_config {
246#define STAT_TRSF_PER(n) TBD 261#define STAT_TRSF_PER(n) TBD
247#define PER_SEC 0x208d5 262#define PER_SEC 0x208d5
248#define SET_UPDT_PERIOD(n) vBIT((PER_SEC*n),32,32) 263#define SET_UPDT_PERIOD(n) vBIT((PER_SEC*n),32,32)
264#define SET_UPDT_CLICKS(val) vBIT(val, 32, 32)
249 265
250 u64 stat_addr; 266 u64 stat_addr;
251 267
@@ -267,8 +283,15 @@ typedef struct _XENA_dev_config {
267 283
268 u64 gpio_control; 284 u64 gpio_control;
269#define GPIO_CTRL_GPIO_0 BIT(8) 285#define GPIO_CTRL_GPIO_0 BIT(8)
286 u64 misc_control;
287#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3)
288
289 u8 unused7_1[0x240 - 0x208];
290
291 u64 wreq_split_mask;
292#define WREQ_SPLIT_MASK_SET_MASK(val) vBIT(val, 52, 12)
270 293
271 u8 unused7[0x600]; 294 u8 unused7_2[0x800 - 0x248];
272 295
273/* TxDMA registers */ 296/* TxDMA registers */
274 u64 txdma_int_status; 297 u64 txdma_int_status;
@@ -290,6 +313,7 @@ typedef struct _XENA_dev_config {
290 313
291 u64 pcc_err_reg; 314 u64 pcc_err_reg;
292#define PCC_FB_ECC_DB_ERR vBIT(0xFF, 16, 8) 315#define PCC_FB_ECC_DB_ERR vBIT(0xFF, 16, 8)
316#define PCC_ENABLE_FOUR vBIT(0x0F,0,8)
293 317
294 u64 pcc_err_mask; 318 u64 pcc_err_mask;
295 u64 pcc_err_alarm; 319 u64 pcc_err_alarm;
@@ -468,6 +492,7 @@ typedef struct _XENA_dev_config {
468#define PRC_CTRL_NO_SNOOP (BIT(22)|BIT(23)) 492#define PRC_CTRL_NO_SNOOP (BIT(22)|BIT(23))
469#define PRC_CTRL_NO_SNOOP_DESC BIT(22) 493#define PRC_CTRL_NO_SNOOP_DESC BIT(22)
470#define PRC_CTRL_NO_SNOOP_BUFF BIT(23) 494#define PRC_CTRL_NO_SNOOP_BUFF BIT(23)
495#define PRC_CTRL_BIMODAL_INTERRUPT BIT(37)
471#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24) 496#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24)
472 497
473 u64 prc_alarm_action; 498 u64 prc_alarm_action;
@@ -691,6 +716,10 @@ typedef struct _XENA_dev_config {
691#define MC_ERR_REG_MIRI_CRI_ERR_0 BIT(22) 716#define MC_ERR_REG_MIRI_CRI_ERR_0 BIT(22)
692#define MC_ERR_REG_MIRI_CRI_ERR_1 BIT(23) 717#define MC_ERR_REG_MIRI_CRI_ERR_1 BIT(23)
693#define MC_ERR_REG_SM_ERR BIT(31) 718#define MC_ERR_REG_SM_ERR BIT(31)
719#define MC_ERR_REG_ECC_ALL_SNG (BIT(6) | \
720 BIT(7) | BIT(17) | BIT(19))
721#define MC_ERR_REG_ECC_ALL_DBL (BIT(14) | \
722 BIT(15) | BIT(18) | BIT(20))
694 u64 mc_err_mask; 723 u64 mc_err_mask;
695 u64 mc_err_alarm; 724 u64 mc_err_alarm;
696 725
@@ -736,7 +765,19 @@ typedef struct _XENA_dev_config {
736 u64 mc_rldram_test_d1; 765 u64 mc_rldram_test_d1;
737 u8 unused24[0x300 - 0x288]; 766 u8 unused24[0x300 - 0x288];
738 u64 mc_rldram_test_d2; 767 u64 mc_rldram_test_d2;
739 u8 unused25[0x700 - 0x308]; 768
769 u8 unused24_1[0x360 - 0x308];
770 u64 mc_rldram_ctrl;
771#define MC_RLDRAM_ENABLE_ODT BIT(7)
772
773 u8 unused24_2[0x640 - 0x368];
774 u64 mc_rldram_ref_per_herc;
775#define MC_RLDRAM_SET_REF_PERIOD(val) vBIT(val, 0, 16)
776
777 u8 unused24_3[0x660 - 0x648];
778 u64 mc_rldram_mrs_herc;
779
780 u8 unused25[0x700 - 0x668];
740 u64 mc_debug_ctrl; 781 u64 mc_debug_ctrl;
741 782
742 u8 unused26[0x3000 - 0x2f08]; 783 u8 unused26[0x3000 - 0x2f08];
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ea638b162d3f..7ca78228b104 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -11,29 +11,28 @@
11 * See the file COPYING in this distribution for more information. 11 * See the file COPYING in this distribution for more information.
12 * 12 *
13 * Credits: 13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition 14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some 15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for 16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable 17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues. 18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some 19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel. 20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were 21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments. 22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture 23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code. 24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver. 25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 * 26 *
27 * The module loadable parameters that are supported by the driver and a brief 27 * The module loadable parameters that are supported by the driver and a brief
 28 * explanation of all the variables. 28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used 29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver. 30 * in the driver.
31 * rx_ring_len: This defines the number of descriptors each ring can have. This 31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8. 32 * is also an array of size 8.
 33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver. 33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of 34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO. 35 * Tx descriptors that can be associated with each corresponding FIFO.
36 * in PCI Configuration space.
37 ************************************************************************/ 36 ************************************************************************/
38 37
39#include <linux/config.h> 38#include <linux/config.h>
@@ -56,27 +55,39 @@
56#include <linux/ethtool.h> 55#include <linux/ethtool.h>
57#include <linux/version.h> 56#include <linux/version.h>
58#include <linux/workqueue.h> 57#include <linux/workqueue.h>
58#include <linux/if_vlan.h>
59 59
60#include <asm/io.h>
61#include <asm/system.h> 60#include <asm/system.h>
62#include <asm/uaccess.h> 61#include <asm/uaccess.h>
62#include <asm/io.h>
63 63
64/* local include */ 64/* local include */
65#include "s2io.h" 65#include "s2io.h"
66#include "s2io-regs.h" 66#include "s2io-regs.h"
67 67
68/* S2io Driver name & version. */ 68/* S2io Driver name & version. */
69static char s2io_driver_name[] = "s2io"; 69static char s2io_driver_name[] = "Neterion";
70static char s2io_driver_version[] = "Version 1.7.7.1"; 70static char s2io_driver_version[] = "Version 2.0.3.1";
71
72static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73{
74 int ret;
75
76 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
71 78
72/* 79 return ret;
80}
81
82/*
73 * Cards with following subsystem_id have a link state indication 83 * Cards with following subsystem_id have a link state indication
74 * problem, 600B, 600C, 600D, 640B, 640C and 640D. 84 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
75 * macro below identifies these cards given the subsystem_id. 85 * macro below identifies these cards given the subsystem_id.
76 */ 86 */
77#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \ 87#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
78 (((subid >= 0x600B) && (subid <= 0x600D)) || \ 88 (dev_type == XFRAME_I_DEVICE) ? \
79 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0 89 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
90 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
80 91
81#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \ 92#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
82 ADAPTER_STATUS_RMAC_LOCAL_FAULT))) 93 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
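CARDS_WITH_FAULTY_LINK_INDICATORS() now takes the device type as well, so the subsystem-ID ranges are only consulted for Xframe I adapters. Written out as a plain function the check reads as below; XFRAME_I_DEVICE is given a placeholder value purely so the sketch compiles on its own:

#include <stdio.h>

#define XFRAME_I_DEVICE 1   /* placeholder value for this sketch only */

/* Same test as the macro: only Xframe I cards with subsystem IDs
 * 0x600B-0x600D or 0x640B-0x640D have the faulty link indicator. */
static int faulty_link_indicator(int dev_type, unsigned int subid)
{
        if (dev_type != XFRAME_I_DEVICE)
                return 0;
        return ((subid >= 0x600B && subid <= 0x600D) ||
                (subid >= 0x640B && subid <= 0x640D)) ? 1 : 0;
}

int main(void)
{
        printf("%d\n", faulty_link_indicator(XFRAME_I_DEVICE, 0x600C)); /* 1 */
        printf("%d\n", faulty_link_indicator(XFRAME_I_DEVICE, 0x6010)); /* 0 */
        printf("%d\n", faulty_link_indicator(2, 0x600C));               /* 0 */
        return 0;
}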
@@ -86,9 +97,12 @@ static char s2io_driver_version[] = "Version 1.7.7.1";
86static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring) 97static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
87{ 98{
88 int level = 0; 99 int level = 0;
89 if ((sp->pkt_cnt[ring] - rxb_size) > 16) { 100 mac_info_t *mac_control;
101
102 mac_control = &sp->mac_control;
103 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
90 level = LOW; 104 level = LOW;
91 if ((sp->pkt_cnt[ring] - rxb_size) < MAX_RXDS_PER_BLOCK) { 105 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
92 level = PANIC; 106 level = PANIC;
93 } 107 }
94 } 108 }
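The reworked rx_buffer_level() reads the per-ring packet count from mac_control->rings[ring] and now reports PANIC once the number of posted receive buffers falls to a single RxD block or less, instead of comparing against the ring total as before. A standalone sketch of the two-threshold policy; the MAX_RXDS_PER_BLOCK value of 127 is an assumption taken from the driver headers, not from this hunk:

#include <stdio.h>

#define MAX_RXDS_PER_BLOCK 127          /* assumed from s2io.h */
enum fill_level { OK = 0, LOW = 1, PANIC = 2 };

/* LOW once the ring is more than 16 descriptors short of full,
 * PANIC once fewer buffers remain than fit in one RxD block. */
static enum fill_level rx_buffer_level(int ring_pkt_cnt, int rxb_size)
{
        enum fill_level level = OK;

        if ((ring_pkt_cnt - rxb_size) > 16) {
                level = LOW;
                if (rxb_size <= MAX_RXDS_PER_BLOCK)
                        level = PANIC;
        }
        return level;
}

int main(void)
{
        printf("%d\n", rx_buffer_level(1024, 100));   /* 2 = PANIC */
        printf("%d\n", rx_buffer_level(1024, 900));   /* 1 = LOW   */
        printf("%d\n", rx_buffer_level(1024, 1015));  /* 0 = OK    */
        return 0;
}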
@@ -145,6 +159,9 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
145 {"rmac_pause_cnt"}, 159 {"rmac_pause_cnt"},
146 {"rmac_accepted_ip"}, 160 {"rmac_accepted_ip"},
147 {"rmac_err_tcp"}, 161 {"rmac_err_tcp"},
162 {"\n DRIVER STATISTICS"},
163 {"single_bit_ecc_errs"},
164 {"double_bit_ecc_errs"},
148}; 165};
149 166
150#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN 167#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
@@ -153,8 +170,37 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
153#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN 170#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
154#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN 171#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
155 172
173#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
174 init_timer(&timer); \
175 timer.function = handle; \
176 timer.data = (unsigned long) arg; \
177 mod_timer(&timer, (jiffies + exp)) \
178
179/* Add the vlan */
180static void s2io_vlan_rx_register(struct net_device *dev,
181 struct vlan_group *grp)
182{
183 nic_t *nic = dev->priv;
184 unsigned long flags;
185
186 spin_lock_irqsave(&nic->tx_lock, flags);
187 nic->vlgrp = grp;
188 spin_unlock_irqrestore(&nic->tx_lock, flags);
189}
190
191/* Unregister the vlan */
192static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
193{
194 nic_t *nic = dev->priv;
195 unsigned long flags;
196
197 spin_lock_irqsave(&nic->tx_lock, flags);
198 if (nic->vlgrp)
199 nic->vlgrp->vlan_devices[vid] = NULL;
200 spin_unlock_irqrestore(&nic->tx_lock, flags);
201}
156 202
157/* 203/*
158 * Constants to be programmed into the Xena's registers, to configure 204 * Constants to be programmed into the Xena's registers, to configure
159 * the XAUI. 205 * the XAUI.
160 */ 206 */
@@ -162,7 +208,28 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
162#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL 208#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
163#define END_SIGN 0x0 209#define END_SIGN 0x0
164 210
165static u64 default_mdio_cfg[] = { 211static u64 herc_act_dtx_cfg[] = {
212 /* Set address */
213 0x8000051536750000ULL, 0x80000515367500E0ULL,
214 /* Write data */
215 0x8000051536750004ULL, 0x80000515367500E4ULL,
216 /* Set address */
217 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
218 /* Write data */
219 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
220 /* Set address */
221 0x801205150D440000ULL, 0x801205150D4400E0ULL,
222 /* Write data */
223 0x801205150D440004ULL, 0x801205150D4400E4ULL,
224 /* Set address */
225 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
226 /* Write data */
227 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
228 /* Done */
229 END_SIGN
230};
231
232static u64 xena_mdio_cfg[] = {
166 /* Reset PMA PLL */ 233 /* Reset PMA PLL */
167 0xC001010000000000ULL, 0xC0010100000000E0ULL, 234 0xC001010000000000ULL, 0xC0010100000000E0ULL,
168 0xC0010100008000E4ULL, 235 0xC0010100008000E4ULL,
@@ -172,7 +239,7 @@ static u64 default_mdio_cfg[] = {
172 END_SIGN 239 END_SIGN
173}; 240};
174 241
175static u64 default_dtx_cfg[] = { 242static u64 xena_dtx_cfg[] = {
176 0x8000051500000000ULL, 0x80000515000000E0ULL, 243 0x8000051500000000ULL, 0x80000515000000E0ULL,
177 0x80000515D93500E4ULL, 0x8001051500000000ULL, 244 0x80000515D93500E4ULL, 0x8001051500000000ULL,
178 0x80010515000000E0ULL, 0x80010515001E00E4ULL, 245 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
@@ -196,8 +263,7 @@ static u64 default_dtx_cfg[] = {
196 END_SIGN 263 END_SIGN
197}; 264};
198 265
199 266/*
200/*
201 * Constants for Fixing the MacAddress problem seen mostly on 267 * Constants for Fixing the MacAddress problem seen mostly on
202 * Alpha machines. 268 * Alpha machines.
203 */ 269 */
@@ -226,20 +292,25 @@ static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
226static unsigned int rx_ring_num = 1; 292static unsigned int rx_ring_num = 1;
227static unsigned int rx_ring_sz[MAX_RX_RINGS] = 293static unsigned int rx_ring_sz[MAX_RX_RINGS] =
228 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 294 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
229static unsigned int Stats_refresh_time = 4; 295static unsigned int rts_frm_len[MAX_RX_RINGS] =
296 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
297static unsigned int use_continuous_tx_intrs = 1;
230static unsigned int rmac_pause_time = 65535; 298static unsigned int rmac_pause_time = 65535;
231static unsigned int mc_pause_threshold_q0q3 = 187; 299static unsigned int mc_pause_threshold_q0q3 = 187;
232static unsigned int mc_pause_threshold_q4q7 = 187; 300static unsigned int mc_pause_threshold_q4q7 = 187;
233static unsigned int shared_splits; 301static unsigned int shared_splits;
234static unsigned int tmac_util_period = 5; 302static unsigned int tmac_util_period = 5;
235static unsigned int rmac_util_period = 5; 303static unsigned int rmac_util_period = 5;
304static unsigned int bimodal = 0;
236#ifndef CONFIG_S2IO_NAPI 305#ifndef CONFIG_S2IO_NAPI
237static unsigned int indicate_max_pkts; 306static unsigned int indicate_max_pkts;
238#endif 307#endif
308/* Frequency of Rx desc syncs expressed as power of 2 */
309static unsigned int rxsync_frequency = 3;
239 310
240/* 311/*
241 * S2IO device table. 312 * S2IO device table.
242 * This table lists all the devices that this driver supports. 313 * This table lists all the devices that this driver supports.
243 */ 314 */
244static struct pci_device_id s2io_tbl[] __devinitdata = { 315static struct pci_device_id s2io_tbl[] __devinitdata = {
245 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN, 316 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
@@ -247,9 +318,9 @@ static struct pci_device_id s2io_tbl[] __devinitdata = {
247 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI, 318 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
248 PCI_ANY_ID, PCI_ANY_ID}, 319 PCI_ANY_ID, PCI_ANY_ID},
249 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN, 320 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
250 PCI_ANY_ID, PCI_ANY_ID}, 321 PCI_ANY_ID, PCI_ANY_ID},
251 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI, 322 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
252 PCI_ANY_ID, PCI_ANY_ID}, 323 PCI_ANY_ID, PCI_ANY_ID},
253 {0,} 324 {0,}
254}; 325};
255 326
@@ -268,8 +339,8 @@ static struct pci_driver s2io_driver = {
268/** 339/**
269 * init_shared_mem - Allocation and Initialization of Memory 340 * init_shared_mem - Allocation and Initialization of Memory
270 * @nic: Device private variable. 341 * @nic: Device private variable.
271 * Description: The function allocates all the memory areas shared 342 * Description: The function allocates all the memory areas shared
272 * between the NIC and the driver. This includes Tx descriptors, 343 * between the NIC and the driver. This includes Tx descriptors,
273 * Rx descriptors and the statistics block. 344 * Rx descriptors and the statistics block.
274 */ 345 */
275 346
@@ -279,11 +350,11 @@ static int init_shared_mem(struct s2io_nic *nic)
279 void *tmp_v_addr, *tmp_v_addr_next; 350 void *tmp_v_addr, *tmp_v_addr_next;
280 dma_addr_t tmp_p_addr, tmp_p_addr_next; 351 dma_addr_t tmp_p_addr, tmp_p_addr_next;
281 RxD_block_t *pre_rxd_blk = NULL; 352 RxD_block_t *pre_rxd_blk = NULL;
282 int i, j, blk_cnt; 353 int i, j, blk_cnt, rx_sz, tx_sz;
283 int lst_size, lst_per_page; 354 int lst_size, lst_per_page;
284 struct net_device *dev = nic->dev; 355 struct net_device *dev = nic->dev;
285#ifdef CONFIG_2BUFF_MODE 356#ifdef CONFIG_2BUFF_MODE
286 unsigned long tmp; 357 u64 tmp;
287 buffAdd_t *ba; 358 buffAdd_t *ba;
288#endif 359#endif
289 360
@@ -300,36 +371,41 @@ static int init_shared_mem(struct s2io_nic *nic)
300 size += config->tx_cfg[i].fifo_len; 371 size += config->tx_cfg[i].fifo_len;
301 } 372 }
302 if (size > MAX_AVAILABLE_TXDS) { 373 if (size > MAX_AVAILABLE_TXDS) {
303 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ", 374 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
304 dev->name); 375 __FUNCTION__);
305 DBG_PRINT(ERR_DBG, "exceeds the maximum value "); 376 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
306 DBG_PRINT(ERR_DBG, "that can be used\n");
307 return FAILURE; 377 return FAILURE;
308 } 378 }
309 379
310 lst_size = (sizeof(TxD_t) * config->max_txds); 380 lst_size = (sizeof(TxD_t) * config->max_txds);
381 tx_sz = lst_size * size;
311 lst_per_page = PAGE_SIZE / lst_size; 382 lst_per_page = PAGE_SIZE / lst_size;
312 383
313 for (i = 0; i < config->tx_fifo_num; i++) { 384 for (i = 0; i < config->tx_fifo_num; i++) {
314 int fifo_len = config->tx_cfg[i].fifo_len; 385 int fifo_len = config->tx_cfg[i].fifo_len;
315 int list_holder_size = fifo_len * sizeof(list_info_hold_t); 386 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
316 nic->list_info[i] = kmalloc(list_holder_size, GFP_KERNEL); 387 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
317 if (!nic->list_info[i]) { 388 GFP_KERNEL);
389 if (!mac_control->fifos[i].list_info) {
318 DBG_PRINT(ERR_DBG, 390 DBG_PRINT(ERR_DBG,
319 "Malloc failed for list_info\n"); 391 "Malloc failed for list_info\n");
320 return -ENOMEM; 392 return -ENOMEM;
321 } 393 }
322 memset(nic->list_info[i], 0, list_holder_size); 394 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
323 } 395 }
324 for (i = 0; i < config->tx_fifo_num; i++) { 396 for (i = 0; i < config->tx_fifo_num; i++) {
325 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len, 397 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
326 lst_per_page); 398 lst_per_page);
327 mac_control->tx_curr_put_info[i].offset = 0; 399 mac_control->fifos[i].tx_curr_put_info.offset = 0;
328 mac_control->tx_curr_put_info[i].fifo_len = 400 mac_control->fifos[i].tx_curr_put_info.fifo_len =
329 config->tx_cfg[i].fifo_len - 1; 401 config->tx_cfg[i].fifo_len - 1;
330 mac_control->tx_curr_get_info[i].offset = 0; 402 mac_control->fifos[i].tx_curr_get_info.offset = 0;
331 mac_control->tx_curr_get_info[i].fifo_len = 403 mac_control->fifos[i].tx_curr_get_info.fifo_len =
332 config->tx_cfg[i].fifo_len - 1; 404 config->tx_cfg[i].fifo_len - 1;
405 mac_control->fifos[i].fifo_no = i;
406 mac_control->fifos[i].nic = nic;
407 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
408
333 for (j = 0; j < page_num; j++) { 409 for (j = 0; j < page_num; j++) {
334 int k = 0; 410 int k = 0;
335 dma_addr_t tmp_p; 411 dma_addr_t tmp_p;
@@ -345,16 +421,15 @@ static int init_shared_mem(struct s2io_nic *nic)
345 while (k < lst_per_page) { 421 while (k < lst_per_page) {
346 int l = (j * lst_per_page) + k; 422 int l = (j * lst_per_page) + k;
347 if (l == config->tx_cfg[i].fifo_len) 423 if (l == config->tx_cfg[i].fifo_len)
348 goto end_txd_alloc; 424 break;
349 nic->list_info[i][l].list_virt_addr = 425 mac_control->fifos[i].list_info[l].list_virt_addr =
350 tmp_v + (k * lst_size); 426 tmp_v + (k * lst_size);
351 nic->list_info[i][l].list_phy_addr = 427 mac_control->fifos[i].list_info[l].list_phy_addr =
352 tmp_p + (k * lst_size); 428 tmp_p + (k * lst_size);
353 k++; 429 k++;
354 } 430 }
355 } 431 }
356 } 432 }
357 end_txd_alloc:
358 433
359 /* Allocation and initialization of RXDs in Rings */ 434 /* Allocation and initialization of RXDs in Rings */
360 size = 0; 435 size = 0;
@@ -367,21 +442,26 @@ static int init_shared_mem(struct s2io_nic *nic)
367 return FAILURE; 442 return FAILURE;
368 } 443 }
369 size += config->rx_cfg[i].num_rxd; 444 size += config->rx_cfg[i].num_rxd;
370 nic->block_count[i] = 445 mac_control->rings[i].block_count =
371 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 446 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
372 nic->pkt_cnt[i] = 447 mac_control->rings[i].pkt_cnt =
373 config->rx_cfg[i].num_rxd - nic->block_count[i]; 448 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
374 } 449 }
450 size = (size * (sizeof(RxD_t)));
451 rx_sz = size;
375 452
376 for (i = 0; i < config->rx_ring_num; i++) { 453 for (i = 0; i < config->rx_ring_num; i++) {
377 mac_control->rx_curr_get_info[i].block_index = 0; 454 mac_control->rings[i].rx_curr_get_info.block_index = 0;
378 mac_control->rx_curr_get_info[i].offset = 0; 455 mac_control->rings[i].rx_curr_get_info.offset = 0;
379 mac_control->rx_curr_get_info[i].ring_len = 456 mac_control->rings[i].rx_curr_get_info.ring_len =
380 config->rx_cfg[i].num_rxd - 1; 457 config->rx_cfg[i].num_rxd - 1;
381 mac_control->rx_curr_put_info[i].block_index = 0; 458 mac_control->rings[i].rx_curr_put_info.block_index = 0;
382 mac_control->rx_curr_put_info[i].offset = 0; 459 mac_control->rings[i].rx_curr_put_info.offset = 0;
383 mac_control->rx_curr_put_info[i].ring_len = 460 mac_control->rings[i].rx_curr_put_info.ring_len =
384 config->rx_cfg[i].num_rxd - 1; 461 config->rx_cfg[i].num_rxd - 1;
462 mac_control->rings[i].nic = nic;
463 mac_control->rings[i].ring_no = i;
464
385 blk_cnt = 465 blk_cnt =
386 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 466 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
387 /* Allocating all the Rx blocks */ 467 /* Allocating all the Rx blocks */
@@ -395,32 +475,36 @@ static int init_shared_mem(struct s2io_nic *nic)
395 &tmp_p_addr); 475 &tmp_p_addr);
396 if (tmp_v_addr == NULL) { 476 if (tmp_v_addr == NULL) {
397 /* 477 /*
398 * In case of failure, free_shared_mem() 478 * In case of failure, free_shared_mem()
399 * is called, which should free any 479 * is called, which should free any
400 * memory that was alloced till the 480 * memory that was alloced till the
401 * failure happened. 481 * failure happened.
402 */ 482 */
403 nic->rx_blocks[i][j].block_virt_addr = 483 mac_control->rings[i].rx_blocks[j].block_virt_addr =
404 tmp_v_addr; 484 tmp_v_addr;
405 return -ENOMEM; 485 return -ENOMEM;
406 } 486 }
407 memset(tmp_v_addr, 0, size); 487 memset(tmp_v_addr, 0, size);
408 nic->rx_blocks[i][j].block_virt_addr = tmp_v_addr; 488 mac_control->rings[i].rx_blocks[j].block_virt_addr =
409 nic->rx_blocks[i][j].block_dma_addr = tmp_p_addr; 489 tmp_v_addr;
490 mac_control->rings[i].rx_blocks[j].block_dma_addr =
491 tmp_p_addr;
410 } 492 }
411 /* Interlinking all Rx Blocks */ 493 /* Interlinking all Rx Blocks */
412 for (j = 0; j < blk_cnt; j++) { 494 for (j = 0; j < blk_cnt; j++) {
413 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr; 495 tmp_v_addr =
496 mac_control->rings[i].rx_blocks[j].block_virt_addr;
414 tmp_v_addr_next = 497 tmp_v_addr_next =
415 nic->rx_blocks[i][(j + 1) % 498 mac_control->rings[i].rx_blocks[(j + 1) %
416 blk_cnt].block_virt_addr; 499 blk_cnt].block_virt_addr;
417 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr; 500 tmp_p_addr =
501 mac_control->rings[i].rx_blocks[j].block_dma_addr;
418 tmp_p_addr_next = 502 tmp_p_addr_next =
419 nic->rx_blocks[i][(j + 1) % 503 mac_control->rings[i].rx_blocks[(j + 1) %
420 blk_cnt].block_dma_addr; 504 blk_cnt].block_dma_addr;
421 505
422 pre_rxd_blk = (RxD_block_t *) tmp_v_addr; 506 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
423 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD 507 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
424 * marker. 508 * marker.
425 */ 509 */
426#ifndef CONFIG_2BUFF_MODE 510#ifndef CONFIG_2BUFF_MODE
@@ -433,43 +517,43 @@ static int init_shared_mem(struct s2io_nic *nic)
433 } 517 }
434 518
435#ifdef CONFIG_2BUFF_MODE 519#ifdef CONFIG_2BUFF_MODE
436 /* 520 /*
437 * Allocation of Storages for buffer addresses in 2BUFF mode 521 * Allocation of Storages for buffer addresses in 2BUFF mode
438 * and the buffers as well. 522 * and the buffers as well.
439 */ 523 */
440 for (i = 0; i < config->rx_ring_num; i++) { 524 for (i = 0; i < config->rx_ring_num; i++) {
441 blk_cnt = 525 blk_cnt =
442 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 526 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
443 nic->ba[i] = kmalloc((sizeof(buffAdd_t *) * blk_cnt), 527 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
444 GFP_KERNEL); 528 GFP_KERNEL);
445 if (!nic->ba[i]) 529 if (!mac_control->rings[i].ba)
446 return -ENOMEM; 530 return -ENOMEM;
447 for (j = 0; j < blk_cnt; j++) { 531 for (j = 0; j < blk_cnt; j++) {
448 int k = 0; 532 int k = 0;
449 nic->ba[i][j] = kmalloc((sizeof(buffAdd_t) * 533 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
450 (MAX_RXDS_PER_BLOCK + 1)), 534 (MAX_RXDS_PER_BLOCK + 1)),
451 GFP_KERNEL); 535 GFP_KERNEL);
452 if (!nic->ba[i][j]) 536 if (!mac_control->rings[i].ba[j])
453 return -ENOMEM; 537 return -ENOMEM;
454 while (k != MAX_RXDS_PER_BLOCK) { 538 while (k != MAX_RXDS_PER_BLOCK) {
455 ba = &nic->ba[i][j][k]; 539 ba = &mac_control->rings[i].ba[j][k];
456 540
457 ba->ba_0_org = kmalloc 541 ba->ba_0_org = (void *) kmalloc
458 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL); 542 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
459 if (!ba->ba_0_org) 543 if (!ba->ba_0_org)
460 return -ENOMEM; 544 return -ENOMEM;
461 tmp = (unsigned long) ba->ba_0_org; 545 tmp = (u64) ba->ba_0_org;
462 tmp += ALIGN_SIZE; 546 tmp += ALIGN_SIZE;
463 tmp &= ~((unsigned long) ALIGN_SIZE); 547 tmp &= ~((u64) ALIGN_SIZE);
464 ba->ba_0 = (void *) tmp; 548 ba->ba_0 = (void *) tmp;
465 549
466 ba->ba_1_org = kmalloc 550 ba->ba_1_org = (void *) kmalloc
467 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL); 551 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
468 if (!ba->ba_1_org) 552 if (!ba->ba_1_org)
469 return -ENOMEM; 553 return -ENOMEM;
470 tmp = (unsigned long) ba->ba_1_org; 554 tmp = (u64) ba->ba_1_org;
471 tmp += ALIGN_SIZE; 555 tmp += ALIGN_SIZE;
472 tmp &= ~((unsigned long) ALIGN_SIZE); 556 tmp &= ~((u64) ALIGN_SIZE);
473 ba->ba_1 = (void *) tmp; 557 ba->ba_1 = (void *) tmp;
474 k++; 558 k++;
475 } 559 }
@@ -483,9 +567,9 @@ static int init_shared_mem(struct s2io_nic *nic)
483 (nic->pdev, size, &mac_control->stats_mem_phy); 567 (nic->pdev, size, &mac_control->stats_mem_phy);
484 568
485 if (!mac_control->stats_mem) { 569 if (!mac_control->stats_mem) {
486 /* 570 /*
487 * In case of failure, free_shared_mem() is called, which 571 * In case of failure, free_shared_mem() is called, which
488 * should free any memory that was alloced till the 572 * should free any memory that was alloced till the
489 * failure happened. 573 * failure happened.
490 */ 574 */
491 return -ENOMEM; 575 return -ENOMEM;
@@ -495,15 +579,14 @@ static int init_shared_mem(struct s2io_nic *nic)
495 tmp_v_addr = mac_control->stats_mem; 579 tmp_v_addr = mac_control->stats_mem;
496 mac_control->stats_info = (StatInfo_t *) tmp_v_addr; 580 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
497 memset(tmp_v_addr, 0, size); 581 memset(tmp_v_addr, 0, size);
498
499 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name, 582 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
500 (unsigned long long) tmp_p_addr); 583 (unsigned long long) tmp_p_addr);
501 584
502 return SUCCESS; 585 return SUCCESS;
503} 586}
504 587
505/** 588/**
506 * free_shared_mem - Free the allocated Memory 589 * free_shared_mem - Free the allocated Memory
507 * @nic: Device private variable. 590 * @nic: Device private variable.
508 * Description: This function is to free all memory locations allocated by 591 * Description: This function is to free all memory locations allocated by
509 * the init_shared_mem() function and return it to the kernel. 592 * the init_shared_mem() function and return it to the kernel.
@@ -533,15 +616,19 @@ static void free_shared_mem(struct s2io_nic *nic)
533 lst_per_page); 616 lst_per_page);
534 for (j = 0; j < page_num; j++) { 617 for (j = 0; j < page_num; j++) {
535 int mem_blks = (j * lst_per_page); 618 int mem_blks = (j * lst_per_page);
536 if (!nic->list_info[i][mem_blks].list_virt_addr) 619 if ((!mac_control->fifos[i].list_info) ||
620 (!mac_control->fifos[i].list_info[mem_blks].
621 list_virt_addr))
537 break; 622 break;
538 pci_free_consistent(nic->pdev, PAGE_SIZE, 623 pci_free_consistent(nic->pdev, PAGE_SIZE,
539 nic->list_info[i][mem_blks]. 624 mac_control->fifos[i].
625 list_info[mem_blks].
540 list_virt_addr, 626 list_virt_addr,
541 nic->list_info[i][mem_blks]. 627 mac_control->fifos[i].
628 list_info[mem_blks].
542 list_phy_addr); 629 list_phy_addr);
543 } 630 }
544 kfree(nic->list_info[i]); 631 kfree(mac_control->fifos[i].list_info);
545 } 632 }
546 633
547#ifndef CONFIG_2BUFF_MODE 634#ifndef CONFIG_2BUFF_MODE
@@ -550,10 +637,12 @@ static void free_shared_mem(struct s2io_nic *nic)
550 size = SIZE_OF_BLOCK; 637 size = SIZE_OF_BLOCK;
551#endif 638#endif
552 for (i = 0; i < config->rx_ring_num; i++) { 639 for (i = 0; i < config->rx_ring_num; i++) {
553 blk_cnt = nic->block_count[i]; 640 blk_cnt = mac_control->rings[i].block_count;
554 for (j = 0; j < blk_cnt; j++) { 641 for (j = 0; j < blk_cnt; j++) {
555 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr; 642 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
556 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr; 643 block_virt_addr;
644 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
645 block_dma_addr;
557 if (tmp_v_addr == NULL) 646 if (tmp_v_addr == NULL)
558 break; 647 break;
559 pci_free_consistent(nic->pdev, size, 648 pci_free_consistent(nic->pdev, size,
@@ -566,35 +655,21 @@ static void free_shared_mem(struct s2io_nic *nic)
566 for (i = 0; i < config->rx_ring_num; i++) { 655 for (i = 0; i < config->rx_ring_num; i++) {
567 blk_cnt = 656 blk_cnt =
568 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 657 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
569 if (!nic->ba[i])
570 goto end_free;
571 for (j = 0; j < blk_cnt; j++) { 658 for (j = 0; j < blk_cnt; j++) {
572 int k = 0; 659 int k = 0;
573 if (!nic->ba[i][j]) { 660 if (!mac_control->rings[i].ba[j])
574 kfree(nic->ba[i]); 661 continue;
575 goto end_free;
576 }
577 while (k != MAX_RXDS_PER_BLOCK) { 662 while (k != MAX_RXDS_PER_BLOCK) {
578 buffAdd_t *ba = &nic->ba[i][j][k]; 663 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
579 if (!ba || !ba->ba_0_org || !ba->ba_1_org)
580 {
581 kfree(nic->ba[i]);
582 kfree(nic->ba[i][j]);
583 if(ba->ba_0_org)
584 kfree(ba->ba_0_org);
585 if(ba->ba_1_org)
586 kfree(ba->ba_1_org);
587 goto end_free;
588 }
589 kfree(ba->ba_0_org); 664 kfree(ba->ba_0_org);
590 kfree(ba->ba_1_org); 665 kfree(ba->ba_1_org);
591 k++; 666 k++;
592 } 667 }
593 kfree(nic->ba[i][j]); 668 kfree(mac_control->rings[i].ba[j]);
594 } 669 }
595 kfree(nic->ba[i]); 670 if (mac_control->rings[i].ba)
671 kfree(mac_control->rings[i].ba);
596 } 672 }
597end_free:
598#endif 673#endif
599 674
600 if (mac_control->stats_mem) { 675 if (mac_control->stats_mem) {
@@ -605,12 +680,93 @@ end_free:
605 } 680 }
606} 681}
607 682
608/** 683/**
609 * init_nic - Initialization of hardware 684 * s2io_verify_pci_mode -
685 */
686
687static int s2io_verify_pci_mode(nic_t *nic)
688{
689 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
690 register u64 val64 = 0;
691 int mode;
692
693 val64 = readq(&bar0->pci_mode);
694 mode = (u8)GET_PCI_MODE(val64);
695
696 if ( val64 & PCI_MODE_UNKNOWN_MODE)
697 return -1; /* Unknown PCI mode */
698 return mode;
699}
700
701
702/**
 703 * s2io_print_pci_mode - print the detected PCI/PCI-X bus mode and record the bus speed
704 */
705static int s2io_print_pci_mode(nic_t *nic)
706{
707 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
708 register u64 val64 = 0;
709 int mode;
710 struct config_param *config = &nic->config;
711
712 val64 = readq(&bar0->pci_mode);
713 mode = (u8)GET_PCI_MODE(val64);
714
715 if ( val64 & PCI_MODE_UNKNOWN_MODE)
716 return -1; /* Unknown PCI mode */
717
718 if (val64 & PCI_MODE_32_BITS) {
719 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
720 } else {
721 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
722 }
723
724 switch(mode) {
725 case PCI_MODE_PCI_33:
726 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
727 config->bus_speed = 33;
728 break;
729 case PCI_MODE_PCI_66:
730 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
731 config->bus_speed = 133;
732 break;
733 case PCI_MODE_PCIX_M1_66:
734 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
735 config->bus_speed = 133; /* Herc doubles the clock rate */
736 break;
737 case PCI_MODE_PCIX_M1_100:
738 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
739 config->bus_speed = 200;
740 break;
741 case PCI_MODE_PCIX_M1_133:
742 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
743 config->bus_speed = 266;
744 break;
745 case PCI_MODE_PCIX_M2_66:
746 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
747 config->bus_speed = 133;
748 break;
749 case PCI_MODE_PCIX_M2_100:
750 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
751 config->bus_speed = 200;
752 break;
753 case PCI_MODE_PCIX_M2_133:
754 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
755 config->bus_speed = 266;
756 break;
757 default:
758 return -1; /* Unsupported bus speed */
759 }
760
761 return mode;
762}
763
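s2io_print_pci_mode() both reports the detected bus width and clock and records an effective speed in config->bus_speed, which the TTI/RTI timer setup later depends on. The same decode can be kept in a single lookup table, as sketched below; the assumption that GET_PCI_MODE() yields the codes 0 through 7 in this order belongs to the sketch, the real values live in s2io.h:

#include <stdio.h>

/* One row per assumed GET_PCI_MODE() code, in the order the switch above
 * handles them. The effective bus_speed values are copied from that switch;
 * note that PCI-66 and PCI-X(M1) 66 are recorded as an effective 133. */
static const struct { const char *name; int bus_speed; } pci_modes[] = {
        { "33MHz PCI",        33 },
        { "66MHz PCI",       133 },
        { "66MHz PCIX(M1)",  133 },
        { "100MHz PCIX(M1)", 200 },
        { "133MHz PCIX(M1)", 266 },
        { "133MHz PCIX(M2)", 133 },
        { "200MHz PCIX(M2)", 200 },
        { "266MHz PCIX(M2)", 266 },
};

int main(void)
{
        int mode;

        for (mode = 0; mode < 8; mode++)
                printf("mode %d: %-16s bus_speed=%d\n",
                       mode, pci_modes[mode].name, pci_modes[mode].bus_speed);
        return 0;
}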
764/**
765 * init_nic - Initialization of hardware
 610 * @nic: device private variable 766 * @nic: device private variable
611 * Description: The function sequentially configures every block 767 * Description: The function sequentially configures every block
612 * of the H/W from their reset values. 768 * of the H/W from their reset values.
613 * Return Value: SUCCESS on success and 769 * Return Value: SUCCESS on success and
614 * '-1' on failure (endian settings incorrect). 770 * '-1' on failure (endian settings incorrect).
615 */ 771 */
616 772
@@ -626,21 +782,32 @@ static int init_nic(struct s2io_nic *nic)
626 struct config_param *config; 782 struct config_param *config;
627 int mdio_cnt = 0, dtx_cnt = 0; 783 int mdio_cnt = 0, dtx_cnt = 0;
628 unsigned long long mem_share; 784 unsigned long long mem_share;
785 int mem_size;
629 786
630 mac_control = &nic->mac_control; 787 mac_control = &nic->mac_control;
631 config = &nic->config; 788 config = &nic->config;
632 789
 633 /* Initialize swapper control register */ 790 /* to set the swapper control on the card */
634 if (s2io_set_swapper(nic)) { 791 if(s2io_set_swapper(nic)) {
635 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n"); 792 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
636 return -1; 793 return -1;
637 } 794 }
638 795
796 /*
797 * Herc requires EOI to be removed from reset before XGXS, so..
798 */
799 if (nic->device_type & XFRAME_II_DEVICE) {
800 val64 = 0xA500000000ULL;
801 writeq(val64, &bar0->sw_reset);
802 msleep(500);
803 val64 = readq(&bar0->sw_reset);
804 }
805
639 /* Remove XGXS from reset state */ 806 /* Remove XGXS from reset state */
640 val64 = 0; 807 val64 = 0;
641 writeq(val64, &bar0->sw_reset); 808 writeq(val64, &bar0->sw_reset);
642 val64 = readq(&bar0->sw_reset);
643 msleep(500); 809 msleep(500);
810 val64 = readq(&bar0->sw_reset);
644 811
645 /* Enable Receiving broadcasts */ 812 /* Enable Receiving broadcasts */
646 add = &bar0->mac_cfg; 813 add = &bar0->mac_cfg;
@@ -660,48 +827,58 @@ static int init_nic(struct s2io_nic *nic)
660 val64 = dev->mtu; 827 val64 = dev->mtu;
661 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); 828 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
662 829
663 /* 830 /*
664 * Configuring the XAUI Interface of Xena. 831 * Configuring the XAUI Interface of Xena.
665 * *************************************** 832 * ***************************************
666 * To Configure the Xena's XAUI, one has to write a series 833 * To Configure the Xena's XAUI, one has to write a series
667 * of 64 bit values into two registers in a particular 834 * of 64 bit values into two registers in a particular
668 * sequence. Hence a macro 'SWITCH_SIGN' has been defined 835 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
669 * which will be defined in the array of configuration values 836 * which will be defined in the array of configuration values
670 * (default_dtx_cfg & default_mdio_cfg) at appropriate places 837 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
 671 * to switch writing from one register to another. We continue 838 * to switch writing from one register to another. We continue
672 * writing these values until we encounter the 'END_SIGN' macro. 839 * writing these values until we encounter the 'END_SIGN' macro.
673 * For example, After making a series of 21 writes into 840 * For example, After making a series of 21 writes into
674 * dtx_control register the 'SWITCH_SIGN' appears and hence we 841 * dtx_control register the 'SWITCH_SIGN' appears and hence we
675 * start writing into mdio_control until we encounter END_SIGN. 842 * start writing into mdio_control until we encounter END_SIGN.
676 */ 843 */
677 while (1) { 844 if (nic->device_type & XFRAME_II_DEVICE) {
678 dtx_cfg: 845 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
679 while (default_dtx_cfg[dtx_cnt] != END_SIGN) { 846 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
680 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
681 dtx_cnt++;
682 goto mdio_cfg;
683 }
684 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
685 &bar0->dtx_control, UF); 847 &bar0->dtx_control, UF);
686 val64 = readq(&bar0->dtx_control); 848 if (dtx_cnt & 0x1)
849 msleep(1); /* Necessary!! */
687 dtx_cnt++; 850 dtx_cnt++;
688 } 851 }
689 mdio_cfg: 852 } else {
690 while (default_mdio_cfg[mdio_cnt] != END_SIGN) { 853 while (1) {
691 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) { 854 dtx_cfg:
855 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
856 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
857 dtx_cnt++;
858 goto mdio_cfg;
859 }
860 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
861 &bar0->dtx_control, UF);
862 val64 = readq(&bar0->dtx_control);
863 dtx_cnt++;
864 }
865 mdio_cfg:
866 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
867 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
868 mdio_cnt++;
869 goto dtx_cfg;
870 }
871 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
872 &bar0->mdio_control, UF);
873 val64 = readq(&bar0->mdio_control);
692 mdio_cnt++; 874 mdio_cnt++;
875 }
876 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
877 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
878 break;
879 } else {
693 goto dtx_cfg; 880 goto dtx_cfg;
694 } 881 }
695 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
696 &bar0->mdio_control, UF);
697 val64 = readq(&bar0->mdio_control);
698 mdio_cnt++;
699 }
700 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
701 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
702 break;
703 } else {
704 goto dtx_cfg;
705 } 882 }
706 } 883 }
707 884
@@ -748,12 +925,20 @@ static int init_nic(struct s2io_nic *nic)
748 val64 |= BIT(0); /* To enable the FIFO partition. */ 925 val64 |= BIT(0); /* To enable the FIFO partition. */
749 writeq(val64, &bar0->tx_fifo_partition_0); 926 writeq(val64, &bar0->tx_fifo_partition_0);
750 927
928 /*
929 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
930 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
931 */
932 if ((nic->device_type == XFRAME_I_DEVICE) &&
933 (get_xena_rev_id(nic->pdev) < 4))
934 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
935
751 val64 = readq(&bar0->tx_fifo_partition_0); 936 val64 = readq(&bar0->tx_fifo_partition_0);
752 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n", 937 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
753 &bar0->tx_fifo_partition_0, (unsigned long long) val64); 938 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
754 939
755 /* 940 /*
756 * Initialization of Tx_PA_CONFIG register to ignore packet 941 * Initialization of Tx_PA_CONFIG register to ignore packet
757 * integrity checking. 942 * integrity checking.
758 */ 943 */
759 val64 = readq(&bar0->tx_pa_cfg); 944 val64 = readq(&bar0->tx_pa_cfg);
@@ -770,85 +955,304 @@ static int init_nic(struct s2io_nic *nic)
770 } 955 }
771 writeq(val64, &bar0->rx_queue_priority); 956 writeq(val64, &bar0->rx_queue_priority);
772 957
773 /* 958 /*
774 * Allocating equal share of memory to all the 959 * Allocating equal share of memory to all the
775 * configured Rings. 960 * configured Rings.
776 */ 961 */
777 val64 = 0; 962 val64 = 0;
963 if (nic->device_type & XFRAME_II_DEVICE)
964 mem_size = 32;
965 else
966 mem_size = 64;
967
778 for (i = 0; i < config->rx_ring_num; i++) { 968 for (i = 0; i < config->rx_ring_num; i++) {
779 switch (i) { 969 switch (i) {
780 case 0: 970 case 0:
781 mem_share = (64 / config->rx_ring_num + 971 mem_share = (mem_size / config->rx_ring_num +
782 64 % config->rx_ring_num); 972 mem_size % config->rx_ring_num);
783 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share); 973 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
784 continue; 974 continue;
785 case 1: 975 case 1:
786 mem_share = (64 / config->rx_ring_num); 976 mem_share = (mem_size / config->rx_ring_num);
787 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share); 977 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
788 continue; 978 continue;
789 case 2: 979 case 2:
790 mem_share = (64 / config->rx_ring_num); 980 mem_share = (mem_size / config->rx_ring_num);
791 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share); 981 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
792 continue; 982 continue;
793 case 3: 983 case 3:
794 mem_share = (64 / config->rx_ring_num); 984 mem_share = (mem_size / config->rx_ring_num);
795 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share); 985 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
796 continue; 986 continue;
797 case 4: 987 case 4:
798 mem_share = (64 / config->rx_ring_num); 988 mem_share = (mem_size / config->rx_ring_num);
799 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share); 989 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
800 continue; 990 continue;
801 case 5: 991 case 5:
802 mem_share = (64 / config->rx_ring_num); 992 mem_share = (mem_size / config->rx_ring_num);
803 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share); 993 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
804 continue; 994 continue;
805 case 6: 995 case 6:
806 mem_share = (64 / config->rx_ring_num); 996 mem_share = (mem_size / config->rx_ring_num);
807 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share); 997 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
808 continue; 998 continue;
809 case 7: 999 case 7:
810 mem_share = (64 / config->rx_ring_num); 1000 mem_share = (mem_size / config->rx_ring_num);
811 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share); 1001 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
812 continue; 1002 continue;
813 } 1003 }
814 } 1004 }
815 writeq(val64, &bar0->rx_queue_cfg); 1005 writeq(val64, &bar0->rx_queue_cfg);
816 1006
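The queue-size loop above hands out the receive buffer memory (64 units on Xena, 32 on Herc, in whatever units RX_QUEUE_CFG_Qn_SZ() expects) evenly across the configured rings, with ring 0 also absorbing the remainder of the division. A quick standalone check of that arithmetic:

#include <stdio.h>

/* Mirror of the allocation rule: ring 0 gets quotient + remainder,
 * every other ring gets the plain quotient. */
static void split_mem(int mem_size, int ring_num)
{
        int i;

        for (i = 0; i < ring_num; i++) {
                int share = mem_size / ring_num;

                if (i == 0)
                        share += mem_size % ring_num;
                printf("ring %d: %d units\n", i, share);
        }
}

int main(void)
{
        split_mem(64, 3);  /* Xena, 3 rings: 22 + 21 + 21 */
        split_mem(32, 3);  /* Herc, 3 rings: 12 + 10 + 10 */
        return 0;
}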
817 /* 1007 /*
818 * Initializing the Tx round robin registers to 0. 1008 * Filling Tx round robin registers
819 * Filling Tx and Rx round robin registers as per the 1009 * as per the number of FIFOs
820 * number of FIFOs and Rings is still TODO.
821 */
822 writeq(0, &bar0->tx_w_round_robin_0);
823 writeq(0, &bar0->tx_w_round_robin_1);
824 writeq(0, &bar0->tx_w_round_robin_2);
825 writeq(0, &bar0->tx_w_round_robin_3);
826 writeq(0, &bar0->tx_w_round_robin_4);
827
828 /*
829 * TODO
830 * Disable Rx steering. Hard coding all packets be steered to
831 * Queue 0 for now.
832 */ 1010 */
833 val64 = 0x8080808080808080ULL; 1011 switch (config->tx_fifo_num) {
834 writeq(val64, &bar0->rts_qos_steering); 1012 case 1:
1013 val64 = 0x0000000000000000ULL;
1014 writeq(val64, &bar0->tx_w_round_robin_0);
1015 writeq(val64, &bar0->tx_w_round_robin_1);
1016 writeq(val64, &bar0->tx_w_round_robin_2);
1017 writeq(val64, &bar0->tx_w_round_robin_3);
1018 writeq(val64, &bar0->tx_w_round_robin_4);
1019 break;
1020 case 2:
1021 val64 = 0x0000010000010000ULL;
1022 writeq(val64, &bar0->tx_w_round_robin_0);
1023 val64 = 0x0100000100000100ULL;
1024 writeq(val64, &bar0->tx_w_round_robin_1);
1025 val64 = 0x0001000001000001ULL;
1026 writeq(val64, &bar0->tx_w_round_robin_2);
1027 val64 = 0x0000010000010000ULL;
1028 writeq(val64, &bar0->tx_w_round_robin_3);
1029 val64 = 0x0100000000000000ULL;
1030 writeq(val64, &bar0->tx_w_round_robin_4);
1031 break;
1032 case 3:
1033 val64 = 0x0001000102000001ULL;
1034 writeq(val64, &bar0->tx_w_round_robin_0);
1035 val64 = 0x0001020000010001ULL;
1036 writeq(val64, &bar0->tx_w_round_robin_1);
1037 val64 = 0x0200000100010200ULL;
1038 writeq(val64, &bar0->tx_w_round_robin_2);
1039 val64 = 0x0001000102000001ULL;
1040 writeq(val64, &bar0->tx_w_round_robin_3);
1041 val64 = 0x0001020000000000ULL;
1042 writeq(val64, &bar0->tx_w_round_robin_4);
1043 break;
1044 case 4:
1045 val64 = 0x0001020300010200ULL;
1046 writeq(val64, &bar0->tx_w_round_robin_0);
1047 val64 = 0x0100000102030001ULL;
1048 writeq(val64, &bar0->tx_w_round_robin_1);
1049 val64 = 0x0200010000010203ULL;
1050 writeq(val64, &bar0->tx_w_round_robin_2);
1051 val64 = 0x0001020001000001ULL;
1052 writeq(val64, &bar0->tx_w_round_robin_3);
1053 val64 = 0x0203000100000000ULL;
1054 writeq(val64, &bar0->tx_w_round_robin_4);
1055 break;
1056 case 5:
1057 val64 = 0x0001000203000102ULL;
1058 writeq(val64, &bar0->tx_w_round_robin_0);
1059 val64 = 0x0001020001030004ULL;
1060 writeq(val64, &bar0->tx_w_round_robin_1);
1061 val64 = 0x0001000203000102ULL;
1062 writeq(val64, &bar0->tx_w_round_robin_2);
1063 val64 = 0x0001020001030004ULL;
1064 writeq(val64, &bar0->tx_w_round_robin_3);
1065 val64 = 0x0001000000000000ULL;
1066 writeq(val64, &bar0->tx_w_round_robin_4);
1067 break;
1068 case 6:
1069 val64 = 0x0001020304000102ULL;
1070 writeq(val64, &bar0->tx_w_round_robin_0);
1071 val64 = 0x0304050001020001ULL;
1072 writeq(val64, &bar0->tx_w_round_robin_1);
1073 val64 = 0x0203000100000102ULL;
1074 writeq(val64, &bar0->tx_w_round_robin_2);
1075 val64 = 0x0304000102030405ULL;
1076 writeq(val64, &bar0->tx_w_round_robin_3);
1077 val64 = 0x0001000200000000ULL;
1078 writeq(val64, &bar0->tx_w_round_robin_4);
1079 break;
1080 case 7:
1081 val64 = 0x0001020001020300ULL;
1082 writeq(val64, &bar0->tx_w_round_robin_0);
1083 val64 = 0x0102030400010203ULL;
1084 writeq(val64, &bar0->tx_w_round_robin_1);
1085 val64 = 0x0405060001020001ULL;
1086 writeq(val64, &bar0->tx_w_round_robin_2);
1087 val64 = 0x0304050000010200ULL;
1088 writeq(val64, &bar0->tx_w_round_robin_3);
1089 val64 = 0x0102030000000000ULL;
1090 writeq(val64, &bar0->tx_w_round_robin_4);
1091 break;
1092 case 8:
1093 val64 = 0x0001020300040105ULL;
1094 writeq(val64, &bar0->tx_w_round_robin_0);
1095 val64 = 0x0200030106000204ULL;
1096 writeq(val64, &bar0->tx_w_round_robin_1);
1097 val64 = 0x0103000502010007ULL;
1098 writeq(val64, &bar0->tx_w_round_robin_2);
1099 val64 = 0x0304010002060500ULL;
1100 writeq(val64, &bar0->tx_w_round_robin_3);
1101 val64 = 0x0103020400000000ULL;
1102 writeq(val64, &bar0->tx_w_round_robin_4);
1103 break;
1104 }
1105
1106 /* Filling the Rx round robin registers as per the
1107 * number of Rings and steering based on QoS.
1108 */
1109 switch (config->rx_ring_num) {
1110 case 1:
1111 val64 = 0x8080808080808080ULL;
1112 writeq(val64, &bar0->rts_qos_steering);
1113 break;
1114 case 2:
1115 val64 = 0x0000010000010000ULL;
1116 writeq(val64, &bar0->rx_w_round_robin_0);
1117 val64 = 0x0100000100000100ULL;
1118 writeq(val64, &bar0->rx_w_round_robin_1);
1119 val64 = 0x0001000001000001ULL;
1120 writeq(val64, &bar0->rx_w_round_robin_2);
1121 val64 = 0x0000010000010000ULL;
1122 writeq(val64, &bar0->rx_w_round_robin_3);
1123 val64 = 0x0100000000000000ULL;
1124 writeq(val64, &bar0->rx_w_round_robin_4);
1125
1126 val64 = 0x8080808040404040ULL;
1127 writeq(val64, &bar0->rts_qos_steering);
1128 break;
1129 case 3:
1130 val64 = 0x0001000102000001ULL;
1131 writeq(val64, &bar0->rx_w_round_robin_0);
1132 val64 = 0x0001020000010001ULL;
1133 writeq(val64, &bar0->rx_w_round_robin_1);
1134 val64 = 0x0200000100010200ULL;
1135 writeq(val64, &bar0->rx_w_round_robin_2);
1136 val64 = 0x0001000102000001ULL;
1137 writeq(val64, &bar0->rx_w_round_robin_3);
1138 val64 = 0x0001020000000000ULL;
1139 writeq(val64, &bar0->rx_w_round_robin_4);
1140
1141 val64 = 0x8080804040402020ULL;
1142 writeq(val64, &bar0->rts_qos_steering);
1143 break;
1144 case 4:
1145 val64 = 0x0001020300010200ULL;
1146 writeq(val64, &bar0->rx_w_round_robin_0);
1147 val64 = 0x0100000102030001ULL;
1148 writeq(val64, &bar0->rx_w_round_robin_1);
1149 val64 = 0x0200010000010203ULL;
1150 writeq(val64, &bar0->rx_w_round_robin_2);
1151 val64 = 0x0001020001000001ULL;
1152 writeq(val64, &bar0->rx_w_round_robin_3);
1153 val64 = 0x0203000100000000ULL;
1154 writeq(val64, &bar0->rx_w_round_robin_4);
1155
1156 val64 = 0x8080404020201010ULL;
1157 writeq(val64, &bar0->rts_qos_steering);
1158 break;
1159 case 5:
1160 val64 = 0x0001000203000102ULL;
1161 writeq(val64, &bar0->rx_w_round_robin_0);
1162 val64 = 0x0001020001030004ULL;
1163 writeq(val64, &bar0->rx_w_round_robin_1);
1164 val64 = 0x0001000203000102ULL;
1165 writeq(val64, &bar0->rx_w_round_robin_2);
1166 val64 = 0x0001020001030004ULL;
1167 writeq(val64, &bar0->rx_w_round_robin_3);
1168 val64 = 0x0001000000000000ULL;
1169 writeq(val64, &bar0->rx_w_round_robin_4);
1170
1171 val64 = 0x8080404020201008ULL;
1172 writeq(val64, &bar0->rts_qos_steering);
1173 break;
1174 case 6:
1175 val64 = 0x0001020304000102ULL;
1176 writeq(val64, &bar0->rx_w_round_robin_0);
1177 val64 = 0x0304050001020001ULL;
1178 writeq(val64, &bar0->rx_w_round_robin_1);
1179 val64 = 0x0203000100000102ULL;
1180 writeq(val64, &bar0->rx_w_round_robin_2);
1181 val64 = 0x0304000102030405ULL;
1182 writeq(val64, &bar0->rx_w_round_robin_3);
1183 val64 = 0x0001000200000000ULL;
1184 writeq(val64, &bar0->rx_w_round_robin_4);
1185
1186 val64 = 0x8080404020100804ULL;
1187 writeq(val64, &bar0->rts_qos_steering);
1188 break;
1189 case 7:
1190 val64 = 0x0001020001020300ULL;
1191 writeq(val64, &bar0->rx_w_round_robin_0);
1192 val64 = 0x0102030400010203ULL;
1193 writeq(val64, &bar0->rx_w_round_robin_1);
1194 val64 = 0x0405060001020001ULL;
1195 writeq(val64, &bar0->rx_w_round_robin_2);
1196 val64 = 0x0304050000010200ULL;
1197 writeq(val64, &bar0->rx_w_round_robin_3);
1198 val64 = 0x0102030000000000ULL;
1199 writeq(val64, &bar0->rx_w_round_robin_4);
1200
1201 val64 = 0x8080402010080402ULL;
1202 writeq(val64, &bar0->rts_qos_steering);
1203 break;
1204 case 8:
1205 val64 = 0x0001020300040105ULL;
1206 writeq(val64, &bar0->rx_w_round_robin_0);
1207 val64 = 0x0200030106000204ULL;
1208 writeq(val64, &bar0->rx_w_round_robin_1);
1209 val64 = 0x0103000502010007ULL;
1210 writeq(val64, &bar0->rx_w_round_robin_2);
1211 val64 = 0x0304010002060500ULL;
1212 writeq(val64, &bar0->rx_w_round_robin_3);
1213 val64 = 0x0103020400000000ULL;
1214 writeq(val64, &bar0->rx_w_round_robin_4);
1215
1216 val64 = 0x8040201008040201ULL;
1217 writeq(val64, &bar0->rts_qos_steering);
1218 break;
1219 }
835 1220
836 /* UDP Fix */ 1221 /* UDP Fix */
837 val64 = 0; 1222 val64 = 0;
838 for (i = 1; i < 8; i++) 1223 for (i = 0; i < 8; i++)
1224 writeq(val64, &bar0->rts_frm_len_n[i]);
1225
1226 /* Set the default rts frame length for the rings configured */
1227 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1228 for (i = 0 ; i < config->rx_ring_num ; i++)
839 writeq(val64, &bar0->rts_frm_len_n[i]); 1229 writeq(val64, &bar0->rts_frm_len_n[i]);
840 1230
841 /* Set rts_frm_len register for fifo 0 */ 1231 /* Set the frame length for the configured rings
842 writeq(MAC_RTS_FRM_LEN_SET(dev->mtu + 22), 1232 * desired by the user
843 &bar0->rts_frm_len_n[0]); 1233 */
1234 for (i = 0; i < config->rx_ring_num; i++) {
1235 /* If rts_frm_len[i] == 0 then it is assumed that user not
1236 * specified frame length steering.
1237 * If the user provides the frame length then program
1238 * the rts_frm_len register for those values or else
1239 * leave it as it is.
1240 */
1241 if (rts_frm_len[i] != 0) {
1242 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1243 &bar0->rts_frm_len_n[i]);
1244 }
1245 }
844 1246
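The default programmed into rts_frm_len_n[] is dev->mtu + 22, presumably the 14-byte Ethernet header plus a 4-byte 802.1Q tag and the 4-byte FCS, which fits the VLAN hooks added earlier in this patch; the breakdown is an inference, only the +22 itself comes from the code:

#include <stdio.h>

int main(void)
{
        /* 14-byte Ethernet header + 4-byte 802.1Q tag + 4-byte FCS = 22 */
        int overhead = 14 + 4 + 4;

        printf("rts_frm_len default for MTU 1500: %d\n", 1500 + overhead);
        printf("rts_frm_len default for MTU 9000: %d\n", 9000 + overhead);
        return 0;
}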
845 /* Enable statistics */ 1247 /* Program statistics memory */
846 writeq(mac_control->stats_mem_phy, &bar0->stat_addr); 1248 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
847 val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
848 STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
849 writeq(val64, &bar0->stat_cfg);
850 1249
851 /* 1250 if (nic->device_type == XFRAME_II_DEVICE) {
1251 val64 = STAT_BC(0x320);
1252 writeq(val64, &bar0->stat_byte_cnt);
1253 }
1254
1255 /*
852 * Initializing the sampling rate for the device to calculate the 1256 * Initializing the sampling rate for the device to calculate the
853 * bandwidth utilization. 1257 * bandwidth utilization.
854 */ 1258 */
@@ -857,30 +1261,38 @@ static int init_nic(struct s2io_nic *nic)
857 writeq(val64, &bar0->mac_link_util); 1261 writeq(val64, &bar0->mac_link_util);
858 1262
859 1263
860 /* 1264 /*
861 * Initializing the Transmit and Receive Traffic Interrupt 1265 * Initializing the Transmit and Receive Traffic Interrupt
862 * Scheme. 1266 * Scheme.
863 */ 1267 */
864 /* TTI Initialization. Default Tx timer gets us about 1268 /*
1269 * TTI Initialization. Default Tx timer gets us about
865 * 250 interrupts per sec. Continuous interrupts are enabled 1270 * 250 interrupts per sec. Continuous interrupts are enabled
866 * by default. 1271 * by default.
867 */ 1272 */
868 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) | 1273 if (nic->device_type == XFRAME_II_DEVICE) {
869 TTI_DATA1_MEM_TX_URNG_A(0xA) | 1274 int count = (nic->config.bus_speed * 125)/2;
1275 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1276 } else {
1277
1278 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1279 }
1280 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
870 TTI_DATA1_MEM_TX_URNG_B(0x10) | 1281 TTI_DATA1_MEM_TX_URNG_B(0x10) |
871 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN | 1282 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
872 TTI_DATA1_MEM_TX_TIMER_CI_EN; 1283 if (use_continuous_tx_intrs)
1284 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
873 writeq(val64, &bar0->tti_data1_mem); 1285 writeq(val64, &bar0->tti_data1_mem);
874 1286
875 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1287 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
876 TTI_DATA2_MEM_TX_UFC_B(0x20) | 1288 TTI_DATA2_MEM_TX_UFC_B(0x20) |
877 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80); 1289 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
878 writeq(val64, &bar0->tti_data2_mem); 1290 writeq(val64, &bar0->tti_data2_mem);
879 1291
880 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD; 1292 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
881 writeq(val64, &bar0->tti_command_mem); 1293 writeq(val64, &bar0->tti_command_mem);
882 1294
883 /* 1295 /*
884 * Once the operation completes, the Strobe bit of the command 1296 * Once the operation completes, the Strobe bit of the command
885 * register will be reset. We poll for this particular condition 1297 * register will be reset. We poll for this particular condition
886 * We wait for a maximum of 500ms for the operation to complete, 1298 * We wait for a maximum of 500ms for the operation to complete,
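For Xframe II the transmit timer above is no longer the fixed 0x2078 but is derived from the bus speed recorded by s2io_print_pci_mode(): count = bus_speed * 125 / 2, which the comment pegs at roughly 250 interrupts a second (the receive side later uses bus_speed * 125 / 4 for about 500). The patch does not spell out how the count maps to wall-clock time, so the sketch below only reproduces the arithmetic:

#include <stdio.h>

int main(void)
{
        /* Effective bus speeds the driver records in config->bus_speed. */
        int speeds[] = { 33, 133, 200, 266 };
        int i;

        for (i = 0; i < 4; i++) {
                int tti = speeds[i] * 125 / 2;  /* Tx timer, ~250 intr/s */
                int rti = speeds[i] * 125 / 4;  /* Rx timer, ~500 intr/s */

                printf("bus %3d: TX_TIMER_VAL=%5d RX_TIMER_VAL=%5d\n",
                       speeds[i], tti, rti);
        }
        return 0;
}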
@@ -901,52 +1313,97 @@ static int init_nic(struct s2io_nic *nic)
901 time++; 1313 time++;
902 } 1314 }
903 1315
904 /* RTI Initialization */ 1316 if (nic->config.bimodal) {
905 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) | 1317 int k = 0;
906 RTI_DATA1_MEM_RX_URNG_A(0xA) | 1318 for (k = 0; k < config->rx_ring_num; k++) {
907 RTI_DATA1_MEM_RX_URNG_B(0x10) | 1319 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
908 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN; 1320 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1321 writeq(val64, &bar0->tti_command_mem);
1322
1323 /*
1324 * Once the operation completes, the Strobe bit of the command
1325 * register will be reset. We poll for this particular condition
1326 * We wait for a maximum of 500ms for the operation to complete,
1327 * if it's not complete by then we return error.
1328 */
1329 time = 0;
1330 while (TRUE) {
1331 val64 = readq(&bar0->tti_command_mem);
1332 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1333 break;
1334 }
1335 if (time > 10) {
1336 DBG_PRINT(ERR_DBG,
1337 "%s: TTI init Failed\n",
1338 dev->name);
1339 return -1;
1340 }
1341 time++;
1342 msleep(50);
1343 }
1344 }
1345 } else {
909 1346
910 writeq(val64, &bar0->rti_data1_mem); 1347 /* RTI Initialization */
1348 if (nic->device_type == XFRAME_II_DEVICE) {
1349 /*
1350 * Programmed to generate Apprx 500 Intrs per
1351 * second
1352 */
1353 int count = (nic->config.bus_speed * 125)/4;
1354 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1355 } else {
1356 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1357 }
1358 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1359 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1360 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
911 1361
912 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) | 1362 writeq(val64, &bar0->rti_data1_mem);
913 RTI_DATA2_MEM_RX_UFC_B(0x2) |
914 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
915 writeq(val64, &bar0->rti_data2_mem);
916 1363
917 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD; 1364 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
918 writeq(val64, &bar0->rti_command_mem); 1365 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1366 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1367 writeq(val64, &bar0->rti_data2_mem);
919 1368
920 /* 1369 for (i = 0; i < config->rx_ring_num; i++) {
921 * Once the operation completes, the Strobe bit of the command 1370 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
922 * register will be reset. We poll for this particular condition 1371 | RTI_CMD_MEM_OFFSET(i);
923 * We wait for a maximum of 500ms for the operation to complete, 1372 writeq(val64, &bar0->rti_command_mem);
924 * if it's not complete by then we return error. 1373
925 */ 1374 /*
926 time = 0; 1375 * Once the operation completes, the Strobe bit of the
927 while (TRUE) { 1376 * command register will be reset. We poll for this
928 val64 = readq(&bar0->rti_command_mem); 1377 * particular condition. We wait for a maximum of 500ms
929 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) { 1378 * for the operation to complete, if it's not complete
930 break; 1379 * by then we return error.
931 } 1380 */
932 if (time > 10) { 1381 time = 0;
933 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n", 1382 while (TRUE) {
934 dev->name); 1383 val64 = readq(&bar0->rti_command_mem);
935 return -1; 1384 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1385 break;
1386 }
1387 if (time > 10) {
1388 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1389 dev->name);
1390 return -1;
1391 }
1392 time++;
1393 msleep(50);
1394 }
936 } 1395 }
937 time++;
938 msleep(50);
939 } 1396 }
940 1397
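Both the TTI and the per-ring RTI programming now follow the same idiom: write the command word with the WE and STROBE bits (plus an OFFSET for the ring), then poll until the hardware clears the strobe, giving up after about 500 ms. A self-contained sketch of that pattern; read_reg()/write_reg() stand in for readq()/writeq() on bar0, the bit positions are placeholders and the hardware is simulated, so only the control flow mirrors the driver:

#include <stdio.h>
#include <stdint.h>

#define CMD_MEM_WE              (1ULL << 63)   /* placeholder bit positions */
#define CMD_MEM_STROBE_NEW_CMD  (1ULL << 62)

static uint64_t fake_reg;          /* stands in for a bar0 command register */
static int polls_until_done = 3;   /* pretend hardware finishes after 3 polls */

static void write_reg(uint64_t v) { fake_reg = v; }

static uint64_t read_reg(void)
{
        if (polls_until_done-- <= 0)
                fake_reg &= ~CMD_MEM_STROBE_NEW_CMD;   /* h/w clears strobe */
        return fake_reg;
}

/* Issue a command and poll for completion: up to 10 polls of 50 ms each,
 * i.e. the same ~500 ms budget the driver allows before reporting failure. */
static int issue_cmd_and_wait(uint64_t cmd)
{
        int time = 0;

        write_reg(cmd | CMD_MEM_WE | CMD_MEM_STROBE_NEW_CMD);
        for (;;) {
                if (!(read_reg() & CMD_MEM_STROBE_NEW_CMD))
                        return 0;       /* strobe cleared, command accepted */
                if (time++ > 10)
                        return -1;      /* driver would print "init Failed" */
                /* msleep(50) in the driver; omitted in this simulation */
        }
}

int main(void)
{
        printf("cmd result: %d\n", issue_cmd_and_wait(0));
        return 0;
}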
941 /* 1398 /*
942 * Initializing proper values as Pause threshold into all 1399 * Initializing proper values as Pause threshold into all
943 * the 8 Queues on Rx side. 1400 * the 8 Queues on Rx side.
944 */ 1401 */
945 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3); 1402 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
946 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7); 1403 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
947 1404
948 /* Disable RMAC PAD STRIPPING */ 1405 /* Disable RMAC PAD STRIPPING */
949 add = &bar0->mac_cfg; 1406 add = (void *) &bar0->mac_cfg;
950 val64 = readq(&bar0->mac_cfg); 1407 val64 = readq(&bar0->mac_cfg);
951 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD); 1408 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
952 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 1409 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
@@ -955,8 +1412,8 @@ static int init_nic(struct s2io_nic *nic)
955 writel((u32) (val64 >> 32), (add + 4)); 1412 writel((u32) (val64 >> 32), (add + 4));
956 val64 = readq(&bar0->mac_cfg); 1413 val64 = readq(&bar0->mac_cfg);
957 1414
958 /* 1415 /*
959 * Set the time value to be inserted in the pause frame 1416 * Set the time value to be inserted in the pause frame
960 * generated by xena. 1417 * generated by xena.
961 */ 1418 */
962 val64 = readq(&bar0->rmac_pause_cfg); 1419 val64 = readq(&bar0->rmac_pause_cfg);
@@ -964,7 +1421,7 @@ static int init_nic(struct s2io_nic *nic)
964 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time); 1421 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
965 writeq(val64, &bar0->rmac_pause_cfg); 1422 writeq(val64, &bar0->rmac_pause_cfg);
966 1423
967 /* 1424 /*
968 * Set the Threshold Limit for Generating the pause frame 1425 * Set the Threshold Limit for Generating the pause frame
969 * If the amount of data in any Queue exceeds ratio of 1426 * If the amount of data in any Queue exceeds ratio of
970 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256 1427 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
@@ -988,25 +1445,54 @@ static int init_nic(struct s2io_nic *nic)
988 } 1445 }
989 writeq(val64, &bar0->mc_pause_thresh_q4q7); 1446 writeq(val64, &bar0->mc_pause_thresh_q4q7);
990 1447
991 /* 1448 /*
992 * TxDMA will stop Read request if the number of read split has 1449 * TxDMA will stop Read request if the number of read split has
993 * exceeded the limit pointed by shared_splits 1450 * exceeded the limit pointed by shared_splits
994 */ 1451 */
995 val64 = readq(&bar0->pic_control); 1452 val64 = readq(&bar0->pic_control);
996 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits); 1453 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
997 writeq(val64, &bar0->pic_control); 1454 writeq(val64, &bar0->pic_control);
998 1455
1456 /*
1457 * Programming the Herc to split every write transaction
1458 * that does not start on an ADB to reduce disconnects.
1459 */
1460 if (nic->device_type == XFRAME_II_DEVICE) {
1461 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1462 writeq(val64, &bar0->wreq_split_mask);
1463 }
1464
1465 /* Setting Link stability period to 64 ms */
1466 if (nic->device_type == XFRAME_II_DEVICE) {
1467 val64 = MISC_LINK_STABILITY_PRD(3);
1468 writeq(val64, &bar0->misc_control);
1469 }
1470
999 return SUCCESS; 1471 return SUCCESS;
1000} 1472}
1473#define LINK_UP_DOWN_INTERRUPT 1
1474#define MAC_RMAC_ERR_TIMER 2
1001 1475
1002/** 1476#if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1003 * en_dis_able_nic_intrs - Enable or Disable the interrupts 1477#define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
1478#else
1479int s2io_link_fault_indication(nic_t *nic)
1480{
1481 if (nic->device_type == XFRAME_II_DEVICE)
1482 return LINK_UP_DOWN_INTERRUPT;
1483 else
1484 return MAC_RMAC_ERR_TIMER;
1485}
1486#endif
1487
1488/**
1489 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1004 * @nic: device private variable, 1490 * @nic: device private variable,
1005 * @mask: A mask indicating which Intr block must be modified and, 1491 * @mask: A mask indicating which Intr block must be modified and,
1006 * @flag: A flag indicating whether to enable or disable the Intrs. 1492 * @flag: A flag indicating whether to enable or disable the Intrs.
1007 * Description: This function will either disable or enable the interrupts 1493 * Description: This function will either disable or enable the interrupts
1008 * depending on the flag argument. The mask argument can be used to 1494 * depending on the flag argument. The mask argument can be used to
1009 * enable/disable any Intr block. 1495 * enable/disable any Intr block.
1010 * Return Value: NONE. 1496 * Return Value: NONE.
1011 */ 1497 */
1012 1498
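As the kernel-doc above says, en_dis_able_nic_intrs() enables or disables whole interrupt blocks by clearing or setting their bits in general_int_mask (plus the block's own mask register). A minimal sketch of that read-modify-write step, assuming the driver's convention that a 0 bit means the interrupt is allowed; block_bits and the register pointer are placeholders:

/* Sketch: unmask (enable) or mask (disable) one interrupt block in the
 * general interrupt mask register; 0 = allowed, 1 = masked.
 */
static void set_block_intr_mask(void __iomem *general_int_mask,
				u64 block_bits, int enable)
{
	u64 mask = readq(general_int_mask);

	if (enable)
		mask &= ~block_bits;	/* clear bits: interrupts allowed */
	else
		mask |= block_bits;	/* set bits: interrupts silenced */

	writeq(mask, general_int_mask);
}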
@@ -1024,20 +1510,31 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1024 temp64 = readq(&bar0->general_int_mask); 1510 temp64 = readq(&bar0->general_int_mask);
1025 temp64 &= ~((u64) val64); 1511 temp64 &= ~((u64) val64);
1026 writeq(temp64, &bar0->general_int_mask); 1512 writeq(temp64, &bar0->general_int_mask);
1027 /* 1513 /*
 1028 * Disabled all PCIX, Flash, MDIO, IIC and GPIO 1514 * If this is a Hercules adapter, enable the GPIO link
 1029 * interrupts for now. 1515 * interrupt; otherwise keep all PCIX, Flash, MDIO, IIC and
 1030 * TODO 1516 * GPIO interrupts disabled for now.
 1517 * TODO
1031 */ 1518 */
1032 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); 1519 if (s2io_link_fault_indication(nic) ==
1033 /* 1520 LINK_UP_DOWN_INTERRUPT ) {
1521 temp64 = readq(&bar0->pic_int_mask);
1522 temp64 &= ~((u64) PIC_INT_GPIO);
1523 writeq(temp64, &bar0->pic_int_mask);
1524 temp64 = readq(&bar0->gpio_int_mask);
1525 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1526 writeq(temp64, &bar0->gpio_int_mask);
1527 } else {
1528 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1529 }
1530 /*
1034 * No MSI Support is available presently, so TTI and 1531 * No MSI Support is available presently, so TTI and
1035 * RTI interrupts are also disabled. 1532 * RTI interrupts are also disabled.
1036 */ 1533 */
1037 } else if (flag == DISABLE_INTRS) { 1534 } else if (flag == DISABLE_INTRS) {
1038 /* 1535 /*
1039 * Disable PIC Intrs in the general 1536 * Disable PIC Intrs in the general
1040 * intr mask register 1537 * intr mask register
1041 */ 1538 */
1042 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); 1539 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1043 temp64 = readq(&bar0->general_int_mask); 1540 temp64 = readq(&bar0->general_int_mask);
@@ -1055,27 +1552,27 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1055 temp64 = readq(&bar0->general_int_mask); 1552 temp64 = readq(&bar0->general_int_mask);
1056 temp64 &= ~((u64) val64); 1553 temp64 &= ~((u64) val64);
1057 writeq(temp64, &bar0->general_int_mask); 1554 writeq(temp64, &bar0->general_int_mask);
1058 /* 1555 /*
1059 * Keep all interrupts other than PFC interrupt 1556 * Keep all interrupts other than PFC interrupt
1060 * and PCC interrupt disabled in DMA level. 1557 * and PCC interrupt disabled in DMA level.
1061 */ 1558 */
1062 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M | 1559 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1063 TXDMA_PCC_INT_M); 1560 TXDMA_PCC_INT_M);
1064 writeq(val64, &bar0->txdma_int_mask); 1561 writeq(val64, &bar0->txdma_int_mask);
1065 /* 1562 /*
1066 * Enable only the MISC error 1 interrupt in PFC block 1563 * Enable only the MISC error 1 interrupt in PFC block
1067 */ 1564 */
1068 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1); 1565 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1069 writeq(val64, &bar0->pfc_err_mask); 1566 writeq(val64, &bar0->pfc_err_mask);
1070 /* 1567 /*
1071 * Enable only the FB_ECC error interrupt in PCC block 1568 * Enable only the FB_ECC error interrupt in PCC block
1072 */ 1569 */
1073 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR); 1570 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1074 writeq(val64, &bar0->pcc_err_mask); 1571 writeq(val64, &bar0->pcc_err_mask);
1075 } else if (flag == DISABLE_INTRS) { 1572 } else if (flag == DISABLE_INTRS) {
1076 /* 1573 /*
1077 * Disable TxDMA Intrs in the general intr mask 1574 * Disable TxDMA Intrs in the general intr mask
1078 * register 1575 * register
1079 */ 1576 */
1080 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask); 1577 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1081 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask); 1578 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
@@ -1093,15 +1590,15 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1093 temp64 = readq(&bar0->general_int_mask); 1590 temp64 = readq(&bar0->general_int_mask);
1094 temp64 &= ~((u64) val64); 1591 temp64 &= ~((u64) val64);
1095 writeq(temp64, &bar0->general_int_mask); 1592 writeq(temp64, &bar0->general_int_mask);
1096 /* 1593 /*
1097 * All RxDMA block interrupts are disabled for now 1594 * All RxDMA block interrupts are disabled for now
1098 * TODO 1595 * TODO
1099 */ 1596 */
1100 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask); 1597 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1101 } else if (flag == DISABLE_INTRS) { 1598 } else if (flag == DISABLE_INTRS) {
1102 /* 1599 /*
1103 * Disable RxDMA Intrs in the general intr mask 1600 * Disable RxDMA Intrs in the general intr mask
1104 * register 1601 * register
1105 */ 1602 */
1106 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask); 1603 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1107 temp64 = readq(&bar0->general_int_mask); 1604 temp64 = readq(&bar0->general_int_mask);
@@ -1118,22 +1615,13 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1118 temp64 = readq(&bar0->general_int_mask); 1615 temp64 = readq(&bar0->general_int_mask);
1119 temp64 &= ~((u64) val64); 1616 temp64 &= ~((u64) val64);
1120 writeq(temp64, &bar0->general_int_mask); 1617 writeq(temp64, &bar0->general_int_mask);
1121 /* 1618 /*
1122 * All MAC block error interrupts are disabled for now 1619 * All MAC block error interrupts are disabled for now
1123 * except the link status change interrupt.
1124 * TODO 1620 * TODO
1125 */ 1621 */
1126 val64 = MAC_INT_STATUS_RMAC_INT;
1127 temp64 = readq(&bar0->mac_int_mask);
1128 temp64 &= ~((u64) val64);
1129 writeq(temp64, &bar0->mac_int_mask);
1130
1131 val64 = readq(&bar0->mac_rmac_err_mask);
1132 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1133 writeq(val64, &bar0->mac_rmac_err_mask);
1134 } else if (flag == DISABLE_INTRS) { 1622 } else if (flag == DISABLE_INTRS) {
1135 /* 1623 /*
1136 * Disable MAC Intrs in the general intr mask register 1624 * Disable MAC Intrs in the general intr mask register
1137 */ 1625 */
1138 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask); 1626 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1139 writeq(DISABLE_ALL_INTRS, 1627 writeq(DISABLE_ALL_INTRS,
@@ -1152,14 +1640,14 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1152 temp64 = readq(&bar0->general_int_mask); 1640 temp64 = readq(&bar0->general_int_mask);
1153 temp64 &= ~((u64) val64); 1641 temp64 &= ~((u64) val64);
1154 writeq(temp64, &bar0->general_int_mask); 1642 writeq(temp64, &bar0->general_int_mask);
1155 /* 1643 /*
1156 * All XGXS block error interrupts are disabled for now 1644 * All XGXS block error interrupts are disabled for now
1157 * TODO 1645 * TODO
1158 */ 1646 */
1159 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask); 1647 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1160 } else if (flag == DISABLE_INTRS) { 1648 } else if (flag == DISABLE_INTRS) {
1161 /* 1649 /*
 1162 * Disable XGXS Intrs in the general intr mask register 1650 * Disable XGXS Intrs in the general intr mask register
1163 */ 1651 */
1164 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask); 1652 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1165 temp64 = readq(&bar0->general_int_mask); 1653 temp64 = readq(&bar0->general_int_mask);
@@ -1175,11 +1663,11 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1175 temp64 = readq(&bar0->general_int_mask); 1663 temp64 = readq(&bar0->general_int_mask);
1176 temp64 &= ~((u64) val64); 1664 temp64 &= ~((u64) val64);
1177 writeq(temp64, &bar0->general_int_mask); 1665 writeq(temp64, &bar0->general_int_mask);
1178 /* 1666 /*
1179 * All MC block error interrupts are disabled for now 1667 * Enable all MC Intrs.
1180 * TODO
1181 */ 1668 */
1182 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask); 1669 writeq(0x0, &bar0->mc_int_mask);
1670 writeq(0x0, &bar0->mc_err_mask);
1183 } else if (flag == DISABLE_INTRS) { 1671 } else if (flag == DISABLE_INTRS) {
1184 /* 1672 /*
1185 * Disable MC Intrs in the general intr mask register 1673 * Disable MC Intrs in the general intr mask register
@@ -1199,14 +1687,14 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1199 temp64 = readq(&bar0->general_int_mask); 1687 temp64 = readq(&bar0->general_int_mask);
1200 temp64 &= ~((u64) val64); 1688 temp64 &= ~((u64) val64);
1201 writeq(temp64, &bar0->general_int_mask); 1689 writeq(temp64, &bar0->general_int_mask);
1202 /* 1690 /*
1203 * Enable all the Tx side interrupts 1691 * Enable all the Tx side interrupts
1204 * writing 0 Enables all 64 TX interrupt levels 1692 * writing 0 Enables all 64 TX interrupt levels
1205 */ 1693 */
1206 writeq(0x0, &bar0->tx_traffic_mask); 1694 writeq(0x0, &bar0->tx_traffic_mask);
1207 } else if (flag == DISABLE_INTRS) { 1695 } else if (flag == DISABLE_INTRS) {
1208 /* 1696 /*
1209 * Disable Tx Traffic Intrs in the general intr mask 1697 * Disable Tx Traffic Intrs in the general intr mask
1210 * register. 1698 * register.
1211 */ 1699 */
1212 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask); 1700 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
@@ -1226,8 +1714,8 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1226 /* writing 0 Enables all 8 RX interrupt levels */ 1714 /* writing 0 Enables all 8 RX interrupt levels */
1227 writeq(0x0, &bar0->rx_traffic_mask); 1715 writeq(0x0, &bar0->rx_traffic_mask);
1228 } else if (flag == DISABLE_INTRS) { 1716 } else if (flag == DISABLE_INTRS) {
1229 /* 1717 /*
1230 * Disable Rx Traffic Intrs in the general intr mask 1718 * Disable Rx Traffic Intrs in the general intr mask
1231 * register. 1719 * register.
1232 */ 1720 */
1233 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask); 1721 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
@@ -1238,24 +1726,66 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1238 } 1726 }
1239} 1727}
1240 1728
1241/** 1729static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1242 * verify_xena_quiescence - Checks whether the H/W is ready 1730{
1731 int ret = 0;
1732
1733 if (flag == FALSE) {
1734 if ((!herc && (rev_id >= 4)) || herc) {
1735 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1736 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1737 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1738 ret = 1;
1739 }
 1740 } else {
1741 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1742 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1743 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1744 ret = 1;
1745 }
1746 }
1747 } else {
1748 if ((!herc && (rev_id >= 4)) || herc) {
1749 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1750 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1751 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1752 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1753 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1754 ret = 1;
1755 }
1756 } else {
1757 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1758 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1759 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1760 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1761 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1762 ret = 1;
1763 }
1764 }
1765 }
1766
1767 return ret;
1768}
1769/**
1770 * verify_xena_quiescence - Checks whether the H/W is ready
1243 * @val64 : Value read from adapter status register. 1771 * @val64 : Value read from adapter status register.
1244 * @flag : indicates if the adapter enable bit was ever written once 1772 * @flag : indicates if the adapter enable bit was ever written once
1245 * before. 1773 * before.
1246 * Description: Returns whether the H/W is ready to go or not. Depending 1774 * Description: Returns whether the H/W is ready to go or not. Depending
1247 * on whether adapter enable bit was written or not the comparison 1775 * on whether adapter enable bit was written or not the comparison
1248 * differs and the calling function passes the input argument flag to 1776 * differs and the calling function passes the input argument flag to
1249 * indicate this. 1777 * indicate this.
 1250 * Return: 1 If Xena is quiescent 1778 * Return: 1 If Xena is quiescent
 1251 * 0 If Xena is not quiescent 1779 * 0 If Xena is not quiescent
1252 */ 1780 */
1253 1781
1254static int verify_xena_quiescence(u64 val64, int flag) 1782static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1255{ 1783{
1256 int ret = 0; 1784 int ret = 0, herc;
1257 u64 tmp64 = ~((u64) val64); 1785 u64 tmp64 = ~((u64) val64);
1786 int rev_id = get_xena_rev_id(sp->pdev);
1258 1787
1788 herc = (sp->device_type == XFRAME_II_DEVICE);
1259 if (! 1789 if (!
1260 (tmp64 & 1790 (tmp64 &
1261 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY | 1791 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
@@ -1263,25 +1793,7 @@ static int verify_xena_quiescence(u64 val64, int flag)
1263 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY | 1793 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1264 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK | 1794 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1265 ADAPTER_STATUS_P_PLL_LOCK))) { 1795 ADAPTER_STATUS_P_PLL_LOCK))) {
1266 if (flag == FALSE) { 1796 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1267 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1268 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1269 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1270
1271 ret = 1;
1272
1273 }
1274 } else {
1275 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1276 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1277 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1278 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1279 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1280
1281 ret = 1;
1282
1283 }
1284 }
1285 } 1797 }
1286 1798
1287 return ret; 1799 return ret;
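verify_xena_quiescence() above first checks that every required ready/quiescent bit in the adapter status word is set (by inverting the word and masking), and only then lets check_prc_pcc_state() judge the RMAC PCC / RC PRC state per device revision. A reduced sketch of that first test, with ready_bits standing in for the or-ed ADAPTER_STATUS_* flags:

/* Sketch: true only if every bit in ready_bits is set in the adapter
 * status word; ~status has a 1 wherever a required bit is still 0.
 */
static int adapter_ready_bits_set(u64 adapter_status, u64 ready_bits)
{
	return !(~adapter_status & ready_bits);
}

A caller would then mirror the structure above with something like: if (adapter_ready_bits_set(val64, ready_bits)) ret = check_prc_pcc_state(val64, flag, rev_id, herc);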
@@ -1290,12 +1802,12 @@ static int verify_xena_quiescence(u64 val64, int flag)
1290/** 1802/**
1291 * fix_mac_address - Fix for Mac addr problem on Alpha platforms 1803 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
 1292 * @sp: Pointer to device specific structure 1804 * @sp: Pointer to device specific structure
1293 * Description : 1805 * Description :
1294 * New procedure to clear mac address reading problems on Alpha platforms 1806 * New procedure to clear mac address reading problems on Alpha platforms
1295 * 1807 *
1296 */ 1808 */
1297 1809
1298static void fix_mac_address(nic_t * sp) 1810void fix_mac_address(nic_t * sp)
1299{ 1811{
1300 XENA_dev_config_t __iomem *bar0 = sp->bar0; 1812 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1301 u64 val64; 1813 u64 val64;
@@ -1303,20 +1815,21 @@ static void fix_mac_address(nic_t * sp)
1303 1815
1304 while (fix_mac[i] != END_SIGN) { 1816 while (fix_mac[i] != END_SIGN) {
1305 writeq(fix_mac[i++], &bar0->gpio_control); 1817 writeq(fix_mac[i++], &bar0->gpio_control);
1818 udelay(10);
1306 val64 = readq(&bar0->gpio_control); 1819 val64 = readq(&bar0->gpio_control);
1307 } 1820 }
1308} 1821}
1309 1822
1310/** 1823/**
1311 * start_nic - Turns the device on 1824 * start_nic - Turns the device on
1312 * @nic : device private variable. 1825 * @nic : device private variable.
1313 * Description: 1826 * Description:
1314 * This function actually turns the device on. Before this function is 1827 * This function actually turns the device on. Before this function is
1315 * called,all Registers are configured from their reset states 1828 * called,all Registers are configured from their reset states
1316 * and shared memory is allocated but the NIC is still quiescent. On 1829 * and shared memory is allocated but the NIC is still quiescent. On
1317 * calling this function, the device interrupts are cleared and the NIC is 1830 * calling this function, the device interrupts are cleared and the NIC is
1318 * literally switched on by writing into the adapter control register. 1831 * literally switched on by writing into the adapter control register.
1319 * Return Value: 1832 * Return Value:
1320 * SUCCESS on success and -1 on failure. 1833 * SUCCESS on success and -1 on failure.
1321 */ 1834 */
1322 1835
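fix_mac_address() above now inserts a udelay(10) between each gpio_control write and the read-back that follows it. A hedged sketch of that write / settle / read-back-to-flush idiom; the sequence array and end marker are parameters here, standing in for the driver's fix_mac[] table and END_SIGN:

/* Sketch: replay a table of magic values into gpio_control, letting
 * each value settle and flushing the posted write with a read-back.
 */
static void replay_gpio_sequence(void __iomem *gpio_control,
				 const u64 *seq, u64 end_sign)
{
	int i = 0;

	while (seq[i] != end_sign) {
		writeq(seq[i++], gpio_control);
		udelay(10);			/* let the value settle */
		(void) readq(gpio_control);	/* flush the posted write */
	}
}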
@@ -1325,8 +1838,8 @@ static int start_nic(struct s2io_nic *nic)
1325 XENA_dev_config_t __iomem *bar0 = nic->bar0; 1838 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1326 struct net_device *dev = nic->dev; 1839 struct net_device *dev = nic->dev;
1327 register u64 val64 = 0; 1840 register u64 val64 = 0;
1328 u16 interruptible, i; 1841 u16 interruptible;
1329 u16 subid; 1842 u16 subid, i;
1330 mac_info_t *mac_control; 1843 mac_info_t *mac_control;
1331 struct config_param *config; 1844 struct config_param *config;
1332 1845
@@ -1335,10 +1848,12 @@ static int start_nic(struct s2io_nic *nic)
1335 1848
1336 /* PRC Initialization and configuration */ 1849 /* PRC Initialization and configuration */
1337 for (i = 0; i < config->rx_ring_num; i++) { 1850 for (i = 0; i < config->rx_ring_num; i++) {
1338 writeq((u64) nic->rx_blocks[i][0].block_dma_addr, 1851 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1339 &bar0->prc_rxd0_n[i]); 1852 &bar0->prc_rxd0_n[i]);
1340 1853
1341 val64 = readq(&bar0->prc_ctrl_n[i]); 1854 val64 = readq(&bar0->prc_ctrl_n[i]);
1855 if (nic->config.bimodal)
1856 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1342#ifndef CONFIG_2BUFF_MODE 1857#ifndef CONFIG_2BUFF_MODE
1343 val64 |= PRC_CTRL_RC_ENABLED; 1858 val64 |= PRC_CTRL_RC_ENABLED;
1344#else 1859#else
@@ -1354,7 +1869,7 @@ static int start_nic(struct s2io_nic *nic)
1354 writeq(val64, &bar0->rx_pa_cfg); 1869 writeq(val64, &bar0->rx_pa_cfg);
1355#endif 1870#endif
1356 1871
1357 /* 1872 /*
1358 * Enabling MC-RLDRAM. After enabling the device, we timeout 1873 * Enabling MC-RLDRAM. After enabling the device, we timeout
1359 * for around 100ms, which is approximately the time required 1874 * for around 100ms, which is approximately the time required
1360 * for the device to be ready for operation. 1875 * for the device to be ready for operation.
@@ -1364,27 +1879,27 @@ static int start_nic(struct s2io_nic *nic)
1364 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); 1879 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1365 val64 = readq(&bar0->mc_rldram_mrs); 1880 val64 = readq(&bar0->mc_rldram_mrs);
1366 1881
1367 msleep(100); /* Delay by around 100 ms. */ 1882 msleep(100); /* Delay by around 100 ms. */
1368 1883
1369 /* Enabling ECC Protection. */ 1884 /* Enabling ECC Protection. */
1370 val64 = readq(&bar0->adapter_control); 1885 val64 = readq(&bar0->adapter_control);
1371 val64 &= ~ADAPTER_ECC_EN; 1886 val64 &= ~ADAPTER_ECC_EN;
1372 writeq(val64, &bar0->adapter_control); 1887 writeq(val64, &bar0->adapter_control);
1373 1888
1374 /* 1889 /*
1375 * Clearing any possible Link state change interrupts that 1890 * Clearing any possible Link state change interrupts that
1376 * could have popped up just before Enabling the card. 1891 * could have popped up just before Enabling the card.
1377 */ 1892 */
1378 val64 = readq(&bar0->mac_rmac_err_reg); 1893 val64 = readq(&bar0->mac_rmac_err_reg);
1379 if (val64) 1894 if (val64)
1380 writeq(val64, &bar0->mac_rmac_err_reg); 1895 writeq(val64, &bar0->mac_rmac_err_reg);
1381 1896
1382 /* 1897 /*
 1383 * Verify that the device is ready to be enabled; if so, enable 1898 * Verify that the device is ready to be enabled; if so, enable
1384 * it. 1899 * it.
1385 */ 1900 */
1386 val64 = readq(&bar0->adapter_status); 1901 val64 = readq(&bar0->adapter_status);
1387 if (!verify_xena_quiescence(val64, nic->device_enabled_once)) { 1902 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1388 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name); 1903 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1389 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n", 1904 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1390 (unsigned long long) val64); 1905 (unsigned long long) val64);
@@ -1392,16 +1907,18 @@ static int start_nic(struct s2io_nic *nic)
1392 } 1907 }
1393 1908
1394 /* Enable select interrupts */ 1909 /* Enable select interrupts */
1395 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR | 1910 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
1396 RX_MAC_INTR; 1911 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1912 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1913
1397 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS); 1914 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1398 1915
1399 /* 1916 /*
1400 * With some switches, link might be already up at this point. 1917 * With some switches, link might be already up at this point.
1401 * Because of this weird behavior, when we enable laser, 1918 * Because of this weird behavior, when we enable laser,
1402 * we may not get link. We need to handle this. We cannot 1919 * we may not get link. We need to handle this. We cannot
1403 * figure out which switch is misbehaving. So we are forced to 1920 * figure out which switch is misbehaving. So we are forced to
1404 * make a global change. 1921 * make a global change.
1405 */ 1922 */
1406 1923
1407 /* Enabling Laser. */ 1924 /* Enabling Laser. */
@@ -1411,44 +1928,30 @@ static int start_nic(struct s2io_nic *nic)
1411 1928
1412 /* SXE-002: Initialize link and activity LED */ 1929 /* SXE-002: Initialize link and activity LED */
1413 subid = nic->pdev->subsystem_device; 1930 subid = nic->pdev->subsystem_device;
1414 if ((subid & 0xFF) >= 0x07) { 1931 if (((subid & 0xFF) >= 0x07) &&
1932 (nic->device_type == XFRAME_I_DEVICE)) {
1415 val64 = readq(&bar0->gpio_control); 1933 val64 = readq(&bar0->gpio_control);
1416 val64 |= 0x0000800000000000ULL; 1934 val64 |= 0x0000800000000000ULL;
1417 writeq(val64, &bar0->gpio_control); 1935 writeq(val64, &bar0->gpio_control);
1418 val64 = 0x0411040400000000ULL; 1936 val64 = 0x0411040400000000ULL;
1419 writeq(val64, (void __iomem *) bar0 + 0x2700); 1937 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1420 } 1938 }
1421 1939
1422 /* 1940 /*
1423 * Don't see link state interrupts on certain switches, so 1941 * Don't see link state interrupts on certain switches, so
1424 * directly scheduling a link state task from here. 1942 * directly scheduling a link state task from here.
1425 */ 1943 */
1426 schedule_work(&nic->set_link_task); 1944 schedule_work(&nic->set_link_task);
1427 1945
1428 /*
1429 * Here we are performing soft reset on XGXS to
1430 * force link down. Since link is already up, we will get
1431 * link state change interrupt after this reset
1432 */
1433 SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
1434 val64 = readq(&bar0->dtx_control);
1435 udelay(50);
1436 SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
1437 val64 = readq(&bar0->dtx_control);
1438 udelay(50);
1439 SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
1440 val64 = readq(&bar0->dtx_control);
1441 udelay(50);
1442
1443 return SUCCESS; 1946 return SUCCESS;
1444} 1947}
1445 1948
1446/** 1949/**
1447 * free_tx_buffers - Free all queued Tx buffers 1950 * free_tx_buffers - Free all queued Tx buffers
1448 * @nic : device private variable. 1951 * @nic : device private variable.
1449 * Description: 1952 * Description:
1450 * Free all queued Tx buffers. 1953 * Free all queued Tx buffers.
1451 * Return Value: void 1954 * Return Value: void
1452*/ 1955*/
1453 1956
1454static void free_tx_buffers(struct s2io_nic *nic) 1957static void free_tx_buffers(struct s2io_nic *nic)
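The rewritten cleanup below unmaps each skb's linear area with pci_unmap_single() and then walks skb_shinfo(skb)->frags, unmapping one page per fragment descriptor, before freeing the skb and clearing the descriptor chain. A compact sketch of just the unmapping step, using its own fragment index so the caller's loop counters are untouched; TxD_t and Buffer_Pointer follow the driver's descriptor layout:

/* Sketch: undo the DMA mappings recorded in a Tx descriptor chain
 * (one descriptor for the linear data, then one per page fragment).
 */
static void unmap_tx_skb(struct pci_dev *pdev, TxD_t *txdp,
			 struct sk_buff *skb)
{
	int frg, frg_cnt = skb_shinfo(skb)->nr_frags;

	pci_unmap_single(pdev, (dma_addr_t) txdp->Buffer_Pointer,
			 skb->len - skb->data_len, PCI_DMA_TODEVICE);

	for (frg = 0; frg < frg_cnt; frg++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frg];

		txdp++;			/* fragment descriptors follow */
		pci_unmap_page(pdev, (dma_addr_t) txdp->Buffer_Pointer,
			       frag->size, PCI_DMA_TODEVICE);
	}
}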
@@ -1459,39 +1962,61 @@ static void free_tx_buffers(struct s2io_nic *nic)
1459 int i, j; 1962 int i, j;
1460 mac_info_t *mac_control; 1963 mac_info_t *mac_control;
1461 struct config_param *config; 1964 struct config_param *config;
1462 int cnt = 0; 1965 int cnt = 0, frg_cnt;
1463 1966
1464 mac_control = &nic->mac_control; 1967 mac_control = &nic->mac_control;
1465 config = &nic->config; 1968 config = &nic->config;
1466 1969
1467 for (i = 0; i < config->tx_fifo_num; i++) { 1970 for (i = 0; i < config->tx_fifo_num; i++) {
1468 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) { 1971 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1469 txdp = (TxD_t *) nic->list_info[i][j]. 1972 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1470 list_virt_addr; 1973 list_virt_addr;
1471 skb = 1974 skb =
1472 (struct sk_buff *) ((unsigned long) txdp-> 1975 (struct sk_buff *) ((unsigned long) txdp->
1473 Host_Control); 1976 Host_Control);
1474 if (skb == NULL) { 1977 if (skb == NULL) {
1475 memset(txdp, 0, sizeof(TxD_t)); 1978 memset(txdp, 0, sizeof(TxD_t) *
1979 config->max_txds);
1476 continue; 1980 continue;
1477 } 1981 }
1982 frg_cnt = skb_shinfo(skb)->nr_frags;
1983 pci_unmap_single(nic->pdev, (dma_addr_t)
1984 txdp->Buffer_Pointer,
1985 skb->len - skb->data_len,
1986 PCI_DMA_TODEVICE);
1987 if (frg_cnt) {
1988 TxD_t *temp;
1989 temp = txdp;
1990 txdp++;
1991 for (j = 0; j < frg_cnt; j++, txdp++) {
1992 skb_frag_t *frag =
1993 &skb_shinfo(skb)->frags[j];
1994 pci_unmap_page(nic->pdev,
1995 (dma_addr_t)
1996 txdp->
1997 Buffer_Pointer,
1998 frag->size,
1999 PCI_DMA_TODEVICE);
2000 }
2001 txdp = temp;
2002 }
1478 dev_kfree_skb(skb); 2003 dev_kfree_skb(skb);
1479 memset(txdp, 0, sizeof(TxD_t)); 2004 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1480 cnt++; 2005 cnt++;
1481 } 2006 }
1482 DBG_PRINT(INTR_DBG, 2007 DBG_PRINT(INTR_DBG,
1483 "%s:forcibly freeing %d skbs on FIFO%d\n", 2008 "%s:forcibly freeing %d skbs on FIFO%d\n",
1484 dev->name, cnt, i); 2009 dev->name, cnt, i);
1485 mac_control->tx_curr_get_info[i].offset = 0; 2010 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1486 mac_control->tx_curr_put_info[i].offset = 0; 2011 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1487 } 2012 }
1488} 2013}
1489 2014
1490/** 2015/**
1491 * stop_nic - To stop the nic 2016 * stop_nic - To stop the nic
1492 * @nic ; device private variable. 2017 * @nic ; device private variable.
1493 * Description: 2018 * Description:
1494 * This function does exactly the opposite of what the start_nic() 2019 * This function does exactly the opposite of what the start_nic()
1495 * function does. This function is called to stop the device. 2020 * function does. This function is called to stop the device.
1496 * Return Value: 2021 * Return Value:
1497 * void. 2022 * void.
@@ -1509,8 +2034,9 @@ static void stop_nic(struct s2io_nic *nic)
1509 config = &nic->config; 2034 config = &nic->config;
1510 2035
1511 /* Disable all interrupts */ 2036 /* Disable all interrupts */
1512 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR | 2037 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
1513 RX_MAC_INTR; 2038 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2039 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1514 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS); 2040 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1515 2041
1516 /* Disable PRCs */ 2042 /* Disable PRCs */
@@ -1521,11 +2047,11 @@ static void stop_nic(struct s2io_nic *nic)
1521 } 2047 }
1522} 2048}
1523 2049
1524/** 2050/**
1525 * fill_rx_buffers - Allocates the Rx side skbs 2051 * fill_rx_buffers - Allocates the Rx side skbs
1526 * @nic: device private variable 2052 * @nic: device private variable
1527 * @ring_no: ring number 2053 * @ring_no: ring number
1528 * Description: 2054 * Description:
1529 * The function allocates Rx side skbs and puts the physical 2055 * The function allocates Rx side skbs and puts the physical
1530 * address of these buffers into the RxD buffer pointers, so that the NIC 2056 * address of these buffers into the RxD buffer pointers, so that the NIC
1531 * can DMA the received frame into these locations. 2057 * can DMA the received frame into these locations.
@@ -1533,8 +2059,8 @@ static void stop_nic(struct s2io_nic *nic)
1533 * 1. single buffer, 2059 * 1. single buffer,
1534 * 2. three buffer and 2060 * 2. three buffer and
1535 * 3. Five buffer modes. 2061 * 3. Five buffer modes.
1536 * Each mode defines how many fragments the received frame will be split 2062 * Each mode defines how many fragments the received frame will be split
1537 * up into by the NIC. The frame is split into L3 header, L4 Header, 2063 * up into by the NIC. The frame is split into L3 header, L4 Header,
1538 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself 2064 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1539 * is split into 3 fragments. As of now only single buffer mode is 2065 * is split into 3 fragments. As of now only single buffer mode is
1540 * supported. 2066 * supported.
@@ -1542,7 +2068,7 @@ static void stop_nic(struct s2io_nic *nic)
1542 * SUCCESS on success or an appropriate -ve value on failure. 2068 * SUCCESS on success or an appropriate -ve value on failure.
1543 */ 2069 */
1544 2070
1545static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) 2071int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1546{ 2072{
1547 struct net_device *dev = nic->dev; 2073 struct net_device *dev = nic->dev;
1548 struct sk_buff *skb; 2074 struct sk_buff *skb;
@@ -1550,34 +2076,35 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1550 int off, off1, size, block_no, block_no1; 2076 int off, off1, size, block_no, block_no1;
1551 int offset, offset1; 2077 int offset, offset1;
1552 u32 alloc_tab = 0; 2078 u32 alloc_tab = 0;
1553 u32 alloc_cnt = nic->pkt_cnt[ring_no] - 2079 u32 alloc_cnt;
1554 atomic_read(&nic->rx_bufs_left[ring_no]);
1555 mac_info_t *mac_control; 2080 mac_info_t *mac_control;
1556 struct config_param *config; 2081 struct config_param *config;
1557#ifdef CONFIG_2BUFF_MODE 2082#ifdef CONFIG_2BUFF_MODE
1558 RxD_t *rxdpnext; 2083 RxD_t *rxdpnext;
1559 int nextblk; 2084 int nextblk;
1560 unsigned long tmp; 2085 u64 tmp;
1561 buffAdd_t *ba; 2086 buffAdd_t *ba;
1562 dma_addr_t rxdpphys; 2087 dma_addr_t rxdpphys;
1563#endif 2088#endif
1564#ifndef CONFIG_S2IO_NAPI 2089#ifndef CONFIG_S2IO_NAPI
1565 unsigned long flags; 2090 unsigned long flags;
1566#endif 2091#endif
2092 RxD_t *first_rxdp = NULL;
1567 2093
1568 mac_control = &nic->mac_control; 2094 mac_control = &nic->mac_control;
1569 config = &nic->config; 2095 config = &nic->config;
1570 2096 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2097 atomic_read(&nic->rx_bufs_left[ring_no]);
1571 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + 2098 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1572 HEADER_802_2_SIZE + HEADER_SNAP_SIZE; 2099 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1573 2100
1574 while (alloc_tab < alloc_cnt) { 2101 while (alloc_tab < alloc_cnt) {
1575 block_no = mac_control->rx_curr_put_info[ring_no]. 2102 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1576 block_index; 2103 block_index;
1577 block_no1 = mac_control->rx_curr_get_info[ring_no]. 2104 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1578 block_index; 2105 block_index;
1579 off = mac_control->rx_curr_put_info[ring_no].offset; 2106 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1580 off1 = mac_control->rx_curr_get_info[ring_no].offset; 2107 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1581#ifndef CONFIG_2BUFF_MODE 2108#ifndef CONFIG_2BUFF_MODE
1582 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off; 2109 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1583 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1; 2110 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
@@ -1586,7 +2113,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1586 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1; 2113 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1587#endif 2114#endif
1588 2115
1589 rxdp = nic->rx_blocks[ring_no][block_no]. 2116 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1590 block_virt_addr + off; 2117 block_virt_addr + off;
1591 if ((offset == offset1) && (rxdp->Host_Control)) { 2118 if ((offset == offset1) && (rxdp->Host_Control)) {
1592 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name); 2119 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
@@ -1595,15 +2122,15 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1595 } 2122 }
1596#ifndef CONFIG_2BUFF_MODE 2123#ifndef CONFIG_2BUFF_MODE
1597 if (rxdp->Control_1 == END_OF_BLOCK) { 2124 if (rxdp->Control_1 == END_OF_BLOCK) {
1598 mac_control->rx_curr_put_info[ring_no]. 2125 mac_control->rings[ring_no].rx_curr_put_info.
1599 block_index++; 2126 block_index++;
1600 mac_control->rx_curr_put_info[ring_no]. 2127 mac_control->rings[ring_no].rx_curr_put_info.
1601 block_index %= nic->block_count[ring_no]; 2128 block_index %= mac_control->rings[ring_no].block_count;
1602 block_no = mac_control->rx_curr_put_info 2129 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1603 [ring_no].block_index; 2130 block_index;
1604 off++; 2131 off++;
1605 off %= (MAX_RXDS_PER_BLOCK + 1); 2132 off %= (MAX_RXDS_PER_BLOCK + 1);
1606 mac_control->rx_curr_put_info[ring_no].offset = 2133 mac_control->rings[ring_no].rx_curr_put_info.offset =
1607 off; 2134 off;
1608 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2); 2135 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1609 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2136 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
@@ -1611,30 +2138,30 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1611 } 2138 }
1612#ifndef CONFIG_S2IO_NAPI 2139#ifndef CONFIG_S2IO_NAPI
1613 spin_lock_irqsave(&nic->put_lock, flags); 2140 spin_lock_irqsave(&nic->put_lock, flags);
1614 nic->put_pos[ring_no] = 2141 mac_control->rings[ring_no].put_pos =
1615 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off; 2142 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1616 spin_unlock_irqrestore(&nic->put_lock, flags); 2143 spin_unlock_irqrestore(&nic->put_lock, flags);
1617#endif 2144#endif
1618#else 2145#else
1619 if (rxdp->Host_Control == END_OF_BLOCK) { 2146 if (rxdp->Host_Control == END_OF_BLOCK) {
1620 mac_control->rx_curr_put_info[ring_no]. 2147 mac_control->rings[ring_no].rx_curr_put_info.
1621 block_index++; 2148 block_index++;
1622 mac_control->rx_curr_put_info[ring_no]. 2149 mac_control->rings[ring_no].rx_curr_put_info.block_index
1623 block_index %= nic->block_count[ring_no]; 2150 %= mac_control->rings[ring_no].block_count;
1624 block_no = mac_control->rx_curr_put_info 2151 block_no = mac_control->rings[ring_no].rx_curr_put_info
1625 [ring_no].block_index; 2152 .block_index;
1626 off = 0; 2153 off = 0;
1627 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n", 2154 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1628 dev->name, block_no, 2155 dev->name, block_no,
1629 (unsigned long long) rxdp->Control_1); 2156 (unsigned long long) rxdp->Control_1);
1630 mac_control->rx_curr_put_info[ring_no].offset = 2157 mac_control->rings[ring_no].rx_curr_put_info.offset =
1631 off; 2158 off;
1632 rxdp = nic->rx_blocks[ring_no][block_no]. 2159 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1633 block_virt_addr; 2160 block_virt_addr;
1634 } 2161 }
1635#ifndef CONFIG_S2IO_NAPI 2162#ifndef CONFIG_S2IO_NAPI
1636 spin_lock_irqsave(&nic->put_lock, flags); 2163 spin_lock_irqsave(&nic->put_lock, flags);
1637 nic->put_pos[ring_no] = (block_no * 2164 mac_control->rings[ring_no].put_pos = (block_no *
1638 (MAX_RXDS_PER_BLOCK + 1)) + off; 2165 (MAX_RXDS_PER_BLOCK + 1)) + off;
1639 spin_unlock_irqrestore(&nic->put_lock, flags); 2166 spin_unlock_irqrestore(&nic->put_lock, flags);
1640#endif 2167#endif
@@ -1646,27 +2173,27 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1646 if (rxdp->Control_2 & BIT(0)) 2173 if (rxdp->Control_2 & BIT(0))
1647#endif 2174#endif
1648 { 2175 {
1649 mac_control->rx_curr_put_info[ring_no]. 2176 mac_control->rings[ring_no].rx_curr_put_info.
1650 offset = off; 2177 offset = off;
1651 goto end; 2178 goto end;
1652 } 2179 }
1653#ifdef CONFIG_2BUFF_MODE 2180#ifdef CONFIG_2BUFF_MODE
1654 /* 2181 /*
1655 * RxDs Spanning cache lines will be replenished only 2182 * RxDs Spanning cache lines will be replenished only
1656 * if the succeeding RxD is also owned by Host. It 2183 * if the succeeding RxD is also owned by Host. It
1657 * will always be the ((8*i)+3) and ((8*i)+6) 2184 * will always be the ((8*i)+3) and ((8*i)+6)
1658 * descriptors for the 48 byte descriptor. The offending 2185 * descriptors for the 48 byte descriptor. The offending
 1659 * descriptor is of course the 3rd descriptor. 2186 * descriptor is of course the 3rd descriptor.
1660 */ 2187 */
1661 rxdpphys = nic->rx_blocks[ring_no][block_no]. 2188 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1662 block_dma_addr + (off * sizeof(RxD_t)); 2189 block_dma_addr + (off * sizeof(RxD_t));
1663 if (((u64) (rxdpphys)) % 128 > 80) { 2190 if (((u64) (rxdpphys)) % 128 > 80) {
1664 rxdpnext = nic->rx_blocks[ring_no][block_no]. 2191 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1665 block_virt_addr + (off + 1); 2192 block_virt_addr + (off + 1);
1666 if (rxdpnext->Host_Control == END_OF_BLOCK) { 2193 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1667 nextblk = (block_no + 1) % 2194 nextblk = (block_no + 1) %
1668 (nic->block_count[ring_no]); 2195 (mac_control->rings[ring_no].block_count);
1669 rxdpnext = nic->rx_blocks[ring_no] 2196 rxdpnext = mac_control->rings[ring_no].rx_blocks
1670 [nextblk].block_virt_addr; 2197 [nextblk].block_virt_addr;
1671 } 2198 }
1672 if (rxdpnext->Control_2 & BIT(0)) 2199 if (rxdpnext->Control_2 & BIT(0))
@@ -1682,6 +2209,10 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1682 if (!skb) { 2209 if (!skb) {
1683 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name); 2210 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1684 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n"); 2211 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2212 if (first_rxdp) {
2213 wmb();
2214 first_rxdp->Control_1 |= RXD_OWN_XENA;
2215 }
1685 return -ENOMEM; 2216 return -ENOMEM;
1686 } 2217 }
1687#ifndef CONFIG_2BUFF_MODE 2218#ifndef CONFIG_2BUFF_MODE
@@ -1692,12 +2223,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1692 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE); 2223 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1693 rxdp->Control_2 |= SET_BUFFER0_SIZE(size); 2224 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1694 rxdp->Host_Control = (unsigned long) (skb); 2225 rxdp->Host_Control = (unsigned long) (skb);
1695 rxdp->Control_1 |= RXD_OWN_XENA; 2226 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2227 rxdp->Control_1 |= RXD_OWN_XENA;
1696 off++; 2228 off++;
1697 off %= (MAX_RXDS_PER_BLOCK + 1); 2229 off %= (MAX_RXDS_PER_BLOCK + 1);
1698 mac_control->rx_curr_put_info[ring_no].offset = off; 2230 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1699#else 2231#else
1700 ba = &nic->ba[ring_no][block_no][off]; 2232 ba = &mac_control->rings[ring_no].ba[block_no][off];
1701 skb_reserve(skb, BUF0_LEN); 2233 skb_reserve(skb, BUF0_LEN);
1702 tmp = ((unsigned long) skb->data & ALIGN_SIZE); 2234 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1703 if (tmp) 2235 if (tmp)
@@ -1719,22 +2251,41 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1719 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */ 2251 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1720 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */ 2252 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1721 rxdp->Host_Control = (u64) ((unsigned long) (skb)); 2253 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1722 rxdp->Control_1 |= RXD_OWN_XENA; 2254 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2255 rxdp->Control_1 |= RXD_OWN_XENA;
1723 off++; 2256 off++;
1724 mac_control->rx_curr_put_info[ring_no].offset = off; 2257 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1725#endif 2258#endif
2259 rxdp->Control_2 |= SET_RXD_MARKER;
2260
2261 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2262 if (first_rxdp) {
2263 wmb();
2264 first_rxdp->Control_1 |= RXD_OWN_XENA;
2265 }
2266 first_rxdp = rxdp;
2267 }
1726 atomic_inc(&nic->rx_bufs_left[ring_no]); 2268 atomic_inc(&nic->rx_bufs_left[ring_no]);
1727 alloc_tab++; 2269 alloc_tab++;
1728 } 2270 }
1729 2271
1730 end: 2272 end:
2273 /* Transfer ownership of first descriptor to adapter just before
2274 * exiting. Before that, use memory barrier so that ownership
2275 * and other fields are seen by adapter correctly.
2276 */
2277 if (first_rxdp) {
2278 wmb();
2279 first_rxdp->Control_1 |= RXD_OWN_XENA;
2280 }
2281
1731 return SUCCESS; 2282 return SUCCESS;
1732} 2283}
1733 2284
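The replenish loop above defers ownership hand-over: each batch of newly filled RxDs is published by setting RXD_OWN_XENA on the batch's first descriptor only after a wmb(), so the adapter (which consumes the ring in order) never sees a half-initialised descriptor. A minimal sketch of that publish-last idiom; the batch array is an illustration, not the driver's actual data structure:

/* Sketch: fill a batch of Rx descriptors, then hand the batch to the
 * adapter by setting OWN on the first descriptor last, behind wmb().
 */
static void publish_rxd_batch(RxD_t **batch, int count)
{
	int i;

	for (i = 1; i < count; i++)
		batch[i]->Control_1 |= RXD_OWN_XENA;	/* gated by batch[0] */

	wmb();			/* descriptor contents visible first ... */
	batch[0]->Control_1 |= RXD_OWN_XENA;	/* ... then the adapter may go */
}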
1734/** 2285/**
1735 * free_rx_buffers - Frees all Rx buffers 2286 * free_rx_buffers - Frees all Rx buffers
1736 * @sp: device private variable. 2287 * @sp: device private variable.
1737 * Description: 2288 * Description:
1738 * This function will free all Rx buffers allocated by host. 2289 * This function will free all Rx buffers allocated by host.
1739 * Return Value: 2290 * Return Value:
1740 * NONE. 2291 * NONE.
@@ -1758,7 +2309,8 @@ static void free_rx_buffers(struct s2io_nic *sp)
1758 for (i = 0; i < config->rx_ring_num; i++) { 2309 for (i = 0; i < config->rx_ring_num; i++) {
1759 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) { 2310 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
1760 off = j % (MAX_RXDS_PER_BLOCK + 1); 2311 off = j % (MAX_RXDS_PER_BLOCK + 1);
1761 rxdp = sp->rx_blocks[i][blk].block_virt_addr + off; 2312 rxdp = mac_control->rings[i].rx_blocks[blk].
2313 block_virt_addr + off;
1762 2314
1763#ifndef CONFIG_2BUFF_MODE 2315#ifndef CONFIG_2BUFF_MODE
1764 if (rxdp->Control_1 == END_OF_BLOCK) { 2316 if (rxdp->Control_1 == END_OF_BLOCK) {
@@ -1793,7 +2345,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
1793 HEADER_SNAP_SIZE, 2345 HEADER_SNAP_SIZE,
1794 PCI_DMA_FROMDEVICE); 2346 PCI_DMA_FROMDEVICE);
1795#else 2347#else
1796 ba = &sp->ba[i][blk][off]; 2348 ba = &mac_control->rings[i].ba[blk][off];
1797 pci_unmap_single(sp->pdev, (dma_addr_t) 2349 pci_unmap_single(sp->pdev, (dma_addr_t)
1798 rxdp->Buffer0_ptr, 2350 rxdp->Buffer0_ptr,
1799 BUF0_LEN, 2351 BUF0_LEN,
@@ -1813,10 +2365,10 @@ static void free_rx_buffers(struct s2io_nic *sp)
1813 } 2365 }
1814 memset(rxdp, 0, sizeof(RxD_t)); 2366 memset(rxdp, 0, sizeof(RxD_t));
1815 } 2367 }
1816 mac_control->rx_curr_put_info[i].block_index = 0; 2368 mac_control->rings[i].rx_curr_put_info.block_index = 0;
1817 mac_control->rx_curr_get_info[i].block_index = 0; 2369 mac_control->rings[i].rx_curr_get_info.block_index = 0;
1818 mac_control->rx_curr_put_info[i].offset = 0; 2370 mac_control->rings[i].rx_curr_put_info.offset = 0;
1819 mac_control->rx_curr_get_info[i].offset = 0; 2371 mac_control->rings[i].rx_curr_get_info.offset = 0;
1820 atomic_set(&sp->rx_bufs_left[i], 0); 2372 atomic_set(&sp->rx_bufs_left[i], 0);
1821 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n", 2373 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
1822 dev->name, buf_cnt, i); 2374 dev->name, buf_cnt, i);
@@ -1826,7 +2378,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
1826/** 2378/**
1827 * s2io_poll - Rx interrupt handler for NAPI support 2379 * s2io_poll - Rx interrupt handler for NAPI support
1828 * @dev : pointer to the device structure. 2380 * @dev : pointer to the device structure.
1829 * @budget : The number of packets that were budgeted to be processed 2381 * @budget : The number of packets that were budgeted to be processed
 1830 * during one pass through the 'Poll' function. 2382 * during one pass through the 'Poll' function.
1831 * Description: 2383 * Description:
1832 * Comes into picture only if NAPI support has been incorporated. It does 2384 * Comes into picture only if NAPI support has been incorporated. It does
@@ -1836,160 +2388,36 @@ static void free_rx_buffers(struct s2io_nic *sp)
1836 * 0 on success and 1 if there are No Rx packets to be processed. 2388 * 0 on success and 1 if there are No Rx packets to be processed.
1837 */ 2389 */
1838 2390
1839#ifdef CONFIG_S2IO_NAPI 2391#if defined(CONFIG_S2IO_NAPI)
1840static int s2io_poll(struct net_device *dev, int *budget) 2392static int s2io_poll(struct net_device *dev, int *budget)
1841{ 2393{
1842 nic_t *nic = dev->priv; 2394 nic_t *nic = dev->priv;
1843 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2395 int pkt_cnt = 0, org_pkts_to_process;
1844 int pkts_to_process = *budget, pkt_cnt = 0;
1845 register u64 val64 = 0;
1846 rx_curr_get_info_t get_info, put_info;
1847 int i, get_block, put_block, get_offset, put_offset, ring_bufs;
1848#ifndef CONFIG_2BUFF_MODE
1849 u16 val16, cksum;
1850#endif
1851 struct sk_buff *skb;
1852 RxD_t *rxdp;
1853 mac_info_t *mac_control; 2396 mac_info_t *mac_control;
1854 struct config_param *config; 2397 struct config_param *config;
1855#ifdef CONFIG_2BUFF_MODE 2398 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
1856 buffAdd_t *ba; 2399 u64 val64;
1857#endif 2400 int i;
1858 2401
2402 atomic_inc(&nic->isr_cnt);
1859 mac_control = &nic->mac_control; 2403 mac_control = &nic->mac_control;
1860 config = &nic->config; 2404 config = &nic->config;
1861 2405
1862 if (pkts_to_process > dev->quota) 2406 nic->pkts_to_process = *budget;
1863 pkts_to_process = dev->quota; 2407 if (nic->pkts_to_process > dev->quota)
2408 nic->pkts_to_process = dev->quota;
2409 org_pkts_to_process = nic->pkts_to_process;
1864 2410
1865 val64 = readq(&bar0->rx_traffic_int); 2411 val64 = readq(&bar0->rx_traffic_int);
1866 writeq(val64, &bar0->rx_traffic_int); 2412 writeq(val64, &bar0->rx_traffic_int);
1867 2413
1868 for (i = 0; i < config->rx_ring_num; i++) { 2414 for (i = 0; i < config->rx_ring_num; i++) {
1869 get_info = mac_control->rx_curr_get_info[i]; 2415 rx_intr_handler(&mac_control->rings[i]);
1870 get_block = get_info.block_index; 2416 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
1871 put_info = mac_control->rx_curr_put_info[i]; 2417 if (!nic->pkts_to_process) {
1872 put_block = put_info.block_index; 2418 /* Quota for the current iteration has been met */
1873 ring_bufs = config->rx_cfg[i].num_rxd; 2419 goto no_rx;
1874 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
1875 get_info.offset;
1876#ifndef CONFIG_2BUFF_MODE
1877 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1878 get_info.offset;
1879 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1880 put_info.offset;
1881 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1882 (((get_offset + 1) % ring_bufs) != put_offset)) {
1883 if (--pkts_to_process < 0) {
1884 goto no_rx;
1885 }
1886 if (rxdp->Control_1 == END_OF_BLOCK) {
1887 rxdp =
1888 (RxD_t *) ((unsigned long) rxdp->
1889 Control_2);
1890 get_info.offset++;
1891 get_info.offset %=
1892 (MAX_RXDS_PER_BLOCK + 1);
1893 get_block++;
1894 get_block %= nic->block_count[i];
1895 mac_control->rx_curr_get_info[i].
1896 offset = get_info.offset;
1897 mac_control->rx_curr_get_info[i].
1898 block_index = get_block;
1899 continue;
1900 }
1901 get_offset =
1902 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1903 get_info.offset;
1904 skb =
1905 (struct sk_buff *) ((unsigned long) rxdp->
1906 Host_Control);
1907 if (skb == NULL) {
1908 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1909 dev->name);
1910 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1911 goto no_rx;
1912 }
1913 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
1914 val16 = (u16) (val64 >> 48);
1915 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
1916 pci_unmap_single(nic->pdev, (dma_addr_t)
1917 rxdp->Buffer0_ptr,
1918 dev->mtu +
1919 HEADER_ETHERNET_II_802_3_SIZE +
1920 HEADER_802_2_SIZE +
1921 HEADER_SNAP_SIZE,
1922 PCI_DMA_FROMDEVICE);
1923 rx_osm_handler(nic, val16, rxdp, i);
1924 pkt_cnt++;
1925 get_info.offset++;
1926 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
1927 rxdp =
1928 nic->rx_blocks[i][get_block].block_virt_addr +
1929 get_info.offset;
1930 mac_control->rx_curr_get_info[i].offset =
1931 get_info.offset;
1932 } 2420 }
1933#else
1934 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1935 get_info.offset;
1936 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1937 put_info.offset;
1938 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1939 !(rxdp->Control_2 & BIT(0))) &&
1940 (((get_offset + 1) % ring_bufs) != put_offset)) {
1941 if (--pkts_to_process < 0) {
1942 goto no_rx;
1943 }
1944 skb = (struct sk_buff *) ((unsigned long)
1945 rxdp->Host_Control);
1946 if (skb == NULL) {
1947 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1948 dev->name);
1949 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1950 goto no_rx;
1951 }
1952
1953 pci_unmap_single(nic->pdev, (dma_addr_t)
1954 rxdp->Buffer0_ptr,
1955 BUF0_LEN, PCI_DMA_FROMDEVICE);
1956 pci_unmap_single(nic->pdev, (dma_addr_t)
1957 rxdp->Buffer1_ptr,
1958 BUF1_LEN, PCI_DMA_FROMDEVICE);
1959 pci_unmap_single(nic->pdev, (dma_addr_t)
1960 rxdp->Buffer2_ptr,
1961 dev->mtu + BUF0_LEN + 4,
1962 PCI_DMA_FROMDEVICE);
1963 ba = &nic->ba[i][get_block][get_info.offset];
1964
1965 rx_osm_handler(nic, rxdp, i, ba);
1966
1967 get_info.offset++;
1968 mac_control->rx_curr_get_info[i].offset =
1969 get_info.offset;
1970 rxdp =
1971 nic->rx_blocks[i][get_block].block_virt_addr +
1972 get_info.offset;
1973
1974 if (get_info.offset &&
1975 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
1976 get_info.offset = 0;
1977 mac_control->rx_curr_get_info[i].
1978 offset = get_info.offset;
1979 get_block++;
1980 get_block %= nic->block_count[i];
1981 mac_control->rx_curr_get_info[i].
1982 block_index = get_block;
1983 rxdp =
1984 nic->rx_blocks[i][get_block].
1985 block_virt_addr;
1986 }
1987 get_offset =
1988 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1989 get_info.offset;
1990 pkt_cnt++;
1991 }
1992#endif
1993 } 2421 }
1994 if (!pkt_cnt) 2422 if (!pkt_cnt)
1995 pkt_cnt = 1; 2423 pkt_cnt = 1;
@@ -2007,9 +2435,10 @@ static int s2io_poll(struct net_device *dev, int *budget)
2007 } 2435 }
2008 /* Re enable the Rx interrupts. */ 2436 /* Re enable the Rx interrupts. */
2009 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS); 2437 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2438 atomic_dec(&nic->isr_cnt);
2010 return 0; 2439 return 0;
2011 2440
2012 no_rx: 2441no_rx:
2013 dev->quota -= pkt_cnt; 2442 dev->quota -= pkt_cnt;
2014 *budget -= pkt_cnt; 2443 *budget -= pkt_cnt;
2015 2444
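Under the 2.6.13-era NAPI contract used here, s2io_poll() must charge every handled packet against both dev->quota and *budget, return 0 once the rings are drained (after re-enabling Rx interrupts), and return 1 if it ran out of budget with work still pending. A schematic sketch of that contract, with do_rx_work() as a placeholder for the per-ring rx_intr_handler() calls and the interrupt re-enable left as a comment:

/* Sketch of the old-style NAPI poll contract this driver follows.
 * do_rx_work() is a placeholder for the per-ring receive processing.
 */
static int do_rx_work(struct net_device *dev, int limit);

static int sketch_poll(struct net_device *dev, int *budget)
{
	int limit = min(*budget, dev->quota);
	int done = do_rx_work(dev, limit);	/* packets actually handled */

	dev->quota -= done;
	*budget -= done;

	if (done < limit) {
		netif_rx_complete(dev);		/* leave polled mode */
		/* re-enable the adapter's Rx traffic interrupts here */
		return 0;			/* all caught up */
	}
	return 1;				/* more work: stay on poll list */
}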
@@ -2020,279 +2449,204 @@ static int s2io_poll(struct net_device *dev, int *budget)
2020 break; 2449 break;
2021 } 2450 }
2022 } 2451 }
2452 atomic_dec(&nic->isr_cnt);
2023 return 1; 2453 return 1;
2024} 2454}
2025#else 2455#endif
2026/** 2456
2457/**
2027 * rx_intr_handler - Rx interrupt handler 2458 * rx_intr_handler - Rx interrupt handler
2028 * @nic: device private variable. 2459 * @nic: device private variable.
2029 * Description: 2460 * Description:
2030 * If the interrupt is because of a received frame or if the 2461 * If the interrupt is because of a received frame or if the
 2031 * receive ring contains fresh as yet un-processed frames, this function is 2462 * receive ring contains fresh as yet un-processed frames, this function is
2032 * called. It picks out the RxD at which place the last Rx processing had 2463 * called. It picks out the RxD at which place the last Rx processing had
2033 * stopped and sends the skb to the OSM's Rx handler and then increments 2464 * stopped and sends the skb to the OSM's Rx handler and then increments
2034 * the offset. 2465 * the offset.
2035 * Return Value: 2466 * Return Value:
2036 * NONE. 2467 * NONE.
2037 */ 2468 */
2038 2469static void rx_intr_handler(ring_info_t *ring_data)
2039static void rx_intr_handler(struct s2io_nic *nic)
2040{ 2470{
2471 nic_t *nic = ring_data->nic;
2041 struct net_device *dev = (struct net_device *) nic->dev; 2472 struct net_device *dev = (struct net_device *) nic->dev;
2042 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 2473 int get_block, get_offset, put_block, put_offset, ring_bufs;
2043 rx_curr_get_info_t get_info, put_info; 2474 rx_curr_get_info_t get_info, put_info;
2044 RxD_t *rxdp; 2475 RxD_t *rxdp;
2045 struct sk_buff *skb; 2476 struct sk_buff *skb;
2046#ifndef CONFIG_2BUFF_MODE 2477#ifndef CONFIG_S2IO_NAPI
2047 u16 val16, cksum; 2478 int pkt_cnt = 0;
2048#endif
2049 register u64 val64 = 0;
2050 int get_block, get_offset, put_block, put_offset, ring_bufs;
2051 int i, pkt_cnt = 0;
2052 mac_info_t *mac_control;
2053 struct config_param *config;
2054#ifdef CONFIG_2BUFF_MODE
2055 buffAdd_t *ba;
2056#endif 2479#endif
2480 spin_lock(&nic->rx_lock);
2481 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2482 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2483 __FUNCTION__, dev->name);
2484 spin_unlock(&nic->rx_lock);
2485 }
2057 2486
2058 mac_control = &nic->mac_control; 2487 get_info = ring_data->rx_curr_get_info;
2059 config = &nic->config; 2488 get_block = get_info.block_index;
2060 2489 put_info = ring_data->rx_curr_put_info;
2061 /* 2490 put_block = put_info.block_index;
2062 * rx_traffic_int reg is an R1 register, hence we read and write back 2491 ring_bufs = get_info.ring_len+1;
 2063 * the same value in the register to clear it.
2064 */
2065 val64 = readq(&bar0->rx_traffic_int);
2066 writeq(val64, &bar0->rx_traffic_int);
2067
2068 for (i = 0; i < config->rx_ring_num; i++) {
2069 get_info = mac_control->rx_curr_get_info[i];
2070 get_block = get_info.block_index;
2071 put_info = mac_control->rx_curr_put_info[i];
2072 put_block = put_info.block_index;
2073 ring_bufs = config->rx_cfg[i].num_rxd;
2074 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
2075 get_info.offset;
2076#ifndef CONFIG_2BUFF_MODE
2077 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2078 get_info.offset; 2493 get_info.offset;
2079 spin_lock(&nic->put_lock); 2494 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2080 put_offset = nic->put_pos[i]; 2495 get_info.offset;
2081 spin_unlock(&nic->put_lock); 2496#ifndef CONFIG_S2IO_NAPI
2082 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) && 2497 spin_lock(&nic->put_lock);
2083 (((get_offset + 1) % ring_bufs) != put_offset)) { 2498 put_offset = ring_data->put_pos;
2084 if (rxdp->Control_1 == END_OF_BLOCK) { 2499 spin_unlock(&nic->put_lock);
2085 rxdp = (RxD_t *) ((unsigned long) 2500#else
2086 rxdp->Control_2); 2501 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2087 get_info.offset++; 2502 put_info.offset;
2088 get_info.offset %= 2503#endif
2089 (MAX_RXDS_PER_BLOCK + 1); 2504 while (RXD_IS_UP2DT(rxdp) &&
2090 get_block++; 2505 (((get_offset + 1) % ring_bufs) != put_offset)) {
2091 get_block %= nic->block_count[i]; 2506 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2092 mac_control->rx_curr_get_info[i]. 2507 if (skb == NULL) {
2093 offset = get_info.offset; 2508 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2094 mac_control->rx_curr_get_info[i]. 2509 dev->name);
2095 block_index = get_block; 2510 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2096 continue; 2511 spin_unlock(&nic->rx_lock);
2097 } 2512 return;
2098 get_offset =
2099 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2100 get_info.offset;
2101 skb = (struct sk_buff *) ((unsigned long)
2102 rxdp->Host_Control);
2103 if (skb == NULL) {
2104 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2105 dev->name);
2106 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2107 return;
2108 }
2109 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
2110 val16 = (u16) (val64 >> 48);
2111 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
2112 pci_unmap_single(nic->pdev, (dma_addr_t)
2113 rxdp->Buffer0_ptr,
2114 dev->mtu +
2115 HEADER_ETHERNET_II_802_3_SIZE +
2116 HEADER_802_2_SIZE +
2117 HEADER_SNAP_SIZE,
2118 PCI_DMA_FROMDEVICE);
2119 rx_osm_handler(nic, val16, rxdp, i);
2120 get_info.offset++;
2121 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
2122 rxdp =
2123 nic->rx_blocks[i][get_block].block_virt_addr +
2124 get_info.offset;
2125 mac_control->rx_curr_get_info[i].offset =
2126 get_info.offset;
2127 pkt_cnt++;
2128 if ((indicate_max_pkts)
2129 && (pkt_cnt > indicate_max_pkts))
2130 break;
2131 } 2513 }
2514#ifndef CONFIG_2BUFF_MODE
2515 pci_unmap_single(nic->pdev, (dma_addr_t)
2516 rxdp->Buffer0_ptr,
2517 dev->mtu +
2518 HEADER_ETHERNET_II_802_3_SIZE +
2519 HEADER_802_2_SIZE +
2520 HEADER_SNAP_SIZE,
2521 PCI_DMA_FROMDEVICE);
2132#else 2522#else
2133 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) + 2523 pci_unmap_single(nic->pdev, (dma_addr_t)
2524 rxdp->Buffer0_ptr,
2525 BUF0_LEN, PCI_DMA_FROMDEVICE);
2526 pci_unmap_single(nic->pdev, (dma_addr_t)
2527 rxdp->Buffer1_ptr,
2528 BUF1_LEN, PCI_DMA_FROMDEVICE);
2529 pci_unmap_single(nic->pdev, (dma_addr_t)
2530 rxdp->Buffer2_ptr,
2531 dev->mtu + BUF0_LEN + 4,
2532 PCI_DMA_FROMDEVICE);
2533#endif
2534 rx_osm_handler(ring_data, rxdp);
2535 get_info.offset++;
2536 ring_data->rx_curr_get_info.offset =
2134 get_info.offset; 2537 get_info.offset;
2135 spin_lock(&nic->put_lock); 2538 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2136 put_offset = nic->put_pos[i]; 2539 get_info.offset;
2137 spin_unlock(&nic->put_lock); 2540 if (get_info.offset &&
2138 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) && 2541 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2139 !(rxdp->Control_2 & BIT(0))) && 2542 get_info.offset = 0;
2140 (((get_offset + 1) % ring_bufs) != put_offset)) { 2543 ring_data->rx_curr_get_info.offset
2141 skb = (struct sk_buff *) ((unsigned long) 2544 = get_info.offset;
2142 rxdp->Host_Control); 2545 get_block++;
2143 if (skb == NULL) { 2546 get_block %= ring_data->block_count;
2144 DBG_PRINT(ERR_DBG, "%s: The skb is ", 2547 ring_data->rx_curr_get_info.block_index
2145 dev->name); 2548 = get_block;
2146 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); 2549 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2147 return; 2550 }
2148 }
2149
2150 pci_unmap_single(nic->pdev, (dma_addr_t)
2151 rxdp->Buffer0_ptr,
2152 BUF0_LEN, PCI_DMA_FROMDEVICE);
2153 pci_unmap_single(nic->pdev, (dma_addr_t)
2154 rxdp->Buffer1_ptr,
2155 BUF1_LEN, PCI_DMA_FROMDEVICE);
2156 pci_unmap_single(nic->pdev, (dma_addr_t)
2157 rxdp->Buffer2_ptr,
2158 dev->mtu + BUF0_LEN + 4,
2159 PCI_DMA_FROMDEVICE);
2160 ba = &nic->ba[i][get_block][get_info.offset];
2161
2162 rx_osm_handler(nic, rxdp, i, ba);
2163
2164 get_info.offset++;
2165 mac_control->rx_curr_get_info[i].offset =
2166 get_info.offset;
2167 rxdp =
2168 nic->rx_blocks[i][get_block].block_virt_addr +
2169 get_info.offset;
2170 2551
2171 if (get_info.offset && 2552 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2172 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2173 get_info.offset = 0;
2174 mac_control->rx_curr_get_info[i].
2175 offset = get_info.offset;
2176 get_block++;
2177 get_block %= nic->block_count[i];
2178 mac_control->rx_curr_get_info[i].
2179 block_index = get_block;
2180 rxdp =
2181 nic->rx_blocks[i][get_block].
2182 block_virt_addr;
2183 }
2184 get_offset =
2185 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2186 get_info.offset; 2553 get_info.offset;
2187 pkt_cnt++; 2554#ifdef CONFIG_S2IO_NAPI
2188 if ((indicate_max_pkts) 2555 nic->pkts_to_process -= 1;
2189 && (pkt_cnt > indicate_max_pkts)) 2556 if (!nic->pkts_to_process)
2190 break; 2557 break;
2191 } 2558#else
2192#endif 2559 pkt_cnt++;
2193 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) 2560 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2194 break; 2561 break;
2562#endif
2195 } 2563 }
2564 spin_unlock(&nic->rx_lock);
2196} 2565}
2197#endif 2566
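The reworked rx_intr_handler above operates on a single ring: it flattens the (block_index, offset) pair into a ring-wide position, consumes descriptors until it sits one slot behind the producer ("put") position, and wraps the offset into the next block at each block boundary. Below is a small stand-alone sketch of just that index arithmetic; the block size and ring geometry are made-up values, and the descriptor-ownership test of the real code is left out.

#include <stdio.h>

#define RXDS_PER_BLOCK 127   /* stand-in for MAX_RXDS_PER_BLOCK; value is illustrative */

/* Flat ring position of (block, offset); each block also holds one link descriptor,
 * hence the "+ 1" stride, mirroring the get_offset/put_offset computation above. */
static int flat_pos(int block, int offset)
{
        return block * (RXDS_PER_BLOCK + 1) + offset;
}

int main(void)
{
        int block_count = 4;                                /* illustrative ring geometry */
        int ring_bufs = block_count * (RXDS_PER_BLOCK + 1); /* "ring_len + 1" in the driver */
        int get_block = 0, get_off = 0;                     /* consumer side */
        int put_pos = flat_pos(2, 44);                      /* pretend producer position */
        int handled = 0;

        /* Walk descriptors until the slot just before the producer. */
        while (((flat_pos(get_block, get_off) + 1) % ring_bufs) != put_pos) {
                handled++;                                  /* "rx_osm_handler(...)" */
                get_off++;
                if (get_off && !(get_off % RXDS_PER_BLOCK)) {
                        get_off = 0;                        /* wrap into the next block */
                        get_block = (get_block + 1) % block_count;
                }
        }
        printf("handled %d descriptors, stopped at block %d, offset %d\n",
               handled, get_block, get_off);
        return 0;
}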
2198/** 2567/**
2199 * tx_intr_handler - Transmit interrupt handler 2568 * tx_intr_handler - Transmit interrupt handler
2200 * @nic : device private variable 2569 * @nic : device private variable
2201 * Description: 2570 * Description:
2202 * If an interrupt was raised to indicate DMA complete of the 2571 * If an interrupt was raised to indicate DMA complete of the
2203 * Tx packet, this function is called. It identifies the last TxD 2572 * Tx packet, this function is called. It identifies the last TxD
2204 * whose buffer was freed and frees all skbs whose data have already 2573 * whose buffer was freed and frees all skbs whose data have already
2205 * DMA'ed into the NICs internal memory. 2574 * DMA'ed into the NICs internal memory.
2206 * Return Value: 2575 * Return Value:
2207 * NONE 2576 * NONE
2208 */ 2577 */
2209 2578
2210static void tx_intr_handler(struct s2io_nic *nic) 2579static void tx_intr_handler(fifo_info_t *fifo_data)
2211{ 2580{
2212 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2581 nic_t *nic = fifo_data->nic;
2213 struct net_device *dev = (struct net_device *) nic->dev; 2582 struct net_device *dev = (struct net_device *) nic->dev;
2214 tx_curr_get_info_t get_info, put_info; 2583 tx_curr_get_info_t get_info, put_info;
2215 struct sk_buff *skb; 2584 struct sk_buff *skb;
2216 TxD_t *txdlp; 2585 TxD_t *txdlp;
2217 register u64 val64 = 0;
2218 int i;
2219 u16 j, frg_cnt; 2586 u16 j, frg_cnt;
2220 mac_info_t *mac_control;
2221 struct config_param *config;
2222 2587
2223 mac_control = &nic->mac_control; 2588 get_info = fifo_data->tx_curr_get_info;
2224 config = &nic->config; 2589 put_info = fifo_data->tx_curr_put_info;
2225 2590 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2226 /* 2591 list_virt_addr;
2227 * tx_traffic_int reg is an R1 register, hence we read and write 2592 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2228 * back the samevalue in the register to clear it. 2593 (get_info.offset != put_info.offset) &&
2229 */ 2594 (txdlp->Host_Control)) {
2230 val64 = readq(&bar0->tx_traffic_int); 2595 /* Check for TxD errors */
2231 writeq(val64, &bar0->tx_traffic_int); 2596 if (txdlp->Control_1 & TXD_T_CODE) {
2597 unsigned long long err;
2598 err = txdlp->Control_1 & TXD_T_CODE;
2599 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2600 err);
2601 }
2232 2602
2233 for (i = 0; i < config->tx_fifo_num; i++) { 2603 skb = (struct sk_buff *) ((unsigned long)
2234 get_info = mac_control->tx_curr_get_info[i]; 2604 txdlp->Host_Control);
2235 put_info = mac_control->tx_curr_put_info[i]; 2605 if (skb == NULL) {
2236 txdlp = (TxD_t *) nic->list_info[i][get_info.offset]. 2606 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2237 list_virt_addr; 2607 __FUNCTION__);
2238 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) && 2608 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2239 (get_info.offset != put_info.offset) && 2609 return;
2240 (txdlp->Host_Control)) { 2610 }
2241 /* Check for TxD errors */
2242 if (txdlp->Control_1 & TXD_T_CODE) {
2243 unsigned long long err;
2244 err = txdlp->Control_1 & TXD_T_CODE;
2245 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2246 err);
2247 }
2248 2611
2249 skb = (struct sk_buff *) ((unsigned long) 2612 frg_cnt = skb_shinfo(skb)->nr_frags;
2250 txdlp->Host_Control); 2613 nic->tx_pkt_count++;
2251 if (skb == NULL) { 2614
2252 DBG_PRINT(ERR_DBG, "%s: Null skb ", 2615 pci_unmap_single(nic->pdev, (dma_addr_t)
2253 dev->name); 2616 txdlp->Buffer_Pointer,
2254 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n"); 2617 skb->len - skb->data_len,
2255 return; 2618 PCI_DMA_TODEVICE);
2619 if (frg_cnt) {
2620 TxD_t *temp;
2621 temp = txdlp;
2622 txdlp++;
2623 for (j = 0; j < frg_cnt; j++, txdlp++) {
2624 skb_frag_t *frag =
2625 &skb_shinfo(skb)->frags[j];
2626 if (!txdlp->Buffer_Pointer)
2627 break;
2628 pci_unmap_page(nic->pdev,
2629 (dma_addr_t)
2630 txdlp->
2631 Buffer_Pointer,
2632 frag->size,
2633 PCI_DMA_TODEVICE);
2256 } 2634 }
2257 nic->tx_pkt_count++; 2635 txdlp = temp;
2258
2259 frg_cnt = skb_shinfo(skb)->nr_frags;
2260
2261 /* For unfragmented skb */
2262 pci_unmap_single(nic->pdev, (dma_addr_t)
2263 txdlp->Buffer_Pointer,
2264 skb->len - skb->data_len,
2265 PCI_DMA_TODEVICE);
2266 if (frg_cnt) {
2267 TxD_t *temp = txdlp;
2268 txdlp++;
2269 for (j = 0; j < frg_cnt; j++, txdlp++) {
2270 skb_frag_t *frag =
2271 &skb_shinfo(skb)->frags[j];
2272 pci_unmap_page(nic->pdev,
2273 (dma_addr_t)
2274 txdlp->
2275 Buffer_Pointer,
2276 frag->size,
2277 PCI_DMA_TODEVICE);
2278 }
2279 txdlp = temp;
2280 }
2281 memset(txdlp, 0,
2282 (sizeof(TxD_t) * config->max_txds));
2283
2284 /* Updating the statistics block */
2285 nic->stats.tx_packets++;
2286 nic->stats.tx_bytes += skb->len;
2287 dev_kfree_skb_irq(skb);
2288
2289 get_info.offset++;
2290 get_info.offset %= get_info.fifo_len + 1;
2291 txdlp = (TxD_t *) nic->list_info[i]
2292 [get_info.offset].list_virt_addr;
2293 mac_control->tx_curr_get_info[i].offset =
2294 get_info.offset;
2295 } 2636 }
2637 memset(txdlp, 0,
2638 (sizeof(TxD_t) * fifo_data->max_txds));
2639
2640 /* Updating the statistics block */
2641 nic->stats.tx_bytes += skb->len;
2642 dev_kfree_skb_irq(skb);
2643
2644 get_info.offset++;
2645 get_info.offset %= get_info.fifo_len + 1;
2646 txdlp = (TxD_t *) fifo_data->list_info
2647 [get_info.offset].list_virt_addr;
2648 fifo_data->tx_curr_get_info.offset =
2649 get_info.offset;
2296 } 2650 }
2297 2651
2298 spin_lock(&nic->tx_lock); 2652 spin_lock(&nic->tx_lock);
@@ -2301,13 +2655,13 @@ static void tx_intr_handler(struct s2io_nic *nic)
2301 spin_unlock(&nic->tx_lock); 2655 spin_unlock(&nic->tx_lock);
2302} 2656}
2303 2657
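tx_intr_handler above frees completed transmits by advancing the consumer ("get") offset while three things hold: the descriptor is no longer owned by the adapter, get has not caught up with put, and the descriptor still points at an skb. The toy below mimics that test on a plain array; the struct fields are illustrative stand-ins, not the real TxD layout.

#include <stdio.h>
#include <string.h>

#define FIFO_LEN 8                       /* illustrative; the driver uses fifo_len + 1 slots */

struct toy_txd {
        int hw_owned;                    /* stands in for the TXD_LIST_OWN_XENA bit */
        void *host_control;              /* non-NULL while a packet is outstanding */
};

int main(void)
{
        struct toy_txd fifo[FIFO_LEN + 1];
        int get = 0, put = 5, freed = 0;
        static int pkt;                  /* dummy object standing in for an skb */

        memset(fifo, 0, sizeof(fifo));
        for (int i = 0; i < put; i++)    /* slots 0..4 were posted ... */
                fifo[i].host_control = &pkt;
        fifo[3].hw_owned = 1;            /* ... but 3 and 4 are still owned by "hardware" */
        fifo[4].hw_owned = 1;

        while (!fifo[get].hw_owned && get != put && fifo[get].host_control) {
                memset(&fifo[get], 0, sizeof(fifo[get]));   /* "free the skb", clear the TxD */
                freed++;
                get = (get + 1) % (FIFO_LEN + 1);
        }
        printf("freed %d packets, get=%d put=%d\n", freed, get, put);
        return 0;
}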
2304/** 2658/**
2305 * alarm_intr_handler - Alarm Interrrupt handler 2659 * alarm_intr_handler - Alarm Interrrupt handler
2306 * @nic: device private variable 2660 * @nic: device private variable
2307 * Description: If the interrupt was neither because of Rx packet or Tx 2661 * Description: If the interrupt was neither because of Rx packet or Tx
2308 * complete, this function is called. If the interrupt was to indicate 2662 * complete, this function is called. If the interrupt was to indicate
2309 * a loss of link, the OSM link status handler is invoked for any other 2663 * a loss of link, the OSM link status handler is invoked for any other
2310 * alarm interrupt the block that raised the interrupt is displayed 2664 * alarm interrupt the block that raised the interrupt is displayed
2311 * and a H/W reset is issued. 2665 * and a H/W reset is issued.
2312 * Return Value: 2666 * Return Value:
2313 * NONE 2667 * NONE
@@ -2320,10 +2674,32 @@ static void alarm_intr_handler(struct s2io_nic *nic)
2320 register u64 val64 = 0, err_reg = 0; 2674 register u64 val64 = 0, err_reg = 0;
2321 2675
2322 /* Handling link status change error Intr */ 2676 /* Handling link status change error Intr */
2323 err_reg = readq(&bar0->mac_rmac_err_reg); 2677 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2324 writeq(err_reg, &bar0->mac_rmac_err_reg); 2678 err_reg = readq(&bar0->mac_rmac_err_reg);
2325 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) { 2679 writeq(err_reg, &bar0->mac_rmac_err_reg);
2326 schedule_work(&nic->set_link_task); 2680 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2681 schedule_work(&nic->set_link_task);
2682 }
2683 }
2684
2685 /* Handling Ecc errors */
2686 val64 = readq(&bar0->mc_err_reg);
2687 writeq(val64, &bar0->mc_err_reg);
2688 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2689 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2690 nic->mac_control.stats_info->sw_stat.
2691 double_ecc_errs++;
2692 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2693 dev->name);
2694 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2695 if (nic->device_type != XFRAME_II_DEVICE) {
2696 netif_stop_queue(dev);
2697 schedule_work(&nic->rst_timer_task);
2698 }
2699 } else {
2700 nic->mac_control.stats_info->sw_stat.
2701 single_ecc_errs++;
2702 }
2327 } 2703 }
2328 2704
2329 /* In case of a serious error, the device will be Reset. */ 2705 /* In case of a serious error, the device will be Reset. */
@@ -2338,7 +2714,7 @@ static void alarm_intr_handler(struct s2io_nic *nic)
2338 /* 2714 /*
2339 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC 2715 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2340 * Error occurs, the adapter will be recycled by disabling the 2716 * Error occurs, the adapter will be recycled by disabling the
2341 * adapter enable bit and enabling it again after the device 2717 * adapter enable bit and enabling it again after the device
2342 * becomes Quiescent. 2718 * becomes Quiescent.
2343 */ 2719 */
2344 val64 = readq(&bar0->pcc_err_reg); 2720 val64 = readq(&bar0->pcc_err_reg);
@@ -2354,18 +2730,18 @@ static void alarm_intr_handler(struct s2io_nic *nic)
2354 /* Other type of interrupts are not being handled now, TODO */ 2730 /* Other type of interrupts are not being handled now, TODO */
2355} 2731}
2356 2732
2357/** 2733/**
2358 * wait_for_cmd_complete - waits for a command to complete. 2734 * wait_for_cmd_complete - waits for a command to complete.
2359 * @sp : private member of the device structure, which is a pointer to the 2735 * @sp : private member of the device structure, which is a pointer to the
2360 * s2io_nic structure. 2736 * s2io_nic structure.
2361 * Description: Function that waits for a command to Write into RMAC 2737 * Description: Function that waits for a command to Write into RMAC
2362 * ADDR DATA registers to be completed and returns either success or 2738 * ADDR DATA registers to be completed and returns either success or
2363 * error depending on whether the command was complete or not. 2739 * error depending on whether the command was complete or not.
2364 * Return value: 2740 * Return value:
2365 * SUCCESS on success and FAILURE on failure. 2741 * SUCCESS on success and FAILURE on failure.
2366 */ 2742 */
2367 2743
2368static int wait_for_cmd_complete(nic_t * sp) 2744int wait_for_cmd_complete(nic_t * sp)
2369{ 2745{
2370 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2746 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2371 int ret = FAILURE, cnt = 0; 2747 int ret = FAILURE, cnt = 0;
@@ -2385,29 +2761,32 @@ static int wait_for_cmd_complete(nic_t * sp)
2385 return ret; 2761 return ret;
2386} 2762}
2387 2763
2388/** 2764/**
2389 * s2io_reset - Resets the card. 2765 * s2io_reset - Resets the card.
2390 * @sp : private member of the device structure. 2766 * @sp : private member of the device structure.
2391 * Description: Function to Reset the card. This function then also 2767 * Description: Function to Reset the card. This function then also
2392 * restores the previously saved PCI configuration space registers as 2768 * restores the previously saved PCI configuration space registers as
2393 * the card reset also resets the configuration space. 2769 * the card reset also resets the configuration space.
2394 * Return value: 2770 * Return value:
2395 * void. 2771 * void.
2396 */ 2772 */
2397 2773
2398static void s2io_reset(nic_t * sp) 2774void s2io_reset(nic_t * sp)
2399{ 2775{
2400 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2776 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2401 u64 val64; 2777 u64 val64;
2402 u16 subid; 2778 u16 subid, pci_cmd;
2779
2780 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
2781 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
2403 2782
2404 val64 = SW_RESET_ALL; 2783 val64 = SW_RESET_ALL;
2405 writeq(val64, &bar0->sw_reset); 2784 writeq(val64, &bar0->sw_reset);
2406 2785
2407 /* 2786 /*
2408 * At this stage, if the PCI write is indeed completed, the 2787 * At this stage, if the PCI write is indeed completed, the
2409 * card is reset and so is the PCI Config space of the device. 2788 * card is reset and so is the PCI Config space of the device.
2410 * So a read cannot be issued at this stage on any of the 2789 * So a read cannot be issued at this stage on any of the
2411 * registers to ensure the write into "sw_reset" register 2790 * registers to ensure the write into "sw_reset" register
2412 * has gone through. 2791 * has gone through.
2413 * Question: Is there any system call that will explicitly force 2792 * Question: Is there any system call that will explicitly force
@@ -2418,42 +2797,72 @@ static void s2io_reset(nic_t * sp)
2418 */ 2797 */
2419 msleep(250); 2798 msleep(250);
2420 2799
2421 /* Restore the PCI state saved during initializarion. */ 2800 /* Restore the PCI state saved during initialization. */
2422 pci_restore_state(sp->pdev); 2801 pci_restore_state(sp->pdev);
2802 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
2803 pci_cmd);
2423 s2io_init_pci(sp); 2804 s2io_init_pci(sp);
2424 2805
2425 msleep(250); 2806 msleep(250);
2426 2807
2808 /* Set swapper to enable I/O register access */
2809 s2io_set_swapper(sp);
2810
2811 /* Clear certain PCI/PCI-X fields after reset */
2812 if (sp->device_type == XFRAME_II_DEVICE) {
2813 /* Clear parity err detect bit */
2814 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
2815
2816 /* Clearing PCIX Ecc status register */
2817 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
2818
2819 /* Clearing PCI_STATUS error reflected here */
2820 writeq(BIT(62), &bar0->txpic_int_reg);
2821 }
2822
2823 /* Reset device statistics maintained by OS */
2824 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2825
2427 /* SXE-002: Configure link and activity LED to turn it off */ 2826 /* SXE-002: Configure link and activity LED to turn it off */
2428 subid = sp->pdev->subsystem_device; 2827 subid = sp->pdev->subsystem_device;
2429 if ((subid & 0xFF) >= 0x07) { 2828 if (((subid & 0xFF) >= 0x07) &&
2829 (sp->device_type == XFRAME_I_DEVICE)) {
2430 val64 = readq(&bar0->gpio_control); 2830 val64 = readq(&bar0->gpio_control);
2431 val64 |= 0x0000800000000000ULL; 2831 val64 |= 0x0000800000000000ULL;
2432 writeq(val64, &bar0->gpio_control); 2832 writeq(val64, &bar0->gpio_control);
2433 val64 = 0x0411040400000000ULL; 2833 val64 = 0x0411040400000000ULL;
2434 writeq(val64, (void __iomem *) bar0 + 0x2700); 2834 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2835 }
2836
2837 /*
2838 * Clear spurious ECC interrupts that would have occured on
2839 * XFRAME II cards after reset.
2840 */
2841 if (sp->device_type == XFRAME_II_DEVICE) {
2842 val64 = readq(&bar0->pcc_err_reg);
2843 writeq(val64, &bar0->pcc_err_reg);
2435 } 2844 }
2436 2845
2437 sp->device_enabled_once = FALSE; 2846 sp->device_enabled_once = FALSE;
2438} 2847}
2439 2848
2440/** 2849/**
2441 * s2io_set_swapper - to set the swapper controle on the card 2850 * s2io_set_swapper - to set the swapper controle on the card
2442 * @sp : private member of the device structure, 2851 * @sp : private member of the device structure,
2443 * pointer to the s2io_nic structure. 2852 * pointer to the s2io_nic structure.
2444 * Description: Function to set the swapper control on the card 2853 * Description: Function to set the swapper control on the card
2445 * correctly depending on the 'endianness' of the system. 2854 * correctly depending on the 'endianness' of the system.
2446 * Return value: 2855 * Return value:
2447 * SUCCESS on success and FAILURE on failure. 2856 * SUCCESS on success and FAILURE on failure.
2448 */ 2857 */
2449 2858
2450static int s2io_set_swapper(nic_t * sp) 2859int s2io_set_swapper(nic_t * sp)
2451{ 2860{
2452 struct net_device *dev = sp->dev; 2861 struct net_device *dev = sp->dev;
2453 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2862 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2454 u64 val64, valt, valr; 2863 u64 val64, valt, valr;
2455 2864
2456 /* 2865 /*
2457 * Set proper endian settings and verify the same by reading 2866 * Set proper endian settings and verify the same by reading
2458 * the PIF Feed-back register. 2867 * the PIF Feed-back register.
2459 */ 2868 */
@@ -2505,8 +2914,9 @@ static int s2io_set_swapper(nic_t * sp)
2505 i++; 2914 i++;
2506 } 2915 }
2507 if(i == 4) { 2916 if(i == 4) {
2917 unsigned long long x = val64;
2508 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr "); 2918 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2509 DBG_PRINT(ERR_DBG, "reads:0x%llx\n",val64); 2919 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2510 return FAILURE; 2920 return FAILURE;
2511 } 2921 }
2512 } 2922 }
@@ -2514,8 +2924,8 @@ static int s2io_set_swapper(nic_t * sp)
2514 val64 &= 0xFFFF000000000000ULL; 2924 val64 &= 0xFFFF000000000000ULL;
2515 2925
2516#ifdef __BIG_ENDIAN 2926#ifdef __BIG_ENDIAN
2517 /* 2927 /*
2518 * The device by default set to a big endian format, so a 2928 * The device by default set to a big endian format, so a
2519 * big endian driver need not set anything. 2929 * big endian driver need not set anything.
2520 */ 2930 */
2521 val64 |= (SWAPPER_CTRL_TXP_FE | 2931 val64 |= (SWAPPER_CTRL_TXP_FE |
@@ -2531,9 +2941,9 @@ static int s2io_set_swapper(nic_t * sp)
2531 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE); 2941 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2532 writeq(val64, &bar0->swapper_ctrl); 2942 writeq(val64, &bar0->swapper_ctrl);
2533#else 2943#else
2534 /* 2944 /*
2535 * Initially we enable all bits to make it accessible by the 2945 * Initially we enable all bits to make it accessible by the
2536 * driver, then we selectively enable only those bits that 2946 * driver, then we selectively enable only those bits that
2537 * we want to set. 2947 * we want to set.
2538 */ 2948 */
2539 val64 |= (SWAPPER_CTRL_TXP_FE | 2949 val64 |= (SWAPPER_CTRL_TXP_FE |
@@ -2555,8 +2965,8 @@ static int s2io_set_swapper(nic_t * sp)
2555#endif 2965#endif
2556 val64 = readq(&bar0->swapper_ctrl); 2966 val64 = readq(&bar0->swapper_ctrl);
2557 2967
2558 /* 2968 /*
2559 * Verifying if endian settings are accurate by reading a 2969 * Verifying if endian settings are accurate by reading a
2560 * feedback register. 2970 * feedback register.
2561 */ 2971 */
2562 val64 = readq(&bar0->pif_rd_swapper_fb); 2972 val64 = readq(&bar0->pif_rd_swapper_fb);
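s2io_set_swapper programs how the adapter byte-swaps PIO accesses and then confirms the setting by reading a feedback register whose correct value is known in advance. The host-side check below is only an analogue of that idea: compare a sampled value against the expected constant and against its byte-swapped form; the constant is a placeholder, not presented as the card's real feedback value.

#include <stdint.h>
#include <stdio.h>

#define EXPECTED_FEEDBACK 0x0123456789ABCDEFULL  /* placeholder pattern for illustration */

/* 0: swapper correct, 1: value comes back byte-swapped, -1: neither (misprogrammed). */
static int check_swapper(uint64_t sampled)
{
        if (sampled == EXPECTED_FEEDBACK)
                return 0;
        if (__builtin_bswap64(sampled) == EXPECTED_FEEDBACK)
                return 1;
        return -1;
}

int main(void)
{
        printf("%d %d %d\n",
               check_swapper(EXPECTED_FEEDBACK),
               check_swapper(__builtin_bswap64(EXPECTED_FEEDBACK)),
               check_swapper(0));
        return 0;
}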
@@ -2576,55 +2986,63 @@ static int s2io_set_swapper(nic_t * sp)
2576 * Functions defined below concern the OS part of the driver * 2986 * Functions defined below concern the OS part of the driver *
2577 * ********************************************************* */ 2987 * ********************************************************* */
2578 2988
2579/** 2989/**
2580 * s2io_open - open entry point of the driver 2990 * s2io_open - open entry point of the driver
2581 * @dev : pointer to the device structure. 2991 * @dev : pointer to the device structure.
2582 * Description: 2992 * Description:
2583 * This function is the open entry point of the driver. It mainly calls a 2993 * This function is the open entry point of the driver. It mainly calls a
2584 * function to allocate Rx buffers and inserts them into the buffer 2994 * function to allocate Rx buffers and inserts them into the buffer
2585 * descriptors and then enables the Rx part of the NIC. 2995 * descriptors and then enables the Rx part of the NIC.
2586 * Return value: 2996 * Return value:
2587 * 0 on success and an appropriate (-)ve integer as defined in errno.h 2997 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2588 * file on failure. 2998 * file on failure.
2589 */ 2999 */
2590 3000
2591static int s2io_open(struct net_device *dev) 3001int s2io_open(struct net_device *dev)
2592{ 3002{
2593 nic_t *sp = dev->priv; 3003 nic_t *sp = dev->priv;
2594 int err = 0; 3004 int err = 0;
2595 3005
2596 /* 3006 /*
2597 * Make sure you have link off by default every time 3007 * Make sure you have link off by default every time
2598 * Nic is initialized 3008 * Nic is initialized
2599 */ 3009 */
2600 netif_carrier_off(dev); 3010 netif_carrier_off(dev);
2601 sp->last_link_state = LINK_DOWN; 3011 sp->last_link_state = 0;
2602 3012
2603 /* Initialize H/W and enable interrupts */ 3013 /* Initialize H/W and enable interrupts */
2604 if (s2io_card_up(sp)) { 3014 if (s2io_card_up(sp)) {
2605 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", 3015 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2606 dev->name); 3016 dev->name);
2607 return -ENODEV; 3017 err = -ENODEV;
3018 goto hw_init_failed;
2608 } 3019 }
2609 3020
2610 /* After proper initialization of H/W, register ISR */ 3021 /* After proper initialization of H/W, register ISR */
2611 err = request_irq((int) sp->irq, s2io_isr, SA_SHIRQ, 3022 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2612 sp->name, dev); 3023 sp->name, dev);
2613 if (err) { 3024 if (err) {
2614 s2io_reset(sp);
2615 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", 3025 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2616 dev->name); 3026 dev->name);
2617 return err; 3027 goto isr_registration_failed;
2618 } 3028 }
2619 3029
2620 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) { 3030 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2621 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n"); 3031 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2622 s2io_reset(sp); 3032 err = -ENODEV;
2623 return -ENODEV; 3033 goto setting_mac_address_failed;
2624 } 3034 }
2625 3035
2626 netif_start_queue(dev); 3036 netif_start_queue(dev);
2627 return 0; 3037 return 0;
3038
3039setting_mac_address_failed:
3040 free_irq(sp->pdev->irq, dev);
3041isr_registration_failed:
3042 del_timer_sync(&sp->alarm_timer);
3043 s2io_reset(sp);
3044hw_init_failed:
3045 return err;
2628} 3046}
2629 3047
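The reworked s2io_open above replaces cleanup-and-return at each failure point with the usual goto-unwind style: acquire resources in order, and on failure jump to a label that undoes only what was already acquired, in reverse order. A generic, self-contained illustration of the pattern follows; the resources here are arbitrary stand-ins, not the driver's.

#include <stdio.h>
#include <stdlib.h>

/* fail_at lets the demo force a failure at step 1, 2 or 3; 0 means succeed. */
static int bring_up(int fail_at)
{
        char *a, *b;
        FILE *f;
        int err;

        a = (fail_at == 1) ? NULL : malloc(64);
        if (!a) { err = -1; goto no_a; }

        b = (fail_at == 2) ? NULL : malloc(64);
        if (!b) { err = -2; goto no_b; }

        f = (fail_at == 3) ? NULL : fopen("/dev/null", "r");
        if (!f) { err = -3; goto no_file; }

        fclose(f);            /* fully up; tear down again for the demo */
        free(b);
        free(a);
        return 0;

no_file:
        free(b);              /* undo step 2 */
no_b:
        free(a);              /* undo step 1 */
no_a:
        return err;
}

int main(void)
{
        for (int i = 0; i <= 3; i++)
                printf("fail_at=%d -> %d\n", i, bring_up(i));
        return 0;
}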
2630/** 3048/**
@@ -2640,16 +3058,15 @@ static int s2io_open(struct net_device *dev)
2640 * file on failure. 3058 * file on failure.
2641 */ 3059 */
2642 3060
2643static int s2io_close(struct net_device *dev) 3061int s2io_close(struct net_device *dev)
2644{ 3062{
2645 nic_t *sp = dev->priv; 3063 nic_t *sp = dev->priv;
2646
2647 flush_scheduled_work(); 3064 flush_scheduled_work();
2648 netif_stop_queue(dev); 3065 netif_stop_queue(dev);
2649 /* Reset card, kill tasklet and free Tx and Rx buffers. */ 3066 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2650 s2io_card_down(sp); 3067 s2io_card_down(sp);
2651 3068
2652 free_irq(dev->irq, dev); 3069 free_irq(sp->pdev->irq, dev);
2653 sp->device_close_flag = TRUE; /* Device is shut down. */ 3070 sp->device_close_flag = TRUE; /* Device is shut down. */
2654 return 0; 3071 return 0;
2655} 3072}
@@ -2667,7 +3084,7 @@ static int s2io_close(struct net_device *dev)
2667 * 0 on success & 1 on failure. 3084 * 0 on success & 1 on failure.
2668 */ 3085 */
2669 3086
2670static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) 3087int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2671{ 3088{
2672 nic_t *sp = dev->priv; 3089 nic_t *sp = dev->priv;
2673 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off; 3090 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
@@ -2678,29 +3095,39 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2678#ifdef NETIF_F_TSO 3095#ifdef NETIF_F_TSO
2679 int mss; 3096 int mss;
2680#endif 3097#endif
3098 u16 vlan_tag = 0;
3099 int vlan_priority = 0;
2681 mac_info_t *mac_control; 3100 mac_info_t *mac_control;
2682 struct config_param *config; 3101 struct config_param *config;
2683 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2684 3102
2685 mac_control = &sp->mac_control; 3103 mac_control = &sp->mac_control;
2686 config = &sp->config; 3104 config = &sp->config;
2687 3105
2688 DBG_PRINT(TX_DBG, "%s: In S2IO Tx routine\n", dev->name); 3106 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2689 spin_lock_irqsave(&sp->tx_lock, flags); 3107 spin_lock_irqsave(&sp->tx_lock, flags);
2690
2691 if (atomic_read(&sp->card_state) == CARD_DOWN) { 3108 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2692 DBG_PRINT(ERR_DBG, "%s: Card going down for reset\n", 3109 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2693 dev->name); 3110 dev->name);
2694 spin_unlock_irqrestore(&sp->tx_lock, flags); 3111 spin_unlock_irqrestore(&sp->tx_lock, flags);
2695 return 1; 3112 dev_kfree_skb(skb);
3113 return 0;
2696 } 3114 }
2697 3115
2698 queue = 0; 3116 queue = 0;
2699 put_off = (u16) mac_control->tx_curr_put_info[queue].offset;
2700 get_off = (u16) mac_control->tx_curr_get_info[queue].offset;
2701 txdp = (TxD_t *) sp->list_info[queue][put_off].list_virt_addr;
2702 3117
2703 queue_len = mac_control->tx_curr_put_info[queue].fifo_len + 1; 3118 /* Get Fifo number to Transmit based on vlan priority */
3119 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3120 vlan_tag = vlan_tx_tag_get(skb);
3121 vlan_priority = vlan_tag >> 13;
3122 queue = config->fifo_mapping[vlan_priority];
3123 }
3124
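The fifo-selection block just above derives the Tx queue from the frame's VLAN tag: the 3-bit priority sits in the top bits of the tag (vlan_tag >> 13) and indexes config->fifo_mapping. A stand-alone sketch of that selection; the mapping table contents here are invented.

#include <stdint.h>
#include <stdio.h>

/* Invented priority -> fifo table; the driver fills its own fifo_mapping[]. */
static const int fifo_mapping[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };

static int fifo_for_vlan_tag(uint16_t vlan_tag)
{
        int vlan_priority = vlan_tag >> 13;   /* top 3 bits of the tag */
        return fifo_mapping[vlan_priority];
}

int main(void)
{
        uint16_t tags[] = { 0x0064, 0x6064, 0xe001 };   /* priorities 0, 3 and 7 */
        for (unsigned i = 0; i < sizeof(tags) / sizeof(tags[0]); i++)
                printf("tag 0x%04x -> priority %d -> fifo %d\n",
                       tags[i], tags[i] >> 13, fifo_for_vlan_tag(tags[i]));
        return 0;
}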
3125 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3126 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3127 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3128 list_virt_addr;
3129
3130 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2704 /* Avoid "put" pointer going beyond "get" pointer */ 3131 /* Avoid "put" pointer going beyond "get" pointer */
2705 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) { 3132 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2706 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n"); 3133 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
@@ -2709,6 +3136,15 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2709 spin_unlock_irqrestore(&sp->tx_lock, flags); 3136 spin_unlock_irqrestore(&sp->tx_lock, flags);
2710 return 0; 3137 return 0;
2711 } 3138 }
3139
3140 /* A buffer with no data will be dropped */
3141 if (!skb->len) {
3142 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3143 dev_kfree_skb(skb);
3144 spin_unlock_irqrestore(&sp->tx_lock, flags);
3145 return 0;
3146 }
3147
2712#ifdef NETIF_F_TSO 3148#ifdef NETIF_F_TSO
2713 mss = skb_shinfo(skb)->tso_size; 3149 mss = skb_shinfo(skb)->tso_size;
2714 if (mss) { 3150 if (mss) {
@@ -2720,9 +3156,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2720 frg_cnt = skb_shinfo(skb)->nr_frags; 3156 frg_cnt = skb_shinfo(skb)->nr_frags;
2721 frg_len = skb->len - skb->data_len; 3157 frg_len = skb->len - skb->data_len;
2722 3158
2723 txdp->Host_Control = (unsigned long) skb;
2724 txdp->Buffer_Pointer = pci_map_single 3159 txdp->Buffer_Pointer = pci_map_single
2725 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); 3160 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3161 txdp->Host_Control = (unsigned long) skb;
2726 if (skb->ip_summed == CHECKSUM_HW) { 3162 if (skb->ip_summed == CHECKSUM_HW) {
2727 txdp->Control_2 |= 3163 txdp->Control_2 |=
2728 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN | 3164 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
@@ -2731,6 +3167,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2731 3167
2732 txdp->Control_2 |= config->tx_intr_type; 3168 txdp->Control_2 |= config->tx_intr_type;
2733 3169
3170 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3171 txdp->Control_2 |= TXD_VLAN_ENABLE;
3172 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3173 }
3174
2734 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) | 3175 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2735 TXD_GATHER_CODE_FIRST); 3176 TXD_GATHER_CODE_FIRST);
2736 txdp->Control_1 |= TXD_LIST_OWN_XENA; 3177 txdp->Control_1 |= TXD_LIST_OWN_XENA;
@@ -2738,6 +3179,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2738 /* For fragmented SKB. */ 3179 /* For fragmented SKB. */
2739 for (i = 0; i < frg_cnt; i++) { 3180 for (i = 0; i < frg_cnt; i++) {
2740 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3181 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3182 /* A '0' length fragment will be ignored */
3183 if (!frag->size)
3184 continue;
2741 txdp++; 3185 txdp++;
2742 txdp->Buffer_Pointer = (u64) pci_map_page 3186 txdp->Buffer_Pointer = (u64) pci_map_page
2743 (sp->pdev, frag->page, frag->page_offset, 3187 (sp->pdev, frag->page, frag->page_offset,
@@ -2747,23 +3191,23 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2747 txdp->Control_1 |= TXD_GATHER_CODE_LAST; 3191 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2748 3192
2749 tx_fifo = mac_control->tx_FIFO_start[queue]; 3193 tx_fifo = mac_control->tx_FIFO_start[queue];
2750 val64 = sp->list_info[queue][put_off].list_phy_addr; 3194 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2751 writeq(val64, &tx_fifo->TxDL_Pointer); 3195 writeq(val64, &tx_fifo->TxDL_Pointer);
2752 3196
2753 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | 3197 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2754 TX_FIFO_LAST_LIST); 3198 TX_FIFO_LAST_LIST);
3199
2755#ifdef NETIF_F_TSO 3200#ifdef NETIF_F_TSO
2756 if (mss) 3201 if (mss)
2757 val64 |= TX_FIFO_SPECIAL_FUNC; 3202 val64 |= TX_FIFO_SPECIAL_FUNC;
2758#endif 3203#endif
2759 writeq(val64, &tx_fifo->List_Control); 3204 writeq(val64, &tx_fifo->List_Control);
2760 3205
2761 /* Perform a PCI read to flush previous writes */ 3206 mmiowb();
2762 val64 = readq(&bar0->general_int_status);
2763 3207
2764 put_off++; 3208 put_off++;
2765 put_off %= mac_control->tx_curr_put_info[queue].fifo_len + 1; 3209 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2766 mac_control->tx_curr_put_info[queue].offset = put_off; 3210 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2767 3211
2768 /* Avoid "put" pointer going beyond "get" pointer */ 3212 /* Avoid "put" pointer going beyond "get" pointer */
2769 if (((put_off + 1) % queue_len) == get_off) { 3213 if (((put_off + 1) % queue_len) == get_off) {
@@ -2779,18 +3223,74 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2779 return 0; 3223 return 0;
2780} 3224}
2781 3225
3226static void
3227s2io_alarm_handle(unsigned long data)
3228{
3229 nic_t *sp = (nic_t *)data;
3230
3231 alarm_intr_handler(sp);
3232 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3233}
3234
3235static void s2io_txpic_intr_handle(nic_t *sp)
3236{
3237 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) sp->bar0;
3238 u64 val64;
3239
3240 val64 = readq(&bar0->pic_int_status);
3241 if (val64 & PIC_INT_GPIO) {
3242 val64 = readq(&bar0->gpio_int_reg);
3243 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3244 (val64 & GPIO_INT_REG_LINK_UP)) {
3245 val64 |= GPIO_INT_REG_LINK_DOWN;
3246 val64 |= GPIO_INT_REG_LINK_UP;
3247 writeq(val64, &bar0->gpio_int_reg);
3248 goto masking;
3249 }
3250
3251 if (((sp->last_link_state == LINK_UP) &&
3252 (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3253 ((sp->last_link_state == LINK_DOWN) &&
3254 (val64 & GPIO_INT_REG_LINK_UP))) {
3255 val64 = readq(&bar0->gpio_int_mask);
3256 val64 |= GPIO_INT_MASK_LINK_DOWN;
3257 val64 |= GPIO_INT_MASK_LINK_UP;
3258 writeq(val64, &bar0->gpio_int_mask);
3259 s2io_set_link((unsigned long)sp);
3260 }
3261masking:
3262 if (sp->last_link_state == LINK_UP) {
3263 /*enable down interrupt */
3264 val64 = readq(&bar0->gpio_int_mask);
3265 /* unmasks link down intr */
3266 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
3267 /* masks link up intr */
3268 val64 |= GPIO_INT_MASK_LINK_UP;
3269 writeq(val64, &bar0->gpio_int_mask);
3270 } else {
3271 /*enable UP Interrupt */
3272 val64 = readq(&bar0->gpio_int_mask);
3273 /* unmasks link up interrupt */
3274 val64 &= ~GPIO_INT_MASK_LINK_UP;
3275 /* masks link down interrupt */
3276 val64 |= GPIO_INT_MASK_LINK_DOWN;
3277 writeq(val64, &bar0->gpio_int_mask);
3278 }
3279 }
3280}
3281
2782/** 3282/**
2783 * s2io_isr - ISR handler of the device . 3283 * s2io_isr - ISR handler of the device .
2784 * @irq: the irq of the device. 3284 * @irq: the irq of the device.
2785 * @dev_id: a void pointer to the dev structure of the NIC. 3285 * @dev_id: a void pointer to the dev structure of the NIC.
2786 * @pt_regs: pointer to the registers pushed on the stack. 3286 * @pt_regs: pointer to the registers pushed on the stack.
2787 * Description: This function is the ISR handler of the device. It 3287 * Description: This function is the ISR handler of the device. It
2788 * identifies the reason for the interrupt and calls the relevant 3288 * identifies the reason for the interrupt and calls the relevant
2789 * service routines. As a contongency measure, this ISR allocates the 3289 * service routines. As a contongency measure, this ISR allocates the
2790 * recv buffers, if their numbers are below the panic value which is 3290 * recv buffers, if their numbers are below the panic value which is
2791 * presently set to 25% of the original number of rcv buffers allocated. 3291 * presently set to 25% of the original number of rcv buffers allocated.
2792 * Return value: 3292 * Return value:
2793 * IRQ_HANDLED: will be returned if IRQ was handled by this routine 3293 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2794 * IRQ_NONE: will be returned if interrupt is not from our device 3294 * IRQ_NONE: will be returned if interrupt is not from our device
2795 */ 3295 */
2796static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs) 3296static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2798,40 +3298,31 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2798 struct net_device *dev = (struct net_device *) dev_id; 3298 struct net_device *dev = (struct net_device *) dev_id;
2799 nic_t *sp = dev->priv; 3299 nic_t *sp = dev->priv;
2800 XENA_dev_config_t __iomem *bar0 = sp->bar0; 3300 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2801#ifndef CONFIG_S2IO_NAPI 3301 int i;
2802 int i, ret; 3302 u64 reason = 0, val64;
2803#endif
2804 u64 reason = 0;
2805 mac_info_t *mac_control; 3303 mac_info_t *mac_control;
2806 struct config_param *config; 3304 struct config_param *config;
2807 3305
3306 atomic_inc(&sp->isr_cnt);
2808 mac_control = &sp->mac_control; 3307 mac_control = &sp->mac_control;
2809 config = &sp->config; 3308 config = &sp->config;
2810 3309
2811 /* 3310 /*
2812 * Identify the cause for interrupt and call the appropriate 3311 * Identify the cause for interrupt and call the appropriate
2813 * interrupt handler. Causes for the interrupt could be; 3312 * interrupt handler. Causes for the interrupt could be;
2814 * 1. Rx of packet. 3313 * 1. Rx of packet.
2815 * 2. Tx complete. 3314 * 2. Tx complete.
2816 * 3. Link down. 3315 * 3. Link down.
2817 * 4. Error in any functional blocks of the NIC. 3316 * 4. Error in any functional blocks of the NIC.
2818 */ 3317 */
2819 reason = readq(&bar0->general_int_status); 3318 reason = readq(&bar0->general_int_status);
2820 3319
2821 if (!reason) { 3320 if (!reason) {
2822 /* The interrupt was not raised by Xena. */ 3321 /* The interrupt was not raised by Xena. */
3322 atomic_dec(&sp->isr_cnt);
2823 return IRQ_NONE; 3323 return IRQ_NONE;
2824 } 3324 }
2825 3325
2826 /* If Intr is because of Tx Traffic */
2827 if (reason & GEN_INTR_TXTRAFFIC) {
2828 tx_intr_handler(sp);
2829 }
2830
2831 /* If Intr is because of an error */
2832 if (reason & (GEN_ERROR_INTR))
2833 alarm_intr_handler(sp);
2834
2835#ifdef CONFIG_S2IO_NAPI 3326#ifdef CONFIG_S2IO_NAPI
2836 if (reason & GEN_INTR_RXTRAFFIC) { 3327 if (reason & GEN_INTR_RXTRAFFIC) {
2837 if (netif_rx_schedule_prep(dev)) { 3328 if (netif_rx_schedule_prep(dev)) {
@@ -2843,17 +3334,43 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2843#else 3334#else
2844 /* If Intr is because of Rx Traffic */ 3335 /* If Intr is because of Rx Traffic */
2845 if (reason & GEN_INTR_RXTRAFFIC) { 3336 if (reason & GEN_INTR_RXTRAFFIC) {
2846 rx_intr_handler(sp); 3337 /*
3338 * rx_traffic_int reg is an R1 register, writing all 1's
3339 * will ensure that the actual interrupt causing bit get's
3340 * cleared and hence a read can be avoided.
3341 */
3342 val64 = 0xFFFFFFFFFFFFFFFFULL;
3343 writeq(val64, &bar0->rx_traffic_int);
3344 for (i = 0; i < config->rx_ring_num; i++) {
3345 rx_intr_handler(&mac_control->rings[i]);
3346 }
2847 } 3347 }
2848#endif 3348#endif
2849 3349
2850 /* 3350 /* If Intr is because of Tx Traffic */
2851 * If the Rx buffer count is below the panic threshold then 3351 if (reason & GEN_INTR_TXTRAFFIC) {
2852 * reallocate the buffers from the interrupt handler itself, 3352 /*
3353 * tx_traffic_int reg is an R1 register, writing all 1's
3354 * will ensure that the actual interrupt causing bit get's
3355 * cleared and hence a read can be avoided.
3356 */
3357 val64 = 0xFFFFFFFFFFFFFFFFULL;
3358 writeq(val64, &bar0->tx_traffic_int);
3359
3360 for (i = 0; i < config->tx_fifo_num; i++)
3361 tx_intr_handler(&mac_control->fifos[i]);
3362 }
3363
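The ISR above no longer reads rx_traffic_int/tx_traffic_int back to clear them; as the comments say, these are write-1-to-clear ("R1") registers, so writing all 1's acknowledges whatever was pending without an extra PIO read. A tiny model of that register behaviour, with invented cause bits:

#include <stdint.h>
#include <stdio.h>

static uint64_t traffic_int;                 /* models a write-1-to-clear status register */

#define INT_RING0 (1ULL << 0)                /* invented cause bits */
#define INT_RING1 (1ULL << 1)

static void hw_raise(uint64_t bits) { traffic_int |= bits; }

/* Writing a 1 to a bit clears it; writing 0 leaves it alone. */
static void reg_write(uint64_t val)  { traffic_int &= ~val; }

int main(void)
{
        hw_raise(INT_RING0 | INT_RING1);
        printf("pending before ack: 0x%llx\n", (unsigned long long)traffic_int);

        /* Old style: read, then write the same value back.  New style: just
         * write all 1's; any bit that was set gets cleared either way. */
        reg_write(~0ULL);
        printf("pending after ack:  0x%llx\n", (unsigned long long)traffic_int);
        return 0;
}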
3364 if (reason & GEN_INTR_TXPIC)
3365 s2io_txpic_intr_handle(sp);
3366 /*
3367 * If the Rx buffer count is below the panic threshold then
3368 * reallocate the buffers from the interrupt handler itself,
2853 * else schedule a tasklet to reallocate the buffers. 3369 * else schedule a tasklet to reallocate the buffers.
2854 */ 3370 */
2855#ifndef CONFIG_S2IO_NAPI 3371#ifndef CONFIG_S2IO_NAPI
2856 for (i = 0; i < config->rx_ring_num; i++) { 3372 for (i = 0; i < config->rx_ring_num; i++) {
3373 int ret;
2857 int rxb_size = atomic_read(&sp->rx_bufs_left[i]); 3374 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2858 int level = rx_buffer_level(sp, rxb_size, i); 3375 int level = rx_buffer_level(sp, rxb_size, i);
2859 3376
@@ -2865,6 +3382,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2865 dev->name); 3382 dev->name);
2866 DBG_PRINT(ERR_DBG, " in ISR!!\n"); 3383 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2867 clear_bit(0, (&sp->tasklet_status)); 3384 clear_bit(0, (&sp->tasklet_status));
3385 atomic_dec(&sp->isr_cnt);
2868 return IRQ_HANDLED; 3386 return IRQ_HANDLED;
2869 } 3387 }
2870 clear_bit(0, (&sp->tasklet_status)); 3388 clear_bit(0, (&sp->tasklet_status));
@@ -2874,33 +3392,69 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2874 } 3392 }
2875#endif 3393#endif
2876 3394
3395 atomic_dec(&sp->isr_cnt);
2877 return IRQ_HANDLED; 3396 return IRQ_HANDLED;
2878} 3397}
2879 3398
2880/** 3399/**
2881 * s2io_get_stats - Updates the device statistics structure. 3400 * s2io_updt_stats -
3401 */
3402static void s2io_updt_stats(nic_t *sp)
3403{
3404 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3405 u64 val64;
3406 int cnt = 0;
3407
3408 if (atomic_read(&sp->card_state) == CARD_UP) {
3409 /* Apprx 30us on a 133 MHz bus */
3410 val64 = SET_UPDT_CLICKS(10) |
3411 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3412 writeq(val64, &bar0->stat_cfg);
3413 do {
3414 udelay(100);
3415 val64 = readq(&bar0->stat_cfg);
3416 if (!(val64 & BIT(0)))
3417 break;
3418 cnt++;
3419 if (cnt == 5)
3420 break; /* Updt failed */
3421 } while(1);
3422 }
3423}
3424
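s2io_updt_stats above kicks off a one-shot statistics transfer and then polls the busy bit with a hard retry cap rather than waiting forever. The helper below shows the same bounded-poll shape in plain C; the predicate and the missing delay are stand-ins for the readq of stat_cfg and the udelay(100) in the driver.

#include <stdio.h>

/* Poll until ready() returns nonzero or max_tries attempts are used up.
 * Returns 0 on success, -1 if the operation never completed. */
static int poll_until(int (*ready)(void *), void *ctx, int max_tries)
{
        for (int cnt = 0; cnt < max_tries; cnt++) {
                /* a real caller would delay between polls here */
                if (ready(ctx))
                        return 0;
        }
        return -1;
}

static int done_after_three(void *ctx)
{
        int *calls = ctx;
        return ++(*calls) >= 3;          /* pretend the hardware finishes on the 3rd poll */
}

int main(void)
{
        int calls = 0;
        printf("bounded poll: %d (after %d reads)\n",
               poll_until(done_after_three, &calls, 5), calls);

        calls = -10;                      /* now it would need 13 polls; the cap is 5 */
        printf("bounded poll: %d\n", poll_until(done_after_three, &calls, 5));
        return 0;
}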
3425/**
3426 * s2io_get_stats - Updates the device statistics structure.
2882 * @dev : pointer to the device structure. 3427 * @dev : pointer to the device structure.
2883 * Description: 3428 * Description:
2884 * This function updates the device statistics structure in the s2io_nic 3429 * This function updates the device statistics structure in the s2io_nic
2885 * structure and returns a pointer to the same. 3430 * structure and returns a pointer to the same.
2886 * Return value: 3431 * Return value:
2887 * pointer to the updated net_device_stats structure. 3432 * pointer to the updated net_device_stats structure.
2888 */ 3433 */
2889 3434
2890static struct net_device_stats *s2io_get_stats(struct net_device *dev) 3435struct net_device_stats *s2io_get_stats(struct net_device *dev)
2891{ 3436{
2892 nic_t *sp = dev->priv; 3437 nic_t *sp = dev->priv;
2893 mac_info_t *mac_control; 3438 mac_info_t *mac_control;
2894 struct config_param *config; 3439 struct config_param *config;
2895 3440
3441
2896 mac_control = &sp->mac_control; 3442 mac_control = &sp->mac_control;
2897 config = &sp->config; 3443 config = &sp->config;
2898 3444
2899 sp->stats.tx_errors = mac_control->stats_info->tmac_any_err_frms; 3445 /* Configure Stats for immediate updt */
2900 sp->stats.rx_errors = mac_control->stats_info->rmac_drop_frms; 3446 s2io_updt_stats(sp);
2901 sp->stats.multicast = mac_control->stats_info->rmac_vld_mcst_frms; 3447
3448 sp->stats.tx_packets =
3449 le32_to_cpu(mac_control->stats_info->tmac_frms);
3450 sp->stats.tx_errors =
3451 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3452 sp->stats.rx_errors =
3453 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3454 sp->stats.multicast =
3455 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
2902 sp->stats.rx_length_errors = 3456 sp->stats.rx_length_errors =
2903 mac_control->stats_info->rmac_long_frms; 3457 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
2904 3458
2905 return (&sp->stats); 3459 return (&sp->stats);
2906} 3460}
@@ -2909,8 +3463,8 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
2909 * s2io_set_multicast - entry point for multicast address enable/disable. 3463 * s2io_set_multicast - entry point for multicast address enable/disable.
2910 * @dev : pointer to the device structure 3464 * @dev : pointer to the device structure
2911 * Description: 3465 * Description:
2912 * This function is a driver entry point which gets called by the kernel 3466 * This function is a driver entry point which gets called by the kernel
2913 * whenever multicast addresses must be enabled/disabled. This also gets 3467 * whenever multicast addresses must be enabled/disabled. This also gets
2914 * called to set/reset promiscuous mode. Depending on the deivce flag, we 3468 * called to set/reset promiscuous mode. Depending on the deivce flag, we
2915 * determine, if multicast address must be enabled or if promiscuous mode 3469 * determine, if multicast address must be enabled or if promiscuous mode
2916 * is to be disabled etc. 3470 * is to be disabled etc.
@@ -2948,6 +3502,8 @@ static void s2io_set_multicast(struct net_device *dev)
2948 /* Disable all Multicast addresses */ 3502 /* Disable all Multicast addresses */
2949 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr), 3503 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2950 &bar0->rmac_addr_data0_mem); 3504 &bar0->rmac_addr_data0_mem);
3505 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3506 &bar0->rmac_addr_data1_mem);
2951 val64 = RMAC_ADDR_CMD_MEM_WE | 3507 val64 = RMAC_ADDR_CMD_MEM_WE |
2952 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 3508 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2953 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos); 3509 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
@@ -3010,7 +3566,7 @@ static void s2io_set_multicast(struct net_device *dev)
3010 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr), 3566 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3011 &bar0->rmac_addr_data0_mem); 3567 &bar0->rmac_addr_data0_mem);
3012 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), 3568 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3013 &bar0->rmac_addr_data1_mem); 3569 &bar0->rmac_addr_data1_mem);
3014 val64 = RMAC_ADDR_CMD_MEM_WE | 3570 val64 = RMAC_ADDR_CMD_MEM_WE |
3015 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 3571 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3016 RMAC_ADDR_CMD_MEM_OFFSET 3572 RMAC_ADDR_CMD_MEM_OFFSET
@@ -3039,8 +3595,7 @@ static void s2io_set_multicast(struct net_device *dev)
3039 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr), 3595 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3040 &bar0->rmac_addr_data0_mem); 3596 &bar0->rmac_addr_data0_mem);
3041 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), 3597 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3042 &bar0->rmac_addr_data1_mem); 3598 &bar0->rmac_addr_data1_mem);
3043
3044 val64 = RMAC_ADDR_CMD_MEM_WE | 3599 val64 = RMAC_ADDR_CMD_MEM_WE |
3045 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 3600 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3046 RMAC_ADDR_CMD_MEM_OFFSET 3601 RMAC_ADDR_CMD_MEM_OFFSET
@@ -3059,12 +3614,12 @@ static void s2io_set_multicast(struct net_device *dev)
3059} 3614}
3060 3615
3061/** 3616/**
3062 * s2io_set_mac_addr - Programs the Xframe mac address 3617 * s2io_set_mac_addr - Programs the Xframe mac address
3063 * @dev : pointer to the device structure. 3618 * @dev : pointer to the device structure.
3064 * @addr: a uchar pointer to the new mac address which is to be set. 3619 * @addr: a uchar pointer to the new mac address which is to be set.
3065 * Description : This procedure will program the Xframe to receive 3620 * Description : This procedure will program the Xframe to receive
3066 * frames with new Mac Address 3621 * frames with new Mac Address
3067 * Return value: SUCCESS on success and an appropriate (-)ve integer 3622 * Return value: SUCCESS on success and an appropriate (-)ve integer
3068 * as defined in errno.h file on failure. 3623 * as defined in errno.h file on failure.
3069 */ 3624 */
3070 3625
@@ -3075,10 +3630,10 @@ int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3075 register u64 val64, mac_addr = 0; 3630 register u64 val64, mac_addr = 0;
3076 int i; 3631 int i;
3077 3632
3078 /* 3633 /*
3079 * Set the new MAC address as the new unicast filter and reflect this 3634 * Set the new MAC address as the new unicast filter and reflect this
3080 * change on the device address registered with the OS. It will be 3635 * change on the device address registered with the OS. It will be
3081 * at offset 0. 3636 * at offset 0.
3082 */ 3637 */
3083 for (i = 0; i < ETH_ALEN; i++) { 3638 for (i = 0; i < ETH_ALEN; i++) {
3084 mac_addr <<= 8; 3639 mac_addr <<= 8;
@@ -3102,12 +3657,12 @@ int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3102} 3657}
3103 3658
3104/** 3659/**
3105 * s2io_ethtool_sset - Sets different link parameters. 3660 * s2io_ethtool_sset - Sets different link parameters.
3106 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure. 3661 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3107 * @info: pointer to the structure with parameters given by ethtool to set 3662 * @info: pointer to the structure with parameters given by ethtool to set
3108 * link information. 3663 * link information.
3109 * Description: 3664 * Description:
3110 * The function sets different link parameters provided by the user onto 3665 * The function sets different link parameters provided by the user onto
3111 * the NIC. 3666 * the NIC.
3112 * Return value: 3667 * Return value:
3113 * 0 on success. 3668 * 0 on success.
@@ -3129,7 +3684,7 @@ static int s2io_ethtool_sset(struct net_device *dev,
3129} 3684}
3130 3685
3131/** 3686/**
3132 * s2io_ethtol_gset - Return link specific information. 3687 * s2io_ethtol_gset - Return link specific information.
3133 * @sp : private member of the device structure, pointer to the 3688 * @sp : private member of the device structure, pointer to the
3134 * s2io_nic structure. 3689 * s2io_nic structure.
3135 * @info : pointer to the structure with parameters given by ethtool 3690 * @info : pointer to the structure with parameters given by ethtool
@@ -3161,8 +3716,8 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3161} 3716}
3162 3717
3163/** 3718/**
3164 * s2io_ethtool_gdrvinfo - Returns driver specific information. 3719 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3165 * @sp : private member of the device structure, which is a pointer to the 3720 * @sp : private member of the device structure, which is a pointer to the
3166 * s2io_nic structure. 3721 * s2io_nic structure.
3167 * @info : pointer to the structure with parameters given by ethtool to 3722 * @info : pointer to the structure with parameters given by ethtool to
3168 * return driver information. 3723 * return driver information.
@@ -3190,9 +3745,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3190 3745
3191/** 3746/**
3192 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer. 3747 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
3193 * @sp: private member of the device structure, which is a pointer to the 3748 * @sp: private member of the device structure, which is a pointer to the
3194 * s2io_nic structure. 3749 * s2io_nic structure.
3195 * @regs : pointer to the structure with parameters given by ethtool for 3750 * @regs : pointer to the structure with parameters given by ethtool for
3196 * dumping the registers. 3751 * dumping the registers.
3197 * @reg_space: The input argumnet into which all the registers are dumped. 3752 * @reg_space: The input argumnet into which all the registers are dumped.
3198 * Description: 3753 * Description:
@@ -3221,11 +3776,11 @@ static void s2io_ethtool_gregs(struct net_device *dev,
3221 3776
3222/** 3777/**
3223 * s2io_phy_id - timer function that alternates adapter LED. 3778 * s2io_phy_id - timer function that alternates adapter LED.
3224 * @data : address of the private member of the device structure, which 3779 * @data : address of the private member of the device structure, which
3225 * is a pointer to the s2io_nic structure, provided as an u32. 3780 * is a pointer to the s2io_nic structure, provided as an u32.
3226 * Description: This is actually the timer function that alternates the 3781 * Description: This is actually the timer function that alternates the
3227 * adapter LED bit of the adapter control bit to set/reset every time on 3782 * adapter LED bit of the adapter control bit to set/reset every time on
3228 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks 3783 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
3229 * once every second. 3784 * once every second.
3230*/ 3785*/
3231static void s2io_phy_id(unsigned long data) 3786static void s2io_phy_id(unsigned long data)
@@ -3236,7 +3791,8 @@ static void s2io_phy_id(unsigned long data)
3236 u16 subid; 3791 u16 subid;
3237 3792
3238 subid = sp->pdev->subsystem_device; 3793 subid = sp->pdev->subsystem_device;
3239 if ((subid & 0xFF) >= 0x07) { 3794 if ((sp->device_type == XFRAME_II_DEVICE) ||
3795 ((subid & 0xFF) >= 0x07)) {
3240 val64 = readq(&bar0->gpio_control); 3796 val64 = readq(&bar0->gpio_control);
3241 val64 ^= GPIO_CTRL_GPIO_0; 3797 val64 ^= GPIO_CTRL_GPIO_0;
3242 writeq(val64, &bar0->gpio_control); 3798 writeq(val64, &bar0->gpio_control);
@@ -3253,12 +3809,12 @@ static void s2io_phy_id(unsigned long data)
3253 * s2io_ethtool_idnic - To physically identify the nic on the system. 3809 * s2io_ethtool_idnic - To physically identify the nic on the system.
3254 * @sp : private member of the device structure, which is a pointer to the 3810 * @sp : private member of the device structure, which is a pointer to the
3255 * s2io_nic structure. 3811 * s2io_nic structure.
3256 * @id : pointer to the structure with identification parameters given by 3812 * @id : pointer to the structure with identification parameters given by
3257 * ethtool. 3813 * ethtool.
3258 * Description: Used to physically identify the NIC on the system. 3814 * Description: Used to physically identify the NIC on the system.
3259 * The Link LED will blink for a time specified by the user for 3815 * The Link LED will blink for a time specified by the user for
3260 * identification. 3816 * identification.
3261 * NOTE: The Link has to be Up to be able to blink the LED. Hence 3817 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3262 * identification is possible only if it's link is up. 3818 * identification is possible only if it's link is up.
3263 * Return value: 3819 * Return value:
3264 * int , returns 0 on success 3820 * int , returns 0 on success
@@ -3273,7 +3829,8 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3273 3829
3274 subid = sp->pdev->subsystem_device; 3830 subid = sp->pdev->subsystem_device;
3275 last_gpio_ctrl_val = readq(&bar0->gpio_control); 3831 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3276 if ((subid & 0xFF) < 0x07) { 3832 if ((sp->device_type == XFRAME_I_DEVICE) &&
3833 ((subid & 0xFF) < 0x07)) {
3277 val64 = readq(&bar0->adapter_control); 3834 val64 = readq(&bar0->adapter_control);
3278 if (!(val64 & ADAPTER_CNTL_EN)) { 3835 if (!(val64 & ADAPTER_CNTL_EN)) {
3279 printk(KERN_ERR 3836 printk(KERN_ERR
@@ -3288,12 +3845,12 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3288 } 3845 }
3289 mod_timer(&sp->id_timer, jiffies); 3846 mod_timer(&sp->id_timer, jiffies);
3290 if (data) 3847 if (data)
3291 msleep(data * 1000); 3848 msleep_interruptible(data * HZ);
3292 else 3849 else
3293 msleep(0xFFFFFFFF); 3850 msleep_interruptible(MAX_FLICKER_TIME);
3294 del_timer_sync(&sp->id_timer); 3851 del_timer_sync(&sp->id_timer);
3295 3852
3296 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) { 3853 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
3297 writeq(last_gpio_ctrl_val, &bar0->gpio_control); 3854 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3298 last_gpio_ctrl_val = readq(&bar0->gpio_control); 3855 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3299 } 3856 }
@@ -3303,7 +3860,8 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3303 3860
3304/** 3861/**
3305 * s2io_ethtool_getpause_data -Pause frame frame generation and reception. 3862 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
3306 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure. 3863 * @sp : private member of the device structure, which is a pointer to the
3864 * s2io_nic structure.
3307 * @ep : pointer to the structure with pause parameters given by ethtool. 3865 * @ep : pointer to the structure with pause parameters given by ethtool.
3308 * Description: 3866 * Description:
3309 * Returns the Pause frame generation and reception capability of the NIC. 3867 * Returns the Pause frame generation and reception capability of the NIC.
@@ -3327,7 +3885,7 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
3327 3885
3328/** 3886/**
3329 * s2io_ethtool_setpause_data - set/reset pause frame generation. 3887 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3330 * @sp : private member of the device structure, which is a pointer to the 3888 * @sp : private member of the device structure, which is a pointer to the
3331 * s2io_nic structure. 3889 * s2io_nic structure.
3332 * @ep : pointer to the structure with pause parameters given by ethtool. 3890 * @ep : pointer to the structure with pause parameters given by ethtool.
3333 * Description: 3891 * Description:
@@ -3338,7 +3896,7 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
3338 */ 3896 */
3339 3897
3340static int s2io_ethtool_setpause_data(struct net_device *dev, 3898static int s2io_ethtool_setpause_data(struct net_device *dev,
3341 struct ethtool_pauseparam *ep) 3899 struct ethtool_pauseparam *ep)
3342{ 3900{
3343 u64 val64; 3901 u64 val64;
3344 nic_t *sp = dev->priv; 3902 nic_t *sp = dev->priv;
@@ -3359,13 +3917,13 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
3359 3917
3360/** 3918/**
3361 * read_eeprom - reads 4 bytes of data from user given offset. 3919 * read_eeprom - reads 4 bytes of data from user given offset.
3362 * @sp : private member of the device structure, which is a pointer to the 3920 * @sp : private member of the device structure, which is a pointer to the
3363 * s2io_nic structure. 3921 * s2io_nic structure.
3364 * @off : offset at which the data must be written 3922 * @off : offset at which the data must be written
3365 * @data : Its an output parameter where the data read at the given 3923 * @data : Its an output parameter where the data read at the given
3366 * offset is stored. 3924 * offset is stored.
3367 * Description: 3925 * Description:
3368 * Will read 4 bytes of data from the user given offset and return the 3926 * Will read 4 bytes of data from the user given offset and return the
3369 * read data. 3927 * read data.
3370 * NOTE: Will allow to read only part of the EEPROM visible through the 3928 * NOTE: Will allow to read only part of the EEPROM visible through the
3371 * I2C bus. 3929 * I2C bus.
@@ -3406,7 +3964,7 @@ static int read_eeprom(nic_t * sp, int off, u32 * data)
3406 * s2io_nic structure. 3964 * s2io_nic structure.
3407 * @off : offset at which the data must be written 3965 * @off : offset at which the data must be written
3408 * @data : The data that is to be written 3966 * @data : The data that is to be written
3409 * @cnt : Number of bytes of the data that are actually to be written into 3967 * @cnt : Number of bytes of the data that are actually to be written into
3410 * the Eeprom. (max of 3) 3968 * the Eeprom. (max of 3)
3411 * Description: 3969 * Description:
3412 * Actually writes the relevant part of the data value into the Eeprom 3970 * Actually writes the relevant part of the data value into the Eeprom
@@ -3443,7 +4001,7 @@ static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3443/** 4001/**
3444 * s2io_ethtool_geeprom - reads the value stored in the Eeprom. 4002 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3445 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure. 4003 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3446 * @eeprom : pointer to the user level structure provided by ethtool, 4004 * @eeprom : pointer to the user level structure provided by ethtool,
3447 * containing all relevant information. 4005 * containing all relevant information.
3448 * @data_buf : user defined value to be written into Eeprom. 4006 * @data_buf : user defined value to be written into Eeprom.
3449 * Description: Reads the values stored in the Eeprom at given offset 4007 * Description: Reads the values stored in the Eeprom at given offset
@@ -3454,7 +4012,7 @@ static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3454 */ 4012 */
3455 4013
3456static int s2io_ethtool_geeprom(struct net_device *dev, 4014static int s2io_ethtool_geeprom(struct net_device *dev,
3457 struct ethtool_eeprom *eeprom, u8 * data_buf) 4015 struct ethtool_eeprom *eeprom, u8 * data_buf)
3458{ 4016{
3459 u32 data, i, valid; 4017 u32 data, i, valid;
3460 nic_t *sp = dev->priv; 4018 nic_t *sp = dev->priv;
@@ -3479,7 +4037,7 @@ static int s2io_ethtool_geeprom(struct net_device *dev,
3479 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom 4037 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3480 * @sp : private member of the device structure, which is a pointer to the 4038 * @sp : private member of the device structure, which is a pointer to the
3481 * s2io_nic structure. 4039 * s2io_nic structure.
3482 * @eeprom : pointer to the user level structure provided by ethtool, 4040 * @eeprom : pointer to the user level structure provided by ethtool,
3483 * containing all relevant information. 4041 * containing all relevant information.
3484 * @data_buf ; user defined value to be written into Eeprom. 4042 * @data_buf ; user defined value to be written into Eeprom.
3485 * Description: 4043 * Description:
@@ -3527,8 +4085,8 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
3527} 4085}
3528 4086
3529/** 4087/**
3530 * s2io_register_test - reads and writes into all clock domains. 4088 * s2io_register_test - reads and writes into all clock domains.
3531 * @sp : private member of the device structure, which is a pointer to the 4089 * @sp : private member of the device structure, which is a pointer to the
3532 * s2io_nic structure. 4090 * s2io_nic structure.
3533 * @data : variable that returns the result of each of the test conducted b 4091 * @data : variable that returns the result of each of the test conducted b
3534 * by the driver. 4092 * by the driver.
@@ -3545,8 +4103,8 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
3545 u64 val64 = 0; 4103 u64 val64 = 0;
3546 int fail = 0; 4104 int fail = 0;
3547 4105
3548 val64 = readq(&bar0->pcc_enable); 4106 val64 = readq(&bar0->pif_rd_swapper_fb);
3549 if (val64 != 0xff00000000000000ULL) { 4107 if (val64 != 0x123456789abcdefULL) {
3550 fail = 1; 4108 fail = 1;
3551 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n"); 4109 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3552 } 4110 }
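The first register-test probe now reads pif_rd_swapper_fb, whose readback is a fixed feedback pattern (0x123456789abcdefULL in the hunk), rather than pcc_enable; a mismatch flags either broken register access or a mis-programmed byte swapper. A generic sketch of that readback check follows, assuming a platform readq(); only the register name and the expected constant come from the hunk, the helper name is illustrative.

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/io.h>

/* Read a register that is documented to hold a fixed value and flag
 * any mismatch, the way the hunk checks pif_rd_swapper_fb against
 * 0x123456789abcdefULL. */
static int readback_check(void __iomem *reg, u64 expected)
{
	u64 val = readq(reg);

	return (val == expected) ? 0 : -EIO;
}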
@@ -3590,13 +4148,13 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
3590} 4148}
3591 4149
3592/** 4150/**
3593 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed. 4151 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3594 * @sp : private member of the device structure, which is a pointer to the 4152 * @sp : private member of the device structure, which is a pointer to the
3595 * s2io_nic structure. 4153 * s2io_nic structure.
3596 * @data:variable that returns the result of each of the test conducted by 4154 * @data:variable that returns the result of each of the test conducted by
3597 * the driver. 4155 * the driver.
3598 * Description: 4156 * Description:
3599 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL 4157 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3600 * register. 4158 * register.
3601 * Return value: 4159 * Return value:
3602 * 0 on success. 4160 * 0 on success.
@@ -3661,14 +4219,14 @@ static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3661 4219
3662/** 4220/**
3663 * s2io_bist_test - invokes the MemBist test of the card . 4221 * s2io_bist_test - invokes the MemBist test of the card .
3664 * @sp : private member of the device structure, which is a pointer to the 4222 * @sp : private member of the device structure, which is a pointer to the
3665 * s2io_nic structure. 4223 * s2io_nic structure.
3666 * @data:variable that returns the result of each of the test conducted by 4224 * @data:variable that returns the result of each of the test conducted by
3667 * the driver. 4225 * the driver.
3668 * Description: 4226 * Description:
3669 * This invokes the MemBist test of the card. We give around 4227 * This invokes the MemBist test of the card. We give around
3670 * 2 secs time for the Test to complete. If it's still not complete 4228 * 2 secs time for the Test to complete. If it's still not complete
3671 * within this peiod, we consider that the test failed. 4229 * within this peiod, we consider that the test failed.
3672 * Return value: 4230 * Return value:
3673 * 0 on success and -1 on failure. 4231 * 0 on success and -1 on failure.
3674 */ 4232 */
@@ -3697,13 +4255,13 @@ static int s2io_bist_test(nic_t * sp, uint64_t * data)
3697} 4255}
3698 4256
3699/** 4257/**
3700 * s2io-link_test - verifies the link state of the nic 4258 * s2io-link_test - verifies the link state of the nic
3701 * @sp ; private member of the device structure, which is a pointer to the 4259 * @sp ; private member of the device structure, which is a pointer to the
3702 * s2io_nic structure. 4260 * s2io_nic structure.
3703 * @data: variable that returns the result of each of the test conducted by 4261 * @data: variable that returns the result of each of the test conducted by
3704 * the driver. 4262 * the driver.
3705 * Description: 4263 * Description:
3706 * The function verifies the link state of the NIC and updates the input 4264 * The function verifies the link state of the NIC and updates the input
3707 * argument 'data' appropriately. 4265 * argument 'data' appropriately.
3708 * Return value: 4266 * Return value:
3709 * 0 on success. 4267 * 0 on success.
@@ -3722,13 +4280,13 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
3722} 4280}
3723 4281
3724/** 4282/**
3725 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC 4283 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3726 * @sp - private member of the device structure, which is a pointer to the 4284 * @sp - private member of the device structure, which is a pointer to the
3727 * s2io_nic structure. 4285 * s2io_nic structure.
3728 * @data - variable that returns the result of each of the test 4286 * @data - variable that returns the result of each of the test
3729 * conducted by the driver. 4287 * conducted by the driver.
3730 * Description: 4288 * Description:
3731 * This is one of the offline test that tests the read and write 4289 * This is one of the offline test that tests the read and write
3732 * access to the RldRam chip on the NIC. 4290 * access to the RldRam chip on the NIC.
3733 * Return value: 4291 * Return value:
3734 * 0 on success. 4292 * 0 on success.
@@ -3833,7 +4391,7 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3833 * s2io_nic structure. 4391 * s2io_nic structure.
3834 * @ethtest : pointer to a ethtool command specific structure that will be 4392 * @ethtest : pointer to a ethtool command specific structure that will be
3835 * returned to the user. 4393 * returned to the user.
3836 * @data : variable that returns the result of each of the test 4394 * @data : variable that returns the result of each of the test
3837 * conducted by the driver. 4395 * conducted by the driver.
3838 * Description: 4396 * Description:
3839 * This function conducts 6 tests ( 4 offline and 2 online) to determine 4397 * This function conducts 6 tests ( 4 offline and 2 online) to determine
@@ -3851,23 +4409,18 @@ static void s2io_ethtool_test(struct net_device *dev,
3851 4409
3852 if (ethtest->flags == ETH_TEST_FL_OFFLINE) { 4410 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
3853 /* Offline Tests. */ 4411 /* Offline Tests. */
3854 if (orig_state) { 4412 if (orig_state)
3855 s2io_close(sp->dev); 4413 s2io_close(sp->dev);
3856 s2io_set_swapper(sp);
3857 } else
3858 s2io_set_swapper(sp);
3859 4414
3860 if (s2io_register_test(sp, &data[0])) 4415 if (s2io_register_test(sp, &data[0]))
3861 ethtest->flags |= ETH_TEST_FL_FAILED; 4416 ethtest->flags |= ETH_TEST_FL_FAILED;
3862 4417
3863 s2io_reset(sp); 4418 s2io_reset(sp);
3864 s2io_set_swapper(sp);
3865 4419
3866 if (s2io_rldram_test(sp, &data[3])) 4420 if (s2io_rldram_test(sp, &data[3]))
3867 ethtest->flags |= ETH_TEST_FL_FAILED; 4421 ethtest->flags |= ETH_TEST_FL_FAILED;
3868 4422
3869 s2io_reset(sp); 4423 s2io_reset(sp);
3870 s2io_set_swapper(sp);
3871 4424
3872 if (s2io_eeprom_test(sp, &data[1])) 4425 if (s2io_eeprom_test(sp, &data[1]))
3873 ethtest->flags |= ETH_TEST_FL_FAILED; 4426 ethtest->flags |= ETH_TEST_FL_FAILED;
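The offline-test hunk drops the redundant s2io_set_swapper() calls and leaves a plain sequence of register, RLDRAM and EEPROM tests, each of which only ever ORs ETH_TEST_FL_FAILED into ethtest->flags. A small user-space sketch of that accumulate-only pattern in isolation; the function shape and names are illustrative, only the flag value matches linux/ethtool.h.

#include <stdint.h>

#define ETH_TEST_FL_FAILED	(1 << 1)	/* as in linux/ethtool.h */

/* Run a set of self-tests; any single failure marks the whole run as
 * failed, but later tests still execute and fill in their results. */
static uint32_t run_offline_tests(int (*test[])(uint64_t *), uint64_t *res,
				  int n)
{
	uint32_t flags = 0;
	int i;

	for (i = 0; i < n; i++)
		if (test[i](&res[i]))
			flags |= ETH_TEST_FL_FAILED;
	return flags;
}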
@@ -3910,61 +4463,111 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
3910 nic_t *sp = dev->priv; 4463 nic_t *sp = dev->priv;
3911 StatInfo_t *stat_info = sp->mac_control.stats_info; 4464 StatInfo_t *stat_info = sp->mac_control.stats_info;
3912 4465
3913 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms); 4466 s2io_updt_stats(sp);
3914 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets); 4467 tmp_stats[i++] =
4468 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
4469 le32_to_cpu(stat_info->tmac_frms);
4470 tmp_stats[i++] =
4471 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4472 le32_to_cpu(stat_info->tmac_data_octets);
3915 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms); 4473 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
3916 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms); 4474 tmp_stats[i++] =
3917 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms); 4475 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4476 le32_to_cpu(stat_info->tmac_mcst_frms);
4477 tmp_stats[i++] =
4478 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4479 le32_to_cpu(stat_info->tmac_bcst_frms);
3918 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms); 4480 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
3919 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms); 4481 tmp_stats[i++] =
4482 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4483 le32_to_cpu(stat_info->tmac_any_err_frms);
3920 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets); 4484 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
3921 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip); 4485 tmp_stats[i++] =
3922 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip); 4486 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
3923 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp); 4487 le32_to_cpu(stat_info->tmac_vld_ip);
3924 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp); 4488 tmp_stats[i++] =
4489 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4490 le32_to_cpu(stat_info->tmac_drop_ip);
4491 tmp_stats[i++] =
4492 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4493 le32_to_cpu(stat_info->tmac_icmp);
4494 tmp_stats[i++] =
4495 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4496 le32_to_cpu(stat_info->tmac_rst_tcp);
3925 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp); 4497 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
3926 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp); 4498 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
3927 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms); 4499 le32_to_cpu(stat_info->tmac_udp);
3928 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets); 4500 tmp_stats[i++] =
4501 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4502 le32_to_cpu(stat_info->rmac_vld_frms);
4503 tmp_stats[i++] =
4504 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4505 le32_to_cpu(stat_info->rmac_data_octets);
3929 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms); 4506 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
3930 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms); 4507 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
3931 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms); 4508 tmp_stats[i++] =
3932 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms); 4509 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4510 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4511 tmp_stats[i++] =
4512 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4513 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
3933 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms); 4514 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
3934 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms); 4515 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
3935 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms); 4516 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
3936 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms); 4517 tmp_stats[i++] =
3937 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms); 4518 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
3938 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms); 4519 le32_to_cpu(stat_info->rmac_discarded_frms);
3939 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms); 4520 tmp_stats[i++] =
3940 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms); 4521 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
3941 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip); 4522 le32_to_cpu(stat_info->rmac_usized_frms);
4523 tmp_stats[i++] =
4524 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4525 le32_to_cpu(stat_info->rmac_osized_frms);
4526 tmp_stats[i++] =
4527 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4528 le32_to_cpu(stat_info->rmac_frag_frms);
4529 tmp_stats[i++] =
4530 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4531 le32_to_cpu(stat_info->rmac_jabber_frms);
4532 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4533 le32_to_cpu(stat_info->rmac_ip);
3942 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets); 4534 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
3943 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip); 4535 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
3944 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip); 4536 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
3945 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp); 4537 le32_to_cpu(stat_info->rmac_drop_ip);
4538 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4539 le32_to_cpu(stat_info->rmac_icmp);
3946 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp); 4540 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
3947 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp); 4541 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
3948 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp); 4542 le32_to_cpu(stat_info->rmac_udp);
3949 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt); 4543 tmp_stats[i++] =
3950 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip); 4544 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4545 le32_to_cpu(stat_info->rmac_err_drp_udp);
4546 tmp_stats[i++] =
4547 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4548 le32_to_cpu(stat_info->rmac_pause_cnt);
4549 tmp_stats[i++] =
4550 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4551 le32_to_cpu(stat_info->rmac_accepted_ip);
3951 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp); 4552 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
4553 tmp_stats[i++] = 0;
4554 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4555 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
3952} 4556}
3953 4557
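The statistics hunk above widens the 32-bit MAC counters to 64 bits by pairing each low-order word with the hardware's separate overflow word, i.e. (u64)le32_to_cpu(oflow) << 32 | le32_to_cpu(lo). A tiny user-space sketch of that assembly, using glibc's le32toh() from <endian.h> as a stand-in for the kernel's le32_to_cpu():

#include <stdint.h>
#include <endian.h>

/* Glue a little-endian 32-bit counter and its overflow word into one
 * 64-bit statistic, the same shift-and-or pattern used in the hunk. */
static uint64_t stat64(uint32_t oflow_le, uint32_t lo_le)
{
	return ((uint64_t)le32toh(oflow_le) << 32) | le32toh(lo_le);
}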
3954static int s2io_ethtool_get_regs_len(struct net_device *dev) 4558int s2io_ethtool_get_regs_len(struct net_device *dev)
3955{ 4559{
3956 return (XENA_REG_SPACE); 4560 return (XENA_REG_SPACE);
3957} 4561}
3958 4562
3959 4563
3960static u32 s2io_ethtool_get_rx_csum(struct net_device * dev) 4564u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
3961{ 4565{
3962 nic_t *sp = dev->priv; 4566 nic_t *sp = dev->priv;
3963 4567
3964 return (sp->rx_csum); 4568 return (sp->rx_csum);
3965} 4569}
3966 4570int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3967static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3968{ 4571{
3969 nic_t *sp = dev->priv; 4572 nic_t *sp = dev->priv;
3970 4573
@@ -3975,19 +4578,17 @@ static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3975 4578
3976 return 0; 4579 return 0;
3977} 4580}
3978 4581int s2io_get_eeprom_len(struct net_device *dev)
3979static int s2io_get_eeprom_len(struct net_device *dev)
3980{ 4582{
3981 return (XENA_EEPROM_SPACE); 4583 return (XENA_EEPROM_SPACE);
3982} 4584}
3983 4585
3984static int s2io_ethtool_self_test_count(struct net_device *dev) 4586int s2io_ethtool_self_test_count(struct net_device *dev)
3985{ 4587{
3986 return (S2IO_TEST_LEN); 4588 return (S2IO_TEST_LEN);
3987} 4589}
3988 4590void s2io_ethtool_get_strings(struct net_device *dev,
3989static void s2io_ethtool_get_strings(struct net_device *dev, 4591 u32 stringset, u8 * data)
3990 u32 stringset, u8 * data)
3991{ 4592{
3992 switch (stringset) { 4593 switch (stringset) {
3993 case ETH_SS_TEST: 4594 case ETH_SS_TEST:
@@ -3998,13 +4599,12 @@ static void s2io_ethtool_get_strings(struct net_device *dev,
3998 sizeof(ethtool_stats_keys)); 4599 sizeof(ethtool_stats_keys));
3999 } 4600 }
4000} 4601}
4001
4002static int s2io_ethtool_get_stats_count(struct net_device *dev) 4602static int s2io_ethtool_get_stats_count(struct net_device *dev)
4003{ 4603{
4004 return (S2IO_STAT_LEN); 4604 return (S2IO_STAT_LEN);
4005} 4605}
4006 4606
4007static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) 4607int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4008{ 4608{
4009 if (data) 4609 if (data)
4010 dev->features |= NETIF_F_IP_CSUM; 4610 dev->features |= NETIF_F_IP_CSUM;
@@ -4046,21 +4646,18 @@ static struct ethtool_ops netdev_ethtool_ops = {
4046}; 4646};
4047 4647
4048/** 4648/**
4049 * s2io_ioctl - Entry point for the Ioctl 4649 * s2io_ioctl - Entry point for the Ioctl
4050 * @dev : Device pointer. 4650 * @dev : Device pointer.
4051 * @ifr : An IOCTL specefic structure, that can contain a pointer to 4651 * @ifr : An IOCTL specefic structure, that can contain a pointer to
4052 * a proprietary structure used to pass information to the driver. 4652 * a proprietary structure used to pass information to the driver.
4053 * @cmd : This is used to distinguish between the different commands that 4653 * @cmd : This is used to distinguish between the different commands that
4054 * can be passed to the IOCTL functions. 4654 * can be passed to the IOCTL functions.
4055 * Description: 4655 * Description:
4056 * This function has support for ethtool, adding multiple MAC addresses on 4656 * Currently there are no special functionality supported in IOCTL, hence
4057 * the NIC and some DBG commands for the util tool. 4657 * function always return EOPNOTSUPPORTED
4058 * Return value:
4059 * Currently the IOCTL supports no operations, hence by default this
4060 * function returns OP NOT SUPPORTED value.
4061 */ 4658 */
4062 4659
4063static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 4660int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4064{ 4661{
4065 return -EOPNOTSUPP; 4662 return -EOPNOTSUPP;
4066} 4663}
@@ -4076,17 +4673,9 @@ static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4076 * file on failure. 4673 * file on failure.
4077 */ 4674 */
4078 4675
4079static int s2io_change_mtu(struct net_device *dev, int new_mtu) 4676int s2io_change_mtu(struct net_device *dev, int new_mtu)
4080{ 4677{
4081 nic_t *sp = dev->priv; 4678 nic_t *sp = dev->priv;
4082 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4083 register u64 val64;
4084
4085 if (netif_running(dev)) {
4086 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4087 DBG_PRINT(ERR_DBG, "change its MTU \n");
4088 return -EBUSY;
4089 }
4090 4679
4091 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) { 4680 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4092 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", 4681 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
@@ -4094,11 +4683,22 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
4094 return -EPERM; 4683 return -EPERM;
4095 } 4684 }
4096 4685
4097 /* Set the new MTU into the PYLD register of the NIC */
4098 val64 = new_mtu;
4099 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4100
4101 dev->mtu = new_mtu; 4686 dev->mtu = new_mtu;
4687 if (netif_running(dev)) {
4688 s2io_card_down(sp);
4689 netif_stop_queue(dev);
4690 if (s2io_card_up(sp)) {
4691 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4692 __FUNCTION__);
4693 }
4694 if (netif_queue_stopped(dev))
4695 netif_wake_queue(dev);
4696 } else { /* Device is down */
4697 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4698 u64 val64 = new_mtu;
4699
4700 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4701 }
4102 4702
4103 return 0; 4703 return 0;
4104} 4704}
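s2io_change_mtu() no longer refuses to run while the interface is up. The new flow is: validate the range, store dev->mtu, and if the device is running, bounce it with s2io_card_down()/s2io_card_up() so the hardware is re-initialised for the new size; only when the interface is down does it program rmac_max_pyld_len directly. A trimmed sketch of that shape, assuming the driver's own s2io.h for nic_t, MIN_MTU, S2IO_JUMBO_SIZE and the card_up/down helpers; the register write for the down case is omitted and the -EIO return is illustrative, the driver itself only logs the bring-up failure.

static int change_mtu_sketch(struct net_device *dev, int new_mtu)
{
	nic_t *sp = dev->priv;

	if (new_mtu < MIN_MTU || new_mtu > S2IO_JUMBO_SIZE)
		return -EPERM;

	dev->mtu = new_mtu;
	if (!netif_running(dev))
		return 0;		/* real code writes rmac_max_pyld_len here */

	s2io_card_down(sp);		/* quiesce and free buffers */
	netif_stop_queue(dev);
	if (s2io_card_up(sp))		/* re-init with the new dev->mtu */
		return -EIO;
	netif_wake_queue(dev);
	return 0;
}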
@@ -4108,9 +4708,9 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
4108 * @dev_adr : address of the device structure in dma_addr_t format. 4708 * @dev_adr : address of the device structure in dma_addr_t format.
4109 * Description: 4709 * Description:
4110 * This is the tasklet or the bottom half of the ISR. This is 4710 * This is the tasklet or the bottom half of the ISR. This is
4111 * an extension of the ISR which is scheduled by the scheduler to be run 4711 * an extension of the ISR which is scheduled by the scheduler to be run
4112 * when the load on the CPU is low. All low priority tasks of the ISR can 4712 * when the load on the CPU is low. All low priority tasks of the ISR can
4113 * be pushed into the tasklet. For now the tasklet is used only to 4713 * be pushed into the tasklet. For now the tasklet is used only to
4114 * replenish the Rx buffers in the Rx buffer descriptors. 4714 * replenish the Rx buffers in the Rx buffer descriptors.
4115 * Return value: 4715 * Return value:
4116 * void. 4716 * void.
@@ -4166,19 +4766,22 @@ static void s2io_set_link(unsigned long data)
4166 } 4766 }
4167 4767
4168 subid = nic->pdev->subsystem_device; 4768 subid = nic->pdev->subsystem_device;
4169 /* 4769 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
4170 * Allow a small delay for the NICs self initiated 4770 /*
4171 * cleanup to complete. 4771 * Allow a small delay for the NICs self initiated
4172 */ 4772 * cleanup to complete.
4173 msleep(100); 4773 */
4774 msleep(100);
4775 }
4174 4776
4175 val64 = readq(&bar0->adapter_status); 4777 val64 = readq(&bar0->adapter_status);
4176 if (verify_xena_quiescence(val64, nic->device_enabled_once)) { 4778 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4177 if (LINK_IS_UP(val64)) { 4779 if (LINK_IS_UP(val64)) {
4178 val64 = readq(&bar0->adapter_control); 4780 val64 = readq(&bar0->adapter_control);
4179 val64 |= ADAPTER_CNTL_EN; 4781 val64 |= ADAPTER_CNTL_EN;
4180 writeq(val64, &bar0->adapter_control); 4782 writeq(val64, &bar0->adapter_control);
4181 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) { 4783 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4784 subid)) {
4182 val64 = readq(&bar0->gpio_control); 4785 val64 = readq(&bar0->gpio_control);
4183 val64 |= GPIO_CTRL_GPIO_0; 4786 val64 |= GPIO_CTRL_GPIO_0;
4184 writeq(val64, &bar0->gpio_control); 4787 writeq(val64, &bar0->gpio_control);
@@ -4187,20 +4790,24 @@ static void s2io_set_link(unsigned long data)
4187 val64 |= ADAPTER_LED_ON; 4790 val64 |= ADAPTER_LED_ON;
4188 writeq(val64, &bar0->adapter_control); 4791 writeq(val64, &bar0->adapter_control);
4189 } 4792 }
4190 val64 = readq(&bar0->adapter_status); 4793 if (s2io_link_fault_indication(nic) ==
4191 if (!LINK_IS_UP(val64)) { 4794 MAC_RMAC_ERR_TIMER) {
4192 DBG_PRINT(ERR_DBG, "%s:", dev->name); 4795 val64 = readq(&bar0->adapter_status);
4193 DBG_PRINT(ERR_DBG, " Link down"); 4796 if (!LINK_IS_UP(val64)) {
4194 DBG_PRINT(ERR_DBG, "after "); 4797 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4195 DBG_PRINT(ERR_DBG, "enabling "); 4798 DBG_PRINT(ERR_DBG, " Link down");
4196 DBG_PRINT(ERR_DBG, "device \n"); 4799 DBG_PRINT(ERR_DBG, "after ");
4800 DBG_PRINT(ERR_DBG, "enabling ");
4801 DBG_PRINT(ERR_DBG, "device \n");
4802 }
4197 } 4803 }
4198 if (nic->device_enabled_once == FALSE) { 4804 if (nic->device_enabled_once == FALSE) {
4199 nic->device_enabled_once = TRUE; 4805 nic->device_enabled_once = TRUE;
4200 } 4806 }
4201 s2io_link(nic, LINK_UP); 4807 s2io_link(nic, LINK_UP);
4202 } else { 4808 } else {
4203 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) { 4809 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4810 subid)) {
4204 val64 = readq(&bar0->gpio_control); 4811 val64 = readq(&bar0->gpio_control);
4205 val64 &= ~GPIO_CTRL_GPIO_0; 4812 val64 &= ~GPIO_CTRL_GPIO_0;
4206 writeq(val64, &bar0->gpio_control); 4813 writeq(val64, &bar0->gpio_control);
@@ -4223,9 +4830,11 @@ static void s2io_card_down(nic_t * sp)
4223 unsigned long flags; 4830 unsigned long flags;
4224 register u64 val64 = 0; 4831 register u64 val64 = 0;
4225 4832
4833 del_timer_sync(&sp->alarm_timer);
4226 /* If s2io_set_link task is executing, wait till it completes. */ 4834 /* If s2io_set_link task is executing, wait till it completes. */
4227 while (test_and_set_bit(0, &(sp->link_state))) 4835 while (test_and_set_bit(0, &(sp->link_state))) {
4228 msleep(50); 4836 msleep(50);
4837 }
4229 atomic_set(&sp->card_state, CARD_DOWN); 4838 atomic_set(&sp->card_state, CARD_DOWN);
4230 4839
4231 /* disable Tx and Rx traffic on the NIC */ 4840 /* disable Tx and Rx traffic on the NIC */
@@ -4237,7 +4846,7 @@ static void s2io_card_down(nic_t * sp)
4237 /* Check if the device is Quiescent and then Reset the NIC */ 4846 /* Check if the device is Quiescent and then Reset the NIC */
4238 do { 4847 do {
4239 val64 = readq(&bar0->adapter_status); 4848 val64 = readq(&bar0->adapter_status);
4240 if (verify_xena_quiescence(val64, sp->device_enabled_once)) { 4849 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4241 break; 4850 break;
4242 } 4851 }
4243 4852
@@ -4251,14 +4860,27 @@ static void s2io_card_down(nic_t * sp)
4251 break; 4860 break;
4252 } 4861 }
4253 } while (1); 4862 } while (1);
4254 spin_lock_irqsave(&sp->tx_lock, flags);
4255 s2io_reset(sp); 4863 s2io_reset(sp);
4256 4864
4257 /* Free all unused Tx and Rx buffers */ 4865 /* Waiting till all Interrupt handlers are complete */
4866 cnt = 0;
4867 do {
4868 msleep(10);
4869 if (!atomic_read(&sp->isr_cnt))
4870 break;
4871 cnt++;
4872 } while(cnt < 5);
4873
4874 spin_lock_irqsave(&sp->tx_lock, flags);
4875 /* Free all Tx buffers */
4258 free_tx_buffers(sp); 4876 free_tx_buffers(sp);
4877 spin_unlock_irqrestore(&sp->tx_lock, flags);
4878
4879 /* Free all Rx buffers */
4880 spin_lock_irqsave(&sp->rx_lock, flags);
4259 free_rx_buffers(sp); 4881 free_rx_buffers(sp);
4882 spin_unlock_irqrestore(&sp->rx_lock, flags);
4260 4883
4261 spin_unlock_irqrestore(&sp->tx_lock, flags);
4262 clear_bit(0, &(sp->link_state)); 4884 clear_bit(0, &(sp->link_state));
4263} 4885}
4264 4886
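The teardown hunk adds two things: a bounded wait on sp->isr_cnt (incremented on ISR entry, decremented on exit) before any buffers are released, and separate tx_lock/rx_lock critical sections around free_tx_buffers()/free_rx_buffers() instead of one tx_lock spanning the whole reset. The wait is just five 10 ms polls of an atomic counter; a generic sketch of that drain loop:

#include <linux/delay.h>
#include <asm/atomic.h>

/* Give in-flight interrupt handlers up to ~50 ms to drain before the
 * caller starts tearing buffers down (mirrors the cnt < 5 loop above). */
static void wait_for_isrs(atomic_t *isr_cnt)
{
	int tries = 0;

	do {
		msleep(10);
		if (!atomic_read(isr_cnt))
			break;
	} while (++tries < 5);
}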
@@ -4276,8 +4898,8 @@ static int s2io_card_up(nic_t * sp)
4276 return -ENODEV; 4898 return -ENODEV;
4277 } 4899 }
4278 4900
4279 /* 4901 /*
4280 * Initializing the Rx buffers. For now we are considering only 1 4902 * Initializing the Rx buffers. For now we are considering only 1
4281 * Rx ring and initializing buffers into 30 Rx blocks 4903 * Rx ring and initializing buffers into 30 Rx blocks
4282 */ 4904 */
4283 mac_control = &sp->mac_control; 4905 mac_control = &sp->mac_control;
@@ -4311,16 +4933,18 @@ static int s2io_card_up(nic_t * sp)
4311 return -ENODEV; 4933 return -ENODEV;
4312 } 4934 }
4313 4935
4936 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4937
4314 atomic_set(&sp->card_state, CARD_UP); 4938 atomic_set(&sp->card_state, CARD_UP);
4315 return 0; 4939 return 0;
4316} 4940}
4317 4941
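s2io_card_up() now arms an alarm timer via S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, HZ/2), with the matching del_timer_sync() added at the top of s2io_card_down() in the previous hunk. The macro body lives in s2io.h and is not shown here; the sketch below assumes it wraps the usual 2.6-era init_timer()/mod_timer() sequence for a self-rearming half-second callback, with all names other than the timer API being illustrative.

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list alarm_timer;

/* Periodic alarm poll; re-arms itself every HZ/2 jiffies. */
static void alarm_handle(unsigned long data)
{
	/* ... check adapter alarm/error state for the nic held in 'data' ... */
	mod_timer(&alarm_timer, jiffies + HZ / 2);
}

static void start_alarm_timer(unsigned long priv)
{
	init_timer(&alarm_timer);
	alarm_timer.function = alarm_handle;
	alarm_timer.data = priv;
	mod_timer(&alarm_timer, jiffies + HZ / 2);
}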
4318/** 4942/**
4319 * s2io_restart_nic - Resets the NIC. 4943 * s2io_restart_nic - Resets the NIC.
4320 * @data : long pointer to the device private structure 4944 * @data : long pointer to the device private structure
4321 * Description: 4945 * Description:
4322 * This function is scheduled to be run by the s2io_tx_watchdog 4946 * This function is scheduled to be run by the s2io_tx_watchdog
4323 * function after 0.5 secs to reset the NIC. The idea is to reduce 4947 * function after 0.5 secs to reset the NIC. The idea is to reduce
4324 * the run time of the watch dog routine which is run holding a 4948 * the run time of the watch dog routine which is run holding a
4325 * spin lock. 4949 * spin lock.
4326 */ 4950 */
@@ -4338,10 +4962,11 @@ static void s2io_restart_nic(unsigned long data)
4338 netif_wake_queue(dev); 4962 netif_wake_queue(dev);
4339 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", 4963 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4340 dev->name); 4964 dev->name);
4965
4341} 4966}
4342 4967
4343/** 4968/**
4344 * s2io_tx_watchdog - Watchdog for transmit side. 4969 * s2io_tx_watchdog - Watchdog for transmit side.
4345 * @dev : Pointer to net device structure 4970 * @dev : Pointer to net device structure
4346 * Description: 4971 * Description:
4347 * This function is triggered if the Tx Queue is stopped 4972 * This function is triggered if the Tx Queue is stopped
@@ -4369,7 +4994,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
4369 * @len : length of the packet 4994 * @len : length of the packet
4370 * @cksum : FCS checksum of the frame. 4995 * @cksum : FCS checksum of the frame.
4371 * @ring_no : the ring from which this RxD was extracted. 4996 * @ring_no : the ring from which this RxD was extracted.
4372 * Description: 4997 * Description:
4373 * This function is called by the Tx interrupt serivce routine to perform 4998 * This function is called by the Tx interrupt serivce routine to perform
4374 * some OS related operations on the SKB before passing it to the upper 4999 * some OS related operations on the SKB before passing it to the upper
4375 * layers. It mainly checks if the checksum is OK, if so adds it to the 5000 * layers. It mainly checks if the checksum is OK, if so adds it to the
@@ -4379,35 +5004,68 @@ static void s2io_tx_watchdog(struct net_device *dev)
4379 * Return value: 5004 * Return value:
4380 * SUCCESS on success and -1 on failure. 5005 * SUCCESS on success and -1 on failure.
4381 */ 5006 */
4382#ifndef CONFIG_2BUFF_MODE 5007static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4383static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no)
4384#else
4385static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4386 buffAdd_t * ba)
4387#endif
4388{ 5008{
5009 nic_t *sp = ring_data->nic;
4389 struct net_device *dev = (struct net_device *) sp->dev; 5010 struct net_device *dev = (struct net_device *) sp->dev;
4390 struct sk_buff *skb = 5011 struct sk_buff *skb = (struct sk_buff *)
4391 (struct sk_buff *) ((unsigned long) rxdp->Host_Control); 5012 ((unsigned long) rxdp->Host_Control);
5013 int ring_no = ring_data->ring_no;
4392 u16 l3_csum, l4_csum; 5014 u16 l3_csum, l4_csum;
4393#ifdef CONFIG_2BUFF_MODE 5015#ifdef CONFIG_2BUFF_MODE
4394 int buf0_len, buf2_len; 5016 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5017 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5018 int get_block = ring_data->rx_curr_get_info.block_index;
5019 int get_off = ring_data->rx_curr_get_info.offset;
5020 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4395 unsigned char *buff; 5021 unsigned char *buff;
5022#else
5023 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
4396#endif 5024#endif
5025 skb->dev = dev;
5026 if (rxdp->Control_1 & RXD_T_CODE) {
5027 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5028 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5029 dev->name, err);
5030 dev_kfree_skb(skb);
5031 sp->stats.rx_crc_errors++;
5032 atomic_dec(&sp->rx_bufs_left[ring_no]);
5033 rxdp->Host_Control = 0;
5034 return 0;
5035 }
4397 5036
4398 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); 5037 /* Updating statistics */
4399 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && (sp->rx_csum)) { 5038 rxdp->Host_Control = 0;
5039 sp->rx_pkt_count++;
5040 sp->stats.rx_packets++;
5041#ifndef CONFIG_2BUFF_MODE
5042 sp->stats.rx_bytes += len;
5043#else
5044 sp->stats.rx_bytes += buf0_len + buf2_len;
5045#endif
5046
5047#ifndef CONFIG_2BUFF_MODE
5048 skb_put(skb, len);
5049#else
5050 buff = skb_push(skb, buf0_len);
5051 memcpy(buff, ba->ba_0, buf0_len);
5052 skb_put(skb, buf2_len);
5053#endif
5054
5055 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5056 (sp->rx_csum)) {
5057 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4400 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); 5058 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4401 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) { 5059 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4402 /* 5060 /*
4403 * NIC verifies if the Checksum of the received 5061 * NIC verifies if the Checksum of the received
4404 * frame is Ok or not and accordingly returns 5062 * frame is Ok or not and accordingly returns
4405 * a flag in the RxD. 5063 * a flag in the RxD.
4406 */ 5064 */
4407 skb->ip_summed = CHECKSUM_UNNECESSARY; 5065 skb->ip_summed = CHECKSUM_UNNECESSARY;
4408 } else { 5066 } else {
4409 /* 5067 /*
4410 * Packet with erroneous checksum, let the 5068 * Packet with erroneous checksum, let the
4411 * upper layers deal with it. 5069 * upper layers deal with it.
4412 */ 5070 */
4413 skb->ip_summed = CHECKSUM_NONE; 5071 skb->ip_summed = CHECKSUM_NONE;
@@ -4416,44 +5074,26 @@ static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4416 skb->ip_summed = CHECKSUM_NONE; 5074 skb->ip_summed = CHECKSUM_NONE;
4417 } 5075 }
4418 5076
4419 if (rxdp->Control_1 & RXD_T_CODE) {
4420 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4421 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4422 dev->name, err);
4423 }
4424#ifdef CONFIG_2BUFF_MODE
4425 buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4426 buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4427#endif
4428
4429 skb->dev = dev;
4430#ifndef CONFIG_2BUFF_MODE
4431 skb_put(skb, len);
4432 skb->protocol = eth_type_trans(skb, dev);
4433#else
4434 buff = skb_push(skb, buf0_len);
4435 memcpy(buff, ba->ba_0, buf0_len);
4436 skb_put(skb, buf2_len);
4437 skb->protocol = eth_type_trans(skb, dev); 5077 skb->protocol = eth_type_trans(skb, dev);
4438#endif
4439
4440#ifdef CONFIG_S2IO_NAPI 5078#ifdef CONFIG_S2IO_NAPI
4441 netif_receive_skb(skb); 5079 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5080 /* Queueing the vlan frame to the upper layer */
5081 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5082 RXD_GET_VLAN_TAG(rxdp->Control_2));
5083 } else {
5084 netif_receive_skb(skb);
5085 }
4442#else 5086#else
4443 netif_rx(skb); 5087 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5088 /* Queueing the vlan frame to the upper layer */
5089 vlan_hwaccel_rx(skb, sp->vlgrp,
5090 RXD_GET_VLAN_TAG(rxdp->Control_2));
5091 } else {
5092 netif_rx(skb);
5093 }
4444#endif 5094#endif
4445
4446 dev->last_rx = jiffies; 5095 dev->last_rx = jiffies;
4447 sp->rx_pkt_count++;
4448 sp->stats.rx_packets++;
4449#ifndef CONFIG_2BUFF_MODE
4450 sp->stats.rx_bytes += len;
4451#else
4452 sp->stats.rx_bytes += buf0_len + buf2_len;
4453#endif
4454
4455 atomic_dec(&sp->rx_bufs_left[ring_no]); 5096 atomic_dec(&sp->rx_bufs_left[ring_no]);
4456 rxdp->Host_Control = 0;
4457 return SUCCESS; 5097 return SUCCESS;
4458} 5098}
4459 5099
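rx_osm_handler() now checks the descriptor's VLAN tag and, when a vlan_group has been registered through the new dev->vlan_rx_register hook, delivers the frame via the hardware-acceleration helpers instead of the plain netif_receive_skb()/netif_rx(). The NAPI-side dispatch in isolation; apart from the two kernel calls and struct vlan_group, the names are illustrative.

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Deliver an rx skb, preferring the VLAN-accelerated path when both a
 * vlan_group and a non-zero tag are present (non-NAPI builds would use
 * vlan_hwaccel_rx()/netif_rx() instead). */
static void deliver_rx_skb(struct sk_buff *skb, struct vlan_group *vlgrp,
			   unsigned short vlan_tag)
{
	if (vlgrp && vlan_tag)
		vlan_hwaccel_receive_skb(skb, vlgrp, vlan_tag);
	else
		netif_receive_skb(skb);
}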
@@ -4464,13 +5104,13 @@ static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4464 * @link : inidicates whether link is UP/DOWN. 5104 * @link : inidicates whether link is UP/DOWN.
4465 * Description: 5105 * Description:
4466 * This function stops/starts the Tx queue depending on whether the link 5106 * This function stops/starts the Tx queue depending on whether the link
4467 * status of the NIC is is down or up. This is called by the Alarm 5107 * status of the NIC is is down or up. This is called by the Alarm
4468 * interrupt handler whenever a link change interrupt comes up. 5108 * interrupt handler whenever a link change interrupt comes up.
4469 * Return value: 5109 * Return value:
4470 * void. 5110 * void.
4471 */ 5111 */
4472 5112
4473static void s2io_link(nic_t * sp, int link) 5113void s2io_link(nic_t * sp, int link)
4474{ 5114{
4475 struct net_device *dev = (struct net_device *) sp->dev; 5115 struct net_device *dev = (struct net_device *) sp->dev;
4476 5116
@@ -4487,8 +5127,25 @@ static void s2io_link(nic_t * sp, int link)
4487} 5127}
4488 5128
4489/** 5129/**
4490 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers . 5130 * get_xena_rev_id - to identify revision ID of xena.
4491 * @sp : private member of the device structure, which is a pointer to the 5131 * @pdev : PCI Dev structure
5132 * Description:
5133 * Function to identify the Revision ID of xena.
5134 * Return value:
5135 * returns the revision ID of the device.
5136 */
5137
5138int get_xena_rev_id(struct pci_dev *pdev)
5139{
5140 u8 id = 0;
5141 int ret;
5142 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5143 return id;
5144}
5145
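The new get_xena_rev_id() above is a thin wrapper around a standard PCI config-space read of the revision-ID byte (it stores pci_read_config_byte()'s return value in 'ret' but never checks it). An equivalent helper that propagates the config-access status instead, for illustration only:

#include <linux/pci.h>

/* Read the PCI revision ID and return the config-access status. */
static int read_pci_rev_id(struct pci_dev *pdev, u8 *rev)
{
	return pci_read_config_byte(pdev, PCI_REVISION_ID, rev);
}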
5146/**
5147 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5148 * @sp : private member of the device structure, which is a pointer to the
4492 * s2io_nic structure. 5149 * s2io_nic structure.
4493 * Description: 5150 * Description:
4494 * This function initializes a few of the PCI and PCI-X configuration registers 5151 * This function initializes a few of the PCI and PCI-X configuration registers
@@ -4499,15 +5156,15 @@ static void s2io_link(nic_t * sp, int link)
4499 5156
4500static void s2io_init_pci(nic_t * sp) 5157static void s2io_init_pci(nic_t * sp)
4501{ 5158{
4502 u16 pci_cmd = 0; 5159 u16 pci_cmd = 0, pcix_cmd = 0;
4503 5160
4504 /* Enable Data Parity Error Recovery in PCI-X command register. */ 5161 /* Enable Data Parity Error Recovery in PCI-X command register. */
4505 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5162 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4506 &(sp->pcix_cmd)); 5163 &(pcix_cmd));
4507 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5164 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4508 (sp->pcix_cmd | 1)); 5165 (pcix_cmd | 1));
4509 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5166 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4510 &(sp->pcix_cmd)); 5167 &(pcix_cmd));
4511 5168
4512 /* Set the PErr Response bit in PCI command register. */ 5169 /* Set the PErr Response bit in PCI command register. */
4513 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 5170 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
@@ -4515,53 +5172,43 @@ static void s2io_init_pci(nic_t * sp)
4515 (pci_cmd | PCI_COMMAND_PARITY)); 5172 (pci_cmd | PCI_COMMAND_PARITY));
4516 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 5173 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4517 5174
4518 /* Set MMRB count to 1024 in PCI-X Command register. */
4519 sp->pcix_cmd &= 0xFFF3;
4520 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, (sp->pcix_cmd | (0x1 << 2))); /* MMRBC 1K */
4521 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4522 &(sp->pcix_cmd));
4523
4524 /* Setting Maximum outstanding splits based on system type. */
4525 sp->pcix_cmd &= 0xFF8F;
4526
4527 sp->pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1); /* 2 splits. */
4528 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4529 sp->pcix_cmd);
4530 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4531 &(sp->pcix_cmd));
4532 /* Forcibly disabling relaxed ordering capability of the card. */ 5175 /* Forcibly disabling relaxed ordering capability of the card. */
4533 sp->pcix_cmd &= 0xfffd; 5176 pcix_cmd &= 0xfffd;
4534 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5177 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4535 sp->pcix_cmd); 5178 pcix_cmd);
4536 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5179 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4537 &(sp->pcix_cmd)); 5180 &(pcix_cmd));
4538} 5181}
4539 5182
4540MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); 5183MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4541MODULE_LICENSE("GPL"); 5184MODULE_LICENSE("GPL");
4542module_param(tx_fifo_num, int, 0); 5185module_param(tx_fifo_num, int, 0);
4543module_param_array(tx_fifo_len, int, NULL, 0);
4544module_param(rx_ring_num, int, 0); 5186module_param(rx_ring_num, int, 0);
4545module_param_array(rx_ring_sz, int, NULL, 0); 5187module_param_array(tx_fifo_len, uint, NULL, 0);
4546module_param(Stats_refresh_time, int, 0); 5188module_param_array(rx_ring_sz, uint, NULL, 0);
5189module_param_array(rts_frm_len, uint, NULL, 0);
5190module_param(use_continuous_tx_intrs, int, 1);
4547module_param(rmac_pause_time, int, 0); 5191module_param(rmac_pause_time, int, 0);
4548module_param(mc_pause_threshold_q0q3, int, 0); 5192module_param(mc_pause_threshold_q0q3, int, 0);
4549module_param(mc_pause_threshold_q4q7, int, 0); 5193module_param(mc_pause_threshold_q4q7, int, 0);
4550module_param(shared_splits, int, 0); 5194module_param(shared_splits, int, 0);
4551module_param(tmac_util_period, int, 0); 5195module_param(tmac_util_period, int, 0);
4552module_param(rmac_util_period, int, 0); 5196module_param(rmac_util_period, int, 0);
5197module_param(bimodal, bool, 0);
4553#ifndef CONFIG_S2IO_NAPI 5198#ifndef CONFIG_S2IO_NAPI
4554module_param(indicate_max_pkts, int, 0); 5199module_param(indicate_max_pkts, int, 0);
4555#endif 5200#endif
5201module_param(rxsync_frequency, int, 0);
5202
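The module-parameter block now declares the per-fifo and per-ring arrays with module_param_array() using uint elements and adds new knobs (rts_frm_len, use_continuous_tx_intrs, bimodal, rxsync_frequency). A minimal, self-contained example of that declaration pattern; the variable names and descriptions below are illustrative, not the driver's.

#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int ring_sz[8];		/* 0 means "use the built-in default" */
static int use_feature = 1;

module_param_array(ring_sz, uint, NULL, 0);
module_param(use_feature, int, 0);
MODULE_PARM_DESC(ring_sz, "per-ring rx block count");
MODULE_PARM_DESC(use_feature, "enable the optional feature (default on)");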
4556/** 5203/**
4557 * s2io_init_nic - Initialization of the adapter . 5204 * s2io_init_nic - Initialization of the adapter .
4558 * @pdev : structure containing the PCI related information of the device. 5205 * @pdev : structure containing the PCI related information of the device.
4559 * @pre: List of PCI devices supported by the driver listed in s2io_tbl. 5206 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4560 * Description: 5207 * Description:
4561 * The function initializes an adapter identified by the pci_dec structure. 5208 * The function initializes an adapter identified by the pci_dec structure.
4562 * All OS related initialization including memory and device structure and 5209 * All OS related initialization including memory and device structure and
4563 * initlaization of the device private variable is done. Also the swapper 5210 * initlaization of the device private variable is done. Also the swapper
4564 * control register is initialized to enable read and write into the I/O 5211 * control register is initialized to enable read and write into the I/O
4565 * registers of the device. 5212 * registers of the device.
4566 * Return value: 5213 * Return value:
4567 * returns 0 on success and negative on failure. 5214 * returns 0 on success and negative on failure.
@@ -4572,7 +5219,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4572{ 5219{
4573 nic_t *sp; 5220 nic_t *sp;
4574 struct net_device *dev; 5221 struct net_device *dev;
4575 char *dev_name = "S2IO 10GE NIC";
4576 int i, j, ret; 5222 int i, j, ret;
4577 int dma_flag = FALSE; 5223 int dma_flag = FALSE;
4578 u32 mac_up, mac_down; 5224 u32 mac_up, mac_down;
@@ -4581,10 +5227,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4581 u16 subid; 5227 u16 subid;
4582 mac_info_t *mac_control; 5228 mac_info_t *mac_control;
4583 struct config_param *config; 5229 struct config_param *config;
5230 int mode;
4584 5231
4585 5232#ifdef CONFIG_S2IO_NAPI
4586 DBG_PRINT(ERR_DBG, "Loading S2IO driver with %s\n", 5233 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
4587 s2io_driver_version); 5234#endif
4588 5235
4589 if ((ret = pci_enable_device(pdev))) { 5236 if ((ret = pci_enable_device(pdev))) {
4590 DBG_PRINT(ERR_DBG, 5237 DBG_PRINT(ERR_DBG,
@@ -4595,7 +5242,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4595 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 5242 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4596 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n"); 5243 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4597 dma_flag = TRUE; 5244 dma_flag = TRUE;
4598
4599 if (pci_set_consistent_dma_mask 5245 if (pci_set_consistent_dma_mask
4600 (pdev, DMA_64BIT_MASK)) { 5246 (pdev, DMA_64BIT_MASK)) {
4601 DBG_PRINT(ERR_DBG, 5247 DBG_PRINT(ERR_DBG,
@@ -4635,34 +5281,41 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4635 memset(sp, 0, sizeof(nic_t)); 5281 memset(sp, 0, sizeof(nic_t));
4636 sp->dev = dev; 5282 sp->dev = dev;
4637 sp->pdev = pdev; 5283 sp->pdev = pdev;
4638 sp->vendor_id = pdev->vendor;
4639 sp->device_id = pdev->device;
4640 sp->high_dma_flag = dma_flag; 5284 sp->high_dma_flag = dma_flag;
4641 sp->irq = pdev->irq;
4642 sp->device_enabled_once = FALSE; 5285 sp->device_enabled_once = FALSE;
4643 strcpy(sp->name, dev_name); 5286
5287 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5288 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5289 sp->device_type = XFRAME_II_DEVICE;
5290 else
5291 sp->device_type = XFRAME_I_DEVICE;
4644 5292
4645 /* Initialize some PCI/PCI-X fields of the NIC. */ 5293 /* Initialize some PCI/PCI-X fields of the NIC. */
4646 s2io_init_pci(sp); 5294 s2io_init_pci(sp);
4647 5295
4648 /* 5296 /*
4649 * Setting the device configuration parameters. 5297 * Setting the device configuration parameters.
4650 * Most of these parameters can be specified by the user during 5298 * Most of these parameters can be specified by the user during
4651 * module insertion as they are module loadable parameters. If 5299 * module insertion as they are module loadable parameters. If
4652 * these parameters are not not specified during load time, they 5300 * these parameters are not not specified during load time, they
4653 * are initialized with default values. 5301 * are initialized with default values.
4654 */ 5302 */
4655 mac_control = &sp->mac_control; 5303 mac_control = &sp->mac_control;
4656 config = &sp->config; 5304 config = &sp->config;
4657 5305
4658 /* Tx side parameters. */ 5306 /* Tx side parameters. */
4659 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */ 5307 if (tx_fifo_len[0] == 0)
5308 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4660 config->tx_fifo_num = tx_fifo_num; 5309 config->tx_fifo_num = tx_fifo_num;
4661 for (i = 0; i < MAX_TX_FIFOS; i++) { 5310 for (i = 0; i < MAX_TX_FIFOS; i++) {
4662 config->tx_cfg[i].fifo_len = tx_fifo_len[i]; 5311 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4663 config->tx_cfg[i].fifo_priority = i; 5312 config->tx_cfg[i].fifo_priority = i;
4664 } 5313 }
4665 5314
5315 /* mapping the QoS priority to the configured fifos */
5316 for (i = 0; i < MAX_TX_FIFOS; i++)
5317 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5318
4666 config->tx_intr_type = TXD_INT_TYPE_UTILZ; 5319 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4667 for (i = 0; i < config->tx_fifo_num; i++) { 5320 for (i = 0; i < config->tx_fifo_num; i++) {
4668 config->tx_cfg[i].f_no_snoop = 5321 config->tx_cfg[i].f_no_snoop =
@@ -4675,7 +5328,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4675 config->max_txds = MAX_SKB_FRAGS; 5328 config->max_txds = MAX_SKB_FRAGS;
4676 5329
4677 /* Rx side parameters. */ 5330 /* Rx side parameters. */
4678 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */ 5331 if (rx_ring_sz[0] == 0)
5332 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4679 config->rx_ring_num = rx_ring_num; 5333 config->rx_ring_num = rx_ring_num;
4680 for (i = 0; i < MAX_RX_RINGS; i++) { 5334 for (i = 0; i < MAX_RX_RINGS; i++) {
4681 config->rx_cfg[i].num_rxd = rx_ring_sz[i] * 5335 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
@@ -4699,10 +5353,13 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4699 for (i = 0; i < config->rx_ring_num; i++) 5353 for (i = 0; i < config->rx_ring_num; i++)
4700 atomic_set(&sp->rx_bufs_left[i], 0); 5354 atomic_set(&sp->rx_bufs_left[i], 0);
4701 5355
5356 /* Initialize the number of ISRs currently running */
5357 atomic_set(&sp->isr_cnt, 0);
5358
4702 /* initialize the shared memory used by the NIC and the host */ 5359 /* initialize the shared memory used by the NIC and the host */
4703 if (init_shared_mem(sp)) { 5360 if (init_shared_mem(sp)) {
4704 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", 5361 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4705 dev->name); 5362 __FUNCTION__);
4706 ret = -ENOMEM; 5363 ret = -ENOMEM;
4707 goto mem_alloc_failed; 5364 goto mem_alloc_failed;
4708 } 5365 }
@@ -4743,13 +5400,17 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4743 dev->do_ioctl = &s2io_ioctl; 5400 dev->do_ioctl = &s2io_ioctl;
4744 dev->change_mtu = &s2io_change_mtu; 5401 dev->change_mtu = &s2io_change_mtu;
4745 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 5402 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5403 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5404 dev->vlan_rx_register = s2io_vlan_rx_register;
5405 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5406
4746 /* 5407 /*
4747 * will use eth_mac_addr() for dev->set_mac_address 5408 * will use eth_mac_addr() for dev->set_mac_address
4748 * mac address will be set every time dev->open() is called 5409 * mac address will be set every time dev->open() is called
4749 */ 5410 */
4750#ifdef CONFIG_S2IO_NAPI 5411#if defined(CONFIG_S2IO_NAPI)
4751 dev->poll = s2io_poll; 5412 dev->poll = s2io_poll;
4752 dev->weight = 90; 5413 dev->weight = 32;
4753#endif 5414#endif
4754 5415
4755 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 5416 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
@@ -4776,22 +5437,28 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4776 goto set_swap_failed; 5437 goto set_swap_failed;
4777 } 5438 }
4778 5439
4779 /* Fix for all "FFs" MAC address problems observed on Alpha platforms */ 5440 /* Verify if the Herc works on the slot its placed into */
4780 fix_mac_address(sp); 5441 if (sp->device_type & XFRAME_II_DEVICE) {
4781 s2io_reset(sp); 5442 mode = s2io_verify_pci_mode(sp);
5443 if (mode < 0) {
5444 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5445 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5446 ret = -EBADSLT;
5447 goto set_swap_failed;
5448 }
5449 }
4782 5450
4783 /* 5451 /* Not needed for Herc */
4784 * Setting swapper control on the NIC, so the MAC address can be read. 5452 if (sp->device_type & XFRAME_I_DEVICE) {
4785 */ 5453 /*
4786 if (s2io_set_swapper(sp)) { 5454 * Fix for all "FFs" MAC address problems observed on
4787 DBG_PRINT(ERR_DBG, 5455 * Alpha platforms
4788 "%s: S2IO: swapper settings are wrong\n", 5456 */
4789 dev->name); 5457 fix_mac_address(sp);
4790 ret = -EAGAIN; 5458 s2io_reset(sp);
4791 goto set_swap_failed;
4792 } 5459 }
4793 5460
4794 /* 5461 /*
4795 * MAC address initialization. 5462 * MAC address initialization.
4796 * For now only one mac address will be read and used. 5463 * For now only one mac address will be read and used.
4797 */ 5464 */
@@ -4814,37 +5481,28 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4814 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16); 5481 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4815 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24); 5482 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4816 5483
4817 DBG_PRINT(INIT_DBG,
4818 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4819 sp->def_mac_addr[0].mac_addr[0],
4820 sp->def_mac_addr[0].mac_addr[1],
4821 sp->def_mac_addr[0].mac_addr[2],
4822 sp->def_mac_addr[0].mac_addr[3],
4823 sp->def_mac_addr[0].mac_addr[4],
4824 sp->def_mac_addr[0].mac_addr[5]);
4825
4826 /* Set the factory defined MAC address initially */ 5484 /* Set the factory defined MAC address initially */
4827 dev->addr_len = ETH_ALEN; 5485 dev->addr_len = ETH_ALEN;
4828 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); 5486 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4829 5487
4830 /* 5488 /*
4831 * Initialize the tasklet status and link state flags 5489 * Initialize the tasklet status and link state flags
4832 * and the card statte parameter 5490 * and the card state parameter
4833 */ 5491 */
4834 atomic_set(&(sp->card_state), 0); 5492 atomic_set(&(sp->card_state), 0);
4835 sp->tasklet_status = 0; 5493 sp->tasklet_status = 0;
4836 sp->link_state = 0; 5494 sp->link_state = 0;
4837 5495
4838
4839 /* Initialize spinlocks */ 5496 /* Initialize spinlocks */
4840 spin_lock_init(&sp->tx_lock); 5497 spin_lock_init(&sp->tx_lock);
4841#ifndef CONFIG_S2IO_NAPI 5498#ifndef CONFIG_S2IO_NAPI
4842 spin_lock_init(&sp->put_lock); 5499 spin_lock_init(&sp->put_lock);
4843#endif 5500#endif
5501 spin_lock_init(&sp->rx_lock);
4844 5502
4845 /* 5503 /*
4846 * SXE-002: Configure link and activity LED to init state 5504 * SXE-002: Configure link and activity LED to init state
4847 * on driver load. 5505 * on driver load.
4848 */ 5506 */
4849 subid = sp->pdev->subsystem_device; 5507 subid = sp->pdev->subsystem_device;
4850 if ((subid & 0xFF) >= 0x07) { 5508 if ((subid & 0xFF) >= 0x07) {
@@ -4864,13 +5522,61 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4864 goto register_failed; 5522 goto register_failed;
4865 } 5523 }
4866 5524
4867 /* 5525 if (sp->device_type & XFRAME_II_DEVICE) {
4868 * Make Link state as off at this point, when the Link change 5526 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
4869 * interrupt comes the state will be automatically changed to 5527 dev->name);
5528 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5529 get_xena_rev_id(sp->pdev),
5530 s2io_driver_version);
5531 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5532 sp->def_mac_addr[0].mac_addr[0],
5533 sp->def_mac_addr[0].mac_addr[1],
5534 sp->def_mac_addr[0].mac_addr[2],
5535 sp->def_mac_addr[0].mac_addr[3],
5536 sp->def_mac_addr[0].mac_addr[4],
5537 sp->def_mac_addr[0].mac_addr[5]);
5538 mode = s2io_print_pci_mode(sp);
5539 if (mode < 0) {
5540 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
5541 ret = -EBADSLT;
5542 goto set_swap_failed;
5543 }
5544 } else {
5545 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5546 dev->name);
5547 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5548 get_xena_rev_id(sp->pdev),
5549 s2io_driver_version);
5550 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5551 sp->def_mac_addr[0].mac_addr[0],
5552 sp->def_mac_addr[0].mac_addr[1],
5553 sp->def_mac_addr[0].mac_addr[2],
5554 sp->def_mac_addr[0].mac_addr[3],
5555 sp->def_mac_addr[0].mac_addr[4],
5556 sp->def_mac_addr[0].mac_addr[5]);
5557 }
5558
5559 /* Initialize device name */
5560 strcpy(sp->name, dev->name);
5561 if (sp->device_type & XFRAME_II_DEVICE)
5562 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
5563 else
5564 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5565
5566 /* Initialize bimodal Interrupts */
5567 sp->config.bimodal = bimodal;
5568 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
5569 sp->config.bimodal = 0;
5570 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
5571 dev->name);
5572 }
5573
5574 /*
5575 * Make Link state as off at this point, when the Link change
5576 * interrupt comes the state will be automatically changed to
4870 * the right state. 5577 * the right state.
4871 */ 5578 */
4872 netif_carrier_off(dev); 5579 netif_carrier_off(dev);
4873 sp->last_link_state = LINK_DOWN;
4874 5580
4875 return 0; 5581 return 0;
4876 5582
@@ -4891,11 +5597,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4891} 5597}
4892 5598
4893/** 5599/**
4894 * s2io_rem_nic - Free the PCI device 5600 * s2io_rem_nic - Free the PCI device
4895 * @pdev: structure containing the PCI related information of the device. 5601 * @pdev: structure containing the PCI related information of the device.
4896 * Description: This function is called by the Pci subsystem to release a 5602 * Description: This function is called by the Pci subsystem to release a
4897 * PCI device and free up all resource held up by the device. This could 5603 * PCI device and free up all resource held up by the device. This could
4898 * be in response to a Hot plug event or when the driver is to be removed 5604 * be in response to a Hot plug event or when the driver is to be removed
4899 * from memory. 5605 * from memory.
4900 */ 5606 */
4901 5607
@@ -4919,7 +5625,6 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
4919 pci_disable_device(pdev); 5625 pci_disable_device(pdev);
4920 pci_release_regions(pdev); 5626 pci_release_regions(pdev);
4921 pci_set_drvdata(pdev, NULL); 5627 pci_set_drvdata(pdev, NULL);
4922
4923 free_netdev(dev); 5628 free_netdev(dev);
4924} 5629}
4925 5630
@@ -4935,11 +5640,11 @@ int __init s2io_starter(void)
4935} 5640}
4936 5641
4937/** 5642/**
4938 * s2io_closer - Cleanup routine for the driver 5643 * s2io_closer - Cleanup routine for the driver
4939 * Description: This function is the cleanup routine for the driver. It unregisters the driver. 5644 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
4940 */ 5645 */
4941 5646
4942static void s2io_closer(void) 5647void s2io_closer(void)
4943{ 5648{
4944 pci_unregister_driver(&s2io_driver); 5649 pci_unregister_driver(&s2io_driver);
4945 DBG_PRINT(INIT_DBG, "cleanup done\n"); 5650 DBG_PRINT(INIT_DBG, "cleanup done\n");
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 1711c8c3dc99..5d9270730ca2 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -31,6 +31,9 @@
31#define SUCCESS 0 31#define SUCCESS 0
32#define FAILURE -1 32#define FAILURE -1
33 33
34/* Maximum time to flicker LED when asked to identify NIC using ethtool */
35#define MAX_FLICKER_TIME 60000 /* 60 Secs */
36
34/* Maximum outstanding splits to be configured into xena. */ 37/* Maximum outstanding splits to be configured into xena. */
35typedef enum xena_max_outstanding_splits { 38typedef enum xena_max_outstanding_splits {
36 XENA_ONE_SPLIT_TRANSACTION = 0, 39 XENA_ONE_SPLIT_TRANSACTION = 0,
@@ -45,10 +48,10 @@ typedef enum xena_max_outstanding_splits {
45#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4) 48#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
46 49
47/* OS concerned variables and constants */ 50/* OS concerned variables and constants */
48#define WATCH_DOG_TIMEOUT 5*HZ 51#define WATCH_DOG_TIMEOUT 15*HZ
49#define EFILL 0x1234 52#define EFILL 0x1234
50#define ALIGN_SIZE 127 53#define ALIGN_SIZE 127
51#define PCIX_COMMAND_REGISTER 0x62 54#define PCIX_COMMAND_REGISTER 0x62
52 55
53/* 56/*
54 * Debug related variables. 57 * Debug related variables.
@@ -61,7 +64,7 @@ typedef enum xena_max_outstanding_splits {
61#define INTR_DBG 4 64#define INTR_DBG 4
62 65
63/* Global variable that defines the present debug level of the driver. */ 66/* Global variable that defines the present debug level of the driver. */
64static int debug_level = ERR_DBG; /* Default level. */ 67int debug_level = ERR_DBG; /* Default level. */
65 68
66/* DEBUG message print. */ 69/* DEBUG message print. */
67#define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args) 70#define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args)
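
The DBG_PRINT() macro above gates printk() on the global debug_level: a message is emitted only when debug_level is at or above the level passed in. A small illustration of a call site (the function and format strings are made up for this sketch and are not part of the patch; it assumes ERR_DBG is the lowest level, as its use as the default suggests):

/* Illustration only: with the default debug_level of ERR_DBG, the first
 * call prints, while the second (INTR_DBG) prints nothing unless
 * debug_level has been raised to at least INTR_DBG. */
static void example_debug_calls(const char *name)
{
        DBG_PRINT(ERR_DBG, "%s: printed at the default debug level\n", name);
        DBG_PRINT(INTR_DBG, "%s: printed only when debug_level >= INTR_DBG\n", name);
}
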
@@ -71,6 +74,12 @@ static int debug_level = ERR_DBG; /* Default level. */
71#define L4_CKSUM_OK 0xFFFF 74#define L4_CKSUM_OK 0xFFFF
72#define S2IO_JUMBO_SIZE 9600 75#define S2IO_JUMBO_SIZE 9600
73 76
77/* Driver statistics maintained by driver */
78typedef struct {
79 unsigned long long single_ecc_errs;
80 unsigned long long double_ecc_errs;
81} swStat_t;
82
74/* The statistics block of Xena */ 83/* The statistics block of Xena */
75typedef struct stat_block { 84typedef struct stat_block {
76/* Tx MAC statistics counters. */ 85/* Tx MAC statistics counters. */
@@ -186,12 +195,90 @@ typedef struct stat_block {
186 u32 rxd_rd_cnt; 195 u32 rxd_rd_cnt;
187 u32 rxf_wr_cnt; 196 u32 rxf_wr_cnt;
188 u32 txf_rd_cnt; 197 u32 txf_rd_cnt;
198
199/* Tx MAC statistics overflow counters. */
200 u32 tmac_data_octets_oflow;
201 u32 tmac_frms_oflow;
202 u32 tmac_bcst_frms_oflow;
203 u32 tmac_mcst_frms_oflow;
204 u32 tmac_ucst_frms_oflow;
205 u32 tmac_ttl_octets_oflow;
206 u32 tmac_any_err_frms_oflow;
207 u32 tmac_nucst_frms_oflow;
208 u64 tmac_vlan_frms;
209 u32 tmac_drop_ip_oflow;
210 u32 tmac_vld_ip_oflow;
211 u32 tmac_rst_tcp_oflow;
212 u32 tmac_icmp_oflow;
213 u32 tpa_unknown_protocol;
214 u32 tmac_udp_oflow;
215 u32 reserved_10;
216 u32 tpa_parse_failure;
217
218/* Rx MAC Statistics overflow counters. */
219 u32 rmac_data_octets_oflow;
220 u32 rmac_vld_frms_oflow;
221 u32 rmac_vld_bcst_frms_oflow;
222 u32 rmac_vld_mcst_frms_oflow;
223 u32 rmac_accepted_ucst_frms_oflow;
224 u32 rmac_ttl_octets_oflow;
225 u32 rmac_discarded_frms_oflow;
226 u32 rmac_accepted_nucst_frms_oflow;
227 u32 rmac_usized_frms_oflow;
228 u32 rmac_drop_events_oflow;
229 u32 rmac_frag_frms_oflow;
230 u32 rmac_osized_frms_oflow;
231 u32 rmac_ip_oflow;
232 u32 rmac_jabber_frms_oflow;
233 u32 rmac_icmp_oflow;
234 u32 rmac_drop_ip_oflow;
235 u32 rmac_err_drp_udp_oflow;
236 u32 rmac_udp_oflow;
237 u32 reserved_11;
238 u32 rmac_pause_cnt_oflow;
239 u64 rmac_ttl_1519_4095_frms;
240 u64 rmac_ttl_4096_8191_frms;
241 u64 rmac_ttl_8192_max_frms;
242 u64 rmac_ttl_gt_max_frms;
243 u64 rmac_osized_alt_frms;
244 u64 rmac_jabber_alt_frms;
245 u64 rmac_gt_max_alt_frms;
246 u64 rmac_vlan_frms;
247 u32 rmac_len_discard;
248 u32 rmac_fcs_discard;
249 u32 rmac_pf_discard;
250 u32 rmac_da_discard;
251 u32 rmac_red_discard;
252 u32 rmac_rts_discard;
253 u32 reserved_12;
254 u32 rmac_ingm_full_discard;
255 u32 reserved_13;
256 u32 rmac_accepted_ip_oflow;
257 u32 reserved_14;
258 u32 link_fault_cnt;
259 swStat_t sw_stat;
189} StatInfo_t; 260} StatInfo_t;
190 261
191/* Structures representing different init time configuration 262/*
263 * Structures representing different init time configuration
192 * parameters of the NIC. 264 * parameters of the NIC.
193 */ 265 */
194 266
267#define MAX_TX_FIFOS 8
268#define MAX_RX_RINGS 8
269
270/* FIFO mappings for all possible number of fifos configured */
271int fifo_map[][MAX_TX_FIFOS] = {
272 {0, 0, 0, 0, 0, 0, 0, 0},
273 {0, 0, 0, 0, 1, 1, 1, 1},
274 {0, 0, 0, 1, 1, 1, 2, 2},
275 {0, 0, 1, 1, 2, 2, 3, 3},
276 {0, 0, 1, 1, 2, 2, 3, 4},
277 {0, 0, 1, 1, 2, 3, 4, 5},
278 {0, 0, 1, 2, 3, 4, 5, 6},
279 {0, 1, 2, 3, 4, 5, 6, 7},
280};
281
195/* Maintains Per FIFO related information. */ 282/* Maintains Per FIFO related information. */
196typedef struct tx_fifo_config { 283typedef struct tx_fifo_config {
197#define MAX_AVAILABLE_TXDS 8192 284#define MAX_AVAILABLE_TXDS 8192
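
A hedged reading of the fifo_map table added in the hunk above: row (tx_fifo_num - 1) appears to describe how the eight possible transmit queues are spread over the configured number of FIFOs, with the result cached in the new config_param.fifo_mapping[] field. The code that consumes the table is not in this hunk, so the helpers below are only a guess at the intended indexing, not code from the patch:

/* Hedged sketch, not from the patch: copy the row matching the configured
 * FIFO count into the per-adapter mapping... */
static void example_setup_fifo_mapping(struct config_param *config)
{
        int i;

        for (i = 0; i < MAX_TX_FIFOS; i++)
                config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
}

/* ...and later pick the Tx FIFO for a given queue number. */
static u8 example_queue_to_fifo(struct config_param *config, int queue)
{
        return config->fifo_mapping[queue % MAX_TX_FIFOS];
}
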
@@ -237,14 +324,14 @@ typedef struct rx_ring_config {
237#define NO_SNOOP_RXD_BUFFER 0x02 324#define NO_SNOOP_RXD_BUFFER 0x02
238} rx_ring_config_t; 325} rx_ring_config_t;
239 326
240/* This structure provides contains values of the tunable parameters 327/* This structure provides contains values of the tunable parameters
241 * of the H/W 328 * of the H/W
242 */ 329 */
243struct config_param { 330struct config_param {
244/* Tx Side */ 331/* Tx Side */
245 u32 tx_fifo_num; /*Number of Tx FIFOs */ 332 u32 tx_fifo_num; /*Number of Tx FIFOs */
246#define MAX_TX_FIFOS 8
247 333
334 u8 fifo_mapping[MAX_TX_FIFOS];
248 tx_fifo_config_t tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */ 335 tx_fifo_config_t tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
249 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */ 336 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
250 u64 tx_intr_type; 337 u64 tx_intr_type;
@@ -252,10 +339,10 @@ struct config_param {
252 339
253/* Rx Side */ 340/* Rx Side */
254 u32 rx_ring_num; /*Number of receive rings */ 341 u32 rx_ring_num; /*Number of receive rings */
255#define MAX_RX_RINGS 8
256#define MAX_RX_BLOCKS_PER_RING 150 342#define MAX_RX_BLOCKS_PER_RING 150
257 343
258 rx_ring_config_t rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */ 344 rx_ring_config_t rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
345 u8 bimodal; /*Flag for setting bimodal interrupts*/
259 346
260#define HEADER_ETHERNET_II_802_3_SIZE 14 347#define HEADER_ETHERNET_II_802_3_SIZE 14
261#define HEADER_802_2_SIZE 3 348#define HEADER_802_2_SIZE 3
@@ -269,6 +356,7 @@ struct config_param {
269#define MAX_PYLD_JUMBO 9600 356#define MAX_PYLD_JUMBO 9600
270#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18) 357#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18)
271#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22) 358#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22)
359 u16 bus_speed;
272}; 360};
273 361
274/* Structure representing MAC Addrs */ 362/* Structure representing MAC Addrs */
@@ -277,7 +365,7 @@ typedef struct mac_addr {
277} macaddr_t; 365} macaddr_t;
278 366
279/* Structure that represent every FIFO element in the BAR1 367/* Structure that represent every FIFO element in the BAR1
280 * Address location. 368 * Address location.
281 */ 369 */
282typedef struct _TxFIFO_element { 370typedef struct _TxFIFO_element {
283 u64 TxDL_Pointer; 371 u64 TxDL_Pointer;
@@ -339,6 +427,7 @@ typedef struct _RxD_t {
339#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8) 427#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8)
340#define RXD_FRAME_PROTO_IPV4 BIT(27) 428#define RXD_FRAME_PROTO_IPV4 BIT(27)
341#define RXD_FRAME_PROTO_IPV6 BIT(28) 429#define RXD_FRAME_PROTO_IPV6 BIT(28)
430#define RXD_FRAME_IP_FRAG BIT(29)
342#define RXD_FRAME_PROTO_TCP BIT(30) 431#define RXD_FRAME_PROTO_TCP BIT(30)
343#define RXD_FRAME_PROTO_UDP BIT(31) 432#define RXD_FRAME_PROTO_UDP BIT(31)
344#define TCP_OR_UDP_FRAME (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP) 433#define TCP_OR_UDP_FRAME (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP)
@@ -346,11 +435,15 @@ typedef struct _RxD_t {
346#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF) 435#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF)
347 436
348 u64 Control_2; 437 u64 Control_2;
438#define THE_RXD_MARK 0x3
439#define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2)
440#define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62)
441
349#ifndef CONFIG_2BUFF_MODE 442#ifndef CONFIG_2BUFF_MODE
350#define MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16) 443#define MASK_BUFFER0_SIZE vBIT(0x3FFF,2,14)
351#define SET_BUFFER0_SIZE(val) vBIT(val,0,16) 444#define SET_BUFFER0_SIZE(val) vBIT(val,2,14)
352#else 445#else
353#define MASK_BUFFER0_SIZE vBIT(0xFF,0,16) 446#define MASK_BUFFER0_SIZE vBIT(0xFF,2,14)
354#define MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16) 447#define MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
355#define MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16) 448#define MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
356#define SET_BUFFER0_SIZE(val) vBIT(val,8,8) 449#define SET_BUFFER0_SIZE(val) vBIT(val,8,8)
@@ -363,7 +456,7 @@ typedef struct _RxD_t {
363#define SET_NUM_TAG(val) vBIT(val,16,32) 456#define SET_NUM_TAG(val) vBIT(val,16,32)
364 457
365#ifndef CONFIG_2BUFF_MODE 458#ifndef CONFIG_2BUFF_MODE
366#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0xFFFF,0,16))) 459#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0x3FFF,2,14)))
367#else 460#else
368#define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \ 461#define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \
369 >> 48) 462 >> 48)
@@ -382,7 +475,7 @@ typedef struct _RxD_t {
382#endif 475#endif
383} RxD_t; 476} RxD_t;
384 477
385/* Structure that represents the Rx descriptor block which contains 478/* Structure that represents the Rx descriptor block which contains
386 * 128 Rx descriptors. 479 * 128 Rx descriptors.
387 */ 480 */
388#ifndef CONFIG_2BUFF_MODE 481#ifndef CONFIG_2BUFF_MODE
@@ -392,11 +485,11 @@ typedef struct _RxD_block {
392 485
393 u64 reserved_0; 486 u64 reserved_0;
394#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL 487#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
395 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last 488 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last
396 * Rxd in this blk */ 489 * Rxd in this blk */
397 u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */ 490 u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */
398 u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch 491 u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch
399 * the upper 32 bits should 492 * the upper 32 bits should
400 * be 0 */ 493 * be 0 */
401} RxD_block_t; 494} RxD_block_t;
402#else 495#else
@@ -405,13 +498,13 @@ typedef struct _RxD_block {
405 RxD_t rxd[MAX_RXDS_PER_BLOCK]; 498 RxD_t rxd[MAX_RXDS_PER_BLOCK];
406 499
407#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL 500#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
408 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd 501 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd
409 * in this blk */ 502 * in this blk */
410 u64 pNext_RxD_Blk_physical; /* Phy ponter to next blk. */ 503 u64 pNext_RxD_Blk_physical; /* Phy ponter to next blk. */
411} RxD_block_t; 504} RxD_block_t;
412#define SIZE_OF_BLOCK 4096 505#define SIZE_OF_BLOCK 4096
413 506
414/* Structure to hold virtual addresses of Buf0 and Buf1 in 507/* Structure to hold virtual addresses of Buf0 and Buf1 in
415 * 2buf mode. */ 508 * 2buf mode. */
416typedef struct bufAdd { 509typedef struct bufAdd {
417 void *ba_0_org; 510 void *ba_0_org;
@@ -423,8 +516,8 @@ typedef struct bufAdd {
423 516
424/* Structure which stores all the MAC control parameters */ 517/* Structure which stores all the MAC control parameters */
425 518
426/* This structure stores the offset of the RxD in the ring 519/* This structure stores the offset of the RxD in the ring
427 * from which the Rx Interrupt processor can start picking 520 * from which the Rx Interrupt processor can start picking
428 * up the RxDs for processing. 521 * up the RxDs for processing.
429 */ 522 */
430typedef struct _rx_curr_get_info_t { 523typedef struct _rx_curr_get_info_t {
@@ -436,7 +529,7 @@ typedef struct _rx_curr_get_info_t {
436typedef rx_curr_get_info_t rx_curr_put_info_t; 529typedef rx_curr_get_info_t rx_curr_put_info_t;
437 530
438/* This structure stores the offset of the TxDl in the FIFO 531/* This structure stores the offset of the TxDl in the FIFO
439 * from which the Tx Interrupt processor can start picking 532 * from which the Tx Interrupt processor can start picking
440 * up the TxDLs for send complete interrupt processing. 533 * up the TxDLs for send complete interrupt processing.
441 */ 534 */
442typedef struct { 535typedef struct {
@@ -446,32 +539,96 @@ typedef struct {
446 539
447typedef tx_curr_get_info_t tx_curr_put_info_t; 540typedef tx_curr_get_info_t tx_curr_put_info_t;
448 541
449/* Infomation related to the Tx and Rx FIFOs and Rings of Xena 542/* Structure that holds the Phy and virt addresses of the Blocks */
450 * is maintained in this structure. 543typedef struct rx_block_info {
451 */ 544 RxD_t *block_virt_addr;
452typedef struct mac_info { 545 dma_addr_t block_dma_addr;
453/* rx side stuff */ 546} rx_block_info_t;
454 /* Put pointer info which indictes which RxD has to be replenished 547
548/* pre declaration of the nic structure */
549typedef struct s2io_nic nic_t;
550
551/* Ring specific structure */
552typedef struct ring_info {
553 /* The ring number */
554 int ring_no;
555
556 /*
557 * Place holders for the virtual and physical addresses of
558 * all the Rx Blocks
559 */
560 rx_block_info_t rx_blocks[MAX_RX_BLOCKS_PER_RING];
561 int block_count;
562 int pkt_cnt;
563
564 /*
565 * Put pointer info which indictes which RxD has to be replenished
455 * with a new buffer. 566 * with a new buffer.
456 */ 567 */
457 rx_curr_put_info_t rx_curr_put_info[MAX_RX_RINGS]; 568 rx_curr_put_info_t rx_curr_put_info;
458 569
459 /* Get pointer info which indictes which is the last RxD that was 570 /*
571 * Get pointer info which indictes which is the last RxD that was
460 * processed by the driver. 572 * processed by the driver.
461 */ 573 */
462 rx_curr_get_info_t rx_curr_get_info[MAX_RX_RINGS]; 574 rx_curr_get_info_t rx_curr_get_info;
463 575
464 u16 rmac_pause_time; 576#ifndef CONFIG_S2IO_NAPI
465 u16 mc_pause_threshold_q0q3; 577 /* Index to the absolute position of the put pointer of Rx ring */
466 u16 mc_pause_threshold_q4q7; 578 int put_pos;
579#endif
580
581#ifdef CONFIG_2BUFF_MODE
582 /* Buffer Address store. */
583 buffAdd_t **ba;
584#endif
585 nic_t *nic;
586} ring_info_t;
467 587
588/* Fifo specific structure */
589typedef struct fifo_info {
590 /* FIFO number */
591 int fifo_no;
592
593 /* Maximum TxDs per TxDL */
594 int max_txds;
595
596 /* Place holder of all the TX List's Phy and Virt addresses. */
597 list_info_hold_t *list_info;
598
599 /*
600 * Current offset within the tx FIFO where driver would write
601 * new Tx frame
602 */
603 tx_curr_put_info_t tx_curr_put_info;
604
605 /*
606 * Current offset within tx FIFO from where the driver would start freeing
607 * the buffers
608 */
609 tx_curr_get_info_t tx_curr_get_info;
610
611 nic_t *nic;
612}fifo_info_t;
613
614/* Information related to the Tx and Rx FIFOs and Rings of Xena
615 * is maintained in this structure.
616 */
617typedef struct mac_info {
468/* tx side stuff */ 618/* tx side stuff */
469 /* logical pointer of start of each Tx FIFO */ 619 /* logical pointer of start of each Tx FIFO */
470 TxFIFO_element_t __iomem *tx_FIFO_start[MAX_TX_FIFOS]; 620 TxFIFO_element_t __iomem *tx_FIFO_start[MAX_TX_FIFOS];
471 621
472/* Current offset within tx_FIFO_start, where driver would write new Tx frame*/ 622 /* Fifo specific structure */
473 tx_curr_put_info_t tx_curr_put_info[MAX_TX_FIFOS]; 623 fifo_info_t fifos[MAX_TX_FIFOS];
474 tx_curr_get_info_t tx_curr_get_info[MAX_TX_FIFOS]; 624
625/* rx side stuff */
626 /* Ring specific structure */
627 ring_info_t rings[MAX_RX_RINGS];
628
629 u16 rmac_pause_time;
630 u16 mc_pause_threshold_q0q3;
631 u16 mc_pause_threshold_q4q7;
475 632
476 void *stats_mem; /* orignal pointer to allocated mem */ 633 void *stats_mem; /* orignal pointer to allocated mem */
477 dma_addr_t stats_mem_phy; /* Physical address of the stat block */ 634 dma_addr_t stats_mem_phy; /* Physical address of the stat block */
@@ -485,12 +642,6 @@ typedef struct {
485 int usage_cnt; 642 int usage_cnt;
486} usr_addr_t; 643} usr_addr_t;
487 644
488/* Structure that holds the Phy and virt addresses of the Blocks */
489typedef struct rx_block_info {
490 RxD_t *block_virt_addr;
491 dma_addr_t block_dma_addr;
492} rx_block_info_t;
493
494/* Default Tunable parameters of the NIC. */ 645/* Default Tunable parameters of the NIC. */
495#define DEFAULT_FIFO_LEN 4096 646#define DEFAULT_FIFO_LEN 4096
496#define SMALL_RXD_CNT 30 * (MAX_RXDS_PER_BLOCK+1) 647#define SMALL_RXD_CNT 30 * (MAX_RXDS_PER_BLOCK+1)
@@ -499,7 +650,20 @@ typedef struct rx_block_info {
499#define LARGE_BLK_CNT 100 650#define LARGE_BLK_CNT 100
500 651
501/* Structure representing one instance of the NIC */ 652/* Structure representing one instance of the NIC */
502typedef struct s2io_nic { 653struct s2io_nic {
654#ifdef CONFIG_S2IO_NAPI
655 /*
656 * Count of packets to be processed in a given iteration, it will be indicated
657 * by the quota field of the device structure when NAPI is enabled.
658 */
659 int pkts_to_process;
660#endif
661 struct net_device *dev;
662 mac_info_t mac_control;
663 struct config_param config;
664 struct pci_dev *pdev;
665 void __iomem *bar0;
666 void __iomem *bar1;
503#define MAX_MAC_SUPPORTED 16 667#define MAX_MAC_SUPPORTED 16
504#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED 668#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
505 669
@@ -507,33 +671,20 @@ typedef struct s2io_nic {
507 macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED]; 671 macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED];
508 672
509 struct net_device_stats stats; 673 struct net_device_stats stats;
510 void __iomem *bar0;
511 void __iomem *bar1;
512 struct config_param config;
513 mac_info_t mac_control;
514 int high_dma_flag; 674 int high_dma_flag;
515 int device_close_flag; 675 int device_close_flag;
516 int device_enabled_once; 676 int device_enabled_once;
517 677
518 char name[32]; 678 char name[50];
519 struct tasklet_struct task; 679 struct tasklet_struct task;
520 volatile unsigned long tasklet_status; 680 volatile unsigned long tasklet_status;
521 struct timer_list timer;
522 struct net_device *dev;
523 struct pci_dev *pdev;
524 681
525 u16 vendor_id; 682 /* Timer that handles I/O errors/exceptions */
526 u16 device_id; 683 struct timer_list alarm_timer;
527 u16 ccmd; 684
528 u32 cbar0_1; 685 /* Space to back up the PCI config space */
529 u32 cbar0_2; 686 u32 config_space[256 / sizeof(u32)];
530 u32 cbar1_1; 687
531 u32 cbar1_2;
532 u32 cirq;
533 u8 cache_line;
534 u32 rom_expansion;
535 u16 pcix_cmd;
536 u32 irq;
537 atomic_t rx_bufs_left[MAX_RX_RINGS]; 688 atomic_t rx_bufs_left[MAX_RX_RINGS];
538 689
539 spinlock_t tx_lock; 690 spinlock_t tx_lock;
@@ -558,27 +709,11 @@ typedef struct s2io_nic {
558 u16 tx_err_count; 709 u16 tx_err_count;
559 u16 rx_err_count; 710 u16 rx_err_count;
560 711
561#ifndef CONFIG_S2IO_NAPI
562 /* Index to the absolute position of the put pointer of Rx ring. */
563 int put_pos[MAX_RX_RINGS];
564#endif
565
566 /*
567 * Place holders for the virtual and physical addresses of
568 * all the Rx Blocks
569 */
570 rx_block_info_t rx_blocks[MAX_RX_RINGS][MAX_RX_BLOCKS_PER_RING];
571 int block_count[MAX_RX_RINGS];
572 int pkt_cnt[MAX_RX_RINGS];
573
574 /* Place holder of all the TX List's Phy and Virt addresses. */
575 list_info_hold_t *list_info[MAX_TX_FIFOS];
576
577 /* Id timer, used to blink NIC to physically identify NIC. */ 712 /* Id timer, used to blink NIC to physically identify NIC. */
578 struct timer_list id_timer; 713 struct timer_list id_timer;
579 714
580 /* Restart timer, used to restart NIC if the device is stuck and 715 /* Restart timer, used to restart NIC if the device is stuck and
581 * a schedule task that will set the correct Link state once the 716 * a schedule task that will set the correct Link state once the
582 * NIC's PHY has stabilized after a state change. 717 * NIC's PHY has stabilized after a state change.
583 */ 718 */
584#ifdef INIT_TQUEUE 719#ifdef INIT_TQUEUE
@@ -589,12 +724,12 @@ typedef struct s2io_nic {
589 struct work_struct set_link_task; 724 struct work_struct set_link_task;
590#endif 725#endif
591 726
592 /* Flag that can be used to turn on or turn off the Rx checksum 727 /* Flag that can be used to turn on or turn off the Rx checksum
593 * offload feature. 728 * offload feature.
594 */ 729 */
595 int rx_csum; 730 int rx_csum;
596 731
597 /* after blink, the adapter must be restored with original 732 /* after blink, the adapter must be restored with original
598 * values. 733 * values.
599 */ 734 */
600 u64 adapt_ctrl_org; 735 u64 adapt_ctrl_org;
@@ -604,16 +739,19 @@ typedef struct s2io_nic {
604#define LINK_DOWN 1 739#define LINK_DOWN 1
605#define LINK_UP 2 740#define LINK_UP 2
606 741
607#ifdef CONFIG_2BUFF_MODE
608 /* Buffer Address store. */
609 buffAdd_t **ba[MAX_RX_RINGS];
610#endif
611 int task_flag; 742 int task_flag;
612#define CARD_DOWN 1 743#define CARD_DOWN 1
613#define CARD_UP 2 744#define CARD_UP 2
614 atomic_t card_state; 745 atomic_t card_state;
615 volatile unsigned long link_state; 746 volatile unsigned long link_state;
616} nic_t; 747 struct vlan_group *vlgrp;
748#define XFRAME_I_DEVICE 1
749#define XFRAME_II_DEVICE 2
750 u8 device_type;
751
752 spinlock_t rx_lock;
753 atomic_t isr_cnt;
754};
617 755
618#define RESET_ERROR 1; 756#define RESET_ERROR 1;
619#define CMD_ERROR 2; 757#define CMD_ERROR 2;
@@ -622,9 +760,10 @@ typedef struct s2io_nic {
622#ifndef readq 760#ifndef readq
623static inline u64 readq(void __iomem *addr) 761static inline u64 readq(void __iomem *addr)
624{ 762{
625 u64 ret = readl(addr + 4); 763 u64 ret = 0;
626 ret <<= 32; 764 ret = readl(addr + 4);
627 ret |= readl(addr); 765 (u64) ret <<= 32;
766 (u64) ret |= readl(addr);
628 767
629 return ret; 768 return ret;
630} 769}
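
For reference, the 32-bit fallback above builds a 64-bit MMIO read out of two readl() calls, with the upper dword at addr + 4. The reworked version assigns through a cast ((u64) ret <<= 32), which appears to rely on the old GCC cast-as-lvalue extension; a minimal sketch of the same composition without it (illustration only, not the driver's code):

/* Sketch only, not taken from the patch: compose a 64-bit read from two
 * 32-bit reads, assuming the upper 32 bits of each 64-bit register live
 * at addr + 4, as the writeq() fallback below implies. */
static inline u64 example_readq(void __iomem *addr)
{
        u64 ret;

        ret = (u64) readl(addr + 4) << 32;      /* upper dword */
        ret |= readl(addr);                     /* lower dword */

        return ret;
}
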
@@ -637,10 +776,10 @@ static inline void writeq(u64 val, void __iomem *addr)
637 writel((u32) (val >> 32), (addr + 4)); 776 writel((u32) (val >> 32), (addr + 4));
638} 777}
639 778
640/* In 32 bit modes, some registers have to be written in a 779/* In 32 bit modes, some registers have to be written in a
641 * particular order to expect correct hardware operation. The 780 * particular order to expect correct hardware operation. The
642 * macro SPECIAL_REG_WRITE is used to perform such ordered 781 * macro SPECIAL_REG_WRITE is used to perform such ordered
643 * writes. Defines UF (Upper First) and LF (Lower First) will 782 * writes. Defines UF (Upper First) and LF (Lower First) will
644 * be used to specify the required write order. 783 * be used to specify the required write order.
645 */ 784 */
646#define UF 1 785#define UF 1
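
The comment above describes the s2io convention for 64-bit registers on 32-bit platforms: the two 32-bit halves must reach the bus in a defined order, selected by UF (Upper First) or LF (Lower First). The driver's actual SPECIAL_REG_WRITE() body is not visible in this hunk, so the sketch below only illustrates the idea, using hypothetical names:

/* Hedged illustration, not the s2io implementation: write the two halves
 * of a 64-bit value in the order the caller asks for. */
#define EXAMPLE_UF      1       /* upper dword first */
#define EXAMPLE_LF      2       /* lower dword first */

static inline void example_ordered_write64(u64 val, void __iomem *addr,
                                           int order)
{
        if (order == EXAMPLE_UF) {
                writel((u32) (val >> 32), addr + 4);
                writel((u32) val, addr);
        } else {
                writel((u32) val, addr);
                writel((u32) (val >> 32), addr + 4);
        }
}
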
@@ -716,6 +855,7 @@ static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
716#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate 855#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate
717 PCC_FB_ECC Error. */ 856 PCC_FB_ECC Error. */
718 857
858#define RXD_GET_VLAN_TAG(Control_2) (u16)(Control_2 & MASK_VLAN_TAG)
719/* 859/*
720 * Prototype declaration. 860 * Prototype declaration.
721 */ 861 */
@@ -725,36 +865,30 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev);
725static int init_shared_mem(struct s2io_nic *sp); 865static int init_shared_mem(struct s2io_nic *sp);
726static void free_shared_mem(struct s2io_nic *sp); 866static void free_shared_mem(struct s2io_nic *sp);
727static int init_nic(struct s2io_nic *nic); 867static int init_nic(struct s2io_nic *nic);
728#ifndef CONFIG_S2IO_NAPI 868static void rx_intr_handler(ring_info_t *ring_data);
729static void rx_intr_handler(struct s2io_nic *sp); 869static void tx_intr_handler(fifo_info_t *fifo_data);
730#endif
731static void tx_intr_handler(struct s2io_nic *sp);
732static void alarm_intr_handler(struct s2io_nic *sp); 870static void alarm_intr_handler(struct s2io_nic *sp);
733 871
734static int s2io_starter(void); 872static int s2io_starter(void);
735static void s2io_closer(void); 873void s2io_closer(void);
736static void s2io_tx_watchdog(struct net_device *dev); 874static void s2io_tx_watchdog(struct net_device *dev);
737static void s2io_tasklet(unsigned long dev_addr); 875static void s2io_tasklet(unsigned long dev_addr);
738static void s2io_set_multicast(struct net_device *dev); 876static void s2io_set_multicast(struct net_device *dev);
739#ifndef CONFIG_2BUFF_MODE 877static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp);
740static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no); 878void s2io_link(nic_t * sp, int link);
741#else 879void s2io_reset(nic_t * sp);
742static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no, 880#if defined(CONFIG_S2IO_NAPI)
743 buffAdd_t * ba);
744#endif
745static void s2io_link(nic_t * sp, int link);
746static void s2io_reset(nic_t * sp);
747#ifdef CONFIG_S2IO_NAPI
748static int s2io_poll(struct net_device *dev, int *budget); 881static int s2io_poll(struct net_device *dev, int *budget);
749#endif 882#endif
750static void s2io_init_pci(nic_t * sp); 883static void s2io_init_pci(nic_t * sp);
751static int s2io_set_mac_addr(struct net_device *dev, u8 * addr); 884int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
885static void s2io_alarm_handle(unsigned long data);
752static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs); 886static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs);
753static int verify_xena_quiescence(u64 val64, int flag); 887static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
754static struct ethtool_ops netdev_ethtool_ops; 888static struct ethtool_ops netdev_ethtool_ops;
755static void s2io_set_link(unsigned long data); 889static void s2io_set_link(unsigned long data);
756static int s2io_set_swapper(nic_t * sp); 890int s2io_set_swapper(nic_t * sp);
757static void s2io_card_down(nic_t * nic); 891static void s2io_card_down(nic_t *nic);
758static int s2io_card_up(nic_t * nic); 892static int s2io_card_up(nic_t *nic);
759 893int get_xena_rev_id(struct pci_dev *pdev);
760#endif /* _S2IO_H */ 894#endif /* _S2IO_H */
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index f15739481d62..d7c98515fdfd 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
42#include "skge.h" 42#include "skge.h"
43 43
44#define DRV_NAME "skge" 44#define DRV_NAME "skge"
45#define DRV_VERSION "0.8" 45#define DRV_VERSION "0.9"
46#define PFX DRV_NAME " " 46#define PFX DRV_NAME " "
47 47
48#define DEFAULT_TX_RING_SIZE 128 48#define DEFAULT_TX_RING_SIZE 128
@@ -79,8 +79,8 @@ static const struct pci_device_id skge_id_table[] = {
79 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, 79 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
80 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ 80 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
81 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) }, 81 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
82 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032) },
83 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) }, 82 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
83 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, },
84 { 0 } 84 { 0 }
85}; 85};
86MODULE_DEVICE_TABLE(pci, skge_id_table); 86MODULE_DEVICE_TABLE(pci, skge_id_table);
@@ -189,7 +189,7 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
189{ 189{
190 u32 supported; 190 u32 supported;
191 191
192 if (iscopper(hw)) { 192 if (hw->copper) {
193 supported = SUPPORTED_10baseT_Half 193 supported = SUPPORTED_10baseT_Half
194 | SUPPORTED_10baseT_Full 194 | SUPPORTED_10baseT_Full
195 | SUPPORTED_100baseT_Half 195 | SUPPORTED_100baseT_Half
@@ -222,7 +222,7 @@ static int skge_get_settings(struct net_device *dev,
222 ecmd->transceiver = XCVR_INTERNAL; 222 ecmd->transceiver = XCVR_INTERNAL;
223 ecmd->supported = skge_supported_modes(hw); 223 ecmd->supported = skge_supported_modes(hw);
224 224
225 if (iscopper(hw)) { 225 if (hw->copper) {
226 ecmd->port = PORT_TP; 226 ecmd->port = PORT_TP;
227 ecmd->phy_address = hw->phy_addr; 227 ecmd->phy_address = hw->phy_addr;
228 } else 228 } else
@@ -876,6 +876,9 @@ static int skge_rx_fill(struct skge_port *skge)
876 876
877static void skge_link_up(struct skge_port *skge) 877static void skge_link_up(struct skge_port *skge)
878{ 878{
879 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
880 LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
881
879 netif_carrier_on(skge->netdev); 882 netif_carrier_on(skge->netdev);
880 if (skge->tx_avail > MAX_SKB_FRAGS + 1) 883 if (skge->tx_avail > MAX_SKB_FRAGS + 1)
881 netif_wake_queue(skge->netdev); 884 netif_wake_queue(skge->netdev);
@@ -894,6 +897,7 @@ static void skge_link_up(struct skge_port *skge)
894 897
895static void skge_link_down(struct skge_port *skge) 898static void skge_link_down(struct skge_port *skge)
896{ 899{
900 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
897 netif_carrier_off(skge->netdev); 901 netif_carrier_off(skge->netdev);
898 netif_stop_queue(skge->netdev); 902 netif_stop_queue(skge->netdev);
899 903
@@ -1599,7 +1603,7 @@ static void yukon_init(struct skge_hw *hw, int port)
1599 adv = PHY_AN_CSMA; 1603 adv = PHY_AN_CSMA;
1600 1604
1601 if (skge->autoneg == AUTONEG_ENABLE) { 1605 if (skge->autoneg == AUTONEG_ENABLE) {
1602 if (iscopper(hw)) { 1606 if (hw->copper) {
1603 if (skge->advertising & ADVERTISED_1000baseT_Full) 1607 if (skge->advertising & ADVERTISED_1000baseT_Full)
1604 ct1000 |= PHY_M_1000C_AFD; 1608 ct1000 |= PHY_M_1000C_AFD;
1605 if (skge->advertising & ADVERTISED_1000baseT_Half) 1609 if (skge->advertising & ADVERTISED_1000baseT_Half)
@@ -1691,7 +1695,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1691 /* Set hardware config mode */ 1695 /* Set hardware config mode */
1692 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | 1696 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
1693 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE; 1697 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
1694 reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; 1698 reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
1695 1699
1696 /* Clear GMC reset */ 1700 /* Clear GMC reset */
1697 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); 1701 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
@@ -1780,7 +1784,12 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1780 reg &= ~GMF_RX_F_FL_ON; 1784 reg &= ~GMF_RX_F_FL_ON;
1781 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 1785 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1782 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); 1786 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
1783 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF); 1787 /*
1788 * because Pause Packet Truncation in GMAC is not working
1789 * we have to increase the Flush Threshold to 64 bytes
1790 * in order to flush pause packets in Rx FIFO on Yukon-1
1791 */
1792 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
1784 1793
1785 /* Configure Tx MAC FIFO */ 1794 /* Configure Tx MAC FIFO */
1786 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 1795 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
@@ -2670,18 +2679,6 @@ static void skge_error_irq(struct skge_hw *hw)
2670 /* Timestamp (unused) overflow */ 2679 /* Timestamp (unused) overflow */
2671 if (hwstatus & IS_IRQ_TIST_OV) 2680 if (hwstatus & IS_IRQ_TIST_OV)
2672 skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 2681 skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2673
2674 if (hwstatus & IS_IRQ_SENSOR) {
2675 /* no sensors on 32-bit Yukon */
2676 if (!(skge_read16(hw, B0_CTST) & CS_BUS_SLOT_SZ)) {
2677 printk(KERN_ERR PFX "ignoring bogus sensor interrups\n");
2678 skge_write32(hw, B0_HWE_IMSK,
2679 IS_ERR_MSK & ~IS_IRQ_SENSOR);
2680 } else
2681 printk(KERN_WARNING PFX "sensor interrupt\n");
2682 }
2683
2684
2685 } 2682 }
2686 2683
2687 if (hwstatus & IS_RAM_RD_PAR) { 2684 if (hwstatus & IS_RAM_RD_PAR) {
@@ -2712,9 +2709,10 @@ static void skge_error_irq(struct skge_hw *hw)
2712 2709
2713 skge_pci_clear(hw); 2710 skge_pci_clear(hw);
2714 2711
2712 /* if error still set then just ignore it */
2715 hwstatus = skge_read32(hw, B0_HWE_ISRC); 2713 hwstatus = skge_read32(hw, B0_HWE_ISRC);
2716 if (hwstatus & IS_IRQ_STAT) { 2714 if (hwstatus & IS_IRQ_STAT) {
2717 printk(KERN_WARNING PFX "IRQ status %x: still set ignoring hardware errors\n", 2715 pr_debug("IRQ status %x: still set ignoring hardware errors\n",
2718 hwstatus); 2716 hwstatus);
2719 hw->intr_mask &= ~IS_HW_ERR; 2717 hw->intr_mask &= ~IS_HW_ERR;
2720 } 2718 }
@@ -2876,7 +2874,7 @@ static const char *skge_board_name(const struct skge_hw *hw)
2876static int skge_reset(struct skge_hw *hw) 2874static int skge_reset(struct skge_hw *hw)
2877{ 2875{
2878 u16 ctst; 2876 u16 ctst;
2879 u8 t8, mac_cfg; 2877 u8 t8, mac_cfg, pmd_type, phy_type;
2880 int i; 2878 int i;
2881 2879
2882 ctst = skge_read16(hw, B0_CTST); 2880 ctst = skge_read16(hw, B0_CTST);
@@ -2895,18 +2893,19 @@ static int skge_reset(struct skge_hw *hw)
2895 ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA)); 2893 ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
2896 2894
2897 hw->chip_id = skge_read8(hw, B2_CHIP_ID); 2895 hw->chip_id = skge_read8(hw, B2_CHIP_ID);
2898 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; 2896 phy_type = skge_read8(hw, B2_E_1) & 0xf;
2899 hw->pmd_type = skge_read8(hw, B2_PMD_TYP); 2897 pmd_type = skge_read8(hw, B2_PMD_TYP);
2898 hw->copper = (pmd_type == 'T' || pmd_type == '1');
2900 2899
2901 switch (hw->chip_id) { 2900 switch (hw->chip_id) {
2902 case CHIP_ID_GENESIS: 2901 case CHIP_ID_GENESIS:
2903 switch (hw->phy_type) { 2902 switch (phy_type) {
2904 case SK_PHY_BCOM: 2903 case SK_PHY_BCOM:
2905 hw->phy_addr = PHY_ADDR_BCOM; 2904 hw->phy_addr = PHY_ADDR_BCOM;
2906 break; 2905 break;
2907 default: 2906 default:
2908 printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n", 2907 printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
2909 pci_name(hw->pdev), hw->phy_type); 2908 pci_name(hw->pdev), phy_type);
2910 return -EOPNOTSUPP; 2909 return -EOPNOTSUPP;
2911 } 2910 }
2912 break; 2911 break;
@@ -2914,13 +2913,10 @@ static int skge_reset(struct skge_hw *hw)
2914 case CHIP_ID_YUKON: 2913 case CHIP_ID_YUKON:
2915 case CHIP_ID_YUKON_LITE: 2914 case CHIP_ID_YUKON_LITE:
2916 case CHIP_ID_YUKON_LP: 2915 case CHIP_ID_YUKON_LP:
2917 if (hw->phy_type < SK_PHY_MARV_COPPER && hw->pmd_type != 'S') 2916 if (phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
2918 hw->phy_type = SK_PHY_MARV_COPPER; 2917 hw->copper = 1;
2919 2918
2920 hw->phy_addr = PHY_ADDR_MARV; 2919 hw->phy_addr = PHY_ADDR_MARV;
2921 if (!iscopper(hw))
2922 hw->phy_type = SK_PHY_MARV_FIBER;
2923
2924 break; 2920 break;
2925 2921
2926 default: 2922 default:
@@ -2948,12 +2944,20 @@ static int skge_reset(struct skge_hw *hw)
2948 else 2944 else
2949 hw->ram_size = t8 * 4096; 2945 hw->ram_size = t8 * 4096;
2950 2946
2947 hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
2951 if (hw->chip_id == CHIP_ID_GENESIS) 2948 if (hw->chip_id == CHIP_ID_GENESIS)
2952 genesis_init(hw); 2949 genesis_init(hw);
2953 else { 2950 else {
2954 /* switch power to VCC (WA for VAUX problem) */ 2951 /* switch power to VCC (WA for VAUX problem) */
2955 skge_write8(hw, B0_POWER_CTRL, 2952 skge_write8(hw, B0_POWER_CTRL,
2956 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 2953 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
2954 /* avoid boards with stuck Hardware error bits */
2955 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
2956 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
2957 printk(KERN_WARNING PFX "stuck hardware sensor bit\n");
2958 hw->intr_mask &= ~IS_HW_ERR;
2959 }
2960
2957 for (i = 0; i < hw->ports; i++) { 2961 for (i = 0; i < hw->ports; i++) {
2958 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 2962 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2959 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); 2963 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
@@ -2994,7 +2998,6 @@ static int skge_reset(struct skge_hw *hw)
2994 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); 2998 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
2995 skge_write32(hw, B2_IRQM_CTRL, TIM_START); 2999 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
2996 3000
2997 hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
2998 skge_write32(hw, B0_IMSK, hw->intr_mask); 3001 skge_write32(hw, B0_IMSK, hw->intr_mask);
2999 3002
3000 if (hw->chip_id != CHIP_ID_GENESIS) 3003 if (hw->chip_id != CHIP_ID_GENESIS)
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index b432f1bb8168..f1680beb8e68 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -214,8 +214,6 @@ enum {
214 214
215/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ 215/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
216enum { 216enum {
217 IS_ERR_MSK = 0x00003fff,/* All Error bits */
218
219 IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */ 217 IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
220 IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */ 218 IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
221 IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */ 219 IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
@@ -230,6 +228,12 @@ enum {
230 IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */ 228 IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
231 IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */ 229 IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
232 IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */ 230 IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
231
232 IS_ERR_MSK = IS_IRQ_MST_ERR | IS_IRQ_STAT
233 | IS_NO_STAT_M1 | IS_NO_STAT_M2
234 | IS_RAM_RD_PAR | IS_RAM_WR_PAR
235 | IS_M1_PAR_ERR | IS_M2_PAR_ERR
236 | IS_R1_PAR_ERR | IS_R2_PAR_ERR,
233}; 237};
234 238
235/* B2_TST_CTRL1 8 bit Test Control Register 1 */ 239/* B2_TST_CTRL1 8 bit Test Control Register 1 */
@@ -2456,24 +2460,17 @@ struct skge_hw {
2456 2460
2457 u8 chip_id; 2461 u8 chip_id;
2458 u8 chip_rev; 2462 u8 chip_rev;
2459 u8 phy_type; 2463 u8 copper;
2460 u8 pmd_type;
2461 u16 phy_addr;
2462 u8 ports; 2464 u8 ports;
2463 2465
2464 u32 ram_size; 2466 u32 ram_size;
2465 u32 ram_offset; 2467 u32 ram_offset;
2468 u16 phy_addr;
2466 2469
2467 struct tasklet_struct ext_tasklet; 2470 struct tasklet_struct ext_tasklet;
2468 spinlock_t phy_lock; 2471 spinlock_t phy_lock;
2469}; 2472};
2470 2473
2471
2472static inline int iscopper(const struct skge_hw *hw)
2473{
2474 return (hw->pmd_type == 'T');
2475}
2476
2477enum { 2474enum {
2478 FLOW_MODE_NONE = 0, /* No Flow-Control */ 2475 FLOW_MODE_NONE = 0, /* No Flow-Control */
2479 FLOW_MODE_LOC_SEND = 1, /* Local station sends PAUSE */ 2476 FLOW_MODE_LOC_SEND = 1, /* Local station sends PAUSE */
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index cdc9cc873e06..90b818a8de6e 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -1,6 +1,11 @@
1/* 1/*
2 * sonic.c 2 * sonic.c
3 * 3 *
4 * (C) 2005 Finn Thain
5 *
6 * Converted to DMA API, added zero-copy buffer handling, and
7 * (from the mac68k project) introduced dhd's support for 16-bit cards.
8 *
4 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de) 9 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
5 * 10 *
6 * This driver is based on work from Andreas Busse, but most of 11 * This driver is based on work from Andreas Busse, but most of
@@ -9,12 +14,23 @@
9 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de) 14 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
10 * 15 *
11 * Core code included by system sonic drivers 16 * Core code included by system sonic drivers
17 *
18 * And... partially rewritten again by David Huggins-Daines in order
19 * to cope with screwed up Macintosh NICs that may or may not use
20 * 16-bit DMA.
21 *
22 * (C) 1999 David Huggins-Daines <dhd@debian.org>
23 *
12 */ 24 */
13 25
14/* 26/*
15 * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook, 27 * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
16 * National Semiconductors data sheet for the DP83932B Sonic Ethernet 28 * National Semiconductors data sheet for the DP83932B Sonic Ethernet
17 * controller, and the files "8390.c" and "skeleton.c" in this directory. 29 * controller, and the files "8390.c" and "skeleton.c" in this directory.
30 *
31 * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
32 * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
33 * the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
18 */ 34 */
19 35
20 36
@@ -28,6 +44,9 @@
28 */ 44 */
29static int sonic_open(struct net_device *dev) 45static int sonic_open(struct net_device *dev)
30{ 46{
47 struct sonic_local *lp = netdev_priv(dev);
48 int i;
49
31 if (sonic_debug > 2) 50 if (sonic_debug > 2)
32 printk("sonic_open: initializing sonic driver.\n"); 51 printk("sonic_open: initializing sonic driver.\n");
33 52
@@ -40,14 +59,59 @@ static int sonic_open(struct net_device *dev)
40 * This means that during execution of the handler interrupt are disabled 59 * This means that during execution of the handler interrupt are disabled
41 * covering another bug otherwise corrupting data. This doesn't mean 60 * covering another bug otherwise corrupting data. This doesn't mean
42 * this glue works ok under all situations. 61 * this glue works ok under all situations.
62 *
63 * Note (dhd): this also appears to prevent lockups on the Macintrash
64 * when more than one Ethernet card is installed (knock on wood)
65 *
66 * Note (fthain): whether the above is still true is anyone's guess. Certainly
67 * the buffer handling algorithms will not tolerate re-entrance without some
68 * mutual exclusion added. Anyway, the memcpy has now been eliminated from the
69 * rx code to make this a faster "fast interrupt".
43 */ 70 */
44// if (sonic_request_irq(dev->irq, &sonic_interrupt, 0, "sonic", dev)) { 71 if (request_irq(dev->irq, &sonic_interrupt, SONIC_IRQ_FLAG, "sonic", dev)) {
45 if (sonic_request_irq(dev->irq, &sonic_interrupt, SA_INTERRUPT, 72 printk(KERN_ERR "\n%s: unable to get IRQ %d .\n", dev->name, dev->irq);
46 "sonic", dev)) {
47 printk("\n%s: unable to get IRQ %d .\n", dev->name, dev->irq);
48 return -EAGAIN; 73 return -EAGAIN;
49 } 74 }
50 75
76 for (i = 0; i < SONIC_NUM_RRS; i++) {
77 struct sk_buff *skb = dev_alloc_skb(SONIC_RBSIZE + 2);
78 if (skb == NULL) {
79 while(i > 0) { /* free any that were allocated successfully */
80 i--;
81 dev_kfree_skb(lp->rx_skb[i]);
82 lp->rx_skb[i] = NULL;
83 }
84 printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
85 dev->name);
86 return -ENOMEM;
87 }
88 skb->dev = dev;
89 /* align IP header unless DMA requires otherwise */
90 if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
91 skb_reserve(skb, 2);
92 lp->rx_skb[i] = skb;
93 }
94
95 for (i = 0; i < SONIC_NUM_RRS; i++) {
96 dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
97 SONIC_RBSIZE, DMA_FROM_DEVICE);
98 if (!laddr) {
99 while(i > 0) { /* free any that were mapped successfully */
100 i--;
101 dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
102 lp->rx_laddr[i] = (dma_addr_t)0;
103 }
104 for (i = 0; i < SONIC_NUM_RRS; i++) {
105 dev_kfree_skb(lp->rx_skb[i]);
106 lp->rx_skb[i] = NULL;
107 }
108 printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
109 dev->name);
110 return -ENOMEM;
111 }
112 lp->rx_laddr[i] = laddr;
113 }
114
51 /* 115 /*
52 * Initialize the SONIC 116 * Initialize the SONIC
53 */ 117 */
@@ -67,7 +131,8 @@ static int sonic_open(struct net_device *dev)
67 */ 131 */
68static int sonic_close(struct net_device *dev) 132static int sonic_close(struct net_device *dev)
69{ 133{
70 unsigned int base_addr = dev->base_addr; 134 struct sonic_local *lp = netdev_priv(dev);
135 int i;
71 136
72 if (sonic_debug > 2) 137 if (sonic_debug > 2)
73 printk("sonic_close\n"); 138 printk("sonic_close\n");
@@ -77,20 +142,56 @@ static int sonic_close(struct net_device *dev)
77 /* 142 /*
78 * stop the SONIC, disable interrupts 143 * stop the SONIC, disable interrupts
79 */ 144 */
80 SONIC_WRITE(SONIC_ISR, 0x7fff);
81 SONIC_WRITE(SONIC_IMR, 0); 145 SONIC_WRITE(SONIC_IMR, 0);
146 SONIC_WRITE(SONIC_ISR, 0x7fff);
82 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); 147 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
83 148
84 sonic_free_irq(dev->irq, dev); /* release the IRQ */ 149 /* unmap and free skbs that haven't been transmitted */
150 for (i = 0; i < SONIC_NUM_TDS; i++) {
151 if(lp->tx_laddr[i]) {
152 dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
153 lp->tx_laddr[i] = (dma_addr_t)0;
154 }
155 if(lp->tx_skb[i]) {
156 dev_kfree_skb(lp->tx_skb[i]);
157 lp->tx_skb[i] = NULL;
158 }
159 }
160
161 /* unmap and free the receive buffers */
162 for (i = 0; i < SONIC_NUM_RRS; i++) {
163 if(lp->rx_laddr[i]) {
164 dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
165 lp->rx_laddr[i] = (dma_addr_t)0;
166 }
167 if(lp->rx_skb[i]) {
168 dev_kfree_skb(lp->rx_skb[i]);
169 lp->rx_skb[i] = NULL;
170 }
171 }
172
173 free_irq(dev->irq, dev); /* release the IRQ */
85 174
86 return 0; 175 return 0;
87} 176}
88 177
89static void sonic_tx_timeout(struct net_device *dev) 178static void sonic_tx_timeout(struct net_device *dev)
90{ 179{
91 struct sonic_local *lp = (struct sonic_local *) dev->priv; 180 struct sonic_local *lp = netdev_priv(dev);
92 printk("%s: transmit timed out.\n", dev->name); 181 int i;
93 182 /* Stop the interrupts for this */
183 SONIC_WRITE(SONIC_IMR, 0);
184 /* We could resend the original skbs. Easier to re-initialise. */
185 for (i = 0; i < SONIC_NUM_TDS; i++) {
186 if(lp->tx_laddr[i]) {
187 dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
188 lp->tx_laddr[i] = (dma_addr_t)0;
189 }
190 if(lp->tx_skb[i]) {
191 dev_kfree_skb(lp->tx_skb[i]);
192 lp->tx_skb[i] = NULL;
193 }
194 }
94 /* Try to restart the adaptor. */ 195 /* Try to restart the adaptor. */
95 sonic_init(dev); 196 sonic_init(dev);
96 lp->stats.tx_errors++; 197 lp->stats.tx_errors++;
@@ -100,60 +201,92 @@ static void sonic_tx_timeout(struct net_device *dev)
100 201
101/* 202/*
102 * transmit packet 203 * transmit packet
204 *
205 * Appends new TD during transmission thus avoiding any TX interrupts
206 * until we run out of TDs.
207 * This routine interacts closely with the ISR in that it may,
208 * set tx_skb[i]
209 * reset the status flags of the new TD
210 * set and reset EOL flags
211 * stop the tx queue
212 * The ISR interacts with this routine in various ways. It may,
213 * reset tx_skb[i]
214 * test the EOL and status flags of the TDs
215 * wake the tx queue
216 * Concurrently with all of this, the SONIC is potentially writing to
217 * the status flags of the TDs.
218 * Until some mutual exclusion is added, this code will not work with SMP. However,
219 * MIPS Jazz machines and m68k Macs were all uni-processor machines.
103 */ 220 */
221
104static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) 222static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
105{ 223{
106 struct sonic_local *lp = (struct sonic_local *) dev->priv; 224 struct sonic_local *lp = netdev_priv(dev);
107 unsigned int base_addr = dev->base_addr; 225 dma_addr_t laddr;
108 unsigned int laddr; 226 int length;
109 int entry, length; 227 int entry = lp->next_tx;
110
111 netif_stop_queue(dev);
112 228
113 if (sonic_debug > 2) 229 if (sonic_debug > 2)
114 printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev); 230 printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev);
115 231
232 length = skb->len;
233 if (length < ETH_ZLEN) {
234 skb = skb_padto(skb, ETH_ZLEN);
235 if (skb == NULL)
236 return 0;
237 length = ETH_ZLEN;
238 }
239
116 /* 240 /*
117 * Map the packet data into the logical DMA address space 241 * Map the packet data into the logical DMA address space
118 */ 242 */
119 if ((laddr = vdma_alloc(CPHYSADDR(skb->data), skb->len)) == ~0UL) { 243
120 printk("%s: no VDMA entry for transmit available.\n", 244 laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
121 dev->name); 245 if (!laddr) {
246 printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
122 dev_kfree_skb(skb); 247 dev_kfree_skb(skb);
123 netif_start_queue(dev);
124 return 1; 248 return 1;
125 } 249 }
126 entry = lp->cur_tx & SONIC_TDS_MASK; 250
251 sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
252 sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
253 sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
254 sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
255 sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
256 sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
257 sonic_tda_put(dev, entry, SONIC_TD_LINK,
258 sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
259
260 /*
261 * Must set tx_skb[entry] only after clearing status, and
262 * before clearing EOL and before stopping queue
263 */
264 wmb();
265 lp->tx_len[entry] = length;
127 lp->tx_laddr[entry] = laddr; 266 lp->tx_laddr[entry] = laddr;
128 lp->tx_skb[entry] = skb; 267 lp->tx_skb[entry] = skb;
129 268
130 length = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; 269 wmb();
131 flush_cache_all(); 270 sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
271 sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
272 lp->eol_tx = entry;
132 273
133 /* 274 lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
134 * Setup the transmit descriptor and issue the transmit command. 275 if (lp->tx_skb[lp->next_tx] != NULL) {
135 */ 276 /* The ring is full, the ISR has yet to process the next TD. */
136 lp->tda[entry].tx_status = 0; /* clear status */ 277 if (sonic_debug > 3)
137 lp->tda[entry].tx_frag_count = 1; /* single fragment */ 278 printk("%s: stopping queue\n", dev->name);
138 lp->tda[entry].tx_pktsize = length; /* length of packet */ 279 netif_stop_queue(dev);
139 lp->tda[entry].tx_frag_ptr_l = laddr & 0xffff; 280 /* after this packet, wait for ISR to free up some TDAs */
140 lp->tda[entry].tx_frag_ptr_h = laddr >> 16; 281 } else netif_start_queue(dev);
141 lp->tda[entry].tx_frag_size = length;
142 lp->cur_tx++;
143 lp->stats.tx_bytes += length;
144 282
145 if (sonic_debug > 2) 283 if (sonic_debug > 2)
146 printk("sonic_send_packet: issueing Tx command\n"); 284 printk("sonic_send_packet: issuing Tx command\n");
147 285
148 SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); 286 SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
149 287
150 dev->trans_start = jiffies; 288 dev->trans_start = jiffies;
151 289
152 if (lp->cur_tx < lp->dirty_tx + SONIC_NUM_TDS)
153 netif_start_queue(dev);
154 else
155 lp->tx_full = 1;
156
157 return 0; 290 return 0;
158} 291}
159 292
@@ -164,175 +297,199 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
164static irqreturn_t sonic_interrupt(int irq, void *dev_id, struct pt_regs *regs) 297static irqreturn_t sonic_interrupt(int irq, void *dev_id, struct pt_regs *regs)
165{ 298{
166 struct net_device *dev = (struct net_device *) dev_id; 299 struct net_device *dev = (struct net_device *) dev_id;
167 unsigned int base_addr = dev->base_addr; 300 struct sonic_local *lp = netdev_priv(dev);
168 struct sonic_local *lp;
169 int status; 301 int status;
170 302
171 if (dev == NULL) { 303 if (dev == NULL) {
172 printk("sonic_interrupt: irq %d for unknown device.\n", irq); 304 printk(KERN_ERR "sonic_interrupt: irq %d for unknown device.\n", irq);
173 return IRQ_NONE; 305 return IRQ_NONE;
174 } 306 }
175 307
176 lp = (struct sonic_local *) dev->priv; 308 if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
177 309 return IRQ_NONE;
178 status = SONIC_READ(SONIC_ISR);
179 SONIC_WRITE(SONIC_ISR, 0x7fff); /* clear all bits */
180
181 if (sonic_debug > 2)
182 printk("sonic_interrupt: ISR=%x\n", status);
183
184 if (status & SONIC_INT_PKTRX) {
185 sonic_rx(dev); /* got packet(s) */
186 }
187
188 if (status & SONIC_INT_TXDN) {
189 int dirty_tx = lp->dirty_tx;
190
191 while (dirty_tx < lp->cur_tx) {
192 int entry = dirty_tx & SONIC_TDS_MASK;
193 int status = lp->tda[entry].tx_status;
194 310
195 if (sonic_debug > 3) 311 do {
196 printk 312 if (status & SONIC_INT_PKTRX) {
197 ("sonic_interrupt: status %d, cur_tx %d, dirty_tx %d\n", 313 if (sonic_debug > 2)
198 status, lp->cur_tx, lp->dirty_tx); 314 printk("%s: packet rx\n", dev->name);
315 sonic_rx(dev); /* got packet(s) */
316 SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
317 }
199 318
200 if (status == 0) { 319 if (status & SONIC_INT_TXDN) {
201 /* It still hasn't been Txed, kick the sonic again */ 320 int entry = lp->cur_tx;
202 SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); 321 int td_status;
203 break; 322 int freed_some = 0;
204 }
205 323
206 /* put back EOL and free descriptor */ 324 /* At this point, cur_tx is the index of a TD that is one of:
207 lp->tda[entry].tx_frag_count = 0; 325 * unallocated/freed (status set & tx_skb[entry] clear)
208 lp->tda[entry].tx_status = 0; 326 * allocated and sent (status set & tx_skb[entry] set )
209 327 * allocated and not yet sent (status clear & tx_skb[entry] set )
210 if (status & 0x0001) 328 * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
211 lp->stats.tx_packets++; 329 */
212 else {
213 lp->stats.tx_errors++;
214 if (status & 0x0642)
215 lp->stats.tx_aborted_errors++;
216 if (status & 0x0180)
217 lp->stats.tx_carrier_errors++;
218 if (status & 0x0020)
219 lp->stats.tx_window_errors++;
220 if (status & 0x0004)
221 lp->stats.tx_fifo_errors++;
222 }
223 330
224 /* We must free the original skb */ 331 if (sonic_debug > 2)
225 if (lp->tx_skb[entry]) { 332 printk("%s: tx done\n", dev->name);
333
334 while (lp->tx_skb[entry] != NULL) {
335 if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
336 break;
337
338 if (td_status & 0x0001) {
339 lp->stats.tx_packets++;
340 lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
341 } else {
342 lp->stats.tx_errors++;
343 if (td_status & 0x0642)
344 lp->stats.tx_aborted_errors++;
345 if (td_status & 0x0180)
346 lp->stats.tx_carrier_errors++;
347 if (td_status & 0x0020)
348 lp->stats.tx_window_errors++;
349 if (td_status & 0x0004)
350 lp->stats.tx_fifo_errors++;
351 }
352
353 /* We must free the original skb */
226 dev_kfree_skb_irq(lp->tx_skb[entry]); 354 dev_kfree_skb_irq(lp->tx_skb[entry]);
227 lp->tx_skb[entry] = 0; 355 lp->tx_skb[entry] = NULL;
356 /* and unmap DMA buffer */
357 dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
358 lp->tx_laddr[entry] = (dma_addr_t)0;
359 freed_some = 1;
360
361 if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
362 entry = (entry + 1) & SONIC_TDS_MASK;
363 break;
364 }
365 entry = (entry + 1) & SONIC_TDS_MASK;
228 } 366 }
229 /* and the VDMA address */
230 vdma_free(lp->tx_laddr[entry]);
231 dirty_tx++;
232 }
233 367
234 if (lp->tx_full 368 if (freed_some || lp->tx_skb[entry] == NULL)
235 && dirty_tx + SONIC_NUM_TDS > lp->cur_tx + 2) { 369 netif_wake_queue(dev); /* The ring is no longer full */
236 /* The ring is no longer full, clear tbusy. */ 370 lp->cur_tx = entry;
237 lp->tx_full = 0; 371 SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
238 netif_wake_queue(dev);
239 } 372 }
240 373
241 lp->dirty_tx = dirty_tx; 374 /*
242 } 375 * check error conditions
376 */
377 if (status & SONIC_INT_RFO) {
378 if (sonic_debug > 1)
379 printk("%s: rx fifo overrun\n", dev->name);
380 lp->stats.rx_fifo_errors++;
381 SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
382 }
383 if (status & SONIC_INT_RDE) {
384 if (sonic_debug > 1)
385 printk("%s: rx descriptors exhausted\n", dev->name);
386 lp->stats.rx_dropped++;
387 SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
388 }
389 if (status & SONIC_INT_RBAE) {
390 if (sonic_debug > 1)
391 printk("%s: rx buffer area exceeded\n", dev->name);
392 lp->stats.rx_dropped++;
393 SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
394 }
243 395
244 /* 396 /* counter overruns; all counters are 16bit wide */
245 * check error conditions 397 if (status & SONIC_INT_FAE) {
246 */ 398 lp->stats.rx_frame_errors += 65536;
247 if (status & SONIC_INT_RFO) { 399 SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
248 printk("%s: receive fifo underrun\n", dev->name); 400 }
249 lp->stats.rx_fifo_errors++; 401 if (status & SONIC_INT_CRC) {
250 } 402 lp->stats.rx_crc_errors += 65536;
251 if (status & SONIC_INT_RDE) { 403 SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
252 printk("%s: receive descriptors exhausted\n", dev->name); 404 }
253 lp->stats.rx_dropped++; 405 if (status & SONIC_INT_MP) {
254 } 406 lp->stats.rx_missed_errors += 65536;
255 if (status & SONIC_INT_RBE) { 407 SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
256 printk("%s: receive buffer exhausted\n", dev->name); 408 }
257 lp->stats.rx_dropped++;
258 }
259 if (status & SONIC_INT_RBAE) {
260 printk("%s: receive buffer area exhausted\n", dev->name);
261 lp->stats.rx_dropped++;
262 }
263 409
264 /* counter overruns; all counters are 16bit wide */ 410 /* transmit error */
265 if (status & SONIC_INT_FAE) 411 if (status & SONIC_INT_TXER) {
266 lp->stats.rx_frame_errors += 65536; 412 if ((SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) && (sonic_debug > 2))
267 if (status & SONIC_INT_CRC) 413 printk(KERN_ERR "%s: tx fifo underrun\n", dev->name);
268 lp->stats.rx_crc_errors += 65536; 414 SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
269 if (status & SONIC_INT_MP) 415 }
270 lp->stats.rx_missed_errors += 65536;
271 416
272 /* transmit error */ 417 /* bus retry */
273 if (status & SONIC_INT_TXER) 418 if (status & SONIC_INT_BR) {
274 lp->stats.tx_errors++; 419 printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
420 dev->name);
421 /* ... to help debug DMA problems causing endless interrupts. */
422 /* Bounce the eth interface to turn on the interrupt again. */
423 SONIC_WRITE(SONIC_IMR, 0);
424 SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
425 }
275 426
276 /* 427 /* load CAM done */
277 * clear interrupt bits and return 428 if (status & SONIC_INT_LCD)
278 */ 429 SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
279 SONIC_WRITE(SONIC_ISR, status); 430 } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
280 return IRQ_HANDLED; 431 return IRQ_HANDLED;
281} 432}
282 433
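The interrupt handler now only looks at sources it has unmasked, acknowledges each one individually by writing its bit back into the ISR, and loops until a fresh read shows no enabled source pending. Stripped of the per-source bookkeeping, the skeleton looks roughly like this; handle_rx() and reap_tx() are hypothetical stand-ins for the receive and transmit-reap code above.

        static void handle_rx(struct net_device *dev);  /* hypothetical helpers */
        static void reap_tx(struct net_device *dev);

        static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
        {
                struct net_device *dev = (struct net_device *) dev_id;
                int status;

                /* Only react to sources enabled in the interrupt mask. */
                status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
                if (!status)
                        return IRQ_NONE;        /* shared line, not ours */

                do {
                        if (status & SONIC_INT_PKTRX) {
                                handle_rx(dev);
                                SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* ack */
                        }
                        if (status & SONIC_INT_TXDN) {
                                reap_tx(dev);
                                SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN);  /* ack */
                        }
                        /* ...error and counter-overflow bits are acked the same way... */
                } while ((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));

                return IRQ_HANDLED;
        }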
283/* 434/*
284 * We have a good packet(s), get it/them out of the buffers. 435 * We have a good packet(s), pass it/them up the network stack.
285 */ 436 */
286static void sonic_rx(struct net_device *dev) 437static void sonic_rx(struct net_device *dev)
287{ 438{
288 unsigned int base_addr = dev->base_addr; 439 struct sonic_local *lp = netdev_priv(dev);
289 struct sonic_local *lp = (struct sonic_local *) dev->priv;
290 sonic_rd_t *rd = &lp->rda[lp->cur_rx & SONIC_RDS_MASK];
291 int status; 440 int status;
292 441 int entry = lp->cur_rx;
293 while (rd->in_use == 0) { 442
294 struct sk_buff *skb; 443 while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
444 struct sk_buff *used_skb;
445 struct sk_buff *new_skb;
446 dma_addr_t new_laddr;
447 u16 bufadr_l;
448 u16 bufadr_h;
295 int pkt_len; 449 int pkt_len;
296 unsigned char *pkt_ptr;
297 450
298 status = rd->rx_status; 451 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
299 if (sonic_debug > 3)
300 printk("status %x, cur_rx %d, cur_rra %x\n",
301 status, lp->cur_rx, lp->cur_rra);
302 if (status & SONIC_RCR_PRX) { 452 if (status & SONIC_RCR_PRX) {
303 pkt_len = rd->rx_pktlen;
304 pkt_ptr =
305 (char *)
306 sonic_chiptomem((rd->rx_pktptr_h << 16) +
307 rd->rx_pktptr_l);
308
309 if (sonic_debug > 3)
310 printk
311 ("pktptr %p (rba %p) h:%x l:%x, bsize h:%x l:%x\n",
312 pkt_ptr, lp->rba, rd->rx_pktptr_h,
313 rd->rx_pktptr_l,
314 SONIC_READ(SONIC_RBWC1),
315 SONIC_READ(SONIC_RBWC0));
316
317 /* Malloc up new buffer. */ 453 /* Malloc up new buffer. */
318 skb = dev_alloc_skb(pkt_len + 2); 454 new_skb = dev_alloc_skb(SONIC_RBSIZE + 2);
319 if (skb == NULL) { 455 if (new_skb == NULL) {
320 printk 456 printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
321 ("%s: Memory squeeze, dropping packet.\n", 457 lp->stats.rx_dropped++;
322 dev->name); 458 break;
459 }
460 new_skb->dev = dev;
461 /* provide 16 byte IP header alignment unless DMA requires otherwise */
462 if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
463 skb_reserve(new_skb, 2);
464
465 new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
466 SONIC_RBSIZE, DMA_FROM_DEVICE);
467 if (!new_laddr) {
468 dev_kfree_skb(new_skb);
469 printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
323 lp->stats.rx_dropped++; 470 lp->stats.rx_dropped++;
324 break; 471 break;
325 } 472 }
326 skb->dev = dev; 473
327 skb_reserve(skb, 2); /* 16 byte align */ 474 /* now we have a new skb to replace it, pass the used one up the stack */
328 skb_put(skb, pkt_len); /* Make room */ 475 dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
329 eth_copy_and_sum(skb, pkt_ptr, pkt_len, 0); 476 used_skb = lp->rx_skb[entry];
330 skb->protocol = eth_type_trans(skb, dev); 477 pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
331 netif_rx(skb); /* pass the packet to upper layers */ 478 skb_trim(used_skb, pkt_len);
479 used_skb->protocol = eth_type_trans(used_skb, dev);
480 netif_rx(used_skb);
332 dev->last_rx = jiffies; 481 dev->last_rx = jiffies;
333 lp->stats.rx_packets++; 482 lp->stats.rx_packets++;
334 lp->stats.rx_bytes += pkt_len; 483 lp->stats.rx_bytes += pkt_len;
335 484
485 /* and insert the new skb */
486 lp->rx_laddr[entry] = new_laddr;
487 lp->rx_skb[entry] = new_skb;
488
489 bufadr_l = (unsigned long)new_laddr & 0xffff;
490 bufadr_h = (unsigned long)new_laddr >> 16;
491 sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
492 sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
336 } else { 493 } else {
337 /* This should only happen, if we enable accepting broken packets. */ 494 /* This should only happen, if we enable accepting broken packets. */
338 lp->stats.rx_errors++; 495 lp->stats.rx_errors++;
@@ -341,29 +498,35 @@ static void sonic_rx(struct net_device *dev)
341 if (status & SONIC_RCR_CRCR) 498 if (status & SONIC_RCR_CRCR)
342 lp->stats.rx_crc_errors++; 499 lp->stats.rx_crc_errors++;
343 } 500 }
344
345 rd->in_use = 1;
346 rd = &lp->rda[(++lp->cur_rx) & SONIC_RDS_MASK];
347 /* now give back the buffer to the receive buffer area */
348 if (status & SONIC_RCR_LPKT) { 501 if (status & SONIC_RCR_LPKT) {
349 /* 502 /*
350 * this was the last packet out of the current receice buffer 503 * this was the last packet out of the current receive buffer
351 * give the buffer back to the SONIC 504 * give the buffer back to the SONIC
352 */ 505 */
353 lp->cur_rra += sizeof(sonic_rr_t); 506 lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
354 if (lp->cur_rra > 507 if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
355 (lp->rra_laddr + 508 SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
356 (SONIC_NUM_RRS - 509 if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
357 1) * sizeof(sonic_rr_t))) lp->cur_rra = 510 if (sonic_debug > 2)
358 lp->rra_laddr; 511 printk("%s: rx buffer exhausted\n", dev->name);
359 SONIC_WRITE(SONIC_RWP, lp->cur_rra & 0xffff); 512 SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
513 }
360 } else 514 } else
361 printk 515 printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
362 ("%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
363 dev->name); 516 dev->name);
517 /*
518 * give back the descriptor
519 */
520 sonic_rda_put(dev, entry, SONIC_RD_LINK,
521 sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
522 sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
523 sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
524 sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
525 lp->eol_rx = entry;
526 lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
364 } 527 }
365 /* 528 /*
366 * If any worth-while packets have been received, dev_rint() 529 * If any worth-while packets have been received, netif_rx()
367 * has done a mark_bh(NET_BH) for us and will work on them 530 * has done a mark_bh(NET_BH) for us and will work on them
368 * when we get to the bottom-half routine. 531 * when we get to the bottom-half routine.
369 */ 532 */
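The key change in the receive path is that a frame is no longer copied out of a shared buffer area: a replacement skb is allocated and mapped first, and only if that succeeds is the filled skb detached, trimmed to the reported length and handed to netif_rx(), with the resource entry re-pointed at the new buffer. A sketch of just that swap, factored into a hypothetical helper (SONIC_RBSIZE, the rx_skb[]/rx_laddr[] arrays and the accessors are the ones added by this patch):

        /* Swap the filled rx buffer at 'entry' for a fresh one; 0 on success. */
        static int example_rx_swap(struct net_device *dev, int entry, int pkt_len)
        {
                struct sonic_local *lp = netdev_priv(dev);
                struct sk_buff *new_skb, *used_skb;
                dma_addr_t new_laddr;

                new_skb = dev_alloc_skb(SONIC_RBSIZE + 2);
                if (new_skb == NULL)
                        return -ENOMEM;          /* drop the frame, keep the old buffer */
                new_skb->dev = dev;
                if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
                        skb_reserve(new_skb, 2); /* IP header alignment on 16-bit buses */

                new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
                                           SONIC_RBSIZE, DMA_FROM_DEVICE);
                if (!new_laddr) {
                        dev_kfree_skb(new_skb);
                        return -EIO;
                }

                /* The old buffer is ours again: unmap it and pass it up the stack. */
                dma_unmap_single(lp->device, lp->rx_laddr[entry],
                                 SONIC_RBSIZE, DMA_FROM_DEVICE);
                used_skb = lp->rx_skb[entry];
                skb_trim(used_skb, pkt_len);
                used_skb->protocol = eth_type_trans(used_skb, dev);
                netif_rx(used_skb);

                /* Re-arm the resource entry with the replacement buffer. */
                lp->rx_skb[entry] = new_skb;
                lp->rx_laddr[entry] = new_laddr;
                sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_laddr & 0xffff);
                sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_laddr >> 16);
                return 0;
        }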
@@ -376,8 +539,7 @@ static void sonic_rx(struct net_device *dev)
376 */ 539 */
377static struct net_device_stats *sonic_get_stats(struct net_device *dev) 540static struct net_device_stats *sonic_get_stats(struct net_device *dev)
378{ 541{
379 struct sonic_local *lp = (struct sonic_local *) dev->priv; 542 struct sonic_local *lp = netdev_priv(dev);
380 unsigned int base_addr = dev->base_addr;
381 543
382 /* read the tally counter from the SONIC and reset them */ 544 /* read the tally counter from the SONIC and reset them */
383 lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT); 545 lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
@@ -396,8 +558,7 @@ static struct net_device_stats *sonic_get_stats(struct net_device *dev)
396 */ 558 */
397static void sonic_multicast_list(struct net_device *dev) 559static void sonic_multicast_list(struct net_device *dev)
398{ 560{
399 struct sonic_local *lp = (struct sonic_local *) dev->priv; 561 struct sonic_local *lp = netdev_priv(dev);
400 unsigned int base_addr = dev->base_addr;
401 unsigned int rcr; 562 unsigned int rcr;
402 struct dev_mc_list *dmi = dev->mc_list; 563 struct dev_mc_list *dmi = dev->mc_list;
403 unsigned char *addr; 564 unsigned char *addr;
@@ -413,20 +574,15 @@ static void sonic_multicast_list(struct net_device *dev)
413 rcr |= SONIC_RCR_AMC; 574 rcr |= SONIC_RCR_AMC;
414 } else { 575 } else {
415 if (sonic_debug > 2) 576 if (sonic_debug > 2)
416 printk 577 printk("sonic_multicast_list: mc_count %d\n", dev->mc_count);
417 ("sonic_multicast_list: mc_count %d\n", 578 sonic_set_cam_enable(dev, 1); /* always enable our own address */
418 dev->mc_count);
419 lp->cda.cam_enable = 1; /* always enable our own address */
420 for (i = 1; i <= dev->mc_count; i++) { 579 for (i = 1; i <= dev->mc_count; i++) {
421 addr = dmi->dmi_addr; 580 addr = dmi->dmi_addr;
422 dmi = dmi->next; 581 dmi = dmi->next;
423 lp->cda.cam_desc[i].cam_cap0 = 582 sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
424 addr[1] << 8 | addr[0]; 583 sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
425 lp->cda.cam_desc[i].cam_cap1 = 584 sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
426 addr[3] << 8 | addr[2]; 585 sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
427 lp->cda.cam_desc[i].cam_cap2 =
428 addr[5] << 8 | addr[4];
429 lp->cda.cam_enable |= (1 << i);
430 } 586 }
431 SONIC_WRITE(SONIC_CDC, 16); 587 SONIC_WRITE(SONIC_CDC, 16);
432 /* issue Load CAM command */ 588 /* issue Load CAM command */
@@ -447,19 +603,16 @@ static void sonic_multicast_list(struct net_device *dev)
447 */ 603 */
448static int sonic_init(struct net_device *dev) 604static int sonic_init(struct net_device *dev)
449{ 605{
450 unsigned int base_addr = dev->base_addr;
451 unsigned int cmd; 606 unsigned int cmd;
452 struct sonic_local *lp = (struct sonic_local *) dev->priv; 607 struct sonic_local *lp = netdev_priv(dev);
453 unsigned int rra_start;
454 unsigned int rra_end;
455 int i; 608 int i;
456 609
457 /* 610 /*
458 * put the Sonic into software-reset mode and 611 * put the Sonic into software-reset mode and
459 * disable all interrupts 612 * disable all interrupts
460 */ 613 */
461 SONIC_WRITE(SONIC_ISR, 0x7fff);
462 SONIC_WRITE(SONIC_IMR, 0); 614 SONIC_WRITE(SONIC_IMR, 0);
615 SONIC_WRITE(SONIC_ISR, 0x7fff);
463 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); 616 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
464 617
465 /* 618 /*
@@ -475,34 +628,32 @@ static int sonic_init(struct net_device *dev)
475 if (sonic_debug > 2) 628 if (sonic_debug > 2)
476 printk("sonic_init: initialize receive resource area\n"); 629 printk("sonic_init: initialize receive resource area\n");
477 630
478 rra_start = lp->rra_laddr & 0xffff;
479 rra_end =
480 (rra_start + (SONIC_NUM_RRS * sizeof(sonic_rr_t))) & 0xffff;
481
482 for (i = 0; i < SONIC_NUM_RRS; i++) { 631 for (i = 0; i < SONIC_NUM_RRS; i++) {
483 lp->rra[i].rx_bufadr_l = 632 u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
484 (lp->rba_laddr + i * SONIC_RBSIZE) & 0xffff; 633 u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
485 lp->rra[i].rx_bufadr_h = 634 sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
486 (lp->rba_laddr + i * SONIC_RBSIZE) >> 16; 635 sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
487 lp->rra[i].rx_bufsize_l = SONIC_RBSIZE >> 1; 636 sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
488 lp->rra[i].rx_bufsize_h = 0; 637 sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
489 } 638 }
490 639
491 /* initialize all RRA registers */ 640 /* initialize all RRA registers */
492 SONIC_WRITE(SONIC_RSA, rra_start); 641 lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
493 SONIC_WRITE(SONIC_REA, rra_end); 642 SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
494 SONIC_WRITE(SONIC_RRP, rra_start); 643 lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
495 SONIC_WRITE(SONIC_RWP, rra_end); 644 SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
645
646 SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
647 SONIC_WRITE(SONIC_REA, lp->rra_end);
648 SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
649 SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
496 SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16); 650 SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
497 SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE - 2) >> 1); 651 SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
498
499 lp->cur_rra =
500 lp->rra_laddr + (SONIC_NUM_RRS - 1) * sizeof(sonic_rr_t);
501 652
502 /* load the resource pointers */ 653 /* load the resource pointers */
503 if (sonic_debug > 3) 654 if (sonic_debug > 3)
504 printk("sonic_init: issueing RRRA command\n"); 655 printk("sonic_init: issuing RRRA command\n");
505 656
506 SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA); 657 SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
507 i = 0; 658 i = 0;
508 while (i++ < 100) { 659 while (i++ < 100) {
@@ -511,27 +662,30 @@ static int sonic_init(struct net_device *dev)
511 } 662 }
512 663
513 if (sonic_debug > 2) 664 if (sonic_debug > 2)
514 printk("sonic_init: status=%x\n", SONIC_READ(SONIC_CMD)); 665 printk("sonic_init: status=%x i=%d\n", SONIC_READ(SONIC_CMD), i);
515 666
516 /* 667 /*
517 * Initialize the receive descriptors so that they 668 * Initialize the receive descriptors so that they
518 * become a circular linked list, ie. let the last 669 * become a circular linked list, ie. let the last
519 * descriptor point to the first again. 670 * descriptor point to the first again.
520 */ 671 */
521 if (sonic_debug > 2) 672 if (sonic_debug > 2)
522 printk("sonic_init: initialize receive descriptors\n"); 673 printk("sonic_init: initialize receive descriptors\n");
523 for (i = 0; i < SONIC_NUM_RDS; i++) { 674 for (i=0; i<SONIC_NUM_RDS; i++) {
524 lp->rda[i].rx_status = 0; 675 sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
525 lp->rda[i].rx_pktlen = 0; 676 sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
526 lp->rda[i].rx_pktptr_l = 0; 677 sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
527 lp->rda[i].rx_pktptr_h = 0; 678 sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
528 lp->rda[i].rx_seqno = 0; 679 sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
529 lp->rda[i].in_use = 1; 680 sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
530 lp->rda[i].link = 681 sonic_rda_put(dev, i, SONIC_RD_LINK,
531 lp->rda_laddr + (i + 1) * sizeof(sonic_rd_t); 682 lp->rda_laddr +
683 ((i+1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
532 } 684 }
533 /* fix last descriptor */ 685 /* fix last descriptor */
534 lp->rda[SONIC_NUM_RDS - 1].link = lp->rda_laddr; 686 sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
687 (lp->rda_laddr & 0xffff) | SONIC_EOL);
688 lp->eol_rx = SONIC_NUM_RDS - 1;
535 lp->cur_rx = 0; 689 lp->cur_rx = 0;
536 SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16); 690 SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
537 SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff); 691 SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);
@@ -542,34 +696,34 @@ static int sonic_init(struct net_device *dev)
542 if (sonic_debug > 2) 696 if (sonic_debug > 2)
543 printk("sonic_init: initialize transmit descriptors\n"); 697 printk("sonic_init: initialize transmit descriptors\n");
544 for (i = 0; i < SONIC_NUM_TDS; i++) { 698 for (i = 0; i < SONIC_NUM_TDS; i++) {
545 lp->tda[i].tx_status = 0; 699 sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
546 lp->tda[i].tx_config = 0; 700 sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
547 lp->tda[i].tx_pktsize = 0; 701 sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
548 lp->tda[i].tx_frag_count = 0; 702 sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
549 lp->tda[i].link = 703 sonic_tda_put(dev, i, SONIC_TD_LINK,
550 (lp->tda_laddr + 704 (lp->tda_laddr & 0xffff) +
551 (i + 1) * sizeof(sonic_td_t)) | SONIC_END_OF_LINKS; 705 (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
706 lp->tx_skb[i] = NULL;
552 } 707 }
553 lp->tda[SONIC_NUM_TDS - 1].link = 708 /* fix last descriptor */
554 (lp->tda_laddr & 0xffff) | SONIC_END_OF_LINKS; 709 sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
710 (lp->tda_laddr & 0xffff));
555 711
556 SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16); 712 SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
557 SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff); 713 SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
558 lp->cur_tx = lp->dirty_tx = 0; 714 lp->cur_tx = lp->next_tx = 0;
559 715 lp->eol_tx = SONIC_NUM_TDS - 1;
716
560 /* 717 /*
561 * put our own address to CAM desc[0] 718 * put our own address to CAM desc[0]
562 */ 719 */
563 lp->cda.cam_desc[0].cam_cap0 = 720 sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
564 dev->dev_addr[1] << 8 | dev->dev_addr[0]; 721 sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
565 lp->cda.cam_desc[0].cam_cap1 = 722 sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
566 dev->dev_addr[3] << 8 | dev->dev_addr[2]; 723 sonic_set_cam_enable(dev, 1);
567 lp->cda.cam_desc[0].cam_cap2 =
568 dev->dev_addr[5] << 8 | dev->dev_addr[4];
569 lp->cda.cam_enable = 1;
570 724
571 for (i = 0; i < 16; i++) 725 for (i = 0; i < 16; i++)
572 lp->cda.cam_desc[i].cam_entry_pointer = i; 726 sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);
573 727
574 /* 728 /*
575 * initialize CAM registers 729 * initialize CAM registers
@@ -588,8 +742,8 @@ static int sonic_init(struct net_device *dev)
588 break; 742 break;
589 } 743 }
590 if (sonic_debug > 2) { 744 if (sonic_debug > 2) {
591 printk("sonic_init: CMD=%x, ISR=%x\n", 745 printk("sonic_init: CMD=%x, ISR=%x\n, i=%d",
592 SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR)); 746 SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
593 } 747 }
594 748
595 /* 749 /*
@@ -604,7 +758,7 @@ static int sonic_init(struct net_device *dev)
604 758
605 cmd = SONIC_READ(SONIC_CMD); 759 cmd = SONIC_READ(SONIC_CMD);
606 if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0) 760 if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
607 printk("sonic_init: failed, status=%x\n", cmd); 761 printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
608 762
609 if (sonic_debug > 2) 763 if (sonic_debug > 2)
610 printk("sonic_init: new status=%x\n", 764 printk("sonic_init: new status=%x\n",
diff --git a/drivers/net/sonic.h b/drivers/net/sonic.h
index c4a6d58e4afb..cede969a8baa 100644
--- a/drivers/net/sonic.h
+++ b/drivers/net/sonic.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Helpfile for sonic.c 2 * Header file for sonic.c
3 * 3 *
4 * (C) Waldorf Electronics, Germany 4 * (C) Waldorf Electronics, Germany
5 * Written by Andreas Busse 5 * Written by Andreas Busse
@@ -9,10 +9,16 @@
9 * and pad structure members must be exchanged. Also, the structures 9 * and pad structure members must be exchanged. Also, the structures
10 * need to be changed accordingly to the bus size. 10 * need to be changed accordingly to the bus size.
11 * 11 *
12 * 981229 MSch: did just that for the 68k Mac port (32 bit, big endian), 12 * 981229 MSch: did just that for the 68k Mac port (32 bit, big endian)
13 * see CONFIG_MACSONIC branch below.
14 * 13 *
14 * 990611 David Huggins-Daines <dhd@debian.org>: This machine abstraction
15 * does not cope with 16-bit bus sizes very well. Therefore I have
16 * rewritten it with ugly macros and evil inlines.
17 *
18 * 050625 Finn Thain: introduced more 32-bit cards and dhd's support
19 * for 16-bit cards (from the mac68k project).
15 */ 20 */
21
16#ifndef SONIC_H 22#ifndef SONIC_H
17#define SONIC_H 23#define SONIC_H
18 24
@@ -83,6 +89,7 @@
83/* 89/*
84 * Error counters 90 * Error counters
85 */ 91 */
92
86#define SONIC_CRCT 0x2c 93#define SONIC_CRCT 0x2c
87#define SONIC_FAET 0x2d 94#define SONIC_FAET 0x2d
88#define SONIC_MPT 0x2e 95#define SONIC_MPT 0x2e
@@ -182,14 +189,14 @@
182 189
183#define SONIC_INT_BR 0x4000 190#define SONIC_INT_BR 0x4000
184#define SONIC_INT_HBL 0x2000 191#define SONIC_INT_HBL 0x2000
185#define SONIC_INT_LCD 0x1000 192#define SONIC_INT_LCD 0x1000
186#define SONIC_INT_PINT 0x0800 193#define SONIC_INT_PINT 0x0800
187#define SONIC_INT_PKTRX 0x0400 194#define SONIC_INT_PKTRX 0x0400
188#define SONIC_INT_TXDN 0x0200 195#define SONIC_INT_TXDN 0x0200
189#define SONIC_INT_TXER 0x0100 196#define SONIC_INT_TXER 0x0100
190#define SONIC_INT_TC 0x0080 197#define SONIC_INT_TC 0x0080
191#define SONIC_INT_RDE 0x0040 198#define SONIC_INT_RDE 0x0040
192#define SONIC_INT_RBE 0x0020 199#define SONIC_INT_RBE 0x0020
193#define SONIC_INT_RBAE 0x0010 200#define SONIC_INT_RBAE 0x0010
194#define SONIC_INT_CRC 0x0008 201#define SONIC_INT_CRC 0x0008
195#define SONIC_INT_FAE 0x0004 202#define SONIC_INT_FAE 0x0004
@@ -201,224 +208,61 @@
201 * The interrupts we allow. 208 * The interrupts we allow.
202 */ 209 */
203 210
204#define SONIC_IMR_DEFAULT (SONIC_INT_BR | \ 211#define SONIC_IMR_DEFAULT ( SONIC_INT_BR | \
205 SONIC_INT_LCD | \ 212 SONIC_INT_LCD | \
206 SONIC_INT_PINT | \ 213 SONIC_INT_RFO | \
207 SONIC_INT_PKTRX | \ 214 SONIC_INT_PKTRX | \
208 SONIC_INT_TXDN | \ 215 SONIC_INT_TXDN | \
209 SONIC_INT_TXER | \ 216 SONIC_INT_TXER | \
210 SONIC_INT_RDE | \ 217 SONIC_INT_RDE | \
211 SONIC_INT_RBE | \
212 SONIC_INT_RBAE | \ 218 SONIC_INT_RBAE | \
213 SONIC_INT_CRC | \ 219 SONIC_INT_CRC | \
214 SONIC_INT_FAE | \ 220 SONIC_INT_FAE | \
215 SONIC_INT_MP) 221 SONIC_INT_MP)
216 222
217 223
218#define SONIC_END_OF_LINKS 0x0001 224#define SONIC_EOL 0x0001
219
220
221#ifdef CONFIG_MACSONIC
222/*
223 * Big endian like structures on 680x0 Macs
224 */
225
226typedef struct {
227 u32 rx_bufadr_l; /* receive buffer ptr */
228 u32 rx_bufadr_h;
229
230 u32 rx_bufsize_l; /* no. of words in the receive buffer */
231 u32 rx_bufsize_h;
232} sonic_rr_t;
233
234/*
235 * Sonic receive descriptor. Receive descriptors are
236 * kept in a linked list of these structures.
237 */
238
239typedef struct {
240 SREGS_PAD(pad0);
241 u16 rx_status; /* status after reception of a packet */
242 SREGS_PAD(pad1);
243 u16 rx_pktlen; /* length of the packet incl. CRC */
244
245 /*
246 * Pointers to the location in the receive buffer area (RBA)
247 * where the packet resides. A packet is always received into
248 * a contiguous piece of memory.
249 */
250 SREGS_PAD(pad2);
251 u16 rx_pktptr_l;
252 SREGS_PAD(pad3);
253 u16 rx_pktptr_h;
254
255 SREGS_PAD(pad4);
256 u16 rx_seqno; /* sequence no. */
257
258 SREGS_PAD(pad5);
259 u16 link; /* link to next RDD (end if EOL bit set) */
260
261 /*
262 * Owner of this descriptor, 0= driver, 1=sonic
263 */
264
265 SREGS_PAD(pad6);
266 u16 in_use;
267
268 caddr_t rda_next; /* pointer to next RD */
269} sonic_rd_t;
270
271
272/*
273 * Describes a Transmit Descriptor
274 */
275typedef struct {
276 SREGS_PAD(pad0);
277 u16 tx_status; /* status after transmission of a packet */
278 SREGS_PAD(pad1);
279 u16 tx_config; /* transmit configuration for this packet */
280 SREGS_PAD(pad2);
281 u16 tx_pktsize; /* size of the packet to be transmitted */
282 SREGS_PAD(pad3);
283 u16 tx_frag_count; /* no. of fragments */
284
285 SREGS_PAD(pad4);
286 u16 tx_frag_ptr_l;
287 SREGS_PAD(pad5);
288 u16 tx_frag_ptr_h;
289 SREGS_PAD(pad6);
290 u16 tx_frag_size;
291
292 SREGS_PAD(pad7);
293 u16 link; /* ptr to next descriptor */
294} sonic_td_t;
295
296
297/*
298 * Describes an entry in the CAM Descriptor Area.
299 */
300
301typedef struct {
302 SREGS_PAD(pad0);
303 u16 cam_entry_pointer;
304 SREGS_PAD(pad1);
305 u16 cam_cap0;
306 SREGS_PAD(pad2);
307 u16 cam_cap1;
308 SREGS_PAD(pad3);
309 u16 cam_cap2;
310} sonic_cd_t;
311
312#define CAM_DESCRIPTORS 16 225#define CAM_DESCRIPTORS 16
313 226
314 227/* Offsets in the various DMA buffers accessed by the SONIC */
315typedef struct { 228
316 sonic_cd_t cam_desc[CAM_DESCRIPTORS]; 229#define SONIC_BITMODE16 0
317 SREGS_PAD(pad); 230#define SONIC_BITMODE32 1
318 u16 cam_enable; 231#define SONIC_BUS_SCALE(bitmode) ((bitmode) ? 4 : 2)
319} sonic_cda_t; 232/* Note! These are all measured in bus-size units, so use SONIC_BUS_SCALE */
320 233#define SIZEOF_SONIC_RR 4
321#else /* original declarations, little endian 32 bit */ 234#define SONIC_RR_BUFADR_L 0
322 235#define SONIC_RR_BUFADR_H 1
323/* 236#define SONIC_RR_BUFSIZE_L 2
324 * structure definitions 237#define SONIC_RR_BUFSIZE_H 3
325 */ 238
326 239#define SIZEOF_SONIC_RD 7
327typedef struct { 240#define SONIC_RD_STATUS 0
328 u32 rx_bufadr_l; /* receive buffer ptr */ 241#define SONIC_RD_PKTLEN 1
329 u32 rx_bufadr_h; 242#define SONIC_RD_PKTPTR_L 2
330 243#define SONIC_RD_PKTPTR_H 3
331 u32 rx_bufsize_l; /* no. of words in the receive buffer */ 244#define SONIC_RD_SEQNO 4
332 u32 rx_bufsize_h; 245#define SONIC_RD_LINK 5
333} sonic_rr_t; 246#define SONIC_RD_IN_USE 6
334 247
335/* 248#define SIZEOF_SONIC_TD 8
336 * Sonic receive descriptor. Receive descriptors are 249#define SONIC_TD_STATUS 0
337 * kept in a linked list of these structures. 250#define SONIC_TD_CONFIG 1
338 */ 251#define SONIC_TD_PKTSIZE 2
339 252#define SONIC_TD_FRAG_COUNT 3
340typedef struct { 253#define SONIC_TD_FRAG_PTR_L 4
341 u16 rx_status; /* status after reception of a packet */ 254#define SONIC_TD_FRAG_PTR_H 5
342 SREGS_PAD(pad0); 255#define SONIC_TD_FRAG_SIZE 6
343 u16 rx_pktlen; /* length of the packet incl. CRC */ 256#define SONIC_TD_LINK 7
344 SREGS_PAD(pad1); 257
345 258#define SIZEOF_SONIC_CD 4
346 /* 259#define SONIC_CD_ENTRY_POINTER 0
347 * Pointers to the location in the receive buffer area (RBA) 260#define SONIC_CD_CAP0 1
348 * where the packet resides. A packet is always received into 261#define SONIC_CD_CAP1 2
349 * a contiguous piece of memory. 262#define SONIC_CD_CAP2 3
350 */ 263
351 u16 rx_pktptr_l; 264#define SIZEOF_SONIC_CDA ((CAM_DESCRIPTORS * SIZEOF_SONIC_CD) + 1)
352 SREGS_PAD(pad2); 265#define SONIC_CDA_CAM_ENABLE (CAM_DESCRIPTORS * SIZEOF_SONIC_CD)
353 u16 rx_pktptr_h;
354 SREGS_PAD(pad3);
355
356 u16 rx_seqno; /* sequence no. */
357 SREGS_PAD(pad4);
358
359 u16 link; /* link to next RDD (end if EOL bit set) */
360 SREGS_PAD(pad5);
361
362 /*
363 * Owner of this descriptor, 0= driver, 1=sonic
364 */
365
366 u16 in_use;
367 SREGS_PAD(pad6);
368
369 caddr_t rda_next; /* pointer to next RD */
370} sonic_rd_t;
371
372
373/*
374 * Describes a Transmit Descriptor
375 */
376typedef struct {
377 u16 tx_status; /* status after transmission of a packet */
378 SREGS_PAD(pad0);
379 u16 tx_config; /* transmit configuration for this packet */
380 SREGS_PAD(pad1);
381 u16 tx_pktsize; /* size of the packet to be transmitted */
382 SREGS_PAD(pad2);
383 u16 tx_frag_count; /* no. of fragments */
384 SREGS_PAD(pad3);
385
386 u16 tx_frag_ptr_l;
387 SREGS_PAD(pad4);
388 u16 tx_frag_ptr_h;
389 SREGS_PAD(pad5);
390 u16 tx_frag_size;
391 SREGS_PAD(pad6);
392
393 u16 link; /* ptr to next descriptor */
394 SREGS_PAD(pad7);
395} sonic_td_t;
396
397
398/*
399 * Describes an entry in the CAM Descriptor Area.
400 */
401
402typedef struct {
403 u16 cam_entry_pointer;
404 SREGS_PAD(pad0);
405 u16 cam_cap0;
406 SREGS_PAD(pad1);
407 u16 cam_cap1;
408 SREGS_PAD(pad2);
409 u16 cam_cap2;
410 SREGS_PAD(pad3);
411} sonic_cd_t;
412
413#define CAM_DESCRIPTORS 16
414
415
416typedef struct {
417 sonic_cd_t cam_desc[CAM_DESCRIPTORS];
418 u16 cam_enable;
419 SREGS_PAD(pad);
420} sonic_cda_t;
421#endif /* endianness */
422 266
423/* 267/*
424 * Some tunables for the buffer areas. Power of 2 is required 268 * Some tunables for the buffer areas. Power of 2 is required
@@ -426,44 +270,60 @@ typedef struct {
426 * 270 *
427 * MSch: use more buffer space for the slow m68k Macs! 271 * MSch: use more buffer space for the slow m68k Macs!
428 */ 272 */
429#ifdef CONFIG_MACSONIC 273#define SONIC_NUM_RRS 16 /* number of receive resources */
430#define SONIC_NUM_RRS 32 /* number of receive resources */ 274#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
431#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */ 275#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
432#define SONIC_NUM_TDS 32 /* number of transmit descriptors */
433#else
434#define SONIC_NUM_RRS 16 /* number of receive resources */
435#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
436#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
437#endif
438#define SONIC_RBSIZE 1520 /* size of one resource buffer */
439 276
440#define SONIC_RDS_MASK (SONIC_NUM_RDS-1) 277#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
441#define SONIC_TDS_MASK (SONIC_NUM_TDS-1) 278#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
442 279
280#define SONIC_RBSIZE 1520 /* size of one resource buffer */
281
282/* Again, measured in bus size units! */
283#define SIZEOF_SONIC_DESC (SIZEOF_SONIC_CDA \
284 + (SIZEOF_SONIC_TD * SONIC_NUM_TDS) \
285 + (SIZEOF_SONIC_RD * SONIC_NUM_RDS) \
286 + (SIZEOF_SONIC_RR * SONIC_NUM_RRS))
443 287
 444/* Information that needs to be kept for each board. */ 288/* Information that needs to be kept for each board. */
445struct sonic_local { 289struct sonic_local {
446 sonic_cda_t cda; /* virtual CPU address of CDA */ 290 /* Bus size. 0 == 16 bits, 1 == 32 bits. */
447 sonic_td_t tda[SONIC_NUM_TDS]; /* transmit descriptor area */ 291 int dma_bitmode;
448 sonic_rr_t rra[SONIC_NUM_RRS]; /* receive resource area */ 292 /* Register offset within the longword (independent of endianness,
449 sonic_rd_t rda[SONIC_NUM_RDS]; /* receive descriptor area */ 293 and varies from one type of Macintosh SONIC to another
450 struct sk_buff *tx_skb[SONIC_NUM_TDS]; /* skbuffs for packets to transmit */ 294 (Aarrgh)) */
451 unsigned int tx_laddr[SONIC_NUM_TDS]; /* logical DMA address fro skbuffs */ 295 int reg_offset;
452 unsigned char *rba; /* start of receive buffer areas */ 296 void *descriptors;
453 unsigned int cda_laddr; /* logical DMA address of CDA */ 297 /* Crud. These areas have to be within the same 64K. Therefore
 454 unsigned int tda_laddr; /* logical DMA address of TDA */ 298 we allocate a descriptors page, and point these to places within it. */
455 unsigned int rra_laddr; /* logical DMA address of RRA */ 299 void *cda; /* CAM descriptor area */
456 unsigned int rda_laddr; /* logical DMA address of RDA */ 300 void *tda; /* Transmit descriptor area */
457 unsigned int rba_laddr; /* logical DMA address of RBA */ 301 void *rra; /* Receive resource area */
458 unsigned int cur_rra; /* current indexes to resource areas */ 302 void *rda; /* Receive descriptor area */
303 struct sk_buff* volatile rx_skb[SONIC_NUM_RRS]; /* packets to be received */
304 struct sk_buff* volatile tx_skb[SONIC_NUM_TDS]; /* packets to be transmitted */
305 unsigned int tx_len[SONIC_NUM_TDS]; /* lengths of tx DMA mappings */
306 /* Logical DMA addresses on MIPS, bus addresses on m68k
307 * (so "laddr" is a bit misleading) */
308 dma_addr_t descriptors_laddr;
309 u32 cda_laddr; /* logical DMA address of CDA */
310 u32 tda_laddr; /* logical DMA address of TDA */
311 u32 rra_laddr; /* logical DMA address of RRA */
312 u32 rda_laddr; /* logical DMA address of RDA */
313 dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
314 dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
315 unsigned int rra_end;
316 unsigned int cur_rwp;
459 unsigned int cur_rx; 317 unsigned int cur_rx;
460 unsigned int cur_tx; 318 unsigned int cur_tx; /* first unacked transmit packet */
461 unsigned int dirty_tx; /* last unacked transmit packet */ 319 unsigned int eol_rx;
462 char tx_full; 320 unsigned int eol_tx; /* last unacked transmit packet */
321 unsigned int next_tx; /* next free TD */
322 struct device *device; /* generic device */
463 struct net_device_stats stats; 323 struct net_device_stats stats;
464}; 324};
465 325
466#define TX_TIMEOUT 6 326#define TX_TIMEOUT (3 * HZ)
467 327
468/* Index to functions, as function prototypes. */ 328/* Index to functions, as function prototypes. */
469 329
@@ -477,6 +337,114 @@ static void sonic_multicast_list(struct net_device *dev);
477static int sonic_init(struct net_device *dev); 337static int sonic_init(struct net_device *dev);
478static void sonic_tx_timeout(struct net_device *dev); 338static void sonic_tx_timeout(struct net_device *dev);
479 339
340/* Internal inlines for reading/writing DMA buffers. Note that bus
341 size and endianness matter here, whereas they don't for registers,
342 as far as we can tell. */
343/* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
344 is a much better name. */
345static inline void sonic_buf_put(void* base, int bitmode,
346 int offset, __u16 val)
347{
348 if (bitmode)
349#ifdef __BIG_ENDIAN
350 ((__u16 *) base + (offset*2))[1] = val;
351#else
352 ((__u16 *) base + (offset*2))[0] = val;
353#endif
354 else
355 ((__u16 *) base)[offset] = val;
356}
357
358static inline __u16 sonic_buf_get(void* base, int bitmode,
359 int offset)
360{
361 if (bitmode)
362#ifdef __BIG_ENDIAN
363 return ((volatile __u16 *) base + (offset*2))[1];
364#else
365 return ((volatile __u16 *) base + (offset*2))[0];
366#endif
367 else
368 return ((volatile __u16 *) base)[offset];
369}
370
371/* Inlines that you should actually use for reading/writing DMA buffers */
372static inline void sonic_cda_put(struct net_device* dev, int entry,
373 int offset, __u16 val)
374{
375 struct sonic_local* lp = (struct sonic_local *) dev->priv;
376 sonic_buf_put(lp->cda, lp->dma_bitmode,
377 (entry * SIZEOF_SONIC_CD) + offset, val);
378}
379
380static inline __u16 sonic_cda_get(struct net_device* dev, int entry,
381 int offset)
382{
383 struct sonic_local* lp = (struct sonic_local *) dev->priv;
384 return sonic_buf_get(lp->cda, lp->dma_bitmode,
385 (entry * SIZEOF_SONIC_CD) + offset);
386}
387
388static inline void sonic_set_cam_enable(struct net_device* dev, __u16 val)
389{
390 struct sonic_local* lp = (struct sonic_local *) dev->priv;
391 sonic_buf_put(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE, val);
392}
393
394static inline __u16 sonic_get_cam_enable(struct net_device* dev)
395{
396 struct sonic_local* lp = (struct sonic_local *) dev->priv;
397 return sonic_buf_get(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE);
398}
399
400static inline void sonic_tda_put(struct net_device* dev, int entry,
401 int offset, __u16 val)
402{
403 struct sonic_local* lp = (struct sonic_local *) dev->priv;
404 sonic_buf_put(lp->tda, lp->dma_bitmode,
405 (entry * SIZEOF_SONIC_TD) + offset, val);
406}
407
408static inline __u16 sonic_tda_get(struct net_device* dev, int entry,
409 int offset)
410{
411 struct sonic_local* lp = (struct sonic_local *) dev->priv;
412 return sonic_buf_get(lp->tda, lp->dma_bitmode,
413 (entry * SIZEOF_SONIC_TD) + offset);
414}
415
416static inline void sonic_rda_put(struct net_device* dev, int entry,
417 int offset, __u16 val)
418{
419 struct sonic_local* lp = (struct sonic_local *) dev->priv;
420 sonic_buf_put(lp->rda, lp->dma_bitmode,
421 (entry * SIZEOF_SONIC_RD) + offset, val);
422}
423
424static inline __u16 sonic_rda_get(struct net_device* dev, int entry,
425 int offset)
426{
427 struct sonic_local* lp = (struct sonic_local *) dev->priv;
428 return sonic_buf_get(lp->rda, lp->dma_bitmode,
429 (entry * SIZEOF_SONIC_RD) + offset);
430}
431
432static inline void sonic_rra_put(struct net_device* dev, int entry,
433 int offset, __u16 val)
434{
435 struct sonic_local* lp = (struct sonic_local *) dev->priv;
436 sonic_buf_put(lp->rra, lp->dma_bitmode,
437 (entry * SIZEOF_SONIC_RR) + offset, val);
438}
439
440static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
441 int offset)
442{
443 struct sonic_local* lp = (struct sonic_local *) dev->priv;
444 return sonic_buf_get(lp->rra, lp->dma_bitmode,
445 (entry * SIZEOF_SONIC_RR) + offset);
446}
447
480static const char *version = 448static const char *version =
481 "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n"; 449 "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
482 450
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
index 7e99e9f8045e..e4cfc80b283b 100644
--- a/drivers/net/tokenring/Kconfig
+++ b/drivers/net/tokenring/Kconfig
@@ -84,7 +84,7 @@ config 3C359
84 84
85config TMS380TR 85config TMS380TR
86 tristate "Generic TMS380 Token Ring ISA/PCI adapter support" 86 tristate "Generic TMS380 Token Ring ISA/PCI adapter support"
87 depends on TR && (PCI || ISA && ISA_DMA_API) 87 depends on TR && (PCI || ISA && ISA_DMA_API || MCA)
88 select FW_LOADER 88 select FW_LOADER
89 ---help--- 89 ---help---
90 This driver provides generic support for token ring adapters 90 This driver provides generic support for token ring adapters
@@ -158,7 +158,7 @@ config ABYSS
158 158
159config MADGEMC 159config MADGEMC
160 tristate "Madge Smart 16/4 Ringnode MicroChannel" 160 tristate "Madge Smart 16/4 Ringnode MicroChannel"
161 depends on TR && TMS380TR && MCA_LEGACY 161 depends on TR && TMS380TR && MCA
162 help 162 help
163 This tms380 module supports the Madge Smart 16/4 MC16 and MC32 163 This tms380 module supports the Madge Smart 16/4 MC16 and MC32
164 MicroChannel adapters. 164 MicroChannel adapters.
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index 87103c400999..9345e68c451e 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -139,7 +139,7 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_
139 */ 139 */
140 dev->base_addr += 0x10; 140 dev->base_addr += 0x10;
141 141
142 ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev); 142 ret = tmsdev_init(dev, &pdev->dev);
143 if (ret) { 143 if (ret) {
144 printk("%s: unable to get memory for dev->priv.\n", 144 printk("%s: unable to get memory for dev->priv.\n",
145 dev->name); 145 dev->name);
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 659cbdbef7f3..3a25d191ea4a 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -20,7 +20,7 @@
20static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n"; 20static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/mca-legacy.h> 23#include <linux/mca.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/pci.h> 26#include <linux/pci.h>
@@ -38,9 +38,7 @@ static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n";
38#define MADGEMC_IO_EXTENT 32 38#define MADGEMC_IO_EXTENT 32
39#define MADGEMC_SIF_OFFSET 0x08 39#define MADGEMC_SIF_OFFSET 0x08
40 40
41struct madgemc_card { 41struct card_info {
42 struct net_device *dev;
43
44 /* 42 /*
45 * These are read from the BIA ROM. 43 * These are read from the BIA ROM.
46 */ 44 */
@@ -57,16 +55,12 @@ struct madgemc_card {
57 unsigned int arblevel:4; 55 unsigned int arblevel:4;
58 unsigned int ringspeed:2; /* 0 = 4mb, 1 = 16, 2 = Auto/none */ 56 unsigned int ringspeed:2; /* 0 = 4mb, 1 = 16, 2 = Auto/none */
59 unsigned int cabletype:1; /* 0 = RJ45, 1 = DB9 */ 57 unsigned int cabletype:1; /* 0 = RJ45, 1 = DB9 */
60
61 struct madgemc_card *next;
62}; 58};
63static struct madgemc_card *madgemc_card_list;
64
65 59
66static int madgemc_open(struct net_device *dev); 60static int madgemc_open(struct net_device *dev);
67static int madgemc_close(struct net_device *dev); 61static int madgemc_close(struct net_device *dev);
68static int madgemc_chipset_init(struct net_device *dev); 62static int madgemc_chipset_init(struct net_device *dev);
69static void madgemc_read_rom(struct madgemc_card *card); 63static void madgemc_read_rom(struct net_device *dev, struct card_info *card);
70static unsigned short madgemc_setnselout_pins(struct net_device *dev); 64static unsigned short madgemc_setnselout_pins(struct net_device *dev);
71static void madgemc_setcabletype(struct net_device *dev, int type); 65static void madgemc_setcabletype(struct net_device *dev, int type);
72 66
@@ -151,261 +145,237 @@ static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsign
151 145
152 146
153 147
154static int __init madgemc_probe(void) 148static int __devinit madgemc_probe(struct device *device)
155{ 149{
156 static int versionprinted; 150 static int versionprinted;
157 struct net_device *dev; 151 struct net_device *dev;
158 struct net_local *tp; 152 struct net_local *tp;
159 struct madgemc_card *card; 153 struct card_info *card;
160 int i,slot = 0; 154 struct mca_device *mdev = to_mca_device(device);
161 __u8 posreg[4]; 155 int ret = 0, i = 0;
162 156
163 if (!MCA_bus) 157 if (versionprinted++ == 0)
164 return -1; 158 printk("%s", version);
165 159
166 while (slot != MCA_NOTFOUND) { 160 if(mca_device_claimed(mdev))
167 /* 161 return -EBUSY;
168 * Currently we only support the MC16/32 (MCA ID 002d) 162 mca_device_set_claim(mdev, 1);
169 */ 163
170 slot = mca_find_unused_adapter(0x002d, slot); 164 dev = alloc_trdev(sizeof(struct net_local));
171 if (slot == MCA_NOTFOUND) 165 if (!dev) {
172 break; 166 printk("madgemc: unable to allocate dev space\n");
173 167 mca_device_set_claim(mdev, 0);
174 /* 168 ret = -ENOMEM;
175 * If we get here, we have an adapter. 169 goto getout;
176 */ 170 }
177 if (versionprinted++ == 0)
178 printk("%s", version);
179
180 dev = alloc_trdev(sizeof(struct net_local));
181 if (dev == NULL) {
182 printk("madgemc: unable to allocate dev space\n");
183 if (madgemc_card_list)
184 return 0;
185 return -1;
186 }
187 171
188 SET_MODULE_OWNER(dev); 172 SET_MODULE_OWNER(dev);
189 dev->dma = 0; 173 dev->dma = 0;
190 174
191 /* 175 card = kmalloc(sizeof(struct card_info), GFP_KERNEL);
192 * Fetch MCA config registers 176 if (card==NULL) {
193 */ 177 printk("madgemc: unable to allocate card struct\n");
194 for(i=0;i<4;i++) 178 ret = -ENOMEM;
195 posreg[i] = mca_read_stored_pos(slot, i+2); 179 goto getout1;
196 180 }
197 card = kmalloc(sizeof(struct madgemc_card), GFP_KERNEL); 181
198 if (card==NULL) { 182 /*
199 printk("madgemc: unable to allocate card struct\n"); 183 * Parse configuration information. This all comes
200 free_netdev(dev); 184 * directly from the publicly available @002d.ADF.
201 if (madgemc_card_list) 185 * Get it from Madge or your local ADF library.
202 return 0; 186 */
203 return -1; 187
204 } 188 /*
205 card->dev = dev; 189 * Base address
206 190 */
207 /* 191 dev->base_addr = 0x0a20 +
208 * Parse configuration information. This all comes 192 ((mdev->pos[2] & MC16_POS2_ADDR2)?0x0400:0) +
209 * directly from the publicly available @002d.ADF. 193 ((mdev->pos[0] & MC16_POS0_ADDR1)?0x1000:0) +
210 * Get it from Madge or your local ADF library. 194 ((mdev->pos[3] & MC16_POS3_ADDR3)?0x2000:0);
211 */ 195
212 196 /*
213 /* 197 * Interrupt line
214 * Base address 198 */
215 */ 199 switch(mdev->pos[0] >> 6) { /* upper two bits */
216 dev->base_addr = 0x0a20 +
217 ((posreg[2] & MC16_POS2_ADDR2)?0x0400:0) +
218 ((posreg[0] & MC16_POS0_ADDR1)?0x1000:0) +
219 ((posreg[3] & MC16_POS3_ADDR3)?0x2000:0);
220
221 /*
222 * Interrupt line
223 */
224 switch(posreg[0] >> 6) { /* upper two bits */
225 case 0x1: dev->irq = 3; break; 200 case 0x1: dev->irq = 3; break;
226 case 0x2: dev->irq = 9; break; /* IRQ 2 = IRQ 9 */ 201 case 0x2: dev->irq = 9; break; /* IRQ 2 = IRQ 9 */
227 case 0x3: dev->irq = 10; break; 202 case 0x3: dev->irq = 10; break;
228 default: dev->irq = 0; break; 203 default: dev->irq = 0; break;
229 } 204 }
230 205
231 if (dev->irq == 0) { 206 if (dev->irq == 0) {
232 printk("%s: invalid IRQ\n", dev->name); 207 printk("%s: invalid IRQ\n", dev->name);
233 goto getout1; 208 ret = -EBUSY;
234 } 209 goto getout2;
210 }
235 211
236 if (!request_region(dev->base_addr, MADGEMC_IO_EXTENT, 212 if (!request_region(dev->base_addr, MADGEMC_IO_EXTENT,
237 "madgemc")) { 213 "madgemc")) {
238 printk(KERN_INFO "madgemc: unable to setup Smart MC in slot %d because of I/O base conflict at 0x%04lx\n", slot, dev->base_addr); 214 printk(KERN_INFO "madgemc: unable to setup Smart MC in slot %d because of I/O base conflict at 0x%04lx\n", mdev->slot, dev->base_addr);
239 dev->base_addr += MADGEMC_SIF_OFFSET;
240 goto getout1;
241 }
242 dev->base_addr += MADGEMC_SIF_OFFSET; 215 dev->base_addr += MADGEMC_SIF_OFFSET;
216 ret = -EBUSY;
217 goto getout2;
218 }
219 dev->base_addr += MADGEMC_SIF_OFFSET;
220
221 /*
222 * Arbitration Level
223 */
224 card->arblevel = ((mdev->pos[0] >> 1) & 0x7) + 8;
225
226 /*
227 * Burst mode and Fairness
228 */
229 card->burstmode = ((mdev->pos[2] >> 6) & 0x3);
230 card->fairness = ((mdev->pos[2] >> 4) & 0x1);
231
232 /*
233 * Ring Speed
234 */
235 if ((mdev->pos[1] >> 2)&0x1)
236 card->ringspeed = 2; /* not selected */
237 else if ((mdev->pos[2] >> 5) & 0x1)
238 card->ringspeed = 1; /* 16Mb */
239 else
240 card->ringspeed = 0; /* 4Mb */
241
242 /*
243 * Cable type
244 */
245 if ((mdev->pos[1] >> 6)&0x1)
246 card->cabletype = 1; /* STP/DB9 */
247 else
248 card->cabletype = 0; /* UTP/RJ-45 */
249
250
251 /*
252 * ROM Info. This requires us to actually twiddle
253 * bits on the card, so we must ensure above that
254 * the base address is free of conflict (request_region above).
255 */
256 madgemc_read_rom(dev, card);
243 257
244 /* 258 if (card->manid != 0x4d) { /* something went wrong */
245 * Arbitration Level 259 printk(KERN_INFO "%s: Madge MC ROM read failed (unknown manufacturer ID %02x)\n", dev->name, card->manid);
246 */ 260 goto getout3;
247 card->arblevel = ((posreg[0] >> 1) & 0x7) + 8; 261 }
248
249 /*
250 * Burst mode and Fairness
251 */
252 card->burstmode = ((posreg[2] >> 6) & 0x3);
253 card->fairness = ((posreg[2] >> 4) & 0x1);
254
255 /*
256 * Ring Speed
257 */
258 if ((posreg[1] >> 2)&0x1)
259 card->ringspeed = 2; /* not selected */
260 else if ((posreg[2] >> 5) & 0x1)
261 card->ringspeed = 1; /* 16Mb */
262 else
263 card->ringspeed = 0; /* 4Mb */
264
265 /*
266 * Cable type
267 */
268 if ((posreg[1] >> 6)&0x1)
269 card->cabletype = 1; /* STP/DB9 */
270 else
271 card->cabletype = 0; /* UTP/RJ-45 */
272
273
274 /*
275 * ROM Info. This requires us to actually twiddle
276 * bits on the card, so we must ensure above that
277 * the base address is free of conflict (request_region above).
278 */
279 madgemc_read_rom(card);
280
281 if (card->manid != 0x4d) { /* something went wrong */
282 printk(KERN_INFO "%s: Madge MC ROM read failed (unknown manufacturer ID %02x)\n", dev->name, card->manid);
283 goto getout;
284 }
285 262
286 if ((card->cardtype != 0x08) && (card->cardtype != 0x0d)) { 263 if ((card->cardtype != 0x08) && (card->cardtype != 0x0d)) {
287 printk(KERN_INFO "%s: Madge MC ROM read failed (unknown card ID %02x)\n", dev->name, card->cardtype); 264 printk(KERN_INFO "%s: Madge MC ROM read failed (unknown card ID %02x)\n", dev->name, card->cardtype);
288 goto getout; 265 ret = -EIO;
289 } 266 goto getout3;
267 }
290 268
291 /* All cards except Rev 0 and 1 MC16's have 256kb of RAM */ 269 /* All cards except Rev 0 and 1 MC16's have 256kb of RAM */
292 if ((card->cardtype == 0x08) && (card->cardrev <= 0x01)) 270 if ((card->cardtype == 0x08) && (card->cardrev <= 0x01))
293 card->ramsize = 128; 271 card->ramsize = 128;
294 else 272 else
295 card->ramsize = 256; 273 card->ramsize = 256;
296 274
297 printk("%s: %s Rev %d at 0x%04lx IRQ %d\n", 275 printk("%s: %s Rev %d at 0x%04lx IRQ %d\n",
298 dev->name, 276 dev->name,
299 (card->cardtype == 0x08)?MADGEMC16_CARDNAME: 277 (card->cardtype == 0x08)?MADGEMC16_CARDNAME:
300 MADGEMC32_CARDNAME, card->cardrev, 278 MADGEMC32_CARDNAME, card->cardrev,
301 dev->base_addr, dev->irq); 279 dev->base_addr, dev->irq);
302 280
303 if (card->cardtype == 0x0d) 281 if (card->cardtype == 0x0d)
304 printk("%s: Warning: MC32 support is experimental and highly untested\n", dev->name); 282 printk("%s: Warning: MC32 support is experimental and highly untested\n", dev->name);
305 283
306 if (card->ringspeed==2) { /* Unknown */ 284 if (card->ringspeed==2) { /* Unknown */
307 printk("%s: Warning: Ring speed not set in POS -- Please run the reference disk and set it!\n", dev->name); 285 printk("%s: Warning: Ring speed not set in POS -- Please run the reference disk and set it!\n", dev->name);
308 card->ringspeed = 1; /* default to 16mb */ 286 card->ringspeed = 1; /* default to 16mb */
309 } 287 }
310 288
311 printk("%s: RAM Size: %dKB\n", dev->name, card->ramsize); 289 printk("%s: RAM Size: %dKB\n", dev->name, card->ramsize);
312 290
313 printk("%s: Ring Speed: %dMb/sec on %s\n", dev->name, 291 printk("%s: Ring Speed: %dMb/sec on %s\n", dev->name,
314 (card->ringspeed)?16:4, 292 (card->ringspeed)?16:4,
315 card->cabletype?"STP/DB9":"UTP/RJ-45"); 293 card->cabletype?"STP/DB9":"UTP/RJ-45");
316 printk("%s: Arbitration Level: %d\n", dev->name, 294 printk("%s: Arbitration Level: %d\n", dev->name,
317 card->arblevel); 295 card->arblevel);
318 296
319 printk("%s: Burst Mode: ", dev->name); 297 printk("%s: Burst Mode: ", dev->name);
320 switch(card->burstmode) { 298 switch(card->burstmode) {
321 case 0: printk("Cycle steal"); break; 299 case 0: printk("Cycle steal"); break;
322 case 1: printk("Limited burst"); break; 300 case 1: printk("Limited burst"); break;
323 case 2: printk("Delayed release"); break; 301 case 2: printk("Delayed release"); break;
324 case 3: printk("Immediate release"); break; 302 case 3: printk("Immediate release"); break;
325 } 303 }
326 printk(" (%s)\n", (card->fairness)?"Unfair":"Fair"); 304 printk(" (%s)\n", (card->fairness)?"Unfair":"Fair");
327
328
329 /*
330 * Enable SIF before we assign the interrupt handler,
331 * just in case we get spurious interrupts that need
332 * handling.
333 */
334 outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */
335 madgemc_setsifsel(dev, 1);
336 if (request_irq(dev->irq, madgemc_interrupt, SA_SHIRQ,
337 "madgemc", dev))
338 goto getout;
339
340 madgemc_chipset_init(dev); /* enables interrupts! */
341 madgemc_setcabletype(dev, card->cabletype);
342
343	/* Setup MCA structures */
344	mca_set_adapter_name(slot, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME);
345	mca_set_adapter_procfn(slot, madgemc_mcaproc, dev);
346	mca_mark_as_used(slot);
347
348	printk("%s: Ring Station Address: ", dev->name);
349	printk("%2.2x", dev->dev_addr[0]);
350	for (i = 1; i < 6; i++)
351		printk(":%2.2x", dev->dev_addr[i]);
352	printk("\n");
353
354	/* XXX is ISA_MAX_ADDRESS correct here? */
355	if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL)) {
356		printk("%s: unable to get memory for dev->priv.\n",
357			dev->name);
358		release_region(dev->base_addr-MADGEMC_SIF_OFFSET,
359			MADGEMC_IO_EXTENT);
360
361		kfree(card);
362		tmsdev_term(dev);
363		free_netdev(dev);
364		if (madgemc_card_list)
365			return 0;
366		return -1;
367	}
368	tp = netdev_priv(dev);
369
370	/*
371	 * The MC16 is physically a 32bit card. However, Madge
372	 * insists on calling it 16bit, so I'll assume here that
373	 * they know what they're talking about. Cut off DMA
374	 * at 16mb.
375	 */
376	tp->setnselout = madgemc_setnselout_pins;
377	tp->sifwriteb = madgemc_sifwriteb;
378	tp->sifreadb = madgemc_sifreadb;
379	tp->sifwritew = madgemc_sifwritew;
380	tp->sifreadw = madgemc_sifreadw;
381	tp->DataRate = (card->ringspeed)?SPEED_16:SPEED_4;
382
383	memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1);
384
385	dev->open = madgemc_open;
386	dev->stop = madgemc_close;
387
388	if (register_netdev(dev) == 0) {
389		/* Enlist in the card list */
390		card->next = madgemc_card_list;
391		madgemc_card_list = card;
392		slot++;
393		continue; /* successful, try to find another */
394	}
395
396	free_irq(dev->irq, dev);
397 getout:
398	release_region(dev->base_addr-MADGEMC_SIF_OFFSET,
399		MADGEMC_IO_EXTENT);
400 getout1:
401	kfree(card);
402	free_netdev(dev);
403	slot++;
404 }
405
406 if (madgemc_card_list)
407	return 0;
408 return -1;
 305
 306
307	/*
308	 * Enable SIF before we assign the interrupt handler,
309	 * just in case we get spurious interrupts that need
310	 * handling.
311	 */
312	outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */
313	madgemc_setsifsel(dev, 1);
314	if (request_irq(dev->irq, madgemc_interrupt, SA_SHIRQ,
315		"madgemc", dev)) {
316		ret = -EBUSY;
317		goto getout3;
318	}
319
320	madgemc_chipset_init(dev); /* enables interrupts! */
321	madgemc_setcabletype(dev, card->cabletype);
322
323	/* Setup MCA structures */
324	mca_device_set_name(mdev, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME);
325	mca_set_adapter_procfn(mdev->slot, madgemc_mcaproc, dev);
326
327	printk("%s: Ring Station Address: ", dev->name);
328	printk("%2.2x", dev->dev_addr[0]);
329	for (i = 1; i < 6; i++)
330		printk(":%2.2x", dev->dev_addr[i]);
331	printk("\n");
332
333	if (tmsdev_init(dev, device)) {
334		printk("%s: unable to get memory for dev->priv.\n",
335			dev->name);
336		ret = -ENOMEM;
337		goto getout4;
338	}
339	tp = netdev_priv(dev);
340
341	/*
342	 * The MC16 is physically a 32bit card. However, Madge
343	 * insists on calling it 16bit, so I'll assume here that
344	 * they know what they're talking about. Cut off DMA
345	 * at 16mb.
346	 */
347	tp->setnselout = madgemc_setnselout_pins;
348	tp->sifwriteb = madgemc_sifwriteb;
349	tp->sifreadb = madgemc_sifreadb;
350	tp->sifwritew = madgemc_sifwritew;
351	tp->sifreadw = madgemc_sifreadw;
352	tp->DataRate = (card->ringspeed)?SPEED_16:SPEED_4;
353
354	memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1);
355
356	dev->open = madgemc_open;
357	dev->stop = madgemc_close;
358
359	tp->tmspriv = card;
360	dev_set_drvdata(device, dev);
361
362	if (register_netdev(dev) == 0)
363		return 0;
364
365	dev_set_drvdata(device, NULL);
366	ret = -ENOMEM;
367getout4:
368	free_irq(dev->irq, dev);
369getout3:
370	release_region(dev->base_addr-MADGEMC_SIF_OFFSET,
371		MADGEMC_IO_EXTENT);
372getout2:
373	kfree(card);
374getout1:
375	free_netdev(dev);
376getout:
377	mca_device_set_claim(mdev, 0);
378	return ret;
409} 379}
410 380
411/* 381/*
@@ -664,12 +634,12 @@ static void madgemc_chipset_close(struct net_device *dev)
664 * is complete. 634 * is complete.
665 * 635 *
666 */ 636 */
667static void madgemc_read_rom(struct madgemc_card *card) 637static void madgemc_read_rom(struct net_device *dev, struct card_info *card)
668{ 638{
669 unsigned long ioaddr; 639 unsigned long ioaddr;
670 unsigned char reg0, reg1, tmpreg0, i; 640 unsigned char reg0, reg1, tmpreg0, i;
671 641
672 ioaddr = card->dev->base_addr; 642 ioaddr = dev->base_addr;
673 643
674 reg0 = inb(ioaddr + MC_CONTROL_REG0); 644 reg0 = inb(ioaddr + MC_CONTROL_REG0);
675 reg1 = inb(ioaddr + MC_CONTROL_REG1); 645 reg1 = inb(ioaddr + MC_CONTROL_REG1);
@@ -686,9 +656,9 @@ static void madgemc_read_rom(struct madgemc_card *card)
686 outb(tmpreg0 | MC_CONTROL_REG0_PAGE, ioaddr + MC_CONTROL_REG0); 656 outb(tmpreg0 | MC_CONTROL_REG0_PAGE, ioaddr + MC_CONTROL_REG0);
687 657
688 /* Read BIA */ 658 /* Read BIA */
689 card->dev->addr_len = 6; 659 dev->addr_len = 6;
690 for (i = 0; i < 6; i++) 660 for (i = 0; i < 6; i++)
691 card->dev->dev_addr[i] = inb(ioaddr + MC_ROM_BIA_START + i); 661 dev->dev_addr[i] = inb(ioaddr + MC_ROM_BIA_START + i);
692 662
693 /* Restore original register values */ 663 /* Restore original register values */
694 outb(reg0, ioaddr + MC_CONTROL_REG0); 664 outb(reg0, ioaddr + MC_CONTROL_REG0);
@@ -721,14 +691,10 @@ static int madgemc_close(struct net_device *dev)
721static int madgemc_mcaproc(char *buf, int slot, void *d) 691static int madgemc_mcaproc(char *buf, int slot, void *d)
722{ 692{
723 struct net_device *dev = (struct net_device *)d; 693 struct net_device *dev = (struct net_device *)d;
724	struct madgemc_card *curcard = madgemc_card_list;
 694	struct net_local *tp = dev->priv;
 695	struct card_info *curcard = tp->tmspriv;
725 int len = 0; 696 int len = 0;
726 697
727 while (curcard) { /* search for card struct */
728 if (curcard->dev == dev)
729 break;
730 curcard = curcard->next;
731 }
732 len += sprintf(buf+len, "-------\n"); 698 len += sprintf(buf+len, "-------\n");
733 if (curcard) { 699 if (curcard) {
734 struct net_local *tp = netdev_priv(dev); 700 struct net_local *tp = netdev_priv(dev);
@@ -763,25 +729,56 @@ static int madgemc_mcaproc(char *buf, int slot, void *d)
763 return len; 729 return len;
764} 730}
765 731
766static void __exit madgemc_exit(void)
767{
768	struct net_device *dev;
769	struct madgemc_card *this_card;
770
771	while (madgemc_card_list) {
772		dev = madgemc_card_list->dev;
773		unregister_netdev(dev);
774		release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT);
775		free_irq(dev->irq, dev);
776		tmsdev_term(dev);
777		free_netdev(dev);
778		this_card = madgemc_card_list;
779		madgemc_card_list = this_card->next;
780		kfree(this_card);
781	}
 732static int __devexit madgemc_remove(struct device *device)
 733{
 734	struct net_device *dev = dev_get_drvdata(device);
 735	struct net_local *tp;
 736	struct card_info *card;
 737
 738	if (!dev)
 739		BUG();
 740
 741	tp = dev->priv;
 742	card = tp->tmspriv;
 743	kfree(card);
 744	tp->tmspriv = NULL;
 745
 746	unregister_netdev(dev);
 747	release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT);
 748	free_irq(dev->irq, dev);
 749	tmsdev_term(dev);
 750	free_netdev(dev);
 751	dev_set_drvdata(device, NULL);
 752
 753	return 0;
 754}
 755
 756static short madgemc_adapter_ids[] __initdata = {
 757	0x002d,
 758	0x0000
 759};
 760
 761static struct mca_driver madgemc_driver = {
 762	.id_table = madgemc_adapter_ids,
 763	.driver = {
 764		.name = "madgemc",
 765		.bus = &mca_bus_type,
 766		.probe = madgemc_probe,
 767		.remove = __devexit_p(madgemc_remove),
 768	},
 769};
 770
 771static int __init madgemc_init (void)
 772{
 773	return mca_register_driver (&madgemc_driver);
 774}
 775
 776static void __exit madgemc_exit (void)
 777{
 778	mca_unregister_driver (&madgemc_driver);
782} 779}
783 780
784module_init(madgemc_probe); 781module_init(madgemc_init);
785module_exit(madgemc_exit); 782module_exit(madgemc_exit);
786 783
787MODULE_LICENSE("GPL"); 784MODULE_LICENSE("GPL");
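The madgemc conversion above moves per-card state out of a private global list and into driver-model storage: the probe path remembers the net_device on the struct device with dev_set_drvdata(), and the remove path fetches it back with dev_get_drvdata(). A minimal sketch of that pairing, with hypothetical mydrv_* names standing in for the real madgemc functions and the hardware setup elided:

/* Sketch only: mydrv_* and the error handling are placeholders, not the driver's real code. */
static int mydrv_probe(struct device *device)
{
	struct net_device *dev = alloc_trdev(sizeof(struct net_local));

	if (!dev)
		return -ENOMEM;
	/* ... POS/IRQ setup and register_netdev() as in madgemc_probe() ... */
	dev_set_drvdata(device, dev);	/* remember the netdev for remove() */
	return 0;
}

static int mydrv_remove(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);

	unregister_netdev(dev);
	free_netdev(dev);
	dev_set_drvdata(device, NULL);	/* drop the pointer stored in probe */
	return 0;
}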
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index 40ad0fde28af..eb1423ede75c 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -62,8 +62,7 @@ static int dmalist[] __initdata = {
62}; 62};
63 63
64static char cardname[] = "Proteon 1392\0"; 64static char cardname[] = "Proteon 1392\0";
65 65static u64 dma_mask = ISA_MAX_ADDRESS;
66struct net_device *proteon_probe(int unit);
67static int proteon_open(struct net_device *dev); 66static int proteon_open(struct net_device *dev);
68static void proteon_read_eeprom(struct net_device *dev); 67static void proteon_read_eeprom(struct net_device *dev);
69static unsigned short proteon_setnselout_pins(struct net_device *dev); 68static unsigned short proteon_setnselout_pins(struct net_device *dev);
@@ -116,7 +115,7 @@ nodev:
116 return -ENODEV; 115 return -ENODEV;
117} 116}
118 117
119static int __init setup_card(struct net_device *dev) 118static int __init setup_card(struct net_device *dev, struct device *pdev)
120{ 119{
121 struct net_local *tp; 120 struct net_local *tp;
122 static int versionprinted; 121 static int versionprinted;
@@ -137,7 +136,7 @@ static int __init setup_card(struct net_device *dev)
137 } 136 }
138 } 137 }
139 if (err) 138 if (err)
140 goto out4; 139 goto out5;
141 140
142 /* At this point we have found a valid card. */ 141 /* At this point we have found a valid card. */
143 142
@@ -145,14 +144,15 @@ static int __init setup_card(struct net_device *dev)
145 printk(KERN_DEBUG "%s", version); 144 printk(KERN_DEBUG "%s", version);
146 145
147 err = -EIO; 146 err = -EIO;
148 if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL)) 147 pdev->dma_mask = &dma_mask;
148 if (tmsdev_init(dev, pdev))
149 goto out4; 149 goto out4;
150 150
151 dev->base_addr &= ~3; 151 dev->base_addr &= ~3;
152 152
153 proteon_read_eeprom(dev); 153 proteon_read_eeprom(dev);
154 154
155 printk(KERN_DEBUG "%s: Ring Station Address: ", dev->name); 155 printk(KERN_DEBUG "proteon.c: Ring Station Address: ");
156 printk("%2.2x", dev->dev_addr[0]); 156 printk("%2.2x", dev->dev_addr[0]);
157 for (j = 1; j < 6; j++) 157 for (j = 1; j < 6; j++)
158 printk(":%2.2x", dev->dev_addr[j]); 158 printk(":%2.2x", dev->dev_addr[j]);
@@ -185,7 +185,7 @@ static int __init setup_card(struct net_device *dev)
185 185
186 if(irqlist[j] == 0) 186 if(irqlist[j] == 0)
187 { 187 {
188 printk(KERN_INFO "%s: AutoSelect no IRQ available\n", dev->name); 188 printk(KERN_INFO "proteon.c: AutoSelect no IRQ available\n");
189 goto out3; 189 goto out3;
190 } 190 }
191 } 191 }
@@ -196,15 +196,15 @@ static int __init setup_card(struct net_device *dev)
196 break; 196 break;
197 if (irqlist[j] == 0) 197 if (irqlist[j] == 0)
198 { 198 {
199 printk(KERN_INFO "%s: Illegal IRQ %d specified\n", 199 printk(KERN_INFO "proteon.c: Illegal IRQ %d specified\n",
200 dev->name, dev->irq); 200 dev->irq);
201 goto out3; 201 goto out3;
202 } 202 }
203 if (request_irq(dev->irq, tms380tr_interrupt, 0, 203 if (request_irq(dev->irq, tms380tr_interrupt, 0,
204 cardname, dev)) 204 cardname, dev))
205 { 205 {
206 printk(KERN_INFO "%s: Selected IRQ %d not available\n", 206 printk(KERN_INFO "proteon.c: Selected IRQ %d not available\n",
207 dev->name, dev->irq); 207 dev->irq);
208 goto out3; 208 goto out3;
209 } 209 }
210 } 210 }
@@ -220,7 +220,7 @@ static int __init setup_card(struct net_device *dev)
220 220
221 if(dmalist[j] == 0) 221 if(dmalist[j] == 0)
222 { 222 {
223 printk(KERN_INFO "%s: AutoSelect no DMA available\n", dev->name); 223 printk(KERN_INFO "proteon.c: AutoSelect no DMA available\n");
224 goto out2; 224 goto out2;
225 } 225 }
226 } 226 }
@@ -231,25 +231,25 @@ static int __init setup_card(struct net_device *dev)
231 break; 231 break;
232 if (dmalist[j] == 0) 232 if (dmalist[j] == 0)
233 { 233 {
234 printk(KERN_INFO "%s: Illegal DMA %d specified\n", 234 printk(KERN_INFO "proteon.c: Illegal DMA %d specified\n",
235 dev->name, dev->dma); 235 dev->dma);
236 goto out2; 236 goto out2;
237 } 237 }
238 if (request_dma(dev->dma, cardname)) 238 if (request_dma(dev->dma, cardname))
239 { 239 {
240 printk(KERN_INFO "%s: Selected DMA %d not available\n", 240 printk(KERN_INFO "proteon.c: Selected DMA %d not available\n",
241 dev->name, dev->dma); 241 dev->dma);
242 goto out2; 242 goto out2;
243 } 243 }
244 } 244 }
245 245
246 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
247 dev->name, dev->base_addr, dev->irq, dev->dma);
248
249 err = register_netdev(dev); 246 err = register_netdev(dev);
250 if (err) 247 if (err)
251 goto out; 248 goto out;
252 249
250 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
251 dev->name, dev->base_addr, dev->irq, dev->dma);
252
253 return 0; 253 return 0;
254out: 254out:
255 free_dma(dev->dma); 255 free_dma(dev->dma);
@@ -258,34 +258,11 @@ out2:
258out3: 258out3:
259 tmsdev_term(dev); 259 tmsdev_term(dev);
260out4: 260out4:
261 release_region(dev->base_addr, PROTEON_IO_EXTENT); 261 release_region(dev->base_addr, PROTEON_IO_EXTENT);
262out5:
262 return err; 263 return err;
263} 264}
264 265
265struct net_device * __init proteon_probe(int unit)
266{
267 struct net_device *dev = alloc_trdev(sizeof(struct net_local));
268 int err = 0;
269
270 if (!dev)
271 return ERR_PTR(-ENOMEM);
272
273 if (unit >= 0) {
274 sprintf(dev->name, "tr%d", unit);
275 netdev_boot_setup_check(dev);
276 }
277
278 err = setup_card(dev);
279 if (err)
280 goto out;
281
282 return dev;
283
284out:
285 free_netdev(dev);
286 return ERR_PTR(err);
287}
288
289/* 266/*
290 * Reads MAC address from adapter RAM, which should've read it from 267 * Reads MAC address from adapter RAM, which should've read it from
291 * the onboard ROM. 268 * the onboard ROM.
@@ -352,8 +329,6 @@ static int proteon_open(struct net_device *dev)
352 return tms380tr_open(dev); 329 return tms380tr_open(dev);
353} 330}
354 331
355#ifdef MODULE
356
357#define ISATR_MAX_ADAPTERS 3 332#define ISATR_MAX_ADAPTERS 3
358 333
359static int io[ISATR_MAX_ADAPTERS]; 334static int io[ISATR_MAX_ADAPTERS];
@@ -366,13 +341,23 @@ module_param_array(io, int, NULL, 0);
366module_param_array(irq, int, NULL, 0); 341module_param_array(irq, int, NULL, 0);
367module_param_array(dma, int, NULL, 0); 342module_param_array(dma, int, NULL, 0);
368 343
369static struct net_device *proteon_dev[ISATR_MAX_ADAPTERS]; 344static struct platform_device *proteon_dev[ISATR_MAX_ADAPTERS];
345
346static struct device_driver proteon_driver = {
347 .name = "proteon",
348 .bus = &platform_bus_type,
349};
370 350
371int init_module(void) 351static int __init proteon_init(void)
372{ 352{
373 struct net_device *dev; 353 struct net_device *dev;
354 struct platform_device *pdev;
374 int i, num = 0, err = 0; 355 int i, num = 0, err = 0;
375 356
357 err = driver_register(&proteon_driver);
358 if (err)
359 return err;
360
376 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { 361 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
377 dev = alloc_trdev(sizeof(struct net_local)); 362 dev = alloc_trdev(sizeof(struct net_local));
378 if (!dev) 363 if (!dev)
@@ -381,11 +366,15 @@ int init_module(void)
381 dev->base_addr = io[i]; 366 dev->base_addr = io[i];
382 dev->irq = irq[i]; 367 dev->irq = irq[i];
383 dev->dma = dma[i]; 368 dev->dma = dma[i];
384 err = setup_card(dev); 369 pdev = platform_device_register_simple("proteon",
370 i, NULL, 0);
371 err = setup_card(dev, &pdev->dev);
385 if (!err) { 372 if (!err) {
386 proteon_dev[i] = dev; 373 proteon_dev[i] = pdev;
374 dev_set_drvdata(&pdev->dev, dev);
387 ++num; 375 ++num;
388 } else { 376 } else {
377 platform_device_unregister(pdev);
389 free_netdev(dev); 378 free_netdev(dev);
390 } 379 }
391 } 380 }
@@ -399,23 +388,28 @@ int init_module(void)
399 return (0); 388 return (0);
400} 389}
401 390
402void cleanup_module(void) 391static void __exit proteon_cleanup(void)
403{ 392{
393 struct net_device *dev;
404 int i; 394 int i;
405 395
406 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { 396 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
407 struct net_device *dev = proteon_dev[i]; 397 struct platform_device *pdev = proteon_dev[i];
408 398
409 if (!dev) 399 if (!pdev)
410 continue; 400 continue;
411 401 dev = dev_get_drvdata(&pdev->dev);
412 unregister_netdev(dev); 402 unregister_netdev(dev);
413 release_region(dev->base_addr, PROTEON_IO_EXTENT); 403 release_region(dev->base_addr, PROTEON_IO_EXTENT);
414 free_irq(dev->irq, dev); 404 free_irq(dev->irq, dev);
415 free_dma(dev->dma); 405 free_dma(dev->dma);
416 tmsdev_term(dev); 406 tmsdev_term(dev);
417 free_netdev(dev); 407 free_netdev(dev);
408 dev_set_drvdata(&pdev->dev, NULL);
409 platform_device_unregister(pdev);
418 } 410 }
411 driver_unregister(&proteon_driver);
419} 412}
420#endif /* MODULE */
421 413
414module_init(proteon_init);
415module_exit(proteon_cleanup);
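The proteon changes hang each probed ISA card off a platform device, so the generic driver model (and the dma_mask plumbing that the reworked tmsdev_init() relies on) has a struct device to work with. A minimal sketch of that registration pattern, using a hypothetical "mycard" bus id and a single-card module variable:

/* Sketch: "mycard" and mycard_* are placeholders; multi-card bookkeeping omitted. */
static struct platform_device *pdev;

static int __init mycard_register(struct net_device *dev, int index)
{
	pdev = platform_device_register_simple("mycard", index, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	dev_set_drvdata(&pdev->dev, dev);	/* pair the netdev with its platform device */
	return 0;
}

static void mycard_unregister(void)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	platform_device_unregister(pdev);
	free_netdev(dev);
}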
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
index f26796e2d0e5..3c7c66204f74 100644
--- a/drivers/net/tokenring/skisa.c
+++ b/drivers/net/tokenring/skisa.c
@@ -68,8 +68,7 @@ static int dmalist[] __initdata = {
68}; 68};
69 69
70static char isa_cardname[] = "SK NET TR 4/16 ISA\0"; 70static char isa_cardname[] = "SK NET TR 4/16 ISA\0";
71 71static u64 dma_mask = ISA_MAX_ADDRESS;
72struct net_device *sk_isa_probe(int unit);
73static int sk_isa_open(struct net_device *dev); 72static int sk_isa_open(struct net_device *dev);
74static void sk_isa_read_eeprom(struct net_device *dev); 73static void sk_isa_read_eeprom(struct net_device *dev);
75static unsigned short sk_isa_setnselout_pins(struct net_device *dev); 74static unsigned short sk_isa_setnselout_pins(struct net_device *dev);
@@ -133,7 +132,7 @@ static int __init sk_isa_probe1(struct net_device *dev, int ioaddr)
133 return 0; 132 return 0;
134} 133}
135 134
136static int __init setup_card(struct net_device *dev) 135static int __init setup_card(struct net_device *dev, struct device *pdev)
137{ 136{
138 struct net_local *tp; 137 struct net_local *tp;
139 static int versionprinted; 138 static int versionprinted;
@@ -154,7 +153,7 @@ static int __init setup_card(struct net_device *dev)
154 } 153 }
155 } 154 }
156 if (err) 155 if (err)
157 goto out4; 156 goto out5;
158 157
159 /* At this point we have found a valid card. */ 158 /* At this point we have found a valid card. */
160 159
@@ -162,14 +161,15 @@ static int __init setup_card(struct net_device *dev)
162 printk(KERN_DEBUG "%s", version); 161 printk(KERN_DEBUG "%s", version);
163 162
164 err = -EIO; 163 err = -EIO;
165 if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL)) 164 pdev->dma_mask = &dma_mask;
165 if (tmsdev_init(dev, pdev))
166 goto out4; 166 goto out4;
167 167
168 dev->base_addr &= ~3; 168 dev->base_addr &= ~3;
169 169
170 sk_isa_read_eeprom(dev); 170 sk_isa_read_eeprom(dev);
171 171
172 printk(KERN_DEBUG "%s: Ring Station Address: ", dev->name); 172 printk(KERN_DEBUG "skisa.c: Ring Station Address: ");
173 printk("%2.2x", dev->dev_addr[0]); 173 printk("%2.2x", dev->dev_addr[0]);
174 for (j = 1; j < 6; j++) 174 for (j = 1; j < 6; j++)
175 printk(":%2.2x", dev->dev_addr[j]); 175 printk(":%2.2x", dev->dev_addr[j]);
@@ -202,7 +202,7 @@ static int __init setup_card(struct net_device *dev)
202 202
203 if(irqlist[j] == 0) 203 if(irqlist[j] == 0)
204 { 204 {
205 printk(KERN_INFO "%s: AutoSelect no IRQ available\n", dev->name); 205 printk(KERN_INFO "skisa.c: AutoSelect no IRQ available\n");
206 goto out3; 206 goto out3;
207 } 207 }
208 } 208 }
@@ -213,15 +213,15 @@ static int __init setup_card(struct net_device *dev)
213 break; 213 break;
214 if (irqlist[j] == 0) 214 if (irqlist[j] == 0)
215 { 215 {
216 printk(KERN_INFO "%s: Illegal IRQ %d specified\n", 216 printk(KERN_INFO "skisa.c: Illegal IRQ %d specified\n",
217 dev->name, dev->irq); 217 dev->irq);
218 goto out3; 218 goto out3;
219 } 219 }
220 if (request_irq(dev->irq, tms380tr_interrupt, 0, 220 if (request_irq(dev->irq, tms380tr_interrupt, 0,
221 isa_cardname, dev)) 221 isa_cardname, dev))
222 { 222 {
223 printk(KERN_INFO "%s: Selected IRQ %d not available\n", 223 printk(KERN_INFO "skisa.c: Selected IRQ %d not available\n",
224 dev->name, dev->irq); 224 dev->irq);
225 goto out3; 225 goto out3;
226 } 226 }
227 } 227 }
@@ -237,7 +237,7 @@ static int __init setup_card(struct net_device *dev)
237 237
238 if(dmalist[j] == 0) 238 if(dmalist[j] == 0)
239 { 239 {
240 printk(KERN_INFO "%s: AutoSelect no DMA available\n", dev->name); 240 printk(KERN_INFO "skisa.c: AutoSelect no DMA available\n");
241 goto out2; 241 goto out2;
242 } 242 }
243 } 243 }
@@ -248,25 +248,25 @@ static int __init setup_card(struct net_device *dev)
248 break; 248 break;
249 if (dmalist[j] == 0) 249 if (dmalist[j] == 0)
250 { 250 {
251 printk(KERN_INFO "%s: Illegal DMA %d specified\n", 251 printk(KERN_INFO "skisa.c: Illegal DMA %d specified\n",
252 dev->name, dev->dma); 252 dev->dma);
253 goto out2; 253 goto out2;
254 } 254 }
255 if (request_dma(dev->dma, isa_cardname)) 255 if (request_dma(dev->dma, isa_cardname))
256 { 256 {
257 printk(KERN_INFO "%s: Selected DMA %d not available\n", 257 printk(KERN_INFO "skisa.c: Selected DMA %d not available\n",
258 dev->name, dev->dma); 258 dev->dma);
259 goto out2; 259 goto out2;
260 } 260 }
261 } 261 }
262 262
263 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
264 dev->name, dev->base_addr, dev->irq, dev->dma);
265
266 err = register_netdev(dev); 263 err = register_netdev(dev);
267 if (err) 264 if (err)
268 goto out; 265 goto out;
269 266
267 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
268 dev->name, dev->base_addr, dev->irq, dev->dma);
269
270 return 0; 270 return 0;
271out: 271out:
272 free_dma(dev->dma); 272 free_dma(dev->dma);
@@ -275,33 +275,11 @@ out2:
275out3: 275out3:
276 tmsdev_term(dev); 276 tmsdev_term(dev);
277out4: 277out4:
278 release_region(dev->base_addr, SK_ISA_IO_EXTENT); 278 release_region(dev->base_addr, SK_ISA_IO_EXTENT);
279out5:
279 return err; 280 return err;
280} 281}
281 282
282struct net_device * __init sk_isa_probe(int unit)
283{
284 struct net_device *dev = alloc_trdev(sizeof(struct net_local));
285 int err = 0;
286
287 if (!dev)
288 return ERR_PTR(-ENOMEM);
289
290 if (unit >= 0) {
291 sprintf(dev->name, "tr%d", unit);
292 netdev_boot_setup_check(dev);
293 }
294
295 err = setup_card(dev);
296 if (err)
297 goto out;
298
299 return dev;
300out:
301 free_netdev(dev);
302 return ERR_PTR(err);
303}
304
305/* 283/*
306 * Reads MAC address from adapter RAM, which should've read it from 284 * Reads MAC address from adapter RAM, which should've read it from
307 * the onboard ROM. 285 * the onboard ROM.
@@ -361,8 +339,6 @@ static int sk_isa_open(struct net_device *dev)
361 return tms380tr_open(dev); 339 return tms380tr_open(dev);
362} 340}
363 341
364#ifdef MODULE
365
366#define ISATR_MAX_ADAPTERS 3 342#define ISATR_MAX_ADAPTERS 3
367 343
368static int io[ISATR_MAX_ADAPTERS]; 344static int io[ISATR_MAX_ADAPTERS];
@@ -375,13 +351,23 @@ module_param_array(io, int, NULL, 0);
375module_param_array(irq, int, NULL, 0); 351module_param_array(irq, int, NULL, 0);
376module_param_array(dma, int, NULL, 0); 352module_param_array(dma, int, NULL, 0);
377 353
378static struct net_device *sk_isa_dev[ISATR_MAX_ADAPTERS]; 354static struct platform_device *sk_isa_dev[ISATR_MAX_ADAPTERS];
379 355
380int init_module(void) 356static struct device_driver sk_isa_driver = {
357 .name = "skisa",
358 .bus = &platform_bus_type,
359};
360
361static int __init sk_isa_init(void)
381{ 362{
382 struct net_device *dev; 363 struct net_device *dev;
364 struct platform_device *pdev;
383 int i, num = 0, err = 0; 365 int i, num = 0, err = 0;
384 366
367 err = driver_register(&sk_isa_driver);
368 if (err)
369 return err;
370
385 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { 371 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
386 dev = alloc_trdev(sizeof(struct net_local)); 372 dev = alloc_trdev(sizeof(struct net_local));
387 if (!dev) 373 if (!dev)
@@ -390,12 +376,15 @@ int init_module(void)
390 dev->base_addr = io[i]; 376 dev->base_addr = io[i];
391 dev->irq = irq[i]; 377 dev->irq = irq[i];
392 dev->dma = dma[i]; 378 dev->dma = dma[i];
393 err = setup_card(dev); 379 pdev = platform_device_register_simple("skisa",
394 380 i, NULL, 0);
381 err = setup_card(dev, &pdev->dev);
395 if (!err) { 382 if (!err) {
396 sk_isa_dev[i] = dev; 383 sk_isa_dev[i] = pdev;
384 dev_set_drvdata(&sk_isa_dev[i]->dev, dev);
397 ++num; 385 ++num;
398 } else { 386 } else {
387 platform_device_unregister(pdev);
399 free_netdev(dev); 388 free_netdev(dev);
400 } 389 }
401 } 390 }
@@ -409,23 +398,28 @@ int init_module(void)
409 return (0); 398 return (0);
410} 399}
411 400
412void cleanup_module(void) 401static void __exit sk_isa_cleanup(void)
413{ 402{
403 struct net_device *dev;
414 int i; 404 int i;
415 405
416 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { 406 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
417 struct net_device *dev = sk_isa_dev[i]; 407 struct platform_device *pdev = sk_isa_dev[i];
418 408
419 if (!dev) 409 if (!pdev)
420 continue; 410 continue;
421 411 dev = dev_get_drvdata(&pdev->dev);
422 unregister_netdev(dev); 412 unregister_netdev(dev);
423 release_region(dev->base_addr, SK_ISA_IO_EXTENT); 413 release_region(dev->base_addr, SK_ISA_IO_EXTENT);
424 free_irq(dev->irq, dev); 414 free_irq(dev->irq, dev);
425 free_dma(dev->dma); 415 free_dma(dev->dma);
426 tmsdev_term(dev); 416 tmsdev_term(dev);
427 free_netdev(dev); 417 free_netdev(dev);
418 dev_set_drvdata(&pdev->dev, NULL);
419 platform_device_unregister(pdev);
428 } 420 }
421 driver_unregister(&sk_isa_driver);
429} 422}
430#endif /* MODULE */
431 423
424module_init(sk_isa_init);
425module_exit(sk_isa_cleanup);
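Both ISA front ends (proteon and skisa) now express the 16MB ISA DMA limit through the device's dma_mask pointer instead of passing a dmalimit argument; tmsdev_init() dereferences that mask to set tp->dmalimit and fails if none is set. A minimal sketch of the idea, with a hypothetical my_setup() helper:

/* Sketch: my_setup() is hypothetical; ISA_MAX_ADDRESS marks the 16MB ISA bus limit. */
static u64 my_dma_mask = ISA_MAX_ADDRESS;

static int __init my_setup(struct net_device *dev, struct device *pdev)
{
	pdev->dma_mask = &my_dma_mask;	/* tmsdev_init() reads *pdev->dma_mask */
	return tmsdev_init(dev, pdev);
}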
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 5e0b0ce98ed7..2e39bf1f7462 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -62,6 +62,7 @@
62 * normal operation. 62 * normal operation.
63 * 30-Dec-02 JF Removed incorrect __init from 63 * 30-Dec-02 JF Removed incorrect __init from
64 * tms380tr_init_card. 64 * tms380tr_init_card.
65 * 22-Jul-05 JF Converted to dma-mapping.
65 * 66 *
66 * To do: 67 * To do:
67 * 1. Multi/Broadcast packet handling (this may have fixed itself) 68 * 1. Multi/Broadcast packet handling (this may have fixed itself)
@@ -89,7 +90,7 @@ static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, A
89#include <linux/time.h> 90#include <linux/time.h>
90#include <linux/errno.h> 91#include <linux/errno.h>
91#include <linux/init.h> 92#include <linux/init.h>
92#include <linux/pci.h> 93#include <linux/dma-mapping.h>
93#include <linux/delay.h> 94#include <linux/delay.h>
94#include <linux/netdevice.h> 95#include <linux/netdevice.h>
95#include <linux/etherdevice.h> 96#include <linux/etherdevice.h>
@@ -114,8 +115,6 @@ static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, A
114#endif 115#endif
115static unsigned int tms380tr_debug = TMS380TR_DEBUG; 116static unsigned int tms380tr_debug = TMS380TR_DEBUG;
116 117
117static struct device tms_device;
118
119/* Index to functions, as function prototypes. 118/* Index to functions, as function prototypes.
120 * Alphabetical by function name. 119 * Alphabetical by function name.
121 */ 120 */
@@ -434,7 +433,7 @@ static void tms380tr_init_net_local(struct net_device *dev)
434 skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize); 433 skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize);
435 434
436 /* data unreachable for DMA ? then use local buffer */ 435 /* data unreachable for DMA ? then use local buffer */
437 dmabuf = pci_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, PCI_DMA_FROMDEVICE); 436 dmabuf = dma_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
438 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit)) 437 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
439 { 438 {
440 tp->Rpl[i].SkbStat = SKB_DATA_COPY; 439 tp->Rpl[i].SkbStat = SKB_DATA_COPY;
@@ -638,10 +637,10 @@ static int tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device
638 /* Is buffer reachable for Busmaster-DMA? */ 637 /* Is buffer reachable for Busmaster-DMA? */
639 638
640 length = skb->len; 639 length = skb->len;
641 dmabuf = pci_map_single(tp->pdev, skb->data, length, PCI_DMA_TODEVICE); 640 dmabuf = dma_map_single(tp->pdev, skb->data, length, DMA_TO_DEVICE);
642 if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) { 641 if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) {
643 /* Copy frame to local buffer */ 642 /* Copy frame to local buffer */
644 pci_unmap_single(tp->pdev, dmabuf, length, PCI_DMA_TODEVICE); 643 dma_unmap_single(tp->pdev, dmabuf, length, DMA_TO_DEVICE);
645 dmabuf = 0; 644 dmabuf = 0;
646 i = tp->TplFree->TPLIndex; 645 i = tp->TplFree->TPLIndex;
647 buf = tp->LocalTxBuffers[i]; 646 buf = tp->LocalTxBuffers[i];
@@ -1284,9 +1283,7 @@ static int tms380tr_reset_adapter(struct net_device *dev)
1284 unsigned short count, c, count2; 1283 unsigned short count, c, count2;
1285 const struct firmware *fw_entry = NULL; 1284 const struct firmware *fw_entry = NULL;
1286 1285
1287 strncpy(tms_device.bus_id,dev->name, BUS_ID_SIZE); 1286 if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) {
1288
1289 if (request_firmware(&fw_entry, "tms380tr.bin", &tms_device) != 0) {
1290 printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n", 1287 printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n",
1291 dev->name, "tms380tr.bin"); 1288 dev->name, "tms380tr.bin");
1292 return (-1); 1289 return (-1);
@@ -2021,7 +2018,7 @@ static void tms380tr_cancel_tx_queue(struct net_local* tp)
2021 2018
2022 printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl); 2019 printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl);
2023 if (tpl->DMABuff) 2020 if (tpl->DMABuff)
2024 pci_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, PCI_DMA_TODEVICE); 2021 dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
2025 dev_kfree_skb_any(tpl->Skb); 2022 dev_kfree_skb_any(tpl->Skb);
2026 } 2023 }
2027 2024
@@ -2090,7 +2087,7 @@ static void tms380tr_tx_status_irq(struct net_device *dev)
2090 2087
2091 tp->MacStat.tx_packets++; 2088 tp->MacStat.tx_packets++;
2092 if (tpl->DMABuff) 2089 if (tpl->DMABuff)
2093 pci_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, PCI_DMA_TODEVICE); 2090 dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
2094 dev_kfree_skb_irq(tpl->Skb); 2091 dev_kfree_skb_irq(tpl->Skb);
2095 tpl->BusyFlag = 0; /* "free" TPL */ 2092 tpl->BusyFlag = 0; /* "free" TPL */
2096 } 2093 }
@@ -2209,7 +2206,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
2209 tp->MacStat.rx_errors++; 2206 tp->MacStat.rx_errors++;
2210 } 2207 }
2211 if (rpl->DMABuff) 2208 if (rpl->DMABuff)
2212 pci_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, PCI_DMA_TODEVICE); 2209 dma_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, DMA_TO_DEVICE);
2213 rpl->DMABuff = 0; 2210 rpl->DMABuff = 0;
2214 2211
2215 /* Allocate new skb for rpl */ 2212 /* Allocate new skb for rpl */
@@ -2227,7 +2224,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
2227 skb_put(rpl->Skb, tp->MaxPacketSize); 2224 skb_put(rpl->Skb, tp->MaxPacketSize);
2228 2225
2229 /* Data unreachable for DMA ? then use local buffer */ 2226 /* Data unreachable for DMA ? then use local buffer */
2230 dmabuf = pci_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, PCI_DMA_FROMDEVICE); 2227 dmabuf = dma_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
2231 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit)) 2228 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
2232 { 2229 {
2233 rpl->SkbStat = SKB_DATA_COPY; 2230 rpl->SkbStat = SKB_DATA_COPY;
@@ -2332,23 +2329,26 @@ void tmsdev_term(struct net_device *dev)
2332 struct net_local *tp; 2329 struct net_local *tp;
2333 2330
2334 tp = netdev_priv(dev); 2331 tp = netdev_priv(dev);
2335 pci_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local), 2332 dma_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local),
2336 PCI_DMA_BIDIRECTIONAL); 2333 DMA_BIDIRECTIONAL);
2337} 2334}
2338 2335
2339int tmsdev_init(struct net_device *dev, unsigned long dmalimit, 2336int tmsdev_init(struct net_device *dev, struct device *pdev)
2340 struct pci_dev *pdev)
2341{ 2337{
2342 struct net_local *tms_local; 2338 struct net_local *tms_local;
2343 2339
2344 memset(dev->priv, 0, sizeof(struct net_local)); 2340 memset(dev->priv, 0, sizeof(struct net_local));
2345 tms_local = netdev_priv(dev); 2341 tms_local = netdev_priv(dev);
2346 init_waitqueue_head(&tms_local->wait_for_tok_int); 2342 init_waitqueue_head(&tms_local->wait_for_tok_int);
2347 tms_local->dmalimit = dmalimit; 2343 if (pdev->dma_mask)
2344 tms_local->dmalimit = *pdev->dma_mask;
2345 else
2346 return -ENOMEM;
2348 tms_local->pdev = pdev; 2347 tms_local->pdev = pdev;
2349 tms_local->dmabuffer = pci_map_single(pdev, (void *)tms_local, 2348 tms_local->dmabuffer = dma_map_single(pdev, (void *)tms_local,
2350 sizeof(struct net_local), PCI_DMA_BIDIRECTIONAL); 2349 sizeof(struct net_local), DMA_BIDIRECTIONAL);
2351 if (tms_local->dmabuffer + sizeof(struct net_local) > dmalimit) 2350 if (tms_local->dmabuffer + sizeof(struct net_local) >
2351 tms_local->dmalimit)
2352 { 2352 {
2353 printk(KERN_INFO "%s: Memory not accessible for DMA\n", 2353 printk(KERN_INFO "%s: Memory not accessible for DMA\n",
2354 dev->name); 2354 dev->name);
@@ -2370,8 +2370,6 @@ int tmsdev_init(struct net_device *dev, unsigned long dmalimit,
2370 return 0; 2370 return 0;
2371} 2371}
2372 2372
2373#ifdef MODULE
2374
2375EXPORT_SYMBOL(tms380tr_open); 2373EXPORT_SYMBOL(tms380tr_open);
2376EXPORT_SYMBOL(tms380tr_close); 2374EXPORT_SYMBOL(tms380tr_close);
2377EXPORT_SYMBOL(tms380tr_interrupt); 2375EXPORT_SYMBOL(tms380tr_interrupt);
@@ -2379,6 +2377,8 @@ EXPORT_SYMBOL(tmsdev_init);
2379EXPORT_SYMBOL(tmsdev_term); 2377EXPORT_SYMBOL(tmsdev_term);
2380EXPORT_SYMBOL(tms380tr_wait); 2378EXPORT_SYMBOL(tms380tr_wait);
2381 2379
2380#ifdef MODULE
2381
2382static struct module *TMS380_module = NULL; 2382static struct module *TMS380_module = NULL;
2383 2383
2384int init_module(void) 2384int init_module(void)
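The tms380tr.c hunks above are a mechanical move from the PCI-only DMA helpers to the generic DMA API, which serves PCI, MCA and platform devices alike: the struct pci_dev argument becomes a struct device, and the PCI_DMA_* direction constants become DMA_* ones. A small sketch of the mapping side, with hypothetical my_map_tx/my_unmap_tx wrappers (the driver itself calls dma_map_single() inline):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Sketch: map one outgoing skb with the generic DMA API (was pci_map_single/PCI_DMA_TODEVICE). */
static dma_addr_t my_map_tx(struct device *dev, struct sk_buff *skb)
{
	return dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
}

static void my_unmap_tx(struct device *dev, dma_addr_t handle, size_t len)
{
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}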
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
index f2c5ba0f37a5..30452c67bb68 100644
--- a/drivers/net/tokenring/tms380tr.h
+++ b/drivers/net/tokenring/tms380tr.h
@@ -17,8 +17,7 @@
17int tms380tr_open(struct net_device *dev); 17int tms380tr_open(struct net_device *dev);
18int tms380tr_close(struct net_device *dev); 18int tms380tr_close(struct net_device *dev);
19irqreturn_t tms380tr_interrupt(int irq, void *dev_id, struct pt_regs *regs); 19irqreturn_t tms380tr_interrupt(int irq, void *dev_id, struct pt_regs *regs);
20int tmsdev_init(struct net_device *dev, unsigned long dmalimit, 20int tmsdev_init(struct net_device *dev, struct device *pdev);
21 struct pci_dev *pdev);
22void tmsdev_term(struct net_device *dev); 21void tmsdev_term(struct net_device *dev);
23void tms380tr_wait(unsigned long time); 22void tms380tr_wait(unsigned long time);
24 23
@@ -719,7 +718,7 @@ struct s_TPL { /* Transmit Parameter List (align on even word boundaries) */
719 struct sk_buff *Skb; 718 struct sk_buff *Skb;
720 unsigned char TPLIndex; 719 unsigned char TPLIndex;
721 volatile unsigned char BusyFlag;/* Flag: TPL busy? */ 720 volatile unsigned char BusyFlag;/* Flag: TPL busy? */
722 dma_addr_t DMABuff; /* DMA IO bus address from pci_map */ 721 dma_addr_t DMABuff; /* DMA IO bus address from dma_map */
723}; 722};
724 723
725/* ---------------------Receive Functions-------------------------------* 724/* ---------------------Receive Functions-------------------------------*
@@ -1060,7 +1059,7 @@ struct s_RPL { /* Receive Parameter List */
1060 struct sk_buff *Skb; 1059 struct sk_buff *Skb;
1061 SKB_STAT SkbStat; 1060 SKB_STAT SkbStat;
1062 int RPLIndex; 1061 int RPLIndex;
1063 dma_addr_t DMABuff; /* DMA IO bus address from pci_map */ 1062 dma_addr_t DMABuff; /* DMA IO bus address from dma_map */
1064}; 1063};
1065 1064
1066/* Information that need to be kept for each board. */ 1065/* Information that need to be kept for each board. */
@@ -1091,7 +1090,7 @@ typedef struct net_local {
1091 RPL *RplTail; 1090 RPL *RplTail;
1092 unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE]; 1091 unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE];
1093 1092
1094 struct pci_dev *pdev; 1093 struct device *pdev;
1095 int DataRate; 1094 int DataRate;
1096 unsigned char ScbInUse; 1095 unsigned char ScbInUse;
1097 unsigned short CMDqueue; 1096 unsigned short CMDqueue;
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index 2e18c0a46482..ab47c0547a3b 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -100,7 +100,7 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
100 unsigned int pci_irq_line; 100 unsigned int pci_irq_line;
101 unsigned long pci_ioaddr; 101 unsigned long pci_ioaddr;
102 struct card_info *cardinfo = &card_info_table[ent->driver_data]; 102 struct card_info *cardinfo = &card_info_table[ent->driver_data];
103 103
104 if (versionprinted++ == 0) 104 if (versionprinted++ == 0)
105 printk("%s", version); 105 printk("%s", version);
106 106
@@ -143,7 +143,7 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
143 printk(":%2.2x", dev->dev_addr[i]); 143 printk(":%2.2x", dev->dev_addr[i]);
144 printk("\n"); 144 printk("\n");
145 145
146 ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev); 146 ret = tmsdev_init(dev, &pdev->dev);
147 if (ret) { 147 if (ret) {
148 printk("%s: unable to get memory for dev->priv.\n", dev->name); 148 printk("%s: unable to get memory for dev->priv.\n", dev->name);
149 goto err_out_irq; 149 goto err_out_irq;
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index 6e74af62ca08..9e56fc346ba4 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -56,7 +56,7 @@
56#include <linux/sched.h> /* for jiffies, HZ, etc. */ 56#include <linux/sched.h> /* for jiffies, HZ, etc. */
57#include <linux/cycx_drv.h> /* API definitions */ 57#include <linux/cycx_drv.h> /* API definitions */
58#include <linux/cycx_cfm.h> /* CYCX firmware module definitions */ 58#include <linux/cycx_cfm.h> /* CYCX firmware module definitions */
59#include <linux/delay.h> /* udelay */ 59#include <linux/delay.h> /* udelay, msleep_interruptible */
60#include <asm/io.h> /* read[wl], write[wl], ioremap, iounmap */ 60#include <asm/io.h> /* read[wl], write[wl], ioremap, iounmap */
61 61
62#define MOD_VERSION 0 62#define MOD_VERSION 0
@@ -74,7 +74,6 @@ static int reset_cyc2x(void __iomem *addr);
74static int detect_cyc2x(void __iomem *addr); 74static int detect_cyc2x(void __iomem *addr);
75 75
76/* Miscellaneous functions */ 76/* Miscellaneous functions */
77static void delay_cycx(int sec);
78static int get_option_index(long *optlist, long optval); 77static int get_option_index(long *optlist, long optval);
79static u16 checksum(u8 *buf, u32 len); 78static u16 checksum(u8 *buf, u32 len);
80 79
@@ -259,7 +258,7 @@ static int memory_exists(void __iomem *addr)
259 if (readw(addr + 0x10) == TEST_PATTERN) 258 if (readw(addr + 0x10) == TEST_PATTERN)
260 return 1; 259 return 1;
261 260
262 delay_cycx(1); 261 msleep_interruptible(1 * 1000);
263 } 262 }
264 263
265 return 0; 264 return 0;
@@ -316,7 +315,7 @@ static void cycx_reset_boot(void __iomem *addr, u8 *code, u32 len)
316 315
317 /* 80186 was in hold, go */ 316 /* 80186 was in hold, go */
318 writeb(0, addr + START_CPU); 317 writeb(0, addr + START_CPU);
319 delay_cycx(1); 318 msleep_interruptible(1 * 1000);
320} 319}
321 320
322/* Load data.bin file through boot (reset) interface. */ 321/* Load data.bin file through boot (reset) interface. */
@@ -462,13 +461,13 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
462 cycx_reset_boot(hw->dpmbase, reset_image, img_hdr->reset_size); 461 cycx_reset_boot(hw->dpmbase, reset_image, img_hdr->reset_size);
463 /* reset is waiting for boot */ 462 /* reset is waiting for boot */
464 writew(GEN_POWER_ON, pt_cycld); 463 writew(GEN_POWER_ON, pt_cycld);
465 delay_cycx(1); 464 msleep_interruptible(1 * 1000);
466 465
467 for (j = 0 ; j < 3 ; j++) 466 for (j = 0 ; j < 3 ; j++)
468 if (!readw(pt_cycld)) 467 if (!readw(pt_cycld))
469 goto reset_loaded; 468 goto reset_loaded;
470 else 469 else
471 delay_cycx(1); 470 msleep_interruptible(1 * 1000);
472 } 471 }
473 472
474 printk(KERN_ERR "%s: reset not started.\n", modname); 473 printk(KERN_ERR "%s: reset not started.\n", modname);
@@ -495,7 +494,7 @@ reset_loaded:
495 494
496 /* Arthur Ganzert's tip: wait a while after the firmware loading... 495 /* Arthur Ganzert's tip: wait a while after the firmware loading...
497 seg abr 26 17:17:12 EST 1999 - acme */ 496 seg abr 26 17:17:12 EST 1999 - acme */
498 delay_cycx(7); 497 msleep_interruptible(7 * 1000);
499 printk(KERN_INFO "%s: firmware loaded!\n", modname); 498 printk(KERN_INFO "%s: firmware loaded!\n", modname);
500 499
501 /* enable interrupts */ 500 /* enable interrupts */
@@ -547,20 +546,13 @@ static int get_option_index(long *optlist, long optval)
547static int reset_cyc2x(void __iomem *addr) 546static int reset_cyc2x(void __iomem *addr)
548{ 547{
549 writeb(0, addr + RST_ENABLE); 548 writeb(0, addr + RST_ENABLE);
550 delay_cycx(2); 549 msleep_interruptible(2 * 1000);
551 writeb(0, addr + RST_DISABLE); 550 writeb(0, addr + RST_DISABLE);
552 delay_cycx(2); 551 msleep_interruptible(2 * 1000);
553 552
554 return memory_exists(addr); 553 return memory_exists(addr);
555} 554}
556 555
557/* Delay */
558static void delay_cycx(int sec)
559{
560 set_current_state(TASK_INTERRUPTIBLE);
561 schedule_timeout(sec * HZ);
562}
563
564/* Calculate 16-bit CRC using CCITT polynomial. */ 556/* Calculate 16-bit CRC using CCITT polynomial. */
565static u16 checksum(u8 *buf, u32 len) 557static u16 checksum(u8 *buf, u32 len)
566{ 558{
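The cycx_drv.c change drops the driver's private second-granularity delay helper in favour of msleep_interruptible(), which takes milliseconds and hides the set_current_state()/schedule_timeout() dance. A minimal sketch of the equivalence, with a hypothetical wrapper name:

#include <linux/delay.h>

/* Sketch: what the removed delay_cycx(sec) boils down to when written with msleep_interruptible(). */
static void my_delay_seconds(int sec)
{
	msleep_interruptible(sec * 1000);	/* sleeps, but returns early if a signal arrives */
}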
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index aabcdc2be05e..9c2d07cde010 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -4322,36 +4322,36 @@ static const struct iw_priv_args orinoco_privtab[] = {
4322 */ 4322 */
4323 4323
4324static const iw_handler orinoco_handler[] = { 4324static const iw_handler orinoco_handler[] = {
4325 [SIOCSIWCOMMIT-SIOCIWFIRST] (iw_handler) orinoco_ioctl_commit, 4325 [SIOCSIWCOMMIT-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_commit,
4326 [SIOCGIWNAME -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getname, 4326 [SIOCGIWNAME -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getname,
4327 [SIOCSIWFREQ -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setfreq, 4327 [SIOCSIWFREQ -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfreq,
4328 [SIOCGIWFREQ -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getfreq, 4328 [SIOCGIWFREQ -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfreq,
4329 [SIOCSIWMODE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setmode, 4329 [SIOCSIWMODE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setmode,
4330 [SIOCGIWMODE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getmode, 4330 [SIOCGIWMODE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getmode,
4331 [SIOCSIWSENS -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setsens, 4331 [SIOCSIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setsens,
4332 [SIOCGIWSENS -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getsens, 4332 [SIOCGIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getsens,
4333 [SIOCGIWRANGE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getiwrange, 4333 [SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwrange,
4334 [SIOCSIWSPY -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setspy, 4334 [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setspy,
4335 [SIOCGIWSPY -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getspy, 4335 [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getspy,
4336 [SIOCSIWAP -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setwap, 4336 [SIOCSIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setwap,
4337 [SIOCGIWAP -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getwap, 4337 [SIOCGIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getwap,
4338 [SIOCSIWSCAN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setscan, 4338 [SIOCSIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setscan,
4339 [SIOCGIWSCAN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getscan, 4339 [SIOCGIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getscan,
4340 [SIOCSIWESSID -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setessid, 4340 [SIOCSIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setessid,
4341 [SIOCGIWESSID -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getessid, 4341 [SIOCGIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getessid,
4342 [SIOCSIWNICKN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setnick, 4342 [SIOCSIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setnick,
4343 [SIOCGIWNICKN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getnick, 4343 [SIOCGIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getnick,
4344 [SIOCSIWRATE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setrate, 4344 [SIOCSIWRATE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrate,
4345 [SIOCGIWRATE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getrate, 4345 [SIOCGIWRATE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrate,
4346 [SIOCSIWRTS -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setrts, 4346 [SIOCSIWRTS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrts,
4347 [SIOCGIWRTS -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getrts, 4347 [SIOCGIWRTS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrts,
4348 [SIOCSIWFRAG -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setfrag, 4348 [SIOCSIWFRAG -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfrag,
4349 [SIOCGIWFRAG -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getfrag, 4349 [SIOCGIWFRAG -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfrag,
4350 [SIOCGIWRETRY -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getretry, 4350 [SIOCGIWRETRY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getretry,
4351 [SIOCSIWENCODE-SIOCIWFIRST] (iw_handler) orinoco_ioctl_setiwencode, 4351 [SIOCSIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setiwencode,
4352 [SIOCGIWENCODE-SIOCIWFIRST] (iw_handler) orinoco_ioctl_getiwencode, 4352 [SIOCGIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwencode,
4353 [SIOCSIWPOWER -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setpower, 4353 [SIOCSIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setpower,
4354 [SIOCGIWPOWER -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getpower, 4354 [SIOCGIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getpower,
4355}; 4355};
4356 4356
4357 4357
@@ -4359,15 +4359,15 @@ static const iw_handler orinoco_handler[] = {
4359 Added typecasting since we no longer use iwreq_data -- Moustafa 4359 Added typecasting since we no longer use iwreq_data -- Moustafa
4360 */ 4360 */
4361static const iw_handler orinoco_private_handler[] = { 4361static const iw_handler orinoco_private_handler[] = {
4362 [0] (iw_handler) orinoco_ioctl_reset, 4362 [0] = (iw_handler) orinoco_ioctl_reset,
4363 [1] (iw_handler) orinoco_ioctl_reset, 4363 [1] = (iw_handler) orinoco_ioctl_reset,
4364 [2] (iw_handler) orinoco_ioctl_setport3, 4364 [2] = (iw_handler) orinoco_ioctl_setport3,
4365 [3] (iw_handler) orinoco_ioctl_getport3, 4365 [3] = (iw_handler) orinoco_ioctl_getport3,
4366 [4] (iw_handler) orinoco_ioctl_setpreamble, 4366 [4] = (iw_handler) orinoco_ioctl_setpreamble,
4367 [5] (iw_handler) orinoco_ioctl_getpreamble, 4367 [5] = (iw_handler) orinoco_ioctl_getpreamble,
4368 [6] (iw_handler) orinoco_ioctl_setibssport, 4368 [6] = (iw_handler) orinoco_ioctl_setibssport,
4369 [7] (iw_handler) orinoco_ioctl_getibssport, 4369 [7] = (iw_handler) orinoco_ioctl_getibssport,
4370 [9] (iw_handler) orinoco_ioctl_getrid, 4370 [9] = (iw_handler) orinoco_ioctl_getrid,
4371}; 4371};
4372 4372
4373static const struct iw_handler_def orinoco_handler_def = { 4373static const struct iw_handler_def orinoco_handler_def = {
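The orinoco change is purely syntactic: the old GNU-style "[index] value" array initializers are rewritten as C99 designated initializers, "[index] = value", which newer compilers require. A self-contained illustration of the new spelling (handler names here are placeholders):

static int handler_a(void) { return 0; }
static int handler_b(void) { return 1; }

static int (*const table[4])(void) = {
	[0] = handler_a,	/* C99 designated initializer (the new form) */
	[3] = handler_b,	/* slots 1 and 2 stay NULL */
};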
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index a0ab26aab450..d7021c391b2b 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -408,6 +408,8 @@ struct ethtool_ops {
408#define SUPPORTED_FIBRE (1 << 10) 408#define SUPPORTED_FIBRE (1 << 10)
409#define SUPPORTED_BNC (1 << 11) 409#define SUPPORTED_BNC (1 << 11)
410#define SUPPORTED_10000baseT_Full (1 << 12) 410#define SUPPORTED_10000baseT_Full (1 << 12)
411#define SUPPORTED_Pause (1 << 13)
412#define SUPPORTED_Asym_Pause (1 << 14)
411 413
412/* Indicates what features are advertised by the interface. */ 414/* Indicates what features are advertised by the interface. */
413#define ADVERTISED_10baseT_Half (1 << 0) 415#define ADVERTISED_10baseT_Half (1 << 0)
@@ -423,6 +425,8 @@ struct ethtool_ops {
423#define ADVERTISED_FIBRE (1 << 10) 425#define ADVERTISED_FIBRE (1 << 10)
424#define ADVERTISED_BNC (1 << 11) 426#define ADVERTISED_BNC (1 << 11)
425#define ADVERTISED_10000baseT_Full (1 << 12) 427#define ADVERTISED_10000baseT_Full (1 << 12)
428#define ADVERTISED_Pause (1 << 13)
429#define ADVERTISED_Asym_Pause (1 << 14)
426 430
427/* The following are all involved in forcing a particular link 431/* The following are all involved in forcing a particular link
428 * mode for the device for setting things. When getting the 432 * mode for the device for setting things. When getting the
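The new SUPPORTED_Pause/ADVERTISED_Pause bits let flow-control ability be carried in the same bitmask ethtool already uses for link speeds. A MAC driver that can honour pause frames might widen what its attached PHY advertises roughly like this (a sketch, not taken from any driver in this series):

/* Sketch: phydev is a struct phy_device * attached via the new PHY layer. */
static void my_enable_pause(struct phy_device *phydev)
{
	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	phydev->advertising = phydev->supported;
}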
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 374b615ea9ea..9b8d0476988a 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -22,6 +22,7 @@
22#define MII_EXPANSION 0x06 /* Expansion register */ 22#define MII_EXPANSION 0x06 /* Expansion register */
23#define MII_CTRL1000 0x09 /* 1000BASE-T control */ 23#define MII_CTRL1000 0x09 /* 1000BASE-T control */
24#define MII_STAT1000 0x0a /* 1000BASE-T status */ 24#define MII_STAT1000 0x0a /* 1000BASE-T status */
25#define MII_ESTATUS 0x0f /* Extended Status */
25#define MII_DCOUNTER 0x12 /* Disconnect counter */ 26#define MII_DCOUNTER 0x12 /* Disconnect counter */
26#define MII_FCSCOUNTER 0x13 /* False carrier counter */ 27#define MII_FCSCOUNTER 0x13 /* False carrier counter */
27#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */ 28#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */
@@ -54,7 +55,10 @@
54#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ 55#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
55#define BMSR_RFAULT 0x0010 /* Remote fault detected */ 56#define BMSR_RFAULT 0x0010 /* Remote fault detected */
56#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */ 57#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */
57#define BMSR_RESV 0x07c0 /* Unused... */ 58#define BMSR_RESV 0x00c0 /* Unused... */
59#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
60#define BMSR_100FULL2 0x0200 /* Can do 100BASE-T2 HDX */
61#define BMSR_100HALF2 0x0400 /* Can do 100BASE-T2 FDX */
58#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ 62#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
59#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ 63#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
60#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ 64#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
@@ -114,6 +118,9 @@
114#define EXPANSION_MFAULTS 0x0010 /* Multiple faults detected */ 118#define EXPANSION_MFAULTS 0x0010 /* Multiple faults detected */
115#define EXPANSION_RESV 0xffe0 /* Unused... */ 119#define EXPANSION_RESV 0xffe0 /* Unused... */
116 120
121#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
122#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
123
117/* N-way test register. */ 124/* N-way test register. */
118#define NWAYTEST_RESV1 0x00ff /* Unused... */ 125#define NWAYTEST_RESV1 0x00ff /* Unused... */
119#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */ 126#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */
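The new MII_ESTATUS and BMSR_ESTATEN definitions are what let generic code discover 1000BASE-T ability from the standard registers: if the BMSR flags an extended status register, read it and test the 1000BT bits. A sketch of that probe, assuming a hypothetical mdio_read(phyaddr, reg) accessor supplied by the MAC:

/* Sketch: mdio_read() stands in for whatever MDIO accessor the MAC driver provides. */
static u32 my_gbit_features(int phyaddr)
{
	u32 features = 0;
	int bmsr = mdio_read(phyaddr, MII_BMSR);

	if (bmsr & BMSR_ESTATEN) {		/* extended status register present? */
		int estat = mdio_read(phyaddr, MII_ESTATUS);

		if (estat & ESTATUS_1000_TFULL)
			features |= SUPPORTED_1000baseT_Full;
		if (estat & ESTATUS_1000_THALF)
			features |= SUPPORTED_1000baseT_Half;
	}
	return features;
}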
diff --git a/include/linux/phy.h b/include/linux/phy.h
new file mode 100644
index 000000000000..72cb67b66e0c
--- /dev/null
+++ b/include/linux/phy.h
@@ -0,0 +1,377 @@
1/*
2 * include/linux/phy.h
3 *
4 * Framework and drivers for configuring and reading different PHYs
5 * Based on code in sungem_phy.c and gianfar_phy.c
6 *
7 * Author: Andy Fleming
8 *
9 * Copyright (c) 2004 Freescale Semiconductor, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 */
17
18#ifndef __PHY_H
19#define __PHY_H
20
21#include <linux/spinlock.h>
22#include <linux/device.h>
23
24#define PHY_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
25 SUPPORTED_10baseT_Full | \
26 SUPPORTED_100baseT_Half | \
27 SUPPORTED_100baseT_Full | \
28 SUPPORTED_Autoneg | \
29 SUPPORTED_TP | \
30 SUPPORTED_MII)
31
32#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \
33 SUPPORTED_1000baseT_Half | \
34 SUPPORTED_1000baseT_Full)
35
36/* Set phydev->irq to PHY_POLL if interrupts are not supported,
37 * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
38 * the attached driver handles the interrupt
39 */
40#define PHY_POLL -1
41#define PHY_IGNORE_INTERRUPT -2
42
43#define PHY_HAS_INTERRUPT 0x00000001
44#define PHY_HAS_MAGICANEG 0x00000002
45
46#define MII_BUS_MAX 4
47
48
49#define PHY_INIT_TIMEOUT 100000
50#define PHY_STATE_TIME 1
51#define PHY_FORCE_TIMEOUT 10
52#define PHY_AN_TIMEOUT 10
53
54#define PHY_MAX_ADDR 32
55
56/* The Bus class for PHYs. Devices which provide access to
57 * PHYs should register using this structure */
58struct mii_bus {
59 const char *name;
60 int id;
61 void *priv;
62 int (*read)(struct mii_bus *bus, int phy_id, int regnum);
63 int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val);
64 int (*reset)(struct mii_bus *bus);
65
66 /* A lock to ensure that only one thing can read/write
67 * the MDIO bus at a time */
68 spinlock_t mdio_lock;
69
70 struct device *dev;
71
72 /* list of all PHYs on bus */
73 struct phy_device *phy_map[PHY_MAX_ADDR];
74
75 /* Pointer to an array of interrupts, each PHY's
76 * interrupt at the index matching its address */
77 int *irq;
78};
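For a MAC driver, providing MDIO access means filling in one of these mii_bus structures with its register accessors and an irq table (PHY_POLL wherever no interrupt line is wired up), then handing it to the bus layer added in drivers/net/phy/mdio_bus.c in this series (presumably mdiobus_register()). A minimal sketch, with hypothetical my_mdio_* callbacks and the actual register pokes elided:

/* Sketch: my_mdio_* are placeholder accessors for the MAC's MII management registers. */
static int my_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
	/* ... issue an MDIO read cycle and return the 16-bit value ... */
	return 0;
}

static int my_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val)
{
	/* ... issue an MDIO write cycle ... */
	return 0;
}

static int my_irqs[PHY_MAX_ADDR] = { [0 ... PHY_MAX_ADDR - 1] = PHY_POLL };

static struct mii_bus my_bus = {
	.name	= "my_mdio",
	.id	= 0,
	.read	= my_mdio_read,
	.write	= my_mdio_write,
	.irq	= my_irqs,	/* no PHY interrupts wired up, so poll */
};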
79
80#define PHY_INTERRUPT_DISABLED 0x0
81#define PHY_INTERRUPT_ENABLED 0x80000000
82
83/* PHY state machine states:
84 *
85 * DOWN: PHY device and driver are not ready for anything. probe
86 * should be called if and only if the PHY is in this state,
87 * given that the PHY device exists.
88 * - PHY driver probe function will, depending on the PHY, set
89 * the state to STARTING or READY
90 *
91 * STARTING: PHY device is coming up, and the ethernet driver is
92 * not ready. PHY drivers may set this in the probe function.
93 * If they do, they are responsible for making sure the state is
94 * eventually set to indicate whether the PHY is UP or READY,
95 * depending on the state when the PHY is done starting up.
96 * - PHY driver will set the state to READY
97 * - start will set the state to PENDING
98 *
99 * READY: PHY is ready to send and receive packets, but the
100 * controller is not. By default, PHYs which do not implement
101 * probe will be set to this state by phy_probe(). If the PHY
102 * driver knows the PHY is ready, and the PHY state is STARTING,
103 * then it sets this STATE.
104 * - start will set the state to UP
105 *
106 * PENDING: PHY device is coming up, but the ethernet driver is
107 * ready. phy_start will set this state if the PHY state is
108 * STARTING.
109 * - PHY driver will set the state to UP when the PHY is ready
110 *
111 * UP: The PHY and attached device are ready to do work.
112 * Interrupts should be started here.
113 * - timer moves to AN
114 *
115 * AN: The PHY is currently negotiating the link state. Link is
116 * therefore down for now. phy_timer will set this state when it
117 * detects the state is UP. config_aneg will set this state
118 * whenever called with phydev->autoneg set to AUTONEG_ENABLE.
119 * - If autonegotiation finishes, but there's no link, it sets
120 * the state to NOLINK.
121 * - If aneg finishes with link, it sets the state to RUNNING,
122 * and calls adjust_link
123 * - If autonegotiation did not finish after an arbitrary amount
124 * of time, autonegotiation should be tried again if the PHY
125 * supports "magic" autonegotiation (back to AN)
126 * - If it didn't finish, and no magic_aneg, move to FORCING.
127 *
128 * NOLINK: PHY is up, but not currently plugged in.
129 * - If the timer notes that the link comes back, we move to RUNNING
130 * - config_aneg moves to AN
131 * - phy_stop moves to HALTED
132 *
133 * FORCING: PHY is being configured with forced settings
134 * - if link is up, move to RUNNING
135 * - If link is down, we drop to the next highest setting, and
136 * retry (FORCING) after a timeout
137 * - phy_stop moves to HALTED
138 *
139 * RUNNING: PHY is currently up, running, and possibly sending
140 * and/or receiving packets
141 * - timer will set CHANGELINK if we're polling (this ensures the
142 * link state is polled every other cycle of this state machine,
143 * which makes it every other second)
144 * - irq will set CHANGELINK
145 * - config_aneg will set AN
146 * - phy_stop moves to HALTED
147 *
148 * CHANGELINK: PHY experienced a change in link state
149 * - timer moves to RUNNING if link
150 * - timer moves to NOLINK if the link is down
151 * - phy_stop moves to HALTED
152 *
153 * HALTED: PHY is up, but no polling or interrupts are done. Or
154 * PHY is in an error state.
155 *
156 * - phy_start moves to RESUMING
157 *
158 * RESUMING: PHY was halted, but now wants to run again.
159 * - If we are forcing, or aneg is done, timer moves to RUNNING
160 * - If aneg is not done, timer moves to AN
161 * - phy_stop moves to HALTED
162 */
163enum phy_state {
164 PHY_DOWN=0,
165 PHY_STARTING,
166 PHY_READY,
167 PHY_PENDING,
168 PHY_UP,
169 PHY_AN,
170 PHY_RUNNING,
171 PHY_NOLINK,
172 PHY_FORCING,
173 PHY_CHANGELINK,
174 PHY_HALTED,
175 PHY_RESUMING
176};
177
178/* phy_device: An instance of a PHY
179 *
180 * drv: Pointer to the driver for this PHY instance
181 * bus: Pointer to the bus this PHY is on
182 * dev: driver model device structure for this PHY
183 * phy_id: UID for this device found during discovery
184 * state: state of the PHY for management purposes
185 * dev_flags: Device-specific flags used by the PHY driver.
186 * addr: Bus address of PHY
187 * link_timeout: The number of timer firings to wait before
188 * giving up on the current attempt at acquiring a link
189 * irq: IRQ number of the PHY's interrupt (-1 if none)
190 * phy_timer: The timer for handling the state machine
191 * phy_queue: A work_queue for the interrupt
192 * attached_dev: The attached enet driver's device instance ptr
193 * adjust_link: Callback for the enet controller to respond to
194 * changes in the link state.
195 * adjust_state: Callback for the enet driver to respond to
196 * changes in the state machine.
197 *
198 * speed, duplex, pause, supported, advertising, and
199 * autoneg are used like in mii_if_info
200 *
201 * interrupts currently only supports enabled or disabled,
202 * but could be changed in the future to support enabling
203 * and disabling specific interrupts
204 *
205 * Contains some infrastructure for polling and interrupt
206 * handling, as well as handling shifts in PHY hardware state
207 */
208struct phy_device {
209 /* Information about the PHY type */
210 /* And management functions */
211 struct phy_driver *drv;
212
213 struct mii_bus *bus;
214
215 struct device dev;
216
217 u32 phy_id;
218
219 enum phy_state state;
220
221 u32 dev_flags;
222
223	/* Bus address of the PHY (0-31) */
224 int addr;
225
226 /* forced speed & duplex (no autoneg)
227 * partner speed & duplex & pause (autoneg)
228 */
229 int speed;
230 int duplex;
231 int pause;
232 int asym_pause;
233
234 /* The most recently read link state */
235 int link;
236
237 /* Enabled Interrupts */
238 u32 interrupts;
239
240 /* Union of PHY and Attached devices' supported modes */
241 /* See mii.h for more info */
242 u32 supported;
243 u32 advertising;
244
245 int autoneg;
246
247 int link_timeout;
248
249 /* Interrupt number for this PHY
250 * -1 means no interrupt */
251 int irq;
252
253 /* private data pointer */
254 /* For use by PHYs to maintain extra state */
255 void *priv;
256
257 /* Interrupt and Polling infrastructure */
258 struct work_struct phy_queue;
259 struct timer_list phy_timer;
260
261 spinlock_t lock;
262
263 struct net_device *attached_dev;
264
265 void (*adjust_link)(struct net_device *dev);
266
267 void (*adjust_state)(struct net_device *dev);
268};
269#define to_phy_device(d) container_of(d, struct phy_device, dev)
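
/* Illustrative sketch (assumed driver code, not part of this header): a
 * typical adjust_link callback.  It reads link, speed and duplex from
 * the phy_device above and reprograms the MAC when they change; the
 * mymac_priv structure and the mymac_set_speed/duplex helpers are
 * hypothetical.
 *
 *	static void mymac_adjust_link(struct net_device *dev)
 *	{
 *		struct mymac_priv *priv = netdev_priv(dev);
 *		struct phy_device *phydev = priv->phydev;
 *
 *		if (phydev->link) {
 *			if (phydev->speed != priv->old_speed) {
 *				mymac_set_speed(priv, phydev->speed);
 *				priv->old_speed = phydev->speed;
 *			}
 *			if (phydev->duplex != priv->old_duplex) {
 *				mymac_set_duplex(priv, phydev->duplex);
 *				priv->old_duplex = phydev->duplex;
 *			}
 *		}
 *
 *		if (phydev->link != priv->old_link) {
 *			phy_print_status(phydev);
 *			priv->old_link = phydev->link;
 *		}
 *	}
 */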
270
271/* struct phy_driver: Driver structure for a particular PHY type
272 *
273 * phy_id: The result of reading the UID registers of this PHY
274 * type, and ANDing them with the phy_id_mask. This driver
275 * only works for PHYs with IDs which match this field
276 * name: The friendly name of this PHY type
277 * phy_id_mask: Defines the important bits of the phy_id
278 * features: A list of features (speed, duplex, etc) supported
279 * by this PHY
280 * flags: A bitfield defining certain other features this PHY
281 * supports (like interrupts)
282 *
283 * The drivers must implement config_aneg and read_status. All
284 * other functions are optional. Note that none of these
285 * functions should be called from interrupt context. The goal is
286 * for the bus read/write functions to be able to block when the
287 * bus transaction is happening, and be freed up by an interrupt
288 * (The MPC85xx has this ability, though it is not currently
289 * supported in the driver).
290 */
291struct phy_driver {
292 u32 phy_id;
293 char *name;
294 unsigned int phy_id_mask;
295 u32 features;
296 u32 flags;
297
298 /* Called to initialize the PHY,
299 * including after a reset */
300 int (*config_init)(struct phy_device *phydev);
301
302 /* Called during discovery. Used to set
303 * up device-specific structures, if any */
304 int (*probe)(struct phy_device *phydev);
305
306 /* PHY Power Management */
307 int (*suspend)(struct phy_device *phydev);
308 int (*resume)(struct phy_device *phydev);
309
310 /* Configures the advertisement and resets
311 * autonegotiation if phydev->autoneg is on,
312 * forces the speed to the current settings in phydev
313 * if phydev->autoneg is off */
314 int (*config_aneg)(struct phy_device *phydev);
315
316 /* Determines the negotiated speed and duplex */
317 int (*read_status)(struct phy_device *phydev);
318
319 /* Clears any pending interrupts */
320 int (*ack_interrupt)(struct phy_device *phydev);
321
322 /* Enables or disables interrupts */
323 int (*config_intr)(struct phy_device *phydev);
324
325	/* Frees any memory if needed */
326 void (*remove)(struct phy_device *phydev);
327
328 struct device_driver driver;
329};
330#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
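
/* Illustrative sketch (not taken from this patch): a minimal phy_driver
 * for a hypothetical PHY with ID 0x01234560.  It leaves probe and the
 * interrupt hooks unset and relies on the generic helpers declared
 * below; PHY_BASIC_FEATURES is assumed to be the feature mask defined
 * earlier in this header.
 *
 *	static struct phy_driver example_phy_driver = {
 *		.phy_id		= 0x01234560,
 *		.phy_id_mask	= 0xfffffff0,
 *		.name		= "Example 10/100 PHY",
 *		.features	= PHY_BASIC_FEATURES,
 *		.config_aneg	= genphy_config_aneg,
 *		.read_status	= genphy_read_status,
 *		.driver		= { .owner = THIS_MODULE },
 *	};
 */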
331
332int phy_read(struct phy_device *phydev, u16 regnum);
333int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
334struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
335int phy_clear_interrupt(struct phy_device *phydev);
336int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
337struct phy_device * phy_attach(struct net_device *dev,
338 const char *phy_id, u32 flags);
339struct phy_device * phy_connect(struct net_device *dev, const char *phy_id,
340 void (*handler)(struct net_device *), u32 flags);
341void phy_disconnect(struct phy_device *phydev);
342void phy_detach(struct phy_device *phydev);
343void phy_start(struct phy_device *phydev);
344void phy_stop(struct phy_device *phydev);
345int phy_start_aneg(struct phy_device *phydev);
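
/* Illustrative sketch (register 0x12 and its bit layout are invented
 * for the example): a PHY driver's config_intr callback built on
 * phy_write() above.  PHY_INTERRUPT_ENABLED is assumed to be the
 * constant defined earlier in this header; a real driver would use the
 * vendor's documented interrupt-enable register instead.
 *
 *	#define MII_EXAMPLE_IER		0x12
 *	#define MII_EXAMPLE_IER_ALL	0xffff
 *
 *	static int example_config_intr(struct phy_device *phydev)
 *	{
 *		u16 val = 0;
 *
 *		if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
 *			val = MII_EXAMPLE_IER_ALL;
 *
 *		return phy_write(phydev, MII_EXAMPLE_IER, val);
 *	}
 */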
346
347int mdiobus_register(struct mii_bus *bus);
348void mdiobus_unregister(struct mii_bus *bus);
349void phy_sanitize_settings(struct phy_device *phydev);
350int phy_stop_interrupts(struct phy_device *phydev);
351
352static inline int phy_read_status(struct phy_device *phydev) {
353 return phydev->drv->read_status(phydev);
354}
355
356int genphy_config_advert(struct phy_device *phydev);
357int genphy_setup_forced(struct phy_device *phydev);
358int genphy_restart_aneg(struct phy_device *phydev);
359int genphy_config_aneg(struct phy_device *phydev);
360int genphy_update_link(struct phy_device *phydev);
361int genphy_read_status(struct phy_device *phydev);
362void phy_driver_unregister(struct phy_driver *drv);
363int phy_driver_register(struct phy_driver *new_driver);
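
/* Illustrative sketch (module boilerplate for the hypothetical
 * example_phy_driver shown above): registering the driver with the PHY
 * layer at module load and removing it at unload.
 *
 *	static int __init example_phy_init(void)
 *	{
 *		return phy_driver_register(&example_phy_driver);
 *	}
 *
 *	static void __exit example_phy_exit(void)
 *	{
 *		phy_driver_unregister(&example_phy_driver);
 *	}
 *
 *	module_init(example_phy_init);
 *	module_exit(example_phy_exit);
 */
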
364void phy_prepare_link(struct phy_device *phydev,
365 void (*adjust_link)(struct net_device *));
366void phy_start_machine(struct phy_device *phydev,
367 void (*handler)(struct net_device *));
368void phy_stop_machine(struct phy_device *phydev);
369int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
370int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
371int phy_mii_ioctl(struct phy_device *phydev,
372 struct mii_ioctl_data *mii_data, int cmd);
373int phy_start_interrupts(struct phy_device *phydev);
374void phy_print_status(struct phy_device *phydev);
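
/* Illustrative sketch (assumed MAC driver glue): forwarding ethtool and
 * MII ioctl requests to the PHY layer with the helpers above.  The
 * mymac_* names are placeholders, and if_mii() from linux/mii.h is
 * assumed for the ifreq conversion.
 *
 *	static int mymac_get_settings(struct net_device *dev,
 *				      struct ethtool_cmd *cmd)
 *	{
 *		struct mymac_priv *priv = netdev_priv(dev);
 *
 *		return phy_ethtool_gset(priv->phydev, cmd);
 *	}
 *
 *	static int mymac_set_settings(struct net_device *dev,
 *				      struct ethtool_cmd *cmd)
 *	{
 *		struct mymac_priv *priv = netdev_priv(dev);
 *
 *		return phy_ethtool_sset(priv->phydev, cmd);
 *	}
 *
 *	static int mymac_ioctl(struct net_device *dev, struct ifreq *rq,
 *			       int cmd)
 *	{
 *		struct mymac_priv *priv = netdev_priv(dev);
 *
 *		return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
 *	}
 */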
375
376extern struct bus_type mdio_bus_type;
377#endif /* __PHY_H */