Diffstat:
-rw-r--r--  Documentation/networking/ixgb.txt   |  419
-rw-r--r--  drivers/net/Kconfig                 |   59
-rw-r--r--  drivers/net/Makefile                |    1
-rw-r--r--  drivers/net/chelsio/cxgb2.c         |    2
-rw-r--r--  drivers/net/chelsio/sge.c           |   70
-rw-r--r--  drivers/net/cxgb3/cxgb3_ctl_defs.h  |    5
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c   |   11
-rw-r--r--  drivers/net/cxgb3/regs.h            |   10
-rw-r--r--  drivers/net/cxgb3/t3_cpl.h          |   40
-rw-r--r--  drivers/net/cxgb3/t3cdev.h          |    1
-rw-r--r--  drivers/net/gianfar.c               |   76
-rw-r--r--  drivers/net/gianfar.h               |   11
-rw-r--r--  drivers/net/igb/e1000_82575.c       |  208
-rw-r--r--  drivers/net/igb/e1000_82575.h       |   22
-rw-r--r--  drivers/net/igb/e1000_defines.h     |   10
-rw-r--r--  drivers/net/igb/e1000_hw.h          |    8
-rw-r--r--  drivers/net/igb/e1000_mac.c         |    3
-rw-r--r--  drivers/net/igb/e1000_mac.h         |    1
-rw-r--r--  drivers/net/igb/e1000_regs.h        |    9
-rw-r--r--  drivers/net/igb/igb.h               |   47
-rw-r--r--  drivers/net/igb/igb_ethtool.c       |  203
-rw-r--r--  drivers/net/igb/igb_main.c          | 1032
-rw-r--r--  drivers/net/ixgb/Makefile           |    2
-rw-r--r--  drivers/net/ixgb/ixgb.h             |   21
-rw-r--r--  drivers/net/ixgb/ixgb_ee.c          |   28
-rw-r--r--  drivers/net/ixgb/ixgb_ee.h          |   12
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c     |  120
-rw-r--r--  drivers/net/ixgb/ixgb_hw.c          |   40
-rw-r--r--  drivers/net/ixgb/ixgb_hw.h          |    2
-rw-r--r--  drivers/net/ixgb/ixgb_ids.h         |   10
-rw-r--r--  drivers/net/ixgb/ixgb_main.c        |  480
-rw-r--r--  drivers/net/ixgb/ixgb_osdep.h       |    4
-rw-r--r--  drivers/net/ixgb/ixgb_param.c       |   44
-rw-r--r--  drivers/net/phy/mdio-bitbang.c      |    2
-rw-r--r--  drivers/net/s2io.c                  |   90
-rw-r--r--  drivers/net/s2io.h                  |    1
-rw-r--r--  drivers/net/saa9730.c               | 1139
-rw-r--r--  drivers/net/saa9730.h               |  384
-rw-r--r--  drivers/net/ucc_geth.c              |   29
-rw-r--r--  drivers/net/via-rhine.c             |   27
-rw-r--r--  drivers/net/via-velocity.c          |  158
-rw-r--r--  drivers/net/via-velocity.h          |    5
-rw-r--r--  drivers/net/virtio_net.c            |    3
-rw-r--r--  include/linux/netdevice.h           |   13

44 files changed, 2071 insertions(+), 2791 deletions(-)
diff --git a/Documentation/networking/ixgb.txt b/Documentation/networking/ixgb.txt
index 7c98277777eb..a0d0ffb5e584 100644
--- a/Documentation/networking/ixgb.txt
+++ b/Documentation/networking/ixgb.txt
@@ -1,7 +1,7 @@
1Linux* Base Driver for the Intel(R) PRO/10GbE Family of Adapters 1Linux Base Driver for 10 Gigabit Intel(R) Network Connection
2================================================================ 2=============================================================
3 3
4November 17, 2004 4October 9, 2007
5 5
6 6
7Contents 7Contents
@@ -9,94 +9,151 @@ Contents
9 9
10- In This Release 10- In This Release
11- Identifying Your Adapter 11- Identifying Your Adapter
12- Building and Installation
12- Command Line Parameters 13- Command Line Parameters
13- Improving Performance 14- Improving Performance
15- Additional Configurations
16- Known Issues/Troubleshooting
14- Support 17- Support
15 18
16 19
20
17In This Release 21In This Release
18=============== 22===============
19 23
20This file describes the Linux* Base Driver for the Intel(R) PRO/10GbE Family 24This file describes the ixgb Linux Base Driver for the 10 Gigabit Intel(R)
21of Adapters, version 1.0.x. 25Network Connection. This driver includes support for Itanium(R)2-based
26systems.
27
28For questions related to hardware requirements, refer to the documentation
29supplied with your 10 Gigabit adapter. All hardware requirements listed apply
30to use with Linux.
31
32The following features are available in this kernel:
33 - Native VLANs
34 - Channel Bonding (teaming)
35 - SNMP
36
37Channel Bonding documentation can be found in the Linux kernel source:
38/Documentation/networking/bonding.txt
39
40The driver information previously displayed in the /proc filesystem is not
41supported in this release. Alternatively, you can use ethtool (version 1.6
42or later), lspci, and ifconfig to obtain the same information.
43
44Instructions on updating ethtool can be found in the section "Additional
45Configurations" later in this document.
22 46
23For questions related to hardware requirements, refer to the documentation
24supplied with your Intel PRO/10GbE adapter. All hardware requirements listed
25apply to use with Linux.
26 47
27Identifying Your Adapter 48Identifying Your Adapter
28======================== 49========================
29 50
30To verify your Intel adapter is supported, find the board ID number on the 51The following Intel network adapters are compatible with the drivers in this
31adapter. Look for a label that has a barcode and a number in the format 52release:
32A12345-001. 53
54Controller Adapter Name Physical Layer
55---------- ------------ --------------
5682597EX Intel(R) PRO/10GbE LR/SR/CX4 10G Base-LR (1310 nm optical fiber)
57 Server Adapters 10G Base-SR (850 nm optical fiber)
58 10G Base-CX4 (twin-axial copper cabling)
59
60For more information on how to identify your adapter, go to the Adapter &
61Driver ID Guide at:
62
63 http://support.intel.com/support/network/sb/CS-012904.htm
64
65
66Building and Installation
67=========================
68
69Select M for "Intel(R) PRO/10GbE support" located at:
70 Location:
71 -> Device Drivers
72 -> Network device support (NETDEVICES [=y])
73 -> Ethernet (10000 Mbit) (NETDEV_10000 [=y])
741. make modules && make modules_install
75
762. Load the module:
77
78    modprobe ixgb <parameter>=<value>
79
80 The insmod command can be used if the full
81 path to the driver module is specified. For example:
82
83 insmod /lib/modules/<KERNEL VERSION>/kernel/drivers/net/ixgb/ixgb.ko
84
85 With 2.6-based kernels, also make sure that older ixgb drivers are
86 removed from the kernel before loading the new module:
33 87
34Use the above information and the Adapter & Driver ID Guide at: 88 rmmod ixgb; modprobe ixgb
35 89
36 http://support.intel.com/support/network/adapter/pro100/21397.htm 903. Assign an IP address to the interface by entering the following, where
91 x is the interface number:
37 92
38For the latest Intel network drivers for Linux, go to: 93 ifconfig ethx <IP_address>
94
954. Verify that the interface works. Enter the following, where <IP_address>
96 is the IP address for another machine on the same subnet as the interface
97 that is being tested:
98
99 ping <IP_address>
39 100
40 http://downloadfinder.intel.com/scripts-df/support_intel.asp
41 101
42Command Line Parameters 102Command Line Parameters
43======================= 103=======================
44 104
45If the driver is built as a module, the following optional parameters are 105If the driver is built as a module, the following optional parameters are
46used by entering them on the command line with the modprobe or insmod command 106used by entering them on the command line with the modprobe command using
47using this syntax: 107this syntax:
48 108
49 modprobe ixgb [<option>=<VAL1>,<VAL2>,...] 109 modprobe ixgb [<option>=<VAL1>,<VAL2>,...]
50 110
51 insmod ixgb [<option>=<VAL1>,<VAL2>,...] 111For example, with two 10GbE PCI adapters, entering:
52 112
53For example, with two PRO/10GbE PCI adapters, entering: 113 modprobe ixgb TxDescriptors=80,128
54 114
55 insmod ixgb TxDescriptors=80,128 115loads the ixgb driver with 80 TX resources for the first adapter and 128 TX
56
57loads the ixgb driver with 80 TX resources for the first adapter and 128 TX
58resources for the second adapter. 116resources for the second adapter.
59 117
60The default value for each parameter is generally the recommended setting, 118The default value for each parameter is generally the recommended setting,
61unless otherwise noted. Also, if the driver is statically built into the 119unless otherwise noted.
62kernel, the driver is loaded with the default values for all the parameters.
63Ethtool can be used to change some of the parameters at runtime.
64 120
65FlowControl 121FlowControl
66Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx) 122Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
67Default: Read from the EEPROM 123Default: Read from the EEPROM
68 If EEPROM is not detected, default is 3 124 If EEPROM is not detected, default is 1
69 This parameter controls the automatic generation (Tx) and response (Rx) to 125 This parameter controls the automatic generation (Tx) and response (Rx) to
70 Ethernet PAUSE frames. 126 Ethernet PAUSE frames. There are hardware bugs associated with enabling
127 Tx flow control so beware.
71 128
72RxDescriptors 129RxDescriptors
73Valid Range: 64-512 130Valid Range: 64-512
74Default Value: 512 131Default Value: 512
75 This value is the number of receive descriptors allocated by the driver. 132 This value is the number of receive descriptors allocated by the driver.
76 Increasing this value allows the driver to buffer more incoming packets. 133 Increasing this value allows the driver to buffer more incoming packets.
77 Each descriptor is 16 bytes. A receive buffer is also allocated for 134 Each descriptor is 16 bytes. A receive buffer is also allocated for
78 each descriptor and can be either 2048, 4056, 8192, or 16384 bytes, 135 each descriptor and can be either 2048, 4056, 8192, or 16384 bytes,
79 depending on the MTU setting. When the MTU size is 1500 or less, the 136 depending on the MTU setting. When the MTU size is 1500 or less, the
80 receive buffer size is 2048 bytes. When the MTU is greater than 1500 the 137 receive buffer size is 2048 bytes. When the MTU is greater than 1500 the
81 receive buffer size will be either 4056, 8192, or 16384 bytes. The 138 receive buffer size will be either 4056, 8192, or 16384 bytes. The
82 maximum MTU size is 16114. 139 maximum MTU size is 16114.
83 140
84RxIntDelay 141RxIntDelay
85Valid Range: 0-65535 (0=off) 142Valid Range: 0-65535 (0=off)
86Default Value: 6 143Default Value: 72
87 This value delays the generation of receive interrupts in units of 144 This value delays the generation of receive interrupts in units of
88 0.8192 microseconds. Receive interrupt reduction can improve CPU 145 0.8192 microseconds. Receive interrupt reduction can improve CPU
89 efficiency if properly tuned for specific network traffic. Increasing 146 efficiency if properly tuned for specific network traffic. Increasing
90 this value adds extra latency to frame reception and can end up 147 this value adds extra latency to frame reception and can end up
91 decreasing the throughput of TCP traffic. If the system is reporting 148 decreasing the throughput of TCP traffic. If the system is reporting
92 dropped receives, this value may be set too high, causing the driver to 149 dropped receives, this value may be set too high, causing the driver to
93 run out of available receive descriptors. 150 run out of available receive descriptors.
94 151
95TxDescriptors 152TxDescriptors
96Valid Range: 64-4096 153Valid Range: 64-4096
97Default Value: 256 154Default Value: 256
98 This value is the number of transmit descriptors allocated by the driver. 155 This value is the number of transmit descriptors allocated by the driver.
99 Increasing this value allows the driver to queue more transmits. Each 156 Increasing this value allows the driver to queue more transmits. Each
100 descriptor is 16 bytes. 157 descriptor is 16 bytes.
101 158
102XsumRX 159XsumRX
@@ -105,51 +162,49 @@ Default Value: 1
105 A value of '1' indicates that the driver should enable IP checksum 162 A value of '1' indicates that the driver should enable IP checksum
106 offload for received packets (both UDP and TCP) to the adapter hardware. 163 offload for received packets (both UDP and TCP) to the adapter hardware.
107 164
108XsumTX
109Valid Range: 0-1
110Default Value: 1
111 A value of '1' indicates that the driver should enable IP checksum
112 offload for transmitted packets (both UDP and TCP) to the adapter
113 hardware.
114 165
115Improving Performance 166Improving Performance
116===================== 167=====================
117 168
118With the Intel PRO/10 GbE adapter, the default Linux configuration will very 169With the 10 Gigabit server adapters, the default Linux configuration will
119likely limit the total available throughput artificially. There is a set of 170very likely limit the total available throughput artificially. There is a set
120things that when applied together increase the ability of Linux to transmit 171of configuration changes that, when applied together, will increase the ability
121and receive data. The following enhancements were originally acquired from 172of Linux to transmit and receive data. The following enhancements were
122settings published at http://www.spec.org/web99 for various submitted results 173originally acquired from settings published at http://www.spec.org/web99/ for
123using Linux. 174various submitted results using Linux.
124 175
125NOTE: These changes are only suggestions, and serve as a starting point for 176NOTE: These changes are only suggestions, and serve as a starting point for
126tuning your network performance. 177 tuning your network performance.
127 178
128The changes are made in three major ways, listed in order of greatest effect: 179The changes are made in three major ways, listed in order of greatest effect:
129- Use ifconfig to modify the mtu (maximum transmission unit) and the txqueuelen 180- Use ifconfig to modify the mtu (maximum transmission unit) and the txqueuelen
130 parameter. 181 parameter.
131- Use sysctl to modify /proc parameters (essentially kernel tuning) 182- Use sysctl to modify /proc parameters (essentially kernel tuning)
132- Use setpci to modify the MMRBC field in PCI-X configuration space to increase 183- Use setpci to modify the MMRBC field in PCI-X configuration space to increase
133 transmit burst lengths on the bus. 184 transmit burst lengths on the bus.
134 185
135NOTE: setpci modifies the adapter's configuration registers to allow it to read 186NOTE: setpci modifies the adapter's configuration registers to allow it to read
136up to 4k bytes at a time (for transmits). However, for some systems the 187up to 4k bytes at a time (for transmits). However, for some systems the
137behavior after modifying this register may be undefined (possibly errors of some 188behavior after modifying this register may be undefined (possibly errors of
138kind). A power-cycle, hard reset or explicitly setting the e6 register back to 189some kind). A power-cycle, hard reset or explicitly setting the e6 register
13922 (setpci -d 8086:1048 e6.b=22) may be required to get back to a stable 190back to 22 (setpci -d 8086:1a48 e6.b=22) may be required to get back to a
140configuration. 191stable configuration.
141 192
142- COPY these lines and paste them into ixgb_perf.sh: 193- COPY these lines and paste them into ixgb_perf.sh:
143#!/bin/bash 194#!/bin/bash
144echo "configuring network performance , edit this file to change the interface" 195echo "configuring network performance , edit this file to change the interface
196or device ID of 10GbE card"
145# set mmrbc to 4k reads, modify only Intel 10GbE device IDs 197# set mmrbc to 4k reads, modify only Intel 10GbE device IDs
146setpci -d 8086:1048 e6.b=2e 198# replace 1a48 with appropriate 10GbE device's ID installed on the system,
147# set the MTU (max transmission unit) - it requires your switch and clients to change too! 199# if needed.
200setpci -d 8086:1a48 e6.b=2e
201# set the MTU (max transmission unit) - it requires your switch and clients
202# to change as well.
148# set the txqueuelen 203# set the txqueuelen
149# your ixgb adapter should be loaded as eth1 for this to work, change if needed 204# your ixgb adapter should be loaded as eth1 for this to work, change if needed
150ifconfig eth1 mtu 9000 txqueuelen 1000 up 205ifconfig eth1 mtu 9000 txqueuelen 1000 up
151# call the sysctl utility to modify /proc/sys entries 206# call the sysctl utility to modify /proc/sys entries
152sysctl -p ./sysctl_ixgb.conf 207sysctl -p ./sysctl_ixgb.conf
153- END ixgb_perf.sh 208- END ixgb_perf.sh
154 209
155- COPY these lines and paste them into sysctl_ixgb.conf: 210- COPY these lines and paste them into sysctl_ixgb.conf:
@@ -159,54 +214,220 @@ sysctl -p ./sysctl_ixgb.conf
159# several network benchmark tests, your mileage may vary 214# several network benchmark tests, your mileage may vary
160 215
161### IPV4 specific settings 216### IPV4 specific settings
162net.ipv4.tcp_timestamps = 0 # turns TCP timestamp support off, default 1, reduces CPU use 217# turn TCP timestamp support off, default 1, reduces CPU use
163net.ipv4.tcp_sack = 0 # turn SACK support off, default on 218net.ipv4.tcp_timestamps = 0
164# on systems with a VERY fast bus -> memory interface this is the big gainer 219# turn SACK support off, default on
165net.ipv4.tcp_rmem = 10000000 10000000 10000000 # sets min/default/max TCP read buffer, default 4096 87380 174760 220# on systems with a VERY fast bus -> memory interface this is the big gainer
166net.ipv4.tcp_wmem = 10000000 10000000 10000000 # sets min/pressure/max TCP write buffer, default 4096 16384 131072 221net.ipv4.tcp_sack = 0
167net.ipv4.tcp_mem = 10000000 10000000 10000000 # sets min/pressure/max TCP buffer space, default 31744 32256 32768 222# set min/default/max TCP read buffer, default 4096 87380 174760
223net.ipv4.tcp_rmem = 10000000 10000000 10000000
224# set min/pressure/max TCP write buffer, default 4096 16384 131072
225net.ipv4.tcp_wmem = 10000000 10000000 10000000
226# set min/pressure/max TCP buffer space, default 31744 32256 32768
227net.ipv4.tcp_mem = 10000000 10000000 10000000
168 228
169### CORE settings (mostly for socket and UDP effect) 229### CORE settings (mostly for socket and UDP effect)
170net.core.rmem_max = 524287 # maximum receive socket buffer size, default 131071 230# set maximum receive socket buffer size, default 131071
171net.core.wmem_max = 524287 # maximum send socket buffer size, default 131071 231net.core.rmem_max = 524287
172net.core.rmem_default = 524287 # default receive socket buffer size, default 65535 232# set maximum send socket buffer size, default 131071
173net.core.wmem_default = 524287 # default send socket buffer size, default 65535 233net.core.wmem_max = 524287
174net.core.optmem_max = 524287 # maximum amount of option memory buffers, default 10240 234# set default receive socket buffer size, default 65535
175net.core.netdev_max_backlog = 300000 # number of unprocessed input packets before kernel starts dropping them, default 300 235net.core.rmem_default = 524287
236# set default send socket buffer size, default 65535
237net.core.wmem_default = 524287
238# set maximum amount of option memory buffers, default 10240
239net.core.optmem_max = 524287
240# set number of unprocessed input packets before kernel starts dropping them; default 300
241net.core.netdev_max_backlog = 300000
176- END sysctl_ixgb.conf 242- END sysctl_ixgb.conf
177 243
178Edit the ixgb_perf.sh script if necessary to change eth1 to whatever interface 244Edit the ixgb_perf.sh script if necessary to change eth1 to whatever interface
179your ixgb driver is using. 245your ixgb driver is using and/or replace '1a48' with appropriate 10GbE device's
246ID installed on the system.
180 247
181NOTE: Unless these scripts are added to the boot process, these changes will 248NOTE: Unless these scripts are added to the boot process, these changes will
182only last until the next system reboot. 249 only last until the next system reboot.
183 250
184 251
185Resolving Slow UDP Traffic 252Resolving Slow UDP Traffic
186-------------------------- 253--------------------------
254If your server does not seem to be able to receive UDP traffic as fast as it
255can receive TCP traffic, it could be because Linux, by default, does not set
256the network stack buffers as large as they need to be to support high UDP
257transfer rates. One way to alleviate this problem is to allow more memory to
258be used by the IP stack to store incoming data.
187 259
188If your server does not seem to be able to receive UDP traffic as fast as it 260For instance, use the commands:
189can receive TCP traffic, it could be because Linux, by default, does not set
190the network stack buffers as large as they need to be to support high UDP
191transfer rates. One way to alleviate this problem is to allow more memory to
192be used by the IP stack to store incoming data.
193
194For instance, use the commands:
195 sysctl -w net.core.rmem_max=262143 261 sysctl -w net.core.rmem_max=262143
196and 262and
197 sysctl -w net.core.rmem_default=262143 263 sysctl -w net.core.rmem_default=262143
198to increase the read buffer memory max and default to 262143 (256k - 1) from 264to increase the read buffer memory max and default to 262143 (256k - 1) from
199defaults of max=131071 (128k - 1) and default=65535 (64k - 1). These variables 265defaults of max=131071 (128k - 1) and default=65535 (64k - 1). These variables
200will increase the amount of memory used by the network stack for receives, and 266will increase the amount of memory used by the network stack for receives, and
201can be increased significantly more if necessary for your application. 267can be increased significantly more if necessary for your application.
202 268
269
270Additional Configurations
271=========================
272
273 Configuring the Driver on Different Distributions
274 -------------------------------------------------
275 Configuring a network driver to load properly when the system is started is
276 distribution dependent. Typically, the configuration process involves adding
277 an alias line to /etc/modprobe.conf as well as editing other system startup
278 scripts and/or configuration files. Many popular Linux distributions ship
279 with tools to make these changes for you. To learn the proper way to
280 configure a network device for your system, refer to your distribution
281 documentation. If during this process you are asked for the driver or module
282 name, the name for the Linux Base Driver for the Intel 10GbE Family of
283 Adapters is ixgb.
284
285 Viewing Link Messages
286 ---------------------
287 Link messages will not be displayed to the console if the distribution is
288 restricting system messages. In order to see network driver link messages on
289 your console, set the console log level to eight by entering the following:
290
291 dmesg -n 8
292
293 NOTE: This setting is not saved across reboots.
294
295
296 Jumbo Frames
297 ------------
298 The driver supports Jumbo Frames for all adapters. Jumbo Frames support is
299 enabled by changing the MTU to a value larger than the default of 1500.
300 The maximum value for the MTU is 16114. Use the ifconfig command to
301 increase the MTU size. For example:
302
303 ifconfig ethx mtu 9000 up
304
305 The maximum MTU setting for Jumbo Frames is 16114. This value coincides
306 with the maximum Jumbo Frames size of 16128.
307
308
309 Ethtool
310 -------
311 The driver utilizes the ethtool interface for driver configuration and
312 diagnostics, as well as displaying statistical information. Ethtool
313 version 1.6 or later is required for this functionality.
314
315 The latest release of ethtool can be found at:
316 http://sourceforge.net/projects/gkernel
317
318 NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support
319 for a more complete ethtool feature set can be enabled by upgrading
320 to the latest version.
321
322
323 NAPI
324 ----
325
326 NAPI (Rx polling mode) is supported in the ixgb driver. NAPI is enabled
327 or disabled based on the configuration of the kernel. See CONFIG_IXGB_NAPI.
328
329 See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI.
330
331
332Known Issues/Troubleshooting
333============================
334
335 NOTE: After installing the driver, if your Intel Network Connection is not
336 working, verify in the "In This Release" section of the readme that you have
337 installed the correct driver.
338
339 Intel(R) PRO/10GbE CX4 Server Adapter Cable Interoperability Issue with
340 Fujitsu XENPAK Module in SmartBits Chassis
341 ---------------------------------------------------------------------
342 Excessive CRC errors may be observed if the Intel(R) PRO/10GbE CX4
343 Server adapter is connected to a Fujitsu XENPAK CX4 module in a SmartBits
344 chassis using 15 m/24AWG cable assemblies manufactured by Fujitsu or Leoni.
345 The CRC errors may be received either by the Intel(R) PRO/10GbE CX4
346 Server adapter or the SmartBits. If this situation occurs, using a different
347 cable assembly may resolve the issue.
348
349 CX4 Server Adapter Cable Interoperability Issues with HP Procurve 3400cl
350 Switch Port
351 ------------------------------------------------------------------------
352 Excessive CRC errors may be observed if the Intel(R) PRO/10GbE CX4 Server
353 adapter is connected to an HP Procurve 3400cl switch port using short cables
354 (1 m or shorter). If this situation occurs, using a longer cable may resolve
355 the issue.
356
357 Excessive CRC errors may be observed using Fujitsu 24AWG cable assemblies that
358 are 10 m or longer or when using a Leoni 15 m/24AWG cable assembly. The CRC
359 errors may be received either by the CX4 Server adapter or at the switch. If
360 this situation occurs, using a different cable assembly may resolve the issue.
361
362
363 Jumbo Frames System Requirement
364 -------------------------------
365 Memory allocation failures have been observed on Linux systems with 64 MB
366 of RAM or less that are running Jumbo Frames. If you are using Jumbo
367 Frames, your system may require more than the advertised minimum
368 requirement of 64 MB of system memory.
369
370
371 Performance Degradation with Jumbo Frames
372 -----------------------------------------
373 Degradation in throughput performance may be observed in some Jumbo frames
374 environments. If this is observed, increasing the application's socket buffer
375 size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
376 See the specific application manual and /usr/src/linux*/Documentation/
377 networking/ip-sysctl.txt for more details.
378
379
380 Allocating Rx Buffers when Using Jumbo Frames
381 ---------------------------------------------
382 Allocating Rx buffers when using Jumbo Frames on 2.6.x kernels may fail if
383 the available memory is heavily fragmented. This issue may be seen with PCI-X
384 adapters or with packet split disabled. This can be reduced or eliminated
385 by changing the amount of available memory for receive buffer allocation, by
386 increasing /proc/sys/vm/min_free_kbytes.
387
388
389 Multiple Interfaces on Same Ethernet Broadcast Network
390 ------------------------------------------------------
391 Due to the default ARP behavior on Linux, it is not possible to have
392 one system on two IP networks in the same Ethernet broadcast domain
393 (non-partitioned switch) behave as expected. All Ethernet interfaces
394 will respond to IP traffic for any IP address assigned to the system.
395 This results in unbalanced receive traffic.
396
397 If you have multiple interfaces in a server, do either of the following:
398
399 - Turn on ARP filtering by entering:
400 echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
401
402 - Install the interfaces in separate broadcast domains - either in
403 different switches or in a switch partitioned to VLANs.
404
405
406 UDP Stress Test Dropped Packet Issue
407 --------------------------------------
408 Under a small-packet UDP stress test with the 10GbE driver, the Linux system
409 may drop UDP packets due to the fullness of socket buffers. You may want
410 to change the driver's Flow Control variables to the minimum value for
411 controlling packet reception.
412
413
414 Tx Hangs Possible Under Stress
415 ------------------------------
416 Under stress conditions, if TX hangs occur, turning off TSO
417 "ethtool -K eth0 tso off" may resolve the problem.
418
419
203Support 420Support
204======= 421=======
205 422
206For general information and support, go to the Intel support website at: 423For general information, go to the Intel support website at:
207 424
208 http://support.intel.com 425 http://support.intel.com
209 426
427or the Intel Wired Networking project hosted by Sourceforge at:
428
429 http://sourceforge.net/projects/e1000
430
210If an issue is identified with the released source code on the supported 431If an issue is identified with the released source code on the supported
211kernel with a supported adapter, email the specific information related to 432kernel with a supported adapter, email the specific information related
212the issue to linux.nics@intel.com. 433to the issue to e1000-devel@lists.sf.net.
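
[Editor's note] As an aside on the comma-separated per-adapter syntax documented above (e.g. TxDescriptors=80,128): the sketch below shows, in hedged form, how a driver can accept such values with module_param_array(). MAX_NIC, example_tx_ring_size() and the -1 "unset" sentinel are invented for illustration; the real handling lives in drivers/net/ixgb/ixgb_param.c.

#include <linux/module.h>
#include <linux/moduleparam.h>

#define MAX_NIC 8	/* hypothetical upper bound on supported adapters */

/* -1 means "not given on the command line, keep the driver default" */
static int TxDescriptors[MAX_NIC] = { [0 ... MAX_NIC - 1] = -1 };
static unsigned int num_TxDescriptors;
module_param_array(TxDescriptors, int, &num_TxDescriptors, 0);
MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors per adapter");

/* later, while probing adapter number 'bd': */
static int example_tx_ring_size(unsigned int bd)
{
	return (bd < num_TxDescriptors && TxDescriptors[bd] >= 0) ?
		TxDescriptors[bd] : 256;	/* 256 = documented default */
}

With something like this in place, "modprobe ixgb TxDescriptors=80,128" yields 80 descriptors for the first adapter, 128 for the second, and the default for any further ones.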
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4675c1bd6fb9..9490cb172330 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1694,26 +1694,6 @@ config VIA_RHINE_MMIO
1694 1694
1695 If unsure, say Y. 1695 If unsure, say Y.
1696 1696
1697config VIA_RHINE_NAPI
1698 bool "Use Rx Polling (NAPI)"
1699 depends on VIA_RHINE
1700 help
1701 NAPI is a new driver API designed to reduce CPU and interrupt load
1702 when the driver is receiving lots of packets from the card.
1703
1704 If your estimated Rx load is 10kpps or more, or if the card will be
1705 deployed on potentially unfriendly networks (e.g. in a firewall),
1706 then say Y here.
1707
1708config LAN_SAA9730
1709 bool "Philips SAA9730 Ethernet support"
1710 depends on NET_PCI && PCI && MIPS_ATLAS
1711 help
1712 The SAA9730 is a combined multimedia and peripheral controller used
1713 in thin clients, Internet access terminals, and diskless
1714 workstations.
1715 See <http://www.semiconductors.philips.com/pip/SAA9730_flyer_1>.
1716
1717config SC92031 1697config SC92031
1718 tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)" 1698 tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
1719 depends on NET_PCI && PCI && EXPERIMENTAL 1699 depends on NET_PCI && PCI && EXPERIMENTAL
@@ -2029,6 +2009,15 @@ config IGB
2029 To compile this driver as a module, choose M here. The module 2009 To compile this driver as a module, choose M here. The module
2030 will be called igb. 2010 will be called igb.
2031 2011
2012config IGB_LRO
2013 bool "Use software LRO"
2014 depends on IGB && INET
2015 select INET_LRO
2016 ---help---
2017 Say Y here if you want to use large receive offload.
2018
2019 If in doubt, say N.
2020
2032source "drivers/net/ixp2000/Kconfig" 2021source "drivers/net/ixp2000/Kconfig"
2033 2022
2034config MYRI_SBUS 2023config MYRI_SBUS
@@ -2273,10 +2262,6 @@ config GIANFAR
2273 This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, 2262 This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
2274 and MPC86xx family of chips, and the FEC on the 8540. 2263 and MPC86xx family of chips, and the FEC on the 8540.
2275 2264
2276config GFAR_NAPI
2277 bool "Use Rx Polling (NAPI)"
2278 depends on GIANFAR
2279
2280config UCC_GETH 2265config UCC_GETH
2281 tristate "Freescale QE Gigabit Ethernet" 2266 tristate "Freescale QE Gigabit Ethernet"
2282 depends on QUICC_ENGINE 2267 depends on QUICC_ENGINE
@@ -2285,10 +2270,6 @@ config UCC_GETH
2285 This driver supports the Gigabit Ethernet mode of the QUICC Engine, 2270 This driver supports the Gigabit Ethernet mode of the QUICC Engine,
2286 which is available on some Freescale SOCs. 2271 which is available on some Freescale SOCs.
2287 2272
2288config UGETH_NAPI
2289 bool "Use Rx Polling (NAPI)"
2290 depends on UCC_GETH
2291
2292config UGETH_MAGIC_PACKET 2273config UGETH_MAGIC_PACKET
2293 bool "Magic Packet detection support" 2274 bool "Magic Packet detection support"
2294 depends on UCC_GETH 2275 depends on UCC_GETH
@@ -2378,14 +2359,6 @@ config CHELSIO_T1_1G
2378 Enables support for Chelsio's gigabit Ethernet PCI cards. If you 2359 Enables support for Chelsio's gigabit Ethernet PCI cards. If you
2379 are using only 10G cards say 'N' here. 2360 are using only 10G cards say 'N' here.
2380 2361
2381config CHELSIO_T1_NAPI
2382 bool "Use Rx Polling (NAPI)"
2383 depends on CHELSIO_T1
2384 default y
2385 help
2386 NAPI is a driver API designed to reduce CPU and interrupt load
2387 when the driver is receiving lots of packets from the card.
2388
2389config CHELSIO_T3 2362config CHELSIO_T3
2390 tristate "Chelsio Communications T3 10Gb Ethernet support" 2363 tristate "Chelsio Communications T3 10Gb Ethernet support"
2391 depends on PCI && INET 2364 depends on PCI && INET
@@ -2457,20 +2430,6 @@ config IXGB
2457 To compile this driver as a module, choose M here. The module 2430 To compile this driver as a module, choose M here. The module
2458 will be called ixgb. 2431 will be called ixgb.
2459 2432
2460config IXGB_NAPI
2461 bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
2462 depends on IXGB && EXPERIMENTAL
2463 help
2464 NAPI is a new driver API designed to reduce CPU and interrupt load
2465 when the driver is receiving lots of packets from the card. It is
2466 still somewhat experimental and thus not yet enabled by default.
2467
2468 If your estimated Rx load is 10kpps or more, or if the card will be
2469 deployed on potentially unfriendly networks (e.g. in a firewall),
2470 then say Y here.
2471
2472 If in doubt, say N.
2473
2474config S2IO 2433config S2IO
2475 tristate "S2IO 10Gbe XFrame NIC" 2434 tristate "S2IO 10Gbe XFrame NIC"
2476 depends on PCI 2435 depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 4beb043e09e6..3292d0af59c3 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -166,7 +166,6 @@ obj-$(CONFIG_EEXPRESS_PRO) += eepro.o
166obj-$(CONFIG_8139CP) += 8139cp.o 166obj-$(CONFIG_8139CP) += 8139cp.o
167obj-$(CONFIG_8139TOO) += 8139too.o 167obj-$(CONFIG_8139TOO) += 8139too.o
168obj-$(CONFIG_ZNET) += znet.o 168obj-$(CONFIG_ZNET) += znet.o
169obj-$(CONFIG_LAN_SAA9730) += saa9730.o
170obj-$(CONFIG_CPMAC) += cpmac.o 169obj-$(CONFIG_CPMAC) += cpmac.o
171obj-$(CONFIG_DEPCA) += depca.o 170obj-$(CONFIG_DEPCA) += depca.o
172obj-$(CONFIG_EWRK3) += ewrk3.o 171obj-$(CONFIG_EWRK3) += ewrk3.o
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index a509337eab2d..638c9a27a7a6 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -1153,9 +1153,7 @@ static int __devinit init_one(struct pci_dev *pdev,
1153#ifdef CONFIG_NET_POLL_CONTROLLER 1153#ifdef CONFIG_NET_POLL_CONTROLLER
1154 netdev->poll_controller = t1_netpoll; 1154 netdev->poll_controller = t1_netpoll;
1155#endif 1155#endif
1156#ifdef CONFIG_CHELSIO_T1_NAPI
1157 netif_napi_add(netdev, &adapter->napi, t1_poll, 64); 1156 netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
1158#endif
1159 1157
1160 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); 1158 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1161 } 1159 }
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 8a7efd38e95b..d6c7d2aa761b 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1396,20 +1396,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1396 1396
1397 if (unlikely(adapter->vlan_grp && p->vlan_valid)) { 1397 if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
1398 st->vlan_xtract++; 1398 st->vlan_xtract++;
1399#ifdef CONFIG_CHELSIO_T1_NAPI 1399 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
1400 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, 1400 ntohs(p->vlan));
1401 ntohs(p->vlan)); 1401 } else
1402#else
1403 vlan_hwaccel_rx(skb, adapter->vlan_grp,
1404 ntohs(p->vlan));
1405#endif
1406 } else {
1407#ifdef CONFIG_CHELSIO_T1_NAPI
1408 netif_receive_skb(skb); 1402 netif_receive_skb(skb);
1409#else
1410 netif_rx(skb);
1411#endif
1412 }
1413} 1403}
1414 1404
1415/* 1405/*
@@ -1568,7 +1558,6 @@ static inline int responses_pending(const struct adapter *adapter)
1568 return (e->GenerationBit == Q->genbit); 1558 return (e->GenerationBit == Q->genbit);
1569} 1559}
1570 1560
1571#ifdef CONFIG_CHELSIO_T1_NAPI
1572/* 1561/*
1573 * A simpler version of process_responses() that handles only pure (i.e., 1562 * A simpler version of process_responses() that handles only pure (i.e.,
1574 * non data-carrying) responses. Such responses are too light-weight to justify 1563 * non data-carrying) responses. Such responses are too light-weight to justify
@@ -1636,9 +1625,6 @@ int t1_poll(struct napi_struct *napi, int budget)
1636 return work_done; 1625 return work_done;
1637} 1626}
1638 1627
1639/*
1640 * NAPI version of the main interrupt handler.
1641 */
1642irqreturn_t t1_interrupt(int irq, void *data) 1628irqreturn_t t1_interrupt(int irq, void *data)
1643{ 1629{
1644 struct adapter *adapter = data; 1630 struct adapter *adapter = data;
@@ -1656,7 +1642,8 @@ irqreturn_t t1_interrupt(int irq, void *data)
1656 else { 1642 else {
1657 /* no data, no NAPI needed */ 1643 /* no data, no NAPI needed */
1658 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); 1644 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1659 napi_enable(&adapter->napi); /* undo schedule_prep */ 1645 /* undo schedule_prep */
1646 napi_enable(&adapter->napi);
1660 } 1647 }
1661 } 1648 }
1662 return IRQ_HANDLED; 1649 return IRQ_HANDLED;
@@ -1672,53 +1659,6 @@ irqreturn_t t1_interrupt(int irq, void *data)
1672 return IRQ_RETVAL(handled != 0); 1659 return IRQ_RETVAL(handled != 0);
1673} 1660}
1674 1661
1675#else
1676/*
1677 * Main interrupt handler, optimized assuming that we took a 'DATA'
1678 * interrupt.
1679 *
1680 * 1. Clear the interrupt
1681 * 2. Loop while we find valid descriptors and process them; accumulate
1682 * information that can be processed after the loop
1683 * 3. Tell the SGE at which index we stopped processing descriptors
1684 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
1685 * outstanding TX buffers waiting, replenish RX buffers, potentially
1686 * reenable upper layers if they were turned off due to lack of TX
1687 * resources which are available again.
1688 * 5. If we took an interrupt, but no valid respQ descriptors was found we
1689 * let the slow_intr_handler run and do error handling.
1690 */
1691irqreturn_t t1_interrupt(int irq, void *cookie)
1692{
1693 int work_done;
1694 struct adapter *adapter = cookie;
1695 struct respQ *Q = &adapter->sge->respQ;
1696
1697 spin_lock(&adapter->async_lock);
1698
1699 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1700
1701 if (likely(responses_pending(adapter)))
1702 work_done = process_responses(adapter, -1);
1703 else
1704 work_done = t1_slow_intr_handler(adapter);
1705
1706 /*
1707 * The unconditional clearing of the PL_CAUSE above may have raced
1708 * with DMA completion and the corresponding generation of a response
1709 * to cause us to miss the resulting data interrupt. The next write
1710 * is also unconditional to recover the missed interrupt and render
1711 * this race harmless.
1712 */
1713 writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
1714
1715 if (!work_done)
1716 adapter->sge->stats.unhandled_irqs++;
1717 spin_unlock(&adapter->async_lock);
1718 return IRQ_RETVAL(work_done != 0);
1719}
1720#endif
1721
1722/* 1662/*
1723 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. 1663 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
1724 * 1664 *
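
[Editor's note] For readers following the NAPI unification in this hunk (and the gianfar one below): a rough sketch of the single receive path the drivers converge on, using the NAPI API of this kernel era. struct example_adapter and example_next_rx_skb() are made-up names, not chelsio code.

/* IRQ handler: claim the NAPI context and defer the real work */
static irqreturn_t example_intr(int irq, void *data)
{
	struct example_adapter *adapter = data;	/* hypothetical type */

	if (napi_schedule_prep(&adapter->napi)) {
		/* mask device interrupts here, then hand off to softirq */
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

/* Poll routine: always netif_receive_skb(), never netif_rx() */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_adapter *adapter =
		container_of(napi, struct example_adapter, napi);
	int work_done = 0;
	struct sk_buff *skb;

	/* example_next_rx_skb() stands in for the driver's ring walk */
	while (work_done < budget &&
	       (skb = example_next_rx_skb(adapter)) != NULL) {
		netif_receive_skb(skb);
		work_done++;
	}
	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable device interrupts here */
	}
	return work_done;
}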
diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/cxgb3/cxgb3_ctl_defs.h
index 6c4f32066919..d38e6cc92668 100644
--- a/drivers/net/cxgb3/cxgb3_ctl_defs.h
+++ b/drivers/net/cxgb3/cxgb3_ctl_defs.h
@@ -110,10 +110,7 @@ struct ulp_iscsi_info {
110 unsigned int llimit; 110 unsigned int llimit;
111 unsigned int ulimit; 111 unsigned int ulimit;
112 unsigned int tagmask; 112 unsigned int tagmask;
113 unsigned int pgsz3; 113 u8 pgsz_factor[4];
114 unsigned int pgsz2;
115 unsigned int pgsz1;
116 unsigned int pgsz0;
117 unsigned int max_rxsz; 114 unsigned int max_rxsz;
118 unsigned int max_txsz; 115 unsigned int max_txsz;
119 struct pci_dev *pdev; 116 struct pci_dev *pdev;
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index ae6ff5df779c..c69f4c0187d9 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -207,6 +207,17 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
207 break; 207 break;
208 case ULP_ISCSI_SET_PARAMS: 208 case ULP_ISCSI_SET_PARAMS:
209 t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask); 209 t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
210 /* set MaxRxData and MaxCoalesceSize to 16224 */
211 t3_write_reg(adapter, A_TP_PARA_REG2, 0x3f603f60);
212 /* program the ddp page sizes */
213 {
214 int i;
215 unsigned int val = 0;
216 for (i = 0; i < 4; i++)
217 val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
218 if (val)
219 t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
220 }
210 break; 221 break;
211 default: 222 default:
212 ret = -EOPNOTSUPP; 223 ret = -EOPNOTSUPP;
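
[Editor's note] A worked example (with made-up factor values) of the packing loop added above: each 4-bit pgsz_factor[i] lands in byte lane i of the A_ULPRX_ISCSI_PSZ register.

static unsigned int pack_iscsi_psz(const u8 pgsz_factor[4])
{
	unsigned int val = 0;
	int i;

	for (i = 0; i < 4; i++)
		val |= (pgsz_factor[i] & 0xF) << (8 * i);
	return val;
}

/* e.g. factors {0, 1, 2, 4} pack to 0x04020100 */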
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 567178879345..4bda27c551c9 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -1517,16 +1517,18 @@
1517 1517
1518#define A_ULPRX_ISCSI_TAGMASK 0x514 1518#define A_ULPRX_ISCSI_TAGMASK 0x514
1519 1519
1520#define S_HPZ0 0 1520#define A_ULPRX_ISCSI_PSZ 0x518
1521#define M_HPZ0 0xf
1522#define V_HPZ0(x) ((x) << S_HPZ0)
1523#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
1524 1521
1525#define A_ULPRX_TDDP_LLIMIT 0x51c 1522#define A_ULPRX_TDDP_LLIMIT 0x51c
1526 1523
1527#define A_ULPRX_TDDP_ULIMIT 0x520 1524#define A_ULPRX_TDDP_ULIMIT 0x520
1528#define A_ULPRX_TDDP_PSZ 0x528 1525#define A_ULPRX_TDDP_PSZ 0x528
1529 1526
1527#define S_HPZ0 0
1528#define M_HPZ0 0xf
1529#define V_HPZ0(x) ((x) << S_HPZ0)
1530#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
1531
1530#define A_ULPRX_STAG_LLIMIT 0x52c 1532#define A_ULPRX_STAG_LLIMIT 0x52c
1531 1533
1532#define A_ULPRX_STAG_ULIMIT 0x530 1534#define A_ULPRX_STAG_ULIMIT 0x530
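
[Editor's note] The relocated HPZ0 definitions follow the S_/M_/V_/G_ idiom used throughout regs.h: S_ is the field's bit offset, M_ its width mask, V_ shifts a value into place, and G_ extracts it back out. An illustrative (non-driver) helper:

static unsigned int set_hpz0(unsigned int reg, unsigned int code)
{
	reg &= ~V_HPZ0(M_HPZ0);		/* clear bits 3:0 of the register */
	reg |= V_HPZ0(code & M_HPZ0);	/* shift the page-size code in */
	return reg;			/* G_HPZ0(reg) now returns 'code' */
}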
diff --git a/drivers/net/cxgb3/t3_cpl.h b/drivers/net/cxgb3/t3_cpl.h
index a666c5d51cc0..917970ed24a1 100644
--- a/drivers/net/cxgb3/t3_cpl.h
+++ b/drivers/net/cxgb3/t3_cpl.h
@@ -191,6 +191,9 @@ union opcode_tid {
191#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF) 191#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
192#define G_TID(x) ((x) & 0xFFFFFF) 192#define G_TID(x) ((x) & 0xFFFFFF)
193 193
194#define S_QNUM 0
195#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF)
196
194#define S_HASHTYPE 22 197#define S_HASHTYPE 22
195#define M_HASHTYPE 0x3 198#define M_HASHTYPE 0x3
196#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE) 199#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
@@ -779,6 +782,12 @@ struct tx_data_wr {
779 __be32 param; 782 __be32 param;
780}; 783};
781 784
785/* tx_data_wr.flags fields */
786#define S_TX_ACK_PAGES 21
787#define M_TX_ACK_PAGES 0x7
788#define V_TX_ACK_PAGES(x) ((x) << S_TX_ACK_PAGES)
789#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES)
790
782/* tx_data_wr.param fields */ 791/* tx_data_wr.param fields */
783#define S_TX_PORT 0 792#define S_TX_PORT 0
784#define M_TX_PORT 0x7 793#define M_TX_PORT 0x7
@@ -1452,4 +1461,35 @@ struct cpl_rdma_terminate {
1452#define M_TERM_TID 0xFFFFF 1461#define M_TERM_TID 0xFFFFF
1453#define V_TERM_TID(x) ((x) << S_TERM_TID) 1462#define V_TERM_TID(x) ((x) << S_TERM_TID)
1454#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID) 1463#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
1464
1465/* ULP_TX opcodes */
1466enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 };
1467
1468#define S_ULPTX_CMD 28
1469#define M_ULPTX_CMD 0xF
1470#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD)
1471
1472#define S_ULPTX_NFLITS 0
1473#define M_ULPTX_NFLITS 0xFF
1474#define V_ULPTX_NFLITS(x) ((x) << S_ULPTX_NFLITS)
1475
1476struct ulp_mem_io {
1477 WR_HDR;
1478 __be32 cmd_lock_addr;
1479 __be32 len;
1480};
1481
1482/* ulp_mem_io.cmd_lock_addr fields */
1483#define S_ULP_MEMIO_ADDR 0
1484#define M_ULP_MEMIO_ADDR 0x7FFFFFF
1485#define V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR)
1486#define S_ULP_MEMIO_LOCK 27
1487#define V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK)
1488#define F_ULP_MEMIO_LOCK V_ULP_MEMIO_LOCK(1U)
1489
1490/* ulp_mem_io.len fields */
1491#define S_ULP_MEMIO_DATA_LEN 28
1492#define M_ULP_MEMIO_DATA_LEN 0xF
1493#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
1494
1455#endif /* T3_CPL_H */ 1495#endif /* T3_CPL_H */
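
[Editor's note] To show how the new ULP_TX constants fit together, here is a hedged sketch (the helper name is invented) that builds the cmd_lock_addr word of struct ulp_mem_io:

static inline __be32 mk_ulp_mem_write_cmd(unsigned int addr, bool lock)
{
	unsigned int w = V_ULPTX_CMD(ULP_MEM_WRITE) |	/* bits 31:28 */
			 V_ULP_MEMIO_ADDR(addr & M_ULP_MEMIO_ADDR);

	if (lock)
		w |= F_ULP_MEMIO_LOCK;		/* bit 27 */
	return cpu_to_be32(w);			/* CPL fields are big-endian */
}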
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h
index 8556628fd5af..0a21cfbd2b21 100644
--- a/drivers/net/cxgb3/t3cdev.h
+++ b/drivers/net/cxgb3/t3cdev.h
@@ -64,6 +64,7 @@ struct t3cdev {
64 void *l3opt; /* optional layer 3 data */ 64 void *l3opt; /* optional layer 3 data */
65 void *l4opt; /* optional layer 4 data */ 65 void *l4opt; /* optional layer 4 data */
66 void *ulp; /* ulp stuff */ 66 void *ulp; /* ulp stuff */
67 void *ulp_iscsi; /* ulp iscsi */
67}; 68};
68 69
69#endif /* _T3CDEV_H_ */ 70#endif /* _T3CDEV_H_ */
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 393a0f175302..fa78d6870124 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -44,8 +44,7 @@
44 * happen immediately, but will wait until either a set number 44 * happen immediately, but will wait until either a set number
45 * of frames or amount of time have passed). In NAPI, the 45 * of frames or amount of time have passed). In NAPI, the
46 * interrupt handler will signal there is work to be done, and 46 * interrupt handler will signal there is work to be done, and
47 * exit. Without NAPI, the packet(s) will be handled 47 * exit. This method will start at the last known empty
48 * immediately. Both methods will start at the last known empty
49 * descriptor, and process every subsequent descriptor until there 48 * descriptor, and process every subsequent descriptor until there
50 * are none left with data (NAPI will stop after a set number of 49 * are none left with data (NAPI will stop after a set number of
51 * packets to give time to other tasks, but will eventually 50 * packets to give time to other tasks, but will eventually
@@ -101,12 +100,6 @@
101#undef BRIEF_GFAR_ERRORS 100#undef BRIEF_GFAR_ERRORS
102#undef VERBOSE_GFAR_ERRORS 101#undef VERBOSE_GFAR_ERRORS
103 102
104#ifdef CONFIG_GFAR_NAPI
105#define RECEIVE(x) netif_receive_skb(x)
106#else
107#define RECEIVE(x) netif_rx(x)
108#endif
109
110const char gfar_driver_name[] = "Gianfar Ethernet"; 103const char gfar_driver_name[] = "Gianfar Ethernet";
111const char gfar_driver_version[] = "1.3"; 104const char gfar_driver_version[] = "1.3";
112 105
@@ -131,9 +124,7 @@ static void free_skb_resources(struct gfar_private *priv);
131static void gfar_set_multi(struct net_device *dev); 124static void gfar_set_multi(struct net_device *dev);
132static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 125static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
133static void gfar_configure_serdes(struct net_device *dev); 126static void gfar_configure_serdes(struct net_device *dev);
134#ifdef CONFIG_GFAR_NAPI
135static int gfar_poll(struct napi_struct *napi, int budget); 127static int gfar_poll(struct napi_struct *napi, int budget);
136#endif
137#ifdef CONFIG_NET_POLL_CONTROLLER 128#ifdef CONFIG_NET_POLL_CONTROLLER
138static void gfar_netpoll(struct net_device *dev); 129static void gfar_netpoll(struct net_device *dev);
139#endif 130#endif
@@ -260,9 +251,7 @@ static int gfar_probe(struct platform_device *pdev)
260 dev->hard_start_xmit = gfar_start_xmit; 251 dev->hard_start_xmit = gfar_start_xmit;
261 dev->tx_timeout = gfar_timeout; 252 dev->tx_timeout = gfar_timeout;
262 dev->watchdog_timeo = TX_TIMEOUT; 253 dev->watchdog_timeo = TX_TIMEOUT;
263#ifdef CONFIG_GFAR_NAPI
264 netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT); 254 netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
265#endif
266#ifdef CONFIG_NET_POLL_CONTROLLER 255#ifdef CONFIG_NET_POLL_CONTROLLER
267 dev->poll_controller = gfar_netpoll; 256 dev->poll_controller = gfar_netpoll;
268#endif 257#endif
@@ -363,11 +352,7 @@ static int gfar_probe(struct platform_device *pdev)
363 352
364 /* Even more device info helps when determining which kernel */ 353 /* Even more device info helps when determining which kernel */
365 /* provided which set of benchmarks. */ 354 /* provided which set of benchmarks. */
366#ifdef CONFIG_GFAR_NAPI
367 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 355 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
368#else
369 printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
370#endif
371 printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", 356 printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
372 dev->name, priv->rx_ring_size, priv->tx_ring_size); 357 dev->name, priv->rx_ring_size, priv->tx_ring_size);
373 358
@@ -945,14 +930,10 @@ tx_skb_fail:
945/* Returns 0 for success. */ 930/* Returns 0 for success. */
946static int gfar_enet_open(struct net_device *dev) 931static int gfar_enet_open(struct net_device *dev)
947{ 932{
948#ifdef CONFIG_GFAR_NAPI
949 struct gfar_private *priv = netdev_priv(dev); 933 struct gfar_private *priv = netdev_priv(dev);
950#endif
951 int err; 934 int err;
952 935
953#ifdef CONFIG_GFAR_NAPI
954 napi_enable(&priv->napi); 936 napi_enable(&priv->napi);
955#endif
956 937
957 /* Initialize a bunch of registers */ 938 /* Initialize a bunch of registers */
958 init_registers(dev); 939 init_registers(dev);
@@ -962,17 +943,13 @@ static int gfar_enet_open(struct net_device *dev)
962 err = init_phy(dev); 943 err = init_phy(dev);
963 944
964 if(err) { 945 if(err) {
965#ifdef CONFIG_GFAR_NAPI
966 napi_disable(&priv->napi); 946 napi_disable(&priv->napi);
967#endif
968 return err; 947 return err;
969 } 948 }
970 949
971 err = startup_gfar(dev); 950 err = startup_gfar(dev);
972 if (err) { 951 if (err) {
973#ifdef CONFIG_GFAR_NAPI
974 napi_disable(&priv->napi); 952 napi_disable(&priv->napi);
975#endif
976 return err; 953 return err;
977 } 954 }
978 955
@@ -1128,9 +1105,7 @@ static int gfar_close(struct net_device *dev)
1128{ 1105{
1129 struct gfar_private *priv = netdev_priv(dev); 1106 struct gfar_private *priv = netdev_priv(dev);
1130 1107
1131#ifdef CONFIG_GFAR_NAPI
1132 napi_disable(&priv->napi); 1108 napi_disable(&priv->napi);
1133#endif
1134 1109
1135 stop_gfar(dev); 1110 stop_gfar(dev);
1136 1111
@@ -1427,14 +1402,9 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
1427{ 1402{
1428 struct net_device *dev = (struct net_device *) dev_id; 1403 struct net_device *dev = (struct net_device *) dev_id;
1429 struct gfar_private *priv = netdev_priv(dev); 1404 struct gfar_private *priv = netdev_priv(dev);
1430#ifdef CONFIG_GFAR_NAPI
1431 u32 tempval; 1405 u32 tempval;
1432#else
1433 unsigned long flags;
1434#endif
1435 1406
1436 /* support NAPI */ 1407 /* support NAPI */
1437#ifdef CONFIG_GFAR_NAPI
1438 /* Clear IEVENT, so interrupts aren't called again 1408 /* Clear IEVENT, so interrupts aren't called again
1439 * because of the packets that have already arrived */ 1409 * because of the packets that have already arrived */
1440 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); 1410 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
@@ -1451,38 +1421,10 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
1451 dev->name, gfar_read(&priv->regs->ievent), 1421 dev->name, gfar_read(&priv->regs->ievent),
1452 gfar_read(&priv->regs->imask)); 1422 gfar_read(&priv->regs->imask));
1453 } 1423 }
1454#else
1455 /* Clear IEVENT, so rx interrupt isn't called again
1456 * because of this interrupt */
1457 gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
1458
1459 spin_lock_irqsave(&priv->rxlock, flags);
1460 gfar_clean_rx_ring(dev, priv->rx_ring_size);
1461
1462 /* If we are coalescing interrupts, update the timer */
1463 /* Otherwise, clear it */
1464 if (likely(priv->rxcoalescing)) {
1465 gfar_write(&priv->regs->rxic, 0);
1466 gfar_write(&priv->regs->rxic,
1467 mk_ic_value(priv->rxcount, priv->rxtime));
1468 }
1469
1470 spin_unlock_irqrestore(&priv->rxlock, flags);
1471#endif
1472 1424
1473 return IRQ_HANDLED; 1425 return IRQ_HANDLED;
1474} 1426}
1475 1427
1476static inline int gfar_rx_vlan(struct sk_buff *skb,
1477 struct vlan_group *vlgrp, unsigned short vlctl)
1478{
1479#ifdef CONFIG_GFAR_NAPI
1480 return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
1481#else
1482 return vlan_hwaccel_rx(skb, vlgrp, vlctl);
1483#endif
1484}
1485
1486static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) 1428static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
1487{ 1429{
1488 /* If valid headers were found, and valid sums 1430 /* If valid headers were found, and valid sums
@@ -1539,10 +1481,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1539 skb->protocol = eth_type_trans(skb, dev); 1481 skb->protocol = eth_type_trans(skb, dev);
1540 1482
1541 /* Send the packet up the stack */ 1483 /* Send the packet up the stack */
1542 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) 1484 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) {
1543 ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl); 1485 ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp,
1544 else 1486 fcb->vlctl);
1545 ret = RECEIVE(skb); 1487 } else
1488 ret = netif_receive_skb(skb);
1546 1489
1547 if (NET_RX_DROP == ret) 1490 if (NET_RX_DROP == ret)
1548 priv->extra_stats.kernel_dropped++; 1491 priv->extra_stats.kernel_dropped++;
@@ -1629,7 +1572,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1629 return howmany; 1572 return howmany;
1630} 1573}
1631 1574
1632#ifdef CONFIG_GFAR_NAPI
1633static int gfar_poll(struct napi_struct *napi, int budget) 1575static int gfar_poll(struct napi_struct *napi, int budget)
1634{ 1576{
1635 struct gfar_private *priv = container_of(napi, struct gfar_private, napi); 1577 struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
@@ -1664,7 +1606,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1664 1606
1665 return howmany; 1607 return howmany;
1666} 1608}
1667#endif
1668 1609
1669#ifdef CONFIG_NET_POLL_CONTROLLER 1610#ifdef CONFIG_NET_POLL_CONTROLLER
1670/* 1611/*
@@ -2003,11 +1944,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2003 1944
2004 gfar_receive(irq, dev_id); 1945 gfar_receive(irq, dev_id);
2005 1946
2006#ifndef CONFIG_GFAR_NAPI
2007 /* Clear the halt bit in RSTAT */
2008 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
2009#endif
2010
2011 if (netif_msg_rx_err(priv)) 1947 if (netif_msg_rx_err(priv))
2012 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", 1948 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2013 dev->name, gfar_read(&priv->regs->rstat)); 1949 dev->name, gfar_read(&priv->regs->rstat));
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 27f37c81e52c..bead71cb2b16 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -77,13 +77,8 @@ extern const char gfar_driver_name[];
77extern const char gfar_driver_version[]; 77extern const char gfar_driver_version[];
78 78
79/* These need to be powers of 2 for this driver */ 79/* These need to be powers of 2 for this driver */
80#ifdef CONFIG_GFAR_NAPI
81#define DEFAULT_TX_RING_SIZE 256 80#define DEFAULT_TX_RING_SIZE 256
82#define DEFAULT_RX_RING_SIZE 256 81#define DEFAULT_RX_RING_SIZE 256
83#else
84#define DEFAULT_TX_RING_SIZE 64
85#define DEFAULT_RX_RING_SIZE 64
86#endif
87 82
88#define GFAR_RX_MAX_RING_SIZE 256 83#define GFAR_RX_MAX_RING_SIZE 256
89#define GFAR_TX_MAX_RING_SIZE 256 84#define GFAR_TX_MAX_RING_SIZE 256
@@ -128,14 +123,8 @@ extern const char gfar_driver_version[];
128 123
129#define DEFAULT_RXTIME 21 124#define DEFAULT_RXTIME 21
130 125
131/* Non NAPI Case */
132#ifndef CONFIG_GFAR_NAPI
133#define DEFAULT_RX_COALESCE 1
134#define DEFAULT_RXCOUNT 16
135#else
136#define DEFAULT_RX_COALESCE 0 126#define DEFAULT_RX_COALESCE 0
137#define DEFAULT_RXCOUNT 0 127#define DEFAULT_RXCOUNT 0
138#endif /* CONFIG_GFAR_NAPI */
139 128
140#define MIIMCFG_INIT_VALUE 0x00000007 129#define MIIMCFG_INIT_VALUE 0x00000007
141#define MIIMCFG_RESET 0x80000000 130#define MIIMCFG_RESET 0x80000000
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 2c8b91060d98..e098f234770f 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -31,6 +31,7 @@
31 31
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/if_ether.h>
34 35
35#include "e1000_mac.h" 36#include "e1000_mac.h"
36#include "e1000_82575.h" 37#include "e1000_82575.h"
@@ -45,7 +46,6 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *);
45static s32 igb_init_hw_82575(struct e1000_hw *); 46static s32 igb_init_hw_82575(struct e1000_hw *);
46static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); 47static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
47static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); 48static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
48static void igb_rar_set_82575(struct e1000_hw *, u8 *, u32);
49static s32 igb_reset_hw_82575(struct e1000_hw *); 49static s32 igb_reset_hw_82575(struct e1000_hw *);
50static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); 50static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
51static s32 igb_setup_copper_link_82575(struct e1000_hw *); 51static s32 igb_setup_copper_link_82575(struct e1000_hw *);
@@ -84,6 +84,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
84 case E1000_DEV_ID_82575GB_QUAD_COPPER: 84 case E1000_DEV_ID_82575GB_QUAD_COPPER:
85 mac->type = e1000_82575; 85 mac->type = e1000_82575;
86 break; 86 break;
87 case E1000_DEV_ID_82576:
88 case E1000_DEV_ID_82576_FIBER:
89 case E1000_DEV_ID_82576_SERDES:
90 case E1000_DEV_ID_82576_QUAD_COPPER:
91 mac->type = e1000_82576;
92 break;
87 default: 93 default:
88 return -E1000_ERR_MAC_INIT; 94 return -E1000_ERR_MAC_INIT;
89 break; 95 break;
@@ -128,6 +134,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
128 mac->mta_reg_count = 128; 134 mac->mta_reg_count = 128;
129 /* Set rar entry count */ 135 /* Set rar entry count */
130 mac->rar_entry_count = E1000_RAR_ENTRIES_82575; 136 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
137 if (mac->type == e1000_82576)
138 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
131 /* Set if part includes ASF firmware */ 139 /* Set if part includes ASF firmware */
132 mac->asf_firmware_present = true; 140 mac->asf_firmware_present = true;
133 /* Set if manageability features are enabled. */ 141 /* Set if manageability features are enabled. */
@@ -694,13 +702,12 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
694 if ((hw->phy.media_type != e1000_media_type_copper) || 702 if ((hw->phy.media_type != e1000_media_type_copper) ||
695 (igb_sgmii_active_82575(hw))) 703 (igb_sgmii_active_82575(hw)))
696 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, 704 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
697 &duplex); 705 &duplex);
698 else 706 else
699 ret_val = igb_check_for_copper_link(hw); 707 ret_val = igb_check_for_copper_link(hw);
700 708
701 return ret_val; 709 return ret_val;
702} 710}
703
704/** 711/**
705 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex 712 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
706 * @hw: pointer to the HW structure 713 * @hw: pointer to the HW structure
@@ -757,18 +764,129 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
757} 764}
758 765
759/** 766/**
 760 * igb_rar_set_82575 - Set receive address register 767 * igb_init_rx_addrs_82575 - Initialize receive addresses
768 * @hw: pointer to the HW structure
 769 * @rar_count: total number of receive address registers
770 *
771 * Setups the receive address registers by setting the base receive address
772 * register to the devices MAC address and clearing all the other receive
773 * address registers to 0.
774 **/
775static void igb_init_rx_addrs_82575(struct e1000_hw *hw, u16 rar_count)
776{
777 u32 i;
778 u8 addr[6] = {0,0,0,0,0,0};
779 /*
780 * This function is essentially the same as that of
 781 * e1000_init_rx_addrs_generic. However, it also handles
 782 * the special case where the register offset of the
 783 * second set of RARs begins elsewhere; that offset is
 784 * handled implicitly by e1000_rar_set_generic.
785 */
786
 787 hw_dbg("igb_init_rx_addrs_82575\n");
788
789 /* Setup the receive address */
790 hw_dbg("Programming MAC Address into RAR[0]\n");
791 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
792
793 /* Zero out the other (rar_entry_count - 1) receive addresses */
794 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
795 for (i = 1; i < rar_count; i++)
796 hw->mac.ops.rar_set(hw, addr, i);
797}
798
799/**
800 * igb_update_mc_addr_list_82575 - Update Multicast addresses
801 * @hw: pointer to the HW structure
802 * @mc_addr_list: array of multicast addresses to program
803 * @mc_addr_count: number of multicast addresses to program
804 * @rar_used_count: the first RAR register free to program
805 * @rar_count: total number of supported Receive Address Registers
806 *
807 * Updates the Receive Address Registers and Multicast Table Array.
808 * The caller must have a packed mc_addr_list of multicast addresses.
809 * The parameter rar_count will usually be hw->mac.rar_entry_count
810 * unless there are workarounds that change this.
811 **/
812void igb_update_mc_addr_list_82575(struct e1000_hw *hw,
813 u8 *mc_addr_list, u32 mc_addr_count,
814 u32 rar_used_count, u32 rar_count)
815{
816 u32 hash_value;
817 u32 i;
818 u8 addr[6] = {0,0,0,0,0,0};
819 /*
820 * This function is essentially the same as that of
 821 * igb_update_mc_addr_list_generic. However, it also handles
 822 * the special case where the register offset of the
 823 * second set of RARs begins elsewhere; that offset is
 824 * handled implicitly by e1000_rar_set_generic.
825 */
826
827 /*
828 * Load the first set of multicast addresses into the exact
829 * filters (RAR). If there are not enough to fill the RAR
830 * array, clear the filters.
831 */
832 for (i = rar_used_count; i < rar_count; i++) {
833 if (mc_addr_count) {
834 igb_rar_set(hw, mc_addr_list, i);
835 mc_addr_count--;
836 mc_addr_list += ETH_ALEN;
837 } else {
838 igb_rar_set(hw, addr, i);
839 }
840 }
841
842 /* Clear the old settings from the MTA */
843 hw_dbg("Clearing MTA\n");
844 for (i = 0; i < hw->mac.mta_reg_count; i++) {
845 array_wr32(E1000_MTA, i, 0);
846 wrfl();
847 }
848
849 /* Load any remaining multicast addresses into the hash table. */
850 for (; mc_addr_count > 0; mc_addr_count--) {
851 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
852 hw_dbg("Hash value = 0x%03X\n", hash_value);
853 hw->mac.ops.mta_set(hw, hash_value);
854 mc_addr_list += ETH_ALEN;
855 }
856}
857
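
The update routine above fills the hardware filters in two tiers: multicast addresses go into any spare exact-match RARs first, and only the overflow is hashed into the MTA. A standalone sketch of that fill order (the address list and the toy hash are stand-ins; the real igb_hash_mc_addr() derives its shift from MCR register bits):

    #include <stdio.h>

    #define ETH_ALEN  6
    #define RAR_COUNT 24                    /* E1000_RAR_ENTRIES_82576 */

    /* stand-in hash, not the hardware's */
    static unsigned int toy_hash(const unsigned char *mac)
    {
            return ((mac[4] >> 4) | ((unsigned int)mac[5] << 4)) & 0xFFF;
    }

    int main(void)
    {
            unsigned char mc[40][ETH_ALEN] = {{0}};
            unsigned int count = 40, next, rar;

            for (next = 0; next < count; next++)
                    mc[next][5] = (unsigned char)next;  /* fake multicast MACs */

            /* tier 1: exact filters; RAR[0] already holds the station address */
            for (rar = 1, next = 0; rar < RAR_COUNT && next < count; rar++, next++)
                    printf("RAR[%2u] <- mc[%u]\n", rar, next);

            /* tier 2: everything left lands in the 4096-bit MTA hash */
            for (; next < count; next++)
                    printf("MTA bit 0x%03X <- mc[%u]\n", toy_hash(mc[next]), next);
            return 0;
    }
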
858/**
859 * igb_shutdown_fiber_serdes_link_82575 - Remove link during power down
761 * @hw: pointer to the HW structure 860 * @hw: pointer to the HW structure
762 * @addr: pointer to the receive address
763 * @index: receive address array register
764 * 861 *
765 * Sets the receive address array register at index to the address passed 862 * In the case of fiber serdes, shut down optics and PCS on driver unload
766 * in by addr. 863 * when management pass thru is not enabled.
767 **/ 864 **/
768static void igb_rar_set_82575(struct e1000_hw *hw, u8 *addr, u32 index) 865void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw)
769{ 866{
770 if (index < E1000_RAR_ENTRIES_82575) 867 u32 reg;
771 igb_rar_set(hw, addr, index); 868
869 if (hw->mac.type != e1000_82576 ||
870 (hw->phy.media_type != e1000_media_type_fiber &&
871 hw->phy.media_type != e1000_media_type_internal_serdes))
872 return;
873
874 /* if the management interface is not enabled, then power down */
875 if (!igb_enable_mng_pass_thru(hw)) {
876 /* Disable PCS to turn off link */
877 reg = rd32(E1000_PCS_CFG0);
878 reg &= ~E1000_PCS_CFG_PCS_EN;
879 wr32(E1000_PCS_CFG0, reg);
880
881 /* shutdown the laser */
882 reg = rd32(E1000_CTRL_EXT);
883 reg |= E1000_CTRL_EXT_SDP7_DATA;
884 wr32(E1000_CTRL_EXT, reg);
885
886 /* flush the write to verify completion */
887 wrfl();
888 msleep(1);
889 }
772 890
773 return; 891 return;
774} 892}
@@ -854,7 +972,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
854 igb_clear_vfta(hw); 972 igb_clear_vfta(hw);
855 973
856 /* Setup the receive address */ 974 /* Setup the receive address */
857 igb_init_rx_addrs(hw, rar_count); 975 igb_init_rx_addrs_82575(hw, rar_count);
858 /* Zero out the Multicast HASH table */ 976 /* Zero out the Multicast HASH table */
859 hw_dbg("Zeroing the MTA\n"); 977 hw_dbg("Zeroing the MTA\n");
860 for (i = 0; i < mac->mta_reg_count; i++) 978 for (i = 0; i < mac->mta_reg_count; i++)
@@ -1114,6 +1232,70 @@ out:
1114} 1232}
1115 1233
1116/** 1234/**
1235 * igb_translate_register_82576 - Translate the proper register offset
1236 * @reg: e1000 register to be read
1237 *
1238 * Registers in 82576 are located in different offsets than other adapters
1239 * even though they function in the same manner. This function takes in
1240 * the name of the register to read and returns the correct offset for
1241 * 82576 silicon.
1242 **/
1243u32 igb_translate_register_82576(u32 reg)
1244{
1245 /*
1246 * Some of the Kawela registers are located at different
1247 * offsets than they are in older adapters.
1248 * Despite the difference in location, the registers
1249 * function in the same manner.
1250 */
1251 switch (reg) {
1252 case E1000_TDBAL(0):
1253 reg = 0x0E000;
1254 break;
1255 case E1000_TDBAH(0):
1256 reg = 0x0E004;
1257 break;
1258 case E1000_TDLEN(0):
1259 reg = 0x0E008;
1260 break;
1261 case E1000_TDH(0):
1262 reg = 0x0E010;
1263 break;
1264 case E1000_TDT(0):
1265 reg = 0x0E018;
1266 break;
1267 case E1000_TXDCTL(0):
1268 reg = 0x0E028;
1269 break;
1270 case E1000_RDBAL(0):
1271 reg = 0x0C000;
1272 break;
1273 case E1000_RDBAH(0):
1274 reg = 0x0C004;
1275 break;
1276 case E1000_RDLEN(0):
1277 reg = 0x0C008;
1278 break;
1279 case E1000_RDH(0):
1280 reg = 0x0C010;
1281 break;
1282 case E1000_RDT(0):
1283 reg = 0x0C018;
1284 break;
1285 case E1000_RXDCTL(0):
1286 reg = 0x0C028;
1287 break;
1288 case E1000_SRRCTL(0):
1289 reg = 0x0C00C;
1290 break;
1291 default:
1292 break;
1293 }
1294
1295 return reg;
1296}
1297
1298/**
1117 * igb_reset_init_script_82575 - Inits HW defaults after reset 1299 * igb_reset_init_script_82575 - Inits HW defaults after reset
1118 * @hw: pointer to the HW structure 1300 * @hw: pointer to the HW structure
1119 * 1301 *
@@ -1304,7 +1486,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
1304 .reset_hw = igb_reset_hw_82575, 1486 .reset_hw = igb_reset_hw_82575,
1305 .init_hw = igb_init_hw_82575, 1487 .init_hw = igb_init_hw_82575,
1306 .check_for_link = igb_check_for_link_82575, 1488 .check_for_link = igb_check_for_link_82575,
1307 .rar_set = igb_rar_set_82575, 1489 .rar_set = igb_rar_set,
1308 .read_mac_addr = igb_read_mac_addr_82575, 1490 .read_mac_addr = igb_read_mac_addr_82575,
1309 .get_speed_and_duplex = igb_get_speed_and_duplex_copper, 1491 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
1310}; 1492};
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index d78ad33d32bf..2f848e578a24 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -28,9 +28,13 @@
28#ifndef _E1000_82575_H_ 28#ifndef _E1000_82575_H_
29#define _E1000_82575_H_ 29#define _E1000_82575_H_
30 30
31u32 igb_translate_register_82576(u32 reg);
32void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32);
33extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
31extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); 34extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
32 35
33#define E1000_RAR_ENTRIES_82575 16 36#define E1000_RAR_ENTRIES_82575 16
37#define E1000_RAR_ENTRIES_82576 24
34 38
35/* SRRCTL bit definitions */ 39/* SRRCTL bit definitions */
36#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ 40#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
@@ -95,6 +99,8 @@ union e1000_adv_rx_desc {
95/* RSS Hash results */ 99/* RSS Hash results */
96 100
97/* RSS Packet Types as indicated in the receive descriptor */ 101/* RSS Packet Types as indicated in the receive descriptor */
102#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
103#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
98 104
99/* Transmit Descriptor - Advanced */ 105/* Transmit Descriptor - Advanced */
100union e1000_adv_tx_desc { 106union e1000_adv_tx_desc {
@@ -144,9 +150,25 @@ struct e1000_adv_tx_context_desc {
144#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ 150#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
145 151
146/* Direct Cache Access (DCA) definitions */ 152/* Direct Cache Access (DCA) definitions */
153#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
154#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
147 155
156#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
157#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
148 158
159#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
160#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
161#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
162#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
149 163
164#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
165#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
150#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ 166#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
151 167
168/* Additional DCA related definitions, note change in position of CPUID */
169#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
170#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
171#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
172#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
173
152#endif 174#endif
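
Note the CPUID relocation the last four defines describe: 82575 keeps the CPU ID in the low five bits of the DCA Rx/Tx control words, while 82576 moves it to the top byte. A standalone sketch of composing the Rx control value both ways (on real hardware the CPU ID would come from the DCA core rather than a constant):

    #include <stdio.h>
    #include <stdint.h>

    #define E1000_DCA_RXCTRL_CPUID_MASK        0x0000001F
    #define E1000_DCA_RXCTRL_DESC_DCA_EN       (1 << 5)
    #define E1000_DCA_RXCTRL_CPUID_MASK_82576  0xFF000000
    #define E1000_DCA_RXCTRL_CPUID_SHIFT       24

    static uint32_t rxctrl_82575(uint32_t reg, uint32_t cpu)
    {
            reg &= ~E1000_DCA_RXCTRL_CPUID_MASK;
            return reg | (cpu & E1000_DCA_RXCTRL_CPUID_MASK)
                       | E1000_DCA_RXCTRL_DESC_DCA_EN;
    }

    static uint32_t rxctrl_82576(uint32_t reg, uint32_t cpu)
    {
            reg &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
            return reg | (cpu << E1000_DCA_RXCTRL_CPUID_SHIFT)
                       | E1000_DCA_RXCTRL_DESC_DCA_EN;
    }

    int main(void)
    {
            printf("82575: 0x%08X\n", rxctrl_82575(0, 3));  /* 0x00000023 */
            printf("82576: 0x%08X\n", rxctrl_82576(0, 3));  /* 0x03000020 */
            return 0;
    }
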
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index ed748dcfb7a4..afdba3c9073c 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -90,6 +90,11 @@
90#define E1000_I2CCMD_ERROR 0x80000000 90#define E1000_I2CCMD_ERROR 0x80000000
91#define E1000_MAX_SGMII_PHY_REG_ADDR 255 91#define E1000_MAX_SGMII_PHY_REG_ADDR 255
92#define E1000_I2CCMD_PHY_TIMEOUT 200 92#define E1000_I2CCMD_PHY_TIMEOUT 200
93#define E1000_IVAR_VALID 0x80
94#define E1000_GPIE_NSICR 0x00000001
95#define E1000_GPIE_MSIX_MODE 0x00000010
96#define E1000_GPIE_EIAME 0x40000000
97#define E1000_GPIE_PBA 0x80000000
93 98
94/* Receive Descriptor bit definitions */ 99/* Receive Descriptor bit definitions */
95#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 100#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
@@ -213,6 +218,7 @@
213/* Device Control */ 218/* Device Control */
214#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ 219#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
215#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ 220#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
221#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
216#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ 222#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
217#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ 223#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
218#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ 224#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
@@ -244,6 +250,7 @@
244 */ 250 */
245 251
246#define E1000_CONNSW_ENRGSRC 0x4 252#define E1000_CONNSW_ENRGSRC 0x4
253#define E1000_PCS_CFG_PCS_EN 8
247#define E1000_PCS_LCTL_FLV_LINK_UP 1 254#define E1000_PCS_LCTL_FLV_LINK_UP 1
248#define E1000_PCS_LCTL_FSV_100 2 255#define E1000_PCS_LCTL_FSV_100 2
249#define E1000_PCS_LCTL_FSV_1000 4 256#define E1000_PCS_LCTL_FSV_1000 4
@@ -253,6 +260,7 @@
253#define E1000_PCS_LCTL_AN_ENABLE 0x10000 260#define E1000_PCS_LCTL_AN_ENABLE 0x10000
254#define E1000_PCS_LCTL_AN_RESTART 0x20000 261#define E1000_PCS_LCTL_AN_RESTART 0x20000
255#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 262#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
263#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
256 264
257#define E1000_PCS_LSTS_LINK_OK 1 265#define E1000_PCS_LSTS_LINK_OK 1
258#define E1000_PCS_LSTS_SPEED_100 2 266#define E1000_PCS_LSTS_SPEED_100 2
@@ -360,6 +368,7 @@
360#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ 368#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
361#define E1000_PBA_24K 0x0018 369#define E1000_PBA_24K 0x0018
362#define E1000_PBA_34K 0x0022 370#define E1000_PBA_34K 0x0022
371#define E1000_PBA_64K 0x0040 /* 64KB */
363 372
364#define IFS_MAX 80 373#define IFS_MAX 80
365#define IFS_MIN 40 374#define IFS_MIN 40
@@ -528,6 +537,7 @@
528/* PHY Control Register */ 537/* PHY Control Register */
529#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ 538#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
530#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ 539#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
540#define MII_CR_POWER_DOWN 0x0800 /* Power down */
531#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ 541#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
532#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ 542#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
533#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ 543#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 746c3ea09e27..19fa4ee96f2e 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -38,6 +38,10 @@
38 38
39struct e1000_hw; 39struct e1000_hw;
40 40
41#define E1000_DEV_ID_82576 0x10C9
42#define E1000_DEV_ID_82576_FIBER 0x10E6
43#define E1000_DEV_ID_82576_SERDES 0x10E7
44#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
41#define E1000_DEV_ID_82575EB_COPPER 0x10A7 45#define E1000_DEV_ID_82575EB_COPPER 0x10A7
42#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 46#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
43#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 47#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
@@ -50,6 +54,7 @@ struct e1000_hw;
50enum e1000_mac_type { 54enum e1000_mac_type {
51 e1000_undefined = 0, 55 e1000_undefined = 0,
52 e1000_82575, 56 e1000_82575,
57 e1000_82576,
53 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ 58 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
54}; 59};
55 60
@@ -410,14 +415,17 @@ struct e1000_mac_operations {
410 s32 (*check_for_link)(struct e1000_hw *); 415 s32 (*check_for_link)(struct e1000_hw *);
411 s32 (*reset_hw)(struct e1000_hw *); 416 s32 (*reset_hw)(struct e1000_hw *);
412 s32 (*init_hw)(struct e1000_hw *); 417 s32 (*init_hw)(struct e1000_hw *);
418 bool (*check_mng_mode)(struct e1000_hw *);
413 s32 (*setup_physical_interface)(struct e1000_hw *); 419 s32 (*setup_physical_interface)(struct e1000_hw *);
414 void (*rar_set)(struct e1000_hw *, u8 *, u32); 420 void (*rar_set)(struct e1000_hw *, u8 *, u32);
415 s32 (*read_mac_addr)(struct e1000_hw *); 421 s32 (*read_mac_addr)(struct e1000_hw *);
416 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); 422 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
423 void (*mta_set)(struct e1000_hw *, u32);
417}; 424};
418 425
419struct e1000_phy_operations { 426struct e1000_phy_operations {
420 s32 (*acquire_phy)(struct e1000_hw *); 427 s32 (*acquire_phy)(struct e1000_hw *);
428 s32 (*check_reset_block)(struct e1000_hw *);
421 s32 (*force_speed_duplex)(struct e1000_hw *); 429 s32 (*force_speed_duplex)(struct e1000_hw *);
422 s32 (*get_cfg_done)(struct e1000_hw *hw); 430 s32 (*get_cfg_done)(struct e1000_hw *hw);
423 s32 (*get_cable_length)(struct e1000_hw *); 431 s32 (*get_cable_length)(struct e1000_hw *);
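
The three new function pointers (check_mng_mode, mta_set, check_reset_block) follow the existing ops-table convention: callers dispatch through hw->mac.ops or hw->phy.ops and should guard against a NULL slot on parts that leave a hook unimplemented. A hedged calling sketch, not lifted from the driver:

    /* sketch: guarded dispatch through the per-MAC ops tables */
    bool mng = false;

    if (hw->mac.ops.check_mng_mode)
            mng = hw->mac.ops.check_mng_mode(hw);

    if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
            return;                         /* PHY resets blocked by firmware */

    if (hw->mac.ops.mta_set)
            hw->mac.ops.mta_set(hw, hash_value);    /* from igb_hash_mc_addr() */
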
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 47ad2c4277c3..20408aa1f916 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -36,7 +36,6 @@
36 36
37static s32 igb_set_default_fc(struct e1000_hw *hw); 37static s32 igb_set_default_fc(struct e1000_hw *hw);
38static s32 igb_set_fc_watermarks(struct e1000_hw *hw); 38static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
39static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
40 39
41/** 40/**
42 * igb_remove_device - Free device specific structure 41 * igb_remove_device - Free device specific structure
@@ -360,7 +359,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
360 * the multicast filter table array address and new table value. See 359 * the multicast filter table array address and new table value. See
361 * igb_mta_set() 360 * igb_mta_set()
362 **/ 361 **/
363static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) 362u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
364{ 363{
365 u32 hash_value, hash_mask; 364 u32 hash_value, hash_mask;
366 u8 bit_shift = 0; 365 u8 bit_shift = 0;
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index 326b6592307b..dc2f8cce15e7 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -94,5 +94,6 @@ enum e1000_mng_mode {
94#define E1000_HICR_C 0x02 94#define E1000_HICR_C 0x02
95 95
96extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); 96extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
97extern u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
97 98
98#endif 99#endif
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index ff187b73c69e..b95093d24c09 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -56,6 +56,9 @@
56#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ 56#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
57#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ 57#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
58#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ 58#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
59#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
60#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
61#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
59#define E1000_TCTL 0x00400 /* TX Control - RW */ 62#define E1000_TCTL 0x00400 /* TX Control - RW */
60#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ 63#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
61#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ 64#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
@@ -217,6 +220,7 @@
217#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ 220#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
218#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 221#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
219#define E1000_RA 0x05400 /* Receive Address - RW Array */ 222#define E1000_RA 0x05400 /* Receive Address - RW Array */
223#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
220#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ 224#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
221#define E1000_VMD_CTL 0x0581C /* VMDq Control - RW */ 225#define E1000_VMD_CTL 0x0581C /* VMDq Control - RW */
222#define E1000_WUC 0x05800 /* Wakeup Control - RW */ 226#define E1000_WUC 0x05800 /* Wakeup Control - RW */
@@ -235,6 +239,8 @@
235#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ 239#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
236#define E1000_SWSM 0x05B50 /* SW Semaphore */ 240#define E1000_SWSM 0x05B50 /* SW Semaphore */
237#define E1000_FWSM 0x05B54 /* FW Semaphore */ 241#define E1000_FWSM 0x05B54 /* FW Semaphore */
242#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
243#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
 238#define E1000_HICR 0x08F00 /* Host Interface Control */ 244
239 245
240/* RSS registers */ 246/* RSS registers */
@@ -256,7 +262,8 @@
256#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) 262#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
257#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ 263#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
258 264
259#define E1000_REGISTER(a, reg) reg 265#define E1000_REGISTER(a, reg) (((a)->mac.type < e1000_82576) \
 266 ? reg : igb_translate_register_82576(reg))
260 267
261#define wr32(reg, value) (writel(value, hw->hw_addr + reg)) 268#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
262#define rd32(reg) (readl(hw->hw_addr + reg)) 269#define rd32(reg) (readl(hw->hw_addr + reg))
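
With E1000_REGISTER in place, every rd32/wr32 on a queue register is rebased transparently on 82576 silicon; e.g. the first Tx descriptor base moves to 0x0E000. A standalone check of that dispatch (the legacy 0x03800 TDBAL base is an assumption for the demo; the translated offset is copied from igb_translate_register_82576()):

    #include <stdio.h>
    #include <stdint.h>

    #define E1000_TDBAL(n) (0x03800 + ((n) * 0x100))  /* legacy offset, assumed */

    enum mac_type { e1000_82575, e1000_82576 };

    /* mirrors igb_translate_register_82576() for the one register tested */
    static uint32_t translate_82576(uint32_t reg)
    {
            return (reg == E1000_TDBAL(0)) ? 0x0E000 : reg;
    }

    #define REGISTER(type, reg) \
            ((type) < e1000_82576 ? (uint32_t)(reg) : translate_82576(reg))

    int main(void)
    {
            printf("82575 TDBAL(0) = 0x%05X\n",
                   REGISTER(e1000_82575, E1000_TDBAL(0)));   /* 0x03800 */
            printf("82576 TDBAL(0) = 0x%05X\n",
                   REGISTER(e1000_82576, E1000_TDBAL(0)));   /* 0x0E000 */
            return 0;
    }
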
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 2c48eec17660..56de7ec15b46 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -36,12 +36,20 @@
36 36
37struct igb_adapter; 37struct igb_adapter;
38 38
39#ifdef CONFIG_IGB_LRO
40#include <linux/inet_lro.h>
41#define MAX_LRO_AGGR 32
42#define MAX_LRO_DESCRIPTORS 8
43#endif
44
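
The inet_lro hookup implied by these defines is per Rx ring: each ring owns a net_lro_mgr whose descriptors aggregate in-order TCP segments until a flush at the end of a poll. A hedged configuration sketch for the open path (field names follow the inet_lro API of this era; the lro_desc_array allocation and exact placement in igb are assumptions):

    /* sketch: wiring one ring's LRO manager at interface-open time */
    rx_ring->lro_mgr.max_aggr       = MAX_LRO_AGGR;
    rx_ring->lro_mgr.max_desc       = MAX_LRO_DESCRIPTORS;
    rx_ring->lro_mgr.lro_arr        = lro_desc_array;   /* assumed allocation */
    rx_ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
    rx_ring->lro_mgr.dev            = adapter->netdev;
    rx_ring->lro_mgr.features       = LRO_F_NAPI;       /* flushed from NAPI */
    rx_ring->lro_mgr.ip_summed      = CHECKSUM_UNNECESSARY;
    rx_ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

    /* per received frame in the clean routine, then once per poll: */
    lro_receive_skb(&rx_ring->lro_mgr, skb, NULL);
    lro_flush_all(&rx_ring->lro_mgr);
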
39/* Interrupt defines */ 45/* Interrupt defines */
40#define IGB_MAX_TX_CLEAN 72 46#define IGB_MAX_TX_CLEAN 72
41 47
42#define IGB_MIN_DYN_ITR 3000 48#define IGB_MIN_DYN_ITR 3000
43#define IGB_MAX_DYN_ITR 96000 49#define IGB_MAX_DYN_ITR 96000
44#define IGB_START_ITR 6000 50
 51/* (1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */
52#define IGB_START_ITR 648
45 53
46#define IGB_DYN_ITR_PACKET_THRESHOLD 2 54#define IGB_DYN_ITR_PACKET_THRESHOLD 2
47#define IGB_DYN_ITR_LENGTH_LOW 200 55#define IGB_DYN_ITR_LENGTH_LOW 200
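
The new IGB_START_ITR encodes the interrupt interval directly in EITR units instead of interrupts per second: 6000 ints/s is a ~166,667 ns interval, or 162 units of 1024 ns, shifted left two bits into the register's interval field (the same encoding yields the 976 that igb_request_msix now programs for ~4000 ints/s). Checking the arithmetic standalone:

    #include <stdio.h>

    static unsigned int eitr_from_rate(unsigned int ints_per_sec)
    {
            /* interval in 1024 ns units, shifted into the EITR interval field */
            return (1000000000u / (ints_per_sec * 1024u)) << 2;
    }

    int main(void)
    {
            printf("6000 ints/s -> EITR %u\n", eitr_from_rate(6000)); /* 648 */
            printf("4000 ints/s -> EITR %u\n", eitr_from_rate(4000)); /* 976 */
            return 0;
    }
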
@@ -62,6 +70,7 @@ struct igb_adapter;
62 70
63/* Transmit and receive queues */ 71/* Transmit and receive queues */
64#define IGB_MAX_RX_QUEUES 4 72#define IGB_MAX_RX_QUEUES 4
73#define IGB_MAX_TX_QUEUES 4
65 74
66/* RX descriptor control thresholds. 75/* RX descriptor control thresholds.
67 * PTHRESH - MAC will consider prefetch if it has fewer than this number of 76 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
@@ -124,6 +133,7 @@ struct igb_buffer {
124 struct { 133 struct {
125 struct page *page; 134 struct page *page;
126 u64 page_dma; 135 u64 page_dma;
136 unsigned int page_offset;
127 }; 137 };
128 }; 138 };
129}; 139};
@@ -157,18 +167,19 @@ struct igb_ring {
157 union { 167 union {
158 /* TX */ 168 /* TX */
159 struct { 169 struct {
160 spinlock_t tx_clean_lock; 170 struct igb_queue_stats tx_stats;
161 spinlock_t tx_lock;
162 bool detect_tx_hung; 171 bool detect_tx_hung;
163 }; 172 };
164 /* RX */ 173 /* RX */
165 struct { 174 struct {
166 /* arrays of page information for packet split */
167 struct sk_buff *pending_skb;
168 int pending_skb_page;
169 int no_itr_adjust;
170 struct igb_queue_stats rx_stats; 175 struct igb_queue_stats rx_stats;
171 struct napi_struct napi; 176 struct napi_struct napi;
177 int set_itr;
178 struct igb_ring *buddy;
179#ifdef CONFIG_IGB_LRO
180 struct net_lro_mgr lro_mgr;
181 bool lro_used;
182#endif
172 }; 183 };
173 }; 184 };
174 185
@@ -211,7 +222,6 @@ struct igb_adapter {
211 u32 itr_setting; 222 u32 itr_setting;
212 u16 tx_itr; 223 u16 tx_itr;
213 u16 rx_itr; 224 u16 rx_itr;
214 int set_itr;
215 225
216 struct work_struct reset_task; 226 struct work_struct reset_task;
217 struct work_struct watchdog_task; 227 struct work_struct watchdog_task;
@@ -270,15 +280,32 @@ struct igb_adapter {
270 280
271 /* to not mess up cache alignment, always add to the bottom */ 281 /* to not mess up cache alignment, always add to the bottom */
272 unsigned long state; 282 unsigned long state;
273 unsigned int msi_enabled; 283 unsigned int flags;
274
275 u32 eeprom_wol; 284 u32 eeprom_wol;
276 285
277 /* for ioport free */ 286 /* for ioport free */
278 int bars; 287 int bars;
279 int need_ioport; 288 int need_ioport;
289
290#ifdef CONFIG_NETDEVICES_MULTIQUEUE
291 struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
292#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
293#ifdef CONFIG_IGB_LRO
294 unsigned int lro_max_aggr;
295 unsigned int lro_aggregated;
296 unsigned int lro_flushed;
297 unsigned int lro_no_desc;
298#endif
280}; 299};
281 300
301#define IGB_FLAG_HAS_MSI (1 << 0)
302#define IGB_FLAG_MSI_ENABLE (1 << 1)
303#define IGB_FLAG_HAS_DCA (1 << 2)
304#define IGB_FLAG_DCA_ENABLED (1 << 3)
305#define IGB_FLAG_IN_NETPOLL (1 << 5)
306#define IGB_FLAG_QUAD_PORT_A (1 << 6)
307#define IGB_FLAG_NEED_CTX_IDX (1 << 7)
308
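
These flags replace individual fields such as msi_enabled with one bitmask; the rest of the patch tests and sets them with plain mask arithmetic, for example:

    /* set on successful pci_enable_msi() */
    adapter->flags |= IGB_FLAG_HAS_MSI;

    /* cleared when DCA is torn down */
    adapter->flags &= ~IGB_FLAG_DCA_ENABLED;

    /* tested wherever the old msi_enabled was read */
    if (adapter->flags & IGB_FLAG_HAS_MSI)
            err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
                              netdev->name, netdev);
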
282enum e1000_state_t { 309enum e1000_state_t {
283 __IGB_TESTING, 310 __IGB_TESTING,
284 __IGB_RESETTING, 311 __IGB_RESETTING,
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 0447f9bcd27a..11aee1309951 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -93,13 +93,16 @@ static const struct igb_stats igb_gstrings_stats[] = {
93 { "tx_smbus", IGB_STAT(stats.mgptc) }, 93 { "tx_smbus", IGB_STAT(stats.mgptc) },
94 { "rx_smbus", IGB_STAT(stats.mgprc) }, 94 { "rx_smbus", IGB_STAT(stats.mgprc) },
95 { "dropped_smbus", IGB_STAT(stats.mgpdc) }, 95 { "dropped_smbus", IGB_STAT(stats.mgpdc) },
96#ifdef CONFIG_IGB_LRO
97 { "lro_aggregated", IGB_STAT(lro_aggregated) },
98 { "lro_flushed", IGB_STAT(lro_flushed) },
99 { "lro_no_desc", IGB_STAT(lro_no_desc) },
100#endif
96}; 101};
97 102
98#define IGB_QUEUE_STATS_LEN \ 103#define IGB_QUEUE_STATS_LEN \
99 ((((((struct igb_adapter *)netdev->priv)->num_rx_queues > 1) ? \ 104 ((((struct igb_adapter *)netdev->priv)->num_rx_queues + \
100 ((struct igb_adapter *)netdev->priv)->num_rx_queues : 0) + \ 105 ((struct igb_adapter *)netdev->priv)->num_tx_queues) * \
101 (((((struct igb_adapter *)netdev->priv)->num_tx_queues > 1) ? \
102 ((struct igb_adapter *)netdev->priv)->num_tx_queues : 0))) * \
103 (sizeof(struct igb_queue_stats) / sizeof(u64))) 106 (sizeof(struct igb_queue_stats) / sizeof(u64)))
104#define IGB_GLOBAL_STATS_LEN \ 107#define IGB_GLOBAL_STATS_LEN \
105 sizeof(igb_gstrings_stats) / sizeof(struct igb_stats) 108 sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
@@ -829,8 +832,9 @@ err_setup:
829/* ethtool register test data */ 832/* ethtool register test data */
830struct igb_reg_test { 833struct igb_reg_test {
831 u16 reg; 834 u16 reg;
832 u8 array_len; 835 u16 reg_offset;
833 u8 test_type; 836 u16 array_len;
837 u16 test_type;
834 u32 mask; 838 u32 mask;
835 u32 write; 839 u32 write;
836}; 840};
@@ -852,34 +856,72 @@ struct igb_reg_test {
852#define TABLE64_TEST_LO 5 856#define TABLE64_TEST_LO 5
853#define TABLE64_TEST_HI 6 857#define TABLE64_TEST_HI 6
854 858
855/* default register test */ 859/* 82576 reg test */
860static struct igb_reg_test reg_test_82576[] = {
861 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
862 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
863 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
864 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
865 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
866 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
867 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
868 { E1000_RDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
869 { E1000_RDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
870 { E1000_RDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
871 /* Enable all four RX queues before testing. */
872 { E1000_RXDCTL(0), 0x100, 1, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
873 /* RDH is read-only for 82576, only test RDT. */
874 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
875 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
876 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
877 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
878 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
879 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
880 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
881 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
882 { E1000_TDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
883 { E1000_TDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
884 { E1000_TDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
885 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
886 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
887 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
888 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
889 { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
890 { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
891 { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
892 { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
 893 { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
894 { 0, 0, 0, 0 }
895};
896
897/* 82575 register test */
856static struct igb_reg_test reg_test_82575[] = { 898static struct igb_reg_test reg_test_82575[] = {
857 { E1000_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 899 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
858 { E1000_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 900 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
859 { E1000_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 901 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
860 { E1000_VET, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 902 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
861 { E1000_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 903 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
862 { E1000_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 904 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
863 { E1000_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 905 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
864 /* Enable all four RX queues before testing. */ 906 /* Enable all four RX queues before testing. */
865 { E1000_RXDCTL(0), 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 907 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
866 /* RDH is read-only for 82575, only test RDT. */ 908 /* RDH is read-only for 82575, only test RDT. */
867 { E1000_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 909 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
868 { E1000_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, 910 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
869 { E1000_FCRTH, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, 911 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
870 { E1000_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 912 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
871 { E1000_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, 913 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
872 { E1000_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 914 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
873 { E1000_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 915 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
874 { E1000_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 916 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
875 { E1000_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 917 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
876 { E1000_RCTL, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, 918 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
877 { E1000_RCTL, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, 919 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
878 { E1000_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 920 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
879 { E1000_TXCW, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, 921 { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
880 { E1000_RA, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 922 { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
881 { E1000_RA, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, 923 { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
882 { E1000_MTA, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 924 { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
883 { 0, 0, 0, 0 } 925 { 0, 0, 0, 0 }
884}; 926};
885 927
@@ -939,7 +981,15 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
939 u32 i, toggle; 981 u32 i, toggle;
940 982
941 toggle = 0x7FFFF3FF; 983 toggle = 0x7FFFF3FF;
942 test = reg_test_82575; 984
985 switch (adapter->hw.mac.type) {
986 case e1000_82576:
987 test = reg_test_82576;
988 break;
989 default:
990 test = reg_test_82575;
991 break;
992 }
943 993
944 /* Because the status register is such a special case, 994 /* Because the status register is such a special case,
945 * we handle it separately from the rest of the register 995 * we handle it separately from the rest of the register
@@ -966,19 +1016,19 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
966 for (i = 0; i < test->array_len; i++) { 1016 for (i = 0; i < test->array_len; i++) {
967 switch (test->test_type) { 1017 switch (test->test_type) {
968 case PATTERN_TEST: 1018 case PATTERN_TEST:
969 REG_PATTERN_TEST(test->reg + (i * 0x100), 1019 REG_PATTERN_TEST(test->reg + (i * test->reg_offset),
970 test->mask, 1020 test->mask,
971 test->write); 1021 test->write);
972 break; 1022 break;
973 case SET_READ_TEST: 1023 case SET_READ_TEST:
974 REG_SET_AND_CHECK(test->reg + (i * 0x100), 1024 REG_SET_AND_CHECK(test->reg + (i * test->reg_offset),
975 test->mask, 1025 test->mask,
976 test->write); 1026 test->write);
977 break; 1027 break;
978 case WRITE_NO_TEST: 1028 case WRITE_NO_TEST:
979 writel(test->write, 1029 writel(test->write,
980 (adapter->hw.hw_addr + test->reg) 1030 (adapter->hw.hw_addr + test->reg)
981 + (i * 0x100)); 1031 + (i * test->reg_offset));
982 break; 1032 break;
983 case TABLE32_TEST: 1033 case TABLE32_TEST:
984 REG_PATTERN_TEST(test->reg + (i * 4), 1034 REG_PATTERN_TEST(test->reg + (i * 4),
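
The stride for each row now comes from the table's new reg_offset field rather than a fixed 0x100, which is what lets the 82576 rows reach the second register bank: queues 0-3 sit 0x100 apart while RDBAL(4) and friends pack at 0x40 intervals. Enumerating one such row standalone (the 0x0C100 base for RDBAL(4) is an assumption consistent with the translated offsets above):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* { E1000_RDBAL(4), 0x40, 8, PATTERN_TEST, ... } from reg_test_82576 */
            uint32_t base = 0x0C100;        /* RDBAL(4), assumed */
            unsigned int i, stride = 0x40, array_len = 8;

            for (i = 0; i < array_len; i++)
                    printf("pattern test at 0x%05X\n", base + i * stride);
            return 0;
    }
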
@@ -1052,7 +1102,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1052 if (adapter->msix_entries) { 1102 if (adapter->msix_entries) {
1053 /* NOTE: we don't test MSI-X interrupts here, yet */ 1103 /* NOTE: we don't test MSI-X interrupts here, yet */
1054 return 0; 1104 return 0;
1055 } else if (adapter->msi_enabled) { 1105 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
1056 shared_int = false; 1106 shared_int = false;
1057 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) { 1107 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
1058 *data = 1; 1108 *data = 1;
@@ -1394,13 +1444,39 @@ static int igb_set_phy_loopback(struct igb_adapter *adapter)
1394static int igb_setup_loopback_test(struct igb_adapter *adapter) 1444static int igb_setup_loopback_test(struct igb_adapter *adapter)
1395{ 1445{
1396 struct e1000_hw *hw = &adapter->hw; 1446 struct e1000_hw *hw = &adapter->hw;
1397 u32 rctl; 1447 u32 reg;
1398 1448
1399 if (hw->phy.media_type == e1000_media_type_fiber || 1449 if (hw->phy.media_type == e1000_media_type_fiber ||
1400 hw->phy.media_type == e1000_media_type_internal_serdes) { 1450 hw->phy.media_type == e1000_media_type_internal_serdes) {
1401 rctl = rd32(E1000_RCTL); 1451 reg = rd32(E1000_RCTL);
1402 rctl |= E1000_RCTL_LBM_TCVR; 1452 reg |= E1000_RCTL_LBM_TCVR;
1403 wr32(E1000_RCTL, rctl); 1453 wr32(E1000_RCTL, reg);
1454
1455 wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
1456
1457 reg = rd32(E1000_CTRL);
1458 reg &= ~(E1000_CTRL_RFCE |
1459 E1000_CTRL_TFCE |
1460 E1000_CTRL_LRST);
1461 reg |= E1000_CTRL_SLU |
1462 E1000_CTRL_FD;
1463 wr32(E1000_CTRL, reg);
1464
1465 /* Unset switch control to serdes energy detect */
1466 reg = rd32(E1000_CONNSW);
1467 reg &= ~E1000_CONNSW_ENRGSRC;
1468 wr32(E1000_CONNSW, reg);
1469
1470 /* Set PCS register for forced speed */
1471 reg = rd32(E1000_PCS_LCTL);
 1472 reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg */
1473 reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
1474 E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
1475 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
1476 E1000_PCS_LCTL_FSD | /* Force Speed */
1477 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
1478 wr32(E1000_PCS_LCTL, reg);
1479
1404 return 0; 1480 return 0;
1405 } else if (hw->phy.media_type == e1000_media_type_copper) { 1481 } else if (hw->phy.media_type == e1000_media_type_copper) {
1406 return igb_set_phy_loopback(adapter); 1482 return igb_set_phy_loopback(adapter);
@@ -1660,6 +1736,8 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
1660 wol->supported = 0; 1736 wol->supported = 0;
1661 break; 1737 break;
1662 case E1000_DEV_ID_82575EB_FIBER_SERDES: 1738 case E1000_DEV_ID_82575EB_FIBER_SERDES:
1739 case E1000_DEV_ID_82576_FIBER:
1740 case E1000_DEV_ID_82576_SERDES:
1663 /* Wake events not supported on port B */ 1741 /* Wake events not supported on port B */
1664 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) { 1742 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
1665 wol->supported = 0; 1743 wol->supported = 0;
@@ -1668,6 +1746,15 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
1668 /* return success for non excluded adapter ports */ 1746 /* return success for non excluded adapter ports */
1669 retval = 0; 1747 retval = 0;
1670 break; 1748 break;
1749 case E1000_DEV_ID_82576_QUAD_COPPER:
1750 /* quad port adapters only support WoL on port A */
1751 if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
1752 wol->supported = 0;
1753 break;
1754 }
1755 /* return success for non excluded adapter ports */
1756 retval = 0;
1757 break;
1671 default: 1758 default:
1672 /* dual port cards only support WoL on port A from now on 1759 /* dual port cards only support WoL on port A from now on
1673 * unless it was enabled in the eeprom for port B 1760 * unless it was enabled in the eeprom for port B
@@ -1774,6 +1861,8 @@ static int igb_set_coalesce(struct net_device *netdev,
1774 struct ethtool_coalesce *ec) 1861 struct ethtool_coalesce *ec)
1775{ 1862{
1776 struct igb_adapter *adapter = netdev_priv(netdev); 1863 struct igb_adapter *adapter = netdev_priv(netdev);
1864 struct e1000_hw *hw = &adapter->hw;
1865 int i;
1777 1866
1778 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || 1867 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1779 ((ec->rx_coalesce_usecs > 3) && 1868 ((ec->rx_coalesce_usecs > 3) &&
@@ -1782,13 +1871,16 @@ static int igb_set_coalesce(struct net_device *netdev,
1782 return -EINVAL; 1871 return -EINVAL;
1783 1872
1784 /* convert to rate of irq's per second */ 1873 /* convert to rate of irq's per second */
1785 if (ec->rx_coalesce_usecs <= 3) 1874 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
1786 adapter->itr_setting = ec->rx_coalesce_usecs; 1875 adapter->itr_setting = ec->rx_coalesce_usecs;
1787 else 1876 adapter->itr = IGB_START_ITR;
1788 adapter->itr_setting = (1000000 / ec->rx_coalesce_usecs); 1877 } else {
1878 adapter->itr_setting = ec->rx_coalesce_usecs << 2;
1879 adapter->itr = adapter->itr_setting;
1880 }
1789 1881
1790 if (netif_running(netdev)) 1882 for (i = 0; i < adapter->num_rx_queues; i++)
1791 igb_reinit_locked(adapter); 1883 wr32(adapter->rx_ring[i].itr_register, adapter->itr);
1792 1884
1793 return 0; 1885 return 0;
1794} 1886}
@@ -1801,7 +1893,7 @@ static int igb_get_coalesce(struct net_device *netdev,
1801 if (adapter->itr_setting <= 3) 1893 if (adapter->itr_setting <= 3)
1802 ec->rx_coalesce_usecs = adapter->itr_setting; 1894 ec->rx_coalesce_usecs = adapter->itr_setting;
1803 else 1895 else
1804 ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; 1896 ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
1805 1897
1806 return 0; 1898 return 0;
1807} 1899}
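
The conversion is now a pure shift pair: the EITR interval field begins at bit 2 and counts roughly one-microsecond (1024 ns) units, so ethtool's rx-usecs goes in as usecs << 2 and is reported back as itr_setting >> 2 with no division. A standalone round trip:

    #include <stdio.h>

    int main(void)
    {
            unsigned int usecs = 100;               /* ethtool -C ethX rx-usecs 100 */
            unsigned int itr_setting = usecs << 2;  /* 400, written to each EITR */
            unsigned int reported = itr_setting >> 2;

            printf("set %u us -> itr_setting %u -> get %u us\n",
                   usecs, itr_setting, reported);
            return 0;
    }
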
@@ -1835,6 +1927,18 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
1835 int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64); 1927 int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
1836 int j; 1928 int j;
1837 int i; 1929 int i;
1930#ifdef CONFIG_IGB_LRO
1931 int aggregated = 0, flushed = 0, no_desc = 0;
1932
1933 for (i = 0; i < adapter->num_rx_queues; i++) {
1934 aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
1935 flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
1936 no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
1937 }
1938 adapter->lro_aggregated = aggregated;
1939 adapter->lro_flushed = flushed;
1940 adapter->lro_no_desc = no_desc;
1941#endif
1838 1942
1839 igb_update_stats(adapter); 1943 igb_update_stats(adapter);
1840 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 1944 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
@@ -1842,6 +1946,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
1842 data[i] = (igb_gstrings_stats[i].sizeof_stat == 1946 data[i] = (igb_gstrings_stats[i].sizeof_stat ==
1843 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1947 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1844 } 1948 }
1949 for (j = 0; j < adapter->num_tx_queues; j++) {
1950 int k;
1951 queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
1952 for (k = 0; k < stat_count; k++)
1953 data[i + k] = queue_stat[k];
1954 i += k;
1955 }
1845 for (j = 0; j < adapter->num_rx_queues; j++) { 1956 for (j = 0; j < adapter->num_rx_queues; j++) {
1846 int k; 1957 int k;
1847 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats; 1958 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index afd4ce3f7b53..aaed129f4ca0 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -41,22 +41,27 @@
41#include <linux/delay.h> 41#include <linux/delay.h>
42#include <linux/interrupt.h> 42#include <linux/interrupt.h>
43#include <linux/if_ether.h> 43#include <linux/if_ether.h>
44 44#ifdef CONFIG_DCA
45#include <linux/dca.h>
46#endif
45#include "igb.h" 47#include "igb.h"
46 48
47#define DRV_VERSION "1.0.8-k2" 49#define DRV_VERSION "1.2.45-k2"
48char igb_driver_name[] = "igb"; 50char igb_driver_name[] = "igb";
49char igb_driver_version[] = DRV_VERSION; 51char igb_driver_version[] = DRV_VERSION;
50static const char igb_driver_string[] = 52static const char igb_driver_string[] =
51 "Intel(R) Gigabit Ethernet Network Driver"; 53 "Intel(R) Gigabit Ethernet Network Driver";
52static const char igb_copyright[] = "Copyright (c) 2007 Intel Corporation."; 54static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation.";
53
54 55
55static const struct e1000_info *igb_info_tbl[] = { 56static const struct e1000_info *igb_info_tbl[] = {
56 [board_82575] = &e1000_82575_info, 57 [board_82575] = &e1000_82575_info,
57}; 58};
58 59
59static struct pci_device_id igb_pci_tbl[] = { 60static struct pci_device_id igb_pci_tbl[] = {
61 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
62 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
63 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
60 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, 65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
61 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, 66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
62 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, 67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
@@ -102,10 +107,18 @@ static irqreturn_t igb_msix_other(int irq, void *);
102static irqreturn_t igb_msix_rx(int irq, void *); 107static irqreturn_t igb_msix_rx(int irq, void *);
103static irqreturn_t igb_msix_tx(int irq, void *); 108static irqreturn_t igb_msix_tx(int irq, void *);
104static int igb_clean_rx_ring_msix(struct napi_struct *, int); 109static int igb_clean_rx_ring_msix(struct napi_struct *, int);
110#ifdef CONFIG_DCA
111static void igb_update_rx_dca(struct igb_ring *);
112static void igb_update_tx_dca(struct igb_ring *);
113static void igb_setup_dca(struct igb_adapter *);
114#endif /* CONFIG_DCA */
105static bool igb_clean_tx_irq(struct igb_ring *); 115static bool igb_clean_tx_irq(struct igb_ring *);
106static int igb_clean(struct napi_struct *, int); 116static int igb_poll(struct napi_struct *, int);
107static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); 117static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
108static void igb_alloc_rx_buffers_adv(struct igb_ring *, int); 118static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
119#ifdef CONFIG_IGB_LRO
120static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
121#endif
109static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); 122static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
110static void igb_tx_timeout(struct net_device *); 123static void igb_tx_timeout(struct net_device *);
111static void igb_reset_task(struct work_struct *); 124static void igb_reset_task(struct work_struct *);
@@ -119,6 +132,14 @@ static int igb_suspend(struct pci_dev *, pm_message_t);
119static int igb_resume(struct pci_dev *); 132static int igb_resume(struct pci_dev *);
120#endif 133#endif
121static void igb_shutdown(struct pci_dev *); 134static void igb_shutdown(struct pci_dev *);
135#ifdef CONFIG_DCA
136static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
137static struct notifier_block dca_notifier = {
138 .notifier_call = igb_notify_dca,
139 .next = NULL,
140 .priority = 0
141};
142#endif
122 143
123#ifdef CONFIG_NET_POLL_CONTROLLER 144#ifdef CONFIG_NET_POLL_CONTROLLER
124/* for netdump / net console */ 145/* for netdump / net console */
@@ -151,6 +172,8 @@ static struct pci_driver igb_driver = {
151 .err_handler = &igb_err_handler 172 .err_handler = &igb_err_handler
152}; 173};
153 174
175static int global_quad_port_a; /* global quad port a indication */
176
154MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); 177MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
155MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); 178MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
156MODULE_LICENSE("GPL"); 179MODULE_LICENSE("GPL");
@@ -182,7 +205,12 @@ static int __init igb_init_module(void)
182 205
183 printk(KERN_INFO "%s\n", igb_copyright); 206 printk(KERN_INFO "%s\n", igb_copyright);
184 207
208 global_quad_port_a = 0;
209
185 ret = pci_register_driver(&igb_driver); 210 ret = pci_register_driver(&igb_driver);
211#ifdef CONFIG_DCA
212 dca_register_notify(&dca_notifier);
213#endif
186 return ret; 214 return ret;
187} 215}
188 216
@@ -196,6 +224,9 @@ module_init(igb_init_module);
196 **/ 224 **/
197static void __exit igb_exit_module(void) 225static void __exit igb_exit_module(void)
198{ 226{
227#ifdef CONFIG_DCA
228 dca_unregister_notify(&dca_notifier);
229#endif
199 pci_unregister_driver(&igb_driver); 230 pci_unregister_driver(&igb_driver);
200} 231}
201 232
@@ -224,6 +255,13 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
224 return -ENOMEM; 255 return -ENOMEM;
225 } 256 }
226 257
258 adapter->rx_ring->buddy = adapter->tx_ring;
259
260 for (i = 0; i < adapter->num_tx_queues; i++) {
261 struct igb_ring *ring = &(adapter->tx_ring[i]);
262 ring->adapter = adapter;
263 ring->queue_index = i;
264 }
227 for (i = 0; i < adapter->num_rx_queues; i++) { 265 for (i = 0; i < adapter->num_rx_queues; i++) {
228 struct igb_ring *ring = &(adapter->rx_ring[i]); 266 struct igb_ring *ring = &(adapter->rx_ring[i]);
229 ring->adapter = adapter; 267 ring->adapter = adapter;
@@ -231,17 +269,32 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
231 ring->itr_register = E1000_ITR; 269 ring->itr_register = E1000_ITR;
232 270
233 /* set a default napi handler for each rx_ring */ 271 /* set a default napi handler for each rx_ring */
234 netif_napi_add(adapter->netdev, &ring->napi, igb_clean, 64); 272 netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
235 } 273 }
236 return 0; 274 return 0;
237} 275}
238 276
277static void igb_free_queues(struct igb_adapter *adapter)
278{
279 int i;
280
281 for (i = 0; i < adapter->num_rx_queues; i++)
282 netif_napi_del(&adapter->rx_ring[i].napi);
283
284 kfree(adapter->tx_ring);
285 kfree(adapter->rx_ring);
286}
287
239#define IGB_N0_QUEUE -1 288#define IGB_N0_QUEUE -1
240static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, 289static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
241 int tx_queue, int msix_vector) 290 int tx_queue, int msix_vector)
242{ 291{
243 u32 msixbm = 0; 292 u32 msixbm = 0;
244 struct e1000_hw *hw = &adapter->hw; 293 struct e1000_hw *hw = &adapter->hw;
294 u32 ivar, index;
295
296 switch (hw->mac.type) {
297 case e1000_82575:
245 /* The 82575 assigns vectors using a bitmask, which matches the 298 /* The 82575 assigns vectors using a bitmask, which matches the
246 bitmask for the EICR/EIMS/EIMC registers. To assign one 299 bitmask for the EICR/EIMS/EIMC registers. To assign one
247 or more queues to a vector, we write the appropriate bits 300 or more queues to a vector, we write the appropriate bits
@@ -256,6 +309,47 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
256 E1000_EICR_TX_QUEUE0 << tx_queue; 309 E1000_EICR_TX_QUEUE0 << tx_queue;
257 } 310 }
258 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 311 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
312 break;
313 case e1000_82576:
314 /* Kawela uses a table-based method for assigning vectors.
315 Each queue has a single entry in the table to which we write
316 a vector number along with a "valid" bit. Sadly, the layout
317 of the table is somewhat counterintuitive. */
318 if (rx_queue > IGB_N0_QUEUE) {
319 index = (rx_queue & 0x7);
320 ivar = array_rd32(E1000_IVAR0, index);
321 if (rx_queue < 8) {
322 /* vector goes into low byte of register */
323 ivar = ivar & 0xFFFFFF00;
324 ivar |= msix_vector | E1000_IVAR_VALID;
325 } else {
326 /* vector goes into third byte of register */
327 ivar = ivar & 0xFF00FFFF;
328 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
329 }
 330 adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
331 array_wr32(E1000_IVAR0, index, ivar);
332 }
333 if (tx_queue > IGB_N0_QUEUE) {
334 index = (tx_queue & 0x7);
335 ivar = array_rd32(E1000_IVAR0, index);
336 if (tx_queue < 8) {
337 /* vector goes into second byte of register */
338 ivar = ivar & 0xFFFF00FF;
339 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
340 } else {
341 /* vector goes into high byte of register */
342 ivar = ivar & 0x00FFFFFF;
343 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
344 }
 345 adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
346 array_wr32(E1000_IVAR0, index, ivar);
347 }
348 break;
349 default:
350 BUG();
351 break;
352 }
259} 353}
260 354
261/** 355/**
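
Concretely, IVAR0[q & 7] packs four vector entries per 32-bit register: the Rx vector for queue q in byte 0 (byte 2 once q >= 8) and the Tx vector in byte 1 (byte 3 once q >= 8), each tagged with E1000_IVAR_VALID. A standalone sketch of composing one Rx entry the way igb_assign_vector() does:

    #include <stdio.h>
    #include <stdint.h>

    #define E1000_IVAR_VALID 0x80

    static uint32_t ivar_set_rx(uint32_t ivar, unsigned int q, unsigned int vec)
    {
            if (q < 8) {                            /* low byte */
                    ivar &= 0xFFFFFF00;
                    ivar |= vec | E1000_IVAR_VALID;
            } else {                                /* third byte */
                    ivar &= 0xFF00FFFF;
                    ivar |= (vec | E1000_IVAR_VALID) << 16;
            }
            return ivar;
    }

    int main(void)
    {
            /* rx queue 1 -> vector 3: IVAR0[1] low byte becomes 0x83 */
            printf("IVAR0[%u] = 0x%08X\n", 1 & 0x7, ivar_set_rx(0, 1, 3));
            return 0;
    }
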
@@ -271,13 +365,19 @@ static void igb_configure_msix(struct igb_adapter *adapter)
271 struct e1000_hw *hw = &adapter->hw; 365 struct e1000_hw *hw = &adapter->hw;
272 366
273 adapter->eims_enable_mask = 0; 367 adapter->eims_enable_mask = 0;
368 if (hw->mac.type == e1000_82576)
369 /* Turn on MSI-X capability first, or our settings
370 * won't stick. And it will take days to debug. */
371 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
372 E1000_GPIE_PBA | E1000_GPIE_EIAME |
373 E1000_GPIE_NSICR);
274 374
275 for (i = 0; i < adapter->num_tx_queues; i++) { 375 for (i = 0; i < adapter->num_tx_queues; i++) {
276 struct igb_ring *tx_ring = &adapter->tx_ring[i]; 376 struct igb_ring *tx_ring = &adapter->tx_ring[i];
277 igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++); 377 igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
278 adapter->eims_enable_mask |= tx_ring->eims_value; 378 adapter->eims_enable_mask |= tx_ring->eims_value;
279 if (tx_ring->itr_val) 379 if (tx_ring->itr_val)
280 writel(1000000000 / (tx_ring->itr_val * 256), 380 writel(tx_ring->itr_val,
281 hw->hw_addr + tx_ring->itr_register); 381 hw->hw_addr + tx_ring->itr_register);
282 else 382 else
283 writel(1, hw->hw_addr + tx_ring->itr_register); 383 writel(1, hw->hw_addr + tx_ring->itr_register);
@@ -285,10 +385,11 @@ static void igb_configure_msix(struct igb_adapter *adapter)
285 385
286 for (i = 0; i < adapter->num_rx_queues; i++) { 386 for (i = 0; i < adapter->num_rx_queues; i++) {
287 struct igb_ring *rx_ring = &adapter->rx_ring[i]; 387 struct igb_ring *rx_ring = &adapter->rx_ring[i];
 388 rx_ring->buddy = NULL;
288 igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++); 389 igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
289 adapter->eims_enable_mask |= rx_ring->eims_value; 390 adapter->eims_enable_mask |= rx_ring->eims_value;
290 if (rx_ring->itr_val) 391 if (rx_ring->itr_val)
291 writel(1000000000 / (rx_ring->itr_val * 256), 392 writel(rx_ring->itr_val,
292 hw->hw_addr + rx_ring->itr_register); 393 hw->hw_addr + rx_ring->itr_register);
293 else 394 else
294 writel(1, hw->hw_addr + rx_ring->itr_register); 395 writel(1, hw->hw_addr + rx_ring->itr_register);
@@ -296,6 +397,8 @@ static void igb_configure_msix(struct igb_adapter *adapter)
296 397
297 398
298 /* set vector for other causes, i.e. link changes */ 399 /* set vector for other causes, i.e. link changes */
400 switch (hw->mac.type) {
401 case e1000_82575:
299 array_wr32(E1000_MSIXBM(0), vector++, 402 array_wr32(E1000_MSIXBM(0), vector++,
300 E1000_EIMS_OTHER); 403 E1000_EIMS_OTHER);
301 404
@@ -311,6 +414,19 @@ static void igb_configure_msix(struct igb_adapter *adapter)
311 adapter->eims_enable_mask |= E1000_EIMS_OTHER; 414 adapter->eims_enable_mask |= E1000_EIMS_OTHER;
312 adapter->eims_other = E1000_EIMS_OTHER; 415 adapter->eims_other = E1000_EIMS_OTHER;
313 416
417 break;
418
419 case e1000_82576:
420 tmp = (vector++ | E1000_IVAR_VALID) << 8;
421 wr32(E1000_IVAR_MISC, tmp);
422
423 adapter->eims_enable_mask = (1 << (vector)) - 1;
424 adapter->eims_other = 1 << (vector - 1);
425 break;
426 default:
427 /* do nothing, since nothing else supports MSI-X */
428 break;
429 } /* switch (hw->mac.type) */
314 wrfl(); 430 wrfl();
315} 431}
316 432
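
Both loops above stop converting the ITR at write time: the old code turned an interrupts-per-second figure into register ticks on every writel(), while the new code stores the tick value in itr_val up front (976 in igb_request_msix() below). A sketch of that conversion, assuming the 256 ns tick granularity implied by the removed 1000000000 / (itr * 256) expression:

    #include <stdio.h>

    /* Convert a target interrupt rate (ints/sec) into an EITR interval,
     * assuming a 256 ns tick. The patch stores this tick value directly
     * instead of recomputing it on every register write. */
    static unsigned int rate_to_eitr_ticks(unsigned int ints_per_sec)
    {
        return 1000000000u / (ints_per_sec * 256u);
    }

    int main(void)
    {
        printf("4000 ints/sec -> %u ticks\n", rate_to_eitr_ticks(4000)); /* 976 */
        printf("8000 ints/sec -> %u ticks\n", rate_to_eitr_ticks(8000)); /* 488 */
        return 0;
    }
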
@@ -336,7 +452,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
336 if (err) 452 if (err)
337 goto out; 453 goto out;
338 ring->itr_register = E1000_EITR(0) + (vector << 2); 454 ring->itr_register = E1000_EITR(0) + (vector << 2);
339 ring->itr_val = adapter->itr; 455 ring->itr_val = 976; /* ~4000 ints/sec */
340 vector++; 456 vector++;
341 } 457 }
342 for (i = 0; i < adapter->num_rx_queues; i++) { 458 for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -375,7 +491,7 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
375 pci_disable_msix(adapter->pdev); 491 pci_disable_msix(adapter->pdev);
376 kfree(adapter->msix_entries); 492 kfree(adapter->msix_entries);
377 adapter->msix_entries = NULL; 493 adapter->msix_entries = NULL;
378 } else if (adapter->msi_enabled) 494 } else if (adapter->flags & IGB_FLAG_HAS_MSI)
379 pci_disable_msi(adapter->pdev); 495 pci_disable_msi(adapter->pdev);
380 return; 496 return;
381} 497}
@@ -412,8 +528,14 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
412 /* If we can't do MSI-X, try MSI */ 528 /* If we can't do MSI-X, try MSI */
413msi_only: 529msi_only:
414 adapter->num_rx_queues = 1; 530 adapter->num_rx_queues = 1;
531 adapter->num_tx_queues = 1;
415 if (!pci_enable_msi(adapter->pdev)) 532 if (!pci_enable_msi(adapter->pdev))
416 adapter->msi_enabled = 1; 533 adapter->flags |= IGB_FLAG_HAS_MSI;
534
535#ifdef CONFIG_NETDEVICES_MULTIQUEUE
536 /* Notify the stack of the (possibly) reduced Tx Queue count. */
537 adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
538#endif
417 return; 539 return;
418} 540}
419 541
@@ -436,24 +558,33 @@ static int igb_request_irq(struct igb_adapter *adapter)
436 /* fall back to MSI */ 558 /* fall back to MSI */
437 igb_reset_interrupt_capability(adapter); 559 igb_reset_interrupt_capability(adapter);
438 if (!pci_enable_msi(adapter->pdev)) 560 if (!pci_enable_msi(adapter->pdev))
439 adapter->msi_enabled = 1; 561 adapter->flags |= IGB_FLAG_HAS_MSI;
440 igb_free_all_tx_resources(adapter); 562 igb_free_all_tx_resources(adapter);
441 igb_free_all_rx_resources(adapter); 563 igb_free_all_rx_resources(adapter);
442 adapter->num_rx_queues = 1; 564 adapter->num_rx_queues = 1;
443 igb_alloc_queues(adapter); 565 igb_alloc_queues(adapter);
444 } else { 566 } else {
445 wr32(E1000_MSIXBM(0), (E1000_EICR_RX_QUEUE0 | 567 switch (hw->mac.type) {
446 E1000_EIMS_OTHER)); 568 case e1000_82575:
569 wr32(E1000_MSIXBM(0),
570 (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
571 break;
572 case e1000_82576:
573 wr32(E1000_IVAR0, E1000_IVAR_VALID);
574 break;
575 default:
576 break;
577 }
447 } 578 }
448 579
449 if (adapter->msi_enabled) { 580 if (adapter->flags & IGB_FLAG_HAS_MSI) {
450 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0, 581 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
451 netdev->name, netdev); 582 netdev->name, netdev);
452 if (!err) 583 if (!err)
453 goto request_done; 584 goto request_done;
454 /* fall back to legacy interrupts */ 585 /* fall back to legacy interrupts */
455 igb_reset_interrupt_capability(adapter); 586 igb_reset_interrupt_capability(adapter);
456 adapter->msi_enabled = 0; 587 adapter->flags &= ~IGB_FLAG_HAS_MSI;
457 } 588 }
458 589
459 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED, 590 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
@@ -693,6 +824,10 @@ void igb_down(struct igb_adapter *adapter)
693 /* flush and sleep below */ 824 /* flush and sleep below */
694 825
695 netif_stop_queue(netdev); 826 netif_stop_queue(netdev);
827#ifdef CONFIG_NETDEVICES_MULTIQUEUE
828 for (i = 0; i < adapter->num_tx_queues; i++)
829 netif_stop_subqueue(netdev, i);
830#endif
696 831
697 /* disable transmits in the hardware */ 832 /* disable transmits in the hardware */
698 tctl = rd32(E1000_TCTL); 833 tctl = rd32(E1000_TCTL);
@@ -734,16 +869,23 @@ void igb_reinit_locked(struct igb_adapter *adapter)
734void igb_reset(struct igb_adapter *adapter) 869void igb_reset(struct igb_adapter *adapter)
735{ 870{
736 struct e1000_hw *hw = &adapter->hw; 871 struct e1000_hw *hw = &adapter->hw;
737 struct e1000_fc_info *fc = &adapter->hw.fc; 872 struct e1000_mac_info *mac = &hw->mac;
873 struct e1000_fc_info *fc = &hw->fc;
738 u32 pba = 0, tx_space, min_tx_space, min_rx_space; 874 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
739 u16 hwm; 875 u16 hwm;
740 876
741 /* Repartition Pba for greater than 9k mtu 877 /* Repartition Pba for greater than 9k mtu
742 * To take effect CTRL.RST is required. 878 * To take effect CTRL.RST is required.
743 */ 879 */
880 if (mac->type != e1000_82576) {
744 pba = E1000_PBA_34K; 881 pba = E1000_PBA_34K;
 882 } else {
 883 pba = E1000_PBA_64K;
 884 }
745 886
746 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { 887 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
888 (mac->type < e1000_82576)) {
747 /* adjust PBA for jumbo frames */ 889 /* adjust PBA for jumbo frames */
748 wr32(E1000_PBA, pba); 890 wr32(E1000_PBA, pba);
749 891
@@ -782,8 +924,8 @@ void igb_reset(struct igb_adapter *adapter)
782 if (pba < min_rx_space) 924 if (pba < min_rx_space)
783 pba = min_rx_space; 925 pba = min_rx_space;
784 } 926 }
927 wr32(E1000_PBA, pba);
785 } 928 }
786 wr32(E1000_PBA, pba);
787 929
788 /* flow control settings */ 930 /* flow control settings */
789 /* The high water mark must be low enough to fit one full frame 931 /* The high water mark must be low enough to fit one full frame
@@ -792,10 +934,15 @@ void igb_reset(struct igb_adapter *adapter)
792 * - 90% of the Rx FIFO size, or 934 * - 90% of the Rx FIFO size, or
793 * - the full Rx FIFO size minus one full frame */ 935 * - the full Rx FIFO size minus one full frame */
794 hwm = min(((pba << 10) * 9 / 10), 936 hwm = min(((pba << 10) * 9 / 10),
795 ((pba << 10) - adapter->max_frame_size)); 937 ((pba << 10) - 2 * adapter->max_frame_size));
796 938
797 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ 939 if (mac->type < e1000_82576) {
798 fc->low_water = fc->high_water - 8; 940 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
941 fc->low_water = fc->high_water - 8;
942 } else {
943 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
944 fc->low_water = fc->high_water - 16;
945 }
799 fc->pause_time = 0xFFFF; 946 fc->pause_time = 0xFFFF;
800 fc->send_xon = 1; 947 fc->send_xon = 1;
801 fc->type = fc->original_type; 948 fc->type = fc->original_type;
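
Worked example of the watermark arithmetic above, taking E1000_PBA_34K to mean a 34 KB receive packet buffer (an inference from the name, not stated in the patch) and a standard 1522-byte maximum frame:

    #include <stdio.h>
    #include <stdint.h>

    /* High water = min(90% of Rx FIFO, FIFO minus two max frames),
     * rounded down to the part's granularity: 8 bytes on 82575, and
     * 16 bytes on 82576 per the new branch above. */
    int main(void)
    {
        uint32_t pba_kb = 34;            /* E1000_PBA_34K, assumed 34 KB */
        uint32_t max_frame = 1522;       /* 1500 MTU + VLAN tag + FCS */
        uint32_t fifo = pba_kb << 10;    /* KB -> bytes */

        uint32_t hwm = fifo * 9 / 10;
        if (fifo - 2 * max_frame < hwm)
            hwm = fifo - 2 * max_frame;

        uint32_t high = hwm & 0xFFF8u;   /* 8-byte granularity (82575) */
        uint32_t low  = high - 8;
        printf("high=%u low=%u\n", high, low);  /* high=31328 low=31320 */
        return 0;
    }
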
@@ -895,7 +1042,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
895 pci_save_state(pdev); 1042 pci_save_state(pdev);
896 1043
897 err = -ENOMEM; 1044 err = -ENOMEM;
1045#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1046 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_MAX_TX_QUEUES);
1047#else
898 netdev = alloc_etherdev(sizeof(struct igb_adapter)); 1048 netdev = alloc_etherdev(sizeof(struct igb_adapter));
1049#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
899 if (!netdev) 1050 if (!netdev)
900 goto err_alloc_etherdev; 1051 goto err_alloc_etherdev;
901 1052
@@ -966,6 +1117,17 @@ static int __devinit igb_probe(struct pci_dev *pdev,
966 1117
967 igb_get_bus_info_pcie(hw); 1118 igb_get_bus_info_pcie(hw);
968 1119
1120 /* set flags */
1121 switch (hw->mac.type) {
1122 case e1000_82576:
1123 case e1000_82575:
1124 adapter->flags |= IGB_FLAG_HAS_DCA;
1125 adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
1126 break;
1127 default:
1128 break;
1129 }
1130
969 hw->phy.autoneg_wait_to_complete = false; 1131 hw->phy.autoneg_wait_to_complete = false;
970 hw->mac.adaptive_ifs = true; 1132 hw->mac.adaptive_ifs = true;
971 1133
@@ -989,6 +1151,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
989 netdev->features |= NETIF_F_TSO; 1151 netdev->features |= NETIF_F_TSO;
990 netdev->features |= NETIF_F_TSO6; 1152 netdev->features |= NETIF_F_TSO6;
991 1153
1154#ifdef CONFIG_IGB_LRO
1155 netdev->features |= NETIF_F_LRO;
1156#endif
1157
992 netdev->vlan_features |= NETIF_F_TSO; 1158 netdev->vlan_features |= NETIF_F_TSO;
993 netdev->vlan_features |= NETIF_F_TSO6; 1159 netdev->vlan_features |= NETIF_F_TSO6;
994 netdev->vlan_features |= NETIF_F_HW_CSUM; 1160 netdev->vlan_features |= NETIF_F_HW_CSUM;
@@ -997,6 +1163,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
997 if (pci_using_dac) 1163 if (pci_using_dac)
998 netdev->features |= NETIF_F_HIGHDMA; 1164 netdev->features |= NETIF_F_HIGHDMA;
999 1165
1166#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1167 netdev->features |= NETIF_F_MULTI_QUEUE;
1168#endif
1169
1000 netdev->features |= NETIF_F_LLTX; 1170 netdev->features |= NETIF_F_LLTX;
1001 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); 1171 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
1002 1172
@@ -1077,11 +1247,23 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1077 adapter->eeprom_wol = 0; 1247 adapter->eeprom_wol = 0;
1078 break; 1248 break;
1079 case E1000_DEV_ID_82575EB_FIBER_SERDES: 1249 case E1000_DEV_ID_82575EB_FIBER_SERDES:
1250 case E1000_DEV_ID_82576_FIBER:
1251 case E1000_DEV_ID_82576_SERDES:
1080 /* Wake events only supported on port A for dual fiber 1252 /* Wake events only supported on port A for dual fiber
1081 * regardless of eeprom setting */ 1253 * regardless of eeprom setting */
1082 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) 1254 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1083 adapter->eeprom_wol = 0; 1255 adapter->eeprom_wol = 0;
1084 break; 1256 break;
1257 case E1000_DEV_ID_82576_QUAD_COPPER:
1258 /* if quad port adapter, disable WoL on all but port A */
1259 if (global_quad_port_a != 0)
1260 adapter->eeprom_wol = 0;
1261 else
1262 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
1263 /* Reset for multiple quad port adapters */
1264 if (++global_quad_port_a == 4)
1265 global_quad_port_a = 0;
1266 break;
1085 } 1267 }
1086 1268
1087 /* initialize the wol settings based on the eeprom settings */ 1269 /* initialize the wol settings based on the eeprom settings */
@@ -1097,12 +1279,28 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1097 /* tell the stack to leave us alone until igb_open() is called */ 1279 /* tell the stack to leave us alone until igb_open() is called */
1098 netif_carrier_off(netdev); 1280 netif_carrier_off(netdev);
1099 netif_stop_queue(netdev); 1281 netif_stop_queue(netdev);
1282#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1283 for (i = 0; i < adapter->num_tx_queues; i++)
1284 netif_stop_subqueue(netdev, i);
1285#endif
1100 1286
1101 strcpy(netdev->name, "eth%d"); 1287 strcpy(netdev->name, "eth%d");
1102 err = register_netdev(netdev); 1288 err = register_netdev(netdev);
1103 if (err) 1289 if (err)
1104 goto err_register; 1290 goto err_register;
1105 1291
1292#ifdef CONFIG_DCA
1293 if ((adapter->flags & IGB_FLAG_HAS_DCA) &&
1294 (dca_add_requester(&pdev->dev) == 0)) {
1295 adapter->flags |= IGB_FLAG_DCA_ENABLED;
1296 dev_info(&pdev->dev, "DCA enabled\n");
1297 /* Always use CB2 mode, difference is masked
1298 * in the CB driver. */
1299 wr32(E1000_DCA_CTRL, 2);
1300 igb_setup_dca(adapter);
1301 }
1302#endif
1303
1106 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 1304 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1107 /* print bus type/speed/width info */ 1305 /* print bus type/speed/width info */
1108 dev_info(&pdev->dev, 1306 dev_info(&pdev->dev,
@@ -1123,7 +1321,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1123 dev_info(&pdev->dev, 1321 dev_info(&pdev->dev,
1124 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", 1322 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
1125 adapter->msix_entries ? "MSI-X" : 1323 adapter->msix_entries ? "MSI-X" :
1126 adapter->msi_enabled ? "MSI" : "legacy", 1324 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
1127 adapter->num_rx_queues, adapter->num_tx_queues); 1325 adapter->num_rx_queues, adapter->num_tx_queues);
1128 1326
1129 return 0; 1327 return 0;
@@ -1138,8 +1336,7 @@ err_eeprom:
1138 iounmap(hw->flash_address); 1336 iounmap(hw->flash_address);
1139 1337
1140 igb_remove_device(hw); 1338 igb_remove_device(hw);
1141 kfree(adapter->tx_ring); 1339 igb_free_queues(adapter);
1142 kfree(adapter->rx_ring);
1143err_sw_init: 1340err_sw_init:
1144err_hw_init: 1341err_hw_init:
1145 iounmap(hw->hw_addr); 1342 iounmap(hw->hw_addr);
@@ -1166,6 +1363,9 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1166{ 1363{
1167 struct net_device *netdev = pci_get_drvdata(pdev); 1364 struct net_device *netdev = pci_get_drvdata(pdev);
1168 struct igb_adapter *adapter = netdev_priv(netdev); 1365 struct igb_adapter *adapter = netdev_priv(netdev);
1366#ifdef CONFIG_DCA
1367 struct e1000_hw *hw = &adapter->hw;
1368#endif
1169 1369
1170 /* flush_scheduled work may reschedule our watchdog task, so 1370 /* flush_scheduled work may reschedule our watchdog task, so
1171 * explicitly disable watchdog tasks from being rescheduled */ 1371 * explicitly disable watchdog tasks from being rescheduled */
@@ -1175,6 +1375,15 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1175 1375
1176 flush_scheduled_work(); 1376 flush_scheduled_work();
1177 1377
1378#ifdef CONFIG_DCA
1379 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
1380 dev_info(&pdev->dev, "DCA disabled\n");
1381 dca_remove_requester(&pdev->dev);
1382 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
1383 wr32(E1000_DCA_CTRL, 1);
1384 }
1385#endif
1386
1178 /* Release control of h/w to f/w. If f/w is AMT enabled, this 1387 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1179 * would have already happened in close and is redundant. */ 1388 * would have already happened in close and is redundant. */
1180 igb_release_hw_control(adapter); 1389 igb_release_hw_control(adapter);
@@ -1187,8 +1396,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1187 igb_remove_device(&adapter->hw); 1396 igb_remove_device(&adapter->hw);
1188 igb_reset_interrupt_capability(adapter); 1397 igb_reset_interrupt_capability(adapter);
1189 1398
1190 kfree(adapter->tx_ring); 1399 igb_free_queues(adapter);
1191 kfree(adapter->rx_ring);
1192 1400
1193 iounmap(adapter->hw.hw_addr); 1401 iounmap(adapter->hw.hw_addr);
1194 if (adapter->hw.flash_address) 1402 if (adapter->hw.flash_address)
@@ -1223,9 +1431,15 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
1223 1431
1224 /* Number of supported queues. */ 1432 /* Number of supported queues. */
1225 /* Having more queues than CPUs doesn't make sense. */ 1433 /* Having more queues than CPUs doesn't make sense. */
1434 adapter->num_rx_queues = min((u32)IGB_MAX_RX_QUEUES, (u32)num_online_cpus());
1435#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1436 adapter->num_tx_queues = min(IGB_MAX_TX_QUEUES, num_online_cpus());
1437#else
1226 adapter->num_tx_queues = 1; 1438 adapter->num_tx_queues = 1;
1227 adapter->num_rx_queues = min(IGB_MAX_RX_QUEUES, num_online_cpus()); 1439#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
1228 1440
1441 /* This call may decrease the number of queues depending on
1442 * interrupt mode. */
1229 igb_set_interrupt_capability(adapter); 1443 igb_set_interrupt_capability(adapter);
1230 1444
1231 if (igb_alloc_queues(adapter)) { 1445 if (igb_alloc_queues(adapter)) {
@@ -1386,8 +1600,6 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
1386 tx_ring->adapter = adapter; 1600 tx_ring->adapter = adapter;
1387 tx_ring->next_to_use = 0; 1601 tx_ring->next_to_use = 0;
1388 tx_ring->next_to_clean = 0; 1602 tx_ring->next_to_clean = 0;
1389 spin_lock_init(&tx_ring->tx_clean_lock);
1390 spin_lock_init(&tx_ring->tx_lock);
1391 return 0; 1603 return 0;
1392 1604
1393err: 1605err:
@@ -1407,6 +1619,9 @@ err:
1407static int igb_setup_all_tx_resources(struct igb_adapter *adapter) 1619static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1408{ 1620{
1409 int i, err = 0; 1621 int i, err = 0;
1622#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1623 int r_idx;
1624#endif
1410 1625
1411 for (i = 0; i < adapter->num_tx_queues; i++) { 1626 for (i = 0; i < adapter->num_tx_queues; i++) {
1412 err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]); 1627 err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
@@ -1419,6 +1634,12 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1419 } 1634 }
1420 } 1635 }
1421 1636
1637#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1638 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
1639 r_idx = i % adapter->num_tx_queues;
1640 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
1641 }
1642#endif
1422 return err; 1643 return err;
1423} 1644}
1424 1645
@@ -1505,6 +1726,14 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
1505 struct pci_dev *pdev = adapter->pdev; 1726 struct pci_dev *pdev = adapter->pdev;
1506 int size, desc_len; 1727 int size, desc_len;
1507 1728
1729#ifdef CONFIG_IGB_LRO
1730 size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
1731 rx_ring->lro_mgr.lro_arr = vmalloc(size);
1732 if (!rx_ring->lro_mgr.lro_arr)
1733 goto err;
1734 memset(rx_ring->lro_mgr.lro_arr, 0, size);
1735#endif
1736
1508 size = sizeof(struct igb_buffer) * rx_ring->count; 1737 size = sizeof(struct igb_buffer) * rx_ring->count;
1509 rx_ring->buffer_info = vmalloc(size); 1738 rx_ring->buffer_info = vmalloc(size);
1510 if (!rx_ring->buffer_info) 1739 if (!rx_ring->buffer_info)
@@ -1525,13 +1754,16 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
1525 1754
1526 rx_ring->next_to_clean = 0; 1755 rx_ring->next_to_clean = 0;
1527 rx_ring->next_to_use = 0; 1756 rx_ring->next_to_use = 0;
1528 rx_ring->pending_skb = NULL;
1529 1757
1530 rx_ring->adapter = adapter; 1758 rx_ring->adapter = adapter;
1531 1759
1532 return 0; 1760 return 0;
1533 1761
1534err: 1762err:
1763#ifdef CONFIG_IGB_LRO
1764 vfree(rx_ring->lro_mgr.lro_arr);
1765 rx_ring->lro_mgr.lro_arr = NULL;
1766#endif
1535 vfree(rx_ring->buffer_info); 1767 vfree(rx_ring->buffer_info);
1536 dev_err(&adapter->pdev->dev, "Unable to allocate memory for " 1768 dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
1537 "the receive descriptor ring\n"); 1769 "the receive descriptor ring\n");
@@ -1582,10 +1814,12 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
1582 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 1814 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1583 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 1815 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1584 1816
1585 /* disable the stripping of CRC because it breaks 1817 /*
1586 * BMC firmware connected over SMBUS 1818 * enable stripping of CRC. It's unlikely this will break BMC
1587 rctl |= E1000_RCTL_SECRC; 1819 * redirection as it did with e1000. Newer features require
1820 * that the HW strips the CRC.
1588 */ 1821 */
1822 rctl |= E1000_RCTL_SECRC;
1589 1823
1590 rctl &= ~E1000_RCTL_SBP; 1824 rctl &= ~E1000_RCTL_SBP;
1591 1825
@@ -1615,15 +1849,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
1615 rctl |= E1000_RCTL_SZ_2048; 1849 rctl |= E1000_RCTL_SZ_2048;
1616 rctl &= ~E1000_RCTL_BSEX; 1850 rctl &= ~E1000_RCTL_BSEX;
1617 break; 1851 break;
1618 case IGB_RXBUFFER_4096:
1619 rctl |= E1000_RCTL_SZ_4096;
1620 break;
1621 case IGB_RXBUFFER_8192:
1622 rctl |= E1000_RCTL_SZ_8192;
1623 break;
1624 case IGB_RXBUFFER_16384:
1625 rctl |= E1000_RCTL_SZ_16384;
1626 break;
1627 } 1852 }
1628 } else { 1853 } else {
1629 rctl &= ~E1000_RCTL_BSEX; 1854 rctl &= ~E1000_RCTL_BSEX;
@@ -1641,10 +1866,8 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
1641 * so only enable packet split for jumbo frames */ 1866 * so only enable packet split for jumbo frames */
1642 if (rctl & E1000_RCTL_LPE) { 1867 if (rctl & E1000_RCTL_LPE) {
1643 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128; 1868 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
1644 srrctl = adapter->rx_ps_hdr_size << 1869 srrctl |= adapter->rx_ps_hdr_size <<
1645 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 1870 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1646 /* buffer size is ALWAYS one page */
1647 srrctl |= PAGE_SIZE >> E1000_SRRCTL_BSIZEPKT_SHIFT;
1648 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 1871 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1649 } else { 1872 } else {
1650 adapter->rx_ps_hdr_size = 0; 1873 adapter->rx_ps_hdr_size = 0;
@@ -1678,8 +1901,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1678 mdelay(10); 1901 mdelay(10);
1679 1902
1680 if (adapter->itr_setting > 3) 1903 if (adapter->itr_setting > 3)
1681 wr32(E1000_ITR, 1904 wr32(E1000_ITR, adapter->itr);
1682 1000000000 / (adapter->itr * 256));
1683 1905
1684 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1906 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1685 * the Base and Length of the Rx Descriptor Ring */ 1907 * the Base and Length of the Rx Descriptor Ring */
@@ -1704,6 +1926,16 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1704 rxdctl |= IGB_RX_HTHRESH << 8; 1926 rxdctl |= IGB_RX_HTHRESH << 8;
1705 rxdctl |= IGB_RX_WTHRESH << 16; 1927 rxdctl |= IGB_RX_WTHRESH << 16;
1706 wr32(E1000_RXDCTL(i), rxdctl); 1928 wr32(E1000_RXDCTL(i), rxdctl);
1929#ifdef CONFIG_IGB_LRO
 1930 /* Initial LRO Settings */
1931 ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
1932 ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
1933 ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
1934 ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1935 ring->lro_mgr.dev = adapter->netdev;
1936 ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1937 ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1938#endif
1707 } 1939 }
1708 1940
1709 if (adapter->num_rx_queues > 1) { 1941 if (adapter->num_rx_queues > 1) {
@@ -1717,7 +1949,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1717 1949
1718 get_random_bytes(&random[0], 40); 1950 get_random_bytes(&random[0], 40);
1719 1951
1720 shift = 6; 1952 if (hw->mac.type >= e1000_82576)
1953 shift = 0;
1954 else
1955 shift = 6;
1721 for (j = 0; j < (32 * 4); j++) { 1956 for (j = 0; j < (32 * 4); j++) {
1722 reta.bytes[j & 3] = 1957 reta.bytes[j & 3] =
1723 (j % adapter->num_rx_queues) << shift; 1958 (j % adapter->num_rx_queues) << shift;
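
The RETA fill above distributes hash buckets round-robin across the rx queues; the shift of 6 on 82575 suggests the queue index occupies bits 7:6 of each table byte (an inference, not stated in the patch), while 82576 takes it in the low bits. Standalone version of the loop:

    #include <stdio.h>
    #include <stdint.h>

    /* Fill a 128-entry RSS redirection table the way the hunk above
     * does: round-robin over the rx queues, queue number shifted into
     * position per MAC type. */
    int main(void)
    {
        int num_rx_queues = 4, shift = 6;   /* 82575 case */
        uint8_t reta[128];

        for (int j = 0; j < 128; j++)
            reta[j] = (uint8_t)((j % num_rx_queues) << shift);

        for (int j = 0; j < 8; j++)         /* 0x00 0x40 0x80 0xC0 ... */
            printf("reta[%d] = 0x%02X\n", j, reta[j]);
        return 0;
    }
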
@@ -1892,6 +2127,11 @@ static void igb_free_rx_resources(struct igb_ring *rx_ring)
1892 vfree(rx_ring->buffer_info); 2127 vfree(rx_ring->buffer_info);
1893 rx_ring->buffer_info = NULL; 2128 rx_ring->buffer_info = NULL;
1894 2129
2130#ifdef CONFIG_IGB_LRO
2131 vfree(rx_ring->lro_mgr.lro_arr);
2132 rx_ring->lro_mgr.lro_arr = NULL;
2133#endif
2134
1895 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2135 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1896 2136
1897 rx_ring->desc = NULL; 2137 rx_ring->desc = NULL;
@@ -1946,20 +2186,17 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
1946 buffer_info->skb = NULL; 2186 buffer_info->skb = NULL;
1947 } 2187 }
1948 if (buffer_info->page) { 2188 if (buffer_info->page) {
1949 pci_unmap_page(pdev, buffer_info->page_dma, 2189 if (buffer_info->page_dma)
1950 PAGE_SIZE, PCI_DMA_FROMDEVICE); 2190 pci_unmap_page(pdev, buffer_info->page_dma,
2191 PAGE_SIZE / 2,
2192 PCI_DMA_FROMDEVICE);
1951 put_page(buffer_info->page); 2193 put_page(buffer_info->page);
1952 buffer_info->page = NULL; 2194 buffer_info->page = NULL;
1953 buffer_info->page_dma = 0; 2195 buffer_info->page_dma = 0;
2196 buffer_info->page_offset = 0;
1954 } 2197 }
1955 } 2198 }
1956 2199
1957 /* there also may be some cached data from a chained receive */
1958 if (rx_ring->pending_skb) {
1959 dev_kfree_skb(rx_ring->pending_skb);
1960 rx_ring->pending_skb = NULL;
1961 }
1962
1963 size = sizeof(struct igb_buffer) * rx_ring->count; 2200 size = sizeof(struct igb_buffer) * rx_ring->count;
1964 memset(rx_ring->buffer_info, 0, size); 2201 memset(rx_ring->buffer_info, 0, size);
1965 2202
@@ -2043,7 +2280,7 @@ static void igb_set_multi(struct net_device *netdev)
2043 2280
2044 if (!netdev->mc_count) { 2281 if (!netdev->mc_count) {
2045 /* nothing to program, so clear mc list */ 2282 /* nothing to program, so clear mc list */
2046 igb_update_mc_addr_list(hw, NULL, 0, 1, 2283 igb_update_mc_addr_list_82575(hw, NULL, 0, 1,
2047 mac->rar_entry_count); 2284 mac->rar_entry_count);
2048 return; 2285 return;
2049 } 2286 }
@@ -2061,7 +2298,8 @@ static void igb_set_multi(struct net_device *netdev)
2061 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); 2298 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2062 mc_ptr = mc_ptr->next; 2299 mc_ptr = mc_ptr->next;
2063 } 2300 }
2064 igb_update_mc_addr_list(hw, mta_list, i, 1, mac->rar_entry_count); 2301 igb_update_mc_addr_list_82575(hw, mta_list, i, 1,
2302 mac->rar_entry_count);
2065 kfree(mta_list); 2303 kfree(mta_list);
2066} 2304}
2067 2305
@@ -2096,6 +2334,9 @@ static void igb_watchdog_task(struct work_struct *work)
2096 struct e1000_mac_info *mac = &adapter->hw.mac; 2334 struct e1000_mac_info *mac = &adapter->hw.mac;
2097 u32 link; 2335 u32 link;
2098 s32 ret_val; 2336 s32 ret_val;
2337#ifdef CONFIG_NETDEVICES_MULTIQUEUE
2338 int i;
2339#endif
2099 2340
2100 if ((netif_carrier_ok(netdev)) && 2341 if ((netif_carrier_ok(netdev)) &&
2101 (rd32(E1000_STATUS) & E1000_STATUS_LU)) 2342 (rd32(E1000_STATUS) & E1000_STATUS_LU))
@@ -2152,6 +2393,10 @@ static void igb_watchdog_task(struct work_struct *work)
2152 2393
2153 netif_carrier_on(netdev); 2394 netif_carrier_on(netdev);
2154 netif_wake_queue(netdev); 2395 netif_wake_queue(netdev);
2396#ifdef CONFIG_NETDEVICES_MULTIQUEUE
2397 for (i = 0; i < adapter->num_tx_queues; i++)
2398 netif_wake_subqueue(netdev, i);
2399#endif
2155 2400
2156 if (!test_bit(__IGB_DOWN, &adapter->state)) 2401 if (!test_bit(__IGB_DOWN, &adapter->state))
2157 mod_timer(&adapter->phy_info_timer, 2402 mod_timer(&adapter->phy_info_timer,
@@ -2164,6 +2409,10 @@ static void igb_watchdog_task(struct work_struct *work)
2164 dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); 2409 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2165 netif_carrier_off(netdev); 2410 netif_carrier_off(netdev);
2166 netif_stop_queue(netdev); 2411 netif_stop_queue(netdev);
2412#ifdef CONFIG_NETDEVICES_MULTIQUEUE
2413 for (i = 0; i < adapter->num_tx_queues; i++)
2414 netif_stop_subqueue(netdev, i);
2415#endif
2167 if (!test_bit(__IGB_DOWN, &adapter->state)) 2416 if (!test_bit(__IGB_DOWN, &adapter->state))
2168 mod_timer(&adapter->phy_info_timer, 2417 mod_timer(&adapter->phy_info_timer,
2169 round_jiffies(jiffies + 2 * HZ)); 2418 round_jiffies(jiffies + 2 * HZ));
@@ -2216,38 +2465,60 @@ enum latency_range {
2216}; 2465};
2217 2466
2218 2467
2219static void igb_lower_rx_eitr(struct igb_adapter *adapter, 2468/**
2220 struct igb_ring *rx_ring) 2469 * igb_update_ring_itr - update the dynamic ITR value based on packet size
2470 *
 2471 * Stores a new ITR value based strictly on packet size. This
2472 * algorithm is less sophisticated than that used in igb_update_itr,
2473 * due to the difficulty of synchronizing statistics across multiple
 2474 * receive rings. The divisors and thresholds used by this function
2475 * were determined based on theoretical maximum wire speed and testing
2476 * data, in order to minimize response time while increasing bulk
2477 * throughput.
2478 * This functionality is controlled by the InterruptThrottleRate module
2479 * parameter (see igb_param.c)
2480 * NOTE: This function is called only when operating in a multiqueue
2481 * receive environment.
2482 * @rx_ring: pointer to ring
2483 **/
2484static void igb_update_ring_itr(struct igb_ring *rx_ring)
2221{ 2485{
2222 struct e1000_hw *hw = &adapter->hw; 2486 int new_val = rx_ring->itr_val;
2223 int new_val; 2487 int avg_wire_size = 0;
2488 struct igb_adapter *adapter = rx_ring->adapter;
2224 2489
2225 new_val = rx_ring->itr_val / 2; 2490 if (!rx_ring->total_packets)
2226 if (new_val < IGB_MIN_DYN_ITR) 2491 goto clear_counts; /* no packets, so don't do anything */
2227 new_val = IGB_MIN_DYN_ITR;
2228 2492
2229 if (new_val != rx_ring->itr_val) { 2493 /* For non-gigabit speeds, just fix the interrupt rate at 4000
2230 rx_ring->itr_val = new_val; 2494 * ints/sec - ITR timer value of 120 ticks.
2231 wr32(rx_ring->itr_register, 2495 */
2232 1000000000 / (new_val * 256)); 2496 if (adapter->link_speed != SPEED_1000) {
2497 new_val = 120;
2498 goto set_itr_val;
2233 } 2499 }
2234} 2500 avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
2235 2501
2236static void igb_raise_rx_eitr(struct igb_adapter *adapter, 2502 /* Add 24 bytes to size to account for CRC, preamble, and gap */
2237 struct igb_ring *rx_ring) 2503 avg_wire_size += 24;
2238{
2239 struct e1000_hw *hw = &adapter->hw;
2240 int new_val;
2241 2504
2242 new_val = rx_ring->itr_val * 2; 2505 /* Don't starve jumbo frames */
2243 if (new_val > IGB_MAX_DYN_ITR) 2506 avg_wire_size = min(avg_wire_size, 3000);
2244 new_val = IGB_MAX_DYN_ITR;
2245 2507
2508 /* Give a little boost to mid-size frames */
2509 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
2510 new_val = avg_wire_size / 3;
2511 else
2512 new_val = avg_wire_size / 2;
2513
2514set_itr_val:
2246 if (new_val != rx_ring->itr_val) { 2515 if (new_val != rx_ring->itr_val) {
2247 rx_ring->itr_val = new_val; 2516 rx_ring->itr_val = new_val;
2248 wr32(rx_ring->itr_register, 2517 rx_ring->set_itr = 1;
2249 1000000000 / (new_val * 256));
2250 } 2518 }
2519clear_counts:
2520 rx_ring->total_bytes = 0;
2521 rx_ring->total_packets = 0;
2251} 2522}
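
igb_update_ring_itr() above reduces to a small pure function; a direct transliteration that can be exercised against sample traffic mixes:

    #include <stdio.h>

    /* Derive an ITR value from the average packet size seen since the
     * last interrupt, mirroring the heuristic above. */
    static int ring_itr(unsigned long bytes, unsigned long packets,
                        int link_is_gigabit, int old_val)
    {
        int avg_wire_size;

        if (!packets)
            return old_val;            /* no packets: keep current value */
        if (!link_is_gigabit)
            return 120;                /* fixed ~4000 ints/sec */

        avg_wire_size = (int)(bytes / packets) + 24; /* +CRC/preamble/gap */
        if (avg_wire_size > 3000)
            avg_wire_size = 3000;      /* don't starve jumbo frames */

        if (avg_wire_size > 300 && avg_wire_size < 1200)
            return avg_wire_size / 3;  /* boost mid-size frames */
        return avg_wire_size / 2;
    }

    int main(void)
    {
        printf("64B frames   -> %d\n", ring_itr(6400, 100, 1, 0));   /* 44 */
        printf("512B frames  -> %d\n", ring_itr(51200, 100, 1, 0));  /* 178 */
        printf("1500B frames -> %d\n", ring_itr(150000, 100, 1, 0)); /* 762 */
        return 0;
    }
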
2252 2523
2253/** 2524/**
@@ -2314,8 +2585,7 @@ update_itr_done:
2314 return retval; 2585 return retval;
2315} 2586}
2316 2587
2317static void igb_set_itr(struct igb_adapter *adapter, u16 itr_register, 2588static void igb_set_itr(struct igb_adapter *adapter)
2318 int rx_only)
2319{ 2589{
2320 u16 current_itr; 2590 u16 current_itr;
2321 u32 new_itr = adapter->itr; 2591 u32 new_itr = adapter->itr;
@@ -2331,26 +2601,23 @@ static void igb_set_itr(struct igb_adapter *adapter, u16 itr_register,
2331 adapter->rx_itr, 2601 adapter->rx_itr,
2332 adapter->rx_ring->total_packets, 2602 adapter->rx_ring->total_packets,
2333 adapter->rx_ring->total_bytes); 2603 adapter->rx_ring->total_bytes);
2334 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2335 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2336 adapter->rx_itr = low_latency;
2337 2604
2338 if (!rx_only) { 2605 if (adapter->rx_ring->buddy) {
2339 adapter->tx_itr = igb_update_itr(adapter, 2606 adapter->tx_itr = igb_update_itr(adapter,
2340 adapter->tx_itr, 2607 adapter->tx_itr,
2341 adapter->tx_ring->total_packets, 2608 adapter->tx_ring->total_packets,
2342 adapter->tx_ring->total_bytes); 2609 adapter->tx_ring->total_bytes);
2343 /* conservative mode (itr 3) eliminates the
2344 * lowest_latency setting */
2345 if (adapter->itr_setting == 3 &&
2346 adapter->tx_itr == lowest_latency)
2347 adapter->tx_itr = low_latency;
2348 2610
2349 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2611 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2350 } else { 2612 } else {
2351 current_itr = adapter->rx_itr; 2613 current_itr = adapter->rx_itr;
2352 } 2614 }
2353 2615
2616 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2617 if (adapter->itr_setting == 3 &&
2618 current_itr == lowest_latency)
2619 current_itr = low_latency;
2620
2354 switch (current_itr) { 2621 switch (current_itr) {
2355 /* counts and packets in update_itr are dependent on these numbers */ 2622 /* counts and packets in update_itr are dependent on these numbers */
2356 case lowest_latency: 2623 case lowest_latency:
@@ -2367,6 +2634,13 @@ static void igb_set_itr(struct igb_adapter *adapter, u16 itr_register,
2367 } 2634 }
2368 2635
2369set_itr_now: 2636set_itr_now:
2637 adapter->rx_ring->total_bytes = 0;
2638 adapter->rx_ring->total_packets = 0;
2639 if (adapter->rx_ring->buddy) {
2640 adapter->rx_ring->buddy->total_bytes = 0;
2641 adapter->rx_ring->buddy->total_packets = 0;
2642 }
2643
2370 if (new_itr != adapter->itr) { 2644 if (new_itr != adapter->itr) {
2371 /* this attempts to bias the interrupt rate towards Bulk 2645 /* this attempts to bias the interrupt rate towards Bulk
2372 * by adding intermediate steps when interrupt rate is 2646 * by adding intermediate steps when interrupt rate is
@@ -2381,7 +2655,8 @@ set_itr_now:
2381 * ends up being correct. 2655 * ends up being correct.
2382 */ 2656 */
2383 adapter->itr = new_itr; 2657 adapter->itr = new_itr;
2384 adapter->set_itr = 1; 2658 adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
2659 adapter->rx_ring->set_itr = 1;
2385 } 2660 }
2386 2661
2387 return; 2662 return;
@@ -2457,9 +2732,9 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
2457 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); 2732 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
2458 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); 2733 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
2459 2734
2460 /* Context index must be unique per ring. Luckily, so is the interrupt 2735 /* Context index must be unique per ring. */
2461 * mask value. */ 2736 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
2462 mss_l4len_idx |= tx_ring->eims_value >> 4; 2737 mss_l4len_idx |= tx_ring->queue_index << 4;
2463 2738
2464 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 2739 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2465 context_desc->seqnum_seed = 0; 2740 context_desc->seqnum_seed = 0;
@@ -2523,8 +2798,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2523 2798
2524 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); 2799 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2525 context_desc->seqnum_seed = 0; 2800 context_desc->seqnum_seed = 0;
2526 context_desc->mss_l4len_idx = 2801 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
2527 cpu_to_le32(tx_ring->eims_value >> 4); 2802 context_desc->mss_l4len_idx =
2803 cpu_to_le32(tx_ring->queue_index << 4);
2528 2804
2529 buffer_info->time_stamp = jiffies; 2805 buffer_info->time_stamp = jiffies;
2530 buffer_info->dma = 0; 2806 buffer_info->dma = 0;
@@ -2625,9 +2901,10 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
2625 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 2901 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2626 } 2902 }
2627 2903
2628 if (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO | 2904 if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
2629 IGB_TX_FLAGS_VLAN)) 2905 (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
2630 olinfo_status |= tx_ring->eims_value >> 4; 2906 IGB_TX_FLAGS_VLAN)))
2907 olinfo_status |= tx_ring->queue_index << 4;
2631 2908
2632 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); 2909 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
2633 2910
@@ -2663,7 +2940,12 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
2663{ 2940{
2664 struct igb_adapter *adapter = netdev_priv(netdev); 2941 struct igb_adapter *adapter = netdev_priv(netdev);
2665 2942
2943#ifdef CONFIG_NETDEVICES_MULTIQUEUE
2944 netif_stop_subqueue(netdev, tx_ring->queue_index);
2945#else
2666 netif_stop_queue(netdev); 2946 netif_stop_queue(netdev);
2947#endif
2948
2667 /* Herbert's original patch had: 2949 /* Herbert's original patch had:
2668 * smp_mb__after_netif_stop_queue(); 2950 * smp_mb__after_netif_stop_queue();
2669 * but since that doesn't exist yet, just open code it. */ 2951 * but since that doesn't exist yet, just open code it. */
@@ -2675,7 +2957,11 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
2675 return -EBUSY; 2957 return -EBUSY;
2676 2958
2677 /* A reprieve! */ 2959 /* A reprieve! */
2678 netif_start_queue(netdev); 2960#ifdef CONFIG_NETDEVICES_MULTIQUEUE
2961 netif_wake_subqueue(netdev, tx_ring->queue_index);
2962#else
2963 netif_wake_queue(netdev);
2964#endif
2679 ++adapter->restart_queue; 2965 ++adapter->restart_queue;
2680 return 0; 2966 return 0;
2681} 2967}
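
__igb_maybe_stop_tx() relies on a free-descriptor count that must survive a racing completion, hence the stop / memory barrier / re-check / wake dance above. The unused-count arithmetic itself is not shown in this hunk; the formula below is the one conventionally used by Intel NIC drivers and is an assumption here:

    #include <stdio.h>

    /* Free descriptors in a circular ring of `count` entries. The queue
     * is stopped when fewer than nr_frags + 4 remain: one per fragment,
     * one for skb->data, one context descriptor, and a two-entry gap to
     * keep tail from touching head. */
    static int desc_unused(int count, int next_to_use, int next_to_clean)
    {
        if (next_to_clean > next_to_use)
            return next_to_clean - next_to_use - 1;
        return count + next_to_clean - next_to_use - 1;
    }

    int main(void)
    {
        printf("%d\n", desc_unused(256, 10, 200));  /* 189 free */
        printf("%d\n", desc_unused(256, 200, 10));  /* 65 free  */
        return 0;
    }
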
@@ -2697,7 +2983,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2697 struct igb_adapter *adapter = netdev_priv(netdev); 2983 struct igb_adapter *adapter = netdev_priv(netdev);
2698 unsigned int tx_flags = 0; 2984 unsigned int tx_flags = 0;
2699 unsigned int len; 2985 unsigned int len;
2700 unsigned long irq_flags;
2701 u8 hdr_len = 0; 2986 u8 hdr_len = 0;
2702 int tso = 0; 2987 int tso = 0;
2703 2988
@@ -2713,10 +2998,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2713 return NETDEV_TX_OK; 2998 return NETDEV_TX_OK;
2714 } 2999 }
2715 3000
2716 if (!spin_trylock_irqsave(&tx_ring->tx_lock, irq_flags))
2717 /* Collision - tell upper layer to requeue */
2718 return NETDEV_TX_LOCKED;
2719
2720 /* need: 1 descriptor per page, 3001 /* need: 1 descriptor per page,
2721 * + 2 desc gap to keep tail from touching head, 3002 * + 2 desc gap to keep tail from touching head,
2722 * + 1 desc for skb->data, 3003 * + 1 desc for skb->data,
@@ -2724,21 +3005,23 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2724 * otherwise try next time */ 3005 * otherwise try next time */
2725 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) { 3006 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
2726 /* this is a hard error */ 3007 /* this is a hard error */
2727 spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags);
2728 return NETDEV_TX_BUSY; 3008 return NETDEV_TX_BUSY;
2729 } 3009 }
3010 skb_orphan(skb);
2730 3011
2731 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3012 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
2732 tx_flags |= IGB_TX_FLAGS_VLAN; 3013 tx_flags |= IGB_TX_FLAGS_VLAN;
2733 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); 3014 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
2734 } 3015 }
2735 3016
3017 if (skb->protocol == htons(ETH_P_IP))
3018 tx_flags |= IGB_TX_FLAGS_IPV4;
3019
2736 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags, 3020 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
2737 &hdr_len) : 0; 3021 &hdr_len) : 0;
2738 3022
2739 if (tso < 0) { 3023 if (tso < 0) {
2740 dev_kfree_skb_any(skb); 3024 dev_kfree_skb_any(skb);
2741 spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags);
2742 return NETDEV_TX_OK; 3025 return NETDEV_TX_OK;
2743 } 3026 }
2744 3027
@@ -2748,9 +3031,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2748 if (skb->ip_summed == CHECKSUM_PARTIAL) 3031 if (skb->ip_summed == CHECKSUM_PARTIAL)
2749 tx_flags |= IGB_TX_FLAGS_CSUM; 3032 tx_flags |= IGB_TX_FLAGS_CSUM;
2750 3033
2751 if (skb->protocol == htons(ETH_P_IP))
2752 tx_flags |= IGB_TX_FLAGS_IPV4;
2753
2754 igb_tx_queue_adv(adapter, tx_ring, tx_flags, 3034 igb_tx_queue_adv(adapter, tx_ring, tx_flags,
2755 igb_tx_map_adv(adapter, tx_ring, skb), 3035 igb_tx_map_adv(adapter, tx_ring, skb),
2756 skb->len, hdr_len); 3036 skb->len, hdr_len);
@@ -2760,14 +3040,22 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2760 /* Make sure there is space in the ring for the next send. */ 3040 /* Make sure there is space in the ring for the next send. */
2761 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4); 3041 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
2762 3042
2763 spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags);
2764 return NETDEV_TX_OK; 3043 return NETDEV_TX_OK;
2765} 3044}
2766 3045
2767static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev) 3046static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
2768{ 3047{
2769 struct igb_adapter *adapter = netdev_priv(netdev); 3048 struct igb_adapter *adapter = netdev_priv(netdev);
2770 struct igb_ring *tx_ring = &adapter->tx_ring[0]; 3049 struct igb_ring *tx_ring;
3050
3051#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 3052 int r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1);
3054 tx_ring = adapter->multi_tx_table[r_idx];
3055#else
3056 tx_ring = &adapter->tx_ring[0];
3057#endif
3058
2771 3059
2772 /* This goes back to the question of how to logically map a tx queue 3060 /* This goes back to the question of how to logically map a tx queue
2773 * to a flow. Right now, performance is impacted slightly negatively 3061 * to a flow. Right now, performance is impacted slightly negatively
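
(The comment above is truncated by the diff context.) The selection path folds the stack's queue_mapping into the available rings with a power-of-two mask, so IGB_MAX_TX_QUEUES must be a power of two; the resulting slot is then mapped onto a real ring by the multi_tx_table built in igb_setup_all_tx_resources(). Illustration with an assumed IGB_MAX_TX_QUEUES of 4:

    #include <stdio.h>

    #define IGB_MAX_TX_QUEUES 4   /* assumed; must be a power of two */

    int main(void)
    {
        /* Any queue_mapping the stack hands down wraps into a valid
         * table slot in one AND, no division needed. */
        for (unsigned int qm = 0; qm < 8; qm++)
            printf("queue_mapping %u -> table slot %u\n",
                   qm, qm & (IGB_MAX_TX_QUEUES - 1));
        return 0;
    }
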
@@ -2862,7 +3150,11 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
2862 else if (max_frame <= IGB_RXBUFFER_2048) 3150 else if (max_frame <= IGB_RXBUFFER_2048)
2863 adapter->rx_buffer_len = IGB_RXBUFFER_2048; 3151 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
2864 else 3152 else
2865 adapter->rx_buffer_len = IGB_RXBUFFER_4096; 3153#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
3154 adapter->rx_buffer_len = IGB_RXBUFFER_16384;
3155#else
3156 adapter->rx_buffer_len = PAGE_SIZE / 2;
3157#endif
2866 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3158 /* adjust allocation if LPE protects us, and we aren't using SBP */
2867 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 3159 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
2868 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)) 3160 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
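
The final branch above sizes jumbo receive buffers at half a page, since the rewritten rx path hands out page halves, capping at 16 KB on large-page architectures. The compile-time #if reduces to:

    #include <stdio.h>

    #define IGB_RXBUFFER_16384 16384

    /* Half a page per jumbo buffer, capped at 16 KB where pages are
     * large enough that half a page would exceed the hardware limit. */
    static long jumbo_buffer_len(long page_size)
    {
        return (page_size / 2) > IGB_RXBUFFER_16384
               ? IGB_RXBUFFER_16384 : page_size / 2;
    }

    int main(void)
    {
        printf("4 KB pages  -> %ld\n", jumbo_buffer_len(4096));   /* 2048  */
        printf("64 KB pages -> %ld\n", jumbo_buffer_len(65536));  /* 16384 */
        return 0;
    }
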
@@ -3035,7 +3327,7 @@ static irqreturn_t igb_msix_other(int irq, void *data)
3035 /* guard against interrupt when we're going down */ 3327 /* guard against interrupt when we're going down */
3036 if (!test_bit(__IGB_DOWN, &adapter->state)) 3328 if (!test_bit(__IGB_DOWN, &adapter->state))
3037 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3329 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3038 3330
3039no_link_interrupt: 3331no_link_interrupt:
3040 wr32(E1000_IMS, E1000_IMS_LSC); 3332 wr32(E1000_IMS, E1000_IMS_LSC);
3041 wr32(E1000_EIMS, adapter->eims_other); 3333 wr32(E1000_EIMS, adapter->eims_other);
@@ -3049,42 +3341,186 @@ static irqreturn_t igb_msix_tx(int irq, void *data)
3049 struct igb_adapter *adapter = tx_ring->adapter; 3341 struct igb_adapter *adapter = tx_ring->adapter;
3050 struct e1000_hw *hw = &adapter->hw; 3342 struct e1000_hw *hw = &adapter->hw;
3051 3343
3052 if (!tx_ring->itr_val) 3344#ifdef CONFIG_DCA
3053 wr32(E1000_EIMC, tx_ring->eims_value); 3345 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3054 3346 igb_update_tx_dca(tx_ring);
3347#endif
3055 tx_ring->total_bytes = 0; 3348 tx_ring->total_bytes = 0;
3056 tx_ring->total_packets = 0; 3349 tx_ring->total_packets = 0;
3350
3351 /* auto mask will automatically reenable the interrupt when we write
3352 * EICS */
3057 if (!igb_clean_tx_irq(tx_ring)) 3353 if (!igb_clean_tx_irq(tx_ring))
3058 /* Ring was not completely cleaned, so fire another interrupt */ 3354 /* Ring was not completely cleaned, so fire another interrupt */
3059 wr32(E1000_EICS, tx_ring->eims_value); 3355 wr32(E1000_EICS, tx_ring->eims_value);
3060 3356 else
3061 if (!tx_ring->itr_val)
3062 wr32(E1000_EIMS, tx_ring->eims_value); 3357 wr32(E1000_EIMS, tx_ring->eims_value);
3358
3063 return IRQ_HANDLED; 3359 return IRQ_HANDLED;
3064} 3360}
3065 3361
3362static void igb_write_itr(struct igb_ring *ring)
3363{
3364 struct e1000_hw *hw = &ring->adapter->hw;
3365 if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
3366 switch (hw->mac.type) {
3367 case e1000_82576:
3368 wr32(ring->itr_register,
3369 ring->itr_val |
3370 0x80000000);
3371 break;
3372 default:
3373 wr32(ring->itr_register,
3374 ring->itr_val |
3375 (ring->itr_val << 16));
3376 break;
3377 }
3378 ring->set_itr = 0;
3379 }
3380}
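
igb_write_itr() builds a different register image per MAC: on 82576 the new interval is written with bit 31 set (assumed here to make the hardware latch the new value immediately; the patch does not name the bit), while older parts replicate the value into both halves of the register:

    #include <stdio.h>
    #include <stdint.h>

    /* Register image written by igb_write_itr() above. */
    static uint32_t eitr_image(int is_82576, uint32_t itr_val)
    {
        if (is_82576)
            return itr_val | 0x80000000u;   /* bit 31: assumed latch bit */
        return itr_val | (itr_val << 16);   /* value in both halves */
    }

    int main(void)
    {
        printf("82576: 0x%08X\n", (unsigned)eitr_image(1, 976)); /* 0x800003D0 */
        printf("82575: 0x%08X\n", (unsigned)eitr_image(0, 976)); /* 0x03D003D0 */
        return 0;
    }
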
3381
3066static irqreturn_t igb_msix_rx(int irq, void *data) 3382static irqreturn_t igb_msix_rx(int irq, void *data)
3067{ 3383{
3068 struct igb_ring *rx_ring = data; 3384 struct igb_ring *rx_ring = data;
3069 struct igb_adapter *adapter = rx_ring->adapter; 3385 struct igb_adapter *adapter = rx_ring->adapter;
3070 struct e1000_hw *hw = &adapter->hw;
3071 3386
3072 /* Write the ITR value calculated at the end of the 3387 /* Write the ITR value calculated at the end of the
3073 * previous interrupt. 3388 * previous interrupt.
3074 */ 3389 */
3075 3390
3076 if (adapter->set_itr) { 3391 igb_write_itr(rx_ring);
3077 wr32(rx_ring->itr_register,
3078 1000000000 / (rx_ring->itr_val * 256));
3079 adapter->set_itr = 0;
3080 }
3081 3392
3082 if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi)) 3393 if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
3083 __netif_rx_schedule(adapter->netdev, &rx_ring->napi); 3394 __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
3084 3395
3085 return IRQ_HANDLED; 3396#ifdef CONFIG_DCA
3397 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3398 igb_update_rx_dca(rx_ring);
3399#endif
3400 return IRQ_HANDLED;
3086} 3401}
3087 3402
3403#ifdef CONFIG_DCA
3404static void igb_update_rx_dca(struct igb_ring *rx_ring)
3405{
3406 u32 dca_rxctrl;
3407 struct igb_adapter *adapter = rx_ring->adapter;
3408 struct e1000_hw *hw = &adapter->hw;
3409 int cpu = get_cpu();
3410 int q = rx_ring - adapter->rx_ring;
3411
3412 if (rx_ring->cpu != cpu) {
3413 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
3414 if (hw->mac.type == e1000_82576) {
3415 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
3416 dca_rxctrl |= dca_get_tag(cpu) <<
3417 E1000_DCA_RXCTRL_CPUID_SHIFT;
3418 } else {
3419 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
3420 dca_rxctrl |= dca_get_tag(cpu);
3421 }
3422 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
3423 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
3424 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
3425 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
3426 rx_ring->cpu = cpu;
3427 }
3428 put_cpu();
3429}
3430
3431static void igb_update_tx_dca(struct igb_ring *tx_ring)
3432{
3433 u32 dca_txctrl;
3434 struct igb_adapter *adapter = tx_ring->adapter;
3435 struct e1000_hw *hw = &adapter->hw;
3436 int cpu = get_cpu();
3437 int q = tx_ring - adapter->tx_ring;
3438
3439 if (tx_ring->cpu != cpu) {
3440 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
3441 if (hw->mac.type == e1000_82576) {
3442 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
3443 dca_txctrl |= dca_get_tag(cpu) <<
3444 E1000_DCA_TXCTRL_CPUID_SHIFT;
3445 } else {
3446 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
3447 dca_txctrl |= dca_get_tag(cpu);
3448 }
3449 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
3450 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
3451 tx_ring->cpu = cpu;
3452 }
3453 put_cpu();
3454}
3455
3456static void igb_setup_dca(struct igb_adapter *adapter)
3457{
3458 int i;
3459
3460 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
3461 return;
3462
3463 for (i = 0; i < adapter->num_tx_queues; i++) {
3464 adapter->tx_ring[i].cpu = -1;
3465 igb_update_tx_dca(&adapter->tx_ring[i]);
3466 }
3467 for (i = 0; i < adapter->num_rx_queues; i++) {
3468 adapter->rx_ring[i].cpu = -1;
3469 igb_update_rx_dca(&adapter->rx_ring[i]);
3470 }
3471}
3472
3473static int __igb_notify_dca(struct device *dev, void *data)
3474{
3475 struct net_device *netdev = dev_get_drvdata(dev);
3476 struct igb_adapter *adapter = netdev_priv(netdev);
3477 struct e1000_hw *hw = &adapter->hw;
3478 unsigned long event = *(unsigned long *)data;
3479
3480 if (!(adapter->flags & IGB_FLAG_HAS_DCA))
3481 goto out;
3482
3483 switch (event) {
3484 case DCA_PROVIDER_ADD:
3485 /* if already enabled, don't do it again */
3486 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3487 break;
3488 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3489 /* Always use CB2 mode, difference is masked
3490 * in the CB driver. */
3491 wr32(E1000_DCA_CTRL, 2);
3492 if (dca_add_requester(dev) == 0) {
3493 dev_info(&adapter->pdev->dev, "DCA enabled\n");
3494 igb_setup_dca(adapter);
3495 break;
3496 }
3497 /* Fall Through since DCA is disabled. */
3498 case DCA_PROVIDER_REMOVE:
3499 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3500 /* without this a class_device is left
3501 * hanging around in the sysfs model */
3502 dca_remove_requester(dev);
3503 dev_info(&adapter->pdev->dev, "DCA disabled\n");
3504 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3505 wr32(E1000_DCA_CTRL, 1);
3506 }
3507 break;
3508 }
3509out:
3510 return 0;
3511}
3512
3513static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
3514 void *p)
3515{
3516 int ret_val;
3517
3518 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
3519 __igb_notify_dca);
3520
3521 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3522}
3523#endif /* CONFIG_DCA */
3088 3524
3089/** 3525/**
3090 * igb_intr_msi - Interrupt Handler 3526 * igb_intr_msi - Interrupt Handler
@@ -3099,13 +3535,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
3099 /* read ICR disables interrupts using IAM */ 3535 /* read ICR disables interrupts using IAM */
3100 u32 icr = rd32(E1000_ICR); 3536 u32 icr = rd32(E1000_ICR);
3101 3537
3102 /* Write the ITR value calculated at the end of the 3538 igb_write_itr(adapter->rx_ring);
3103 * previous interrupt.
3104 */
3105 if (adapter->set_itr) {
3106 wr32(E1000_ITR, 1000000000 / (adapter->itr * 256));
3107 adapter->set_itr = 0;
3108 }
3109 3539
3110 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3540 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3111 hw->mac.get_link_status = 1; 3541 hw->mac.get_link_status = 1;
@@ -3135,13 +3565,7 @@ static irqreturn_t igb_intr(int irq, void *data)
3135 if (!icr) 3565 if (!icr)
3136 return IRQ_NONE; /* Not our interrupt */ 3566 return IRQ_NONE; /* Not our interrupt */
3137 3567
3138 /* Write the ITR value calculated at the end of the 3568 igb_write_itr(adapter->rx_ring);
3139 * previous interrupt.
3140 */
3141 if (adapter->set_itr) {
3142 wr32(E1000_ITR, 1000000000 / (adapter->itr * 256));
3143 adapter->set_itr = 0;
3144 }
3145 3569
3146 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 3570 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
3147 * not set, then the adapter didn't send an interrupt */ 3571 * not set, then the adapter didn't send an interrupt */
@@ -3163,44 +3587,35 @@ static irqreturn_t igb_intr(int irq, void *data)
3163} 3587}
3164 3588
3165/** 3589/**
3166 * igb_clean - NAPI Rx polling callback 3590 * igb_poll - NAPI Rx polling callback
3167 * @adapter: board private structure 3591 * @napi: napi polling structure
3592 * @budget: count of how many packets we should handle
3168 **/ 3593 **/
3169static int igb_clean(struct napi_struct *napi, int budget) 3594static int igb_poll(struct napi_struct *napi, int budget)
3170{ 3595{
3171 struct igb_adapter *adapter = container_of(napi, struct igb_adapter, 3596 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
3172 napi); 3597 struct igb_adapter *adapter = rx_ring->adapter;
3173 struct net_device *netdev = adapter->netdev; 3598 struct net_device *netdev = adapter->netdev;
3174 int tx_clean_complete = 1, work_done = 0; 3599 int tx_clean_complete, work_done = 0;
3175 int i;
3176
3177 /* Must NOT use netdev_priv macro here. */
3178 adapter = netdev->priv;
3179
3180 /* Keep link state information with original netdev */
3181 if (!netif_carrier_ok(netdev))
3182 goto quit_polling;
3183 3600
3184 /* igb_clean is called per-cpu. This lock protects tx_ring[i] from 3601 /* this poll routine only supports one tx and one rx queue */
3185 * being cleaned by multiple cpus simultaneously. A failure obtaining 3602#ifdef CONFIG_DCA
3186 * the lock means tx_ring[i] is currently being cleaned anyway. */ 3603 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3187 for (i = 0; i < adapter->num_tx_queues; i++) { 3604 igb_update_tx_dca(&adapter->tx_ring[0]);
3188 if (spin_trylock(&adapter->tx_ring[i].tx_clean_lock)) { 3605#endif
3189 tx_clean_complete &= igb_clean_tx_irq(&adapter->tx_ring[i]); 3606 tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);
3190 spin_unlock(&adapter->tx_ring[i].tx_clean_lock);
3191 }
3192 }
3193 3607
3194 for (i = 0; i < adapter->num_rx_queues; i++) 3608#ifdef CONFIG_DCA
3195 igb_clean_rx_irq_adv(&adapter->rx_ring[i], &work_done, 3609 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3196 adapter->rx_ring[i].napi.weight); 3610 igb_update_rx_dca(&adapter->rx_ring[0]);
3611#endif
3612 igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget);
3197 3613
3198 /* If no Tx and not enough Rx work done, exit the polling mode */ 3614 /* If no Tx and not enough Rx work done, exit the polling mode */
3199 if ((tx_clean_complete && (work_done < budget)) || 3615 if ((tx_clean_complete && (work_done < budget)) ||
3200 !netif_running(netdev)) { 3616 !netif_running(netdev)) {
3201quit_polling:
3202 if (adapter->itr_setting & 3) 3617 if (adapter->itr_setting & 3)
3203 igb_set_itr(adapter, E1000_ITR, false); 3618 igb_set_itr(adapter);
3204 netif_rx_complete(netdev, napi); 3619 netif_rx_complete(netdev, napi);
3205 if (!test_bit(__IGB_DOWN, &adapter->state)) 3620 if (!test_bit(__IGB_DOWN, &adapter->state))
3206 igb_irq_enable(adapter); 3621 igb_irq_enable(adapter);
@@ -3222,6 +3637,10 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3222 if (!netif_carrier_ok(netdev)) 3637 if (!netif_carrier_ok(netdev))
3223 goto quit_polling; 3638 goto quit_polling;
3224 3639
3640#ifdef CONFIG_DCA
3641 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3642 igb_update_rx_dca(rx_ring);
3643#endif
3225 igb_clean_rx_irq_adv(rx_ring, &work_done, budget); 3644 igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
3226 3645
3227 3646
@@ -3230,15 +3649,11 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3230quit_polling: 3649quit_polling:
3231 netif_rx_complete(netdev, napi); 3650 netif_rx_complete(netdev, napi);
3232 3651
3233 wr32(E1000_EIMS, rx_ring->eims_value); 3652 if (adapter->itr_setting & 3) {
3234 if ((adapter->itr_setting & 3) && !rx_ring->no_itr_adjust && 3653 if (adapter->num_rx_queues == 1)
3235 (rx_ring->total_packets > IGB_DYN_ITR_PACKET_THRESHOLD)) { 3654 igb_set_itr(adapter);
3236 int mean_size = rx_ring->total_bytes / 3655 else
3237 rx_ring->total_packets; 3656 igb_update_ring_itr(rx_ring);
3238 if (mean_size < IGB_DYN_ITR_LENGTH_LOW)
3239 igb_raise_rx_eitr(adapter, rx_ring);
3240 else if (mean_size > IGB_DYN_ITR_LENGTH_HIGH)
3241 igb_lower_rx_eitr(adapter, rx_ring);
3242 } 3657 }
3243 3658
3244 if (!test_bit(__IGB_DOWN, &adapter->state)) 3659 if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -3327,11 +3742,19 @@ done_cleaning:
3327 * sees the new next_to_clean. 3742 * sees the new next_to_clean.
3328 */ 3743 */
3329 smp_mb(); 3744 smp_mb();
3745#ifdef CONFIG_NETDEVICES_MULTIQUEUE
3746 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
3747 !(test_bit(__IGB_DOWN, &adapter->state))) {
3748 netif_wake_subqueue(netdev, tx_ring->queue_index);
3749 ++adapter->restart_queue;
3750 }
3751#else
3330 if (netif_queue_stopped(netdev) && 3752 if (netif_queue_stopped(netdev) &&
3331 !(test_bit(__IGB_DOWN, &adapter->state))) { 3753 !(test_bit(__IGB_DOWN, &adapter->state))) {
3332 netif_wake_queue(netdev); 3754 netif_wake_queue(netdev);
3333 ++adapter->restart_queue; 3755 ++adapter->restart_queue;
3334 } 3756 }
3757#endif
3335 } 3758 }
3336 3759
3337 if (tx_ring->detect_tx_hung) { 3760 if (tx_ring->detect_tx_hung) {
@@ -3348,7 +3771,7 @@ done_cleaning:
3348 /* detected Tx unit hang */ 3771 /* detected Tx unit hang */
3349 dev_err(&adapter->pdev->dev, 3772 dev_err(&adapter->pdev->dev,
3350 "Detected Tx Unit Hang\n" 3773 "Detected Tx Unit Hang\n"
3351 " Tx Queue <%lu>\n" 3774 " Tx Queue <%d>\n"
3352 " TDH <%x>\n" 3775 " TDH <%x>\n"
3353 " TDT <%x>\n" 3776 " TDT <%x>\n"
3354 " next_to_use <%x>\n" 3777 " next_to_use <%x>\n"
@@ -3358,8 +3781,7 @@ done_cleaning:
3358 " time_stamp <%lx>\n" 3781 " time_stamp <%lx>\n"
3359 " jiffies <%lx>\n" 3782 " jiffies <%lx>\n"
3360 " desc.status <%x>\n", 3783 " desc.status <%x>\n",
3361 (unsigned long)((tx_ring - adapter->tx_ring) / 3784 tx_ring->queue_index,
3362 sizeof(struct igb_ring)),
3363 readl(adapter->hw.hw_addr + tx_ring->head), 3785 readl(adapter->hw.hw_addr + tx_ring->head),
3364 readl(adapter->hw.hw_addr + tx_ring->tail), 3786 readl(adapter->hw.hw_addr + tx_ring->tail),
3365 tx_ring->next_to_use, 3787 tx_ring->next_to_use,
@@ -3368,32 +3790,91 @@ done_cleaning:
3368 tx_ring->buffer_info[i].time_stamp, 3790 tx_ring->buffer_info[i].time_stamp,
3369 jiffies, 3791 jiffies,
3370 tx_desc->upper.fields.status); 3792 tx_desc->upper.fields.status);
3793#ifdef CONFIG_NETDEVICES_MULTIQUEUE
3794 netif_stop_subqueue(netdev, tx_ring->queue_index);
3795#else
3371 netif_stop_queue(netdev); 3796 netif_stop_queue(netdev);
3797#endif
3372 } 3798 }
3373 } 3799 }
3374 tx_ring->total_bytes += total_bytes; 3800 tx_ring->total_bytes += total_bytes;
3375 tx_ring->total_packets += total_packets; 3801 tx_ring->total_packets += total_packets;
3802 tx_ring->tx_stats.bytes += total_bytes;
3803 tx_ring->tx_stats.packets += total_packets;
3376 adapter->net_stats.tx_bytes += total_bytes; 3804 adapter->net_stats.tx_bytes += total_bytes;
3377 adapter->net_stats.tx_packets += total_packets; 3805 adapter->net_stats.tx_packets += total_packets;
3378 return retval; 3806 return retval;
3379} 3807}
3380 3808
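The queue wake/stop changes in the hunks above repeat one pattern: with CONFIG_NETDEVICES_MULTIQUEUE the driver now stops and wakes only the subqueue backing the Tx ring it is cleaning, instead of the whole device queue. Condensed into a kernel-context sketch (not standalone; the helper name is illustrative, the calls are the stock netdevice API):

static void example_tx_maybe_wake(struct net_device *netdev, int queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        if (__netif_subqueue_stopped(netdev, queue_index))
                netif_wake_subqueue(netdev, queue_index);
#else
        if (netif_queue_stopped(netdev))
                netif_wake_queue(netdev);
#endif
}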
3809#ifdef CONFIG_IGB_LRO
3810 /**
3811 * igb_get_skb_hdr - helper function for LRO header processing
3812 * @skb: pointer to sk_buff to be added to LRO packet
3813 * @iphdr: pointer to ip header structure
3814 * @tcph: pointer to tcp header structure
3815 * @hdr_flags: pointer to header flags
3816 * @priv: pointer to the receive descriptor for the current sk_buff
3817 **/
3818static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
3819 u64 *hdr_flags, void *priv)
3820{
3821 union e1000_adv_rx_desc *rx_desc = priv;
3822 u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info &
3823 (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
3824
3825 /* Verify that this is a valid IPv4 TCP packet */
3826 if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
3827 E1000_RXDADV_PKTTYPE_TCP))
3828 return -1;
3829
3830 /* Set network headers */
3831 skb_reset_network_header(skb);
3832 skb_set_transport_header(skb, ip_hdrlen(skb));
3833 *iphdr = ip_hdr(skb);
3834 *tcph = tcp_hdr(skb);
3835 *hdr_flags = LRO_IPV4 | LRO_TCP;
3836
3837 return 0;
3838
3839}
3840#endif /* CONFIG_IGB_LRO */
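igb_get_skb_hdr() above is the header-parse callback the inet_lro engine calls for every candidate sk_buff. Elsewhere in this patch the ring setup code hands it to the LRO manager; a condensed kernel-context sketch of that wiring (struct fields from <linux/inet_lro.h>; the helper name and the max_aggr value are assumptions of this sketch):

#include <linux/inet_lro.h>

static void example_lro_setup(struct net_lro_mgr *mgr,
                              struct net_device *netdev,
                              struct net_lro_desc *descs, int ndescs)
{
        mgr->dev            = netdev;
        mgr->features       = LRO_F_NAPI;           /* flushed from NAPI poll */
        mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; /* hw validated checksums */
        mgr->lro_arr        = descs;                /* per-ring aggregation   */
        mgr->max_desc       = ndescs;
        mgr->max_aggr       = 32;                   /* illustrative cap       */
        mgr->get_skb_header = igb_get_skb_hdr;      /* the callback above     */
}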
3381 3841
3382/** 3842/**
3383 * igb_receive_skb - helper function to handle rx indications 3843 * igb_receive_skb - helper function to handle rx indications
3384 * @adapter: board private structure 3844 * @ring: pointer to receive ring receiving this packet
3385 * @status: descriptor status field as written by hardware 3845 * @status: descriptor status field as written by hardware
3386 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 3846 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3387 * @skb: pointer to sk_buff to be indicated to stack 3847 * @skb: pointer to sk_buff to be indicated to stack
3388 **/ 3848 **/
3389static void igb_receive_skb(struct igb_adapter *adapter, u8 status, __le16 vlan, 3849static void igb_receive_skb(struct igb_ring *ring, u8 status,
3390 struct sk_buff *skb) 3850 union e1000_adv_rx_desc *rx_desc,
3851 struct sk_buff *skb)
3391{ 3852{
3392 if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) 3853 struct igb_adapter *adapter = ring->adapter;
3393 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3854 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
3394 le16_to_cpu(vlan)); 3855
3395 else 3856#ifdef CONFIG_IGB_LRO
3396 netif_receive_skb(skb); 3857 if (adapter->netdev->features & NETIF_F_LRO &&
3858 skb->ip_summed == CHECKSUM_UNNECESSARY) {
3859 if (vlan_extracted)
3860 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
3861 adapter->vlgrp,
3862 le16_to_cpu(rx_desc->wb.upper.vlan),
3863 rx_desc);
3864 else
3865 lro_receive_skb(&ring->lro_mgr,skb, rx_desc);
3866 ring->lro_used = 1;
3867 } else {
3868#endif
3869 if (vlan_extracted)
3870 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3871 le16_to_cpu(rx_desc->wb.upper.vlan));
3872 else
3873
3874 netif_receive_skb(skb);
3875#ifdef CONFIG_IGB_LRO
3876 }
3877#endif
3397} 3878}
3398 3879
3399 3880
@@ -3428,7 +3909,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
3428 union e1000_adv_rx_desc *rx_desc , *next_rxd; 3909 union e1000_adv_rx_desc *rx_desc , *next_rxd;
3429 struct igb_buffer *buffer_info , *next_buffer; 3910 struct igb_buffer *buffer_info , *next_buffer;
3430 struct sk_buff *skb; 3911 struct sk_buff *skb;
3431 unsigned int i, j; 3912 unsigned int i;
3432 u32 length, hlen, staterr; 3913 u32 length, hlen, staterr;
3433 bool cleaned = false; 3914 bool cleaned = false;
3434 int cleaned_count = 0; 3915 int cleaned_count = 0;
@@ -3458,64 +3939,48 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
3458 cleaned = true; 3939 cleaned = true;
3459 cleaned_count++; 3940 cleaned_count++;
3460 3941
3461 if (rx_ring->pending_skb != NULL) { 3942 skb = buffer_info->skb;
3462 skb = rx_ring->pending_skb; 3943 prefetch(skb->data - NET_IP_ALIGN);
3463 rx_ring->pending_skb = NULL; 3944 buffer_info->skb = NULL;
3464 j = rx_ring->pending_skb_page; 3945 if (!adapter->rx_ps_hdr_size) {
3465 } else { 3946 pci_unmap_single(pdev, buffer_info->dma,
3466 skb = buffer_info->skb; 3947 adapter->rx_buffer_len +
3467 prefetch(skb->data - NET_IP_ALIGN); 3948 NET_IP_ALIGN,
3468 buffer_info->skb = NULL; 3949 PCI_DMA_FROMDEVICE);
3469 if (hlen) { 3950 skb_put(skb, length);
3470 pci_unmap_single(pdev, buffer_info->dma, 3951 goto send_up;
3471 adapter->rx_ps_hdr_size +
3472 NET_IP_ALIGN,
3473 PCI_DMA_FROMDEVICE);
3474 skb_put(skb, hlen);
3475 } else {
3476 pci_unmap_single(pdev, buffer_info->dma,
3477 adapter->rx_buffer_len +
3478 NET_IP_ALIGN,
3479 PCI_DMA_FROMDEVICE);
3480 skb_put(skb, length);
3481 goto send_up;
3482 }
3483 j = 0;
3484 } 3952 }
3485 3953
3486 while (length) { 3954 if (!skb_shinfo(skb)->nr_frags) {
3955 pci_unmap_single(pdev, buffer_info->dma,
3956 adapter->rx_ps_hdr_size +
3957 NET_IP_ALIGN,
3958 PCI_DMA_FROMDEVICE);
3959 skb_put(skb, hlen);
3960 }
3961
3962 if (length) {
3487 pci_unmap_page(pdev, buffer_info->page_dma, 3963 pci_unmap_page(pdev, buffer_info->page_dma,
3488 PAGE_SIZE, PCI_DMA_FROMDEVICE); 3964 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
3489 buffer_info->page_dma = 0; 3965 buffer_info->page_dma = 0;
3490 skb_fill_page_desc(skb, j, buffer_info->page, 3966
3491 0, length); 3967 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
3492 buffer_info->page = NULL; 3968 buffer_info->page,
3969 buffer_info->page_offset,
3970 length);
3971
3972 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
3973 (page_count(buffer_info->page) != 1))
3974 buffer_info->page = NULL;
3975 else
3976 get_page(buffer_info->page);
3493 3977
3494 skb->len += length; 3978 skb->len += length;
3495 skb->data_len += length; 3979 skb->data_len += length;
3496 skb->truesize += length;
3497 rx_desc->wb.upper.status_error = 0;
3498 if (staterr & E1000_RXD_STAT_EOP)
3499 break;
3500 3980
3501 j++; 3981 skb->truesize += length;
3502 cleaned_count++;
3503 i++;
3504 if (i == rx_ring->count)
3505 i = 0;
3506
3507 buffer_info = &rx_ring->buffer_info[i];
3508 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3509 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3510 length = le16_to_cpu(rx_desc->wb.upper.length);
3511 if (!(staterr & E1000_RXD_STAT_DD)) {
3512 rx_ring->pending_skb = skb;
3513 rx_ring->pending_skb_page = j;
3514 goto out;
3515 }
3516 } 3982 }
3517send_up: 3983send_up:
3518 pskb_trim(skb, skb->len - 4);
3519 i++; 3984 i++;
3520 if (i == rx_ring->count) 3985 if (i == rx_ring->count)
3521 i = 0; 3986 i = 0;
@@ -3523,11 +3988,16 @@ send_up:
3523 prefetch(next_rxd); 3988 prefetch(next_rxd);
3524 next_buffer = &rx_ring->buffer_info[i]; 3989 next_buffer = &rx_ring->buffer_info[i];
3525 3990
3991 if (!(staterr & E1000_RXD_STAT_EOP)) {
3992 buffer_info->skb = xchg(&next_buffer->skb, skb);
3993 buffer_info->dma = xchg(&next_buffer->dma, 0);
3994 goto next_desc;
3995 }
3996
3526 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { 3997 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
3527 dev_kfree_skb_irq(skb); 3998 dev_kfree_skb_irq(skb);
3528 goto next_desc; 3999 goto next_desc;
3529 } 4000 }
3530 rx_ring->no_itr_adjust |= (staterr & E1000_RXD_STAT_DYNINT);
3531 4001
3532 total_bytes += skb->len; 4002 total_bytes += skb->len;
3533 total_packets++; 4003 total_packets++;
@@ -3536,7 +4006,7 @@ send_up:
3536 4006
3537 skb->protocol = eth_type_trans(skb, netdev); 4007 skb->protocol = eth_type_trans(skb, netdev);
3538 4008
3539 igb_receive_skb(adapter, staterr, rx_desc->wb.upper.vlan, skb); 4009 igb_receive_skb(rx_ring, staterr, rx_desc, skb);
3540 4010
3541 netdev->last_rx = jiffies; 4011 netdev->last_rx = jiffies;
3542 4012
@@ -3555,10 +4025,17 @@ next_desc:
3555 4025
3556 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 4026 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3557 } 4027 }
3558out: 4028
3559 rx_ring->next_to_clean = i; 4029 rx_ring->next_to_clean = i;
3560 cleaned_count = IGB_DESC_UNUSED(rx_ring); 4030 cleaned_count = IGB_DESC_UNUSED(rx_ring);
3561 4031
4032#ifdef CONFIG_IGB_LRO
4033 if (rx_ring->lro_used) {
4034 lro_flush_all(&rx_ring->lro_mgr);
4035 rx_ring->lro_used = 0;
4036 }
4037#endif
4038
3562 if (cleaned_count) 4039 if (cleaned_count)
3563 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); 4040 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
3564 4041
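Besides the LRO flush, the rewritten receive loop above changes how frames spanning several descriptors are carried: instead of parking a partial skb in the ring-wide pending_skb/pending_skb_page fields, the in-progress skb rides forward one buffer slot at a time via xchg() until the EOP descriptor is reached. A standalone model of that hand-off, with plain assignment standing in for the atomic xchg():

#include <stdio.h>
#include <stddef.h>

struct slot { void *skb; };

/* Swap the in-progress skb into the next slot, inheriting that slot's
 * spare buffer in return, exactly one step per non-EOP descriptor. */
static void carry_forward(struct slot *cur, struct slot *next)
{
        void *spare = next->skb;
        next->skb = cur->skb;
        cur->skb = spare;
}

int main(void)
{
        struct slot a = { .skb = (void *)0x1 }, b = { .skb = NULL };
        carry_forward(&a, &b);
        printf("a=%p b=%p\n", a.skb, b.skb);
        return 0;
}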
@@ -3593,16 +4070,22 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
3593 while (cleaned_count--) { 4070 while (cleaned_count--) {
3594 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 4071 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3595 4072
3596 if (adapter->rx_ps_hdr_size && !buffer_info->page) { 4073 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
3597 buffer_info->page = alloc_page(GFP_ATOMIC);
3598 if (!buffer_info->page) { 4074 if (!buffer_info->page) {
3599 adapter->alloc_rx_buff_failed++; 4075 buffer_info->page = alloc_page(GFP_ATOMIC);
3600 goto no_buffers; 4076 if (!buffer_info->page) {
4077 adapter->alloc_rx_buff_failed++;
4078 goto no_buffers;
4079 }
4080 buffer_info->page_offset = 0;
4081 } else {
4082 buffer_info->page_offset ^= PAGE_SIZE / 2;
3601 } 4083 }
3602 buffer_info->page_dma = 4084 buffer_info->page_dma =
3603 pci_map_page(pdev, 4085 pci_map_page(pdev,
3604 buffer_info->page, 4086 buffer_info->page,
3605 0, PAGE_SIZE, 4087 buffer_info->page_offset,
4088 PAGE_SIZE / 2,
3606 PCI_DMA_FROMDEVICE); 4089 PCI_DMA_FROMDEVICE);
3607 } 4090 }
3608 4091
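The two hunks above switch the packet-split path from one full page per buffer to half pages: the refill code flips buffer_info->page_offset between 0 and PAGE_SIZE/2, and the clean path takes an extra page reference (get_page) when the other half can still be handed back to the ring. A standalone model of the offset flip:

#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
        unsigned int offset = 0;
        int i;

        /* one page now backs two receive buffers in alternation */
        for (i = 0; i < 4; i++) {
                printf("refill %d maps page offset %u\n", i, offset);
                offset ^= PAGE_SIZE / 2;   /* 0 -> 2048 -> 0 -> ... */
        }
        return 0;
}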
@@ -3869,7 +4352,7 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
3869 struct net_device *netdev = pci_get_drvdata(pdev); 4352 struct net_device *netdev = pci_get_drvdata(pdev);
3870 struct igb_adapter *adapter = netdev_priv(netdev); 4353 struct igb_adapter *adapter = netdev_priv(netdev);
3871 struct e1000_hw *hw = &adapter->hw; 4354 struct e1000_hw *hw = &adapter->hw;
3872 u32 ctrl, ctrl_ext, rctl, status; 4355 u32 ctrl, rctl, status;
3873 u32 wufc = adapter->wol; 4356 u32 wufc = adapter->wol;
3874#ifdef CONFIG_PM 4357#ifdef CONFIG_PM
3875 int retval = 0; 4358 int retval = 0;
@@ -3877,11 +4360,12 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
3877 4360
3878 netif_device_detach(netdev); 4361 netif_device_detach(netdev);
3879 4362
3880 if (netif_running(netdev)) { 4363 if (netif_running(netdev))
3881 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); 4364 igb_close(netdev);
3882 igb_down(adapter); 4365
3883 igb_free_irq(adapter); 4366 igb_reset_interrupt_capability(adapter);
3884 } 4367
4368 igb_free_queues(adapter);
3885 4369
3886#ifdef CONFIG_PM 4370#ifdef CONFIG_PM
3887 retval = pci_save_state(pdev); 4371 retval = pci_save_state(pdev);
@@ -3912,33 +4396,24 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
3912 ctrl |= E1000_CTRL_ADVD3WUC; 4396 ctrl |= E1000_CTRL_ADVD3WUC;
3913 wr32(E1000_CTRL, ctrl); 4397 wr32(E1000_CTRL, ctrl);
3914 4398
3915 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
3916 adapter->hw.phy.media_type ==
3917 e1000_media_type_internal_serdes) {
3918 /* keep the laser running in D3 */
3919 ctrl_ext = rd32(E1000_CTRL_EXT);
3920 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
3921 wr32(E1000_CTRL_EXT, ctrl_ext);
3922 }
3923
3924 /* Allow time for pending master requests to run */ 4399 /* Allow time for pending master requests to run */
3925 igb_disable_pcie_master(&adapter->hw); 4400 igb_disable_pcie_master(&adapter->hw);
3926 4401
3927 wr32(E1000_WUC, E1000_WUC_PME_EN); 4402 wr32(E1000_WUC, E1000_WUC_PME_EN);
3928 wr32(E1000_WUFC, wufc); 4403 wr32(E1000_WUFC, wufc);
3929 pci_enable_wake(pdev, PCI_D3hot, 1);
3930 pci_enable_wake(pdev, PCI_D3cold, 1);
3931 } else { 4404 } else {
3932 wr32(E1000_WUC, 0); 4405 wr32(E1000_WUC, 0);
3933 wr32(E1000_WUFC, 0); 4406 wr32(E1000_WUFC, 0);
3934 pci_enable_wake(pdev, PCI_D3hot, 0);
3935 pci_enable_wake(pdev, PCI_D3cold, 0);
3936 } 4407 }
3937 4408
3938 /* make sure adapter isn't asleep if manageability is enabled */ 4409 /* make sure adapter isn't asleep if manageability/wol is enabled */
3939 if (adapter->en_mng_pt) { 4410 if (wufc || adapter->en_mng_pt) {
3940 pci_enable_wake(pdev, PCI_D3hot, 1); 4411 pci_enable_wake(pdev, PCI_D3hot, 1);
3941 pci_enable_wake(pdev, PCI_D3cold, 1); 4412 pci_enable_wake(pdev, PCI_D3cold, 1);
4413 } else {
4414 igb_shutdown_fiber_serdes_link_82575(hw);
4415 pci_enable_wake(pdev, PCI_D3hot, 0);
4416 pci_enable_wake(pdev, PCI_D3cold, 0);
3942 } 4417 }
3943 4418
3944 /* Release control of h/w to f/w. If f/w is AMT enabled, this 4419 /* Release control of h/w to f/w. If f/w is AMT enabled, this
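The suspend rework above also simplifies the wake policy: pci_enable_wake() is now armed for D3hot/D3cold only when a wake-up filter (wufc) or manageability is active, and when neither is, the new igb_shutdown_fiber_serdes_link_82575() call parks the link instead of keeping the laser powered. The decision in isolation (kernel-context sketch; helper name illustrative):

static void example_arm_wake(struct pci_dev *pdev, u32 wufc, bool mng_enabled)
{
        bool wake = wufc || mng_enabled;

        /* both D3 states follow one decision rather than being
         * forced on unconditionally as in the old code */
        pci_enable_wake(pdev, PCI_D3hot, wake);
        pci_enable_wake(pdev, PCI_D3cold, wake);
}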
@@ -3977,10 +4452,11 @@ static int igb_resume(struct pci_dev *pdev)
3977 pci_enable_wake(pdev, PCI_D3hot, 0); 4452 pci_enable_wake(pdev, PCI_D3hot, 0);
3978 pci_enable_wake(pdev, PCI_D3cold, 0); 4453 pci_enable_wake(pdev, PCI_D3cold, 0);
3979 4454
3980 if (netif_running(netdev)) { 4455 igb_set_interrupt_capability(adapter);
3981 err = igb_request_irq(adapter); 4456
3982 if (err) 4457 if (igb_alloc_queues(adapter)) {
3983 return err; 4458 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
4459 return -ENOMEM;
3984 } 4460 }
3985 4461
3986 /* e1000_power_up_phy(adapter); */ 4462 /* e1000_power_up_phy(adapter); */
@@ -3988,10 +4464,11 @@ static int igb_resume(struct pci_dev *pdev)
3988 igb_reset(adapter); 4464 igb_reset(adapter);
3989 wr32(E1000_WUS, ~0); 4465 wr32(E1000_WUS, ~0);
3990 4466
3991 igb_init_manageability(adapter); 4467 if (netif_running(netdev)) {
3992 4468 err = igb_open(netdev);
3993 if (netif_running(netdev)) 4469 if (err)
3994 igb_up(adapter); 4470 return err;
4471 }
3995 4472
3996 netif_device_attach(netdev); 4473 netif_device_attach(netdev);
3997 4474
@@ -4021,6 +4498,8 @@ static void igb_netpoll(struct net_device *netdev)
4021 int work_done = 0; 4498 int work_done = 0;
4022 4499
4023 igb_irq_disable(adapter); 4500 igb_irq_disable(adapter);
4501 adapter->flags |= IGB_FLAG_IN_NETPOLL;
4502
4024 for (i = 0; i < adapter->num_tx_queues; i++) 4503 for (i = 0; i < adapter->num_tx_queues; i++)
4025 igb_clean_tx_irq(&adapter->tx_ring[i]); 4504 igb_clean_tx_irq(&adapter->tx_ring[i]);
4026 4505
@@ -4029,6 +4508,7 @@ static void igb_netpoll(struct net_device *netdev)
4029 &work_done, 4508 &work_done,
4030 adapter->rx_ring[i].napi.weight); 4509 adapter->rx_ring[i].napi.weight);
4031 4510
4511 adapter->flags &= ~IGB_FLAG_IN_NETPOLL;
4032 igb_irq_enable(adapter); 4512 igb_irq_enable(adapter);
4033} 4513}
4034#endif /* CONFIG_NET_POLL_CONTROLLER */ 4514#endif /* CONFIG_NET_POLL_CONTROLLER */
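That ends the igb_main.c changes. The netpoll hunk is small but load-bearing: it brackets the manual ring cleaning with IGB_FLAG_IN_NETPOLL so code shared with the NAPI path can detect it is being called from netpoll context. The bracket pattern (kernel-context sketch; helper name illustrative):

static void example_netpoll_clean(struct igb_adapter *adapter)
{
        adapter->flags |= IGB_FLAG_IN_NETPOLL;
        /* ... clean each Tx and Rx ring by hand ... */
        adapter->flags &= ~IGB_FLAG_IN_NETPOLL;
}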
diff --git a/drivers/net/ixgb/Makefile b/drivers/net/ixgb/Makefile
index 838a5084fa00..0b20c5e62ffe 100644
--- a/drivers/net/ixgb/Makefile
+++ b/drivers/net/ixgb/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel PRO/10GbE Linux driver 3# Intel PRO/10GbE Linux driver
4# Copyright(c) 1999 - 2006 Intel Corporation. 4# Copyright(c) 1999 - 2008 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 16f9c756aa46..804698fc6a8f 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/10GbE Linux driver 3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -89,18 +89,16 @@ struct ixgb_adapter;
89 89
90 90
91/* TX/RX descriptor defines */ 91/* TX/RX descriptor defines */
92#define DEFAULT_TXD 256 92#define DEFAULT_TXD 256
93#define MAX_TXD 4096 93#define MAX_TXD 4096
94#define MIN_TXD 64 94#define MIN_TXD 64
95 95
96/* hardware cannot reliably support more than 512 descriptors owned by 96/* hardware cannot reliably support more than 512 descriptors owned by
97 * hardware descrioptor cache otherwise an unreliable ring under heavy 97 * hardware descriptor cache otherwise an unreliable ring under heavy
98 * recieve load may result */ 98 * receive load may result */
99/* #define DEFAULT_RXD 1024 */ 99#define DEFAULT_RXD 512
100/* #define MAX_RXD 4096 */ 100#define MAX_RXD 512
101#define DEFAULT_RXD 512 101#define MIN_RXD 64
102#define MAX_RXD 512
103#define MIN_RXD 64
104 102
105/* Supported Rx Buffer Sizes */ 103/* Supported Rx Buffer Sizes */
106#define IXGB_RXBUFFER_2048 2048 104#define IXGB_RXBUFFER_2048 2048
@@ -157,7 +155,6 @@ struct ixgb_adapter {
157 u32 part_num; 155 u32 part_num;
158 u16 link_speed; 156 u16 link_speed;
159 u16 link_duplex; 157 u16 link_duplex;
160 spinlock_t tx_lock;
161 struct work_struct tx_timeout_task; 158 struct work_struct tx_timeout_task;
162 159
163 struct timer_list blink_timer; 160 struct timer_list blink_timer;
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 2f7ed52c7502..89ffa7264a12 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/10GbE Linux driver 3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -108,7 +108,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
108 */ 108 */
109 eecd_reg &= ~IXGB_EECD_DI; 109 eecd_reg &= ~IXGB_EECD_DI;
110 110
111 if(data & mask) 111 if (data & mask)
112 eecd_reg |= IXGB_EECD_DI; 112 eecd_reg |= IXGB_EECD_DI;
113 113
114 IXGB_WRITE_REG(hw, EECD, eecd_reg); 114 IXGB_WRITE_REG(hw, EECD, eecd_reg);
@@ -120,7 +120,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
120 120
121 mask = mask >> 1; 121 mask = mask >> 1;
122 122
123 } while(mask); 123 } while (mask);
124 124
125 /* We leave the "DI" bit set to "0" when we leave this routine. */ 125 /* We leave the "DI" bit set to "0" when we leave this routine. */
126 eecd_reg &= ~IXGB_EECD_DI; 126 eecd_reg &= ~IXGB_EECD_DI;
@@ -152,14 +152,14 @@ ixgb_shift_in_bits(struct ixgb_hw *hw)
152 eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI); 152 eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
153 data = 0; 153 data = 0;
154 154
155 for(i = 0; i < 16; i++) { 155 for (i = 0; i < 16; i++) {
156 data = data << 1; 156 data = data << 1;
157 ixgb_raise_clock(hw, &eecd_reg); 157 ixgb_raise_clock(hw, &eecd_reg);
158 158
159 eecd_reg = IXGB_READ_REG(hw, EECD); 159 eecd_reg = IXGB_READ_REG(hw, EECD);
160 160
161 eecd_reg &= ~(IXGB_EECD_DI); 161 eecd_reg &= ~(IXGB_EECD_DI);
162 if(eecd_reg & IXGB_EECD_DO) 162 if (eecd_reg & IXGB_EECD_DO)
163 data |= 1; 163 data |= 1;
164 164
165 ixgb_lower_clock(hw, &eecd_reg); 165 ixgb_lower_clock(hw, &eecd_reg);
@@ -205,7 +205,7 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
205 205
206 eecd_reg = IXGB_READ_REG(hw, EECD); 206 eecd_reg = IXGB_READ_REG(hw, EECD);
207 207
208 /* Deselct EEPROM */ 208 /* Deselect EEPROM */
209 eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK); 209 eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
210 IXGB_WRITE_REG(hw, EECD, eecd_reg); 210 IXGB_WRITE_REG(hw, EECD, eecd_reg);
211 udelay(50); 211 udelay(50);
@@ -293,14 +293,14 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
293 */ 293 */
294 ixgb_standby_eeprom(hw); 294 ixgb_standby_eeprom(hw);
295 295
296 /* Now read DO repeatedly until is high (equal to '1'). The EEEPROM will 296 /* Now read DO repeatedly until is high (equal to '1'). The EEPROM will
297 * signal that the command has been completed by raising the DO signal. 297 * signal that the command has been completed by raising the DO signal.
298 * If DO does not go high in 10 milliseconds, then error out. 298 * If DO does not go high in 10 milliseconds, then error out.
299 */ 299 */
300 for(i = 0; i < 200; i++) { 300 for (i = 0; i < 200; i++) {
301 eecd_reg = IXGB_READ_REG(hw, EECD); 301 eecd_reg = IXGB_READ_REG(hw, EECD);
302 302
303 if(eecd_reg & IXGB_EECD_DO) 303 if (eecd_reg & IXGB_EECD_DO)
304 return (true); 304 return (true);
305 305
306 udelay(50); 306 udelay(50);
@@ -328,10 +328,10 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
328 u16 checksum = 0; 328 u16 checksum = 0;
329 u16 i; 329 u16 i;
330 330
331 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) 331 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
332 checksum += ixgb_read_eeprom(hw, i); 332 checksum += ixgb_read_eeprom(hw, i);
333 333
334 if(checksum == (u16) EEPROM_SUM) 334 if (checksum == (u16) EEPROM_SUM)
335 return (true); 335 return (true);
336 else 336 else
337 return (false); 337 return (false);
@@ -351,7 +351,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
351 u16 checksum = 0; 351 u16 checksum = 0;
352 u16 i; 352 u16 i;
353 353
354 for(i = 0; i < EEPROM_CHECKSUM_REG; i++) 354 for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
355 checksum += ixgb_read_eeprom(hw, i); 355 checksum += ixgb_read_eeprom(hw, i);
356 356
357 checksum = (u16) EEPROM_SUM - checksum; 357 checksum = (u16) EEPROM_SUM - checksum;
@@ -365,7 +365,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
365 * 365 *
366 * hw - Struct containing variables accessed by shared code 366 * hw - Struct containing variables accessed by shared code
367 * reg - offset within the EEPROM to be written to 367 * reg - offset within the EEPROM to be written to
368 * data - 16 bit word to be writen to the EEPROM 368 * data - 16 bit word to be written to the EEPROM
369 * 369 *
370 * If ixgb_update_eeprom_checksum is not called after this function, the 370 * If ixgb_update_eeprom_checksum is not called after this function, the
371 * EEPROM will most likely contain an invalid checksum. 371 * EEPROM will most likely contain an invalid checksum.
@@ -472,7 +472,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
472 ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 472 ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
473 473
474 DEBUGOUT("ixgb_ee: Reading eeprom data\n"); 474 DEBUGOUT("ixgb_ee: Reading eeprom data\n");
475 for(i = 0; i < IXGB_EEPROM_SIZE ; i++) { 475 for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
476 u16 ee_data; 476 u16 ee_data;
477 ee_data = ixgb_read_eeprom(hw, i); 477 ee_data = ixgb_read_eeprom(hw, i);
478 checksum += ee_data; 478 checksum += ee_data;
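The ixgb_ee.c hunks are comment and whitespace fixes, but the checksum routines they touch rest on one invariant worth stating: all EEPROM words, the checksum word included, must sum to EEPROM_SUM modulo 2^16. A standalone model (64 words and the 0xBABA constant are assumptions here, matching the convention of Intel NIC EEPROMs):

#include <stdio.h>
#include <stdint.h>

#define EEPROM_WORDS 64
#define EEPROM_SUM   0xBABA

/* Compute the checksum word so the whole image sums to EEPROM_SUM. */
static uint16_t checksum_word(const uint16_t *words, int csum_index)
{
        uint16_t sum = 0;
        int i;

        for (i = 0; i < csum_index; i++)
                sum += words[i];
        return (uint16_t)(EEPROM_SUM - sum);
}

int main(void)
{
        uint16_t ee[EEPROM_WORDS] = { 0x1234, 0x5678 }; /* toy contents */
        uint16_t total = 0;
        int i;

        ee[EEPROM_WORDS - 1] = checksum_word(ee, EEPROM_WORDS - 1);
        for (i = 0; i < EEPROM_WORDS; i++)
                total += ee[i];
        printf("total=0x%04X (want 0x%04X)\n", total, EEPROM_SUM);
        return 0;
}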
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h
index 4b7bd0d4a8a9..7ea12652f471 100644
--- a/drivers/net/ixgb/ixgb_ee.h
+++ b/drivers/net/ixgb/ixgb_ee.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/10GbE Linux driver 3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -34,11 +34,11 @@
34#define IXGB_ETH_LENGTH_OF_ADDRESS 6 34#define IXGB_ETH_LENGTH_OF_ADDRESS 6
35 35
36/* EEPROM Commands */ 36/* EEPROM Commands */
37#define EEPROM_READ_OPCODE 0x6 /* EERPOM read opcode */ 37#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */
38#define EEPROM_WRITE_OPCODE 0x5 /* EERPOM write opcode */ 38#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */
39#define EEPROM_ERASE_OPCODE 0x7 /* EERPOM erase opcode */ 39#define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */
40#define EEPROM_EWEN_OPCODE 0x13 /* EERPOM erase/write enable */ 40#define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */
41#define EEPROM_EWDS_OPCODE 0x10 /* EERPOM erast/write disable */ 41#define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */
42 42
43/* EEPROM MAP (Word Offsets) */ 43/* EEPROM MAP (Word Offsets) */
44#define EEPROM_IA_1_2_REG 0x0000 44#define EEPROM_IA_1_2_REG 0x0000
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 8464d8a013b0..288ee1d0f431 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/10GbE Linux driver 3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -95,7 +95,7 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
95 ecmd->port = PORT_FIBRE; 95 ecmd->port = PORT_FIBRE;
96 ecmd->transceiver = XCVR_EXTERNAL; 96 ecmd->transceiver = XCVR_EXTERNAL;
97 97
98 if(netif_carrier_ok(adapter->netdev)) { 98 if (netif_carrier_ok(adapter->netdev)) {
99 ecmd->speed = SPEED_10000; 99 ecmd->speed = SPEED_10000;
100 ecmd->duplex = DUPLEX_FULL; 100 ecmd->duplex = DUPLEX_FULL;
101 } else { 101 } else {
@@ -122,11 +122,11 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
122{ 122{
123 struct ixgb_adapter *adapter = netdev_priv(netdev); 123 struct ixgb_adapter *adapter = netdev_priv(netdev);
124 124
125 if(ecmd->autoneg == AUTONEG_ENABLE || 125 if (ecmd->autoneg == AUTONEG_ENABLE ||
126 ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL) 126 ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
127 return -EINVAL; 127 return -EINVAL;
128 128
129 if(netif_running(adapter->netdev)) { 129 if (netif_running(adapter->netdev)) {
130 ixgb_down(adapter, true); 130 ixgb_down(adapter, true);
131 ixgb_reset(adapter); 131 ixgb_reset(adapter);
132 ixgb_up(adapter); 132 ixgb_up(adapter);
@@ -143,14 +143,14 @@ ixgb_get_pauseparam(struct net_device *netdev,
143{ 143{
144 struct ixgb_adapter *adapter = netdev_priv(netdev); 144 struct ixgb_adapter *adapter = netdev_priv(netdev);
145 struct ixgb_hw *hw = &adapter->hw; 145 struct ixgb_hw *hw = &adapter->hw;
146 146
147 pause->autoneg = AUTONEG_DISABLE; 147 pause->autoneg = AUTONEG_DISABLE;
148 148
149 if(hw->fc.type == ixgb_fc_rx_pause) 149 if (hw->fc.type == ixgb_fc_rx_pause)
150 pause->rx_pause = 1; 150 pause->rx_pause = 1;
151 else if(hw->fc.type == ixgb_fc_tx_pause) 151 else if (hw->fc.type == ixgb_fc_tx_pause)
152 pause->tx_pause = 1; 152 pause->tx_pause = 1;
153 else if(hw->fc.type == ixgb_fc_full) { 153 else if (hw->fc.type == ixgb_fc_full) {
154 pause->rx_pause = 1; 154 pause->rx_pause = 1;
155 pause->tx_pause = 1; 155 pause->tx_pause = 1;
156 } 156 }
@@ -162,26 +162,26 @@ ixgb_set_pauseparam(struct net_device *netdev,
162{ 162{
163 struct ixgb_adapter *adapter = netdev_priv(netdev); 163 struct ixgb_adapter *adapter = netdev_priv(netdev);
164 struct ixgb_hw *hw = &adapter->hw; 164 struct ixgb_hw *hw = &adapter->hw;
165 165
166 if(pause->autoneg == AUTONEG_ENABLE) 166 if (pause->autoneg == AUTONEG_ENABLE)
167 return -EINVAL; 167 return -EINVAL;
168 168
169 if(pause->rx_pause && pause->tx_pause) 169 if (pause->rx_pause && pause->tx_pause)
170 hw->fc.type = ixgb_fc_full; 170 hw->fc.type = ixgb_fc_full;
171 else if(pause->rx_pause && !pause->tx_pause) 171 else if (pause->rx_pause && !pause->tx_pause)
172 hw->fc.type = ixgb_fc_rx_pause; 172 hw->fc.type = ixgb_fc_rx_pause;
173 else if(!pause->rx_pause && pause->tx_pause) 173 else if (!pause->rx_pause && pause->tx_pause)
174 hw->fc.type = ixgb_fc_tx_pause; 174 hw->fc.type = ixgb_fc_tx_pause;
175 else if(!pause->rx_pause && !pause->tx_pause) 175 else if (!pause->rx_pause && !pause->tx_pause)
176 hw->fc.type = ixgb_fc_none; 176 hw->fc.type = ixgb_fc_none;
177 177
178 if(netif_running(adapter->netdev)) { 178 if (netif_running(adapter->netdev)) {
179 ixgb_down(adapter, true); 179 ixgb_down(adapter, true);
180 ixgb_up(adapter); 180 ixgb_up(adapter);
181 ixgb_set_speed_duplex(netdev); 181 ixgb_set_speed_duplex(netdev);
182 } else 182 } else
183 ixgb_reset(adapter); 183 ixgb_reset(adapter);
184 184
185 return 0; 185 return 0;
186} 186}
187 187
@@ -200,7 +200,7 @@ ixgb_set_rx_csum(struct net_device *netdev, u32 data)
200 200
201 adapter->rx_csum = data; 201 adapter->rx_csum = data;
202 202
203 if(netif_running(netdev)) { 203 if (netif_running(netdev)) {
204 ixgb_down(adapter, true); 204 ixgb_down(adapter, true);
205 ixgb_up(adapter); 205 ixgb_up(adapter);
206 ixgb_set_speed_duplex(netdev); 206 ixgb_set_speed_duplex(netdev);
@@ -208,7 +208,7 @@ ixgb_set_rx_csum(struct net_device *netdev, u32 data)
208 ixgb_reset(adapter); 208 ixgb_reset(adapter);
209 return 0; 209 return 0;
210} 210}
211 211
212static u32 212static u32
213ixgb_get_tx_csum(struct net_device *netdev) 213ixgb_get_tx_csum(struct net_device *netdev)
214{ 214{
@@ -229,12 +229,12 @@ ixgb_set_tx_csum(struct net_device *netdev, u32 data)
229static int 229static int
230ixgb_set_tso(struct net_device *netdev, u32 data) 230ixgb_set_tso(struct net_device *netdev, u32 data)
231{ 231{
232 if(data) 232 if (data)
233 netdev->features |= NETIF_F_TSO; 233 netdev->features |= NETIF_F_TSO;
234 else 234 else
235 netdev->features &= ~NETIF_F_TSO; 235 netdev->features &= ~NETIF_F_TSO;
236 return 0; 236 return 0;
237} 237}
238 238
239static u32 239static u32
240ixgb_get_msglevel(struct net_device *netdev) 240ixgb_get_msglevel(struct net_device *netdev)
@@ -251,7 +251,7 @@ ixgb_set_msglevel(struct net_device *netdev, u32 data)
251} 251}
252#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_ 252#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
253 253
254static int 254static int
255ixgb_get_regs_len(struct net_device *netdev) 255ixgb_get_regs_len(struct net_device *netdev)
256{ 256{
257#define IXGB_REG_DUMP_LEN 136*sizeof(u32) 257#define IXGB_REG_DUMP_LEN 136*sizeof(u32)
@@ -301,7 +301,7 @@ ixgb_get_regs(struct net_device *netdev,
301 *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */ 301 *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
302 302
303 /* there are 16 RAR entries in hardware, we only use 3 */ 303 /* there are 16 RAR entries in hardware, we only use 3 */
304 for(i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) { 304 for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
305 *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */ 305 *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
306 *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */ 306 *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
307 } 307 }
@@ -415,7 +415,7 @@ ixgb_get_eeprom(struct net_device *netdev,
415 int i, max_len, first_word, last_word; 415 int i, max_len, first_word, last_word;
416 int ret_val = 0; 416 int ret_val = 0;
417 417
418 if(eeprom->len == 0) { 418 if (eeprom->len == 0) {
419 ret_val = -EINVAL; 419 ret_val = -EINVAL;
420 goto geeprom_error; 420 goto geeprom_error;
421 } 421 }
@@ -424,12 +424,12 @@ ixgb_get_eeprom(struct net_device *netdev,
424 424
425 max_len = ixgb_get_eeprom_len(netdev); 425 max_len = ixgb_get_eeprom_len(netdev);
426 426
427 if(eeprom->offset > eeprom->offset + eeprom->len) { 427 if (eeprom->offset > eeprom->offset + eeprom->len) {
428 ret_val = -EINVAL; 428 ret_val = -EINVAL;
429 goto geeprom_error; 429 goto geeprom_error;
430 } 430 }
431 431
432 if((eeprom->offset + eeprom->len) > max_len) 432 if ((eeprom->offset + eeprom->len) > max_len)
433 eeprom->len = (max_len - eeprom->offset); 433 eeprom->len = (max_len - eeprom->offset);
434 434
435 first_word = eeprom->offset >> 1; 435 first_word = eeprom->offset >> 1;
@@ -437,16 +437,14 @@ ixgb_get_eeprom(struct net_device *netdev,
437 437
438 eeprom_buff = kmalloc(sizeof(__le16) * 438 eeprom_buff = kmalloc(sizeof(__le16) *
439 (last_word - first_word + 1), GFP_KERNEL); 439 (last_word - first_word + 1), GFP_KERNEL);
440 if(!eeprom_buff) 440 if (!eeprom_buff)
441 return -ENOMEM; 441 return -ENOMEM;
442 442
443 /* note the eeprom was good because the driver loaded */ 443 /* note the eeprom was good because the driver loaded */
444 for(i = 0; i <= (last_word - first_word); i++) { 444 for (i = 0; i <= (last_word - first_word); i++)
445 eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i)); 445 eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
446 }
447 446
448 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), 447 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
449 eeprom->len);
450 kfree(eeprom_buff); 448 kfree(eeprom_buff);
451 449
452geeprom_error: 450geeprom_error:
@@ -464,47 +462,47 @@ ixgb_set_eeprom(struct net_device *netdev,
464 int max_len, first_word, last_word; 462 int max_len, first_word, last_word;
465 u16 i; 463 u16 i;
466 464
467 if(eeprom->len == 0) 465 if (eeprom->len == 0)
468 return -EINVAL; 466 return -EINVAL;
469 467
470 if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 468 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
471 return -EFAULT; 469 return -EFAULT;
472 470
473 max_len = ixgb_get_eeprom_len(netdev); 471 max_len = ixgb_get_eeprom_len(netdev);
474 472
475 if(eeprom->offset > eeprom->offset + eeprom->len) 473 if (eeprom->offset > eeprom->offset + eeprom->len)
476 return -EINVAL; 474 return -EINVAL;
477 475
478 if((eeprom->offset + eeprom->len) > max_len) 476 if ((eeprom->offset + eeprom->len) > max_len)
479 eeprom->len = (max_len - eeprom->offset); 477 eeprom->len = (max_len - eeprom->offset);
480 478
481 first_word = eeprom->offset >> 1; 479 first_word = eeprom->offset >> 1;
482 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 480 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
483 eeprom_buff = kmalloc(max_len, GFP_KERNEL); 481 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
484 if(!eeprom_buff) 482 if (!eeprom_buff)
485 return -ENOMEM; 483 return -ENOMEM;
486 484
487 ptr = (void *)eeprom_buff; 485 ptr = (void *)eeprom_buff;
488 486
489 if(eeprom->offset & 1) { 487 if (eeprom->offset & 1) {
490 /* need read/modify/write of first changed EEPROM word */ 488 /* need read/modify/write of first changed EEPROM word */
491 /* only the second byte of the word is being modified */ 489 /* only the second byte of the word is being modified */
492 eeprom_buff[0] = ixgb_read_eeprom(hw, first_word); 490 eeprom_buff[0] = ixgb_read_eeprom(hw, first_word);
493 ptr++; 491 ptr++;
494 } 492 }
495 if((eeprom->offset + eeprom->len) & 1) { 493 if ((eeprom->offset + eeprom->len) & 1) {
496 /* need read/modify/write of last changed EEPROM word */ 494 /* need read/modify/write of last changed EEPROM word */
497 /* only the first byte of the word is being modified */ 495 /* only the first byte of the word is being modified */
498 eeprom_buff[last_word - first_word] 496 eeprom_buff[last_word - first_word]
499 = ixgb_read_eeprom(hw, last_word); 497 = ixgb_read_eeprom(hw, last_word);
500 } 498 }
501 499
502 memcpy(ptr, bytes, eeprom->len); 500 memcpy(ptr, bytes, eeprom->len);
503 for(i = 0; i <= (last_word - first_word); i++) 501 for (i = 0; i <= (last_word - first_word); i++)
504 ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]); 502 ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]);
505 503
506 /* Update the checksum over the first part of the EEPROM if needed */ 504 /* Update the checksum over the first part of the EEPROM if needed */
507 if(first_word <= EEPROM_CHECKSUM_REG) 505 if (first_word <= EEPROM_CHECKSUM_REG)
508 ixgb_update_eeprom_checksum(hw); 506 ixgb_update_eeprom_checksum(hw);
509 507
510 kfree(eeprom_buff); 508 kfree(eeprom_buff);
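The cleanups above leave ixgb_set_eeprom's structure intact, and it repays a close read: a byte-granular write into a word-addressed EEPROM must first read back any word it only half covers, so the untouched byte survives the write. A standalone little-endian model of that read-modify-write (array size and contents are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint16_t eeprom[8] = { 0x1122, 0x3344, 0x5566 };

static void write_bytes(int offset, const uint8_t *bytes, int len)
{
        int first = offset >> 1, last = (offset + len - 1) >> 1;
        uint16_t buf[8];
        int i;

        buf[0] = eeprom[first];            /* preserve leading byte  */
        buf[last - first] = eeprom[last];  /* preserve trailing byte */
        memcpy((uint8_t *)buf + (offset & 1), bytes, len);
        for (i = 0; i <= last - first; i++)
                eeprom[first + i] = buf[i];
}

int main(void)
{
        uint8_t b[2] = { 0xAA, 0xBB };

        write_bytes(1, b, 2);  /* starts and ends mid-word */
        printf("0x%04X 0x%04X\n", eeprom[0], eeprom[1]);
        return 0;
}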
@@ -534,7 +532,7 @@ ixgb_get_ringparam(struct net_device *netdev,
534 struct ixgb_desc_ring *txdr = &adapter->tx_ring; 532 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
535 struct ixgb_desc_ring *rxdr = &adapter->rx_ring; 533 struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
536 534
537 ring->rx_max_pending = MAX_RXD; 535 ring->rx_max_pending = MAX_RXD;
538 ring->tx_max_pending = MAX_TXD; 536 ring->tx_max_pending = MAX_TXD;
539 ring->rx_mini_max_pending = 0; 537 ring->rx_mini_max_pending = 0;
540 ring->rx_jumbo_max_pending = 0; 538 ring->rx_jumbo_max_pending = 0;
@@ -544,7 +542,7 @@ ixgb_get_ringparam(struct net_device *netdev,
544 ring->rx_jumbo_pending = 0; 542 ring->rx_jumbo_pending = 0;
545} 543}
546 544
547static int 545static int
548ixgb_set_ringparam(struct net_device *netdev, 546ixgb_set_ringparam(struct net_device *netdev,
549 struct ethtool_ringparam *ring) 547 struct ethtool_ringparam *ring)
550{ 548{
@@ -557,10 +555,10 @@ ixgb_set_ringparam(struct net_device *netdev,
557 tx_old = adapter->tx_ring; 555 tx_old = adapter->tx_ring;
558 rx_old = adapter->rx_ring; 556 rx_old = adapter->rx_ring;
559 557
560 if((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 558 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
561 return -EINVAL; 559 return -EINVAL;
562 560
563 if(netif_running(adapter->netdev)) 561 if (netif_running(adapter->netdev))
564 ixgb_down(adapter, true); 562 ixgb_down(adapter, true);
565 563
566 rxdr->count = max(ring->rx_pending,(u32)MIN_RXD); 564 rxdr->count = max(ring->rx_pending,(u32)MIN_RXD);
@@ -571,11 +569,11 @@ ixgb_set_ringparam(struct net_device *netdev,
571 txdr->count = min(txdr->count,(u32)MAX_TXD); 569 txdr->count = min(txdr->count,(u32)MAX_TXD);
572 txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); 570 txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
573 571
574 if(netif_running(adapter->netdev)) { 572 if (netif_running(adapter->netdev)) {
575 /* Try to get new resources before deleting old */ 573 /* Try to get new resources before deleting old */
576 if((err = ixgb_setup_rx_resources(adapter))) 574 if ((err = ixgb_setup_rx_resources(adapter)))
577 goto err_setup_rx; 575 goto err_setup_rx;
578 if((err = ixgb_setup_tx_resources(adapter))) 576 if ((err = ixgb_setup_tx_resources(adapter)))
579 goto err_setup_tx; 577 goto err_setup_tx;
580 578
581 /* save the new, restore the old in order to free it, 579 /* save the new, restore the old in order to free it,
@@ -589,7 +587,7 @@ ixgb_set_ringparam(struct net_device *netdev,
589 ixgb_free_tx_resources(adapter); 587 ixgb_free_tx_resources(adapter);
590 adapter->rx_ring = rx_new; 588 adapter->rx_ring = rx_new;
591 adapter->tx_ring = tx_new; 589 adapter->tx_ring = tx_new;
592 if((err = ixgb_up(adapter))) 590 if ((err = ixgb_up(adapter)))
593 return err; 591 return err;
594 ixgb_set_speed_duplex(netdev); 592 ixgb_set_speed_duplex(netdev);
595 } 593 }
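Note the ordering discipline the reformatted hunk keeps: replacement descriptor rings are allocated before the live ones are freed, so a failed allocation leaves the interface running on its old rings. The pattern, standalone:

#include <stdio.h>
#include <stdlib.h>

struct ring { void *desc; size_t size; };

static int resize_ring(struct ring *live, size_t new_size)
{
        void *fresh = malloc(new_size);  /* get new resources first */

        if (!fresh)
                return -1;               /* old ring still usable   */
        free(live->desc);                /* only now drop the old   */
        live->desc = fresh;
        live->size = new_size;
        return 0;
}

int main(void)
{
        struct ring r = { malloc(64), 64 };

        printf("resize %s\n", resize_ring(&r, 4096) ? "failed" : "ok");
        free(r.desc);
        return 0;
}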
@@ -615,7 +613,7 @@ ixgb_led_blink_callback(unsigned long data)
615{ 613{
616 struct ixgb_adapter *adapter = (struct ixgb_adapter *)data; 614 struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
617 615
618 if(test_and_change_bit(IXGB_LED_ON, &adapter->led_status)) 616 if (test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
619 ixgb_led_off(&adapter->hw); 617 ixgb_led_off(&adapter->hw);
620 else 618 else
621 ixgb_led_on(&adapter->hw); 619 ixgb_led_on(&adapter->hw);
@@ -631,7 +629,7 @@ ixgb_phys_id(struct net_device *netdev, u32 data)
631 if (!data) 629 if (!data)
632 data = INT_MAX; 630 data = INT_MAX;
633 631
634 if(!adapter->blink_timer.function) { 632 if (!adapter->blink_timer.function) {
635 init_timer(&adapter->blink_timer); 633 init_timer(&adapter->blink_timer);
636 adapter->blink_timer.function = ixgb_led_blink_callback; 634 adapter->blink_timer.function = ixgb_led_blink_callback;
637 adapter->blink_timer.data = (unsigned long)adapter; 635 adapter->blink_timer.data = (unsigned long)adapter;
@@ -647,7 +645,7 @@ ixgb_phys_id(struct net_device *netdev, u32 data)
647 return 0; 645 return 0;
648} 646}
649 647
650static int 648static int
651ixgb_get_sset_count(struct net_device *netdev, int sset) 649ixgb_get_sset_count(struct net_device *netdev, int sset)
652{ 650{
653 switch (sset) { 651 switch (sset) {
@@ -658,30 +656,30 @@ ixgb_get_sset_count(struct net_device *netdev, int sset)
658 } 656 }
659} 657}
660 658
661static void 659static void
662ixgb_get_ethtool_stats(struct net_device *netdev, 660ixgb_get_ethtool_stats(struct net_device *netdev,
663 struct ethtool_stats *stats, u64 *data) 661 struct ethtool_stats *stats, u64 *data)
664{ 662{
665 struct ixgb_adapter *adapter = netdev_priv(netdev); 663 struct ixgb_adapter *adapter = netdev_priv(netdev);
666 int i; 664 int i;
667 665
668 ixgb_update_stats(adapter); 666 ixgb_update_stats(adapter);
669 for(i = 0; i < IXGB_STATS_LEN; i++) { 667 for (i = 0; i < IXGB_STATS_LEN; i++) {
670 char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset; 668 char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;
671 data[i] = (ixgb_gstrings_stats[i].sizeof_stat == 669 data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
672 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 670 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
673 } 671 }
674} 672}
675 673
676static void 674static void
677ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 675ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
678{ 676{
679 int i; 677 int i;
680 678
681 switch(stringset) { 679 switch(stringset) {
682 case ETH_SS_STATS: 680 case ETH_SS_STATS:
683 for(i=0; i < IXGB_STATS_LEN; i++) { 681 for (i = 0; i < IXGB_STATS_LEN; i++) {
684 memcpy(data + i * ETH_GSTRING_LEN, 682 memcpy(data + i * ETH_GSTRING_LEN,
685 ixgb_gstrings_stats[i].stat_string, 683 ixgb_gstrings_stats[i].stat_string,
686 ETH_GSTRING_LEN); 684 ETH_GSTRING_LEN);
687 } 685 }
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 04d2003e24e1..11dcda0f453e 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/10GbE Linux driver 3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -125,7 +125,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
125 /* If we are stopped or resetting exit gracefully and wait to be 125 /* If we are stopped or resetting exit gracefully and wait to be
126 * started again before accessing the hardware. 126 * started again before accessing the hardware.
127 */ 127 */
128 if(hw->adapter_stopped) { 128 if (hw->adapter_stopped) {
129 DEBUGOUT("Exiting because the adapter is already stopped!!!\n"); 129 DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
130 return false; 130 return false;
131 } 131 }
@@ -347,7 +347,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
347 347
348 /* Zero out the Multicast HASH table */ 348 /* Zero out the Multicast HASH table */
349 DEBUGOUT("Zeroing the MTA\n"); 349 DEBUGOUT("Zeroing the MTA\n");
350 for(i = 0; i < IXGB_MC_TBL_SIZE; i++) 350 for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
351 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); 351 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
352 352
353 /* Zero out the VLAN Filter Table Array */ 353 /* Zero out the VLAN Filter Table Array */
@@ -371,7 +371,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
371 * hw - Struct containing variables accessed by shared code 371 * hw - Struct containing variables accessed by shared code
372 * 372 *
373 * Places the MAC address in receive address register 0 and clears the rest 373 * Places the MAC address in receive address register 0 and clears the rest
374 * of the receive addresss registers. Clears the multicast table. Assumes 374 * of the receive address registers. Clears the multicast table. Assumes
375 * the receiver is in reset when the routine is called. 375 * the receiver is in reset when the routine is called.
376 *****************************************************************************/ 376 *****************************************************************************/
377static void 377static void
@@ -413,7 +413,7 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
413 413
414 /* Zero out the other 15 receive addresses. */ 414 /* Zero out the other 15 receive addresses. */
415 DEBUGOUT("Clearing RAR[1-15]\n"); 415 DEBUGOUT("Clearing RAR[1-15]\n");
416 for(i = 1; i < IXGB_RAR_ENTRIES; i++) { 416 for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
417 /* Write high reg first to disable the AV bit first */ 417 /* Write high reg first to disable the AV bit first */
418 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 418 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
419 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 419 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
@@ -452,19 +452,18 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
452 452
453 /* Clear RAR[1-15] */ 453 /* Clear RAR[1-15] */
454 DEBUGOUT(" Clearing RAR[1-15]\n"); 454 DEBUGOUT(" Clearing RAR[1-15]\n");
455 for(i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) { 455 for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
456 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 456 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
457 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 457 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
458 } 458 }
459 459
460 /* Clear the MTA */ 460 /* Clear the MTA */
461 DEBUGOUT(" Clearing MTA\n"); 461 DEBUGOUT(" Clearing MTA\n");
462 for(i = 0; i < IXGB_MC_TBL_SIZE; i++) { 462 for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
463 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); 463 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
464 }
465 464
466 /* Add the new addresses */ 465 /* Add the new addresses */
467 for(i = 0; i < mc_addr_count; i++) { 466 for (i = 0; i < mc_addr_count; i++) {
468 DEBUGOUT(" Adding the multicast addresses:\n"); 467 DEBUGOUT(" Adding the multicast addresses:\n");
469 DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i, 468 DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
470 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)], 469 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
@@ -482,7 +481,7 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
482 /* Place this multicast address in the RAR if there is room, * 481 /* Place this multicast address in the RAR if there is room, *
483 * else put it in the MTA 482 * else put it in the MTA
484 */ 483 */
485 if(rar_used_count < IXGB_RAR_ENTRIES) { 484 if (rar_used_count < IXGB_RAR_ENTRIES) {
486 ixgb_rar_set(hw, 485 ixgb_rar_set(hw,
487 mc_addr_list + 486 mc_addr_list +
488 (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)), 487 (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
@@ -649,7 +648,7 @@ ixgb_clear_vfta(struct ixgb_hw *hw)
649{ 648{
650 u32 offset; 649 u32 offset;
651 650
652 for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++) 651 for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
653 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0); 652 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
654 return; 653 return;
655} 654}
@@ -719,9 +718,8 @@ ixgb_setup_fc(struct ixgb_hw *hw)
719 /* Write the new settings */ 718 /* Write the new settings */
720 IXGB_WRITE_REG(hw, CTRL0, ctrl_reg); 719 IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
721 720
722 if (pap_reg != 0) { 721 if (pap_reg != 0)
723 IXGB_WRITE_REG(hw, PAP, pap_reg); 722 IXGB_WRITE_REG(hw, PAP, pap_reg);
724 }
725 723
726 /* Set the flow control receive threshold registers. Normally, 724 /* Set the flow control receive threshold registers. Normally,
727 * these registers will be set to a default threshold that may be 725 * these registers will be set to a default threshold that may be
@@ -729,14 +727,14 @@ ixgb_setup_fc(struct ixgb_hw *hw)
729 * ability to transmit pause frames is not enabled, then these 727 * ability to transmit pause frames is not enabled, then these
730 * registers will be set to 0. 728 * registers will be set to 0.
731 */ 729 */
732 if(!(hw->fc.type & ixgb_fc_tx_pause)) { 730 if (!(hw->fc.type & ixgb_fc_tx_pause)) {
733 IXGB_WRITE_REG(hw, FCRTL, 0); 731 IXGB_WRITE_REG(hw, FCRTL, 0);
734 IXGB_WRITE_REG(hw, FCRTH, 0); 732 IXGB_WRITE_REG(hw, FCRTH, 0);
735 } else { 733 } else {
736 /* We need to set up the Receive Threshold high and low water 734 /* We need to set up the Receive Threshold high and low water
737 * marks as well as (optionally) enabling the transmission of XON 735 * marks as well as (optionally) enabling the transmission of XON
738 * frames. */ 736 * frames. */
739 if(hw->fc.send_xon) { 737 if (hw->fc.send_xon) {
740 IXGB_WRITE_REG(hw, FCRTL, 738 IXGB_WRITE_REG(hw, FCRTL,
741 (hw->fc.low_water | IXGB_FCRTL_XONE)); 739 (hw->fc.low_water | IXGB_FCRTL_XONE));
742 } else { 740 } else {
@@ -791,7 +789,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
791 ** from the CPU Write to the Ready bit assertion. 789 ** from the CPU Write to the Ready bit assertion.
792 **************************************************************/ 790 **************************************************************/
793 791
794 for(i = 0; i < 10; i++) 792 for (i = 0; i < 10; i++)
795 { 793 {
796 udelay(10); 794 udelay(10);
797 795
@@ -818,7 +816,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
818 ** from the CPU Write to the Ready bit assertion. 816 ** from the CPU Write to the Ready bit assertion.
819 **************************************************************/ 817 **************************************************************/
820 818
821 for(i = 0; i < 10; i++) 819 for (i = 0; i < 10; i++)
822 { 820 {
823 udelay(10); 821 udelay(10);
824 822
@@ -887,7 +885,7 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
887 ** from the CPU Write to the Ready bit assertion. 885 ** from the CPU Write to the Ready bit assertion.
888 **************************************************************/ 886 **************************************************************/
889 887
890 for(i = 0; i < 10; i++) 888 for (i = 0; i < 10; i++)
891 { 889 {
892 udelay(10); 890 udelay(10);
893 891
@@ -914,7 +912,7 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
914 ** from the CPU Write to the Ready bit assertion. 912 ** from the CPU Write to the Ready bit assertion.
915 **************************************************************/ 913 **************************************************************/
916 914
917 for(i = 0; i < 10; i++) 915 for (i = 0; i < 10; i++)
918 { 916 {
919 udelay(10); 917 udelay(10);
920 918
@@ -965,7 +963,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
965} 963}
966 964
967/****************************************************************************** 965/******************************************************************************
968 * Check for a bad link condition that may have occured. 966 * Check for a bad link condition that may have occurred.
969 * The indication is that the RFC / LFC registers may be incrementing 967 * The indication is that the RFC / LFC registers may be incrementing
970 * continually. A full adapter reset is required to recover. 968 * continually. A full adapter reset is required to recover.
971 * 969 *
@@ -1007,7 +1005,7 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
1007 DEBUGFUNC("ixgb_clear_hw_cntrs"); 1005 DEBUGFUNC("ixgb_clear_hw_cntrs");
1008 1006
1009 /* if we are stopped or resetting exit gracefully */ 1007 /* if we are stopped or resetting exit gracefully */
1010 if(hw->adapter_stopped) { 1008 if (hw->adapter_stopped) {
1011 DEBUGOUT("Exiting because the adapter is stopped!!!\n"); 1009 DEBUGOUT("Exiting because the adapter is stopped!!!\n");
1012 return; 1010 return;
1013 } 1011 }
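That closes the ixgb_hw.c cleanups. The flow-control block they pass through deserves a picture: FCRTH and FCRTL are receive-FIFO water marks. Filling past the high mark makes the hardware send XOFF, draining below the low mark (with the XONE bit set) makes it send XON, and both are zeroed when TX pause is disabled. A standalone model with illustrative numbers (the FIFO size and mark fractions are assumptions, not the chip's defaults):

#include <stdio.h>

int main(void)
{
        unsigned int fifo = 0x10000;       /* assumed RX FIFO bytes */
        unsigned int high = fifo / 8 * 7;  /* XOFF above this level */
        unsigned int low  = fifo / 4 * 3;  /* XON below this level  */
        int tx_pause = 1, send_xon = 1;

        if (!tx_pause)
                high = low = 0;
        printf("FCRTH=0x%05X FCRTL=0x%05X%s\n", high, low,
               send_xon ? " (XONE set)" : "");
        return 0;
}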
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index 39cfa47bea69..831fe0c58b2b 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/10GbE Linux driver 3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h
index 180d20e793a5..2a58847f46e8 100644
--- a/drivers/net/ixgb/ixgb_ids.h
+++ b/drivers/net/ixgb/ixgb_ids.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/10GbE Linux driver 3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -38,11 +38,11 @@
38#define SUN_VENDOR_ID 0x108E 38#define SUN_VENDOR_ID 0x108E
39#define SUN_SUBVENDOR_ID 0x108E 39#define SUN_SUBVENDOR_ID 0x108E
40 40
41#define IXGB_DEVICE_ID_82597EX 0x1048 41#define IXGB_DEVICE_ID_82597EX 0x1048
42#define IXGB_DEVICE_ID_82597EX_SR 0x1A48 42#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
43#define IXGB_DEVICE_ID_82597EX_LR 0x1B48 43#define IXGB_DEVICE_ID_82597EX_LR 0x1B48
44#define IXGB_SUBDEVICE_ID_A11F 0xA11F 44#define IXGB_SUBDEVICE_ID_A11F 0xA11F
45#define IXGB_SUBDEVICE_ID_A01F 0xA01F 45#define IXGB_SUBDEVICE_ID_A01F 0xA01F
46 46
47#define IXGB_DEVICE_ID_82597EX_CX4 0x109E 47#define IXGB_DEVICE_ID_82597EX_CX4 0x109E
48#define IXGB_SUBDEVICE_ID_A00C 0xA00C 48#define IXGB_SUBDEVICE_ID_A00C 0xA00C
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 41f3adf5f375..e83feaf830bd 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/10GbE Linux driver 3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -31,14 +31,16 @@
31char ixgb_driver_name[] = "ixgb"; 31char ixgb_driver_name[] = "ixgb";
32static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver"; 32static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
33 33
34#ifndef CONFIG_IXGB_NAPI
35#define DRIVERNAPI
36#else
37#define DRIVERNAPI "-NAPI" 34#define DRIVERNAPI "-NAPI"
38#endif 35#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
39#define DRV_VERSION "1.0.126-k4"DRIVERNAPI
40const char ixgb_driver_version[] = DRV_VERSION; 36const char ixgb_driver_version[] = DRV_VERSION;
41static const char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 37static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
38
39#define IXGB_CB_LENGTH 256
40static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
41module_param(copybreak, uint, 0644);
42MODULE_PARM_DESC(copybreak,
43 "Maximum size of packet that is copied to a new buffer on receive");
42 44
43/* ixgb_pci_tbl - PCI Device ID Table 45/* ixgb_pci_tbl - PCI Device ID Table
44 * 46 *
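The copybreak parameter introduced above (writable at runtime under /sys/module/ixgb/parameters/copybreak, per the 0644 mode) bounds the largest received frame the driver will copy into a freshly allocated small skb so the original DMA buffer can be recycled in place. A standalone model of the decision; whether the driver compares with <= or < is a detail this sketch assumes:

#include <stdio.h>

static unsigned int copybreak = 256;  /* IXGB_CB_LENGTH default */

static const char *rx_strategy(unsigned int frame_len)
{
        return frame_len <= copybreak ? "copy into small skb, recycle buffer"
                                      : "hand the large buffer up the stack";
}

int main(void)
{
        printf("64B frame:   %s\n", rx_strategy(64));
        printf("1500B frame: %s\n", rx_strategy(1500));
        return 0;
}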
@@ -55,7 +57,7 @@ static struct pci_device_id ixgb_pci_tbl[] = {
55 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 57 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
56 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, 58 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
57 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 59 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
58 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR, 60 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
59 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 61 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
60 62
61 /* required last entry */ 63 /* required last entry */
@@ -65,16 +67,6 @@ static struct pci_device_id ixgb_pci_tbl[] = {
65MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl); 67MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
66 68
67/* Local Function Prototypes */ 69/* Local Function Prototypes */
68
69int ixgb_up(struct ixgb_adapter *adapter);
70void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
71void ixgb_reset(struct ixgb_adapter *adapter);
72int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
73int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
74void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
75void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
76void ixgb_update_stats(struct ixgb_adapter *adapter);
77
78static int ixgb_init_module(void); 70static int ixgb_init_module(void);
79static void ixgb_exit_module(void); 71static void ixgb_exit_module(void);
80static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 72static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -96,18 +88,15 @@ static int ixgb_set_mac(struct net_device *netdev, void *p);
96static irqreturn_t ixgb_intr(int irq, void *data); 88static irqreturn_t ixgb_intr(int irq, void *data);
97static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter); 89static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
98 90
99#ifdef CONFIG_IXGB_NAPI 91static int ixgb_clean(struct napi_struct *, int);
100static int ixgb_clean(struct napi_struct *napi, int budget); 92static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
101static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter, 93static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
102 int *work_done, int work_to_do); 94
103#else
104static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
105#endif
106static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
107static void ixgb_tx_timeout(struct net_device *dev); 95static void ixgb_tx_timeout(struct net_device *dev);
108static void ixgb_tx_timeout_task(struct work_struct *work); 96static void ixgb_tx_timeout_task(struct work_struct *work);
97
109static void ixgb_vlan_rx_register(struct net_device *netdev, 98static void ixgb_vlan_rx_register(struct net_device *netdev,
110 struct vlan_group *grp); 99 struct vlan_group *grp);
111static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); 100static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
112static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); 101static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
113static void ixgb_restore_vlan(struct ixgb_adapter *adapter); 102static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
@@ -118,7 +107,7 @@ static void ixgb_netpoll(struct net_device *dev);
118#endif 107#endif
119 108
120static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, 109static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
121 enum pci_channel_state state); 110 enum pci_channel_state state);
122static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); 111static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
123static void ixgb_io_resume (struct pci_dev *pdev); 112static void ixgb_io_resume (struct pci_dev *pdev);
124 113
@@ -146,14 +135,6 @@ static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
146module_param(debug, int, 0); 135module_param(debug, int, 0);
147MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 136MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
148 137
149/* some defines for controlling descriptor fetches in h/w */
150#define RXDCTL_WTHRESH_DEFAULT 15 /* chip writes back at this many or RXT0 */
151#define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefech below
152 * this */
153#define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail
154 * is pushed this many descriptors
155 * from head */
156
157/** 138/**
158 * ixgb_init_module - Driver Registration Routine 139 * ixgb_init_module - Driver Registration Routine
159 * 140 *
@@ -236,7 +217,7 @@ ixgb_up(struct ixgb_adapter *adapter)
236 ixgb_configure_tx(adapter); 217 ixgb_configure_tx(adapter);
237 ixgb_setup_rctl(adapter); 218 ixgb_setup_rctl(adapter);
238 ixgb_configure_rx(adapter); 219 ixgb_configure_rx(adapter);
239 ixgb_alloc_rx_buffers(adapter); 220 ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));
240 221
241 /* disable interrupts and get the hardware into a known state */ 222 /* disable interrupts and get the hardware into a known state */
242 IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff); 223 IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
@@ -261,7 +242,7 @@ ixgb_up(struct ixgb_adapter *adapter)
261 return err; 242 return err;
262 } 243 }
263 244
264 if((hw->max_frame_size != max_frame) || 245 if ((hw->max_frame_size != max_frame) ||
265 (hw->max_frame_size != 246 (hw->max_frame_size !=
266 (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) { 247 (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
267 248
@@ -269,11 +250,11 @@ ixgb_up(struct ixgb_adapter *adapter)
269 250
270 IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT); 251 IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
271 252
272 if(hw->max_frame_size > 253 if (hw->max_frame_size >
273 IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { 254 IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
274 u32 ctrl0 = IXGB_READ_REG(hw, CTRL0); 255 u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
275 256
276 if(!(ctrl0 & IXGB_CTRL0_JFE)) { 257 if (!(ctrl0 & IXGB_CTRL0_JFE)) {
277 ctrl0 |= IXGB_CTRL0_JFE; 258 ctrl0 |= IXGB_CTRL0_JFE;
278 IXGB_WRITE_REG(hw, CTRL0, ctrl0); 259 IXGB_WRITE_REG(hw, CTRL0, ctrl0);
279 } 260 }
@@ -282,9 +263,7 @@ ixgb_up(struct ixgb_adapter *adapter)
282 263
283 clear_bit(__IXGB_DOWN, &adapter->flags); 264 clear_bit(__IXGB_DOWN, &adapter->flags);
284 265
285#ifdef CONFIG_IXGB_NAPI
286 napi_enable(&adapter->napi); 266 napi_enable(&adapter->napi);
287#endif
288 ixgb_irq_enable(adapter); 267 ixgb_irq_enable(adapter);
289 268
290 mod_timer(&adapter->watchdog_timer, jiffies); 269 mod_timer(&adapter->watchdog_timer, jiffies);
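Worth noting in the two hunks above is the bring-up ordering that replaces the old #ifdef pair: clear __IXGB_DOWN so the interrupt and poll paths may proceed, enable NAPI, and only then unmask interrupts. A condensed sketch (the helpers are the driver's own, as shown in the hunk):

    /* sketch of the ixgb_up() tail: order matters */
    clear_bit(__IXGB_DOWN, &adapter->flags);  /* let irq/poll paths run    */
    napi_enable(&adapter->napi);              /* poll state ready before   */
    ixgb_irq_enable(adapter);                 /* an interrupt can schedule */
    mod_timer(&adapter->watchdog_timer, jiffies);

Enabling interrupts before napi_enable() could let the ISR try to schedule a NAPI context that is not yet ready; ixgb_down() unwinds in the reverse order.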
@@ -300,9 +279,7 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
300 /* prevent the interrupt handler from restarting watchdog */ 279 /* prevent the interrupt handler from restarting watchdog */
301 set_bit(__IXGB_DOWN, &adapter->flags); 280 set_bit(__IXGB_DOWN, &adapter->flags);
302 281
303#ifdef CONFIG_IXGB_NAPI
304 napi_disable(&adapter->napi); 282 napi_disable(&adapter->napi);
305#endif
306 /* waiting for NAPI to complete can re-enable interrupts */ 283 /* waiting for NAPI to complete can re-enable interrupts */
307 ixgb_irq_disable(adapter); 284 ixgb_irq_disable(adapter);
308 free_irq(adapter->pdev->irq, netdev); 285 free_irq(adapter->pdev->irq, netdev);
@@ -310,7 +287,7 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
310 if (adapter->have_msi) 287 if (adapter->have_msi)
311 pci_disable_msi(adapter->pdev); 288 pci_disable_msi(adapter->pdev);
312 289
313 if(kill_watchdog) 290 if (kill_watchdog)
314 del_timer_sync(&adapter->watchdog_timer); 291 del_timer_sync(&adapter->watchdog_timer);
315 292
316 adapter->link_speed = 0; 293 adapter->link_speed = 0;
@@ -357,27 +334,25 @@ ixgb_reset(struct ixgb_adapter *adapter)
357 **/ 334 **/
358 335
359static int __devinit 336static int __devinit
360ixgb_probe(struct pci_dev *pdev, 337ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
361 const struct pci_device_id *ent)
362{ 338{
363 struct net_device *netdev = NULL; 339 struct net_device *netdev = NULL;
364 struct ixgb_adapter *adapter; 340 struct ixgb_adapter *adapter;
365 static int cards_found = 0; 341 static int cards_found = 0;
366 unsigned long mmio_start;
367 int mmio_len;
368 int pci_using_dac; 342 int pci_using_dac;
369 int i; 343 int i;
370 int err; 344 int err;
371 345
372 if((err = pci_enable_device(pdev))) 346 err = pci_enable_device(pdev);
347 if (err)
373 return err; 348 return err;
374 349
375 if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && 350 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
376 !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { 351 !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
377 pci_using_dac = 1; 352 pci_using_dac = 1;
378 } else { 353 } else {
379 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) || 354 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
380 (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { 355 (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
381 printk(KERN_ERR 356 printk(KERN_ERR
382 "ixgb: No usable DMA configuration, aborting\n"); 357 "ixgb: No usable DMA configuration, aborting\n");
383 goto err_dma_mask; 358 goto err_dma_mask;
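The DMA setup above keeps the usual try-64-bit-then-fall-back-to-32-bit shape; pci_using_dac records whether the device can address high memory and later gates NETIF_F_HIGHDMA. A condensed sketch using the same era API as the hunk (newer kernels would use dma_set_mask_and_coherent() instead):

    /* sketch: DMA mask negotiation with 32-bit fallback */
    int pci_using_dac;

    if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
        !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
            pci_using_dac = 1;    /* device may DMA above 4 GB */
    } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK) &&
               !pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
            pci_using_dac = 0;    /* stack may bounce high buffers */
    } else {
            goto err_dma_mask;    /* no usable DMA configuration */
    }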
@@ -385,13 +360,14 @@ ixgb_probe(struct pci_dev *pdev,
385 pci_using_dac = 0; 360 pci_using_dac = 0;
386 } 361 }
387 362
388 if((err = pci_request_regions(pdev, ixgb_driver_name))) 363 err = pci_request_regions(pdev, ixgb_driver_name);
364 if (err)
389 goto err_request_regions; 365 goto err_request_regions;
390 366
391 pci_set_master(pdev); 367 pci_set_master(pdev);
392 368
393 netdev = alloc_etherdev(sizeof(struct ixgb_adapter)); 369 netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
394 if(!netdev) { 370 if (!netdev) {
395 err = -ENOMEM; 371 err = -ENOMEM;
396 goto err_alloc_etherdev; 372 goto err_alloc_etherdev;
397 } 373 }
@@ -405,19 +381,17 @@ ixgb_probe(struct pci_dev *pdev,
405 adapter->hw.back = adapter; 381 adapter->hw.back = adapter;
406 adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT); 382 adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
407 383
408 mmio_start = pci_resource_start(pdev, BAR_0); 384 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
409 mmio_len = pci_resource_len(pdev, BAR_0); 385 pci_resource_len(pdev, BAR_0));
410 386 if (!adapter->hw.hw_addr) {
411 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
412 if(!adapter->hw.hw_addr) {
413 err = -EIO; 387 err = -EIO;
414 goto err_ioremap; 388 goto err_ioremap;
415 } 389 }
416 390
417 for(i = BAR_1; i <= BAR_5; i++) { 391 for (i = BAR_1; i <= BAR_5; i++) {
418 if(pci_resource_len(pdev, i) == 0) 392 if (pci_resource_len(pdev, i) == 0)
419 continue; 393 continue;
420 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) { 394 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
421 adapter->hw.io_base = pci_resource_start(pdev, i); 395 adapter->hw.io_base = pci_resource_start(pdev, i);
422 break; 396 break;
423 } 397 }
@@ -433,9 +407,7 @@ ixgb_probe(struct pci_dev *pdev,
433 ixgb_set_ethtool_ops(netdev); 407 ixgb_set_ethtool_ops(netdev);
434 netdev->tx_timeout = &ixgb_tx_timeout; 408 netdev->tx_timeout = &ixgb_tx_timeout;
435 netdev->watchdog_timeo = 5 * HZ; 409 netdev->watchdog_timeo = 5 * HZ;
436#ifdef CONFIG_IXGB_NAPI
437 netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64); 410 netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
438#endif
439 netdev->vlan_rx_register = ixgb_vlan_rx_register; 411 netdev->vlan_rx_register = ixgb_vlan_rx_register;
440 netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid; 412 netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
441 netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid; 413 netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
@@ -444,9 +416,6 @@ ixgb_probe(struct pci_dev *pdev,
444#endif 416#endif
445 417
446 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 418 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
447 netdev->mem_start = mmio_start;
448 netdev->mem_end = mmio_start + mmio_len;
449 netdev->base_addr = adapter->hw.io_base;
450 419
451 adapter->bd_number = cards_found; 420 adapter->bd_number = cards_found;
452 adapter->link_speed = 0; 421 adapter->link_speed = 0;
@@ -454,7 +423,8 @@ ixgb_probe(struct pci_dev *pdev,
454 423
455 /* setup the private structure */ 424 /* setup the private structure */
456 425
457 if((err = ixgb_sw_init(adapter))) 426 err = ixgb_sw_init(adapter);
427 if (err)
458 goto err_sw_init; 428 goto err_sw_init;
459 429
460 netdev->features = NETIF_F_SG | 430 netdev->features = NETIF_F_SG |
@@ -463,16 +433,13 @@ ixgb_probe(struct pci_dev *pdev,
463 NETIF_F_HW_VLAN_RX | 433 NETIF_F_HW_VLAN_RX |
464 NETIF_F_HW_VLAN_FILTER; 434 NETIF_F_HW_VLAN_FILTER;
465 netdev->features |= NETIF_F_TSO; 435 netdev->features |= NETIF_F_TSO;
466#ifdef NETIF_F_LLTX
467 netdev->features |= NETIF_F_LLTX;
468#endif
469 436
470 if(pci_using_dac) 437 if (pci_using_dac)
471 netdev->features |= NETIF_F_HIGHDMA; 438 netdev->features |= NETIF_F_HIGHDMA;
472 439
473 /* make sure the EEPROM is good */ 440 /* make sure the EEPROM is good */
474 441
475 if(!ixgb_validate_eeprom_checksum(&adapter->hw)) { 442 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
476 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); 443 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
477 err = -EIO; 444 err = -EIO;
478 goto err_eeprom; 445 goto err_eeprom;
@@ -481,7 +448,7 @@ ixgb_probe(struct pci_dev *pdev,
481 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); 448 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
482 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); 449 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
483 450
484 if(!is_valid_ether_addr(netdev->perm_addr)) { 451 if (!is_valid_ether_addr(netdev->perm_addr)) {
485 DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); 452 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
486 err = -EIO; 453 err = -EIO;
487 goto err_eeprom; 454 goto err_eeprom;
@@ -496,7 +463,8 @@ ixgb_probe(struct pci_dev *pdev,
496 INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); 463 INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
497 464
498 strcpy(netdev->name, "eth%d"); 465 strcpy(netdev->name, "eth%d");
499 if((err = register_netdev(netdev))) 466 err = register_netdev(netdev);
467 if (err)
500 goto err_register; 468 goto err_register;
501 469
502 /* we're going to reset, so assume we have no link for now */ 470 /* we're going to reset, so assume we have no link for now */
@@ -543,6 +511,8 @@ ixgb_remove(struct pci_dev *pdev)
543 struct net_device *netdev = pci_get_drvdata(pdev); 511 struct net_device *netdev = pci_get_drvdata(pdev);
544 struct ixgb_adapter *adapter = netdev_priv(netdev); 512 struct ixgb_adapter *adapter = netdev_priv(netdev);
545 513
514 flush_scheduled_work();
515
546 unregister_netdev(netdev); 516 unregister_netdev(netdev);
547 517
548 iounmap(adapter->hw.hw_addr); 518 iounmap(adapter->hw.hw_addr);
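The added flush_scheduled_work() drains the shared kernel workqueue before unregister_netdev(), so a tx_timeout_task queued by a late watchdog cannot run against a half-torn-down device. The pairing, sketched with the driver's own work item:

    /* at probe time (as elsewhere in this file) */
    INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

    /* at remove time: wait out any queued instance first */
    flush_scheduled_work();
    unregister_netdev(netdev);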
@@ -575,13 +545,13 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
575 hw->subsystem_id = pdev->subsystem_device; 545 hw->subsystem_id = pdev->subsystem_device;
576 546
577 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; 547 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
578 adapter->rx_buffer_len = hw->max_frame_size; 548 adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
579 549
580 if((hw->device_id == IXGB_DEVICE_ID_82597EX) 550 if ((hw->device_id == IXGB_DEVICE_ID_82597EX)
581 || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) 551 || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
582 || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) 552 || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
583 || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) 553 || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
584 hw->mac_type = ixgb_82597; 554 hw->mac_type = ixgb_82597;
585 else { 555 else {
586 /* should never have loaded on this device */ 556 /* should never have loaded on this device */
587 DPRINTK(PROBE, ERR, "unsupported device id\n"); 557 DPRINTK(PROBE, ERR, "unsupported device id\n");
@@ -590,8 +560,6 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
590 /* enable flow control to be programmed */ 560 /* enable flow control to be programmed */
591 hw->fc.send_xon = 1; 561 hw->fc.send_xon = 1;
592 562
593 spin_lock_init(&adapter->tx_lock);
594
595 set_bit(__IXGB_DOWN, &adapter->flags); 563 set_bit(__IXGB_DOWN, &adapter->flags);
596 return 0; 564 return 0;
597} 565}
@@ -616,16 +584,18 @@ ixgb_open(struct net_device *netdev)
616 int err; 584 int err;
617 585
618 /* allocate transmit descriptors */ 586 /* allocate transmit descriptors */
619 587 err = ixgb_setup_tx_resources(adapter);
620 if((err = ixgb_setup_tx_resources(adapter))) 588 if (err)
621 goto err_setup_tx; 589 goto err_setup_tx;
622 590
623 /* allocate receive descriptors */ 591 /* allocate receive descriptors */
624 592
625 if((err = ixgb_setup_rx_resources(adapter))) 593 err = ixgb_setup_rx_resources(adapter);
594 if (err)
626 goto err_setup_rx; 595 goto err_setup_rx;
627 596
628 if((err = ixgb_up(adapter))) 597 err = ixgb_up(adapter);
598 if (err)
629 goto err_up; 599 goto err_up;
630 600
631 return 0; 601 return 0;
@@ -681,7 +651,7 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
681 651
682 size = sizeof(struct ixgb_buffer) * txdr->count; 652 size = sizeof(struct ixgb_buffer) * txdr->count;
683 txdr->buffer_info = vmalloc(size); 653 txdr->buffer_info = vmalloc(size);
684 if(!txdr->buffer_info) { 654 if (!txdr->buffer_info) {
685 DPRINTK(PROBE, ERR, 655 DPRINTK(PROBE, ERR,
686 "Unable to allocate transmit descriptor ring memory\n"); 656 "Unable to allocate transmit descriptor ring memory\n");
687 return -ENOMEM; 657 return -ENOMEM;
@@ -694,7 +664,7 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
694 txdr->size = ALIGN(txdr->size, 4096); 664 txdr->size = ALIGN(txdr->size, 4096);
695 665
696 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 666 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
697 if(!txdr->desc) { 667 if (!txdr->desc) {
698 vfree(txdr->buffer_info); 668 vfree(txdr->buffer_info);
699 DPRINTK(PROBE, ERR, 669 DPRINTK(PROBE, ERR,
700 "Unable to allocate transmit descriptor memory\n"); 670 "Unable to allocate transmit descriptor memory\n");
@@ -723,8 +693,8 @@ ixgb_configure_tx(struct ixgb_adapter *adapter)
723 u32 tctl; 693 u32 tctl;
724 struct ixgb_hw *hw = &adapter->hw; 694 struct ixgb_hw *hw = &adapter->hw;
725 695
726 /* Setup the Base and Length of the Tx Descriptor Ring 696 /* Setup the Base and Length of the Tx Descriptor Ring
727 * tx_ring.dma can be either a 32 or 64 bit value 697 * tx_ring.dma can be either a 32 or 64 bit value
728 */ 698 */
729 699
730 IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); 700 IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
@@ -750,8 +720,8 @@ ixgb_configure_tx(struct ixgb_adapter *adapter)
750 720
751 /* Setup Transmit Descriptor Settings for this adapter */ 721 /* Setup Transmit Descriptor Settings for this adapter */
752 adapter->tx_cmd_type = 722 adapter->tx_cmd_type =
753 IXGB_TX_DESC_TYPE 723 IXGB_TX_DESC_TYPE |
754 | (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0); 724 (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
755} 725}
756 726
757/** 727/**
@@ -770,7 +740,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
770 740
771 size = sizeof(struct ixgb_buffer) * rxdr->count; 741 size = sizeof(struct ixgb_buffer) * rxdr->count;
772 rxdr->buffer_info = vmalloc(size); 742 rxdr->buffer_info = vmalloc(size);
773 if(!rxdr->buffer_info) { 743 if (!rxdr->buffer_info) {
774 DPRINTK(PROBE, ERR, 744 DPRINTK(PROBE, ERR,
775 "Unable to allocate receive descriptor ring\n"); 745 "Unable to allocate receive descriptor ring\n");
776 return -ENOMEM; 746 return -ENOMEM;
@@ -784,7 +754,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
784 754
785 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 755 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
786 756
787 if(!rxdr->desc) { 757 if (!rxdr->desc) {
788 vfree(rxdr->buffer_info); 758 vfree(rxdr->buffer_info);
789 DPRINTK(PROBE, ERR, 759 DPRINTK(PROBE, ERR,
790 "Unable to allocate receive descriptors\n"); 760 "Unable to allocate receive descriptors\n");
@@ -813,8 +783,8 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter)
813 rctl &= ~(3 << IXGB_RCTL_MO_SHIFT); 783 rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
814 784
815 rctl |= 785 rctl |=
816 IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | 786 IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
817 IXGB_RCTL_RXEN | IXGB_RCTL_CFF | 787 IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
818 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT); 788 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
819 789
820 rctl |= IXGB_RCTL_SECRC; 790 rctl |= IXGB_RCTL_SECRC;
@@ -846,7 +816,6 @@ ixgb_configure_rx(struct ixgb_adapter *adapter)
846 struct ixgb_hw *hw = &adapter->hw; 816 struct ixgb_hw *hw = &adapter->hw;
847 u32 rctl; 817 u32 rctl;
848 u32 rxcsum; 818 u32 rxcsum;
849 u32 rxdctl;
850 819
851 /* make sure receives are disabled while setting up the descriptors */ 820 /* make sure receives are disabled while setting up the descriptors */
852 821
@@ -868,18 +837,12 @@ ixgb_configure_rx(struct ixgb_adapter *adapter)
868 IXGB_WRITE_REG(hw, RDH, 0); 837 IXGB_WRITE_REG(hw, RDH, 0);
869 IXGB_WRITE_REG(hw, RDT, 0); 838 IXGB_WRITE_REG(hw, RDT, 0);
870 839
871 /* set up pre-fetching of receive buffers so we get some before we 840 /* due to the hardware errata with RXDCTL, we are unable to use any of
872 * run out (default hardware behavior is to run out before fetching 841 * the performance enhancing features of it without causing other
873 * more). This sets up to fetch if HTHRESH rx descriptors are avail 842 * subtle bugs, some of the bugs could include receive length
874 * and the descriptors in hw cache are below PTHRESH. This avoids 843 * corruption at high data rates (WTHRESH > 0) and/or receive
875 * the hardware behavior of fetching <=512 descriptors in a single 844 * descriptor ring irregularities (particularly in hardware cache) */
876 * burst that pre-empts all other activity, usually causing fifo 845 IXGB_WRITE_REG(hw, RXDCTL, 0);
877 * overflows. */
878 /* use WTHRESH to burst write 16 descriptors or burst when RXT0 */
879 rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
880 RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
881 RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
882 IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
883 846
884 /* Enable Receive Checksum Offload for TCP and UDP */ 847 /* Enable Receive Checksum Offload for TCP and UDP */
885 if (adapter->rx_csum) { 848 if (adapter->rx_csum) {
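For reference, the deleted block composed RXDCTL from the three threshold defines removed earlier in this patch; the errata comment explains why the register is now written as zero. A sketch of what was given up (values from the old defines, not current driver code):

    /* removed tuning: burst descriptor write-back at WTHRESH or on RXT0 */
    rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |  /* 15 */
             RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |  /*  0 */
             RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;   /*  0 */
    IXGB_WRITE_REG(hw, RXDCTL, rxdctl);

    /* after the errata fix: no descriptor write-back batching at all */
    IXGB_WRITE_REG(hw, RXDCTL, 0);

Trading the batching away costs some bus efficiency but avoids the receive length corruption seen with WTHRESH > 0.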
@@ -918,7 +881,7 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
918 881
919static void 882static void
920ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter, 883ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
921 struct ixgb_buffer *buffer_info) 884 struct ixgb_buffer *buffer_info)
922{ 885{
923 struct pci_dev *pdev = adapter->pdev; 886 struct pci_dev *pdev = adapter->pdev;
924 887
@@ -926,8 +889,10 @@ ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
926 pci_unmap_page(pdev, buffer_info->dma, buffer_info->length, 889 pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
927 PCI_DMA_TODEVICE); 890 PCI_DMA_TODEVICE);
928 891
892 /* okay to call kfree_skb here instead of kfree_skb_any because
893 * this is never called in interrupt context */
929 if (buffer_info->skb) 894 if (buffer_info->skb)
930 dev_kfree_skb_any(buffer_info->skb); 895 dev_kfree_skb(buffer_info->skb);
931 896
932 buffer_info->skb = NULL; 897 buffer_info->skb = NULL;
933 buffer_info->dma = 0; 898 buffer_info->dma = 0;
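The move from dev_kfree_skb_any() to dev_kfree_skb() relies on this helper now being reached only from process context (ring teardown) or NAPI softirq context, never from a hard interrupt. The general contract, sketched:

    /* context rules for freeing skbs */
    dev_kfree_skb(skb);      /* process or softirq context only      */
    dev_kfree_skb_irq(skb);  /* hardirq-safe: defers the actual free */
    dev_kfree_skb_any(skb);  /* checks the context and picks one     */

Using the plain variant here avoids the small runtime check on every reclaimed descriptor.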
@@ -952,7 +917,7 @@ ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
952 917
953 /* Free all the Tx ring sk_buffs */ 918 /* Free all the Tx ring sk_buffs */
954 919
955 for(i = 0; i < tx_ring->count; i++) { 920 for (i = 0; i < tx_ring->count; i++) {
956 buffer_info = &tx_ring->buffer_info[i]; 921 buffer_info = &tx_ring->buffer_info[i];
957 ixgb_unmap_and_free_tx_resource(adapter, buffer_info); 922 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
958 } 923 }
@@ -1010,9 +975,9 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
1010 975
1011 /* Free all the Rx ring sk_buffs */ 976 /* Free all the Rx ring sk_buffs */
1012 977
1013 for(i = 0; i < rx_ring->count; i++) { 978 for (i = 0; i < rx_ring->count; i++) {
1014 buffer_info = &rx_ring->buffer_info[i]; 979 buffer_info = &rx_ring->buffer_info[i];
1015 if(buffer_info->skb) { 980 if (buffer_info->skb) {
1016 981
1017 pci_unmap_single(pdev, 982 pci_unmap_single(pdev,
1018 buffer_info->dma, 983 buffer_info->dma,
@@ -1053,7 +1018,7 @@ ixgb_set_mac(struct net_device *netdev, void *p)
1053 struct ixgb_adapter *adapter = netdev_priv(netdev); 1018 struct ixgb_adapter *adapter = netdev_priv(netdev);
1054 struct sockaddr *addr = p; 1019 struct sockaddr *addr = p;
1055 1020
1056 if(!is_valid_ether_addr(addr->sa_data)) 1021 if (!is_valid_ether_addr(addr->sa_data))
1057 return -EADDRNOTAVAIL; 1022 return -EADDRNOTAVAIL;
1058 1023
1059 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1024 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -1086,16 +1051,16 @@ ixgb_set_multi(struct net_device *netdev)
1086 1051
1087 rctl = IXGB_READ_REG(hw, RCTL); 1052 rctl = IXGB_READ_REG(hw, RCTL);
1088 1053
1089 if(netdev->flags & IFF_PROMISC) { 1054 if (netdev->flags & IFF_PROMISC) {
1090 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); 1055 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1091 } else if(netdev->flags & IFF_ALLMULTI) { 1056 } else if (netdev->flags & IFF_ALLMULTI) {
1092 rctl |= IXGB_RCTL_MPE; 1057 rctl |= IXGB_RCTL_MPE;
1093 rctl &= ~IXGB_RCTL_UPE; 1058 rctl &= ~IXGB_RCTL_UPE;
1094 } else { 1059 } else {
1095 rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE); 1060 rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1096 } 1061 }
1097 1062
1098 if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) { 1063 if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
1099 rctl |= IXGB_RCTL_MPE; 1064 rctl |= IXGB_RCTL_MPE;
1100 IXGB_WRITE_REG(hw, RCTL, rctl); 1065 IXGB_WRITE_REG(hw, RCTL, rctl);
1101 } else { 1066 } else {
@@ -1104,10 +1069,11 @@ ixgb_set_multi(struct net_device *netdev)
1104 1069
1105 IXGB_WRITE_REG(hw, RCTL, rctl); 1070 IXGB_WRITE_REG(hw, RCTL, rctl);
1106 1071
1107 for(i = 0, mc_ptr = netdev->mc_list; mc_ptr; 1072 for (i = 0, mc_ptr = netdev->mc_list;
1108 i++, mc_ptr = mc_ptr->next) 1073 mc_ptr;
1074 i++, mc_ptr = mc_ptr->next)
1109 memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS], 1075 memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
1110 mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS); 1076 mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
1111 1077
1112 ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0); 1078 ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
1113 } 1079 }
@@ -1132,8 +1098,8 @@ ixgb_watchdog(unsigned long data)
1132 netif_stop_queue(netdev); 1098 netif_stop_queue(netdev);
1133 } 1099 }
1134 1100
1135 if(adapter->hw.link_up) { 1101 if (adapter->hw.link_up) {
1136 if(!netif_carrier_ok(netdev)) { 1102 if (!netif_carrier_ok(netdev)) {
1137 DPRINTK(LINK, INFO, 1103 DPRINTK(LINK, INFO,
1138 "NIC Link is Up 10000 Mbps Full Duplex\n"); 1104 "NIC Link is Up 10000 Mbps Full Duplex\n");
1139 adapter->link_speed = 10000; 1105 adapter->link_speed = 10000;
@@ -1142,7 +1108,7 @@ ixgb_watchdog(unsigned long data)
1142 netif_wake_queue(netdev); 1108 netif_wake_queue(netdev);
1143 } 1109 }
1144 } else { 1110 } else {
1145 if(netif_carrier_ok(netdev)) { 1111 if (netif_carrier_ok(netdev)) {
1146 adapter->link_speed = 0; 1112 adapter->link_speed = 0;
1147 adapter->link_duplex = 0; 1113 adapter->link_duplex = 0;
1148 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 1114 DPRINTK(LINK, INFO, "NIC Link is Down\n");
@@ -1154,8 +1120,8 @@ ixgb_watchdog(unsigned long data)
1154 1120
1155 ixgb_update_stats(adapter); 1121 ixgb_update_stats(adapter);
1156 1122
1157 if(!netif_carrier_ok(netdev)) { 1123 if (!netif_carrier_ok(netdev)) {
1158 if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) { 1124 if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
1159 /* We've lost link, so the controller stops DMA, 1125 /* We've lost link, so the controller stops DMA,
1160 * but we've got queued Tx work that's never going 1126 * but we've got queued Tx work that's never going
1161 * to get done, so reset controller to flush Tx. 1127 * to get done, so reset controller to flush Tx.
@@ -1227,7 +1193,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1227 context_desc->hdr_len = hdr_len; 1193 context_desc->hdr_len = hdr_len;
1228 context_desc->status = 0; 1194 context_desc->status = 0;
1229 context_desc->cmd_type_len = cpu_to_le32( 1195 context_desc->cmd_type_len = cpu_to_le32(
1230 IXGB_CONTEXT_DESC_TYPE 1196 IXGB_CONTEXT_DESC_TYPE
1231 | IXGB_CONTEXT_DESC_CMD_TSE 1197 | IXGB_CONTEXT_DESC_CMD_TSE
1232 | IXGB_CONTEXT_DESC_CMD_IP 1198 | IXGB_CONTEXT_DESC_CMD_IP
1233 | IXGB_CONTEXT_DESC_CMD_TCP 1199 | IXGB_CONTEXT_DESC_CMD_TCP
@@ -1235,7 +1201,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1235 | (skb->len - (hdr_len))); 1201 | (skb->len - (hdr_len)));
1236 1202
1237 1203
1238 if(++i == adapter->tx_ring.count) i = 0; 1204 if (++i == adapter->tx_ring.count) i = 0;
1239 adapter->tx_ring.next_to_use = i; 1205 adapter->tx_ring.next_to_use = i;
1240 1206
1241 return 1; 1207 return 1;
@@ -1251,7 +1217,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1251 unsigned int i; 1217 unsigned int i;
1252 u8 css, cso; 1218 u8 css, cso;
1253 1219
1254 if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 1220 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1255 struct ixgb_buffer *buffer_info; 1221 struct ixgb_buffer *buffer_info;
1256 css = skb_transport_offset(skb); 1222 css = skb_transport_offset(skb);
1257 cso = css + skb->csum_offset; 1223 cso = css + skb->csum_offset;
@@ -1273,7 +1239,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1273 cpu_to_le32(IXGB_CONTEXT_DESC_TYPE 1239 cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
1274 | IXGB_TX_DESC_CMD_IDE); 1240 | IXGB_TX_DESC_CMD_IDE);
1275 1241
1276 if(++i == adapter->tx_ring.count) i = 0; 1242 if (++i == adapter->tx_ring.count) i = 0;
1277 adapter->tx_ring.next_to_use = i; 1243 adapter->tx_ring.next_to_use = i;
1278 1244
1279 return true; 1245 return true;
@@ -1302,7 +1268,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1302 1268
1303 i = tx_ring->next_to_use; 1269 i = tx_ring->next_to_use;
1304 1270
1305 while(len) { 1271 while (len) {
1306 buffer_info = &tx_ring->buffer_info[i]; 1272 buffer_info = &tx_ring->buffer_info[i];
1307 size = min(len, IXGB_MAX_DATA_PER_TXD); 1273 size = min(len, IXGB_MAX_DATA_PER_TXD);
1308 /* Workaround for premature desc write-backs 1274 /* Workaround for premature desc write-backs
@@ -1312,28 +1278,28 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1312 1278
1313 buffer_info->length = size; 1279 buffer_info->length = size;
1314 WARN_ON(buffer_info->dma != 0); 1280 WARN_ON(buffer_info->dma != 0);
1281 buffer_info->time_stamp = jiffies;
1315 buffer_info->dma = 1282 buffer_info->dma =
1316 pci_map_single(adapter->pdev, 1283 pci_map_single(adapter->pdev,
1317 skb->data + offset, 1284 skb->data + offset,
1318 size, 1285 size,
1319 PCI_DMA_TODEVICE); 1286 PCI_DMA_TODEVICE);
1320 buffer_info->time_stamp = jiffies;
1321 buffer_info->next_to_watch = 0; 1287 buffer_info->next_to_watch = 0;
1322 1288
1323 len -= size; 1289 len -= size;
1324 offset += size; 1290 offset += size;
1325 count++; 1291 count++;
1326 if(++i == tx_ring->count) i = 0; 1292 if (++i == tx_ring->count) i = 0;
1327 } 1293 }
1328 1294
1329 for(f = 0; f < nr_frags; f++) { 1295 for (f = 0; f < nr_frags; f++) {
1330 struct skb_frag_struct *frag; 1296 struct skb_frag_struct *frag;
1331 1297
1332 frag = &skb_shinfo(skb)->frags[f]; 1298 frag = &skb_shinfo(skb)->frags[f];
1333 len = frag->size; 1299 len = frag->size;
1334 offset = 0; 1300 offset = 0;
1335 1301
1336 while(len) { 1302 while (len) {
1337 buffer_info = &tx_ring->buffer_info[i]; 1303 buffer_info = &tx_ring->buffer_info[i];
1338 size = min(len, IXGB_MAX_DATA_PER_TXD); 1304 size = min(len, IXGB_MAX_DATA_PER_TXD);
1339 1305
@@ -1344,19 +1310,19 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1344 size -= 4; 1310 size -= 4;
1345 1311
1346 buffer_info->length = size; 1312 buffer_info->length = size;
1313 buffer_info->time_stamp = jiffies;
1347 buffer_info->dma = 1314 buffer_info->dma =
1348 pci_map_page(adapter->pdev, 1315 pci_map_page(adapter->pdev,
1349 frag->page, 1316 frag->page,
1350 frag->page_offset + offset, 1317 frag->page_offset + offset,
1351 size, 1318 size,
1352 PCI_DMA_TODEVICE); 1319 PCI_DMA_TODEVICE);
1353 buffer_info->time_stamp = jiffies;
1354 buffer_info->next_to_watch = 0; 1320 buffer_info->next_to_watch = 0;
1355 1321
1356 len -= size; 1322 len -= size;
1357 offset += size; 1323 offset += size;
1358 count++; 1324 count++;
1359 if(++i == tx_ring->count) i = 0; 1325 if (++i == tx_ring->count) i = 0;
1360 } 1326 }
1361 } 1327 }
1362 i = (i == 0) ? tx_ring->count - 1 : i - 1; 1328 i = (i == 0) ? tx_ring->count - 1 : i - 1;
@@ -1377,21 +1343,20 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1377 u8 popts = 0; 1343 u8 popts = 0;
1378 unsigned int i; 1344 unsigned int i;
1379 1345
1380 if(tx_flags & IXGB_TX_FLAGS_TSO) { 1346 if (tx_flags & IXGB_TX_FLAGS_TSO) {
1381 cmd_type_len |= IXGB_TX_DESC_CMD_TSE; 1347 cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
1382 popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM); 1348 popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
1383 } 1349 }
1384 1350
1385 if(tx_flags & IXGB_TX_FLAGS_CSUM) 1351 if (tx_flags & IXGB_TX_FLAGS_CSUM)
1386 popts |= IXGB_TX_DESC_POPTS_TXSM; 1352 popts |= IXGB_TX_DESC_POPTS_TXSM;
1387 1353
1388 if(tx_flags & IXGB_TX_FLAGS_VLAN) { 1354 if (tx_flags & IXGB_TX_FLAGS_VLAN)
1389 cmd_type_len |= IXGB_TX_DESC_CMD_VLE; 1355 cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1390 }
1391 1356
1392 i = tx_ring->next_to_use; 1357 i = tx_ring->next_to_use;
1393 1358
1394 while(count--) { 1359 while (count--) {
1395 buffer_info = &tx_ring->buffer_info[i]; 1360 buffer_info = &tx_ring->buffer_info[i];
1396 tx_desc = IXGB_TX_DESC(*tx_ring, i); 1361 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1397 tx_desc->buff_addr = cpu_to_le64(buffer_info->dma); 1362 tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -1401,11 +1366,11 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1401 tx_desc->popts = popts; 1366 tx_desc->popts = popts;
1402 tx_desc->vlan = cpu_to_le16(vlan_id); 1367 tx_desc->vlan = cpu_to_le16(vlan_id);
1403 1368
1404 if(++i == tx_ring->count) i = 0; 1369 if (++i == tx_ring->count) i = 0;
1405 } 1370 }
1406 1371
1407 tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP 1372 tx_desc->cmd_type_len |=
1408 | IXGB_TX_DESC_CMD_RS ); 1373 cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);
1409 1374
1410 /* Force memory writes to complete before letting h/w 1375 /* Force memory writes to complete before letting h/w
1411 * know there are new descriptors to fetch. (Only 1376 * know there are new descriptors to fetch. (Only
@@ -1461,7 +1426,6 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1461 struct ixgb_adapter *adapter = netdev_priv(netdev); 1426 struct ixgb_adapter *adapter = netdev_priv(netdev);
1462 unsigned int first; 1427 unsigned int first;
1463 unsigned int tx_flags = 0; 1428 unsigned int tx_flags = 0;
1464 unsigned long flags;
1465 int vlan_id = 0; 1429 int vlan_id = 0;
1466 int tso; 1430 int tso;
1467 1431
@@ -1470,51 +1434,31 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1470 return NETDEV_TX_OK; 1434 return NETDEV_TX_OK;
1471 } 1435 }
1472 1436
1473 if(skb->len <= 0) { 1437 if (skb->len <= 0) {
1474 dev_kfree_skb_any(skb); 1438 dev_kfree_skb(skb);
1475 return 0; 1439 return 0;
1476 } 1440 }
1477 1441
1478#ifdef NETIF_F_LLTX
1479 if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
1480 /* Collision - tell upper layer to requeue */
1481 local_irq_restore(flags);
1482 return NETDEV_TX_LOCKED;
1483 }
1484#else
1485 spin_lock_irqsave(&adapter->tx_lock, flags);
1486#endif
1487
1488 if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, 1442 if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
1489 DESC_NEEDED))) { 1443 DESC_NEEDED)))
1490 netif_stop_queue(netdev);
1491 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1492 return NETDEV_TX_BUSY; 1444 return NETDEV_TX_BUSY;
1493 }
1494 1445
1495#ifndef NETIF_F_LLTX 1446 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
1496 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1497#endif
1498
1499 if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
1500 tx_flags |= IXGB_TX_FLAGS_VLAN; 1447 tx_flags |= IXGB_TX_FLAGS_VLAN;
1501 vlan_id = vlan_tx_tag_get(skb); 1448 vlan_id = vlan_tx_tag_get(skb);
1502 } 1449 }
1503 1450
1504 first = adapter->tx_ring.next_to_use; 1451 first = adapter->tx_ring.next_to_use;
1505 1452
1506 tso = ixgb_tso(adapter, skb); 1453 tso = ixgb_tso(adapter, skb);
1507 if (tso < 0) { 1454 if (tso < 0) {
1508 dev_kfree_skb_any(skb); 1455 dev_kfree_skb(skb);
1509#ifdef NETIF_F_LLTX
1510 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1511#endif
1512 return NETDEV_TX_OK; 1456 return NETDEV_TX_OK;
1513 } 1457 }
1514 1458
1515 if (likely(tso)) 1459 if (likely(tso))
1516 tx_flags |= IXGB_TX_FLAGS_TSO; 1460 tx_flags |= IXGB_TX_FLAGS_TSO;
1517 else if(ixgb_tx_csum(adapter, skb)) 1461 else if (ixgb_tx_csum(adapter, skb))
1518 tx_flags |= IXGB_TX_FLAGS_CSUM; 1462 tx_flags |= IXGB_TX_FLAGS_CSUM;
1519 1463
1520 ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id, 1464 ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
@@ -1522,13 +1466,9 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1522 1466
1523 netdev->trans_start = jiffies; 1467 netdev->trans_start = jiffies;
1524 1468
1525#ifdef NETIF_F_LLTX
1526 /* Make sure there is space in the ring for the next send. */ 1469 /* Make sure there is space in the ring for the next send. */
1527 ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED); 1470 ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
1528 1471
1529 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1530
1531#endif
1532 return NETDEV_TX_OK; 1472 return NETDEV_TX_OK;
1533} 1473}
1534 1474
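With NETIF_F_LLTX and the private tx_lock removed, ixgb_xmit_frame() runs under the core netif_tx_lock and flow control collapses to the stop/wake protocol. The shape, condensed from the hunks above:

    /* sketch: xmit-side ring flow control after the LLTX removal */
    if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED)))
            return NETDEV_TX_BUSY;    /* helper already stopped the queue */

    /* ...map the skb and queue the descriptors... */

    /* stop now if a worst-case next packet might not fit; the tx
     * cleanup path wakes the queue once descriptors are reclaimed */
    ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
    return NETDEV_TX_OK;

This also removes the NETDEV_TX_LOCKED requeue path, which only existed to cope with the trylock.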
@@ -1588,21 +1528,25 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1588 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; 1528 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1589 int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; 1529 int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1590 1530
1591 1531 /* MTU < 68 is an error for IPv4 traffic, just don't allow it */
1592 if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) 1532 if ((new_mtu < 68) ||
1593 || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) { 1533 (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
1594 DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu); 1534 DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
1595 return -EINVAL; 1535 return -EINVAL;
1596 } 1536 }
1597 1537
1598 adapter->rx_buffer_len = max_frame; 1538 if (old_max_frame == max_frame)
1539 return 0;
1540
1541 if (netif_running(netdev))
1542 ixgb_down(adapter, true);
1543
1544 adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
1599 1545
1600 netdev->mtu = new_mtu; 1546 netdev->mtu = new_mtu;
1601 1547
1602 if ((old_max_frame != max_frame) && netif_running(netdev)) { 1548 if (netif_running(netdev))
1603 ixgb_down(adapter, true);
1604 ixgb_up(adapter); 1549 ixgb_up(adapter);
1605 }
1606 1550
1607 return 0; 1551 return 0;
1608} 1552}
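The rewritten ixgb_change_mtu() adds the explicit IPv4 floor of 68 octets, short-circuits when the frame size is unchanged, and only restarts the interface when it is actually running; the 8 extra bytes on rx_buffer_len mirror the errata padding added in ixgb_sw_init(). Control flow, condensed:

    /* sketch: MTU change with conditional restart */
    if (new_mtu < 68 ||
        max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)
            return -EINVAL;
    if (old_max_frame == max_frame)
            return 0;                       /* nothing to reprogram */
    if (netif_running(netdev))
            ixgb_down(adapter, true);       /* quiesce before resizing */
    adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
    netdev->mtu = new_mtu;
    if (netif_running(netdev))
            ixgb_up(adapter);
    return 0;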
@@ -1622,21 +1566,21 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1622 if (pci_channel_offline(pdev)) 1566 if (pci_channel_offline(pdev))
1623 return; 1567 return;
1624 1568
1625 if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || 1569 if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1626 (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) { 1570 (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1627 u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL); 1571 u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1628 u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL); 1572 u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1629 u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH); 1573 u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1630 u64 bcast = ((u64)bcast_h << 32) | bcast_l; 1574 u64 bcast = ((u64)bcast_h << 32) | bcast_l;
1631 1575
1632 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); 1576 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1633 /* fix up multicast stats by removing broadcasts */ 1577 /* fix up multicast stats by removing broadcasts */
1634 if(multi >= bcast) 1578 if (multi >= bcast)
1635 multi -= bcast; 1579 multi -= bcast;
1636 1580
1637 adapter->stats.mprcl += (multi & 0xFFFFFFFF); 1581 adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1638 adapter->stats.mprch += (multi >> 32); 1582 adapter->stats.mprch += (multi >> 32);
1639 adapter->stats.bprcl += bcast_l; 1583 adapter->stats.bprcl += bcast_l;
1640 adapter->stats.bprch += bcast_h; 1584 adapter->stats.bprch += bcast_h;
1641 } else { 1585 } else {
1642 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL); 1586 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
@@ -1751,41 +1695,26 @@ ixgb_intr(int irq, void *data)
1751 struct ixgb_adapter *adapter = netdev_priv(netdev); 1695 struct ixgb_adapter *adapter = netdev_priv(netdev);
1752 struct ixgb_hw *hw = &adapter->hw; 1696 struct ixgb_hw *hw = &adapter->hw;
1753 u32 icr = IXGB_READ_REG(hw, ICR); 1697 u32 icr = IXGB_READ_REG(hw, ICR);
1754#ifndef CONFIG_IXGB_NAPI
1755 unsigned int i;
1756#endif
1757 1698
1758 if(unlikely(!icr)) 1699 if (unlikely(!icr))
1759 return IRQ_NONE; /* Not our interrupt */ 1700 return IRQ_NONE; /* Not our interrupt */
1760 1701
1761 if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) 1702 if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
1762 if (!test_bit(__IXGB_DOWN, &adapter->flags)) 1703 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1763 mod_timer(&adapter->watchdog_timer, jiffies); 1704 mod_timer(&adapter->watchdog_timer, jiffies);
1764 1705
1765#ifdef CONFIG_IXGB_NAPI
1766 if (netif_rx_schedule_prep(netdev, &adapter->napi)) { 1706 if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
1767 1707
1768 /* Disable interrupts and register for poll. The flush 1708 /* Disable interrupts and register for poll. The flush
1769 of the posted write is intentionally left out. 1709 of the posted write is intentionally left out.
1770 */ 1710 */
1771 1711
1772 IXGB_WRITE_REG(&adapter->hw, IMC, ~0); 1712 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1773 __netif_rx_schedule(netdev, &adapter->napi); 1713 __netif_rx_schedule(netdev, &adapter->napi);
1774 } 1714 }
1775#else
1776 /* yes, that is actually a & and it is meant to make sure that
1777 * every pass through this for loop checks both receive and
1778 * transmit queues for completed descriptors, intended to
1779 * avoid starvation issues and assist tx/rx fairness. */
1780 for(i = 0; i < IXGB_MAX_INTR; i++)
1781 if(!ixgb_clean_rx_irq(adapter) &
1782 !ixgb_clean_tx_irq(adapter))
1783 break;
1784#endif
1785 return IRQ_HANDLED; 1715 return IRQ_HANDLED;
1786} 1716}
1787 1717
1788#ifdef CONFIG_IXGB_NAPI
1789/** 1718/**
1790 * ixgb_clean - NAPI Rx polling callback 1719 * ixgb_clean - NAPI Rx polling callback
1791 * @adapter: board private structure 1720 * @adapter: board private structure
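With the polling fallback gone, the ISR's only job beyond the watchdog kick is arbitration: if netif_rx_schedule_prep() wins the race, mask the device and defer everything to the poll routine. The idiom, sketched in the API of this kernel (later kernels rename these to napi_schedule_prep()/__napi_schedule()):

    /* sketch: hardirq to NAPI hand-off */
    if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
            IXGB_WRITE_REG(&adapter->hw, IMC, ~0);       /* mask device */
            __netif_rx_schedule(netdev, &adapter->napi); /* queue ixgb_clean */
    }
    return IRQ_HANDLED;

The matching hunk below makes ixgb_clean() re-enable interrupts only when it finishes under budget and the adapter is not __IXGB_DOWN, closing a shutdown race.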
@@ -1804,12 +1733,12 @@ ixgb_clean(struct napi_struct *napi, int budget)
1804 /* If budget not fully consumed, exit the polling mode */ 1733 /* If budget not fully consumed, exit the polling mode */
1805 if (work_done < budget) { 1734 if (work_done < budget) {
1806 netif_rx_complete(netdev, napi); 1735 netif_rx_complete(netdev, napi);
1807 ixgb_irq_enable(adapter); 1736 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1737 ixgb_irq_enable(adapter);
1808 } 1738 }
1809 1739
1810 return work_done; 1740 return work_done;
1811} 1741}
1812#endif
1813 1742
1814/** 1743/**
1815 * ixgb_clean_tx_irq - Reclaim resources after transmit completes 1744 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
@@ -1830,15 +1759,15 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1830 eop = tx_ring->buffer_info[i].next_to_watch; 1759 eop = tx_ring->buffer_info[i].next_to_watch;
1831 eop_desc = IXGB_TX_DESC(*tx_ring, eop); 1760 eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1832 1761
1833 while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) { 1762 while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1834 1763
1835 for (cleaned = false; !cleaned; ) { 1764 for (cleaned = false; !cleaned; ) {
1836 tx_desc = IXGB_TX_DESC(*tx_ring, i); 1765 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1837 buffer_info = &tx_ring->buffer_info[i]; 1766 buffer_info = &tx_ring->buffer_info[i];
1838 1767
1839 if (tx_desc->popts 1768 if (tx_desc->popts &
1840 & (IXGB_TX_DESC_POPTS_TXSM | 1769 (IXGB_TX_DESC_POPTS_TXSM |
1841 IXGB_TX_DESC_POPTS_IXSM)) 1770 IXGB_TX_DESC_POPTS_IXSM))
1842 adapter->hw_csum_tx_good++; 1771 adapter->hw_csum_tx_good++;
1843 1772
1844 ixgb_unmap_and_free_tx_resource(adapter, buffer_info); 1773 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
@@ -1846,7 +1775,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1846 *(u32 *)&(tx_desc->status) = 0; 1775 *(u32 *)&(tx_desc->status) = 0;
1847 1776
1848 cleaned = (i == eop); 1777 cleaned = (i == eop);
1849 if(++i == tx_ring->count) i = 0; 1778 if (++i == tx_ring->count) i = 0;
1850 } 1779 }
1851 1780
1852 eop = tx_ring->buffer_info[i].next_to_watch; 1781 eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1855,15 +1784,20 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1855 1784
1856 tx_ring->next_to_clean = i; 1785 tx_ring->next_to_clean = i;
1857 1786
1858 if (unlikely(netif_queue_stopped(netdev))) { 1787 if (unlikely(cleaned && netif_carrier_ok(netdev) &&
1859 spin_lock(&adapter->tx_lock); 1788 IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
1860 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) && 1789 /* Make sure that anybody stopping the queue after this
1861 (IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) 1790 * sees the new next_to_clean. */
1791 smp_mb();
1792
1793 if (netif_queue_stopped(netdev) &&
1794 !(test_bit(__IXGB_DOWN, &adapter->flags))) {
1862 netif_wake_queue(netdev); 1795 netif_wake_queue(netdev);
1863 spin_unlock(&adapter->tx_lock); 1796 ++adapter->restart_queue;
1797 }
1864 } 1798 }
1865 1799
1866 if(adapter->detect_tx_hung) { 1800 if (adapter->detect_tx_hung) {
1867 /* detect a transmit hang in hardware, this serializes the 1801 /* detect a transmit hang in hardware, this serializes the
1868 * check with the clearing of time_stamp and movement of i */ 1802 * check with the clearing of time_stamp and movement of i */
1869 adapter->detect_tx_hung = false; 1803 adapter->detect_tx_hung = false;
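The lockless wake-up above is the standard pattern that replaces the tx_lock: publish next_to_clean, then smp_mb(), then test netif_queue_stopped(), so the barrier pairs with the stop side in the xmit path. Both halves, sketched (the xmit half is the conventional mirror image, not quoted from this file):

    /* cleaner side, as in the hunk above */
    tx_ring->next_to_clean = i;
    smp_mb();                       /* publish before reading queue state */
    if (netif_queue_stopped(netdev) &&
        !test_bit(__IXGB_DOWN, &adapter->flags))
            netif_wake_queue(netdev);

    /* xmit side must pair: stop, barrier, re-check, maybe undo */
    netif_stop_queue(netdev);
    smp_mb();
    if (IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)
            netif_start_queue(netdev);  /* cleaner raced us; keep going */

Without the barriers one side can miss the other's update and the queue stays stopped forever.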
@@ -1906,13 +1840,13 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1906 1840
1907static void 1841static void
1908ixgb_rx_checksum(struct ixgb_adapter *adapter, 1842ixgb_rx_checksum(struct ixgb_adapter *adapter,
1909 struct ixgb_rx_desc *rx_desc, 1843 struct ixgb_rx_desc *rx_desc,
1910 struct sk_buff *skb) 1844 struct sk_buff *skb)
1911{ 1845{
1912 /* Ignore Checksum bit is set OR 1846 /* Ignore Checksum bit is set OR
1913 * TCP Checksum has not been calculated 1847 * TCP Checksum has not been calculated
1914 */ 1848 */
1915 if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) || 1849 if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1916 (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) { 1850 (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1917 skb->ip_summed = CHECKSUM_NONE; 1851 skb->ip_summed = CHECKSUM_NONE;
1918 return; 1852 return;
@@ -1920,7 +1854,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
1920 1854
1921 /* At this point we know the hardware did the TCP checksum */ 1855 /* At this point we know the hardware did the TCP checksum */
1922 /* now look at the TCP checksum error bit */ 1856 /* now look at the TCP checksum error bit */
1923 if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) { 1857 if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1924 /* let the stack verify checksum errors */ 1858 /* let the stack verify checksum errors */
1925 skb->ip_summed = CHECKSUM_NONE; 1859 skb->ip_summed = CHECKSUM_NONE;
1926 adapter->hw_csum_rx_error++; 1860 adapter->hw_csum_rx_error++;
@@ -1937,11 +1871,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
1937 **/ 1871 **/
1938 1872
1939static bool 1873static bool
1940#ifdef CONFIG_IXGB_NAPI
1941ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) 1874ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1942#else
1943ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1944#endif
1945{ 1875{
1946 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; 1876 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1947 struct net_device *netdev = adapter->netdev; 1877 struct net_device *netdev = adapter->netdev;
@@ -1950,50 +1880,50 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1950 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; 1880 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1951 u32 length; 1881 u32 length;
1952 unsigned int i, j; 1882 unsigned int i, j;
1883 int cleaned_count = 0;
1953 bool cleaned = false; 1884 bool cleaned = false;
1954 1885
1955 i = rx_ring->next_to_clean; 1886 i = rx_ring->next_to_clean;
1956 rx_desc = IXGB_RX_DESC(*rx_ring, i); 1887 rx_desc = IXGB_RX_DESC(*rx_ring, i);
1957 buffer_info = &rx_ring->buffer_info[i]; 1888 buffer_info = &rx_ring->buffer_info[i];
1958 1889
1959 while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) { 1890 while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1960 struct sk_buff *skb, *next_skb; 1891 struct sk_buff *skb;
1961 u8 status; 1892 u8 status;
1962 1893
1963#ifdef CONFIG_IXGB_NAPI 1894 if (*work_done >= work_to_do)
1964 if(*work_done >= work_to_do)
1965 break; 1895 break;
1966 1896
1967 (*work_done)++; 1897 (*work_done)++;
1968#endif
1969 status = rx_desc->status; 1898 status = rx_desc->status;
1970 skb = buffer_info->skb; 1899 skb = buffer_info->skb;
1971 buffer_info->skb = NULL; 1900 buffer_info->skb = NULL;
1972 1901
1973 prefetch(skb->data); 1902 prefetch(skb->data - NET_IP_ALIGN);
1974 1903
1975 if(++i == rx_ring->count) i = 0; 1904 if (++i == rx_ring->count) i = 0;
1976 next_rxd = IXGB_RX_DESC(*rx_ring, i); 1905 next_rxd = IXGB_RX_DESC(*rx_ring, i);
1977 prefetch(next_rxd); 1906 prefetch(next_rxd);
1978 1907
1979 if((j = i + 1) == rx_ring->count) j = 0; 1908 if ((j = i + 1) == rx_ring->count) j = 0;
1980 next2_buffer = &rx_ring->buffer_info[j]; 1909 next2_buffer = &rx_ring->buffer_info[j];
1981 prefetch(next2_buffer); 1910 prefetch(next2_buffer);
1982 1911
1983 next_buffer = &rx_ring->buffer_info[i]; 1912 next_buffer = &rx_ring->buffer_info[i];
1984 next_skb = next_buffer->skb;
1985 prefetch(next_skb);
1986 1913
1987 cleaned = true; 1914 cleaned = true;
1915 cleaned_count++;
1988 1916
1989 pci_unmap_single(pdev, 1917 pci_unmap_single(pdev,
1990 buffer_info->dma, 1918 buffer_info->dma,
1991 buffer_info->length, 1919 buffer_info->length,
1992 PCI_DMA_FROMDEVICE); 1920 PCI_DMA_FROMDEVICE);
1921 buffer_info->dma = 0;
1993 1922
1994 length = le16_to_cpu(rx_desc->length); 1923 length = le16_to_cpu(rx_desc->length);
1924 rx_desc->length = 0;
1995 1925
1996 if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) { 1926 if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
1997 1927
1998 /* All receives must fit into a single buffer */ 1928 /* All receives must fit into a single buffer */
1999 1929
@@ -2004,11 +1934,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
2004 goto rxdesc_done; 1934 goto rxdesc_done;
2005 } 1935 }
2006 1936
2007 if (unlikely(rx_desc->errors 1937 if (unlikely(rx_desc->errors &
2008 & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE 1938 (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
2009 | IXGB_RX_DESC_ERRORS_P | 1939 IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
2010 IXGB_RX_DESC_ERRORS_RXE))) {
2011
2012 dev_kfree_skb_irq(skb); 1940 dev_kfree_skb_irq(skb);
2013 goto rxdesc_done; 1941 goto rxdesc_done;
2014 } 1942 }
@@ -2016,8 +1944,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
2016 /* code added for copybreak, this should improve 1944 /* code added for copybreak, this should improve
2017 * performance for small packets with large amounts 1945 * performance for small packets with large amounts
2018 * of reassembly being done in the stack */ 1946 * of reassembly being done in the stack */
2019#define IXGB_CB_LENGTH 256 1947 if (length < copybreak) {
2020 if (length < IXGB_CB_LENGTH) {
2021 struct sk_buff *new_skb = 1948 struct sk_buff *new_skb =
2022 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 1949 netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
2023 if (new_skb) { 1950 if (new_skb) {
@@ -2042,27 +1969,24 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
2042 ixgb_rx_checksum(adapter, rx_desc, skb); 1969 ixgb_rx_checksum(adapter, rx_desc, skb);
2043 1970
2044 skb->protocol = eth_type_trans(skb, netdev); 1971 skb->protocol = eth_type_trans(skb, netdev);
2045#ifdef CONFIG_IXGB_NAPI 1972 if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
2046 if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
2047 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 1973 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
2048 le16_to_cpu(rx_desc->special)); 1974 le16_to_cpu(rx_desc->special));
2049 } else { 1975 } else {
2050 netif_receive_skb(skb); 1976 netif_receive_skb(skb);
2051 } 1977 }
2052#else /* CONFIG_IXGB_NAPI */
2053 if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
2054 vlan_hwaccel_rx(skb, adapter->vlgrp,
2055 le16_to_cpu(rx_desc->special));
2056 } else {
2057 netif_rx(skb);
2058 }
2059#endif /* CONFIG_IXGB_NAPI */
2060 netdev->last_rx = jiffies; 1978 netdev->last_rx = jiffies;
2061 1979
2062rxdesc_done: 1980rxdesc_done:
2063 /* clean up descriptor, might be written over by hw */ 1981 /* clean up descriptor, might be written over by hw */
2064 rx_desc->status = 0; 1982 rx_desc->status = 0;
2065 1983
1984 /* return some buffers to hardware, one at a time is too slow */
1985 if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
1986 ixgb_alloc_rx_buffers(adapter, cleaned_count);
1987 cleaned_count = 0;
1988 }
1989
2066 /* use prefetched values */ 1990 /* use prefetched values */
2067 rx_desc = next_rxd; 1991 rx_desc = next_rxd;
2068 buffer_info = next_buffer; 1992 buffer_info = next_buffer;
@@ -2070,7 +1994,9 @@ rxdesc_done:
2070 1994
2071 rx_ring->next_to_clean = i; 1995 rx_ring->next_to_clean = i;
2072 1996
2073 ixgb_alloc_rx_buffers(adapter); 1997 cleaned_count = IXGB_DESC_UNUSED(rx_ring);
1998 if (cleaned_count)
1999 ixgb_alloc_rx_buffers(adapter, cleaned_count);
2074 2000
2075 return cleaned; 2001 return cleaned;
2076} 2002}
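Replenishment is now batched: the clean loop counts consumed descriptors and returns buffers to hardware in IXGB_RX_BUFFER_WRITE-sized chunks, then tops up whatever is left on exit instead of calling the allocator once per packet. Structure, condensed from the hunks above:

    /* sketch: batched rx buffer return in the clean loop */
    while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
            /* ...unmap, copybreak copy, hand skb to the stack... */
            cleaned_count++;
            if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
                    ixgb_alloc_rx_buffers(adapter, cleaned_count);
                    cleaned_count = 0;    /* hardware owns them again */
            }
            rx_desc = next_rxd;           /* use prefetched values */
    }
    cleaned_count = IXGB_DESC_UNUSED(rx_ring);
    if (cleaned_count)
            ixgb_alloc_rx_buffers(adapter, cleaned_count);

One tail write per batch amortizes the MMIO cost and keeps the ring from running dry under load.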
@@ -2081,7 +2007,7 @@ rxdesc_done:
2081 **/ 2007 **/
2082 2008
2083static void 2009static void
2084ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter) 2010ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2085{ 2011{
2086 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; 2012 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2087 struct net_device *netdev = adapter->netdev; 2013 struct net_device *netdev = adapter->netdev;
@@ -2098,7 +2024,7 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
2098 2024
2099 2025
2100 /* leave three descriptors unused */ 2026 /* leave three descriptors unused */
2101 while(--cleancount > 2) { 2027 while (--cleancount > 2 && cleaned_count--) {
2102 /* recycle! its good for you */ 2028 /* recycle! its good for you */
2103 skb = buffer_info->skb; 2029 skb = buffer_info->skb;
2104 if (skb) { 2030 if (skb) {
@@ -2131,12 +2057,12 @@ map_skb:
2131 rx_desc = IXGB_RX_DESC(*rx_ring, i); 2057 rx_desc = IXGB_RX_DESC(*rx_ring, i);
2132 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); 2058 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
2133 /* guarantee DD bit not set now before h/w gets descriptor 2059 /* guarantee DD bit not set now before h/w gets descriptor
2134 * this is the rest of the workaround for h/w double 2060 * this is the rest of the workaround for h/w double
2135 * writeback. */ 2061 * writeback. */
2136 rx_desc->status = 0; 2062 rx_desc->status = 0;
2137 2063
2138 2064
2139 if(++i == rx_ring->count) i = 0; 2065 if (++i == rx_ring->count) i = 0;
2140 buffer_info = &rx_ring->buffer_info[i]; 2066 buffer_info = &rx_ring->buffer_info[i];
2141 } 2067 }
2142 2068
@@ -2156,7 +2082,7 @@ map_skb:
2156 2082
2157/** 2083/**
2158 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping. 2084 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
2159 * 2085 *
2160 * @param netdev network interface device structure 2086 * @param netdev network interface device structure
2161 * @param grp indicates to enable or disable tagging/stripping 2087 * @param grp indicates to enable or disable tagging/stripping
2162 **/ 2088 **/
@@ -2169,7 +2095,7 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2169 ixgb_irq_disable(adapter); 2095 ixgb_irq_disable(adapter);
2170 adapter->vlgrp = grp; 2096 adapter->vlgrp = grp;
2171 2097
2172 if(grp) { 2098 if (grp) {
2173 /* enable VLAN tag insert/strip */ 2099 /* enable VLAN tag insert/strip */
2174 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); 2100 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2175 ctrl |= IXGB_CTRL0_VME; 2101 ctrl |= IXGB_CTRL0_VME;
@@ -2241,10 +2167,10 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
2241{ 2167{
2242 ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp); 2168 ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2243 2169
2244 if(adapter->vlgrp) { 2170 if (adapter->vlgrp) {
2245 u16 vid; 2171 u16 vid;
2246 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 2172 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2247 if(!vlan_group_get_device(adapter->vlgrp, vid)) 2173 if (!vlan_group_get_device(adapter->vlgrp, vid))
2248 continue; 2174 continue;
2249 ixgb_vlan_rx_add_vid(adapter->netdev, vid); 2175 ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2250 } 2176 }
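For context, ixgb_vlan_rx_register() above toggles hardware tag stripping with a read-modify-write of CTRL0 under disabled interrupts, and ixgb_restore_vlan() replays the software VLAN table into the hardware filter after a reset. A condensed sketch of the register toggle (register and helper names are the driver's; the full function also programs the VLAN filter enable in RCTL):

        u32 ctrl;

        ixgb_irq_disable(adapter);
        adapter->vlgrp = grp;

        ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
        if (grp)
                ctrl |= IXGB_CTRL0_VME;         /* strip tags on receive */
        else
                ctrl &= ~IXGB_CTRL0_VME;        /* leave tags in the frame */
        IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

        ixgb_irq_enable(adapter);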
@@ -2276,13 +2202,13 @@ static void ixgb_netpoll(struct net_device *dev)
2276 * This callback is called by the PCI subsystem whenever 2202 * This callback is called by the PCI subsystem whenever
2277 * a PCI bus error is detected. 2203 * a PCI bus error is detected.
2278 */ 2204 */
2279static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, 2205static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
2280 enum pci_channel_state state) 2206 enum pci_channel_state state)
2281{ 2207{
2282 struct net_device *netdev = pci_get_drvdata(pdev); 2208 struct net_device *netdev = pci_get_drvdata(pdev);
2283 struct ixgb_adapter *adapter = netdev_priv(netdev); 2209 struct ixgb_adapter *adapter = netdev_priv(netdev);
2284 2210
2285 if(netif_running(netdev)) 2211 if (netif_running(netdev))
2286 ixgb_down(adapter, true); 2212 ixgb_down(adapter, true);
2287 2213
2288 pci_disable_device(pdev); 2214 pci_disable_device(pdev);
@@ -2295,17 +2221,17 @@ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
2295 * ixgb_io_slot_reset - called after the pci bus has been reset. 2221 * ixgb_io_slot_reset - called after the pci bus has been reset.
2296 * @pdev pointer to pci device with error 2222 * @pdev pointer to pci device with error
2297 * 2223 *
2298 * This callback is called after the PCI buss has been reset. 2224 * This callback is called after the PCI bus has been reset.
2299 * Basically, this tries to restart the card from scratch. 2225 * Basically, this tries to restart the card from scratch.
2300 * This is a shortened version of the device probe/discovery code, 2226 * This is a shortened version of the device probe/discovery code,
2301 * it resembles the first-half of the ixgb_probe() routine. 2227 * it resembles the first-half of the ixgb_probe() routine.
2302 */ 2228 */
2303static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev) 2229static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
2304{ 2230{
2305 struct net_device *netdev = pci_get_drvdata(pdev); 2231 struct net_device *netdev = pci_get_drvdata(pdev);
2306 struct ixgb_adapter *adapter = netdev_priv(netdev); 2232 struct ixgb_adapter *adapter = netdev_priv(netdev);
2307 2233
2308 if(pci_enable_device(pdev)) { 2234 if (pci_enable_device(pdev)) {
2309 DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n"); 2235 DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
2310 return PCI_ERS_RESULT_DISCONNECT; 2236 return PCI_ERS_RESULT_DISCONNECT;
2311 } 2237 }
@@ -2321,14 +2247,14 @@ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
2321 ixgb_reset(adapter); 2247 ixgb_reset(adapter);
2322 2248
2323 /* Make sure the EEPROM is good */ 2249 /* Make sure the EEPROM is good */
2324 if(!ixgb_validate_eeprom_checksum(&adapter->hw)) { 2250 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
2325 DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n"); 2251 DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
2326 return PCI_ERS_RESULT_DISCONNECT; 2252 return PCI_ERS_RESULT_DISCONNECT;
2327 } 2253 }
2328 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); 2254 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
2329 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); 2255 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2330 2256
2331 if(!is_valid_ether_addr(netdev->perm_addr)) { 2257 if (!is_valid_ether_addr(netdev->perm_addr)) {
2332 DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n"); 2258 DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
2333 return PCI_ERS_RESULT_DISCONNECT; 2259 return PCI_ERS_RESULT_DISCONNECT;
2334 } 2260 }
@@ -2344,15 +2270,15 @@ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
2344 * normal operation. Implementation resembles the second-half 2270 * normal operation. Implementation resembles the second-half
2345 * of the ixgb_probe() routine. 2271 * of the ixgb_probe() routine.
2346 */ 2272 */
2347static void ixgb_io_resume (struct pci_dev *pdev) 2273static void ixgb_io_resume(struct pci_dev *pdev)
2348{ 2274{
2349 struct net_device *netdev = pci_get_drvdata(pdev); 2275 struct net_device *netdev = pci_get_drvdata(pdev);
2350 struct ixgb_adapter *adapter = netdev_priv(netdev); 2276 struct ixgb_adapter *adapter = netdev_priv(netdev);
2351 2277
2352 pci_set_master(pdev); 2278 pci_set_master(pdev);
2353 2279
2354 if(netif_running(netdev)) { 2280 if (netif_running(netdev)) {
2355 if(ixgb_up(adapter)) { 2281 if (ixgb_up(adapter)) {
2356 printk("ixgb: can't bring device back up after reset\n"); 2282 printk("ixgb: can't bring device back up after reset\n");
2357 return; 2283 return;
2358 } 2284 }
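These three callbacks implement the kernel's staged PCI error-recovery contract: error_detected() quiesces the device, slot_reset() re-initializes it after the bus reset, and resume() restarts traffic. Elsewhere in ixgb_main.c they are wired into the driver roughly as below (shown for orientation, not part of this hunk):

        static struct pci_error_handlers ixgb_err_handler = {
                .error_detected = ixgb_io_error_detected,
                .slot_reset     = ixgb_io_slot_reset,
                .resume         = ixgb_io_resume,
        };

        static struct pci_driver ixgb_driver = {
                .name        = ixgb_driver_name,
                .id_table    = ixgb_pci_tbl,
                .probe       = ixgb_probe,
                .remove      = __devexit_p(ixgb_remove),
                .err_handler = &ixgb_err_handler,
        };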
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index 4be1b273e1b8..d92e72bd627a 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/10GbE Linux driver 3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -40,7 +40,7 @@
40#include <linux/sched.h> 40#include <linux/sched.h>
41 41
42#undef ASSERT 42#undef ASSERT
43#define ASSERT(x) if(!(x)) BUG() 43#define ASSERT(x) if (!(x)) BUG()
44#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B) 44#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
45 45
46#ifdef DBG 46#ifdef DBG
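One nit the whitespace-only change above does not address: a bare if-based macro such as ASSERT() is exposed to the dangling-else trap. A hypothetical hardened form, not part of this patch, would wrap the body:

        #define ASSERT(x) do { if (!(x)) BUG(); } while (0)

        /* Without the do/while(0), the else below would bind to the
         * macro's internal if rather than to cond: */
        if (cond)
                ASSERT(ptr);
        else
                cleanup();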
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index 865d14d6e5a7..af35e1ddadd6 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/10GbE Linux driver 3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -136,7 +136,7 @@ IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
136/* Flow control request timeout (how long to pause the link partner's tx) 136/* Flow control request timeout (how long to pause the link partner's tx)
137 * (PAP 15:0) 137 * (PAP 15:0)
138 * 138 *
139 * Valid Range: 1 - 65535 139 * Valid Range: 1 - 65535
140 * 140 *
141 * Default Value: 65535 (0xffff) (we'll send an xon if we recover) 141 * Default Value: 65535 (0xffff) (we'll send an xon if we recover)
142 */ 142 */
@@ -200,7 +200,7 @@ struct ixgb_option {
200static int __devinit 200static int __devinit
201ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) 201ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
202{ 202{
203 if(*value == OPTION_UNSET) { 203 if (*value == OPTION_UNSET) {
204 *value = opt->def; 204 *value = opt->def;
205 return 0; 205 return 0;
206 } 206 }
@@ -217,7 +217,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
217 } 217 }
218 break; 218 break;
219 case range_option: 219 case range_option:
220 if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 220 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
221 printk(KERN_INFO "%s set to %i\n", opt->name, *value); 221 printk(KERN_INFO "%s set to %i\n", opt->name, *value);
222 return 0; 222 return 0;
223 } 223 }
@@ -226,10 +226,10 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
226 int i; 226 int i;
227 struct ixgb_opt_list *ent; 227 struct ixgb_opt_list *ent;
228 228
229 for(i = 0; i < opt->arg.l.nr; i++) { 229 for (i = 0; i < opt->arg.l.nr; i++) {
230 ent = &opt->arg.l.p[i]; 230 ent = &opt->arg.l.p[i];
231 if(*value == ent->i) { 231 if (*value == ent->i) {
232 if(ent->str[0] != '\0') 232 if (ent->str[0] != '\0')
233 printk(KERN_INFO "%s\n", ent->str); 233 printk(KERN_INFO "%s\n", ent->str);
234 return 0; 234 return 0;
235 } 235 }
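ixgb_validate_option() checks each module parameter against a per-option descriptor and falls back to opt->def when the value is out of range. A sketch of one range_option descriptor, reconstructed from the RxIntDelay handling further down (all macro names are the driver's):

        const struct ixgb_option opt = {
                .type = range_option,
                .name = "Receive Interrupt Delay",
                .err  = "using default of " __MODULE_STRING(DEFAULT_RDTR),
                .def  = DEFAULT_RDTR,
                .arg  = { .r = { .min = MIN_RDTR, .max = MAX_RDTR } }
        };
        unsigned int rdtr = RxIntDelay[bd];

        ixgb_validate_option(&rdtr, &opt);      /* out of range -> opt.def */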
@@ -260,7 +260,7 @@ void __devinit
260ixgb_check_options(struct ixgb_adapter *adapter) 260ixgb_check_options(struct ixgb_adapter *adapter)
261{ 261{
262 int bd = adapter->bd_number; 262 int bd = adapter->bd_number;
263 if(bd >= IXGB_MAX_NIC) { 263 if (bd >= IXGB_MAX_NIC) {
264 printk(KERN_NOTICE 264 printk(KERN_NOTICE
265 "Warning: no configuration for board #%i\n", bd); 265 "Warning: no configuration for board #%i\n", bd);
266 printk(KERN_NOTICE "Using defaults for all values\n"); 266 printk(KERN_NOTICE "Using defaults for all values\n");
@@ -277,7 +277,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
277 }; 277 };
278 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; 278 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
279 279
280 if(num_TxDescriptors > bd) { 280 if (num_TxDescriptors > bd) {
281 tx_ring->count = TxDescriptors[bd]; 281 tx_ring->count = TxDescriptors[bd];
282 ixgb_validate_option(&tx_ring->count, &opt); 282 ixgb_validate_option(&tx_ring->count, &opt);
283 } else { 283 } else {
@@ -296,7 +296,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
296 }; 296 };
297 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; 297 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
298 298
299 if(num_RxDescriptors > bd) { 299 if (num_RxDescriptors > bd) {
300 rx_ring->count = RxDescriptors[bd]; 300 rx_ring->count = RxDescriptors[bd];
301 ixgb_validate_option(&rx_ring->count, &opt); 301 ixgb_validate_option(&rx_ring->count, &opt);
302 } else { 302 } else {
@@ -312,7 +312,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
312 .def = OPTION_ENABLED 312 .def = OPTION_ENABLED
313 }; 313 };
314 314
315 if(num_XsumRX > bd) { 315 if (num_XsumRX > bd) {
316 unsigned int rx_csum = XsumRX[bd]; 316 unsigned int rx_csum = XsumRX[bd];
317 ixgb_validate_option(&rx_csum, &opt); 317 ixgb_validate_option(&rx_csum, &opt);
318 adapter->rx_csum = rx_csum; 318 adapter->rx_csum = rx_csum;
@@ -338,7 +338,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
338 .p = fc_list }} 338 .p = fc_list }}
339 }; 339 };
340 340
341 if(num_FlowControl > bd) { 341 if (num_FlowControl > bd) {
342 unsigned int fc = FlowControl[bd]; 342 unsigned int fc = FlowControl[bd];
343 ixgb_validate_option(&fc, &opt); 343 ixgb_validate_option(&fc, &opt);
344 adapter->hw.fc.type = fc; 344 adapter->hw.fc.type = fc;
@@ -356,14 +356,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
356 .max = MAX_FCRTH}} 356 .max = MAX_FCRTH}}
357 }; 357 };
358 358
359 if(num_RxFCHighThresh > bd) { 359 if (num_RxFCHighThresh > bd) {
360 adapter->hw.fc.high_water = RxFCHighThresh[bd]; 360 adapter->hw.fc.high_water = RxFCHighThresh[bd];
361 ixgb_validate_option(&adapter->hw.fc.high_water, &opt); 361 ixgb_validate_option(&adapter->hw.fc.high_water, &opt);
362 } else { 362 } else {
363 adapter->hw.fc.high_water = opt.def; 363 adapter->hw.fc.high_water = opt.def;
364 } 364 }
365 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause)) 365 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
366 printk (KERN_INFO 366 printk(KERN_INFO
367 "Ignoring RxFCHighThresh when no RxFC\n"); 367 "Ignoring RxFCHighThresh when no RxFC\n");
368 } 368 }
369 { /* Receive Flow Control Low Threshold */ 369 { /* Receive Flow Control Low Threshold */
@@ -376,14 +376,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
376 .max = MAX_FCRTL}} 376 .max = MAX_FCRTL}}
377 }; 377 };
378 378
379 if(num_RxFCLowThresh > bd) { 379 if (num_RxFCLowThresh > bd) {
380 adapter->hw.fc.low_water = RxFCLowThresh[bd]; 380 adapter->hw.fc.low_water = RxFCLowThresh[bd];
381 ixgb_validate_option(&adapter->hw.fc.low_water, &opt); 381 ixgb_validate_option(&adapter->hw.fc.low_water, &opt);
382 } else { 382 } else {
383 adapter->hw.fc.low_water = opt.def; 383 adapter->hw.fc.low_water = opt.def;
384 } 384 }
385 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause)) 385 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
386 printk (KERN_INFO 386 printk(KERN_INFO
387 "Ignoring RxFCLowThresh when no RxFC\n"); 387 "Ignoring RxFCLowThresh when no RxFC\n");
388 } 388 }
389 { /* Flow Control Pause Time Request */ 389 { /* Flow Control Pause Time Request */
@@ -396,7 +396,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
396 .max = MAX_FCPAUSE}} 396 .max = MAX_FCPAUSE}}
397 }; 397 };
398 398
399 if(num_FCReqTimeout > bd) { 399 if (num_FCReqTimeout > bd) {
400 unsigned int pause_time = FCReqTimeout[bd]; 400 unsigned int pause_time = FCReqTimeout[bd];
401 ixgb_validate_option(&pause_time, &opt); 401 ixgb_validate_option(&pause_time, &opt);
402 adapter->hw.fc.pause_time = pause_time; 402 adapter->hw.fc.pause_time = pause_time;
@@ -404,7 +404,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
404 adapter->hw.fc.pause_time = opt.def; 404 adapter->hw.fc.pause_time = opt.def;
405 } 405 }
406 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause)) 406 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
407 printk (KERN_INFO 407 printk(KERN_INFO
408 "Ignoring FCReqTimeout when no RxFC\n"); 408 "Ignoring FCReqTimeout when no RxFC\n");
409 } 409 }
410 /* high low and spacing check for rx flow control thresholds */ 410 /* high low and spacing check for rx flow control thresholds */
@@ -412,7 +412,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
412 /* high must be greater than low */ 412 /* high must be greater than low */
413 if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) { 413 if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
414 /* set defaults */ 414 /* set defaults */
415 printk (KERN_INFO 415 printk(KERN_INFO
416 "RxFCHighThresh must be >= (RxFCLowThresh + 8), " 416 "RxFCHighThresh must be >= (RxFCLowThresh + 8), "
417 "Using Defaults\n"); 417 "Using Defaults\n");
418 adapter->hw.fc.high_water = DEFAULT_FCRTH; 418 adapter->hw.fc.high_water = DEFAULT_FCRTH;
@@ -429,7 +429,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
429 .max = MAX_RDTR}} 429 .max = MAX_RDTR}}
430 }; 430 };
431 431
432 if(num_RxIntDelay > bd) { 432 if (num_RxIntDelay > bd) {
433 adapter->rx_int_delay = RxIntDelay[bd]; 433 adapter->rx_int_delay = RxIntDelay[bd];
434 ixgb_validate_option(&adapter->rx_int_delay, &opt); 434 ixgb_validate_option(&adapter->rx_int_delay, &opt);
435 } else { 435 } else {
@@ -446,7 +446,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
446 .max = MAX_TIDV}} 446 .max = MAX_TIDV}}
447 }; 447 };
448 448
449 if(num_TxIntDelay > bd) { 449 if (num_TxIntDelay > bd) {
450 adapter->tx_int_delay = TxIntDelay[bd]; 450 adapter->tx_int_delay = TxIntDelay[bd];
451 ixgb_validate_option(&adapter->tx_int_delay, &opt); 451 ixgb_validate_option(&adapter->tx_int_delay, &opt);
452 } else { 452 } else {
@@ -462,7 +462,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
462 .def = OPTION_ENABLED 462 .def = OPTION_ENABLED
463 }; 463 };
464 464
465 if(num_IntDelayEnable > bd) { 465 if (num_IntDelayEnable > bd) {
466 unsigned int ide = IntDelayEnable[bd]; 466 unsigned int ide = IntDelayEnable[bd];
467 ixgb_validate_option(&ide, &opt); 467 ixgb_validate_option(&ide, &opt);
468 adapter->tx_int_delay_enable = ide; 468 adapter->tx_int_delay_enable = ide;
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 2747b1f89ffe..c01b78013ddc 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -177,6 +177,7 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
177 177
178 return bus; 178 return bus;
179} 179}
180EXPORT_SYMBOL(alloc_mdio_bitbang);
180 181
181void free_mdio_bitbang(struct mii_bus *bus) 182void free_mdio_bitbang(struct mii_bus *bus)
182{ 183{
@@ -185,5 +186,6 @@ void free_mdio_bitbang(struct mii_bus *bus)
185 module_put(ctrl->ops->owner); 186 module_put(ctrl->ops->owner);
186 kfree(bus); 187 kfree(bus);
187} 188}
189EXPORT_SYMBOL(free_mdio_bitbang);
188 190
189MODULE_LICENSE("GPL"); 191MODULE_LICENSE("GPL");
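Exporting alloc_mdio_bitbang() and free_mdio_bitbang() lets modular MAC drivers reuse the generic MDIO bit-banging state machine by supplying pin-level callbacks. A hedged sketch of a consumer; every my_* name is hypothetical:

        #include <linux/mdio-bitbang.h>

        static const struct mdiobb_ops my_ops = {
                .owner         = THIS_MODULE,
                .set_mdc       = my_set_mdc,            /* clock line */
                .set_mdio_dir  = my_set_mdio_dir,       /* pin direction */
                .set_mdio_data = my_set_mdio_data,
                .get_mdio_data = my_get_mdio_data,
        };

        static struct mdiobb_ctrl my_ctrl = { .ops = &my_ops };

        static int my_bus_init(void)
        {
                struct mii_bus *bus = alloc_mdio_bitbang(&my_ctrl);

                if (!bus)
                        return -ENOMEM;
                /* fill in bus->name and bus id, then mdiobus_register(bus) */
                return 0;
        }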
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 51a91154125d..517425dcb77c 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -86,7 +86,7 @@
86#include "s2io.h" 86#include "s2io.h"
87#include "s2io-regs.h" 87#include "s2io-regs.h"
88 88
89#define DRV_VERSION "2.0.26.24" 89#define DRV_VERSION "2.0.26.25"
90 90
91/* S2io Driver name & version. */ 91/* S2io Driver name & version. */
92static char s2io_driver_name[] = "Neterion"; 92static char s2io_driver_name[] = "Neterion";
@@ -1891,8 +1891,6 @@ static int init_nic(struct s2io_nic *nic)
1891 1891
1892static int s2io_link_fault_indication(struct s2io_nic *nic) 1892static int s2io_link_fault_indication(struct s2io_nic *nic)
1893{ 1893{
1894 if (nic->config.intr_type != INTA)
1895 return MAC_RMAC_ERR_TIMER;
1896 if (nic->device_type == XFRAME_II_DEVICE) 1894 if (nic->device_type == XFRAME_II_DEVICE)
1897 return LINK_UP_DOWN_INTERRUPT; 1895 return LINK_UP_DOWN_INTERRUPT;
1898 else 1896 else
@@ -1925,7 +1923,9 @@ static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1925{ 1923{
1926 struct XENA_dev_config __iomem *bar0 = nic->bar0; 1924 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1927 register u64 gen_int_mask = 0; 1925 register u64 gen_int_mask = 0;
1926 u64 interruptible;
1928 1927
1928 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1929 if (mask & TX_DMA_INTR) { 1929 if (mask & TX_DMA_INTR) {
1930 1930
1931 gen_int_mask |= TXDMA_INT_M; 1931 gen_int_mask |= TXDMA_INT_M;
@@ -2015,10 +2015,12 @@ static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
2015 gen_int_mask |= RXMAC_INT_M; 2015 gen_int_mask |= RXMAC_INT_M;
2016 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag, 2016 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2017 &bar0->mac_int_mask); 2017 &bar0->mac_int_mask);
2018 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR | 2018 interruptible = RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2019 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR | 2019 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2020 RMAC_DOUBLE_ECC_ERR | 2020 RMAC_DOUBLE_ECC_ERR;
2021 RMAC_LINK_STATE_CHANGE_INT, 2021 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
2022 interruptible |= RMAC_LINK_STATE_CHANGE_INT;
2023 do_s2io_write_bits(interruptible,
2022 flag, &bar0->mac_rmac_err_mask); 2024 flag, &bar0->mac_rmac_err_mask);
2023 } 2025 }
2024 2026
@@ -2501,6 +2503,9 @@ static void stop_nic(struct s2io_nic *nic)
2501/** 2503/**
2502 * fill_rx_buffers - Allocates the Rx side skbs 2504 * fill_rx_buffers - Allocates the Rx side skbs
2503 * @ring_info: per ring structure 2505 * @ring_info: per ring structure
2506 * @from_card_up: If true, map buf0 and buf1 to obtain the DMA
2507 * addresses handed to the card. Otherwise, sync the already
2508 * mapped buffers back to the device for reuse.
2504 * Description: 2509 * Description:
2505 * The function allocates Rx side skbs and puts the physical 2510 * The function allocates Rx side skbs and puts the physical
2506 * address of these buffers into the RxD buffer pointers, so that the NIC 2511 * address of these buffers into the RxD buffer pointers, so that the NIC
@@ -2518,7 +2523,7 @@ static void stop_nic(struct s2io_nic *nic)
2518 * SUCCESS on success or an appropriate -ve value on failure. 2523 * SUCCESS on success or an appropriate -ve value on failure.
2519 */ 2524 */
2520 2525
2521static int fill_rx_buffers(struct ring_info *ring) 2526static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
2522{ 2527{
2523 struct sk_buff *skb; 2528 struct sk_buff *skb;
2524 struct RxD_t *rxdp; 2529 struct RxD_t *rxdp;
@@ -2637,17 +2642,16 @@ static int fill_rx_buffers(struct ring_info *ring)
2637 skb->data = (void *) (unsigned long)tmp; 2642 skb->data = (void *) (unsigned long)tmp;
2638 skb_reset_tail_pointer(skb); 2643 skb_reset_tail_pointer(skb);
2639 2644
2640 /* AK: check is wrong. 0 can be valid dma address */ 2645 if (from_card_up) {
2641 if (!(rxdp3->Buffer0_ptr))
2642 rxdp3->Buffer0_ptr = 2646 rxdp3->Buffer0_ptr =
2643 pci_map_single(ring->pdev, ba->ba_0, 2647 pci_map_single(ring->pdev, ba->ba_0,
2644 BUF0_LEN, PCI_DMA_FROMDEVICE); 2648 BUF0_LEN, PCI_DMA_FROMDEVICE);
2645 else 2649 if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
2650 goto pci_map_failed;
2651 } else
2646 pci_dma_sync_single_for_device(ring->pdev, 2652 pci_dma_sync_single_for_device(ring->pdev,
2647 (dma_addr_t) rxdp3->Buffer0_ptr, 2653 (dma_addr_t) rxdp3->Buffer0_ptr,
2648 BUF0_LEN, PCI_DMA_FROMDEVICE); 2654 BUF0_LEN, PCI_DMA_FROMDEVICE);
2649 if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
2650 goto pci_map_failed;
2651 2655
2652 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2656 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2653 if (ring->rxd_mode == RXD_MODE_3B) { 2657 if (ring->rxd_mode == RXD_MODE_3B) {
@@ -2664,21 +2668,22 @@ static int fill_rx_buffers(struct ring_info *ring)
2664 if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) 2668 if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
2665 goto pci_map_failed; 2669 goto pci_map_failed;
2666 2670
2667 /* AK: check is wrong */ 2671 if (from_card_up) {
2668 if (!rxdp3->Buffer1_ptr)
2669 rxdp3->Buffer1_ptr = 2672 rxdp3->Buffer1_ptr =
2670 pci_map_single(ring->pdev, 2673 pci_map_single(ring->pdev,
2671 ba->ba_1, BUF1_LEN, 2674 ba->ba_1, BUF1_LEN,
2672 PCI_DMA_FROMDEVICE); 2675 PCI_DMA_FROMDEVICE);
2673 2676
2674 if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) { 2677 if (pci_dma_mapping_error
2675 pci_unmap_single 2678 (rxdp3->Buffer1_ptr)) {
2676 (ring->pdev, 2679 pci_unmap_single
2677 (dma_addr_t)(unsigned long) 2680 (ring->pdev,
2678 skb->data, 2681 (dma_addr_t)(unsigned long)
2679 ring->mtu + 4, 2682 skb->data,
2680 PCI_DMA_FROMDEVICE); 2683 ring->mtu + 4,
2681 goto pci_map_failed; 2684 PCI_DMA_FROMDEVICE);
2685 goto pci_map_failed;
2686 }
2682 } 2687 }
2683 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 2688 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2684 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 2689 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
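The from_card_up flag replaces the bogus '!ptr' test (0 can be a valid DMA address, as the deleted comment notes) with an explicit map-once/sync-thereafter rule: buffers are pci_map_single()'d only when the card comes up, and on every later refill the existing mapping is merely synced back to the device. A condensed sketch, with buf standing in for the rxdp3 buffer fields:

        if (from_card_up) {
                /* first fill: establish the mapping */
                buf->dma = pci_map_single(ring->pdev, buf->va, BUF0_LEN,
                                          PCI_DMA_FROMDEVICE);
                if (pci_dma_mapping_error(buf->dma))
                        goto pci_map_failed;
        } else {
                /* refill: mapping already exists, hand it back */
                pci_dma_sync_single_for_device(ring->pdev, buf->dma,
                                               BUF0_LEN, PCI_DMA_FROMDEVICE);
        }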
@@ -2813,7 +2818,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
2813 2818
2814static int s2io_chk_rx_buffers(struct ring_info *ring) 2819static int s2io_chk_rx_buffers(struct ring_info *ring)
2815{ 2820{
2816 if (fill_rx_buffers(ring) == -ENOMEM) { 2821 if (fill_rx_buffers(ring, 0) == -ENOMEM) {
2817 DBG_PRINT(INFO_DBG, "%s: Out of memory", ring->dev->name); 2822 DBG_PRINT(INFO_DBG, "%s: Out of memory", ring->dev->name);
2818 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); 2823 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2819 } 2824 }
@@ -2944,7 +2949,7 @@ static void s2io_netpoll(struct net_device *dev)
2944 rx_intr_handler(&mac_control->rings[i], 0); 2949 rx_intr_handler(&mac_control->rings[i], 0);
2945 2950
2946 for (i = 0; i < config->rx_ring_num; i++) { 2951 for (i = 0; i < config->rx_ring_num; i++) {
2947 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2952 if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
2948 DBG_PRINT(INFO_DBG, "%s: Out of memory", dev->name); 2953 DBG_PRINT(INFO_DBG, "%s: Out of memory", dev->name);
2949 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); 2954 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2950 break; 2955 break;
@@ -4373,18 +4378,24 @@ static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4373 /* Nothing much can be done. Get out */ 4378 /* Nothing much can be done. Get out */
4374 return IRQ_HANDLED; 4379 return IRQ_HANDLED;
4375 4380
4376 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); 4381 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4382 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4377 4383
4378 if (reason & GEN_INTR_TXTRAFFIC) 4384 if (reason & GEN_INTR_TXPIC)
4379 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); 4385 s2io_txpic_intr_handle(sp);
4380 4386
4381 for (i = 0; i < config->tx_fifo_num; i++) 4387 if (reason & GEN_INTR_TXTRAFFIC)
4382 tx_intr_handler(&fifos[i]); 4388 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4383 4389
4384 writeq(sp->general_int_mask, &bar0->general_int_mask); 4390 for (i = 0; i < config->tx_fifo_num; i++)
4385 readl(&bar0->general_int_status); 4391 tx_intr_handler(&fifos[i]);
4386 4392
4387 return IRQ_HANDLED; 4393 writeq(sp->general_int_mask, &bar0->general_int_mask);
4394 readl(&bar0->general_int_status);
4395 return IRQ_HANDLED;
4396 }
4397 /* The interrupt was not raised by us */
4398 return IRQ_NONE;
4388} 4399}
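Returning IRQ_NONE when none of the TX sources are set is what makes shared-line and spurious-interrupt accounting work; previously the handler claimed every invocation. A sketch of the check-then-claim shape the hunk adopts (struct and register names from s2io.h):

        static irqreturn_t example_fifo_isr(int irq, void *dev_id)
        {
                struct fifo_info *fifos = dev_id;
                struct s2io_nic *sp = fifos->nic;
                struct XENA_dev_config __iomem *bar0 = sp->bar0;
                u64 reason = readq(&bar0->general_int_status);

                if (!(reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)))
                        return IRQ_NONE;        /* not raised by us */

                /* ... mask, service TXPIC and the tx fifos, unmask ... */
                return IRQ_HANDLED;
        }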
4389 4400
4390static void s2io_txpic_intr_handle(struct s2io_nic *sp) 4401static void s2io_txpic_intr_handle(struct s2io_nic *sp)
@@ -7112,6 +7123,9 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
7112 7123
7113 s2io_rem_isr(sp); 7124 s2io_rem_isr(sp);
7114 7125
7126 /* stop the tx queue, indicate link down */
7127 s2io_link(sp, LINK_DOWN);
7128
7115 /* Check if the device is Quiescent and then Reset the NIC */ 7129 /* Check if the device is Quiescent and then Reset the NIC */
7116 while(do_io) { 7130 while(do_io) {
7117 /* As per the HW requirement we need to replenish the 7131 /* As per the HW requirement we need to replenish the
@@ -7183,7 +7197,7 @@ static int s2io_card_up(struct s2io_nic * sp)
7183 7197
7184 for (i = 0; i < config->rx_ring_num; i++) { 7198 for (i = 0; i < config->rx_ring_num; i++) {
7185 mac_control->rings[i].mtu = dev->mtu; 7199 mac_control->rings[i].mtu = dev->mtu;
7186 ret = fill_rx_buffers(&mac_control->rings[i]); 7200 ret = fill_rx_buffers(&mac_control->rings[i], 1);
7187 if (ret) { 7201 if (ret) {
7188 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", 7202 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7189 dev->name); 7203 dev->name);
@@ -7244,17 +7258,19 @@ static int s2io_card_up(struct s2io_nic * sp)
7244 7258
7245 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2)); 7259 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7246 7260
7261 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7262
7247 /* Enable select interrupts */ 7263 /* Enable select interrupts */
7248 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); 7264 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7249 if (sp->config.intr_type != INTA) 7265 if (sp->config.intr_type != INTA) {
7250 en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS); 7266 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7251 else { 7267 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7268 } else {
7252 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; 7269 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7253 interruptible |= TX_PIC_INTR; 7270 interruptible |= TX_PIC_INTR;
7254 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS); 7271 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7255 } 7272 }
7256 7273
7257 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7258 return 0; 7274 return 0;
7259} 7275}
7260 7276
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 483b17c34ae8..6722a2f7d091 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -1107,6 +1107,7 @@ static int init_shared_mem(struct s2io_nic *sp);
1107static void free_shared_mem(struct s2io_nic *sp); 1107static void free_shared_mem(struct s2io_nic *sp);
1108static int init_nic(struct s2io_nic *nic); 1108static int init_nic(struct s2io_nic *nic);
1109static int rx_intr_handler(struct ring_info *ring_data, int budget); 1109static int rx_intr_handler(struct ring_info *ring_data, int budget);
1110static void s2io_txpic_intr_handle(struct s2io_nic *sp);
1110static void tx_intr_handler(struct fifo_info *fifo_data); 1111static void tx_intr_handler(struct fifo_info *fifo_data);
1111static void s2io_handle_errors(void * dev_id); 1112static void s2io_handle_errors(void * dev_id);
1112 1113
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
deleted file mode 100644
index c65199df8a7f..000000000000
--- a/drivers/net/saa9730.c
+++ /dev/null
@@ -1,1139 +0,0 @@
1/*
2 * Copyright (C) 2000, 2005 MIPS Technologies, Inc. All rights reserved.
3 * Authors: Carsten Langgaard <carstenl@mips.com>
4 * Maciej W. Rozycki <macro@mips.com>
5 * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
6 *
7 * This program is free software; you can distribute it and/or modify it
8 * under the terms of the GNU General Public License (Version 2) as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
19 *
20 * SAA9730 ethernet driver.
21 *
22 * Changes:
23 * Angelo Dell'Aera <buffer@antifork.org> : Conversion to the new PCI API
24 * (pci_driver).
25 * Conversion to spinlocks.
26 * Error handling fixes.
27 */
28
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/delay.h>
32#include <linux/etherdevice.h>
33#include <linux/module.h>
34#include <linux/skbuff.h>
35#include <linux/pci.h>
36#include <linux/spinlock.h>
37#include <linux/types.h>
38
39#include <asm/addrspace.h>
40#include <asm/io.h>
41
42#include <asm/mips-boards/prom.h>
43
44#include "saa9730.h"
45
46#ifdef LAN_SAA9730_DEBUG
47int lan_saa9730_debug = LAN_SAA9730_DEBUG;
48#else
49int lan_saa9730_debug;
50#endif
51
52#define DRV_MODULE_NAME "saa9730"
53
54static struct pci_device_id saa9730_pci_tbl[] = {
55 { PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA9730,
56 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
57 { 0, }
58};
59
60MODULE_DEVICE_TABLE(pci, saa9730_pci_tbl);
61
62/* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
63static unsigned int pci_irq_line;
64
65static void evm_saa9730_enable_lan_int(struct lan_saa9730_private *lp)
66{
67 writel(readl(&lp->evm_saa9730_regs->InterruptBlock1) | EVM_LAN_INT,
68 &lp->evm_saa9730_regs->InterruptBlock1);
69 writel(readl(&lp->evm_saa9730_regs->InterruptStatus1) | EVM_LAN_INT,
70 &lp->evm_saa9730_regs->InterruptStatus1);
71 writel(readl(&lp->evm_saa9730_regs->InterruptEnable1) | EVM_LAN_INT |
72 EVM_MASTER_EN, &lp->evm_saa9730_regs->InterruptEnable1);
73}
74
75static void evm_saa9730_disable_lan_int(struct lan_saa9730_private *lp)
76{
77 writel(readl(&lp->evm_saa9730_regs->InterruptBlock1) & ~EVM_LAN_INT,
78 &lp->evm_saa9730_regs->InterruptBlock1);
79 writel(readl(&lp->evm_saa9730_regs->InterruptEnable1) & ~EVM_LAN_INT,
80 &lp->evm_saa9730_regs->InterruptEnable1);
81}
82
83static void evm_saa9730_clear_lan_int(struct lan_saa9730_private *lp)
84{
85 writel(EVM_LAN_INT, &lp->evm_saa9730_regs->InterruptStatus1);
86}
87
88static void evm_saa9730_block_lan_int(struct lan_saa9730_private *lp)
89{
90 writel(readl(&lp->evm_saa9730_regs->InterruptBlock1) & ~EVM_LAN_INT,
91 &lp->evm_saa9730_regs->InterruptBlock1);
92}
93
94static void evm_saa9730_unblock_lan_int(struct lan_saa9730_private *lp)
95{
96 writel(readl(&lp->evm_saa9730_regs->InterruptBlock1) | EVM_LAN_INT,
97 &lp->evm_saa9730_regs->InterruptBlock1);
98}
99
100static void __used show_saa9730_regs(struct net_device *dev)
101{
102 struct lan_saa9730_private *lp = netdev_priv(dev);
103 int i, j;
104
105 printk("TxmBufferA = %p\n", lp->TxmBuffer[0][0]);
106 printk("TxmBufferB = %p\n", lp->TxmBuffer[1][0]);
107 printk("RcvBufferA = %p\n", lp->RcvBuffer[0][0]);
108 printk("RcvBufferB = %p\n", lp->RcvBuffer[1][0]);
109
110 for (i = 0; i < LAN_SAA9730_BUFFERS; i++) {
111 for (j = 0; j < LAN_SAA9730_TXM_Q_SIZE; j++) {
112 printk("TxmBuffer[%d][%d] = %x\n", i, j,
113 le32_to_cpu(*(unsigned int *)
114 lp->TxmBuffer[i][j]));
115 }
116 }
117 for (i = 0; i < LAN_SAA9730_BUFFERS; i++) {
118 for (j = 0; j < LAN_SAA9730_RCV_Q_SIZE; j++) {
119 printk("RcvBuffer[%d][%d] = %x\n", i, j,
120 le32_to_cpu(*(unsigned int *)
121 lp->RcvBuffer[i][j]));
122 }
123 }
124 printk("lp->evm_saa9730_regs->InterruptBlock1 = %x\n",
125 readl(&lp->evm_saa9730_regs->InterruptBlock1));
126 printk("lp->evm_saa9730_regs->InterruptStatus1 = %x\n",
127 readl(&lp->evm_saa9730_regs->InterruptStatus1));
128 printk("lp->evm_saa9730_regs->InterruptEnable1 = %x\n",
129 readl(&lp->evm_saa9730_regs->InterruptEnable1));
130 printk("lp->lan_saa9730_regs->Ok2Use = %x\n",
131 readl(&lp->lan_saa9730_regs->Ok2Use));
132 printk("lp->NextTxmBufferIndex = %x\n", lp->NextTxmBufferIndex);
133 printk("lp->NextTxmPacketIndex = %x\n", lp->NextTxmPacketIndex);
134 printk("lp->PendingTxmBufferIndex = %x\n",
135 lp->PendingTxmBufferIndex);
136 printk("lp->PendingTxmPacketIndex = %x\n",
137 lp->PendingTxmPacketIndex);
138 printk("lp->lan_saa9730_regs->LanDmaCtl = %x\n",
139 readl(&lp->lan_saa9730_regs->LanDmaCtl));
140 printk("lp->lan_saa9730_regs->DmaStatus = %x\n",
141 readl(&lp->lan_saa9730_regs->DmaStatus));
142 printk("lp->lan_saa9730_regs->CamCtl = %x\n",
143 readl(&lp->lan_saa9730_regs->CamCtl));
144 printk("lp->lan_saa9730_regs->TxCtl = %x\n",
145 readl(&lp->lan_saa9730_regs->TxCtl));
146 printk("lp->lan_saa9730_regs->TxStatus = %x\n",
147 readl(&lp->lan_saa9730_regs->TxStatus));
148 printk("lp->lan_saa9730_regs->RxCtl = %x\n",
149 readl(&lp->lan_saa9730_regs->RxCtl));
150 printk("lp->lan_saa9730_regs->RxStatus = %x\n",
151 readl(&lp->lan_saa9730_regs->RxStatus));
152
153 for (i = 0; i < LAN_SAA9730_CAM_DWORDS; i++) {
154 writel(i, &lp->lan_saa9730_regs->CamAddress);
155 printk("lp->lan_saa9730_regs->CamData = %x\n",
156 readl(&lp->lan_saa9730_regs->CamData));
157 }
158
159 printk("dev->stats.tx_packets = %lx\n", dev->stats.tx_packets);
160 printk("dev->stats.tx_errors = %lx\n", dev->stats.tx_errors);
161 printk("dev->stats.tx_aborted_errors = %lx\n",
162 dev->stats.tx_aborted_errors);
163 printk("dev->stats.tx_window_errors = %lx\n",
164 dev->stats.tx_window_errors);
165 printk("dev->stats.tx_carrier_errors = %lx\n",
166 dev->stats.tx_carrier_errors);
167 printk("dev->stats.tx_fifo_errors = %lx\n",
168 dev->stats.tx_fifo_errors);
169 printk("dev->stats.tx_heartbeat_errors = %lx\n",
170 dev->stats.tx_heartbeat_errors);
171 printk("dev->stats.collisions = %lx\n", dev->stats.collisions);
172
173 printk("dev->stats.rx_packets = %lx\n", dev->stats.rx_packets);
174 printk("dev->stats.rx_errors = %lx\n", dev->stats.rx_errors);
175 printk("dev->stats.rx_dropped = %lx\n", dev->stats.rx_dropped);
176 printk("dev->stats.rx_crc_errors = %lx\n", dev->stats.rx_crc_errors);
177 printk("dev->stats.rx_frame_errors = %lx\n",
178 dev->stats.rx_frame_errors);
179 printk("dev->stats.rx_fifo_errors = %lx\n",
180 dev->stats.rx_fifo_errors);
181 printk("dev->stats.rx_length_errors = %lx\n",
182 dev->stats.rx_length_errors);
183
184 printk("lp->lan_saa9730_regs->DebugPCIMasterAddr = %x\n",
185 readl(&lp->lan_saa9730_regs->DebugPCIMasterAddr));
186 printk("lp->lan_saa9730_regs->DebugLanTxStateMachine = %x\n",
187 readl(&lp->lan_saa9730_regs->DebugLanTxStateMachine));
188 printk("lp->lan_saa9730_regs->DebugLanRxStateMachine = %x\n",
189 readl(&lp->lan_saa9730_regs->DebugLanRxStateMachine));
190 printk("lp->lan_saa9730_regs->DebugLanTxFifoPointers = %x\n",
191 readl(&lp->lan_saa9730_regs->DebugLanTxFifoPointers));
192 printk("lp->lan_saa9730_regs->DebugLanRxFifoPointers = %x\n",
193 readl(&lp->lan_saa9730_regs->DebugLanRxFifoPointers));
194 printk("lp->lan_saa9730_regs->DebugLanCtlStateMachine = %x\n",
195 readl(&lp->lan_saa9730_regs->DebugLanCtlStateMachine));
196}
197
198static void lan_saa9730_buffer_init(struct lan_saa9730_private *lp)
199{
200 int i, j;
201
202 /* Init RX buffers */
203 for (i = 0; i < LAN_SAA9730_BUFFERS; i++) {
204 for (j = 0; j < LAN_SAA9730_RCV_Q_SIZE; j++) {
205 *(unsigned int *) lp->RcvBuffer[i][j] =
206 cpu_to_le32(RXSF_READY <<
207 RX_STAT_CTL_OWNER_SHF);
208 }
209 }
210
211 /* Init TX buffers */
212 for (i = 0; i < LAN_SAA9730_BUFFERS; i++) {
213 for (j = 0; j < LAN_SAA9730_TXM_Q_SIZE; j++) {
214 *(unsigned int *) lp->TxmBuffer[i][j] =
215 cpu_to_le32(TXSF_EMPTY <<
216 TX_STAT_CTL_OWNER_SHF);
217 }
218 }
219}
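The first 32-bit word of every packet slot doubles as an ownership and status field: the driver stamps RXSF_READY / TXSF_EMPTY to donate a slot, and the controller flips it to the *_HWDONE state with length and error bits folded into the same word. A sketch of the RX half of that handshake, condensed from lan_saa9730_rx() below:

        unsigned int status = le32_to_cpu(*(unsigned int *) pPacket);

        if ((status & RX_STAT_CTL_OWNER_MSK) ==
            (RXSF_HWDONE << RX_STAT_CTL_OWNER_SHF)) {
                int len = (status & RX_STAT_CTL_LENGTH_MSK) >>
                          RX_STAT_CTL_LENGTH_SHF;

                /* ... copy len bytes (after the status word) to an skb ... */

                /* hand the slot back to the controller */
                *pPacket = cpu_to_le32(RXSF_READY << RX_STAT_CTL_OWNER_SHF);
        }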
220
221static void lan_saa9730_free_buffers(struct pci_dev *pdev,
222 struct lan_saa9730_private *lp)
223{
224 pci_free_consistent(pdev, lp->buffer_size, lp->buffer_start,
225 lp->dma_addr);
226}
227
228static int lan_saa9730_allocate_buffers(struct pci_dev *pdev,
229 struct lan_saa9730_private *lp)
230{
231 void *Pa;
232 unsigned int i, j, rxoffset, txoffset;
233 int ret;
234
235 /* Initialize buffer space */
236 lp->DmaRcvPackets = LAN_SAA9730_RCV_Q_SIZE;
237 lp->DmaTxmPackets = LAN_SAA9730_TXM_Q_SIZE;
238
239 /* Initialize Rx Buffer Index */
240 lp->NextRcvPacketIndex = 0;
241 lp->NextRcvBufferIndex = 0;
242
243 /* Set current buffer index & next available packet index */
244 lp->NextTxmPacketIndex = 0;
245 lp->NextTxmBufferIndex = 0;
246 lp->PendingTxmPacketIndex = 0;
247 lp->PendingTxmBufferIndex = 0;
248
249 /*
250 * Allocate all RX and TX packets in one chunk.
251 * The Rx and Tx packets must be PACKET_SIZE aligned.
252 */
253 lp->buffer_size = ((LAN_SAA9730_RCV_Q_SIZE + LAN_SAA9730_TXM_Q_SIZE) *
254 LAN_SAA9730_PACKET_SIZE * LAN_SAA9730_BUFFERS) +
255 LAN_SAA9730_PACKET_SIZE;
256 lp->buffer_start = pci_alloc_consistent(pdev, lp->buffer_size,
257 &lp->dma_addr);
258 if (!lp->buffer_start) {
259 ret = -ENOMEM;
260 goto out;
261 }
262
263 Pa = (void *)ALIGN((unsigned long)lp->buffer_start,
264 LAN_SAA9730_PACKET_SIZE);
265
266 rxoffset = Pa - lp->buffer_start;
267
268 /* Init RX buffers */
269 for (i = 0; i < LAN_SAA9730_BUFFERS; i++) {
270 for (j = 0; j < LAN_SAA9730_RCV_Q_SIZE; j++) {
271 *(unsigned int *) Pa =
272 cpu_to_le32(RXSF_READY <<
273 RX_STAT_CTL_OWNER_SHF);
274 lp->RcvBuffer[i][j] = Pa;
275 Pa += LAN_SAA9730_PACKET_SIZE;
276 }
277 }
278
279 txoffset = Pa - lp->buffer_start;
280
281 /* Init TX buffers */
282 for (i = 0; i < LAN_SAA9730_BUFFERS; i++) {
283 for (j = 0; j < LAN_SAA9730_TXM_Q_SIZE; j++) {
284 *(unsigned int *) Pa =
285 cpu_to_le32(TXSF_EMPTY <<
286 TX_STAT_CTL_OWNER_SHF);
287 lp->TxmBuffer[i][j] = Pa;
288 Pa += LAN_SAA9730_PACKET_SIZE;
289 }
290 }
291
292 /*
293 * Set rx buffer A and rx buffer B to point to the first two buffer
294 * spaces.
295 */
296 writel(lp->dma_addr + rxoffset, &lp->lan_saa9730_regs->RxBuffA);
297 writel(lp->dma_addr + rxoffset +
298 LAN_SAA9730_PACKET_SIZE * LAN_SAA9730_RCV_Q_SIZE,
299 &lp->lan_saa9730_regs->RxBuffB);
300
301 /*
302 * Set txm_buf_a and txm_buf_b to point to the first two buffer
303 * space
304 */
305 writel(lp->dma_addr + txoffset,
306 &lp->lan_saa9730_regs->TxBuffA);
307 writel(lp->dma_addr + txoffset +
308 LAN_SAA9730_PACKET_SIZE * LAN_SAA9730_TXM_Q_SIZE,
309 &lp->lan_saa9730_regs->TxBuffB);
310
311 /* Set packet number */
312 writel((lp->DmaRcvPackets << PK_COUNT_RX_A_SHF) |
313 (lp->DmaRcvPackets << PK_COUNT_RX_B_SHF) |
314 (lp->DmaTxmPackets << PK_COUNT_TX_A_SHF) |
315 (lp->DmaTxmPackets << PK_COUNT_TX_B_SHF),
316 &lp->lan_saa9730_regs->PacketCount);
317
318 return 0;
319
320out:
321 return ret;
322}
323
324static int lan_saa9730_cam_load(struct lan_saa9730_private *lp)
325{
326 unsigned int i;
327 unsigned char *NetworkAddress;
328
329 NetworkAddress = (unsigned char *) &lp->PhysicalAddress[0][0];
330
331 for (i = 0; i < LAN_SAA9730_CAM_DWORDS; i++) {
332 /* First set address to where data is written */
333 writel(i, &lp->lan_saa9730_regs->CamAddress);
334 writel((NetworkAddress[0] << 24) | (NetworkAddress[1] << 16) |
335 (NetworkAddress[2] << 8) | NetworkAddress[3],
336 &lp->lan_saa9730_regs->CamData);
337 NetworkAddress += 4;
338 }
339 return 0;
340}
341
342static int lan_saa9730_cam_init(struct net_device *dev)
343{
344 struct lan_saa9730_private *lp = netdev_priv(dev);
345 unsigned int i;
346
347 /* Copy MAC-address into all entries. */
348 for (i = 0; i < LAN_SAA9730_CAM_ENTRIES; i++) {
349 memcpy((unsigned char *) lp->PhysicalAddress[i],
350 (unsigned char *) dev->dev_addr, 6);
351 }
352
353 return 0;
354}
355
356static int lan_saa9730_mii_init(struct lan_saa9730_private *lp)
357{
358 int i, l;
359
360 /* Check link status, spin here till station is not busy. */
361 i = 0;
362 while (readl(&lp->lan_saa9730_regs->StationMgmtCtl) & MD_CA_BUSY) {
363 i++;
364 if (i > 100) {
365 printk("Error: lan_saa9730_mii_init: timeout\n");
366 return -1;
367 }
368 mdelay(1); /* wait 1 ms. */
369 }
370
371 /* Now set the control and address register. */
372 writel(MD_CA_BUSY | PHY_STATUS | PHY_ADDRESS << MD_CA_PHY_SHF,
373 &lp->lan_saa9730_regs->StationMgmtCtl);
374
375 /* check link status, spin here till station is not busy */
376 i = 0;
377 while (readl(&lp->lan_saa9730_regs->StationMgmtCtl) & MD_CA_BUSY) {
378 i++;
379 if (i > 100) {
380 printk("Error: lan_saa9730_mii_init: timeout\n");
381 return -1;
382 }
383 mdelay(1); /* wait 1 ms. */
384 }
385
386 /* Wait for 1 ms. */
387 mdelay(1);
388
389 /* Check the link status. */
390 if (readl(&lp->lan_saa9730_regs->StationMgmtData) &
391 PHY_STATUS_LINK_UP) {
392 /* Link is up. */
393 return 0;
394 } else {
395 /* Link is down, reset the PHY first. */
396
397 /* set PHY address = 'CONTROL' */
398 writel(PHY_ADDRESS << MD_CA_PHY_SHF | MD_CA_WR | PHY_CONTROL,
399 &lp->lan_saa9730_regs->StationMgmtCtl);
400
401 /* Wait for 1 ms. */
402 mdelay(1);
403
404 /* set 'CONTROL' = force reset and renegotiate */
405 writel(PHY_CONTROL_RESET | PHY_CONTROL_AUTO_NEG |
406 PHY_CONTROL_RESTART_AUTO_NEG,
407 &lp->lan_saa9730_regs->StationMgmtData);
408
409 /* Wait for 50 ms. */
410 mdelay(50);
411
412 /* set 'BUSY' to start operation */
413 writel(MD_CA_BUSY | PHY_ADDRESS << MD_CA_PHY_SHF | MD_CA_WR |
414 PHY_CONTROL, &lp->lan_saa9730_regs->StationMgmtCtl);
415
416 /* await completion */
417 i = 0;
418 while (readl(&lp->lan_saa9730_regs->StationMgmtCtl) &
419 MD_CA_BUSY) {
420 i++;
421 if (i > 100) {
422 printk
423 ("Error: lan_saa9730_mii_init: timeout\n");
424 return -1;
425 }
426 mdelay(1); /* wait 1 ms. */
427 }
428
429 /* Wait for 1 ms. */
430 mdelay(1);
431
432 for (l = 0; l < 2; l++) {
433 /* set PHY address = 'STATUS' */
434 writel(MD_CA_BUSY | PHY_ADDRESS << MD_CA_PHY_SHF |
435 PHY_STATUS,
436 &lp->lan_saa9730_regs->StationMgmtCtl);
437
438 /* await completion */
439 i = 0;
440 while (readl(&lp->lan_saa9730_regs->StationMgmtCtl) &
441 MD_CA_BUSY) {
442 i++;
443 if (i > 100) {
444 printk
445 ("Error: lan_saa9730_mii_init: timeout\n");
446 return -1;
447 }
448 mdelay(1); /* wait 1 ms. */
449 }
450
451 /* wait for 3 sec. */
452 mdelay(3000);
453
454 /* check the link status */
455 if (readl(&lp->lan_saa9730_regs->StationMgmtData) &
456 PHY_STATUS_LINK_UP) {
457 /* link is up */
458 break;
459 }
460 }
461 }
462
463 return 0;
464}
465
466static int lan_saa9730_control_init(struct lan_saa9730_private *lp)
467{
468 /* Initialize DMA control register. */
469 writel((LANMB_ANY << DMA_CTL_MAX_XFER_SHF) |
470 (LANEND_LITTLE << DMA_CTL_ENDIAN_SHF) |
471 (LAN_SAA9730_RCV_Q_INT_THRESHOLD << DMA_CTL_RX_INT_COUNT_SHF)
472 | DMA_CTL_RX_INT_TO_EN | DMA_CTL_RX_INT_EN |
473 DMA_CTL_MAC_RX_INT_EN | DMA_CTL_MAC_TX_INT_EN,
474 &lp->lan_saa9730_regs->LanDmaCtl);
475
476 /* Initial MAC control register. */
477 writel((MACCM_MII << MAC_CONTROL_CONN_SHF) | MAC_CONTROL_FULL_DUP,
478 &lp->lan_saa9730_regs->MacCtl);
479
480 /* Initialize CAM control register. */
481 writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_BROAD_ACC,
482 &lp->lan_saa9730_regs->CamCtl);
483
484 /*
485 * Initialize CAM enable register, only turn on first entry, should
486 * contain own addr.
487 */
488 writel(0x0001, &lp->lan_saa9730_regs->CamEnable);
489
490 /* Initialize Tx control register */
491 writel(TX_CTL_EN_COMP, &lp->lan_saa9730_regs->TxCtl);
492
493 /* Initialize Rcv control register */
494 writel(RX_CTL_STRIP_CRC, &lp->lan_saa9730_regs->RxCtl);
495
496 /* Reset DMA engine */
497 writel(DMA_TEST_SW_RESET, &lp->lan_saa9730_regs->DmaTest);
498
499 return 0;
500}
501
502static int lan_saa9730_stop(struct lan_saa9730_private *lp)
503{
504 int i;
505
506 /* Stop DMA first */
507 writel(readl(&lp->lan_saa9730_regs->LanDmaCtl) &
508 ~(DMA_CTL_EN_TX_DMA | DMA_CTL_EN_RX_DMA),
509 &lp->lan_saa9730_regs->LanDmaCtl);
510
511 /* Set the SW Reset bits in DMA and MAC control registers */
512 writel(DMA_TEST_SW_RESET, &lp->lan_saa9730_regs->DmaTest);
513 writel(readl(&lp->lan_saa9730_regs->MacCtl) | MAC_CONTROL_RESET,
514 &lp->lan_saa9730_regs->MacCtl);
515
516 /*
517 * Wait for MAC reset to have finished. The reset bit is auto cleared
518 * when the reset is done.
519 */
520 i = 0;
521 while (readl(&lp->lan_saa9730_regs->MacCtl) & MAC_CONTROL_RESET) {
522 i++;
523 if (i > 100) {
524 printk
525 ("Error: lan_sa9730_stop: MAC reset timeout\n");
526 return -1;
527 }
528 mdelay(1); /* wait 1 ms. */
529 }
530
531 return 0;
532}
533
534static int lan_saa9730_dma_init(struct lan_saa9730_private *lp)
535{
536 /* Stop lan controller. */
537 lan_saa9730_stop(lp);
538
539 writel(LAN_SAA9730_DEFAULT_TIME_OUT_CNT,
540 &lp->lan_saa9730_regs->Timeout);
541
542 return 0;
543}
544
545static int lan_saa9730_start(struct lan_saa9730_private *lp)
546{
547 lan_saa9730_buffer_init(lp);
548
549 /* Initialize Rx Buffer Index */
550 lp->NextRcvPacketIndex = 0;
551 lp->NextRcvBufferIndex = 0;
552
553 /* Set current buffer index & next available packet index */
554 lp->NextTxmPacketIndex = 0;
555 lp->NextTxmBufferIndex = 0;
556 lp->PendingTxmPacketIndex = 0;
557 lp->PendingTxmBufferIndex = 0;
558
559 writel(readl(&lp->lan_saa9730_regs->LanDmaCtl) | DMA_CTL_EN_TX_DMA |
560 DMA_CTL_EN_RX_DMA, &lp->lan_saa9730_regs->LanDmaCtl);
561
562 /* For Tx, turn on MAC then DMA */
563 writel(readl(&lp->lan_saa9730_regs->TxCtl) | TX_CTL_TX_EN,
564 &lp->lan_saa9730_regs->TxCtl);
565
566 /* For Rx, turn on DMA then MAC */
567 writel(readl(&lp->lan_saa9730_regs->RxCtl) | RX_CTL_RX_EN,
568 &lp->lan_saa9730_regs->RxCtl);
569
570 /* Set Ok2Use to let hardware own the buffers. */
571 writel(OK2USE_RX_A | OK2USE_RX_B, &lp->lan_saa9730_regs->Ok2Use);
572
573 return 0;
574}
575
576static int lan_saa9730_restart(struct lan_saa9730_private *lp)
577{
578 lan_saa9730_stop(lp);
579 lan_saa9730_start(lp);
580
581 return 0;
582}
583
584static int lan_saa9730_tx(struct net_device *dev)
585{
586 struct lan_saa9730_private *lp = netdev_priv(dev);
587 unsigned int *pPacket;
588 unsigned int tx_status;
589
590 if (lan_saa9730_debug > 5)
591 printk("lan_saa9730_tx interrupt\n");
592
593 /* Clear interrupt. */
594 writel(DMA_STATUS_MAC_TX_INT, &lp->lan_saa9730_regs->DmaStatus);
595
596 while (1) {
597 pPacket = lp->TxmBuffer[lp->PendingTxmBufferIndex]
598 [lp->PendingTxmPacketIndex];
599
600 /* Get status of first packet transmitted. */
601 tx_status = le32_to_cpu(*pPacket);
602
603 /* Check ownership. */
604 if ((tx_status & TX_STAT_CTL_OWNER_MSK) !=
605 (TXSF_HWDONE << TX_STAT_CTL_OWNER_SHF)) break;
606
607 /* Check for error. */
608 if (tx_status & TX_STAT_CTL_ERROR_MSK) {
609 if (lan_saa9730_debug > 1)
610 printk("lan_saa9730_tx: tx error = %x\n",
611 tx_status);
612
613 dev->stats.tx_errors++;
614 if (tx_status &
615 (TX_STATUS_EX_COLL << TX_STAT_CTL_STATUS_SHF))
616 dev->stats.tx_aborted_errors++;
617 if (tx_status &
618 (TX_STATUS_LATE_COLL << TX_STAT_CTL_STATUS_SHF))
619 dev->stats.tx_window_errors++;
620 if (tx_status &
621 (TX_STATUS_L_CARR << TX_STAT_CTL_STATUS_SHF))
622 dev->stats.tx_carrier_errors++;
623 if (tx_status &
624 (TX_STATUS_UNDER << TX_STAT_CTL_STATUS_SHF))
625 dev->stats.tx_fifo_errors++;
626 if (tx_status &
627 (TX_STATUS_SQ_ERR << TX_STAT_CTL_STATUS_SHF))
628 dev->stats.tx_heartbeat_errors++;
629
630 dev->stats.collisions +=
631 tx_status & TX_STATUS_TX_COLL_MSK;
632 }
633
634 /* Free buffer. */
635 *pPacket =
636 cpu_to_le32(TXSF_EMPTY << TX_STAT_CTL_OWNER_SHF);
637
638 /* Update pending index pointer. */
639 lp->PendingTxmPacketIndex++;
640 if (lp->PendingTxmPacketIndex >= LAN_SAA9730_TXM_Q_SIZE) {
641 lp->PendingTxmPacketIndex = 0;
642 lp->PendingTxmBufferIndex ^= 1;
643 }
644 }
645
646 /* The tx buffer is no longer full. */
647 netif_wake_queue(dev);
648
649 return 0;
650}
651
652static int lan_saa9730_rx(struct net_device *dev)
653{
654 struct lan_saa9730_private *lp = netdev_priv(dev);
655 int len = 0;
656 struct sk_buff *skb = 0;
657 unsigned int rx_status;
658 int BufferIndex;
659 int PacketIndex;
660 unsigned int *pPacket;
661 unsigned char *pData;
662
663 if (lan_saa9730_debug > 5)
664 printk("lan_saa9730_rx interrupt\n");
665
666 /* Clear receive interrupts. */
667 writel(DMA_STATUS_MAC_RX_INT | DMA_STATUS_RX_INT |
668 DMA_STATUS_RX_TO_INT, &lp->lan_saa9730_regs->DmaStatus);
669
670 /* Address next packet */
671 BufferIndex = lp->NextRcvBufferIndex;
672 PacketIndex = lp->NextRcvPacketIndex;
673 pPacket = lp->RcvBuffer[BufferIndex][PacketIndex];
674 rx_status = le32_to_cpu(*pPacket);
675
676 /* Process each packet. */
677 while ((rx_status & RX_STAT_CTL_OWNER_MSK) ==
678 (RXSF_HWDONE << RX_STAT_CTL_OWNER_SHF)) {
679 /* Check the rx status. */
680 if (rx_status & (RX_STATUS_GOOD << RX_STAT_CTL_STATUS_SHF)) {
681 /* Received packet is good. */
682 len = (rx_status & RX_STAT_CTL_LENGTH_MSK) >>
683 RX_STAT_CTL_LENGTH_SHF;
684
685 pData = (unsigned char *) pPacket;
686 pData += 4;
687 skb = dev_alloc_skb(len + 2);
688 if (skb == 0) {
689 printk
690 ("%s: Memory squeeze, deferring packet.\n",
691 dev->name);
692 dev->stats.rx_dropped++;
693 } else {
694 dev->stats.rx_bytes += len;
695 dev->stats.rx_packets++;
696 skb_reserve(skb, 2); /* 16 byte align */
697 skb_put(skb, len); /* make room */
698 skb_copy_to_linear_data(skb,
699 (unsigned char *) pData,
700 len);
701 skb->protocol = eth_type_trans(skb, dev);
702 netif_rx(skb);
703 dev->last_rx = jiffies;
704 }
705 } else {
706 /* We got an error packet. */
707 if (lan_saa9730_debug > 2)
708 printk
709 ("lan_saa9730_rx: We got an error packet = %x\n",
710 rx_status);
711
712 dev->stats.rx_errors++;
713 if (rx_status &
714 (RX_STATUS_CRC_ERR << RX_STAT_CTL_STATUS_SHF))
715 dev->stats.rx_crc_errors++;
716 if (rx_status &
717 (RX_STATUS_ALIGN_ERR << RX_STAT_CTL_STATUS_SHF))
718 dev->stats.rx_frame_errors++;
719 if (rx_status &
720 (RX_STATUS_OVERFLOW << RX_STAT_CTL_STATUS_SHF))
721 dev->stats.rx_fifo_errors++;
722 if (rx_status &
723 (RX_STATUS_LONG_ERR << RX_STAT_CTL_STATUS_SHF))
724 dev->stats.rx_length_errors++;
725 }
726
727 /* Indicate we have processed the buffer. */
728 *pPacket = cpu_to_le32(RXSF_READY << RX_STAT_CTL_OWNER_SHF);
729
730 /* Make sure A or B is available to hardware as appropriate. */
731 writel(BufferIndex ? OK2USE_RX_B : OK2USE_RX_A,
732 &lp->lan_saa9730_regs->Ok2Use);
733
734 /* Go to next packet in sequence. */
735 lp->NextRcvPacketIndex++;
736 if (lp->NextRcvPacketIndex >= LAN_SAA9730_RCV_Q_SIZE) {
737 lp->NextRcvPacketIndex = 0;
738 lp->NextRcvBufferIndex ^= 1;
739 }
740
741 /* Address next packet */
742 BufferIndex = lp->NextRcvBufferIndex;
743 PacketIndex = lp->NextRcvPacketIndex;
744 pPacket = lp->RcvBuffer[BufferIndex][PacketIndex];
745 rx_status = le32_to_cpu(*pPacket);
746 }
747
748 return 0;
749}
750
751static irqreturn_t lan_saa9730_interrupt(const int irq, void *dev_id)
752{
753 struct net_device *dev = dev_id;
754 struct lan_saa9730_private *lp = netdev_priv(dev);
755
756 if (lan_saa9730_debug > 5)
757 printk("lan_saa9730_interrupt\n");
758
759 /* Disable the EVM LAN interrupt. */
760 evm_saa9730_block_lan_int(lp);
761
762 /* Clear the EVM LAN interrupt. */
763 evm_saa9730_clear_lan_int(lp);
764
765 /* Service pending transmit interrupts. */
766 if (readl(&lp->lan_saa9730_regs->DmaStatus) & DMA_STATUS_MAC_TX_INT)
767 lan_saa9730_tx(dev);
768
769 /* Service pending receive interrupts. */
770 if (readl(&lp->lan_saa9730_regs->DmaStatus) &
771 (DMA_STATUS_MAC_RX_INT | DMA_STATUS_RX_INT |
772 DMA_STATUS_RX_TO_INT)) lan_saa9730_rx(dev);
773
774 /* Enable the EVM LAN interrupt. */
775 evm_saa9730_unblock_lan_int(lp);
776
777 return IRQ_HANDLED;
778}
779
780static int lan_saa9730_open(struct net_device *dev)
781{
782 struct lan_saa9730_private *lp = netdev_priv(dev);
783
784 /* Associate IRQ with lan_saa9730_interrupt */
785 if (request_irq(dev->irq, &lan_saa9730_interrupt, 0, "SAA9730 Eth",
786 dev)) {
787 printk("lan_saa9730_open: Can't get irq %d\n", dev->irq);
788 return -EAGAIN;
789 }
790
791 /* Enable the Lan interrupt in the event manager. */
792 evm_saa9730_enable_lan_int(lp);
793
794 /* Start the LAN controller */
795 if (lan_saa9730_start(lp))
796 return -1;
797
798 netif_start_queue(dev);
799
800 return 0;
801}
802
803static int lan_saa9730_write(struct lan_saa9730_private *lp,
804 struct sk_buff *skb, int skblen)
805{
806 unsigned char *pbData = skb->data;
807 unsigned int len = skblen;
808 unsigned char *pbPacketData;
809 unsigned int tx_status;
810 int BufferIndex;
811 int PacketIndex;
812
813 if (lan_saa9730_debug > 5)
814 printk("lan_saa9730_write: skb=%p\n", skb);
815
816 BufferIndex = lp->NextTxmBufferIndex;
817 PacketIndex = lp->NextTxmPacketIndex;
818
819 tx_status = le32_to_cpu(*(unsigned int *)lp->TxmBuffer[BufferIndex]
820 [PacketIndex]);
821 if ((tx_status & TX_STAT_CTL_OWNER_MSK) !=
822 (TXSF_EMPTY << TX_STAT_CTL_OWNER_SHF)) {
823 if (lan_saa9730_debug > 4)
824 printk
825 ("lan_saa9730_write: Tx buffer not available: tx_status = %x\n",
826 tx_status);
827 return -1;
828 }
829
830 lp->NextTxmPacketIndex++;
831 if (lp->NextTxmPacketIndex >= LAN_SAA9730_TXM_Q_SIZE) {
832 lp->NextTxmPacketIndex = 0;
833 lp->NextTxmBufferIndex ^= 1;
834 }
835
836 pbPacketData = lp->TxmBuffer[BufferIndex][PacketIndex];
837 pbPacketData += 4;
838
839 /* copy the bits */
840 memcpy(pbPacketData, pbData, len);
841
842 /* Set transmit status for hardware */
843 *(unsigned int *)lp->TxmBuffer[BufferIndex][PacketIndex] =
844 cpu_to_le32((TXSF_READY << TX_STAT_CTL_OWNER_SHF) |
845 (TX_STAT_CTL_INT_AFTER_TX <<
846 TX_STAT_CTL_FRAME_SHF) |
847 (len << TX_STAT_CTL_LENGTH_SHF));
848
849 /* Make sure A or B is available to hardware as appropriate. */
850 writel(BufferIndex ? OK2USE_TX_B : OK2USE_TX_A,
851 &lp->lan_saa9730_regs->Ok2Use);
852
853 return 0;
854}
855
856static void lan_saa9730_tx_timeout(struct net_device *dev)
857{
858 struct lan_saa9730_private *lp = netdev_priv(dev);
859
860 /* Transmitter timeout, serious problems */
861 dev->stats.tx_errors++;
862 printk("%s: transmit timed out, reset\n", dev->name);
863 /*show_saa9730_regs(dev); */
864 lan_saa9730_restart(lp);
865
866 dev->trans_start = jiffies;
867 netif_wake_queue(dev);
868}
869
870static int lan_saa9730_start_xmit(struct sk_buff *skb,
871 struct net_device *dev)
872{
873 struct lan_saa9730_private *lp = netdev_priv(dev);
874 unsigned long flags;
875 int skblen;
876 int len;
877
878 if (lan_saa9730_debug > 4)
879 printk("Send packet: skb=%p\n", skb);
880
881 skblen = skb->len;
882
883 spin_lock_irqsave(&lp->lock, flags);
884
885 len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
886
887 if (lan_saa9730_write(lp, skb, skblen)) {
888 spin_unlock_irqrestore(&lp->lock, flags);
889 printk("Error when writing packet to controller: skb=%p\n", skb);
890 netif_stop_queue(dev);
891 return -1;
892 }
893
894 dev->stats.tx_bytes += len;
895 dev->stats.tx_packets++;
896
897 dev->trans_start = jiffies;
898 netif_wake_queue(dev);
899 dev_kfree_skb(skb);
900
901 spin_unlock_irqrestore(&lp->lock, flags);
902
903 return 0;
904}
905
906static int lan_saa9730_close(struct net_device *dev)
907{
908 struct lan_saa9730_private *lp = netdev_priv(dev);
909
910 if (lan_saa9730_debug > 1)
911 printk("lan_saa9730_close:\n");
912
913 netif_stop_queue(dev);
914
915 /* Disable the Lan interrupt in the event manager. */
916 evm_saa9730_disable_lan_int(lp);
917
918 /* Stop the controller; release the IRQ even if stopping fails. */
919 lan_saa9730_stop(lp);
920
921 free_irq(dev->irq, (void *) dev);
923
924 return 0;
925}
926
927static void lan_saa9730_set_multicast(struct net_device *dev)
928{
929 struct lan_saa9730_private *lp = netdev_priv(dev);
930
931 /* Stop the controller */
932 lan_saa9730_stop(lp);
933
934 if (dev->flags & IFF_PROMISC) {
935 /* accept all packets */
936 writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_STATION_ACC |
937 CAM_CONTROL_GROUP_ACC | CAM_CONTROL_BROAD_ACC,
938 &lp->lan_saa9730_regs->CamCtl);
939 } else {
940 if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
941 /* accept all multicast packets */
942 /*
943 * Will handle the multicast stuff later. -carstenl
944 */
945 writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC |
946 CAM_CONTROL_BROAD_ACC,
947 &lp->lan_saa9730_regs->CamCtl);
948 }
949 }
950
951 lan_saa9730_restart(lp);
952}
953
954
955static void __devexit saa9730_remove_one(struct pci_dev *pdev)
956{
957 struct net_device *dev = pci_get_drvdata(pdev);
958 struct lan_saa9730_private *lp;
959 if (dev) {
960 lp = netdev_priv(dev);
961 unregister_netdev(dev);
962 lan_saa9730_free_buffers(pdev, lp);
963 iounmap(lp->lan_saa9730_regs);
964 iounmap(lp->evm_saa9730_regs);
965 free_netdev(dev);
966 pci_release_regions(pdev);
967 pci_disable_device(pdev);
968 pci_set_drvdata(pdev, NULL);
969 }
970}
971
972
973static int lan_saa9730_init(struct net_device *dev, struct pci_dev *pdev,
974 unsigned long ioaddr, int irq)
975{
976 struct lan_saa9730_private *lp = netdev_priv(dev);
977 unsigned char ethernet_addr[6];
978 int ret;
979
980 if (get_ethernet_addr(ethernet_addr)) {
981 ret = -ENODEV;
982 goto out;
983 }
984
985 memcpy(dev->dev_addr, ethernet_addr, 6);
986 dev->base_addr = ioaddr;
987 dev->irq = irq;
988
989 lp->pci_dev = pdev;
990
991 /* Set SAA9730 LAN base address. */
992 lp->lan_saa9730_regs = ioremap(ioaddr + SAA9730_LAN_REGS_ADDR,
993 SAA9730_LAN_REGS_SIZE);
994 if (!lp->lan_saa9730_regs) {
995 ret = -ENOMEM;
996 goto out;
997 }
998
999 /* Set SAA9730 EVM base address. */
1000 lp->evm_saa9730_regs = ioremap(ioaddr + SAA9730_EVM_REGS_ADDR,
1001 SAA9730_EVM_REGS_SIZE);
1002 if (!lp->evm_saa9730_regs) {
1003 ret = -ENOMEM;
1004 goto out_iounmap_lan;
1005 }
1006
1007 /* Allocate LAN RX/TX frame buffer space. */
1008 if ((ret = lan_saa9730_allocate_buffers(pdev, lp)))
1009 goto out_iounmap;
1010
1011 /* Stop LAN controller. */
1012 if ((ret = lan_saa9730_stop(lp)))
1013 goto out_free_consistent;
1014
1015 /* Initialize CAM registers. */
1016 if ((ret = lan_saa9730_cam_init(dev)))
1017 goto out_free_consistent;
1018
1019 /* Initialize MII registers. */
1020 if ((ret = lan_saa9730_mii_init(lp)))
1021 goto out_free_consistent;
1022
1023 /* Initialize control registers. */
1024 if ((ret = lan_saa9730_control_init(lp)))
1025 goto out_free_consistent;
1026
1027 /* Load CAM registers. */
1028 if ((ret = lan_saa9730_cam_load(lp)))
1029 goto out_free_consistent;
1030
1031 /* Initialize DMA context registers. */
1032 if ((ret = lan_saa9730_dma_init(lp)))
1033 goto out_free_consistent;
1034
1035 spin_lock_init(&lp->lock);
1036
1037 dev->open = lan_saa9730_open;
1038 dev->hard_start_xmit = lan_saa9730_start_xmit;
1039 dev->stop = lan_saa9730_close;
1040 dev->set_multicast_list = lan_saa9730_set_multicast;
1041 dev->tx_timeout = lan_saa9730_tx_timeout;
1042 dev->watchdog_timeo = (HZ >> 1);
1043 dev->dma = 0;
1044
1045 ret = register_netdev(dev);
1046 if (ret)
1047 goto out_free_consistent;
1048
1049 return 0;
1050
1051out_free_consistent:
1052 lan_saa9730_free_buffers(pdev, lp);
1053out_iounmap:
1054 iounmap(lp->evm_saa9730_regs);
1055out_iounmap_lan:
1056 iounmap(lp->lan_saa9730_regs);
1057out:
1058 return ret;
1059}
1060
1061
1062static int __devinit saa9730_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1063{
1064 struct net_device *dev = NULL;
1065 unsigned long pci_ioaddr;
1066 int err;
1067
1068 if (lan_saa9730_debug > 1)
1069 printk("saa9730.c: PCI BIOS is present, checking for devices...\n");
1070
1071 err = pci_enable_device(pdev);
1072 if (err) {
1073 printk(KERN_ERR "Cannot enable PCI device, aborting.\n");
1074 goto out;
1075 }
1076
1077 err = pci_request_regions(pdev, DRV_MODULE_NAME);
1078 if (err) {
1079 printk(KERN_ERR "Cannot obtain PCI resources, aborting.\n");
1080 goto out_disable_pdev;
1081 }
1082
1083 pci_irq_line = pdev->irq;
784 /* LAN base address is located at BAR 1. */
1085
1086 pci_ioaddr = pci_resource_start(pdev, 1);
1087 pci_set_master(pdev);
1088
1089 printk("Found SAA9730 (PCI) at %lx, irq %d.\n",
1090 pci_ioaddr, pci_irq_line);
1091
1092 dev = alloc_etherdev(sizeof(struct lan_saa9730_private));
1093 err = -ENOMEM;
1094 if (!dev)
1095 goto out_disable_pdev;
1096 err = lan_saa9730_init(dev, pdev, pci_ioaddr, pci_irq_line);
1097 if (err) {
1098 printk(KERN_ERR "LAN init failed\n");
1099 goto out_free_netdev;
1100 }
1101
1102 pci_set_drvdata(pdev, dev);
1103 SET_NETDEV_DEV(dev, &pdev->dev);
1104 return 0;
1105
1106out_free_netdev:
1107 free_netdev(dev);
1108out_disable_pdev:
1109 pci_disable_device(pdev);
1110out:
1111 pci_set_drvdata(pdev, NULL);
1112 return err;
1113}
1114
1115
1116static struct pci_driver saa9730_driver = {
1117 .name = DRV_MODULE_NAME,
1118 .id_table = saa9730_pci_tbl,
1119 .probe = saa9730_init_one,
1120 .remove = __devexit_p(saa9730_remove_one),
1121};
1122
1123
1124static int __init saa9730_init(void)
1125{
1126 return pci_register_driver(&saa9730_driver);
1127}
1128
1129static void __exit saa9730_cleanup(void)
1130{
1131 pci_unregister_driver(&saa9730_driver);
1132}
1133
1134module_init(saa9730_init);
1135module_exit(saa9730_cleanup);
1136
1137MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
1138MODULE_DESCRIPTION("Philips SAA9730 ethernet driver");
1139MODULE_LICENSE("GPL");
diff --git a/drivers/net/saa9730.h b/drivers/net/saa9730.h
deleted file mode 100644
index 010a120ea938..000000000000
--- a/drivers/net/saa9730.h
+++ /dev/null
@@ -1,384 +0,0 @@
1/*
2 * Copyright (C) 2000, 2005 MIPS Technologies, Inc. All rights reserved.
3 * Authors: Carsten Langgaard <carstenl@mips.com>
4 * Maciej W. Rozycki <macro@mips.com>
5 *
6 * ########################################################################
7 *
8 * This program is free software; you can distribute it and/or modify it
9 * under the terms of the GNU General Public License (Version 2) as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
20 *
21 * ########################################################################
22 *
23 * SAA9730 ethernet driver description.
24 *
25 */
26#ifndef _SAA9730_H
27#define _SAA9730_H
28
29
30/* Number of 6-byte entries in the CAM. */
31#define LAN_SAA9730_CAM_ENTRIES 10
32#define LAN_SAA9730_CAM_DWORDS ((LAN_SAA9730_CAM_ENTRIES*6)/4)
33
34/* TX and RX packet size: fixed to 2048 bytes, according to HW requirements. */
35#define LAN_SAA9730_PACKET_SIZE 2048
36
37/*
38 * Number of TX buffers = number of RX buffers = 2, which is fixed according
39 * to HW requirements.
40 */
41#define LAN_SAA9730_BUFFERS 2
42
43/* Number of RX packets per RX buffer. */
44#define LAN_SAA9730_RCV_Q_SIZE 15
45
46/* Number of TX packets per TX buffer. */
47#define LAN_SAA9730_TXM_Q_SIZE 15
48
49/*
50 * We get an interrupt for every LAN_SAA9730_RCV_Q_INT_THRESHOLD
51 * packets received.
52 * If, however, fewer than LAN_SAA9730_RCV_Q_INT_THRESHOLD packets
53 * arrive, the hardware can time out after a certain period and still
54 * tell us that packets have arrived.
55 * The timeout value is in units of 32 PCI clocks (33 MHz); a value
56 * of 200 approximates 200 * 32 / 33 MHz = ~0.0002 seconds.
57 */
58#define LAN_SAA9730_RCV_Q_INT_THRESHOLD 1
59#define LAN_SAA9730_DEFAULT_TIME_OUT_CNT 10
60
61#define RXSF_NDIS 0
62#define RXSF_READY 2
63#define RXSF_HWDONE 3
64
65#define TXSF_EMPTY 0
66#define TXSF_READY 2
67#define TXSF_HWDONE 3
68
69#define LANEND_LITTLE 0
70#define LANEND_BIG_2143 1
71#define LANEND_BIG_4321 2
72
73#define LANMB_ANY 0
74#define LANMB_8 1
75#define LANMB_32 2
76#define LANMB_64 3
77
78#define MACCM_AUTOMATIC 0
79#define MACCM_10MB 1
80#define MACCM_MII 2
81
82/*
83 * PHY definitions for Basic registers of QS6612 (used on MIPS ATLAS board)
84 */
85#define PHY_CONTROL 0x0
86#define PHY_STATUS 0x1
87#define PHY_STATUS_LINK_UP 0x4
88#define PHY_CONTROL_RESET 0x8000
89#define PHY_CONTROL_AUTO_NEG 0x1000
90#define PHY_CONTROL_RESTART_AUTO_NEG 0x0200
91#define PHY_ADDRESS 0x0
92
93/* PK_COUNT register. */
94#define PK_COUNT_TX_A_SHF 24
95#define PK_COUNT_TX_A_MSK (0xff << PK_COUNT_TX_A_SHF)
96#define PK_COUNT_TX_B_SHF 16
97#define PK_COUNT_TX_B_MSK (0xff << PK_COUNT_TX_B_SHF)
98#define PK_COUNT_RX_A_SHF 8
99#define PK_COUNT_RX_A_MSK (0xff << PK_COUNT_RX_A_SHF)
100#define PK_COUNT_RX_B_SHF 0
101#define PK_COUNT_RX_B_MSK (0xff << PK_COUNT_RX_B_SHF)
102
103/* OK2USE register. */
104#define OK2USE_TX_A 0x8
105#define OK2USE_TX_B 0x4
106#define OK2USE_RX_A 0x2
107#define OK2USE_RX_B 0x1
108
109/* LAN DMA CONTROL register. */
110#define DMA_CTL_BLK_INT 0x80000000
111#define DMA_CTL_MAX_XFER_SHF 18
112#define DMA_CTL_MAX_XFER_MSK (0x3 << DMA_CTL_MAX_XFER_SHF)
113#define DMA_CTL_ENDIAN_SHF 16
114#define DMA_CTL_ENDIAN_MSK (0x3 << DMA_CTL_ENDIAN_SHF)
115#define DMA_CTL_RX_INT_COUNT_SHF 8
116#define DMA_CTL_RX_INT_COUNT_MSK (0xff << DMA_CTL_RX_INT_COUNT_SHF)
117#define DMA_CTL_EN_TX_DMA 0x00000080
118#define DMA_CTL_EN_RX_DMA 0x00000040
119#define DMA_CTL_RX_INT_BUFFUL_EN 0x00000020
120#define DMA_CTL_RX_INT_TO_EN 0x00000010
121#define DMA_CTL_RX_INT_EN 0x00000008
122#define DMA_CTL_TX_INT_EN 0x00000004
123#define DMA_CTL_MAC_TX_INT_EN 0x00000002
124#define DMA_CTL_MAC_RX_INT_EN 0x00000001
125
126/* DMA STATUS register. */
127#define DMA_STATUS_BAD_ADDR_SHF 16
128#define DMA_STATUS_BAD_ADDR_MSK (0xf << DMA_STATUS_BAD_ADDR_SHF)
129#define DMA_STATUS_RX_PKTS_RECEIVED_SHF 8
130#define DMA_STATUS_RX_PKTS_RECEIVED_MSK (0xff << DMA_STATUS_RX_PKTS_RECEIVED_SHF)
131#define DMA_STATUS_TX_EN_SYNC 0x00000080
132#define DMA_STATUS_RX_BUF_A_FUL 0x00000040
133#define DMA_STATUS_RX_BUF_B_FUL 0x00000020
134#define DMA_STATUS_RX_TO_INT 0x00000010
135#define DMA_STATUS_RX_INT 0x00000008
136#define DMA_STATUS_TX_INT 0x00000004
137#define DMA_STATUS_MAC_TX_INT 0x00000002
138#define DMA_STATUS_MAC_RX_INT 0x00000001
139
140/* DMA TEST/PANIC SWITCHES register. */
141#define DMA_TEST_LOOPBACK 0x01000000
142#define DMA_TEST_SW_RESET 0x00000001
143
144/* MAC CONTROL register. */
145#define MAC_CONTROL_EN_MISS_ROLL 0x00002000
146#define MAC_CONTROL_MISS_ROLL 0x00000400
147#define MAC_CONTROL_LOOP10 0x00000080
148#define MAC_CONTROL_CONN_SHF 5
149#define MAC_CONTROL_CONN_MSK (0x3 << MAC_CONTROL_CONN_SHF)
150#define MAC_CONTROL_MAC_LOOP 0x00000010
151#define MAC_CONTROL_FULL_DUP 0x00000008
152#define MAC_CONTROL_RESET 0x00000004
153#define MAC_CONTROL_HALT_IMM 0x00000002
154#define MAC_CONTROL_HALT_REQ 0x00000001
155
156/* CAM CONTROL register. */
157#define CAM_CONTROL_COMP_EN 0x00000010
158#define CAM_CONTROL_NEG_CAM 0x00000008
159#define CAM_CONTROL_BROAD_ACC 0x00000004
160#define CAM_CONTROL_GROUP_ACC 0x00000002
161#define CAM_CONTROL_STATION_ACC 0x00000001
162
163/* TRANSMIT CONTROL register. */
164#define TX_CTL_EN_COMP 0x00004000
165#define TX_CTL_EN_TX_PAR 0x00002000
166#define TX_CTL_EN_LATE_COLL 0x00001000
167#define TX_CTL_EN_EX_COLL 0x00000800
168#define TX_CTL_EN_L_CARR 0x00000400
169#define TX_CTL_EN_EX_DEFER 0x00000200
170#define TX_CTL_EN_UNDER 0x00000100
171#define TX_CTL_MII10 0x00000080
172#define TX_CTL_SD_PAUSE 0x00000040
173#define TX_CTL_NO_EX_DEF0 0x00000020
174#define TX_CTL_F_BACK 0x00000010
175#define TX_CTL_NO_CRC 0x00000008
176#define TX_CTL_NO_PAD 0x00000004
177#define TX_CTL_TX_HALT 0x00000002
178#define TX_CTL_TX_EN 0x00000001
179
180/* TRANSMIT STATUS register. */
181#define TX_STATUS_SQ_ERR 0x00010000
182#define TX_STATUS_TX_HALTED 0x00008000
183#define TX_STATUS_COMP 0x00004000
184#define TX_STATUS_TX_PAR 0x00002000
185#define TX_STATUS_LATE_COLL 0x00001000
186#define TX_STATUS_TX10_STAT 0x00000800
187#define TX_STATUS_L_CARR 0x00000400
188#define TX_STATUS_EX_DEFER 0x00000200
189#define TX_STATUS_UNDER 0x00000100
190#define TX_STATUS_IN_TX 0x00000080
191#define TX_STATUS_PAUSED 0x00000040
192#define TX_STATUS_TX_DEFERRED 0x00000020
193#define TX_STATUS_EX_COLL 0x00000010
194#define TX_STATUS_TX_COLL_SHF 0
195#define TX_STATUS_TX_COLL_MSK (0xf << TX_STATUS_TX_COLL_SHF)
196
197/* RECEIVE CONTROL register. */
198#define RX_CTL_EN_GOOD 0x00004000
199#define RX_CTL_EN_RX_PAR 0x00002000
200#define RX_CTL_EN_LONG_ERR 0x00000800
201#define RX_CTL_EN_OVER 0x00000400
202#define RX_CTL_EN_CRC_ERR 0x00000200
203#define RX_CTL_EN_ALIGN 0x00000100
204#define RX_CTL_IGNORE_CRC 0x00000040
205#define RX_CTL_PASS_CTL 0x00000020
206#define RX_CTL_STRIP_CRC 0x00000010
207#define RX_CTL_SHORT_EN 0x00000008
208#define RX_CTL_LONG_EN 0x00000004
209#define RX_CTL_RX_HALT 0x00000002
210#define RX_CTL_RX_EN 0x00000001
211
212/* RECEIVE STATUS register. */
213#define RX_STATUS_RX_HALTED 0x00008000
214#define RX_STATUS_GOOD 0x00004000
215#define RX_STATUS_RX_PAR 0x00002000
216#define RX_STATUS_LONG_ERR 0x00000800
217#define RX_STATUS_OVERFLOW 0x00000400
218#define RX_STATUS_CRC_ERR 0x00000200
219#define RX_STATUS_ALIGN_ERR 0x00000100
220#define RX_STATUS_RX10_STAT 0x00000080
221#define RX_STATUS_INT_RX 0x00000040
222#define RX_STATUS_CTL_RECD 0x00000020
223
224/* MD_CA register. */
225#define MD_CA_PRE_SUP 0x00001000
226#define MD_CA_BUSY 0x00000800
227#define MD_CA_WR 0x00000400
228#define MD_CA_PHY_SHF 5
229#define MD_CA_PHY_MSK (0x1f << MD_CA_PHY_SHF)
230#define MD_CA_ADDR_SHF 0
231#define MD_CA_ADDR_MSK (0x1f << MD_CA_ADDR_SHF)
232
233/* Tx Status/Control. */
234#define TX_STAT_CTL_OWNER_SHF 30
235#define TX_STAT_CTL_OWNER_MSK (0x3 << TX_STAT_CTL_OWNER_SHF)
236#define TX_STAT_CTL_FRAME_SHF 27
237#define TX_STAT_CTL_FRAME_MSK (0x7 << TX_STAT_CTL_FRAME_SHF)
238#define TX_STAT_CTL_STATUS_SHF 11
239#define TX_STAT_CTL_STATUS_MSK (0x1ffff << TX_STAT_CTL_STATUS_SHF)
240#define TX_STAT_CTL_LENGTH_SHF 0
241#define TX_STAT_CTL_LENGTH_MSK (0x7ff << TX_STAT_CTL_LENGTH_SHF)
242
243#define TX_STAT_CTL_ERROR_MSK ((TX_STATUS_SQ_ERR | \
244 TX_STATUS_TX_HALTED | \
245 TX_STATUS_TX_PAR | \
246 TX_STATUS_LATE_COLL | \
247 TX_STATUS_L_CARR | \
248 TX_STATUS_EX_DEFER | \
249 TX_STATUS_UNDER | \
250 TX_STATUS_PAUSED | \
251 TX_STATUS_TX_DEFERRED | \
252 TX_STATUS_EX_COLL | \
253 TX_STATUS_TX_COLL_MSK) \
254 << TX_STAT_CTL_STATUS_SHF)
255#define TX_STAT_CTL_INT_AFTER_TX 0x4
256
257/* Rx Status/Control. */
258#define RX_STAT_CTL_OWNER_SHF 30
259#define RX_STAT_CTL_OWNER_MSK (0x3 << RX_STAT_CTL_OWNER_SHF)
260#define RX_STAT_CTL_STATUS_SHF 11
261#define RX_STAT_CTL_STATUS_MSK (0xffff << RX_STAT_CTL_STATUS_SHF)
262#define RX_STAT_CTL_LENGTH_SHF 0
263#define RX_STAT_CTL_LENGTH_MSK (0x7ff << RX_STAT_CTL_LENGTH_SHF)
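
A minimal sketch of how these status/control words are composed and decoded
(illustrative helpers, not part of the original header; they only combine the
masks and shifts defined above):

static inline unsigned int tx_stat_ctl_pack(unsigned int owner,
					    unsigned int frame,
					    unsigned int len)
{
	return (owner << TX_STAT_CTL_OWNER_SHF) |
	       (frame << TX_STAT_CTL_FRAME_SHF) |
	       (len << TX_STAT_CTL_LENGTH_SHF);
}

static inline unsigned int rx_stat_ctl_length(unsigned int stat)
{
	return (stat & RX_STAT_CTL_LENGTH_MSK) >> RX_STAT_CTL_LENGTH_SHF;
}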
264
265
266
267/* The SAA9730 (LAN) controller register map, as seen via the PCI-bus. */
268#define SAA9730_LAN_REGS_ADDR 0x20400
269#define SAA9730_LAN_REGS_SIZE 0x00400
270
271struct lan_saa9730_regmap {
272 volatile unsigned int TxBuffA; /* 0x20400 */
273 volatile unsigned int TxBuffB; /* 0x20404 */
274 volatile unsigned int RxBuffA; /* 0x20408 */
275 volatile unsigned int RxBuffB; /* 0x2040c */
276 volatile unsigned int PacketCount; /* 0x20410 */
277 volatile unsigned int Ok2Use; /* 0x20414 */
278 volatile unsigned int LanDmaCtl; /* 0x20418 */
279 volatile unsigned int Timeout; /* 0x2041c */
280 volatile unsigned int DmaStatus; /* 0x20420 */
281 volatile unsigned int DmaTest; /* 0x20424 */
282 volatile unsigned char filler20428[0x20430 - 0x20428];
283 volatile unsigned int PauseCount; /* 0x20430 */
284 volatile unsigned int RemotePauseCount; /* 0x20434 */
285 volatile unsigned char filler20438[0x20440 - 0x20438];
286 volatile unsigned int MacCtl; /* 0x20440 */
287 volatile unsigned int CamCtl; /* 0x20444 */
288 volatile unsigned int TxCtl; /* 0x20448 */
289 volatile unsigned int TxStatus; /* 0x2044c */
290 volatile unsigned int RxCtl; /* 0x20450 */
291 volatile unsigned int RxStatus; /* 0x20454 */
292 volatile unsigned int StationMgmtData; /* 0x20458 */
293 volatile unsigned int StationMgmtCtl; /* 0x2045c */
294 volatile unsigned int CamAddress; /* 0x20460 */
295 volatile unsigned int CamData; /* 0x20464 */
296 volatile unsigned int CamEnable; /* 0x20468 */
297 volatile unsigned char filler2046c[0x20500 - 0x2046c];
298 volatile unsigned int DebugPCIMasterAddr; /* 0x20500 */
299 volatile unsigned int DebugLanTxStateMachine; /* 0x20504 */
300 volatile unsigned int DebugLanRxStateMachine; /* 0x20508 */
301 volatile unsigned int DebugLanTxFifoPointers; /* 0x2050c */
302 volatile unsigned int DebugLanRxFifoPointers; /* 0x20510 */
303 volatile unsigned int DebugLanCtlStateMachine; /* 0x20514 */
304};
305typedef volatile struct lan_saa9730_regmap t_lan_saa9730_regmap;
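
Because the struct mirrors the register block byte for byte, a single
ioremap() of the window makes every register addressable by field name. A
usage sketch (the function name is hypothetical; ioaddr is the BAR 1 base,
as in lan_saa9730_init() in saa9730.c):

static t_lan_saa9730_regmap *example_map_lan_regs(unsigned long ioaddr)
{
	t_lan_saa9730_regmap *regs =
		ioremap(ioaddr + SAA9730_LAN_REGS_ADDR, SAA9730_LAN_REGS_SIZE);

	if (regs)	/* hand both rx buffers to the hardware */
		writel(OK2USE_RX_A | OK2USE_RX_B, &regs->Ok2Use);
	return regs;
}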
306
307
308/* EVM interrupt control registers. */
309#define EVM_LAN_INT 0x00010000
310#define EVM_MASTER_EN 0x00000001
311
312/* The SAA9730 (EVM) controller register map, as seen via the PCI-bus. */
313#define SAA9730_EVM_REGS_ADDR 0x02000
314#define SAA9730_EVM_REGS_SIZE 0x00400
315
316struct evm_saa9730_regmap {
317 volatile unsigned int InterruptStatus1; /* 0x2000 */
318 volatile unsigned int InterruptEnable1; /* 0x2004 */
319 volatile unsigned int InterruptMonitor1; /* 0x2008 */
320 volatile unsigned int Counter; /* 0x200c */
321 volatile unsigned int CounterThreshold; /* 0x2010 */
322 volatile unsigned int CounterControl; /* 0x2014 */
323 volatile unsigned int GpioControl1; /* 0x2018 */
324 volatile unsigned int InterruptStatus2; /* 0x201c */
325 volatile unsigned int InterruptEnable2; /* 0x2020 */
326 volatile unsigned int InterruptMonitor2; /* 0x2024 */
327 volatile unsigned int GpioControl2; /* 0x2028 */
328 volatile unsigned int InterruptBlock1; /* 0x202c */
329 volatile unsigned int InterruptBlock2; /* 0x2030 */
330};
331typedef volatile struct evm_saa9730_regmap t_evm_saa9730_regmap;
332
333
334struct lan_saa9730_private {
335 /*
336 * Rx/Tx packet buffers.
337 * The Rx and Tx packets must be PACKET_SIZE aligned.
338 */
339 void *buffer_start;
340 unsigned int buffer_size;
341
342 /*
343 * DMA address of beginning of this object, returned
344 * by pci_alloc_consistent().
345 */
346 dma_addr_t dma_addr;
347
348 /* Pointer to the associated pci device structure */
349 struct pci_dev *pci_dev;
350
351 /* Pointer for the SAA9730 LAN controller register set. */
352 t_lan_saa9730_regmap *lan_saa9730_regs;
353
354 /* Pointer to the SAA9730 EVM register. */
355 t_evm_saa9730_regmap *evm_saa9730_regs;
356
357 /* Rcv buffer Index. */
358 unsigned char NextRcvPacketIndex;
359 /* Next buffer index. */
360 unsigned char NextRcvBufferIndex;
361
362 /* Index of next packet to use in that buffer. */
363 unsigned char NextTxmPacketIndex;
364 /* Next buffer index. */
365 unsigned char NextTxmBufferIndex;
366
367 /* Index of first pending packet ready to send. */
368 unsigned char PendingTxmPacketIndex;
369 /* Pending buffer index. */
370 unsigned char PendingTxmBufferIndex;
371
372 unsigned char DmaRcvPackets;
373 unsigned char DmaTxmPackets;
374
375 void *TxmBuffer[LAN_SAA9730_BUFFERS][LAN_SAA9730_TXM_Q_SIZE];
376 void *RcvBuffer[LAN_SAA9730_BUFFERS][LAN_SAA9730_RCV_Q_SIZE];
377 unsigned int TxBufferFree[LAN_SAA9730_BUFFERS];
378
379 unsigned char PhysicalAddress[LAN_SAA9730_CAM_ENTRIES][6];
380
381 spinlock_t lock;
382};
383
384#endif /* _SAA9730_H */
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index fb0b918e5ccb..07933f71b86d 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3500,11 +3500,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
 
 		dev->stats.rx_bytes += length;
 		/* Send the packet up the stack */
-#ifdef CONFIG_UGETH_NAPI
 		netif_receive_skb(skb);
-#else
-		netif_rx(skb);
-#endif /* CONFIG_UGETH_NAPI */
 	}
 
 	ugeth->dev->last_rx = jiffies;
@@ -3580,7 +3576,6 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
 	return 0;
 }
 
-#ifdef CONFIG_UGETH_NAPI
 static int ucc_geth_poll(struct napi_struct *napi, int budget)
 {
 	struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
@@ -3607,7 +3602,6 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
 
 	return howmany;
 }
-#endif /* CONFIG_UGETH_NAPI */
 
 static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
 {
@@ -3617,9 +3611,6 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
 	struct ucc_geth_info *ug_info;
 	register u32 ucce;
 	register u32 uccm;
-#ifndef CONFIG_UGETH_NAPI
-	register u32 rx_mask;
-#endif
 	register u32 tx_mask;
 	u8 i;
 
@@ -3636,21 +3627,11 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
 
 	/* check for receive events that require processing */
 	if (ucce & UCCE_RX_EVENTS) {
-#ifdef CONFIG_UGETH_NAPI
 		if (netif_rx_schedule_prep(dev, &ugeth->napi)) {
 			uccm &= ~UCCE_RX_EVENTS;
 			out_be32(uccf->p_uccm, uccm);
 			__netif_rx_schedule(dev, &ugeth->napi);
 		}
-#else
-		rx_mask = UCCE_RXBF_SINGLE_MASK;
-		for (i = 0; i < ug_info->numQueuesRx; i++) {
-			if (ucce & rx_mask)
-				ucc_geth_rx(ugeth, i, (int)ugeth->ug_info->bdRingLenRx[i]);
-			ucce &= ~rx_mask;
-			rx_mask <<= 1;
-		}
-#endif /* CONFIG_UGETH_NAPI */
 	}
 
 	/* Tx event processing */
@@ -3720,9 +3701,8 @@ static int ucc_geth_open(struct net_device *dev)
 		return err;
 	}
 
-#ifdef CONFIG_UGETH_NAPI
 	napi_enable(&ugeth->napi);
-#endif
+
 	err = ucc_geth_startup(ugeth);
 	if (err) {
 		if (netif_msg_ifup(ugeth))
@@ -3783,9 +3763,8 @@ static int ucc_geth_open(struct net_device *dev)
 	return err;
 
 out_err:
-#ifdef CONFIG_UGETH_NAPI
 	napi_disable(&ugeth->napi);
-#endif
+
 	return err;
 }
 
@@ -3796,9 +3775,7 @@ static int ucc_geth_close(struct net_device *dev)
 
 	ugeth_vdbg("%s: IN", __FUNCTION__);
 
-#ifdef CONFIG_UGETH_NAPI
 	napi_disable(&ugeth->napi);
-#endif
 
 	ucc_geth_stop(ugeth);
 
@@ -4050,9 +4027,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
 	dev->hard_start_xmit = ucc_geth_start_xmit;
 	dev->tx_timeout = ucc_geth_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
-#ifdef CONFIG_UGETH_NAPI
 	netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT);
-#endif /* CONFIG_UGETH_NAPI */
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = ucc_netpoll;
 #endif
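
With the CONFIG_UGETH_NAPI conditionals gone, ucc_geth_poll() is the only
receive path. For reference, the generic shape of a poll callback against
this kernel's NAPI API -- a sketch with hypothetical names (example_priv,
example_rx, example_unmask_rx_irq), not code from the patch:

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv =
		container_of(napi, struct example_priv, napi);
	int done = example_rx(priv, budget);	/* hypothetical rx worker */

	if (done < budget) {
		/* All pending work done: leave polling mode... */
		netif_rx_complete(priv->dev, napi);
		/* ...and let rx interrupts schedule us again. */
		example_unmask_rx_irq(priv);
	}
	return done;
}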
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 8c9d6ae2bb31..96dff04334b8 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -73,12 +73,7 @@ static const int multicast_filter_limit = 32;
    There are no ill effects from too-large receive rings. */
 #define TX_RING_SIZE	16
 #define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
-#ifdef CONFIG_VIA_RHINE_NAPI
 #define RX_RING_SIZE	64
-#else
-#define RX_RING_SIZE	16
-#endif
-
 
 /* Operational parameters that usually are not changed. */
 
@@ -583,7 +578,6 @@ static void rhine_poll(struct net_device *dev)
 }
 #endif
 
-#ifdef CONFIG_VIA_RHINE_NAPI
 static int rhine_napipoll(struct napi_struct *napi, int budget)
 {
 	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
@@ -604,7 +598,6 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
 	}
 	return work_done;
 }
-#endif
 
 static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
 {
@@ -784,9 +777,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = rhine_poll;
 #endif
-#ifdef CONFIG_VIA_RHINE_NAPI
 	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
-#endif
+
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
@@ -1056,9 +1048,7 @@ static void init_registers(struct net_device *dev)
 
 	rhine_set_rx_mode(dev);
 
-#ifdef CONFIG_VIA_RHINE_NAPI
 	napi_enable(&rp->napi);
-#endif
 
 	/* Enable interrupts by setting the interrupt mask. */
 	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
@@ -1193,9 +1183,7 @@ static void rhine_tx_timeout(struct net_device *dev)
 	/* protect against concurrent rx interrupts */
 	disable_irq(rp->pdev->irq);
 
-#ifdef CONFIG_VIA_RHINE_NAPI
 	napi_disable(&rp->napi);
-#endif
 
 	spin_lock(&rp->lock);
 
@@ -1319,16 +1307,12 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
 
 	if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
 			   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
-#ifdef CONFIG_VIA_RHINE_NAPI
 		iowrite16(IntrTxAborted |
 			  IntrTxDone | IntrTxError | IntrTxUnderrun |
 			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
 			  ioaddr + IntrEnable);
 
 		netif_rx_schedule(dev, &rp->napi);
-#else
-		rhine_rx(dev, RX_RING_SIZE);
-#endif
 	}
 
 	if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
@@ -1520,11 +1504,7 @@ static int rhine_rx(struct net_device *dev, int limit)
 						       PCI_DMA_FROMDEVICE);
 			}
 			skb->protocol = eth_type_trans(skb, dev);
-#ifdef CONFIG_VIA_RHINE_NAPI
 			netif_receive_skb(skb);
-#else
-			netif_rx(skb);
-#endif
 			dev->last_rx = jiffies;
 			rp->stats.rx_bytes += pkt_len;
 			rp->stats.rx_packets++;
@@ -1836,9 +1816,7 @@ static int rhine_close(struct net_device *dev)
 	spin_lock_irq(&rp->lock);
 
 	netif_stop_queue(dev);
-#ifdef CONFIG_VIA_RHINE_NAPI
 	napi_disable(&rp->napi);
-#endif
 
 	if (debug > 1)
 		printk(KERN_DEBUG "%s: Shutting down ethercard, "
@@ -1937,9 +1915,8 @@ static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
 	if (!netif_running(dev))
 		return 0;
 
-#ifdef CONFIG_VIA_RHINE_NAPI
 	napi_disable(&rp->napi);
-#endif
+
 	netif_device_detach(dev);
 	pci_save_state(pdev);
 
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index bcbf2fa9b94a..370ce30f2f45 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1102,61 +1102,41 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc
 
 static int velocity_init_rings(struct velocity_info *vptr)
 {
-	int i;
-	unsigned int psize;
-	unsigned int tsize;
+	struct velocity_opt *opt = &vptr->options;
+	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
+	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
+	struct pci_dev *pdev = vptr->pdev;
 	dma_addr_t pool_dma;
-	u8 *pool;
-
-	/*
-	 * Allocate all RD/TD rings a single pool
-	 */
-
-	psize = vptr->options.numrx * sizeof(struct rx_desc) +
-		vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
+	void *pool;
+	unsigned int i;
 
 	/*
+	 * Allocate all RD/TD rings a single pool.
+	 *
 	 * pci_alloc_consistent() fulfills the requirement for 64 bytes
 	 * alignment
 	 */
-	pool = pci_alloc_consistent(vptr->pdev, psize, &pool_dma);
-
-	if (pool == NULL) {
-		printk(KERN_ERR "%s : DMA memory allocation failed.\n",
+	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq +
+				    rx_ring_size, &pool_dma);
+	if (!pool) {
+		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
 			vptr->dev->name);
 		return -ENOMEM;
 	}
 
-	memset(pool, 0, psize);
-
-	vptr->rd_ring = (struct rx_desc *) pool;
-
+	vptr->rd_ring = pool;
 	vptr->rd_pool_dma = pool_dma;
 
-	tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
-	vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize,
-					     &vptr->tx_bufs_dma);
-
-	if (vptr->tx_bufs == NULL) {
-		printk(KERN_ERR "%s: DMA memory allocation failed.\n",
-			vptr->dev->name);
-		pci_free_consistent(vptr->pdev, psize, pool, pool_dma);
-		return -ENOMEM;
-	}
-
-	memset(vptr->tx_bufs, 0, vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq);
+	pool += rx_ring_size;
+	pool_dma += rx_ring_size;
 
-	i = vptr->options.numrx * sizeof(struct rx_desc);
-	pool += i;
-	pool_dma += i;
 	for (i = 0; i < vptr->num_txq; i++) {
-		int offset = vptr->options.numtx * sizeof(struct tx_desc);
-
+		vptr->td_rings[i] = pool;
 		vptr->td_pool_dma[i] = pool_dma;
-		vptr->td_rings[i] = (struct tx_desc *) pool;
-		pool += offset;
-		pool_dma += offset;
+		pool += tx_ring_size;
+		pool_dma += tx_ring_size;
 	}
+
 	return 0;
 }
 
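
After this rework the single pci_alloc_consistent() block is laid out with
the rx ring first and one tx ring per queue behind it, roughly:

/*
 *   pool_dma -> [ opt->numrx * struct rx_desc ]             vptr->rd_ring
 *               [ opt->numtx * struct tx_desc ]  (queue 0)  vptr->td_rings[0]
 *               ...
 *               [ opt->numtx * struct tx_desc ]  (last)     vptr->td_rings[num_txq - 1]
 */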
@@ -1169,19 +1149,13 @@ static int velocity_init_rings(struct velocity_info *vptr)
 
 static void velocity_free_rings(struct velocity_info *vptr)
 {
-	int size;
-
-	size = vptr->options.numrx * sizeof(struct rx_desc) +
-		vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
+	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
+		vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
 
 	pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);
-
-	size = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
-
-	pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma);
 }
 
-static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
+static void velocity_give_many_rx_descs(struct velocity_info *vptr)
 {
 	struct mac_regs __iomem *regs = vptr->mac_regs;
 	int avail, dirty, unusable;
@@ -1208,7 +1182,7 @@ static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
 
 static int velocity_rx_refill(struct velocity_info *vptr)
 {
-	int dirty = vptr->rd_dirty, done = 0, ret = 0;
+	int dirty = vptr->rd_dirty, done = 0;
 
 	do {
 		struct rx_desc *rd = vptr->rd_ring + dirty;
@@ -1218,8 +1192,7 @@ static int velocity_rx_refill(struct velocity_info *vptr)
 			break;
 
 		if (!vptr->rd_info[dirty].skb) {
-			ret = velocity_alloc_rx_buf(vptr, dirty);
-			if (ret < 0)
+			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
 				break;
 		}
 		done++;
@@ -1229,10 +1202,14 @@ static int velocity_rx_refill(struct velocity_info *vptr)
 	if (done) {
 		vptr->rd_dirty = dirty;
 		vptr->rd_filled += done;
-		velocity_give_many_rx_descs(vptr);
 	}
 
-	return ret;
+	return done;
+}
+
+static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
+{
+	vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
 }
 
 /**
@@ -1245,25 +1222,24 @@ static int velocity_rx_refill(struct velocity_info *vptr)
 
 static int velocity_init_rd_ring(struct velocity_info *vptr)
 {
-	int ret;
-	int mtu = vptr->dev->mtu;
-
-	vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
+	int ret = -ENOMEM;
 
 	vptr->rd_info = kcalloc(vptr->options.numrx,
 				sizeof(struct velocity_rd_info), GFP_KERNEL);
 	if (!vptr->rd_info)
-		return -ENOMEM;
+		goto out;
 
 	vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;
 
-	ret = velocity_rx_refill(vptr);
-	if (ret < 0) {
+	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
 		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
 			"%s: failed to allocate RX buffer.\n", vptr->dev->name);
 		velocity_free_rd_ring(vptr);
+		goto out;
 	}
 
+	ret = 0;
+out:
 	return ret;
 }
 
@@ -1313,10 +1289,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 
 static int velocity_init_td_ring(struct velocity_info *vptr)
 {
-	int i, j;
 	dma_addr_t curr;
-	struct tx_desc *td;
-	struct velocity_td_info *td_info;
+	unsigned int j;
 
 	/* Init the TD ring entries */
 	for (j = 0; j < vptr->num_txq; j++) {
@@ -1331,14 +1305,6 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
 			return -ENOMEM;
 		}
 
-		for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) {
-			td = &(vptr->td_rings[j][i]);
-			td_info = &(vptr->td_infos[j][i]);
-			td_info->buf = vptr->tx_bufs +
-				(j * vptr->options.numtx + i) * PKT_BUF_SZ;
-			td_info->buf_dma = vptr->tx_bufs_dma +
-				(j * vptr->options.numtx + i) * PKT_BUF_SZ;
-		}
 		vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
 	}
 	return 0;
@@ -1448,10 +1414,8 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 
 	vptr->rd_curr = rd_curr;
 
-	if (works > 0 && velocity_rx_refill(vptr) < 0) {
-		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
-			"%s: rx buf allocation failure\n", vptr->dev->name);
-	}
+	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
+		velocity_give_many_rx_descs(vptr);
 
 	VAR_USED(stats);
 	return works;
@@ -1867,7 +1831,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
 	/*
 	 *	Don't unmap the pre-allocated tx_bufs
 	 */
-	if (tdinfo->skb_dma && (tdinfo->skb_dma[0] != tdinfo->buf_dma)) {
+	if (tdinfo->skb_dma) {
 
 		for (i = 0; i < tdinfo->nskb_dma; i++) {
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
@@ -1898,6 +1862,8 @@ static int velocity_open(struct net_device *dev)
 	struct velocity_info *vptr = netdev_priv(dev);
 	int ret;
 
+	velocity_set_rxbufsize(vptr, dev->mtu);
+
 	ret = velocity_init_rings(vptr);
 	if (ret < 0)
 		goto out;
@@ -1913,6 +1879,8 @@ static int velocity_open(struct net_device *dev)
 	/* Ensure chip is running */
 	pci_set_power_state(vptr->pdev, PCI_D0);
 
+	velocity_give_many_rx_descs(vptr);
+
 	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
 	ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
@@ -1977,6 +1945,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 
 	dev->mtu = new_mtu;
 
+	velocity_set_rxbufsize(vptr, new_mtu);
+
 	ret = velocity_init_rd_ring(vptr);
 	if (ret < 0)
 		goto out_unlock;
@@ -2063,9 +2033,19 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct tx_desc *td_ptr;
 	struct velocity_td_info *tdinfo;
 	unsigned long flags;
-	int index;
 	int pktlen = skb->len;
-	__le16 len = cpu_to_le16(pktlen);
+	__le16 len;
+	int index;
+
+
+
+	if (skb->len < ETH_ZLEN) {
+		if (skb_padto(skb, ETH_ZLEN))
+			goto out;
+		pktlen = ETH_ZLEN;
+	}
+
+	len = cpu_to_le16(pktlen);
 
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
 	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
@@ -2083,23 +2063,6 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 	td_ptr->tdesc1.TCR = TCR0_TIC;
 	td_ptr->td_buf[0].size &= ~TD_QUEUE;
 
-	/*
-	 *	Pad short frames.
-	 */
-	if (pktlen < ETH_ZLEN) {
-		/* Cannot occur until ZC support */
-		pktlen = ETH_ZLEN;
-		len = cpu_to_le16(ETH_ZLEN);
-		skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
-		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
-		tdinfo->skb = skb;
-		tdinfo->skb_dma[0] = tdinfo->buf_dma;
-		td_ptr->tdesc0.len = len;
-		td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
-		td_ptr->td_buf[0].pa_high = 0;
-		td_ptr->td_buf[0].size = len;	/* queue is 0 anyway */
-		tdinfo->nskb_dma = 1;
-	} else
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
 	if (skb_shinfo(skb)->nr_frags > 0) {
 		int nfrags = skb_shinfo(skb)->nr_frags;
@@ -2191,7 +2154,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 	dev->trans_start = jiffies;
 	spin_unlock_irqrestore(&vptr->lock, flags);
-	return 0;
+out:
+	return NETDEV_TX_OK;
 }
 
 /**
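
The out:/NETDEV_TX_OK change above matches the hard_start_xmit contract;
note that skb_padto() already frees the skb on failure, which is why the
error path can still return NETDEV_TX_OK. A sketch of the convention with
hypothetical helpers (example_ring_full, example_queue_to_hw):

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (example_ring_full(dev)) {	/* hypothetical */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* skb not consumed; core will retry */
	}

	example_queue_to_hw(dev, skb);	/* hypothetical */
	dev->trans_start = jiffies;
	return NETDEV_TX_OK;		/* skb consumed */
}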
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 7387be4f428d..86446147284c 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -236,10 +236,8 @@ struct velocity_rd_info {
 
 struct velocity_td_info {
 	struct sk_buff *skb;
-	u8 *buf;
 	int nskb_dma;
 	dma_addr_t skb_dma[7];
-	dma_addr_t buf_dma;
 };
 
 enum velocity_owner {
@@ -1506,9 +1504,6 @@ struct velocity_info {
 	dma_addr_t rd_pool_dma;
 	dma_addr_t td_pool_dma[TX_QUEUE_NO];
 
-	dma_addr_t tx_bufs_dma;
-	u8 *tx_bufs;
-
 	struct vlan_group *vlgrp;
 	u8 ip_addr[4];
 	enum chip_type chip_id;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4452306d5328..c28d7cb2035b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -550,7 +550,8 @@ static struct virtio_device_id id_table[] = {
 };
 
 static unsigned int features[] = {
-	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
+	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
+	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
 	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
 	VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY,
 };
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 203c5504fe43..b54ec16dfbda 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -828,6 +828,19 @@ static inline void netif_napi_add(struct net_device *dev,
 	set_bit(NAPI_STATE_SCHED, &napi->state);
 }
 
+/**
+ *	netif_napi_del - remove a napi context
+ *	@napi: napi context
+ *
+ *	netif_napi_del() removes a napi context from the network device napi list
+ */
+static inline void netif_napi_del(struct napi_struct *napi)
+{
+#ifdef CONFIG_NETPOLL
+	list_del(&napi->dev_list);
+#endif
+}
+
 struct packet_type {
 	__be16 type;	/* This is really htons(ether_type). */
 	struct net_device *dev;	/* NULL is wildcarded here */
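
A teardown sketch showing where the new helper pairs with netif_napi_add()
-- hypothetical names (example_priv, example_remove), not code from the
patch:

static void example_remove(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	unregister_netdev(dev);
	netif_napi_del(&priv->napi);	/* unhook from the device's napi list */
	free_netdev(dev);
}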