Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/char/synclink_gt.c | 2
-rw-r--r--  drivers/char/tlclk.c | 93
-rw-r--r--  drivers/edac/Kconfig | 102
-rw-r--r--  drivers/edac/Makefile | 18
-rw-r--r--  drivers/edac/amd76x_edac.c | 356
-rw-r--r--  drivers/edac/e752x_edac.c | 1071
-rw-r--r--  drivers/edac/e7xxx_edac.c | 558
-rw-r--r--  drivers/edac/edac_mc.c | 2209
-rw-r--r--  drivers/edac/edac_mc.h | 448
-rw-r--r--  drivers/edac/i82860_edac.c | 299
-rw-r--r--  drivers/edac/i82875p_edac.c | 532
-rw-r--r--  drivers/edac/r82600_edac.c | 407
-rw-r--r--  drivers/md/kcopyd.c | 1
-rw-r--r--  drivers/net/Kconfig | 10
-rw-r--r--  drivers/net/b44.c | 2
-rw-r--r--  drivers/net/cassini.c | 36
-rw-r--r--  drivers/net/e100.c | 142
-rw-r--r--  drivers/net/e1000/e1000.h | 51
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 474
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 71
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 45
-rw-r--r--  drivers/net/e1000/e1000_main.c | 1612
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 2
-rw-r--r--  drivers/net/e1000/e1000_param.c | 58
-rw-r--r--  drivers/net/mv643xx_eth.c | 680
-rw-r--r--  drivers/net/skge.c | 20
-rw-r--r--  drivers/net/sky2.c | 219
-rw-r--r--  drivers/net/spider_net.c | 512
-rw-r--r--  drivers/net/spider_net.h | 75
-rw-r--r--  drivers/net/spider_net_ethtool.c | 19
-rw-r--r--  drivers/net/wireless/airo.c | 21
-rw-r--r--  drivers/net/wireless/atmel.c | 4
-rw-r--r--  drivers/net/wireless/hostap/Kconfig | 22
-rw-r--r--  drivers/net/wireless/hostap/Makefile | 3
-rw-r--r--  drivers/net/wireless/hostap/hostap.h | 37
-rw-r--r--  drivers/net/wireless/hostap/hostap_80211.h | 3
-rw-r--r--  drivers/net/wireless/hostap/hostap_80211_rx.c | 11
-rw-r--r--  drivers/net/wireless/hostap/hostap_80211_tx.c | 15
-rw-r--r--  drivers/net/wireless/hostap/hostap_ap.c | 36
-rw-r--r--  drivers/net/wireless/hostap/hostap_ap.h | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_common.h | 3
-rw-r--r--  drivers/net/wireless/hostap/hostap_config.h | 13
-rw-r--r--  drivers/net/wireless/hostap/hostap_info.c | 3
-rw-r--r--  drivers/net/wireless/hostap/hostap_ioctl.c | 12
-rw-r--r--  drivers/net/wireless/hostap/hostap_main.c | 60
-rw-r--r--  drivers/net/wireless/hostap/hostap_proc.c | 7
-rw-r--r--  drivers/net/wireless/hostap/hostap_wlan.h | 4
-rw-r--r--  drivers/net/wireless/ipw2100.c | 434
-rw-r--r--  drivers/net/wireless/ipw2200.c | 14
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c | 2
-rw-r--r--  drivers/net/wireless/prism54/islpci_eth.c | 2
-rw-r--r--  drivers/net/wireless/ray_cs.c | 2
-rw-r--r--  drivers/net/wireless/wavelan_cs.c | 2
-rw-r--r--  drivers/pci/quirks.c | 5
-rw-r--r--  drivers/scsi/ahci.c | 10
-rw-r--r--  drivers/scsi/ata_piix.c | 3
-rw-r--r--  drivers/scsi/libata-core.c | 73
-rw-r--r--  drivers/scsi/sata_promise.c | 16
-rw-r--r--  drivers/scsi/sata_svw.c | 1
-rw-r--r--  drivers/serial/8250.c | 13
-rw-r--r--  drivers/serial/8250_pci.c | 10
-rw-r--r--  drivers/serial/Kconfig | 2
-rw-r--r--  drivers/serial/at91_serial.c | 2
-rw-r--r--  drivers/serial/suncore.c | 34
-rw-r--r--  drivers/serial/sunsab.c | 7
67 files changed, 8856 insertions(+), 2159 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 283c089537bc..bddf431bbb72 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -68,4 +68,6 @@ source "drivers/infiniband/Kconfig"
 
 source "drivers/sn/Kconfig"
 
+source "drivers/edac/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 7c45050ecd03..619dd964c51c 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_PHONE) += telephony/
 obj-$(CONFIG_MD)		+= md/
 obj-$(CONFIG_BT)		+= bluetooth/
 obj-$(CONFIG_ISDN)		+= isdn/
+obj-$(CONFIG_EDAC)		+= edac/
 obj-$(CONFIG_MCA)		+= mca/
 obj-$(CONFIG_EISA)		+= eisa/
 obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 07c9be6a6bbf..a85a60a93deb 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -2630,7 +2630,7 @@ static int get_interface(struct slgt_info *info, int __user *if_mode)
 static int set_interface(struct slgt_info *info, int if_mode)
 {
 	unsigned long flags;
-	unsigned char val;
+	unsigned short val;
 
 	DBGINFO(("%s set_interface=%x)\n", info->device_name, if_mode));
 	spin_lock_irqsave(&info->lock,flags);
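
The one-line type change above widens the local from unsigned char to unsigned short, presumably because set_interface() builds a 16-bit register image in val and an 8-bit local would silently drop the high byte. A freestanding sketch of that failure mode (illustrative values only, not the driver's actual register layout):

    #include <stdio.h>

    int main(void)
    {
    	unsigned short reg = 0x0180;	/* 16-bit register image */
    	unsigned char  as_char = reg;	/* old type: high byte lost */
    	unsigned short as_short = reg;	/* new type: value intact */

    	/* prints "char: 0x80, short: 0x0180" */
    	printf("char: 0x%02x, short: 0x%04x\n", as_char, as_short);
    	return 0;
    }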
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index bc56df8a3474..4c272189cd42 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -34,7 +34,6 @@
 #include <linux/kernel.h>	/* printk() */
 #include <linux/fs.h>		/* everything... */
 #include <linux/errno.h>	/* error codes */
-#include <linux/delay.h>	/* udelay */
 #include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
@@ -156,6 +155,8 @@ This directory exports the following interfaces. There operation is
 documented in the MCPBL0010 TPS under the Telecom Clock API section, 11.4.
 alarms :
 current_ref :
+received_ref_clk3a :
+received_ref_clk3b :
 enable_clk3a_output :
 enable_clk3b_output :
 enable_clka0_output :
@@ -165,7 +166,7 @@ enable_clkb1_output :
 filter_select :
 hardware_switching :
 hardware_switching_mode :
-interrupt_switch :
+telclock_version :
 mode_select :
 refalign :
 reset :
@@ -173,7 +174,6 @@ select_amcb1_transmit_clock :
 select_amcb2_transmit_clock :
 select_redundant_clock :
 select_ref_frequency :
-test_mode :
 
 All sysfs interfaces are integers in hex format, i.e echo 99 > refalign
 has the same effect as echo 0x99 > refalign.
@@ -226,7 +226,7 @@ static int tlclk_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
+static ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
 		loff_t *f_pos)
 {
 	if (count < sizeof(struct tlclk_alarms))
@@ -242,7 +242,7 @@ ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
 	return sizeof(struct tlclk_alarms);
 }
 
-ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count,
+static ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count,
 		loff_t *f_pos)
 {
 	return 0;
@@ -278,21 +278,21 @@ static ssize_t show_current_ref(struct device *d,
 static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL);
 
 
-static ssize_t show_interrupt_switch(struct device *d,
+static ssize_t show_telclock_version(struct device *d,
 		struct device_attribute *attr, char *buf)
 {
 	unsigned long ret_val;
 	unsigned long flags;
 
 	spin_lock_irqsave(&event_lock, flags);
-	ret_val = inb(TLCLK_REG6);
+	ret_val = inb(TLCLK_REG5);
 	spin_unlock_irqrestore(&event_lock, flags);
 
 	return sprintf(buf, "0x%lX\n", ret_val);
 }
 
-static DEVICE_ATTR(interrupt_switch, S_IRUGO,
-		show_interrupt_switch, NULL);
+static DEVICE_ATTR(telclock_version, S_IRUGO,
+		show_telclock_version, NULL);
 
 static ssize_t show_alarms(struct device *d,
 		struct device_attribute *attr, char *buf)
@@ -309,6 +309,50 @@ static ssize_t show_alarms(struct device *d,
 
 static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
 
+static ssize_t store_received_ref_clk3a(struct device *d,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG1, 0xef, val);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3a, S_IWUGO, NULL,
+		store_received_ref_clk3a);
+
+
+static ssize_t store_received_ref_clk3b(struct device *d,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG1, 0xef, val << 1);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3b, S_IWUGO, NULL,
+		store_received_ref_clk3b);
+
+
 static ssize_t store_enable_clk3b_output(struct device *d,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -436,26 +480,6 @@ static ssize_t store_enable_clka0_output(struct device *d,
 static DEVICE_ATTR(enable_clka0_output, S_IWUGO, NULL,
 		store_enable_clka0_output);
 
-static ssize_t store_test_mode(struct device *d,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	unsigned long flags;
-	unsigned long tmp;
-	unsigned char val;
-
-	sscanf(buf, "%lX", &tmp);
-	dev_dbg(d, "tmp = 0x%lX\n", tmp);
-
-	val = (unsigned char)tmp;
-	spin_lock_irqsave(&event_lock, flags);
-	SET_PORT_BITS(TLCLK_REG4, 0xfd, 2);
-	spin_unlock_irqrestore(&event_lock, flags);
-
-	return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(test_mode, S_IWUGO, NULL, store_test_mode);
-
 static ssize_t store_select_amcb2_transmit_clock(struct device *d,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -475,7 +499,7 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d,
 		SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38);
 		switch (val) {
 		case CLK_8_592MHz:
-			SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
+			SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
 			break;
 		case CLK_11_184MHz:
 			SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
@@ -484,7 +508,7 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d,
 			SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
 			break;
 		case CLK_44_736MHz:
-			SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
+			SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
 			break;
 		}
 	} else
@@ -653,9 +677,7 @@ static ssize_t store_refalign (struct device *d,
 	dev_dbg(d, "tmp = 0x%lX\n", tmp);
 	spin_lock_irqsave(&event_lock, flags);
 	SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
-	udelay(2);
 	SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08);
-	udelay(2);
 	SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
 	spin_unlock_irqrestore(&event_lock, flags);
 
@@ -706,15 +728,16 @@ static DEVICE_ATTR(reset, S_IWUGO, NULL, store_reset);
 
 static struct attribute *tlclk_sysfs_entries[] = {
 	&dev_attr_current_ref.attr,
-	&dev_attr_interrupt_switch.attr,
+	&dev_attr_telclock_version.attr,
 	&dev_attr_alarms.attr,
+	&dev_attr_received_ref_clk3a.attr,
+	&dev_attr_received_ref_clk3b.attr,
 	&dev_attr_enable_clk3a_output.attr,
 	&dev_attr_enable_clk3b_output.attr,
 	&dev_attr_enable_clkb1_output.attr,
 	&dev_attr_enable_clka1_output.attr,
 	&dev_attr_enable_clkb0_output.attr,
 	&dev_attr_enable_clka0_output.attr,
-	&dev_attr_test_mode.attr,
 	&dev_attr_select_amcb1_transmit_clock.attr,
 	&dev_attr_select_amcb2_transmit_clock.attr,
 	&dev_attr_select_redundant_clock.attr,
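
As the in-file documentation above notes, every tlclk sysfs attribute parses a hex integer ("echo 99 > refalign" equals "echo 0x99 > refalign"). A minimal userspace sketch of driving the attributes added by this patch; the sysfs path is an assumption for illustration and depends on how the platform device registers on a given board:

    #include <stdio.h>

    /* Hypothetical location; adjust to wherever telco_clock shows up. */
    #define TLCLK_SYSFS "/sys/devices/platform/telco_clock"

    static int tlclk_store(const char *attr, unsigned int val)
    {
    	char path[256];
    	FILE *f;

    	snprintf(path, sizeof(path), TLCLK_SYSFS "/%s", attr);
    	f = fopen(path, "w");
    	if (!f)
    		return -1;
    	fprintf(f, "0x%X\n", val);	/* the driver parses with "%lX" */
    	return fclose(f);
    }

    int main(void)
    {
    	tlclk_store("received_ref_clk3a", 1);	/* attribute added above */
    	tlclk_store("refalign", 0x99);
    	return 0;
    }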
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
new file mode 100644
index 000000000000..4819e7fc00dd
--- /dev/null
+++ b/drivers/edac/Kconfig
@@ -0,0 +1,102 @@
+#
+#	EDAC Kconfig
+#	Copyright (c) 2003 Linux Networx
+#	Licensed and distributed under the GPL
+#
+# $Id: Kconfig,v 1.4.2.7 2005/07/08 22:05:38 dsp_llnl Exp $
+#
+
+menu 'EDAC - error detection and reporting (RAS)'
+
+config EDAC
+	tristate "EDAC core system error reporting"
+	depends on X86
+	default y
+	help
+	  EDAC is designed to report errors in the core system.
+	  These are low-level errors that are reported in the CPU or
+	  supporting chipset: memory errors, cache errors, PCI errors,
+	  thermal throttling, etc..  If unsure, select 'Y'.
+
+
+comment "Reporting subsystems"
+	depends on EDAC
+
+config EDAC_DEBUG
+	bool "Debugging"
+	depends on EDAC
+	help
+	  This turns on debugging information for the entire EDAC
+	  sub-system. You can insert module with "debug_level=x", current
+	  there're four debug levels (x=0,1,2,3 from low to high).
+	  Usually you should select 'N'.
+
+config EDAC_MM_EDAC
+	tristate "Main Memory EDAC (Error Detection And Correction) reporting"
+	depends on EDAC
+	default y
+	help
+	  Some systems are able to detect and correct errors in main
+	  memory.  EDAC can report statistics on memory error
+	  detection and correction (EDAC - or commonly referred to ECC
+	  errors).  EDAC will also try to decode where these errors
+	  occurred so that a particular failing memory module can be
+	  replaced.  If unsure, select 'Y'.
+
+
+config EDAC_AMD76X
+	tristate "AMD 76x (760, 762, 768)"
+	depends on EDAC_MM_EDAC && PCI
+	help
+	  Support for error detection and correction on the AMD 76x
+	  series of chipsets used with the Athlon processor.
+
+config EDAC_E7XXX
+	tristate "Intel e7xxx (e7205, e7500, e7501, e7505)"
+	depends on EDAC_MM_EDAC && PCI
+	help
+	  Support for error detection and correction on the Intel
+	  E7205, E7500, E7501 and E7505 server chipsets.
+
+config EDAC_E752X
+	tristate "Intel e752x (e7520, e7525, e7320)"
+	depends on EDAC_MM_EDAC && PCI
+	help
+	  Support for error detection and correction on the Intel
+	  E7520, E7525, E7320 server chipsets.
+
+config EDAC_I82875P
+	tristate "Intel 82875p (D82875P, E7210)"
+	depends on EDAC_MM_EDAC && PCI
+	help
+	  Support for error detection and correction on the Intel
+	  DP82785P and E7210 server chipsets.
+
+config EDAC_I82860
+	tristate "Intel 82860"
+	depends on EDAC_MM_EDAC && PCI
+	help
+	  Support for error detection and correction on the Intel
+	  82860 chipset.
+
+config EDAC_R82600
+	tristate "Radisys 82600 embedded chipset"
+	depends on EDAC_MM_EDAC
+	help
+	  Support for error detection and correction on the Radisys
+	  82600 embedded chipset.
+
+choice
+	prompt "Error detecting method"
+	depends on EDAC
+	default EDAC_POLL
+
+config EDAC_POLL
+	bool "Poll for errors"
+	depends on EDAC
+	help
+	  Poll the chipset periodically to detect errors.
+
+endchoice
+
+endmenu
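
For reference, a .config fragment that exercises the new menu might read as follows (a sketch; the symbols are the ones declared above, with the core built in and one chipset driver as a module):

    CONFIG_EDAC=y
    CONFIG_EDAC_MM_EDAC=y
    CONFIG_EDAC_E752X=m
    CONFIG_EDAC_POLL=y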
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
new file mode 100644
index 000000000000..93137fdab4b3
--- /dev/null
+++ b/drivers/edac/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the Linux kernel EDAC drivers.
+#
+# Copyright 02 Jul 2003, Linux Networx (http://lnxi.com)
+# This file may be distributed under the terms of the
+# GNU General Public License.
+#
+# $Id: Makefile,v 1.4.2.3 2005/07/08 22:05:38 dsp_llnl Exp $
+
+
+obj-$(CONFIG_EDAC_MM_EDAC)	+= edac_mc.o
+obj-$(CONFIG_EDAC_AMD76X)	+= amd76x_edac.o
+obj-$(CONFIG_EDAC_E7XXX)	+= e7xxx_edac.o
+obj-$(CONFIG_EDAC_E752X)	+= e752x_edac.o
+obj-$(CONFIG_EDAC_I82875P)	+= i82875p_edac.o
+obj-$(CONFIG_EDAC_I82860)	+= i82860_edac.o
+obj-$(CONFIG_EDAC_R82600)	+= r82600_edac.o
+
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
new file mode 100644
index 000000000000..2fcc8120b53c
--- /dev/null
+++ b/drivers/edac/amd76x_edac.c
@@ -0,0 +1,356 @@
+/*
+ * AMD 76x Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ *	http://www.anime.net/~goemon/linux-ecc/
+ *
+ * $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_mc.h"
+
+
+#define AMD76X_NR_CSROWS 8
+#define AMD76X_NR_CHANS  1
+#define AMD76X_NR_DIMMS  4
+
+
+/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
+#define AMD76X_ECC_MODE_STATUS	0x48	/* Mode and status of ECC (32b)
+					 *
+					 * 31:16 reserved
+					 * 15:14 SERR enabled: x1=ue 1x=ce
+					 * 13    reserved
+					 * 12    diag: disabled, enabled
+					 * 11:10 mode: dis, EC, ECC, ECC+scrub
+					 *  9:8  status: x1=ue 1x=ce
+					 *  7:4  UE cs row
+					 *  3:0  CE cs row
+					 */
+#define AMD76X_DRAM_MODE_STATUS	0x58	/* DRAM Mode and status (32b)
+					 *
+					 * 31:26 clock disable 5 - 0
+					 * 25    SDRAM init
+					 * 24    reserved
+					 * 23    mode register service
+					 * 22:21 suspend to RAM
+					 * 20    burst refresh enable
+					 * 19    refresh disable
+					 * 18    reserved
+					 * 17:16 cycles-per-refresh
+					 * 15:8  reserved
+					 *  7:0  x4 mode enable 7 - 0
+					 */
+#define AMD76X_MEM_BASE_ADDR	0xC0	/* Memory base address (8 x 32b)
+					 *
+					 * 31:23 chip-select base
+					 * 22:16 reserved
+					 * 15:7  chip-select mask
+					 *  6:3  reserved
+					 *  2:1  address mode
+					 *  0    chip-select enable
+					 */
+
+
+struct amd76x_error_info {
+	u32 ecc_mode_status;
+};
+
+
+enum amd76x_chips {
+	AMD761 = 0,
+	AMD762
+};
+
+
+struct amd76x_dev_info {
+	const char *ctl_name;
+};
+
+
+static const struct amd76x_dev_info amd76x_devs[] = {
+	[AMD761] = {.ctl_name = "AMD761"},
+	[AMD762] = {.ctl_name = "AMD762"},
+};
+
+
+/**
+ *	amd76x_get_error_info	-	fetch error information
+ *	@mci: Memory controller
+ *	@info: Info to fill in
+ *
+ *	Fetch and store the AMD76x ECC status. Clear pending status
+ *	on the chip so that further errors will be reported
+ */
+
+static void amd76x_get_error_info (struct mem_ctl_info *mci,
+		struct amd76x_error_info *info)
+{
+	pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS,
+			      &info->ecc_mode_status);
+
+	if (info->ecc_mode_status & BIT(8))
+		pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
+				 (u32) BIT(8), (u32) BIT(8));
+
+	if (info->ecc_mode_status & BIT(9))
+		pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
+				 (u32) BIT(9), (u32) BIT(9));
+}
+
+
+/**
+ *	amd76x_process_error_info	-	Error check
+ *	@mci: Memory controller
+ *	@info: Previously fetched information from chip
+ *	@handle_errors: 1 if we should do recovery
+ *
+ *	Process the chip state and decide if an error has occurred.
+ *	A return of 1 indicates an error. Also if handle_errors is true
+ *	then attempt to handle and clean up after the error
+ */
+
+static int amd76x_process_error_info (struct mem_ctl_info *mci,
+		struct amd76x_error_info *info, int handle_errors)
+{
+	int error_found;
+	u32 row;
+
+	error_found = 0;
+
+	/*
+	 * Check for an uncorrectable error
+	 */
+	if (info->ecc_mode_status & BIT(8)) {
+		error_found = 1;
+
+		if (handle_errors) {
+			row = (info->ecc_mode_status >> 4) & 0xf;
+			edac_mc_handle_ue(mci,
+				mci->csrows[row].first_page, 0, row,
+				mci->ctl_name);
+		}
+	}
+
+	/*
+	 * Check for a correctable error
+	 */
+	if (info->ecc_mode_status & BIT(9)) {
+		error_found = 1;
+
+		if (handle_errors) {
+			row = info->ecc_mode_status & 0xf;
+			edac_mc_handle_ce(mci,
+				mci->csrows[row].first_page, 0, 0, row, 0,
+				mci->ctl_name);
+		}
+	}
+	return error_found;
+}
+
+/**
+ *	amd76x_check	-	Poll the controller
+ *	@mci: Memory controller
+ *
+ *	Called by the poll handlers this function reads the status
+ *	from the controller and checks for errors.
+ */
+
+static void amd76x_check(struct mem_ctl_info *mci)
+{
+	struct amd76x_error_info info;
+	debugf3("MC: " __FILE__ ": %s()\n", __func__);
+	amd76x_get_error_info(mci, &info);
+	amd76x_process_error_info(mci, &info, 1);
+}
+
+
+/**
+ *	amd76x_probe1	-	Perform set up for detected device
+ *	@pdev; PCI device detected
+ *	@dev_idx: Device type index
+ *
+ *	We have found an AMD76x and now need to set up the memory
+ *	controller status reporting. We configure and set up the
+ *	memory controller reporting and claim the device.
+ */
+
+static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	int rc = -ENODEV;
+	int index;
+	struct mem_ctl_info *mci = NULL;
+	enum edac_type ems_modes[] = {
+		EDAC_NONE,
+		EDAC_EC,
+		EDAC_SECDED,
+		EDAC_SECDED
+	};
+	u32 ems;
+	u32 ems_mode;
+
+	debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+	pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
+	ems_mode = (ems >> 10) & 0x3;
+
+	mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
+
+	if (mci == NULL) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+	mci->pdev = pci_dev_get(pdev);
+	mci->mtype_cap = MEM_FLAG_RDDR;
+
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+	mci->edac_cap = ems_mode ?
+	    (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
+
+	mci->mod_name = BS_MOD_STR;
+	mci->mod_ver = "$Revision: 1.4.2.5 $";
+	mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
+	mci->edac_check = amd76x_check;
+	mci->ctl_page_to_phys = NULL;
+
+	for (index = 0; index < mci->nr_csrows; index++) {
+		struct csrow_info *csrow = &mci->csrows[index];
+		u32 mba;
+		u32 mba_base;
+		u32 mba_mask;
+		u32 dms;
+
+		/* find the DRAM Chip Select Base address and mask */
+		pci_read_config_dword(mci->pdev,
+				      AMD76X_MEM_BASE_ADDR + (index * 4),
+				      &mba);
+
+		if (!(mba & BIT(0)))
+			continue;
+
+		mba_base = mba & 0xff800000UL;
+		mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
+
+		pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS,
+				      &dms);
+
+		csrow->first_page = mba_base >> PAGE_SHIFT;
+		csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+		csrow->page_mask = mba_mask >> PAGE_SHIFT;
+		csrow->grain = csrow->nr_pages << PAGE_SHIFT;
+		csrow->mtype = MEM_RDDR;
+		csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+		csrow->edac_mode = ems_modes[ems_mode];
+	}
+
+	/* clear counters */
+	pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, (u32) (0x3 << 8),
+			 (u32) (0x3 << 8));
+
+	if (edac_mc_add_mc(mci)) {
+		debugf3("MC: " __FILE__
+			": %s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail;
+	}
+
+	/* get this far and it's successful */
+	debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+	return 0;
+
+fail:
+	if (mci) {
+		if(mci->pdev)
+			pci_dev_put(mci->pdev);
+		edac_mc_free(mci);
+	}
+	return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit amd76x_init_one(struct pci_dev *pdev,
+		const struct pci_device_id *ent)
+{
+	debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+	/* don't need to call pci_device_enable() */
+	return amd76x_probe1(pdev, ent->driver_data);
+}
+
+
+/**
+ *	amd76x_remove_one	-	driver shutdown
+ *	@pdev: PCI device being handed back
+ *
+ *	Called when the driver is unloaded. Find the matching mci
+ *	structure for the device then delete the mci and free the
+ *	resources.
+ */
+
+static void __devexit amd76x_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+
+	debugf0(__FILE__ ": %s()\n", __func__);
+
+	if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
+		return;
+	if (edac_mc_del_mc(mci))
+		return;
+	pci_dev_put(mci->pdev);
+	edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
+	{PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 AMD762},
+	{PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 AMD761},
+	{0,}			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
+
+
+static struct pci_driver amd76x_driver = {
+	.name = BS_MOD_STR,
+	.probe = amd76x_init_one,
+	.remove = __devexit_p(amd76x_remove_one),
+	.id_table = amd76x_pci_tbl,
+};
+
+static int __init amd76x_init(void)
+{
+	return pci_register_driver(&amd76x_driver);
+}
+
+static void __exit amd76x_exit(void)
+{
+	pci_unregister_driver(&amd76x_driver);
+}
+
+module_init(amd76x_init);
+module_exit(amd76x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
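
To make the AMD76X_ECC_MODE_STATUS layout concrete, here is a freestanding sketch of the same decode amd76x_process_error_info() performs, using an invented register value (per the comment above: bit 8 flags a UE with its csrow in bits 7:4, bit 9 a CE with its csrow in bits 3:0):

    #include <stdio.h>

    #define BIT(n) (1U << (n))

    int main(void)
    {
    	unsigned int ems = BIT(9) | 0x5;	/* pretend: CE pending in csrow 5 */

    	if (ems & BIT(8))			/* UE status bit */
    		printf("UE in csrow %u\n", (ems >> 4) & 0xf);
    	if (ems & BIT(9))			/* CE status bit */
    		printf("CE in csrow %u\n", ems & 0xf);
    	return 0;
    }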
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
new file mode 100644
index 000000000000..770a5a633079
--- /dev/null
+++ b/drivers/edac/e752x_edac.c
@@ -0,0 +1,1071 @@
1/*
2 * Intel e752x Memory Controller kernel module
3 * (C) 2004 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * See "enum e752x_chips" below for supported chipsets
8 *
9 * Written by Tom Zimmerman
10 *
11 * Contributors:
12 * Thayne Harbaugh at realmsys.com (?)
13 * Wang Zhenyu at intel.com
14 * Dave Jiang at mvista.com
15 *
16 * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
17 *
18 */
19
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/init.h>
24
25#include <linux/pci.h>
26#include <linux/pci_ids.h>
27
28#include <linux/slab.h>
29
30#include "edac_mc.h"
31
32
33#ifndef PCI_DEVICE_ID_INTEL_7520_0
34#define PCI_DEVICE_ID_INTEL_7520_0 0x3590
35#endif /* PCI_DEVICE_ID_INTEL_7520_0 */
36
37#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
38#define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
39#endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
40
41#ifndef PCI_DEVICE_ID_INTEL_7525_0
42#define PCI_DEVICE_ID_INTEL_7525_0 0x359E
43#endif /* PCI_DEVICE_ID_INTEL_7525_0 */
44
45#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
46#define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
47#endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
48
49#ifndef PCI_DEVICE_ID_INTEL_7320_0
50#define PCI_DEVICE_ID_INTEL_7320_0 0x3592
51#endif /* PCI_DEVICE_ID_INTEL_7320_0 */
52
53#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
54#define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
55#endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
56
57#define E752X_NR_CSROWS 8 /* number of csrows */
58
59
60/* E752X register addresses - device 0 function 0 */
61#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
62#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
63 /*
64 * 31:30 Device width row 7
65 * 01=x8 10=x4 11=x8 DDR2
66 * 27:26 Device width row 6
67 * 23:22 Device width row 5
68 * 19:20 Device width row 4
69 * 15:14 Device width row 3
70 * 11:10 Device width row 2
71 * 7:6 Device width row 1
72 * 3:2 Device width row 0
73 */
74#define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
75 /* FIXME:IS THIS RIGHT? */
76 /*
77 * 22 Number channels 0=1,1=2
78 * 19:18 DRB Granularity 32/64MB
79 */
80#define E752X_DRM 0x80 /* Dimm mapping register */
81#define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
82 /*
83 * 14:12 1 single A, 2 single B, 3 dual
84 */
85#define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
86#define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
87#define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
88#define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
89
90/* E752X register addresses - device 0 function 1 */
91#define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
92#define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
93#define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
94#define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
95#define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
96#define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
97#define E752X_SYSBUS_FERR 0x60 /* System buss first error reg (16b) */
98#define E752X_SYSBUS_NERR 0x62 /* System buss next error reg (16b) */
99#define E752X_SYSBUS_ERRMASK 0x64 /* System buss error mask reg (16b) */
100#define E752X_SYSBUS_SMICMD 0x6A /* System buss SMI command reg (16b) */
101#define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
102#define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
103#define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
104#define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI command reg (8b) */
105#define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
106#define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
107#define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
108#define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
109#define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
110#define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
111 /* error address register (32b) */
112 /*
113 * 31 Reserved
114 * 30:2 CE address (64 byte block 34:6)
115 * 1 Reserved
116 * 0 HiLoCS
117 */
118#define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */
119 /* error address register (32b) */
120 /*
121 * 31 Reserved
122 * 30:2 CE address (64 byte block 34:6)
123 * 1 Reserved
124 * 0 HiLoCS
125 */
126#define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
127 /* error address register (32b) */
128 /*
129 * 31 Reserved
130 * 30:2 CE address (64 byte block 34:6)
131 * 1 Reserved
132 * 0 HiLoCS
133 */
134#define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM first uncorrectable scrub memory */
135 /* error address register (32b) */
136 /*
137 * 31 Reserved
138 * 30:2 CE address (64 byte block 34:6)
139 * 1 Reserved
140 * 0 HiLoCS
141 */
142#define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
143 /* error syndrome register (16b) */
144#define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
145 /* error syndrome register (16b) */
146#define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
147
148/* ICH5R register addresses - device 30 function 0 */
149#define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
150#define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
151#define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
152
153enum e752x_chips {
154 E7520 = 0,
155 E7525 = 1,
156 E7320 = 2
157};
158
159
160struct e752x_pvt {
161 struct pci_dev *bridge_ck;
162 struct pci_dev *dev_d0f0;
163 struct pci_dev *dev_d0f1;
164 u32 tolm;
165 u32 remapbase;
166 u32 remaplimit;
167 int mc_symmetric;
168 u8 map[8];
169 int map_type;
170 const struct e752x_dev_info *dev_info;
171};
172
173
174struct e752x_dev_info {
175 u16 err_dev;
176 const char *ctl_name;
177};
178
179struct e752x_error_info {
180 u32 ferr_global;
181 u32 nerr_global;
182 u8 hi_ferr;
183 u8 hi_nerr;
184 u16 sysbus_ferr;
185 u16 sysbus_nerr;
186 u8 buf_ferr;
187 u8 buf_nerr;
188 u16 dram_ferr;
189 u16 dram_nerr;
190 u32 dram_sec1_add;
191 u32 dram_sec2_add;
192 u16 dram_sec1_syndrome;
193 u16 dram_sec2_syndrome;
194 u32 dram_ded_add;
195 u32 dram_scrb_add;
196 u32 dram_retr_add;
197};
198
199static const struct e752x_dev_info e752x_devs[] = {
200 [E7520] = {
201 .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
202 .ctl_name = "E7520"},
203 [E7525] = {
204 .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
205 .ctl_name = "E7525"},
206 [E7320] = {
207 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
208 .ctl_name = "E7320"},
209};
210
211
212static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
213 unsigned long page)
214{
215 u32 remap;
216 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
217
218 debugf3("MC: " __FILE__ ": %s()\n", __func__);
219
220 if (page < pvt->tolm)
221 return page;
222 if ((page >= 0x100000) && (page < pvt->remapbase))
223 return page;
224 remap = (page - pvt->tolm) + pvt->remapbase;
225 if (remap < pvt->remaplimit)
226 return remap;
227 printk(KERN_ERR "Invalid page %lx - out of range\n", page);
228 return pvt->tolm - 1;
229}
230
231static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
232 u32 sec1_add, u16 sec1_syndrome)
233{
234 u32 page;
235 int row;
236 int channel;
237 int i;
238 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
239
240 debugf3("MC: " __FILE__ ": %s()\n", __func__);
241
242 /* convert the addr to 4k page */
243 page = sec1_add >> (PAGE_SHIFT - 4);
244
245 /* FIXME - check for -1 */
246 if (pvt->mc_symmetric) {
247 /* chip select are bits 14 & 13 */
248 row = ((page >> 1) & 3);
249 printk(KERN_WARNING
250 "Test row %d Table %d %d %d %d %d %d %d %d\n",
251 row, pvt->map[0], pvt->map[1], pvt->map[2],
252 pvt->map[3], pvt->map[4], pvt->map[5],
253 pvt->map[6], pvt->map[7]);
254
255 /* test for channel remapping */
256 for (i = 0; i < 8; i++) {
257 if (pvt->map[i] == row)
258 break;
259 }
260 printk(KERN_WARNING "Test computed row %d\n", i);
261 if (i < 8)
262 row = i;
263 else
264 printk(KERN_WARNING
265 "MC%d: row %d not found in remap table\n",
266 mci->mc_idx, row);
267 } else
268 row = edac_mc_find_csrow_by_page(mci, page);
269 /* 0 = channel A, 1 = channel B */
270 channel = !(error_one & 1);
271
272 if (!pvt->map_type)
273 row = 7 - row;
274 edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel,
275 "e752x CE");
276}
277
278
279static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
280 u32 sec1_add, u16 sec1_syndrome, int *error_found,
281 int handle_error)
282{
283 *error_found = 1;
284
285 if (handle_error)
286 do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
287}
288
289static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, u32 ded_add,
290 u32 scrb_add)
291{
292 u32 error_2b, block_page;
293 int row;
294 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
295
296 debugf3("MC: " __FILE__ ": %s()\n", __func__);
297
298 if (error_one & 0x0202) {
299 error_2b = ded_add;
300 /* convert to 4k address */
301 block_page = error_2b >> (PAGE_SHIFT - 4);
302 row = pvt->mc_symmetric ?
303 /* chip select are bits 14 & 13 */
304 ((block_page >> 1) & 3) :
305 edac_mc_find_csrow_by_page(mci, block_page);
306 edac_mc_handle_ue(mci, block_page, 0, row,
307 "e752x UE from Read");
308 }
309 if (error_one & 0x0404) {
310 error_2b = scrb_add;
311 /* convert to 4k address */
312 block_page = error_2b >> (PAGE_SHIFT - 4);
313 row = pvt->mc_symmetric ?
314 /* chip select are bits 14 & 13 */
315 ((block_page >> 1) & 3) :
316 edac_mc_find_csrow_by_page(mci, block_page);
317 edac_mc_handle_ue(mci, block_page, 0, row,
318 "e752x UE from Scruber");
319 }
320}
321
322static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
323 u32 ded_add, u32 scrb_add, int *error_found, int handle_error)
324{
325 *error_found = 1;
326
327 if (handle_error)
328 do_process_ue(mci, error_one, ded_add, scrb_add);
329}
330
331static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
332 int *error_found, int handle_error)
333{
334 *error_found = 1;
335
336 if (!handle_error)
337 return;
338
339 debugf3("MC: " __FILE__ ": %s()\n", __func__);
340 edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
341}
342
343static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
344 u32 retry_add)
345{
346 u32 error_1b, page;
347 int row;
348 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
349
350 error_1b = retry_add;
351 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
352 row = pvt->mc_symmetric ?
353 ((page >> 1) & 3) : /* chip select are bits 14 & 13 */
354 edac_mc_find_csrow_by_page(mci, page);
355 printk(KERN_WARNING
356 "MC%d: CE page 0x%lx, row %d : Memory read retry\n",
357 mci->mc_idx, (long unsigned int) page, row);
358}
359
360static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
361 u32 retry_add, int *error_found, int handle_error)
362{
363 *error_found = 1;
364
365 if (handle_error)
366 do_process_ded_retry(mci, error, retry_add);
367}
368
369static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
370 int *error_found, int handle_error)
371{
372 *error_found = 1;
373
374 if (handle_error)
375 printk(KERN_WARNING "MC%d: Memory threshold CE\n",
376 mci->mc_idx);
377}
378
379static char *global_message[11] = {
380 "PCI Express C1", "PCI Express C", "PCI Express B1",
381 "PCI Express B", "PCI Express A1", "PCI Express A",
382 "DMA Controler", "HUB Interface", "System Bus",
383 "DRAM Controler", "Internal Buffer"
384};
385
386static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
387
388static void do_global_error(int fatal, u32 errors)
389{
390 int i;
391
392 for (i = 0; i < 11; i++) {
393 if (errors & (1 << i))
394 printk(KERN_WARNING "%sError %s\n",
395 fatal_message[fatal], global_message[i]);
396 }
397}
398
399static inline void global_error(int fatal, u32 errors, int *error_found,
400 int handle_error)
401{
402 *error_found = 1;
403
404 if (handle_error)
405 do_global_error(fatal, errors);
406}
407
408static char *hub_message[7] = {
409 "HI Address or Command Parity", "HI Illegal Access",
410 "HI Internal Parity", "Out of Range Access",
411 "HI Data Parity", "Enhanced Config Access",
412 "Hub Interface Target Abort"
413};
414
415static void do_hub_error(int fatal, u8 errors)
416{
417 int i;
418
419 for (i = 0; i < 7; i++) {
420 if (errors & (1 << i))
421 printk(KERN_WARNING "%sError %s\n",
422 fatal_message[fatal], hub_message[i]);
423 }
424}
425
426static inline void hub_error(int fatal, u8 errors, int *error_found,
427 int handle_error)
428{
429 *error_found = 1;
430
431 if (handle_error)
432 do_hub_error(fatal, errors);
433}
434
435static char *membuf_message[4] = {
436 "Internal PMWB to DRAM parity",
437 "Internal PMWB to System Bus Parity",
438 "Internal System Bus or IO to PMWB Parity",
439 "Internal DRAM to PMWB Parity"
440};
441
442static void do_membuf_error(u8 errors)
443{
444 int i;
445
446 for (i = 0; i < 4; i++) {
447 if (errors & (1 << i))
448 printk(KERN_WARNING "Non-Fatal Error %s\n",
449 membuf_message[i]);
450 }
451}
452
453static inline void membuf_error(u8 errors, int *error_found, int handle_error)
454{
455 *error_found = 1;
456
457 if (handle_error)
458 do_membuf_error(errors);
459}
460
461#if 0
462char *sysbus_message[10] = {
463 "Addr or Request Parity",
464 "Data Strobe Glitch",
465 "Addr Strobe Glitch",
466 "Data Parity",
467 "Addr Above TOM",
468 "Non DRAM Lock Error",
469 "MCERR", "BINIT",
470 "Memory Parity",
471 "IO Subsystem Parity"
472};
473#endif /* 0 */
474
475static void do_sysbus_error(int fatal, u32 errors)
476{
477 int i;
478
479 for (i = 0; i < 10; i++) {
480 if (errors & (1 << i))
481 printk(KERN_WARNING "%sError System Bus %s\n",
482 fatal_message[fatal], global_message[i]);
483 }
484}
485
486static inline void sysbus_error(int fatal, u32 errors, int *error_found,
487 int handle_error)
488{
489 *error_found = 1;
490
491 if (handle_error)
492 do_sysbus_error(fatal, errors);
493}
494
495static void e752x_check_hub_interface (struct e752x_error_info *info,
496 int *error_found, int handle_error)
497{
498 u8 stat8;
499
500 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
501 stat8 = info->hi_ferr;
502 if(stat8 & 0x7f) { /* Error, so process */
503 stat8 &= 0x7f;
504 if(stat8 & 0x2b)
505 hub_error(1, stat8 & 0x2b, error_found, handle_error);
506 if(stat8 & 0x54)
507 hub_error(0, stat8 & 0x54, error_found, handle_error);
508 }
509 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
510 stat8 = info->hi_nerr;
511 if(stat8 & 0x7f) { /* Error, so process */
512 stat8 &= 0x7f;
513 if (stat8 & 0x2b)
514 hub_error(1, stat8 & 0x2b, error_found, handle_error);
515 if(stat8 & 0x54)
516 hub_error(0, stat8 & 0x54, error_found, handle_error);
517 }
518}
519
520static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found,
521 int handle_error)
522{
523 u32 stat32, error32;
524
525 //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
526 stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
527
528 if (stat32 == 0)
529 return; /* no errors */
530
531 error32 = (stat32 >> 16) & 0x3ff;
532 stat32 = stat32 & 0x3ff;
533 if(stat32 & 0x083)
534 sysbus_error(1, stat32 & 0x083, error_found, handle_error);
535 if(stat32 & 0x37c)
536 sysbus_error(0, stat32 & 0x37c, error_found, handle_error);
537 if(error32 & 0x083)
538 sysbus_error(1, error32 & 0x083, error_found, handle_error);
539 if(error32 & 0x37c)
540 sysbus_error(0, error32 & 0x37c, error_found, handle_error);
541}
542
543static void e752x_check_membuf (struct e752x_error_info *info, int *error_found,
544 int handle_error)
545{
546 u8 stat8;
547
548 stat8 = info->buf_ferr;
549 if (stat8 & 0x0f) { /* Error, so process */
550 stat8 &= 0x0f;
551 membuf_error(stat8, error_found, handle_error);
552 }
553 stat8 = info->buf_nerr;
554 if (stat8 & 0x0f) { /* Error, so process */
555 stat8 &= 0x0f;
556 membuf_error(stat8, error_found, handle_error);
557 }
558}
559
560static void e752x_check_dram (struct mem_ctl_info *mci,
561 struct e752x_error_info *info, int *error_found, int handle_error)
562{
563 u16 error_one, error_next;
564
565 error_one = info->dram_ferr;
566 error_next = info->dram_nerr;
567
568 /* decode and report errors */
569 if(error_one & 0x0101) /* check first error correctable */
570 process_ce(mci, error_one, info->dram_sec1_add,
571 info->dram_sec1_syndrome, error_found,
572 handle_error);
573
574 if(error_next & 0x0101) /* check next error correctable */
575 process_ce(mci, error_next, info->dram_sec2_add,
576 info->dram_sec2_syndrome, error_found,
577 handle_error);
578
579 if(error_one & 0x4040)
580 process_ue_no_info_wr(mci, error_found, handle_error);
581
582 if(error_next & 0x4040)
583 process_ue_no_info_wr(mci, error_found, handle_error);
584
585 if(error_one & 0x2020)
586 process_ded_retry(mci, error_one, info->dram_retr_add,
587 error_found, handle_error);
588
589 if(error_next & 0x2020)
590 process_ded_retry(mci, error_next, info->dram_retr_add,
591 error_found, handle_error);
592
593 if(error_one & 0x0808)
594 process_threshold_ce(mci, error_one, error_found,
595 handle_error);
596
597 if(error_next & 0x0808)
598 process_threshold_ce(mci, error_next, error_found,
599 handle_error);
600
601 if(error_one & 0x0606)
602 process_ue(mci, error_one, info->dram_ded_add,
603 info->dram_scrb_add, error_found, handle_error);
604
605 if(error_next & 0x0606)
606 process_ue(mci, error_next, info->dram_ded_add,
607 info->dram_scrb_add, error_found, handle_error);
608}
609
610static void e752x_get_error_info (struct mem_ctl_info *mci,
611 struct e752x_error_info *info)
612{
613 struct pci_dev *dev;
614 struct e752x_pvt *pvt;
615
616 memset(info, 0, sizeof(*info));
617 pvt = (struct e752x_pvt *) mci->pvt_info;
618 dev = pvt->dev_d0f1;
619
620 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
621
622 if (info->ferr_global) {
623 pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr);
624 pci_read_config_word(dev, E752X_SYSBUS_FERR,
625 &info->sysbus_ferr);
626 pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
627 pci_read_config_word(dev, E752X_DRAM_FERR,
628 &info->dram_ferr);
629 pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
630 &info->dram_sec1_add);
631 pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
632 &info->dram_sec1_syndrome);
633 pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
634 &info->dram_ded_add);
635 pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
636 &info->dram_scrb_add);
637 pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
638 &info->dram_retr_add);
639
640 if (info->hi_ferr & 0x7f)
641 pci_write_config_byte(dev, E752X_HI_FERR,
642 info->hi_ferr);
643
644 if (info->sysbus_ferr)
645 pci_write_config_word(dev, E752X_SYSBUS_FERR,
646 info->sysbus_ferr);
647
648 if (info->buf_ferr & 0x0f)
649 pci_write_config_byte(dev, E752X_BUF_FERR,
650 info->buf_ferr);
651
652 if (info->dram_ferr)
653 pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
654 info->dram_ferr, info->dram_ferr);
655
656 pci_write_config_dword(dev, E752X_FERR_GLOBAL,
657 info->ferr_global);
658 }
659
660 pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
661
662 if (info->nerr_global) {
663 pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr);
664 pci_read_config_word(dev, E752X_SYSBUS_NERR,
665 &info->sysbus_nerr);
666 pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
667 pci_read_config_word(dev, E752X_DRAM_NERR,
668 &info->dram_nerr);
669 pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
670 &info->dram_sec2_add);
671 pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
672 &info->dram_sec2_syndrome);
673
674 if (info->hi_nerr & 0x7f)
675 pci_write_config_byte(dev, E752X_HI_NERR,
676 info->hi_nerr);
677
678 if (info->sysbus_nerr)
679 pci_write_config_word(dev, E752X_SYSBUS_NERR,
680 info->sysbus_nerr);
681
682 if (info->buf_nerr & 0x0f)
683 pci_write_config_byte(dev, E752X_BUF_NERR,
684 info->buf_nerr);
685
686 if (info->dram_nerr)
687 pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
688 info->dram_nerr, info->dram_nerr);
689
690 pci_write_config_dword(dev, E752X_NERR_GLOBAL,
691 info->nerr_global);
692 }
693}
694
695static int e752x_process_error_info (struct mem_ctl_info *mci,
696 struct e752x_error_info *info, int handle_errors)
697{
698 u32 error32, stat32;
699 int error_found;
700
701 error_found = 0;
702 error32 = (info->ferr_global >> 18) & 0x3ff;
703 stat32 = (info->ferr_global >> 4) & 0x7ff;
704
705 if (error32)
706 global_error(1, error32, &error_found, handle_errors);
707
708 if (stat32)
709 global_error(0, stat32, &error_found, handle_errors);
710
711 error32 = (info->nerr_global >> 18) & 0x3ff;
712 stat32 = (info->nerr_global >> 4) & 0x7ff;
713
714 if (error32)
715 global_error(1, error32, &error_found, handle_errors);
716
717 if (stat32)
718 global_error(0, stat32, &error_found, handle_errors);
719
720 e752x_check_hub_interface(info, &error_found, handle_errors);
721 e752x_check_sysbus(info, &error_found, handle_errors);
722 e752x_check_membuf(info, &error_found, handle_errors);
723 e752x_check_dram(mci, info, &error_found, handle_errors);
724 return error_found;
725}
726
727static void e752x_check(struct mem_ctl_info *mci)
728{
729 struct e752x_error_info info;
730 debugf3("MC: " __FILE__ ": %s()\n", __func__);
731 e752x_get_error_info(mci, &info);
732 e752x_process_error_info(mci, &info, 1);
733}
734
735static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
736{
737 int rc = -ENODEV;
738 int index;
739 u16 pci_data, stat;
740 u32 stat32;
741 u16 stat16;
742 u8 stat8;
743 struct mem_ctl_info *mci = NULL;
744 struct e752x_pvt *pvt = NULL;
745 u16 ddrcsr;
746 u32 drc;
747 int drc_chan; /* Number of channels 0=1chan,1=2chan */
748 int drc_drbg; /* DRB granularity 0=64mb,1=128mb */
749 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
750 u32 dra;
751 unsigned long last_cumul_size;
752 struct pci_dev *pres_dev;
753 struct pci_dev *dev = NULL;
754
755 debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
756 debugf0("Starting Probe1\n");
757
758 /* enable device 0 function 1 */
759 pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
760 stat8 |= (1 << 5);
761 pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
762
763 /* need to find out the number of channels */
764 pci_read_config_dword(pdev, E752X_DRC, &drc);
765 pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
766 /* FIXME: should check >>12 or 0xf, true for all? */
767 /* Dual channel = 1, Single channel = 0 */
768 drc_chan = (((ddrcsr >> 12) & 3) == 3);
769 drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
770 drc_ddim = (drc >> 20) & 0x3;
771
772 mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1);
773
774 if (mci == NULL) {
775 rc = -ENOMEM;
776 goto fail;
777 }
778
779 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
780
781 mci->mtype_cap = MEM_FLAG_RDDR;
782 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
783 EDAC_FLAG_S4ECD4ED;
784 /* FIXME - what if different memory types are in different csrows? */
785 mci->mod_name = BS_MOD_STR;
786 mci->mod_ver = "$Revision: 1.5.2.11 $";
787 mci->pdev = pdev;
788
789 debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
790 pvt = (struct e752x_pvt *) mci->pvt_info;
791 pvt->dev_info = &e752x_devs[dev_idx];
792 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
793 pvt->dev_info->err_dev,
794 pvt->bridge_ck);
795 if (pvt->bridge_ck == NULL)
796 pvt->bridge_ck = pci_scan_single_device(pdev->bus,
797 PCI_DEVFN(0, 1));
798 if (pvt->bridge_ck == NULL) {
799 printk(KERN_ERR "MC: error reporting device not found:"
800 "vendor %x device 0x%x (broken BIOS?)\n",
801 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
802 goto fail;
803 }
804 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
805
806 debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
807 mci->ctl_name = pvt->dev_info->ctl_name;
808 mci->edac_check = e752x_check;
809 mci->ctl_page_to_phys = ctl_page_to_phys;
810
811 /* find out the device types */
812 pci_read_config_dword(pdev, E752X_DRA, &dra);
813
814 /*
815 * The dram row boundary (DRB) reg values are boundary address for
816 * each DRAM row with a granularity of 64 or 128MB (single/dual
817 * channel operation). DRB regs are cumulative; therefore DRB7 will
818 * contain the total memory contained in all eight rows.
819 */
820 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
821 u8 value;
822 u32 cumul_size;
823 /* mem_dev 0=x8, 1=x4 */
824 int mem_dev = (dra >> (index * 4 + 2)) & 0x3;
825 struct csrow_info *csrow = &mci->csrows[index];
826
827 mem_dev = (mem_dev == 2);
828 pci_read_config_byte(mci->pdev, E752X_DRB + index, &value);
829 /* convert a 128 or 64 MiB DRB to a page size. */
830 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
831 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
832 __func__, index, cumul_size);
833 if (cumul_size == last_cumul_size)
834 continue; /* not populated */
835
836 csrow->first_page = last_cumul_size;
837 csrow->last_page = cumul_size - 1;
838 csrow->nr_pages = cumul_size - last_cumul_size;
839 last_cumul_size = cumul_size;
840 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
841 csrow->mtype = MEM_RDDR; /* only one type supported */
842 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
843
844 /*
845 * if single channel or x8 devices then SECDED
846 * if dual channel and x4 then S4ECD4ED
847 */
848 if (drc_ddim) {
849 if (drc_chan && mem_dev) {
850 csrow->edac_mode = EDAC_S4ECD4ED;
851 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
852 } else {
853 csrow->edac_mode = EDAC_SECDED;
854 mci->edac_cap |= EDAC_FLAG_SECDED;
855 }
856 } else
857 csrow->edac_mode = EDAC_NONE;
858 }
859
860 /* Fill in the memory map table */
861 {
862 u8 value;
863 u8 last = 0;
864 u8 row = 0;
865 for (index = 0; index < 8; index += 2) {
866
867 pci_read_config_byte(mci->pdev, E752X_DRB + index,
868 &value);
869 /* test if there is a dimm in this slot */
870 if (value == last) {
871 /* no dimm in the slot, so flag it as empty */
872 pvt->map[index] = 0xff;
873 pvt->map[index + 1] = 0xff;
874 } else { /* there is a dimm in the slot */
875 pvt->map[index] = row;
876 row++;
877 last = value;
878 /* test the next value to see if the dimm is
879 double sided */
880 pci_read_config_byte(mci->pdev,
881 E752X_DRB + index + 1,
882 &value);
883 pvt->map[index + 1] = (value == last) ?
884 0xff : /* the dimm is single sided,
885 so flag as empty */
886 row; /* this is a double sided dimm
887 to save the next row # */
888 row++;
889 last = value;
890 }
891 }
892 }
893
894 /* set the map type. 1 = normal, 0 = reversed */
895 pci_read_config_byte(mci->pdev, E752X_DRM, &stat8);
896 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
897
898 mci->edac_cap |= EDAC_FLAG_NONE;
899
900 debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
901 __func__);
902 /* load the top of low memory, remap base, and remap limit vars */
903 pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data);
904 pvt->tolm = ((u32) pci_data) << 4;
905 pci_read_config_word(mci->pdev, E752X_REMAPBASE, &pci_data);
906 pvt->remapbase = ((u32) pci_data) << 14;
907 pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data);
908 pvt->remaplimit = ((u32) pci_data) << 14;
909 printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
910 pvt->remapbase, pvt->remaplimit);
911
912 if (edac_mc_add_mc(mci)) {
913 debugf3("MC: " __FILE__
914 ": %s(): failed edac_mc_add_mc()\n",
915 __func__);
916 goto fail;
917 }
918
919 /* Walk through the PCI table and clear errors */
920 switch (dev_idx) {
921 case E7520:
922 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
923 PCI_DEVICE_ID_INTEL_7520_0, NULL);
924 break;
925 case E7525:
926 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
927 PCI_DEVICE_ID_INTEL_7525_0, NULL);
928 break;
929 case E7320:
930 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
931 PCI_DEVICE_ID_INTEL_7320_0, NULL);
932 break;
933 }
934
935
936 pvt->dev_d0f0 = dev;
937 for (pres_dev = dev;
938 ((struct pci_dev *) pres_dev->global_list.next != dev);
939 pres_dev = (struct pci_dev *) pres_dev->global_list.next) {
940 pci_read_config_dword(pres_dev, PCI_COMMAND, &stat32);
941 stat = (u16) (stat32 >> 16);
942 /* clear any error bits */
943 if (stat32 & ((1 << 6) + (1 << 8)))
944 pci_write_config_word(pres_dev, PCI_STATUS, stat);
945 }
946 /* find the error reporting device and clear errors */
947 dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
948 /* Turn off error disable & SMI in case the BIOS turned it on */
949 pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
950 pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
951 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
952 pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
953 pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
954 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
955 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
956 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
957 /* clear other MCH errors */
958 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &stat32);
959 pci_write_config_dword(dev, E752X_FERR_GLOBAL, stat32);
960 pci_read_config_dword(dev, E752X_NERR_GLOBAL, &stat32);
961 pci_write_config_dword(dev, E752X_NERR_GLOBAL, stat32);
962 pci_read_config_byte(dev, E752X_HI_FERR, &stat8);
963 pci_write_config_byte(dev, E752X_HI_FERR, stat8);
964 pci_read_config_byte(dev, E752X_HI_NERR, &stat8);
965 pci_write_config_byte(dev, E752X_HI_NERR, stat8);
966 pci_read_config_dword(dev, E752X_SYSBUS_FERR, &stat32);
967 pci_write_config_dword(dev, E752X_SYSBUS_FERR, stat32);
968 pci_read_config_byte(dev, E752X_BUF_FERR, &stat8);
969 pci_write_config_byte(dev, E752X_BUF_FERR, stat8);
970 pci_read_config_byte(dev, E752X_BUF_NERR, &stat8);
971 pci_write_config_byte(dev, E752X_BUF_NERR, stat8);
972 pci_read_config_word(dev, E752X_DRAM_FERR, &stat16);
973 pci_write_config_word(dev, E752X_DRAM_FERR, stat16);
974 pci_read_config_word(dev, E752X_DRAM_NERR, &stat16);
975 pci_write_config_word(dev, E752X_DRAM_NERR, stat16);
976
977 /* get this far and it's successful */
978 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
979 return 0;
980
981fail:
982 if (mci) {
983 if (pvt->dev_d0f0)
984 pci_dev_put(pvt->dev_d0f0);
985 if (pvt->dev_d0f1)
986 pci_dev_put(pvt->dev_d0f1);
987 if (pvt->bridge_ck)
988 pci_dev_put(pvt->bridge_ck);
989 edac_mc_free(mci);
990 }
991 return rc;
992}
993
994/* returns count (>= 0), or negative on error */
995static int __devinit e752x_init_one(struct pci_dev *pdev,
996 const struct pci_device_id *ent)
997{
998 debugf0("MC: " __FILE__ ": %s()\n", __func__);
999
1000 /* wake up and enable device */
1001	if (pci_enable_device(pdev) < 0)
1002 return -EIO;
1003 return e752x_probe1(pdev, ent->driver_data);
1004}
1005
1006
1007static void __devexit e752x_remove_one(struct pci_dev *pdev)
1008{
1009 struct mem_ctl_info *mci;
1010 struct e752x_pvt *pvt;
1011
1012 debugf0(__FILE__ ": %s()\n", __func__);
1013
1014 if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
1015 return;
1016
1017 if (edac_mc_del_mc(mci))
1018 return;
1019
1020 pvt = (struct e752x_pvt *) mci->pvt_info;
1021 pci_dev_put(pvt->dev_d0f0);
1022 pci_dev_put(pvt->dev_d0f1);
1023 pci_dev_put(pvt->bridge_ck);
1024 edac_mc_free(mci);
1025}
1026
1027
1028static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
1029 {PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1030 E7520},
1031 {PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1032 E7525},
1033 {PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1034 E7320},
1035 {0,} /* 0 terminated list. */
1036};
1037
1038MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
1039
1040
1041static struct pci_driver e752x_driver = {
1042	.name = BS_MOD_STR,
1043	.probe = e752x_init_one,
1044	.remove = __devexit_p(e752x_remove_one),
1045	.id_table = e752x_pci_tbl,
1046};
1047
1048
1049static int __init e752x_init(void)
1050{
1051 int pci_rc;
1052
1053 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1054 pci_rc = pci_register_driver(&e752x_driver);
1055 return (pci_rc < 0) ? pci_rc : 0;
1056}
1057
1058
1059static void __exit e752x_exit(void)
1060{
1061 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1062 pci_unregister_driver(&e752x_driver);
1063}
1064
1065
1066module_init(e752x_init);
1067module_exit(e752x_exit);
1068
1069MODULE_LICENSE("GPL");
1070MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
1071MODULE_DESCRIPTION("MC support for Intel e752x memory controllers");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
new file mode 100644
index 000000000000..d5e320dfc66f
--- /dev/null
+++ b/drivers/edac/e7xxx_edac.c
@@ -0,0 +1,558 @@
1/*
2 * Intel e7xxx Memory Controller kernel module
3 * (C) 2003 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * See "enum e7xxx_chips" below for supported chipsets
8 *
9 * Written by Thayne Harbaugh
10 * Based on work by Dan Hollis <goemon at anime dot net> and others.
11 * http://www.anime.net/~goemon/linux-ecc/
12 *
13 * Contributors:
14 * Eric Biederman (Linux Networx)
15 * Tom Zimmerman (Linux Networx)
16 * Jim Garlick (Lawrence Livermore National Labs)
17 * Dave Peterson (Lawrence Livermore National Labs)
18 * That One Guy (Some other place)
19 * Wang Zhenyu (intel.com)
20 *
21 * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $
22 *
23 */
24
25
26#include <linux/config.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/pci.h>
30#include <linux/pci_ids.h>
31#include <linux/slab.h>
32#include "edac_mc.h"
33
34
35#ifndef PCI_DEVICE_ID_INTEL_7205_0
36#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
37#endif /* PCI_DEVICE_ID_INTEL_7205_0 */
38
39#ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR
40#define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551
41#endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */
42
43#ifndef PCI_DEVICE_ID_INTEL_7500_0
44#define PCI_DEVICE_ID_INTEL_7500_0 0x2540
45#endif /* PCI_DEVICE_ID_INTEL_7500_0 */
46
47#ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR
48#define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541
49#endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */
50
51#ifndef PCI_DEVICE_ID_INTEL_7501_0
52#define PCI_DEVICE_ID_INTEL_7501_0 0x254c
53#endif /* PCI_DEVICE_ID_INTEL_7501_0 */
54
55#ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR
56#define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541
57#endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */
58
59#ifndef PCI_DEVICE_ID_INTEL_7505_0
60#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
61#endif /* PCI_DEVICE_ID_INTEL_7505_0 */
62
63#ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR
64#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551
65#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
66
67
68#define E7XXX_NR_CSROWS 8 /* number of csrows */
69#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
70
71
72/* E7XXX register addresses - device 0 function 0 */
73#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
74#define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */
75 /*
76 * 31 Device width row 7 0=x8 1=x4
77 * 27 Device width row 6
78 * 23 Device width row 5
79 * 19 Device width row 4
80 * 15 Device width row 3
81 * 11 Device width row 2
82 * 7 Device width row 1
83 * 3 Device width row 0
84 */
85#define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */
86 /*
87 * 22 Number channels 0=1,1=2
88 * 19:18 DRB Granularity 32/64MB
89 */
90#define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
91#define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
92#define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
93
94/* E7XXX register addresses - device 0 function 1 */
95#define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */
96#define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */
97#define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */
98 /* error address register (32b) */
99 /*
100 * 31:28 Reserved
101 * 27:6 CE address (4k block 33:12)
102 * 5:0 Reserved
103 */
104#define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */
105 /* error address register (32b) */
106 /*
107 * 31:28 Reserved
108 * 27:6 CE address (4k block 33:12)
109 * 5:0 Reserved
110 */
111#define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */
112 /* error syndrome register (16b) */
113
114enum e7xxx_chips {
115 E7500 = 0,
116 E7501,
117 E7505,
118 E7205,
119};
120
121
122struct e7xxx_pvt {
123 struct pci_dev *bridge_ck;
124 u32 tolm;
125 u32 remapbase;
126 u32 remaplimit;
127 const struct e7xxx_dev_info *dev_info;
128};
129
130
131struct e7xxx_dev_info {
132 u16 err_dev;
133 const char *ctl_name;
134};
135
136
137struct e7xxx_error_info {
138 u8 dram_ferr;
139 u8 dram_nerr;
140 u32 dram_celog_add;
141 u16 dram_celog_syndrome;
142 u32 dram_uelog_add;
143};
144
145static const struct e7xxx_dev_info e7xxx_devs[] = {
146 [E7500] = {
147 .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
148 .ctl_name = "E7500"},
149 [E7501] = {
150 .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
151 .ctl_name = "E7501"},
152 [E7505] = {
153 .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
154 .ctl_name = "E7505"},
155 [E7205] = {
156 .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
157 .ctl_name = "E7205"},
158};
159
160
161/* FIXME - is this valid for both SECDED and S4ECD4ED? */
162static inline int e7xxx_find_channel(u16 syndrome)
163{
164 debugf3("MC: " __FILE__ ": %s()\n", __func__);
165
166 if ((syndrome & 0xff00) == 0)
167 return 0;
168 if ((syndrome & 0x00ff) == 0)
169 return 1;
170 if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
171 return 0;
172 return 1;
173}
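
As an illustration of the decode above, here is a minimal stand-alone sketch (user-space C; the example syndromes are hypothetical, not taken from the chipset documentation):

	#include <assert.h>

	/* Same byte/nibble tests as e7xxx_find_channel(), compiled stand-alone. */
	static int find_channel(unsigned short syndrome)
	{
		if ((syndrome & 0xff00) == 0)
			return 0;	/* check bits confined to the low byte */
		if ((syndrome & 0x00ff) == 0)
			return 1;	/* check bits confined to the high byte */
		if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
			return 0;	/* one nibble of the high byte is clear */
		return 1;
	}

	int main(void)
	{
		assert(find_channel(0x0012) == 0);	/* hypothetical syndrome */
		assert(find_channel(0x4100) == 1);	/* hypothetical syndrome */
		assert(find_channel(0x0801) == 0);	/* hypothetical syndrome */
		return 0;
	}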
174
175
176static unsigned long
177ctl_page_to_phys(struct mem_ctl_info *mci, unsigned long page)
178{
179 u32 remap;
180 struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info;
181
182 debugf3("MC: " __FILE__ ": %s()\n", __func__);
183
184 if ((page < pvt->tolm) ||
185 ((page >= 0x100000) && (page < pvt->remapbase)))
186 return page;
187 remap = (page - pvt->tolm) + pvt->remapbase;
188 if (remap < pvt->remaplimit)
189 return remap;
190 printk(KERN_ERR "Invalid page %lx - out of range\n", page);
191 return pvt->tolm - 1;
192}
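
The remap-window arithmetic is easier to follow with concrete numbers. A minimal user-space sketch, omitting the out-of-range error path (all values are made up for illustration; units are 4 KiB pages):

	#include <stdio.h>

	/* Mirrors ctl_page_to_phys()'s arithmetic without the limit check. */
	static unsigned long remap_page(unsigned long page, unsigned long tolm,
					unsigned long remapbase)
	{
		if (page < tolm || (page >= 0x100000 && page < remapbase))
			return page;		/* below TOLM, or below the window */
		return (page - tolm) + remapbase;	/* inside the remap window */
	}

	int main(void)
	{
		/* Hypothetical: TOLM at 3 GiB, remap window starting at 4 GiB. */
		printf("%lx\n", remap_page(0xc0010, 0xc0000, 0x100000));
		/* prints 100010: the page just above TOLM lands above 4 GiB */
		return 0;
	}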
193
194
195static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
196{
197 u32 error_1b, page;
198 u16 syndrome;
199 int row;
200 int channel;
201
202 debugf3("MC: " __FILE__ ": %s()\n", __func__);
203
204 /* read the error address */
205 error_1b = info->dram_celog_add;
206 /* FIXME - should use PAGE_SHIFT */
207 page = error_1b >> 6; /* convert the address to 4k page */
208 /* read the syndrome */
209 syndrome = info->dram_celog_syndrome;
210 /* FIXME - check for -1 */
211 row = edac_mc_find_csrow_by_page(mci, page);
212 /* convert syndrome to channel */
213 channel = e7xxx_find_channel(syndrome);
214 edac_mc_handle_ce(mci, page, 0, syndrome, row, channel,
215 "e7xxx CE");
216}
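
Why ">> 6" rather than ">> PAGE_SHIFT": per the CELOG register layout documented above, register bits 27:6 hold physical-address bits 33:12, so shifting the raw register value right by six yields the 4 KiB page number directly.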
217
218
219static void process_ce_no_info(struct mem_ctl_info *mci)
220{
221 debugf3("MC: " __FILE__ ": %s()\n", __func__);
222 edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
223}
224
225
226static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
227{
228 u32 error_2b, block_page;
229 int row;
230
231 debugf3("MC: " __FILE__ ": %s()\n", __func__);
232
233 /* read the error address */
234 error_2b = info->dram_uelog_add;
235 /* FIXME - should use PAGE_SHIFT */
236 block_page = error_2b >> 6; /* convert to 4k address */
237 row = edac_mc_find_csrow_by_page(mci, block_page);
238 edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
239}
240
241
242static void process_ue_no_info(struct mem_ctl_info *mci)
243{
244 debugf3("MC: " __FILE__ ": %s()\n", __func__);
245 edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
246}
247
248
249static void e7xxx_get_error_info (struct mem_ctl_info *mci,
250 struct e7xxx_error_info *info)
251{
252 struct e7xxx_pvt *pvt;
253
254 pvt = (struct e7xxx_pvt *) mci->pvt_info;
255 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR,
256 &info->dram_ferr);
257 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR,
258 &info->dram_nerr);
259
260 if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
261 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
262 &info->dram_celog_add);
263 pci_read_config_word(pvt->bridge_ck,
264 E7XXX_DRAM_CELOG_SYNDROME, &info->dram_celog_syndrome);
265 }
266
267 if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
268 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
269 &info->dram_uelog_add);
270
271 if (info->dram_ferr & 3)
272 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03,
273 0x03);
274
275 if (info->dram_nerr & 3)
276 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03,
277 0x03);
278}
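
Note on the pci_write_bits8() calls above: the FERR/NERR log registers are evidently write-one-to-clear, so writing back 0x03 under a 0x03 mask acknowledges both the CE (bit 0) and UE (bit 1) log bits and re-arms the logs for the next error; the datasheet should be treated as authoritative on this behavior.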
279
280
281static int e7xxx_process_error_info (struct mem_ctl_info *mci,
282 struct e7xxx_error_info *info, int handle_errors)
283{
284 int error_found;
285
286 error_found = 0;
287
288 /* decode and report errors */
289 if (info->dram_ferr & 1) { /* check first error correctable */
290 error_found = 1;
291
292 if (handle_errors)
293 process_ce(mci, info);
294 }
295
296 if (info->dram_ferr & 2) { /* check first error uncorrectable */
297 error_found = 1;
298
299 if (handle_errors)
300 process_ue(mci, info);
301 }
302
303 if (info->dram_nerr & 1) { /* check next error correctable */
304 error_found = 1;
305
306 if (handle_errors) {
307 if (info->dram_ferr & 1)
308 process_ce_no_info(mci);
309 else
310 process_ce(mci, info);
311 }
312 }
313
314 if (info->dram_nerr & 2) { /* check next error uncorrectable */
315 error_found = 1;
316
317 if (handle_errors) {
318 if (info->dram_ferr & 2)
319 process_ue_no_info(mci);
320 else
321 process_ue(mci, info);
322 }
323 }
324
325 return error_found;
326}
327
328
329static void e7xxx_check(struct mem_ctl_info *mci)
330{
331 struct e7xxx_error_info info;
332
333 debugf3("MC: " __FILE__ ": %s()\n", __func__);
334 e7xxx_get_error_info(mci, &info);
335 e7xxx_process_error_info(mci, &info, 1);
336}
337
338
339static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
340{
341 int rc = -ENODEV;
342 int index;
343 u16 pci_data;
344 struct mem_ctl_info *mci = NULL;
345 struct e7xxx_pvt *pvt = NULL;
346 u32 drc;
347 int drc_chan = 1; /* Number of channels 0=1chan,1=2chan */
348 int drc_drbg = 1; /* DRB granularity 0=32mb,1=64mb */
349 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
350 u32 dra;
351 unsigned long last_cumul_size;
352
353
354 debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
355
356 /* need to find out the number of channels */
357 pci_read_config_dword(pdev, E7XXX_DRC, &drc);
358 /* only e7501 can be single channel */
359 if (dev_idx == E7501) {
360 drc_chan = ((drc >> 22) & 0x1);
361 drc_drbg = (drc >> 18) & 0x3;
362 }
363 drc_ddim = (drc >> 20) & 0x3;
364
365 mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
366
367 if (mci == NULL) {
368 rc = -ENOMEM;
369 goto fail;
370 }
371
372 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
373
374 mci->mtype_cap = MEM_FLAG_RDDR;
375 mci->edac_ctl_cap =
376 EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED;
377 /* FIXME - what if different memory types are in different csrows? */
378 mci->mod_name = BS_MOD_STR;
379 mci->mod_ver = "$Revision: 1.5.2.9 $";
380 mci->pdev = pdev;
381
382 debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
383 pvt = (struct e7xxx_pvt *) mci->pvt_info;
384 pvt->dev_info = &e7xxx_devs[dev_idx];
385 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
386 pvt->dev_info->err_dev,
387 pvt->bridge_ck);
388 if (!pvt->bridge_ck) {
389 printk(KERN_ERR
390		       "MC: error reporting device not found: "
391		       "vendor %x device 0x%x (broken BIOS?)\n",
392 PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
393 goto fail;
394 }
395
396 debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
397 mci->ctl_name = pvt->dev_info->ctl_name;
398
399 mci->edac_check = e7xxx_check;
400 mci->ctl_page_to_phys = ctl_page_to_phys;
401
402 /* find out the device types */
403 pci_read_config_dword(pdev, E7XXX_DRA, &dra);
404
405 /*
406 * The dram row boundary (DRB) reg values are boundary address
407 * for each DRAM row with a granularity of 32 or 64MB (single/dual
408 * channel operation). DRB regs are cumulative; therefore DRB7 will
409 * contain the total memory contained in all eight rows.
410 */
411 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
412 u8 value;
413 u32 cumul_size;
414 /* mem_dev 0=x8, 1=x4 */
415 int mem_dev = (dra >> (index * 4 + 3)) & 0x1;
416 struct csrow_info *csrow = &mci->csrows[index];
417
418 pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value);
419		/* convert the 32 or 64 MiB DRB boundary to a cumulative
		   page count; a worked example follows this loop */
420 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
421 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
422 __func__, index, cumul_size);
423 if (cumul_size == last_cumul_size)
424 continue; /* not populated */
425
426 csrow->first_page = last_cumul_size;
427 csrow->last_page = cumul_size - 1;
428 csrow->nr_pages = cumul_size - last_cumul_size;
429 last_cumul_size = cumul_size;
430 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
431 csrow->mtype = MEM_RDDR; /* only one type supported */
432 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
433
434 /*
435 * if single channel or x8 devices then SECDED
436 * if dual channel and x4 then S4ECD4ED
437 */
438 if (drc_ddim) {
439 if (drc_chan && mem_dev) {
440 csrow->edac_mode = EDAC_S4ECD4ED;
441 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
442 } else {
443 csrow->edac_mode = EDAC_SECDED;
444 mci->edac_cap |= EDAC_FLAG_SECDED;
445 }
446 } else
447 csrow->edac_mode = EDAC_NONE;
448 }
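
A worked example of the DRB-to-pages conversion used in the loop above (register value and granularity are assumed):

	#include <stdio.h>

	int main(void)
	{
		int drc_drbg = 1;	/* assumed: 64 MiB DRB granularity */
		int page_shift = 12;	/* 4 KiB pages */
		unsigned char drb = 2;	/* assumed DRB register value */
		unsigned cumul_size = drb << (25 + drc_drbg - page_shift);

		/* prints "32768 pages (128 MiB)" */
		printf("%u pages (%u MiB)\n", cumul_size,
		       cumul_size >> (20 - page_shift));
		return 0;
	}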
449
450 mci->edac_cap |= EDAC_FLAG_NONE;
451
452 debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
453 __func__);
454 /* load the top of low memory, remap base, and remap limit vars */
455 pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data);
456 pvt->tolm = ((u32) pci_data) << 4;
457 pci_read_config_word(mci->pdev, E7XXX_REMAPBASE, &pci_data);
458 pvt->remapbase = ((u32) pci_data) << 14;
459 pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data);
460 pvt->remaplimit = ((u32) pci_data) << 14;
461	printk(KERN_INFO "tolm = %x, remapbase = %x, remaplimit = %x\n",
462	       pvt->tolm, pvt->remapbase, pvt->remaplimit);
463
464 /* clear any pending errors, or initial state bits */
465 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
466 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
467
468 if (edac_mc_add_mc(mci) != 0) {
469 debugf3("MC: " __FILE__
470 ": %s(): failed edac_mc_add_mc()\n",
471 __func__);
472 goto fail;
473 }
474
475 /* get this far and it's successful */
476 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
477 return 0;
478
479fail:
480 if (mci != NULL) {
481		if (pvt != NULL && pvt->bridge_ck)
482 pci_dev_put(pvt->bridge_ck);
483 edac_mc_free(mci);
484 }
485
486 return rc;
487}
488
489/* returns count (>= 0), or negative on error */
490static int __devinit
491e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
492{
493 debugf0("MC: " __FILE__ ": %s()\n", __func__);
494
495 /* wake up and enable device */
496 return pci_enable_device(pdev) ?
497 -EIO : e7xxx_probe1(pdev, ent->driver_data);
498}
499
500
501static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
502{
503 struct mem_ctl_info *mci;
504 struct e7xxx_pvt *pvt;
505
506 debugf0(__FILE__ ": %s()\n", __func__);
507
508	if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) &&
509	    !edac_mc_del_mc(mci)) {
510 pvt = (struct e7xxx_pvt *) mci->pvt_info;
511 pci_dev_put(pvt->bridge_ck);
512 edac_mc_free(mci);
513 }
514}
515
516
517static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
518 {PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
519 E7205},
520 {PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
521 E7500},
522 {PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
523 E7501},
524 {PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
525 E7505},
526 {0,} /* 0 terminated list. */
527};
528
529MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
530
531
532static struct pci_driver e7xxx_driver = {
533 .name = BS_MOD_STR,
534 .probe = e7xxx_init_one,
535 .remove = __devexit_p(e7xxx_remove_one),
536 .id_table = e7xxx_pci_tbl,
537};
538
539
540static int __init e7xxx_init(void)
541{
542 return pci_register_driver(&e7xxx_driver);
543}
544
545
546static void __exit e7xxx_exit(void)
547{
548 pci_unregister_driver(&e7xxx_driver);
549}
550
551module_init(e7xxx_init);
552module_exit(e7xxx_exit);
553
554
555MODULE_LICENSE("GPL");
556MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
557	      "Based on work by Dan Hollis et al");
558MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
new file mode 100644
index 000000000000..4be9bd0a1267
--- /dev/null
+++ b/drivers/edac/edac_mc.c
@@ -0,0 +1,2209 @@
1/*
2 * edac_mc kernel module
3 * (C) 2005 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Thayne Harbaugh
8 * Based on work by Dan Hollis <goemon at anime dot net> and others.
9 * http://www.anime.net/~goemon/linux-ecc/
10 *
11 * Modified by Dave Peterson and Doug Thompson
12 *
13 */
14
15
16#include <linux/config.h>
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/proc_fs.h>
20#include <linux/kernel.h>
21#include <linux/types.h>
22#include <linux/smp.h>
23#include <linux/init.h>
24#include <linux/sysctl.h>
25#include <linux/highmem.h>
26#include <linux/timer.h>
27#include <linux/slab.h>
28#include <linux/jiffies.h>
29#include <linux/spinlock.h>
30#include <linux/list.h>
31#include <linux/sysdev.h>
32#include <linux/ctype.h>
33
34#include <asm/uaccess.h>
35#include <asm/page.h>
36#include <asm/edac.h>
37
38#include "edac_mc.h"
39
40#define EDAC_MC_VERSION "edac_mc Ver: 2.0.0 " __DATE__
41
42#ifdef CONFIG_EDAC_DEBUG
43/* Values of 0 to 4 will generate output */
44int edac_debug_level = 1;
45EXPORT_SYMBOL(edac_debug_level);
46#endif
47
48/* EDAC controls, settable by module parameter and sysfs */
49static int log_ue = 1;
50static int log_ce = 1;
51static int panic_on_ue = 1;
52static int poll_msec = 1000;
53
54static int check_pci_parity = 0;	/* default NO check PCI parity */
55static int panic_on_pci_parity; /* default no panic on PCI Parity */
56static atomic_t pci_parity_count = ATOMIC_INIT(0);
57
58/* lock to memory controller's control array */
59static DECLARE_MUTEX(mem_ctls_mutex);
60static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
61
62/* Structure of the whitelist and blacklist arrays */
63struct edac_pci_device_list {
64 unsigned int vendor; /* Vendor ID */
65	unsigned int device;	/* Device ID */
66};
67
68
69#define MAX_LISTED_PCI_DEVICES 32
70
71/* List of PCI devices (vendor-id:device-id) that should be skipped */
72static struct edac_pci_device_list pci_blacklist[MAX_LISTED_PCI_DEVICES];
73static int pci_blacklist_count;
74
75/* List of PCI devices (vendor-id:device-id) that should be scanned */
76static struct edac_pci_device_list pci_whitelist[MAX_LISTED_PCI_DEVICES];
77static int pci_whitelist_count;
78
79/* START sysfs data and methods */
80
81static const char *mem_types[] = {
82 [MEM_EMPTY] = "Empty",
83 [MEM_RESERVED] = "Reserved",
84 [MEM_UNKNOWN] = "Unknown",
85 [MEM_FPM] = "FPM",
86 [MEM_EDO] = "EDO",
87 [MEM_BEDO] = "BEDO",
88 [MEM_SDR] = "Unbuffered-SDR",
89 [MEM_RDR] = "Registered-SDR",
90 [MEM_DDR] = "Unbuffered-DDR",
91 [MEM_RDDR] = "Registered-DDR",
92 [MEM_RMBS] = "RMBS"
93};
94
95static const char *dev_types[] = {
96 [DEV_UNKNOWN] = "Unknown",
97 [DEV_X1] = "x1",
98 [DEV_X2] = "x2",
99 [DEV_X4] = "x4",
100 [DEV_X8] = "x8",
101 [DEV_X16] = "x16",
102 [DEV_X32] = "x32",
103 [DEV_X64] = "x64"
104};
105
106static const char *edac_caps[] = {
107 [EDAC_UNKNOWN] = "Unknown",
108 [EDAC_NONE] = "None",
109 [EDAC_RESERVED] = "Reserved",
110 [EDAC_PARITY] = "PARITY",
111 [EDAC_EC] = "EC",
112 [EDAC_SECDED] = "SECDED",
113 [EDAC_S2ECD2ED] = "S2ECD2ED",
114 [EDAC_S4ECD4ED] = "S4ECD4ED",
115 [EDAC_S8ECD8ED] = "S8ECD8ED",
116 [EDAC_S16ECD16ED] = "S16ECD16ED"
117};
118
119
120/* sysfs object: /sys/devices/system/edac */
121static struct sysdev_class edac_class = {
122 set_kset_name("edac"),
123};
124
125/* sysfs objects:
126 * /sys/devices/system/edac/mc
127 * /sys/devices/system/edac/pci
128 */
129static struct kobject edac_memctrl_kobj;
130static struct kobject edac_pci_kobj;
131
132/*
133 * /sys/devices/system/edac/mc;
134 * data structures and methods
135 */
136static ssize_t memctrl_string_show(void *ptr, char *buffer)
137{
138 char *value = (char*) ptr;
139 return sprintf(buffer, "%s\n", value);
140}
141
142static ssize_t memctrl_int_show(void *ptr, char *buffer)
143{
144 int *value = (int*) ptr;
145 return sprintf(buffer, "%d\n", *value);
146}
147
148static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
149{
150 int *value = (int*) ptr;
151
152 if (isdigit(*buffer))
153 *value = simple_strtoul(buffer, NULL, 0);
154
155 return count;
156}
157
158struct memctrl_dev_attribute {
159 struct attribute attr;
160 void *value;
161 ssize_t (*show)(void *,char *);
162 ssize_t (*store)(void *, const char *, size_t);
163};
164
165/* Set of show/store abstract level functions for memory control object */
166static ssize_t
167memctrl_dev_show(struct kobject *kobj, struct attribute *attr, char *buffer)
168{
169 struct memctrl_dev_attribute *memctrl_dev;
170 memctrl_dev = (struct memctrl_dev_attribute*)attr;
171
172 if (memctrl_dev->show)
173 return memctrl_dev->show(memctrl_dev->value, buffer);
174 return -EIO;
175}
176
177static ssize_t
178memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
179 const char *buffer, size_t count)
180{
181 struct memctrl_dev_attribute *memctrl_dev;
182 memctrl_dev = (struct memctrl_dev_attribute*)attr;
183
184 if (memctrl_dev->store)
185 return memctrl_dev->store(memctrl_dev->value, buffer, count);
186 return -EIO;
187}
188
189static struct sysfs_ops memctrlfs_ops = {
190 .show = memctrl_dev_show,
191 .store = memctrl_dev_store
192};
193
194#define MEMCTRL_ATTR(_name,_mode,_show,_store) \
195struct memctrl_dev_attribute attr_##_name = { \
196 .attr = {.name = __stringify(_name), .mode = _mode }, \
197 .value = &_name, \
198 .show = _show, \
199 .store = _store, \
200};
201
202#define MEMCTRL_STRING_ATTR(_name,_data,_mode,_show,_store) \
203struct memctrl_dev_attribute attr_##_name = { \
204 .attr = {.name = __stringify(_name), .mode = _mode }, \
205 .value = _data, \
206 .show = _show, \
207 .store = _store, \
208};
209
210/* mc_version attribute file */
211MEMCTRL_STRING_ATTR(mc_version,EDAC_MC_VERSION,S_IRUGO,memctrl_string_show,NULL);
212
213/* memory controller control files */
214MEMCTRL_ATTR(panic_on_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
215MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
216MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
217MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
218
219
220/* Base Attributes of the memory ECC object */
221static struct memctrl_dev_attribute *memctrl_attr[] = {
222 &attr_panic_on_ue,
223 &attr_log_ue,
224 &attr_log_ce,
225 &attr_poll_msec,
226 &attr_mc_version,
227 NULL,
228};
229
230/* Main MC kobject release() function */
231static void edac_memctrl_master_release(struct kobject *kobj)
232{
233 debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
234}
235
236static struct kobj_type ktype_memctrl = {
237 .release = edac_memctrl_master_release,
238 .sysfs_ops = &memctrlfs_ops,
239 .default_attrs = (struct attribute **) memctrl_attr,
240};
241
242
243/* Initialize the main sysfs entries for edac:
244 * /sys/devices/system/edac
245 *
246 * and children
247 *
248 * Return: 0 SUCCESS
249 * !0 FAILURE
250 */
251static int edac_sysfs_memctrl_setup(void)
252{
253 int err=0;
254
255 debugf1("MC: " __FILE__ ": %s()\n", __func__);
256
257 /* create the /sys/devices/system/edac directory */
258 err = sysdev_class_register(&edac_class);
259 if (!err) {
260 /* Init the MC's kobject */
261 memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj));
262 kobject_init(&edac_memctrl_kobj);
263
264 edac_memctrl_kobj.parent = &edac_class.kset.kobj;
265 edac_memctrl_kobj.ktype = &ktype_memctrl;
266
267 /* generate sysfs "..../edac/mc" */
268 err = kobject_set_name(&edac_memctrl_kobj,"mc");
269 if (!err) {
270 /* FIXME: maybe new sysdev_create_subdir() */
271 err = kobject_register(&edac_memctrl_kobj);
272 if (err) {
273 debugf1("Failed to register '.../edac/mc'\n");
274 } else {
275 debugf1("Registered '.../edac/mc' kobject\n");
276 }
277 }
278 } else {
279		debugf1(KERN_WARNING __FILE__ ": %s() error=%d\n", __func__, err);
280 }
281
282 return err;
283}
284
285/*
286 * MC teardown:
287 * the '..../edac/mc' kobject followed by '..../edac' itself
288 */
289static void edac_sysfs_memctrl_teardown(void)
290{
291 debugf0("MC: " __FILE__ ": %s()\n", __func__);
292
293 /* Unregister the MC's kobject */
294 kobject_unregister(&edac_memctrl_kobj);
295
296 /* release the master edac mc kobject */
297 kobject_put(&edac_memctrl_kobj);
298
299 /* Unregister the 'edac' object */
300 sysdev_class_unregister(&edac_class);
301}
302
303/*
304 * /sys/devices/system/edac/pci;
305 * data structures and methods
306 */
307
308struct list_control {
309 struct edac_pci_device_list *list;
310 int *count;
311};
312
313/* Output the list as: vendor_id:device:id<,vendor_id:device_id> */
314static ssize_t edac_pci_list_string_show(void *ptr, char *buffer)
315{
316 struct list_control *listctl;
317 struct edac_pci_device_list *list;
318 char *p = buffer;
319 int len=0;
320 int i;
321
322 listctl = ptr;
323 list = listctl->list;
324
325 for (i = 0; i < *(listctl->count); i++, list++ ) {
326 if (len > 0)
327 len += snprintf(p + len, (PAGE_SIZE-len), ",");
328
329 len += snprintf(p + len,
330 (PAGE_SIZE-len),
331 "%x:%x",
332 list->vendor,list->device);
333 }
334
335 len += snprintf(p + len,(PAGE_SIZE-len), "\n");
336
337 return (ssize_t) len;
338}
339
340/*
341 *
342 * Scan string from **s to **e looking for one 'vendor:device' tuple
343 * where each field is a hex value
344 *
345 * return 0 if an entry is NOT found
346 * return 1 if an entry is found
347 * fill in *vendor_id and *device_id with values found
348 *
349 * In both cases, make sure *s has been moved forward toward *e
350 */
351static int parse_one_device(const char **s,const char **e,
352 unsigned int *vendor_id, unsigned int *device_id)
353{
354 const char *runner, *p;
355
356 /* if null byte, we are done */
357 if (!**s) {
358 (*s)++; /* keep *s moving */
359 return 0;
360 }
361
362 /* skip over newlines & whitespace */
363 if ((**s == '\n') || isspace(**s)) {
364 (*s)++;
365 return 0;
366 }
367
368 if (!isxdigit(**s)) {
369 (*s)++;
370 return 0;
371 }
372
373 /* parse vendor_id */
374 runner = *s;
375 while (runner < *e) {
376 /* scan for vendor:device delimiter */
377 if (*runner == ':') {
378 *vendor_id = simple_strtol((char*) *s, (char**) &p, 16);
379 runner = p + 1;
380 break;
381 }
382 runner++;
383 }
384
385 if (!isxdigit(*runner)) {
386 *s = ++runner;
387 return 0;
388 }
389
390 /* parse device_id */
391 if (runner < *e) {
392 *device_id = simple_strtol((char*)runner, (char**)&p, 16);
393 runner = p;
394 }
395
396 *s = runner;
397
398 return 1;
399}
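
For reference, a user-space analogue of the tuple-scanning loop that this helper serves, using strtoul in place of the kernel's simple_strtol (the input string is hypothetical):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const char *s = "8086:2541,8086:254c";	/* hypothetical input */
		char *end;

		while (*s) {
			unsigned long vendor = strtoul(s, &end, 16);
			if (*end != ':')
				break;			/* malformed tuple */
			unsigned long device = strtoul(end + 1, &end, 16);
			printf("vendor %04lx device %04lx\n", vendor, device);
			if (*end != ',')
				break;			/* no more tuples */
			s = end + 1;
		}
		return 0;
	}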
400
401static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer,
402 size_t count)
403{
404 struct list_control *listctl;
405 struct edac_pci_device_list *list;
406 unsigned int vendor_id, device_id;
407 const char *s, *e;
408 int *index;
409
410 s = (char*)buffer;
411 e = s + count;
412
413 listctl = ptr;
414 list = listctl->list;
415 index = listctl->count;
416
417 *index = 0;
418 while (*index < MAX_LISTED_PCI_DEVICES) {
419
420 if (parse_one_device(&s,&e,&vendor_id,&device_id)) {
421 list[ *index ].vendor = vendor_id;
422 list[ *index ].device = device_id;
423 (*index)++;
424 }
425
426		/* check whether all data has been consumed */
427 if (s >= e)
428 break;
429 }
430
431 return count;
432}
433
434static ssize_t edac_pci_int_show(void *ptr, char *buffer)
435{
436 int *value = ptr;
437 return sprintf(buffer,"%d\n",*value);
438}
439
440static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
441{
442 int *value = ptr;
443
444 if (isdigit(*buffer))
445 *value = simple_strtoul(buffer,NULL,0);
446
447 return count;
448}
449
450struct edac_pci_dev_attribute {
451 struct attribute attr;
452 void *value;
453 ssize_t (*show)(void *,char *);
454 ssize_t (*store)(void *, const char *,size_t);
455};
456
457/* Set of show/store abstract level functions for PCI Parity object */
458static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
459 char *buffer)
460{
461 struct edac_pci_dev_attribute *edac_pci_dev;
462 edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
463
464 if (edac_pci_dev->show)
465 return edac_pci_dev->show(edac_pci_dev->value, buffer);
466 return -EIO;
467}
468
469static ssize_t edac_pci_dev_store(struct kobject *kobj, struct attribute *attr,
470 const char *buffer, size_t count)
471{
472 struct edac_pci_dev_attribute *edac_pci_dev;
473 edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
474
475	if (edac_pci_dev->store)
476 return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
477 return -EIO;
478}
479
480static struct sysfs_ops edac_pci_sysfs_ops = {
481 .show = edac_pci_dev_show,
482 .store = edac_pci_dev_store
483};
484
485
486#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \
487struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
488 .attr = {.name = __stringify(_name), .mode = _mode }, \
489 .value = &_name, \
490 .show = _show, \
491 .store = _store, \
492};
493
494#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store) \
495struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
496 .attr = {.name = __stringify(_name), .mode = _mode }, \
497 .value = _data, \
498 .show = _show, \
499 .store = _store, \
500};
501
502static struct list_control pci_whitelist_control = {
503 .list = pci_whitelist,
504 .count = &pci_whitelist_count
505};
506
507static struct list_control pci_blacklist_control = {
508 .list = pci_blacklist,
509 .count = &pci_blacklist_count
510};
511
512/* whitelist attribute */
513EDAC_PCI_STRING_ATTR(pci_parity_whitelist,
514 &pci_whitelist_control,
515 S_IRUGO|S_IWUSR,
516 edac_pci_list_string_show,
517 edac_pci_list_string_store);
518
519EDAC_PCI_STRING_ATTR(pci_parity_blacklist,
520 &pci_blacklist_control,
521 S_IRUGO|S_IWUSR,
522 edac_pci_list_string_show,
523 edac_pci_list_string_store);
524
525/* PCI Parity control files */
526EDAC_PCI_ATTR(check_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
527EDAC_PCI_ATTR(panic_on_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
528EDAC_PCI_ATTR(pci_parity_count,S_IRUGO,edac_pci_int_show,NULL);
529
530/* Base Attributes of the memory ECC object */
531static struct edac_pci_dev_attribute *edac_pci_attr[] = {
532 &edac_pci_attr_check_pci_parity,
533 &edac_pci_attr_panic_on_pci_parity,
534 &edac_pci_attr_pci_parity_count,
535 &edac_pci_attr_pci_parity_whitelist,
536 &edac_pci_attr_pci_parity_blacklist,
537 NULL,
538};
539
540/* No memory to release */
541static void edac_pci_release(struct kobject *kobj)
542{
543 debugf1("EDAC PCI: " __FILE__ ": %s()\n", __func__);
544}
545
546static struct kobj_type ktype_edac_pci = {
547 .release = edac_pci_release,
548 .sysfs_ops = &edac_pci_sysfs_ops,
549 .default_attrs = (struct attribute **) edac_pci_attr,
550};
551
552/**
553 * edac_sysfs_pci_setup() - register the '..../edac/pci' kobject
554 *
555 */
556static int edac_sysfs_pci_setup(void)
557{
558 int err;
559
560 debugf1("MC: " __FILE__ ": %s()\n", __func__);
561
562 memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj));
563
564 kobject_init(&edac_pci_kobj);
565 edac_pci_kobj.parent = &edac_class.kset.kobj;
566 edac_pci_kobj.ktype = &ktype_edac_pci;
567
568 err = kobject_set_name(&edac_pci_kobj, "pci");
569 if (!err) {
570		/* Instantiate the pci object */
571 /* FIXME: maybe new sysdev_create_subdir() */
572 err = kobject_register(&edac_pci_kobj);
573 if (err)
574 debugf1("Failed to register '.../edac/pci'\n");
575 else
576 debugf1("Registered '.../edac/pci' kobject\n");
577 }
578 return err;
579}
580
581
582static void edac_sysfs_pci_teardown(void)
583{
584 debugf0("MC: " __FILE__ ": %s()\n", __func__);
585
586 kobject_unregister(&edac_pci_kobj);
587 kobject_put(&edac_pci_kobj);
588}
589
590/* EDAC sysfs CSROW data structures and methods */
591
592/* Set of more detailed csrow<id> attribute show/store functions */
593static ssize_t csrow_ch0_dimm_label_show(struct csrow_info *csrow, char *data)
594{
595 ssize_t size = 0;
596
597 if (csrow->nr_channels > 0) {
598 size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n",
599 csrow->channels[0].label);
600 }
601 return size;
602}
603
604static ssize_t csrow_ch1_dimm_label_show(struct csrow_info *csrow, char *data)
605{
606 ssize_t size = 0;
607
608	if (csrow->nr_channels > 1) {
609 size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
610 csrow->channels[1].label);
611 }
612 return size;
613}
614
615static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow,
616 const char *data, size_t size)
617{
618 ssize_t max_size = 0;
619
620 if (csrow->nr_channels > 0) {
621 max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
622 strncpy(csrow->channels[0].label, data, max_size);
623 csrow->channels[0].label[max_size] = '\0';
624 }
625 return size;
626}
627
628static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow,
629 const char *data, size_t size)
630{
631 ssize_t max_size = 0;
632
633 if (csrow->nr_channels > 1) {
634 max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
635 strncpy(csrow->channels[1].label, data, max_size);
636 csrow->channels[1].label[max_size] = '\0';
637 }
638	return size;
639}
640
641static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data)
642{
643 return sprintf(data,"%u\n", csrow->ue_count);
644}
645
646static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data)
647{
648 return sprintf(data,"%u\n", csrow->ce_count);
649}
650
651static ssize_t csrow_ch0_ce_count_show(struct csrow_info *csrow, char *data)
652{
653 ssize_t size = 0;
654
655 if (csrow->nr_channels > 0) {
656 size = sprintf(data,"%u\n", csrow->channels[0].ce_count);
657 }
658 return size;
659}
660
661static ssize_t csrow_ch1_ce_count_show(struct csrow_info *csrow, char *data)
662{
663 ssize_t size = 0;
664
665 if (csrow->nr_channels > 1) {
666 size = sprintf(data,"%u\n", csrow->channels[1].ce_count);
667 }
668 return size;
669}
670
671static ssize_t csrow_size_show(struct csrow_info *csrow, char *data)
672{
673 return sprintf(data,"%u\n", PAGES_TO_MiB(csrow->nr_pages));
674}
675
676static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data)
677{
678 return sprintf(data,"%s\n", mem_types[csrow->mtype]);
679}
680
681static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data)
682{
683 return sprintf(data,"%s\n", dev_types[csrow->dtype]);
684}
685
686static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data)
687{
688 return sprintf(data,"%s\n", edac_caps[csrow->edac_mode]);
689}
690
691struct csrowdev_attribute {
692 struct attribute attr;
693 ssize_t (*show)(struct csrow_info *,char *);
694 ssize_t (*store)(struct csrow_info *, const char *,size_t);
695};
696
697#define to_csrow(k) container_of(k, struct csrow_info, kobj)
698#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
699
700/* Set of show/store higher level functions for csrow objects */
701static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr,
702 char *buffer)
703{
704 struct csrow_info *csrow = to_csrow(kobj);
705 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
706
707 if (csrowdev_attr->show)
708 return csrowdev_attr->show(csrow, buffer);
709 return -EIO;
710}
711
712static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
713 const char *buffer, size_t count)
714{
715 struct csrow_info *csrow = to_csrow(kobj);
716 struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr);
717
718 if (csrowdev_attr->store)
719 return csrowdev_attr->store(csrow, buffer, count);
720 return -EIO;
721}
722
723static struct sysfs_ops csrowfs_ops = {
724 .show = csrowdev_show,
725 .store = csrowdev_store
726};
727
728#define CSROWDEV_ATTR(_name,_mode,_show,_store) \
729struct csrowdev_attribute attr_##_name = { \
730 .attr = {.name = __stringify(_name), .mode = _mode }, \
731 .show = _show, \
732 .store = _store, \
733};
734
735/* cwrow<id>/attribute files */
736CSROWDEV_ATTR(size_mb,S_IRUGO,csrow_size_show,NULL);
737CSROWDEV_ATTR(dev_type,S_IRUGO,csrow_dev_type_show,NULL);
738CSROWDEV_ATTR(mem_type,S_IRUGO,csrow_mem_type_show,NULL);
739CSROWDEV_ATTR(edac_mode,S_IRUGO,csrow_edac_mode_show,NULL);
740CSROWDEV_ATTR(ue_count,S_IRUGO,csrow_ue_count_show,NULL);
741CSROWDEV_ATTR(ce_count,S_IRUGO,csrow_ce_count_show,NULL);
742CSROWDEV_ATTR(ch0_ce_count,S_IRUGO,csrow_ch0_ce_count_show,NULL);
743CSROWDEV_ATTR(ch1_ce_count,S_IRUGO,csrow_ch1_ce_count_show,NULL);
744
745/* control/attribute files */
746CSROWDEV_ATTR(ch0_dimm_label,S_IRUGO|S_IWUSR,
747 csrow_ch0_dimm_label_show,
748 csrow_ch0_dimm_label_store);
749CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR,
750 csrow_ch1_dimm_label_show,
751 csrow_ch1_dimm_label_store);
752
753
754/* Attributes of the CSROW<id> object */
755static struct csrowdev_attribute *csrow_attr[] = {
756 &attr_dev_type,
757 &attr_mem_type,
758 &attr_edac_mode,
759 &attr_size_mb,
760 &attr_ue_count,
761 &attr_ce_count,
762 &attr_ch0_ce_count,
763 &attr_ch1_ce_count,
764 &attr_ch0_dimm_label,
765 &attr_ch1_dimm_label,
766 NULL,
767};
768
769
770/* No memory to release */
771static void edac_csrow_instance_release(struct kobject *kobj)
772{
773 debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
774}
775
776static struct kobj_type ktype_csrow = {
777 .release = edac_csrow_instance_release,
778 .sysfs_ops = &csrowfs_ops,
779 .default_attrs = (struct attribute **) csrow_attr,
780};
781
782/* Create a CSROW object under the specified edac_mc_device */
783static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
784 struct csrow_info *csrow, int index )
785{
786 int err = 0;
787
788 debugf0("MC: " __FILE__ ": %s()\n", __func__);
789
790 memset(&csrow->kobj, 0, sizeof(csrow->kobj));
791
792 /* generate ..../edac/mc/mc<id>/csrow<index> */
793
794 kobject_init(&csrow->kobj);
795 csrow->kobj.parent = edac_mci_kobj;
796 csrow->kobj.ktype = &ktype_csrow;
797
798 /* name this instance of csrow<id> */
799 err = kobject_set_name(&csrow->kobj,"csrow%d",index);
800 if (!err) {
801		/* Instantiate the csrow object */
802 err = kobject_register(&csrow->kobj);
803 if (err)
804 debugf0("Failed to register CSROW%d\n",index);
805 else
806 debugf0("Registered CSROW%d\n",index);
807 }
808
809 return err;
810}
811
812/* sysfs data structures and methods for the MCI kobjects */
813
814static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
815 const char *data, size_t count )
816{
817 int row, chan;
818
819 mci->ue_noinfo_count = 0;
820 mci->ce_noinfo_count = 0;
821 mci->ue_count = 0;
822 mci->ce_count = 0;
823 for (row = 0; row < mci->nr_csrows; row++) {
824 struct csrow_info *ri = &mci->csrows[row];
825
826 ri->ue_count = 0;
827 ri->ce_count = 0;
828 for (chan = 0; chan < ri->nr_channels; chan++)
829 ri->channels[chan].ce_count = 0;
830 }
831 mci->start_time = jiffies;
832
833 return count;
834}
835
836static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
837{
838 return sprintf(data,"%d\n", mci->ue_count);
839}
840
841static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
842{
843 return sprintf(data,"%d\n", mci->ce_count);
844}
845
846static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
847{
848 return sprintf(data,"%d\n", mci->ce_noinfo_count);
849}
850
851static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
852{
853 return sprintf(data,"%d\n", mci->ue_noinfo_count);
854}
855
856static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
857{
858 return sprintf(data,"%ld\n", (jiffies - mci->start_time) / HZ);
859}
860
861static ssize_t mci_mod_name_show(struct mem_ctl_info *mci, char *data)
862{
863 return sprintf(data,"%s %s\n", mci->mod_name, mci->mod_ver);
864}
865
866static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
867{
868 return sprintf(data,"%s\n", mci->ctl_name);
869}
870
871static int mci_output_edac_cap(char *buf, unsigned long edac_cap)
872{
873 char *p = buf;
874 int bit_idx;
875
876 for (bit_idx = 0; bit_idx < 8 * sizeof(edac_cap); bit_idx++) {
877 if ((edac_cap >> bit_idx) & 0x1)
878 p += sprintf(p, "%s ", edac_caps[bit_idx]);
879 }
880
881 return p - buf;
882}
883
884static ssize_t mci_edac_capability_show(struct mem_ctl_info *mci, char *data)
885{
886 char *p = data;
887
888 p += mci_output_edac_cap(p,mci->edac_ctl_cap);
889 p += sprintf(p, "\n");
890
891 return p - data;
892}
893
894static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci,
895 char *data)
896{
897 char *p = data;
898
899 p += mci_output_edac_cap(p,mci->edac_cap);
900 p += sprintf(p, "\n");
901
902 return p - data;
903}
904
905static int mci_output_mtype_cap(char *buf, unsigned long mtype_cap)
906{
907 char *p = buf;
908 int bit_idx;
909
910 for (bit_idx = 0; bit_idx < 8 * sizeof(mtype_cap); bit_idx++) {
911 if ((mtype_cap >> bit_idx) & 0x1)
912 p += sprintf(p, "%s ", mem_types[bit_idx]);
913 }
914
915 return p - buf;
916}
917
918static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, char *data)
919{
920 char *p = data;
921
922 p += mci_output_mtype_cap(p,mci->mtype_cap);
923 p += sprintf(p, "\n");
924
925 return p - data;
926}
927
928static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
929{
930 int total_pages, csrow_idx;
931
932 for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
933 csrow_idx++) {
934 struct csrow_info *csrow = &mci->csrows[csrow_idx];
935
936 if (!csrow->nr_pages)
937 continue;
938 total_pages += csrow->nr_pages;
939 }
940
941 return sprintf(data,"%u\n", PAGES_TO_MiB(total_pages));
942}
943
944struct mcidev_attribute {
945 struct attribute attr;
946 ssize_t (*show)(struct mem_ctl_info *,char *);
947 ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
948};
949
950#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
951#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr)
952
953static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
954 char *buffer)
955{
956 struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
957 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
958
959 if (mcidev_attr->show)
960 return mcidev_attr->show(mem_ctl_info, buffer);
961 return -EIO;
962}
963
964static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
965 const char *buffer, size_t count)
966{
967 struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
968 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
969
970 if (mcidev_attr->store)
971 return mcidev_attr->store(mem_ctl_info, buffer, count);
972 return -EIO;
973}
974
975static struct sysfs_ops mci_ops = {
976 .show = mcidev_show,
977 .store = mcidev_store
978};
979
980#define MCIDEV_ATTR(_name,_mode,_show,_store) \
981struct mcidev_attribute mci_attr_##_name = { \
982 .attr = {.name = __stringify(_name), .mode = _mode }, \
983 .show = _show, \
984 .store = _store, \
985};
986
987/* Control file */
988MCIDEV_ATTR(reset_counters,S_IWUSR,NULL,mci_reset_counters_store);
989
990/* Attribute files */
991MCIDEV_ATTR(mc_name,S_IRUGO,mci_ctl_name_show,NULL);
992MCIDEV_ATTR(module_name,S_IRUGO,mci_mod_name_show,NULL);
993MCIDEV_ATTR(edac_capability,S_IRUGO,mci_edac_capability_show,NULL);
994MCIDEV_ATTR(size_mb,S_IRUGO,mci_size_mb_show,NULL);
995MCIDEV_ATTR(seconds_since_reset,S_IRUGO,mci_seconds_show,NULL);
996MCIDEV_ATTR(ue_noinfo_count,S_IRUGO,mci_ue_noinfo_show,NULL);
997MCIDEV_ATTR(ce_noinfo_count,S_IRUGO,mci_ce_noinfo_show,NULL);
998MCIDEV_ATTR(ue_count,S_IRUGO,mci_ue_count_show,NULL);
999MCIDEV_ATTR(ce_count,S_IRUGO,mci_ce_count_show,NULL);
1000MCIDEV_ATTR(edac_current_capability,S_IRUGO,
1001 mci_edac_current_capability_show,NULL);
1002MCIDEV_ATTR(supported_mem_type,S_IRUGO,
1003 mci_supported_mem_type_show,NULL);
1004
1005
1006static struct mcidev_attribute *mci_attr[] = {
1007 &mci_attr_reset_counters,
1008 &mci_attr_module_name,
1009 &mci_attr_mc_name,
1010 &mci_attr_edac_capability,
1011 &mci_attr_edac_current_capability,
1012 &mci_attr_supported_mem_type,
1013 &mci_attr_size_mb,
1014 &mci_attr_seconds_since_reset,
1015 &mci_attr_ue_noinfo_count,
1016 &mci_attr_ce_noinfo_count,
1017 &mci_attr_ue_count,
1018 &mci_attr_ce_count,
1019 NULL
1020};
1021
1022
1023/*
1024 * Release of a MC controlling instance
1025 */
1026static void edac_mci_instance_release(struct kobject *kobj)
1027{
1028 struct mem_ctl_info *mci;
1029 mci = container_of(kobj,struct mem_ctl_info,edac_mci_kobj);
1030
1031 debugf0("MC: " __FILE__ ": %s() idx=%d calling kfree\n",
1032 __func__, mci->mc_idx);
1033
1034 kfree(mci);
1035}
1036
1037static struct kobj_type ktype_mci = {
1038 .release = edac_mci_instance_release,
1039 .sysfs_ops = &mci_ops,
1040 .default_attrs = (struct attribute **) mci_attr,
1041};
1042
1043#define EDAC_DEVICE_SYMLINK "device"
1044
1045/*
1046 * Create a new Memory Controller kobject instance,
1047 * mc<id> under the 'mc' directory
1048 *
1049 * Return:
1050 * 0 Success
1051 * !0 Failure
1052 */
1053static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1054{
1055 int i;
1056 int err;
1057 struct csrow_info *csrow;
1058 struct kobject *edac_mci_kobj=&mci->edac_mci_kobj;
1059
1060 debugf0("MC: " __FILE__ ": %s() idx=%d\n", __func__, mci->mc_idx);
1061
1062 memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj));
1063 kobject_init(edac_mci_kobj);
1064
1065 /* set the name of the mc<id> object */
1066 err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx);
1067 if (err)
1068 return err;
1069
1070 /* link to our parent the '..../edac/mc' object */
1071 edac_mci_kobj->parent = &edac_memctrl_kobj;
1072 edac_mci_kobj->ktype = &ktype_mci;
1073
1074 /* register the mc<id> kobject */
1075 err = kobject_register(edac_mci_kobj);
1076 if (err)
1077 return err;
1078
1079 /* create a symlink for the device */
1080 err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj,
1081 EDAC_DEVICE_SYMLINK);
1082 if (err) {
1083 kobject_unregister(edac_mci_kobj);
1084 return err;
1085 }
1086
1087 /* Make directories for each CSROW object
1088 * under the mc<id> kobject
1089 */
1090 for (i = 0; i < mci->nr_csrows; i++) {
1091
1092 csrow = &mci->csrows[i];
1093
1094 /* Only expose populated CSROWs */
1095 if (csrow->nr_pages > 0) {
1096 err = edac_create_csrow_object(edac_mci_kobj,csrow,i);
1097 if (err)
1098 goto fail;
1099 }
1100 }
1101
1102 /* Mark this MCI instance as having sysfs entries */
1103 mci->sysfs_active = MCI_SYSFS_ACTIVE;
1104
1105 return 0;
1106
1107
1108	/* CSROW error: back out what has already been registered */
1109fail:
1110	for (i--; i >= 0; i--) {
1111		if (mci->csrows[i].nr_pages > 0) {
1112 kobject_unregister(&mci->csrows[i].kobj);
1113 kobject_put(&mci->csrows[i].kobj);
1114 }
1115 }
1116
1117 kobject_unregister(edac_mci_kobj);
1118 kobject_put(edac_mci_kobj);
1119
1120 return err;
1121}
1122
1123/*
1124 * remove a Memory Controller instance
1125 */
1126static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
1127{
1128 int i;
1129
1130 debugf0("MC: " __FILE__ ": %s()\n", __func__);
1131
1132 /* remove all csrow kobjects */
1133 for (i = 0; i < mci->nr_csrows; i++) {
1134 if (mci->csrows[i].nr_pages > 0) {
1135 kobject_unregister(&mci->csrows[i].kobj);
1136 kobject_put(&mci->csrows[i].kobj);
1137 }
1138 }
1139
1140 sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
1141
1142 kobject_unregister(&mci->edac_mci_kobj);
1143 kobject_put(&mci->edac_mci_kobj);
1144}
1145
1146/* END OF sysfs data and methods */
1147
1148#ifdef CONFIG_EDAC_DEBUG
1149
1150EXPORT_SYMBOL(edac_mc_dump_channel);
1151
1152void edac_mc_dump_channel(struct channel_info *chan)
1153{
1154 debugf4("\tchannel = %p\n", chan);
1155 debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
1156 debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
1157 debugf4("\tchannel->label = '%s'\n", chan->label);
1158 debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
1159}
1160
1161
1162EXPORT_SYMBOL(edac_mc_dump_csrow);
1163
1164void edac_mc_dump_csrow(struct csrow_info *csrow)
1165{
1166 debugf4("\tcsrow = %p\n", csrow);
1167 debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
1168 debugf4("\tcsrow->first_page = 0x%lx\n",
1169 csrow->first_page);
1170 debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
1171 debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
1172 debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
1173 debugf4("\tcsrow->nr_channels = %d\n",
1174 csrow->nr_channels);
1175 debugf4("\tcsrow->channels = %p\n", csrow->channels);
1176 debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
1177}
1178
1179
1180EXPORT_SYMBOL(edac_mc_dump_mci);
1181
1182void edac_mc_dump_mci(struct mem_ctl_info *mci)
1183{
1184 debugf3("\tmci = %p\n", mci);
1185 debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
1186 debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
1187 debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
1188 debugf4("\tmci->edac_check = %p\n", mci->edac_check);
1189 debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
1190 mci->nr_csrows, mci->csrows);
1191 debugf3("\tpdev = %p\n", mci->pdev);
1192 debugf3("\tmod_name:ctl_name = %s:%s\n",
1193 mci->mod_name, mci->ctl_name);
1194 debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
1195}
1196
1197
1198#endif /* CONFIG_EDAC_DEBUG */
1199
1200/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
1201 * Adjust 'ptr' so that its alignment is at least as stringent as what the
1202 * compiler would provide for X and return the aligned result.
1203 *
1204 * If 'size' is a constant, the compiler will optimize this whole function
1205 * down to a no-op or a small amount of alignment arithmetic on 'ptr'.
1206 */
1207static inline char *align_ptr(void *ptr, unsigned size)
1208{
1209 unsigned align, r;
1210
1211 /* Here we assume that the alignment of a "long long" is the most
1212 * stringent alignment that the compiler will ever provide by default.
1213 * As far as I know, this is a reasonable assumption.
1214 */
1215 if (size > sizeof(long))
1216 align = sizeof(long long);
1217 else if (size > sizeof(int))
1218 align = sizeof(long);
1219 else if (size > sizeof(short))
1220 align = sizeof(int);
1221 else if (size > sizeof(char))
1222 align = sizeof(short);
1223 else
1224 return (char *) ptr;
1225
1226	r = ((unsigned long) ptr) % align;
1227
1228 if (r == 0)
1229 return (char *) ptr;
1230
1231 return (char *) (((unsigned long) ptr) + align - r);
1232}
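
A concrete case (assuming LP64 type sizes): for ptr == 6 and size == 4 the helper selects align == sizeof(int) == 4, computes r == 2, and returns 8, the next 4-byte boundary; an already-aligned pointer comes back unchanged. The same arithmetic, stand-alone:

	#include <stdio.h>

	/* The alignment step on a made-up offset; assumes LP64 sizes. */
	int main(void)
	{
		unsigned long ptr = 6, align = 4;	/* align for a 4-byte item */
		unsigned long r = ptr % align;

		printf("%lu\n", r ? ptr + align - r : ptr);	/* prints 8 */
		return 0;
	}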
1233
1234
1235EXPORT_SYMBOL(edac_mc_alloc);
1236
1237/**
1238 * edac_mc_alloc: Allocate a struct mem_ctl_info structure
1239 * @size_pvt: size of private storage needed
1240 * @nr_csrows: Number of CSROWS needed for this MC
1241 * @nr_chans: Number of channels for the MC
1242 *
1243 * Everything is kmalloc'ed as one big chunk - more efficient.
1244 * Only can be used if all structures have the same lifetime - otherwise
1245 * you have to allocate and initialize your own structures.
1246 *
1247 * Use edac_mc_free() to free mc structures allocated by this function.
1248 *
1249 * Returns:
1250 * NULL allocation failed
1251 * struct mem_ctl_info pointer
1252 */
1253struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
1254 unsigned nr_chans)
1255{
1256 struct mem_ctl_info *mci;
1257 struct csrow_info *csi, *csrow;
1258 struct channel_info *chi, *chp, *chan;
1259 void *pvt;
1260 unsigned size;
1261 int row, chn;
1262
1263 /* Figure out the offsets of the various items from the start of an mc
1264 * structure. We want the alignment of each item to be at least as
1265 * stringent as what the compiler would provide if we could simply
1266 * hardcode everything into a single struct.
1267 */
1268 mci = (struct mem_ctl_info *) 0;
1269 csi = (struct csrow_info *)align_ptr(&mci[1], sizeof(*csi));
1270 chi = (struct channel_info *)
1271 align_ptr(&csi[nr_csrows], sizeof(*chi));
1272 pvt = align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
1273 size = ((unsigned long) pvt) + sz_pvt;
1274
1275 if ((mci = kmalloc(size, GFP_KERNEL)) == NULL)
1276 return NULL;
1277
1278 /* Adjust pointers so they point within the memory we just allocated
1279 * rather than an imaginary chunk of memory located at address 0.
1280 */
1281 csi = (struct csrow_info *) (((char *) mci) + ((unsigned long) csi));
1282 chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
1283 pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;
1284
1285 memset(mci, 0, size); /* clear all fields */
1286
1287 mci->csrows = csi;
1288 mci->pvt_info = pvt;
1289 mci->nr_csrows = nr_csrows;
1290
1291 for (row = 0; row < nr_csrows; row++) {
1292 csrow = &csi[row];
1293 csrow->csrow_idx = row;
1294 csrow->mci = mci;
1295 csrow->nr_channels = nr_chans;
1296 chp = &chi[row * nr_chans];
1297 csrow->channels = chp;
1298
1299 for (chn = 0; chn < nr_chans; chn++) {
1300 chan = &chp[chn];
1301 chan->chan_idx = chn;
1302 chan->csrow = csrow;
1303 }
1304 }
1305
1306 return mci;
1307}
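
The same one-chunk pattern in miniature (user-space sketch; the struct names are stand-ins, and the alignment helper is omitted on the assumption that sizeof(struct ctl) is already suitably aligned):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct ctl { long x; };
	struct row { long y; };

	int main(void)
	{
		unsigned nr_rows = 4;
		size_t row_off = sizeof(struct ctl);	/* rows follow the ctl */
		size_t total = row_off + nr_rows * sizeof(struct row);
		struct ctl *ctl = malloc(total);	/* one allocation */
		struct row *rows;

		if (ctl == NULL)
			return 1;
		memset(ctl, 0, total);			/* clear all fields */
		rows = (struct row *) ((char *) ctl + row_off);
		printf("one chunk: ctl=%p rows=%p\n", (void *) ctl, (void *) rows);
		free(ctl);		/* one free releases everything */
		return 0;
	}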
1308
1309
1310EXPORT_SYMBOL(edac_mc_free);
1311
1312/**
1313 * edac_mc_free: Free a previously allocated 'mci' structure
1314 * @mci: pointer to a struct mem_ctl_info structure
1315 *
1316 * Free up a previously allocated mci structure
1317 * A MCI structure can be in 2 states after being allocated
1318 * by edac_mc_alloc().
1319 * 1) Allocated in a MC driver's probe, but not yet committed
1320 * 2) Allocated and committed, by a call to edac_mc_add_mc()
1321 * edac_mc_add_mc() is the function that adds the sysfs entries
1322 * thus, this free function must determine which state the 'mci'
1323 * structure is in, then either free it directly or
1324 * perform kobject cleanup by calling edac_remove_sysfs_mci_device().
1325 *
1326 * VOID Return
1327 */
1328void edac_mc_free(struct mem_ctl_info *mci)
1329{
1330 /* only if sysfs entries for this mci instance exist
1331 * do we remove them and defer the actual kfree via
1332 * the kobject 'release()' callback.
1333 *
1334 * Otherwise, do a straight kfree now.
1335 */
1336 if (mci->sysfs_active == MCI_SYSFS_ACTIVE)
1337 edac_remove_sysfs_mci_device(mci);
1338 else
1339 kfree(mci);
1340}
1341
1342
1343
1344EXPORT_SYMBOL(edac_mc_find_mci_by_pdev);
1345
1346struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev)
1347{
1348 struct mem_ctl_info *mci;
1349 struct list_head *item;
1350
1351 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1352
1353 list_for_each(item, &mc_devices) {
1354 mci = list_entry(item, struct mem_ctl_info, link);
1355
1356 if (mci->pdev == pdev)
1357 return mci;
1358 }
1359
1360 return NULL;
1361}
1362
1363static int add_mc_to_global_list (struct mem_ctl_info *mci)
1364{
1365 struct list_head *item, *insert_before;
1366 struct mem_ctl_info *p;
1367 int i;
1368
1369 if (list_empty(&mc_devices)) {
1370 mci->mc_idx = 0;
1371 insert_before = &mc_devices;
1372 } else {
1373 if (edac_mc_find_mci_by_pdev(mci->pdev)) {
1374 printk(KERN_WARNING
1375 "EDAC MC: %s (%s) %s %s already assigned %d\n",
1376 mci->pdev->dev.bus_id, pci_name(mci->pdev),
1377 mci->mod_name, mci->ctl_name, mci->mc_idx);
1378 return 1;
1379 }
1380
1381 insert_before = NULL;
1382 i = 0;
1383
1384 list_for_each(item, &mc_devices) {
1385 p = list_entry(item, struct mem_ctl_info, link);
1386
1387 if (p->mc_idx != i) {
1388 insert_before = item;
1389 break;
1390 }
1391
1392 i++;
1393 }
1394
1395 mci->mc_idx = i;
1396
1397 if (insert_before == NULL)
1398 insert_before = &mc_devices;
1399 }
1400
1401 list_add_tail_rcu(&mci->link, insert_before);
1402 return 0;
1403}
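/* Example of the index assignment above: if the list currently holds
 * controllers with mc_idx 0, 1 and 3 (index 2 was freed by an earlier
 * driver unload), the scan stops at the node carrying mc_idx 3, the new
 * controller takes mc_idx 2, and it is linked in just before that node,
 * so freed indices are reused and the list stays sorted.
 */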
1404
1405
1406
1407EXPORT_SYMBOL(edac_mc_add_mc);
1408
1409/**
1410 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list
1411 * @mci: pointer to the mci structure to be added to the list
1412 *
1413 * Return:
1414 * 0 Success
1415 * !0 Failure
1416 */
1417
1418/* FIXME - should a warning be printed if no error detection? correction? */
1419int edac_mc_add_mc(struct mem_ctl_info *mci)
1420{
1421 int rc = 1;
1422
1423 debugf0("MC: " __FILE__ ": %s()\n", __func__);
1424#ifdef CONFIG_EDAC_DEBUG
1425 if (edac_debug_level >= 3)
1426 edac_mc_dump_mci(mci);
1427 if (edac_debug_level >= 4) {
1428 int i;
1429
1430 for (i = 0; i < mci->nr_csrows; i++) {
1431 int j;
1432 edac_mc_dump_csrow(&mci->csrows[i]);
1433 for (j = 0; j < mci->csrows[i].nr_channels; j++)
1434 edac_mc_dump_channel(&mci->csrows[i].
1435 channels[j]);
1436 }
1437 }
1438#endif
1439 down(&mem_ctls_mutex);
1440
1441 if (add_mc_to_global_list(mci))
1442 goto finish;
1443
1444 /* set load time so that error rate can be tracked */
1445 mci->start_time = jiffies;
1446
1447 if (edac_create_sysfs_mci_device(mci)) {
1448 printk(KERN_WARNING
1449 "EDAC MC%d: failed to create sysfs device\n",
1450 mci->mc_idx);
1451 /* FIXME - should there be an error code and unwind? */
1452 goto finish;
1453 }
1454
1455 /* Report action taken */
1456 printk(KERN_INFO
1457 "EDAC MC%d: Giving out device to %s %s: PCI %s\n",
1458 mci->mc_idx, mci->mod_name, mci->ctl_name,
1459 pci_name(mci->pdev));
1460
1461
1462 rc = 0;
1463
1464finish:
1465 up(&mem_ctls_mutex);
1466 return rc;
1467}
1468
1469
1470
1471static void complete_mc_list_del (struct rcu_head *head)
1472{
1473 struct mem_ctl_info *mci;
1474
1475 mci = container_of(head, struct mem_ctl_info, rcu);
1476 INIT_LIST_HEAD(&mci->link);
1477 complete(&mci->complete);
1478}
1479
1480static void del_mc_from_global_list (struct mem_ctl_info *mci)
1481{
1482 list_del_rcu(&mci->link);
1483 init_completion(&mci->complete);
1484 call_rcu(&mci->rcu, complete_mc_list_del);
1485 wait_for_completion(&mci->complete);
1486}
1487
1488EXPORT_SYMBOL(edac_mc_del_mc);
1489
1490/**
1491 * edac_mc_del_mc: Remove the specified mci structure from global list
1492 * @mci: Pointer to struct mem_ctl_info structure
1493 *
1494 * Returns:
1495 * 0 Success
1496 * 1 Failure
1497 */
1498int edac_mc_del_mc(struct mem_ctl_info *mci)
1499{
1500 int rc = 1;
1501
1502 debugf0("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
1503 down(&mem_ctls_mutex);
1504 del_mc_from_global_list(mci);
1505 printk(KERN_INFO
1506 "EDAC MC%d: Removed device %d for %s %s: PCI %s\n",
1507 mci->mc_idx, mci->mc_idx, mci->mod_name, mci->ctl_name,
1508 pci_name(mci->pdev));
1509 rc = 0;
1510 up(&mem_ctls_mutex);
1511
1512 return rc;
1513}
1514
1515
1516EXPORT_SYMBOL(edac_mc_scrub_block);
1517
1518void edac_mc_scrub_block(unsigned long page, unsigned long offset,
1519 u32 size)
1520{
1521 struct page *pg;
1522 void *virt_addr;
1523 unsigned long flags = 0;
1524
1525 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1526
1527 /* ECC error page was not in our memory. Ignore it. */
1528 if(!pfn_valid(page))
1529 return;
1530
1531 /* Find the actual page structure then map it and fix */
1532 pg = pfn_to_page(page);
1533
1534 if (PageHighMem(pg))
1535 local_irq_save(flags);
1536
1537 virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
1538
1539 /* Perform architecture specific atomic scrub operation */
1540 atomic_scrub(virt_addr + offset, size);
1541
1542 /* Unmap and complete */
1543 kunmap_atomic(virt_addr, KM_BOUNCE_READ);
1544
1545 if (PageHighMem(pg))
1546 local_irq_restore(flags);
1547}
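(The heavy lifting here is done by the per-architecture atomic_scrub() from
asm/edac.h; as a sketch only, the i386 flavor does a locked add of zero to
each word so the corrected data and fresh check bits are written back to
DRAM:)

	/* sketch of an i386-style atomic_scrub() - see asm/edac.h for
	 * the authoritative version
	 */
	static inline void atomic_scrub(void *va, u32 size)
	{
		unsigned long *virt_addr = va;
		u32 i;

		for (i = 0; i < size / 4; i++, virt_addr++)
			__asm__ __volatile__("lock; addl $0, %0"
					     : "+m" (*virt_addr));
	}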
1548
1549
1550/* FIXME - should return -1 */
1551EXPORT_SYMBOL(edac_mc_find_csrow_by_page);
1552
1553int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
1554 unsigned long page)
1555{
1556 struct csrow_info *csrows = mci->csrows;
1557 int row, i;
1558
1559 debugf1("MC%d: " __FILE__ ": %s(): 0x%lx\n", mci->mc_idx, __func__,
1560 page);
1561 row = -1;
1562
1563 for (i = 0; i < mci->nr_csrows; i++) {
1564 struct csrow_info *csrow = &csrows[i];
1565
1566 if (csrow->nr_pages == 0)
1567 continue;
1568
1569 debugf3("MC%d: " __FILE__
1570 ": %s(): first(0x%lx) page(0x%lx)"
1571 " last(0x%lx) mask(0x%lx)\n", mci->mc_idx,
1572 __func__, csrow->first_page, page,
1573 csrow->last_page, csrow->page_mask);
1574
1575 if ((page >= csrow->first_page) &&
1576 (page <= csrow->last_page) &&
1577 ((page & csrow->page_mask) ==
1578 (csrow->first_page & csrow->page_mask))) {
1579 row = i;
1580 break;
1581 }
1582 }
1583
1584 if (row == -1)
1585 printk(KERN_ERR
1586 "EDAC MC%d: could not look up page error address %lx\n",
1587 mci->mc_idx, (unsigned long) page);
1588
1589 return row;
1590}
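/* Worked example of the page_mask test above (values are illustrative):
 * two csrows interleaved on alternate pages can both span
 * first_page = 0x0 ... last_page = 0xffff with page_mask = 0x1, csrow 0
 * holding the even pages (first_page 0x0) and csrow 1 the odd ones
 * (first_page 0x1). Page 0x1234 satisfies
 * (0x1234 & 0x1) == (0x0 & 0x1) and so resolves to csrow 0.
 */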
1591
1592
1593EXPORT_SYMBOL(edac_mc_handle_ce);
1594
1595/* FIXME - settable log (warning/emerg) levels */
1596/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
1597void edac_mc_handle_ce(struct mem_ctl_info *mci,
1598 unsigned long page_frame_number,
1599 unsigned long offset_in_page,
1600 unsigned long syndrome, int row, int channel,
1601 const char *msg)
1602{
1603 unsigned long remapped_page;
1604
1605 debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
1606
1607 /* FIXME - maybe make panic on INTERNAL ERROR an option */
1608 if (row >= mci->nr_csrows || row < 0) {
1609 /* something is wrong */
1610 printk(KERN_ERR
1611 "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
1612 mci->mc_idx, row, mci->nr_csrows);
1613 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1614 return;
1615 }
1616 if (channel >= mci->csrows[row].nr_channels || channel < 0) {
1617 /* something is wrong */
1618 printk(KERN_ERR
1619 "EDAC MC%d: INTERNAL ERROR: channel out of range "
1620 "(%d >= %d)\n",
1621 mci->mc_idx, channel, mci->csrows[row].nr_channels);
1622 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1623 return;
1624 }
1625
1626 if (log_ce)
1627 /* FIXME - put in DIMM location */
1628 printk(KERN_WARNING
1629 "EDAC MC%d: CE page 0x%lx, offset 0x%lx,"
1630 " grain %d, syndrome 0x%lx, row %d, channel %d,"
1631 " label \"%s\": %s\n", mci->mc_idx,
1632 page_frame_number, offset_in_page,
1633 mci->csrows[row].grain, syndrome, row, channel,
1634 mci->csrows[row].channels[channel].label, msg);
1635
1636 mci->ce_count++;
1637 mci->csrows[row].ce_count++;
1638 mci->csrows[row].channels[channel].ce_count++;
1639
1640 if (mci->scrub_mode & SCRUB_SW_SRC) {
1641 /*
1642 * Some MC's can remap memory so that it is still available
1643 * at a different address when PCI devices map into memory.
1644 * MC's that can't do this lose the memory where PCI devices
1645 * are mapped. This mapping is MC dependant and so we call
1646 * back into the MC driver for it to map the MC page to
1647 * a physical (CPU) page which can then be mapped to a virtual
1648 * page - which can then be scrubbed.
1649 */
1650 remapped_page = mci->ctl_page_to_phys ?
1651 mci->ctl_page_to_phys(mci, page_frame_number) :
1652 page_frame_number;
1653
1654 edac_mc_scrub_block(remapped_page, offset_in_page,
1655 mci->csrows[row].grain);
1656 }
1657}
1658
1659
1660EXPORT_SYMBOL(edac_mc_handle_ce_no_info);
1661
1662void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
1663 const char *msg)
1664{
1665 if (log_ce)
1666 printk(KERN_WARNING
1667 "EDAC MC%d: CE - no information available: %s\n",
1668 mci->mc_idx, msg);
1669 mci->ce_noinfo_count++;
1670 mci->ce_count++;
1671}
1672
1673
1674EXPORT_SYMBOL(edac_mc_handle_ue);
1675
1676void edac_mc_handle_ue(struct mem_ctl_info *mci,
1677 unsigned long page_frame_number,
1678 unsigned long offset_in_page, int row,
1679 const char *msg)
1680{
1681 int len = EDAC_MC_LABEL_LEN * 4;
1682 char labels[len + 1];
1683 char *pos = labels;
1684 int chan;
1685 int chars;
1686
1687 debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
1688
1689 /* FIXME - maybe make panic on INTERNAL ERROR an option */
1690 if (row >= mci->nr_csrows || row < 0) {
1691 /* something is wrong */
1692 printk(KERN_ERR
1693 "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
1694 mci->mc_idx, row, mci->nr_csrows);
1695 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
1696 return;
1697 }
1698
1699 chars = snprintf(pos, len + 1, "%s",
1700 mci->csrows[row].channels[0].label);
1701 len -= chars;
1702 pos += chars;
1703 for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
1704 chan++) {
1705 chars = snprintf(pos, len + 1, ":%s",
1706 mci->csrows[row].channels[chan].label);
1707 len -= chars;
1708 pos += chars;
1709 }
1710
1711 if (log_ue)
1712 printk(KERN_EMERG
1713 "EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d,"
1714 " labels \"%s\": %s\n", mci->mc_idx,
1715 page_frame_number, offset_in_page,
1716 mci->csrows[row].grain, row, labels, msg);
1717
1718 if (panic_on_ue)
1719 panic
1720 ("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d,"
1721 " labels \"%s\": %s\n", mci->mc_idx,
1722 page_frame_number, offset_in_page,
1723 mci->csrows[row].grain, row, labels, msg);
1724
1725 mci->ue_count++;
1726 mci->csrows[row].ue_count++;
1727}
1728
1729
1730EXPORT_SYMBOL(edac_mc_handle_ue_no_info);
1731
1732void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
1733 const char *msg)
1734{
1735 if (panic_on_ue)
1736 panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
1737
1738 if (log_ue)
1739 printk(KERN_WARNING
1740 "EDAC MC%d: UE - no information available: %s\n",
1741 mci->mc_idx, msg);
1742 mci->ue_noinfo_count++;
1743 mci->ue_count++;
1744}
1745
1746
1747#ifdef CONFIG_PCI
1748
1749static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
1750{
1751 int where;
1752 u16 status;
1753
1754 where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
1755 pci_read_config_word(dev, where, &status);
1756
1757 /* If we get back 0xFFFF then we must suspect that the card has been pulled but
1758 the Linux PCI layer has not yet finished cleaning up. We don't want to report
1759 on such devices */
1760
1761 if (status == 0xFFFF) {
1762 u32 sanity;
1763 pci_read_config_dword(dev, 0, &sanity);
1764 if (sanity == 0xFFFFFFFF)
1765 return 0;
1766 }
1767 status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
1768 PCI_STATUS_PARITY;
1769
1770 if (status)
1771 /* reset only the bits we are interested in */
1772 pci_write_config_word(dev, where, status);
1773
1774 return status;
1775}
1776
1777typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
1778
1779/* Clear any PCI parity errors logged by this device. */
1780static void edac_pci_dev_parity_clear( struct pci_dev *dev )
1781{
1782 u8 header_type;
1783
1784 get_pci_parity_status(dev, 0);
1785
1786 /* read the device TYPE, looking for bridges */
1787 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
1788
1789 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
1790 get_pci_parity_status(dev, 1);
1791}
1792
1793/*
1794 * PCI Parity polling
1795 *
1796 */
1797static void edac_pci_dev_parity_test(struct pci_dev *dev)
1798{
1799 u16 status;
1800 u8 header_type;
1801
1802 /* read the STATUS register on this device
1803 */
1804 status = get_pci_parity_status(dev, 0);
1805
1806 debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id );
1807
1808 /* check the status reg for errors */
1809 if (status) {
1810 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1811 printk(KERN_CRIT
1812 "EDAC PCI- "
1813 "Signaled System Error on %s\n",
1814 pci_name (dev));
1815
1816 if (status & (PCI_STATUS_PARITY)) {
1817 printk(KERN_CRIT
1818 "EDAC PCI- "
1819 "Master Data Parity Error on %s\n",
1820 pci_name (dev));
1821
1822 atomic_inc(&pci_parity_count);
1823 }
1824
1825 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1826 printk(KERN_CRIT
1827 "EDAC PCI- "
1828 "Detected Parity Error on %s\n",
1829 pci_name (dev));
1830
1831 atomic_inc(&pci_parity_count);
1832 }
1833 }
1834
1835 /* read the device TYPE, looking for bridges */
1836 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
1837
1838 debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id );
1839
1840 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
1841 /* On bridges, need to examine secondary status register */
1842 status = get_pci_parity_status(dev, 1);
1843
1844 debugf2("PCI SEC_STATUS= 0x%04x %s\n",
1845 status, dev->dev.bus_id );
1846
1847 /* check the secondary status reg for errors */
1848 if (status) {
1849 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1850 printk(KERN_CRIT
1851 "EDAC PCI-Bridge- "
1852 "Signaled System Error on %s\n",
1853 pci_name (dev));
1854
1855 if (status & (PCI_STATUS_PARITY)) {
1856 printk(KERN_CRIT
1857 "EDAC PCI-Bridge- "
1858 "Master Data Parity Error on %s\n",
1859 pci_name (dev));
1860
1861 atomic_inc(&pci_parity_count);
1862 }
1863
1864 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1865 printk(KERN_CRIT
1866 "EDAC PCI-Bridge- "
1867 "Detected Parity Error on %s\n",
1868 pci_name (dev));
1869
1870 atomic_inc(&pci_parity_count);
1871 }
1872 }
1873 }
1874}
1875
1876/*
1877 * check_dev_on_list: Scan for a PCI device on a white/black list
1878 * @list: an EDAC &edac_pci_device_list white/black list pointer
1879 * @free_index: index of next free entry on the list
1880 * @pci_dev: PCI Device pointer
1881 *
1882 * see if list contains the device.
1883 *
1884 * Returns: 0 not found
1885 * 1 found on list
1886 */
1887static int check_dev_on_list(struct edac_pci_device_list *list, int free_index,
1888 struct pci_dev *dev)
1889{
1890 int i;
1891 int rc = 0; /* Assume not found */
1892 unsigned short vendor=dev->vendor;
1893 unsigned short device=dev->device;
1894
1895 /* Scan the list, looking for a vendor/device match
1896 */
1897 for (i = 0; i < free_index; i++, list++ ) {
1898 if ( (list->vendor == vendor ) &&
1899 (list->device == device )) {
1900 rc = 1;
1901 break;
1902 }
1903 }
1904
1905 return rc;
1906}
1907
1908/*
1909 * pci_dev parity list iterator
1910 * Scan the PCI device list for one iteration, looking for SERRORs
1911 * Master Parity ERRORS or Parity ERRORs on primary or secondary devices
1912 */
1913static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
1914{
1915 struct pci_dev *dev=NULL;
1916
1917 /* request for kernel access to the next PCI device, if any,
1918 * and while we are looking at it have its reference count
1919 * bumped until we are done with it
1920 */
1921 while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1922
1923 /* if whitelist exists then it has priority, so only scan those
1924 * devices on the whitelist
1925 */
1926 if (pci_whitelist_count > 0 ) {
1927 if (check_dev_on_list(pci_whitelist,
1928 pci_whitelist_count, dev))
1929 fn(dev);
1930 } else {
1931 /*
1932 * if no whitelist, then check if this device is
1933 * blacklisted
1934 */
1935 if (!check_dev_on_list(pci_blacklist,
1936 pci_blacklist_count, dev))
1937 fn(dev);
1938 }
1939 }
1940}
1941
1942static void do_pci_parity_check(void)
1943{
1944 unsigned long flags;
1945 int before_count;
1946
1947 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1948
1949 if (!check_pci_parity)
1950 return;
1951
1952 before_count = atomic_read(&pci_parity_count);
1953
1954 /* scan all PCI devices looking for a Parity Error on devices and
1955 * bridges
1956 */
1957 local_irq_save(flags);
1958 edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
1959 local_irq_restore(flags);
1960
1961 /* Only if operator has selected panic on PCI Error */
1962 if (panic_on_pci_parity) {
1963 /* If the count is different 'after' from 'before' */
1964 if (before_count != atomic_read(&pci_parity_count))
1965 panic("EDAC: PCI Parity Error");
1966 }
1967}
1968
1969
1970static inline void clear_pci_parity_errors(void)
1971{
1972 /* Clear any PCI bus parity errors that devices initially have logged
1973 * in their registers.
1974 */
1975 edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
1976}
1977
1978
1979#else /* CONFIG_PCI */
1980
1981
1982static inline void do_pci_parity_check(void)
1983{
1984 /* no-op */
1985}
1986
1987
1988static inline void clear_pci_parity_errors(void)
1989{
1990 /* no-op */
1991}
1992
1993
1994#endif /* CONFIG_PCI */
1995
1996/*
1997 * Iterate over all MC instances and check for ECC, et al, errors
1998 */
1999static inline void check_mc_devices (void)
2000{
2001 unsigned long flags;
2002 struct list_head *item;
2003 struct mem_ctl_info *mci;
2004
2005 debugf3("MC: " __FILE__ ": %s()\n", __func__);
2006
2007 /* during poll, have interrupts off */
2008 local_irq_save(flags);
2009
2010 list_for_each(item, &mc_devices) {
2011 mci = list_entry(item, struct mem_ctl_info, link);
2012
2013 if (mci->edac_check != NULL)
2014 mci->edac_check(mci);
2015 }
2016
2017 local_irq_restore(flags);
2018}
2019
2020
2021/*
2022 * Check MC status every poll_msec.
2023 * Check PCI status every poll_msec as well.
2024 *
2025 * This is where the work gets done for edac.
2026 *
2027 * SMP safe, doesn't use NMI, and auto-rate-limits.
2028 */
2029static void do_edac_check(void)
2030{
2031
2032 debugf3("MC: " __FILE__ ": %s()\n", __func__);
2033
2034 check_mc_devices();
2035
2036 do_pci_parity_check();
2037}
2038
2039
2040/*
2041 * EDAC thread state information
2042 */
2043struct bs_thread_info
2044{
2045 struct task_struct *task;
2046 struct completion *event;
2047 char *name;
2048 void (*run)(void);
2049};
2050
2051static struct bs_thread_info bs_thread;
2052
2053/*
2054 * edac_kernel_thread
2055 * This the kernel thread that processes edac operations
2056 * in a normal thread environment
2057 */
2058static int edac_kernel_thread(void *arg)
2059{
2060 struct bs_thread_info *thread = (struct bs_thread_info *) arg;
2061
2062 /* detach thread */
2063 daemonize(thread->name);
2064
2065 current->exit_signal = SIGCHLD;
2066 allow_signal(SIGKILL);
2067 thread->task = current;
2068
2069 /* indicate to starting task we have started */
2070 complete(thread->event);
2071
2072 /* loop forever, until we are told to stop */
2073 while(thread->run != NULL) {
2074 void (*run)(void);
2075
2076 /* call the function to check the memory controllers */
2077 run = thread->run;
2078 if (run)
2079 run();
2080
2081 if (signal_pending(current))
2082 flush_signals(current);
2083
2084 /* ensure we are interruptible */
2085 set_current_state(TASK_INTERRUPTIBLE);
2086
2087 /* go to sleep for the interval */
2088 schedule_timeout((HZ * poll_msec) / 1000);
2089 try_to_freeze();
2090 }
2091
2092 /* notify waiter that we are exiting */
2093 complete(thread->event);
2094
2095 return 0;
2096}
2097
2098/*
2099 * edac_mc_init
2100 * module initialization entry point
2101 */
2102static int __init edac_mc_init(void)
2103{
2104 int ret;
2105 struct completion event;
2106
2107 printk(KERN_INFO "MC: " __FILE__ " version " EDAC_MC_VERSION "\n");
2108
2109 /*
2110 * Harvest and clear any boot/initialization PCI parity errors
2111 *
2112 * FIXME: This only clears errors logged by devices present at time of
2113 * module initialization. We should also do an initial clear
2114 * of each newly hotplugged device.
2115 */
2116 clear_pci_parity_errors();
2117
2118 /* perform check for first time to harvest boot leftovers */
2119 do_edac_check();
2120
2121 /* Create the MC sysfs entries */
2122 if (edac_sysfs_memctrl_setup()) {
2123 printk(KERN_ERR "EDAC MC: Error initializing sysfs code\n");
2124 return -ENODEV;
2125 }
2126
2127 /* Create the PCI parity sysfs entries */
2128 if (edac_sysfs_pci_setup()) {
2129 edac_sysfs_memctrl_teardown();
2130 printk(KERN_ERR "EDAC PCI: Error initializing sysfs code\n");
2131 return -ENODEV;
2132 }
2133
2134 /* Create our kernel thread */
2135 init_completion(&event);
2136 bs_thread.event = &event;
2137 bs_thread.name = "kedac";
2138 bs_thread.run = do_edac_check;
2139
2140 /* create our kernel thread */
2141 ret = kernel_thread(edac_kernel_thread, &bs_thread, CLONE_KERNEL);
2142 if (ret < 0) {
2143 /* remove the sysfs entries */
2144 edac_sysfs_memctrl_teardown();
2145 edac_sysfs_pci_teardown();
2146 return -ENOMEM;
2147 }
2148
2149 /* wait for our kernel thread to ack that it is up and running */
2150 wait_for_completion(&event);
2151
2152 return 0;
2153}
2154
2155
2156/*
2157 * edac_mc_exit()
2158 * module exit/termination function
2159 */
2160static void __exit edac_mc_exit(void)
2161{
2162 struct completion event;
2163
2164 debugf0("MC: " __FILE__ ": %s()\n", __func__);
2165
2166 init_completion(&event);
2167 bs_thread.event = &event;
2168
2169 /* As soon as ->run is set to NULL, the task could disappear,
2170 * so we need to hold tasklist_lock until we have sent the signal
2171 */
2172 read_lock(&tasklist_lock);
2173 bs_thread.run = NULL;
2174 send_sig(SIGKILL, bs_thread.task, 1);
2175 read_unlock(&tasklist_lock);
2176 wait_for_completion(&event);
2177
2178 /* tear down the sysfs device */
2179 edac_sysfs_memctrl_teardown();
2180 edac_sysfs_pci_teardown();
2181}
2182
2183
2184
2185
2186module_init(edac_mc_init);
2187module_exit(edac_mc_exit);
2188
2189MODULE_LICENSE("GPL");
2190MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
2191 "Based on work by Dan Hollis et al");
2192MODULE_DESCRIPTION("Core library routines for MC reporting");
2193
2194module_param(panic_on_ue, int, 0644);
2195MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
2196module_param(check_pci_parity, int, 0644);
2197MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on");
2198module_param(panic_on_pci_parity, int, 0644);
2199MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on");
2200module_param(log_ue, int, 0644);
2201MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on");
2202module_param(log_ce, int, 0644);
2203MODULE_PARM_DESC(log_ce, "Log correctable error to console: 0=off 1=on");
2204module_param(poll_msec, int, 0644);
2205MODULE_PARM_DESC(poll_msec, "Polling period in milliseconds");
2206#ifdef CONFIG_EDAC_DEBUG
2207module_param(edac_debug_level, int, 0644);
2208MODULE_PARM_DESC(edac_debug_level, "Debug level");
2209#endif
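(Usage note: with the core built as a module - edac_mc.ko, assuming the
Makefile in this series keeps that name - these parameters can be given at
load time, e.g. "modprobe edac_mc check_pci_parity=1 poll_msec=500", and the
0644 permissions also expose them for runtime tweaking under
/sys/module/edac_mc/parameters/ on kernels that provide that directory.)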
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
new file mode 100644
index 000000000000..75ecf484a43a
--- /dev/null
+++ b/drivers/edac/edac_mc.h
@@ -0,0 +1,448 @@
1/*
2 * MC kernel module
3 * (C) 2003 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Thayne Harbaugh
8 * Based on work by Dan Hollis <goemon at anime dot net> and others.
9 * http://www.anime.net/~goemon/linux-ecc/
10 *
11 * NMI handling support added by
12 * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
13 *
14 * $Id: edac_mc.h,v 1.4.2.10 2005/10/05 00:43:44 dsp_llnl Exp $
15 *
16 */
17
18
19#ifndef _EDAC_MC_H_
20#define _EDAC_MC_H_
21
22
23#include <linux/config.h>
24#include <linux/kernel.h>
25#include <linux/types.h>
26#include <linux/module.h>
27#include <linux/spinlock.h>
28#include <linux/smp.h>
29#include <linux/pci.h>
30#include <linux/time.h>
31#include <linux/nmi.h>
32#include <linux/rcupdate.h>
33#include <linux/completion.h>
34#include <linux/kobject.h>
35
36
37#define EDAC_MC_LABEL_LEN 31
38#define MC_PROC_NAME_MAX_LEN 7
39
40#if PAGE_SHIFT < 20
41#define PAGES_TO_MiB( pages ) ( ( pages ) >> ( 20 - PAGE_SHIFT ) )
42#else /* PAGE_SHIFT >= 20 */
43#define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) )
44#endif
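/* e.g. with PAGE_SHIFT == 12, PAGES_TO_MiB(pages) is pages >> 8, so a
 * csrow of 0x40000 pages (1 GiB) reports as 1024 MiB
 */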
45
46#ifdef CONFIG_EDAC_DEBUG
47extern int edac_debug_level;
48#define edac_debug_printk(level, fmt, args...) \
49do { if (level <= edac_debug_level) printk(KERN_DEBUG fmt, ##args); } while(0)
50#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
51#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
52#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
53#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
54#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
55#else /* !CONFIG_EDAC_DEBUG */
56#define debugf0( ... )
57#define debugf1( ... )
58#define debugf2( ... )
59#define debugf3( ... )
60#define debugf4( ... )
61#endif /* !CONFIG_EDAC_DEBUG */
62
63
64#define bs_xstr(s) bs_str(s)
65#define bs_str(s) #s
66#define BS_MOD_STR bs_xstr(KBUILD_BASENAME)
67
68#define BIT(x) (1 << (x))
69
70#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, PCI_DEVICE_ID_ ## vend ## _ ## dev
71
72/* memory devices */
73enum dev_type {
74 DEV_UNKNOWN = 0,
75 DEV_X1,
76 DEV_X2,
77 DEV_X4,
78 DEV_X8,
79 DEV_X16,
80 DEV_X32, /* Do these parts exist? */
81 DEV_X64 /* Do these parts exist? */
82};
83
84#define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN)
85#define DEV_FLAG_X1 BIT(DEV_X1)
86#define DEV_FLAG_X2 BIT(DEV_X2)
87#define DEV_FLAG_X4 BIT(DEV_X4)
88#define DEV_FLAG_X8 BIT(DEV_X8)
89#define DEV_FLAG_X16 BIT(DEV_X16)
90#define DEV_FLAG_X32 BIT(DEV_X32)
91#define DEV_FLAG_X64 BIT(DEV_X64)
92
93/* memory types */
94enum mem_type {
95 MEM_EMPTY = 0, /* Empty csrow */
96 MEM_RESERVED, /* Reserved csrow type */
97 MEM_UNKNOWN, /* Unknown csrow type */
98 MEM_FPM, /* Fast page mode */
99 MEM_EDO, /* Extended data out */
100 MEM_BEDO, /* Burst Extended data out */
101 MEM_SDR, /* Single data rate SDRAM */
102 MEM_RDR, /* Registered single data rate SDRAM */
103 MEM_DDR, /* Double data rate SDRAM */
104 MEM_RDDR, /* Registered Double data rate SDRAM */
105 MEM_RMBS /* Rambus DRAM */
106};
107
108#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
109#define MEM_FLAG_RESERVED BIT(MEM_RESERVED)
110#define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN)
111#define MEM_FLAG_FPM BIT(MEM_FPM)
112#define MEM_FLAG_EDO BIT(MEM_EDO)
113#define MEM_FLAG_BEDO BIT(MEM_BEDO)
114#define MEM_FLAG_SDR BIT(MEM_SDR)
115#define MEM_FLAG_RDR BIT(MEM_RDR)
116#define MEM_FLAG_DDR BIT(MEM_DDR)
117#define MEM_FLAG_RDDR BIT(MEM_RDDR)
118#define MEM_FLAG_RMBS BIT(MEM_RMBS)
119
120
121/* chipset Error Detection and Correction capabilities and mode */
122enum edac_type {
123 EDAC_UNKNOWN = 0, /* Unknown if ECC is available */
124 EDAC_NONE, /* Doesnt support ECC */
125 EDAC_RESERVED, /* Reserved ECC type */
126 EDAC_PARITY, /* Detects parity errors */
127 EDAC_EC, /* Error Checking - no correction */
128 EDAC_SECDED, /* Single bit error correction, Double detection */
129 EDAC_S2ECD2ED, /* Chipkill x2 devices - do these exist? */
130 EDAC_S4ECD4ED, /* Chipkill x4 devices */
131 EDAC_S8ECD8ED, /* Chipkill x8 devices */
132 EDAC_S16ECD16ED, /* Chipkill x16 devices */
133};
134
135#define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN)
136#define EDAC_FLAG_NONE BIT(EDAC_NONE)
137#define EDAC_FLAG_PARITY BIT(EDAC_PARITY)
138#define EDAC_FLAG_EC BIT(EDAC_EC)
139#define EDAC_FLAG_SECDED BIT(EDAC_SECDED)
140#define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED)
141#define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED)
142#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED)
143#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED)
144
145
146/* scrubbing capabilities */
147enum scrub_type {
148 SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */
149 SCRUB_NONE, /* No scrubber */
150 SCRUB_SW_PROG, /* SW progressive (sequential) scrubbing */
151 SCRUB_SW_SRC, /* Software scrub only errors */
152 SCRUB_SW_PROG_SRC, /* Progressive software scrub from an error */
153 SCRUB_SW_TUNABLE, /* Software scrub frequency is tunable */
154 SCRUB_HW_PROG, /* HW progressive (sequential) scrubbing */
155 SCRUB_HW_SRC, /* Hardware scrub only errors */
156 SCRUB_HW_PROG_SRC, /* Progressive hardware scrub from an error */
157 SCRUB_HW_TUNABLE /* Hardware scrub frequency is tunable */
158};
159
160#define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG)
161#define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC)
162#define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC)
163#define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_TUNABLE)
164#define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG)
165#define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC)
166#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC)
167#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE)
168
169enum mci_sysfs_status {
170 MCI_SYSFS_INACTIVE = 0, /* sysfs entries NOT registered */
171 MCI_SYSFS_ACTIVE /* sysfs entries ARE registered */
172};
173
174/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
175
176/*
177 * There are several things to be aware of that aren't at all obvious:
178 *
179 *
180 * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
181 *
182 * These are some of the many terms that are thrown about that don't always
183 * mean what people think they mean (Inconceivable!). In the interest of
184 * creating a common ground for discussion, terms and their definitions
185 * will be established.
186 *
187 * Memory devices: The individual chip on a memory stick. These devices
188 * commonly output 4 and 8 bits each. Grouping several
189 * of these in parallel provides 64 bits which is common
190 * for a memory stick.
191 *
192 * Memory Stick: A printed circuit board that aggregates multiple
193 * memory devices in parallel. This is the atomic
194 * memory component that is purchasable by Joe consumer
195 * and loaded into a memory socket.
196 *
197 * Socket: A physical connector on the motherboard that accepts
198 * a single memory stick.
199 *
200 * Channel: Set of memory devices on a memory stick that must be
201 * grouped in parallel with one or more additional
202 * channels from other memory sticks. This parallel
203 * grouping of the output from multiple channels is
204 * necessary for the smallest granularity of memory access.
205 * Some memory controllers are capable of single channel -
206 * which means that memory sticks can be loaded
207 * individually. Other memory controllers are only
208 * capable of dual channel - which means that memory
209 * sticks must be loaded as pairs (see "socket set").
210 *
211 * Chip-select row: All of the memory devices that are selected together
212 * for a single, minimum grain of memory access.
213 * This selects all of the parallel memory devices across
214 * all of the parallel channels. Common chip-select rows
215 * for single channel are 64 bits, for dual channel 128
216 * bits.
217 *
218 * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory.
219 * Motherboards commonly drive two chip-select pins to
220 * a memory stick. A single-ranked stick will occupy
221 * only one of those rows. The other will be unused.
222 *
223 * Double-Ranked stick: A double-ranked stick has two chip-select rows which
224 * access different sets of memory devices. The two
225 * rows cannot be accessed concurrently.
226 *
227 * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
228 * A double-sided stick has two chip-select rows which
229 * access different sets of memory devices. The two
230 * rows cannot be accessed concurrently. "Double-sided"
231 * is irrespective of the memory devices being mounted
232 * on both sides of the memory stick.
233 *
234 * Socket set: All of the memory sticks that are required for
235 * a single memory access or all of the memory sticks
236 * spanned by a chip-select row. A single socket set
237 * has two chip-select rows and if double-sided sticks
238 * are used these will occupy those chip-select rows.
239 *
240 * Bank: This term is avoided because it is unclear when
241 * needing to distinguish between chip-select rows and
242 * socket sets.
243 *
244 * Controller pages:
245 *
246 * Physical pages:
247 *
248 * Virtual pages:
249 *
250 *
251 * STRUCTURE ORGANIZATION AND CHOICES
252 *
253 *
254 *
255 * PS - I enjoyed writing all that about as much as you enjoyed reading it.
256 */
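/* Example, in the terms defined above: two single-ranked 64-bit sticks
 * loaded as a pair on a dual-channel controller form one socket set
 * with a single 128-bit chip-select row spanning both sticks.
 */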
257
258
259struct channel_info {
260 int chan_idx; /* channel index */
261 u32 ce_count; /* Correctable Errors for this CHANNEL */
262 char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
263 struct csrow_info *csrow; /* the parent */
264};
265
266
267struct csrow_info {
268 unsigned long first_page; /* first page number in dimm */
269 unsigned long last_page; /* last page number in dimm */
270 unsigned long page_mask; /* used for interleaving -
271 0UL for non intlv */
272 u32 nr_pages; /* number of pages in csrow */
273 u32 grain; /* granularity of reported error in bytes */
274 int csrow_idx; /* the chip-select row */
275 enum dev_type dtype; /* memory device type */
276 u32 ue_count; /* Uncorrectable Errors for this csrow */
277 u32 ce_count; /* Correctable Errors for this csrow */
278 enum mem_type mtype; /* memory csrow type */
279 enum edac_type edac_mode; /* EDAC mode for this csrow */
280 struct mem_ctl_info *mci; /* the parent */
281
282 struct kobject kobj; /* sysfs kobject for this csrow */
283
284 /* FIXME the number of CHANNELs might need to become dynamic */
285 u32 nr_channels;
286 struct channel_info *channels;
287};
288
289
290struct mem_ctl_info {
291 struct list_head link; /* for global list of mem_ctl_info structs */
292 unsigned long mtype_cap; /* memory types supported by mc */
293 unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */
294 unsigned long edac_cap; /* configuration capabilities - this is
295 closely related to edac_ctl_cap. The
296 difference is that the controller
297 may be capable of s4ecd4ed which would
298 be listed in edac_ctl_cap, but if
299 channels aren't capable of s4ecd4ed then the
300 edac_cap would not have that capability. */
301 unsigned long scrub_cap; /* chipset scrub capabilities */
302 enum scrub_type scrub_mode; /* current scrub mode */
303
304 enum mci_sysfs_status sysfs_active; /* status of sysfs */
305
306 /* pointer to edac checking routine */
307 void (*edac_check) (struct mem_ctl_info * mci);
308 /*
309 * Remaps memory pages: controller pages to physical pages.
310 * For most MC's, this will be NULL.
311 */
312 /* FIXME - why not send the phys page to begin with? */
313 unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
314 unsigned long page);
315 int mc_idx;
316 int nr_csrows;
317 struct csrow_info *csrows;
318 /*
319 * FIXME - what about controllers on other busses? - IDs must be
320 * unique. pdev pointer should be sufficiently unique, but
321 * BUS:SLOT.FUNC numbers may not be unique.
322 */
323 struct pci_dev *pdev;
324 const char *mod_name;
325 const char *mod_ver;
326 const char *ctl_name;
327 char proc_name[MC_PROC_NAME_MAX_LEN + 1];
328 void *pvt_info;
329 u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */
330 u32 ce_noinfo_count; /* Correctable Errors w/o info */
331 u32 ue_count; /* Total Uncorrectable Errors for this MC */
332 u32 ce_count; /* Total Correctable Errors for this MC */
333 unsigned long start_time; /* mci load start time (in jiffies) */
334
335 /* this stuff is for safe removal of mc devices from global list while
336 * NMI handlers may be traversing list
337 */
338 struct rcu_head rcu;
339 struct completion complete;
340
341 /* edac sysfs device control */
342 struct kobject edac_mci_kobj;
343};
344
345
346
347/* write all or some bits in a byte-register*/
348static inline void pci_write_bits8(struct pci_dev *pdev, int offset,
349 u8 value, u8 mask)
350{
351 if (mask != 0xff) {
352 u8 buf;
353 pci_read_config_byte(pdev, offset, &buf);
354 value &= mask;
355 buf &= ~mask;
356 value |= buf;
357 }
358 pci_write_config_byte(pdev, offset, value);
359}
360
361
362/* write all or some bits in a word-register*/
363static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
364 u16 value, u16 mask)
365{
366 if (mask != 0xffff) {
367 u16 buf;
368 pci_read_config_word(pdev, offset, &buf);
369 value &= mask;
370 buf &= ~mask;
371 value |= buf;
372 }
373 pci_write_config_word(pdev, offset, value);
374}
375
376
377/* write all or some bits in a dword-register*/
378static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
379 u32 value, u32 mask)
380{
381 if (mask != 0xffffffff) {
382 u32 buf;
383 pci_read_config_dword(pdev, offset, &buf);
384 value &= mask;
385 buf &= ~mask;
386 value |= buf;
387 }
388 pci_write_config_dword(pdev, offset, value);
389}
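/* Usage example from the drivers below: pci_write_bits16(pdev,
 * I82860_ERRSTS, 0x0003, 0x0003) does a read-modify-write that sets
 * only the two low bits and carries every other bit of the word back
 * unchanged from the read.
 */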
390
391
392#ifdef CONFIG_EDAC_DEBUG
393void edac_mc_dump_channel(struct channel_info *chan);
394void edac_mc_dump_mci(struct mem_ctl_info *mci);
395void edac_mc_dump_csrow(struct csrow_info *csrow);
396#endif /* CONFIG_EDAC_DEBUG */
397
398extern int edac_mc_add_mc(struct mem_ctl_info *mci);
399extern int edac_mc_del_mc(struct mem_ctl_info *mci);
400
401extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
402 unsigned long page);
403
404extern struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev
405 *pdev);
406
407extern void edac_mc_scrub_block(unsigned long page,
408 unsigned long offset, u32 size);
409
410/*
411 * The no info errors are used when error overflows are reported.
412 * There are a limited number of error logging registers that can
413 * be exhausted. When all registers are exhausted and an additional
414 * error occurs then an error overflow register records that an
415 * error occurred and the type of error, but doesn't have any
416 * further information. The ce/ue versions make for cleaner
417 * reporting logic and function interface - reduces conditional
418 * statement clutter and extra function arguments.
419 */
420extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
421 unsigned long page_frame_number,
422 unsigned long offset_in_page,
423 unsigned long syndrome,
424 int row, int channel, const char *msg);
425
426extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
427 const char *msg);
428
429extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
430 unsigned long page_frame_number,
431 unsigned long offset_in_page,
432 int row, const char *msg);
433
434extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
435 const char *msg);
436
437/*
438 * This kmalloc's and initializes all the structures.
439 * Can't be used if all structures don't have the same lifetime.
440 */
441extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt,
442 unsigned nr_csrows, unsigned nr_chans);
443
444/* Free an mc previously allocated by edac_mc_alloc() */
445extern void edac_mc_free(struct mem_ctl_info *mci);
446
447
448#endif /* _EDAC_MC_H_ */
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
new file mode 100644
index 000000000000..52596e75f9c2
--- /dev/null
+++ b/drivers/edac/i82860_edac.c
@@ -0,0 +1,299 @@
1/*
2 * Intel 82860 Memory Controller kernel module
3 * (C) 2005 Red Hat (http://www.redhat.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Ben Woodard <woodard@redhat.com>
8 * shamelessly copied from and based upon the edac_i82875 driver
9 * by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
10 */
11
12
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/pci.h>
17#include <linux/pci_ids.h>
18#include <linux/slab.h>
19#include "edac_mc.h"
20
21
22#ifndef PCI_DEVICE_ID_INTEL_82860_0
23#define PCI_DEVICE_ID_INTEL_82860_0 0x2531
24#endif /* PCI_DEVICE_ID_INTEL_82860_0 */
25
26#define I82860_MCHCFG 0x50
27#define I82860_GBA 0x60
28#define I82860_GBA_MASK 0x7FF
29#define I82860_GBA_SHIFT 24
30#define I82860_ERRSTS 0xC8
31#define I82860_EAP 0xE4
32#define I82860_DERRCTL_STS 0xE2
33
34enum i82860_chips {
35 I82860 = 0,
36};
37
38struct i82860_dev_info {
39 const char *ctl_name;
40};
41
42struct i82860_error_info {
43 u16 errsts;
44 u32 eap;
45 u16 derrsyn;
46 u16 errsts2;
47};
48
49static const struct i82860_dev_info i82860_devs[] = {
50 [I82860] = {
51 .ctl_name = "i82860"},
52};
53
54static struct pci_dev *mci_pdev = NULL; /* init dev: in case the AGP code
55 has already registered a driver */
56
57static int i82860_registered = 1;
58
59static void i82860_get_error_info (struct mem_ctl_info *mci,
60 struct i82860_error_info *info)
61{
62 /*
63 * This is a mess because there is no atomic way to read all the
64 * registers at once and the registers can transition from CE being
65 * overwritten by UE.
66 */
67 pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts);
68 pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
69 pci_read_config_word(mci->pdev, I82860_DERRCTL_STS, &info->derrsyn);
70 pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts2);
71
72 pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
73
74 /*
75 * If the error is the same for both reads then the first set of reads
76 * is valid. If there is a change then there is a CE with no info and the
77 * second set of reads is valid and should be UE info.
78 */
79 if (!(info->errsts2 & 0x0003))
80 return;
81 if ((info->errsts ^ info->errsts2) & 0x0003) {
82 pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
83 pci_read_config_word(mci->pdev, I82860_DERRCTL_STS,
84 &info->derrsyn);
85 }
86}
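/* Concretely: if errsts == errsts2 nothing changed between the two
 * reads and the first eap/derrsyn snapshot is trusted; if the two
 * differ, a UE overwrote the CE in between, so eap/derrsyn are
 * re-read and the lost CE is later reported without details.
 */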
87
88static int i82860_process_error_info (struct mem_ctl_info *mci,
89 struct i82860_error_info *info, int handle_errors)
90{
91 int row;
92
93 if (!(info->errsts2 & 0x0003))
94 return 0;
95
96 if (!handle_errors)
97 return 1;
98
99 if ((info->errsts ^ info->errsts2) & 0x0003) {
100 edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
101 info->errsts = info->errsts2;
102 }
103
104 info->eap >>= PAGE_SHIFT;
105 row = edac_mc_find_csrow_by_page(mci, info->eap);
106
107 if (info->errsts & 0x0002)
108 edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
109 else
110 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
111 0, "i82860 CE");
112
113 return 1;
114}
115
116static void i82860_check(struct mem_ctl_info *mci)
117{
118 struct i82860_error_info info;
119
120 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
121 i82860_get_error_info(mci, &info);
122 i82860_process_error_info(mci, &info, 1);
123}
124
125static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
126{
127 int rc = -ENODEV;
128 int index;
129 struct mem_ctl_info *mci = NULL;
130 unsigned long last_cumul_size;
131
132 u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
133
134 /* RDRAM has channels, but these don't map onto the abstractions that
135    edac uses.
136    The device groups from the GRA registers seem to map reasonably
137    well onto the notion of a chip select row.
138    There are 16 GRA registers and, since the GRA registers map to
139    physical devices, we are going to make one channel per device
140    group.
141  */
142 mci = edac_mc_alloc(0, 16, 1);
143 if (!mci)
144 return -ENOMEM;
145
146 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
147
148 mci->pdev = pdev;
149 mci->mtype_cap = MEM_FLAG_DDR;
150
151
152 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
153 /* I'm not sure about this, but I think all RDRAM is SECDED */
154 mci->edac_cap = EDAC_FLAG_SECDED;
155 /* adjust FLAGS */
156
157 mci->mod_name = BS_MOD_STR;
158 mci->mod_ver = "$Revision: 1.1.2.6 $";
159 mci->ctl_name = i82860_devs[dev_idx].ctl_name;
160 mci->edac_check = i82860_check;
161 mci->ctl_page_to_phys = NULL;
162
163 pci_read_config_word(mci->pdev, I82860_MCHCFG, &mchcfg_ddim);
164 mchcfg_ddim = mchcfg_ddim & 0x180;
165
166 /*
167 * The group row boundary (GRA) reg values are boundary addresses
168 * for each DRAM row with a granularity of 16MB. GRA regs are
169 * cumulative; therefore GRA15 will contain the total memory contained
170 * in all sixteen rows.
171 */
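/* e.g. (illustrative value): with the 16MB granularity above, a GBA
 * value of 0x004 marks a 64MB cumulative boundary, i.e. a cumul_size
 * of 0x4000 pages when PAGE_SHIFT == 12
 */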
172 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
173 u16 value;
174 u32 cumul_size;
175 struct csrow_info *csrow = &mci->csrows[index];
176
177 pci_read_config_word(mci->pdev, I82860_GBA + index * 2,
178 &value);
179
180 cumul_size = (value & I82860_GBA_MASK) <<
181 (I82860_GBA_SHIFT - PAGE_SHIFT);
182 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
183 __func__, index, cumul_size);
184 if (cumul_size == last_cumul_size)
185 continue; /* not populated */
186
187 csrow->first_page = last_cumul_size;
188 csrow->last_page = cumul_size - 1;
189 csrow->nr_pages = cumul_size - last_cumul_size;
190 last_cumul_size = cumul_size;
191 csrow->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */
192 csrow->mtype = MEM_RMBS;
193 csrow->dtype = DEV_UNKNOWN;
194 csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
195 }
196
197 /* clear counters */
198 pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
199
200 if (edac_mc_add_mc(mci)) {
201 debugf3("MC: " __FILE__
202 ": %s(): failed edac_mc_add_mc()\n",
203 __func__);
204 edac_mc_free(mci);
205 } else {
206 /* get this far and it's successful */
207 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
208 rc = 0;
209 }
210 return rc;
211}
212
213/* returns count (>= 0), or negative on error */
214static int __devinit i82860_init_one(struct pci_dev *pdev,
215 const struct pci_device_id *ent)
216{
217 int rc;
218
219 debugf0("MC: " __FILE__ ": %s()\n", __func__);
220
221 printk(KERN_INFO "i82860 init one\n");
222 if(pci_enable_device(pdev) < 0)
223 return -EIO;
224 rc = i82860_probe1(pdev, ent->driver_data);
225 if(rc == 0)
226 mci_pdev = pci_dev_get(pdev);
227 return rc;
228}
229
230static void __devexit i82860_remove_one(struct pci_dev *pdev)
231{
232 struct mem_ctl_info *mci;
233
234 debugf0(__FILE__ ": %s()\n", __func__);
235
236 mci = edac_mc_find_mci_by_pdev(pdev);
237 if ((mci != NULL) && (edac_mc_del_mc(mci) == 0))
238 edac_mc_free(mci);
239}
240
241static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
242 {PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
243 I82860},
244 {0,} /* 0 terminated list. */
245};
246
247MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
248
249static struct pci_driver i82860_driver = {
250 .name = BS_MOD_STR,
251 .probe = i82860_init_one,
252 .remove = __devexit_p(i82860_remove_one),
253 .id_table = i82860_pci_tbl,
254};
255
256static int __init i82860_init(void)
257{
258 int pci_rc;
259
260 debugf3("MC: " __FILE__ ": %s()\n", __func__);
261 if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
262 return pci_rc;
263
264 if (!mci_pdev) {
265 i82860_registered = 0;
266 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
267 PCI_DEVICE_ID_INTEL_82860_0, NULL);
268 if (mci_pdev == NULL) {
269 debugf0("860 pci_get_device fail\n");
270 return -ENODEV;
271 }
272 pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
273 if (pci_rc < 0) {
274 debugf0("860 init fail\n");
275 pci_dev_put(mci_pdev);
276 return -ENODEV;
277 }
278 }
279 return 0;
280}
281
282static void __exit i82860_exit(void)
283{
284 debugf3("MC: " __FILE__ ": %s()\n", __func__);
285
286 pci_unregister_driver(&i82860_driver);
287 if (!i82860_registered) {
288 i82860_remove_one(mci_pdev);
289 pci_dev_put(mci_pdev);
290 }
291}
292
293module_init(i82860_init);
294module_exit(i82860_exit);
295
296MODULE_LICENSE("GPL");
297MODULE_AUTHOR
298 ("Red Hat Inc. (http://www.redhat.com) Ben Woodard <woodard@redhat.com>");
299MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
new file mode 100644
index 000000000000..009c08fe5d69
--- /dev/null
+++ b/drivers/edac/i82875p_edac.c
@@ -0,0 +1,532 @@
1/*
2 * Intel D82875P Memory Controller kernel module
3 * (C) 2003 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Thayne Harbaugh
8 * Contributors:
9 * Wang Zhenyu at intel.com
10 *
11 * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
12 *
13 * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
14 */
15
16
17#include <linux/config.h>
18#include <linux/module.h>
19#include <linux/init.h>
20
21#include <linux/pci.h>
22#include <linux/pci_ids.h>
23
24#include <linux/slab.h>
25
26#include "edac_mc.h"
27
28
29#ifndef PCI_DEVICE_ID_INTEL_82875_0
30#define PCI_DEVICE_ID_INTEL_82875_0 0x2578
31#endif /* PCI_DEVICE_ID_INTEL_82875_0 */
32
33#ifndef PCI_DEVICE_ID_INTEL_82875_6
34#define PCI_DEVICE_ID_INTEL_82875_6 0x257e
35#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
36
37
38/* four csrows in dual channel, eight in single channel */
39#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
40
41
42/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
43#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
44 *
45 * 31:12 block address
46 * 11:0 reserved
47 */
48
49#define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
50 *
51 * 7:0 DRAM ECC Syndrome
52 */
53
54#define I82875P_DES 0x5d /* DRAM Error Status (8b)
55 *
56 * 7:1 reserved
57 * 0 Error channel 0/1
58 */
59
60#define I82875P_ERRSTS 0xc8 /* Error Status Register (16b)
61 *
62 * 15:10 reserved
63 * 9 non-DRAM lock error (ndlock)
64 * 8 Sftwr Generated SMI
65 * 7 ECC UE
66 * 6 reserved
67 * 5 MCH detects unimplemented cycle
68 * 4 AGP access outside GA
69 * 3 Invalid AGP access
70 * 2 Invalid GA translation table
71 * 1 Unsupported AGP command
72 * 0 ECC CE
73 */
74
75#define I82875P_ERRCMD 0xca /* Error Command (16b)
76 *
77 * 15:10 reserved
78 * 9 SERR on non-DRAM lock
79 * 8 SERR on ECC UE
80 * 7 SERR on ECC CE
81 * 6 target abort on high exception
82 * 5 detect unimplemented cyc
83 * 4 AGP access outside of GA
84 * 3 SERR on invalid AGP access
85 * 2 invalid translation table
86 * 1 SERR on unsupported AGP command
87 * 0 reserved
88 */
89
90
91/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
92#define I82875P_PCICMD6 0x04 /* PCI Command Register (16b)
93 *
94 * 15:10 reserved
95 * 9 fast back-to-back - ro 0
96 * 8 SERR enable - ro 0
97 * 7 addr/data stepping - ro 0
98 * 6 parity err enable - ro 0
99 * 5 VGA palette snoop - ro 0
100 * 4 mem wr & invalidate - ro 0
101 * 3 special cycle - ro 0
102 * 2 bus master - ro 0
103 * 1 mem access dev6 - 0(dis),1(en)
104 * 0 IO access dev3 - 0(dis),1(en)
105 */
106
107#define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b)
108 *
109 * 31:12 mem base addr [31:12]
110 * 11:4 address mask - ro 0
111 * 3 prefetchable - ro 0(non),1(pre)
112 * 2:1 mem type - ro 0
113 * 0 mem space - ro 0
114 */
115
116/* Intel 82875p MMIO register space - device 0 function 0 - MMR space */
117
118#define I82875P_DRB_SHIFT 26 /* 64MiB grain */
119#define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8)
120 *
121 * 7 reserved
122 * 6:0 64MiB row boundary addr
123 */
124
125#define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8)
126 *
127 * 7 reserved
128 * 6:4 row attr row 1
129 * 3 reserved
130 * 2:0 row attr row 0
131 *
132 * 000 = 4KiB
133 * 001 = 8KiB
134 * 010 = 16KiB
135 * 011 = 32KiB
136 */
137
138#define I82875P_DRC 0x68 /* DRAM Controller Mode (32b)
139 *
140 * 31:30 reserved
141 * 29 init complete
142 * 28:23 reserved
143 * 22:21 nr chan 00=1,01=2
144 * 20 reserved
145 * 19:18 Data Integ Mode 00=none,01=ecc
146 * 17:11 reserved
147 * 10:8 refresh mode
148 * 7 reserved
149 * 6:4 mode select
150 * 3:2 reserved
151 * 1:0 DRAM type 01=DDR
152 */
153
154
155enum i82875p_chips {
156 I82875P = 0,
157};
158
159
160struct i82875p_pvt {
161 struct pci_dev *ovrfl_pdev;
162 void *ovrfl_window;
163};
164
165
166struct i82875p_dev_info {
167 const char *ctl_name;
168};
169
170
171struct i82875p_error_info {
172 u16 errsts;
173 u32 eap;
174 u8 des;
175 u8 derrsyn;
176 u16 errsts2;
177};
178
179
180static const struct i82875p_dev_info i82875p_devs[] = {
181 [I82875P] = {
182 .ctl_name = "i82875p"},
183};
184
185static struct pci_dev *mci_pdev = NULL; /* init dev: in case the AGP code
186 has already registered a driver */
187static int i82875p_registered = 1;
188
189static void i82875p_get_error_info (struct mem_ctl_info *mci,
190 struct i82875p_error_info *info)
191{
192 /*
193 * This is a mess because there is no atomic way to read all the
194 * registers at once and the registers can transition from CE being
195 * overwritten by UE.
196 */
197 pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts);
198 pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
199 pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
200 pci_read_config_byte(mci->pdev, I82875P_DERRSYN, &info->derrsyn);
201 pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts2);
202
203 pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
204
205 /*
206 * If the error is the same for both reads then the first set
207 * of reads is valid. If there is a change then there is a CE
208 * with no info and the second set of reads is valid and should
209 * be UE info.
210 */
211 if (!(info->errsts2 & 0x0081))
212 return;
213 if ((info->errsts ^ info->errsts2) & 0x0081) {
214 pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
215 pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
216 pci_read_config_byte(mci->pdev, I82875P_DERRSYN,
217 &info->derrsyn);
218 }
219}
220
221static int i82875p_process_error_info (struct mem_ctl_info *mci,
222 struct i82875p_error_info *info, int handle_errors)
223{
224 int row, multi_chan;
225
226 multi_chan = mci->csrows[0].nr_channels - 1;
227
228 if (!(info->errsts2 & 0x0081))
229 return 0;
230
231 if (!handle_errors)
232 return 1;
233
234 if ((info->errsts ^ info->errsts2) & 0x0081) {
235 edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
236 info->errsts = info->errsts2;
237 }
238
239 info->eap >>= PAGE_SHIFT;
240 row = edac_mc_find_csrow_by_page(mci, info->eap);
241
242 if (info->errsts & 0x0080)
243 edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
244 else
245 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
246 multi_chan ? (info->des & 0x1) : 0,
247 "i82875p CE");
248
249 return 1;
250}
251
252
253static void i82875p_check(struct mem_ctl_info *mci)
254{
255 struct i82875p_error_info info;
256
257 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
258 i82875p_get_error_info(mci, &info);
259 i82875p_process_error_info(mci, &info, 1);
260}
261
262
263#ifdef CONFIG_PROC_FS
264extern int pci_proc_attach_device(struct pci_dev *);
265#endif
266
267static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
268{
269 int rc = -ENODEV;
270 int index;
271 struct mem_ctl_info *mci = NULL;
272 struct i82875p_pvt *pvt = NULL;
273 unsigned long last_cumul_size;
274 struct pci_dev *ovrfl_pdev;
275 void __iomem *ovrfl_window = NULL;
276
277 u32 drc;
278 u32 drc_chan; /* Number of channels 0=1chan,1=2chan */
279 u32 nr_chans;
280 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
281
282 debugf0("MC: " __FILE__ ": %s()\n", __func__);
283
284 ovrfl_pdev = pci_find_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
285
286 if (!ovrfl_pdev) {
287 /*
288 * Intel tells BIOS developers to hide device 6 which
289 * configures the overflow device access containing
290 * the DRBs - this is where we expose device 6.
291 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
292 */
293 pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
294 ovrfl_pdev =
295 pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
296 if (!ovrfl_pdev)
297 goto fail;
298 }
299#ifdef CONFIG_PROC_FS
300 if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) {
301 printk(KERN_ERR "MC: " __FILE__
302 ": %s(): Failed to attach overflow device\n",
303 __func__);
304 goto fail;
305 }
306#endif /* CONFIG_PROC_FS */
307 if (pci_enable_device(ovrfl_pdev)) {
308 printk(KERN_ERR "MC: " __FILE__
309 ": %s(): Failed to enable overflow device\n",
310 __func__);
311 goto fail;
312 }
313
314 if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) {
315#ifdef CORRECT_BIOS
316 goto fail;
317#endif
318 }
319 /* cache is irrelevant for PCI bus reads/writes */
320 ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0),
321 pci_resource_len(ovrfl_pdev, 0));
322
323 if (!ovrfl_window) {
324 printk(KERN_ERR "MC: " __FILE__
325 ": %s(): Failed to ioremap bar6\n", __func__);
326 goto fail;
327 }
328
329 /* need to find out the number of channels */
330 drc = readl(ovrfl_window + I82875P_DRC);
331 drc_chan = ((drc >> 21) & 0x1);
332 nr_chans = drc_chan + 1;
333 drc_ddim = (drc >> 18) & 0x1;
334
335 mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
336 nr_chans);
337
338 if (!mci) {
339 rc = -ENOMEM;
340 goto fail;
341 }
342
343 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
344
345 mci->pdev = pdev;
346 mci->mtype_cap = MEM_FLAG_DDR;
347
348 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
349 mci->edac_cap = EDAC_FLAG_UNKNOWN;
350 /* adjust FLAGS */
351
352 mci->mod_name = BS_MOD_STR;
353 mci->mod_ver = "$Revision: 1.5.2.11 $";
354 mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
355 mci->edac_check = i82875p_check;
356 mci->ctl_page_to_phys = NULL;
357
358 debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
359
360 pvt = (struct i82875p_pvt *) mci->pvt_info;
361 pvt->ovrfl_pdev = ovrfl_pdev;
362 pvt->ovrfl_window = ovrfl_window;
363
364 /*
365 * The dram row boundary (DRB) reg values are boundary address
366 * for each DRAM row with a granularity of 32 or 64MB (single/dual
367 * channel operation). DRB regs are cumulative; therefore DRB7 will
368 * contain the total memory contained in all eight rows.
369 */
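	/*
	 * Worked example (illustrative): at 64MB granularity, cumulative
	 * DRB values of 1, 2, 2, 4 decode to rows of 64MB, 64MB, empty
	 * (no change) and 128MB, 256MB in total; the loop below turns
	 * each non-empty step into a csrow first/last page range.
	 */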
370 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
371 u8 value;
372 u32 cumul_size;
373 struct csrow_info *csrow = &mci->csrows[index];
374
375 value = readb(ovrfl_window + I82875P_DRB + index);
376 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
377 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
378 __func__, index, cumul_size);
379 if (cumul_size == last_cumul_size)
380 continue; /* not populated */
381
382 csrow->first_page = last_cumul_size;
383 csrow->last_page = cumul_size - 1;
384 csrow->nr_pages = cumul_size - last_cumul_size;
385 last_cumul_size = cumul_size;
386		csrow->grain = 1 << 12;	/* I82875P_EAP has 4KiB resolution */
387 csrow->mtype = MEM_DDR;
388 csrow->dtype = DEV_UNKNOWN;
389 csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
390 }
391
392 /* clear counters */
393 pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
394
395 if (edac_mc_add_mc(mci)) {
396 debugf3("MC: " __FILE__
397 ": %s(): failed edac_mc_add_mc()\n", __func__);
398 goto fail;
399 }
400
401 /* get this far and it's successful */
402 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
403 return 0;
404
405 fail:
406 if (mci)
407 edac_mc_free(mci);
408
409 if (ovrfl_window)
410 iounmap(ovrfl_window);
411
412 if (ovrfl_pdev) {
413 pci_release_regions(ovrfl_pdev);
414 pci_disable_device(ovrfl_pdev);
415 }
416
417 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
418 return rc;
419}
420
421
422/* returns count (>= 0), or negative on error */
423static int __devinit i82875p_init_one(struct pci_dev *pdev,
424 const struct pci_device_id *ent)
425{
426 int rc;
427
428 debugf0("MC: " __FILE__ ": %s()\n", __func__);
429
430 printk(KERN_INFO "i82875p init one\n");
431 if(pci_enable_device(pdev) < 0)
432 return -EIO;
433 rc = i82875p_probe1(pdev, ent->driver_data);
434 if (mci_pdev == NULL)
435 mci_pdev = pci_dev_get(pdev);
436 return rc;
437}
438
439
440static void __devexit i82875p_remove_one(struct pci_dev *pdev)
441{
442 struct mem_ctl_info *mci;
443 struct i82875p_pvt *pvt = NULL;
444
445 debugf0(__FILE__ ": %s()\n", __func__);
446
447 if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
448 return;
449
450 pvt = (struct i82875p_pvt *) mci->pvt_info;
451 if (pvt->ovrfl_window)
452 iounmap(pvt->ovrfl_window);
453
454 if (pvt->ovrfl_pdev) {
455#ifdef CORRECT_BIOS
456 pci_release_regions(pvt->ovrfl_pdev);
457#endif /*CORRECT_BIOS */
458 pci_disable_device(pvt->ovrfl_pdev);
459 pci_dev_put(pvt->ovrfl_pdev);
460 }
461
462 if (edac_mc_del_mc(mci))
463 return;
464
465 edac_mc_free(mci);
466}
467
468
469static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
470 {PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
471 I82875P},
472 {0,} /* 0 terminated list. */
473};
474
475MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
476
477
478static struct pci_driver i82875p_driver = {
479 .name = BS_MOD_STR,
480 .probe = i82875p_init_one,
481 .remove = __devexit_p(i82875p_remove_one),
482 .id_table = i82875p_pci_tbl,
483};
484
485
486static int __init i82875p_init(void)
487{
488 int pci_rc;
489
490 debugf3("MC: " __FILE__ ": %s()\n", __func__);
491 pci_rc = pci_register_driver(&i82875p_driver);
492 if (pci_rc < 0)
493 return pci_rc;
494 if (mci_pdev == NULL) {
495 i82875p_registered = 0;
496 mci_pdev =
497 pci_get_device(PCI_VENDOR_ID_INTEL,
498 PCI_DEVICE_ID_INTEL_82875_0, NULL);
499 if (!mci_pdev) {
500 debugf0("875p pci_get_device fail\n");
501 return -ENODEV;
502 }
503 pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
504 if (pci_rc < 0) {
505 debugf0("875p init fail\n");
506 pci_dev_put(mci_pdev);
507 return -ENODEV;
508 }
509 }
510 return 0;
511}
512
513
514static void __exit i82875p_exit(void)
515{
516 debugf3("MC: " __FILE__ ": %s()\n", __func__);
517
518 pci_unregister_driver(&i82875p_driver);
519 if (!i82875p_registered) {
520 i82875p_remove_one(mci_pdev);
521 pci_dev_put(mci_pdev);
522 }
523}
524
525
526module_init(i82875p_init);
527module_exit(i82875p_exit);
528
529
530MODULE_LICENSE("GPL");
531MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
532MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
new file mode 100644
index 000000000000..e90892831b90
--- /dev/null
+++ b/drivers/edac/r82600_edac.c
@@ -0,0 +1,407 @@
1/*
2 * Radisys 82600 Embedded chipset Memory Controller kernel module
3 * (C) 2005 EADS Astrium
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Tim Small <tim@buttersideup.com>, based on work by Thayne
8 * Harbaugh, Dan Hollis <goemon at anime dot net> and others.
9 *
10 * $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $
11 *
12 * Written with reference to 82600 High Integration Dual PCI System
13 * Controller Data Book:
14 * http://www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
15 * references to this document given in []
16 */
17
18#include <linux/config.h>
19#include <linux/module.h>
20#include <linux/init.h>
21
22#include <linux/pci.h>
23#include <linux/pci_ids.h>
24
25#include <linux/slab.h>
26
27#include "edac_mc.h"
28
29/* Radisys say "The 82600 integrates a main memory SDRAM controller that
30 * supports up to four banks of memory. The four banks can support a mix of
31 * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
32 * each of which can be any size from 16MB to 512MB. Both registered (control
33 * signals buffered) and unbuffered DIMM types are supported. Mixing of
34 * registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs
35 * is not allowed. The 82600 SDRAM interface operates at the same frequency as
36 * the CPU bus, 66MHz, 100MHz or 133MHz."
37 */
38
39#define R82600_NR_CSROWS 4
40#define R82600_NR_CHANS 1
41#define R82600_NR_DIMMS 4
42
43#define R82600_BRIDGE_ID 0x8200
44
45/* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */
46#define R82600_DRAMC 0x57 /* Various SDRAM related control bits
47 * all bits are R/W
48 *
49 * 7 SDRAM ISA Hole Enable
50 * 6 Flash Page Mode Enable
51 * 5 ECC Enable: 1=ECC 0=noECC
52 * 4 DRAM DIMM Type: 1=
53 * 3 BIOS Alias Disable
54 * 2 SDRAM BIOS Flash Write Enable
55 * 1:0 SDRAM Refresh Rate: 00=Disabled
56 * 01=7.8usec (256Mbit SDRAMs)
57 * 10=15.6us 11=125usec
58 */
59
60#define R82600_SDRAMC 0x76 /* "SDRAM Control Register"
61 * More SDRAM related control bits
62 * all bits are R/W
63 *
64 * 15:8 Reserved.
65 *
66 * 7:5 Special SDRAM Mode Select
67 *
68 * 4 Force ECC
69 *
70 * 1=Drive ECC bits to 0 during
71 * write cycles (i.e. ECC test mode)
72 *
73 * 0=Normal ECC functioning
74 *
75 * 3 Enhanced Paging Enable
76 *
77 * 2 CAS# Latency 0=3clks 1=2clks
78 *
79 * 1 RAS# to CAS# Delay 0=3 1=2
80 *
81 * 0 RAS# Precharge 0=3 1=2
82 */
83
84#define R82600_EAP 0x80 /* ECC Error Address Pointer Register
85 *
86 * 31 Disable Hardware Scrubbing (RW)
87 * 0=Scrub on corrected read
88 * 1=Don't scrub on corrected read
89 *
90 * 30:12 Error Address Pointer (RO)
91 * Upper 19 bits of error address
92 *
93 * 11:4 Syndrome Bits (RO)
94 *
95 * 3 BSERR# on multibit error (RW)
96 * 1=enable 0=disable
97 *
 98				 * 2    NMI on Single Bit Error (RW)
 99				 *      1=NMI triggered by SBE n.b. other
100				 *      prerequisites
101 * 0=NMI not triggered
102 *
103 * 1 MBE (R/WC)
104 * read 1=MBE at EAP (see above)
105 * read 0=no MBE, or SBE occurred first
106 * write 1=Clear MBE status (must also
107 * clear SBE)
108 * write 0=NOP
109 *
110				 * 0    SBE (R/WC)
111 * read 1=SBE at EAP (see above)
112 * read 0=no SBE, or MBE occurred first
113 * write 1=Clear SBE status (must also
114 * clear MBE)
115 * write 0=NOP
116 */
117
118#define R82600_DRBA	0x60	/* + 0x60..0x63 SDRAM Row Boundary Address
119 * Registers
120 *
121 * 7:0 Address lines 30:24 - upper limit of
122 * each row [p57]
123 */
124
125struct r82600_error_info {
126 u32 eapr;
127};
128
129
130static unsigned int disable_hardware_scrub = 0;
131
132
133static void r82600_get_error_info (struct mem_ctl_info *mci,
134 struct r82600_error_info *info)
135{
136 pci_read_config_dword(mci->pdev, R82600_EAP, &info->eapr);
137
138 if (info->eapr & BIT(0))
139 /* Clear error to allow next error to be reported [p.62] */
140 pci_write_bits32(mci->pdev, R82600_EAP,
141				 ((u32) BIT(0) | (u32) BIT(1)),
142				 ((u32) BIT(0) | (u32) BIT(1)));
143
144 if (info->eapr & BIT(1))
145 /* Clear error to allow next error to be reported [p.62] */
146 pci_write_bits32(mci->pdev, R82600_EAP,
147				 ((u32) BIT(0) | (u32) BIT(1)),
148				 ((u32) BIT(0) | (u32) BIT(1)));
149}
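(Editorial sketch: both flag bits are R/WC, i.e. write-one-to-clear, which is why the value and mask above are the same. A compact illustration of that idiom, reusing the driver's pci_write_bits32() helper from edac_mc.h; the function name is hypothetical:)

static inline void r82600_clear_error_flags(struct pci_dev *pdev)
{
	u32 rw1c = BIT(0) | BIT(1);	/* SBE and MBE must be cleared together */

	/* value and mask are both the two flag bits, so the
	 * read-modify-write touches nothing else in the register */
	pci_write_bits32(pdev, R82600_EAP, rw1c, rw1c);
}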
150
151
152static int r82600_process_error_info (struct mem_ctl_info *mci,
153 struct r82600_error_info *info, int handle_errors)
154{
155 int error_found;
156 u32 eapaddr, page;
157 u32 syndrome;
158
159 error_found = 0;
160
161 /* bits 30:12 store the upper 19 bits of the 32 bit error address */
162	eapaddr = ((info->eapr >> 12) & 0x7FFFF) << 12;
163 /* Syndrome in bits 11:4 [p.62] */
164 syndrome = (info->eapr >> 4) & 0xFF;
165
166	/* the R82600 reports the error address only at 4KB *
167	 * resolution (bits 30:12 of the address) */
168 page = eapaddr >> PAGE_SHIFT;
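	/* worked example (illustrative): eapr == 0x0abcd171 decodes to
	 * eapaddr == 0x0abcd000, page == 0x0abcd (PAGE_SHIFT == 12) and
	 * syndrome == 0x17; BIT(0) is set, i.e. a correctable error */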
169
170 if (info->eapr & BIT(0)) { /* CE? */
171 error_found = 1;
172
173 if (handle_errors)
174 edac_mc_handle_ce(
175 mci, page, 0, /* not avail */
176 syndrome,
177 edac_mc_find_csrow_by_page(mci, page),
178 0, /* channel */
179 mci->ctl_name);
180 }
181
182 if (info->eapr & BIT(1)) { /* UE? */
183 error_found = 1;
184
185 if (handle_errors)
186 /* 82600 doesn't give enough info */
187 edac_mc_handle_ue(mci, page, 0,
188 edac_mc_find_csrow_by_page(mci, page),
189 mci->ctl_name);
190 }
191
192 return error_found;
193}
194
195static void r82600_check(struct mem_ctl_info *mci)
196{
197 struct r82600_error_info info;
198
199 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
200 r82600_get_error_info(mci, &info);
201 r82600_process_error_info(mci, &info, 1);
202}
203
204static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
205{
206 int rc = -ENODEV;
207 int index;
208 struct mem_ctl_info *mci = NULL;
209 u8 dramcr;
210 u32 ecc_on;
211 u32 reg_sdram;
212 u32 eapr;
213 u32 scrub_disabled;
214 u32 sdram_refresh_rate;
215 u32 row_high_limit_last = 0;
216 u32 eap_init_bits;
217
218 debugf0("MC: " __FILE__ ": %s()\n", __func__);
219
220
221 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
222 pci_read_config_dword(pdev, R82600_EAP, &eapr);
223
224 ecc_on = dramcr & BIT(5);
225 reg_sdram = dramcr & BIT(4);
226 scrub_disabled = eapr & BIT(31);
227 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
228
229 debugf2("MC: " __FILE__ ": %s(): sdram refresh rate = %#0x\n",
230 __func__, sdram_refresh_rate);
231
232 debugf2("MC: " __FILE__ ": %s(): DRAMC register = %#0x\n", __func__,
233 dramcr);
234
235 mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
236
237 if (mci == NULL) {
238 rc = -ENOMEM;
239 goto fail;
240 }
241
242 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
243
244 mci->pdev = pdev;
245 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
246
247 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
248	/* FIXME: try to work out whether the chip pins have been *
249	 * used for COM2 instead on this board [MA6?] */
250
251 /* On the R82600, the pins for memory bits 72:65 - i.e. the *
252 * EC bits are shared with the pins for COM2 (!), so if COM2 *
253 * is enabled, we assume COM2 is wired up, and thus no EDAC *
254 * is possible. */
255 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
256 if (ecc_on) {
257 if (scrub_disabled)
258 debugf3("MC: " __FILE__ ": %s(): mci = %p - "
259 "Scrubbing disabled! EAP: %#0x\n", __func__,
260 mci, eapr);
261 } else
262 mci->edac_cap = EDAC_FLAG_NONE;
263
264 mci->mod_name = BS_MOD_STR;
265 mci->mod_ver = "$Revision: 1.1.2.6 $";
266 mci->ctl_name = "R82600";
267 mci->edac_check = r82600_check;
268 mci->ctl_page_to_phys = NULL;
269
270 for (index = 0; index < mci->nr_csrows; index++) {
271 struct csrow_info *csrow = &mci->csrows[index];
272		u8 drbar;	/* SDRAM Row Boundary Address Register */
273 u32 row_high_limit;
274 u32 row_base;
275
276 /* find the DRAM Chip Select Base address and mask */
277 pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar);
278
279 debugf1("MC%d: " __FILE__ ": %s() Row=%d DRBA = %#0x\n",
280 mci->mc_idx, __func__, index, drbar);
281
282 row_high_limit = ((u32) drbar << 24);
283/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
284
285 debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
286			"Boundary Address=%#0x, Last = %#0x\n",
287 mci->mc_idx, __func__, index, row_high_limit,
288 row_high_limit_last);
289
290 /* Empty row [p.57] */
291 if (row_high_limit == row_high_limit_last)
292 continue;
293
294 row_base = row_high_limit_last;
295
296 csrow->first_page = row_base >> PAGE_SHIFT;
297 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
298 csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
299 /* Error address is top 19 bits - so granularity is *
300 * 14 bits */
301 csrow->grain = 1 << 14;
302 csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
303 /* FIXME - check that this is unknowable with this chipset */
304 csrow->dtype = DEV_UNKNOWN;
305
306 /* Mode is global on 82600 */
307 csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
308 row_high_limit_last = row_high_limit;
309 }
310
311 /* clear counters */
312 /* FIXME should we? */
313
314 if (edac_mc_add_mc(mci)) {
315 debugf3("MC: " __FILE__
316 ": %s(): failed edac_mc_add_mc()\n", __func__);
317 goto fail;
318 }
319
320 /* get this far and it's successful */
321
322 /* Clear error flags to allow next error to be reported [p.62] */
323 /* Test systems seem to always have the UE flag raised on boot */
324
325	eap_init_bits = BIT(0) | BIT(1);
326 if (disable_hardware_scrub) {
327 eap_init_bits |= BIT(31);
328 debugf3("MC: " __FILE__ ": %s(): Disabling Hardware Scrub "
329			"(no scrub on corrected reads)\n", __func__);
330 }
331
332 pci_write_bits32(mci->pdev, R82600_EAP, eap_init_bits,
333 eap_init_bits);
334
335 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
336 return 0;
337
338fail:
339 if (mci)
340 edac_mc_free(mci);
341
342 return rc;
343}
344
345/* returns count (>= 0), or negative on error */
346static int __devinit r82600_init_one(struct pci_dev *pdev,
347 const struct pci_device_id *ent)
348{
349 debugf0("MC: " __FILE__ ": %s()\n", __func__);
350
351	/* don't need to call pci_enable_device() */
352 return r82600_probe1(pdev, ent->driver_data);
353}
354
355
356static void __devexit r82600_remove_one(struct pci_dev *pdev)
357{
358 struct mem_ctl_info *mci;
359
360 debugf0(__FILE__ ": %s()\n", __func__);
361
362 if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) &&
363 !edac_mc_del_mc(mci))
364 edac_mc_free(mci);
365}
366
367
368static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
369 {PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)},
370 {0,} /* 0 terminated list. */
371};
372
373MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
374
375
376static struct pci_driver r82600_driver = {
377 .name = BS_MOD_STR,
378 .probe = r82600_init_one,
379 .remove = __devexit_p(r82600_remove_one),
380 .id_table = r82600_pci_tbl,
381};
382
383
384static int __init r82600_init(void)
385{
386 return pci_register_driver(&r82600_driver);
387}
388
389
390static void __exit r82600_exit(void)
391{
392 pci_unregister_driver(&r82600_driver);
393}
394
395
396module_init(r82600_init);
397module_exit(r82600_exit);
398
399
400MODULE_LICENSE("GPL");
401MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
402 "on behalf of EADS Astrium");
403MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
404
405module_param(disable_hardware_scrub, bool, 0644);
406MODULE_PARM_DESC(disable_hardware_scrub,
407 "If set, disable the chipset's automatic scrub for CEs");
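(Usage note: assuming the module is built as r82600_edac, the scrub policy can be chosen at load time with "modprobe r82600_edac disable_hardware_scrub=1"; the 0644 mode above also exposes it under /sys/module/r82600_edac/parameters/, though the driver only samples it at probe time.)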
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index ca99979c868a..8b3515f394a6 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -8,6 +8,7 @@
8 * completion notification. 8 * completion notification.
9 */ 9 */
10 10
11#include <asm/types.h>
11#include <asm/atomic.h> 12#include <asm/atomic.h>
12 13
13#include <linux/blkdev.h> 14#include <linux/blkdev.h>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1421941487c4..626508afe1b1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -7,6 +7,7 @@ menu "Network device support"
7 7
8config NETDEVICES 8config NETDEVICES
9 depends on NET 9 depends on NET
10 default y if UML
10 bool "Network device support" 11 bool "Network device support"
11 ---help--- 12 ---help---
12 You can say N here if you don't intend to connect your Linux box to 13 You can say N here if you don't intend to connect your Linux box to
@@ -1914,6 +1915,15 @@ config E1000_NAPI
1914 1915
1915 If in doubt, say N. 1916 If in doubt, say N.
1916 1917
1918config E1000_DISABLE_PACKET_SPLIT
1919 bool "Disable Packet Split for PCI express adapters"
1920 depends on E1000
1921 help
1922 Say Y here if you want to use the legacy receive path for PCI express
1923	  hardware.
1924
1925 If in doubt, say N.
1926
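(Usage note: when enabled this appears in .config as CONFIG_E1000_DISABLE_PACKET_SPLIT=y, forcing the legacy single-buffer receive path on PCI Express adapters.)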
1917source "drivers/net/ixp2000/Kconfig" 1927source "drivers/net/ixp2000/Kconfig"
1918 1928
1919config MYRI_SBUS 1929config MYRI_SBUS
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 7aa49b974dc5..df9d6e80c4f2 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -2136,7 +2136,7 @@ static int __init b44_init(void)
2136 2136
2137 /* Setup parameters for syncing RX/TX DMA descriptors */ 2137 /* Setup parameters for syncing RX/TX DMA descriptors */
2138 dma_desc_align_mask = ~(dma_desc_align_size - 1); 2138 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2139 dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc)); 2139 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2140 2140
2141 return pci_module_init(&b44_driver); 2141 return pci_module_init(&b44_driver);
2142} 2142}
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index dde631f8f685..6e295fce5c6f 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -335,6 +335,30 @@ static inline void cas_mask_intr(struct cas *cp)
335 cas_disable_irq(cp, i); 335 cas_disable_irq(cp, i);
336} 336}
337 337
338static inline void cas_buffer_init(cas_page_t *cp)
339{
340 struct page *page = cp->buffer;
341 atomic_set((atomic_t *)&page->lru.next, 1);
342}
343
344static inline int cas_buffer_count(cas_page_t *cp)
345{
346 struct page *page = cp->buffer;
347 return atomic_read((atomic_t *)&page->lru.next);
348}
349
350static inline void cas_buffer_inc(cas_page_t *cp)
351{
352 struct page *page = cp->buffer;
353 atomic_inc((atomic_t *)&page->lru.next);
354}
355
356static inline void cas_buffer_dec(cas_page_t *cp)
357{
358 struct page *page = cp->buffer;
359 atomic_dec((atomic_t *)&page->lru.next);
360}
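/* Editorial note: these helpers keep a driver-private reference count
 * in the otherwise-unused page->lru.next field of pages the driver
 * allocated itself.  The apparent intent is to track the driver's own
 * outstanding uses of a buffer separately from page_count(), which the
 * rest of the stack also manipulates once fragments are handed up. */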
361
338static void cas_enable_irq(struct cas *cp, const int ring) 362static void cas_enable_irq(struct cas *cp, const int ring)
339{ 363{
340 if (ring == 0) { /* all but TX_DONE */ 364 if (ring == 0) { /* all but TX_DONE */
@@ -472,6 +496,7 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
472{ 496{
473 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, 497 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
474 PCI_DMA_FROMDEVICE); 498 PCI_DMA_FROMDEVICE);
499 cas_buffer_dec(page);
475 __free_pages(page->buffer, cp->page_order); 500 __free_pages(page->buffer, cp->page_order);
476 kfree(page); 501 kfree(page);
477 return 0; 502 return 0;
@@ -501,6 +526,7 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
501 page->buffer = alloc_pages(flags, cp->page_order); 526 page->buffer = alloc_pages(flags, cp->page_order);
502 if (!page->buffer) 527 if (!page->buffer)
503 goto page_err; 528 goto page_err;
529 cas_buffer_init(page);
504 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0, 530 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
505 cp->page_size, PCI_DMA_FROMDEVICE); 531 cp->page_size, PCI_DMA_FROMDEVICE);
506 return page; 532 return page;
@@ -579,7 +605,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
579 list_for_each_safe(elem, tmp, &list) { 605 list_for_each_safe(elem, tmp, &list) {
580 cas_page_t *page = list_entry(elem, cas_page_t, list); 606 cas_page_t *page = list_entry(elem, cas_page_t, list);
581 607
582 if (page_count(page->buffer) > 1) 608 if (cas_buffer_count(page) > 1)
583 continue; 609 continue;
584 610
585 list_del(elem); 611 list_del(elem);
@@ -1347,7 +1373,7 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1347 cas_page_t *page = cp->rx_pages[1][index]; 1373 cas_page_t *page = cp->rx_pages[1][index];
1348 cas_page_t *new; 1374 cas_page_t *new;
1349 1375
1350 if (page_count(page->buffer) == 1) 1376 if (cas_buffer_count(page) == 1)
1351 return page; 1377 return page;
1352 1378
1353 new = cas_page_dequeue(cp); 1379 new = cas_page_dequeue(cp);
@@ -1367,7 +1393,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1367 cas_page_t **page1 = cp->rx_pages[1]; 1393 cas_page_t **page1 = cp->rx_pages[1];
1368 1394
1369 /* swap if buffer is in use */ 1395 /* swap if buffer is in use */
1370 if (page_count(page0[index]->buffer) > 1) { 1396 if (cas_buffer_count(page0[index]) > 1) {
1371 cas_page_t *new = cas_page_spare(cp, index); 1397 cas_page_t *new = cas_page_spare(cp, index);
1372 if (new) { 1398 if (new) {
1373 page1[index] = page0[index]; 1399 page1[index] = page0[index];
@@ -2039,6 +2065,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
2039 skb->len += hlen - swivel; 2065 skb->len += hlen - swivel;
2040 2066
2041 get_page(page->buffer); 2067 get_page(page->buffer);
2068 cas_buffer_inc(page);
2042 frag->page = page->buffer; 2069 frag->page = page->buffer;
2043 frag->page_offset = off; 2070 frag->page_offset = off;
2044 frag->size = hlen - swivel; 2071 frag->size = hlen - swivel;
@@ -2063,6 +2090,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
2063 frag++; 2090 frag++;
2064 2091
2065 get_page(page->buffer); 2092 get_page(page->buffer);
2093 cas_buffer_inc(page);
2066 frag->page = page->buffer; 2094 frag->page = page->buffer;
2067 frag->page_offset = 0; 2095 frag->page_offset = 0;
2068 frag->size = hlen; 2096 frag->size = hlen;
@@ -2225,7 +2253,7 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2225 released = 0; 2253 released = 0;
2226 while (entry != last) { 2254 while (entry != last) {
2227 /* make a new buffer if it's still in use */ 2255 /* make a new buffer if it's still in use */
2228 if (page_count(page[entry]->buffer) > 1) { 2256 if (cas_buffer_count(page[entry]) > 1) {
2229 cas_page_t *new = cas_page_dequeue(cp); 2257 cas_page_t *new = cas_page_dequeue(cp);
2230 if (!new) { 2258 if (!new) {
2231 /* let the timer know that we need to 2259 /* let the timer know that we need to
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 4726722a0635..bf1fd2b98bf8 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1,25 +1,25 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 3
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. 4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free 7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option) 8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version. 9 any later version.
10 10
11 This program is distributed in the hope that it will be useful, but WITHOUT 11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details. 14 more details.
15 15
16 You should have received a copy of the GNU General Public License along with 16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59 17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 19
20 The full GNU General Public License is included in this distribution in the 20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE. 21 file called LICENSE.
22 22
23 Contact Information: 23 Contact Information:
24 Linux NICS <linux.nics@intel.com> 24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -160,7 +160,7 @@
160 160
161#define DRV_NAME "e100" 161#define DRV_NAME "e100"
162#define DRV_EXT "-NAPI" 162#define DRV_EXT "-NAPI"
163#define DRV_VERSION "3.4.14-k4"DRV_EXT 163#define DRV_VERSION "3.5.10-k2"DRV_EXT
164#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" 164#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
165#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation" 165#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
166#define PFX DRV_NAME ": " 166#define PFX DRV_NAME ": "
@@ -320,7 +320,7 @@ enum cuc_dump {
320 cuc_dump_complete = 0x0000A005, 320 cuc_dump_complete = 0x0000A005,
321 cuc_dump_reset_complete = 0x0000A007, 321 cuc_dump_reset_complete = 0x0000A007,
322}; 322};
323 323
324enum port { 324enum port {
325 software_reset = 0x0000, 325 software_reset = 0x0000,
326 selftest = 0x0001, 326 selftest = 0x0001,
@@ -715,10 +715,10 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
715 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; 715 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
716 writeb(ctrl, &nic->csr->eeprom_ctrl_lo); 716 writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
717 e100_write_flush(nic); udelay(4); 717 e100_write_flush(nic); udelay(4);
718 718
719 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); 719 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
720 e100_write_flush(nic); udelay(4); 720 e100_write_flush(nic); udelay(4);
721 721
722 /* Eeprom drives a dummy zero to EEDO after receiving 722 /* Eeprom drives a dummy zero to EEDO after receiving
723 * complete address. Use this to adjust addr_len. */ 723 * complete address. Use this to adjust addr_len. */
724 ctrl = readb(&nic->csr->eeprom_ctrl_lo); 724 ctrl = readb(&nic->csr->eeprom_ctrl_lo);
@@ -726,7 +726,7 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
726 *addr_len -= (i - 16); 726 *addr_len -= (i - 16);
727 i = 17; 727 i = 17;
728 } 728 }
729 729
730 data = (data << 1) | (ctrl & eedo ? 1 : 0); 730 data = (data << 1) | (ctrl & eedo ? 1 : 0);
731 } 731 }
732 732
@@ -1170,7 +1170,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
11700x00000000, 0x00000000, 0x00000000, 0x00000000, \ 11700x00000000, 0x00000000, 0x00000000, 0x00000000, \
1171} 1171}
1172 1172
1173static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1173static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1174{ 1174{
1175/* *INDENT-OFF* */ 1175/* *INDENT-OFF* */
1176 static struct { 1176 static struct {
@@ -1213,13 +1213,13 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1213* driver can change the algorithm. 1213* driver can change the algorithm.
1214* 1214*
1215* INTDELAY - This loads the dead-man timer with its inital value. 1215* INTDELAY - This loads the dead-man timer with its inital value.
1216* When this timer expires the interrupt is asserted, and the 1216* When this timer expires the interrupt is asserted, and the
1217* timer is reset each time a new packet is received. (see 1217* timer is reset each time a new packet is received. (see
1218* BUNDLEMAX below to set the limit on number of chained packets) 1218* BUNDLEMAX below to set the limit on number of chained packets)
1219* The current default is 0x600 or 1536. Experiments show that 1219* The current default is 0x600 or 1536. Experiments show that
1220* the value should probably stay within the 0x200 - 0x1000. 1220* the value should probably stay within the 0x200 - 0x1000.
1221* 1221*
1222* BUNDLEMAX - 1222* BUNDLEMAX -
1223* This sets the maximum number of frames that will be bundled. In 1223* This sets the maximum number of frames that will be bundled. In
1224* some situations, such as the TCP windowing algorithm, it may be 1224* some situations, such as the TCP windowing algorithm, it may be
1225* better to limit the growth of the bundle size than let it go as 1225* better to limit the growth of the bundle size than let it go as
@@ -1229,7 +1229,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1229* an interrupt for every frame received. If you do not want to put 1229* an interrupt for every frame received. If you do not want to put
1230* a limit on the bundle size, set this value to xFFFF. 1230* a limit on the bundle size, set this value to xFFFF.
1231* 1231*
1232* BUNDLESMALL - 1232* BUNDLESMALL -
1233* This contains a bit-mask describing the minimum size frame that 1233* This contains a bit-mask describing the minimum size frame that
1234* will be bundled. The default masks the lower 7 bits, which means 1234* will be bundled. The default masks the lower 7 bits, which means
1235* that any frame less than 128 bytes in length will not be bundled, 1235* that any frame less than 128 bytes in length will not be bundled,
@@ -1244,7 +1244,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1244* 1244*
1245* The current default is 0xFF80, which masks out the lower 7 bits. 1245* The current default is 0xFF80, which masks out the lower 7 bits.
1246* This means that any frame which is x7F (127) bytes or smaller 1246* This means that any frame which is x7F (127) bytes or smaller
1247* will cause an immediate interrupt. Because this value must be a 1247* will cause an immediate interrupt. Because this value must be a
1248* bit mask, there are only a few valid values that can be used. To 1248* bit mask, there are only a few valid values that can be used. To
1249* turn this feature off, the driver can write the value xFFFF to the 1249* turn this feature off, the driver can write the value xFFFF to the
1250* lower word of this instruction (in the same way that the other 1250* lower word of this instruction (in the same way that the other
@@ -1253,7 +1253,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1253* standard Ethernet frames are <= 2047 bytes in length. 1253* standard Ethernet frames are <= 2047 bytes in length.
1254*************************************************************************/ 1254*************************************************************************/
1255 1255
1256/* if you wish to disable the ucode functionality, while maintaining the 1256/* if you wish to disable the ucode functionality, while maintaining the
1257 * workarounds it provides, set the following defines to: 1257 * workarounds it provides, set the following defines to:
1258 * BUNDLESMALL 0 1258 * BUNDLESMALL 0
1259 * BUNDLEMAX 1 1259 * BUNDLEMAX 1
@@ -1284,12 +1284,46 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1284 1284
1285 for (i = 0; i < UCODE_SIZE; i++) 1285 for (i = 0; i < UCODE_SIZE; i++)
1286 cb->u.ucode[i] = cpu_to_le32(ucode[i]); 1286 cb->u.ucode[i] = cpu_to_le32(ucode[i]);
1287 cb->command = cpu_to_le16(cb_ucode); 1287 cb->command = cpu_to_le16(cb_ucode | cb_el);
1288 return; 1288 return;
1289 } 1289 }
1290 1290
1291noloaducode: 1291noloaducode:
1292 cb->command = cpu_to_le16(cb_nop); 1292 cb->command = cpu_to_le16(cb_nop | cb_el);
1293}
1294
1295static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
1296 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
1297{
1298 int err = 0, counter = 50;
1299 struct cb *cb = nic->cb_to_clean;
1300
1301	if ((err = e100_exec_cb(nic, skb, cb_prepare)))
1302 DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
1303
1304 /* must restart cuc */
1305 nic->cuc_cmd = cuc_start;
1306
1307 /* wait for completion */
1308 e100_write_flush(nic);
1309 udelay(10);
1310
1311 /* wait for possibly (ouch) 500ms */
1312 while (!(cb->status & cpu_to_le16(cb_complete))) {
1313 msleep(10);
1314 if (!--counter) break;
1315 }
1316
1317 /* ack any interupts, something could have been set */
1318 writeb(~0, &nic->csr->scb.stat_ack);
1319
1320 /* if the command failed, or is not OK, notify and return */
1321 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1322 DPRINTK(PROBE,ERR, "ucode load failed\n");
1323 err = -EPERM;
1324 }
1325
1326 return err;
1293} 1327}
1294 1328
1295static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, 1329static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
@@ -1357,13 +1391,13 @@ static int e100_phy_init(struct nic *nic)
1357 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); 1391 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
1358 } 1392 }
1359 1393
1360 if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && 1394 if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
1361 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) { 1395 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) {
1362 /* enable/disable MDI/MDI-X auto-switching. 1396 /* enable/disable MDI/MDI-X auto-switching.
1363 MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */ 1397 MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */
1364 if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) || 1398 if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) ||
1365 (nic->mac == mac_82551_10) || (nic->mii.force_media) || 1399 (nic->mac == mac_82551_10) || (nic->mii.force_media) ||
1366 !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled)) 1400 !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
1367 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0); 1401 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0);
1368 else 1402 else
1369 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH); 1403 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH);
@@ -1388,7 +1422,7 @@ static int e100_hw_init(struct nic *nic)
1388 return err; 1422 return err;
1389 if((err = e100_exec_cmd(nic, ruc_load_base, 0))) 1423 if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
1390 return err; 1424 return err;
1391 if((err = e100_exec_cb(nic, NULL, e100_load_ucode))) 1425 if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
1392 return err; 1426 return err;
1393 if((err = e100_exec_cb(nic, NULL, e100_configure))) 1427 if((err = e100_exec_cb(nic, NULL, e100_configure)))
1394 return err; 1428 return err;
@@ -1493,7 +1527,7 @@ static void e100_update_stats(struct nic *nic)
1493 } 1527 }
1494 } 1528 }
1495 1529
1496 1530
1497 if(e100_exec_cmd(nic, cuc_dump_reset, 0)) 1531 if(e100_exec_cmd(nic, cuc_dump_reset, 0))
1498 DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n"); 1532 DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
1499} 1533}
@@ -1542,10 +1576,10 @@ static void e100_watchdog(unsigned long data)
1542 mii_check_link(&nic->mii); 1576 mii_check_link(&nic->mii);
1543 1577
1544 /* Software generated interrupt to recover from (rare) Rx 1578 /* Software generated interrupt to recover from (rare) Rx
1545 * allocation failure. 1579 * allocation failure.
1546 * Unfortunately have to use a spinlock to not re-enable interrupts 1580 * Unfortunately have to use a spinlock to not re-enable interrupts
1547 * accidentally, due to hardware that shares a register between the 1581 * accidentally, due to hardware that shares a register between the
1548 * interrupt mask bit and the SW Interrupt generation bit */ 1582 * interrupt mask bit and the SW Interrupt generation bit */
1549 spin_lock_irq(&nic->cmd_lock); 1583 spin_lock_irq(&nic->cmd_lock);
1550 writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); 1584 writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
1551 spin_unlock_irq(&nic->cmd_lock); 1585 spin_unlock_irq(&nic->cmd_lock);
@@ -1830,7 +1864,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1830 struct rx *rx_to_start = NULL; 1864 struct rx *rx_to_start = NULL;
1831 1865
1832 /* are we already rnr? then pay attention!!! this ensures that 1866 /* are we already rnr? then pay attention!!! this ensures that
1833 * the state machine progression never allows a start with a 1867 * the state machine progression never allows a start with a
1834 * partially cleaned list, avoiding a race between hardware 1868 * partially cleaned list, avoiding a race between hardware
1835 * and rx_to_clean when in NAPI mode */ 1869 * and rx_to_clean when in NAPI mode */
1836 if(RU_SUSPENDED == nic->ru_running) 1870 if(RU_SUSPENDED == nic->ru_running)
@@ -2066,7 +2100,7 @@ static void e100_tx_timeout(struct net_device *netdev)
2066{ 2100{
2067 struct nic *nic = netdev_priv(netdev); 2101 struct nic *nic = netdev_priv(netdev);
2068 2102
2069 /* Reset outside of interrupt context, to avoid request_irq 2103 /* Reset outside of interrupt context, to avoid request_irq
2070 * in interrupt context */ 2104 * in interrupt context */
2071 schedule_work(&nic->tx_timeout_task); 2105 schedule_work(&nic->tx_timeout_task);
2072} 2106}
@@ -2313,7 +2347,7 @@ static int e100_set_ringparam(struct net_device *netdev,
2313 struct param_range *rfds = &nic->params.rfds; 2347 struct param_range *rfds = &nic->params.rfds;
2314 struct param_range *cbs = &nic->params.cbs; 2348 struct param_range *cbs = &nic->params.cbs;
2315 2349
2316 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 2350 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2317 return -EINVAL; 2351 return -EINVAL;
2318 2352
2319 if(netif_running(netdev)) 2353 if(netif_running(netdev))
@@ -2631,7 +2665,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2631 nic->flags |= wol_magic; 2665 nic->flags |= wol_magic;
2632 2666
2633 /* ack any pending wake events, disable PME */ 2667 /* ack any pending wake events, disable PME */
2634 pci_enable_wake(pdev, 0, 0); 2668 err = pci_enable_wake(pdev, 0, 0);
2669 if (err)
2670 DPRINTK(PROBE, ERR, "Error clearing wake event\n");
2635 2671
2636 strcpy(netdev->name, "eth%d"); 2672 strcpy(netdev->name, "eth%d");
2637 if((err = register_netdev(netdev))) { 2673 if((err = register_netdev(netdev))) {
@@ -2682,6 +2718,7 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2682{ 2718{
2683 struct net_device *netdev = pci_get_drvdata(pdev); 2719 struct net_device *netdev = pci_get_drvdata(pdev);
2684 struct nic *nic = netdev_priv(netdev); 2720 struct nic *nic = netdev_priv(netdev);
2721 int retval;
2685 2722
2686 if(netif_running(netdev)) 2723 if(netif_running(netdev))
2687 e100_down(nic); 2724 e100_down(nic);
@@ -2689,9 +2726,14 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2689 netif_device_detach(netdev); 2726 netif_device_detach(netdev);
2690 2727
2691 pci_save_state(pdev); 2728 pci_save_state(pdev);
2692 pci_enable_wake(pdev, pci_choose_state(pdev, state), nic->flags & (wol_magic | e100_asf(nic))); 2729 retval = pci_enable_wake(pdev, pci_choose_state(pdev, state),
2730 nic->flags & (wol_magic | e100_asf(nic)));
2731 if (retval)
2732 DPRINTK(PROBE,ERR, "Error enabling wake\n");
2693 pci_disable_device(pdev); 2733 pci_disable_device(pdev);
2694 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 2734 retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
2735 if (retval)
2736 DPRINTK(PROBE,ERR, "Error %d setting power state\n", retval);
2695 2737
2696 return 0; 2738 return 0;
2697} 2739}
@@ -2700,11 +2742,16 @@ static int e100_resume(struct pci_dev *pdev)
2700{ 2742{
2701 struct net_device *netdev = pci_get_drvdata(pdev); 2743 struct net_device *netdev = pci_get_drvdata(pdev);
2702 struct nic *nic = netdev_priv(netdev); 2744 struct nic *nic = netdev_priv(netdev);
2745 int retval;
2703 2746
2704 pci_set_power_state(pdev, PCI_D0); 2747 retval = pci_set_power_state(pdev, PCI_D0);
2748 if (retval)
2749 DPRINTK(PROBE,ERR, "Error waking adapter\n");
2705 pci_restore_state(pdev); 2750 pci_restore_state(pdev);
2706 /* ack any pending wake events, disable PME */ 2751 /* ack any pending wake events, disable PME */
2707 pci_enable_wake(pdev, 0, 0); 2752 retval = pci_enable_wake(pdev, 0, 0);
2753 if (retval)
2754 DPRINTK(PROBE,ERR, "Error clearing wake events\n");
2708 if(e100_hw_init(nic)) 2755 if(e100_hw_init(nic))
2709 DPRINTK(HW, ERR, "e100_hw_init failed\n"); 2756 DPRINTK(HW, ERR, "e100_hw_init failed\n");
2710 2757
@@ -2721,12 +2768,15 @@ static void e100_shutdown(struct pci_dev *pdev)
2721{ 2768{
2722 struct net_device *netdev = pci_get_drvdata(pdev); 2769 struct net_device *netdev = pci_get_drvdata(pdev);
2723 struct nic *nic = netdev_priv(netdev); 2770 struct nic *nic = netdev_priv(netdev);
2771 int retval;
2724 2772
2725#ifdef CONFIG_PM 2773#ifdef CONFIG_PM
2726 pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic))); 2774 retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
2727#else 2775#else
2728 pci_enable_wake(pdev, 0, nic->flags & (wol_magic)); 2776 retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
2729#endif 2777#endif
2778 if (retval)
2779 DPRINTK(PROBE,ERR, "Error enabling wake\n");
2730} 2780}
2731 2781
2732 2782
@@ -2739,7 +2789,7 @@ static struct pci_driver e100_driver = {
2739 .suspend = e100_suspend, 2789 .suspend = e100_suspend,
2740 .resume = e100_resume, 2790 .resume = e100_resume,
2741#endif 2791#endif
2742 .shutdown = e100_shutdown, 2792 .shutdown = e100_shutdown,
2743}; 2793};
2744 2794
2745static int __init e100_init_module(void) 2795static int __init e100_init_module(void)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index e02e9ba2e18b..27c77306193b 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -72,10 +72,6 @@
72#include <linux/mii.h> 72#include <linux/mii.h>
73#include <linux/ethtool.h> 73#include <linux/ethtool.h>
74#include <linux/if_vlan.h> 74#include <linux/if_vlan.h>
75#ifdef CONFIG_E1000_MQ
76#include <linux/cpu.h>
77#include <linux/smp.h>
78#endif
79 75
80#define BAR_0 0 76#define BAR_0 0
81#define BAR_1 1 77#define BAR_1 1
@@ -87,6 +83,10 @@
87struct e1000_adapter; 83struct e1000_adapter;
88 84
89#include "e1000_hw.h" 85#include "e1000_hw.h"
86#ifdef CONFIG_E1000_MQ
87#include <linux/cpu.h>
88#include <linux/smp.h>
89#endif
90 90
91#ifdef DBG 91#ifdef DBG
92#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args) 92#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
@@ -169,6 +169,13 @@ struct e1000_buffer {
169 uint16_t next_to_watch; 169 uint16_t next_to_watch;
170}; 170};
171 171
172#ifdef CONFIG_E1000_MQ
173struct e1000_queue_stats {
174 uint64_t packets;
175 uint64_t bytes;
176};
177#endif
178
172struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; }; 179struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
173struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; }; 180struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
174 181
@@ -191,10 +198,12 @@ struct e1000_tx_ring {
191 spinlock_t tx_lock; 198 spinlock_t tx_lock;
192 uint16_t tdh; 199 uint16_t tdh;
193 uint16_t tdt; 200 uint16_t tdt;
194 uint64_t pkt;
195 201
196 boolean_t last_tx_tso; 202 boolean_t last_tx_tso;
197 203
204#ifdef CONFIG_E1000_MQ
205 struct e1000_queue_stats tx_stats;
206#endif
198}; 207};
199 208
200struct e1000_rx_ring { 209struct e1000_rx_ring {
@@ -216,9 +225,17 @@ struct e1000_rx_ring {
216 struct e1000_ps_page *ps_page; 225 struct e1000_ps_page *ps_page;
217 struct e1000_ps_page_dma *ps_page_dma; 226 struct e1000_ps_page_dma *ps_page_dma;
218 227
228 struct sk_buff *rx_skb_top;
229 struct sk_buff *rx_skb_prev;
230
231 /* cpu for rx queue */
232 int cpu;
233
219 uint16_t rdh; 234 uint16_t rdh;
220 uint16_t rdt; 235 uint16_t rdt;
221 uint64_t pkt; 236#ifdef CONFIG_E1000_MQ
237 struct e1000_queue_stats rx_stats;
238#endif
222}; 239};
223 240
224#define E1000_DESC_UNUSED(R) \ 241#define E1000_DESC_UNUSED(R) \
@@ -251,6 +268,9 @@ struct e1000_adapter {
251 uint16_t link_speed; 268 uint16_t link_speed;
252 uint16_t link_duplex; 269 uint16_t link_duplex;
253 spinlock_t stats_lock; 270 spinlock_t stats_lock;
271#ifdef CONFIG_E1000_NAPI
272 spinlock_t tx_queue_lock;
273#endif
254 atomic_t irq_sem; 274 atomic_t irq_sem;
255 struct work_struct tx_timeout_task; 275 struct work_struct tx_timeout_task;
256 struct work_struct watchdog_task; 276 struct work_struct watchdog_task;
@@ -264,6 +284,7 @@ struct e1000_adapter {
264#ifdef CONFIG_E1000_MQ 284#ifdef CONFIG_E1000_MQ
265 struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */ 285 struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
266#endif 286#endif
287 unsigned long tx_queue_len;
267 uint32_t txd_cmd; 288 uint32_t txd_cmd;
268 uint32_t tx_int_delay; 289 uint32_t tx_int_delay;
269 uint32_t tx_abs_int_delay; 290 uint32_t tx_abs_int_delay;
@@ -271,9 +292,11 @@ struct e1000_adapter {
271 uint64_t gotcl_old; 292 uint64_t gotcl_old;
272 uint64_t tpt_old; 293 uint64_t tpt_old;
273 uint64_t colc_old; 294 uint64_t colc_old;
295 uint32_t tx_timeout_count;
274 uint32_t tx_fifo_head; 296 uint32_t tx_fifo_head;
275 uint32_t tx_head_addr; 297 uint32_t tx_head_addr;
276 uint32_t tx_fifo_size; 298 uint32_t tx_fifo_size;
299 uint8_t tx_timeout_factor;
277 atomic_t tx_fifo_stall; 300 atomic_t tx_fifo_stall;
278 boolean_t pcix_82544; 301 boolean_t pcix_82544;
279 boolean_t detect_tx_hung; 302 boolean_t detect_tx_hung;
@@ -281,14 +304,15 @@ struct e1000_adapter {
281 /* RX */ 304 /* RX */
282#ifdef CONFIG_E1000_NAPI 305#ifdef CONFIG_E1000_NAPI
283 boolean_t (*clean_rx) (struct e1000_adapter *adapter, 306 boolean_t (*clean_rx) (struct e1000_adapter *adapter,
284 struct e1000_rx_ring *rx_ring, 307 struct e1000_rx_ring *rx_ring,
285 int *work_done, int work_to_do); 308 int *work_done, int work_to_do);
286#else 309#else
287 boolean_t (*clean_rx) (struct e1000_adapter *adapter, 310 boolean_t (*clean_rx) (struct e1000_adapter *adapter,
288 struct e1000_rx_ring *rx_ring); 311 struct e1000_rx_ring *rx_ring);
289#endif 312#endif
290 void (*alloc_rx_buf) (struct e1000_adapter *adapter, 313 void (*alloc_rx_buf) (struct e1000_adapter *adapter,
291 struct e1000_rx_ring *rx_ring); 314 struct e1000_rx_ring *rx_ring,
315 int cleaned_count);
292 struct e1000_rx_ring *rx_ring; /* One per active queue */ 316 struct e1000_rx_ring *rx_ring; /* One per active queue */
293#ifdef CONFIG_E1000_NAPI 317#ifdef CONFIG_E1000_NAPI
294 struct net_device *polling_netdev; /* One per active queue */ 318 struct net_device *polling_netdev; /* One per active queue */
@@ -296,13 +320,15 @@ struct e1000_adapter {
296#ifdef CONFIG_E1000_MQ 320#ifdef CONFIG_E1000_MQ
297 struct net_device **cpu_netdev; /* per-cpu */ 321 struct net_device **cpu_netdev; /* per-cpu */
298 struct call_async_data_struct rx_sched_call_data; 322 struct call_async_data_struct rx_sched_call_data;
299 int cpu_for_queue[4]; 323 cpumask_t cpumask;
300#endif 324#endif
301 int num_queues; 325 int num_tx_queues;
326 int num_rx_queues;
302 327
303 uint64_t hw_csum_err; 328 uint64_t hw_csum_err;
304 uint64_t hw_csum_good; 329 uint64_t hw_csum_good;
305 uint64_t rx_hdr_split; 330 uint64_t rx_hdr_split;
331 uint32_t alloc_rx_buff_failed;
306 uint32_t rx_int_delay; 332 uint32_t rx_int_delay;
307 uint32_t rx_abs_int_delay; 333 uint32_t rx_abs_int_delay;
308 boolean_t rx_csum; 334 boolean_t rx_csum;
@@ -330,6 +356,7 @@ struct e1000_adapter {
330 struct e1000_rx_ring test_rx_ring; 356 struct e1000_rx_ring test_rx_ring;
331 357
332 358
359 u32 *config_space;
333 int msg_enable; 360 int msg_enable;
334#ifdef CONFIG_PCI_MSI 361#ifdef CONFIG_PCI_MSI
335 boolean_t have_msi; 362 boolean_t have_msi;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c88f1a3c1b1d..5cedc81786e3 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -80,6 +80,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
80 { "tx_deferred_ok", E1000_STAT(stats.dc) }, 80 { "tx_deferred_ok", E1000_STAT(stats.dc) },
81 { "tx_single_coll_ok", E1000_STAT(stats.scc) }, 81 { "tx_single_coll_ok", E1000_STAT(stats.scc) },
82 { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, 82 { "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
83 { "tx_timeout_count", E1000_STAT(tx_timeout_count) },
83 { "rx_long_length_errors", E1000_STAT(stats.roc) }, 84 { "rx_long_length_errors", E1000_STAT(stats.roc) },
84 { "rx_short_length_errors", E1000_STAT(stats.ruc) }, 85 { "rx_short_length_errors", E1000_STAT(stats.ruc) },
85 { "rx_align_errors", E1000_STAT(stats.algnerrc) }, 86 { "rx_align_errors", E1000_STAT(stats.algnerrc) },
@@ -93,9 +94,20 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
93 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, 94 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
94 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, 95 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
95 { "rx_header_split", E1000_STAT(rx_hdr_split) }, 96 { "rx_header_split", E1000_STAT(rx_hdr_split) },
97 { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
96}; 98};
97#define E1000_STATS_LEN \ 99
100#ifdef CONFIG_E1000_MQ
101#define E1000_QUEUE_STATS_LEN \
102 (((struct e1000_adapter *)netdev->priv)->num_tx_queues + \
103 ((struct e1000_adapter *)netdev->priv)->num_rx_queues) \
104 * (sizeof(struct e1000_queue_stats) / sizeof(uint64_t))
105#else
106#define E1000_QUEUE_STATS_LEN 0
107#endif
108#define E1000_GLOBAL_STATS_LEN \
98 sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) 109 sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
110#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
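/* illustrative arithmetic: with num_tx_queues == 2 and num_rx_queues == 2,
 * E1000_QUEUE_STATS_LEN is (2 + 2) * 2 == 8 extra uint64_t counters
 * (one packets and one bytes counter per queue) */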
99static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { 111static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
100 "Register test (offline)", "Eeprom test (offline)", 112 "Register test (offline)", "Eeprom test (offline)",
101 "Interrupt test (offline)", "Loopback test (offline)", 113 "Interrupt test (offline)", "Loopback test (offline)",
@@ -109,7 +121,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
109 struct e1000_adapter *adapter = netdev_priv(netdev); 121 struct e1000_adapter *adapter = netdev_priv(netdev);
110 struct e1000_hw *hw = &adapter->hw; 122 struct e1000_hw *hw = &adapter->hw;
111 123
112 if(hw->media_type == e1000_media_type_copper) { 124 if (hw->media_type == e1000_media_type_copper) {
113 125
114 ecmd->supported = (SUPPORTED_10baseT_Half | 126 ecmd->supported = (SUPPORTED_10baseT_Half |
115 SUPPORTED_10baseT_Full | 127 SUPPORTED_10baseT_Full |
@@ -121,7 +133,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
121 133
122 ecmd->advertising = ADVERTISED_TP; 134 ecmd->advertising = ADVERTISED_TP;
123 135
124 if(hw->autoneg == 1) { 136 if (hw->autoneg == 1) {
125 ecmd->advertising |= ADVERTISED_Autoneg; 137 ecmd->advertising |= ADVERTISED_Autoneg;
126 138
127 /* the e1000 autoneg seems to match ethtool nicely */ 139 /* the e1000 autoneg seems to match ethtool nicely */
@@ -132,7 +144,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
132 ecmd->port = PORT_TP; 144 ecmd->port = PORT_TP;
133 ecmd->phy_address = hw->phy_addr; 145 ecmd->phy_address = hw->phy_addr;
134 146
135 if(hw->mac_type == e1000_82543) 147 if (hw->mac_type == e1000_82543)
136 ecmd->transceiver = XCVR_EXTERNAL; 148 ecmd->transceiver = XCVR_EXTERNAL;
137 else 149 else
138 ecmd->transceiver = XCVR_INTERNAL; 150 ecmd->transceiver = XCVR_INTERNAL;
@@ -148,13 +160,13 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
148 160
149 ecmd->port = PORT_FIBRE; 161 ecmd->port = PORT_FIBRE;
150 162
151 if(hw->mac_type >= e1000_82545) 163 if (hw->mac_type >= e1000_82545)
152 ecmd->transceiver = XCVR_INTERNAL; 164 ecmd->transceiver = XCVR_INTERNAL;
153 else 165 else
154 ecmd->transceiver = XCVR_EXTERNAL; 166 ecmd->transceiver = XCVR_EXTERNAL;
155 } 167 }
156 168
157 if(netif_carrier_ok(adapter->netdev)) { 169 if (netif_carrier_ok(adapter->netdev)) {
158 170
159 e1000_get_speed_and_duplex(hw, &adapter->link_speed, 171 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
160 &adapter->link_duplex); 172 &adapter->link_duplex);
@@ -163,7 +175,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
163 /* unfortunately FULL_DUPLEX != DUPLEX_FULL 175 /* unfortunately FULL_DUPLEX != DUPLEX_FULL
164 * and HALF_DUPLEX != DUPLEX_HALF */ 176 * and HALF_DUPLEX != DUPLEX_HALF */
165 177
166 if(adapter->link_duplex == FULL_DUPLEX) 178 if (adapter->link_duplex == FULL_DUPLEX)
167 ecmd->duplex = DUPLEX_FULL; 179 ecmd->duplex = DUPLEX_FULL;
168 else 180 else
169 ecmd->duplex = DUPLEX_HALF; 181 ecmd->duplex = DUPLEX_HALF;
@@ -183,13 +195,21 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
183 struct e1000_adapter *adapter = netdev_priv(netdev); 195 struct e1000_adapter *adapter = netdev_priv(netdev);
184 struct e1000_hw *hw = &adapter->hw; 196 struct e1000_hw *hw = &adapter->hw;
185 197
186 if(ecmd->autoneg == AUTONEG_ENABLE) { 198 /* When SoL/IDER sessions are active, autoneg/speed/duplex
199 * cannot be changed */
200 if (e1000_check_phy_reset_block(hw)) {
201 DPRINTK(DRV, ERR, "Cannot change link characteristics "
202 "when SoL/IDER is active.\n");
203 return -EINVAL;
204 }
205
206 if (ecmd->autoneg == AUTONEG_ENABLE) {
187 hw->autoneg = 1; 207 hw->autoneg = 1;
188 if(hw->media_type == e1000_media_type_fiber) 208 if (hw->media_type == e1000_media_type_fiber)
189 hw->autoneg_advertised = ADVERTISED_1000baseT_Full | 209 hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
190 ADVERTISED_FIBRE | 210 ADVERTISED_FIBRE |
191 ADVERTISED_Autoneg; 211 ADVERTISED_Autoneg;
192 else 212 else
193 hw->autoneg_advertised = ADVERTISED_10baseT_Half | 213 hw->autoneg_advertised = ADVERTISED_10baseT_Half |
194 ADVERTISED_10baseT_Full | 214 ADVERTISED_10baseT_Full |
195 ADVERTISED_100baseT_Half | 215 ADVERTISED_100baseT_Half |
@@ -199,12 +219,12 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
199 ADVERTISED_TP; 219 ADVERTISED_TP;
200 ecmd->advertising = hw->autoneg_advertised; 220 ecmd->advertising = hw->autoneg_advertised;
201 } else 221 } else
202 if(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) 222 if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
203 return -EINVAL; 223 return -EINVAL;
204 224
205 /* reset the link */ 225 /* reset the link */
206 226
207 if(netif_running(adapter->netdev)) { 227 if (netif_running(adapter->netdev)) {
208 e1000_down(adapter); 228 e1000_down(adapter);
209 e1000_reset(adapter); 229 e1000_reset(adapter);
210 e1000_up(adapter); 230 e1000_up(adapter);
@@ -221,14 +241,14 @@ e1000_get_pauseparam(struct net_device *netdev,
221 struct e1000_adapter *adapter = netdev_priv(netdev); 241 struct e1000_adapter *adapter = netdev_priv(netdev);
222 struct e1000_hw *hw = &adapter->hw; 242 struct e1000_hw *hw = &adapter->hw;
223 243
224 pause->autoneg = 244 pause->autoneg =
225 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); 245 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
226 246
227 if(hw->fc == e1000_fc_rx_pause) 247 if (hw->fc == e1000_fc_rx_pause)
228 pause->rx_pause = 1; 248 pause->rx_pause = 1;
229 else if(hw->fc == e1000_fc_tx_pause) 249 else if (hw->fc == e1000_fc_tx_pause)
230 pause->tx_pause = 1; 250 pause->tx_pause = 1;
231 else if(hw->fc == e1000_fc_full) { 251 else if (hw->fc == e1000_fc_full) {
232 pause->rx_pause = 1; 252 pause->rx_pause = 1;
233 pause->tx_pause = 1; 253 pause->tx_pause = 1;
234 } 254 }
@@ -240,31 +260,30 @@ e1000_set_pauseparam(struct net_device *netdev,
240{ 260{
241 struct e1000_adapter *adapter = netdev_priv(netdev); 261 struct e1000_adapter *adapter = netdev_priv(netdev);
242 struct e1000_hw *hw = &adapter->hw; 262 struct e1000_hw *hw = &adapter->hw;
243 263
244 adapter->fc_autoneg = pause->autoneg; 264 adapter->fc_autoneg = pause->autoneg;
245 265
246 if(pause->rx_pause && pause->tx_pause) 266 if (pause->rx_pause && pause->tx_pause)
247 hw->fc = e1000_fc_full; 267 hw->fc = e1000_fc_full;
248 else if(pause->rx_pause && !pause->tx_pause) 268 else if (pause->rx_pause && !pause->tx_pause)
249 hw->fc = e1000_fc_rx_pause; 269 hw->fc = e1000_fc_rx_pause;
250 else if(!pause->rx_pause && pause->tx_pause) 270 else if (!pause->rx_pause && pause->tx_pause)
251 hw->fc = e1000_fc_tx_pause; 271 hw->fc = e1000_fc_tx_pause;
252 else if(!pause->rx_pause && !pause->tx_pause) 272 else if (!pause->rx_pause && !pause->tx_pause)
253 hw->fc = e1000_fc_none; 273 hw->fc = e1000_fc_none;
254 274
255 hw->original_fc = hw->fc; 275 hw->original_fc = hw->fc;
256 276
257 if(adapter->fc_autoneg == AUTONEG_ENABLE) { 277 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
258 if(netif_running(adapter->netdev)) { 278 if (netif_running(adapter->netdev)) {
259 e1000_down(adapter); 279 e1000_down(adapter);
260 e1000_up(adapter); 280 e1000_up(adapter);
261 } else 281 } else
262 e1000_reset(adapter); 282 e1000_reset(adapter);
263 } 283 } else
264 else
265 return ((hw->media_type == e1000_media_type_fiber) ? 284 return ((hw->media_type == e1000_media_type_fiber) ?
266 e1000_setup_link(hw) : e1000_force_mac_fc(hw)); 285 e1000_setup_link(hw) : e1000_force_mac_fc(hw));
267 286
268 return 0; 287 return 0;
269} 288}
270 289
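e1000_set_pauseparam() folds the two ethtool pause booleans into one of the four e1000 flow-control modes. A standalone sketch of that mapping, assuming an fc_mode enum that mirrors the driver's e1000_fc_* values:

#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Two booleans select one of four modes, exactly as the if/else
 * ladder in e1000_set_pauseparam() does. */
static enum fc_mode fc_from_pause(int rx_pause, int tx_pause)
{
    if (rx_pause && tx_pause)
        return FC_FULL;
    if (rx_pause)
        return FC_RX_PAUSE;
    if (tx_pause)
        return FC_TX_PAUSE;
    return FC_NONE;
}

int main(void)
{
    printf("%d\n", fc_from_pause(1, 0));   /* prints 1 (FC_RX_PAUSE) */
    return 0;
}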
@@ -281,14 +300,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
281 struct e1000_adapter *adapter = netdev_priv(netdev); 300 struct e1000_adapter *adapter = netdev_priv(netdev);
282 adapter->rx_csum = data; 301 adapter->rx_csum = data;
283 302
284 if(netif_running(netdev)) { 303 if (netif_running(netdev)) {
285 e1000_down(adapter); 304 e1000_down(adapter);
286 e1000_up(adapter); 305 e1000_up(adapter);
287 } else 306 } else
288 e1000_reset(adapter); 307 e1000_reset(adapter);
289 return 0; 308 return 0;
290} 309}
291 310
292static uint32_t 311static uint32_t
293e1000_get_tx_csum(struct net_device *netdev) 312e1000_get_tx_csum(struct net_device *netdev)
294{ 313{
@@ -300,7 +319,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
300{ 319{
301 struct e1000_adapter *adapter = netdev_priv(netdev); 320 struct e1000_adapter *adapter = netdev_priv(netdev);
302 321
303 if(adapter->hw.mac_type < e1000_82543) { 322 if (adapter->hw.mac_type < e1000_82543) {
304 if (!data) 323 if (!data)
305 return -EINVAL; 324 return -EINVAL;
306 return 0; 325 return 0;
@@ -319,8 +338,8 @@ static int
319e1000_set_tso(struct net_device *netdev, uint32_t data) 338e1000_set_tso(struct net_device *netdev, uint32_t data)
320{ 339{
321 struct e1000_adapter *adapter = netdev_priv(netdev); 340 struct e1000_adapter *adapter = netdev_priv(netdev);
322 if((adapter->hw.mac_type < e1000_82544) || 341 if ((adapter->hw.mac_type < e1000_82544) ||
323 (adapter->hw.mac_type == e1000_82547)) 342 (adapter->hw.mac_type == e1000_82547))
324 return data ? -EINVAL : 0; 343 return data ? -EINVAL : 0;
325 344
326 if (data) 345 if (data)
@@ -328,7 +347,7 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
328 else 347 else
329 netdev->features &= ~NETIF_F_TSO; 348 netdev->features &= ~NETIF_F_TSO;
330 return 0; 349 return 0;
331} 350}
332#endif /* NETIF_F_TSO */ 351#endif /* NETIF_F_TSO */
333 352
334static uint32_t 353static uint32_t
@@ -345,7 +364,7 @@ e1000_set_msglevel(struct net_device *netdev, uint32_t data)
345 adapter->msg_enable = data; 364 adapter->msg_enable = data;
346} 365}
347 366
348static int 367static int
349e1000_get_regs_len(struct net_device *netdev) 368e1000_get_regs_len(struct net_device *netdev)
350{ 369{
351#define E1000_REGS_LEN 32 370#define E1000_REGS_LEN 32
@@ -381,7 +400,7 @@ e1000_get_regs(struct net_device *netdev,
381 regs_buff[11] = E1000_READ_REG(hw, TIDV); 400 regs_buff[11] = E1000_READ_REG(hw, TIDV);
382 401
383 regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */ 402 regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */
384 if(hw->phy_type == e1000_phy_igp) { 403 if (hw->phy_type == e1000_phy_igp) {
385 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 404 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
386 IGP01E1000_PHY_AGC_A); 405 IGP01E1000_PHY_AGC_A);
387 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & 406 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
@@ -435,7 +454,7 @@ e1000_get_regs(struct net_device *netdev,
435 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); 454 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
436 regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */ 455 regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */
437 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ 456 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
438 if(hw->mac_type >= e1000_82540 && 457 if (hw->mac_type >= e1000_82540 &&
439 hw->media_type == e1000_media_type_copper) { 458 hw->media_type == e1000_media_type_copper) {
440 regs_buff[26] = E1000_READ_REG(hw, MANC); 459 regs_buff[26] = E1000_READ_REG(hw, MANC);
441 } 460 }
@@ -459,7 +478,7 @@ e1000_get_eeprom(struct net_device *netdev,
459 int ret_val = 0; 478 int ret_val = 0;
460 uint16_t i; 479 uint16_t i;
461 480
462 if(eeprom->len == 0) 481 if (eeprom->len == 0)
463 return -EINVAL; 482 return -EINVAL;
464 483
465 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 484 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -469,16 +488,16 @@ e1000_get_eeprom(struct net_device *netdev,
469 488
470 eeprom_buff = kmalloc(sizeof(uint16_t) * 489 eeprom_buff = kmalloc(sizeof(uint16_t) *
471 (last_word - first_word + 1), GFP_KERNEL); 490 (last_word - first_word + 1), GFP_KERNEL);
472 if(!eeprom_buff) 491 if (!eeprom_buff)
473 return -ENOMEM; 492 return -ENOMEM;
474 493
475 if(hw->eeprom.type == e1000_eeprom_spi) 494 if (hw->eeprom.type == e1000_eeprom_spi)
476 ret_val = e1000_read_eeprom(hw, first_word, 495 ret_val = e1000_read_eeprom(hw, first_word,
477 last_word - first_word + 1, 496 last_word - first_word + 1,
478 eeprom_buff); 497 eeprom_buff);
479 else { 498 else {
480 for (i = 0; i < last_word - first_word + 1; i++) 499 for (i = 0; i < last_word - first_word + 1; i++)
481 if((ret_val = e1000_read_eeprom(hw, first_word + i, 1, 500 if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
482 &eeprom_buff[i]))) 501 &eeprom_buff[i])))
483 break; 502 break;
484 } 503 }
@@ -505,10 +524,10 @@ e1000_set_eeprom(struct net_device *netdev,
505 int max_len, first_word, last_word, ret_val = 0; 524 int max_len, first_word, last_word, ret_val = 0;
506 uint16_t i; 525 uint16_t i;
507 526
508 if(eeprom->len == 0) 527 if (eeprom->len == 0)
509 return -EOPNOTSUPP; 528 return -EOPNOTSUPP;
510 529
511 if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 530 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
512 return -EFAULT; 531 return -EFAULT;
513 532
514 max_len = hw->eeprom.word_size * 2; 533 max_len = hw->eeprom.word_size * 2;
@@ -516,19 +535,19 @@ e1000_set_eeprom(struct net_device *netdev,
516 first_word = eeprom->offset >> 1; 535 first_word = eeprom->offset >> 1;
517 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 536 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
518 eeprom_buff = kmalloc(max_len, GFP_KERNEL); 537 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
519 if(!eeprom_buff) 538 if (!eeprom_buff)
520 return -ENOMEM; 539 return -ENOMEM;
521 540
522 ptr = (void *)eeprom_buff; 541 ptr = (void *)eeprom_buff;
523 542
524 if(eeprom->offset & 1) { 543 if (eeprom->offset & 1) {
525 /* need read/modify/write of first changed EEPROM word */ 544 /* need read/modify/write of first changed EEPROM word */
526 /* only the second byte of the word is being modified */ 545 /* only the second byte of the word is being modified */
527 ret_val = e1000_read_eeprom(hw, first_word, 1, 546 ret_val = e1000_read_eeprom(hw, first_word, 1,
528 &eeprom_buff[0]); 547 &eeprom_buff[0]);
529 ptr++; 548 ptr++;
530 } 549 }
531 if(((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { 550 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
532 /* need read/modify/write of last changed EEPROM word */ 551 /* need read/modify/write of last changed EEPROM word */
533 /* only the first byte of the word is being modified */ 552 /* only the first byte of the word is being modified */
534 ret_val = e1000_read_eeprom(hw, last_word, 1, 553 ret_val = e1000_read_eeprom(hw, last_word, 1,
@@ -547,9 +566,9 @@ e1000_set_eeprom(struct net_device *netdev,
547 ret_val = e1000_write_eeprom(hw, first_word, 566 ret_val = e1000_write_eeprom(hw, first_word,
548 last_word - first_word + 1, eeprom_buff); 567 last_word - first_word + 1, eeprom_buff);
549 568
550 /* Update the checksum over the first part of the EEPROM if needed 569 /* Update the checksum over the first part of the EEPROM if needed
551 * and flush shadow RAM for 82573 controllers */ 570 * and flush shadow RAM for 82573 controllers */
552 if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) || 571 if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
553 (hw->mac_type == e1000_82573))) 572 (hw->mac_type == e1000_82573)))
554 e1000_update_eeprom_checksum(hw); 573 e1000_update_eeprom_checksum(hw);
555 574
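The EEPROM is addressed in 16-bit words, so a byte write that starts at an odd offset (or ends mid-word) needs a read-modify-write of the boundary words, which is what the first_word/last_word logic above implements. A self-contained sketch against a hypothetical in-memory EEPROM:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t eeprom[64];   /* hypothetical 128-byte word-addressed EEPROM */

/* Byte-granular write on a word-addressed device: read the touched
 * words, patch the changed bytes, write whole words back. */
static void eeprom_write_bytes(unsigned off, const uint8_t *buf, unsigned len)
{
    unsigned first = off >> 1;               /* first 16-bit word touched */
    unsigned last  = (off + len - 1) >> 1;   /* last 16-bit word touched  */
    unsigned nbytes = 2 * (last - first + 1);
    uint8_t *base = (uint8_t *)eeprom;
    uint8_t tmp[128];

    memcpy(tmp, base + 2 * first, nbytes);      /* read the touched words  */
    memcpy(tmp + (off - 2 * first), buf, len);  /* patch the changed bytes */
    memcpy(base + 2 * first, tmp, nbytes);      /* write whole words back  */
}

int main(void)
{
    uint8_t b = 0xAB;

    eeprom_write_bytes(3, &b, 1);                /* odd offset: RMW word 1 */
    printf("%02X\n", ((uint8_t *)eeprom)[3]);    /* prints AB */
    return 0;
}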
@@ -567,21 +586,21 @@ e1000_get_drvinfo(struct net_device *netdev,
567 586
568 strncpy(drvinfo->driver, e1000_driver_name, 32); 587 strncpy(drvinfo->driver, e1000_driver_name, 32);
569 strncpy(drvinfo->version, e1000_driver_version, 32); 588 strncpy(drvinfo->version, e1000_driver_version, 32);
570 589
571 /* EEPROM image version # is reported as firware version # for 590 /* EEPROM image version # is reported as firmware version # for
572 * 8257{1|2|3} controllers */ 591 * 8257{1|2|3} controllers */
573 e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data); 592 e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data);
574 switch (adapter->hw.mac_type) { 593 switch (adapter->hw.mac_type) {
575 case e1000_82571: 594 case e1000_82571:
576 case e1000_82572: 595 case e1000_82572:
577 case e1000_82573: 596 case e1000_82573:
578 sprintf(firmware_version, "%d.%d-%d", 597 sprintf(firmware_version, "%d.%d-%d",
579 (eeprom_data & 0xF000) >> 12, 598 (eeprom_data & 0xF000) >> 12,
580 (eeprom_data & 0x0FF0) >> 4, 599 (eeprom_data & 0x0FF0) >> 4,
581 eeprom_data & 0x000F); 600 eeprom_data & 0x000F);
582 break; 601 break;
583 default: 602 default:
584 sprintf(firmware_version, "n/a"); 603 sprintf(firmware_version, "N/A");
585 } 604 }
586 605
587 strncpy(drvinfo->fw_version, firmware_version, 32); 606 strncpy(drvinfo->fw_version, firmware_version, 32);
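For 8257{1|2|3} parts the EEPROM image version word packs the version as 4/8/4-bit fields, printed as "%d.%d-%d" above. A tiny decode example with a made-up word value:

#include <stdint.h>
#include <stdio.h>

/* Decode the 4/8/4-bit EEPROM image version word the way
 * e1000_get_drvinfo() formats it with "%d.%d-%d". */
int main(void)
{
    uint16_t eeprom_data = 0x5213;        /* made-up EEPROM word 5 value */
    unsigned major = (eeprom_data & 0xF000) >> 12;
    unsigned minor = (eeprom_data & 0x0FF0) >> 4;
    unsigned build =  eeprom_data & 0x000F;

    printf("%u.%u-%u\n", major, minor, build);   /* prints 5.33-3 */
    return 0;
}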
@@ -613,7 +632,7 @@ e1000_get_ringparam(struct net_device *netdev,
613 ring->rx_jumbo_pending = 0; 632 ring->rx_jumbo_pending = 0;
614} 633}
615 634
616static int 635static int
617e1000_set_ringparam(struct net_device *netdev, 636e1000_set_ringparam(struct net_device *netdev,
618 struct ethtool_ringparam *ring) 637 struct ethtool_ringparam *ring)
619{ 638{
@@ -623,8 +642,8 @@ e1000_set_ringparam(struct net_device *netdev,
623 struct e1000_rx_ring *rxdr, *rx_old, *rx_new; 642 struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
624 int i, err, tx_ring_size, rx_ring_size; 643 int i, err, tx_ring_size, rx_ring_size;
625 644
626 tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues; 645 tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
627 rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues; 646 rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
628 647
629 if (netif_running(adapter->netdev)) 648 if (netif_running(adapter->netdev))
630 e1000_down(adapter); 649 e1000_down(adapter);
@@ -650,25 +669,25 @@ e1000_set_ringparam(struct net_device *netdev,
650 txdr = adapter->tx_ring; 669 txdr = adapter->tx_ring;
651 rxdr = adapter->rx_ring; 670 rxdr = adapter->rx_ring;
652 671
653 if((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 672 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
654 return -EINVAL; 673 return -EINVAL;
655 674
656 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); 675 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
657 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? 676 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
658 E1000_MAX_RXD : E1000_MAX_82544_RXD)); 677 E1000_MAX_RXD : E1000_MAX_82544_RXD));
659 E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); 678 E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
660 679
661 txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD); 680 txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
662 txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ? 681 txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
663 E1000_MAX_TXD : E1000_MAX_82544_TXD)); 682 E1000_MAX_TXD : E1000_MAX_82544_TXD));
664 E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); 683 E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
665 684
666 for (i = 0; i < adapter->num_queues; i++) { 685 for (i = 0; i < adapter->num_tx_queues; i++)
667 txdr[i].count = txdr->count; 686 txdr[i].count = txdr->count;
687 for (i = 0; i < adapter->num_rx_queues; i++)
668 rxdr[i].count = rxdr->count; 688 rxdr[i].count = rxdr->count;
669 }
670 689
671 if(netif_running(adapter->netdev)) { 690 if (netif_running(adapter->netdev)) {
672 /* Try to get new resources before deleting old */ 691 /* Try to get new resources before deleting old */
673 if ((err = e1000_setup_all_rx_resources(adapter))) 692 if ((err = e1000_setup_all_rx_resources(adapter)))
674 goto err_setup_rx; 693 goto err_setup_rx;
@@ -688,7 +707,7 @@ e1000_set_ringparam(struct net_device *netdev,
688 kfree(rx_old); 707 kfree(rx_old);
689 adapter->rx_ring = rx_new; 708 adapter->rx_ring = rx_new;
690 adapter->tx_ring = tx_new; 709 adapter->tx_ring = tx_new;
691 if((err = e1000_up(adapter))) 710 if ((err = e1000_up(adapter)))
692 return err; 711 return err;
693 } 712 }
694 713
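The requested ring sizes are clamped to the hardware's min/max and then rounded up to the required descriptor multiple via E1000_ROUNDUP. A sketch of that clamp-then-round step, with hypothetical stand-ins for the driver's limits:

#include <stdint.h>
#include <stdio.h>

#define MIN_RXD 80            /* hypothetical stand-in for E1000_MIN_RXD  */
#define MAX_RXD 4096          /* hypothetical stand-in for E1000_MAX_RXD  */
#define RXD_MULTIPLE 8        /* stand-in for REQ_RX_DESCRIPTOR_MULTIPLE  */

/* Clamp the user's request into [MIN_RXD, MAX_RXD], then round up to
 * the required multiple the way E1000_ROUNDUP does. */
static uint32_t ring_size(uint32_t requested)
{
    uint32_t n = requested;

    if (n < MIN_RXD)
        n = MIN_RXD;
    if (n > MAX_RXD)
        n = MAX_RXD;
    if (n % RXD_MULTIPLE)
        n += RXD_MULTIPLE - n % RXD_MULTIPLE;
    return n;
}

int main(void)
{
    printf("%u\n", ring_size(100));    /* prints 104 */
    return 0;
}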
@@ -707,10 +726,10 @@ err_setup_rx:
707 uint32_t pat, value; \ 726 uint32_t pat, value; \
708 uint32_t test[] = \ 727 uint32_t test[] = \
709 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ 728 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
710 for(pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \ 729 for (pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \
711 E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \ 730 E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \
712 value = E1000_READ_REG(&adapter->hw, R); \ 731 value = E1000_READ_REG(&adapter->hw, R); \
713 if(value != (test[pat] & W & M)) { \ 732 if (value != (test[pat] & W & M)) { \
714 DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \ 733 DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \
715 "0x%08X expected 0x%08X\n", \ 734 "0x%08X expected 0x%08X\n", \
716 E1000_##R, value, (test[pat] & W & M)); \ 735 E1000_##R, value, (test[pat] & W & M)); \
@@ -726,7 +745,7 @@ err_setup_rx:
726 uint32_t value; \ 745 uint32_t value; \
727 E1000_WRITE_REG(&adapter->hw, R, W & M); \ 746 E1000_WRITE_REG(&adapter->hw, R, W & M); \
728 value = E1000_READ_REG(&adapter->hw, R); \ 747 value = E1000_READ_REG(&adapter->hw, R); \
729 if((W & M) != (value & M)) { \ 748 if ((W & M) != (value & M)) { \
730 DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ 749 DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
731 "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \ 750 "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \
732 *data = (adapter->hw.mac_type < e1000_82543) ? \ 751 *data = (adapter->hw.mac_type < e1000_82543) ? \
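REG_PATTERN_TEST writes four bit patterns to a register and checks that only the writable (W) and implemented (M) bits read back. The same idea as a plain function, using a fake register in place of MMIO:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;                 /* stand-in for one MMIO register */
static void reg_write(uint32_t v) { fake_reg = v; }
static uint32_t reg_read(void)    { return fake_reg; }

/* Walk the same four patterns REG_PATTERN_TEST uses; only bits that
 * are both writable (w) and implemented (m) must read back intact. */
static int pattern_test(uint32_t w, uint32_t m)
{
    static const uint32_t pat[] =
        { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF };
    unsigned i;

    for (i = 0; i < sizeof(pat) / sizeof(pat[0]); i++) {
        uint32_t value;

        reg_write(pat[i] & w);
        value = reg_read();
        if (value != (pat[i] & w & m)) {
            fprintf(stderr, "reg test failed: got 0x%08X expected 0x%08X\n",
                    value, pat[i] & w & m);
            return 1;
        }
    }
    return 0;                             /* all patterns read back */
}

int main(void) { return pattern_test(0xFFFFFFF0, 0xFFFFFFFF); }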
@@ -762,7 +781,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
762 value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle); 781 value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle);
763 E1000_WRITE_REG(&adapter->hw, STATUS, toggle); 782 E1000_WRITE_REG(&adapter->hw, STATUS, toggle);
764 after = E1000_READ_REG(&adapter->hw, STATUS) & toggle; 783 after = E1000_READ_REG(&adapter->hw, STATUS) & toggle;
765 if(value != after) { 784 if (value != after) {
766 DPRINTK(DRV, ERR, "failed STATUS register test got: " 785 DPRINTK(DRV, ERR, "failed STATUS register test got: "
767 "0x%08X expected: 0x%08X\n", after, value); 786 "0x%08X expected: 0x%08X\n", after, value);
768 *data = 1; 787 *data = 1;
@@ -790,7 +809,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
790 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB); 809 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
791 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); 810 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
792 811
793 if(adapter->hw.mac_type >= e1000_82543) { 812 if (adapter->hw.mac_type >= e1000_82543) {
794 813
795 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF); 814 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
796 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 815 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
@@ -798,7 +817,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
798 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 817 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
799 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); 818 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
800 819
801 for(i = 0; i < E1000_RAR_ENTRIES; i++) { 820 for (i = 0; i < E1000_RAR_ENTRIES; i++) {
802 REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF, 821 REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
803 0xFFFFFFFF); 822 0xFFFFFFFF);
804 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, 823 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
@@ -814,7 +833,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
814 833
815 } 834 }
816 835
817 for(i = 0; i < E1000_MC_TBL_SIZE; i++) 836 for (i = 0; i < E1000_MC_TBL_SIZE; i++)
818 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); 837 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
819 838
820 *data = 0; 839 *data = 0;
@@ -830,8 +849,8 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
830 849
831 *data = 0; 850 *data = 0;
832 /* Read and add up the contents of the EEPROM */ 851 /* Read and add up the contents of the EEPROM */
833 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 852 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
834 if((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) { 853 if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
835 *data = 1; 854 *data = 1;
836 break; 855 break;
837 } 856 }
@@ -839,7 +858,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
839 } 858 }
840 859
841 /* If Checksum is not Correct return error else test passed */ 860 /* If Checksum is not Correct return error else test passed */
842 if((checksum != (uint16_t) EEPROM_SUM) && !(*data)) 861 if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
843 *data = 2; 862 *data = 2;
844 863
845 return *data; 864 return *data;
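The EEPROM test sums every word up to and including the checksum register and expects the truncated 16-bit total to equal EEPROM_SUM (0xBABA on these parts). A compilable sketch:

#include <stdint.h>
#include <stdio.h>

#define EEPROM_SUM 0xBABA     /* expected 16-bit total on e1000 parts */

/* Sum every word through the checksum register; the stored checksum
 * word is chosen so the truncated 16-bit sum lands on EEPROM_SUM. */
static int eeprom_ok(const uint16_t *words, unsigned nwords)
{
    uint16_t sum = 0;
    unsigned i;

    for (i = 0; i < nwords; i++)
        sum += words[i];
    return sum == EEPROM_SUM;
}

int main(void)
{
    uint16_t img[4] = { 0x1111, 0x2222, 0x3333 };

    img[3] = EEPROM_SUM - (uint16_t)(0x1111 + 0x2222 + 0x3333);
    printf("%s\n", eeprom_ok(img, 4) ? "pass" : "fail");   /* prints pass */
    return 0;
}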
@@ -868,9 +887,9 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
868 *data = 0; 887 *data = 0;
869 888
870 /* Hook up test interrupt handler just for this test */ 889 /* Hook up test interrupt handler just for this test */
871 if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) { 890 if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
872 shared_int = FALSE; 891 shared_int = FALSE;
873 } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ, 892 } else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
874 netdev->name, netdev)) { 893 netdev->name, netdev)) {
875 *data = 1; 894 *data = 1;
876 return -1; 895 return -1;
@@ -881,12 +900,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
881 msec_delay(10); 900 msec_delay(10);
882 901
883 /* Test each interrupt */ 902 /* Test each interrupt */
884 for(; i < 10; i++) { 903 for (; i < 10; i++) {
885 904
886 /* Interrupt to test */ 905 /* Interrupt to test */
887 mask = 1 << i; 906 mask = 1 << i;
888 907
889 if(!shared_int) { 908 if (!shared_int) {
890 /* Disable the interrupt to be reported in 909 /* Disable the interrupt to be reported in
891 * the cause register and then force the same 910 * the cause register and then force the same
892 * interrupt and see if one gets posted. If 911 * interrupt and see if one gets posted. If
@@ -897,8 +916,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
897 E1000_WRITE_REG(&adapter->hw, IMC, mask); 916 E1000_WRITE_REG(&adapter->hw, IMC, mask);
898 E1000_WRITE_REG(&adapter->hw, ICS, mask); 917 E1000_WRITE_REG(&adapter->hw, ICS, mask);
899 msec_delay(10); 918 msec_delay(10);
900 919
901 if(adapter->test_icr & mask) { 920 if (adapter->test_icr & mask) {
902 *data = 3; 921 *data = 3;
903 break; 922 break;
904 } 923 }
@@ -915,12 +934,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
915 E1000_WRITE_REG(&adapter->hw, ICS, mask); 934 E1000_WRITE_REG(&adapter->hw, ICS, mask);
916 msec_delay(10); 935 msec_delay(10);
917 936
918 if(!(adapter->test_icr & mask)) { 937 if (!(adapter->test_icr & mask)) {
919 *data = 4; 938 *data = 4;
920 break; 939 break;
921 } 940 }
922 941
923 if(!shared_int) { 942 if (!shared_int) {
924 /* Disable the other interrupts to be reported in 943 /* Disable the other interrupts to be reported in
925 * the cause register and then force the other 944 * the cause register and then force the other
926 * interrupts and see if any get posted. If 945 * interrupts and see if any get posted. If
@@ -932,7 +951,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
932 E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF); 951 E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
933 msec_delay(10); 952 msec_delay(10);
934 953
935 if(adapter->test_icr) { 954 if (adapter->test_icr) {
936 *data = 5; 955 *data = 5;
937 break; 956 break;
938 } 957 }
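The interrupt test masks all but one cause bit via IMC, forces that bit through ICS, and then checks whether it latched into the cause register. A stripped-down model of that force-and-observe loop, with a stand-in that latches ICS writes straight into ICR:

#include <stdint.h>
#include <stdio.h>

static uint32_t icr;                      /* latched causes, like test_icr */

/* In this model an ICS write latches the cause immediately; the real
 * hardware posts an interrupt that the test handler records. */
static void write_ics(uint32_t mask) { icr |= mask; }

int main(void)
{
    unsigned i;

    for (i = 0; i < 10; i++) {
        uint32_t mask = 1u << i;          /* one cause bit per iteration */

        icr = 0;
        write_ics(mask);                  /* force the interrupt cause   */
        if (!(icr & mask)) {
            printf("cause bit %u never posted\n", i);
            return 4;                     /* driver's error code for this */
        }
    }
    printf("all forced causes posted\n");
    return 0;
}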
@@ -957,40 +976,39 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
957 struct pci_dev *pdev = adapter->pdev; 976 struct pci_dev *pdev = adapter->pdev;
958 int i; 977 int i;
959 978
960 if(txdr->desc && txdr->buffer_info) { 979 if (txdr->desc && txdr->buffer_info) {
961 for(i = 0; i < txdr->count; i++) { 980 for (i = 0; i < txdr->count; i++) {
962 if(txdr->buffer_info[i].dma) 981 if (txdr->buffer_info[i].dma)
963 pci_unmap_single(pdev, txdr->buffer_info[i].dma, 982 pci_unmap_single(pdev, txdr->buffer_info[i].dma,
964 txdr->buffer_info[i].length, 983 txdr->buffer_info[i].length,
965 PCI_DMA_TODEVICE); 984 PCI_DMA_TODEVICE);
966 if(txdr->buffer_info[i].skb) 985 if (txdr->buffer_info[i].skb)
967 dev_kfree_skb(txdr->buffer_info[i].skb); 986 dev_kfree_skb(txdr->buffer_info[i].skb);
968 } 987 }
969 } 988 }
970 989
971 if(rxdr->desc && rxdr->buffer_info) { 990 if (rxdr->desc && rxdr->buffer_info) {
972 for(i = 0; i < rxdr->count; i++) { 991 for (i = 0; i < rxdr->count; i++) {
973 if(rxdr->buffer_info[i].dma) 992 if (rxdr->buffer_info[i].dma)
974 pci_unmap_single(pdev, rxdr->buffer_info[i].dma, 993 pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
975 rxdr->buffer_info[i].length, 994 rxdr->buffer_info[i].length,
976 PCI_DMA_FROMDEVICE); 995 PCI_DMA_FROMDEVICE);
977 if(rxdr->buffer_info[i].skb) 996 if (rxdr->buffer_info[i].skb)
978 dev_kfree_skb(rxdr->buffer_info[i].skb); 997 dev_kfree_skb(rxdr->buffer_info[i].skb);
979 } 998 }
980 } 999 }
981 1000
982 if(txdr->desc) { 1001 if (txdr->desc) {
983 pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma); 1002 pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
984 txdr->desc = NULL; 1003 txdr->desc = NULL;
985 } 1004 }
986 if(rxdr->desc) { 1005 if (rxdr->desc) {
987 pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma); 1006 pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
988 rxdr->desc = NULL; 1007 rxdr->desc = NULL;
989 } 1008 }
990 1009
991 kfree(txdr->buffer_info); 1010 kfree(txdr->buffer_info);
992 txdr->buffer_info = NULL; 1011 txdr->buffer_info = NULL;
993
994 kfree(rxdr->buffer_info); 1012 kfree(rxdr->buffer_info);
995 rxdr->buffer_info = NULL; 1013 rxdr->buffer_info = NULL;
996 1014
@@ -1008,11 +1026,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1008 1026
1009 /* Setup Tx descriptor ring and Tx buffers */ 1027 /* Setup Tx descriptor ring and Tx buffers */
1010 1028
1011 if(!txdr->count) 1029 if (!txdr->count)
1012 txdr->count = E1000_DEFAULT_TXD; 1030 txdr->count = E1000_DEFAULT_TXD;
1013 1031
1014 size = txdr->count * sizeof(struct e1000_buffer); 1032 size = txdr->count * sizeof(struct e1000_buffer);
1015 if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) { 1033 if (!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
1016 ret_val = 1; 1034 ret_val = 1;
1017 goto err_nomem; 1035 goto err_nomem;
1018 } 1036 }
@@ -1020,7 +1038,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1020 1038
1021 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 1039 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1022 E1000_ROUNDUP(txdr->size, 4096); 1040 E1000_ROUNDUP(txdr->size, 4096);
1023 if(!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) { 1041 if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
1024 ret_val = 2; 1042 ret_val = 2;
1025 goto err_nomem; 1043 goto err_nomem;
1026 } 1044 }
@@ -1039,12 +1057,12 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1039 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | 1057 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1040 E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); 1058 E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1041 1059
1042 for(i = 0; i < txdr->count; i++) { 1060 for (i = 0; i < txdr->count; i++) {
1043 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); 1061 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
1044 struct sk_buff *skb; 1062 struct sk_buff *skb;
1045 unsigned int size = 1024; 1063 unsigned int size = 1024;
1046 1064
1047 if(!(skb = alloc_skb(size, GFP_KERNEL))) { 1065 if (!(skb = alloc_skb(size, GFP_KERNEL))) {
1048 ret_val = 3; 1066 ret_val = 3;
1049 goto err_nomem; 1067 goto err_nomem;
1050 } 1068 }
@@ -1064,18 +1082,18 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1064 1082
1065 /* Setup Rx descriptor ring and Rx buffers */ 1083 /* Setup Rx descriptor ring and Rx buffers */
1066 1084
1067 if(!rxdr->count) 1085 if (!rxdr->count)
1068 rxdr->count = E1000_DEFAULT_RXD; 1086 rxdr->count = E1000_DEFAULT_RXD;
1069 1087
1070 size = rxdr->count * sizeof(struct e1000_buffer); 1088 size = rxdr->count * sizeof(struct e1000_buffer);
1071 if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) { 1089 if (!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
1072 ret_val = 4; 1090 ret_val = 4;
1073 goto err_nomem; 1091 goto err_nomem;
1074 } 1092 }
1075 memset(rxdr->buffer_info, 0, size); 1093 memset(rxdr->buffer_info, 0, size);
1076 1094
1077 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1095 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
1078 if(!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) { 1096 if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
1079 ret_val = 5; 1097 ret_val = 5;
1080 goto err_nomem; 1098 goto err_nomem;
1081 } 1099 }
@@ -1095,11 +1113,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1095 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); 1113 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1096 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 1114 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1097 1115
1098 for(i = 0; i < rxdr->count; i++) { 1116 for (i = 0; i < rxdr->count; i++) {
1099 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); 1117 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
1100 struct sk_buff *skb; 1118 struct sk_buff *skb;
1101 1119
1102 if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, 1120 if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
1103 GFP_KERNEL))) { 1121 GFP_KERNEL))) {
1104 ret_val = 6; 1122 ret_val = 6;
1105 goto err_nomem; 1123 goto err_nomem;
@@ -1208,15 +1226,15 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
1208 1226
1209 /* Check Phy Configuration */ 1227 /* Check Phy Configuration */
1210 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg); 1228 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
1211 if(phy_reg != 0x4100) 1229 if (phy_reg != 0x4100)
1212 return 9; 1230 return 9;
1213 1231
1214 e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); 1232 e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
1215 if(phy_reg != 0x0070) 1233 if (phy_reg != 0x0070)
1216 return 10; 1234 return 10;
1217 1235
1218 e1000_read_phy_reg(&adapter->hw, 29, &phy_reg); 1236 e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
1219 if(phy_reg != 0x001A) 1237 if (phy_reg != 0x001A)
1220 return 11; 1238 return 11;
1221 1239
1222 return 0; 1240 return 0;
@@ -1230,7 +1248,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1230 1248
1231 adapter->hw.autoneg = FALSE; 1249 adapter->hw.autoneg = FALSE;
1232 1250
1233 if(adapter->hw.phy_type == e1000_phy_m88) { 1251 if (adapter->hw.phy_type == e1000_phy_m88) {
1234 /* Auto-MDI/MDIX Off */ 1252 /* Auto-MDI/MDIX Off */
1235 e1000_write_phy_reg(&adapter->hw, 1253 e1000_write_phy_reg(&adapter->hw,
1236 M88E1000_PHY_SPEC_CTRL, 0x0808); 1254 M88E1000_PHY_SPEC_CTRL, 0x0808);
@@ -1250,14 +1268,14 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1250 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1268 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1251 E1000_CTRL_FD); /* Force Duplex to FULL */ 1269 E1000_CTRL_FD); /* Force Duplex to FULL */
1252 1270
1253 if(adapter->hw.media_type == e1000_media_type_copper && 1271 if (adapter->hw.media_type == e1000_media_type_copper &&
1254 adapter->hw.phy_type == e1000_phy_m88) { 1272 adapter->hw.phy_type == e1000_phy_m88) {
1255 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1273 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1256 } else { 1274 } else {
1257 /* Set the ILOS bit on the fiber NIC if half 1275 /* Set the ILOS bit on the fiber NIC if half
1258 * duplex link is detected. */ 1276 * duplex link is detected. */
1259 stat_reg = E1000_READ_REG(&adapter->hw, STATUS); 1277 stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
1260 if((stat_reg & E1000_STATUS_FD) == 0) 1278 if ((stat_reg & E1000_STATUS_FD) == 0)
1261 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); 1279 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
1262 } 1280 }
1263 1281
@@ -1266,7 +1284,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1266 /* Disable the receiver on the PHY so when a cable is plugged in, the 1284 /* Disable the receiver on the PHY so when a cable is plugged in, the
1267 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 1285 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
1268 */ 1286 */
1269 if(adapter->hw.phy_type == e1000_phy_m88) 1287 if (adapter->hw.phy_type == e1000_phy_m88)
1270 e1000_phy_disable_receiver(adapter); 1288 e1000_phy_disable_receiver(adapter);
1271 1289
1272 udelay(500); 1290 udelay(500);
@@ -1282,14 +1300,14 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
1282 1300
1283 switch (adapter->hw.mac_type) { 1301 switch (adapter->hw.mac_type) {
1284 case e1000_82543: 1302 case e1000_82543:
1285 if(adapter->hw.media_type == e1000_media_type_copper) { 1303 if (adapter->hw.media_type == e1000_media_type_copper) {
1286 /* Attempt to setup Loopback mode on Non-integrated PHY. 1304 /* Attempt to setup Loopback mode on Non-integrated PHY.
1287 * Some PHY registers get corrupted at random, so 1305 * Some PHY registers get corrupted at random, so
1288 * attempt this 10 times. 1306 * attempt this 10 times.
1289 */ 1307 */
1290 while(e1000_nonintegrated_phy_loopback(adapter) && 1308 while (e1000_nonintegrated_phy_loopback(adapter) &&
1291 count++ < 10); 1309 count++ < 10);
1292 if(count < 11) 1310 if (count < 11)
1293 return 0; 1311 return 0;
1294 } 1312 }
1295 break; 1313 break;
@@ -1327,11 +1345,11 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
1327static int 1345static int
1328e1000_setup_loopback_test(struct e1000_adapter *adapter) 1346e1000_setup_loopback_test(struct e1000_adapter *adapter)
1329{ 1347{
1330 uint32_t rctl;
1331 struct e1000_hw *hw = &adapter->hw; 1348 struct e1000_hw *hw = &adapter->hw;
1349 uint32_t rctl;
1332 1350
1333 if (hw->media_type == e1000_media_type_fiber || 1351 if (hw->media_type == e1000_media_type_fiber ||
1334 hw->media_type == e1000_media_type_internal_serdes) { 1352 hw->media_type == e1000_media_type_internal_serdes) {
1335 switch (hw->mac_type) { 1353 switch (hw->mac_type) {
1336 case e1000_82545: 1354 case e1000_82545:
1337 case e1000_82546: 1355 case e1000_82546:
@@ -1362,25 +1380,25 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter)
1362static void 1380static void
1363e1000_loopback_cleanup(struct e1000_adapter *adapter) 1381e1000_loopback_cleanup(struct e1000_adapter *adapter)
1364{ 1382{
1383 struct e1000_hw *hw = &adapter->hw;
1365 uint32_t rctl; 1384 uint32_t rctl;
1366 uint16_t phy_reg; 1385 uint16_t phy_reg;
1367 struct e1000_hw *hw = &adapter->hw;
1368 1386
1369 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1387 rctl = E1000_READ_REG(hw, RCTL);
1370 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 1388 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1371 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 1389 E1000_WRITE_REG(hw, RCTL, rctl);
1372 1390
1373 switch (hw->mac_type) { 1391 switch (hw->mac_type) {
1374 case e1000_82571: 1392 case e1000_82571:
1375 case e1000_82572: 1393 case e1000_82572:
1376 if (hw->media_type == e1000_media_type_fiber || 1394 if (hw->media_type == e1000_media_type_fiber ||
1377 hw->media_type == e1000_media_type_internal_serdes){ 1395 hw->media_type == e1000_media_type_internal_serdes) {
1378#define E1000_SERDES_LB_OFF 0x400 1396#define E1000_SERDES_LB_OFF 0x400
1379 E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF); 1397 E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF);
1380 msec_delay(10); 1398 msec_delay(10);
1381 break; 1399 break;
1382 } 1400 }
1383 /* fall thru for Cu adapters */ 1401 /* Fall Through */
1384 case e1000_82545: 1402 case e1000_82545:
1385 case e1000_82546: 1403 case e1000_82546:
1386 case e1000_82545_rev_3: 1404 case e1000_82545_rev_3:
@@ -1401,7 +1419,7 @@ static void
1401e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1419e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1402{ 1420{
1403 memset(skb->data, 0xFF, frame_size); 1421 memset(skb->data, 0xFF, frame_size);
1404 frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size; 1422 frame_size &= ~1;
1405 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); 1423 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
1406 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); 1424 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
1407 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); 1425 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
@@ -1410,9 +1428,9 @@ e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1410static int 1428static int
1411e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1429e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1412{ 1430{
1413 frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size; 1431 frame_size &= ~1;
1414 if(*(skb->data + 3) == 0xFF) { 1432 if (*(skb->data + 3) == 0xFF) {
1415 if((*(skb->data + frame_size / 2 + 10) == 0xBE) && 1433 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1416 (*(skb->data + frame_size / 2 + 12) == 0xAF)) { 1434 (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
1417 return 0; 1435 return 0;
1418 } 1436 }
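The loopback helpers fill the first half of the frame with 0xFF and the second half with 0xAA, then drop 0xBE/0xAF markers 10 and 12 bytes past the midpoint; the checker only has to probe those three spots. A standalone version of both halves:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build the loopback test frame the way e1000_create_lbtest_frame()
 * does: 0xFF first half, 0xAA second half, two marker bytes. */
static void create_frame(uint8_t *data, unsigned frame_size)
{
    memset(data, 0xFF, frame_size);
    frame_size &= ~1u;                       /* work on an even size */
    memset(data + frame_size / 2, 0xAA, frame_size / 2 - 1);
    data[frame_size / 2 + 10] = 0xBE;
    data[frame_size / 2 + 12] = 0xAF;
}

/* Spot-check one leading byte and both markers, as
 * e1000_check_lbtest_frame() does. */
static int check_frame(const uint8_t *data, unsigned frame_size)
{
    frame_size &= ~1u;
    if (data[3] == 0xFF &&
        data[frame_size / 2 + 10] == 0xBE &&
        data[frame_size / 2 + 12] == 0xAF)
        return 0;
    return 13;                               /* driver's mis-compare code */
}

int main(void)
{
    uint8_t buf[1024];

    create_frame(buf, sizeof(buf));
    printf("%d\n", check_frame(buf, sizeof(buf)));   /* prints 0 */
    return 0;
}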
@@ -1431,53 +1449,53 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
1431 1449
1432 E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1); 1450 E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
1433 1451
1434 /* Calculate the loop count based on the largest descriptor ring 1452 /* Calculate the loop count based on the largest descriptor ring
1435 * The idea is to wrap the largest ring a number of times using 64 1453 * The idea is to wrap the largest ring a number of times using 64
1436 * send/receive pairs during each loop 1454 * send/receive pairs during each loop
1437 */ 1455 */
1438 1456
1439 if(rxdr->count <= txdr->count) 1457 if (rxdr->count <= txdr->count)
1440 lc = ((txdr->count / 64) * 2) + 1; 1458 lc = ((txdr->count / 64) * 2) + 1;
1441 else 1459 else
1442 lc = ((rxdr->count / 64) * 2) + 1; 1460 lc = ((rxdr->count / 64) * 2) + 1;
1443 1461
1444 k = l = 0; 1462 k = l = 0;
1445 for(j = 0; j <= lc; j++) { /* loop count loop */ 1463 for (j = 0; j <= lc; j++) { /* loop count loop */
1446 for(i = 0; i < 64; i++) { /* send the packets */ 1464 for (i = 0; i < 64; i++) { /* send the packets */
1447 e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1465 e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1448 1024); 1466 1024);
1449 pci_dma_sync_single_for_device(pdev, 1467 pci_dma_sync_single_for_device(pdev,
1450 txdr->buffer_info[k].dma, 1468 txdr->buffer_info[k].dma,
1451 txdr->buffer_info[k].length, 1469 txdr->buffer_info[k].length,
1452 PCI_DMA_TODEVICE); 1470 PCI_DMA_TODEVICE);
1453 if(unlikely(++k == txdr->count)) k = 0; 1471 if (unlikely(++k == txdr->count)) k = 0;
1454 } 1472 }
1455 E1000_WRITE_REG(&adapter->hw, TDT, k); 1473 E1000_WRITE_REG(&adapter->hw, TDT, k);
1456 msec_delay(200); 1474 msec_delay(200);
1457 time = jiffies; /* set the start time for the receive */ 1475 time = jiffies; /* set the start time for the receive */
1458 good_cnt = 0; 1476 good_cnt = 0;
1459 do { /* receive the sent packets */ 1477 do { /* receive the sent packets */
1460 pci_dma_sync_single_for_cpu(pdev, 1478 pci_dma_sync_single_for_cpu(pdev,
1461 rxdr->buffer_info[l].dma, 1479 rxdr->buffer_info[l].dma,
1462 rxdr->buffer_info[l].length, 1480 rxdr->buffer_info[l].length,
1463 PCI_DMA_FROMDEVICE); 1481 PCI_DMA_FROMDEVICE);
1464 1482
1465 ret_val = e1000_check_lbtest_frame( 1483 ret_val = e1000_check_lbtest_frame(
1466 rxdr->buffer_info[l].skb, 1484 rxdr->buffer_info[l].skb,
1467 1024); 1485 1024);
1468 if(!ret_val) 1486 if (!ret_val)
1469 good_cnt++; 1487 good_cnt++;
1470 if(unlikely(++l == rxdr->count)) l = 0; 1488 if (unlikely(++l == rxdr->count)) l = 0;
1471 /* time + 20 msecs (200 msecs on 2.4) is more than 1489 /* time + 20 msecs (200 msecs on 2.4) is more than
1472 * enough time to complete the receives; if it's 1490 * enough time to complete the receives; if it's
1473 * exceeded, break and error out 1491 * exceeded, break and error out
1474 */ 1492 */
1475 } while (good_cnt < 64 && jiffies < (time + 20)); 1493 } while (good_cnt < 64 && jiffies < (time + 20));
1476 if(good_cnt != 64) { 1494 if (good_cnt != 64) {
1477 ret_val = 13; /* ret_val is the same as mis-compare */ 1495 ret_val = 13; /* ret_val is the same as mis-compare */
1478 break; 1496 break;
1479 } 1497 }
1480 if(jiffies >= (time + 2)) { 1498 if (jiffies >= (time + 2)) {
1481 ret_val = 14; /* error code for time out error */ 1499 ret_val = 14; /* error code for time out error */
1482 break; 1500 break;
1483 } 1501 }
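The outer loop count is sized so the larger of the two rings wraps roughly twice in bursts of 64 frames. The calculation in isolation:

#include <stdio.h>

/* Wrap the larger of the two rings roughly twice, 64 frames per
 * burst, as e1000_run_loopback_test() sizes its outer loop. */
static int loop_count(int txd, int rxd)
{
    int largest = (rxd > txd) ? rxd : txd;

    return ((largest / 64) * 2) + 1;
}

int main(void)
{
    printf("%d\n", loop_count(256, 256));   /* prints 9 */
    return 0;
}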
@@ -1488,14 +1506,25 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
1488static int 1506static int
1489e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data) 1507e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
1490{ 1508{
1491 if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback; 1509 /* PHY loopback cannot be performed if SoL/IDER
1492 if((*data = e1000_setup_loopback_test(adapter))) 1510 * sessions are active */
1493 goto err_loopback_setup; 1511 if (e1000_check_phy_reset_block(&adapter->hw)) {
1512 DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
1513 "when SoL/IDER is active.\n");
1514 *data = 0;
1515 goto out;
1516 }
1517
1518 if ((*data = e1000_setup_desc_rings(adapter)))
1519 goto out;
1520 if ((*data = e1000_setup_loopback_test(adapter)))
1521 goto err_loopback;
1494 *data = e1000_run_loopback_test(adapter); 1522 *data = e1000_run_loopback_test(adapter);
1495 e1000_loopback_cleanup(adapter); 1523 e1000_loopback_cleanup(adapter);
1496err_loopback_setup: 1524
1497 e1000_free_desc_rings(adapter);
1498err_loopback: 1525err_loopback:
1526 e1000_free_desc_rings(adapter);
1527out:
1499 return *data; 1528 return *data;
1500} 1529}
1501 1530
@@ -1519,17 +1548,17 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
1519 *data = 1; 1548 *data = 1;
1520 } else { 1549 } else {
1521 e1000_check_for_link(&adapter->hw); 1550 e1000_check_for_link(&adapter->hw);
1522 if(adapter->hw.autoneg) /* if auto_neg is set wait for it */ 1551 if (adapter->hw.autoneg) /* if auto_neg is set wait for it */
1523 msec_delay(4000); 1552 msec_delay(4000);
1524 1553
1525 if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { 1554 if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
1526 *data = 1; 1555 *data = 1;
1527 } 1556 }
1528 } 1557 }
1529 return *data; 1558 return *data;
1530} 1559}
1531 1560
1532static int 1561static int
1533e1000_diag_test_count(struct net_device *netdev) 1562e1000_diag_test_count(struct net_device *netdev)
1534{ 1563{
1535 return E1000_TEST_LEN; 1564 return E1000_TEST_LEN;
@@ -1542,7 +1571,7 @@ e1000_diag_test(struct net_device *netdev,
1542 struct e1000_adapter *adapter = netdev_priv(netdev); 1571 struct e1000_adapter *adapter = netdev_priv(netdev);
1543 boolean_t if_running = netif_running(netdev); 1572 boolean_t if_running = netif_running(netdev);
1544 1573
1545 if(eth_test->flags == ETH_TEST_FL_OFFLINE) { 1574 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1546 /* Offline tests */ 1575 /* Offline tests */
1547 1576
1548 /* save speed, duplex, autoneg settings */ 1577 /* save speed, duplex, autoneg settings */
@@ -1552,27 +1581,27 @@ e1000_diag_test(struct net_device *netdev,
1552 1581
1553 /* Link test performed before hardware reset so autoneg doesn't 1582 /* Link test performed before hardware reset so autoneg doesn't
1554 * interfere with test result */ 1583 * interfere with test result */
1555 if(e1000_link_test(adapter, &data[4])) 1584 if (e1000_link_test(adapter, &data[4]))
1556 eth_test->flags |= ETH_TEST_FL_FAILED; 1585 eth_test->flags |= ETH_TEST_FL_FAILED;
1557 1586
1558 if(if_running) 1587 if (if_running)
1559 e1000_down(adapter); 1588 e1000_down(adapter);
1560 else 1589 else
1561 e1000_reset(adapter); 1590 e1000_reset(adapter);
1562 1591
1563 if(e1000_reg_test(adapter, &data[0])) 1592 if (e1000_reg_test(adapter, &data[0]))
1564 eth_test->flags |= ETH_TEST_FL_FAILED; 1593 eth_test->flags |= ETH_TEST_FL_FAILED;
1565 1594
1566 e1000_reset(adapter); 1595 e1000_reset(adapter);
1567 if(e1000_eeprom_test(adapter, &data[1])) 1596 if (e1000_eeprom_test(adapter, &data[1]))
1568 eth_test->flags |= ETH_TEST_FL_FAILED; 1597 eth_test->flags |= ETH_TEST_FL_FAILED;
1569 1598
1570 e1000_reset(adapter); 1599 e1000_reset(adapter);
1571 if(e1000_intr_test(adapter, &data[2])) 1600 if (e1000_intr_test(adapter, &data[2]))
1572 eth_test->flags |= ETH_TEST_FL_FAILED; 1601 eth_test->flags |= ETH_TEST_FL_FAILED;
1573 1602
1574 e1000_reset(adapter); 1603 e1000_reset(adapter);
1575 if(e1000_loopback_test(adapter, &data[3])) 1604 if (e1000_loopback_test(adapter, &data[3]))
1576 eth_test->flags |= ETH_TEST_FL_FAILED; 1605 eth_test->flags |= ETH_TEST_FL_FAILED;
1577 1606
1578 /* restore speed, duplex, autoneg settings */ 1607 /* restore speed, duplex, autoneg settings */
@@ -1581,11 +1610,11 @@ e1000_diag_test(struct net_device *netdev,
1581 adapter->hw.autoneg = autoneg; 1610 adapter->hw.autoneg = autoneg;
1582 1611
1583 e1000_reset(adapter); 1612 e1000_reset(adapter);
1584 if(if_running) 1613 if (if_running)
1585 e1000_up(adapter); 1614 e1000_up(adapter);
1586 } else { 1615 } else {
1587 /* Online tests */ 1616 /* Online tests */
1588 if(e1000_link_test(adapter, &data[4])) 1617 if (e1000_link_test(adapter, &data[4]))
1589 eth_test->flags |= ETH_TEST_FL_FAILED; 1618 eth_test->flags |= ETH_TEST_FL_FAILED;
1590 1619
1591 /* Offline tests aren't run; pass by default */ 1620 /* Offline tests aren't run; pass by default */
@@ -1603,7 +1632,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1603 struct e1000_adapter *adapter = netdev_priv(netdev); 1632 struct e1000_adapter *adapter = netdev_priv(netdev);
1604 struct e1000_hw *hw = &adapter->hw; 1633 struct e1000_hw *hw = &adapter->hw;
1605 1634
1606 switch(adapter->hw.device_id) { 1635 switch (adapter->hw.device_id) {
1607 case E1000_DEV_ID_82542: 1636 case E1000_DEV_ID_82542:
1608 case E1000_DEV_ID_82543GC_FIBER: 1637 case E1000_DEV_ID_82543GC_FIBER:
1609 case E1000_DEV_ID_82543GC_COPPER: 1638 case E1000_DEV_ID_82543GC_COPPER:
@@ -1617,8 +1646,9 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1617 1646
1618 case E1000_DEV_ID_82546EB_FIBER: 1647 case E1000_DEV_ID_82546EB_FIBER:
1619 case E1000_DEV_ID_82546GB_FIBER: 1648 case E1000_DEV_ID_82546GB_FIBER:
1649 case E1000_DEV_ID_82571EB_FIBER:
1620 /* Wake events only supported on port A for dual fiber */ 1650 /* Wake events only supported on port A for dual fiber */
1621 if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) { 1651 if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
1622 wol->supported = 0; 1652 wol->supported = 0;
1623 wol->wolopts = 0; 1653 wol->wolopts = 0;
1624 return; 1654 return;
@@ -1630,13 +1660,13 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1630 WAKE_BCAST | WAKE_MAGIC; 1660 WAKE_BCAST | WAKE_MAGIC;
1631 1661
1632 wol->wolopts = 0; 1662 wol->wolopts = 0;
1633 if(adapter->wol & E1000_WUFC_EX) 1663 if (adapter->wol & E1000_WUFC_EX)
1634 wol->wolopts |= WAKE_UCAST; 1664 wol->wolopts |= WAKE_UCAST;
1635 if(adapter->wol & E1000_WUFC_MC) 1665 if (adapter->wol & E1000_WUFC_MC)
1636 wol->wolopts |= WAKE_MCAST; 1666 wol->wolopts |= WAKE_MCAST;
1637 if(adapter->wol & E1000_WUFC_BC) 1667 if (adapter->wol & E1000_WUFC_BC)
1638 wol->wolopts |= WAKE_BCAST; 1668 wol->wolopts |= WAKE_BCAST;
1639 if(adapter->wol & E1000_WUFC_MAG) 1669 if (adapter->wol & E1000_WUFC_MAG)
1640 wol->wolopts |= WAKE_MAGIC; 1670 wol->wolopts |= WAKE_MAGIC;
1641 return; 1671 return;
1642 } 1672 }
@@ -1648,7 +1678,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1648 struct e1000_adapter *adapter = netdev_priv(netdev); 1678 struct e1000_adapter *adapter = netdev_priv(netdev);
1649 struct e1000_hw *hw = &adapter->hw; 1679 struct e1000_hw *hw = &adapter->hw;
1650 1680
1651 switch(adapter->hw.device_id) { 1681 switch (adapter->hw.device_id) {
1652 case E1000_DEV_ID_82542: 1682 case E1000_DEV_ID_82542:
1653 case E1000_DEV_ID_82543GC_FIBER: 1683 case E1000_DEV_ID_82543GC_FIBER:
1654 case E1000_DEV_ID_82543GC_COPPER: 1684 case E1000_DEV_ID_82543GC_COPPER:
@@ -1660,24 +1690,25 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1660 1690
1661 case E1000_DEV_ID_82546EB_FIBER: 1691 case E1000_DEV_ID_82546EB_FIBER:
1662 case E1000_DEV_ID_82546GB_FIBER: 1692 case E1000_DEV_ID_82546GB_FIBER:
1693 case E1000_DEV_ID_82571EB_FIBER:
1663 /* Wake events only supported on port A for dual fiber */ 1694 /* Wake events only supported on port A for dual fiber */
1664 if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) 1695 if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
1665 return wol->wolopts ? -EOPNOTSUPP : 0; 1696 return wol->wolopts ? -EOPNOTSUPP : 0;
1666 /* Fall Through */ 1697 /* Fall Through */
1667 1698
1668 default: 1699 default:
1669 if(wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1700 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1670 return -EOPNOTSUPP; 1701 return -EOPNOTSUPP;
1671 1702
1672 adapter->wol = 0; 1703 adapter->wol = 0;
1673 1704
1674 if(wol->wolopts & WAKE_UCAST) 1705 if (wol->wolopts & WAKE_UCAST)
1675 adapter->wol |= E1000_WUFC_EX; 1706 adapter->wol |= E1000_WUFC_EX;
1676 if(wol->wolopts & WAKE_MCAST) 1707 if (wol->wolopts & WAKE_MCAST)
1677 adapter->wol |= E1000_WUFC_MC; 1708 adapter->wol |= E1000_WUFC_MC;
1678 if(wol->wolopts & WAKE_BCAST) 1709 if (wol->wolopts & WAKE_BCAST)
1679 adapter->wol |= E1000_WUFC_BC; 1710 adapter->wol |= E1000_WUFC_BC;
1680 if(wol->wolopts & WAKE_MAGIC) 1711 if (wol->wolopts & WAKE_MAGIC)
1681 adapter->wol |= E1000_WUFC_MAG; 1712 adapter->wol |= E1000_WUFC_MAG;
1682 } 1713 }
1683 1714
@@ -1695,7 +1726,7 @@ e1000_led_blink_callback(unsigned long data)
1695{ 1726{
1696 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 1727 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1697 1728
1698 if(test_and_change_bit(E1000_LED_ON, &adapter->led_status)) 1729 if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
1699 e1000_led_off(&adapter->hw); 1730 e1000_led_off(&adapter->hw);
1700 else 1731 else
1701 e1000_led_on(&adapter->hw); 1732 e1000_led_on(&adapter->hw);
@@ -1708,11 +1739,11 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
1708{ 1739{
1709 struct e1000_adapter *adapter = netdev_priv(netdev); 1740 struct e1000_adapter *adapter = netdev_priv(netdev);
1710 1741
1711 if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ)) 1742 if (!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
1712 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ); 1743 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
1713 1744
1714 if(adapter->hw.mac_type < e1000_82571) { 1745 if (adapter->hw.mac_type < e1000_82571) {
1715 if(!adapter->blink_timer.function) { 1746 if (!adapter->blink_timer.function) {
1716 init_timer(&adapter->blink_timer); 1747 init_timer(&adapter->blink_timer);
1717 adapter->blink_timer.function = e1000_led_blink_callback; 1748 adapter->blink_timer.function = e1000_led_blink_callback;
1718 adapter->blink_timer.data = (unsigned long) adapter; 1749 adapter->blink_timer.data = (unsigned long) adapter;
@@ -1721,21 +1752,21 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
1721 mod_timer(&adapter->blink_timer, jiffies); 1752 mod_timer(&adapter->blink_timer, jiffies);
1722 msleep_interruptible(data * 1000); 1753 msleep_interruptible(data * 1000);
1723 del_timer_sync(&adapter->blink_timer); 1754 del_timer_sync(&adapter->blink_timer);
1724 } 1755 } else if (adapter->hw.mac_type < e1000_82573) {
1725 else if(adapter->hw.mac_type < e1000_82573) { 1756 E1000_WRITE_REG(&adapter->hw, LEDCTL,
1726 E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE | 1757 (E1000_LEDCTL_LED2_BLINK_RATE |
1727 E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK | 1758 E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
1728 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) | 1759 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
1729 (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) | 1760 (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
1730 (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT))); 1761 (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
1731 msleep_interruptible(data * 1000); 1762 msleep_interruptible(data * 1000);
1732 } 1763 } else {
1733 else { 1764 E1000_WRITE_REG(&adapter->hw, LEDCTL,
1734 E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE | 1765 (E1000_LEDCTL_LED2_BLINK_RATE |
1735 E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK | 1766 E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
1736 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) | 1767 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
1737 (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) | 1768 (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
1738 (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT))); 1769 (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
1739 msleep_interruptible(data * 1000); 1770 msleep_interruptible(data * 1000);
1740 } 1771 }
1741 1772
@@ -1750,50 +1781,89 @@ static int
1750e1000_nway_reset(struct net_device *netdev) 1781e1000_nway_reset(struct net_device *netdev)
1751{ 1782{
1752 struct e1000_adapter *adapter = netdev_priv(netdev); 1783 struct e1000_adapter *adapter = netdev_priv(netdev);
1753 if(netif_running(netdev)) { 1784 if (netif_running(netdev)) {
1754 e1000_down(adapter); 1785 e1000_down(adapter);
1755 e1000_up(adapter); 1786 e1000_up(adapter);
1756 } 1787 }
1757 return 0; 1788 return 0;
1758} 1789}
1759 1790
1760static int 1791static int
1761e1000_get_stats_count(struct net_device *netdev) 1792e1000_get_stats_count(struct net_device *netdev)
1762{ 1793{
1763 return E1000_STATS_LEN; 1794 return E1000_STATS_LEN;
1764} 1795}
1765 1796
1766static void 1797static void
1767e1000_get_ethtool_stats(struct net_device *netdev, 1798e1000_get_ethtool_stats(struct net_device *netdev,
1768 struct ethtool_stats *stats, uint64_t *data) 1799 struct ethtool_stats *stats, uint64_t *data)
1769{ 1800{
1770 struct e1000_adapter *adapter = netdev_priv(netdev); 1801 struct e1000_adapter *adapter = netdev_priv(netdev);
1802#ifdef CONFIG_E1000_MQ
1803 uint64_t *queue_stat;
1804 int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t);
1805 int j, k;
1806#endif
1771 int i; 1807 int i;
1772 1808
1773 e1000_update_stats(adapter); 1809 e1000_update_stats(adapter);
1774 for(i = 0; i < E1000_STATS_LEN; i++) { 1810 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1775 char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; 1811 char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
1776 data[i] = (e1000_gstrings_stats[i].sizeof_stat == 1812 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
1777 sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; 1813 sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
1778 } 1814 }
1815#ifdef CONFIG_E1000_MQ
1816 for (j = 0; j < adapter->num_tx_queues; j++) {
1817 queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats;
1818 for (k = 0; k < stat_count; k++)
1819 data[i + k] = queue_stat[k];
1820 i += k;
1821 }
1822 for (j = 0; j < adapter->num_rx_queues; j++) {
1823 queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats;
1824 for (k = 0; k < stat_count; k++)
1825 data[i + k] = queue_stat[k];
1826 i += k;
1827 }
1828#endif
1829/* BUG_ON(i != E1000_STATS_LEN); */
1779} 1830}
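With CONFIG_E1000_MQ the per-queue counters are appended after the global stats by treating each ring's stats struct as a flat uint64_t array. A compilable model of that copy:

#include <stdint.h>
#include <stdio.h>

struct queue_stats { uint64_t packets, bytes; };   /* per-ring counters */

/* Append each ring's counters after the global stats, the same copy
 * the CONFIG_E1000_MQ branch of get_ethtool_stats performs. */
static int flatten(uint64_t *out, int i,
                   const struct queue_stats *qs, int nqueues)
{
    int stat_count = sizeof(*qs) / sizeof(uint64_t);
    int j, k;

    for (j = 0; j < nqueues; j++) {
        const uint64_t *src = (const uint64_t *)&qs[j];

        for (k = 0; k < stat_count; k++)
            out[i + k] = src[k];
        i += k;
    }
    return i;                     /* next free slot in the ethtool array */
}

int main(void)
{
    struct queue_stats qs[2] = { { 10, 1000 }, { 20, 2000 } };
    uint64_t data[4];
    int n = flatten(data, 0, qs, 2);

    printf("%d %llu\n", n, (unsigned long long)data[3]);   /* prints 4 2000 */
    return 0;
}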
1780 1831
1781static void 1832static void
1782e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) 1833e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
1783{ 1834{
1835#ifdef CONFIG_E1000_MQ
1836 struct e1000_adapter *adapter = netdev_priv(netdev);
1837#endif
1838 uint8_t *p = data;
1784 int i; 1839 int i;
1785 1840
1786 switch(stringset) { 1841 switch (stringset) {
1787 case ETH_SS_TEST: 1842 case ETH_SS_TEST:
1788 memcpy(data, *e1000_gstrings_test, 1843 memcpy(data, *e1000_gstrings_test,
1789 E1000_TEST_LEN*ETH_GSTRING_LEN); 1844 E1000_TEST_LEN*ETH_GSTRING_LEN);
1790 break; 1845 break;
1791 case ETH_SS_STATS: 1846 case ETH_SS_STATS:
1792 for (i=0; i < E1000_STATS_LEN; i++) { 1847 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1793 memcpy(data + i * ETH_GSTRING_LEN, 1848 memcpy(p, e1000_gstrings_stats[i].stat_string,
1794 e1000_gstrings_stats[i].stat_string, 1849 ETH_GSTRING_LEN);
1795 ETH_GSTRING_LEN); 1850 p += ETH_GSTRING_LEN;
1796 } 1851 }
1852#ifdef CONFIG_E1000_MQ
1853 for (i = 0; i < adapter->num_tx_queues; i++) {
1854 sprintf(p, "tx_queue_%u_packets", i);
1855 p += ETH_GSTRING_LEN;
1856 sprintf(p, "tx_queue_%u_bytes", i);
1857 p += ETH_GSTRING_LEN;
1858 }
1859 for (i = 0; i < adapter->num_rx_queues; i++) {
1860 sprintf(p, "rx_queue_%u_packets", i);
1861 p += ETH_GSTRING_LEN;
1862 sprintf(p, "rx_queue_%u_bytes", i);
1863 p += ETH_GSTRING_LEN;
1864 }
1865#endif
1866/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
1797 break; 1867 break;
1798 } 1868 }
1799} 1869}
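
The two hunks above split the stats table into a fixed global part followed by two per-queue entries (packets, bytes) for each tx and rx queue; the commented-out BUG_ONs document that the pieces must add up to E1000_STATS_LEN. A minimal standalone sketch of that layout (not driver code; toy sizes, with GLOBAL_STATS standing in for E1000_GLOBAL_STATS_LEN):

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32
#define GLOBAL_STATS    3   /* stand-in for E1000_GLOBAL_STATS_LEN */

int main(void)
{
        const char *global[GLOBAL_STATS] = { "rx_packets", "tx_packets", "rx_bytes" };
        unsigned int num_tx = 2, num_rx = 2, i;
        unsigned int total = GLOBAL_STATS + (num_tx + num_rx) * 2; /* plays E1000_STATS_LEN */
        char strings[64][ETH_GSTRING_LEN];
        char *p = strings[0];

        /* global stats first, exactly as e1000_get_strings() does */
        for (i = 0; i < GLOBAL_STATS; i++) {
                strncpy(p, global[i], ETH_GSTRING_LEN);
                p += ETH_GSTRING_LEN;
        }
        /* then a (packets, bytes) pair per tx queue, then per rx queue */
        for (i = 0; i < num_tx; i++) {
                sprintf(p, "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN;
                sprintf(p, "tx_queue_%u_bytes", i);   p += ETH_GSTRING_LEN;
        }
        for (i = 0; i < num_rx; i++) {
                sprintf(p, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN;
                sprintf(p, "rx_queue_%u_bytes", i);   p += ETH_GSTRING_LEN;
        }
        /* mirrors the commented-out BUG_ON checks in the hunks above */
        printf("entries written: %td, expected: %u\n",
               (p - strings[0]) / ETH_GSTRING_LEN, total);
        return 0;
}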
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 136fc031e4ad..beeec0fbbeac 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -318,6 +318,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
318 case E1000_DEV_ID_82546GB_FIBER: 318 case E1000_DEV_ID_82546GB_FIBER:
319 case E1000_DEV_ID_82546GB_SERDES: 319 case E1000_DEV_ID_82546GB_SERDES:
320 case E1000_DEV_ID_82546GB_PCIE: 320 case E1000_DEV_ID_82546GB_PCIE:
321 case E1000_DEV_ID_82546GB_QUAD_COPPER:
322 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
321 hw->mac_type = e1000_82546_rev_3; 323 hw->mac_type = e1000_82546_rev_3;
322 break; 324 break;
323 case E1000_DEV_ID_82541EI: 325 case E1000_DEV_ID_82541EI:
@@ -639,6 +641,7 @@ e1000_init_hw(struct e1000_hw *hw)
639 uint16_t cmd_mmrbc; 641 uint16_t cmd_mmrbc;
640 uint16_t stat_mmrbc; 642 uint16_t stat_mmrbc;
641 uint32_t mta_size; 643 uint32_t mta_size;
644 uint32_t ctrl_ext;
642 645
643 DEBUGFUNC("e1000_init_hw"); 646 DEBUGFUNC("e1000_init_hw");
644 647
@@ -735,7 +738,6 @@ e1000_init_hw(struct e1000_hw *hw)
735 break; 738 break;
736 case e1000_82571: 739 case e1000_82571:
737 case e1000_82572: 740 case e1000_82572:
738 ctrl |= (1 << 22);
739 case e1000_82573: 741 case e1000_82573:
740 ctrl |= E1000_TXDCTL_COUNT_DESC; 742 ctrl |= E1000_TXDCTL_COUNT_DESC;
741 break; 743 break;
@@ -775,6 +777,15 @@ e1000_init_hw(struct e1000_hw *hw)
775 */ 777 */
776 e1000_clear_hw_cntrs(hw); 778 e1000_clear_hw_cntrs(hw);
777 779
780 if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
781 hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
782 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
783 /* Relaxed ordering must be disabled to avoid a parity
784 * error crash in a PCI slot. */
785 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
786 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
787 }
788
778 return ret_val; 789 return ret_val;
779} 790}
780 791
@@ -838,6 +849,11 @@ e1000_setup_link(struct e1000_hw *hw)
838 849
839 DEBUGFUNC("e1000_setup_link"); 850 DEBUGFUNC("e1000_setup_link");
840 851
852 /* In the case of the phy reset being blocked, we already have a link.
853 * We do not have to set it up again. */
854 if (e1000_check_phy_reset_block(hw))
855 return E1000_SUCCESS;
856
841 /* Read and store word 0x0F of the EEPROM. This word contains bits 857 /* Read and store word 0x0F of the EEPROM. This word contains bits
842 * that determine the hardware's default PAUSE (flow control) mode, 858 * that determine the hardware's default PAUSE (flow control) mode,
843 * a bit that determines whether the HW defaults to enabling or 859 * a bit that determines whether the HW defaults to enabling or
@@ -1584,10 +1600,10 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
1584 if(ret_val) 1600 if(ret_val)
1585 return ret_val; 1601 return ret_val;
1586 1602
1587 /* Read the MII 1000Base-T Control Register (Address 9). */ 1603 /* Read the MII 1000Base-T Control Register (Address 9). */
1588 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); 1604 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
1589 if(ret_val) 1605 if(ret_val)
1590 return ret_val; 1606 return ret_val;
1591 1607
1592 /* Need to parse both autoneg_advertised and fc and set up 1608 /* Need to parse both autoneg_advertised and fc and set up
1593 * the appropriate PHY registers. First we will parse for 1609 * the appropriate PHY registers. First we will parse for
@@ -1929,14 +1945,19 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1929void 1945void
1930e1000_config_collision_dist(struct e1000_hw *hw) 1946e1000_config_collision_dist(struct e1000_hw *hw)
1931{ 1947{
1932 uint32_t tctl; 1948 uint32_t tctl, coll_dist;
1933 1949
1934 DEBUGFUNC("e1000_config_collision_dist"); 1950 DEBUGFUNC("e1000_config_collision_dist");
1935 1951
1952 if (hw->mac_type < e1000_82543)
1953 coll_dist = E1000_COLLISION_DISTANCE_82542;
1954 else
1955 coll_dist = E1000_COLLISION_DISTANCE;
1956
1936 tctl = E1000_READ_REG(hw, TCTL); 1957 tctl = E1000_READ_REG(hw, TCTL);
1937 1958
1938 tctl &= ~E1000_TCTL_COLD; 1959 tctl &= ~E1000_TCTL_COLD;
1939 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; 1960 tctl |= coll_dist << E1000_COLD_SHIFT;
1940 1961
1941 E1000_WRITE_REG(hw, TCTL, tctl); 1962 E1000_WRITE_REG(hw, TCTL, tctl);
1942 E1000_WRITE_FLUSH(hw); 1963 E1000_WRITE_FLUSH(hw);
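
The collision distance is a plain bit field inside TCTL: clear the field, then shift the (now mac-type-dependent) distance into place. A standalone sketch of the packing done by e1000_config_collision_dist() above, assuming the usual e1000 register layout (E1000_TCTL_COLD covering bits 21:12; the starting TCTL value is invented for illustration):

#include <stdio.h>

#define E1000_TCTL_COLD  0x003ff000 /* collision distance field */
#define E1000_COLD_SHIFT 12

int main(void)
{
        unsigned int tctl = 0x0004010a; /* pretend E1000_READ_REG(hw, TCTL) */
        unsigned int coll_dist = 63;    /* 0-based E1000_COLLISION_DISTANCE */

        tctl &= ~E1000_TCTL_COLD;              /* clear the old field */
        tctl |= coll_dist << E1000_COLD_SHIFT; /* insert the new value */

        printf("TCTL = 0x%08x, COLD = %u\n",
               tctl, (tctl & E1000_TCTL_COLD) >> E1000_COLD_SHIFT);
        return 0;
}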
@@ -2982,6 +3003,8 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
2982 3003
2983 if (hw->mac_type < e1000_82571) 3004 if (hw->mac_type < e1000_82571)
2984 msec_delay(10); 3005 msec_delay(10);
3006 else
3007 udelay(100);
2985 3008
2986 E1000_WRITE_REG(hw, CTRL, ctrl); 3009 E1000_WRITE_REG(hw, CTRL, ctrl);
2987 E1000_WRITE_FLUSH(hw); 3010 E1000_WRITE_FLUSH(hw);
@@ -3881,17 +3904,19 @@ e1000_read_eeprom(struct e1000_hw *hw,
3881 return -E1000_ERR_EEPROM; 3904 return -E1000_ERR_EEPROM;
3882 } 3905 }
3883 3906
3884 /* FLASH reads without acquiring the semaphore are safe in 82573-based 3907 /* FLASH reads without acquiring the semaphore are safe */
3885 * controllers. 3908 if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
3886 */ 3909 hw->eeprom.use_eerd == FALSE) {
3887 if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || 3910 switch (hw->mac_type) {
3888 (hw->mac_type != e1000_82573)) { 3911 default:
3889 /* Prepare the EEPROM for reading */ 3912 /* Prepare the EEPROM for reading */
3890 if(e1000_acquire_eeprom(hw) != E1000_SUCCESS) 3913 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
3891 return -E1000_ERR_EEPROM; 3914 return -E1000_ERR_EEPROM;
3915 break;
3916 }
3892 } 3917 }
3893 3918
3894 if(eeprom->use_eerd == TRUE) { 3919 if (eeprom->use_eerd == TRUE) {
3895 ret_val = e1000_read_eeprom_eerd(hw, offset, words, data); 3920 ret_val = e1000_read_eeprom_eerd(hw, offset, words, data);
3896 if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || 3921 if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
3897 (hw->mac_type != e1000_82573)) 3922 (hw->mac_type != e1000_82573))
@@ -4398,7 +4423,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
4398 return -E1000_ERR_EEPROM; 4423 return -E1000_ERR_EEPROM;
4399 } 4424 }
4400 4425
4401 /* If STM opcode located in bits 15:8 of flop, reset firmware */ 4426 /* If STM opcode located in bits 15:8 of flop, reset firmware */
4402 if ((flop & 0xFF00) == E1000_STM_OPCODE) { 4427 if ((flop & 0xFF00) == E1000_STM_OPCODE) {
4403 E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET); 4428 E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
4404 } 4429 }
@@ -4406,7 +4431,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
4406 /* Perform the flash update */ 4431 /* Perform the flash update */
4407 E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD); 4432 E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);
4408 4433
4409 for (i=0; i < attempts; i++) { 4434 for (i=0; i < attempts; i++) {
4410 eecd = E1000_READ_REG(hw, EECD); 4435 eecd = E1000_READ_REG(hw, EECD);
4411 if ((eecd & E1000_EECD_FLUPD) == 0) { 4436 if ((eecd & E1000_EECD_FLUPD) == 0) {
4412 break; 4437 break;
@@ -4479,6 +4504,7 @@ e1000_read_mac_addr(struct e1000_hw * hw)
4479 hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF); 4504 hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
4480 hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8); 4505 hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
4481 } 4506 }
4507
4482 switch (hw->mac_type) { 4508 switch (hw->mac_type) {
4483 default: 4509 default:
4484 break; 4510 break;
@@ -6720,6 +6746,12 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
6720 break; 6746 break;
6721 } 6747 }
6722 6748
 6749 /* PHY configuration from NVM starts only after EECD_AUTO_RD goes high.
6750 * Need to wait for PHY configuration completion before accessing NVM
6751 * and PHY. */
6752 if (hw->mac_type == e1000_82573)
6753 msec_delay(25);
6754
6723 return E1000_SUCCESS; 6755 return E1000_SUCCESS;
6724} 6756}
6725 6757
@@ -6809,7 +6841,8 @@ int32_t
6809e1000_check_phy_reset_block(struct e1000_hw *hw) 6841e1000_check_phy_reset_block(struct e1000_hw *hw)
6810{ 6842{
6811 uint32_t manc = 0; 6843 uint32_t manc = 0;
6812 if(hw->mac_type > e1000_82547_rev_2) 6844
6845 if (hw->mac_type > e1000_82547_rev_2)
6813 manc = E1000_READ_REG(hw, MANC); 6846 manc = E1000_READ_REG(hw, MANC);
6814 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? 6847 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
6815 E1000_BLK_PHY_RESET : E1000_SUCCESS; 6848 E1000_BLK_PHY_RESET : E1000_SUCCESS;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 7caa35748cea..f1219dd9dbac 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -377,6 +377,7 @@ int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
377void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); 377void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
378 378
379/* Filters (multicast, vlan, receive) */ 379/* Filters (multicast, vlan, receive) */
380void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
380uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); 381uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
381void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); 382void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
382void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); 383void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
@@ -401,7 +402,9 @@ void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
401void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); 402void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
402/* Port I/O is only supported on 82544 and newer */ 403/* Port I/O is only supported on 82544 and newer */
403uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port); 404uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
405uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
404void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); 406void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
407void e1000_enable_pciex_master(struct e1000_hw *hw);
405int32_t e1000_disable_pciex_master(struct e1000_hw *hw); 408int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
406int32_t e1000_get_software_semaphore(struct e1000_hw *hw); 409int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
407void e1000_release_software_semaphore(struct e1000_hw *hw); 410void e1000_release_software_semaphore(struct e1000_hw *hw);
@@ -439,6 +442,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
439#define E1000_DEV_ID_82546GB_FIBER 0x107A 442#define E1000_DEV_ID_82546GB_FIBER 0x107A
440#define E1000_DEV_ID_82546GB_SERDES 0x107B 443#define E1000_DEV_ID_82546GB_SERDES 0x107B
441#define E1000_DEV_ID_82546GB_PCIE 0x108A 444#define E1000_DEV_ID_82546GB_PCIE 0x108A
445#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
442#define E1000_DEV_ID_82547EI 0x1019 446#define E1000_DEV_ID_82547EI 0x1019
443#define E1000_DEV_ID_82571EB_COPPER 0x105E 447#define E1000_DEV_ID_82571EB_COPPER 0x105E
444#define E1000_DEV_ID_82571EB_FIBER 0x105F 448#define E1000_DEV_ID_82571EB_FIBER 0x105F
@@ -449,6 +453,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
449#define E1000_DEV_ID_82573E 0x108B 453#define E1000_DEV_ID_82573E 0x108B
450#define E1000_DEV_ID_82573E_IAMT 0x108C 454#define E1000_DEV_ID_82573E_IAMT 0x108C
451#define E1000_DEV_ID_82573L 0x109A 455#define E1000_DEV_ID_82573L 0x109A
456#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
452 457
453 458
454#define NODE_ADDRESS_SIZE 6 459#define NODE_ADDRESS_SIZE 6
@@ -897,14 +902,14 @@ struct e1000_ffvt_entry {
897#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ 902#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
898#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ 903#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
899#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ 904#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
900#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ 905#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
901#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ 906#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
902#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ 907#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
903#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ 908#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
904#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ 909#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
905#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ 910#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
906#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ 911#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
907#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ 912#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
908#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ 913#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
909#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ 914#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
910#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ 915#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
@@ -1497,6 +1502,7 @@ struct e1000_hw {
1497#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ 1502#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
1498#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ 1503#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
1499#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ 1504#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
1505#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
1500#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 1506#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
1501#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 1507#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
1502#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 1508#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
@@ -1758,7 +1764,6 @@ struct e1000_hw {
1758#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 1764#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
1759#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. 1765#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
1760 still to be processed. */ 1766 still to be processed. */
1761
1762/* Transmit Configuration Word */ 1767/* Transmit Configuration Word */
1763#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ 1768#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
1764#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ 1769#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
@@ -1954,6 +1959,23 @@ struct e1000_host_command_info {
1954 1959
1955#define E1000_MDALIGN 4096 1960#define E1000_MDALIGN 4096
1956 1961
1962/* PCI-Ex registers */
1963
1964/* PCI-Ex Control Register */
1965#define E1000_GCR_RXD_NO_SNOOP 0x00000001
1966#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
1967#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
1968#define E1000_GCR_TXD_NO_SNOOP 0x00000008
1969#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
1970#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
1971
1972#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
1973 E1000_GCR_RXDSCW_NO_SNOOP | \
1974 E1000_GCR_RXDSCR_NO_SNOOP | \
 1975 E1000_GCR_TXD_NO_SNOOP | \
1976 E1000_GCR_TXDSCW_NO_SNOOP | \
1977 E1000_GCR_TXDSCR_NO_SNOOP)
1978
1957#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 1979#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
1958/* Function Active and Power State to MNG */ 1980/* Function Active and Power State to MNG */
1959#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 1981#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
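
The six single-bit GCR defines above OR together into one disable-everything mask. A quick standalone check, with the values copied from the hunk:

#include <stdio.h>

#define E1000_GCR_RXD_NO_SNOOP    0x00000001
#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
#define E1000_GCR_TXD_NO_SNOOP    0x00000008
#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020

#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
                             E1000_GCR_RXDSCW_NO_SNOOP | \
                             E1000_GCR_RXDSCR_NO_SNOOP | \
                             E1000_GCR_TXD_NO_SNOOP | \
                             E1000_GCR_TXDSCW_NO_SNOOP | \
                             E1000_GCR_TXDSCR_NO_SNOOP)

int main(void)
{
        /* six contiguous bits -> 0x3f */
        printf("PCI_EX_NO_SNOOP_ALL = 0x%02x\n", PCI_EX_NO_SNOOP_ALL);
        return 0;
}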
@@ -2077,7 +2099,10 @@ struct e1000_host_command_info {
2077/* Collision related configuration parameters */ 2099/* Collision related configuration parameters */
2078#define E1000_COLLISION_THRESHOLD 15 2100#define E1000_COLLISION_THRESHOLD 15
2079#define E1000_CT_SHIFT 4 2101#define E1000_CT_SHIFT 4
2080#define E1000_COLLISION_DISTANCE 64 2102/* Collision distance is a 0-based value that applies to
2103 * half-duplex-capable hardware only. */
2104#define E1000_COLLISION_DISTANCE 63
2105#define E1000_COLLISION_DISTANCE_82542 64
2081#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE 2106#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
2082#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE 2107#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
2083#define E1000_COLD_SHIFT 12 2108#define E1000_COLD_SHIFT 12
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 438a931fd55d..31e332935e5a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -29,11 +29,71 @@
29#include "e1000.h" 29#include "e1000.h"
30 30
31/* Change Log 31/* Change Log
32 * 6.0.58 4/20/05 32 * 6.3.9 12/16/2005
33 * o Accepted ethtool cleanup patch from Stephen Hemminger 33 * o incorporate fix for recycled skbs from IBM LTC
34 * 6.0.44+ 2/15/05 34 * 6.3.7 11/18/2005
35 * o applied Anton's patch to resolve tx hang in hardware 35 * o Honor eeprom setting for enabling/disabling Wake On Lan
36 * o Applied Andrew Mortons patch - e1000 stops working after resume 36 * 6.3.5 11/17/2005
37 * o Fix memory leak in rx ring handling for PCI Express adapters
38 * 6.3.4 11/8/05
39 * o Patch from Jesper Juhl to remove redundant NULL checks for kfree
40 * 6.3.2 9/20/05
 41 * o Recast the logic that sets/resets DRV_LOAD as inline functions to
 42 * avoid code duplication. If f/w is AMT then set DRV_LOAD only when
43 * network interface is open.
44 * o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
 45 * o Adjust PBA partitioning for Jumbo frames using MTU size and not
46 * rx_buffer_len
47 * 6.3.1 9/19/05
48 * o Use adapter->tx_timeout_factor in Tx Hung Detect logic
 49 * (e1000_clean_tx_irq)
50 * o Support for 8086:10B5 device (Quad Port)
51 * 6.2.14 9/15/05
52 * o In AMT enabled configurations, set/reset DRV_LOAD bit on interface
53 * open/close
54 * 6.2.13 9/14/05
55 * o Invoke e1000_check_mng_mode only for 8257x controllers since it
 56 * accesses the FWSM register, which is not supported in other controllers
57 * 6.2.12 9/9/05
58 * o Add support for device id E1000_DEV_ID_82546GB_QUAD_COPPER
59 * o set RCTL:SECRC only for controllers newer than 82543.
 60 * o When the n/w interface comes down, reset DRV_LOAD bit to notify f/w.
61 * This code was moved from e1000_remove to e1000_close
62 * 6.2.10 9/6/05
 63 * o Fix error in updating RDT in e1000_alloc_rx_buffers[_ps] -- off by one.
64 * o Enable fc by default on 82573 controllers (do not read eeprom)
65 * o Fix rx_errors statistic not to include missed_packet_count
66 * o Fix rx_dropped statistic not to include missed_packet_count
 67 * (Padraig Brady)
68 * 6.2.9 8/30/05
 69 * o Remove call to update statistics from the controller in e1000_get_stats
70 * 6.2.8 8/30/05
71 * o Improved algorithm for rx buffer allocation/rdt update
72 * o Flow control watermarks relative to rx PBA size
73 * o Simplified 'Tx Hung' detect logic
74 * 6.2.7 8/17/05
75 * o Report rx buffer allocation failures and tx timeout counts in stats
76 * 6.2.6 8/16/05
77 * o Implement workaround for controller erratum -- linear non-tso packet
78 * following a TSO gets written back prematurely
79 * 6.2.5 8/15/05
80 * o Set netdev->tx_queue_len based on link speed/duplex settings.
81 * o Fix net_stats.rx_fifo_errors <p@draigBrady.com>
82 * o Do not power off PHY if SoL/IDER session is active
83 * 6.2.4 8/10/05
84 * o Fix loopback test setup/cleanup for 82571/3 controllers
85 * o Fix parsing of outgoing packets (e1000_transfer_dhcp_info) to treat
86 * all packets as raw
87 * o Prevent operations that will cause the PHY to be reset if SoL/IDER
88 * sessions are active and log a message
89 * 6.2.2 7/21/05
 90 * o Use fixed-size descriptors for all MTU sizes, reducing memory load
91 * 6.1.2 4/13/05
92 * o Fixed ethtool diagnostics
93 * o Enabled flow control to take default eeprom settings
94 * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
 95 * calls, one from mii_ioctl and the other from within update_stats while
96 * processing MIIREG ioctl.
37 */ 97 */
38 98
39char e1000_driver_name[] = "e1000"; 99char e1000_driver_name[] = "e1000";
@@ -43,7 +103,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
43#else 103#else
44#define DRIVERNAPI "-NAPI" 104#define DRIVERNAPI "-NAPI"
45#endif 105#endif
46#define DRV_VERSION "6.1.16-k2"DRIVERNAPI 106#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
47char e1000_driver_version[] = DRV_VERSION; 107char e1000_driver_version[] = DRV_VERSION;
48static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; 108static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
49 109
@@ -97,7 +157,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
97 INTEL_E1000_ETHERNET_DEVICE(0x108A), 157 INTEL_E1000_ETHERNET_DEVICE(0x108A),
98 INTEL_E1000_ETHERNET_DEVICE(0x108B), 158 INTEL_E1000_ETHERNET_DEVICE(0x108B),
99 INTEL_E1000_ETHERNET_DEVICE(0x108C), 159 INTEL_E1000_ETHERNET_DEVICE(0x108C),
160 INTEL_E1000_ETHERNET_DEVICE(0x1099),
100 INTEL_E1000_ETHERNET_DEVICE(0x109A), 161 INTEL_E1000_ETHERNET_DEVICE(0x109A),
162 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
101 /* required last entry */ 163 /* required last entry */
102 {0,} 164 {0,}
103}; 165};
@@ -171,9 +233,11 @@ static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
171 struct e1000_rx_ring *rx_ring); 233 struct e1000_rx_ring *rx_ring);
172#endif 234#endif
173static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 235static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
174 struct e1000_rx_ring *rx_ring); 236 struct e1000_rx_ring *rx_ring,
237 int cleaned_count);
175static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, 238static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
176 struct e1000_rx_ring *rx_ring); 239 struct e1000_rx_ring *rx_ring,
240 int cleaned_count);
177static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 241static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
178static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 242static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
179 int cmd); 243 int cmd);
@@ -291,7 +355,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
291static inline void 355static inline void
292e1000_irq_enable(struct e1000_adapter *adapter) 356e1000_irq_enable(struct e1000_adapter *adapter)
293{ 357{
294 if(likely(atomic_dec_and_test(&adapter->irq_sem))) { 358 if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
295 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); 359 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
296 E1000_WRITE_FLUSH(&adapter->hw); 360 E1000_WRITE_FLUSH(&adapter->hw);
297 } 361 }
@@ -303,23 +367,91 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
303 struct net_device *netdev = adapter->netdev; 367 struct net_device *netdev = adapter->netdev;
304 uint16_t vid = adapter->hw.mng_cookie.vlan_id; 368 uint16_t vid = adapter->hw.mng_cookie.vlan_id;
305 uint16_t old_vid = adapter->mng_vlan_id; 369 uint16_t old_vid = adapter->mng_vlan_id;
306 if(adapter->vlgrp) { 370 if (adapter->vlgrp) {
307 if(!adapter->vlgrp->vlan_devices[vid]) { 371 if (!adapter->vlgrp->vlan_devices[vid]) {
308 if(adapter->hw.mng_cookie.status & 372 if (adapter->hw.mng_cookie.status &
309 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 373 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
310 e1000_vlan_rx_add_vid(netdev, vid); 374 e1000_vlan_rx_add_vid(netdev, vid);
311 adapter->mng_vlan_id = vid; 375 adapter->mng_vlan_id = vid;
312 } else 376 } else
313 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 377 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
314 378
315 if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 379 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
316 (vid != old_vid) && 380 (vid != old_vid) &&
317 !adapter->vlgrp->vlan_devices[old_vid]) 381 !adapter->vlgrp->vlan_devices[old_vid])
318 e1000_vlan_rx_kill_vid(netdev, old_vid); 382 e1000_vlan_rx_kill_vid(netdev, old_vid);
319 } 383 }
320 } 384 }
321} 385}
322 386
387/**
388 * e1000_release_hw_control - release control of the h/w to f/w
389 * @adapter: address of board private structure
390 *
391 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
392 * For ASF and Pass Through versions of f/w this means that the
 393 * driver is no longer loaded. For AMT version (only with 82573)
 394 * of the f/w this means that the network i/f is closed.
395 *
396 **/
397
398static inline void
399e1000_release_hw_control(struct e1000_adapter *adapter)
400{
401 uint32_t ctrl_ext;
402 uint32_t swsm;
403
 404 /* Let firmware take over control of h/w */
405 switch (adapter->hw.mac_type) {
406 case e1000_82571:
407 case e1000_82572:
408 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
409 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
410 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
411 break;
412 case e1000_82573:
413 swsm = E1000_READ_REG(&adapter->hw, SWSM);
414 E1000_WRITE_REG(&adapter->hw, SWSM,
415 swsm & ~E1000_SWSM_DRV_LOAD);
416 default:
417 break;
418 }
419}
420
421/**
422 * e1000_get_hw_control - get control of the h/w from f/w
423 * @adapter: address of board private structure
424 *
425 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
426 * For ASF and Pass Through versions of f/w this means that
427 * the driver is loaded. For AMT version (only with 82573)
 428 * of the f/w this means that the network i/f is open.
429 *
430 **/
431
432static inline void
433e1000_get_hw_control(struct e1000_adapter *adapter)
434{
435 uint32_t ctrl_ext;
436 uint32_t swsm;
437 /* Let firmware know the driver has taken over */
438 switch (adapter->hw.mac_type) {
439 case e1000_82571:
440 case e1000_82572:
441 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
442 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
443 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
444 break;
445 case e1000_82573:
446 swsm = E1000_READ_REG(&adapter->hw, SWSM);
447 E1000_WRITE_REG(&adapter->hw, SWSM,
448 swsm | E1000_SWSM_DRV_LOAD);
449 break;
450 default:
451 break;
452 }
453}
454
323int 455int
324e1000_up(struct e1000_adapter *adapter) 456e1000_up(struct e1000_adapter *adapter)
325{ 457{
@@ -329,10 +461,10 @@ e1000_up(struct e1000_adapter *adapter)
329 /* hardware has been reset, we need to reload some things */ 461 /* hardware has been reset, we need to reload some things */
330 462
331 /* Reset the PHY if it was previously powered down */ 463 /* Reset the PHY if it was previously powered down */
332 if(adapter->hw.media_type == e1000_media_type_copper) { 464 if (adapter->hw.media_type == e1000_media_type_copper) {
333 uint16_t mii_reg; 465 uint16_t mii_reg;
334 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 466 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
335 if(mii_reg & MII_CR_POWER_DOWN) 467 if (mii_reg & MII_CR_POWER_DOWN)
336 e1000_phy_reset(&adapter->hw); 468 e1000_phy_reset(&adapter->hw);
337 } 469 }
338 470
@@ -343,20 +475,26 @@ e1000_up(struct e1000_adapter *adapter)
343 e1000_configure_tx(adapter); 475 e1000_configure_tx(adapter);
344 e1000_setup_rctl(adapter); 476 e1000_setup_rctl(adapter);
345 e1000_configure_rx(adapter); 477 e1000_configure_rx(adapter);
346 for (i = 0; i < adapter->num_queues; i++) 478 /* call E1000_DESC_UNUSED which always leaves
347 adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]); 479 * at least 1 descriptor unused to make sure
480 * next_to_use != next_to_clean */
481 for (i = 0; i < adapter->num_rx_queues; i++) {
482 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
483 adapter->alloc_rx_buf(adapter, ring,
484 E1000_DESC_UNUSED(ring));
485 }
348 486
349#ifdef CONFIG_PCI_MSI 487#ifdef CONFIG_PCI_MSI
350 if(adapter->hw.mac_type > e1000_82547_rev_2) { 488 if (adapter->hw.mac_type > e1000_82547_rev_2) {
351 adapter->have_msi = TRUE; 489 adapter->have_msi = TRUE;
352 if((err = pci_enable_msi(adapter->pdev))) { 490 if ((err = pci_enable_msi(adapter->pdev))) {
353 DPRINTK(PROBE, ERR, 491 DPRINTK(PROBE, ERR,
354 "Unable to allocate MSI interrupt Error: %d\n", err); 492 "Unable to allocate MSI interrupt Error: %d\n", err);
355 adapter->have_msi = FALSE; 493 adapter->have_msi = FALSE;
356 } 494 }
357 } 495 }
358#endif 496#endif
359 if((err = request_irq(adapter->pdev->irq, &e1000_intr, 497 if ((err = request_irq(adapter->pdev->irq, &e1000_intr,
360 SA_SHIRQ | SA_SAMPLE_RANDOM, 498 SA_SHIRQ | SA_SAMPLE_RANDOM,
361 netdev->name, netdev))) { 499 netdev->name, netdev))) {
362 DPRINTK(PROBE, ERR, 500 DPRINTK(PROBE, ERR,
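
The E1000_DESC_UNUSED comment in the hunk above relies on a ring-buffer invariant: the macro never reports more than count - 1 free slots, so filling every "unused" descriptor can never make next_to_use wrap onto next_to_clean. A standalone sketch using the macro as defined in the driver's e1000.h:

#include <stdio.h>

struct ring { int count, next_to_use, next_to_clean; };

#define E1000_DESC_UNUSED(R) \
        ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
         (R)->next_to_clean - (R)->next_to_use - 1)

int main(void)
{
        struct ring r = { .count = 256, .next_to_use = 0, .next_to_clean = 0 };

        /* empty ring: 255 usable slots, one always held back */
        printf("empty ring: unused = %d\n", E1000_DESC_UNUSED(&r));

        r.next_to_use = 10;
        r.next_to_clean = 4; /* 6 descriptors in flight */
        printf("in use:     unused = %d\n", E1000_DESC_UNUSED(&r)); /* 249 */
        return 0;
}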
@@ -364,6 +502,12 @@ e1000_up(struct e1000_adapter *adapter)
364 return err; 502 return err;
365 } 503 }
366 504
505#ifdef CONFIG_E1000_MQ
506 e1000_setup_queue_mapping(adapter);
507#endif
508
509 adapter->tx_queue_len = netdev->tx_queue_len;
510
367 mod_timer(&adapter->watchdog_timer, jiffies); 511 mod_timer(&adapter->watchdog_timer, jiffies);
368 512
369#ifdef CONFIG_E1000_NAPI 513#ifdef CONFIG_E1000_NAPI
@@ -378,6 +522,8 @@ void
378e1000_down(struct e1000_adapter *adapter) 522e1000_down(struct e1000_adapter *adapter)
379{ 523{
380 struct net_device *netdev = adapter->netdev; 524 struct net_device *netdev = adapter->netdev;
525 boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
526 e1000_check_mng_mode(&adapter->hw);
381 527
382 e1000_irq_disable(adapter); 528 e1000_irq_disable(adapter);
383#ifdef CONFIG_E1000_MQ 529#ifdef CONFIG_E1000_MQ
@@ -385,7 +531,7 @@ e1000_down(struct e1000_adapter *adapter)
385#endif 531#endif
386 free_irq(adapter->pdev->irq, netdev); 532 free_irq(adapter->pdev->irq, netdev);
387#ifdef CONFIG_PCI_MSI 533#ifdef CONFIG_PCI_MSI
388 if(adapter->hw.mac_type > e1000_82547_rev_2 && 534 if (adapter->hw.mac_type > e1000_82547_rev_2 &&
389 adapter->have_msi == TRUE) 535 adapter->have_msi == TRUE)
390 pci_disable_msi(adapter->pdev); 536 pci_disable_msi(adapter->pdev);
391#endif 537#endif
@@ -396,6 +542,7 @@ e1000_down(struct e1000_adapter *adapter)
396#ifdef CONFIG_E1000_NAPI 542#ifdef CONFIG_E1000_NAPI
397 netif_poll_disable(netdev); 543 netif_poll_disable(netdev);
398#endif 544#endif
545 netdev->tx_queue_len = adapter->tx_queue_len;
399 adapter->link_speed = 0; 546 adapter->link_speed = 0;
400 adapter->link_duplex = 0; 547 adapter->link_duplex = 0;
401 netif_carrier_off(netdev); 548 netif_carrier_off(netdev);
@@ -405,12 +552,16 @@ e1000_down(struct e1000_adapter *adapter)
405 e1000_clean_all_tx_rings(adapter); 552 e1000_clean_all_tx_rings(adapter);
406 e1000_clean_all_rx_rings(adapter); 553 e1000_clean_all_rx_rings(adapter);
407 554
408 /* If WoL is not enabled and management mode is not IAMT 555 /* Power down the PHY so no link is implied when interface is down *
409 * Power down the PHY so no link is implied when interface is down */ 556 * The PHY cannot be powered down if any of the following is TRUE *
410 if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 && 557 * (a) WoL is enabled
558 * (b) AMT is active
559 * (c) SoL/IDER session is active */
560 if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
411 adapter->hw.media_type == e1000_media_type_copper && 561 adapter->hw.media_type == e1000_media_type_copper &&
412 !e1000_check_mng_mode(&adapter->hw) && 562 !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
413 !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) { 563 !mng_mode_enabled &&
564 !e1000_check_phy_reset_block(&adapter->hw)) {
414 uint16_t mii_reg; 565 uint16_t mii_reg;
415 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 566 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
416 mii_reg |= MII_CR_POWER_DOWN; 567 mii_reg |= MII_CR_POWER_DOWN;
@@ -422,10 +573,8 @@ e1000_down(struct e1000_adapter *adapter)
422void 573void
423e1000_reset(struct e1000_adapter *adapter) 574e1000_reset(struct e1000_adapter *adapter)
424{ 575{
425 struct net_device *netdev = adapter->netdev;
426 uint32_t pba, manc; 576 uint32_t pba, manc;
427 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; 577 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
428 uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
429 578
430 /* Repartition Pba for greater than 9k mtu 579 /* Repartition Pba for greater than 9k mtu
431 * To take effect CTRL.RST is required. 580 * To take effect CTRL.RST is required.
@@ -448,19 +597,12 @@ e1000_reset(struct e1000_adapter *adapter)
448 break; 597 break;
449 } 598 }
450 599
451 if((adapter->hw.mac_type != e1000_82573) && 600 if ((adapter->hw.mac_type != e1000_82573) &&
452 (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) { 601 (adapter->netdev->mtu > E1000_RXBUFFER_8192))
453 pba -= 8; /* allocate more FIFO for Tx */ 602 pba -= 8; /* allocate more FIFO for Tx */
454 /* send an XOFF when there is enough space in the
455 * Rx FIFO to hold one extra full size Rx packet
456 */
457 fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
458 ETHERNET_FCS_SIZE + 1;
459 fc_low_water_mark = fc_high_water_mark + 8;
460 }
461 603
462 604
463 if(adapter->hw.mac_type == e1000_82547) { 605 if (adapter->hw.mac_type == e1000_82547) {
464 adapter->tx_fifo_head = 0; 606 adapter->tx_fifo_head = 0;
465 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 607 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
466 adapter->tx_fifo_size = 608 adapter->tx_fifo_size =
@@ -471,19 +613,21 @@ e1000_reset(struct e1000_adapter *adapter)
471 E1000_WRITE_REG(&adapter->hw, PBA, pba); 613 E1000_WRITE_REG(&adapter->hw, PBA, pba);
472 614
473 /* flow control settings */ 615 /* flow control settings */
474 adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) - 616 /* Set the FC high water mark to 90% of the FIFO size.
475 fc_high_water_mark; 617 * Required to clear last 3 LSB */
476 adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) - 618 fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
477 fc_low_water_mark; 619
620 adapter->hw.fc_high_water = fc_high_water_mark;
621 adapter->hw.fc_low_water = fc_high_water_mark - 8;
478 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; 622 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
479 adapter->hw.fc_send_xon = 1; 623 adapter->hw.fc_send_xon = 1;
480 adapter->hw.fc = adapter->hw.original_fc; 624 adapter->hw.fc = adapter->hw.original_fc;
481 625
482 /* Allow time for pending master requests to run */ 626 /* Allow time for pending master requests to run */
483 e1000_reset_hw(&adapter->hw); 627 e1000_reset_hw(&adapter->hw);
484 if(adapter->hw.mac_type >= e1000_82544) 628 if (adapter->hw.mac_type >= e1000_82544)
485 E1000_WRITE_REG(&adapter->hw, WUC, 0); 629 E1000_WRITE_REG(&adapter->hw, WUC, 0);
486 if(e1000_init_hw(&adapter->hw)) 630 if (e1000_init_hw(&adapter->hw))
487 DPRINTK(PROBE, ERR, "Hardware Error\n"); 631 DPRINTK(PROBE, ERR, "Hardware Error\n");
488 e1000_update_mng_vlan(adapter); 632 e1000_update_mng_vlan(adapter);
489 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 633 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
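
The new watermark math above is worth unpacking: pba is in KB, and pba * 9216 / 10 is exactly 90% of the FIFO in bytes (9216 = 1024 * 9, with the /10 completing the 9/10 factor); masking with 0xFFF8 clears the low three bits the flow-control registers require to be zero. A worked example with an assumed pba of 48 KB:

#include <stdio.h>

int main(void)
{
        unsigned int pba = 48; /* packet buffer allocation, KB (example value) */

        /* 90% of the FIFO in bytes, rounded down to an 8-byte boundary */
        unsigned int fc_high = ((pba * 9216) / 10) & 0xFFF8;
        unsigned int fc_low  = fc_high - 8;

        printf("pba = %u KB -> fc_high_water = %u, fc_low_water = %u\n",
               pba, fc_high, fc_low); /* 44232 and 44224 */
        return 0;
}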
@@ -517,33 +661,31 @@ e1000_probe(struct pci_dev *pdev,
517 struct net_device *netdev; 661 struct net_device *netdev;
518 struct e1000_adapter *adapter; 662 struct e1000_adapter *adapter;
519 unsigned long mmio_start, mmio_len; 663 unsigned long mmio_start, mmio_len;
520 uint32_t ctrl_ext;
521 uint32_t swsm;
522 664
523 static int cards_found = 0; 665 static int cards_found = 0;
524 int i, err, pci_using_dac; 666 int i, err, pci_using_dac;
525 uint16_t eeprom_data; 667 uint16_t eeprom_data;
526 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 668 uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
527 if((err = pci_enable_device(pdev))) 669 if ((err = pci_enable_device(pdev)))
528 return err; 670 return err;
529 671
530 if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { 672 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
531 pci_using_dac = 1; 673 pci_using_dac = 1;
532 } else { 674 } else {
533 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 675 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
534 E1000_ERR("No usable DMA configuration, aborting\n"); 676 E1000_ERR("No usable DMA configuration, aborting\n");
535 return err; 677 return err;
536 } 678 }
537 pci_using_dac = 0; 679 pci_using_dac = 0;
538 } 680 }
539 681
540 if((err = pci_request_regions(pdev, e1000_driver_name))) 682 if ((err = pci_request_regions(pdev, e1000_driver_name)))
541 return err; 683 return err;
542 684
543 pci_set_master(pdev); 685 pci_set_master(pdev);
544 686
545 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 687 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
546 if(!netdev) { 688 if (!netdev) {
547 err = -ENOMEM; 689 err = -ENOMEM;
548 goto err_alloc_etherdev; 690 goto err_alloc_etherdev;
549 } 691 }
@@ -562,15 +704,15 @@ e1000_probe(struct pci_dev *pdev,
562 mmio_len = pci_resource_len(pdev, BAR_0); 704 mmio_len = pci_resource_len(pdev, BAR_0);
563 705
564 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 706 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
565 if(!adapter->hw.hw_addr) { 707 if (!adapter->hw.hw_addr) {
566 err = -EIO; 708 err = -EIO;
567 goto err_ioremap; 709 goto err_ioremap;
568 } 710 }
569 711
570 for(i = BAR_1; i <= BAR_5; i++) { 712 for (i = BAR_1; i <= BAR_5; i++) {
571 if(pci_resource_len(pdev, i) == 0) 713 if (pci_resource_len(pdev, i) == 0)
572 continue; 714 continue;
573 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) { 715 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
574 adapter->hw.io_base = pci_resource_start(pdev, i); 716 adapter->hw.io_base = pci_resource_start(pdev, i);
575 break; 717 break;
576 } 718 }
@@ -607,13 +749,13 @@ e1000_probe(struct pci_dev *pdev,
607 749
608 /* setup the private structure */ 750 /* setup the private structure */
609 751
610 if((err = e1000_sw_init(adapter))) 752 if ((err = e1000_sw_init(adapter)))
611 goto err_sw_init; 753 goto err_sw_init;
612 754
613 if((err = e1000_check_phy_reset_block(&adapter->hw))) 755 if ((err = e1000_check_phy_reset_block(&adapter->hw)))
614 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); 756 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
615 757
616 if(adapter->hw.mac_type >= e1000_82543) { 758 if (adapter->hw.mac_type >= e1000_82543) {
617 netdev->features = NETIF_F_SG | 759 netdev->features = NETIF_F_SG |
618 NETIF_F_HW_CSUM | 760 NETIF_F_HW_CSUM |
619 NETIF_F_HW_VLAN_TX | 761 NETIF_F_HW_VLAN_TX |
@@ -622,16 +764,16 @@ e1000_probe(struct pci_dev *pdev,
622 } 764 }
623 765
624#ifdef NETIF_F_TSO 766#ifdef NETIF_F_TSO
625 if((adapter->hw.mac_type >= e1000_82544) && 767 if ((adapter->hw.mac_type >= e1000_82544) &&
626 (adapter->hw.mac_type != e1000_82547)) 768 (adapter->hw.mac_type != e1000_82547))
627 netdev->features |= NETIF_F_TSO; 769 netdev->features |= NETIF_F_TSO;
628 770
629#ifdef NETIF_F_TSO_IPV6 771#ifdef NETIF_F_TSO_IPV6
630 if(adapter->hw.mac_type > e1000_82547_rev_2) 772 if (adapter->hw.mac_type > e1000_82547_rev_2)
631 netdev->features |= NETIF_F_TSO_IPV6; 773 netdev->features |= NETIF_F_TSO_IPV6;
632#endif 774#endif
633#endif 775#endif
634 if(pci_using_dac) 776 if (pci_using_dac)
635 netdev->features |= NETIF_F_HIGHDMA; 777 netdev->features |= NETIF_F_HIGHDMA;
636 778
637 /* hard_start_xmit is safe against parallel locking */ 779 /* hard_start_xmit is safe against parallel locking */
@@ -639,14 +781,14 @@ e1000_probe(struct pci_dev *pdev,
639 781
640 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); 782 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
641 783
642 /* before reading the EEPROM, reset the controller to 784 /* before reading the EEPROM, reset the controller to
643 * put the device in a known good starting state */ 785 * put the device in a known good starting state */
644 786
645 e1000_reset_hw(&adapter->hw); 787 e1000_reset_hw(&adapter->hw);
646 788
647 /* make sure the EEPROM is good */ 789 /* make sure the EEPROM is good */
648 790
649 if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) { 791 if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
650 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); 792 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
651 err = -EIO; 793 err = -EIO;
652 goto err_eeprom; 794 goto err_eeprom;
@@ -654,12 +796,12 @@ e1000_probe(struct pci_dev *pdev,
654 796
655 /* copy the MAC address out of the EEPROM */ 797 /* copy the MAC address out of the EEPROM */
656 798
657 if(e1000_read_mac_addr(&adapter->hw)) 799 if (e1000_read_mac_addr(&adapter->hw))
658 DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); 800 DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
659 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 801 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
660 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); 802 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
661 803
662 if(!is_valid_ether_addr(netdev->perm_addr)) { 804 if (!is_valid_ether_addr(netdev->perm_addr)) {
663 DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); 805 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
664 err = -EIO; 806 err = -EIO;
665 goto err_eeprom; 807 goto err_eeprom;
@@ -699,7 +841,7 @@ e1000_probe(struct pci_dev *pdev,
699 * enable the ACPI Magic Packet filter 841 * enable the ACPI Magic Packet filter
700 */ 842 */
701 843
702 switch(adapter->hw.mac_type) { 844 switch (adapter->hw.mac_type) {
703 case e1000_82542_rev2_0: 845 case e1000_82542_rev2_0:
704 case e1000_82542_rev2_1: 846 case e1000_82542_rev2_1:
705 case e1000_82543: 847 case e1000_82543:
@@ -712,8 +854,7 @@ e1000_probe(struct pci_dev *pdev,
712 case e1000_82546: 854 case e1000_82546:
713 case e1000_82546_rev_3: 855 case e1000_82546_rev_3:
714 case e1000_82571: 856 case e1000_82571:
 715 if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) 857 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) {
716 && (adapter->hw.media_type == e1000_media_type_copper)) {
717 e1000_read_eeprom(&adapter->hw, 858 e1000_read_eeprom(&adapter->hw,
718 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 859 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
719 break; 860 break;
@@ -724,31 +865,42 @@ e1000_probe(struct pci_dev *pdev,
724 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 865 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
725 break; 866 break;
726 } 867 }
727 if(eeprom_data & eeprom_apme_mask) 868 if (eeprom_data & eeprom_apme_mask)
728 adapter->wol |= E1000_WUFC_MAG; 869 adapter->wol |= E1000_WUFC_MAG;
729 870
871 /* print bus type/speed/width info */
872 {
873 struct e1000_hw *hw = &adapter->hw;
874 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
875 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
876 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
877 ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
878 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
879 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
880 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
881 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
882 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
883 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
884 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
885 "32-bit"));
886 }
887
888 for (i = 0; i < 6; i++)
889 printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
890
730 /* reset the hardware with the new settings */ 891 /* reset the hardware with the new settings */
731 e1000_reset(adapter); 892 e1000_reset(adapter);
732 893
733 /* Let firmware know the driver has taken over */ 894 /* If the controller is 82573 and f/w is AMT, do not set
734 switch(adapter->hw.mac_type) { 895 * DRV_LOAD until the interface is up. For all other cases,
735 case e1000_82571: 896 * let the f/w know that the h/w is now under the control
736 case e1000_82572: 897 * of the driver. */
737 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); 898 if (adapter->hw.mac_type != e1000_82573 ||
738 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, 899 !e1000_check_mng_mode(&adapter->hw))
739 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 900 e1000_get_hw_control(adapter);
740 break;
741 case e1000_82573:
742 swsm = E1000_READ_REG(&adapter->hw, SWSM);
743 E1000_WRITE_REG(&adapter->hw, SWSM,
744 swsm | E1000_SWSM_DRV_LOAD);
745 break;
746 default:
747 break;
748 }
749 901
750 strcpy(netdev->name, "eth%d"); 902 strcpy(netdev->name, "eth%d");
751 if((err = register_netdev(netdev))) 903 if ((err = register_netdev(netdev)))
752 goto err_register; 904 goto err_register;
753 905
754 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); 906 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
@@ -782,47 +934,33 @@ e1000_remove(struct pci_dev *pdev)
782{ 934{
783 struct net_device *netdev = pci_get_drvdata(pdev); 935 struct net_device *netdev = pci_get_drvdata(pdev);
784 struct e1000_adapter *adapter = netdev_priv(netdev); 936 struct e1000_adapter *adapter = netdev_priv(netdev);
785 uint32_t ctrl_ext; 937 uint32_t manc;
786 uint32_t manc, swsm;
787#ifdef CONFIG_E1000_NAPI 938#ifdef CONFIG_E1000_NAPI
788 int i; 939 int i;
789#endif 940#endif
790 941
791 flush_scheduled_work(); 942 flush_scheduled_work();
792 943
793 if(adapter->hw.mac_type >= e1000_82540 && 944 if (adapter->hw.mac_type >= e1000_82540 &&
794 adapter->hw.media_type == e1000_media_type_copper) { 945 adapter->hw.media_type == e1000_media_type_copper) {
795 manc = E1000_READ_REG(&adapter->hw, MANC); 946 manc = E1000_READ_REG(&adapter->hw, MANC);
796 if(manc & E1000_MANC_SMBUS_EN) { 947 if (manc & E1000_MANC_SMBUS_EN) {
797 manc |= E1000_MANC_ARP_EN; 948 manc |= E1000_MANC_ARP_EN;
798 E1000_WRITE_REG(&adapter->hw, MANC, manc); 949 E1000_WRITE_REG(&adapter->hw, MANC, manc);
799 } 950 }
800 } 951 }
801 952
802 switch(adapter->hw.mac_type) { 953 /* Release control of h/w to f/w. If f/w is AMT enabled, this
803 case e1000_82571: 954 * would have already happened in close and is redundant. */
804 case e1000_82572: 955 e1000_release_hw_control(adapter);
805 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
806 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
807 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
808 break;
809 case e1000_82573:
810 swsm = E1000_READ_REG(&adapter->hw, SWSM);
811 E1000_WRITE_REG(&adapter->hw, SWSM,
812 swsm & ~E1000_SWSM_DRV_LOAD);
813 break;
814
815 default:
816 break;
817 }
818 956
819 unregister_netdev(netdev); 957 unregister_netdev(netdev);
820#ifdef CONFIG_E1000_NAPI 958#ifdef CONFIG_E1000_NAPI
821 for (i = 0; i < adapter->num_queues; i++) 959 for (i = 0; i < adapter->num_rx_queues; i++)
822 __dev_put(&adapter->polling_netdev[i]); 960 __dev_put(&adapter->polling_netdev[i]);
823#endif 961#endif
824 962
825 if(!e1000_check_phy_reset_block(&adapter->hw)) 963 if (!e1000_check_phy_reset_block(&adapter->hw))
826 e1000_phy_hw_reset(&adapter->hw); 964 e1000_phy_hw_reset(&adapter->hw);
827 965
828 kfree(adapter->tx_ring); 966 kfree(adapter->tx_ring);
@@ -881,19 +1019,19 @@ e1000_sw_init(struct e1000_adapter *adapter)
881 1019
882 /* identify the MAC */ 1020 /* identify the MAC */
883 1021
884 if(e1000_set_mac_type(hw)) { 1022 if (e1000_set_mac_type(hw)) {
885 DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); 1023 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
886 return -EIO; 1024 return -EIO;
887 } 1025 }
888 1026
889 /* initialize eeprom parameters */ 1027 /* initialize eeprom parameters */
890 1028
891 if(e1000_init_eeprom_params(hw)) { 1029 if (e1000_init_eeprom_params(hw)) {
892 E1000_ERR("EEPROM initialization failed\n"); 1030 E1000_ERR("EEPROM initialization failed\n");
893 return -EIO; 1031 return -EIO;
894 } 1032 }
895 1033
896 switch(hw->mac_type) { 1034 switch (hw->mac_type) {
897 default: 1035 default:
898 break; 1036 break;
899 case e1000_82541: 1037 case e1000_82541:
@@ -912,7 +1050,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
912 1050
913 /* Copper options */ 1051 /* Copper options */
914 1052
915 if(hw->media_type == e1000_media_type_copper) { 1053 if (hw->media_type == e1000_media_type_copper) {
916 hw->mdix = AUTO_ALL_MODES; 1054 hw->mdix = AUTO_ALL_MODES;
917 hw->disable_polarity_correction = FALSE; 1055 hw->disable_polarity_correction = FALSE;
918 hw->master_slave = E1000_MASTER_SLAVE; 1056 hw->master_slave = E1000_MASTER_SLAVE;
@@ -923,15 +1061,34 @@ e1000_sw_init(struct e1000_adapter *adapter)
923 switch (hw->mac_type) { 1061 switch (hw->mac_type) {
924 case e1000_82571: 1062 case e1000_82571:
925 case e1000_82572: 1063 case e1000_82572:
926 adapter->num_queues = 2; 1064 /* These controllers support 2 tx queues, but with a single
1065 * qdisc implementation, multiple tx queues aren't quite as
1066 * interesting. If we can find a logical way of mapping
1067 * flows to a queue, then perhaps we can up the num_tx_queue
1068 * count back to its default. Until then, we run the risk of
1069 * terrible performance due to SACK overload. */
1070 adapter->num_tx_queues = 1;
1071 adapter->num_rx_queues = 2;
927 break; 1072 break;
928 default: 1073 default:
929 adapter->num_queues = 1; 1074 adapter->num_tx_queues = 1;
1075 adapter->num_rx_queues = 1;
930 break; 1076 break;
931 } 1077 }
932 adapter->num_queues = min(adapter->num_queues, num_online_cpus()); 1078 adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
1079 adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
1080 DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
1081 adapter->num_rx_queues,
1082 ((adapter->num_rx_queues == 1)
1083 ? ((num_online_cpus() > 1)
1084 ? "(due to unsupported feature in current adapter)"
1085 : "(due to unsupported system configuration)")
1086 : ""));
1087 DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
1088 adapter->num_tx_queues);
933#else 1089#else
934 adapter->num_queues = 1; 1090 adapter->num_tx_queues = 1;
1091 adapter->num_rx_queues = 1;
935#endif 1092#endif
936 1093
937 if (e1000_alloc_queues(adapter)) { 1094 if (e1000_alloc_queues(adapter)) {
@@ -940,17 +1097,14 @@ e1000_sw_init(struct e1000_adapter *adapter)
940 } 1097 }
941 1098
942#ifdef CONFIG_E1000_NAPI 1099#ifdef CONFIG_E1000_NAPI
943 for (i = 0; i < adapter->num_queues; i++) { 1100 for (i = 0; i < adapter->num_rx_queues; i++) {
944 adapter->polling_netdev[i].priv = adapter; 1101 adapter->polling_netdev[i].priv = adapter;
945 adapter->polling_netdev[i].poll = &e1000_clean; 1102 adapter->polling_netdev[i].poll = &e1000_clean;
946 adapter->polling_netdev[i].weight = 64; 1103 adapter->polling_netdev[i].weight = 64;
947 dev_hold(&adapter->polling_netdev[i]); 1104 dev_hold(&adapter->polling_netdev[i]);
948 set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state); 1105 set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
949 } 1106 }
950#endif 1107 spin_lock_init(&adapter->tx_queue_lock);
951
952#ifdef CONFIG_E1000_MQ
953 e1000_setup_queue_mapping(adapter);
954#endif 1108#endif
955 1109
956 atomic_set(&adapter->irq_sem, 1); 1110 atomic_set(&adapter->irq_sem, 1);
@@ -973,13 +1127,13 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
973{ 1127{
974 int size; 1128 int size;
975 1129
976 size = sizeof(struct e1000_tx_ring) * adapter->num_queues; 1130 size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
977 adapter->tx_ring = kmalloc(size, GFP_KERNEL); 1131 adapter->tx_ring = kmalloc(size, GFP_KERNEL);
978 if (!adapter->tx_ring) 1132 if (!adapter->tx_ring)
979 return -ENOMEM; 1133 return -ENOMEM;
980 memset(adapter->tx_ring, 0, size); 1134 memset(adapter->tx_ring, 0, size);
981 1135
982 size = sizeof(struct e1000_rx_ring) * adapter->num_queues; 1136 size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
983 adapter->rx_ring = kmalloc(size, GFP_KERNEL); 1137 adapter->rx_ring = kmalloc(size, GFP_KERNEL);
984 if (!adapter->rx_ring) { 1138 if (!adapter->rx_ring) {
985 kfree(adapter->tx_ring); 1139 kfree(adapter->tx_ring);
@@ -988,7 +1142,7 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
988 memset(adapter->rx_ring, 0, size); 1142 memset(adapter->rx_ring, 0, size);
989 1143
990#ifdef CONFIG_E1000_NAPI 1144#ifdef CONFIG_E1000_NAPI
991 size = sizeof(struct net_device) * adapter->num_queues; 1145 size = sizeof(struct net_device) * adapter->num_rx_queues;
992 adapter->polling_netdev = kmalloc(size, GFP_KERNEL); 1146 adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
993 if (!adapter->polling_netdev) { 1147 if (!adapter->polling_netdev) {
994 kfree(adapter->tx_ring); 1148 kfree(adapter->tx_ring);
@@ -998,6 +1152,14 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
998 memset(adapter->polling_netdev, 0, size); 1152 memset(adapter->polling_netdev, 0, size);
999#endif 1153#endif
1000 1154
1155#ifdef CONFIG_E1000_MQ
1156 adapter->rx_sched_call_data.func = e1000_rx_schedule;
1157 adapter->rx_sched_call_data.info = adapter->netdev;
1158
1159 adapter->cpu_netdev = alloc_percpu(struct net_device *);
1160 adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
1161#endif
1162
1001 return E1000_SUCCESS; 1163 return E1000_SUCCESS;
1002} 1164}
1003 1165
@@ -1017,14 +1179,15 @@ e1000_setup_queue_mapping(struct e1000_adapter *adapter)
1017 lock_cpu_hotplug(); 1179 lock_cpu_hotplug();
1018 i = 0; 1180 i = 0;
1019 for_each_online_cpu(cpu) { 1181 for_each_online_cpu(cpu) {
1020 *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues]; 1182 *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues];
1021 /* This is incomplete because we'd like to assign separate 1183 /* This is incomplete because we'd like to assign separate
1022 * physical cpus to these netdev polling structures and 1184 * physical cpus to these netdev polling structures and
1023 * avoid saturating a subset of cpus. 1185 * avoid saturating a subset of cpus.
1024 */ 1186 */
1025 if (i < adapter->num_queues) { 1187 if (i < adapter->num_rx_queues) {
1026 *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i]; 1188 *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
1027 adapter->cpu_for_queue[i] = cpu; 1189 adapter->rx_ring[i].cpu = cpu;
1190 cpu_set(cpu, adapter->cpumask);
1028 } else 1191 } else
1029 *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL; 1192 *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
1030 1193
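
The queue-mapping loop above hands each online CPU a tx ring round-robin (modulo num_tx_queues) and gives the first num_rx_queues CPUs a polling netdev of their own. A standalone sketch of the assignment, with CPU iteration simulated by a plain loop and queue counts mirroring the 82571/82572 case (1 tx, 2 rx):

#include <stdio.h>

int main(void)
{
        int num_cpus = 4, num_tx_queues = 1, num_rx_queues = 2;
        int i;

        for (i = 0; i < num_cpus; i++) {
                printf("cpu %d -> tx_ring[%d]", i, i % num_tx_queues);
                if (i < num_rx_queues)
                        printf(", polling_netdev[%d]", i);
                printf("\n");
        }
        return 0;
}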
@@ -1063,14 +1226,20 @@ e1000_open(struct net_device *netdev)
1063 if ((err = e1000_setup_all_rx_resources(adapter))) 1226 if ((err = e1000_setup_all_rx_resources(adapter)))
1064 goto err_setup_rx; 1227 goto err_setup_rx;
1065 1228
1066 if((err = e1000_up(adapter))) 1229 if ((err = e1000_up(adapter)))
1067 goto err_up; 1230 goto err_up;
1068 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 1231 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1069 if((adapter->hw.mng_cookie.status & 1232 if ((adapter->hw.mng_cookie.status &
1070 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1233 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1071 e1000_update_mng_vlan(adapter); 1234 e1000_update_mng_vlan(adapter);
1072 } 1235 }
1073 1236
1237 /* If AMT is enabled, let the firmware know that the network
1238 * interface is now open */
1239 if (adapter->hw.mac_type == e1000_82573 &&
1240 e1000_check_mng_mode(&adapter->hw))
1241 e1000_get_hw_control(adapter);
1242
1074 return E1000_SUCCESS; 1243 return E1000_SUCCESS;
1075 1244
1076err_up: 1245err_up:
@@ -1105,10 +1274,17 @@ e1000_close(struct net_device *netdev)
1105 e1000_free_all_tx_resources(adapter); 1274 e1000_free_all_tx_resources(adapter);
1106 e1000_free_all_rx_resources(adapter); 1275 e1000_free_all_rx_resources(adapter);
1107 1276
1108 if((adapter->hw.mng_cookie.status & 1277 if ((adapter->hw.mng_cookie.status &
1109 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1278 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1110 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1279 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1111 } 1280 }
1281
1282 /* If AMT is enabled, let the firmware know that the network
1283 * interface is now closed */
1284 if (adapter->hw.mac_type == e1000_82573 &&
1285 e1000_check_mng_mode(&adapter->hw))
1286 e1000_release_hw_control(adapter);
1287
1112 return 0; 1288 return 0;
1113} 1289}
1114 1290
@@ -1153,7 +1329,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1153 size = sizeof(struct e1000_buffer) * txdr->count; 1329 size = sizeof(struct e1000_buffer) * txdr->count;
1154 1330
1155 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); 1331 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
1156 if(!txdr->buffer_info) { 1332 if (!txdr->buffer_info) {
1157 DPRINTK(PROBE, ERR, 1333 DPRINTK(PROBE, ERR,
1158 "Unable to allocate memory for the transmit descriptor ring\n"); 1334 "Unable to allocate memory for the transmit descriptor ring\n");
1159 return -ENOMEM; 1335 return -ENOMEM;
@@ -1166,7 +1342,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1166 E1000_ROUNDUP(txdr->size, 4096); 1342 E1000_ROUNDUP(txdr->size, 4096);
1167 1343
1168 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1344 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1169 if(!txdr->desc) { 1345 if (!txdr->desc) {
1170setup_tx_desc_die: 1346setup_tx_desc_die:
1171 vfree(txdr->buffer_info); 1347 vfree(txdr->buffer_info);
1172 DPRINTK(PROBE, ERR, 1348 DPRINTK(PROBE, ERR,
@@ -1182,8 +1358,8 @@ setup_tx_desc_die:
1182 "at %p\n", txdr->size, txdr->desc); 1358 "at %p\n", txdr->size, txdr->desc);
1183 /* Try again, without freeing the previous */ 1359 /* Try again, without freeing the previous */
1184 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1360 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1185 if(!txdr->desc) {
1186 /* Failed allocation, critical failure */ 1361 /* Failed allocation, critical failure */
1362 if (!txdr->desc) {
1187 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1363 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1188 goto setup_tx_desc_die; 1364 goto setup_tx_desc_die;
1189 } 1365 }
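
A detail worth noting in the retry path above: the first, unusable descriptor buffer (olddesc) is freed only after the second allocation exists, so the allocator cannot hand back the same region. A userspace sketch of that ordering, where region_is_usable() is an assumed stand-in for the driver's placement check (e1000 rejects rings that span a 64 KB boundary on affected hardware):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* stand-in check: first and last byte share the same 64 KB page */
    static bool region_is_usable(void *p, size_t len)
    {
        return (((uintptr_t)p ^ ((uintptr_t)p + len - 1)) & ~0xFFFFUL) == 0;
    }

    static void *alloc_ring(size_t len)
    {
        void *desc = malloc(len);
        if (desc && !region_is_usable(desc, len)) {
            void *old = desc;            /* keep the bad buffer... */
            desc = malloc(len);          /* ...so this cannot alias it */
            free(old);                   /* now it is safe to drop */
            if (desc && !region_is_usable(desc, len)) {
                free(desc);              /* critical failure */
                desc = NULL;
            }
        }
        return desc;
    }

    int main(void)
    {
        void *ring = alloc_ring(4096);
        printf("ring %s\n", ring ? "allocated" : "failed");
        free(ring);
        return 0;
    }
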
@@ -1229,7 +1405,7 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1229{ 1405{
1230 int i, err = 0; 1406 int i, err = 0;
1231 1407
1232 for (i = 0; i < adapter->num_queues; i++) { 1408 for (i = 0; i < adapter->num_tx_queues; i++) {
1233 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); 1409 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1234 if (err) { 1410 if (err) {
1235 DPRINTK(PROBE, ERR, 1411 DPRINTK(PROBE, ERR,
@@ -1254,10 +1430,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1254 uint64_t tdba; 1430 uint64_t tdba;
1255 struct e1000_hw *hw = &adapter->hw; 1431 struct e1000_hw *hw = &adapter->hw;
1256 uint32_t tdlen, tctl, tipg, tarc; 1432 uint32_t tdlen, tctl, tipg, tarc;
1433 uint32_t ipgr1, ipgr2;
1257 1434
1258 /* Setup the HW Tx Head and Tail descriptor pointers */ 1435 /* Setup the HW Tx Head and Tail descriptor pointers */
1259 1436
1260 switch (adapter->num_queues) { 1437 switch (adapter->num_tx_queues) {
1261 case 2: 1438 case 2:
1262 tdba = adapter->tx_ring[1].dma; 1439 tdba = adapter->tx_ring[1].dma;
1263 tdlen = adapter->tx_ring[1].count * 1440 tdlen = adapter->tx_ring[1].count *
@@ -1287,22 +1464,26 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1287 1464
1288 /* Set the default values for the Tx Inter Packet Gap timer */ 1465 /* Set the default values for the Tx Inter Packet Gap timer */
1289 1466
1467 if (hw->media_type == e1000_media_type_fiber ||
1468 hw->media_type == e1000_media_type_internal_serdes)
1469 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1470 else
1471 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1472
1290 switch (hw->mac_type) { 1473 switch (hw->mac_type) {
1291 case e1000_82542_rev2_0: 1474 case e1000_82542_rev2_0:
1292 case e1000_82542_rev2_1: 1475 case e1000_82542_rev2_1:
1293 tipg = DEFAULT_82542_TIPG_IPGT; 1476 tipg = DEFAULT_82542_TIPG_IPGT;
1294 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 1477 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1295 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 1478 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1296 break; 1479 break;
1297 default: 1480 default:
1298 if (hw->media_type == e1000_media_type_fiber || 1481 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1299 hw->media_type == e1000_media_type_internal_serdes) 1482 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1300 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 1483 break;
1301 else
1302 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1303 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1304 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1305 } 1484 }
1485 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1486 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1306 E1000_WRITE_REG(hw, TIPG, tipg); 1487 E1000_WRITE_REG(hw, TIPG, tipg);
1307 1488
1308 /* Set the Tx Interrupt Delay register */ 1489 /* Set the Tx Interrupt Delay register */
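
The refactor above splits the media-dependent IPGT value from the IPGR1/IPGR2 fields and then packs all three into a single register write. A worked sketch of the packing; the shift values follow the e1000 headers, the field values are illustrative only:

    #include <stdio.h>
    #include <stdint.h>

    #define TIPG_IPGR1_SHIFT 10   /* per e1000_hw.h */
    #define TIPG_IPGR2_SHIFT 20

    int main(void)
    {
        uint32_t ipgt = 8, ipgr1 = 8, ipgr2 = 6;  /* illustrative values */
        uint32_t tipg = ipgt;

        tipg |= ipgr1 << TIPG_IPGR1_SHIFT;
        tipg |= ipgr2 << TIPG_IPGR2_SHIFT;
        printf("TIPG = 0x%08x\n", tipg);          /* 0x00602008 */
        return 0;
    }
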
@@ -1378,7 +1559,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1378 1559
1379 size = sizeof(struct e1000_ps_page) * rxdr->count; 1560 size = sizeof(struct e1000_ps_page) * rxdr->count;
1380 rxdr->ps_page = kmalloc(size, GFP_KERNEL); 1561 rxdr->ps_page = kmalloc(size, GFP_KERNEL);
1381 if(!rxdr->ps_page) { 1562 if (!rxdr->ps_page) {
1382 vfree(rxdr->buffer_info); 1563 vfree(rxdr->buffer_info);
1383 DPRINTK(PROBE, ERR, 1564 DPRINTK(PROBE, ERR,
1384 "Unable to allocate memory for the receive descriptor ring\n"); 1565 "Unable to allocate memory for the receive descriptor ring\n");
@@ -1388,7 +1569,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1388 1569
1389 size = sizeof(struct e1000_ps_page_dma) * rxdr->count; 1570 size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
1390 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); 1571 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
1391 if(!rxdr->ps_page_dma) { 1572 if (!rxdr->ps_page_dma) {
1392 vfree(rxdr->buffer_info); 1573 vfree(rxdr->buffer_info);
1393 kfree(rxdr->ps_page); 1574 kfree(rxdr->ps_page);
1394 DPRINTK(PROBE, ERR, 1575 DPRINTK(PROBE, ERR,
@@ -1397,7 +1578,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1397 } 1578 }
1398 memset(rxdr->ps_page_dma, 0, size); 1579 memset(rxdr->ps_page_dma, 0, size);
1399 1580
1400 if(adapter->hw.mac_type <= e1000_82547_rev_2) 1581 if (adapter->hw.mac_type <= e1000_82547_rev_2)
1401 desc_len = sizeof(struct e1000_rx_desc); 1582 desc_len = sizeof(struct e1000_rx_desc);
1402 else 1583 else
1403 desc_len = sizeof(union e1000_rx_desc_packet_split); 1584 desc_len = sizeof(union e1000_rx_desc_packet_split);
@@ -1454,6 +1635,8 @@ setup_rx_desc_die:
1454 1635
1455 rxdr->next_to_clean = 0; 1636 rxdr->next_to_clean = 0;
1456 rxdr->next_to_use = 0; 1637 rxdr->next_to_use = 0;
1638 rxdr->rx_skb_top = NULL;
1639 rxdr->rx_skb_prev = NULL;
1457 1640
1458 return 0; 1641 return 0;
1459} 1642}
@@ -1475,7 +1658,7 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1475{ 1658{
1476 int i, err = 0; 1659 int i, err = 0;
1477 1660
1478 for (i = 0; i < adapter->num_queues; i++) { 1661 for (i = 0; i < adapter->num_rx_queues; i++) {
1479 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); 1662 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1480 if (err) { 1663 if (err) {
1481 DPRINTK(PROBE, ERR, 1664 DPRINTK(PROBE, ERR,
@@ -1498,7 +1681,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1498{ 1681{
1499 uint32_t rctl, rfctl; 1682 uint32_t rctl, rfctl;
1500 uint32_t psrctl = 0; 1683 uint32_t psrctl = 0;
1501#ifdef CONFIG_E1000_PACKET_SPLIT 1684#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1502 uint32_t pages = 0; 1685 uint32_t pages = 0;
1503#endif 1686#endif
1504 1687
@@ -1510,7 +1693,10 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1510 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 1693 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1511 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); 1694 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1512 1695
1513 if(adapter->hw.tbi_compatibility_on == 1) 1696 if (adapter->hw.mac_type > e1000_82543)
1697 rctl |= E1000_RCTL_SECRC;
1698
1699 if (adapter->hw.tbi_compatibility_on == 1)
1514 rctl |= E1000_RCTL_SBP; 1700 rctl |= E1000_RCTL_SBP;
1515 else 1701 else
1516 rctl &= ~E1000_RCTL_SBP; 1702 rctl &= ~E1000_RCTL_SBP;
@@ -1521,32 +1707,17 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1521 rctl |= E1000_RCTL_LPE; 1707 rctl |= E1000_RCTL_LPE;
1522 1708
1523 /* Setup buffer sizes */ 1709 /* Setup buffer sizes */
1524 if(adapter->hw.mac_type >= e1000_82571) { 1710 if (adapter->hw.mac_type >= e1000_82571) {
1525 /* We can now specify buffers in 1K increments. 1711 /* We can now specify buffers in 1K increments.
1526 * BSIZE and BSEX are ignored in this case. */ 1712 * BSIZE and BSEX are ignored in this case. */
1527 rctl |= adapter->rx_buffer_len << 0x11; 1713 rctl |= adapter->rx_buffer_len << 0x11;
1528 } else { 1714 } else {
1529 rctl &= ~E1000_RCTL_SZ_4096; 1715 rctl &= ~E1000_RCTL_SZ_4096;
1530 rctl |= E1000_RCTL_BSEX; 1716 rctl &= ~E1000_RCTL_BSEX;
1531 switch (adapter->rx_buffer_len) { 1717 rctl |= E1000_RCTL_SZ_2048;
1532 case E1000_RXBUFFER_2048:
1533 default:
1534 rctl |= E1000_RCTL_SZ_2048;
1535 rctl &= ~E1000_RCTL_BSEX;
1536 break;
1537 case E1000_RXBUFFER_4096:
1538 rctl |= E1000_RCTL_SZ_4096;
1539 break;
1540 case E1000_RXBUFFER_8192:
1541 rctl |= E1000_RCTL_SZ_8192;
1542 break;
1543 case E1000_RXBUFFER_16384:
1544 rctl |= E1000_RCTL_SZ_16384;
1545 break;
1546 }
1547 } 1718 }
1548 1719
1549#ifdef CONFIG_E1000_PACKET_SPLIT 1720#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1550 /* 82571 and greater support packet-split where the protocol 1721 /* 82571 and greater support packet-split where the protocol
1551 * header is placed in skb->data and the packet data is 1722 * header is placed in skb->data and the packet data is
1552 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. 1723 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
@@ -1570,7 +1741,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1570 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); 1741 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
1571 1742
1572 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; 1743 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
1573 1744
1574 psrctl |= adapter->rx_ps_bsize0 >> 1745 psrctl |= adapter->rx_ps_bsize0 >>
1575 E1000_PSRCTL_BSIZE0_SHIFT; 1746 E1000_PSRCTL_BSIZE0_SHIFT;
1576 1747
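
The buffer-size logic collapses because pre-82571 parts are now pinned to 2 KB buffers, while 82571+ encodes the size directly in 1 KB units: shifting the byte count left by 0x11 is identical to shifting the KB count into bit 27, which (assuming the flexible buffer size field sits at RCTL bits 30:27) avoids a division. A worked sketch of that identity:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t rx_buffer_len = 2048;   /* bytes */

        /* len << 17 == (len / 1024) << 27, so no division is needed */
        uint32_t rctl_bits = rx_buffer_len << 0x11;

        printf("size field = %u KB (rctl bits 0x%08x)\n",
               (rctl_bits >> 27) & 0xF, rctl_bits);   /* 2 KB, 0x10000000 */
        return 0;
    }
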
@@ -1632,22 +1803,27 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1632 1803
1633 if (hw->mac_type >= e1000_82540) { 1804 if (hw->mac_type >= e1000_82540) {
1634 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); 1805 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
1635 if(adapter->itr > 1) 1806 if (adapter->itr > 1)
1636 E1000_WRITE_REG(hw, ITR, 1807 E1000_WRITE_REG(hw, ITR,
1637 1000000000 / (adapter->itr * 256)); 1808 1000000000 / (adapter->itr * 256));
1638 } 1809 }
1639 1810
1640 if (hw->mac_type >= e1000_82571) { 1811 if (hw->mac_type >= e1000_82571) {
1641 /* Reset delay timers after every interrupt */
1642 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); 1812 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
1813 /* Reset delay timers after every interrupt */
1643 ctrl_ext |= E1000_CTRL_EXT_CANC; 1814 ctrl_ext |= E1000_CTRL_EXT_CANC;
1815#ifdef CONFIG_E1000_NAPI
1816 /* Auto-Mask interrupts upon ICR read. */
1817 ctrl_ext |= E1000_CTRL_EXT_IAME;
1818#endif
1644 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); 1819 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
1820 E1000_WRITE_REG(hw, IAM, ~0);
1645 E1000_WRITE_FLUSH(hw); 1821 E1000_WRITE_FLUSH(hw);
1646 } 1822 }
1647 1823
1648 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1824 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1649 * the Base and Length of the Rx Descriptor Ring */ 1825 * the Base and Length of the Rx Descriptor Ring */
1650 switch (adapter->num_queues) { 1826 switch (adapter->num_rx_queues) {
1651#ifdef CONFIG_E1000_MQ 1827#ifdef CONFIG_E1000_MQ
1652 case 2: 1828 case 2:
1653 rdba = adapter->rx_ring[1].dma; 1829 rdba = adapter->rx_ring[1].dma;
@@ -1674,7 +1850,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1674 } 1850 }
1675 1851
1676#ifdef CONFIG_E1000_MQ 1852#ifdef CONFIG_E1000_MQ
1677 if (adapter->num_queues > 1) { 1853 if (adapter->num_rx_queues > 1) {
1678 uint32_t random[10]; 1854 uint32_t random[10];
1679 1855
1680 get_random_bytes(&random[0], 40); 1856 get_random_bytes(&random[0], 40);
@@ -1684,7 +1860,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1684 E1000_WRITE_REG(hw, RSSIM, 0); 1860 E1000_WRITE_REG(hw, RSSIM, 0);
1685 } 1861 }
1686 1862
1687 switch (adapter->num_queues) { 1863 switch (adapter->num_rx_queues) {
1688 case 2: 1864 case 2:
1689 default: 1865 default:
1690 reta = 0x00800080; 1866 reta = 0x00800080;
@@ -1716,13 +1892,13 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1716 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1892 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1717 if (hw->mac_type >= e1000_82543) { 1893 if (hw->mac_type >= e1000_82543) {
1718 rxcsum = E1000_READ_REG(hw, RXCSUM); 1894 rxcsum = E1000_READ_REG(hw, RXCSUM);
1719 if(adapter->rx_csum == TRUE) { 1895 if (adapter->rx_csum == TRUE) {
1720 rxcsum |= E1000_RXCSUM_TUOFL; 1896 rxcsum |= E1000_RXCSUM_TUOFL;
1721 1897
1722 /* Enable 82571 IPv4 payload checksum for UDP fragments 1898 /* Enable 82571 IPv4 payload checksum for UDP fragments
1723 * Must be used in conjunction with packet-split. */ 1899 * Must be used in conjunction with packet-split. */
1724 if ((hw->mac_type >= e1000_82571) && 1900 if ((hw->mac_type >= e1000_82571) &&
1725 (adapter->rx_ps_pages)) { 1901 (adapter->rx_ps_pages)) {
1726 rxcsum |= E1000_RXCSUM_IPPCSE; 1902 rxcsum |= E1000_RXCSUM_IPPCSE;
1727 } 1903 }
1728 } else { 1904 } else {
@@ -1776,7 +1952,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1776{ 1952{
1777 int i; 1953 int i;
1778 1954
1779 for (i = 0; i < adapter->num_queues; i++) 1955 for (i = 0; i < adapter->num_tx_queues; i++)
1780 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); 1956 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1781} 1957}
1782 1958
@@ -1784,17 +1960,15 @@ static inline void
1784e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1960e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1785 struct e1000_buffer *buffer_info) 1961 struct e1000_buffer *buffer_info)
1786{ 1962{
1787 if(buffer_info->dma) { 1963 if (buffer_info->dma) {
1788 pci_unmap_page(adapter->pdev, 1964 pci_unmap_page(adapter->pdev,
1789 buffer_info->dma, 1965 buffer_info->dma,
1790 buffer_info->length, 1966 buffer_info->length,
1791 PCI_DMA_TODEVICE); 1967 PCI_DMA_TODEVICE);
1792 buffer_info->dma = 0;
1793 } 1968 }
1794 if(buffer_info->skb) { 1969 if (buffer_info->skb)
1795 dev_kfree_skb_any(buffer_info->skb); 1970 dev_kfree_skb_any(buffer_info->skb);
1796 buffer_info->skb = NULL; 1971 memset(buffer_info, 0, sizeof(struct e1000_buffer));
1797 }
1798} 1972}
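
Replacing the two field-by-field resets with one memset means every member of the tracking struct is cleared together, so a half-reset entry can never look like a live mapping. A sketch with an illustrative subset of struct e1000_buffer (the unmap/free calls are stubbed as comments):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct tx_buffer {                /* illustrative subset of e1000_buffer */
        void     *skb;
        uint64_t  dma;
        uint32_t  length;
        unsigned long time_stamp;
    };

    static void unmap_and_free(struct tx_buffer *b)
    {
        if (b->dma) {
            /* pci_unmap_page(...) would go here */
        }
        if (b->skb) {
            /* dev_kfree_skb_any(b->skb) would go here */
        }
        /* one memset resets every field, not just dma and skb */
        memset(b, 0, sizeof(*b));
    }

    int main(void)
    {
        struct tx_buffer b = { .skb = (void *)1, .dma = 0x1000, .length = 64 };
        unmap_and_free(&b);
        printf("dma=%llu len=%u skb=%p\n",
               (unsigned long long)b.dma, b.length, b.skb);
        return 0;
    }
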
1799 1973
1800/** 1974/**
@@ -1813,7 +1987,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter,
1813 1987
1814 /* Free all the Tx ring sk_buffs */ 1988 /* Free all the Tx ring sk_buffs */
1815 1989
1816 for(i = 0; i < tx_ring->count; i++) { 1990 for (i = 0; i < tx_ring->count; i++) {
1817 buffer_info = &tx_ring->buffer_info[i]; 1991 buffer_info = &tx_ring->buffer_info[i];
1818 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1992 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1819 } 1993 }
@@ -1843,7 +2017,7 @@ e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
1843{ 2017{
1844 int i; 2018 int i;
1845 2019
1846 for (i = 0; i < adapter->num_queues; i++) 2020 for (i = 0; i < adapter->num_tx_queues; i++)
1847 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 2021 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1848} 2022}
1849 2023
@@ -1887,7 +2061,7 @@ e1000_free_all_rx_resources(struct e1000_adapter *adapter)
1887{ 2061{
1888 int i; 2062 int i;
1889 2063
1890 for (i = 0; i < adapter->num_queues; i++) 2064 for (i = 0; i < adapter->num_rx_queues; i++)
1891 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 2065 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
1892} 2066}
1893 2067
@@ -1909,12 +2083,9 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
1909 unsigned int i, j; 2083 unsigned int i, j;
1910 2084
1911 /* Free all the Rx ring sk_buffs */ 2085 /* Free all the Rx ring sk_buffs */
1912 2086 for (i = 0; i < rx_ring->count; i++) {
1913 for(i = 0; i < rx_ring->count; i++) {
1914 buffer_info = &rx_ring->buffer_info[i]; 2087 buffer_info = &rx_ring->buffer_info[i];
1915 if(buffer_info->skb) { 2088 if (buffer_info->skb) {
1916 ps_page = &rx_ring->ps_page[i];
1917 ps_page_dma = &rx_ring->ps_page_dma[i];
1918 pci_unmap_single(pdev, 2089 pci_unmap_single(pdev,
1919 buffer_info->dma, 2090 buffer_info->dma,
1920 buffer_info->length, 2091 buffer_info->length,
@@ -1922,19 +2093,30 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
1922 2093
1923 dev_kfree_skb(buffer_info->skb); 2094 dev_kfree_skb(buffer_info->skb);
1924 buffer_info->skb = NULL; 2095 buffer_info->skb = NULL;
1925
1926 for(j = 0; j < adapter->rx_ps_pages; j++) {
1927 if(!ps_page->ps_page[j]) break;
1928 pci_unmap_single(pdev,
1929 ps_page_dma->ps_page_dma[j],
1930 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1931 ps_page_dma->ps_page_dma[j] = 0;
1932 put_page(ps_page->ps_page[j]);
1933 ps_page->ps_page[j] = NULL;
1934 }
1935 } 2096 }
2097 ps_page = &rx_ring->ps_page[i];
2098 ps_page_dma = &rx_ring->ps_page_dma[i];
2099 for (j = 0; j < adapter->rx_ps_pages; j++) {
2100 if (!ps_page->ps_page[j]) break;
2101 pci_unmap_page(pdev,
2102 ps_page_dma->ps_page_dma[j],
2103 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2104 ps_page_dma->ps_page_dma[j] = 0;
2105 put_page(ps_page->ps_page[j]);
2106 ps_page->ps_page[j] = NULL;
2107 }
2108 }
2109
2110 /* there also may be some cached data in our adapter */
2111 if (rx_ring->rx_skb_top) {
2112 dev_kfree_skb(rx_ring->rx_skb_top);
2113
2114 /* rx_skb_prev will be wiped out by rx_skb_top */
2115 rx_ring->rx_skb_top = NULL;
2116 rx_ring->rx_skb_prev = NULL;
1936 } 2117 }
1937 2118
2119
1938 size = sizeof(struct e1000_buffer) * rx_ring->count; 2120 size = sizeof(struct e1000_buffer) * rx_ring->count;
1939 memset(rx_ring->buffer_info, 0, size); 2121 memset(rx_ring->buffer_info, 0, size);
1940 size = sizeof(struct e1000_ps_page) * rx_ring->count; 2122 size = sizeof(struct e1000_ps_page) * rx_ring->count;
@@ -1963,7 +2145,7 @@ e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
1963{ 2145{
1964 int i; 2146 int i;
1965 2147
1966 for (i = 0; i < adapter->num_queues; i++) 2148 for (i = 0; i < adapter->num_rx_queues; i++)
1967 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2149 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1968} 2150}
1969 2151
@@ -1984,7 +2166,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
1984 E1000_WRITE_FLUSH(&adapter->hw); 2166 E1000_WRITE_FLUSH(&adapter->hw);
1985 mdelay(5); 2167 mdelay(5);
1986 2168
1987 if(netif_running(netdev)) 2169 if (netif_running(netdev))
1988 e1000_clean_all_rx_rings(adapter); 2170 e1000_clean_all_rx_rings(adapter);
1989} 2171}
1990 2172
@@ -2000,12 +2182,14 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
2000 E1000_WRITE_FLUSH(&adapter->hw); 2182 E1000_WRITE_FLUSH(&adapter->hw);
2001 mdelay(5); 2183 mdelay(5);
2002 2184
2003 if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) 2185 if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
2004 e1000_pci_set_mwi(&adapter->hw); 2186 e1000_pci_set_mwi(&adapter->hw);
2005 2187
2006 if(netif_running(netdev)) { 2188 if (netif_running(netdev)) {
2189 /* No need to loop, because 82542 supports only 1 queue */
2190 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2007 e1000_configure_rx(adapter); 2191 e1000_configure_rx(adapter);
2008 e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]); 2192 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2009 } 2193 }
2010} 2194}
2011 2195
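
The refill now goes through the adapter's alloc_rx_buf hook and asks for exactly E1000_DESC_UNUSED(ring) buffers. That macro counts free slots in the circular ring from its two cursors, keeping one slot permanently empty so the full and empty states stay distinguishable; a sketch mirroring its shape:

    #include <stdio.h>

    /* unused slots in a circular ring; one entry stays empty on purpose */
    static int desc_unused(int next_to_clean, int next_to_use, int count)
    {
        return (next_to_clean > next_to_use ? 0 : count)
               + next_to_clean - next_to_use - 1;
    }

    int main(void)
    {
        /* 256-entry ring: hw consumed up to 10, sw filled up to 200 */
        printf("%d\n", desc_unused(10, 200, 256));  /* 65 free slots */
        printf("%d\n", desc_unused(200, 10, 256));  /* 189 free slots */
        return 0;
    }
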
@@ -2023,12 +2207,12 @@ e1000_set_mac(struct net_device *netdev, void *p)
2023 struct e1000_adapter *adapter = netdev_priv(netdev); 2207 struct e1000_adapter *adapter = netdev_priv(netdev);
2024 struct sockaddr *addr = p; 2208 struct sockaddr *addr = p;
2025 2209
2026 if(!is_valid_ether_addr(addr->sa_data)) 2210 if (!is_valid_ether_addr(addr->sa_data))
2027 return -EADDRNOTAVAIL; 2211 return -EADDRNOTAVAIL;
2028 2212
2029 /* 82542 2.0 needs to be in reset to write receive address registers */ 2213 /* 82542 2.0 needs to be in reset to write receive address registers */
2030 2214
2031 if(adapter->hw.mac_type == e1000_82542_rev2_0) 2215 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2032 e1000_enter_82542_rst(adapter); 2216 e1000_enter_82542_rst(adapter);
2033 2217
2034 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2218 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -2042,17 +2226,17 @@ e1000_set_mac(struct net_device *netdev, void *p)
2042 /* activate the work around */ 2226 /* activate the work around */
2043 adapter->hw.laa_is_present = 1; 2227 adapter->hw.laa_is_present = 1;
2044 2228
2045 /* Hold a copy of the LAA in RAR[14] This is done so that 2229 /* Hold a copy of the LAA in RAR[14] This is done so that
2046 * between the time RAR[0] gets clobbered and the time it 2230 * between the time RAR[0] gets clobbered and the time it
2047 * gets fixed (in e1000_watchdog), the actual LAA is in one 2231 * gets fixed (in e1000_watchdog), the actual LAA is in one
2048 * of the RARs and no incoming packets directed to this port 2232 * of the RARs and no incoming packets directed to this port
2049 * are dropped. Eventually the LAA will be in RAR[0] and 2233 * are dropped. Eventually the LAA will be in RAR[0] and
2050 * RAR[14] */ 2234 * RAR[14] */
2051 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 2235 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
2052 E1000_RAR_ENTRIES - 1); 2236 E1000_RAR_ENTRIES - 1);
2053 } 2237 }
2054 2238
2055 if(adapter->hw.mac_type == e1000_82542_rev2_0) 2239 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2056 e1000_leave_82542_rst(adapter); 2240 e1000_leave_82542_rst(adapter);
2057 2241
2058 return 0; 2242 return 0;
@@ -2086,9 +2270,9 @@ e1000_set_multi(struct net_device *netdev)
2086 2270
2087 rctl = E1000_READ_REG(hw, RCTL); 2271 rctl = E1000_READ_REG(hw, RCTL);
2088 2272
2089 if(netdev->flags & IFF_PROMISC) { 2273 if (netdev->flags & IFF_PROMISC) {
2090 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2274 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2091 } else if(netdev->flags & IFF_ALLMULTI) { 2275 } else if (netdev->flags & IFF_ALLMULTI) {
2092 rctl |= E1000_RCTL_MPE; 2276 rctl |= E1000_RCTL_MPE;
2093 rctl &= ~E1000_RCTL_UPE; 2277 rctl &= ~E1000_RCTL_UPE;
2094 } else { 2278 } else {
@@ -2099,7 +2283,7 @@ e1000_set_multi(struct net_device *netdev)
2099 2283
2100 /* 82542 2.0 needs to be in reset to write receive address registers */ 2284 /* 82542 2.0 needs to be in reset to write receive address registers */
2101 2285
2102 if(hw->mac_type == e1000_82542_rev2_0) 2286 if (hw->mac_type == e1000_82542_rev2_0)
2103 e1000_enter_82542_rst(adapter); 2287 e1000_enter_82542_rst(adapter);
2104 2288
2105 /* load the first 14 multicast address into the exact filters 1-14 2289 /* load the first 14 multicast address into the exact filters 1-14
@@ -2109,7 +2293,7 @@ e1000_set_multi(struct net_device *netdev)
2109 */ 2293 */
2110 mc_ptr = netdev->mc_list; 2294 mc_ptr = netdev->mc_list;
2111 2295
2112 for(i = 1; i < rar_entries; i++) { 2296 for (i = 1; i < rar_entries; i++) {
2113 if (mc_ptr) { 2297 if (mc_ptr) {
2114 e1000_rar_set(hw, mc_ptr->dmi_addr, i); 2298 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
2115 mc_ptr = mc_ptr->next; 2299 mc_ptr = mc_ptr->next;
@@ -2121,17 +2305,17 @@ e1000_set_multi(struct net_device *netdev)
2121 2305
2122 /* clear the old settings from the multicast hash table */ 2306 /* clear the old settings from the multicast hash table */
2123 2307
2124 for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) 2308 for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
2125 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 2309 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
2126 2310
2127 /* load any remaining addresses into the hash table */ 2311 /* load any remaining addresses into the hash table */
2128 2312
2129 for(; mc_ptr; mc_ptr = mc_ptr->next) { 2313 for (; mc_ptr; mc_ptr = mc_ptr->next) {
2130 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr); 2314 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
2131 e1000_mta_set(hw, hash_value); 2315 e1000_mta_set(hw, hash_value);
2132 } 2316 }
2133 2317
2134 if(hw->mac_type == e1000_82542_rev2_0) 2318 if (hw->mac_type == e1000_82542_rev2_0)
2135 e1000_leave_82542_rst(adapter); 2319 e1000_leave_82542_rst(adapter);
2136} 2320}
2137 2321
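
The filtering strategy in this function: the first 14 multicast addresses land in exact-match RAR slots 1..14, and any overflow is folded into the 4096-bit MTA hash table. A sketch of that split; toy_hash() is an assumed stand-in for the hardware's CRC-derived e1000_hash_mc_addr():

    #include <stdio.h>
    #include <stdint.h>

    #define RAR_ENTRIES 15        /* slot 0 holds the station address */
    #define MTA_BITS    4096      /* 128 x 32-bit registers */

    static unsigned toy_hash(const uint8_t *mac)   /* stand-in, not the CRC */
    {
        return (mac[4] << 4 | mac[5] >> 4) & (MTA_BITS - 1);
    }

    int main(void)
    {
        uint8_t macs[20][6] = { { 0x01, 0x00, 0x5e } }; /* illustrative */
        uint32_t mta[MTA_BITS / 32] = { 0 };
        int i, rar = 1;

        for (i = 0; i < 20; i++) {
            if (rar < RAR_ENTRIES) {
                rar++;                          /* exact-match filter */
            } else {
                unsigned h = toy_hash(macs[i]);
                mta[h >> 5] |= 1u << (h & 31);  /* hash table bucket */
            }
        }
        printf("%d exact filters, rest hashed\n", rar - 1);
        return 0;
    }
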
@@ -2157,8 +2341,8 @@ e1000_82547_tx_fifo_stall(unsigned long data)
2157 struct net_device *netdev = adapter->netdev; 2341 struct net_device *netdev = adapter->netdev;
2158 uint32_t tctl; 2342 uint32_t tctl;
2159 2343
2160 if(atomic_read(&adapter->tx_fifo_stall)) { 2344 if (atomic_read(&adapter->tx_fifo_stall)) {
2161 if((E1000_READ_REG(&adapter->hw, TDT) == 2345 if ((E1000_READ_REG(&adapter->hw, TDT) ==
2162 E1000_READ_REG(&adapter->hw, TDH)) && 2346 E1000_READ_REG(&adapter->hw, TDH)) &&
2163 (E1000_READ_REG(&adapter->hw, TDFT) == 2347 (E1000_READ_REG(&adapter->hw, TDFT) ==
2164 E1000_READ_REG(&adapter->hw, TDFH)) && 2348 E1000_READ_REG(&adapter->hw, TDFH)) &&
@@ -2204,24 +2388,24 @@ static void
2204e1000_watchdog_task(struct e1000_adapter *adapter) 2388e1000_watchdog_task(struct e1000_adapter *adapter)
2205{ 2389{
2206 struct net_device *netdev = adapter->netdev; 2390 struct net_device *netdev = adapter->netdev;
2207 struct e1000_tx_ring *txdr = &adapter->tx_ring[0]; 2391 struct e1000_tx_ring *txdr = adapter->tx_ring;
2208 uint32_t link; 2392 uint32_t link;
2209 2393
2210 e1000_check_for_link(&adapter->hw); 2394 e1000_check_for_link(&adapter->hw);
2211 if (adapter->hw.mac_type == e1000_82573) { 2395 if (adapter->hw.mac_type == e1000_82573) {
2212 e1000_enable_tx_pkt_filtering(&adapter->hw); 2396 e1000_enable_tx_pkt_filtering(&adapter->hw);
2213 if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) 2397 if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
2214 e1000_update_mng_vlan(adapter); 2398 e1000_update_mng_vlan(adapter);
2215 } 2399 }
2216 2400
2217 if((adapter->hw.media_type == e1000_media_type_internal_serdes) && 2401 if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
2218 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) 2402 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
2219 link = !adapter->hw.serdes_link_down; 2403 link = !adapter->hw.serdes_link_down;
2220 else 2404 else
2221 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; 2405 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
2222 2406
2223 if(link) { 2407 if (link) {
2224 if(!netif_carrier_ok(netdev)) { 2408 if (!netif_carrier_ok(netdev)) {
2225 e1000_get_speed_and_duplex(&adapter->hw, 2409 e1000_get_speed_and_duplex(&adapter->hw,
2226 &adapter->link_speed, 2410 &adapter->link_speed,
2227 &adapter->link_duplex); 2411 &adapter->link_duplex);
@@ -2231,13 +2415,28 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2231 adapter->link_duplex == FULL_DUPLEX ? 2415 adapter->link_duplex == FULL_DUPLEX ?
2232 "Full Duplex" : "Half Duplex"); 2416 "Full Duplex" : "Half Duplex");
2233 2417
2418 /* tweak tx_queue_len according to speed/duplex */
2419 netdev->tx_queue_len = adapter->tx_queue_len;
2420 adapter->tx_timeout_factor = 1;
2421 if (adapter->link_duplex == HALF_DUPLEX) {
2422 switch (adapter->link_speed) {
2423 case SPEED_10:
2424 netdev->tx_queue_len = 10;
2425 adapter->tx_timeout_factor = 8;
2426 break;
2427 case SPEED_100:
2428 netdev->tx_queue_len = 100;
2429 break;
2430 }
2431 }
2432
2234 netif_carrier_on(netdev); 2433 netif_carrier_on(netdev);
2235 netif_wake_queue(netdev); 2434 netif_wake_queue(netdev);
2236 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); 2435 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
2237 adapter->smartspeed = 0; 2436 adapter->smartspeed = 0;
2238 } 2437 }
2239 } else { 2438 } else {
2240 if(netif_carrier_ok(netdev)) { 2439 if (netif_carrier_ok(netdev)) {
2241 adapter->link_speed = 0; 2440 adapter->link_speed = 0;
2242 adapter->link_duplex = 0; 2441 adapter->link_duplex = 0;
2243 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 2442 DPRINTK(LINK, INFO, "NIC Link is Down\n");
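
The new block above shortens the qdisc queue and stretches the hang-detection timeout on slow half-duplex links, where collisions make legitimately slow transmits look like hangs to the watchdog. The selection reduces to a small table:

    #include <stdio.h>

    static void tune_for_link(int speed, int half_duplex,
                              int *queue_len, int *timeout_factor)
    {
        *queue_len = 1000;        /* illustrative full-speed default */
        *timeout_factor = 1;
        if (half_duplex) {
            switch (speed) {
            case 10:
                *queue_len = 10;
                *timeout_factor = 8;  /* transmits may stall much longer */
                break;
            case 100:
                *queue_len = 100;
                break;
            }
        }
    }

    int main(void)
    {
        int qlen, tf;
        tune_for_link(10, 1, &qlen, &tf);
        printf("10 Mb half duplex: qlen=%d timeout x%d\n", qlen, tf);
        return 0;
    }
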
@@ -2263,7 +2462,10 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2263 2462
2264 e1000_update_adaptive(&adapter->hw); 2463 e1000_update_adaptive(&adapter->hw);
2265 2464
2266 if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) { 2465#ifdef CONFIG_E1000_MQ
2466 txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
2467#endif
2468 if (!netif_carrier_ok(netdev)) {
2267 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2469 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2268 /* We've lost link, so the controller stops DMA, 2470 /* We've lost link, so the controller stops DMA,
2269 * but we've got queued Tx work that's never going 2471 * but we've got queued Tx work that's never going
@@ -2274,12 +2476,12 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2274 } 2476 }
2275 2477
2276 /* Dynamic mode for Interrupt Throttle Rate (ITR) */ 2478 /* Dynamic mode for Interrupt Throttle Rate (ITR) */
2277 if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { 2479 if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
2278 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total 2480 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
2279 * asymmetrical Tx or Rx gets ITR=8000; everyone 2481 * asymmetrical Tx or Rx gets ITR=8000; everyone
2280 * else is between 2000-8000. */ 2482 * else is between 2000-8000. */
2281 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; 2483 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
2282 uint32_t dif = (adapter->gotcl > adapter->gorcl ? 2484 uint32_t dif = (adapter->gotcl > adapter->gorcl ?
2283 adapter->gotcl - adapter->gorcl : 2485 adapter->gotcl - adapter->gorcl :
2284 adapter->gorcl - adapter->gotcl) / 10000; 2486 adapter->gorcl - adapter->gotcl) / 10000;
2285 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2487 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
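
The dynamic ITR heuristic maps traffic symmetry onto 2000-8000 interrupts/s: symmetric TX/RX (dif == 0) gets 2000, fully one-sided traffic gets 8000. The ITR register itself counts 256 ns units, hence the 1000000000 / (itr * 256) conversion seen in e1000_configure_rx. Worked numbers (the octet counts are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t gotcl = 500000, gorcl = 300000;   /* tx/rx octets this period */
        uint32_t goc = (gotcl + gorcl) / 10000;    /* 80 */
        uint32_t dif = (gotcl > gorcl ? gotcl - gorcl
                                      : gorcl - gotcl) / 10000;  /* 20 */
        uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

        printf("target %u ints/s\n", itr);                   /* 3500 */
        printf("ITR register = %u\n", 1000000000 / (itr * 256));  /* 1116 */
        return 0;
    }
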
@@ -2292,7 +2494,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2292 /* Force detection of hung controller every watchdog period */ 2494 /* Force detection of hung controller every watchdog period */
2293 adapter->detect_tx_hung = TRUE; 2495 adapter->detect_tx_hung = TRUE;
2294 2496
2295 /* With 82571 controllers, LAA may be overwritten due to controller 2497 /* With 82571 controllers, LAA may be overwritten due to controller
2296 * reset from the other port. Set the appropriate LAA in RAR[0] */ 2498 * reset from the other port. Set the appropriate LAA in RAR[0] */
2297 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) 2499 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
2298 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); 2500 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
@@ -2314,13 +2516,14 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2314{ 2516{
2315#ifdef NETIF_F_TSO 2517#ifdef NETIF_F_TSO
2316 struct e1000_context_desc *context_desc; 2518 struct e1000_context_desc *context_desc;
2519 struct e1000_buffer *buffer_info;
2317 unsigned int i; 2520 unsigned int i;
2318 uint32_t cmd_length = 0; 2521 uint32_t cmd_length = 0;
2319 uint16_t ipcse = 0, tucse, mss; 2522 uint16_t ipcse = 0, tucse, mss;
2320 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 2523 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
2321 int err; 2524 int err;
2322 2525
2323 if(skb_shinfo(skb)->tso_size) { 2526 if (skb_shinfo(skb)->tso_size) {
2324 if (skb_header_cloned(skb)) { 2527 if (skb_header_cloned(skb)) {
2325 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2528 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2326 if (err) 2529 if (err)
@@ -2329,7 +2532,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2329 2532
2330 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 2533 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2331 mss = skb_shinfo(skb)->tso_size; 2534 mss = skb_shinfo(skb)->tso_size;
2332 if(skb->protocol == ntohs(ETH_P_IP)) { 2535 if (skb->protocol == ntohs(ETH_P_IP)) {
2333 skb->nh.iph->tot_len = 0; 2536 skb->nh.iph->tot_len = 0;
2334 skb->nh.iph->check = 0; 2537 skb->nh.iph->check = 0;
2335 skb->h.th->check = 2538 skb->h.th->check =
@@ -2341,7 +2544,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2341 cmd_length = E1000_TXD_CMD_IP; 2544 cmd_length = E1000_TXD_CMD_IP;
2342 ipcse = skb->h.raw - skb->data - 1; 2545 ipcse = skb->h.raw - skb->data - 1;
2343#ifdef NETIF_F_TSO_IPV6 2546#ifdef NETIF_F_TSO_IPV6
2344 } else if(skb->protocol == ntohs(ETH_P_IPV6)) { 2547 } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
2345 skb->nh.ipv6h->payload_len = 0; 2548 skb->nh.ipv6h->payload_len = 0;
2346 skb->h.th->check = 2549 skb->h.th->check =
2347 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, 2550 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
@@ -2363,6 +2566,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2363 2566
2364 i = tx_ring->next_to_use; 2567 i = tx_ring->next_to_use;
2365 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2568 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2569 buffer_info = &tx_ring->buffer_info[i];
2366 2570
2367 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2571 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2368 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2572 context_desc->lower_setup.ip_fields.ipcso = ipcso;
@@ -2374,14 +2578,16 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2374 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2578 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2375 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2579 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2376 2580
2581 buffer_info->time_stamp = jiffies;
2582
2377 if (++i == tx_ring->count) i = 0; 2583 if (++i == tx_ring->count) i = 0;
2378 tx_ring->next_to_use = i; 2584 tx_ring->next_to_use = i;
2379 2585
2380 return 1; 2586 return TRUE;
2381 } 2587 }
2382#endif 2588#endif
2383 2589
2384 return 0; 2590 return FALSE;
2385} 2591}
2386 2592
2387static inline boolean_t 2593static inline boolean_t
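
Both the TSO context setup above and the 82571/2 pull workaround later in the file depend on the same header-length computation: the byte offset of the TCP header within the frame, plus the TCP header size taken from doff, which counts 32-bit words. A worked sketch:

    #include <stdio.h>

    int main(void)
    {
        unsigned eth_ip_len = 14 + 20;  /* skb->h.raw - skb->data */
        unsigned tcp_doff = 5;          /* TCP data offset, 32-bit words */
        unsigned hdr_len = eth_ip_len + (tcp_doff << 2);

        printf("hdr_len = %u\n", hdr_len);  /* 54: Ethernet + IPv4 + TCP */
        return 0;
    }
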
@@ -2389,13 +2595,15 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2389 struct sk_buff *skb) 2595 struct sk_buff *skb)
2390{ 2596{
2391 struct e1000_context_desc *context_desc; 2597 struct e1000_context_desc *context_desc;
2598 struct e1000_buffer *buffer_info;
2392 unsigned int i; 2599 unsigned int i;
2393 uint8_t css; 2600 uint8_t css;
2394 2601
2395 if(likely(skb->ip_summed == CHECKSUM_HW)) { 2602 if (likely(skb->ip_summed == CHECKSUM_HW)) {
2396 css = skb->h.raw - skb->data; 2603 css = skb->h.raw - skb->data;
2397 2604
2398 i = tx_ring->next_to_use; 2605 i = tx_ring->next_to_use;
2606 buffer_info = &tx_ring->buffer_info[i];
2399 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2607 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2400 2608
2401 context_desc->upper_setup.tcp_fields.tucss = css; 2609 context_desc->upper_setup.tcp_fields.tucss = css;
@@ -2404,6 +2612,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2404 context_desc->tcp_seg_setup.data = 0; 2612 context_desc->tcp_seg_setup.data = 0;
2405 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); 2613 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
2406 2614
2615 buffer_info->time_stamp = jiffies;
2616
2407 if (unlikely(++i == tx_ring->count)) i = 0; 2617 if (unlikely(++i == tx_ring->count)) i = 0;
2408 tx_ring->next_to_use = i; 2618 tx_ring->next_to_use = i;
2409 2619
@@ -2429,7 +2639,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2429 2639
2430 i = tx_ring->next_to_use; 2640 i = tx_ring->next_to_use;
2431 2641
2432 while(len) { 2642 while (len) {
2433 buffer_info = &tx_ring->buffer_info[i]; 2643 buffer_info = &tx_ring->buffer_info[i];
2434 size = min(len, max_per_txd); 2644 size = min(len, max_per_txd);
2435#ifdef NETIF_F_TSO 2645#ifdef NETIF_F_TSO
@@ -2445,7 +2655,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2445 2655
2446 /* Workaround for premature desc write-backs 2656 /* Workaround for premature desc write-backs
2447 * in TSO mode. Append 4-byte sentinel desc */ 2657 * in TSO mode. Append 4-byte sentinel desc */
2448 if(unlikely(mss && !nr_frags && size == len && size > 8)) 2658 if (unlikely(mss && !nr_frags && size == len && size > 8))
2449 size -= 4; 2659 size -= 4;
2450#endif 2660#endif
2451 /* work-around for errata 10 and it applies 2661 /* work-around for errata 10 and it applies
@@ -2453,13 +2663,13 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2453 * The fix is to make sure that the first descriptor of a 2663 * The fix is to make sure that the first descriptor of a
2454 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2664 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2455 */ 2665 */
2456 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 2666 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2457 (size > 2015) && count == 0)) 2667 (size > 2015) && count == 0))
2458 size = 2015; 2668 size = 2015;
2459 2669
2460 /* Workaround for potential 82544 hang in PCI-X. Avoid 2670 /* Workaround for potential 82544 hang in PCI-X. Avoid
2461 * terminating buffers within evenly-aligned dwords. */ 2671 * terminating buffers within evenly-aligned dwords. */
2462 if(unlikely(adapter->pcix_82544 && 2672 if (unlikely(adapter->pcix_82544 &&
2463 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2673 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2464 size > 4)) 2674 size > 4))
2465 size -= 4; 2675 size -= 4;
@@ -2475,29 +2685,29 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2475 len -= size; 2685 len -= size;
2476 offset += size; 2686 offset += size;
2477 count++; 2687 count++;
2478 if(unlikely(++i == tx_ring->count)) i = 0; 2688 if (unlikely(++i == tx_ring->count)) i = 0;
2479 } 2689 }
2480 2690
2481 for(f = 0; f < nr_frags; f++) { 2691 for (f = 0; f < nr_frags; f++) {
2482 struct skb_frag_struct *frag; 2692 struct skb_frag_struct *frag;
2483 2693
2484 frag = &skb_shinfo(skb)->frags[f]; 2694 frag = &skb_shinfo(skb)->frags[f];
2485 len = frag->size; 2695 len = frag->size;
2486 offset = frag->page_offset; 2696 offset = frag->page_offset;
2487 2697
2488 while(len) { 2698 while (len) {
2489 buffer_info = &tx_ring->buffer_info[i]; 2699 buffer_info = &tx_ring->buffer_info[i];
2490 size = min(len, max_per_txd); 2700 size = min(len, max_per_txd);
2491#ifdef NETIF_F_TSO 2701#ifdef NETIF_F_TSO
2492 /* Workaround for premature desc write-backs 2702 /* Workaround for premature desc write-backs
2493 * in TSO mode. Append 4-byte sentinel desc */ 2703 * in TSO mode. Append 4-byte sentinel desc */
2494 if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 2704 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2495 size -= 4; 2705 size -= 4;
2496#endif 2706#endif
2497 /* Workaround for potential 82544 hang in PCI-X. 2707 /* Workaround for potential 82544 hang in PCI-X.
2498 * Avoid terminating buffers within evenly-aligned 2708 * Avoid terminating buffers within evenly-aligned
2499 * dwords. */ 2709 * dwords. */
2500 if(unlikely(adapter->pcix_82544 && 2710 if (unlikely(adapter->pcix_82544 &&
2501 !((unsigned long)(frag->page+offset+size-1) & 4) && 2711 !((unsigned long)(frag->page+offset+size-1) & 4) &&
2502 size > 4)) 2712 size > 4))
2503 size -= 4; 2713 size -= 4;
@@ -2514,7 +2724,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2514 len -= size; 2724 len -= size;
2515 offset += size; 2725 offset += size;
2516 count++; 2726 count++;
2517 if(unlikely(++i == tx_ring->count)) i = 0; 2727 if (unlikely(++i == tx_ring->count)) i = 0;
2518 } 2728 }
2519 } 2729 }
2520 2730
@@ -2534,35 +2744,35 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2534 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2744 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2535 unsigned int i; 2745 unsigned int i;
2536 2746
2537 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2747 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2538 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2748 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2539 E1000_TXD_CMD_TSE; 2749 E1000_TXD_CMD_TSE;
2540 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2750 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2541 2751
2542 if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2752 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2543 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2753 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2544 } 2754 }
2545 2755
2546 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2756 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2547 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2757 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2548 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2758 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2549 } 2759 }
2550 2760
2551 if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { 2761 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2552 txd_lower |= E1000_TXD_CMD_VLE; 2762 txd_lower |= E1000_TXD_CMD_VLE;
2553 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 2763 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2554 } 2764 }
2555 2765
2556 i = tx_ring->next_to_use; 2766 i = tx_ring->next_to_use;
2557 2767
2558 while(count--) { 2768 while (count--) {
2559 buffer_info = &tx_ring->buffer_info[i]; 2769 buffer_info = &tx_ring->buffer_info[i];
2560 tx_desc = E1000_TX_DESC(*tx_ring, i); 2770 tx_desc = E1000_TX_DESC(*tx_ring, i);
2561 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 2771 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2562 tx_desc->lower.data = 2772 tx_desc->lower.data =
2563 cpu_to_le32(txd_lower | buffer_info->length); 2773 cpu_to_le32(txd_lower | buffer_info->length);
2564 tx_desc->upper.data = cpu_to_le32(txd_upper); 2774 tx_desc->upper.data = cpu_to_le32(txd_upper);
2565 if(unlikely(++i == tx_ring->count)) i = 0; 2775 if (unlikely(++i == tx_ring->count)) i = 0;
2566 } 2776 }
2567 2777
2568 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 2778 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
@@ -2597,20 +2807,20 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
2597 2807
2598 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR); 2808 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
2599 2809
2600 if(adapter->link_duplex != HALF_DUPLEX) 2810 if (adapter->link_duplex != HALF_DUPLEX)
2601 goto no_fifo_stall_required; 2811 goto no_fifo_stall_required;
2602 2812
2603 if(atomic_read(&adapter->tx_fifo_stall)) 2813 if (atomic_read(&adapter->tx_fifo_stall))
2604 return 1; 2814 return 1;
2605 2815
2606 if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 2816 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
2607 atomic_set(&adapter->tx_fifo_stall, 1); 2817 atomic_set(&adapter->tx_fifo_stall, 1);
2608 return 1; 2818 return 1;
2609 } 2819 }
2610 2820
2611no_fifo_stall_required: 2821no_fifo_stall_required:
2612 adapter->tx_fifo_head += skb_fifo_len; 2822 adapter->tx_fifo_head += skb_fifo_len;
2613 if(adapter->tx_fifo_head >= adapter->tx_fifo_size) 2823 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
2614 adapter->tx_fifo_head -= adapter->tx_fifo_size; 2824 adapter->tx_fifo_head -= adapter->tx_fifo_size;
2615 return 0; 2825 return 0;
2616} 2826}
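
The 82547 FIFO accounting advances a head cursor and wraps it by subtraction rather than a modulo, which is cheap and correct because a single frame can never exceed the FIFO size. A worked sketch of the head advance:

    #include <stdio.h>

    int main(void)
    {
        unsigned fifo_size = 4096, fifo_head = 4000;
        unsigned skb_fifo_len = 256;       /* already rounded to FIFO units */

        fifo_head += skb_fifo_len;
        if (fifo_head >= fifo_size)
            fifo_head -= fifo_size;        /* wrap instead of modulo */
        printf("head = %u\n", fifo_head);  /* 160 */
        return 0;
    }
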
@@ -2621,27 +2831,27 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
2621{ 2831{
2622 struct e1000_hw *hw = &adapter->hw; 2832 struct e1000_hw *hw = &adapter->hw;
2623 uint16_t length, offset; 2833 uint16_t length, offset;
2624 if(vlan_tx_tag_present(skb)) { 2834 if (vlan_tx_tag_present(skb)) {
2625 if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 2835 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
2626 ( adapter->hw.mng_cookie.status & 2836 ( adapter->hw.mng_cookie.status &
2627 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) 2837 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
2628 return 0; 2838 return 0;
2629 } 2839 }
2630 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { 2840 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
2631 struct ethhdr *eth = (struct ethhdr *) skb->data; 2841 struct ethhdr *eth = (struct ethhdr *) skb->data;
2632 if((htons(ETH_P_IP) == eth->h_proto)) { 2842 if ((htons(ETH_P_IP) == eth->h_proto)) {
2633 const struct iphdr *ip = 2843 const struct iphdr *ip =
2634 (struct iphdr *)((uint8_t *)skb->data+14); 2844 (struct iphdr *)((uint8_t *)skb->data+14);
2635 if(IPPROTO_UDP == ip->protocol) { 2845 if (IPPROTO_UDP == ip->protocol) {
2636 struct udphdr *udp = 2846 struct udphdr *udp =
2637 (struct udphdr *)((uint8_t *)ip + 2847 (struct udphdr *)((uint8_t *)ip +
2638 (ip->ihl << 2)); 2848 (ip->ihl << 2));
2639 if(ntohs(udp->dest) == 67) { 2849 if (ntohs(udp->dest) == 67) {
2640 offset = (uint8_t *)udp + 8 - skb->data; 2850 offset = (uint8_t *)udp + 8 - skb->data;
2641 length = skb->len - offset; 2851 length = skb->len - offset;
2642 2852
2643 return e1000_mng_write_dhcp_info(hw, 2853 return e1000_mng_write_dhcp_info(hw,
2644 (uint8_t *)udp + 8, 2854 (uint8_t *)udp + 8,
2645 length); 2855 length);
2646 } 2856 }
2647 } 2857 }
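
The manageability path parses the frame by hand: the EtherType must be IPv4, the IP header length comes from ihl in 32-bit words, and only UDP to port 67 (the BOOTP/DHCP server port) is forwarded to firmware, starting 8 bytes past the UDP header. A sketch of the offset arithmetic (the 342-byte length is just a typical DHCP request):

    #include <stdio.h>

    int main(void)
    {
        unsigned eth_hdr = 14;
        unsigned ihl = 5;                       /* IP header, 32-bit words */
        unsigned ip_off  = eth_hdr;
        unsigned udp_off = ip_off + (ihl << 2); /* 34 */
        unsigned payload_off = udp_off + 8;     /* skip 8-byte UDP header */
        unsigned skb_len = 342;                 /* illustrative frame length */

        printf("payload at %u, length %u\n",
               payload_off, skb_len - payload_off);  /* 42, 300 */
        return 0;
    }
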
@@ -2664,7 +2874,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2664 unsigned int nr_frags = 0; 2874 unsigned int nr_frags = 0;
2665 unsigned int mss = 0; 2875 unsigned int mss = 0;
2666 int count = 0; 2876 int count = 0;
2667 int tso; 2877 int tso;
2668 unsigned int f; 2878 unsigned int f;
2669 len -= skb->data_len; 2879 len -= skb->data_len;
2670 2880
@@ -2687,16 +2897,35 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2687 * 4 = ceil(buffer len/mss). To make sure we don't 2897 * 4 = ceil(buffer len/mss). To make sure we don't
2688 * overrun the FIFO, adjust the max buffer len if mss 2898 * overrun the FIFO, adjust the max buffer len if mss
2689 * drops. */ 2899 * drops. */
2690 if(mss) { 2900 if (mss) {
2901 uint8_t hdr_len;
2691 max_per_txd = min(mss << 2, max_per_txd); 2902 max_per_txd = min(mss << 2, max_per_txd);
2692 max_txd_pwr = fls(max_per_txd) - 1; 2903 max_txd_pwr = fls(max_per_txd) - 1;
2904
2905 /* TSO Workaround for 82571/2 Controllers -- if skb->data
2906 * points to just header, pull a few bytes of payload from
2907 * frags into skb->data */
2908 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2909 if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) &&
2910 (adapter->hw.mac_type == e1000_82571 ||
2911 adapter->hw.mac_type == e1000_82572)) {
2912 unsigned int pull_size;
2913 pull_size = min((unsigned int)4, skb->data_len);
2914 if (!__pskb_pull_tail(skb, pull_size)) {
2915 printk(KERN_ERR "__pskb_pull_tail failed.\n");
2916 dev_kfree_skb_any(skb);
2917 return -EFAULT;
2918 }
2919 len = skb->len - skb->data_len;
2920 }
2693 } 2921 }
2694 2922
2695 if((mss) || (skb->ip_summed == CHECKSUM_HW)) 2923 /* reserve a descriptor for the offload context */
2924 if ((mss) || (skb->ip_summed == CHECKSUM_HW))
2696 count++; 2925 count++;
2697 count++; 2926 count++;
2698#else 2927#else
2699 if(skb->ip_summed == CHECKSUM_HW) 2928 if (skb->ip_summed == CHECKSUM_HW)
2700 count++; 2929 count++;
2701#endif 2930#endif
2702 2931
@@ -2709,45 +2938,24 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2709 2938
2710 count += TXD_USE_COUNT(len, max_txd_pwr); 2939 count += TXD_USE_COUNT(len, max_txd_pwr);
2711 2940
2712 if(adapter->pcix_82544) 2941 if (adapter->pcix_82544)
2713 count++; 2942 count++;
2714 2943
2715 /* work-around for errata 10 and it applies to all controllers 2944 /* work-around for errata 10 and it applies to all controllers
2716 * in PCI-X mode, so add one more descriptor to the count 2945 * in PCI-X mode, so add one more descriptor to the count
2717 */ 2946 */
2718 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 2947 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2719 (len > 2015))) 2948 (len > 2015)))
2720 count++; 2949 count++;
2721 2950
2722 nr_frags = skb_shinfo(skb)->nr_frags; 2951 nr_frags = skb_shinfo(skb)->nr_frags;
2723 for(f = 0; f < nr_frags; f++) 2952 for (f = 0; f < nr_frags; f++)
2724 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, 2953 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
2725 max_txd_pwr); 2954 max_txd_pwr);
2726 if(adapter->pcix_82544) 2955 if (adapter->pcix_82544)
2727 count += nr_frags; 2956 count += nr_frags;
2728 2957
2729#ifdef NETIF_F_TSO 2958 if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
2730 /* TSO Workaround for 82571/2 Controllers -- if skb->data
2731 * points to just header, pull a few bytes of payload from
2732 * frags into skb->data */
2733 if (skb_shinfo(skb)->tso_size) {
2734 uint8_t hdr_len;
2735 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2736 if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
2737 (adapter->hw.mac_type == e1000_82571 ||
2738 adapter->hw.mac_type == e1000_82572)) {
2739 unsigned int pull_size;
2740 pull_size = min((unsigned int)4, skb->data_len);
2741 if (!__pskb_pull_tail(skb, pull_size)) {
2742 printk(KERN_ERR "__pskb_pull_tail failed.\n");
2743 dev_kfree_skb_any(skb);
2744 return -EFAULT;
2745 }
2746 }
2747 }
2748#endif
2749
2750 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
2751 e1000_transfer_dhcp_info(adapter, skb); 2959 e1000_transfer_dhcp_info(adapter, skb);
2752 2960
2753 local_irq_save(flags); 2961 local_irq_save(flags);
@@ -2765,8 +2973,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2765 return NETDEV_TX_BUSY; 2973 return NETDEV_TX_BUSY;
2766 } 2974 }
2767 2975
2768 if(unlikely(adapter->hw.mac_type == e1000_82547)) { 2976 if (unlikely(adapter->hw.mac_type == e1000_82547)) {
2769 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 2977 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
2770 netif_stop_queue(netdev); 2978 netif_stop_queue(netdev);
2771 mod_timer(&adapter->tx_fifo_stall_timer, jiffies); 2979 mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
2772 spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 2980 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
@@ -2774,13 +2982,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2774 } 2982 }
2775 } 2983 }
2776 2984
2777 if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { 2985 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
2778 tx_flags |= E1000_TX_FLAGS_VLAN; 2986 tx_flags |= E1000_TX_FLAGS_VLAN;
2779 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 2987 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
2780 } 2988 }
2781 2989
2782 first = tx_ring->next_to_use; 2990 first = tx_ring->next_to_use;
2783 2991
2784 tso = e1000_tso(adapter, tx_ring, skb); 2992 tso = e1000_tso(adapter, tx_ring, skb);
2785 if (tso < 0) { 2993 if (tso < 0) {
2786 dev_kfree_skb_any(skb); 2994 dev_kfree_skb_any(skb);
@@ -2833,6 +3041,7 @@ e1000_tx_timeout_task(struct net_device *netdev)
2833{ 3041{
2834 struct e1000_adapter *adapter = netdev_priv(netdev); 3042 struct e1000_adapter *adapter = netdev_priv(netdev);
2835 3043
3044 adapter->tx_timeout_count++;
2836 e1000_down(adapter); 3045 e1000_down(adapter);
2837 e1000_up(adapter); 3046 e1000_up(adapter);
2838} 3047}
@@ -2850,7 +3059,7 @@ e1000_get_stats(struct net_device *netdev)
2850{ 3059{
2851 struct e1000_adapter *adapter = netdev_priv(netdev); 3060 struct e1000_adapter *adapter = netdev_priv(netdev);
2852 3061
2853 e1000_update_stats(adapter); 3062 /* only return the current stats */
2854 return &adapter->net_stats; 3063 return &adapter->net_stats;
2855} 3064}
2856 3065
@@ -2868,56 +3077,57 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
2868 struct e1000_adapter *adapter = netdev_priv(netdev); 3077 struct e1000_adapter *adapter = netdev_priv(netdev);
2869 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3078 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
2870 3079
2871 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3080 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
2872 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3081 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2873 DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); 3082 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
2874 return -EINVAL;
2875 }
2876
2877#define MAX_STD_JUMBO_FRAME_SIZE 9234
2878 /* might want this to be bigger enum check... */
2879 /* 82571 controllers limit jumbo frame size to 10500 bytes */
2880 if ((adapter->hw.mac_type == e1000_82571 ||
2881 adapter->hw.mac_type == e1000_82572) &&
2882 max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2883 DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
2884 "on 82571 and 82572 controllers.\n");
2885 return -EINVAL;
2886 }
2887
2888 if(adapter->hw.mac_type == e1000_82573 &&
2889 max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
2890 DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
2891 "on 82573\n");
2892 return -EINVAL; 3083 return -EINVAL;
2893 } 3084 }
2894 3085
2895 if(adapter->hw.mac_type > e1000_82547_rev_2) { 3086 /* Adapter-specific max frame size limits. */
2896 adapter->rx_buffer_len = max_frame; 3087 switch (adapter->hw.mac_type) {
2897 E1000_ROUNDUP(adapter->rx_buffer_len, 1024); 3088 case e1000_82542_rev2_0:
2898 } else { 3089 case e1000_82542_rev2_1:
2899 if(unlikely((adapter->hw.mac_type < e1000_82543) && 3090 case e1000_82573:
2900 (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) { 3091 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
2901 DPRINTK(PROBE, ERR, "Jumbo Frames not supported " 3092 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
2902 "on 82542\n"); 3093 return -EINVAL;
3094 }
3095 break;
3096 case e1000_82571:
3097 case e1000_82572:
3098#define MAX_STD_JUMBO_FRAME_SIZE 9234
3099 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3100 DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
2903 return -EINVAL; 3101 return -EINVAL;
2904
2905 } else {
2906 if(max_frame <= E1000_RXBUFFER_2048) {
2907 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
2908 } else if(max_frame <= E1000_RXBUFFER_4096) {
2909 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
2910 } else if(max_frame <= E1000_RXBUFFER_8192) {
2911 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
2912 } else if(max_frame <= E1000_RXBUFFER_16384) {
2913 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
2914 }
2915 } 3102 }
3103 break;
3104 default:
3105 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3106 break;
2916 } 3107 }
2917 3108
3109 /* since the driver code now supports splitting a packet across
3110 * multiple descriptors, most of the fifo related limitations on
3111 * jumbo frame traffic have gone away.
3112 * simply use 2k descriptors for everything.
3113 *
3114 * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3115 * means we reserve 2 more, this pushes us to allocate from the next
3116 * larger slab size
3117 * i.e. RXBUFFER_2048 --> size-4096 slab */
3118
3119 /* recent hardware supports 1KB granularity */
3120 if (adapter->hw.mac_type > e1000_82547_rev_2) {
3121 adapter->rx_buffer_len =
3122 ((max_frame < E1000_RXBUFFER_2048) ?
3123 max_frame : E1000_RXBUFFER_2048);
3124 E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
3125 } else
3126 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3127
2918 netdev->mtu = new_mtu; 3128 netdev->mtu = new_mtu;
2919 3129
2920 if(netif_running(netdev)) { 3130 if (netif_running(netdev)) {
2921 e1000_down(adapter); 3131 e1000_down(adapter);
2922 e1000_up(adapter); 3132 e1000_up(adapter);
2923 } 3133 }
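
With receives now allowed to span descriptors, sizing reduces to: max_frame = MTU + 14 (Ethernet header) + 4 (FCS); newer MACs take min(max_frame, 2048) rounded up to the hardware's 1 KB granularity, older MACs always use 2 KB. A worked sketch of the rounding (ROUNDUP mirrors what E1000_ROUNDUP does):

    #include <stdio.h>

    #define ROUNDUP(x, m) (((x) + (m) - 1) / (m) * (m))

    int main(void)
    {
        int new_mtu = 1500;
        int max_frame = new_mtu + 14 + 4;           /* 1518 */
        int newer_mac = 1;                          /* > 82547 rev 2 */
        int rx_buffer_len;

        if (newer_mac) {
            rx_buffer_len = max_frame < 2048 ? max_frame : 2048;
            rx_buffer_len = ROUNDUP(rx_buffer_len, 1024);  /* 2048 */
        } else {
            rx_buffer_len = 2048;
        }
        printf("rx_buffer_len = %d\n", rx_buffer_len);
        return 0;
    }
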
@@ -3004,7 +3214,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3004 hw->collision_delta = E1000_READ_REG(hw, COLC); 3214 hw->collision_delta = E1000_READ_REG(hw, COLC);
3005 adapter->stats.colc += hw->collision_delta; 3215 adapter->stats.colc += hw->collision_delta;
3006 3216
3007 if(hw->mac_type >= e1000_82543) { 3217 if (hw->mac_type >= e1000_82543) {
3008 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC); 3218 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
3009 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC); 3219 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
3010 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS); 3220 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
@@ -3012,7 +3222,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
 		adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
 		adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
 	}
-	if(hw->mac_type > e1000_82547_rev_2) {
+	if (hw->mac_type > e1000_82547_rev_2) {
 		adapter->stats.iac += E1000_READ_REG(hw, IAC);
 		adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
 		adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
@@ -3037,12 +3247,11 @@ e1000_update_stats(struct e1000_adapter *adapter)
 
 	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
 		adapter->stats.crcerrs + adapter->stats.algnerrc +
-		adapter->stats.rlec + adapter->stats.mpc +
-		adapter->stats.cexterr;
+		adapter->stats.rlec + adapter->stats.cexterr;
+	adapter->net_stats.rx_dropped = 0;
 	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
 	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
 	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
-	adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
 	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
 
 	/* Tx Errors */
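The rx_errors change above stops counting missed packets (mpc) as receive errors; they are now reported once, via rx_missed_errors, and rx_fifo_errors no longer aliases the same counter. A hypothetical helper mirroring the new aggregation, with field names following struct net_device_stats:

	struct rx_err_view {
		unsigned long rx_errors;
		unsigned long rx_missed_errors;
	};

	/* illustrative only: how the counters map after this hunk */
	static struct rx_err_view fold_rx_stats(unsigned long rxerrc,
			unsigned long crcerrs, unsigned long algnerrc,
			unsigned long rlec, unsigned long cexterr,
			unsigned long mpc)
	{
		struct rx_err_view v;
		v.rx_errors = rxerrc + crcerrs + algnerrc + rlec + cexterr;
		v.rx_missed_errors = mpc;	/* no longer double-counted */
		return v;
	}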
@@ -3057,14 +3266,14 @@ e1000_update_stats(struct e1000_adapter *adapter)
 
 	/* Phy Stats */
 
-	if(hw->media_type == e1000_media_type_copper) {
-		if((adapter->link_speed == SPEED_1000) &&
+	if (hw->media_type == e1000_media_type_copper) {
+		if ((adapter->link_speed == SPEED_1000) &&
 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
 			adapter->phy_stats.idle_errors += phy_tmp;
 		}
 
-		if((hw->mac_type <= e1000_82546) &&
+		if ((hw->mac_type <= e1000_82546) &&
 		   (hw->phy_type == e1000_phy_m88) &&
 		    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
 			adapter->phy_stats.receive_errors += phy_tmp;
@@ -3110,32 +3319,44 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	uint32_t icr = E1000_READ_REG(hw, ICR);
-#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI)
+#ifndef CONFIG_E1000_NAPI
 	int i;
+#else
+	/* Interrupt Auto-Mask...upon reading ICR,
+	 * interrupts are masked.  No need for the
+	 * IMC write, but it does mean we should
+	 * account for it ASAP. */
+	if (likely(hw->mac_type >= e1000_82571))
+		atomic_inc(&adapter->irq_sem);
 #endif
 
-	if(unlikely(!icr))
+	if (unlikely(!icr)) {
+#ifdef CONFIG_E1000_NAPI
+		if (hw->mac_type >= e1000_82571)
+			e1000_irq_enable(adapter);
+#endif
 		return IRQ_NONE;  /* Not our interrupt */
+	}
 
-	if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
 		hw->get_link_status = 1;
 		mod_timer(&adapter->watchdog_timer, jiffies);
 	}
 
 #ifdef CONFIG_E1000_NAPI
-	atomic_inc(&adapter->irq_sem);
-	E1000_WRITE_REG(hw, IMC, ~0);
-	E1000_WRITE_FLUSH(hw);
+	if (unlikely(hw->mac_type < e1000_82571)) {
+		atomic_inc(&adapter->irq_sem);
+		E1000_WRITE_REG(hw, IMC, ~0);
+		E1000_WRITE_FLUSH(hw);
+	}
 #ifdef CONFIG_E1000_MQ
 	if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
-		cpu_set(adapter->cpu_for_queue[0],
-			adapter->rx_sched_call_data.cpumask);
-		for (i = 1; i < adapter->num_queues; i++) {
-			cpu_set(adapter->cpu_for_queue[i],
-				adapter->rx_sched_call_data.cpumask);
-			atomic_inc(&adapter->irq_sem);
-		}
-		atomic_set(&adapter->rx_sched_call_data.count, i);
+		/* We must setup the cpumask once count == 0 since
+		 * each cpu bit is cleared when the work is done. */
+		adapter->rx_sched_call_data.cpumask = adapter->cpumask;
+		atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem);
+		atomic_set(&adapter->rx_sched_call_data.count,
+			   adapter->num_rx_queues);
 		smp_call_async_mask(&adapter->rx_sched_call_data);
 	} else {
 		printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
@@ -3149,26 +3370,26 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 
 #else /* if !CONFIG_E1000_NAPI */
 	/* Writing IMC and IMS is needed for 82547.
-	   Due to Hub Link bus being occupied, an interrupt
-	   de-assertion message is not able to be sent.
-	   When an interrupt assertion message is generated later,
-	   two messages are re-ordered and sent out.
-	   That causes APIC to think 82547 is in de-assertion
-	   state, while 82547 is in assertion state, resulting
-	   in dead lock. Writing IMC forces 82547 into
-	   de-assertion state.
+	 * Due to Hub Link bus being occupied, an interrupt
+	 * de-assertion message is not able to be sent.
+	 * When an interrupt assertion message is generated later,
+	 * two messages are re-ordered and sent out.
+	 * That causes APIC to think 82547 is in de-assertion
+	 * state, while 82547 is in assertion state, resulting
+	 * in dead lock. Writing IMC forces 82547 into
+	 * de-assertion state.
 	 */
-	if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){
+	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
 		atomic_inc(&adapter->irq_sem);
 		E1000_WRITE_REG(hw, IMC, ~0);
 	}
 
-	for(i = 0; i < E1000_MAX_INTR; i++)
-		if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+	for (i = 0; i < E1000_MAX_INTR; i++)
+		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
 		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
 			break;
 
-	if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
+	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
 		e1000_irq_enable(adapter);
 
 #endif /* CONFIG_E1000_NAPI */
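The interrupt rework above leans on the 82571+ Interrupt Auto-Mask behaviour: the ICR read itself masks further interrupts, so the handler only has to account for the implicit mask and undo it when the interrupt was not ours. A toy user-space model of that flow, with all names illustrative:

	#include <stdatomic.h>

	enum { SK_IRQ_NONE, SK_IRQ_HANDLED };
	static atomic_int irq_sem;

	static int handle_irq(unsigned int icr, int has_auto_mask,
			      void (*irq_enable)(void))
	{
		if (has_auto_mask)
			atomic_fetch_add(&irq_sem, 1);	/* ICR read masked us */
		if (icr == 0) {
			if (has_auto_mask)
				irq_enable();	/* undo the implicit mask */
			return SK_IRQ_NONE;	/* not our interrupt */
		}
		/* schedule the poll routine here */
		return SK_IRQ_HANDLED;
	}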
@@ -3187,7 +3408,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 {
 	struct e1000_adapter *adapter;
 	int work_to_do = min(*budget, poll_dev->quota);
-	int tx_cleaned, i = 0, work_done = 0;
+	int tx_cleaned = 0, i = 0, work_done = 0;
 
 	/* Must NOT use netdev_priv macro here. */
 	adapter = poll_dev->priv;
@@ -3198,19 +3419,31 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 
 	while (poll_dev != &adapter->polling_netdev[i]) {
 		i++;
-		if (unlikely(i == adapter->num_queues))
+		if (unlikely(i == adapter->num_rx_queues))
 			BUG();
 	}
 
-	tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+	if (likely(adapter->num_tx_queues == 1)) {
+		/* e1000_clean is called per-cpu.  This lock protects
+		 * tx_ring[0] from being cleaned by multiple cpus
+		 * simultaneously.  A failure obtaining the lock means
+		 * tx_ring[0] is currently being cleaned anyway. */
+		if (spin_trylock(&adapter->tx_queue_lock)) {
+			tx_cleaned = e1000_clean_tx_irq(adapter,
+							&adapter->tx_ring[0]);
+			spin_unlock(&adapter->tx_queue_lock);
+		}
+	} else
+		tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+
 	adapter->clean_rx(adapter, &adapter->rx_ring[i],
 			  &work_done, work_to_do);
 
 	*budget -= work_done;
 	poll_dev->quota -= work_done;
 
 	/* If no Tx and not enough Rx work done, exit the polling mode */
-	if((!tx_cleaned && (work_done == 0)) ||
+	if ((!tx_cleaned && (work_done == 0)) ||
 	   !netif_running(adapter->netdev)) {
 quit_polling:
 		netif_rx_complete(poll_dev);
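The spin_trylock in the hunk above is the interesting part: e1000_clean can run concurrently on several cpus, but with a single Tx ring only one of them needs to reap descriptors, and a failed trylock just means the work is already in progress. A minimal pthreads model of the same idea:

	#include <pthread.h>

	static pthread_mutex_t tx_queue_lock = PTHREAD_MUTEX_INITIALIZER;

	/* returns 0 when another cpu already holds the lock (and is cleaning) */
	static int clean_tx_once(int (*clean_tx)(void))
	{
		int tx_cleaned = 0;

		if (pthread_mutex_trylock(&tx_queue_lock) == 0) {
			tx_cleaned = clean_tx();
			pthread_mutex_unlock(&tx_queue_lock);
		}
		return tx_cleaned;
	}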
@@ -3242,22 +3475,24 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
 
 	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
-		for(cleaned = FALSE; !cleaned; ) {
+		for (cleaned = FALSE; !cleaned; ) {
 			tx_desc = E1000_TX_DESC(*tx_ring, i);
 			buffer_info = &tx_ring->buffer_info[i];
 			cleaned = (i == eop);
 
+#ifdef CONFIG_E1000_MQ
+			tx_ring->tx_stats.bytes += buffer_info->length;
+#endif
 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+			memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
 
-			tx_desc->buffer_addr = 0;
-			tx_desc->lower.data = 0;
-			tx_desc->upper.data = 0;
-
-			if(unlikely(++i == tx_ring->count)) i = 0;
+			if (unlikely(++i == tx_ring->count)) i = 0;
 		}
 
-		tx_ring->pkt++;
-
+#ifdef CONFIG_E1000_MQ
+		tx_ring->tx_stats.packets++;
+#endif
+
 		eop = tx_ring->buffer_info[i].next_to_watch;
 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
 	}
@@ -3266,7 +3501,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 
 	spin_lock(&tx_ring->tx_lock);
 
-	if(unlikely(cleaned && netif_queue_stopped(netdev) &&
+	if (unlikely(cleaned && netif_queue_stopped(netdev) &&
 		    netif_carrier_ok(netdev)))
 		netif_wake_queue(netdev);
 
@@ -3276,32 +3511,31 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 		/* Detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
 		adapter->detect_tx_hung = FALSE;
-		if (tx_ring->buffer_info[i].dma &&
-		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
+		if (tx_ring->buffer_info[eop].dma &&
+		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+			       adapter->tx_timeout_factor * HZ)
 		    && !(E1000_READ_REG(&adapter->hw, STATUS) &
 			 E1000_STATUS_TXOFF)) {
 
 			/* detected Tx unit hang */
-			i = tx_ring->next_to_clean;
-			eop = tx_ring->buffer_info[i].next_to_watch;
-			eop_desc = E1000_TX_DESC(*tx_ring, eop);
 			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+					"  Tx Queue             <%lu>\n"
 					"  TDH                  <%x>\n"
 					"  TDT                  <%x>\n"
 					"  next_to_use          <%x>\n"
 					"  next_to_clean        <%x>\n"
 					"buffer_info[next_to_clean]\n"
-					"  dma                  <%llx>\n"
 					"  time_stamp           <%lx>\n"
 					"  next_to_watch        <%x>\n"
 					"  jiffies              <%lx>\n"
 					"  next_to_watch.status <%x>\n",
+				(unsigned long)((tx_ring - adapter->tx_ring) /
+					sizeof(struct e1000_tx_ring)),
 				readl(adapter->hw.hw_addr + tx_ring->tdh),
 				readl(adapter->hw.hw_addr + tx_ring->tdt),
 				tx_ring->next_to_use,
-				i,
-				(unsigned long long)tx_ring->buffer_info[i].dma,
-				tx_ring->buffer_info[i].time_stamp,
+				tx_ring->next_to_clean,
+				tx_ring->buffer_info[eop].time_stamp,
 				eop,
 				jiffies,
 				eop_desc->upper.fields.status);
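The hang check above now keys off the eop descriptor and a per-adapter tx_timeout_factor instead of a fixed HZ. Reduced to its logic, and using the kernel's wrap-safe time comparison idiom, the test looks roughly like this; names outside the diff are illustrative:

	/* suspect a hang when the eop buffer is still mapped, its timestamp
	 * is older than factor * hz, and the transmitter is not paused by
	 * flow control (the TXOFF status test in the diff) */
	static int tx_hang_suspected(unsigned long jiffies_now,
				     unsigned long time_stamp,
				     unsigned long factor, unsigned long hz,
				     int dma_live, int tx_paused)
	{
		return dma_live &&
		       (long)(jiffies_now - (time_stamp + factor * hz)) > 0 &&
		       !tx_paused;
	}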
@@ -3329,21 +3563,21 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* 82543 or newer only */
-	if(unlikely(adapter->hw.mac_type < e1000_82543)) return;
+	if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
 	/* Ignore Checksum bit is set */
-	if(unlikely(status & E1000_RXD_STAT_IXSM)) return;
+	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
 	/* TCP/UDP checksum error bit is set */
-	if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
+	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
 		/* let the stack verify checksum errors */
 		adapter->hw_csum_err++;
 		return;
 	}
 	/* TCP/UDP Checksum has not been calculated */
-	if(adapter->hw.mac_type <= e1000_82547_rev_2) {
-		if(!(status & E1000_RXD_STAT_TCPCS))
+	if (adapter->hw.mac_type <= e1000_82547_rev_2) {
+		if (!(status & E1000_RXD_STAT_TCPCS))
 			return;
 	} else {
-		if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
 			return;
 	}
 	/* It must be a TCP or UDP packet with a valid checksum */
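e1000_rx_checksum above reduces to a small decision table: honor the hardware's "ignore checksum" bit, punt to the stack on a TCP checksum error, and require TCPCS (plus UDPCS on newer MACs) before trusting the offload. A standalone sketch; the bit values mirror the e1000 definitions but are illustrative here:

	#define SK_STAT_IXSM   0x04
	#define SK_STAT_UDPCS  0x10
	#define SK_STAT_TCPCS  0x20
	#define SK_ERR_TCPE    0x20

	/* returns 1 when the packet may be marked CHECKSUM_UNNECESSARY */
	static int csum_offload_ok(unsigned int status, unsigned int errors,
				   int mac_newer_than_82547)
	{
		if (status & SK_STAT_IXSM)
			return 0;		/* hw says ignore */
		if (errors & SK_ERR_TCPE)
			return 0;		/* let the stack verify */
		if (!mac_newer_than_82547)
			return !!(status & SK_STAT_TCPCS);
		return !!(status & (SK_STAT_TCPCS | SK_STAT_UDPCS));
	}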
@@ -3379,46 +3613,87 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
-	struct e1000_rx_desc *rx_desc;
-	struct e1000_buffer *buffer_info;
-	struct sk_buff *skb;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_buffer *buffer_info, *next_buffer;
 	unsigned long flags;
 	uint32_t length;
 	uint8_t last_byte;
 	unsigned int i;
-	boolean_t cleaned = FALSE;
+	int cleaned_count = 0;
+	boolean_t cleaned = FALSE, multi_descriptor = FALSE;
 
 	i = rx_ring->next_to_clean;
 	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
 
-	while(rx_desc->status & E1000_RXD_STAT_DD) {
-		buffer_info = &rx_ring->buffer_info[i];
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct sk_buff *skb, *next_skb;
+		u8 status;
 #ifdef CONFIG_E1000_NAPI
-		if(*work_done >= work_to_do)
+		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
 #endif
-		cleaned = TRUE;
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		next_buffer = &rx_ring->buffer_info[i];
+		next_skb = next_buffer->skb;
 
+		cleaned = TRUE;
+		cleaned_count++;
 		pci_unmap_single(pdev,
 				 buffer_info->dma,
 				 buffer_info->length,
 				 PCI_DMA_FROMDEVICE);
 
-		skb = buffer_info->skb;
 		length = le16_to_cpu(rx_desc->length);
 
-		if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
-			/* All receives must fit into a single buffer */
-			E1000_DBG("%s: Receive packet consumed multiple"
-				  " buffers\n", netdev->name);
-			dev_kfree_skb_irq(skb);
+		skb_put(skb, length);
+
+		if (!(status & E1000_RXD_STAT_EOP)) {
+			if (!rx_ring->rx_skb_top) {
+				rx_ring->rx_skb_top = skb;
+				rx_ring->rx_skb_top->len = length;
+				rx_ring->rx_skb_prev = skb;
+			} else {
+				if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
+					rx_ring->rx_skb_prev->next = skb;
+					skb->prev = rx_ring->rx_skb_prev;
+				} else {
+					skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
+				}
+				rx_ring->rx_skb_prev = skb;
+				rx_ring->rx_skb_top->data_len += length;
+			}
 			goto next_desc;
+		} else {
+			if (rx_ring->rx_skb_top) {
+				if (skb_shinfo(rx_ring->rx_skb_top)
+							->frag_list) {
+					rx_ring->rx_skb_prev->next = skb;
+					skb->prev = rx_ring->rx_skb_prev;
+				} else
+					skb_shinfo(rx_ring->rx_skb_top)
+							->frag_list = skb;
+
+				rx_ring->rx_skb_top->data_len += length;
+				rx_ring->rx_skb_top->len +=
+					rx_ring->rx_skb_top->data_len;
+
+				skb = rx_ring->rx_skb_top;
+				multi_descriptor = TRUE;
+				rx_ring->rx_skb_top = NULL;
+				rx_ring->rx_skb_prev = NULL;
+			}
 		}
 
-		if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
 			last_byte = *(skb->data + length - 1);
-			if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
+			if (TBI_ACCEPT(&adapter->hw, status,
 			              rx_desc->errors, length, last_byte)) {
 				spin_lock_irqsave(&adapter->stats_lock, flags);
 				e1000_tbi_adjust_stats(&adapter->hw,
@@ -3433,18 +3708,41 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			}
 		}
 
-		/* Good Receive */
-		skb_put(skb, length - ETHERNET_FCS_SIZE);
+		/* code added for copybreak, this should improve
+		 * performance for small packets with large amounts
+		 * of reassembly being done in the stack */
+#define E1000_CB_LENGTH 256
+		if ((length < E1000_CB_LENGTH) &&
+		   !rx_ring->rx_skb_top &&
+		   /* or maybe (status & E1000_RXD_STAT_EOP) && */
+		   !multi_descriptor) {
+			struct sk_buff *new_skb =
+			    dev_alloc_skb(length + NET_IP_ALIGN);
+			if (new_skb) {
+				skb_reserve(new_skb, NET_IP_ALIGN);
+				new_skb->dev = netdev;
+				memcpy(new_skb->data - NET_IP_ALIGN,
+				       skb->data - NET_IP_ALIGN,
+				       length + NET_IP_ALIGN);
+				/* save the skb in buffer_info as good */
+				buffer_info->skb = skb;
+				skb = new_skb;
+				skb_put(skb, length);
+			}
+		}
+
+		/* end copybreak code */
 
 		/* Receive Checksum Offload */
 		e1000_rx_checksum(adapter,
-				  (uint32_t)(rx_desc->status) |
+				  (uint32_t)(status) |
 				  ((uint32_t)(rx_desc->errors) << 24),
 				  rx_desc->csum, skb);
+
 		skb->protocol = eth_type_trans(skb, netdev);
 #ifdef CONFIG_E1000_NAPI
-		if(unlikely(adapter->vlgrp &&
-			    (rx_desc->status & E1000_RXD_STAT_VP))) {
+		if (unlikely(adapter->vlgrp &&
+			    (status & E1000_RXD_STAT_VP))) {
 			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 						 le16_to_cpu(rx_desc->special) &
 						 E1000_RXD_SPC_VLAN_MASK);
@@ -3452,8 +3750,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			netif_receive_skb(skb);
 		}
 #else /* CONFIG_E1000_NAPI */
-		if(unlikely(adapter->vlgrp &&
-			    (rx_desc->status & E1000_RXD_STAT_VP))) {
+		if (unlikely(adapter->vlgrp &&
+			    (status & E1000_RXD_STAT_VP))) {
 			vlan_hwaccel_rx(skb, adapter->vlgrp,
 					le16_to_cpu(rx_desc->special) &
 					E1000_RXD_SPC_VLAN_MASK);
@@ -3462,17 +3760,28 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		}
 #endif /* CONFIG_E1000_NAPI */
 		netdev->last_rx = jiffies;
-		rx_ring->pkt++;
+#ifdef CONFIG_E1000_MQ
+		rx_ring->rx_stats.packets++;
+		rx_ring->rx_stats.bytes += length;
+#endif
 
 next_desc:
 		rx_desc->status = 0;
-		buffer_info->skb = NULL;
-		if(unlikely(++i == rx_ring->count)) i = 0;
 
-		rx_desc = E1000_RX_DESC(*rx_ring, i);
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
 	}
 	rx_ring->next_to_clean = i;
-	adapter->alloc_rx_buf(adapter, rx_ring);
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
 	return cleaned;
 }
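Two ideas carry this function's rewrite: copybreak (copy sub-256-byte packets into a right-sized skb so the 2 KB receive buffer can be recycled immediately) and batched descriptor replenishment via cleaned_count. The copybreak half, modelled with plain buffers rather than skbs; names are illustrative:

	#include <stdlib.h>
	#include <string.h>

	#define CB_LENGTH 256	/* threshold matches E1000_CB_LENGTH above */

	/* returns the buffer to pass up the stack; *recycle gets the large
	 * rx buffer when it can be reused, or NULL when it was consumed */
	static void *copybreak(void *rx_buf, size_t len, void **recycle)
	{
		if (len < CB_LENGTH) {
			void *small = malloc(len);
			if (small) {
				memcpy(small, rx_buf, len);
				*recycle = rx_buf;
				return small;
			}
		}
		*recycle = NULL;
		return rx_buf;
	}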
@@ -3492,52 +3801,59 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 			struct e1000_rx_ring *rx_ring)
 #endif
 {
-	union e1000_rx_desc_packet_split *rx_desc;
+	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
-	struct e1000_buffer *buffer_info;
+	struct e1000_buffer *buffer_info, *next_buffer;
 	struct e1000_ps_page *ps_page;
 	struct e1000_ps_page_dma *ps_page_dma;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *next_skb;
 	unsigned int i, j;
 	uint32_t length, staterr;
+	int cleaned_count = 0;
 	boolean_t cleaned = FALSE;
 
 	i = rx_ring->next_to_clean;
 	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+	buffer_info = &rx_ring->buffer_info[i];
 
-	while(staterr & E1000_RXD_STAT_DD) {
-		buffer_info = &rx_ring->buffer_info[i];
+	while (staterr & E1000_RXD_STAT_DD) {
 		ps_page = &rx_ring->ps_page[i];
 		ps_page_dma = &rx_ring->ps_page_dma[i];
 #ifdef CONFIG_E1000_NAPI
-		if(unlikely(*work_done >= work_to_do))
+		if (unlikely(*work_done >= work_to_do))
 			break;
 		(*work_done)++;
 #endif
+		skb = buffer_info->skb;
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
+		next_buffer = &rx_ring->buffer_info[i];
+		next_skb = next_buffer->skb;
+
 		cleaned = TRUE;
+		cleaned_count++;
 		pci_unmap_single(pdev, buffer_info->dma,
 				 buffer_info->length,
 				 PCI_DMA_FROMDEVICE);
 
-		skb = buffer_info->skb;
-
-		if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
+		if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
 			E1000_DBG("%s: Packet Split buffers didn't pick up"
 				  " the full packet\n", netdev->name);
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
 		}
 
-		if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+		if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
 		}
 
 		length = le16_to_cpu(rx_desc->wb.middle.length0);
 
-		if(unlikely(!length)) {
+		if (unlikely(!length)) {
 			E1000_DBG("%s: Last part of the packet spanning"
 				  " multiple descriptors\n", netdev->name);
 			dev_kfree_skb_irq(skb);
@@ -3547,8 +3863,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		/* Good Receive */
 		skb_put(skb, length);
 
-		for(j = 0; j < adapter->rx_ps_pages; j++) {
-			if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
+		for (j = 0; j < adapter->rx_ps_pages; j++) {
+			if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
 				break;
 
 			pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
@@ -3568,15 +3884,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
 		skb->protocol = eth_type_trans(skb, netdev);
 
-		if(likely(rx_desc->wb.upper.header_status &
-			  E1000_RXDPS_HDRSTAT_HDRSP)) {
+		if (likely(rx_desc->wb.upper.header_status &
+			  E1000_RXDPS_HDRSTAT_HDRSP))
 			adapter->rx_hdr_split++;
-#ifdef HAVE_RX_ZERO_COPY
-			skb_shinfo(skb)->zero_copy = TRUE;
-#endif
-		}
 #ifdef CONFIG_E1000_NAPI
-		if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
+		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
 			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 				le16_to_cpu(rx_desc->wb.middle.vlan) &
 				E1000_RXD_SPC_VLAN_MASK);
@@ -3584,7 +3896,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 			netif_receive_skb(skb);
 		}
 #else /* CONFIG_E1000_NAPI */
-		if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
+		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
 			vlan_hwaccel_rx(skb, adapter->vlgrp,
 				le16_to_cpu(rx_desc->wb.middle.vlan) &
 				E1000_RXD_SPC_VLAN_MASK);
@@ -3593,18 +3905,31 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		}
 #endif /* CONFIG_E1000_NAPI */
 		netdev->last_rx = jiffies;
-		rx_ring->pkt++;
+#ifdef CONFIG_E1000_MQ
+		rx_ring->rx_stats.packets++;
+		rx_ring->rx_stats.bytes += length;
+#endif
 
 next_desc:
 		rx_desc->wb.middle.status_error &= ~0xFF;
 		buffer_info->skb = NULL;
-		if(unlikely(++i == rx_ring->count)) i = 0;
 
-		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+
 		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
 	}
 	rx_ring->next_to_clean = i;
-	adapter->alloc_rx_buf(adapter, rx_ring);
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
 	return cleaned;
 }
@@ -3616,7 +3941,8 @@ next_desc:
 
 static void
 e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
-                       struct e1000_rx_ring *rx_ring)
+                       struct e1000_rx_ring *rx_ring,
+                       int cleaned_count)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
@@ -3629,11 +3955,18 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
 
-	while(!buffer_info->skb) {
-		skb = dev_alloc_skb(bufsz);
+	while (cleaned_count--) {
+		if (!(skb = buffer_info->skb))
+			skb = dev_alloc_skb(bufsz);
+		else {
+			skb_trim(skb, 0);
+			goto map_skb;
+		}
 
-		if(unlikely(!skb)) {
+		if (unlikely(!skb)) {
 			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
@@ -3670,6 +4003,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
+map_skb:
 		buffer_info->dma = pci_map_single(pdev,
 						  skb->data,
 						  adapter->rx_buffer_len,
@@ -3695,20 +4029,23 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 
-		if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
-			/* Force memory writes to complete before letting h/w
-			 * know there are new descriptors to fetch.  (Only
-			 * applicable for weak-ordered memory model archs,
-			 * such as IA-64). */
-			wmb();
-			writel(i, adapter->hw.hw_addr + rx_ring->rdt);
-		}
-
-		if(unlikely(++i == rx_ring->count)) i = 0;
+		if (unlikely(++i == rx_ring->count))
+			i = 0;
 		buffer_info = &rx_ring->buffer_info[i];
 	}
 
-	rx_ring->next_to_use = i;
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+	}
 }
 
 /**
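The rewritten tail update above batches the doorbell: descriptors are filled in memory first, then a single write barrier and one RDT write publish the whole batch, with the tail left pointing at the last valid descriptor (hence the i-- with wrap). A sketch with stand-ins for wmb()/writel():

	static void publish_rx_descriptors(volatile unsigned int *rdt,
					   unsigned int i, unsigned int old,
					   unsigned int ring_count,
					   void (*write_barrier)(void))
	{
		if (i == old)
			return;			/* nothing new this round */
		i = (i == 0) ? ring_count - 1 : i - 1;	/* last valid desc */
		write_barrier();	/* order desc stores before doorbell */
		*rdt = i;
	}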
@@ -3718,7 +4055,8 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
 static void
 e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-                          struct e1000_rx_ring *rx_ring)
+                          struct e1000_rx_ring *rx_ring,
+                          int cleaned_count)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
@@ -3734,16 +4072,18 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 	ps_page = &rx_ring->ps_page[i];
 	ps_page_dma = &rx_ring->ps_page_dma[i];
 
-	while(!buffer_info->skb) {
+	while (cleaned_count--) {
 		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 
-		for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
 			if (j < adapter->rx_ps_pages) {
 				if (likely(!ps_page->ps_page[j])) {
 					ps_page->ps_page[j] =
 						alloc_page(GFP_ATOMIC);
-					if (unlikely(!ps_page->ps_page[j]))
+					if (unlikely(!ps_page->ps_page[j])) {
+						adapter->alloc_rx_buff_failed++;
 						goto no_buffers;
+					}
 					ps_page_dma->ps_page_dma[j] =
 						pci_map_page(pdev,
 							    ps_page->ps_page[j],
@@ -3751,7 +4091,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 							    PCI_DMA_FROMDEVICE);
 				}
 				/* Refresh the desc even if buffer_addrs didn't
-				 * change because each write-back erases 
+				 * change because each write-back erases
 				 * this info.
 				 */
 				rx_desc->read.buffer_addr[j+1] =
@@ -3762,8 +4102,10 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 
 		skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
 
-		if(unlikely(!skb))
+		if (unlikely(!skb)) {
+			adapter->alloc_rx_buff_failed++;
 			break;
+		}
 
 		/* Make buffer alignment 2 beyond a 16 byte boundary
 		 * this will result in a 16 byte aligned IP header after
@@ -3781,27 +4123,28 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 
 		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
 
-		if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
-			/* Force memory writes to complete before letting h/w
-			 * know there are new descriptors to fetch.  (Only
-			 * applicable for weak-ordered memory model archs,
-			 * such as IA-64). */
-			wmb();
-			/* Hardware increments by 16 bytes, but packet split
-			 * descriptors are 32 bytes...so we increment tail
-			 * twice as much.
-			 */
-			writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
-		}
-
-		if(unlikely(++i == rx_ring->count)) i = 0;
+		if (unlikely(++i == rx_ring->count)) i = 0;
 		buffer_info = &rx_ring->buffer_info[i];
 		ps_page = &rx_ring->ps_page[i];
 		ps_page_dma = &rx_ring->ps_page_dma[i];
 	}
 
 no_buffers:
-	rx_ring->next_to_use = i;
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		/* Hardware increments by 16 bytes, but packet split
+		 * descriptors are 32 bytes...so we increment tail
+		 * twice as much.
+		 */
+		writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
+	}
 }
 
 /**
@@ -3815,24 +4158,24 @@ e1000_smartspeed(struct e1000_adapter *adapter)
 	uint16_t phy_status;
 	uint16_t phy_ctrl;
 
-	if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
+	if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
 	   !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
 		return;
 
-	if(adapter->smartspeed == 0) {
+	if (adapter->smartspeed == 0) {
 		/* If Master/Slave config fault is asserted twice,
 		 * we assume back-to-back */
 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
-		if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
-		if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
-		if(phy_ctrl & CR_1000T_MS_ENABLE) {
+		if (phy_ctrl & CR_1000T_MS_ENABLE) {
 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
 			e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
 					    phy_ctrl);
 			adapter->smartspeed++;
-			if(!e1000_phy_setup_autoneg(&adapter->hw) &&
+			if (!e1000_phy_setup_autoneg(&adapter->hw) &&
 			   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
 					       &phy_ctrl)) {
 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
@@ -3842,12 +4185,12 @@ e1000_smartspeed(struct e1000_adapter *adapter)
 			}
 		}
 		return;
-	} else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
 		/* If still no link, perhaps using 2/3 pair cable */
 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
 		phy_ctrl |= CR_1000T_MS_ENABLE;
 		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
-		if(!e1000_phy_setup_autoneg(&adapter->hw) &&
+		if (!e1000_phy_setup_autoneg(&adapter->hw) &&
 		   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
 			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
 				     MII_CR_RESTART_AUTO_NEG);
@@ -3855,7 +4198,7 @@ e1000_smartspeed(struct e1000_adapter *adapter)
 		}
 	}
 	/* Restart process after E1000_SMARTSPEED_MAX iterations */
-	if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
 		adapter->smartspeed = 0;
 }
 
@@ -3896,7 +4239,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 	uint16_t spddplx;
 	unsigned long flags;
 
-	if(adapter->hw.media_type != e1000_media_type_copper)
+	if (adapter->hw.media_type != e1000_media_type_copper)
 		return -EOPNOTSUPP;
 
 	switch (cmd) {
@@ -3904,10 +4247,10 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 		data->phy_id = adapter->hw.phy_addr;
 		break;
 	case SIOCGMIIREG:
-		if(!capable(CAP_NET_ADMIN))
+		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 		spin_lock_irqsave(&adapter->stats_lock, flags);
-		if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+		if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
 				   &data->val_out)) {
 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
 			return -EIO;
@@ -3915,23 +4258,23 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
 		break;
 	case SIOCSMIIREG:
-		if(!capable(CAP_NET_ADMIN))
+		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
-		if(data->reg_num & ~(0x1F))
+		if (data->reg_num & ~(0x1F))
 			return -EFAULT;
 		mii_reg = data->val_in;
 		spin_lock_irqsave(&adapter->stats_lock, flags);
-		if(e1000_write_phy_reg(&adapter->hw, data->reg_num,
+		if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
 					mii_reg)) {
 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
 			return -EIO;
 		}
-		if(adapter->hw.phy_type == e1000_phy_m88) {
+		if (adapter->hw.phy_type == e1000_phy_m88) {
 			switch (data->reg_num) {
 			case PHY_CTRL:
-				if(mii_reg & MII_CR_POWER_DOWN)
+				if (mii_reg & MII_CR_POWER_DOWN)
 					break;
-				if(mii_reg & MII_CR_AUTO_NEG_EN) {
+				if (mii_reg & MII_CR_AUTO_NEG_EN) {
 					adapter->hw.autoneg = 1;
 					adapter->hw.autoneg_advertised = 0x2F;
 				} else {
@@ -3946,14 +4289,14 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 							   HALF_DUPLEX;
 					retval = e1000_set_spd_dplx(adapter,
 								    spddplx);
-					if(retval) {
+					if (retval) {
 						spin_unlock_irqrestore(
 							&adapter->stats_lock,
 							flags);
 						return retval;
 					}
 				}
-				if(netif_running(adapter->netdev)) {
+				if (netif_running(adapter->netdev)) {
 					e1000_down(adapter);
 					e1000_up(adapter);
 				} else
@@ -3961,7 +4304,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 				break;
 			case M88E1000_PHY_SPEC_CTRL:
 			case M88E1000_EXT_PHY_SPEC_CTRL:
-				if(e1000_phy_reset(&adapter->hw)) {
+				if (e1000_phy_reset(&adapter->hw)) {
 					spin_unlock_irqrestore(
 						&adapter->stats_lock, flags);
 					return -EIO;
@@ -3971,9 +4314,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 		} else {
 			switch (data->reg_num) {
 			case PHY_CTRL:
-				if(mii_reg & MII_CR_POWER_DOWN)
+				if (mii_reg & MII_CR_POWER_DOWN)
 					break;
-				if(netif_running(adapter->netdev)) {
+				if (netif_running(adapter->netdev)) {
 					e1000_down(adapter);
 					e1000_up(adapter);
 				} else
@@ -3995,7 +4338,7 @@ e1000_pci_set_mwi(struct e1000_hw *hw)
 	struct e1000_adapter *adapter = hw->back;
 	int ret_val = pci_set_mwi(adapter->pdev);
 
-	if(ret_val)
+	if (ret_val)
 		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
 }
 
@@ -4044,7 +4387,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 	e1000_irq_disable(adapter);
 	adapter->vlgrp = grp;
 
-	if(grp) {
+	if (grp) {
 		/* enable VLAN tag insert/strip */
 		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
 		ctrl |= E1000_CTRL_VME;
@@ -4066,7 +4409,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		rctl = E1000_READ_REG(&adapter->hw, RCTL);
 		rctl &= ~E1000_RCTL_VFE;
 		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
-		if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
+		if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
 			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
 			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 		}
@@ -4080,9 +4423,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	uint32_t vfta, index;
-	if((adapter->hw.mng_cookie.status &
-		E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
-		(vid == adapter->mng_vlan_id))
+
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+	    (vid == adapter->mng_vlan_id))
 		return;
 	/* add VID to filter table */
 	index = (vid >> 5) & 0x7F;
@@ -4099,15 +4443,19 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
 
 	e1000_irq_disable(adapter);
 
-	if(adapter->vlgrp)
+	if (adapter->vlgrp)
 		adapter->vlgrp->vlan_devices[vid] = NULL;
 
 	e1000_irq_enable(adapter);
 
-	if((adapter->hw.mng_cookie.status &
+	if ((adapter->hw.mng_cookie.status &
 	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
-	    (vid == adapter->mng_vlan_id))
+	    (vid == adapter->mng_vlan_id)) {
+		/* release control to f/w */
+		e1000_release_hw_control(adapter);
 		return;
+	}
+
 	/* remove VID from filter table */
 	index = (vid >> 5) & 0x7F;
 	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
@@ -4120,10 +4468,10 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
 {
 	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
 
-	if(adapter->vlgrp) {
+	if (adapter->vlgrp) {
 		uint16_t vid;
-		for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
-			if(!adapter->vlgrp->vlan_devices[vid])
+		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+			if (!adapter->vlgrp->vlan_devices[vid])
 				continue;
 			e1000_vlan_rx_add_vid(adapter->netdev, vid);
 		}
@@ -4136,13 +4484,13 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
 	adapter->hw.autoneg = 0;
 
 	/* Fiber NICs only allow 1000 gbps Full duplex */
-	if((adapter->hw.media_type == e1000_media_type_fiber) &&
+	if ((adapter->hw.media_type == e1000_media_type_fiber) &&
 		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
 		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
 		return -EINVAL;
 	}
 
-	switch(spddplx) {
+	switch (spddplx) {
 	case SPEED_10 + DUPLEX_HALF:
 		adapter->hw.forced_speed_duplex = e1000_10_half;
 		break;
@@ -4168,35 +4516,92 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
 }
 
 #ifdef CONFIG_PM
+/* these functions save and restore 16 or 64 dwords (64-256 bytes) of config
+ * space versus the 64 bytes that pci_[save|restore]_state handle
+ */
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+static int
+e1000_pci_save_state(struct e1000_adapter *adapter)
+{
+	struct pci_dev *dev = adapter->pdev;
+	int size;
+	int i;
+	if (adapter->hw.mac_type >= e1000_82571)
+		size = PCIE_CONFIG_SPACE_LEN;
+	else
+		size = PCI_CONFIG_SPACE_LEN;
+
+	WARN_ON(adapter->config_space != NULL);
+
+	adapter->config_space = kmalloc(size, GFP_KERNEL);
+	if (!adapter->config_space) {
+		DPRINTK(PROBE, ERR, "unable to allocate %d bytes\n", size);
+		return -ENOMEM;
+	}
+	for (i = 0; i < (size / 4); i++)
+		pci_read_config_dword(dev, i * 4, &adapter->config_space[i]);
+	return 0;
+}
+
+static void
+e1000_pci_restore_state(struct e1000_adapter *adapter)
+{
+	struct pci_dev *dev = adapter->pdev;
+	int size;
+	int i;
+	if (adapter->config_space == NULL)
+		return;
+	if (adapter->hw.mac_type >= e1000_82571)
+		size = PCIE_CONFIG_SPACE_LEN;
+	else
+		size = PCI_CONFIG_SPACE_LEN;
+	for (i = 0; i < (size / 4); i++)
+		pci_write_config_dword(dev, i * 4, adapter->config_space[i]);
+	kfree(adapter->config_space);
+	adapter->config_space = NULL;
+	return;
+}
+#endif /* CONFIG_PM */
+
 static int
 e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
+	uint32_t ctrl, ctrl_ext, rctl, manc, status;
 	uint32_t wufc = adapter->wol;
+	int retval = 0;
 
 	netif_device_detach(netdev);
 
-	if(netif_running(netdev))
+	if (netif_running(netdev))
 		e1000_down(adapter);
 
+#ifdef CONFIG_PM
+	/* implement our own version of pci_save_state(pdev) because pci
+	 * express adapters have larger 256 byte config spaces */
+	retval = e1000_pci_save_state(adapter);
+	if (retval)
+		return retval;
+#endif
+
 	status = E1000_READ_REG(&adapter->hw, STATUS);
-	if(status & E1000_STATUS_LU)
+	if (status & E1000_STATUS_LU)
 		wufc &= ~E1000_WUFC_LNKC;
 
-	if(wufc) {
+	if (wufc) {
 		e1000_setup_rctl(adapter);
 		e1000_set_multi(netdev);
 
 		/* turn on all-multi mode if wake on multicast is enabled */
-		if(adapter->wol & E1000_WUFC_MC) {
+		if (adapter->wol & E1000_WUFC_MC) {
 			rctl = E1000_READ_REG(&adapter->hw, RCTL);
 			rctl |= E1000_RCTL_MPE;
 			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
 		}
 
-		if(adapter->hw.mac_type >= e1000_82540) {
+		if (adapter->hw.mac_type >= e1000_82540) {
 			ctrl = E1000_READ_REG(&adapter->hw, CTRL);
 			/* advertise wake from D3Cold */
 			#define E1000_CTRL_ADVD3WUC 0x00100000
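e1000_pci_save_state above exists because PCI Express functions expose 256 bytes of config space while the pci_save_state() of this era only handled the legacy 64. A user-space toy of the same snapshot loop, with read_cfg standing in for pci_read_config_dword:

	#include <stdlib.h>

	static unsigned int *save_config_space(int is_pcie,
				unsigned int (*read_cfg)(int offset))
	{
		int size = is_pcie ? 256 : 64;	/* PCIE_/PCI_CONFIG_SPACE_LEN */
		unsigned int *space = malloc(size);
		int i;

		if (!space)
			return NULL;
		for (i = 0; i < size / 4; i++)
			space[i] = read_cfg(i * 4);	/* one dword at a time */
		return space;
	}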
@@ -4207,7 +4612,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 			E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
 		}
 
-		if(adapter->hw.media_type == e1000_media_type_fiber ||
+		if (adapter->hw.media_type == e1000_media_type_fiber ||
 		   adapter->hw.media_type == e1000_media_type_internal_serdes) {
 			/* keep the laser running in D3 */
 			ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
@@ -4220,96 +4625,96 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4220 4625
4221 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); 4626 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
4222 E1000_WRITE_REG(&adapter->hw, WUFC, wufc); 4627 E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
4223 pci_enable_wake(pdev, 3, 1); 4628 retval = pci_enable_wake(pdev, PCI_D3hot, 1);
4224 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ 4629 if (retval)
4630 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4631 retval = pci_enable_wake(pdev, PCI_D3cold, 1);
4632 if (retval)
4633 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4225 } else { 4634 } else {
4226 E1000_WRITE_REG(&adapter->hw, WUC, 0); 4635 E1000_WRITE_REG(&adapter->hw, WUC, 0);
4227 E1000_WRITE_REG(&adapter->hw, WUFC, 0); 4636 E1000_WRITE_REG(&adapter->hw, WUFC, 0);
4228 pci_enable_wake(pdev, 3, 0); 4637 retval = pci_enable_wake(pdev, PCI_D3hot, 0);
4229 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */ 4638 if (retval)
4639 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4640 retval = pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */
4641 if (retval)
4642 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4230 } 4643 }
4231 4644
4232 pci_save_state(pdev); 4645 if (adapter->hw.mac_type >= e1000_82540 &&
4233
4234 if(adapter->hw.mac_type >= e1000_82540 &&
4235 adapter->hw.media_type == e1000_media_type_copper) { 4646 adapter->hw.media_type == e1000_media_type_copper) {
4236 manc = E1000_READ_REG(&adapter->hw, MANC); 4647 manc = E1000_READ_REG(&adapter->hw, MANC);
4237 if(manc & E1000_MANC_SMBUS_EN) { 4648 if (manc & E1000_MANC_SMBUS_EN) {
4238 manc |= E1000_MANC_ARP_EN; 4649 manc |= E1000_MANC_ARP_EN;
4239 E1000_WRITE_REG(&adapter->hw, MANC, manc); 4650 E1000_WRITE_REG(&adapter->hw, MANC, manc);
4240 pci_enable_wake(pdev, 3, 1); 4651 retval = pci_enable_wake(pdev, PCI_D3hot, 1);
4241 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ 4652 if (retval)
4653 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4654 retval = pci_enable_wake(pdev, PCI_D3cold, 1);
4655 if (retval)
4656 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4242 } 4657 }
4243 } 4658 }
4244 4659
4245 switch(adapter->hw.mac_type) { 4660 /* Release control of h/w to f/w. If f/w is AMT enabled, this
4246 case e1000_82571: 4661 * would have already happened in close and is redundant. */
4247 case e1000_82572: 4662 e1000_release_hw_control(adapter);
4248 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
4249 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
4250 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4251 break;
4252 case e1000_82573:
4253 swsm = E1000_READ_REG(&adapter->hw, SWSM);
4254 E1000_WRITE_REG(&adapter->hw, SWSM,
4255 swsm & ~E1000_SWSM_DRV_LOAD);
4256 break;
4257 default:
4258 break;
4259 }
4260 4663
4261 pci_disable_device(pdev); 4664 pci_disable_device(pdev);
4262 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4665
4666 retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
4667 if (retval)
4668 DPRINTK(PROBE, ERR, "Error in setting power state\n");
4263 4669
4264 return 0; 4670 return 0;
4265} 4671}
4266 4672
4673#ifdef CONFIG_PM
4267static int 4674static int
4268e1000_resume(struct pci_dev *pdev) 4675e1000_resume(struct pci_dev *pdev)
4269{ 4676{
4270 struct net_device *netdev = pci_get_drvdata(pdev); 4677 struct net_device *netdev = pci_get_drvdata(pdev);
4271 struct e1000_adapter *adapter = netdev_priv(netdev); 4678 struct e1000_adapter *adapter = netdev_priv(netdev);
4272 uint32_t manc, ret_val, swsm; 4679 int retval;
4273 uint32_t ctrl_ext; 4680 uint32_t manc, ret_val;
4274 4681
4275 pci_set_power_state(pdev, PCI_D0); 4682 retval = pci_set_power_state(pdev, PCI_D0);
4276 pci_restore_state(pdev); 4683 if (retval)
4684 DPRINTK(PROBE, ERR, "Error in setting power state\n");
4685 e1000_pci_restore_state(adapter);
4277 ret_val = pci_enable_device(pdev); 4686 ret_val = pci_enable_device(pdev);
4278 pci_set_master(pdev); 4687 pci_set_master(pdev);
4279 4688
4280 pci_enable_wake(pdev, PCI_D3hot, 0); 4689 retval = pci_enable_wake(pdev, PCI_D3hot, 0);
4281 pci_enable_wake(pdev, PCI_D3cold, 0); 4690 if (retval)
 4691		DPRINTK(PROBE, ERR, "Error disabling D3 wake\n");
 4692	retval = pci_enable_wake(pdev, PCI_D3cold, 0);
 4693	if (retval)
 4694		DPRINTK(PROBE, ERR, "Error disabling D3 cold wake\n");
4282 4695
4283 e1000_reset(adapter); 4696 e1000_reset(adapter);
4284 E1000_WRITE_REG(&adapter->hw, WUS, ~0); 4697 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
4285 4698
4286 if(netif_running(netdev)) 4699 if (netif_running(netdev))
4287 e1000_up(adapter); 4700 e1000_up(adapter);
4288 4701
4289 netif_device_attach(netdev); 4702 netif_device_attach(netdev);
4290 4703
4291 if(adapter->hw.mac_type >= e1000_82540 && 4704 if (adapter->hw.mac_type >= e1000_82540 &&
4292 adapter->hw.media_type == e1000_media_type_copper) { 4705 adapter->hw.media_type == e1000_media_type_copper) {
4293 manc = E1000_READ_REG(&adapter->hw, MANC); 4706 manc = E1000_READ_REG(&adapter->hw, MANC);
4294 manc &= ~(E1000_MANC_ARP_EN); 4707 manc &= ~(E1000_MANC_ARP_EN);
4295 E1000_WRITE_REG(&adapter->hw, MANC, manc); 4708 E1000_WRITE_REG(&adapter->hw, MANC, manc);
4296 } 4709 }
4297 4710
4298 switch(adapter->hw.mac_type) { 4711 /* If the controller is 82573 and f/w is AMT, do not set
4299 case e1000_82571: 4712 * DRV_LOAD until the interface is up. For all other cases,
4300 case e1000_82572: 4713 * let the f/w know that the h/w is now under the control
4301 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); 4714 * of the driver. */
4302 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, 4715 if (adapter->hw.mac_type != e1000_82573 ||
4303 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 4716 !e1000_check_mng_mode(&adapter->hw))
4304 break; 4717 e1000_get_hw_control(adapter);
4305 case e1000_82573:
4306 swsm = E1000_READ_REG(&adapter->hw, SWSM);
4307 E1000_WRITE_REG(&adapter->hw, SWSM,
4308 swsm | E1000_SWSM_DRV_LOAD);
4309 break;
4310 default:
4311 break;
4312 }
4313 4718
4314 return 0; 4719 return 0;
4315} 4720}
@@ -4327,6 +4732,9 @@ e1000_netpoll(struct net_device *netdev)
4327 disable_irq(adapter->pdev->irq); 4732 disable_irq(adapter->pdev->irq);
4328 e1000_intr(adapter->pdev->irq, netdev, NULL); 4733 e1000_intr(adapter->pdev->irq, netdev, NULL);
4329 e1000_clean_tx_irq(adapter, adapter->tx_ring); 4734 e1000_clean_tx_irq(adapter, adapter->tx_ring);
4735#ifndef CONFIG_E1000_NAPI
4736 adapter->clean_rx(adapter, adapter->rx_ring);
4737#endif
4330 enable_irq(adapter->pdev->irq); 4738 enable_irq(adapter->pdev->irq);
4331} 4739}
4332#endif 4740#endif
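The suspend/resume hunks above replace the magic power-state arguments 3 and 4 with the symbolic PCI_D3hot and PCI_D3cold constants and check every pci_enable_wake() return value. A minimal sketch of that checked pattern, factored into a helper (the wrapper itself is illustrative, not part of the patch; pci_enable_wake() is the real PCI core API):

	/* Sketch only: enable or disable wake for both D3 states,
	 * propagating the first failure. */
	static int e1000_wake_both_d3(struct pci_dev *pdev, int enable)
	{
		int err;

		err = pci_enable_wake(pdev, PCI_D3hot, enable);
		if (err)
			return err;
		return pci_enable_wake(pdev, PCI_D3cold, enable);
	}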
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index aac64de61437..9790db974dc1 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -47,7 +47,7 @@
47 BUG(); \ 47 BUG(); \
48 } else { \ 48 } else { \
49 msleep(x); \ 49 msleep(x); \
50 } } while(0) 50 } } while (0)
51 51
52/* Some workarounds require millisecond delays and are run during interrupt 52/* Some workarounds require millisecond delays and are run during interrupt
53 * context. Most notably, when establishing link, the phy may need tweaking 53 * context. Most notably, when establishing link, the phy may need tweaking
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index ccbbe5ad8e0f..3768d83cd577 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -177,7 +177,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
177 * 177 *
178 * Valid Range: 100-100000 (0=off, 1=dynamic) 178 * Valid Range: 100-100000 (0=off, 1=dynamic)
179 * 179 *
180 * Default Value: 1 180 * Default Value: 8000
181 */ 181 */
182 182
183E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); 183E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
@@ -227,7 +227,7 @@ static int __devinit
227e1000_validate_option(int *value, struct e1000_option *opt, 227e1000_validate_option(int *value, struct e1000_option *opt,
228 struct e1000_adapter *adapter) 228 struct e1000_adapter *adapter)
229{ 229{
230 if(*value == OPTION_UNSET) { 230 if (*value == OPTION_UNSET) {
231 *value = opt->def; 231 *value = opt->def;
232 return 0; 232 return 0;
233 } 233 }
@@ -244,7 +244,7 @@ e1000_validate_option(int *value, struct e1000_option *opt,
244 } 244 }
245 break; 245 break;
246 case range_option: 246 case range_option:
247 if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 247 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
248 DPRINTK(PROBE, INFO, 248 DPRINTK(PROBE, INFO,
249 "%s set to %i\n", opt->name, *value); 249 "%s set to %i\n", opt->name, *value);
250 return 0; 250 return 0;
@@ -254,10 +254,10 @@ e1000_validate_option(int *value, struct e1000_option *opt,
254 int i; 254 int i;
255 struct e1000_opt_list *ent; 255 struct e1000_opt_list *ent;
256 256
257 for(i = 0; i < opt->arg.l.nr; i++) { 257 for (i = 0; i < opt->arg.l.nr; i++) {
258 ent = &opt->arg.l.p[i]; 258 ent = &opt->arg.l.p[i];
259 if(*value == ent->i) { 259 if (*value == ent->i) {
260 if(ent->str[0] != '\0') 260 if (ent->str[0] != '\0')
261 DPRINTK(PROBE, INFO, "%s\n", ent->str); 261 DPRINTK(PROBE, INFO, "%s\n", ent->str);
262 return 0; 262 return 0;
263 } 263 }
@@ -291,7 +291,7 @@ void __devinit
291e1000_check_options(struct e1000_adapter *adapter) 291e1000_check_options(struct e1000_adapter *adapter)
292{ 292{
293 int bd = adapter->bd_number; 293 int bd = adapter->bd_number;
294 if(bd >= E1000_MAX_NIC) { 294 if (bd >= E1000_MAX_NIC) {
295 DPRINTK(PROBE, NOTICE, 295 DPRINTK(PROBE, NOTICE,
296 "Warning: no configuration for board #%i\n", bd); 296 "Warning: no configuration for board #%i\n", bd);
297 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); 297 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
@@ -315,12 +315,12 @@ e1000_check_options(struct e1000_adapter *adapter)
315 if (num_TxDescriptors > bd) { 315 if (num_TxDescriptors > bd) {
316 tx_ring->count = TxDescriptors[bd]; 316 tx_ring->count = TxDescriptors[bd];
317 e1000_validate_option(&tx_ring->count, &opt, adapter); 317 e1000_validate_option(&tx_ring->count, &opt, adapter);
318 E1000_ROUNDUP(tx_ring->count, 318 E1000_ROUNDUP(tx_ring->count,
319 REQ_TX_DESCRIPTOR_MULTIPLE); 319 REQ_TX_DESCRIPTOR_MULTIPLE);
320 } else { 320 } else {
321 tx_ring->count = opt.def; 321 tx_ring->count = opt.def;
322 } 322 }
323 for (i = 0; i < adapter->num_queues; i++) 323 for (i = 0; i < adapter->num_tx_queues; i++)
324 tx_ring[i].count = tx_ring->count; 324 tx_ring[i].count = tx_ring->count;
325 } 325 }
326 { /* Receive Descriptor Count */ 326 { /* Receive Descriptor Count */
@@ -341,12 +341,12 @@ e1000_check_options(struct e1000_adapter *adapter)
341 if (num_RxDescriptors > bd) { 341 if (num_RxDescriptors > bd) {
342 rx_ring->count = RxDescriptors[bd]; 342 rx_ring->count = RxDescriptors[bd];
343 e1000_validate_option(&rx_ring->count, &opt, adapter); 343 e1000_validate_option(&rx_ring->count, &opt, adapter);
344 E1000_ROUNDUP(rx_ring->count, 344 E1000_ROUNDUP(rx_ring->count,
345 REQ_RX_DESCRIPTOR_MULTIPLE); 345 REQ_RX_DESCRIPTOR_MULTIPLE);
346 } else { 346 } else {
347 rx_ring->count = opt.def; 347 rx_ring->count = opt.def;
348 } 348 }
349 for (i = 0; i < adapter->num_queues; i++) 349 for (i = 0; i < adapter->num_rx_queues; i++)
350 rx_ring[i].count = rx_ring->count; 350 rx_ring[i].count = rx_ring->count;
351 } 351 }
352 { /* Checksum Offload Enable/Disable */ 352 { /* Checksum Offload Enable/Disable */
@@ -388,7 +388,7 @@ e1000_check_options(struct e1000_adapter *adapter)
388 e1000_validate_option(&fc, &opt, adapter); 388 e1000_validate_option(&fc, &opt, adapter);
389 adapter->hw.fc = adapter->hw.original_fc = fc; 389 adapter->hw.fc = adapter->hw.original_fc = fc;
390 } else { 390 } else {
391 adapter->hw.fc = opt.def; 391 adapter->hw.fc = adapter->hw.original_fc = opt.def;
392 } 392 }
393 } 393 }
394 { /* Transmit Interrupt Delay */ 394 { /* Transmit Interrupt Delay */
@@ -403,7 +403,7 @@ e1000_check_options(struct e1000_adapter *adapter)
403 403
404 if (num_TxIntDelay > bd) { 404 if (num_TxIntDelay > bd) {
405 adapter->tx_int_delay = TxIntDelay[bd]; 405 adapter->tx_int_delay = TxIntDelay[bd];
406 e1000_validate_option(&adapter->tx_int_delay, &opt, 406 e1000_validate_option(&adapter->tx_int_delay, &opt,
407 adapter); 407 adapter);
408 } else { 408 } else {
409 adapter->tx_int_delay = opt.def; 409 adapter->tx_int_delay = opt.def;
@@ -421,7 +421,7 @@ e1000_check_options(struct e1000_adapter *adapter)
421 421
422 if (num_TxAbsIntDelay > bd) { 422 if (num_TxAbsIntDelay > bd) {
423 adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; 423 adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
424 e1000_validate_option(&adapter->tx_abs_int_delay, &opt, 424 e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
425 adapter); 425 adapter);
426 } else { 426 } else {
427 adapter->tx_abs_int_delay = opt.def; 427 adapter->tx_abs_int_delay = opt.def;
@@ -439,7 +439,7 @@ e1000_check_options(struct e1000_adapter *adapter)
439 439
440 if (num_RxIntDelay > bd) { 440 if (num_RxIntDelay > bd) {
441 adapter->rx_int_delay = RxIntDelay[bd]; 441 adapter->rx_int_delay = RxIntDelay[bd];
442 e1000_validate_option(&adapter->rx_int_delay, &opt, 442 e1000_validate_option(&adapter->rx_int_delay, &opt,
443 adapter); 443 adapter);
444 } else { 444 } else {
445 adapter->rx_int_delay = opt.def; 445 adapter->rx_int_delay = opt.def;
@@ -457,7 +457,7 @@ e1000_check_options(struct e1000_adapter *adapter)
457 457
458 if (num_RxAbsIntDelay > bd) { 458 if (num_RxAbsIntDelay > bd) {
459 adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; 459 adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
460 e1000_validate_option(&adapter->rx_abs_int_delay, &opt, 460 e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
461 adapter); 461 adapter);
462 } else { 462 } else {
463 adapter->rx_abs_int_delay = opt.def; 463 adapter->rx_abs_int_delay = opt.def;
@@ -475,17 +475,17 @@ e1000_check_options(struct e1000_adapter *adapter)
475 475
476 if (num_InterruptThrottleRate > bd) { 476 if (num_InterruptThrottleRate > bd) {
477 adapter->itr = InterruptThrottleRate[bd]; 477 adapter->itr = InterruptThrottleRate[bd];
478 switch(adapter->itr) { 478 switch (adapter->itr) {
479 case 0: 479 case 0:
480 DPRINTK(PROBE, INFO, "%s turned off\n", 480 DPRINTK(PROBE, INFO, "%s turned off\n",
481 opt.name); 481 opt.name);
482 break; 482 break;
483 case 1: 483 case 1:
484 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 484 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
485 opt.name); 485 opt.name);
486 break; 486 break;
487 default: 487 default:
488 e1000_validate_option(&adapter->itr, &opt, 488 e1000_validate_option(&adapter->itr, &opt,
489 adapter); 489 adapter);
490 break; 490 break;
491 } 491 }
@@ -494,7 +494,7 @@ e1000_check_options(struct e1000_adapter *adapter)
494 } 494 }
495 } 495 }
496 496
497 switch(adapter->hw.media_type) { 497 switch (adapter->hw.media_type) {
498 case e1000_media_type_fiber: 498 case e1000_media_type_fiber:
499 case e1000_media_type_internal_serdes: 499 case e1000_media_type_internal_serdes:
500 e1000_check_fiber_options(adapter); 500 e1000_check_fiber_options(adapter);
@@ -518,17 +518,17 @@ static void __devinit
518e1000_check_fiber_options(struct e1000_adapter *adapter) 518e1000_check_fiber_options(struct e1000_adapter *adapter)
519{ 519{
520 int bd = adapter->bd_number; 520 int bd = adapter->bd_number;
521 if(num_Speed > bd) { 521 if (num_Speed > bd) {
522 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " 522 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
523 "parameter ignored\n"); 523 "parameter ignored\n");
524 } 524 }
525 525
526 if(num_Duplex > bd) { 526 if (num_Duplex > bd) {
527 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " 527 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
528 "parameter ignored\n"); 528 "parameter ignored\n");
529 } 529 }
530 530
531 if((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { 531 if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
532 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " 532 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
533 "not valid for fiber adapters, " 533 "not valid for fiber adapters, "
534 "parameter ignored\n"); 534 "parameter ignored\n");
@@ -584,6 +584,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
584 .p = dplx_list }} 584 .p = dplx_list }}
585 }; 585 };
586 586
587 if (e1000_check_phy_reset_block(&adapter->hw)) {
588 DPRINTK(PROBE, INFO,
589 "Link active due to SoL/IDER Session. "
590 "Speed/Duplex/AutoNeg parameter ignored.\n");
591 return;
592 }
587 if (num_Duplex > bd) { 593 if (num_Duplex > bd) {
588 dplx = Duplex[bd]; 594 dplx = Duplex[bd];
589 e1000_validate_option(&dplx, &opt, adapter); 595 e1000_validate_option(&dplx, &opt, adapter);
@@ -592,7 +598,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
592 } 598 }
593 } 599 }
594 600
595 if((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { 601 if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
596 DPRINTK(PROBE, INFO, 602 DPRINTK(PROBE, INFO,
597 "AutoNeg specified along with Speed or Duplex, " 603 "AutoNeg specified along with Speed or Duplex, "
598 "parameter ignored\n"); 604 "parameter ignored\n");
@@ -653,7 +659,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
653 switch (speed + dplx) { 659 switch (speed + dplx) {
654 case 0: 660 case 0:
655 adapter->hw.autoneg = adapter->fc_autoneg = 1; 661 adapter->hw.autoneg = adapter->fc_autoneg = 1;
656 if((num_Speed > bd) && (speed != 0 || dplx != 0)) 662 if ((num_Speed > bd) && (speed != 0 || dplx != 0))
657 DPRINTK(PROBE, INFO, 663 DPRINTK(PROBE, INFO,
658 "Speed and duplex autonegotiation enabled\n"); 664 "Speed and duplex autonegotiation enabled\n");
659 break; 665 break;
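The InterruptThrottleRate logic above special-cases 0 (throttling off) and 1 (dynamic mode) before handing any other value to the generic range check against 100-100000. Condensed into one function (the name and the fall-back-to-default behavior are illustrative, not the driver's actual helper):

	/* Sketch of the ITR option flow described above. */
	static int validate_itr_sketch(int itr)
	{
		switch (itr) {
		case 0:				/* throttling disabled */
		case 1:				/* dynamic throttling */
			return itr;
		default:
			if (itr >= 100 && itr <= 100000)
				return itr;	/* fixed rate, in range */
			return 8000;		/* documented default */
		}
	}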
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 22c3a37bba5a..40ae36b20c9d 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -35,6 +35,8 @@
35#include <linux/tcp.h> 35#include <linux/tcp.h>
36#include <linux/udp.h> 36#include <linux/udp.h>
37#include <linux/etherdevice.h> 37#include <linux/etherdevice.h>
38#include <linux/in.h>
39#include <linux/ip.h>
38 40
39#include <linux/bitops.h> 41#include <linux/bitops.h>
40#include <linux/delay.h> 42#include <linux/delay.h>
@@ -55,13 +57,15 @@
55/* Constants */ 57/* Constants */
56#define VLAN_HLEN 4 58#define VLAN_HLEN 4
57#define FCS_LEN 4 59#define FCS_LEN 4
58#define WRAP NET_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN 60#define DMA_ALIGN 8 /* hw requires 8-byte alignment */
61#define HW_IP_ALIGN 2 /* hw aligns IP header */
62#define WRAP HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
59#define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7) 63#define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7)
60 64
61#define INT_CAUSE_UNMASK_ALL 0x0007ffff 65#define INT_UNMASK_ALL 0x0007ffff
62#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff 66#define INT_UNMASK_ALL_EXT 0x0011ffff
63#define INT_CAUSE_MASK_ALL 0x00000000 67#define INT_MASK_ALL 0x00000000
64#define INT_CAUSE_MASK_ALL_EXT 0x00000000 68#define INT_MASK_ALL_EXT 0x00000000
65#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL 69#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
66#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT 70#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
67 71
@@ -78,8 +82,9 @@
78static int eth_port_link_is_up(unsigned int eth_port_num); 82static int eth_port_link_is_up(unsigned int eth_port_num);
79static void eth_port_uc_addr_get(struct net_device *dev, 83static void eth_port_uc_addr_get(struct net_device *dev,
80 unsigned char *MacAddr); 84 unsigned char *MacAddr);
81static int mv643xx_eth_real_open(struct net_device *); 85static void eth_port_set_multicast_list(struct net_device *);
82static int mv643xx_eth_real_stop(struct net_device *); 86static int mv643xx_eth_open(struct net_device *);
87static int mv643xx_eth_stop(struct net_device *);
83static int mv643xx_eth_change_mtu(struct net_device *, int); 88static int mv643xx_eth_change_mtu(struct net_device *, int);
84static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *); 89static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *);
85static void eth_port_init_mac_tables(unsigned int eth_port_num); 90static void eth_port_init_mac_tables(unsigned int eth_port_num);
@@ -124,15 +129,8 @@ static inline void mv_write(int offset, u32 data)
124 */ 129 */
125static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) 130static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
126{ 131{
127 struct mv643xx_private *mp = netdev_priv(dev); 132 if ((new_mtu > 9500) || (new_mtu < 64))
128 unsigned long flags;
129
130 spin_lock_irqsave(&mp->lock, flags);
131
132 if ((new_mtu > 9500) || (new_mtu < 64)) {
133 spin_unlock_irqrestore(&mp->lock, flags);
134 return -EINVAL; 133 return -EINVAL;
135 }
136 134
137 dev->mtu = new_mtu; 135 dev->mtu = new_mtu;
138 /* 136 /*
@@ -142,17 +140,13 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
142 * to memory is full, which might fail the open function. 140 * to memory is full, which might fail the open function.
143 */ 141 */
144 if (netif_running(dev)) { 142 if (netif_running(dev)) {
145 if (mv643xx_eth_real_stop(dev)) 143 mv643xx_eth_stop(dev);
146 printk(KERN_ERR 144 if (mv643xx_eth_open(dev))
147 "%s: Fatal error on stopping device\n",
148 dev->name);
149 if (mv643xx_eth_real_open(dev))
150 printk(KERN_ERR 145 printk(KERN_ERR
151 "%s: Fatal error on opening device\n", 146 "%s: Fatal error on opening device\n",
152 dev->name); 147 dev->name);
153 } 148 }
154 149
155 spin_unlock_irqrestore(&mp->lock, flags);
156 return 0; 150 return 0;
157} 151}
158 152
@@ -170,15 +164,19 @@ static void mv643xx_eth_rx_task(void *data)
170 struct mv643xx_private *mp = netdev_priv(dev); 164 struct mv643xx_private *mp = netdev_priv(dev);
171 struct pkt_info pkt_info; 165 struct pkt_info pkt_info;
172 struct sk_buff *skb; 166 struct sk_buff *skb;
167 int unaligned;
173 168
174 if (test_and_set_bit(0, &mp->rx_task_busy)) 169 if (test_and_set_bit(0, &mp->rx_task_busy))
175 panic("%s: Error in test_set_bit / clear_bit", dev->name); 170 panic("%s: Error in test_set_bit / clear_bit", dev->name);
176 171
177 while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) { 172 while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
178 skb = dev_alloc_skb(RX_SKB_SIZE); 173 skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN);
179 if (!skb) 174 if (!skb)
180 break; 175 break;
181 mp->rx_ring_skbs++; 176 mp->rx_ring_skbs++;
177 unaligned = (u32)skb->data & (DMA_ALIGN - 1);
178 if (unaligned)
179 skb_reserve(skb, DMA_ALIGN - unaligned);
182 pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT; 180 pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
183 pkt_info.byte_cnt = RX_SKB_SIZE; 181 pkt_info.byte_cnt = RX_SKB_SIZE;
184 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE, 182 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
@@ -189,7 +187,7 @@ static void mv643xx_eth_rx_task(void *data)
189 "%s: Error allocating RX Ring\n", dev->name); 187 "%s: Error allocating RX Ring\n", dev->name);
190 break; 188 break;
191 } 189 }
192 skb_reserve(skb, 2); 190 skb_reserve(skb, HW_IP_ALIGN);
193 } 191 }
194 clear_bit(0, &mp->rx_task_busy); 192 clear_bit(0, &mp->rx_task_busy);
195 /* 193 /*
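The refill loop above over-allocates each receive skb by DMA_ALIGN bytes and rounds skb->data up to the next 8-byte boundary before the usual HW_IP_ALIGN reserve. Worked example: if dev_alloc_skb() hands back data at an address ending in ...5, then unaligned = 5 and skb_reserve(skb, 8 - 5) advances data by 3 bytes onto the boundary the DMA engine requires. The arithmetic in isolation (illustrative address value):

	/* address 0x...5 is 5 bytes past the previous 8-byte boundary */
	unaligned = 0x5 & (DMA_ALIGN - 1);		/* = 5 */
	if (unaligned)
		skb_reserve(skb, DMA_ALIGN - unaligned);/* reserve 3 bytes */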
@@ -207,7 +205,7 @@ static void mv643xx_eth_rx_task(void *data)
207 else { 205 else {
208 /* Return interrupts */ 206 /* Return interrupts */
209 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num), 207 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
210 INT_CAUSE_UNMASK_ALL); 208 INT_UNMASK_ALL);
211 } 209 }
212#endif 210#endif
213} 211}
@@ -267,6 +265,8 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
267 mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; 265 mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
268 266
269 mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config); 267 mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config);
268
269 eth_port_set_multicast_list(dev);
270} 270}
271 271
272/* 272/*
@@ -342,8 +342,6 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
342 if (!(eth_int_cause_ext & (BIT0 | BIT8))) 342 if (!(eth_int_cause_ext & (BIT0 | BIT8)))
343 return released; 343 return released;
344 344
345 spin_lock(&mp->lock);
346
347 /* Check only queue 0 */ 345 /* Check only queue 0 */
348 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { 346 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
349 if (pkt_info.cmd_sts & BIT0) { 347 if (pkt_info.cmd_sts & BIT0) {
@@ -351,31 +349,21 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
351 stats->tx_errors++; 349 stats->tx_errors++;
352 } 350 }
353 351
354 /* 352 if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
355 * If return_info is different than 0, release the skb. 353 dma_unmap_single(NULL, pkt_info.buf_ptr,
356 * The case where return_info is not 0 is only in case 354 pkt_info.byte_cnt,
357 * when transmitted a scatter/gather packet, where only 355 DMA_TO_DEVICE);
358 * last skb releases the whole chain. 356 else
359 */ 357 dma_unmap_page(NULL, pkt_info.buf_ptr,
360 if (pkt_info.return_info) { 358 pkt_info.byte_cnt,
361 if (skb_shinfo(pkt_info.return_info)->nr_frags) 359 DMA_TO_DEVICE);
362 dma_unmap_page(NULL, pkt_info.buf_ptr,
363 pkt_info.byte_cnt,
364 DMA_TO_DEVICE);
365 else
366 dma_unmap_single(NULL, pkt_info.buf_ptr,
367 pkt_info.byte_cnt,
368 DMA_TO_DEVICE);
369 360
361 if (pkt_info.return_info) {
370 dev_kfree_skb_irq(pkt_info.return_info); 362 dev_kfree_skb_irq(pkt_info.return_info);
371 released = 0; 363 released = 0;
372 } else 364 }
373 dma_unmap_page(NULL, pkt_info.buf_ptr,
374 pkt_info.byte_cnt, DMA_TO_DEVICE);
375 } 365 }
376 366
377 spin_unlock(&mp->lock);
378
379 return released; 367 return released;
380} 368}
381 369
@@ -482,12 +470,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
482 470
483 /* Read interrupt cause registers */ 471 /* Read interrupt cause registers */
484 eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & 472 eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
485 INT_CAUSE_UNMASK_ALL; 473 INT_UNMASK_ALL;
486 474
487 if (eth_int_cause & BIT1) 475 if (eth_int_cause & BIT1)
488 eth_int_cause_ext = mv_read( 476 eth_int_cause_ext = mv_read(
489 MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & 477 MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
490 INT_CAUSE_UNMASK_ALL_EXT; 478 INT_UNMASK_ALL_EXT;
491 479
492#ifdef MV643XX_NAPI 480#ifdef MV643XX_NAPI
493 if (!(eth_int_cause & 0x0007fffd)) { 481 if (!(eth_int_cause & 0x0007fffd)) {
@@ -512,9 +500,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
512 } else { 500 } else {
513 if (netif_rx_schedule_prep(dev)) { 501 if (netif_rx_schedule_prep(dev)) {
514 /* Mask all the interrupts */ 502 /* Mask all the interrupts */
515 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0); 503 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
516 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG 504 INT_MASK_ALL);
517 (port_num), 0); 505 /* wait for previous write to complete */
506 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
518 __netif_rx_schedule(dev); 507 __netif_rx_schedule(dev);
519 } 508 }
520#else 509#else
@@ -527,9 +516,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
527 * with skb's. 516 * with skb's.
528 */ 517 */
529#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK 518#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
530 /* Unmask all interrupts on ethernet port */ 519 /* Mask all interrupts on ethernet port */
531 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 520 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
532 INT_CAUSE_MASK_ALL); 521 INT_MASK_ALL);
522 /* wait for previous write to take effect */
523 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
524
533 queue_task(&mp->rx_task, &tq_immediate); 525 queue_task(&mp->rx_task, &tq_immediate);
534 mark_bh(IMMEDIATE_BH); 526 mark_bh(IMMEDIATE_BH);
535#else 527#else
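Each mask write in the new hunks is followed by a read of the same register. Writes to the controller are posted, so reading the register back forces the write to reach the hardware before the handler goes on to schedule work that depends on interrupts actually being masked. The idiom in isolation (register names as in the driver):

	/* mask all port interrupts, then flush the posted write */
	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
	mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));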
@@ -636,56 +628,6 @@ static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
636} 628}
637 629
638/* 630/*
639 * mv643xx_eth_open
640 *
641 * This function is called when openning the network device. The function
642 * should initialize all the hardware, initialize cyclic Rx/Tx
643 * descriptors chain and buffers and allocate an IRQ to the network
644 * device.
645 *
646 * Input : a pointer to the network device structure
647 *
648 * Output : zero of success , nonzero if fails.
649 */
650
651static int mv643xx_eth_open(struct net_device *dev)
652{
653 struct mv643xx_private *mp = netdev_priv(dev);
654 unsigned int port_num = mp->port_num;
655 int err;
656
657 spin_lock_irq(&mp->lock);
658
659 err = request_irq(dev->irq, mv643xx_eth_int_handler,
660 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
661
662 if (err) {
663 printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
664 port_num);
665 err = -EAGAIN;
666 goto out;
667 }
668
669 if (mv643xx_eth_real_open(dev)) {
670 printk("%s: Error opening interface\n", dev->name);
671 err = -EBUSY;
672 goto out_free;
673 }
674
675 spin_unlock_irq(&mp->lock);
676
677 return 0;
678
679out_free:
680 free_irq(dev->irq, dev);
681
682out:
683 spin_unlock_irq(&mp->lock);
684
685 return err;
686}
687
688/*
 689 * ether_init_rx_desc_ring - Carve a Rx chain desc list and buffer in memory. 631
690 * 632 *
691 * DESCRIPTION: 633 * DESCRIPTION:
@@ -777,28 +719,37 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
777 mp->port_tx_queue_command |= 1; 719 mp->port_tx_queue_command |= 1;
778} 720}
779 721
780/* Helper function for mv643xx_eth_open */ 722/*
781static int mv643xx_eth_real_open(struct net_device *dev) 723 * mv643xx_eth_open
724 *
 725 * This function is called when opening the network device. The function
726 * should initialize all the hardware, initialize cyclic Rx/Tx
727 * descriptors chain and buffers and allocate an IRQ to the network
728 * device.
729 *
730 * Input : a pointer to the network device structure
731 *
 732 * Output : zero if success, nonzero if fails.
733 */
734
735static int mv643xx_eth_open(struct net_device *dev)
782{ 736{
783 struct mv643xx_private *mp = netdev_priv(dev); 737 struct mv643xx_private *mp = netdev_priv(dev);
784 unsigned int port_num = mp->port_num; 738 unsigned int port_num = mp->port_num;
785 unsigned int size; 739 unsigned int size;
740 int err;
741
742 err = request_irq(dev->irq, mv643xx_eth_int_handler,
743 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
744 if (err) {
745 printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
746 port_num);
747 return -EAGAIN;
748 }
786 749
787 /* Stop RX Queues */ 750 /* Stop RX Queues */
788 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00); 751 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
789 752
790 /* Clear the ethernet port interrupts */
791 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
792 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
793
794 /* Unmask RX buffer and TX end interrupt */
795 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
796 INT_CAUSE_UNMASK_ALL);
797
798 /* Unmask phy and link status changes interrupts */
799 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
800 INT_CAUSE_UNMASK_ALL_EXT);
801
802 /* Set the MAC Address */ 753 /* Set the MAC Address */
803 memcpy(mp->port_mac_addr, dev->dev_addr, 6); 754 memcpy(mp->port_mac_addr, dev->dev_addr, 6);
804 755
@@ -818,14 +769,15 @@ static int mv643xx_eth_real_open(struct net_device *dev)
818 GFP_KERNEL); 769 GFP_KERNEL);
819 if (!mp->rx_skb) { 770 if (!mp->rx_skb) {
820 printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name); 771 printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
821 return -ENOMEM; 772 err = -ENOMEM;
773 goto out_free_irq;
822 } 774 }
823 mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size, 775 mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
824 GFP_KERNEL); 776 GFP_KERNEL);
825 if (!mp->tx_skb) { 777 if (!mp->tx_skb) {
826 printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name); 778 printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
827 kfree(mp->rx_skb); 779 err = -ENOMEM;
828 return -ENOMEM; 780 goto out_free_rx_skb;
829 } 781 }
830 782
831 /* Allocate TX ring */ 783 /* Allocate TX ring */
@@ -845,9 +797,8 @@ static int mv643xx_eth_real_open(struct net_device *dev)
845 if (!mp->p_tx_desc_area) { 797 if (!mp->p_tx_desc_area) {
846 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n", 798 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
847 dev->name, size); 799 dev->name, size);
848 kfree(mp->rx_skb); 800 err = -ENOMEM;
849 kfree(mp->tx_skb); 801 goto out_free_tx_skb;
850 return -ENOMEM;
851 } 802 }
852 BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */ 803 BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */
853 memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size); 804 memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
@@ -874,13 +825,12 @@ static int mv643xx_eth_real_open(struct net_device *dev)
874 printk(KERN_ERR "%s: Freeing previously allocated TX queues...", 825 printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
875 dev->name); 826 dev->name);
876 if (mp->rx_sram_size) 827 if (mp->rx_sram_size)
877 iounmap(mp->p_rx_desc_area); 828 iounmap(mp->p_tx_desc_area);
878 else 829 else
879 dma_free_coherent(NULL, mp->tx_desc_area_size, 830 dma_free_coherent(NULL, mp->tx_desc_area_size,
880 mp->p_tx_desc_area, mp->tx_desc_dma); 831 mp->p_tx_desc_area, mp->tx_desc_dma);
881 kfree(mp->rx_skb); 832 err = -ENOMEM;
882 kfree(mp->tx_skb); 833 goto out_free_tx_skb;
883 return -ENOMEM;
884 } 834 }
885 memset((void *)mp->p_rx_desc_area, 0, size); 835 memset((void *)mp->p_rx_desc_area, 0, size);
886 836
@@ -900,9 +850,26 @@ static int mv643xx_eth_real_open(struct net_device *dev)
900 mp->tx_int_coal = 850 mp->tx_int_coal =
901 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); 851 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
902 852
903 netif_start_queue(dev); 853 /* Clear any pending ethernet port interrupts */
854 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
855 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
856
857 /* Unmask phy and link status changes interrupts */
858 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
859 INT_UNMASK_ALL_EXT);
904 860
861 /* Unmask RX buffer and TX end interrupt */
862 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
905 return 0; 863 return 0;
864
865out_free_tx_skb:
866 kfree(mp->tx_skb);
867out_free_rx_skb:
868 kfree(mp->rx_skb);
869out_free_irq:
870 free_irq(dev->irq, dev);
871
872 return err;
906} 873}
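The merged mv643xx_eth_open() replaces the old helper's duplicated kfree()-and-return error paths with a single goto unwind ladder: each allocation that can be followed by a failure gains exactly one cleanup label, and a failing step jumps past its own cleanup to undo only what already succeeded. The skeleton of the pattern (all identifiers here are placeholders, not driver code):

	/* Skeleton of the goto-unwind error handling used above. */
	static int open_skeleton(void)
	{
		void *a, *b;
		int err;

		a = kmalloc(64, GFP_KERNEL);	/* first resource */
		if (!a)
			return -ENOMEM;

		b = kmalloc(64, GFP_KERNEL);	/* second resource */
		if (!b) {
			err = -ENOMEM;
			goto out_free_a;	/* undo only what succeeded */
		}

		/* ... hand a and b off to the device ... */
		return 0;

	out_free_a:
		kfree(a);
		return err;
	}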
907 874
908static void mv643xx_eth_free_tx_rings(struct net_device *dev) 875static void mv643xx_eth_free_tx_rings(struct net_device *dev)
@@ -910,14 +877,17 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
910 struct mv643xx_private *mp = netdev_priv(dev); 877 struct mv643xx_private *mp = netdev_priv(dev);
911 unsigned int port_num = mp->port_num; 878 unsigned int port_num = mp->port_num;
912 unsigned int curr; 879 unsigned int curr;
880 struct sk_buff *skb;
913 881
914 /* Stop Tx Queues */ 882 /* Stop Tx Queues */
915 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00); 883 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
916 884
917 /* Free outstanding skb's on TX rings */ 885 /* Free outstanding skb's on TX rings */
918 for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) { 886 for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) {
919 if (mp->tx_skb[curr]) { 887 skb = mp->tx_skb[curr];
920 dev_kfree_skb(mp->tx_skb[curr]); 888 if (skb) {
889 mp->tx_ring_skbs -= skb_shinfo(skb)->nr_frags;
890 dev_kfree_skb(skb);
921 mp->tx_ring_skbs--; 891 mp->tx_ring_skbs--;
922 } 892 }
923 } 893 }
@@ -973,44 +943,32 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev)
 973 * Output : zero if success, nonzero if fails 943 * Output : zero if success, nonzero if fails
974 */ 944 */
975 945
976/* Helper function for mv643xx_eth_stop */ 946static int mv643xx_eth_stop(struct net_device *dev)
977
978static int mv643xx_eth_real_stop(struct net_device *dev)
979{ 947{
980 struct mv643xx_private *mp = netdev_priv(dev); 948 struct mv643xx_private *mp = netdev_priv(dev);
981 unsigned int port_num = mp->port_num; 949 unsigned int port_num = mp->port_num;
982 950
951 /* Mask all interrupts on ethernet port */
952 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
953 /* wait for previous write to complete */
954 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
955
956#ifdef MV643XX_NAPI
957 netif_poll_disable(dev);
958#endif
983 netif_carrier_off(dev); 959 netif_carrier_off(dev);
984 netif_stop_queue(dev); 960 netif_stop_queue(dev);
985 961
986 mv643xx_eth_free_tx_rings(dev);
987 mv643xx_eth_free_rx_rings(dev);
988
989 eth_port_reset(mp->port_num); 962 eth_port_reset(mp->port_num);
990 963
991 /* Disable ethernet port interrupts */ 964 mv643xx_eth_free_tx_rings(dev);
992 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 965 mv643xx_eth_free_rx_rings(dev);
993 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
994
995 /* Mask RX buffer and TX end interrupt */
996 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
997
998 /* Mask phy and link status changes interrupts */
999 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
1000
1001 return 0;
1002}
1003
1004static int mv643xx_eth_stop(struct net_device *dev)
1005{
1006 struct mv643xx_private *mp = netdev_priv(dev);
1007
1008 spin_lock_irq(&mp->lock);
1009 966
1010 mv643xx_eth_real_stop(dev); 967#ifdef MV643XX_NAPI
968 netif_poll_enable(dev);
969#endif
1011 970
1012 free_irq(dev->irq, dev); 971 free_irq(dev->irq, dev);
1013 spin_unlock_irq(&mp->lock);
1014 972
1015 return 0; 973 return 0;
1016} 974}
@@ -1022,20 +980,17 @@ static void mv643xx_tx(struct net_device *dev)
1022 struct pkt_info pkt_info; 980 struct pkt_info pkt_info;
1023 981
1024 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { 982 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
1025 if (pkt_info.return_info) { 983 if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
1026 if (skb_shinfo(pkt_info.return_info)->nr_frags) 984 dma_unmap_single(NULL, pkt_info.buf_ptr,
1027 dma_unmap_page(NULL, pkt_info.buf_ptr, 985 pkt_info.byte_cnt,
1028 pkt_info.byte_cnt, 986 DMA_TO_DEVICE);
1029 DMA_TO_DEVICE); 987 else
1030 else 988 dma_unmap_page(NULL, pkt_info.buf_ptr,
1031 dma_unmap_single(NULL, pkt_info.buf_ptr, 989 pkt_info.byte_cnt,
1032 pkt_info.byte_cnt, 990 DMA_TO_DEVICE);
1033 DMA_TO_DEVICE);
1034 991
992 if (pkt_info.return_info)
1035 dev_kfree_skb_irq(pkt_info.return_info); 993 dev_kfree_skb_irq(pkt_info.return_info);
1036 } else
1037 dma_unmap_page(NULL, pkt_info.buf_ptr,
1038 pkt_info.byte_cnt, DMA_TO_DEVICE);
1039 } 994 }
1040 995
1041 if (netif_queue_stopped(dev) && 996 if (netif_queue_stopped(dev) &&
@@ -1053,14 +1008,11 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
1053 struct mv643xx_private *mp = netdev_priv(dev); 1008 struct mv643xx_private *mp = netdev_priv(dev);
1054 int done = 1, orig_budget, work_done; 1009 int done = 1, orig_budget, work_done;
1055 unsigned int port_num = mp->port_num; 1010 unsigned int port_num = mp->port_num;
1056 unsigned long flags;
1057 1011
1058#ifdef MV643XX_TX_FAST_REFILL 1012#ifdef MV643XX_TX_FAST_REFILL
1059 if (++mp->tx_clean_threshold > 5) { 1013 if (++mp->tx_clean_threshold > 5) {
1060 spin_lock_irqsave(&mp->lock, flags);
1061 mv643xx_tx(dev); 1014 mv643xx_tx(dev);
1062 mp->tx_clean_threshold = 0; 1015 mp->tx_clean_threshold = 0;
1063 spin_unlock_irqrestore(&mp->lock, flags);
1064 } 1016 }
1065#endif 1017#endif
1066 1018
@@ -1078,21 +1030,36 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
1078 } 1030 }
1079 1031
1080 if (done) { 1032 if (done) {
1081 spin_lock_irqsave(&mp->lock, flags); 1033 netif_rx_complete(dev);
1082 __netif_rx_complete(dev);
1083 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 1034 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
1084 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1035 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1085 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 1036 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
1086 INT_CAUSE_UNMASK_ALL); 1037 INT_UNMASK_ALL);
1087 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
1088 INT_CAUSE_UNMASK_ALL_EXT);
1089 spin_unlock_irqrestore(&mp->lock, flags);
1090 } 1038 }
1091 1039
1092 return done ? 0 : 1; 1040 return done ? 0 : 1;
1093} 1041}
1094#endif 1042#endif
1095 1043
1044/* Hardware can't handle unaligned fragments smaller than 9 bytes.
1045 * This helper function detects that case.
1046 */
1047
1048static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
1049{
1050 unsigned int frag;
1051 skb_frag_t *fragp;
1052
1053 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1054 fragp = &skb_shinfo(skb)->frags[frag];
1055 if (fragp->size <= 8 && fragp->page_offset & 0x7)
1056 return 1;
1057
1058 }
1059 return 0;
1060}
1061
1062
1096/* 1063/*
1097 * mv643xx_eth_start_xmit 1064 * mv643xx_eth_start_xmit
1098 * 1065 *
@@ -1136,12 +1103,19 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1136 return 1; 1103 return 1;
1137 } 1104 }
1138 1105
1106#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
1107 if (has_tiny_unaligned_frags(skb)) {
1108 if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
1109 stats->tx_dropped++;
1110 printk(KERN_DEBUG "%s: failed to linearize tiny "
1111 "unaligned fragment\n", dev->name);
1112 return 1;
1113 }
1114 }
1115
1139 spin_lock_irqsave(&mp->lock, flags); 1116 spin_lock_irqsave(&mp->lock, flags);
1140 1117
1141 /* Update packet info data structure -- DMA owned, first last */
1142#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
1143 if (!skb_shinfo(skb)->nr_frags) { 1118 if (!skb_shinfo(skb)->nr_frags) {
1144linear:
1145 if (skb->ip_summed != CHECKSUM_HW) { 1119 if (skb->ip_summed != CHECKSUM_HW) {
1146 /* Errata BTS #50, IHL must be 5 if no HW checksum */ 1120 /* Errata BTS #50, IHL must be 5 if no HW checksum */
1147 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | 1121 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
@@ -1150,7 +1124,6 @@ linear:
1150 5 << ETH_TX_IHL_SHIFT; 1124 5 << ETH_TX_IHL_SHIFT;
1151 pkt_info.l4i_chk = 0; 1125 pkt_info.l4i_chk = 0;
1152 } else { 1126 } else {
1153
1154 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | 1127 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
1155 ETH_TX_FIRST_DESC | 1128 ETH_TX_FIRST_DESC |
1156 ETH_TX_LAST_DESC | 1129 ETH_TX_LAST_DESC |
@@ -1158,14 +1131,16 @@ linear:
1158 ETH_GEN_IP_V_4_CHECKSUM | 1131 ETH_GEN_IP_V_4_CHECKSUM |
1159 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; 1132 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1160 /* CPU already calculated pseudo header checksum. */ 1133 /* CPU already calculated pseudo header checksum. */
1161 if (skb->nh.iph->protocol == IPPROTO_UDP) { 1134 if ((skb->protocol == ETH_P_IP) &&
1135 (skb->nh.iph->protocol == IPPROTO_UDP) ) {
1162 pkt_info.cmd_sts |= ETH_UDP_FRAME; 1136 pkt_info.cmd_sts |= ETH_UDP_FRAME;
1163 pkt_info.l4i_chk = skb->h.uh->check; 1137 pkt_info.l4i_chk = skb->h.uh->check;
1164 } else if (skb->nh.iph->protocol == IPPROTO_TCP) 1138 } else if ((skb->protocol == ETH_P_IP) &&
1139 (skb->nh.iph->protocol == IPPROTO_TCP))
1165 pkt_info.l4i_chk = skb->h.th->check; 1140 pkt_info.l4i_chk = skb->h.th->check;
1166 else { 1141 else {
1167 printk(KERN_ERR 1142 printk(KERN_ERR
1168 "%s: chksum proto != TCP or UDP\n", 1143 "%s: chksum proto != IPv4 TCP or UDP\n",
1169 dev->name); 1144 dev->name);
1170 spin_unlock_irqrestore(&mp->lock, flags); 1145 spin_unlock_irqrestore(&mp->lock, flags);
1171 return 1; 1146 return 1;
@@ -1183,26 +1158,6 @@ linear:
1183 } else { 1158 } else {
1184 unsigned int frag; 1159 unsigned int frag;
1185 1160
1186 /* Since hardware can't handle unaligned fragments smaller
1187 * than 9 bytes, if we find any, we linearize the skb
1188 * and start again. When I've seen it, it's always been
1189 * the first frag (probably near the end of the page),
1190 * but we check all frags to be safe.
1191 */
1192 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1193 skb_frag_t *fragp;
1194
1195 fragp = &skb_shinfo(skb)->frags[frag];
1196 if (fragp->size <= 8 && fragp->page_offset & 0x7) {
1197 skb_linearize(skb, GFP_ATOMIC);
1198 printk(KERN_DEBUG "%s: unaligned tiny fragment"
1199 "%d of %d, fixed\n",
1200 dev->name, frag,
1201 skb_shinfo(skb)->nr_frags);
1202 goto linear;
1203 }
1204 }
1205
1206 /* first frag which is skb header */ 1161 /* first frag which is skb header */
1207 pkt_info.byte_cnt = skb_headlen(skb); 1162 pkt_info.byte_cnt = skb_headlen(skb);
1208 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, 1163 pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
@@ -1221,14 +1176,16 @@ linear:
1221 ETH_GEN_IP_V_4_CHECKSUM | 1176 ETH_GEN_IP_V_4_CHECKSUM |
1222 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; 1177 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1223 /* CPU already calculated pseudo header checksum. */ 1178 /* CPU already calculated pseudo header checksum. */
1224 if (skb->nh.iph->protocol == IPPROTO_UDP) { 1179 if ((skb->protocol == ETH_P_IP) &&
1180 (skb->nh.iph->protocol == IPPROTO_UDP)) {
1225 pkt_info.cmd_sts |= ETH_UDP_FRAME; 1181 pkt_info.cmd_sts |= ETH_UDP_FRAME;
1226 pkt_info.l4i_chk = skb->h.uh->check; 1182 pkt_info.l4i_chk = skb->h.uh->check;
1227 } else if (skb->nh.iph->protocol == IPPROTO_TCP) 1183 } else if ((skb->protocol == ETH_P_IP) &&
1184 (skb->nh.iph->protocol == IPPROTO_TCP))
1228 pkt_info.l4i_chk = skb->h.th->check; 1185 pkt_info.l4i_chk = skb->h.th->check;
1229 else { 1186 else {
1230 printk(KERN_ERR 1187 printk(KERN_ERR
1231 "%s: chksum proto != TCP or UDP\n", 1188 "%s: chksum proto != IPv4 TCP or UDP\n",
1232 dev->name); 1189 dev->name);
1233 spin_unlock_irqrestore(&mp->lock, flags); 1190 spin_unlock_irqrestore(&mp->lock, flags);
1234 return 1; 1191 return 1;
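One caveat about the new (skb->protocol == ETH_P_IP) guards in both checksum paths: skb->protocol carries the ethertype in network byte order, while ETH_P_IP is a host-order constant, so the comparison is conventionally written with htons(). A byte-order-safe sketch of the same check (an editorial note on the idiom; the patch itself compares without the conversion):

	/* skb->protocol is big-endian as on the wire */
	if (skb->protocol == htons(ETH_P_IP) &&
	    skb->nh.iph->protocol == IPPROTO_UDP) {
		/* ... request hardware UDP checksum generation ... */
	}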
@@ -1288,6 +1245,8 @@ linear:
1288 } 1245 }
1289 } 1246 }
1290#else 1247#else
1248 spin_lock_irqsave(&mp->lock, flags);
1249
1291 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC | 1250 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
1292 ETH_TX_LAST_DESC; 1251 ETH_TX_LAST_DESC;
1293 pkt_info.l4i_chk = 0; 1252 pkt_info.l4i_chk = 0;
@@ -1340,39 +1299,18 @@ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1340} 1299}
1341 1300
1342#ifdef CONFIG_NET_POLL_CONTROLLER 1301#ifdef CONFIG_NET_POLL_CONTROLLER
1343static inline void mv643xx_enable_irq(struct mv643xx_private *mp)
1344{
1345 int port_num = mp->port_num;
1346 unsigned long flags;
1347
1348 spin_lock_irqsave(&mp->lock, flags);
1349 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
1350 INT_CAUSE_UNMASK_ALL);
1351 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
1352 INT_CAUSE_UNMASK_ALL_EXT);
1353 spin_unlock_irqrestore(&mp->lock, flags);
1354}
1355
1356static inline void mv643xx_disable_irq(struct mv643xx_private *mp)
1357{
1358 int port_num = mp->port_num;
1359 unsigned long flags;
1360
1361 spin_lock_irqsave(&mp->lock, flags);
1362 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
1363 INT_CAUSE_MASK_ALL);
1364 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
1365 INT_CAUSE_MASK_ALL_EXT);
1366 spin_unlock_irqrestore(&mp->lock, flags);
1367}
1368
1369static void mv643xx_netpoll(struct net_device *netdev) 1302static void mv643xx_netpoll(struct net_device *netdev)
1370{ 1303{
1371 struct mv643xx_private *mp = netdev_priv(netdev); 1304 struct mv643xx_private *mp = netdev_priv(netdev);
1305 int port_num = mp->port_num;
1306
1307 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
1308 /* wait for previous write to complete */
1309 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
1372 1310
1373 mv643xx_disable_irq(mp);
1374 mv643xx_eth_int_handler(netdev->irq, netdev, NULL); 1311 mv643xx_eth_int_handler(netdev->irq, netdev, NULL);
1375 mv643xx_enable_irq(mp); 1312
1313 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
1376} 1314}
1377#endif 1315#endif
1378 1316
@@ -1441,7 +1379,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1441 * Zero copy can only work if we use Discovery II memory. Else, we will 1379 * Zero copy can only work if we use Discovery II memory. Else, we will
1442 * have to map the buffers to ISA memory which is only 16 MB 1380 * have to map the buffers to ISA memory which is only 16 MB
1443 */ 1381 */
1444 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM; 1382 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
1445#endif 1383#endif
1446#endif 1384#endif
1447 1385
@@ -2054,6 +1992,196 @@ static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
2054} 1992}
2055 1993
2056/* 1994/*
1995 * The entries in each table are indexed by a hash of a packet's MAC
1996 * address. One bit in each entry determines whether the packet is
1997 * accepted. There are 4 entries (each 8 bits wide) in each register
1998 * of the table. The bits in each entry are defined as follows:
1999 * 0 Accept=1, Drop=0
2000 * 3-1 Queue (ETH_Q0=0)
2001 * 7-4 Reserved = 0;
2002 */
2003static void eth_port_set_filter_table_entry(int table, unsigned char entry)
2004{
2005 unsigned int table_reg;
2006 unsigned int tbl_offset;
2007 unsigned int reg_offset;
2008
2009 tbl_offset = (entry / 4) * 4; /* Register offset of DA table entry */
2010 reg_offset = entry % 4; /* Entry offset within the register */
2011
2012 /* Set "accepts frame bit" at specified table entry */
2013 table_reg = mv_read(table + tbl_offset);
2014 table_reg |= 0x01 << (8 * reg_offset);
2015 mv_write(table + tbl_offset, table_reg);
2016}
2017
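Worked example for eth_port_set_filter_table_entry(): entry 42 (0x2A) gives tbl_offset = (42 / 4) * 4 = 40 and reg_offset = 42 % 4 = 2, so the accept bit written is 0x01 << 16 in the 32-bit table register at byte offset 40, i.e. the third 8-bit entry of the eleventh register:

	tbl_offset = (42 / 4) * 4;		/* 40: register byte offset */
	reg_offset = 42 % 4;			/* 2: entry slot in register */
	table_reg |= 0x01 << (8 * reg_offset);	/* sets bit 16, 0x00010000 */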
2018/*
2019 * eth_port_mc_addr - Multicast address settings.
2020 *
2021 * The MV device supports multicast using two tables:
2022 * 1) Special Multicast Table for MAC addresses of the form
 2023 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2024 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2025 * Table entries in the DA-Filter table.
 2026 * 2) Other Multicast Table for multicast of another type. An 8-bit CRC
 2027 * of the MAC address is used as an index to the Other Multicast Table
 2028 * entries in the DA-Filter table. This function calculates that CRC-8 value.
2029 * In either case, eth_port_set_filter_table_entry() is then called
 2030 * to set the actual table entry.
2031 */
2032static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
2033{
2034 unsigned int mac_h;
2035 unsigned int mac_l;
2036 unsigned char crc_result = 0;
2037 int table;
2038 int mac_array[48];
2039 int crc[8];
2040 int i;
2041
2042 if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
2043 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
2044 table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2045 (eth_port_num);
2046 eth_port_set_filter_table_entry(table, p_addr[5]);
2047 return;
2048 }
2049
2050 /* Calculate CRC-8 out of the given address */
2051 mac_h = (p_addr[0] << 8) | (p_addr[1]);
2052 mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
2053 (p_addr[4] << 8) | (p_addr[5] << 0);
2054
2055 for (i = 0; i < 32; i++)
2056 mac_array[i] = (mac_l >> i) & 0x1;
2057 for (i = 32; i < 48; i++)
2058 mac_array[i] = (mac_h >> (i - 32)) & 0x1;
2059
2060 crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
2061 mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
2062 mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
2063 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
2064 mac_array[8] ^ mac_array[7] ^ mac_array[6] ^ mac_array[0];
2065
2066 crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
2067 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
2068 mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
2069 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
2070 mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
2071 mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
2072 mac_array[9] ^ mac_array[6] ^ mac_array[1] ^ mac_array[0];
2073
2074 crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
2075 mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
2076 mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
2077 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
2078 mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8] ^
2079 mac_array[6] ^ mac_array[2] ^ mac_array[1] ^ mac_array[0];
2080
2081 crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
2082 mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
2083 mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
2084 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
2085 mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[7] ^
2086 mac_array[3] ^ mac_array[2] ^ mac_array[1];
2087
2088 crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
2089 mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
2090 mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
2091 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
2092 mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ mac_array[4] ^
2093 mac_array[3] ^ mac_array[2];
2094
2095 crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
2096 mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
2097 mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
2098 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
2099 mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[5] ^
2100 mac_array[4] ^ mac_array[3];
2101
2102 crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
2103 mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
2104 mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
2105 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
2106 mac_array[12] ^ mac_array[10] ^ mac_array[6] ^ mac_array[5] ^
2107 mac_array[4];
2108
2109 crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
2110 mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
2111 mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
2112 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
2113 mac_array[11] ^ mac_array[7] ^ mac_array[6] ^ mac_array[5];
2114
2115 for (i = 0; i < 8; i++)
2116 crc_result = crc_result | (crc[i] << i);
2117
2118 table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
2119 eth_port_set_filter_table_entry(table, crc_result);
2120}
2121
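The unrolled XOR network above is an 8-bit CRC evaluated over the 48 MAC-address bits collected in mac_array[]. The same hash is normally written as a bit-serial loop; the sketch below assumes the conventional CRC-8 polynomial x^8 + x^2 + x + 1 (0x07) and a particular bit order, both of which would need to be verified against the chip documentation, since the patch gives only the expanded equations:

	/* Bit-serial CRC-8 over bits[0..47]; polynomial and bit order are
	 * assumptions for illustration, not taken from the patch. */
	static unsigned char crc8_sketch(const int *bits)
	{
		unsigned char crc = 0;
		int i, fb;

		for (i = 47; i >= 0; i--) {
			fb = ((crc >> 7) & 1) ^ bits[i];	/* feedback bit */
			crc <<= 1;
			if (fb)
				crc ^= 0x07;	/* x^8 + x^2 + x + 1 */
		}
		return crc;
	}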
2122/*
2123 * Set the entire multicast list based on dev->mc_list.
2124 */
2125static void eth_port_set_multicast_list(struct net_device *dev)
2126{
2127
2128 struct dev_mc_list *mc_list;
2129 int i;
2130 int table_index;
2131 struct mv643xx_private *mp = netdev_priv(dev);
2132 unsigned int eth_port_num = mp->port_num;
2133
2134 /* If the device is in promiscuous mode or in all multicast mode,
2135 * we will fully populate both multicast tables with accept.
2136 * This is guaranteed to yield a match on all multicast addresses...
2137 */
2138 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
2139 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2140 /* Set all entries in DA filter special multicast
2141 * table (Ex_dFSMT)
2142 * Set for ETH_Q0 for now
2143 * Bits
2144 * 0 Accept=1, Drop=0
2145 * 3-1 Queue ETH_Q0=0
2146 * 7-4 Reserved = 0;
2147 */
2148 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2149
2150 /* Set all entries in DA filter other multicast
2151 * table (Ex_dFOMT)
2152 * Set for ETH_Q0 for now
2153 * Bits
2154 * 0 Accept=1, Drop=0
2155 * 3-1 Queue ETH_Q0=0
2156 * 7-4 Reserved = 0;
2157 */
2158 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2159 }
2160 return;
2161 }
2162
2163 /* We will clear out multicast tables every time we get the list.
2164 * Then add the entire new list...
2165 */
2166 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2167 /* Clear DA filter special multicast table (Ex_dFSMT) */
2168 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2169 (eth_port_num) + table_index, 0);
2170
2171 /* Clear DA filter other multicast table (Ex_dFOMT) */
2172 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2173 (eth_port_num) + table_index, 0);
2174 }
2175
2176 /* Get pointer to net_device multicast list and add each one... */
2177 for (i = 0, mc_list = dev->mc_list;
2178 (i < 256) && (mc_list != NULL) && (i < dev->mc_count);
2179 i++, mc_list = mc_list->next)
2180 if (mc_list->dmi_addrlen == 6)
2181 eth_port_mc_addr(eth_port_num, mc_list->dmi_addr);
2182}
2183
2184/*
2057 * eth_port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables 2185 * eth_port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables
2058 * 2186 *
2059 * DESCRIPTION: 2187 * DESCRIPTION:
@@ -2080,11 +2208,11 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
2080 2208
2081 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2209 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2082 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2210 /* Clear DA filter special multicast table (Ex_dFSMT) */
2083 mv_write((MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2211 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2084 (eth_port_num) + table_index), 0); 2212 (eth_port_num) + table_index, 0);
2085 /* Clear DA filter other multicast table (Ex_dFOMT) */ 2213 /* Clear DA filter other multicast table (Ex_dFOMT) */
2086 mv_write((MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2214 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2087 (eth_port_num) + table_index), 0); 2215 (eth_port_num) + table_index, 0);
2088 } 2216 }
2089} 2217}
2090 2218
@@ -2489,6 +2617,7 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2489 struct eth_tx_desc *current_descriptor; 2617 struct eth_tx_desc *current_descriptor;
2490 struct eth_tx_desc *first_descriptor; 2618 struct eth_tx_desc *first_descriptor;
2491 u32 command; 2619 u32 command;
2620 unsigned long flags;
2492 2621
2493 /* Do not process Tx ring in case of Tx ring resource error */ 2622 /* Do not process Tx ring in case of Tx ring resource error */
2494 if (mp->tx_resource_err) 2623 if (mp->tx_resource_err)
@@ -2505,6 +2634,8 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2505 return ETH_ERROR; 2634 return ETH_ERROR;
2506 } 2635 }
2507 2636
2637 spin_lock_irqsave(&mp->lock, flags);
2638
2508 mp->tx_ring_skbs++; 2639 mp->tx_ring_skbs++;
2509 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); 2640 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
2510 2641
@@ -2554,11 +2685,15 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2554 mp->tx_resource_err = 1; 2685 mp->tx_resource_err = 1;
2555 mp->tx_curr_desc_q = tx_first_desc; 2686 mp->tx_curr_desc_q = tx_first_desc;
2556 2687
2688 spin_unlock_irqrestore(&mp->lock, flags);
2689
2557 return ETH_QUEUE_LAST_RESOURCE; 2690 return ETH_QUEUE_LAST_RESOURCE;
2558 } 2691 }
2559 2692
2560 mp->tx_curr_desc_q = tx_next_desc; 2693 mp->tx_curr_desc_q = tx_next_desc;
2561 2694
2695 spin_unlock_irqrestore(&mp->lock, flags);
2696
2562 return ETH_OK; 2697 return ETH_OK;
2563} 2698}
2564#else 2699#else
@@ -2569,11 +2704,14 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2569 int tx_desc_used; 2704 int tx_desc_used;
2570 struct eth_tx_desc *current_descriptor; 2705 struct eth_tx_desc *current_descriptor;
2571 unsigned int command_status; 2706 unsigned int command_status;
2707 unsigned long flags;
2572 2708
2573 /* Do not process Tx ring in case of Tx ring resource error */ 2709 /* Do not process Tx ring in case of Tx ring resource error */
2574 if (mp->tx_resource_err) 2710 if (mp->tx_resource_err)
2575 return ETH_QUEUE_FULL; 2711 return ETH_QUEUE_FULL;
2576 2712
2713 spin_lock_irqsave(&mp->lock, flags);
2714
2577 mp->tx_ring_skbs++; 2715 mp->tx_ring_skbs++;
2578 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); 2716 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
2579 2717
@@ -2604,9 +2742,12 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2604 /* Check for ring index overlap in the Tx desc ring */ 2742 /* Check for ring index overlap in the Tx desc ring */
2605 if (tx_desc_curr == tx_desc_used) { 2743 if (tx_desc_curr == tx_desc_used) {
2606 mp->tx_resource_err = 1; 2744 mp->tx_resource_err = 1;
2745
2746 spin_unlock_irqrestore(&mp->lock, flags);
2607 return ETH_QUEUE_LAST_RESOURCE; 2747 return ETH_QUEUE_LAST_RESOURCE;
2608 } 2748 }
2609 2749
2750 spin_unlock_irqrestore(&mp->lock, flags);
2610 return ETH_OK; 2751 return ETH_OK;
2611} 2752}
2612#endif 2753#endif
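
Both eth_port_send variants in this hunk get the same shape: take mp->lock with interrupts saved, update the ring indexes, and release the lock on every exit path, including the resource-error returns. A self-contained pthreads stand-in for that shape (the ring variables are hypothetical, not the driver's real fields):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int curr, used, size = 8;

/* Reserve the next tx descriptor; every return drops the lock,
 * mirroring the spin_lock_irqsave/spin_unlock_irqrestore pairing. */
static int ring_reserve(void)
{
        int next;

        pthread_mutex_lock(&lock);
        next = (curr + 1) % size;
        if (next == used) {                /* last resource */
                pthread_mutex_unlock(&lock);
                return -1;
        }
        curr = next;
        pthread_mutex_unlock(&lock);
        return next;
}

int main(void)
{
        printf("got descriptor %d\n", ring_reserve());
        return 0;
}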
@@ -2629,23 +2770,27 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2629 * Tx ring 'first' and 'used' indexes are updated. 2770 * Tx ring 'first' and 'used' indexes are updated.
2630 * 2771 *
2631 * RETURN: 2772 * RETURN:
2632 * ETH_ERROR in case the routine can not access Tx desc ring. 2773 * ETH_OK on success
2633 * ETH_RETRY in case there is transmission in process. 2774 * ETH_ERROR otherwise.
2634 * ETH_END_OF_JOB if the routine has nothing to release.
2635 * ETH_OK otherwise.
2636 * 2775 *
2637 */ 2776 */
2638static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, 2777static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2639 struct pkt_info *p_pkt_info) 2778 struct pkt_info *p_pkt_info)
2640{ 2779{
2641 int tx_desc_used; 2780 int tx_desc_used;
2781 int tx_busy_desc;
2782 struct eth_tx_desc *p_tx_desc_used;
2783 unsigned int command_status;
2784 unsigned long flags;
2785 int err = ETH_OK;
2786
2787 spin_lock_irqsave(&mp->lock, flags);
2788
2642#ifdef MV643XX_CHECKSUM_OFFLOAD_TX 2789#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
2643 int tx_busy_desc = mp->tx_first_desc_q; 2790 tx_busy_desc = mp->tx_first_desc_q;
2644#else 2791#else
2645 int tx_busy_desc = mp->tx_curr_desc_q; 2792 tx_busy_desc = mp->tx_curr_desc_q;
2646#endif 2793#endif
2647 struct eth_tx_desc *p_tx_desc_used;
2648 unsigned int command_status;
2649 2794
2650 /* Get the Tx Desc ring indexes */ 2795 /* Get the Tx Desc ring indexes */
2651 tx_desc_used = mp->tx_used_desc_q; 2796 tx_desc_used = mp->tx_used_desc_q;
@@ -2653,22 +2798,30 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2653 p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used]; 2798 p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
2654 2799
2655 /* Sanity check */ 2800 /* Sanity check */
2656 if (p_tx_desc_used == NULL) 2801 if (p_tx_desc_used == NULL) {
2657 return ETH_ERROR; 2802 err = ETH_ERROR;
2803 goto out;
2804 }
2658 2805
2659 /* Stop release. About to overlap the current available Tx descriptor */ 2806 /* Stop release. About to overlap the current available Tx descriptor */
2660 if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) 2807 if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) {
2661 return ETH_END_OF_JOB; 2808 err = ETH_ERROR;
2809 goto out;
2810 }
2662 2811
2663 command_status = p_tx_desc_used->cmd_sts; 2812 command_status = p_tx_desc_used->cmd_sts;
2664 2813
2665 /* Still transmitting... */ 2814 /* Still transmitting... */
2666 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) 2815 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
2667 return ETH_RETRY; 2816 err = ETH_ERROR;
2817 goto out;
2818 }
2668 2819
2669 /* Pass the packet information to the caller */ 2820 /* Pass the packet information to the caller */
2670 p_pkt_info->cmd_sts = command_status; 2821 p_pkt_info->cmd_sts = command_status;
2671 p_pkt_info->return_info = mp->tx_skb[tx_desc_used]; 2822 p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
2823 p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr;
2824 p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt;
2672 mp->tx_skb[tx_desc_used] = NULL; 2825 mp->tx_skb[tx_desc_used] = NULL;
2673 2826
2674 /* Update the next descriptor to release. */ 2827 /* Update the next descriptor to release. */
@@ -2680,7 +2833,10 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2680 BUG_ON(mp->tx_ring_skbs == 0); 2833 BUG_ON(mp->tx_ring_skbs == 0);
2681 mp->tx_ring_skbs--; 2834 mp->tx_ring_skbs--;
2682 2835
2683 return ETH_OK; 2836out:
2837 spin_unlock_irqrestore(&mp->lock, flags);
2838
2839 return err;
2684} 2840}
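
The rework of eth_tx_return_desc above folds three early returns into one out: label so the spinlock is dropped exactly once however the function exits. The same idiom in a self-contained sketch (variables hypothetical):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int used, curr, size = 8;

/* Error paths set err and jump to out; the unlock happens once. */
static int return_desc(void)
{
        int err = 0;

        pthread_mutex_lock(&lock);
        if (used == curr) {                /* nothing to release */
                err = -1;
                goto out;
        }
        used = (used + 1) % size;
out:
        pthread_mutex_unlock(&lock);
        return err;
}

int main(void)
{
        return return_desc() ? 1 : 0;
}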
2685 2841
2686/* 2842/*
@@ -2712,11 +2868,14 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
2712 int rx_next_curr_desc, rx_curr_desc, rx_used_desc; 2868 int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
2713 volatile struct eth_rx_desc *p_rx_desc; 2869 volatile struct eth_rx_desc *p_rx_desc;
2714 unsigned int command_status; 2870 unsigned int command_status;
2871 unsigned long flags;
2715 2872
2716 /* Do not process Rx ring in case of Rx ring resource error */ 2873 /* Do not process Rx ring in case of Rx ring resource error */
2717 if (mp->rx_resource_err) 2874 if (mp->rx_resource_err)
2718 return ETH_QUEUE_FULL; 2875 return ETH_QUEUE_FULL;
2719 2876
2877 spin_lock_irqsave(&mp->lock, flags);
2878
2720 /* Get the Rx Desc ring 'curr and 'used' indexes */ 2879 /* Get the Rx Desc ring 'curr and 'used' indexes */
2721 rx_curr_desc = mp->rx_curr_desc_q; 2880 rx_curr_desc = mp->rx_curr_desc_q;
2722 rx_used_desc = mp->rx_used_desc_q; 2881 rx_used_desc = mp->rx_used_desc_q;
@@ -2728,8 +2887,10 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
2728 rmb(); 2887 rmb();
2729 2888
2730 /* Nothing to receive... */ 2889 /* Nothing to receive... */
2731 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) 2890 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
2891 spin_unlock_irqrestore(&mp->lock, flags);
2732 return ETH_END_OF_JOB; 2892 return ETH_END_OF_JOB;
2893 }
2733 2894
2734 p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET; 2895 p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
2735 p_pkt_info->cmd_sts = command_status; 2896 p_pkt_info->cmd_sts = command_status;
@@ -2749,6 +2910,8 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
2749 if (rx_next_curr_desc == rx_used_desc) 2910 if (rx_next_curr_desc == rx_used_desc)
2750 mp->rx_resource_err = 1; 2911 mp->rx_resource_err = 1;
2751 2912
2913 spin_unlock_irqrestore(&mp->lock, flags);
2914
2752 return ETH_OK; 2915 return ETH_OK;
2753} 2916}
2754 2917
@@ -2777,6 +2940,9 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
2777{ 2940{
2778 int used_rx_desc; /* Where to return Rx resource */ 2941 int used_rx_desc; /* Where to return Rx resource */
2779 volatile struct eth_rx_desc *p_used_rx_desc; 2942 volatile struct eth_rx_desc *p_used_rx_desc;
2943 unsigned long flags;
2944
2945 spin_lock_irqsave(&mp->lock, flags);
2780 2946
2781 /* Get 'used' Rx descriptor */ 2947 /* Get 'used' Rx descriptor */
2782 used_rx_desc = mp->rx_used_desc_q; 2948 used_rx_desc = mp->rx_used_desc_q;
@@ -2800,6 +2966,8 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
2800 /* Any Rx return cancels the Rx resource error status */ 2966 /* Any Rx return cancels the Rx resource error status */
2801 mp->rx_resource_err = 0; 2967 mp->rx_resource_err = 0;
2802 2968
2969 spin_unlock_irqrestore(&mp->lock, flags);
2970
2803 return ETH_OK; 2971 return ETH_OK;
2804} 2972}
2805 2973
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b538e3038058..bf55a4cfb3d2 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3243,12 +3243,22 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3243 3243
3244 pci_set_master(pdev); 3244 pci_set_master(pdev);
3245 3245
3246 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) 3246 if (sizeof(dma_addr_t) > sizeof(u32) &&
3247 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
3247 using_dac = 1; 3248 using_dac = 1;
3248 else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 3249 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3249 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3250 if (err < 0) {
3250 pci_name(pdev)); 3251 printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
3251 goto err_out_free_regions; 3252 "for consistent allocations\n", pci_name(pdev));
3253 goto err_out_free_regions;
3254 }
3255 } else {
3256 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3257 if (err) {
3258 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3259 pci_name(pdev));
3260 goto err_out_free_regions;
3261 }
3252 } 3262 }
3253 3263
3254#ifdef __BIG_ENDIAN 3264#ifdef __BIG_ENDIAN
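
The probe logic above attempts a 64-bit DMA mask only when dma_addr_t is wider than 32 bits, keeps the consistent-allocation mask in step, and otherwise falls back to 32-bit DMA. A condensed kernel-style sketch of that negotiation, using the 2.6-era pci_set_dma_mask API (the helper name is hypothetical):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int setup_dma(struct pci_dev *pdev, int *using_dac)
{
        int err;

        if (sizeof(dma_addr_t) > sizeof(u32) &&
            !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                *using_dac = 1;
                err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (err < 0)
                        return err;        /* no 64-bit consistent memory */
        } else {
                *using_dac = 0;
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err)
                        return err;        /* no usable DMA configuration */
        }
        return 0;
}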
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index f5d697c0c031..f8b973a04b65 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -57,7 +57,7 @@
57#include "sky2.h" 57#include "sky2.h"
58 58
59#define DRV_NAME "sky2" 59#define DRV_NAME "sky2"
60#define DRV_VERSION "0.11" 60#define DRV_VERSION "0.13"
61#define PFX DRV_NAME " " 61#define PFX DRV_NAME " "
62 62
63/* 63/*
@@ -75,6 +75,7 @@
75#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) 75#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
76#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2) 76#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
77#define RX_DEF_PENDING RX_MAX_PENDING 77#define RX_DEF_PENDING RX_MAX_PENDING
78#define RX_SKB_ALIGN 8
78 79
79#define TX_RING_SIZE 512 80#define TX_RING_SIZE 512
80#define TX_DEF_PENDING (TX_RING_SIZE - 1) 81#define TX_DEF_PENDING (TX_RING_SIZE - 1)
@@ -91,7 +92,7 @@
91static const u32 default_msg = 92static const u32 default_msg =
92 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK 93 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
93 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR 94 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
94 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_INTR; 95 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
95 96
96static int debug = -1; /* defaults above */ 97static int debug = -1; /* defaults above */
97module_param(debug, int, 0); 98module_param(debug, int, 0);
@@ -624,13 +625,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
624 625
625} 626}
626 627
627static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len) 628/* Assign Ram Buffer allocation.
629 * start and end are in units of 4k bytes
630 * ram registers are in units of 64bit words
631 */
632static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk)
628{ 633{
629 u32 end; 634 u32 start, end;
630 635
631 start /= 8; 636 start = startk * 4096/8;
632 len /= 8; 637 end = (endk * 4096/8) - 1;
633 end = start + len - 1;
634 638
635 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); 639 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
636 sky2_write32(hw, RB_ADDR(q, RB_START), start); 640 sky2_write32(hw, RB_ADDR(q, RB_START), start);
@@ -639,14 +643,19 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
639 sky2_write32(hw, RB_ADDR(q, RB_RP), start); 643 sky2_write32(hw, RB_ADDR(q, RB_RP), start);
640 644
641 if (q == Q_R1 || q == Q_R2) { 645 if (q == Q_R1 || q == Q_R2) {
642 u32 rxup, rxlo; 646 u32 space = (endk - startk) * 4096/8;
647 u32 tp = space - space/4;
643 648
644 rxlo = len/2; 649 /* On receive queues, set the thresholds
645 rxup = rxlo + len/4; 650 * give receiver priority when > 3/4 full
651 * send pause when down to 2K
652 */
653 sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
654 sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
646 655
647 /* Set thresholds on receive queue's */ 656 tp = space - 2048/8;
648 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), rxup); 657 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
649 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), rxlo); 658 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
650 } else { 659 } else {
651 /* Enable store & forward on Tx queues because 660 /* Enable store & forward on Tx queues because
652 * Tx FIFO is only 1K on Yukon 661 * Tx FIFO is only 1K on Yukon
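
sky2_ramset now mixes two unit systems: startk/endk count 4K-byte blocks while the RAM buffer registers count 64-bit words, so one block is 4096/8 = 512 words. The thresholds follow from that: receiver priority above 3/4 full, pause when roughly 2K bytes remain. A worked example, assuming a hypothetical 64K receive buffer:

#include <stdio.h>

int main(void)
{
        unsigned startk = 0, endk = 16;        /* 16 blocks = 64K, made up */
        unsigned space = (endk - startk) * 4096 / 8;   /* in 64-bit words */
        unsigned up_thresh = space - space / 4;        /* 3/4 full */
        unsigned pause_thresh = space - 2048 / 8;      /* ~2K bytes left */

        printf("space=%u words, UTHP=%u, UTPP=%u, LTPP=%u\n",
               space, up_thresh, pause_thresh, space / 4);
        return 0;
}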
@@ -695,9 +704,10 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
695 * This is a workaround code taken from SysKonnect sk98lin driver 704 * This is a workaround code taken from SysKonnect sk98lin driver
696 * to deal with chip bug on Yukon EC rev 0 in the wraparound case. 705 * to deal with chip bug on Yukon EC rev 0 in the wraparound case.
697 */ 706 */
698static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, 707static void sky2_put_idx(struct sky2_hw *hw, unsigned q,
699 u16 idx, u16 *last, u16 size) 708 u16 idx, u16 *last, u16 size)
700{ 709{
710 wmb();
701 if (is_ec_a1(hw) && idx < *last) { 711 if (is_ec_a1(hw) && idx < *last) {
702 u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); 712 u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
703 713
@@ -721,6 +731,7 @@ setnew:
721 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); 731 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
722 } 732 }
723 *last = idx; 733 *last = idx;
734 mmiowb();
724} 735}
725 736
726 737
@@ -734,11 +745,11 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
734/* Return high part of DMA address (could be 32 or 64 bit) */ 745/* Return high part of DMA address (could be 32 or 64 bit) */
735static inline u32 high32(dma_addr_t a) 746static inline u32 high32(dma_addr_t a)
736{ 747{
737 return (a >> 16) >> 16; 748 return sizeof(a) > sizeof(u32) ? (a >> 16) >> 16 : 0;
738} 749}
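
high32() shifts twice by 16 because shifting a value by its full width (a 32-bit dma_addr_t by 32) is undefined behaviour in C, and the sizeof() test lets the compiler drop the whole expression on 32-bit builds. A standalone illustration of the 64-bit case:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;        /* the 64-bit case; may be 32-bit elsewhere */

static uint32_t high32(dma_addr_t a)
{
        /* a >> 32 would be undefined if dma_addr_t were 32 bits wide */
        return sizeof(a) > sizeof(uint32_t) ? (uint32_t)((a >> 16) >> 16) : 0;
}

int main(void)
{
        printf("0x%08x\n", high32(0x123456789abcdef0ULL));  /* 0x12345678 */
        return 0;
}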
739 750
740/* Build description to hardware about buffer */ 751/* Build description to hardware about buffer */
741static inline void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map) 752static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
742{ 753{
743 struct sky2_rx_le *le; 754 struct sky2_rx_le *le;
744 u32 hi = high32(map); 755 u32 hi = high32(map);
@@ -878,13 +889,13 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
878 struct sky2_hw *hw = sky2->hw; 889 struct sky2_hw *hw = sky2->hw;
879 u16 port = sky2->port; 890 u16 port = sky2->port;
880 891
881 spin_lock(&sky2->tx_lock); 892 spin_lock_bh(&sky2->tx_lock);
882 893
883 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON); 894 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
884 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON); 895 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
885 sky2->vlgrp = grp; 896 sky2->vlgrp = grp;
886 897
887 spin_unlock(&sky2->tx_lock); 898 spin_unlock_bh(&sky2->tx_lock);
888} 899}
889 900
890static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 901static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -893,27 +904,42 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
893 struct sky2_hw *hw = sky2->hw; 904 struct sky2_hw *hw = sky2->hw;
894 u16 port = sky2->port; 905 u16 port = sky2->port;
895 906
896 spin_lock(&sky2->tx_lock); 907 spin_lock_bh(&sky2->tx_lock);
897 908
898 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); 909 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
899 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); 910 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
900 if (sky2->vlgrp) 911 if (sky2->vlgrp)
901 sky2->vlgrp->vlan_devices[vid] = NULL; 912 sky2->vlgrp->vlan_devices[vid] = NULL;
902 913
903 spin_unlock(&sky2->tx_lock); 914 spin_unlock_bh(&sky2->tx_lock);
904} 915}
905#endif 916#endif
906 917
907/* 918/*
919 * It appears the hardware has a bug in the FIFO logic that
920 * causes it to hang if the FIFO gets overrun and the receive buffer
921 * is not aligned. Also, alloc_skb() won't align properly if slab
922 * debugging is enabled.
923 */
924static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
925{
926 struct sk_buff *skb;
927
928 skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
929 if (likely(skb)) {
930 unsigned long p = (unsigned long) skb->data;
931 skb_reserve(skb,
932 ((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p);
933 }
934
935 return skb;
936}
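
sky2_alloc_skb over-allocates by RX_SKB_ALIGN bytes and then reserves the distance from skb->data to the next aligned boundary; the mask arithmetic rounds the address up to a multiple of RX_SKB_ALIGN. The same arithmetic on a made-up address:

#include <stdio.h>

#define RX_SKB_ALIGN 8

int main(void)
{
        unsigned long p = 0x1003;        /* hypothetical skb->data address */
        unsigned long reserve =
                ((p + RX_SKB_ALIGN - 1) & ~(unsigned long)(RX_SKB_ALIGN - 1)) - p;

        printf("reserve %lu bytes -> data at 0x%lx\n", reserve, p + reserve);
        return 0;
}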
937
938/*
908 * Allocate and setup receiver buffer pool. 939 * Allocate and setup receiver buffer pool.
909 * In case of 64 bit dma, there are 2X as many list elements 940 * In case of 64 bit dma, there are 2X as many list elements
910 * available as ring entries 941 * available as ring entries
911 * and need to reserve one list element so we don't wrap around. 942 * and need to reserve one list element so we don't wrap around.
912 *
913 * It appears the hardware has a bug in the FIFO logic that
914 * cause it to hang if the FIFO gets overrun and the receive buffer
915 * is not aligned. This means we can't use skb_reserve to align
916 * the IP header.
917 */ 943 */
918static int sky2_rx_start(struct sky2_port *sky2) 944static int sky2_rx_start(struct sky2_port *sky2)
919{ 945{
@@ -929,7 +955,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
929 for (i = 0; i < sky2->rx_pending; i++) { 955 for (i = 0; i < sky2->rx_pending; i++) {
930 struct ring_info *re = sky2->rx_ring + i; 956 struct ring_info *re = sky2->rx_ring + i;
931 957
932 re->skb = dev_alloc_skb(sky2->rx_bufsize); 958 re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
933 if (!re->skb) 959 if (!re->skb)
934 goto nomem; 960 goto nomem;
935 961
@@ -986,19 +1012,19 @@ static int sky2_up(struct net_device *dev)
986 1012
987 sky2_mac_init(hw, port); 1013 sky2_mac_init(hw, port);
988 1014
989 /* Configure RAM buffers */ 1015 /* Determine available ram buffer space (in 4K blocks).
990 if (hw->chip_id == CHIP_ID_YUKON_FE || 1016 * Note: not sure about the FE setting below yet
991 (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2)) 1017 */
992 ramsize = 4096; 1018 if (hw->chip_id == CHIP_ID_YUKON_FE)
993 else { 1019 ramsize = 4;
994 u8 e0 = sky2_read8(hw, B2_E_0); 1020 else
995 ramsize = (e0 == 0) ? (128 * 1024) : (e0 * 4096); 1021 ramsize = sky2_read8(hw, B2_E_0);
996 } 1022
1023 /* Give transmitter one third (rounded up) */
1024 rxspace = ramsize - (ramsize + 2) / 3;
997 1025
998 /* 2/3 for Rx */
999 rxspace = (2 * ramsize) / 3;
1000 sky2_ramset(hw, rxqaddr[port], 0, rxspace); 1026 sky2_ramset(hw, rxqaddr[port], 0, rxspace);
1001 sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace); 1027 sky2_ramset(hw, txqaddr[port], rxspace, ramsize);
1002 1028
1003 /* Make sure SyncQ is disabled */ 1029 /* Make sure SyncQ is disabled */
1004 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL), 1030 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
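
The replacement sizing code reads the RAM size in 4K blocks and gives the transmitter one third, rounded up: txspace = (ramsize + 2) / 3 and rxspace = ramsize - txspace. A quick table of the resulting splits for a few plausible sizes:

#include <stdio.h>

int main(void)
{
        unsigned ramsize;

        for (ramsize = 4; ramsize <= 16; ramsize += 4) {
                unsigned txspace = (ramsize + 2) / 3;  /* one third, rounded up */
                unsigned rxspace = ramsize - txspace;

                printf("ram=%2u blocks: rx=%2u tx=%2u\n",
                       ramsize, rxspace, txspace);
        }
        return 0;
}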
@@ -1054,7 +1080,7 @@ static inline int tx_avail(const struct sky2_port *sky2)
1054} 1080}
1055 1081
1056/* Estimate of number of transmit list elements required */ 1082/* Estimate of number of transmit list elements required */
1057static inline unsigned tx_le_req(const struct sk_buff *skb) 1083static unsigned tx_le_req(const struct sk_buff *skb)
1058{ 1084{
1059 unsigned count; 1085 unsigned count;
1060 1086
@@ -1090,6 +1116,10 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1090 u16 mss; 1116 u16 mss;
1091 u8 ctrl; 1117 u8 ctrl;
1092 1118
1119 /* No BH disabling for tx_lock here. We are running in BH-disabled
1120 * context and TX reclaim runs via poll inside of a software
1121 * interrupt; no related locks are taken in IRQ processing.
1122 */
1093 if (!spin_trylock(&sky2->tx_lock)) 1123 if (!spin_trylock(&sky2->tx_lock))
1094 return NETDEV_TX_LOCKED; 1124 return NETDEV_TX_LOCKED;
1095 1125
@@ -1099,8 +1129,9 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1099 */ 1129 */
1100 if (!netif_queue_stopped(dev)) { 1130 if (!netif_queue_stopped(dev)) {
1101 netif_stop_queue(dev); 1131 netif_stop_queue(dev);
1102 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", 1132 if (net_ratelimit())
1103 dev->name); 1133 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
1134 dev->name);
1104 } 1135 }
1105 spin_unlock(&sky2->tx_lock); 1136 spin_unlock(&sky2->tx_lock);
1106 1137
@@ -1199,7 +1230,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1199 1230
1200 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset, 1231 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
1201 frag->size, PCI_DMA_TODEVICE); 1232 frag->size, PCI_DMA_TODEVICE);
1202 addr64 = (mapping >> 16) >> 16; 1233 addr64 = high32(mapping);
1203 if (addr64 != sky2->tx_addr64) { 1234 if (addr64 != sky2->tx_addr64) {
1204 le = get_tx_le(sky2); 1235 le = get_tx_le(sky2);
1205 le->tx.addr = cpu_to_le32(addr64); 1236 le->tx.addr = cpu_to_le32(addr64);
@@ -1229,7 +1260,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1229 netif_stop_queue(dev); 1260 netif_stop_queue(dev);
1230 1261
1231out_unlock: 1262out_unlock:
1232 mmiowb();
1233 spin_unlock(&sky2->tx_lock); 1263 spin_unlock(&sky2->tx_lock);
1234 1264
1235 dev->trans_start = jiffies; 1265 dev->trans_start = jiffies;
@@ -1282,17 +1312,17 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1282 dev_kfree_skb_any(skb); 1312 dev_kfree_skb_any(skb);
1283 } 1313 }
1284 1314
1285 spin_lock(&sky2->tx_lock);
1286 sky2->tx_cons = put; 1315 sky2->tx_cons = put;
1287 if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE) 1316 if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
1288 netif_wake_queue(dev); 1317 netif_wake_queue(dev);
1289 spin_unlock(&sky2->tx_lock);
1290} 1318}
1291 1319
1292/* Cleanup all untransmitted buffers, assume transmitter not running */ 1320/* Cleanup all untransmitted buffers, assume transmitter not running */
1293static void sky2_tx_clean(struct sky2_port *sky2) 1321static void sky2_tx_clean(struct sky2_port *sky2)
1294{ 1322{
1323 spin_lock_bh(&sky2->tx_lock);
1295 sky2_tx_complete(sky2, sky2->tx_prod); 1324 sky2_tx_complete(sky2, sky2->tx_prod);
1325 spin_unlock_bh(&sky2->tx_lock);
1296} 1326}
1297 1327
1298/* Network shutdown */ 1328/* Network shutdown */
@@ -1582,28 +1612,40 @@ out:
1582 local_irq_enable(); 1612 local_irq_enable();
1583} 1613}
1584 1614
1615
1616/* Transmit timeout is only called if we are running, carrier is up
1617 * and tx queue is full (stopped).
1618 */
1585static void sky2_tx_timeout(struct net_device *dev) 1619static void sky2_tx_timeout(struct net_device *dev)
1586{ 1620{
1587 struct sky2_port *sky2 = netdev_priv(dev); 1621 struct sky2_port *sky2 = netdev_priv(dev);
1588 struct sky2_hw *hw = sky2->hw; 1622 struct sky2_hw *hw = sky2->hw;
1589 unsigned txq = txqaddr[sky2->port]; 1623 unsigned txq = txqaddr[sky2->port];
1624 u16 ridx;
1625
1626 /* Maybe we just missed a status interrupt */
1627 spin_lock(&sky2->tx_lock);
1628 ridx = sky2_read16(hw,
1629 sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
1630 sky2_tx_complete(sky2, ridx);
1631 spin_unlock(&sky2->tx_lock);
1632
1633 if (!netif_queue_stopped(dev)) {
1634 if (net_ratelimit())
1635 pr_info(PFX "transmit interrupt missed? recovered\n");
1636 return;
1637 }
1590 1638
1591 if (netif_msg_timer(sky2)) 1639 if (netif_msg_timer(sky2))
1592 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name); 1640 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
1593 1641
1594 netif_stop_queue(dev);
1595
1596 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); 1642 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
1597 sky2_read32(hw, Q_ADDR(txq, Q_CSR));
1598
1599 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); 1643 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1600 1644
1601 sky2_tx_clean(sky2); 1645 sky2_tx_clean(sky2);
1602 1646
1603 sky2_qset(hw, txq); 1647 sky2_qset(hw, txq);
1604 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1); 1648 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
1605
1606 netif_wake_queue(dev);
1607} 1649}
1608 1650
1609 1651
@@ -1713,7 +1755,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
1713 } else { 1755 } else {
1714 struct sk_buff *nskb; 1756 struct sk_buff *nskb;
1715 1757
1716 nskb = dev_alloc_skb(sky2->rx_bufsize); 1758 nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC);
1717 if (!nskb) 1759 if (!nskb)
1718 goto resubmit; 1760 goto resubmit;
1719 1761
@@ -1745,7 +1787,7 @@ oversize:
1745error: 1787error:
1746 ++sky2->net_stats.rx_errors; 1788 ++sky2->net_stats.rx_errors;
1747 1789
1748 if (netif_msg_rx_err(sky2)) 1790 if (netif_msg_rx_err(sky2) && net_ratelimit())
1749 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n", 1791 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
1750 sky2->netdev->name, status, length); 1792 sky2->netdev->name, status, length);
1751 1793
@@ -1766,13 +1808,16 @@ error:
1766 */ 1808 */
1767#define TX_NO_STATUS 0xffff 1809#define TX_NO_STATUS 0xffff
1768 1810
1769static inline void sky2_tx_check(struct sky2_hw *hw, int port, u16 last) 1811static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
1770{ 1812{
1771 if (last != TX_NO_STATUS) { 1813 if (last != TX_NO_STATUS) {
1772 struct net_device *dev = hw->dev[port]; 1814 struct net_device *dev = hw->dev[port];
1773 if (dev && netif_running(dev)) { 1815 if (dev && netif_running(dev)) {
1774 struct sky2_port *sky2 = netdev_priv(dev); 1816 struct sky2_port *sky2 = netdev_priv(dev);
1817
1818 spin_lock(&sky2->tx_lock);
1775 sky2_tx_complete(sky2, last); 1819 sky2_tx_complete(sky2, last);
1820 spin_unlock(&sky2->tx_lock);
1776 } 1821 }
1777 } 1822 }
1778} 1823}
@@ -1800,7 +1845,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
1800 struct sk_buff *skb; 1845 struct sk_buff *skb;
1801 u32 status; 1846 u32 status;
1802 u16 length; 1847 u16 length;
1803 u8 op;
1804 1848
1805 le = hw->st_le + hw->st_idx; 1849 le = hw->st_le + hw->st_idx;
1806 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; 1850 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
@@ -1814,10 +1858,8 @@ static int sky2_poll(struct net_device *dev0, int *budget)
1814 sky2 = netdev_priv(dev); 1858 sky2 = netdev_priv(dev);
1815 status = le32_to_cpu(le->status); 1859 status = le32_to_cpu(le->status);
1816 length = le16_to_cpu(le->length); 1860 length = le16_to_cpu(le->length);
1817 op = le->opcode & ~HW_OWNER;
1818 le->opcode = 0;
1819 1861
1820 switch (op) { 1862 switch (le->opcode & ~HW_OWNER) {
1821 case OP_RXSTAT: 1863 case OP_RXSTAT:
1822 skb = sky2_receive(sky2, length, status); 1864 skb = sky2_receive(sky2, length, status);
1823 if (!skb) 1865 if (!skb)
@@ -1865,14 +1907,13 @@ static int sky2_poll(struct net_device *dev0, int *budget)
1865 default: 1907 default:
1866 if (net_ratelimit()) 1908 if (net_ratelimit())
1867 printk(KERN_WARNING PFX 1909 printk(KERN_WARNING PFX
1868 "unknown status opcode 0x%x\n", op); 1910 "unknown status opcode 0x%x\n", le->opcode);
1869 break; 1911 break;
1870 } 1912 }
1871 } 1913 }
1872 1914
1873exit_loop: 1915exit_loop:
1874 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); 1916 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1875 mmiowb();
1876 1917
1877 sky2_tx_check(hw, 0, tx_done[0]); 1918 sky2_tx_check(hw, 0, tx_done[0]);
1878 sky2_tx_check(hw, 1, tx_done[1]); 1919 sky2_tx_check(hw, 1, tx_done[1]);
@@ -1887,7 +1928,6 @@ exit_loop:
1887 netif_rx_complete(dev0); 1928 netif_rx_complete(dev0);
1888 hw->intr_mask |= Y2_IS_STAT_BMU; 1929 hw->intr_mask |= Y2_IS_STAT_BMU;
1889 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1930 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1890 mmiowb();
1891 return 0; 1931 return 0;
1892 } else { 1932 } else {
1893 *budget -= work_done; 1933 *budget -= work_done;
@@ -1900,35 +1940,42 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
1900{ 1940{
1901 struct net_device *dev = hw->dev[port]; 1941 struct net_device *dev = hw->dev[port];
1902 1942
1903 printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n", 1943 if (net_ratelimit())
1904 dev->name, status); 1944 printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
1945 dev->name, status);
1905 1946
1906 if (status & Y2_IS_PAR_RD1) { 1947 if (status & Y2_IS_PAR_RD1) {
1907 printk(KERN_ERR PFX "%s: ram data read parity error\n", 1948 if (net_ratelimit())
1908 dev->name); 1949 printk(KERN_ERR PFX "%s: ram data read parity error\n",
1950 dev->name);
1909 /* Clear IRQ */ 1951 /* Clear IRQ */
1910 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR); 1952 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
1911 } 1953 }
1912 1954
1913 if (status & Y2_IS_PAR_WR1) { 1955 if (status & Y2_IS_PAR_WR1) {
1914 printk(KERN_ERR PFX "%s: ram data write parity error\n", 1956 if (net_ratelimit())
1915 dev->name); 1957 printk(KERN_ERR PFX "%s: ram data write parity error\n",
1958 dev->name);
1916 1959
1917 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR); 1960 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
1918 } 1961 }
1919 1962
1920 if (status & Y2_IS_PAR_MAC1) { 1963 if (status & Y2_IS_PAR_MAC1) {
1921 printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name); 1964 if (net_ratelimit())
1965 printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
1922 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE); 1966 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
1923 } 1967 }
1924 1968
1925 if (status & Y2_IS_PAR_RX1) { 1969 if (status & Y2_IS_PAR_RX1) {
1926 printk(KERN_ERR PFX "%s: RX parity error\n", dev->name); 1970 if (net_ratelimit())
1971 printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
1927 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR); 1972 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
1928 } 1973 }
1929 1974
1930 if (status & Y2_IS_TCP_TXA1) { 1975 if (status & Y2_IS_TCP_TXA1) {
1931 printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name); 1976 if (net_ratelimit())
1977 printk(KERN_ERR PFX "%s: TCP segmentation error\n",
1978 dev->name);
1932 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP); 1979 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
1933 } 1980 }
1934} 1981}
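
Every error printk in this function is now gated by net_ratelimit() so a storm of hardware-error interrupts cannot flood the log. A crude userspace stand-in for such a limiter (burst size and refill interval are made up, not the kernel's values):

#include <stdio.h>
#include <time.h>

static int ratelimit(void)
{
        static time_t last;
        static int tokens = 10;                /* allowed burst */
        time_t now = time(NULL);

        if (now != last) {
                last = now;
                tokens = 10;                /* refill once per second */
        }
        return tokens-- > 0;
}

int main(void)
{
        int i, printed = 0;

        for (i = 0; i < 100; i++)
                if (ratelimit())
                        printed++;
        printf("printed %d of 100 messages\n", printed);
        return 0;
}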
@@ -1944,8 +1991,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
1944 u16 pci_err; 1991 u16 pci_err;
1945 1992
1946 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err); 1993 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
1947 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", 1994 if (net_ratelimit())
1948 pci_name(hw->pdev), pci_err); 1995 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
1996 pci_name(hw->pdev), pci_err);
1949 1997
1950 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1998 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1951 pci_write_config_word(hw->pdev, PCI_STATUS, 1999 pci_write_config_word(hw->pdev, PCI_STATUS,
@@ -1959,8 +2007,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
1959 2007
1960 pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err); 2008 pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
1961 2009
1962 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", 2010 if (net_ratelimit())
1963 pci_name(hw->pdev), pex_err); 2011 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
2012 pci_name(hw->pdev), pex_err);
1964 2013
1965 /* clear the interrupt */ 2014 /* clear the interrupt */
1966 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2015 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -2250,7 +2299,7 @@ static int sky2_reset(struct sky2_hw *hw)
2250 return 0; 2299 return 0;
2251} 2300}
2252 2301
2253static inline u32 sky2_supported_modes(const struct sky2_hw *hw) 2302static u32 sky2_supported_modes(const struct sky2_hw *hw)
2254{ 2303{
2255 u32 modes; 2304 u32 modes;
2256 if (hw->copper) { 2305 if (hw->copper) {
@@ -2995,7 +3044,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
2995 return dev; 3044 return dev;
2996} 3045}
2997 3046
2998static inline void sky2_show_addr(struct net_device *dev) 3047static void __devinit sky2_show_addr(struct net_device *dev)
2999{ 3048{
3000 const struct sky2_port *sky2 = netdev_priv(dev); 3049 const struct sky2_port *sky2 = netdev_priv(dev);
3001 3050
@@ -3038,13 +3087,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3038 goto err_out_free_regions; 3087 goto err_out_free_regions;
3039 } 3088 }
3040 3089
3041 if (sizeof(dma_addr_t) > sizeof(u32)) { 3090 if (sizeof(dma_addr_t) > sizeof(u32) &&
3042 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 3091 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
3043 if (!err) 3092 using_dac = 1;
3044 using_dac = 1; 3093 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3045 } 3094 if (err < 0) {
3095 printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
3096 "for consistent allocations\n", pci_name(pdev));
3097 goto err_out_free_regions;
3098 }
3046 3099
3047 if (!using_dac) { 3100 } else {
3048 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3101 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3049 if (err) { 3102 if (err) {
3050 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3103 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
@@ -3052,6 +3105,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3052 goto err_out_free_regions; 3105 goto err_out_free_regions;
3053 } 3106 }
3054 } 3107 }
3108
3055#ifdef __BIG_ENDIAN 3109#ifdef __BIG_ENDIAN
3056 /* byte swap descriptors in hardware */ 3110 /* byte swap descriptors in hardware */
3057 { 3111 {
@@ -3064,14 +3118,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3064#endif 3118#endif
3065 3119
3066 err = -ENOMEM; 3120 err = -ENOMEM;
3067 hw = kmalloc(sizeof(*hw), GFP_KERNEL); 3121 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3068 if (!hw) { 3122 if (!hw) {
3069 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n", 3123 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
3070 pci_name(pdev)); 3124 pci_name(pdev));
3071 goto err_out_free_regions; 3125 goto err_out_free_regions;
3072 } 3126 }
3073 3127
3074 memset(hw, 0, sizeof(*hw));
3075 hw->pdev = pdev; 3128 hw->pdev = pdev;
3076 3129
3077 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3130 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 0d765f1733b5..1f5975a61e1f 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -22,7 +22,6 @@
22 */ 22 */
23 23
24#include <linux/config.h> 24#include <linux/config.h>
25
26#include <linux/compiler.h> 25#include <linux/compiler.h>
27#include <linux/crc32.h> 26#include <linux/crc32.h>
28#include <linux/delay.h> 27#include <linux/delay.h>
@@ -30,6 +29,7 @@
30#include <linux/ethtool.h> 29#include <linux/ethtool.h>
31#include <linux/firmware.h> 30#include <linux/firmware.h>
32#include <linux/if_vlan.h> 31#include <linux/if_vlan.h>
32#include <linux/in.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/ioport.h> 34#include <linux/ioport.h>
35#include <linux/ip.h> 35#include <linux/ip.h>
@@ -43,6 +43,7 @@
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <linux/tcp.h> 44#include <linux/tcp.h>
45#include <linux/types.h> 45#include <linux/types.h>
46#include <linux/vmalloc.h>
46#include <linux/wait.h> 47#include <linux/wait.h>
47#include <linux/workqueue.h> 48#include <linux/workqueue.h>
48#include <asm/bitops.h> 49#include <asm/bitops.h>
@@ -108,42 +109,6 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
108 writel(value, card->regs + reg); 109 writel(value, card->regs + reg);
109} 110}
110 111
111/**
112 * spider_net_write_reg_sync - writes to an SMMIO register of a card
113 * @card: device structure
114 * @reg: register to write to
115 * @value: value to write into the specified SMMIO register
116 *
117 * Unlike spider_net_write_reg, this will also make sure the
118 * data arrives on the card by reading the reg again.
119 */
120static void
121spider_net_write_reg_sync(struct spider_net_card *card, u32 reg, u32 value)
122{
123 value = cpu_to_le32(value);
124 writel(value, card->regs + reg);
125 (void)readl(card->regs + reg);
126}
127
128/**
129 * spider_net_rx_irq_off - switch off rx irq on this spider card
130 * @card: device structure
131 *
132 * switches off rx irq by masking them out in the GHIINTnMSK register
133 */
134static void
135spider_net_rx_irq_off(struct spider_net_card *card)
136{
137 u32 regvalue;
138 unsigned long flags;
139
140 spin_lock_irqsave(&card->intmask_lock, flags);
141 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
142 regvalue &= ~SPIDER_NET_RXINT;
143 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
144 spin_unlock_irqrestore(&card->intmask_lock, flags);
145}
146
147/** spider_net_write_phy - write to phy register 112/** spider_net_write_phy - write to phy register
148 * @netdev: adapter to be written to 113 * @netdev: adapter to be written to
149 * @mii_id: id of MII 114 * @mii_id: id of MII
@@ -199,60 +164,33 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
199} 164}
200 165
201/** 166/**
202 * spider_net_rx_irq_on - switch on rx irq on this spider card 167 * spider_net_rx_irq_off - switch off rx irq on this spider card
203 * @card: device structure
204 *
205 * switches on rx irq by enabling them in the GHIINTnMSK register
206 */
207static void
208spider_net_rx_irq_on(struct spider_net_card *card)
209{
210 u32 regvalue;
211 unsigned long flags;
212
213 spin_lock_irqsave(&card->intmask_lock, flags);
214 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
215 regvalue |= SPIDER_NET_RXINT;
216 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
217 spin_unlock_irqrestore(&card->intmask_lock, flags);
218}
219
220/**
221 * spider_net_tx_irq_off - switch off tx irq on this spider card
222 * @card: device structure 168 * @card: device structure
223 * 169 *
224 * switches off tx irq by masking them out in the GHIINTnMSK register 170 * switches off rx irq by masking them out in the GHIINTnMSK register
225 */ 171 */
226static void 172static void
227spider_net_tx_irq_off(struct spider_net_card *card) 173spider_net_rx_irq_off(struct spider_net_card *card)
228{ 174{
229 u32 regvalue; 175 u32 regvalue;
230 unsigned long flags;
231 176
232 spin_lock_irqsave(&card->intmask_lock, flags); 177 regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
233 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); 178 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
234 regvalue &= ~SPIDER_NET_TXINT;
235 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
236 spin_unlock_irqrestore(&card->intmask_lock, flags);
237} 179}
238 180
239/** 181/**
240 * spider_net_tx_irq_on - switch on tx irq on this spider card 182 * spider_net_rx_irq_on - switch on rx irq on this spider card
241 * @card: device structure 183 * @card: device structure
242 * 184 *
243 * switches on tx irq by enabling them in the GHIINTnMSK register 185 * switches on rx irq by enabling them in the GHIINTnMSK register
244 */ 186 */
245static void 187static void
246spider_net_tx_irq_on(struct spider_net_card *card) 188spider_net_rx_irq_on(struct spider_net_card *card)
247{ 189{
248 u32 regvalue; 190 u32 regvalue;
249 unsigned long flags;
250 191
251 spin_lock_irqsave(&card->intmask_lock, flags); 192 regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
252 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); 193 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
253 regvalue |= SPIDER_NET_TXINT;
254 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
255 spin_unlock_irqrestore(&card->intmask_lock, flags);
256} 194}
257 195
258/** 196/**
@@ -326,9 +264,8 @@ static enum spider_net_descr_status
326spider_net_get_descr_status(struct spider_net_descr *descr) 264spider_net_get_descr_status(struct spider_net_descr *descr)
327{ 265{
328 u32 cmd_status; 266 u32 cmd_status;
329 rmb(); 267
330 cmd_status = descr->dmac_cmd_status; 268 cmd_status = descr->dmac_cmd_status;
331 rmb();
332 cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT; 269 cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
333 /* no need to mask out any bits, as cmd_status is 32 bits wide only 270 /* no need to mask out any bits, as cmd_status is 32 bits wide only
334 * (and unsigned) */ 271 * (and unsigned) */
@@ -349,7 +286,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
349{ 286{
350 u32 cmd_status; 287 u32 cmd_status;
351 /* read the status */ 288 /* read the status */
352 mb();
353 cmd_status = descr->dmac_cmd_status; 289 cmd_status = descr->dmac_cmd_status;
354 /* clean the upper 4 bits */ 290 /* clean the upper 4 bits */
355 cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO; 291 cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
@@ -357,7 +293,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
357 cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT; 293 cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
358 /* and write it back */ 294 /* and write it back */
359 descr->dmac_cmd_status = cmd_status; 295 descr->dmac_cmd_status = cmd_status;
360 wmb();
361} 296}
362 297
363/** 298/**
@@ -398,8 +333,9 @@ spider_net_init_chain(struct spider_net_card *card,
398{ 333{
399 int i; 334 int i;
400 struct spider_net_descr *descr; 335 struct spider_net_descr *descr;
336 dma_addr_t buf;
401 337
402 spin_lock_init(&card->chain_lock); 338 atomic_set(&card->rx_chain_refill,0);
403 339
404 descr = start_descr; 340 descr = start_descr;
405 memset(descr, 0, sizeof(*descr) * no); 341 memset(descr, 0, sizeof(*descr) * no);
@@ -408,14 +344,14 @@ spider_net_init_chain(struct spider_net_card *card,
408 for (i=0; i<no; i++, descr++) { 344 for (i=0; i<no; i++, descr++) {
409 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 345 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
410 346
411 descr->bus_addr = 347 buf = pci_map_single(card->pdev, descr,
412 pci_map_single(card->pdev, descr, 348 SPIDER_NET_DESCR_SIZE,
413 SPIDER_NET_DESCR_SIZE, 349 PCI_DMA_BIDIRECTIONAL);
414 PCI_DMA_BIDIRECTIONAL);
415 350
416 if (descr->bus_addr == DMA_ERROR_CODE) 351 if (buf == DMA_ERROR_CODE)
417 goto iommu_error; 352 goto iommu_error;
418 353
354 descr->bus_addr = buf;
419 descr->next = descr + 1; 355 descr->next = descr + 1;
420 descr->prev = descr - 1; 356 descr->prev = descr - 1;
421 357
@@ -439,7 +375,8 @@ iommu_error:
439 for (i=0; i < no; i++, descr++) 375 for (i=0; i < no; i++, descr++)
440 if (descr->bus_addr) 376 if (descr->bus_addr)
441 pci_unmap_single(card->pdev, descr->bus_addr, 377 pci_unmap_single(card->pdev, descr->bus_addr,
442 SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL); 378 SPIDER_NET_DESCR_SIZE,
379 PCI_DMA_BIDIRECTIONAL);
443 return -ENOMEM; 380 return -ENOMEM;
444} 381}
445 382
@@ -459,7 +396,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
459 if (descr->skb) { 396 if (descr->skb) {
460 dev_kfree_skb(descr->skb); 397 dev_kfree_skb(descr->skb);
461 pci_unmap_single(card->pdev, descr->buf_addr, 398 pci_unmap_single(card->pdev, descr->buf_addr,
462 SPIDER_NET_MAX_MTU, 399 SPIDER_NET_MAX_FRAME,
463 PCI_DMA_BIDIRECTIONAL); 400 PCI_DMA_BIDIRECTIONAL);
464 } 401 }
465 descr = descr->next; 402 descr = descr->next;
@@ -480,12 +417,13 @@ static int
480spider_net_prepare_rx_descr(struct spider_net_card *card, 417spider_net_prepare_rx_descr(struct spider_net_card *card,
481 struct spider_net_descr *descr) 418 struct spider_net_descr *descr)
482{ 419{
420 dma_addr_t buf;
483 int error = 0; 421 int error = 0;
484 int offset; 422 int offset;
485 int bufsize; 423 int bufsize;
486 424
487 /* we need to round up the buffer size to a multiple of 128 */ 425 /* we need to round up the buffer size to a multiple of 128 */
488 bufsize = (SPIDER_NET_MAX_MTU + SPIDER_NET_RXBUF_ALIGN - 1) & 426 bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
489 (~(SPIDER_NET_RXBUF_ALIGN - 1)); 427 (~(SPIDER_NET_RXBUF_ALIGN - 1));
490 428
491 /* and we need to have it 128 byte aligned, therefore we allocate a 429 /* and we need to have it 128 byte aligned, therefore we allocate a
@@ -493,10 +431,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
493 /* allocate an skb */ 431 /* allocate an skb */
494 descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1); 432 descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
495 if (!descr->skb) { 433 if (!descr->skb) {
496 if (net_ratelimit()) 434 if (netif_msg_rx_err(card) && net_ratelimit())
497 if (netif_msg_rx_err(card)) 435 pr_err("Not enough memory to allocate rx buffer\n");
498 pr_err("Not enough memory to allocate "
499 "rx buffer\n");
500 return -ENOMEM; 436 return -ENOMEM;
501 } 437 }
502 descr->buf_size = bufsize; 438 descr->buf_size = bufsize;
@@ -510,12 +446,12 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
510 if (offset) 446 if (offset)
511 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); 447 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
512 /* io-mmu-map the skb */ 448 /* io-mmu-map the skb */
513 descr->buf_addr = pci_map_single(card->pdev, descr->skb->data, 449 buf = pci_map_single(card->pdev, descr->skb->data,
514 SPIDER_NET_MAX_MTU, 450 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
515 PCI_DMA_BIDIRECTIONAL); 451 descr->buf_addr = buf;
516 if (descr->buf_addr == DMA_ERROR_CODE) { 452 if (buf == DMA_ERROR_CODE) {
517 dev_kfree_skb_any(descr->skb); 453 dev_kfree_skb_any(descr->skb);
518 if (netif_msg_rx_err(card)) 454 if (netif_msg_rx_err(card) && net_ratelimit())
519 pr_err("Could not iommu-map rx buffer\n"); 455 pr_err("Could not iommu-map rx buffer\n");
520 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 456 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
521 } else { 457 } else {
@@ -526,10 +462,10 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
526} 462}
527 463
528/** 464/**
529 * spider_net_enable_rxctails - sets RX dmac chain tail addresses 465 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
530 * @card: card structure 466 * @card: card structure
531 * 467 *
532 * spider_net_enable_rxctails sets the RX DMAC chain tail addresses in the 468 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
533 * chip by writing to the appropriate register. DMA is enabled in 469 * chip by writing to the appropriate register. DMA is enabled in
534 * spider_net_enable_rxdmac. 470 * spider_net_enable_rxdmac.
535 */ 471 */
@@ -551,6 +487,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
551static void 487static void
552spider_net_enable_rxdmac(struct spider_net_card *card) 488spider_net_enable_rxdmac(struct spider_net_card *card)
553{ 489{
490 wmb();
554 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, 491 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
555 SPIDER_NET_DMA_RX_VALUE); 492 SPIDER_NET_DMA_RX_VALUE);
556} 493}
@@ -559,32 +496,28 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
559 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains 496 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
560 * @card: card structure 497 * @card: card structure
561 * 498 *
562 * refills descriptors in all chains (last used chain first): allocates skbs 499 * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
563 * and iommu-maps them.
564 */ 500 */
565static void 501static void
566spider_net_refill_rx_chain(struct spider_net_card *card) 502spider_net_refill_rx_chain(struct spider_net_card *card)
567{ 503{
568 struct spider_net_descr_chain *chain; 504 struct spider_net_descr_chain *chain;
569 int count = 0;
570 unsigned long flags;
571 505
572 chain = &card->rx_chain; 506 chain = &card->rx_chain;
573 507
574 spin_lock_irqsave(&card->chain_lock, flags); 508 /* one context doing the refill (and a second context seeing that
575 while (spider_net_get_descr_status(chain->head) == 509 * and omitting it) is ok. If called by NAPI, we'll be called again
576 SPIDER_NET_DESCR_NOT_IN_USE) { 510 * as spider_net_decode_one_descr is called several times. If some
577 if (spider_net_prepare_rx_descr(card, chain->head)) 511 * interrupt calls us, the NAPI is about to clean up anyway. */
578 break; 512 if (atomic_inc_return(&card->rx_chain_refill) == 1)
579 count++; 513 while (spider_net_get_descr_status(chain->head) ==
580 chain->head = chain->head->next; 514 SPIDER_NET_DESCR_NOT_IN_USE) {
581 } 515 if (spider_net_prepare_rx_descr(card, chain->head))
582 spin_unlock_irqrestore(&card->chain_lock, flags); 516 break;
517 chain->head = chain->head->next;
518 }
583 519
584 /* could be optimized, only do that, if we know the DMA processing 520 atomic_dec(&card->rx_chain_refill);
585 * has terminated */
586 if (count)
587 spider_net_enable_rxdmac(card);
588} 521}
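
The refill path drops the chain spinlock in favour of an atomic counter: only the caller that raises rx_chain_refill from 0 to 1 walks the chain, and a concurrent caller simply backs off. A C11-atomics sketch of that guard (names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refill_active;

/* atomic_inc_return(&x) == 1 corresponds to fetch_add(&x, 1) == 0 */
static void refill(void)
{
        if (atomic_fetch_add(&refill_active, 1) == 0) {
                /* ... allocate and iommu-map rx buffers here ... */
                puts("refilling");
        }
        atomic_fetch_sub(&refill_active, 1);
}

int main(void)
{
        refill();
        return 0;
}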
589 522
590/** 523/**
@@ -613,6 +546,7 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
613 /* this will allocate the rest of the rx buffers; if not, it's 546 /* this will allocate the rest of the rx buffers; if not, it's
614 * business as usual later on */ 547 * business as usual later on */
615 spider_net_refill_rx_chain(card); 548 spider_net_refill_rx_chain(card);
549 spider_net_enable_rxdmac(card);
616 return 0; 550 return 0;
617 551
618error: 552error:
@@ -649,24 +583,30 @@ spider_net_release_tx_descr(struct spider_net_card *card,
649 * @card: adapter structure 583 * @card: adapter structure
650 * @brutal: if set, don't care about whether descriptor seems to be in use 584 * @brutal: if set, don't care about whether descriptor seems to be in use
651 * 585 *
652 * releases the tx descriptors that spider has finished with (if non-brutal) 586 * returns 0 if the tx ring is empty, otherwise 1.
653 * or simply release tx descriptors (if brutal) 587 *
588 * spider_net_release_tx_chain releases the tx descriptors that spider has
589 * finished with (if non-brutal) or simply releases tx descriptors (if brutal).
590 * If some other context is calling this function, we return 1 so that we're
591 * scheduled again (if we were scheduled) and will not lose initiative.
654 */ 592 */
655static void 593static int
656spider_net_release_tx_chain(struct spider_net_card *card, int brutal) 594spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
657{ 595{
658 struct spider_net_descr_chain *tx_chain = &card->tx_chain; 596 struct spider_net_descr_chain *tx_chain = &card->tx_chain;
659 enum spider_net_descr_status status; 597 enum spider_net_descr_status status;
660 598
661 spider_net_tx_irq_off(card); 599 if (atomic_inc_return(&card->tx_chain_release) != 1) {
600 atomic_dec(&card->tx_chain_release);
601 return 1;
602 }
662 603
663 /* no lock for chain needed, if this is only executed once at a time */
664again:
665 for (;;) { 604 for (;;) {
666 status = spider_net_get_descr_status(tx_chain->tail); 605 status = spider_net_get_descr_status(tx_chain->tail);
667 switch (status) { 606 switch (status) {
668 case SPIDER_NET_DESCR_CARDOWNED: 607 case SPIDER_NET_DESCR_CARDOWNED:
669 if (!brutal) goto out; 608 if (!brutal)
609 goto out;
670 /* fallthrough, if we release the descriptors 610 /* fallthrough, if we release the descriptors
671 * brutally (then we don't care about 611 * brutally (then we don't care about
672 * SPIDER_NET_DESCR_CARDOWNED) */ 612 * SPIDER_NET_DESCR_CARDOWNED) */
@@ -693,25 +633,30 @@ again:
693 tx_chain->tail = tx_chain->tail->next; 633 tx_chain->tail = tx_chain->tail->next;
694 } 634 }
695out: 635out:
636 atomic_dec(&card->tx_chain_release);
637
696 netif_wake_queue(card->netdev); 638 netif_wake_queue(card->netdev);
697 639
698 if (!brutal) { 640 if (status == SPIDER_NET_DESCR_CARDOWNED)
699 /* switch on tx irqs (while we are still in the interrupt 641 return 1;
700 * handler, so we don't get an interrupt), check again 642 return 0;
701 * for done descriptors. This results in fewer interrupts */ 643}
702 spider_net_tx_irq_on(card);
703 status = spider_net_get_descr_status(tx_chain->tail);
704 switch (status) {
705 case SPIDER_NET_DESCR_RESPONSE_ERROR:
706 case SPIDER_NET_DESCR_PROTECTION_ERROR:
707 case SPIDER_NET_DESCR_FORCE_END:
708 case SPIDER_NET_DESCR_COMPLETE:
709 goto again;
710 default:
711 break;
712 }
713 }
714 644
645/**
646 * spider_net_cleanup_tx_ring - cleans up the TX ring
647 * @card: card structure
648 *
649 * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
650 * interrupts to cleanup our TX ring) and returns sent packets to the stack
651 * by freeing them
652 */
653static void
654spider_net_cleanup_tx_ring(struct spider_net_card *card)
655{
656 if ( (spider_net_release_tx_chain(card, 0)) &&
657 (card->netdev->flags & IFF_UP) ) {
658 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
659 }
715} 660}
716 661
717/** 662/**
@@ -726,16 +671,22 @@ out:
726static u8 671static u8
727spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr) 672spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
728{ 673{
729 /* FIXME: an addr of 01:00:5e:00:00:01 must result in 0xa9,
730 * ff:ff:ff:ff:ff:ff must result in 0xfd */
731 u32 crc; 674 u32 crc;
732 u8 hash; 675 u8 hash;
676 char addr_for_crc[ETH_ALEN] = { 0, };
677 int i, bit;
733 678
734 crc = crc32_be(~0, addr, netdev->addr_len); 679 for (i = 0; i < ETH_ALEN * 8; i++) {
680 bit = (addr[i / 8] >> (i % 8)) & 1;
681 addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
682 }
683
684 crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
735 685
736 hash = (crc >> 27); 686 hash = (crc >> 27);
737 hash <<= 3; 687 hash <<= 3;
738 hash |= crc & 7; 688 hash |= crc & 7;
689 hash &= 0xff;
739 690
740 return hash; 691 return hash;
741} 692}
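
The rewritten hash mirrors the 48-bit address bit-for-bit before running big-endian CRC-32 over it, then builds an 8-bit bucket from the CRC's top five and bottom three bits. A self-contained userspace version; the crc32_be below is a plain bitwise rendering of the kernel routine, and the removed FIXME said ff:ff:ff:ff:ff:ff should hash to 0xfd:

#include <stdint.h>
#include <stdio.h>

/* bitwise big-endian CRC-32, polynomial 0x04c11db7, no final inversion */
static uint32_t crc32_be(uint32_t crc, const uint8_t *p, int len)
{
        int i;

        while (len--) {
                crc ^= (uint32_t)*p++ << 24;
                for (i = 0; i < 8; i++)
                        crc = (crc << 1) ^ ((crc & 0x80000000) ? 0x04c11db7 : 0);
        }
        return crc;
}

static uint8_t mc_hash(const uint8_t *addr)
{
        uint8_t rev[6] = { 0 };
        uint32_t crc;
        int i, bit;

        /* mirror the whole 48-bit string, as the driver loop does */
        for (i = 0; i < 48; i++) {
                bit = (addr[i / 8] >> (i % 8)) & 1;
                rev[5 - i / 8] |= bit << (7 - i % 8);
        }
        crc = crc32_be(~0u, rev, 6);
        return (uint8_t)(((crc >> 27) << 3 | (crc & 7)) & 0xff);
}

int main(void)
{
        uint8_t bc[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

        printf("hash 0x%02x\n", mc_hash(bc));
        return 0;
}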
@@ -821,9 +772,11 @@ spider_net_stop(struct net_device *netdev)
821{ 772{
822 struct spider_net_card *card = netdev_priv(netdev); 773 struct spider_net_card *card = netdev_priv(netdev);
823 774
775 tasklet_kill(&card->rxram_full_tl);
824 netif_poll_disable(netdev); 776 netif_poll_disable(netdev);
825 netif_carrier_off(netdev); 777 netif_carrier_off(netdev);
826 netif_stop_queue(netdev); 778 netif_stop_queue(netdev);
779 del_timer_sync(&card->tx_timer);
827 780
828 /* disable/mask all interrupts */ 781 /* disable/mask all interrupts */
829 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); 782 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
@@ -872,13 +825,15 @@ spider_net_get_next_tx_descr(struct spider_net_card *card)
872 * @skb: packet to consider 825 * @skb: packet to consider
873 * 826 *
874 * fills out the command and status field of the descriptor structure, 827 * fills out the command and status field of the descriptor structure,
875 * depending on hardware checksum settings. This function assumes a wmb() 828 * depending on hardware checksum settings.
876 * has executed before.
877 */ 829 */
878static void 830static void
879spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, 831spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
880 struct sk_buff *skb) 832 struct sk_buff *skb)
881{ 833{
834 /* make sure the other fields in the descriptor are written */
835 wmb();
836
882 if (skb->ip_summed != CHECKSUM_HW) { 837 if (skb->ip_summed != CHECKSUM_HW) {
883 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; 838 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
884 return; 839 return;
@@ -887,14 +842,13 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
887 /* is packet ip? 842 /* is packet ip?
888 * if yes: tcp? udp? */ 843 * if yes: tcp? udp? */
889 if (skb->protocol == htons(ETH_P_IP)) { 844 if (skb->protocol == htons(ETH_P_IP)) {
890 if (skb->nh.iph->protocol == IPPROTO_TCP) { 845 if (skb->nh.iph->protocol == IPPROTO_TCP)
891 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS; 846 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
892 } else if (skb->nh.iph->protocol == IPPROTO_UDP) { 847 else if (skb->nh.iph->protocol == IPPROTO_UDP)
893 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS; 848 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
894 } else { /* the stack should checksum non-tcp and non-udp 849 else /* the stack should checksum non-tcp and non-udp
895 packets on its own: NETIF_F_IP_CSUM */ 850 packets on its own: NETIF_F_IP_CSUM */
896 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; 851 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
897 }
898 } 852 }
899} 853}
900 854
@@ -914,23 +868,22 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
914 struct spider_net_descr *descr, 868 struct spider_net_descr *descr,
915 struct sk_buff *skb) 869 struct sk_buff *skb)
916{ 870{
917 descr->buf_addr = pci_map_single(card->pdev, skb->data, 871 dma_addr_t buf;
918 skb->len, PCI_DMA_BIDIRECTIONAL); 872
919 if (descr->buf_addr == DMA_ERROR_CODE) { 873 buf = pci_map_single(card->pdev, skb->data,
920 if (netif_msg_tx_err(card)) 874 skb->len, PCI_DMA_BIDIRECTIONAL);
875 if (buf == DMA_ERROR_CODE) {
876 if (netif_msg_tx_err(card) && net_ratelimit())
921 pr_err("could not iommu-map packet (%p, %i). " 877 pr_err("could not iommu-map packet (%p, %i). "
922 "Dropping packet\n", skb->data, skb->len); 878 "Dropping packet\n", skb->data, skb->len);
923 return -ENOMEM; 879 return -ENOMEM;
924 } 880 }
925 881
882 descr->buf_addr = buf;
926 descr->buf_size = skb->len; 883 descr->buf_size = skb->len;
927 descr->skb = skb; 884 descr->skb = skb;
928 descr->data_status = 0; 885 descr->data_status = 0;
929 886
930 /* make sure the above values are in memory before we change the
931 * status */
932 wmb();
933
934 spider_net_set_txdescr_cmdstat(descr, skb); 887 spider_net_set_txdescr_cmdstat(descr, skb);
935 888
936 return 0; 889 return 0;
@@ -972,17 +925,12 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
972 struct spider_net_descr *descr; 925 struct spider_net_descr *descr;
973 int result; 926 int result;
974 927
975 descr = spider_net_get_next_tx_descr(card); 928 spider_net_release_tx_chain(card, 0);
976 929
977 if (!descr) { 930 descr = spider_net_get_next_tx_descr(card);
978 netif_stop_queue(netdev);
979 931
980 descr = spider_net_get_next_tx_descr(card); 932 if (!descr)
981 if (!descr) 933 goto error;
982 goto error;
983 else
984 netif_start_queue(netdev);
985 }
986 934
987 result = spider_net_prepare_tx_descr(card, descr, skb); 935 result = spider_net_prepare_tx_descr(card, descr, skb);
988 if (result) 936 if (result)
@@ -990,19 +938,25 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
990 938
991 card->tx_chain.head = card->tx_chain.head->next; 939 card->tx_chain.head = card->tx_chain.head->next;
992 940
993 /* make sure the status from spider_net_prepare_tx_descr is in
994 * memory before we check out the previous descriptor */
995 wmb();
996
997 if (spider_net_get_descr_status(descr->prev) != 941 if (spider_net_get_descr_status(descr->prev) !=
998 SPIDER_NET_DESCR_CARDOWNED) 942 SPIDER_NET_DESCR_CARDOWNED) {
999 spider_net_kick_tx_dma(card, descr); 943 /* make sure the current descriptor is in memory. Then
944 * kicking it on again makes sense, if the previous is not
945 * card-owned anymore. Check the previous descriptor twice
946 * to omit an mb() in heavy traffic cases */
947 mb();
948 if (spider_net_get_descr_status(descr->prev) !=
949 SPIDER_NET_DESCR_CARDOWNED)
950 spider_net_kick_tx_dma(card, descr);
951 }
952
953 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
1000 954
1001 return NETDEV_TX_OK; 955 return NETDEV_TX_OK;
1002 956
1003error: 957error:
1004 card->netdev_stats.tx_dropped++; 958 card->netdev_stats.tx_dropped++;
1005 return NETDEV_TX_LOCKED; 959 return NETDEV_TX_BUSY;
1006} 960}
1007 961
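Note: two details in this hunk are worth spelling out. First, the error path now returns NETDEV_TX_BUSY, which makes the core requeue the skb, instead of NETDEV_TX_LOCKED, which claims a lock collision that never happened here. Second, the previous descriptor is checked twice around an mb() so the barrier is only paid when the first, cheap check suggests the DMA engine may have stopped. The general shape (status_of() and kick_dma() stand in for the driver's helpers):

	if (status_of(descr->prev) != CARDOWNED) {
		mb();	/* make our descriptor writes visible first */
		if (status_of(descr->prev) != CARDOWNED)
			kick_dma(card, descr);
	}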
1008/** 962/**
@@ -1027,6 +981,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1027 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on 981 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
1028 * @descr: descriptor to process 982 * @descr: descriptor to process
1029 * @card: card structure 983 * @card: card structure
984 * @napi: whether caller is in NAPI context
1030 * 985 *
1031 * returns 1 on success, 0 if no packet was passed to the stack 986 * returns 1 on success, 0 if no packet was passed to the stack
1032 * 987 *
@@ -1035,7 +990,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1035 */ 990 */
1036static int 991static int
1037spider_net_pass_skb_up(struct spider_net_descr *descr, 992spider_net_pass_skb_up(struct spider_net_descr *descr,
1038 struct spider_net_card *card) 993 struct spider_net_card *card, int napi)
1039{ 994{
1040 struct sk_buff *skb; 995 struct sk_buff *skb;
1041 struct net_device *netdev; 996 struct net_device *netdev;
@@ -1046,22 +1001,20 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1046 1001
1047 netdev = card->netdev; 1002 netdev = card->netdev;
1048 1003
1049 /* check for errors in the data_error flag */ 1004 /* unmap descriptor */
1050 if ((data_error & SPIDER_NET_DATA_ERROR_MASK) && 1005 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
1051 netif_msg_rx_err(card))
1052 pr_err("error in received descriptor found, "
1053 "data_status=x%08x, data_error=x%08x\n",
1054 data_status, data_error);
1055
1056 /* prepare skb, unmap descriptor */
1057 skb = descr->skb;
1058 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_MTU,
1059 PCI_DMA_BIDIRECTIONAL); 1006 PCI_DMA_BIDIRECTIONAL);
1060 1007
1061 /* the cases we'll throw away the packet immediately */ 1008 /* the cases we'll throw away the packet immediately */
1062 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) 1009 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1010 if (netif_msg_rx_err(card))
1011 pr_err("error in received descriptor found, "
1012 "data_status=x%08x, data_error=x%08x\n",
1013 data_status, data_error);
1063 return 0; 1014 return 0;
1015 }
1064 1016
1017 skb = descr->skb;
1065 skb->dev = netdev; 1018 skb->dev = netdev;
1066 skb_put(skb, descr->valid_size); 1019 skb_put(skb, descr->valid_size);
1067 1020
@@ -1073,14 +1026,14 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1073 1026
1074 /* checksum offload */ 1027 /* checksum offload */
1075 if (card->options.rx_csum) { 1028 if (card->options.rx_csum) {
1076 if ( (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) && 1029 if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
1077 (!(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK)) ) 1030 SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
1031 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
1078 skb->ip_summed = CHECKSUM_UNNECESSARY; 1032 skb->ip_summed = CHECKSUM_UNNECESSARY;
1079 else 1033 else
1080 skb->ip_summed = CHECKSUM_NONE; 1034 skb->ip_summed = CHECKSUM_NONE;
1081 } else { 1035 } else
1082 skb->ip_summed = CHECKSUM_NONE; 1036 skb->ip_summed = CHECKSUM_NONE;
1083 }
1084 1037
1085 if (data_status & SPIDER_NET_VLAN_PACKET) { 1038 if (data_status & SPIDER_NET_VLAN_PACKET) {
1086 /* further enhancements: HW-accel VLAN 1039 /* further enhancements: HW-accel VLAN
@@ -1089,7 +1042,10 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1089 } 1042 }
1090 1043
1091 /* pass skb up to stack */ 1044 /* pass skb up to stack */
1092 netif_receive_skb(skb); 1045 if (napi)
1046 netif_receive_skb(skb);
1047 else
1048 netif_rx_ni(skb);
1093 1049
1094 /* update netdevice statistics */ 1050 /* update netdevice statistics */
1095 card->netdev_stats.rx_packets++; 1051 card->netdev_stats.rx_packets++;
@@ -1099,16 +1055,18 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1099} 1055}
1100 1056
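Note: the new napi flag selects the delivery call: netif_receive_skb() hands the packet to the stack directly, which suits the NAPI poll loop, while netif_rx_ni() goes through the backlog queue for the non-NAPI caller. The two call sites in this patch end up as:

	/* spider_net_poll():              spider_net_decode_one_descr(card, 1);
	 * spider_net_handle_rxram_full(): spider_net_decode_one_descr(card, 0);
	 * so only the poll loop delivers via netif_receive_skb() */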
1101/** 1057/**
1102 * spider_net_decode_descr - processes an rx descriptor 1058 * spider_net_decode_one_descr - processes an rx descriptor
1103 * @card: card structure 1059 * @card: card structure
1060 * @napi: whether caller is in NAPI context
1104 * 1061 *
1105 * returns 1 if a packet has been sent to the stack, otherwise 0 1062 * returns 1 if a packet has been sent to the stack, otherwise 0
1106 * 1063 *
1107 * processes an rx descriptor by iommu-unmapping the data buffer and passing 1064 * processes an rx descriptor by iommu-unmapping the data buffer and passing
1108 * the packet up to the stack 1065 * the packet up to the stack. This function is called in softirq
1066 * context, e.g. either bottom half from interrupt or NAPI polling context
1109 */ 1067 */
1110static int 1068static int
1111spider_net_decode_one_descr(struct spider_net_card *card) 1069spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1112{ 1070{
1113 enum spider_net_descr_status status; 1071 enum spider_net_descr_status status;
1114 struct spider_net_descr *descr; 1072 struct spider_net_descr *descr;
@@ -1122,17 +1080,19 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1122 1080
1123 if (status == SPIDER_NET_DESCR_CARDOWNED) { 1081 if (status == SPIDER_NET_DESCR_CARDOWNED) {
1124 /* nothing in the descriptor yet */ 1082 /* nothing in the descriptor yet */
1125 /* nothing in the descriptor yet */ 1083 result = 0;
1084 goto out;
1126 } 1085 }
1127 1086
1128 if (status == SPIDER_NET_DESCR_NOT_IN_USE) { 1087 if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
1129 /* not initialized yet, I bet chain->tail == chain->head 1088 /* not initialized yet, the ring must be empty */
1130 * and the ring is empty */
1131 spider_net_refill_rx_chain(card); 1089 spider_net_refill_rx_chain(card);
1132 return 0; 1090 spider_net_enable_rxdmac(card);
1091 result=0;
1092 goto out;
1133 } 1093 }
1134 1094
1135 /* descriptor definitively used -- move on head */ 1095 /* descriptor definitively used -- advance the tail */
1136 chain->tail = descr->next; 1096 chain->tail = descr->next;
1137 1097
1138 result = 0; 1098 result = 0;
@@ -1143,6 +1103,9 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1143 pr_err("%s: dropping RX descriptor with state %d\n", 1103 pr_err("%s: dropping RX descriptor with state %d\n",
1144 card->netdev->name, status); 1104 card->netdev->name, status);
1145 card->netdev_stats.rx_dropped++; 1105 card->netdev_stats.rx_dropped++;
1106 pci_unmap_single(card->pdev, descr->buf_addr,
1107 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
1108 dev_kfree_skb_irq(descr->skb);
1146 goto refill; 1109 goto refill;
1147 } 1110 }
1148 1111
@@ -1155,12 +1118,13 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1155 } 1118 }
1156 1119
1157 /* ok, we've got a packet in descr */ 1120 /* ok, we've got a packet in descr */
1158 result = spider_net_pass_skb_up(descr, card); 1121 result = spider_net_pass_skb_up(descr, card, napi);
1159refill: 1122refill:
1160 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 1123 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
1161 /* change the descriptor state: */ 1124 /* change the descriptor state: */
1162 spider_net_refill_rx_chain(card); 1125 if (!napi)
1163 1126 spider_net_refill_rx_chain(card);
1127out:
1164 return result; 1128 return result;
1165} 1129}
1166 1130
@@ -1186,7 +1150,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1186 packets_to_do = min(*budget, netdev->quota); 1150 packets_to_do = min(*budget, netdev->quota);
1187 1151
1188 while (packets_to_do) { 1152 while (packets_to_do) {
1189 if (spider_net_decode_one_descr(card)) { 1153 if (spider_net_decode_one_descr(card, 1)) {
1190 packets_done++; 1154 packets_done++;
1191 packets_to_do--; 1155 packets_to_do--;
1192 } else { 1156 } else {
@@ -1198,6 +1162,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1198 1162
1199 netdev->quota -= packets_done; 1163 netdev->quota -= packets_done;
1200 *budget -= packets_done; 1164 *budget -= packets_done;
1165 spider_net_refill_rx_chain(card);
1201 1166
1202 /* if all packets are in the stack, enable interrupts and return 0 */ 1167 /* if all packets are in the stack, enable interrupts and return 0 */
1203 /* if not, return 1 */ 1168 /* if not, return 1 */
@@ -1342,6 +1307,24 @@ spider_net_enable_txdmac(struct spider_net_card *card)
1342} 1307}
1343 1308
1344/** 1309/**
1310 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
1311 * @card: card structure
1312 *
1313 * spider_net_handle_rxram_full empties the RX ring so that spider can put
1314 * more packets in it and empty its RX RAM. This is called in bottom half
1315 * context
1316 */
1317static void
1318spider_net_handle_rxram_full(struct spider_net_card *card)
1319{
1320 while (spider_net_decode_one_descr(card, 0))
1321 ;
1322 spider_net_enable_rxchtails(card);
1323 spider_net_enable_rxdmac(card);
1324 netif_rx_schedule(card->netdev);
1325}
1326
1327/**
1345 * spider_net_handle_error_irq - handles errors raised by an interrupt 1328 * spider_net_handle_error_irq - handles errors raised by an interrupt
1346 * @card: card structure 1329 * @card: card structure
1347 * @status_reg: interrupt status register 0 (GHIINT0STS) 1330 * @status_reg: interrupt status register 0 (GHIINT0STS)
@@ -1449,17 +1432,21 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1449 switch (i) 1432 switch (i)
1450 { 1433 {
1451 case SPIDER_NET_GTMFLLINT: 1434 case SPIDER_NET_GTMFLLINT:
1452 if (netif_msg_intr(card)) 1435 if (netif_msg_intr(card) && net_ratelimit())
1453 pr_err("Spider TX RAM full\n"); 1436 pr_err("Spider TX RAM full\n");
1454 show_error = 0; 1437 show_error = 0;
1455 break; 1438 break;
1439 case SPIDER_NET_GRFDFLLINT: /* fallthrough */
1440 case SPIDER_NET_GRFCFLLINT: /* fallthrough */
1441 case SPIDER_NET_GRFBFLLINT: /* fallthrough */
1442 case SPIDER_NET_GRFAFLLINT: /* fallthrough */
1456 case SPIDER_NET_GRMFLLINT: 1443 case SPIDER_NET_GRMFLLINT:
1457 if (netif_msg_intr(card)) 1444 if (netif_msg_intr(card) && net_ratelimit())
1458 pr_err("Spider RX RAM full, incoming packets " 1445 pr_err("Spider RX RAM full, incoming packets "
1459 "might be discarded !\n"); 1446 "might be discarded!\n");
1460 netif_rx_schedule(card->netdev); 1447 spider_net_rx_irq_off(card);
1461 spider_net_enable_rxchtails(card); 1448 tasklet_schedule(&card->rxram_full_tl);
1462 spider_net_enable_rxdmac(card); 1449 show_error = 0;
1463 break; 1450 break;
1464 1451
1465 /* case SPIDER_NET_GTMSHTINT: problem, print a message */ 1452 /* case SPIDER_NET_GTMSHTINT: problem, print a message */
@@ -1467,10 +1454,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1467 /* allrighty. tx from previous descr ok */ 1454 /* allrighty. tx from previous descr ok */
1468 show_error = 0; 1455 show_error = 0;
1469 break; 1456 break;
1470 /* case SPIDER_NET_GRFDFLLINT: print a message down there */
1471 /* case SPIDER_NET_GRFCFLLINT: print a message down there */
1472 /* case SPIDER_NET_GRFBFLLINT: print a message down there */
1473 /* case SPIDER_NET_GRFAFLLINT: print a message down there */
1474 1457
1475 /* chain end */ 1458 /* chain end */
1476 case SPIDER_NET_GDDDCEINT: /* fallthrough */ 1459 case SPIDER_NET_GDDDCEINT: /* fallthrough */
@@ -1482,6 +1465,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1482 "restarting DMAC %c.\n", 1465 "restarting DMAC %c.\n",
1483 'D'+i-SPIDER_NET_GDDDCEINT); 1466 'D'+i-SPIDER_NET_GDDDCEINT);
1484 spider_net_refill_rx_chain(card); 1467 spider_net_refill_rx_chain(card);
1468 spider_net_enable_rxdmac(card);
1485 show_error = 0; 1469 show_error = 0;
1486 break; 1470 break;
1487 1471
@@ -1492,6 +1476,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1492 case SPIDER_NET_GDAINVDINT: 1476 case SPIDER_NET_GDAINVDINT:
1493 /* could happen when rx chain is full */ 1477 /* could happen when rx chain is full */
1494 spider_net_refill_rx_chain(card); 1478 spider_net_refill_rx_chain(card);
1479 spider_net_enable_rxdmac(card);
1495 show_error = 0; 1480 show_error = 0;
1496 break; 1481 break;
1497 1482
@@ -1580,17 +1565,13 @@ spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs)
1580 if (!status_reg) 1565 if (!status_reg)
1581 return IRQ_NONE; 1566 return IRQ_NONE;
1582 1567
1583 if (status_reg & SPIDER_NET_TXINT)
1584 spider_net_release_tx_chain(card, 0);
1585
1586 if (status_reg & SPIDER_NET_RXINT ) { 1568 if (status_reg & SPIDER_NET_RXINT ) {
1587 spider_net_rx_irq_off(card); 1569 spider_net_rx_irq_off(card);
1588 netif_rx_schedule(netdev); 1570 netif_rx_schedule(netdev);
1589 } 1571 }
1590 1572
1591 /* we do this after rx and tx processing, as we want the tx chain 1573 if (status_reg & SPIDER_NET_ERRINT )
1592 * processed to see, whether we should restart tx dma processing */ 1574 spider_net_handle_error_irq(card, status_reg);
1593 spider_net_handle_error_irq(card, status_reg);
1594 1575
1595 /* clear interrupt sources */ 1576 /* clear interrupt sources */
1596 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg); 1577 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
@@ -1831,34 +1812,40 @@ spider_net_setup_phy(struct spider_net_card *card)
1831/** 1812/**
1832 * spider_net_download_firmware - loads firmware into the adapter 1813 * spider_net_download_firmware - loads firmware into the adapter
1833 * @card: card structure 1814 * @card: card structure
1834 * @firmware: firmware pointer 1815 * @firmware_ptr: pointer to firmware data
1835 * 1816 *
1836 * spider_net_download_firmware loads the firmware opened by 1817 * spider_net_download_firmware loads the firmware data into the
1837 * spider_net_init_firmware into the adapter. 1818 * adapter. It assumes the length etc. are correct.
1838 */ 1819 */
1839static void 1820static int
1840spider_net_download_firmware(struct spider_net_card *card, 1821spider_net_download_firmware(struct spider_net_card *card,
1841 const struct firmware *firmware) 1822 u8 *firmware_ptr)
1842{ 1823{
1843 int sequencer, i; 1824 int sequencer, i;
1844 u32 *fw_ptr = (u32 *)firmware->data; 1825 u32 *fw_ptr = (u32 *)firmware_ptr;
1845 1826
1846 /* stop sequencers */ 1827 /* stop sequencers */
1847 spider_net_write_reg(card, SPIDER_NET_GSINIT, 1828 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1848 SPIDER_NET_STOP_SEQ_VALUE); 1829 SPIDER_NET_STOP_SEQ_VALUE);
1849 1830
1850 for (sequencer = 0; sequencer < 6; sequencer++) { 1831 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1832 sequencer++) {
1851 spider_net_write_reg(card, 1833 spider_net_write_reg(card,
1852 SPIDER_NET_GSnPRGADR + sequencer * 8, 0); 1834 SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
1853 for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) { 1835 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1854 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1836 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1855 sequencer * 8, *fw_ptr); 1837 sequencer * 8, *fw_ptr);
1856 fw_ptr++; 1838 fw_ptr++;
1857 } 1839 }
1858 } 1840 }
1859 1841
1842 if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
1843 return -EIO;
1844
1860 spider_net_write_reg(card, SPIDER_NET_GSINIT, 1845 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1861 SPIDER_NET_RUN_SEQ_VALUE); 1846 SPIDER_NET_RUN_SEQ_VALUE);
1847
1848 return 0;
1862} 1849}
1863 1850
1864/** 1851/**
@@ -1890,31 +1877,53 @@ spider_net_download_firmware(struct spider_net_card *card,
1890static int 1877static int
1891spider_net_init_firmware(struct spider_net_card *card) 1878spider_net_init_firmware(struct spider_net_card *card)
1892{ 1879{
1893 const struct firmware *firmware; 1880 struct firmware *firmware = NULL;
1894 int err = -EIO; 1881 struct device_node *dn;
1882 u8 *fw_prop = NULL;
1883 int err = -ENOENT;
1884 int fw_size;
1885
1886 if (request_firmware((const struct firmware **)&firmware,
1887 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
1888 if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
1889 netif_msg_probe(card) ) {
1890 pr_err("Incorrect size of spidernet firmware in " \
1891 "filesystem. Looking in host firmware...\n");
1892 goto try_host_fw;
1893 }
1894 err = spider_net_download_firmware(card, firmware->data);
1895 1895
1896 if (request_firmware(&firmware, 1896 release_firmware(firmware);
1897 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) < 0) { 1897 if (err)
1898 if (netif_msg_probe(card)) 1898 goto try_host_fw;
1899 pr_err("Couldn't read in sequencer data file %s.\n",
1900 SPIDER_NET_FIRMWARE_NAME);
1901 firmware = NULL;
1902 goto out;
1903 }
1904 1899
1905 if (firmware->size != 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)) { 1900 goto done;
1906 if (netif_msg_probe(card))
1907 pr_err("Invalid size of sequencer data file %s.\n",
1908 SPIDER_NET_FIRMWARE_NAME);
1909 goto out;
1910 } 1901 }
1911 1902
1912 spider_net_download_firmware(card, firmware); 1903try_host_fw:
1904 dn = pci_device_to_OF_node(card->pdev);
1905 if (!dn)
1906 goto out_err;
1913 1907
1914 err = 0; 1908 fw_prop = (u8 *)get_property(dn, "firmware", &fw_size);
1915out: 1909 if (!fw_prop)
1916 release_firmware(firmware); 1910 goto out_err;
1911
1912 if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
1913 netif_msg_probe(card) ) {
1914 pr_err("Incorrect size of spidernet firmware in " \
1915 "host firmware\n");
1916 goto done;
1917 }
1917 1918
1919 err = spider_net_download_firmware(card, fw_prop);
1920
1921done:
1922 return err;
1923out_err:
1924 if (netif_msg_probe(card))
1925 pr_err("Couldn't find spidernet firmware in filesystem " \
1926 "or host firmware\n");
1918 return err; 1927 return err;
1919} 1928}
1920 1929
@@ -1934,10 +1943,11 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
1934 SPIDER_NET_CKRCTRL_RUN_VALUE); 1943 SPIDER_NET_CKRCTRL_RUN_VALUE);
1935 1944
1936 /* empty sequencer data */ 1945 /* empty sequencer data */
1937 for (sequencer = 0; sequencer < 6; sequencer++) { 1946 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1947 sequencer++) {
1938 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1948 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1939 sequencer * 8, 0x0); 1949 sequencer * 8, 0x0);
1940 for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) { 1950 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1941 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1951 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1942 sequencer * 8, 0x0); 1952 sequencer * 8, 0x0);
1943 } 1953 }
@@ -2061,7 +2071,15 @@ spider_net_setup_netdev(struct spider_net_card *card)
2061 SET_NETDEV_DEV(netdev, &card->pdev->dev); 2071 SET_NETDEV_DEV(netdev, &card->pdev->dev);
2062 2072
2063 pci_set_drvdata(card->pdev, netdev); 2073 pci_set_drvdata(card->pdev, netdev);
2064 spin_lock_init(&card->intmask_lock); 2074
2075 atomic_set(&card->tx_chain_release,0);
2076 card->rxram_full_tl.data = (unsigned long) card;
2077 card->rxram_full_tl.func =
2078 (void (*)(unsigned long)) spider_net_handle_rxram_full;
2079 init_timer(&card->tx_timer);
2080 card->tx_timer.function =
2081 (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
2082 card->tx_timer.data = (unsigned long) card;
2065 netdev->irq = card->pdev->irq; 2083 netdev->irq = card->pdev->irq;
2066 2084
2067 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2085 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
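Note: the open-coded .func/.data assignments above rely on the surrounding structure being zero-initialized; the same setup can be written with the tasklet_init() helper, shown here only as an equivalent formulation, not as part of the patch:

	tasklet_init(&card->rxram_full_tl,
		     (void (*)(unsigned long)) spider_net_handle_rxram_full,
		     (unsigned long) card);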
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 22b2f2347351..5922b529a048 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -33,25 +33,32 @@ extern struct ethtool_ops spider_net_ethtool_ops;
33 33
34extern char spider_net_driver_name[]; 34extern char spider_net_driver_name[];
35 35
36#define SPIDER_NET_MAX_MTU 2308 36#define SPIDER_NET_MAX_FRAME 2312
37#define SPIDER_NET_MAX_MTU 2294
37#define SPIDER_NET_MIN_MTU 64 38#define SPIDER_NET_MIN_MTU 64
38 39
39#define SPIDER_NET_RXBUF_ALIGN 128 40#define SPIDER_NET_RXBUF_ALIGN 128
40 41
41#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 64 42#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256
42#define SPIDER_NET_RX_DESCRIPTORS_MIN 16 43#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
43#define SPIDER_NET_RX_DESCRIPTORS_MAX 256 44#define SPIDER_NET_RX_DESCRIPTORS_MAX 512
44 45
45#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 64 46#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256
46#define SPIDER_NET_TX_DESCRIPTORS_MIN 16 47#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
47#define SPIDER_NET_TX_DESCRIPTORS_MAX 256 48#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
49
50#define SPIDER_NET_TX_TIMER 20
48 51
49#define SPIDER_NET_RX_CSUM_DEFAULT 1 52#define SPIDER_NET_RX_CSUM_DEFAULT 1
50 53
51#define SPIDER_NET_WATCHDOG_TIMEOUT 5*HZ 54#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ
52#define SPIDER_NET_NAPI_WEIGHT 64 55#define SPIDER_NET_NAPI_WEIGHT 64
53 56
54#define SPIDER_NET_FIRMWARE_LEN 1024 57#define SPIDER_NET_FIRMWARE_SEQS 6
58#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
59#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \
60 SPIDER_NET_FIRMWARE_SEQWORDS * \
61 sizeof(u32))
55#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin" 62#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
56 63
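Note: the composite constant works out to 6 * 1024 * sizeof(u32) = 24576 bytes, so both the filesystem image and the device-tree "firmware" property must now be exactly 24 KiB to pass the size checks in spider_net_init_firmware().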
57/** spider_net SMMIO registers */ 64/** spider_net SMMIO registers */
@@ -142,14 +149,12 @@ extern char spider_net_driver_name[];
142/** SCONFIG registers */ 149/** SCONFIG registers */
143#define SPIDER_NET_SCONFIG_IOACTE 0x00002810 150#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
144 151
145/** hardcoded register values */ 152/** interrupt mask registers */
146#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe3ff 153#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
147#define SPIDER_NET_INT1_MASK_VALUE 0xffffffff 154#define SPIDER_NET_INT1_MASK_VALUE 0xffff7ff7
148/* no MAC aborts -> auto retransmission */ 155/* no MAC aborts -> auto retransmission */
149#define SPIDER_NET_INT2_MASK_VALUE 0xfffffff1 156#define SPIDER_NET_INT2_MASK_VALUE 0xffef7ff1
150 157
151/* clear counter when interrupt sources are cleared
152#define SPIDER_NET_FRAMENUM_VALUE 0x0001f001 */
153/* we rely on flagged descriptor interrupts */ 158/* we rely on flagged descriptor interrupts */
154#define SPIDER_NET_FRAMENUM_VALUE 0x00000000 159#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
155/* set this first, then the FRAMENUM_VALUE */ 160/* set this first, then the FRAMENUM_VALUE */
@@ -168,7 +173,7 @@ extern char spider_net_driver_name[];
168#if 0 173#if 0
169#define SPIDER_NET_WOL_VALUE 0x00000000 174#define SPIDER_NET_WOL_VALUE 0x00000000
170#endif 175#endif
171#define SPIDER_NET_IPSECINIT_VALUE 0x00f000f8 176#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71
172 177
173/* pause frames: automatic, no upper retransmission count */ 178/* pause frames: automatic, no upper retransmission count */
174/* outside loopback mode: ETOMOD signal doesn't matter, not connected */ 179/* outside loopback mode: ETOMOD signal doesn't matter, not connected */
@@ -318,6 +323,10 @@ enum spider_net_int2_status {
318#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \ 323#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \
319 (1 << SPIDER_NET_GRMFLLINT) ) 324 (1 << SPIDER_NET_GRMFLLINT) )
320 325
326#define SPIDER_NET_ERRINT ( 0xffffffff & \
327 (~SPIDER_NET_TXINT) & \
328 (~SPIDER_NET_RXINT) )
329
321#define SPIDER_NET_GPREXEC 0x80000000 330#define SPIDER_NET_GPREXEC 0x80000000
322#define SPIDER_NET_GPRDAT_MASK 0x0000ffff 331#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
323 332
@@ -358,9 +367,6 @@ enum spider_net_int2_status {
358/* descr ready, descr is in middle of chain, get interrupt on completion */ 367/* descr ready, descr is in middle of chain, get interrupt on completion */
359#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000 368#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000
360 369
361/* multicast is no problem */
362#define SPIDER_NET_DATA_ERROR_MASK 0xffffbfff
363
364enum spider_net_descr_status { 370enum spider_net_descr_status {
365 SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */ 371 SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
366 SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */ 372 SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
@@ -373,9 +379,9 @@ enum spider_net_descr_status {
373 379
374struct spider_net_descr { 380struct spider_net_descr {
375 /* as defined by the hardware */ 381 /* as defined by the hardware */
376 dma_addr_t buf_addr; 382 u32 buf_addr;
377 u32 buf_size; 383 u32 buf_size;
378 dma_addr_t next_descr_addr; 384 u32 next_descr_addr;
379 u32 dmac_cmd_status; 385 u32 dmac_cmd_status;
380 u32 result_size; 386 u32 result_size;
381 u32 valid_size; /* all zeroes for tx */ 387 u32 valid_size; /* all zeroes for tx */
@@ -384,7 +390,7 @@ struct spider_net_descr {
384 390
385 /* used in the driver */ 391 /* used in the driver */
386 struct sk_buff *skb; 392 struct sk_buff *skb;
387 dma_addr_t bus_addr; 393 u32 bus_addr;
388 struct spider_net_descr *next; 394 struct spider_net_descr *next;
389 struct spider_net_descr *prev; 395 struct spider_net_descr *prev;
390} __attribute__((aligned(32))); 396} __attribute__((aligned(32)));
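Note: the hardware-visible half of this structure is read by the DMA engine as fixed 32-bit words, and the driver's main platform is 64-bit PowerPC, where dma_addr_t can be 8 bytes wide; switching the fields to u32 is what keeps the descriptor layout at the 32 bytes the chip expects. On kernels that provide it, a compile-time guard could pin the invariant down (illustrative, not part of the patch):

	BUILD_BUG_ON(offsetof(struct spider_net_descr, result_size) != 16);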
@@ -396,21 +402,21 @@ struct spider_net_descr_chain {
396}; 402};
397 403
398/* descriptor data_status bits */ 404/* descriptor data_status bits */
399#define SPIDER_NET_RXIPCHK 29 405#define SPIDER_NET_RX_IPCHK 29
400#define SPIDER_NET_TCPUDPIPCHK 28 406#define SPIDER_NET_RX_TCPCHK 28
401#define SPIDER_NET_DATA_STATUS_CHK_MASK (1 << SPIDER_NET_RXIPCHK | \
402 1 << SPIDER_NET_TCPUDPIPCHK)
403
404#define SPIDER_NET_VLAN_PACKET 21 407#define SPIDER_NET_VLAN_PACKET 21
408#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
409 (1 << SPIDER_NET_RX_TCPCHK) )
405 410
406/* descriptor data_error bits */ 411/* descriptor data_error bits */
407#define SPIDER_NET_RXIPCHKERR 27 412#define SPIDER_NET_RX_IPCHKERR 27
408#define SPIDER_NET_RXTCPCHKERR 26 413#define SPIDER_NET_RX_RXTCPCHKERR 28
409#define SPIDER_NET_DATA_ERROR_CHK_MASK (1 << SPIDER_NET_RXIPCHKERR | \ 414
410 1 << SPIDER_NET_RXTCPCHKERR) 415#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
411 416
412/* the cases we don't pass the packet to the stack */ 417/* the cases we don't pass the packet to the stack.
413#define SPIDER_NET_DESTROY_RX_FLAGS 0x70138000 418 * 701b8000 would be correct, but every packet gets that flag */
419#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
414 420
415#define SPIDER_NET_DESCR_SIZE 32 421#define SPIDER_NET_DESCR_SIZE 32
416 422
@@ -445,13 +451,16 @@ struct spider_net_card {
445 451
446 struct spider_net_descr_chain tx_chain; 452 struct spider_net_descr_chain tx_chain;
447 struct spider_net_descr_chain rx_chain; 453 struct spider_net_descr_chain rx_chain;
448 spinlock_t chain_lock; 454 atomic_t rx_chain_refill;
455 atomic_t tx_chain_release;
449 456
450 struct net_device_stats netdev_stats; 457 struct net_device_stats netdev_stats;
451 458
452 struct spider_net_options options; 459 struct spider_net_options options;
453 460
454 spinlock_t intmask_lock; 461 spinlock_t intmask_lock;
462 struct tasklet_struct rxram_full_tl;
463 struct timer_list tx_timer;
455 464
456 struct work_struct tx_timeout_task; 465 struct work_struct tx_timeout_task;
457 atomic_t tx_timeout_task_counter; 466 atomic_t tx_timeout_task_counter;
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index d42e60ba74ce..a5bb0b7633af 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -113,6 +113,23 @@ spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
113 return 0; 113 return 0;
114} 114}
115 115
116static uint32_t
117spider_net_ethtool_get_tx_csum(struct net_device *netdev)
118{
119 return (netdev->features & NETIF_F_HW_CSUM) != 0;
120}
121
122static int
123spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
124{
125 if (data)
126 netdev->features |= NETIF_F_HW_CSUM;
127 else
128 netdev->features &= ~NETIF_F_HW_CSUM;
129
130 return 0;
131}
132
116struct ethtool_ops spider_net_ethtool_ops = { 133struct ethtool_ops spider_net_ethtool_ops = {
117 .get_settings = spider_net_ethtool_get_settings, 134 .get_settings = spider_net_ethtool_get_settings,
118 .get_drvinfo = spider_net_ethtool_get_drvinfo, 135 .get_drvinfo = spider_net_ethtool_get_drvinfo,
@@ -122,5 +139,7 @@ struct ethtool_ops spider_net_ethtool_ops = {
122 .nway_reset = spider_net_ethtool_nway_reset, 139 .nway_reset = spider_net_ethtool_nway_reset,
123 .get_rx_csum = spider_net_ethtool_get_rx_csum, 140 .get_rx_csum = spider_net_ethtool_get_rx_csum,
124 .set_rx_csum = spider_net_ethtool_set_rx_csum, 141 .set_rx_csum = spider_net_ethtool_set_rx_csum,
142 .get_tx_csum = spider_net_ethtool_get_tx_csum,
143 .set_tx_csum = spider_net_ethtool_set_tx_csum,
125}; 144};
126 145
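Note: with these two hooks in place, TX checksum offload becomes runtime-switchable from userspace; running "ethtool -K eth0 tx on" (assuming the interface is eth0) sets NETIF_F_HW_CSUM, and "tx off" clears it so the stack checksums packets itself.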
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index ee866fd6957d..a4c7ae94614d 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5668,13 +5668,13 @@ static int airo_set_freq(struct net_device *dev,
5668 int channel = fwrq->m; 5668 int channel = fwrq->m;
5669 /* We should do a better check than that, 5669 /* We should do a better check than that,
5670 * based on the card capability !!! */ 5670 * based on the card capability !!! */
5671 if((channel < 1) || (channel > 16)) { 5671 if((channel < 1) || (channel > 14)) {
5672 printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m); 5672 printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m);
5673 rc = -EINVAL; 5673 rc = -EINVAL;
5674 } else { 5674 } else {
5675 readConfigRid(local, 1); 5675 readConfigRid(local, 1);
5676 /* Yes ! We can set it !!! */ 5676 /* Yes ! We can set it !!! */
5677 local->config.channelSet = (u16)(channel - 1); 5677 local->config.channelSet = (u16) channel;
5678 set_bit (FLAG_COMMIT, &local->flags); 5678 set_bit (FLAG_COMMIT, &local->flags);
5679 } 5679 }
5680 } 5680 }
@@ -5692,6 +5692,7 @@ static int airo_get_freq(struct net_device *dev,
5692{ 5692{
5693 struct airo_info *local = dev->priv; 5693 struct airo_info *local = dev->priv;
5694 StatusRid status_rid; /* Card status info */ 5694 StatusRid status_rid; /* Card status info */
5695 int ch;
5695 5696
5696 readConfigRid(local, 1); 5697 readConfigRid(local, 1);
5697 if ((local->config.opmode & 0xFF) == MODE_STA_ESS) 5698 if ((local->config.opmode & 0xFF) == MODE_STA_ESS)
@@ -5699,16 +5700,14 @@ static int airo_get_freq(struct net_device *dev,
5699 else 5700 else
5700 readStatusRid(local, &status_rid, 1); 5701 readStatusRid(local, &status_rid, 1);
5701 5702
5702#ifdef WEXT_USECHANNELS 5703 ch = (int)status_rid.channel;
5703 fwrq->m = ((int)status_rid.channel) + 1; 5704 if((ch > 0) && (ch < 15)) {
5704 fwrq->e = 0; 5705 fwrq->m = frequency_list[ch - 1] * 100000;
5705#else
5706 {
5707 int f = (int)status_rid.channel;
5708 fwrq->m = frequency_list[f] * 100000;
5709 fwrq->e = 1; 5706 fwrq->e = 1;
5707 } else {
5708 fwrq->m = ch;
5709 fwrq->e = 0;
5710 } 5710 }
5711#endif
5712 5711
5713 return 0; 5712 return 0;
5714} 5713}
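Note: in the Wireless Extensions encoding the reported frequency is m * 10^e Hz, so for channel 6 the table entry 2437 (MHz) becomes m = 2437 * 100000 = 243700000 with e = 1, i.e. 2.437 GHz; a channel outside 1..14 is now reported as the raw channel number with e = 0.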
@@ -5783,7 +5782,7 @@ static int airo_get_essid(struct net_device *dev,
5783 /* If none, we may want to get the one that was set */ 5782 /* If none, we may want to get the one that was set */
5784 5783
5785 /* Push it out ! */ 5784 /* Push it out ! */
5786 dwrq->length = status_rid.SSIDlen + 1; 5785 dwrq->length = status_rid.SSIDlen;
5787 dwrq->flags = 1; /* active */ 5786 dwrq->flags = 1; /* active */
5788 5787
5789 return 0; 5788 return 0;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index f0ccfef66445..98a76f10a0f7 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1718,11 +1718,11 @@ static int atmel_get_essid(struct net_device *dev,
1718 if (priv->new_SSID_size != 0) { 1718 if (priv->new_SSID_size != 0) {
1719 memcpy(extra, priv->new_SSID, priv->new_SSID_size); 1719 memcpy(extra, priv->new_SSID, priv->new_SSID_size);
1720 extra[priv->new_SSID_size] = '\0'; 1720 extra[priv->new_SSID_size] = '\0';
1721 dwrq->length = priv->new_SSID_size + 1; 1721 dwrq->length = priv->new_SSID_size;
1722 } else { 1722 } else {
1723 memcpy(extra, priv->SSID, priv->SSID_size); 1723 memcpy(extra, priv->SSID, priv->SSID_size);
1724 extra[priv->SSID_size] = '\0'; 1724 extra[priv->SSID_size] = '\0';
1725 dwrq->length = priv->SSID_size + 1; 1725 dwrq->length = priv->SSID_size;
1726 } 1726 }
1727 1727
1728 dwrq->flags = !priv->connect_to_any_BSS; /* active */ 1728 dwrq->flags = !priv->connect_to_any_BSS; /* active */
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
index 56f41c714d38..c8f6286dd35f 100644
--- a/drivers/net/wireless/hostap/Kconfig
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -26,11 +26,25 @@ config HOSTAP_FIRMWARE
26 depends on HOSTAP 26 depends on HOSTAP
27 ---help--- 27 ---help---
28 Configure Host AP driver to include support for firmware image 28 Configure Host AP driver to include support for firmware image
29 download. Current version supports only downloading to volatile, i.e., 29 download. This option by itself only enables downloading to the
30 RAM memory. Flash upgrade is not yet supported. 30 volatile memory, i.e. the card RAM. This option is required to
31 support cards that don't have firmware in flash, such as D-Link
32 DWL-520 rev E and D-Link DWL-650 rev P.
31 33
32 Firmware image downloading needs user space tool, prism2_srec. It is 34 Firmware image downloading needs a user space tool, prism2_srec.
33 available from http://hostap.epitest.fi/. 35 It is available from http://hostap.epitest.fi/.
36
37config HOSTAP_FIRMWARE_NVRAM
38 bool "Support for non-volatile firmware download"
39 depends on HOSTAP_FIRMWARE
40 ---help---
41 Allow Host AP driver to write firmware images to the non-volatile
42 card memory, i.e. flash memory that survives power cycling.
43 Enable this option if you want to be able to change card firmware
44 permanently.
45
46 Firmware image downloading needs a user space tool, prism2_srec.
47 It is available from http://hostap.epitest.fi/.
34 48
35config HOSTAP_PLX 49config HOSTAP_PLX
36 tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors" 50 tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors"
diff --git a/drivers/net/wireless/hostap/Makefile b/drivers/net/wireless/hostap/Makefile
index 353ccb93134b..b8e41a702c00 100644
--- a/drivers/net/wireless/hostap/Makefile
+++ b/drivers/net/wireless/hostap/Makefile
@@ -1,4 +1,5 @@
1hostap-y := hostap_main.o 1hostap-y := hostap_80211_rx.o hostap_80211_tx.o hostap_ap.o hostap_info.o \
2 hostap_ioctl.o hostap_main.o hostap_proc.o
2obj-$(CONFIG_HOSTAP) += hostap.o 3obj-$(CONFIG_HOSTAP) += hostap.o
3 4
4obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o 5obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h
index 5fac89b8ce3a..5e63765219fe 100644
--- a/drivers/net/wireless/hostap/hostap.h
+++ b/drivers/net/wireless/hostap/hostap.h
@@ -1,6 +1,15 @@
1#ifndef HOSTAP_H 1#ifndef HOSTAP_H
2#define HOSTAP_H 2#define HOSTAP_H
3 3
4#include <linux/ethtool.h>
5
6#include "hostap_wlan.h"
7#include "hostap_ap.h"
8
9static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
10 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
11#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
12
4/* hostap.c */ 13/* hostap.c */
5 14
6extern struct proc_dir_entry *hostap_proc; 15extern struct proc_dir_entry *hostap_proc;
@@ -40,6 +49,26 @@ int prism2_update_comms_qual(struct net_device *dev);
40int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype, 49int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
41 u8 *body, size_t bodylen); 50 u8 *body, size_t bodylen);
42int prism2_sta_deauth(local_info_t *local, u16 reason); 51int prism2_sta_deauth(local_info_t *local, u16 reason);
52int prism2_wds_add(local_info_t *local, u8 *remote_addr,
53 int rtnl_locked);
54int prism2_wds_del(local_info_t *local, u8 *remote_addr,
55 int rtnl_locked, int do_not_remove);
56
57
58/* hostap_ap.c */
59
60int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
61int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
62void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
63int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac);
64void ap_control_kickall(struct ap_data *ap);
65void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
66 struct ieee80211_crypt_data ***crypt);
67int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
68 struct iw_quality qual[], int buf_size,
69 int aplist);
70int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
71int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param);
43 72
44 73
45/* hostap_proc.c */ 74/* hostap_proc.c */
@@ -54,4 +83,12 @@ void hostap_info_init(local_info_t *local);
54void hostap_info_process(local_info_t *local, struct sk_buff *skb); 83void hostap_info_process(local_info_t *local, struct sk_buff *skb);
55 84
56 85
86/* hostap_ioctl.c */
87
88extern const struct iw_handler_def hostap_iw_handler_def;
89extern struct ethtool_ops prism2_ethtool_ops;
90
91int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
92
93
57#endif /* HOSTAP_H */ 94#endif /* HOSTAP_H */
diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/hostap/hostap_80211.h
index bf506f50d722..1fc72fe511e9 100644
--- a/drivers/net/wireless/hostap/hostap_80211.h
+++ b/drivers/net/wireless/hostap/hostap_80211.h
@@ -1,6 +1,9 @@
1#ifndef HOSTAP_80211_H 1#ifndef HOSTAP_80211_H
2#define HOSTAP_80211_H 2#define HOSTAP_80211_H
3 3
4#include <linux/types.h>
5#include <net/ieee80211_crypt.h>
6
4struct hostap_ieee80211_mgmt { 7struct hostap_ieee80211_mgmt {
5 u16 frame_control; 8 u16 frame_control;
6 u16 duration; 9 u16 duration;
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index 4b13b76425c1..7e04dc94b3bc 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -1,7 +1,18 @@
1#include <linux/etherdevice.h> 1#include <linux/etherdevice.h>
2#include <net/ieee80211_crypt.h>
2 3
3#include "hostap_80211.h" 4#include "hostap_80211.h"
4#include "hostap.h" 5#include "hostap.h"
6#include "hostap_ap.h"
7
8/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
9/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
10static unsigned char rfc1042_header[] =
11{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
12/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
13static unsigned char bridge_tunnel_header[] =
14{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
15/* No encapsulation header if EtherType < 0x600 (=length) */
5 16
6void hostap_dump_rx_80211(const char *name, struct sk_buff *skb, 17void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
7 struct hostap_80211_rx_status *rx_stats) 18 struct hostap_80211_rx_status *rx_stats)
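Note: these LLC/SNAP constants are now duplicated into each file that needs them instead of being shared through the old #include chain (see the hostap_main.c hunk below). For context, a generic sketch of how such headers are used on receive, not hostap's exact code; payload is assumed to point just past the 802.11 header:

	if (memcmp(payload, rfc1042_header, 6) == 0 ||
	    memcmp(payload, bridge_tunnel_header, 6) == 0)
		ethertype = (payload[6] << 8) | payload[7];	/* big endian */
	else
		ethertype = 0;	/* no SNAP: 802.3 length field follows */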
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 9d24f8a38ac5..4a85e63906f1 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -1,3 +1,18 @@
1#include "hostap_80211.h"
2#include "hostap_common.h"
3#include "hostap_wlan.h"
4#include "hostap.h"
5#include "hostap_ap.h"
6
7/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
8/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
9static unsigned char rfc1042_header[] =
10{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
11/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
12static unsigned char bridge_tunnel_header[] =
13{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
14/* No encapsulation header if EtherType < 0x600 (=length) */
15
1void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) 16void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
2{ 17{
3 struct ieee80211_hdr_4addr *hdr; 18 struct ieee80211_hdr_4addr *hdr;
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 9da94ab7f05f..753a1de6664b 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -16,6 +16,14 @@
16 * (8802.11: 5.5) 16 * (8802.11: 5.5)
17 */ 17 */
18 18
19#include <linux/proc_fs.h>
20#include <linux/delay.h>
21#include <linux/random.h>
22
23#include "hostap_wlan.h"
24#include "hostap.h"
25#include "hostap_ap.h"
26
19static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL, 27static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL,
20 DEF_INTS }; 28 DEF_INTS };
21module_param_array(other_ap_policy, int, NULL, 0444); 29module_param_array(other_ap_policy, int, NULL, 0444);
@@ -360,8 +368,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
360} 368}
361 369
362 370
363static int ap_control_add_mac(struct mac_restrictions *mac_restrictions, 371int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
364 u8 *mac)
365{ 372{
366 struct mac_entry *entry; 373 struct mac_entry *entry;
367 374
@@ -380,8 +387,7 @@ static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
380} 387}
381 388
382 389
383static int ap_control_del_mac(struct mac_restrictions *mac_restrictions, 390int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
384 u8 *mac)
385{ 391{
386 struct list_head *ptr; 392 struct list_head *ptr;
387 struct mac_entry *entry; 393 struct mac_entry *entry;
@@ -433,7 +439,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
433} 439}
434 440
435 441
436static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions) 442void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
437{ 443{
438 struct list_head *ptr, *n; 444 struct list_head *ptr, *n;
439 struct mac_entry *entry; 445 struct mac_entry *entry;
@@ -454,8 +460,7 @@ static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
454} 460}
455 461
456 462
457static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, 463int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac)
458 u8 *mac)
459{ 464{
460 struct sta_info *sta; 465 struct sta_info *sta;
461 u16 resp; 466 u16 resp;
@@ -486,7 +491,7 @@ static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
486#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 491#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
487 492
488 493
489static void ap_control_kickall(struct ap_data *ap) 494void ap_control_kickall(struct ap_data *ap)
490{ 495{
491 struct list_head *ptr, *n; 496 struct list_head *ptr, *n;
492 struct sta_info *sta; 497 struct sta_info *sta;
@@ -2321,9 +2326,9 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
2321} 2326}
2322 2327
2323 2328
2324static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[], 2329int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
2325 struct iw_quality qual[], int buf_size, 2330 struct iw_quality qual[], int buf_size,
2326 int aplist) 2331 int aplist)
2327{ 2332{
2328 struct ap_data *ap = local->ap; 2333 struct ap_data *ap = local->ap;
2329 struct list_head *ptr; 2334 struct list_head *ptr;
@@ -2363,7 +2368,7 @@ static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
2363 2368
2364/* Translate our list of Access Points & Stations to a card independent 2369/* Translate our list of Access Points & Stations to a card independent
2365 * format that the Wireless Tools will understand - Jean II */ 2370 * format that the Wireless Tools will understand - Jean II */
2366static int prism2_ap_translate_scan(struct net_device *dev, char *buffer) 2371int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
2367{ 2372{
2368 struct hostap_interface *iface; 2373 struct hostap_interface *iface;
2369 local_info_t *local; 2374 local_info_t *local;
@@ -2608,8 +2613,7 @@ static int prism2_hostapd_sta_clear_stats(struct ap_data *ap,
2608} 2613}
2609 2614
2610 2615
2611static int prism2_hostapd(struct ap_data *ap, 2616int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param)
2612 struct prism2_hostapd_param *param)
2613{ 2617{
2614 switch (param->cmd) { 2618 switch (param->cmd) {
2615 case PRISM2_HOSTAPD_FLUSH: 2619 case PRISM2_HOSTAPD_FLUSH:
@@ -3207,8 +3211,8 @@ void hostap_update_rates(local_info_t *local)
3207} 3211}
3208 3212
3209 3213
3210static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent, 3214void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
3211 struct ieee80211_crypt_data ***crypt) 3215 struct ieee80211_crypt_data ***crypt)
3212{ 3216{
3213 struct sta_info *sta; 3217 struct sta_info *sta;
3214 3218
diff --git a/drivers/net/wireless/hostap/hostap_ap.h b/drivers/net/wireless/hostap/hostap_ap.h
index 6d00df69c2e3..2fa2452b6b07 100644
--- a/drivers/net/wireless/hostap/hostap_ap.h
+++ b/drivers/net/wireless/hostap/hostap_ap.h
@@ -1,6 +1,8 @@
1#ifndef HOSTAP_AP_H 1#ifndef HOSTAP_AP_H
2#define HOSTAP_AP_H 2#define HOSTAP_AP_H
3 3
4#include "hostap_80211.h"
5
4/* AP data structures for STAs */ 6/* AP data structures for STAs */
5 7
6/* maximum number of frames to buffer per STA */ 8/* maximum number of frames to buffer per STA */
diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h
index 6f4fa9dc308f..01624005d808 100644
--- a/drivers/net/wireless/hostap/hostap_common.h
+++ b/drivers/net/wireless/hostap/hostap_common.h
@@ -1,6 +1,9 @@
1#ifndef HOSTAP_COMMON_H 1#ifndef HOSTAP_COMMON_H
2#define HOSTAP_COMMON_H 2#define HOSTAP_COMMON_H
3 3
4#include <linux/types.h>
5#include <linux/if_ether.h>
6
4#define BIT(x) (1 << (x)) 7#define BIT(x) (1 << (x))
5 8
6#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] 9#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
diff --git a/drivers/net/wireless/hostap/hostap_config.h b/drivers/net/wireless/hostap/hostap_config.h
index 7ed3425d08c1..c090a5aebb58 100644
--- a/drivers/net/wireless/hostap/hostap_config.h
+++ b/drivers/net/wireless/hostap/hostap_config.h
@@ -21,15 +21,10 @@
21#define PRISM2_DOWNLOAD_SUPPORT 21#define PRISM2_DOWNLOAD_SUPPORT
22#endif 22#endif
23 23
24#ifdef PRISM2_DOWNLOAD_SUPPORT 24/* Allow kernel configuration to enable non-volatile download support. */
25/* Allow writing firmware images into flash, i.e., to non-volatile storage. 25#ifdef CONFIG_HOSTAP_FIRMWARE_NVRAM
26 * Before you enable this option, you should make absolutely sure that you are 26#define PRISM2_NON_VOLATILE_DOWNLOAD
27 * using prism2_srec utility that comes with THIS version of the driver! 27#endif
28 * In addition, please note that it is possible to kill your card with
29 * non-volatile download if you are using incorrect image. This feature has not
30 * been fully tested, so please be careful with it. */
31/* #define PRISM2_NON_VOLATILE_DOWNLOAD */
32#endif /* PRISM2_DOWNLOAD_SUPPORT */
33 28
34/* Save low-level I/O for debugging. This should not be enabled in normal use. 29/* Save low-level I/O for debugging. This should not be enabled in normal use.
35 */ 30 */
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 5aa998fdf1c4..50f72d831cf4 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -1,5 +1,8 @@
1/* Host AP driver Info Frame processing (part of hostap.o module) */ 1/* Host AP driver Info Frame processing (part of hostap.o module) */
2 2
3#include "hostap_wlan.h"
4#include "hostap.h"
5#include "hostap_ap.h"
3 6
4/* Called only as a tasklet (software IRQ) */ 7/* Called only as a tasklet (software IRQ) */
5static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf, 8static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf,
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 2617d70bcda9..f3e0ce1ee037 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -1,11 +1,13 @@
1/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */ 1/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
2 2
3#ifdef in_atomic 3#include <linux/types.h>
4/* Get kernel_locked() for in_atomic() */
5#include <linux/smp_lock.h> 4#include <linux/smp_lock.h>
6#endif
7#include <linux/ethtool.h> 5#include <linux/ethtool.h>
6#include <net/ieee80211_crypt.h>
8 7
8#include "hostap_wlan.h"
9#include "hostap.h"
10#include "hostap_ap.h"
9 11
10static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev) 12static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
11{ 13{
@@ -3910,7 +3912,7 @@ static void prism2_get_drvinfo(struct net_device *dev,
3910 local->sta_fw_ver & 0xff); 3912 local->sta_fw_ver & 0xff);
3911} 3913}
3912 3914
3913static struct ethtool_ops prism2_ethtool_ops = { 3915struct ethtool_ops prism2_ethtool_ops = {
3914 .get_drvinfo = prism2_get_drvinfo 3916 .get_drvinfo = prism2_get_drvinfo
3915}; 3917};
3916 3918
@@ -3985,7 +3987,7 @@ static const iw_handler prism2_private_handler[] =
3985 (iw_handler) prism2_ioctl_priv_readmif, /* 3 */ 3987 (iw_handler) prism2_ioctl_priv_readmif, /* 3 */
3986}; 3988};
3987 3989
3988static const struct iw_handler_def hostap_iw_handler_def = 3990const struct iw_handler_def hostap_iw_handler_def =
3989{ 3991{
3990 .num_standard = sizeof(prism2_handler) / sizeof(iw_handler), 3992 .num_standard = sizeof(prism2_handler) / sizeof(iw_handler),
3991 .num_private = sizeof(prism2_private_handler) / sizeof(iw_handler), 3993 .num_private = sizeof(prism2_private_handler) / sizeof(iw_handler),
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 3d2ea61033be..8dd4c4446a64 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -24,6 +24,7 @@
24#include <linux/kmod.h> 24#include <linux/kmod.h>
25#include <linux/rtnetlink.h> 25#include <linux/rtnetlink.h>
26#include <linux/wireless.h> 26#include <linux/wireless.h>
27#include <linux/etherdevice.h>
27#include <net/iw_handler.h> 28#include <net/iw_handler.h>
28#include <net/ieee80211.h> 29#include <net/ieee80211.h>
29#include <net/ieee80211_crypt.h> 30#include <net/ieee80211_crypt.h>
@@ -47,57 +48,6 @@ MODULE_VERSION(PRISM2_VERSION);
47#define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */)) 48#define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */))
48 49
49 50
50/* hostap.c */
51static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
52 int rtnl_locked);
53static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
54 int rtnl_locked, int do_not_remove);
55
56/* hostap_ap.c */
57static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
58 struct iw_quality qual[], int buf_size,
59 int aplist);
60static int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
61static int prism2_hostapd(struct ap_data *ap,
62 struct prism2_hostapd_param *param);
63static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
64 struct ieee80211_crypt_data ***crypt);
65static void ap_control_kickall(struct ap_data *ap);
66#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
67static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
68 u8 *mac);
69static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
70 u8 *mac);
71static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
72static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
73 u8 *mac);
74#endif /* !PRISM2_NO_KERNEL_IEEE80211_MGMT */
75
76
77static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
78 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
79#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
80
81
82/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
83/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
84static unsigned char rfc1042_header[] =
85{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
86/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
87static unsigned char bridge_tunnel_header[] =
88{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
89/* No encapsulation header if EtherType < 0x600 (=length) */
90
91
92/* FIX: these could be compiled separately and linked together to hostap.o */
93#include "hostap_ap.c"
94#include "hostap_info.c"
95#include "hostap_ioctl.c"
96#include "hostap_proc.c"
97#include "hostap_80211_rx.c"
98#include "hostap_80211_tx.c"
99
100
101struct net_device * hostap_add_interface(struct local_info *local, 51struct net_device * hostap_add_interface(struct local_info *local,
102 int type, int rtnl_locked, 52 int type, int rtnl_locked,
103 const char *prefix, 53 const char *prefix,
@@ -196,8 +146,8 @@ static inline int prism2_wds_special_addr(u8 *addr)
196} 146}
197 147
198 148
199static int prism2_wds_add(local_info_t *local, u8 *remote_addr, 149int prism2_wds_add(local_info_t *local, u8 *remote_addr,
200 int rtnl_locked) 150 int rtnl_locked)
201{ 151{
202 struct net_device *dev; 152 struct net_device *dev;
203 struct list_head *ptr; 153 struct list_head *ptr;
@@ -258,8 +208,8 @@ static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
258} 208}
259 209
260 210
261static int prism2_wds_del(local_info_t *local, u8 *remote_addr, 211int prism2_wds_del(local_info_t *local, u8 *remote_addr,
262 int rtnl_locked, int do_not_remove) 212 int rtnl_locked, int do_not_remove)
263{ 213{
264 unsigned long flags; 214 unsigned long flags;
265 struct list_head *ptr; 215 struct list_head *ptr;
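The block deleted above also carried the IEEE 802.1H encapsulation tables (rfc1042_header and bridge_tunnel_header), which move along with the split-out files. A minimal, self-contained sketch of the rule those comments describe, assuming the userspace linux/if_ether.h for the EtherType constants; this is an illustration, not the driver's actual TX path:

#include <linux/if_ether.h>
#include <stddef.h>

static const unsigned char rfc1042_header[6] =
        { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const unsigned char bridge_tunnel_header[6] =
        { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

/* Pick the LLC/SNAP header for an outgoing frame, per IEEE 802.1H:
 * no header when the field is an 802.3 length (< 0x600), the
 * bridge-tunnel header for AppleTalk AARP and IPX, RFC 1042
 * otherwise. */
static const unsigned char *snap_header_for(unsigned short ethertype)
{
        if (ethertype < 0x600)
                return NULL;
        if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX)
                return bridge_tunnel_header;
        return rfc1042_header;
}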
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index a0a4cbd4937a..d1d8ce022e63 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -1,5 +1,12 @@
1/* /proc routines for Host AP driver */ 1/* /proc routines for Host AP driver */
2 2
3#include <linux/types.h>
4#include <linux/proc_fs.h>
5#include <net/ieee80211_crypt.h>
6
7#include "hostap_wlan.h"
8#include "hostap.h"
9
3#define PROC_LIMIT (PAGE_SIZE - 80) 10#define PROC_LIMIT (PAGE_SIZE - 80)
4 11
5 12
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index cfd801559492..87a54aa6f4dd 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -1,6 +1,10 @@
1#ifndef HOSTAP_WLAN_H 1#ifndef HOSTAP_WLAN_H
2#define HOSTAP_WLAN_H 2#define HOSTAP_WLAN_H
3 3
4#include <linux/wireless.h>
5#include <linux/netdevice.h>
6#include <net/iw_handler.h>
7
4#include "hostap_config.h" 8#include "hostap_config.h"
5#include "hostap_common.h" 9#include "hostap_common.h"
6 10
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 7518384f34d9..8bf02763b5c7 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -5735,70 +5735,6 @@ static struct net_device_stats *ipw2100_stats(struct net_device *dev)
5735 return &priv->ieee->stats; 5735 return &priv->ieee->stats;
5736} 5736}
5737 5737
5738#if WIRELESS_EXT < 18
5739/* Support for wpa_supplicant before WE-18, deprecated. */
5740
5741/* following definitions must match definitions in driver_ipw.c */
5742
5743#define IPW2100_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30
5744
5745#define IPW2100_CMD_SET_WPA_PARAM 1
5746#define IPW2100_CMD_SET_WPA_IE 2
5747#define IPW2100_CMD_SET_ENCRYPTION 3
5748#define IPW2100_CMD_MLME 4
5749
5750#define IPW2100_PARAM_WPA_ENABLED 1
5751#define IPW2100_PARAM_TKIP_COUNTERMEASURES 2
5752#define IPW2100_PARAM_DROP_UNENCRYPTED 3
5753#define IPW2100_PARAM_PRIVACY_INVOKED 4
5754#define IPW2100_PARAM_AUTH_ALGS 5
5755#define IPW2100_PARAM_IEEE_802_1X 6
5756
5757#define IPW2100_MLME_STA_DEAUTH 1
5758#define IPW2100_MLME_STA_DISASSOC 2
5759
5760#define IPW2100_CRYPT_ERR_UNKNOWN_ALG 2
5761#define IPW2100_CRYPT_ERR_UNKNOWN_ADDR 3
5762#define IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED 4
5763#define IPW2100_CRYPT_ERR_KEY_SET_FAILED 5
5764#define IPW2100_CRYPT_ERR_TX_KEY_SET_FAILED 6
5765#define IPW2100_CRYPT_ERR_CARD_CONF_FAILED 7
5766
5767#define IPW2100_CRYPT_ALG_NAME_LEN 16
5768
5769struct ipw2100_param {
5770 u32 cmd;
5771 u8 sta_addr[ETH_ALEN];
5772 union {
5773 struct {
5774 u8 name;
5775 u32 value;
5776 } wpa_param;
5777 struct {
5778 u32 len;
5779 u8 reserved[32];
5780 u8 data[0];
5781 } wpa_ie;
5782 struct {
5783 u32 command;
5784 u32 reason_code;
5785 } mlme;
5786 struct {
5787 u8 alg[IPW2100_CRYPT_ALG_NAME_LEN];
5788 u8 set_tx;
5789 u32 err;
5790 u8 idx;
5791 u8 seq[8]; /* sequence counter (set: RX, get: TX) */
5792 u16 key_len;
5793 u8 key[0];
5794 } crypt;
5795
5796 } u;
5797};
5798
5799/* end of driver_ipw.c code */
5800#endif /* WIRELESS_EXT < 18 */
5801
5802static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value) 5738static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
5803{ 5739{
5804 /* This is called when wpa_supplicant loads and closes the driver 5740 /* This is called when wpa_supplicant loads and closes the driver
@@ -5807,11 +5743,6 @@ static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
5807 return 0; 5743 return 0;
5808} 5744}
5809 5745
5810#if WIRELESS_EXT < 18
5811#define IW_AUTH_ALG_OPEN_SYSTEM 0x1
5812#define IW_AUTH_ALG_SHARED_KEY 0x2
5813#endif
5814
5815static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value) 5746static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value)
5816{ 5747{
5817 5748
@@ -5855,360 +5786,6 @@ void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv,
5855 ipw2100_set_wpa_ie(priv, &frame, 0); 5786 ipw2100_set_wpa_ie(priv, &frame, 0);
5856} 5787}
5857 5788
5858#if WIRELESS_EXT < 18
5859static int ipw2100_wpa_set_param(struct net_device *dev, u8 name, u32 value)
5860{
5861 struct ipw2100_priv *priv = ieee80211_priv(dev);
5862 struct ieee80211_crypt_data *crypt;
5863 unsigned long flags;
5864 int ret = 0;
5865
5866 switch (name) {
5867 case IPW2100_PARAM_WPA_ENABLED:
5868 ret = ipw2100_wpa_enable(priv, value);
5869 break;
5870
5871 case IPW2100_PARAM_TKIP_COUNTERMEASURES:
5872 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
5873 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
5874 break;
5875
5876 flags = crypt->ops->get_flags(crypt->priv);
5877
5878 if (value)
5879 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
5880 else
5881 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
5882
5883 crypt->ops->set_flags(flags, crypt->priv);
5884
5885 break;
5886
5887 case IPW2100_PARAM_DROP_UNENCRYPTED:{
5888 /* See IW_AUTH_DROP_UNENCRYPTED handling for details */
5889 struct ieee80211_security sec = {
5890 .flags = SEC_ENABLED,
5891 .enabled = value,
5892 };
5893 priv->ieee->drop_unencrypted = value;
5894 /* We only change SEC_LEVEL for open mode. Others
5895 * are set by ipw_wpa_set_encryption.
5896 */
5897 if (!value) {
5898 sec.flags |= SEC_LEVEL;
5899 sec.level = SEC_LEVEL_0;
5900 } else {
5901 sec.flags |= SEC_LEVEL;
5902 sec.level = SEC_LEVEL_1;
5903 }
5904 if (priv->ieee->set_security)
5905 priv->ieee->set_security(priv->ieee->dev, &sec);
5906 break;
5907 }
5908
5909 case IPW2100_PARAM_PRIVACY_INVOKED:
5910 priv->ieee->privacy_invoked = value;
5911 break;
5912
5913 case IPW2100_PARAM_AUTH_ALGS:
5914 ret = ipw2100_wpa_set_auth_algs(priv, value);
5915 break;
5916
5917 case IPW2100_PARAM_IEEE_802_1X:
5918 priv->ieee->ieee802_1x = value;
5919 break;
5920
5921 default:
5922 printk(KERN_ERR DRV_NAME ": %s: Unknown WPA param: %d\n",
5923 dev->name, name);
5924 ret = -EOPNOTSUPP;
5925 }
5926
5927 return ret;
5928}
5929
5930static int ipw2100_wpa_mlme(struct net_device *dev, int command, int reason)
5931{
5932
5933 struct ipw2100_priv *priv = ieee80211_priv(dev);
5934 int ret = 0;
5935
5936 switch (command) {
5937 case IPW2100_MLME_STA_DEAUTH:
5938 // silently ignore
5939 break;
5940
5941 case IPW2100_MLME_STA_DISASSOC:
5942 ipw2100_disassociate_bssid(priv);
5943 break;
5944
5945 default:
5946 printk(KERN_ERR DRV_NAME ": %s: Unknown MLME request: %d\n",
5947 dev->name, command);
5948 ret = -EOPNOTSUPP;
5949 }
5950
5951 return ret;
5952}
5953
5954static int ipw2100_wpa_set_wpa_ie(struct net_device *dev,
5955 struct ipw2100_param *param, int plen)
5956{
5957
5958 struct ipw2100_priv *priv = ieee80211_priv(dev);
5959 struct ieee80211_device *ieee = priv->ieee;
5960 u8 *buf;
5961
5962 if (!ieee->wpa_enabled)
5963 return -EOPNOTSUPP;
5964
5965 if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
5966 (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
5967 return -EINVAL;
5968
5969 if (param->u.wpa_ie.len) {
5970 buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
5971 if (buf == NULL)
5972 return -ENOMEM;
5973
5974 memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
5975
5976 kfree(ieee->wpa_ie);
5977 ieee->wpa_ie = buf;
5978 ieee->wpa_ie_len = param->u.wpa_ie.len;
5979
5980 } else {
5981 kfree(ieee->wpa_ie);
5982 ieee->wpa_ie = NULL;
5983 ieee->wpa_ie_len = 0;
5984 }
5985
5986 ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
5987
5988 return 0;
5989}
5990
5991/* implementation borrowed from hostap driver */
5992
5993static int ipw2100_wpa_set_encryption(struct net_device *dev,
5994 struct ipw2100_param *param,
5995 int param_len)
5996{
5997 int ret = 0;
5998 struct ipw2100_priv *priv = ieee80211_priv(dev);
5999 struct ieee80211_device *ieee = priv->ieee;
6000 struct ieee80211_crypto_ops *ops;
6001 struct ieee80211_crypt_data **crypt;
6002
6003 struct ieee80211_security sec = {
6004 .flags = 0,
6005 };
6006
6007 param->u.crypt.err = 0;
6008 param->u.crypt.alg[IPW2100_CRYPT_ALG_NAME_LEN - 1] = '\0';
6009
6010 if (param_len !=
6011 (int)((char *)param->u.crypt.key - (char *)param) +
6012 param->u.crypt.key_len) {
6013 IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len,
6014 param->u.crypt.key_len);
6015 return -EINVAL;
6016 }
6017 if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
6018 param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
6019 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
6020 if (param->u.crypt.idx >= WEP_KEYS)
6021 return -EINVAL;
6022 crypt = &ieee->crypt[param->u.crypt.idx];
6023 } else {
6024 return -EINVAL;
6025 }
6026
6027 sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
6028 if (strcmp(param->u.crypt.alg, "none") == 0) {
6029 if (crypt) {
6030 sec.enabled = 0;
6031 sec.encrypt = 0;
6032 sec.level = SEC_LEVEL_0;
6033 sec.flags |= SEC_LEVEL;
6034 ieee80211_crypt_delayed_deinit(ieee, crypt);
6035 }
6036 goto done;
6037 }
6038 sec.enabled = 1;
6039 sec.encrypt = 1;
6040
6041 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6042 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
6043 request_module("ieee80211_crypt_wep");
6044 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6045 } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
6046 request_module("ieee80211_crypt_tkip");
6047 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6048 } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
6049 request_module("ieee80211_crypt_ccmp");
6050 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6051 }
6052 if (ops == NULL) {
6053 IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n",
6054 dev->name, param->u.crypt.alg);
6055 param->u.crypt.err = IPW2100_CRYPT_ERR_UNKNOWN_ALG;
6056 ret = -EINVAL;
6057 goto done;
6058 }
6059
6060 if (*crypt == NULL || (*crypt)->ops != ops) {
6061 struct ieee80211_crypt_data *new_crypt;
6062
6063 ieee80211_crypt_delayed_deinit(ieee, crypt);
6064
6065 new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL);
6066 if (new_crypt == NULL) {
6067 ret = -ENOMEM;
6068 goto done;
6069 }
6070 new_crypt->ops = ops;
6071 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
6072 new_crypt->priv =
6073 new_crypt->ops->init(param->u.crypt.idx);
6074
6075 if (new_crypt->priv == NULL) {
6076 kfree(new_crypt);
6077 param->u.crypt.err =
6078 IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED;
6079 ret = -EINVAL;
6080 goto done;
6081 }
6082
6083 *crypt = new_crypt;
6084 }
6085
6086 if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
6087 (*crypt)->ops->set_key(param->u.crypt.key,
6088 param->u.crypt.key_len, param->u.crypt.seq,
6089 (*crypt)->priv) < 0) {
6090 IPW_DEBUG_INFO("%s: key setting failed\n", dev->name);
6091 param->u.crypt.err = IPW2100_CRYPT_ERR_KEY_SET_FAILED;
6092 ret = -EINVAL;
6093 goto done;
6094 }
6095
6096 if (param->u.crypt.set_tx) {
6097 ieee->tx_keyidx = param->u.crypt.idx;
6098 sec.active_key = param->u.crypt.idx;
6099 sec.flags |= SEC_ACTIVE_KEY;
6100 }
6101
6102 if (ops->name != NULL) {
6103
6104 if (strcmp(ops->name, "WEP") == 0) {
6105 memcpy(sec.keys[param->u.crypt.idx],
6106 param->u.crypt.key, param->u.crypt.key_len);
6107 sec.key_sizes[param->u.crypt.idx] =
6108 param->u.crypt.key_len;
6109 sec.flags |= (1 << param->u.crypt.idx);
6110 sec.flags |= SEC_LEVEL;
6111 sec.level = SEC_LEVEL_1;
6112 } else if (strcmp(ops->name, "TKIP") == 0) {
6113 sec.flags |= SEC_LEVEL;
6114 sec.level = SEC_LEVEL_2;
6115 } else if (strcmp(ops->name, "CCMP") == 0) {
6116 sec.flags |= SEC_LEVEL;
6117 sec.level = SEC_LEVEL_3;
6118 }
6119 }
6120 done:
6121 if (ieee->set_security)
6122 ieee->set_security(ieee->dev, &sec);
6123
6124 /* Do not reset port if card is in Managed mode since resetting will
6125 * generate new IEEE 802.11 authentication which may end up in looping
6126 * with IEEE 802.1X. If your hardware requires a reset after WEP
6127 * configuration (for example... Prism2), implement the reset_port in
6128 * the callbacks structures used to initialize the 802.11 stack. */
6129 if (ieee->reset_on_keychange &&
6130 ieee->iw_mode != IW_MODE_INFRA &&
6131 ieee->reset_port && ieee->reset_port(dev)) {
6132 IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name);
6133 param->u.crypt.err = IPW2100_CRYPT_ERR_CARD_CONF_FAILED;
6134 return -EINVAL;
6135 }
6136
6137 return ret;
6138}
6139
6140static int ipw2100_wpa_supplicant(struct net_device *dev, struct iw_point *p)
6141{
6142
6143 struct ipw2100_param *param;
6144 int ret = 0;
6145
6146 IPW_DEBUG_IOCTL("wpa_supplicant: len=%d\n", p->length);
6147
6148 if (p->length < sizeof(struct ipw2100_param) || !p->pointer)
6149 return -EINVAL;
6150
6151 param = (struct ipw2100_param *)kmalloc(p->length, GFP_KERNEL);
6152 if (param == NULL)
6153 return -ENOMEM;
6154
6155 if (copy_from_user(param, p->pointer, p->length)) {
6156 kfree(param);
6157 return -EFAULT;
6158 }
6159
6160 switch (param->cmd) {
6161
6162 case IPW2100_CMD_SET_WPA_PARAM:
6163 ret = ipw2100_wpa_set_param(dev, param->u.wpa_param.name,
6164 param->u.wpa_param.value);
6165 break;
6166
6167 case IPW2100_CMD_SET_WPA_IE:
6168 ret = ipw2100_wpa_set_wpa_ie(dev, param, p->length);
6169 break;
6170
6171 case IPW2100_CMD_SET_ENCRYPTION:
6172 ret = ipw2100_wpa_set_encryption(dev, param, p->length);
6173 break;
6174
6175 case IPW2100_CMD_MLME:
6176 ret = ipw2100_wpa_mlme(dev, param->u.mlme.command,
6177 param->u.mlme.reason_code);
6178 break;
6179
6180 default:
6181 printk(KERN_ERR DRV_NAME
6182 ": %s: Unknown WPA supplicant request: %d\n", dev->name,
6183 param->cmd);
6184 ret = -EOPNOTSUPP;
6185
6186 }
6187
6188 if (ret == 0 && copy_to_user(p->pointer, param, p->length))
6189 ret = -EFAULT;
6190
6191 kfree(param);
6192 return ret;
6193}
6194
6195static int ipw2100_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6196{
6197 struct iwreq *wrq = (struct iwreq *)rq;
6198 int ret = -1;
6199 switch (cmd) {
6200 case IPW2100_IOCTL_WPA_SUPPLICANT:
6201 ret = ipw2100_wpa_supplicant(dev, &wrq->u.data);
6202 return ret;
6203
6204 default:
6205 return -EOPNOTSUPP;
6206 }
6207
6208 return -EOPNOTSUPP;
6209}
6210#endif /* WIRELESS_EXT < 18 */
6211
6212static void ipw_ethtool_get_drvinfo(struct net_device *dev, 5789static void ipw_ethtool_get_drvinfo(struct net_device *dev,
6213 struct ethtool_drvinfo *info) 5790 struct ethtool_drvinfo *info)
6214{ 5791{
@@ -6337,9 +5914,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6337 dev->open = ipw2100_open; 5914 dev->open = ipw2100_open;
6338 dev->stop = ipw2100_close; 5915 dev->stop = ipw2100_close;
6339 dev->init = ipw2100_net_init; 5916 dev->init = ipw2100_net_init;
6340#if WIRELESS_EXT < 18
6341 dev->do_ioctl = ipw2100_ioctl;
6342#endif
6343 dev->get_stats = ipw2100_stats; 5917 dev->get_stats = ipw2100_stats;
6344 dev->ethtool_ops = &ipw2100_ethtool_ops; 5918 dev->ethtool_ops = &ipw2100_ethtool_ops;
6345 dev->tx_timeout = ipw2100_tx_timeout; 5919 dev->tx_timeout = ipw2100_tx_timeout;
@@ -7855,7 +7429,6 @@ static int ipw2100_wx_get_power(struct net_device *dev,
7855 return 0; 7429 return 0;
7856} 7430}
7857 7431
7858#if WIRELESS_EXT > 17
7859/* 7432/*
7860 * WE-18 WPA support 7433 * WE-18 WPA support
7861 */ 7434 */
@@ -8117,7 +7690,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
8117 } 7690 }
8118 return 0; 7691 return 0;
8119} 7692}
8120#endif /* WIRELESS_EXT > 17 */
8121 7693
8122/* 7694/*
8123 * 7695 *
@@ -8350,11 +7922,7 @@ static iw_handler ipw2100_wx_handlers[] = {
8350 NULL, /* SIOCWIWTHRSPY */ 7922 NULL, /* SIOCWIWTHRSPY */
8351 ipw2100_wx_set_wap, /* SIOCSIWAP */ 7923 ipw2100_wx_set_wap, /* SIOCSIWAP */
8352 ipw2100_wx_get_wap, /* SIOCGIWAP */ 7924 ipw2100_wx_get_wap, /* SIOCGIWAP */
8353#if WIRELESS_EXT > 17
8354 ipw2100_wx_set_mlme, /* SIOCSIWMLME */ 7925 ipw2100_wx_set_mlme, /* SIOCSIWMLME */
8355#else
8356 NULL, /* -- hole -- */
8357#endif
8358 NULL, /* SIOCGIWAPLIST -- deprecated */ 7926 NULL, /* SIOCGIWAPLIST -- deprecated */
8359 ipw2100_wx_set_scan, /* SIOCSIWSCAN */ 7927 ipw2100_wx_set_scan, /* SIOCSIWSCAN */
8360 ipw2100_wx_get_scan, /* SIOCGIWSCAN */ 7928 ipw2100_wx_get_scan, /* SIOCGIWSCAN */
@@ -8378,7 +7946,6 @@ static iw_handler ipw2100_wx_handlers[] = {
8378 ipw2100_wx_get_encode, /* SIOCGIWENCODE */ 7946 ipw2100_wx_get_encode, /* SIOCGIWENCODE */
8379 ipw2100_wx_set_power, /* SIOCSIWPOWER */ 7947 ipw2100_wx_set_power, /* SIOCSIWPOWER */
8380 ipw2100_wx_get_power, /* SIOCGIWPOWER */ 7948 ipw2100_wx_get_power, /* SIOCGIWPOWER */
8381#if WIRELESS_EXT > 17
8382 NULL, /* -- hole -- */ 7949 NULL, /* -- hole -- */
8383 NULL, /* -- hole -- */ 7950 NULL, /* -- hole -- */
8384 ipw2100_wx_set_genie, /* SIOCSIWGENIE */ 7951 ipw2100_wx_set_genie, /* SIOCSIWGENIE */
@@ -8388,7 +7955,6 @@ static iw_handler ipw2100_wx_handlers[] = {
8388 ipw2100_wx_set_encodeext, /* SIOCSIWENCODEEXT */ 7955 ipw2100_wx_set_encodeext, /* SIOCSIWENCODEEXT */
8389 ipw2100_wx_get_encodeext, /* SIOCGIWENCODEEXT */ 7956 ipw2100_wx_get_encodeext, /* SIOCGIWENCODEEXT */
8390 NULL, /* SIOCSIWPMKSA */ 7957 NULL, /* SIOCSIWPMKSA */
8391#endif
8392}; 7958};
8393 7959
8394#define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV 7960#define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV
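The roughly 360 lines removed here were the pre-WE-18 private wpa_supplicant ioctl protocol (IPW2100_IOCTL_WPA_SUPPLICANT and its command set). With WIRELESS_EXT >= 18 the same requests arrive through standard handler slots such as SIOCSIWMLME and SIOCSIWGENIE instead. A hedged sketch of the shape of the WE-18 MLME entry point; the function name is illustrative and error handling is elided:

#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>

static int example_wx_set_mlme(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
{
        struct iw_mlme *mlme = (struct iw_mlme *)extra;

        switch (mlme->cmd) {
        case IW_MLME_DEAUTH:
                return 0;       /* ignored, as the old private path did */
        case IW_MLME_DISASSOC:
                /* the real handler would call ipw2100_disassociate_bssid() */
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}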
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 819be2b6b7df..4c28e332ecc3 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -8936,14 +8936,12 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
8936 IPW_DEBUG_HC("starting request direct scan!\n"); 8936 IPW_DEBUG_HC("starting request direct scan!\n");
8937 8937
8938 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { 8938 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
8939 err = wait_event_interruptible(priv->wait_state, 8939 /* We should not sleep here; otherwise we will block most
8940 !(priv-> 8940 * of the system (for instance, we hold rtnl_lock when we
8941 status & (STATUS_SCANNING | 8941 * get here).
8942 STATUS_SCAN_ABORTING))); 8942 */
8943 if (err) { 8943 err = -EAGAIN;
8944 IPW_DEBUG_HC("aborting direct scan"); 8944 goto done;
8945 goto done;
8946 }
8947 } 8945 }
8948 memset(&scan, 0, sizeof(scan)); 8946 memset(&scan, 0, sizeof(scan));
8949 8947
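The replacement above trades a sleep for a fast failure because the caller already holds rtnl_lock, and sleeping there would stall every other netlink and ioctl configuration path in the system. A toy, self-contained restatement of the rule; the STATUS_* values are illustrative stand-ins for the driver's own:

#include <errno.h>

#define STATUS_SCANNING      (1u << 0)
#define STATUS_SCAN_ABORTING (1u << 1)

static int request_direct_scan(unsigned int status)
{
        /* never block here: the caller holds rtnl_lock */
        if (status & (STATUS_SCANNING | STATUS_SCAN_ABORTING))
                return -EAGAIN; /* caller may retry once the lock is dropped */
        /* ... queue the scan command ... */
        return 0;
}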
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 135a156db25d..c5cd61c7f927 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -748,7 +748,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
748 if (essid->length) { 748 if (essid->length) {
749 dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */ 749 dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */
750 /* if it is to big, trunk it */ 750 /* if it is to big, trunk it */
751 dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length + 1); 751 dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length);
752 } else { 752 } else {
753 dwrq->flags = 0; 753 dwrq->flags = 0;
754 dwrq->length = 0; 754 dwrq->length = 0;
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 33d64d2ee53f..a8261d8454dd 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -177,7 +177,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
177#endif 177#endif
178 178
179 newskb->dev = skb->dev; 179 newskb->dev = skb->dev;
180 dev_kfree_skb(skb); 180 dev_kfree_skb_irq(skb);
181 skb = newskb; 181 skb = newskb;
182 } 182 }
183 } 183 }
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 319180ca7e71..7880d8c31aad 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1256,7 +1256,7 @@ static int ray_get_essid(struct net_device *dev,
1256 extra[IW_ESSID_MAX_SIZE] = '\0'; 1256 extra[IW_ESSID_MAX_SIZE] = '\0';
1257 1257
1258 /* Push it out ! */ 1258 /* Push it out ! */
1259 dwrq->length = strlen(extra) + 1; 1259 dwrq->length = strlen(extra);
1260 dwrq->flags = 1; /* active */ 1260 dwrq->flags = 1; /* active */
1261 1261
1262 return 0; 1262 return 0;
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 7e2039f52c49..cf373625fc70 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -2280,7 +2280,7 @@ static int wavelan_get_essid(struct net_device *dev,
2280 extra[IW_ESSID_MAX_SIZE] = '\0'; 2280 extra[IW_ESSID_MAX_SIZE] = '\0';
2281 2281
2282 /* Set the length */ 2282 /* Set the length */
2283 wrqu->data.length = strlen(extra) + 1; 2283 wrqu->data.length = strlen(extra);
2284 2284
2285 return 0; 2285 return 0;
2286} 2286}
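This hunk and the two before it (prism54, ray_cs) apply the same correction: for an ESSID, iw_point.length counts only the ESSID octets, so the old "+ 1" wrongly exported the trailing NUL to userspace. A minimal userspace sketch of the corrected convention; the name is illustrative and the 32-byte limit mirrors the Wireless Extensions definition:

#include <string.h>

#define IW_ESSID_MAX_SIZE 32    /* as in linux/wireless.h */

/* Length to report for an ESSID: the octets only, no trailing NUL. */
static size_t essid_report_len(const char *essid)
{
        size_t n = strlen(essid);

        return n > IW_ESSID_MAX_SIZE ? IW_ESSID_MAX_SIZE : n;
}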
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 605f0df0bfba..dda6099903c1 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1142,6 +1142,9 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
1142 case 0x27c4: 1142 case 0x27c4:
1143 ich = 7; 1143 ich = 7;
1144 break; 1144 break;
1145 case 0x2828: /* ICH8M */
1146 ich = 8;
1147 break;
1145 default: 1148 default:
1146 /* we do not handle this PCI device */ 1149 /* we do not handle this PCI device */
1147 return; 1150 return;
@@ -1161,7 +1164,7 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
1161 else 1164 else
1162 return; /* not in combined mode */ 1165 return; /* not in combined mode */
1163 } else { 1166 } else {
1164 WARN_ON((ich != 6) && (ich != 7)); 1167 WARN_ON((ich != 6) && (ich != 7) && (ich != 8));
1165 tmp &= 0x3; /* interesting bits 1:0 */ 1168 tmp &= 0x3; /* interesting bits 1:0 */
1166 if (tmp & (1 << 0)) 1169 if (tmp & (1 << 0))
1167 comb = (1 << 2); /* PATA port 0, SATA port 1 */ 1170 comb = (1 << 2); /* PATA port 0, SATA port 1 */
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index d113290b5fc0..19bd346951dd 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -276,6 +276,16 @@ static const struct pci_device_id ahci_pci_tbl[] = {
276 board_ahci }, /* ESB2 */ 276 board_ahci }, /* ESB2 */
277 { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 277 { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
278 board_ahci }, /* ICH7-M DH */ 278 board_ahci }, /* ICH7-M DH */
279 { PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
280 board_ahci }, /* ICH8 */
281 { PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
282 board_ahci }, /* ICH8 */
283 { PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
284 board_ahci }, /* ICH8 */
285 { PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
286 board_ahci }, /* ICH8M */
287 { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
288 board_ahci }, /* ICH8M */
279 { } /* terminate list */ 289 { } /* terminate list */
280}; 290};
281 291
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 557788ec4eec..fc3ca051ceed 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -157,6 +157,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
157 { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 157 { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
158 { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 158 { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
159 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 159 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
160 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
161 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
162 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
160 163
161 { } /* terminate list */ 164 { } /* terminate list */
162}; 165};
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 99bae8369ab2..46c4cdbaee86 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -611,6 +611,10 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
611 if (dev->flags & ATA_DFLAG_PIO) { 611 if (dev->flags & ATA_DFLAG_PIO) {
612 tf->protocol = ATA_PROT_PIO; 612 tf->protocol = ATA_PROT_PIO;
613 index = dev->multi_count ? 0 : 8; 613 index = dev->multi_count ? 0 : 8;
614 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
615 /* Unable to use DMA due to host limitation */
616 tf->protocol = ATA_PROT_PIO;
617 index = dev->multi_count ? 0 : 4;
614 } else { 618 } else {
615 tf->protocol = ATA_PROT_DMA; 619 tf->protocol = ATA_PROT_DMA;
616 index = 16; 620 index = 16;
@@ -1051,18 +1055,22 @@ static unsigned int ata_pio_modes(const struct ata_device *adev)
1051{ 1055{
1052 u16 modes; 1056 u16 modes;
1053 1057
1054 /* Usual case. Word 53 indicates word 88 is valid */ 1058 /* Usual case. Word 53 indicates word 64 is valid */
1055 if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) { 1059 if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1056 modes = adev->id[ATA_ID_PIO_MODES] & 0x03; 1060 modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
1057 modes <<= 3; 1061 modes <<= 3;
1058 modes |= 0x7; 1062 modes |= 0x7;
1059 return modes; 1063 return modes;
1060 } 1064 }
1061 1065
1062 /* If word 88 isn't valid then Word 51 holds the PIO timing number 1066 /* If word 64 isn't valid then Word 51 high byte holds the PIO timing
1063 for the maximum. Turn it into a mask and return it */ 1067 number for the maximum. Turn it into a mask and return it */
1064 modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ; 1068 modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ;
1065 return modes; 1069 return modes;
1070 /* But wait.. there's more. Design your standards by committee and
1071 you too can get a free iordy field to process. However its the
1072 speeds not the modes that are supported... Note drivers using the
1073 timing API will get this right anyway */
1066} 1074}
1067 1075
1068struct ata_exec_internal_arg { 1076struct ata_exec_internal_arg {
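The rewritten fallback reads the legacy PIO timing number from the high byte of identify word 51 (the old code used the low byte) and expands it into a bitmask of supported modes. A worked example as a toy program: a timing number of 2 yields (2 << 2) - 1 = 0x07, i.e. PIO modes 0 through 2.

#include <stdio.h>

int main(void)
{
        unsigned short word51 = 0x0200; /* high byte: PIO timing mode 2 */
        unsigned int modes = (2u << ((word51 >> 8) & 0xFF)) - 1;

        printf("PIO mode mask: 0x%02x\n", modes);       /* prints 0x07 */
        return 0;
}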
@@ -1165,6 +1173,39 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1165} 1173}
1166 1174
1167/** 1175/**
1176 * ata_pio_need_iordy - check if iordy needed
1177 * @adev: ATA device
1178 *
1179 * Check if the current speed of the device requires IORDY. Used
1180 * by various controllers for chip configuration.
1181 */
1182
1183unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1184{
1185 int pio;
1186 int speed = adev->pio_mode - XFER_PIO_0;
1187
1188 if (speed < 2)
1189 return 0;
1190 if (speed > 2)
1191 return 1;
1192
1193 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1194
1195 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1196 pio = adev->id[ATA_ID_EIDE_PIO];
1197 /* Is the speed faster than the drive allows non IORDY ? */
1198 if (pio) {
1199 /* This is cycle times not frequency - watch the logic! */
1200 if (pio > 240) /* PIO2 is 240nS per cycle */
1201 return 1;
1202 return 0;
1203 }
1204 }
1205 return 0;
1206}
1207
1208/**
1168 * ata_dev_identify - obtain IDENTIFY x DEVICE page 1209 * ata_dev_identify - obtain IDENTIFY x DEVICE page
1169 * @ap: port on which device we wish to probe resides 1210 * @ap: port on which device we wish to probe resides
1170 * @device: device bus address, starting at zero 1211 * @device: device bus address, starting at zero
@@ -1415,7 +1456,7 @@ void ata_dev_config(struct ata_port *ap, unsigned int i)
1415 ap->udma_mask &= ATA_UDMA5; 1456 ap->udma_mask &= ATA_UDMA5;
1416 ap->host->max_sectors = ATA_MAX_SECTORS; 1457 ap->host->max_sectors = ATA_MAX_SECTORS;
1417 ap->host->hostt->max_sectors = ATA_MAX_SECTORS; 1458 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
1418 ap->device->flags |= ATA_DFLAG_LOCK_SECTORS; 1459 ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
1419 } 1460 }
1420 1461
1421 if (ap->ops->dev_config) 1462 if (ap->ops->dev_config)
@@ -3056,10 +3097,21 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3056static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, 3097static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3057 unsigned int buflen, int do_write) 3098 unsigned int buflen, int do_write)
3058{ 3099{
3059 if (ap->flags & ATA_FLAG_MMIO) 3100 /* Make the crap hardware pay the costs not the good stuff */
3060 ata_mmio_data_xfer(ap, buf, buflen, do_write); 3101 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3061 else 3102 unsigned long flags;
3062 ata_pio_data_xfer(ap, buf, buflen, do_write); 3103 local_irq_save(flags);
3104 if (ap->flags & ATA_FLAG_MMIO)
3105 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3106 else
3107 ata_pio_data_xfer(ap, buf, buflen, do_write);
3108 local_irq_restore(flags);
3109 } else {
3110 if (ap->flags & ATA_FLAG_MMIO)
3111 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3112 else
3113 ata_pio_data_xfer(ap, buf, buflen, do_write);
3114 }
3063} 3115}
3064 3116
3065/** 3117/**
@@ -5122,6 +5174,7 @@ EXPORT_SYMBOL_GPL(ata_dev_id_string);
5122EXPORT_SYMBOL_GPL(ata_dev_config); 5174EXPORT_SYMBOL_GPL(ata_dev_config);
5123EXPORT_SYMBOL_GPL(ata_scsi_simulate); 5175EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5124 5176
5177EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5125EXPORT_SYMBOL_GPL(ata_timing_compute); 5178EXPORT_SYMBOL_GPL(ata_timing_compute);
5126EXPORT_SYMBOL_GPL(ata_timing_merge); 5179EXPORT_SYMBOL_GPL(ata_timing_merge);
5127 5180
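The decision table of the new ata_pio_need_iordy(), restated as a self-contained sketch with the constants inlined: IORDY is never needed below PIO2, always needed above it, and at exactly PIO2 it depends on whether the drive's advertised EIDE PIO cycle time exceeds the 240 ns PIO2 cycle. This is an illustrative restatement, not the kernel function itself:

static int pio_need_iordy(int pio_mode, int eide_word_valid, int eide_pio_ns)
{
        if (pio_mode < 2)
                return 0;
        if (pio_mode > 2)
                return 1;
        /* exactly PIO2: consult the EIDE timing word when the drive
         * marks it valid; these are cycle times, not frequencies, so
         * longer than 240 ns means the drive cannot run PIO2 without
         * IORDY */
        if (eide_word_valid && eide_pio_ns)
                return eide_pio_ns > 240;
        return 0;
}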
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 3d1ea09a06a1..b0b0a69b3563 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -66,6 +66,7 @@ enum {
66 board_2037x = 0, /* FastTrak S150 TX2plus */ 66 board_2037x = 0, /* FastTrak S150 TX2plus */
67 board_20319 = 1, /* FastTrak S150 TX4 */ 67 board_20319 = 1, /* FastTrak S150 TX4 */
68 board_20619 = 2, /* FastTrak TX4000 */ 68 board_20619 = 2, /* FastTrak TX4000 */
69 board_20771 = 3, /* FastTrak TX2300 */
69 70
70 PDC_HAS_PATA = (1 << 1), /* PDC20375 has PATA */ 71 PDC_HAS_PATA = (1 << 1), /* PDC20375 has PATA */
71 72
@@ -190,6 +191,16 @@ static const struct ata_port_info pdc_port_info[] = {
190 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 191 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
191 .port_ops = &pdc_pata_ops, 192 .port_ops = &pdc_pata_ops,
192 }, 193 },
194
195 /* board_20771 */
196 {
197 .sht = &pdc_ata_sht,
198 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
199 .pio_mask = 0x1f, /* pio0-4 */
200 .mwdma_mask = 0x07, /* mwdma0-2 */
201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
202 .port_ops = &pdc_sata_ops,
203 },
193}; 204};
194 205
195static const struct pci_device_id pdc_ata_pci_tbl[] = { 206static const struct pci_device_id pdc_ata_pci_tbl[] = {
@@ -226,6 +237,8 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
226 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 237 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
227 board_20619 }, 238 board_20619 },
228 239
240 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
241 board_20771 },
229 { } /* terminate list */ 242 { } /* terminate list */
230}; 243};
231 244
@@ -706,6 +719,9 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
706 case board_2037x: 719 case board_2037x:
707 probe_ent->n_ports = 2; 720 probe_ent->n_ports = 2;
708 break; 721 break;
722 case board_20771:
723 probe_ent->n_ports = 2;
724 break;
709 case board_20619: 725 case board_20619:
710 probe_ent->n_ports = 4; 726 probe_ent->n_ports = 4;
711 727
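How the new board reaches its port description, sketched under the kernel context of this file: the driver_data of the matching pci_device_id entry (board_20771 for device 0x3570) indexes pdc_port_info[], so the enum value and the array slot added above must stay in step. The helper name is illustrative:

static const struct ata_port_info *
pdc_info_for(const struct pci_device_id *ent)
{
        /* ent->driver_data == board_20771 for the 0x3570 entry above */
        return &pdc_port_info[ent->driver_data];
}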
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 668373590aa4..d8472563fde8 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -470,6 +470,7 @@ static const struct pci_device_id k2_sata_pci_tbl[] = {
470 { 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, 470 { 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
471 { 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 471 { 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
472 { 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, 472 { 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
473 { 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
473 { } 474 { }
474}; 475};
475 476
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index d9ce8c549416..bc36edff2058 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2595,15 +2595,11 @@ static int __init serial8250_init(void)
2595 if (ret) 2595 if (ret)
2596 goto out; 2596 goto out;
2597 2597
2598 ret = platform_driver_register(&serial8250_isa_driver);
2599 if (ret)
2600 goto unreg_uart_drv;
2601
2602 serial8250_isa_devs = platform_device_alloc("serial8250", 2598 serial8250_isa_devs = platform_device_alloc("serial8250",
2603 PLAT8250_DEV_LEGACY); 2599 PLAT8250_DEV_LEGACY);
2604 if (!serial8250_isa_devs) { 2600 if (!serial8250_isa_devs) {
2605 ret = -ENOMEM; 2601 ret = -ENOMEM;
2606 goto unreg_plat_drv; 2602 goto unreg_uart_drv;
2607 } 2603 }
2608 2604
2609 ret = platform_device_add(serial8250_isa_devs); 2605 ret = platform_device_add(serial8250_isa_devs);
@@ -2612,12 +2608,13 @@ static int __init serial8250_init(void)
2612 2608
2613 serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev); 2609 serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev);
2614 2610
2615 goto out; 2611 ret = platform_driver_register(&serial8250_isa_driver);
2612 if (ret == 0)
2613 goto out;
2616 2614
2615 platform_device_del(serial8250_isa_devs);
2617 put_dev: 2616 put_dev:
2618 platform_device_put(serial8250_isa_devs); 2617 platform_device_put(serial8250_isa_devs);
2619 unreg_plat_drv:
2620 platform_driver_unregister(&serial8250_isa_driver);
2621 unreg_uart_drv: 2618 unreg_uart_drv:
2622 uart_unregister_driver(&serial8250_reg); 2619 uart_unregister_driver(&serial8250_reg);
2623 out: 2620 out:
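The reorder above guarantees serial8250_isa_devs exists before the platform driver can probe, and the unwind path now mirrors the registration order exactly. The general shape of the pattern, reduced to a hedged sketch with illustrative names:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device *example_dev;
static struct platform_driver example_driver = {
        .driver = { .name = "example" },
};

static int __init example_init(void)
{
        int ret;

        example_dev = platform_device_alloc("example", -1);
        if (!example_dev)
                return -ENOMEM;

        ret = platform_device_add(example_dev);
        if (ret)
                goto put_dev;

        /* register the driver last, once the device is fully set up */
        ret = platform_driver_register(&example_driver);
        if (ret)
                goto del_dev;

        return 0;

del_dev:
        platform_device_del(example_dev);
put_dev:
        platform_device_put(example_dev);
        return ret;
}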
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 589fb076654a..2a912153321e 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -940,6 +940,7 @@ enum pci_board_num_t {
940 pbn_b2_bt_2_921600, 940 pbn_b2_bt_2_921600,
941 pbn_b2_bt_4_921600, 941 pbn_b2_bt_4_921600,
942 942
943 pbn_b3_2_115200,
943 pbn_b3_4_115200, 944 pbn_b3_4_115200,
944 pbn_b3_8_115200, 945 pbn_b3_8_115200,
945 946
@@ -1311,6 +1312,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
1311 .uart_offset = 8, 1312 .uart_offset = 8,
1312 }, 1313 },
1313 1314
1315 [pbn_b3_2_115200] = {
1316 .flags = FL_BASE3,
1317 .num_ports = 2,
1318 .base_baud = 115200,
1319 .uart_offset = 8,
1320 },
1314 [pbn_b3_4_115200] = { 1321 [pbn_b3_4_115200] = {
1315 .flags = FL_BASE3, 1322 .flags = FL_BASE3,
1316 .num_ports = 4, 1323 .num_ports = 4,
@@ -2272,6 +2279,9 @@ static struct pci_device_id serial_pci_tbl[] = {
2272 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2279 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2273 pbn_nec_nile4 }, 2280 pbn_nec_nile4 },
2274 2281
2282 { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2,
2283 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2284 pbn_b3_2_115200 },
2275 { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM4, 2285 { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM4,
2276 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2286 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2277 pbn_b3_4_115200 }, 2287 pbn_b3_4_115200 },
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 5e7199f7b59c..9fd1925de361 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -301,7 +301,7 @@ config SERIAL_AT91_TTYAT
301 depends on SERIAL_AT91=y 301 depends on SERIAL_AT91=y
302 help 302 help
303 Say Y here if you wish to have the five internal AT91RM9200 UARTs 303 Say Y here if you wish to have the five internal AT91RM9200 UARTs
304 appear as /dev/ttyAT0-4 (major 240, minor 0-4) instead of the 304 appear as /dev/ttyAT0-4 (major 204, minor 154-158) instead of the
305 normal /dev/ttyS0-4 (major 4, minor 64-68). This is necessary if 305 normal /dev/ttyS0-4 (major 4, minor 64-68). This is necessary if
306 you also want other UARTs, such as external 8250/16C550 compatible 306 you also want other UARTs, such as external 8250/16C550 compatible
307 UARTs. 307 UARTs.
diff --git a/drivers/serial/at91_serial.c b/drivers/serial/at91_serial.c
index 0e206063d685..2113feb75c39 100644
--- a/drivers/serial/at91_serial.c
+++ b/drivers/serial/at91_serial.c
@@ -222,8 +222,6 @@ static void at91_rx_chars(struct uart_port *port, struct pt_regs *regs)
222 while (status & (AT91_US_RXRDY)) { 222 while (status & (AT91_US_RXRDY)) {
223 ch = UART_GET_CHAR(port); 223 ch = UART_GET_CHAR(port);
224 224
225 if (tty->flip.count >= TTY_FLIPBUF_SIZE)
226 goto ignore_char;
227 port->icount.rx++; 225 port->icount.rx++;
228 226
229 flg = TTY_NORMAL; 227 flg = TTY_NORMAL;
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index 5fc4a62173d9..fa4ae94243c2 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -34,6 +34,7 @@ sunserial_console_termios(struct console *con)
34 char *mode_prop = "ttyX-mode"; 34 char *mode_prop = "ttyX-mode";
35 char *cd_prop = "ttyX-ignore-cd"; 35 char *cd_prop = "ttyX-ignore-cd";
36 char *dtr_prop = "ttyX-rts-dtr-off"; 36 char *dtr_prop = "ttyX-rts-dtr-off";
37 char *ssp_console_modes_prop = "ssp-console-modes";
37 int baud, bits, stop, cflag; 38 int baud, bits, stop, cflag;
38 char parity; 39 char parity;
39 int carrier = 0; 40 int carrier = 0;
@@ -43,14 +44,39 @@ sunserial_console_termios(struct console *con)
43 if (!serial_console) 44 if (!serial_console)
44 return; 45 return;
45 46
46 if (serial_console == 1) { 47 switch (serial_console) {
48 case PROMDEV_OTTYA:
47 mode_prop[3] = 'a'; 49 mode_prop[3] = 'a';
48 cd_prop[3] = 'a'; 50 cd_prop[3] = 'a';
49 dtr_prop[3] = 'a'; 51 dtr_prop[3] = 'a';
50 } else { 52 break;
53
54 case PROMDEV_OTTYB:
51 mode_prop[3] = 'b'; 55 mode_prop[3] = 'b';
52 cd_prop[3] = 'b'; 56 cd_prop[3] = 'b';
53 dtr_prop[3] = 'b'; 57 dtr_prop[3] = 'b';
58 break;
59
60 case PROMDEV_ORSC:
61
62 nd = prom_pathtoinode("rsc");
63 if (!nd) {
64 strcpy(mode, "115200,8,n,1,-");
65 goto no_options;
66 }
67
68 if (!prom_node_has_property(nd, ssp_console_modes_prop)) {
69 strcpy(mode, "115200,8,n,1,-");
70 goto no_options;
71 }
72
73 memset(mode, 0, sizeof(mode));
74 prom_getstring(nd, ssp_console_modes_prop, mode, sizeof(mode));
75 goto no_options;
76
77 default:
78 strcpy(mode, "9600,8,n,1,-");
79 goto no_options;
54 } 80 }
55 81
56 topnd = prom_getchild(prom_root_node); 82 topnd = prom_getchild(prom_root_node);
@@ -110,6 +136,10 @@ no_options:
110 case 9600: cflag |= B9600; break; 136 case 9600: cflag |= B9600; break;
111 case 19200: cflag |= B19200; break; 137 case 19200: cflag |= B19200; break;
112 case 38400: cflag |= B38400; break; 138 case 38400: cflag |= B38400; break;
139 case 57600: cflag |= B57600; break;
140 case 115200: cflag |= B115200; break;
141 case 230400: cflag |= B230400; break;
142 case 460800: cflag |= B460800; break;
113 default: baud = 9600; cflag |= B9600; break; 143 default: baud = 9600; cflag |= B9600; break;
114 } 144 }
115 145
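The RSC branch pulls its settings from an "ssp-console-modes" property whose format matches the fallback strings used here ("9600,8,n,1,-", "115200,8,n,1,-"), read as baud, data bits, parity, stop bits, flow control; that field layout is inferred from the defaults rather than documented in this hunk. A toy parser for such a string:

#include <stdio.h>

struct console_mode {
        int baud, bits, stop;
        char parity, flow;
};

static int parse_mode(const char *s, struct console_mode *m)
{
        return sscanf(s, "%d,%d,%c,%d,%c", &m->baud, &m->bits,
                      &m->parity, &m->stop, &m->flow) == 5 ? 0 : -1;
}

int main(void)
{
        struct console_mode m;

        if (parse_mode("115200,8,n,1,-", &m) == 0)
                printf("%d baud, %d data bits, parity '%c'\n",
                       m.baud, m.bits, m.parity);
        return 0;
}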
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 7e773ff76c61..8bcaebcc0ad7 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -897,9 +897,6 @@ static int sunsab_console_setup(struct console *con, char *options)
897 897
898 sunserial_console_termios(con); 898 sunserial_console_termios(con);
899 899
900 /* Firmware console speed is limited to 150-->38400 baud so
901 * this hackish cflag thing is OK.
902 */
903 switch (con->cflag & CBAUD) { 900 switch (con->cflag & CBAUD) {
904 case B150: baud = 150; break; 901 case B150: baud = 150; break;
905 case B300: baud = 300; break; 902 case B300: baud = 300; break;
@@ -910,6 +907,10 @@ static int sunsab_console_setup(struct console *con, char *options)
910 default: case B9600: baud = 9600; break; 907 default: case B9600: baud = 9600; break;
911 case B19200: baud = 19200; break; 908 case B19200: baud = 19200; break;
912 case B38400: baud = 38400; break; 909 case B38400: baud = 38400; break;
910 case B57600: baud = 57600; break;
911 case B115200: baud = 115200; break;
912 case B230400: baud = 230400; break;
913 case B460800: baud = 460800; break;
913 }; 914 };
914 915
915 /* 916 /*