author	David Teigland <teigland@redhat.com>	2006-01-20 03:59:41 -0500
committer	Steven Whitehouse <steve@chygwyn.com>	2006-01-20 03:59:41 -0500
commit	044399b2cb6ad2d7f63cfca945268853d7443a4d (patch)
tree	5f96eb307b0389ac0b919a4744a40862b615e9da /drivers
parent	901359256b2666f52a3a7d3f31927677e91b3a2a (diff)
parent	18a4144028f056b77d6576d4eb284246e9c7ea97 (diff)
Merge branch 'master'
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig                   |    2
-rw-r--r--  drivers/Makefile                  |    1
-rw-r--r--  drivers/char/synclink_gt.c        |    2
-rw-r--r--  drivers/char/tlclk.c              |   93
-rw-r--r--  drivers/edac/Kconfig              |  102
-rw-r--r--  drivers/edac/Makefile             |   18
-rw-r--r--  drivers/edac/amd76x_edac.c        |  356
-rw-r--r--  drivers/edac/e752x_edac.c         | 1071
-rw-r--r--  drivers/edac/e7xxx_edac.c         |  558
-rw-r--r--  drivers/edac/edac_mc.c            | 2209
-rw-r--r--  drivers/edac/edac_mc.h            |  448
-rw-r--r--  drivers/edac/i82860_edac.c        |  299
-rw-r--r--  drivers/edac/i82875p_edac.c       |  532
-rw-r--r--  drivers/edac/r82600_edac.c        |  407
-rw-r--r--  drivers/md/kcopyd.c               |    1
-rw-r--r--  drivers/net/Kconfig               |   10
-rw-r--r--  drivers/net/cassini.c             |   36
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c |  307
-rw-r--r--  drivers/net/e1000/e1000_hw.c      |   18
-rw-r--r--  drivers/net/e1000/e1000_hw.h      |   20
-rw-r--r--  drivers/net/e1000/e1000_main.c    |  802
-rw-r--r--  drivers/net/e1000/e1000_osdep.h   |    2
-rw-r--r--  drivers/net/e1000/e1000_param.c   |   44
-rw-r--r--  drivers/serial/8250.c             |   13
-rw-r--r--  drivers/serial/8250_pci.c         |   10
-rw-r--r--  drivers/serial/Kconfig            |    2
-rw-r--r--  drivers/serial/at91_serial.c      |    2
-rw-r--r--  drivers/serial/sn_console.c       |  129
-rw-r--r--  drivers/serial/suncore.c          |   34
-rw-r--r--  drivers/serial/sunsab.c           |    7
30 files changed, 6890 insertions(+), 645 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 283c089537bc..bddf431bbb72 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -68,4 +68,6 @@ source "drivers/infiniband/Kconfig"
 
 source "drivers/sn/Kconfig"
 
+source "drivers/edac/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 7c45050ecd03..619dd964c51c 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_PHONE) += telephony/
 obj-$(CONFIG_MD)		+= md/
 obj-$(CONFIG_BT)		+= bluetooth/
 obj-$(CONFIG_ISDN)		+= isdn/
+obj-$(CONFIG_EDAC)		+= edac/
 obj-$(CONFIG_MCA)		+= mca/
 obj-$(CONFIG_EISA)		+= eisa/
 obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 07c9be6a6bbf..a85a60a93deb 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -2630,7 +2630,7 @@ static int get_interface(struct slgt_info *info, int __user *if_mode)
 static int set_interface(struct slgt_info *info, int if_mode)
 {
 	unsigned long flags;
-	unsigned char val;
+	unsigned short val;
 
 	DBGINFO(("%s set_interface=%x)\n", info->device_name, if_mode));
 	spin_lock_irqsave(&info->lock,flags);
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index bc56df8a3474..4c272189cd42 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -34,7 +34,6 @@
 #include <linux/kernel.h>	/* printk() */
 #include <linux/fs.h>		/* everything... */
 #include <linux/errno.h>	/* error codes */
-#include <linux/delay.h>	/* udelay */
 #include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
@@ -156,6 +155,8 @@ This directory exports the following interfaces. There operation is
 documented in the MCPBL0010 TPS under the Telecom Clock API section, 11.4.
 alarms :
 current_ref :
+received_ref_clk3a :
+received_ref_clk3b :
 enable_clk3a_output :
 enable_clk3b_output :
 enable_clka0_output :
@@ -165,7 +166,7 @@ enable_clkb1_output :
 filter_select :
 hardware_switching :
 hardware_switching_mode :
-interrupt_switch :
+telclock_version :
 mode_select :
 refalign :
 reset :
@@ -173,7 +174,6 @@ select_amcb1_transmit_clock :
 select_amcb2_transmit_clock :
 select_redundant_clock :
 select_ref_frequency :
-test_mode :
 
 All sysfs interfaces are integers in hex format, i.e echo 99 > refalign
 has the same effect as echo 0x99 > refalign.
@@ -226,7 +226,7 @@ static int tlclk_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
+static ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
 		loff_t *f_pos)
 {
 	if (count < sizeof(struct tlclk_alarms))
@@ -242,7 +242,7 @@ ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
 	return sizeof(struct tlclk_alarms);
 }
 
-ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count,
+static ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count,
 		loff_t *f_pos)
 {
 	return 0;
@@ -278,21 +278,21 @@ static ssize_t show_current_ref(struct device *d,
 static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL);
 
 
-static ssize_t show_interrupt_switch(struct device *d,
+static ssize_t show_telclock_version(struct device *d,
 		struct device_attribute *attr, char *buf)
 {
 	unsigned long ret_val;
 	unsigned long flags;
 
 	spin_lock_irqsave(&event_lock, flags);
-	ret_val = inb(TLCLK_REG6);
+	ret_val = inb(TLCLK_REG5);
 	spin_unlock_irqrestore(&event_lock, flags);
 
 	return sprintf(buf, "0x%lX\n", ret_val);
 }
 
-static DEVICE_ATTR(interrupt_switch, S_IRUGO,
-		show_interrupt_switch, NULL);
+static DEVICE_ATTR(telclock_version, S_IRUGO,
+		show_telclock_version, NULL);
 
 static ssize_t show_alarms(struct device *d,
 		struct device_attribute *attr, char *buf)
@@ -309,6 +309,50 @@ static ssize_t show_alarms(struct device *d,
 
 static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
 
+static ssize_t store_received_ref_clk3a(struct device *d,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG1, 0xef, val);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3a, S_IWUGO, NULL,
+		store_received_ref_clk3a);
+
+
+static ssize_t store_received_ref_clk3b(struct device *d,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long tmp;
+	unsigned char val;
+	unsigned long flags;
+
+	sscanf(buf, "%lX", &tmp);
+	dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+	val = (unsigned char)tmp;
+	spin_lock_irqsave(&event_lock, flags);
+	SET_PORT_BITS(TLCLK_REG1, 0xef, val << 1);
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3b, S_IWUGO, NULL,
+		store_received_ref_clk3b);
+
+
 static ssize_t store_enable_clk3b_output(struct device *d,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -436,26 +480,6 @@ static ssize_t store_enable_clka0_output(struct device *d,
 static DEVICE_ATTR(enable_clka0_output, S_IWUGO, NULL,
 		store_enable_clka0_output);
 
-static ssize_t store_test_mode(struct device *d,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	unsigned long flags;
-	unsigned long tmp;
-	unsigned char val;
-
-	sscanf(buf, "%lX", &tmp);
-	dev_dbg(d, "tmp = 0x%lX\n", tmp);
-
-	val = (unsigned char)tmp;
-	spin_lock_irqsave(&event_lock, flags);
-	SET_PORT_BITS(TLCLK_REG4, 0xfd, 2);
-	spin_unlock_irqrestore(&event_lock, flags);
-
-	return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(test_mode, S_IWUGO, NULL, store_test_mode);
-
 static ssize_t store_select_amcb2_transmit_clock(struct device *d,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -475,7 +499,7 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d,
 		SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38);
 		switch (val) {
 		case CLK_8_592MHz:
-			SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
+			SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
 			break;
 		case CLK_11_184MHz:
 			SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
@@ -484,7 +508,7 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d,
 			SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
 			break;
 		case CLK_44_736MHz:
-			SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
+			SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
 			break;
 		}
 	} else
@@ -653,9 +677,7 @@ static ssize_t store_refalign (struct device *d,
 	dev_dbg(d, "tmp = 0x%lX\n", tmp);
 	spin_lock_irqsave(&event_lock, flags);
 	SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
-	udelay(2);
 	SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08);
-	udelay(2);
 	SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
 	spin_unlock_irqrestore(&event_lock, flags);
 
@@ -706,15 +728,16 @@ static DEVICE_ATTR(reset, S_IWUGO, NULL, store_reset);
 
 static struct attribute *tlclk_sysfs_entries[] = {
 	&dev_attr_current_ref.attr,
-	&dev_attr_interrupt_switch.attr,
+	&dev_attr_telclock_version.attr,
 	&dev_attr_alarms.attr,
+	&dev_attr_received_ref_clk3a.attr,
+	&dev_attr_received_ref_clk3b.attr,
 	&dev_attr_enable_clk3a_output.attr,
 	&dev_attr_enable_clk3b_output.attr,
 	&dev_attr_enable_clkb1_output.attr,
 	&dev_attr_enable_clka1_output.attr,
 	&dev_attr_enable_clkb0_output.attr,
 	&dev_attr_enable_clka0_output.attr,
-	&dev_attr_test_mode.attr,
 	&dev_attr_select_amcb1_transmit_clock.attr,
 	&dev_attr_select_amcb2_transmit_clock.attr,
 	&dev_attr_select_redundant_clock.attr,
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
new file mode 100644
index 000000000000..4819e7fc00dd
--- /dev/null
+++ b/drivers/edac/Kconfig
@@ -0,0 +1,102 @@
#
# EDAC Kconfig
#	Copyright (c) 2003 Linux Networx
#	Licensed and distributed under the GPL
#
# $Id: Kconfig,v 1.4.2.7 2005/07/08 22:05:38 dsp_llnl Exp $
#

menu 'EDAC - error detection and reporting (RAS)'

config EDAC
	tristate "EDAC core system error reporting"
	depends on X86
	default y
	help
	  EDAC is designed to report errors in the core system.
	  These are low-level errors that are reported in the CPU or
	  supporting chipset: memory errors, cache errors, PCI errors,
	  thermal throttling, etc..  If unsure, select 'Y'.


comment "Reporting subsystems"
	depends on EDAC

config EDAC_DEBUG
	bool "Debugging"
	depends on EDAC
	help
	  This turns on debugging information for the entire EDAC
	  sub-system. You can insert module with "debug_level=x", current
	  there're four debug levels (x=0,1,2,3 from low to high).
	  Usually you should select 'N'.

config EDAC_MM_EDAC
	tristate "Main Memory EDAC (Error Detection And Correction) reporting"
	depends on EDAC
	default y
	help
	  Some systems are able to detect and correct errors in main
	  memory.  EDAC can report statistics on memory error
	  detection and correction (EDAC - or commonly referred to ECC
	  errors).  EDAC will also try to decode where these errors
	  occurred so that a particular failing memory module can be
	  replaced.  If unsure, select 'Y'.


config EDAC_AMD76X
	tristate "AMD 76x (760, 762, 768)"
	depends on EDAC_MM_EDAC && PCI
	help
	  Support for error detection and correction on the AMD 76x
	  series of chipsets used with the Athlon processor.

config EDAC_E7XXX
	tristate "Intel e7xxx (e7205, e7500, e7501, e7505)"
	depends on EDAC_MM_EDAC && PCI
	help
	  Support for error detection and correction on the Intel
	  E7205, E7500, E7501 and E7505 server chipsets.

config EDAC_E752X
	tristate "Intel e752x (e7520, e7525, e7320)"
	depends on EDAC_MM_EDAC && PCI
	help
	  Support for error detection and correction on the Intel
	  E7520, E7525, E7320 server chipsets.

config EDAC_I82875P
	tristate "Intel 82875p (D82875P, E7210)"
	depends on EDAC_MM_EDAC && PCI
	help
	  Support for error detection and correction on the Intel
	  DP82785P and E7210 server chipsets.

config EDAC_I82860
	tristate "Intel 82860"
	depends on EDAC_MM_EDAC && PCI
	help
	  Support for error detection and correction on the Intel
	  82860 chipset.

config EDAC_R82600
	tristate "Radisys 82600 embedded chipset"
	depends on EDAC_MM_EDAC
	help
	  Support for error detection and correction on the Radisys
	  82600 embedded chipset.

choice
	prompt "Error detecting method"
	depends on EDAC
	default EDAC_POLL

config EDAC_POLL
	bool "Poll for errors"
	depends on EDAC
	help
	  Poll the chipset periodically to detect errors.

endchoice

endmenu
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
new file mode 100644
index 000000000000..93137fdab4b3
--- /dev/null
+++ b/drivers/edac/Makefile
@@ -0,0 +1,18 @@
#
# Makefile for the Linux kernel EDAC drivers.
#
# Copyright 02 Jul 2003, Linux Networx (http://lnxi.com)
# This file may be distributed under the terms of the
# GNU General Public License.
#
# $Id: Makefile,v 1.4.2.3 2005/07/08 22:05:38 dsp_llnl Exp $


obj-$(CONFIG_EDAC_MM_EDAC)	+= edac_mc.o
obj-$(CONFIG_EDAC_AMD76X)	+= amd76x_edac.o
obj-$(CONFIG_EDAC_E7XXX)	+= e7xxx_edac.o
obj-$(CONFIG_EDAC_E752X)	+= e752x_edac.o
obj-$(CONFIG_EDAC_I82875P)	+= i82875p_edac.o
obj-$(CONFIG_EDAC_I82860)	+= i82860_edac.o
obj-$(CONFIG_EDAC_R82600)	+= r82600_edac.o

diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
new file mode 100644
index 000000000000..2fcc8120b53c
--- /dev/null
+++ b/drivers/edac/amd76x_edac.c
@@ -0,0 +1,356 @@
/*
 * AMD 76x Memory Controller kernel module
 * (C) 2003 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $
 *
 */


#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/slab.h>

#include "edac_mc.h"


#define AMD76X_NR_CSROWS 8
#define AMD76X_NR_CHANS  1
#define AMD76X_NR_DIMMS  4


/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
#define AMD76X_ECC_MODE_STATUS	0x48	/* Mode and status of ECC (32b)
					 *
					 * 31:16 reserved
					 * 15:14 SERR enabled: x1=ue 1x=ce
					 * 13    reserved
					 * 12    diag: disabled, enabled
					 * 11:10 mode: dis, EC, ECC, ECC+scrub
					 *  9:8  status: x1=ue 1x=ce
					 *  7:4  UE cs row
					 *  3:0  CE cs row
					 */
#define AMD76X_DRAM_MODE_STATUS	0x58	/* DRAM Mode and status (32b)
					 *
					 * 31:26 clock disable 5 - 0
					 * 25    SDRAM init
					 * 24    reserved
					 * 23    mode register service
					 * 22:21 suspend to RAM
					 * 20    burst refresh enable
					 * 19    refresh disable
					 * 18    reserved
					 * 17:16 cycles-per-refresh
					 * 15:8  reserved
					 *  7:0  x4 mode enable 7 - 0
					 */
#define AMD76X_MEM_BASE_ADDR	0xC0	/* Memory base address (8 x 32b)
					 *
					 * 31:23 chip-select base
					 * 22:16 reserved
					 * 15:7  chip-select mask
					 *  6:3  reserved
					 *  2:1  address mode
					 *  0    chip-select enable
					 */


struct amd76x_error_info {
	u32 ecc_mode_status;
};


enum amd76x_chips {
	AMD761 = 0,
	AMD762
};


struct amd76x_dev_info {
	const char *ctl_name;
};


static const struct amd76x_dev_info amd76x_devs[] = {
	[AMD761] = {.ctl_name = "AMD761"},
	[AMD762] = {.ctl_name = "AMD762"},
};


/**
 *	amd76x_get_error_info	-	fetch error information
 *	@mci: Memory controller
 *	@info: Info to fill in
 *
 *	Fetch and store the AMD76x ECC status. Clear pending status
 *	on the chip so that further errors will be reported
 */

static void amd76x_get_error_info (struct mem_ctl_info *mci,
		struct amd76x_error_info *info)
{
	pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS,
			      &info->ecc_mode_status);

	if (info->ecc_mode_status & BIT(8))
		pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
				 (u32) BIT(8), (u32) BIT(8));

	if (info->ecc_mode_status & BIT(9))
		pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
				 (u32) BIT(9), (u32) BIT(9));
}


/**
 *	amd76x_process_error_info	-	Error check
 *	@mci: Memory controller
 *	@info: Previously fetched information from chip
 *	@handle_errors: 1 if we should do recovery
 *
 *	Process the chip state and decide if an error has occurred.
 *	A return of 1 indicates an error. Also if handle_errors is true
 *	then attempt to handle and clean up after the error
 */

static int amd76x_process_error_info (struct mem_ctl_info *mci,
		struct amd76x_error_info *info, int handle_errors)
{
	int error_found;
	u32 row;

	error_found = 0;

	/*
	 * Check for an uncorrectable error
	 */
	if (info->ecc_mode_status & BIT(8)) {
		error_found = 1;

		if (handle_errors) {
			row = (info->ecc_mode_status >> 4) & 0xf;
			edac_mc_handle_ue(mci,
					  mci->csrows[row].first_page, 0, row,
					  mci->ctl_name);
		}
	}

	/*
	 * Check for a correctable error
	 */
	if (info->ecc_mode_status & BIT(9)) {
		error_found = 1;

		if (handle_errors) {
			row = info->ecc_mode_status & 0xf;
			edac_mc_handle_ce(mci,
					  mci->csrows[row].first_page, 0, 0, row, 0,
					  mci->ctl_name);
		}
	}
	return error_found;
}

/**
 *	amd76x_check	-	Poll the controller
 *	@mci: Memory controller
 *
 *	Called by the poll handlers this function reads the status
 *	from the controller and checks for errors.
 */

static void amd76x_check(struct mem_ctl_info *mci)
{
	struct amd76x_error_info info;
	debugf3("MC: " __FILE__ ": %s()\n", __func__);
	amd76x_get_error_info(mci, &info);
	amd76x_process_error_info(mci, &info, 1);
}


/**
 *	amd76x_probe1	-	Perform set up for detected device
 *	@pdev; PCI device detected
 *	@dev_idx: Device type index
 *
 *	We have found an AMD76x and now need to set up the memory
 *	controller status reporting. We configure and set up the
 *	memory controller reporting and claim the device.
 */

static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc = -ENODEV;
	int index;
	struct mem_ctl_info *mci = NULL;
	enum edac_type ems_modes[] = {
		EDAC_NONE,
		EDAC_EC,
		EDAC_SECDED,
		EDAC_SECDED
	};
	u32 ems;
	u32 ems_mode;

	debugf0("MC: " __FILE__ ": %s()\n", __func__);

	pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
	ems_mode = (ems >> 10) & 0x3;

	mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);

	if (mci == NULL) {
		rc = -ENOMEM;
		goto fail;
	}

	debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);

	mci->pdev = pci_dev_get(pdev);
	mci->mtype_cap = MEM_FLAG_RDDR;

	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
	mci->edac_cap = ems_mode ?
	    (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;

	mci->mod_name = BS_MOD_STR;
	mci->mod_ver = "$Revision: 1.4.2.5 $";
	mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
	mci->edac_check = amd76x_check;
	mci->ctl_page_to_phys = NULL;

	for (index = 0; index < mci->nr_csrows; index++) {
		struct csrow_info *csrow = &mci->csrows[index];
		u32 mba;
		u32 mba_base;
		u32 mba_mask;
		u32 dms;

		/* find the DRAM Chip Select Base address and mask */
		pci_read_config_dword(mci->pdev,
				      AMD76X_MEM_BASE_ADDR + (index * 4),
				      &mba);

		if (!(mba & BIT(0)))
			continue;

		mba_base = mba & 0xff800000UL;
		mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;

		pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS,
				      &dms);

		csrow->first_page = mba_base >> PAGE_SHIFT;
		csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
		csrow->page_mask = mba_mask >> PAGE_SHIFT;
		csrow->grain = csrow->nr_pages << PAGE_SHIFT;
		csrow->mtype = MEM_RDDR;
		csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
		csrow->edac_mode = ems_modes[ems_mode];
	}

	/* clear counters */
	pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, (u32) (0x3 << 8),
			 (u32) (0x3 << 8));

	if (edac_mc_add_mc(mci)) {
		debugf3("MC: " __FILE__
			": %s(): failed edac_mc_add_mc()\n", __func__);
		goto fail;
	}

	/* get this far and it's successful */
	debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
	return 0;

fail:
	if (mci) {
		if(mci->pdev)
			pci_dev_put(mci->pdev);
		edac_mc_free(mci);
	}
	return rc;
}

/* returns count (>= 0), or negative on error */
static int __devinit amd76x_init_one(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	debugf0("MC: " __FILE__ ": %s()\n", __func__);

	/* don't need to call pci_device_enable() */
	return amd76x_probe1(pdev, ent->driver_data);
}


/**
 *	amd76x_remove_one	-	driver shutdown
 *	@pdev: PCI device being handed back
 *
 *	Called when the driver is unloaded. Find the matching mci
 *	structure for the device then delete the mci and free the
 *	resources.
 */

static void __devexit amd76x_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;

	debugf0(__FILE__ ": %s()\n", __func__);

	if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
		return;
	if (edac_mc_del_mc(mci))
		return;
	pci_dev_put(mci->pdev);
	edac_mc_free(mci);
}


static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
	{PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 AMD762},
	{PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 AMD761},
	{0,}			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);


static struct pci_driver amd76x_driver = {
	.name = BS_MOD_STR,
	.probe = amd76x_init_one,
	.remove = __devexit_p(amd76x_remove_one),
	.id_table = amd76x_pci_tbl,
};

static int __init amd76x_init(void)
{
	return pci_register_driver(&amd76x_driver);
}

static void __exit amd76x_exit(void)
{
	pci_unregister_driver(&amd76x_driver);
}

module_init(amd76x_init);
module_exit(amd76x_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
new file mode 100644
index 000000000000..770a5a633079
--- /dev/null
+++ b/drivers/edac/e752x_edac.c
@@ -0,0 +1,1071 @@
1/*
2 * Intel e752x Memory Controller kernel module
3 * (C) 2004 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * See "enum e752x_chips" below for supported chipsets
8 *
9 * Written by Tom Zimmerman
10 *
11 * Contributors:
12 * Thayne Harbaugh at realmsys.com (?)
13 * Wang Zhenyu at intel.com
14 * Dave Jiang at mvista.com
15 *
16 * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
17 *
18 */
19
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/init.h>
24
25#include <linux/pci.h>
26#include <linux/pci_ids.h>
27
28#include <linux/slab.h>
29
30#include "edac_mc.h"
31
32
33#ifndef PCI_DEVICE_ID_INTEL_7520_0
34#define PCI_DEVICE_ID_INTEL_7520_0 0x3590
35#endif /* PCI_DEVICE_ID_INTEL_7520_0 */
36
37#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
38#define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
39#endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
40
41#ifndef PCI_DEVICE_ID_INTEL_7525_0
42#define PCI_DEVICE_ID_INTEL_7525_0 0x359E
43#endif /* PCI_DEVICE_ID_INTEL_7525_0 */
44
45#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
46#define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
47#endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
48
49#ifndef PCI_DEVICE_ID_INTEL_7320_0
50#define PCI_DEVICE_ID_INTEL_7320_0 0x3592
51#endif /* PCI_DEVICE_ID_INTEL_7320_0 */
52
53#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
54#define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
55#endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
56
57#define E752X_NR_CSROWS 8 /* number of csrows */
58
59
60/* E752X register addresses - device 0 function 0 */
61#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
62#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
63 /*
64 * 31:30 Device width row 7
65 * 01=x8 10=x4 11=x8 DDR2
66 * 27:26 Device width row 6
67 * 23:22 Device width row 5
68 * 19:20 Device width row 4
69 * 15:14 Device width row 3
70 * 11:10 Device width row 2
71 * 7:6 Device width row 1
72 * 3:2 Device width row 0
73 */
74#define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
75 /* FIXME:IS THIS RIGHT? */
76 /*
77 * 22 Number channels 0=1,1=2
78 * 19:18 DRB Granularity 32/64MB
79 */
80#define E752X_DRM 0x80 /* Dimm mapping register */
81#define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
82 /*
83 * 14:12 1 single A, 2 single B, 3 dual
84 */
85#define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
86#define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
87#define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
88#define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
89
90/* E752X register addresses - device 0 function 1 */
91#define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
92#define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
93#define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
94#define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
95#define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
96#define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
97#define E752X_SYSBUS_FERR 0x60 /* System buss first error reg (16b) */
98#define E752X_SYSBUS_NERR 0x62 /* System buss next error reg (16b) */
99#define E752X_SYSBUS_ERRMASK 0x64 /* System buss error mask reg (16b) */
100#define E752X_SYSBUS_SMICMD 0x6A /* System buss SMI command reg (16b) */
101#define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
102#define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
103#define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
104#define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI command reg (8b) */
105#define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
106#define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
107#define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
108#define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
109#define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
110#define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
111 /* error address register (32b) */
112 /*
113 * 31 Reserved
114 * 30:2 CE address (64 byte block 34:6)
115 * 1 Reserved
116 * 0 HiLoCS
117 */
118#define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */
119 /* error address register (32b) */
120 /*
121 * 31 Reserved
122 * 30:2 CE address (64 byte block 34:6)
123 * 1 Reserved
124 * 0 HiLoCS
125 */
126#define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
127 /* error address register (32b) */
128 /*
129 * 31 Reserved
130 * 30:2 CE address (64 byte block 34:6)
131 * 1 Reserved
132 * 0 HiLoCS
133 */
134#define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM first uncorrectable scrub memory */
135 /* error address register (32b) */
136 /*
137 * 31 Reserved
138 * 30:2 CE address (64 byte block 34:6)
139 * 1 Reserved
140 * 0 HiLoCS
141 */
142#define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
143 /* error syndrome register (16b) */
144#define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
145 /* error syndrome register (16b) */
146#define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
147
148/* ICH5R register addresses - device 30 function 0 */
149#define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
150#define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
151#define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
152
153enum e752x_chips {
154 E7520 = 0,
155 E7525 = 1,
156 E7320 = 2
157};
158
159
160struct e752x_pvt {
161 struct pci_dev *bridge_ck;
162 struct pci_dev *dev_d0f0;
163 struct pci_dev *dev_d0f1;
164 u32 tolm;
165 u32 remapbase;
166 u32 remaplimit;
167 int mc_symmetric;
168 u8 map[8];
169 int map_type;
170 const struct e752x_dev_info *dev_info;
171};
172
173
174struct e752x_dev_info {
175 u16 err_dev;
176 const char *ctl_name;
177};
178
179struct e752x_error_info {
180 u32 ferr_global;
181 u32 nerr_global;
182 u8 hi_ferr;
183 u8 hi_nerr;
184 u16 sysbus_ferr;
185 u16 sysbus_nerr;
186 u8 buf_ferr;
187 u8 buf_nerr;
188 u16 dram_ferr;
189 u16 dram_nerr;
190 u32 dram_sec1_add;
191 u32 dram_sec2_add;
192 u16 dram_sec1_syndrome;
193 u16 dram_sec2_syndrome;
194 u32 dram_ded_add;
195 u32 dram_scrb_add;
196 u32 dram_retr_add;
197};
198
199static const struct e752x_dev_info e752x_devs[] = {
200 [E7520] = {
201 .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
202 .ctl_name = "E7520"},
203 [E7525] = {
204 .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
205 .ctl_name = "E7525"},
206 [E7320] = {
207 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
208 .ctl_name = "E7320"},
209};
210
211
212static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
213 unsigned long page)
214{
215 u32 remap;
216 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
217
218 debugf3("MC: " __FILE__ ": %s()\n", __func__);
219
220 if (page < pvt->tolm)
221 return page;
222 if ((page >= 0x100000) && (page < pvt->remapbase))
223 return page;
224 remap = (page - pvt->tolm) + pvt->remapbase;
225 if (remap < pvt->remaplimit)
226 return remap;
227 printk(KERN_ERR "Invalid page %lx - out of range\n", page);
228 return pvt->tolm - 1;
229}
230
231static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
232 u32 sec1_add, u16 sec1_syndrome)
233{
234 u32 page;
235 int row;
236 int channel;
237 int i;
238 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
239
240 debugf3("MC: " __FILE__ ": %s()\n", __func__);
241
242 /* convert the addr to 4k page */
243 page = sec1_add >> (PAGE_SHIFT - 4);
244
245 /* FIXME - check for -1 */
246 if (pvt->mc_symmetric) {
247 /* chip select are bits 14 & 13 */
248 row = ((page >> 1) & 3);
249 printk(KERN_WARNING
250 "Test row %d Table %d %d %d %d %d %d %d %d\n",
251 row, pvt->map[0], pvt->map[1], pvt->map[2],
252 pvt->map[3], pvt->map[4], pvt->map[5],
253 pvt->map[6], pvt->map[7]);
254
255 /* test for channel remapping */
256 for (i = 0; i < 8; i++) {
257 if (pvt->map[i] == row)
258 break;
259 }
260 printk(KERN_WARNING "Test computed row %d\n", i);
261 if (i < 8)
262 row = i;
263 else
264 printk(KERN_WARNING
265 "MC%d: row %d not found in remap table\n",
266 mci->mc_idx, row);
267 } else
268 row = edac_mc_find_csrow_by_page(mci, page);
269 /* 0 = channel A, 1 = channel B */
270 channel = !(error_one & 1);
271
272 if (!pvt->map_type)
273 row = 7 - row;
274 edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel,
275 "e752x CE");
276}
277
278
279static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
280 u32 sec1_add, u16 sec1_syndrome, int *error_found,
281 int handle_error)
282{
283 *error_found = 1;
284
285 if (handle_error)
286 do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
287}
288
289static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, u32 ded_add,
290 u32 scrb_add)
291{
292 u32 error_2b, block_page;
293 int row;
294 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
295
296 debugf3("MC: " __FILE__ ": %s()\n", __func__);
297
298 if (error_one & 0x0202) {
299 error_2b = ded_add;
300 /* convert to 4k address */
301 block_page = error_2b >> (PAGE_SHIFT - 4);
302 row = pvt->mc_symmetric ?
303 /* chip select are bits 14 & 13 */
304 ((block_page >> 1) & 3) :
305 edac_mc_find_csrow_by_page(mci, block_page);
306 edac_mc_handle_ue(mci, block_page, 0, row,
307 "e752x UE from Read");
308 }
309 if (error_one & 0x0404) {
310 error_2b = scrb_add;
311 /* convert to 4k address */
312 block_page = error_2b >> (PAGE_SHIFT - 4);
313 row = pvt->mc_symmetric ?
314 /* chip select are bits 14 & 13 */
315 ((block_page >> 1) & 3) :
316 edac_mc_find_csrow_by_page(mci, block_page);
317 edac_mc_handle_ue(mci, block_page, 0, row,
318 "e752x UE from Scruber");
319 }
320}
321
322static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
323 u32 ded_add, u32 scrb_add, int *error_found, int handle_error)
324{
325 *error_found = 1;
326
327 if (handle_error)
328 do_process_ue(mci, error_one, ded_add, scrb_add);
329}
330
331static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
332 int *error_found, int handle_error)
333{
334 *error_found = 1;
335
336 if (!handle_error)
337 return;
338
339 debugf3("MC: " __FILE__ ": %s()\n", __func__);
340 edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
341}
342
343static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
344 u32 retry_add)
345{
346 u32 error_1b, page;
347 int row;
348 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
349
350 error_1b = retry_add;
351 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
352 row = pvt->mc_symmetric ?
353 ((page >> 1) & 3) : /* chip select are bits 14 & 13 */
354 edac_mc_find_csrow_by_page(mci, page);
355 printk(KERN_WARNING
356 "MC%d: CE page 0x%lx, row %d : Memory read retry\n",
357 mci->mc_idx, (long unsigned int) page, row);
358}
359
360static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
361 u32 retry_add, int *error_found, int handle_error)
362{
363 *error_found = 1;
364
365 if (handle_error)
366 do_process_ded_retry(mci, error, retry_add);
367}
368
369static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
370 int *error_found, int handle_error)
371{
372 *error_found = 1;
373
374 if (handle_error)
375 printk(KERN_WARNING "MC%d: Memory threshold CE\n",
376 mci->mc_idx);
377}
378
379static char *global_message[11] = {
380 "PCI Express C1", "PCI Express C", "PCI Express B1",
381 "PCI Express B", "PCI Express A1", "PCI Express A",
382 "DMA Controler", "HUB Interface", "System Bus",
383 "DRAM Controler", "Internal Buffer"
384};
385
386static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
387
388static void do_global_error(int fatal, u32 errors)
389{
390 int i;
391
392 for (i = 0; i < 11; i++) {
393 if (errors & (1 << i))
394 printk(KERN_WARNING "%sError %s\n",
395 fatal_message[fatal], global_message[i]);
396 }
397}
398
399static inline void global_error(int fatal, u32 errors, int *error_found,
400 int handle_error)
401{
402 *error_found = 1;
403
404 if (handle_error)
405 do_global_error(fatal, errors);
406}
407
408static char *hub_message[7] = {
409 "HI Address or Command Parity", "HI Illegal Access",
410 "HI Internal Parity", "Out of Range Access",
411 "HI Data Parity", "Enhanced Config Access",
412 "Hub Interface Target Abort"
413};
414
415static void do_hub_error(int fatal, u8 errors)
416{
417 int i;
418
419 for (i = 0; i < 7; i++) {
420 if (errors & (1 << i))
421 printk(KERN_WARNING "%sError %s\n",
422 fatal_message[fatal], hub_message[i]);
423 }
424}
425
426static inline void hub_error(int fatal, u8 errors, int *error_found,
427 int handle_error)
428{
429 *error_found = 1;
430
431 if (handle_error)
432 do_hub_error(fatal, errors);
433}
434
435static char *membuf_message[4] = {
436 "Internal PMWB to DRAM parity",
437 "Internal PMWB to System Bus Parity",
438 "Internal System Bus or IO to PMWB Parity",
439 "Internal DRAM to PMWB Parity"
440};
441
442static void do_membuf_error(u8 errors)
443{
444 int i;
445
446 for (i = 0; i < 4; i++) {
447 if (errors & (1 << i))
448 printk(KERN_WARNING "Non-Fatal Error %s\n",
449 membuf_message[i]);
450 }
451}
452
453static inline void membuf_error(u8 errors, int *error_found, int handle_error)
454{
455 *error_found = 1;
456
457 if (handle_error)
458 do_membuf_error(errors);
459}
460
461#if 0
462char *sysbus_message[10] = {
463 "Addr or Request Parity",
464 "Data Strobe Glitch",
465 "Addr Strobe Glitch",
466 "Data Parity",
467 "Addr Above TOM",
468 "Non DRAM Lock Error",
469 "MCERR", "BINIT",
470 "Memory Parity",
471 "IO Subsystem Parity"
472};
473#endif /* 0 */
474
475static void do_sysbus_error(int fatal, u32 errors)
476{
477 int i;
478
479 for (i = 0; i < 10; i++) {
480 if (errors & (1 << i))
481 printk(KERN_WARNING "%sError System Bus %s\n",
482 fatal_message[fatal], global_message[i]);
483 }
484}
485
486static inline void sysbus_error(int fatal, u32 errors, int *error_found,
487 int handle_error)
488{
489 *error_found = 1;
490
491 if (handle_error)
492 do_sysbus_error(fatal, errors);
493}
494
495static void e752x_check_hub_interface (struct e752x_error_info *info,
496 int *error_found, int handle_error)
497{
498 u8 stat8;
499
500 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
501 stat8 = info->hi_ferr;
502 if(stat8 & 0x7f) { /* Error, so process */
503 stat8 &= 0x7f;
504 if(stat8 & 0x2b)
505 hub_error(1, stat8 & 0x2b, error_found, handle_error);
506 if(stat8 & 0x54)
507 hub_error(0, stat8 & 0x54, error_found, handle_error);
508 }
509 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
510 stat8 = info->hi_nerr;
511 if(stat8 & 0x7f) { /* Error, so process */
512 stat8 &= 0x7f;
513 if (stat8 & 0x2b)
514 hub_error(1, stat8 & 0x2b, error_found, handle_error);
515 if(stat8 & 0x54)
516 hub_error(0, stat8 & 0x54, error_found, handle_error);
517 }
518}
519
520static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found,
521 int handle_error)
522{
523 u32 stat32, error32;
524
525 //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
526 stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
527
528 if (stat32 == 0)
529 return; /* no errors */
530
531 error32 = (stat32 >> 16) & 0x3ff;
532 stat32 = stat32 & 0x3ff;
533 if(stat32 & 0x083)
534 sysbus_error(1, stat32 & 0x083, error_found, handle_error);
535 if(stat32 & 0x37c)
536 sysbus_error(0, stat32 & 0x37c, error_found, handle_error);
537 if(error32 & 0x083)
538 sysbus_error(1, error32 & 0x083, error_found, handle_error);
539 if(error32 & 0x37c)
540 sysbus_error(0, error32 & 0x37c, error_found, handle_error);
541}
542
543static void e752x_check_membuf (struct e752x_error_info *info, int *error_found,
544 int handle_error)
545{
546 u8 stat8;
547
548 stat8 = info->buf_ferr;
549 if (stat8 & 0x0f) { /* Error, so process */
550 stat8 &= 0x0f;
551 membuf_error(stat8, error_found, handle_error);
552 }
553 stat8 = info->buf_nerr;
554 if (stat8 & 0x0f) { /* Error, so process */
555 stat8 &= 0x0f;
556 membuf_error(stat8, error_found, handle_error);
557 }
558}
559
560static void e752x_check_dram (struct mem_ctl_info *mci,
561 struct e752x_error_info *info, int *error_found, int handle_error)
562{
563 u16 error_one, error_next;
564
565 error_one = info->dram_ferr;
566 error_next = info->dram_nerr;
567
568 /* decode and report errors */
569 if(error_one & 0x0101) /* check first error correctable */
570 process_ce(mci, error_one, info->dram_sec1_add,
571 info->dram_sec1_syndrome, error_found,
572 handle_error);
573
574 if(error_next & 0x0101) /* check next error correctable */
575 process_ce(mci, error_next, info->dram_sec2_add,
576 info->dram_sec2_syndrome, error_found,
577 handle_error);
578
579 if(error_one & 0x4040)
580 process_ue_no_info_wr(mci, error_found, handle_error);
581
582 if(error_next & 0x4040)
583 process_ue_no_info_wr(mci, error_found, handle_error);
584
585 if(error_one & 0x2020)
586 process_ded_retry(mci, error_one, info->dram_retr_add,
587 error_found, handle_error);
588
589 if(error_next & 0x2020)
590 process_ded_retry(mci, error_next, info->dram_retr_add,
591 error_found, handle_error);
592
593 if(error_one & 0x0808)
594 process_threshold_ce(mci, error_one, error_found,
595 handle_error);
596
597 if(error_next & 0x0808)
598 process_threshold_ce(mci, error_next, error_found,
599 handle_error);
600
601 if(error_one & 0x0606)
602 process_ue(mci, error_one, info->dram_ded_add,
603 info->dram_scrb_add, error_found, handle_error);
604
605 if(error_next & 0x0606)
606 process_ue(mci, error_next, info->dram_ded_add,
607 info->dram_scrb_add, error_found, handle_error);
608}
609
610static void e752x_get_error_info (struct mem_ctl_info *mci,
611 struct e752x_error_info *info)
612{
613 struct pci_dev *dev;
614 struct e752x_pvt *pvt;
615
616 memset(info, 0, sizeof(*info));
617 pvt = (struct e752x_pvt *) mci->pvt_info;
618 dev = pvt->dev_d0f1;
619
620 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
621
622 if (info->ferr_global) {
623 pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr);
624 pci_read_config_word(dev, E752X_SYSBUS_FERR,
625 &info->sysbus_ferr);
626 pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
627 pci_read_config_word(dev, E752X_DRAM_FERR,
628 &info->dram_ferr);
629 pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
630 &info->dram_sec1_add);
631 pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
632 &info->dram_sec1_syndrome);
633 pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
634 &info->dram_ded_add);
635 pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
636 &info->dram_scrb_add);
637 pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
638 &info->dram_retr_add);
639
640 if (info->hi_ferr & 0x7f)
641 pci_write_config_byte(dev, E752X_HI_FERR,
642 info->hi_ferr);
643
644 if (info->sysbus_ferr)
645 pci_write_config_word(dev, E752X_SYSBUS_FERR,
646 info->sysbus_ferr);
647
648 if (info->buf_ferr & 0x0f)
649 pci_write_config_byte(dev, E752X_BUF_FERR,
650 info->buf_ferr);
651
652 if (info->dram_ferr)
653 pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
654 info->dram_ferr, info->dram_ferr);
655
656 pci_write_config_dword(dev, E752X_FERR_GLOBAL,
657 info->ferr_global);
658 }
659
660 pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
661
662 if (info->nerr_global) {
663 pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr);
664 pci_read_config_word(dev, E752X_SYSBUS_NERR,
665 &info->sysbus_nerr);
666 pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
667 pci_read_config_word(dev, E752X_DRAM_NERR,
668 &info->dram_nerr);
669 pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
670 &info->dram_sec2_add);
671 pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
672 &info->dram_sec2_syndrome);
673
674 if (info->hi_nerr & 0x7f)
675 pci_write_config_byte(dev, E752X_HI_NERR,
676 info->hi_nerr);
677
678 if (info->sysbus_nerr)
679 pci_write_config_word(dev, E752X_SYSBUS_NERR,
680 info->sysbus_nerr);
681
682 if (info->buf_nerr & 0x0f)
683 pci_write_config_byte(dev, E752X_BUF_NERR,
684 info->buf_nerr);
685
686 if (info->dram_nerr)
687 pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
688 info->dram_nerr, info->dram_nerr);
689
690 pci_write_config_dword(dev, E752X_NERR_GLOBAL,
691 info->nerr_global);
692 }
693}
694
695static int e752x_process_error_info (struct mem_ctl_info *mci,
696 struct e752x_error_info *info, int handle_errors)
697{
698 u32 error32, stat32;
699 int error_found;
700
701 error_found = 0;
702 error32 = (info->ferr_global >> 18) & 0x3ff;
703 stat32 = (info->ferr_global >> 4) & 0x7ff;
704
705 if (error32)
706 global_error(1, error32, &error_found, handle_errors);
707
708 if (stat32)
709 global_error(0, stat32, &error_found, handle_errors);
710
711 error32 = (info->nerr_global >> 18) & 0x3ff;
712 stat32 = (info->nerr_global >> 4) & 0x7ff;
713
714 if (error32)
715 global_error(1, error32, &error_found, handle_errors);
716
717 if (stat32)
718 global_error(0, stat32, &error_found, handle_errors);
719
720 e752x_check_hub_interface(info, &error_found, handle_errors);
721 e752x_check_sysbus(info, &error_found, handle_errors);
722 e752x_check_membuf(info, &error_found, handle_errors);
723 e752x_check_dram(mci, info, &error_found, handle_errors);
724 return error_found;
725}
726
727static void e752x_check(struct mem_ctl_info *mci)
728{
729 struct e752x_error_info info;
730 debugf3("MC: " __FILE__ ": %s()\n", __func__);
731 e752x_get_error_info(mci, &info);
732 e752x_process_error_info(mci, &info, 1);
733}
734
735static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
736{
737 int rc = -ENODEV;
738 int index;
739 u16 pci_data, stat;
740 u32 stat32;
741 u16 stat16;
742 u8 stat8;
743 struct mem_ctl_info *mci = NULL;
744 struct e752x_pvt *pvt = NULL;
745 u16 ddrcsr;
746 u32 drc;
747 int drc_chan; /* Number of channels 0=1chan,1=2chan */
748 int drc_drbg; /* DRB granularity 0=64mb,1=128mb */
749 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
750 u32 dra;
751 unsigned long last_cumul_size;
752 struct pci_dev *pres_dev;
753 struct pci_dev *dev = NULL;
754
755 debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
756 debugf0("Starting Probe1\n");
757
758 /* enable device 0 function 1 */
759 pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
760 stat8 |= (1 << 5);
761 pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
762
763 /* need to find out the number of channels */
764 pci_read_config_dword(pdev, E752X_DRC, &drc);
765 pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
766 /* FIXME: should check >>12 or 0xf, true for all? */
767 /* Dual channel = 1, Single channel = 0 */
768 drc_chan = (((ddrcsr >> 12) & 3) == 3);
769 drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
770 drc_ddim = (drc >> 20) & 0x3;
771
772 mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1);
773
774 if (mci == NULL) {
775 rc = -ENOMEM;
776 goto fail;
777 }
778
779 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
780
781 mci->mtype_cap = MEM_FLAG_RDDR;
782 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
783 EDAC_FLAG_S4ECD4ED;
784 /* FIXME - what if different memory types are in different csrows? */
785 mci->mod_name = BS_MOD_STR;
786 mci->mod_ver = "$Revision: 1.5.2.11 $";
787 mci->pdev = pdev;
788
789 debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
790 pvt = (struct e752x_pvt *) mci->pvt_info;
791 pvt->dev_info = &e752x_devs[dev_idx];
792 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
793 pvt->dev_info->err_dev,
794 pvt->bridge_ck);
795 if (pvt->bridge_ck == NULL)
796 pvt->bridge_ck = pci_scan_single_device(pdev->bus,
797 PCI_DEVFN(0, 1));
798 if (pvt->bridge_ck == NULL) {
799 printk(KERN_ERR "MC: error reporting device not found:"
800 "vendor %x device 0x%x (broken BIOS?)\n",
801 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
802 goto fail;
803 }
804 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
805
806 debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
807 mci->ctl_name = pvt->dev_info->ctl_name;
808 mci->edac_check = e752x_check;
809 mci->ctl_page_to_phys = ctl_page_to_phys;
810
811 /* find out the device types */
812 pci_read_config_dword(pdev, E752X_DRA, &dra);
813
814 /*
815 * The dram row boundary (DRB) reg values are boundary address for
816 * each DRAM row with a granularity of 64 or 128MB (single/dual
817 * channel operation). DRB regs are cumulative; therefore DRB7 will
818 * contain the total memory contained in all eight rows.
819 */
820 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
821 u8 value;
822 u32 cumul_size;
823 /* mem_dev 0=x8, 1=x4 */
824 int mem_dev = (dra >> (index * 4 + 2)) & 0x3;
825 struct csrow_info *csrow = &mci->csrows[index];
826
827 mem_dev = (mem_dev == 2);
828 pci_read_config_byte(mci->pdev, E752X_DRB + index, &value);
829 /* convert a 128 or 64 MiB DRB to a page size. */
830 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
831 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
832 __func__, index, cumul_size);
833 if (cumul_size == last_cumul_size)
834 continue; /* not populated */
835
836 csrow->first_page = last_cumul_size;
837 csrow->last_page = cumul_size - 1;
838 csrow->nr_pages = cumul_size - last_cumul_size;
839 last_cumul_size = cumul_size;
840 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
841 csrow->mtype = MEM_RDDR; /* only one type supported */
842 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
843
844 /*
845 * if single channel or x8 devices then SECDED
846 * if dual channel and x4 then S4ECD4ED
847 */
848 if (drc_ddim) {
849 if (drc_chan && mem_dev) {
850 csrow->edac_mode = EDAC_S4ECD4ED;
851 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
852 } else {
853 csrow->edac_mode = EDAC_SECDED;
854 mci->edac_cap |= EDAC_FLAG_SECDED;
855 }
856 } else
857 csrow->edac_mode = EDAC_NONE;
858 }
859
860 /* Fill in the memory map table */
861 {
862 u8 value;
863 u8 last = 0;
864 u8 row = 0;
865 for (index = 0; index < 8; index += 2) {
866
867 pci_read_config_byte(mci->pdev, E752X_DRB + index,
868 &value);
869 /* test if there is a dimm in this slot */
870 if (value == last) {
871 /* no dimm in the slot, so flag it as empty */
872 pvt->map[index] = 0xff;
873 pvt->map[index + 1] = 0xff;
874 } else { /* there is a dimm in the slot */
875 pvt->map[index] = row;
876 row++;
877 last = value;
878 /* test the next value to see if the dimm is
879 double sided */
880 pci_read_config_byte(mci->pdev,
881 E752X_DRB + index + 1,
882 &value);
883 pvt->map[index + 1] = (value == last) ?
884 0xff : /* the dimm is single sided,
885 so flag as empty */
886 row; /* this is a double sided dimm
887 to save the next row # */
888 row++;
889 last = value;
890 }
891 }
892 }
893
894 /* set the map type. 1 = normal, 0 = reversed */
895 pci_read_config_byte(mci->pdev, E752X_DRM, &stat8);
896 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
897
898 mci->edac_cap |= EDAC_FLAG_NONE;
899
900 debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
901 __func__);
902 /* load the top of low memory, remap base, and remap limit vars */
903 pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data);
904 pvt->tolm = ((u32) pci_data) << 4;
905 pci_read_config_word(mci->pdev, E752X_REMAPBASE, &pci_data);
906 pvt->remapbase = ((u32) pci_data) << 14;
907 pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data);
908 pvt->remaplimit = ((u32) pci_data) << 14;
909 printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
910 pvt->remapbase, pvt->remaplimit);
911
912 if (edac_mc_add_mc(mci)) {
913 debugf3("MC: " __FILE__
914 ": %s(): failed edac_mc_add_mc()\n",
915 __func__);
916 goto fail;
917 }
918
919 /* Walk through the PCI table and clear errors */
920 switch (dev_idx) {
921 case E7520:
922 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
923 PCI_DEVICE_ID_INTEL_7520_0, NULL);
924 break;
925 case E7525:
926 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
927 PCI_DEVICE_ID_INTEL_7525_0, NULL);
928 break;
929 case E7320:
930 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
931 PCI_DEVICE_ID_INTEL_7320_0, NULL);
932 break;
933 }
934
935
936 pvt->dev_d0f0 = dev;
937 for (pres_dev = dev;
938 ((struct pci_dev *) pres_dev->global_list.next != dev);
939 pres_dev = (struct pci_dev *) pres_dev->global_list.next) {
940 pci_read_config_dword(pres_dev, PCI_COMMAND, &stat32);
941 stat = (u16) (stat32 >> 16);
942 /* clear any error bits */
943 if (stat32 & ((1 << 6) + (1 << 8)))
944 pci_write_config_word(pres_dev, PCI_STATUS, stat);
945 }
946 /* find the error reporting device and clear errors */
947 dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
948 /* Turn off error disable & SMI in case the BIOS turned it on */
949 pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
950 pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
951 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
952 pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
953 pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
954 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
955 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
956 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
957 /* clear other MCH errors */
958 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &stat32);
959 pci_write_config_dword(dev, E752X_FERR_GLOBAL, stat32);
960 pci_read_config_dword(dev, E752X_NERR_GLOBAL, &stat32);
961 pci_write_config_dword(dev, E752X_NERR_GLOBAL, stat32);
962 pci_read_config_byte(dev, E752X_HI_FERR, &stat8);
963 pci_write_config_byte(dev, E752X_HI_FERR, stat8);
964 pci_read_config_byte(dev, E752X_HI_NERR, &stat8);
965 pci_write_config_byte(dev, E752X_HI_NERR, stat8);
966 pci_read_config_dword(dev, E752X_SYSBUS_FERR, &stat32);
967 pci_write_config_dword(dev, E752X_SYSBUS_FERR, stat32);
968 pci_read_config_byte(dev, E752X_BUF_FERR, &stat8);
969 pci_write_config_byte(dev, E752X_BUF_FERR, stat8);
970 pci_read_config_byte(dev, E752X_BUF_NERR, &stat8);
971 pci_write_config_byte(dev, E752X_BUF_NERR, stat8);
972 pci_read_config_word(dev, E752X_DRAM_FERR, &stat16);
973 pci_write_config_word(dev, E752X_DRAM_FERR, stat16);
974 pci_read_config_word(dev, E752X_DRAM_NERR, &stat16);
975 pci_write_config_word(dev, E752X_DRAM_NERR, stat16);
976
977 /* get this far and it's successful */
978 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
979 return 0;
980
981fail:
982 if (mci) {
983 if (pvt->dev_d0f0)
984 pci_dev_put(pvt->dev_d0f0);
985 if (pvt->dev_d0f1)
986 pci_dev_put(pvt->dev_d0f1);
987 if (pvt->bridge_ck)
988 pci_dev_put(pvt->bridge_ck);
989 edac_mc_free(mci);
990 }
991 return rc;
992}
993
994/* returns count (>= 0), or negative on error */
995static int __devinit e752x_init_one(struct pci_dev *pdev,
996 const struct pci_device_id *ent)
997{
998 debugf0("MC: " __FILE__ ": %s()\n", __func__);
999
1000 /* wake up and enable device */
1001 if(pci_enable_device(pdev) < 0)
1002 return -EIO;
1003 return e752x_probe1(pdev, ent->driver_data);
1004}
1005
1006
1007static void __devexit e752x_remove_one(struct pci_dev *pdev)
1008{
1009 struct mem_ctl_info *mci;
1010 struct e752x_pvt *pvt;
1011
1012 debugf0(__FILE__ ": %s()\n", __func__);
1013
1014 if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
1015 return;
1016
1017 if (edac_mc_del_mc(mci))
1018 return;
1019
1020 pvt = (struct e752x_pvt *) mci->pvt_info;
1021 pci_dev_put(pvt->dev_d0f0);
1022 pci_dev_put(pvt->dev_d0f1);
1023 pci_dev_put(pvt->bridge_ck);
1024 edac_mc_free(mci);
1025}
1026
1027
1028static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
1029 {PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1030 E7520},
1031 {PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1032 E7525},
1033 {PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1034 E7320},
1035 {0,} /* 0 terminated list. */
1036};
1037
1038MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
1039
1040
1041static struct pci_driver e752x_driver = {
1042	.name = BS_MOD_STR,
1043	.probe = e752x_init_one,
1044	.remove = __devexit_p(e752x_remove_one),
1045	.id_table = e752x_pci_tbl,
1046};
1047
1048
1049static int __init e752x_init(void)
1050{
1051 int pci_rc;
1052
1053 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1054 pci_rc = pci_register_driver(&e752x_driver);
1055 return (pci_rc < 0) ? pci_rc : 0;
1056}
1057
1058
1059static void __exit e752x_exit(void)
1060{
1061 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1062 pci_unregister_driver(&e752x_driver);
1063}
1064
1065
1066module_init(e752x_init);
1067module_exit(e752x_exit);
1068
1069MODULE_LICENSE("GPL");
1070MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman");
1071MODULE_DESCRIPTION("MC support for Intel e752x memory controllers");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
new file mode 100644
index 000000000000..d5e320dfc66f
--- /dev/null
+++ b/drivers/edac/e7xxx_edac.c
@@ -0,0 +1,558 @@
1/*
2 * Intel e7xxx Memory Controller kernel module
3 * (C) 2003 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * See "enum e7xxx_chips" below for supported chipsets
8 *
9 * Written by Thayne Harbaugh
10 * Based on work by Dan Hollis <goemon at anime dot net> and others.
11 * http://www.anime.net/~goemon/linux-ecc/
12 *
13 * Contributors:
14 * Eric Biederman (Linux Networx)
15 * Tom Zimmerman (Linux Networx)
16 * Jim Garlick (Lawrence Livermore National Labs)
17 * Dave Peterson (Lawrence Livermore National Labs)
18 * That One Guy (Some other place)
19 * Wang Zhenyu (intel.com)
20 *
21 * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $
22 *
23 */
24
25
26#include <linux/config.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/pci.h>
30#include <linux/pci_ids.h>
31#include <linux/slab.h>
32#include "edac_mc.h"
33
34
35#ifndef PCI_DEVICE_ID_INTEL_7205_0
36#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
37#endif /* PCI_DEVICE_ID_INTEL_7205_0 */
38
39#ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR
40#define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551
41#endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */
42
43#ifndef PCI_DEVICE_ID_INTEL_7500_0
44#define PCI_DEVICE_ID_INTEL_7500_0 0x2540
45#endif /* PCI_DEVICE_ID_INTEL_7500_0 */
46
47#ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR
48#define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541
49#endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */
50
51#ifndef PCI_DEVICE_ID_INTEL_7501_0
52#define PCI_DEVICE_ID_INTEL_7501_0 0x254c
53#endif /* PCI_DEVICE_ID_INTEL_7501_0 */
54
55#ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR
56#define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541
57#endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */
58
59#ifndef PCI_DEVICE_ID_INTEL_7505_0
60#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
61#endif /* PCI_DEVICE_ID_INTEL_7505_0 */
62
63#ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR
64#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551
65#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
66
67
68#define E7XXX_NR_CSROWS 8 /* number of csrows */
69#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
70
71
72/* E7XXX register addresses - device 0 function 0 */
73#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
74#define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */
75 /*
76 * 31 Device width row 7 0=x8 1=x4
77 * 27 Device width row 6
78 * 23 Device width row 5
79 * 19 Device width row 4
80 * 15 Device width row 3
81 * 11 Device width row 2
82 * 7 Device width row 1
83 * 3 Device width row 0
84 */
85#define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */
86 /*
87 * 22 Number channels 0=1,1=2
88 * 19:18 DRB Granularity 32/64MB
89 */
90#define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
91#define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
92#define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
93
94/* E7XXX register addresses - device 0 function 1 */
95#define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */
96#define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */
97#define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */
98 /* error address register (32b) */
99 /*
100 * 31:28 Reserved
101 * 27:6 CE address (4k block 33:12)
102 * 5:0 Reserved
103 */
104#define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */
105 /* error address register (32b) */
106 /*
107 * 31:28 Reserved
108	 *	27:6  UE address (4k block 33:12)
109 * 5:0 Reserved
110 */
111#define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */
112 /* error syndrome register (16b) */
113
114enum e7xxx_chips {
115 E7500 = 0,
116 E7501,
117 E7505,
118 E7205,
119};
120
121
122struct e7xxx_pvt {
123 struct pci_dev *bridge_ck;
124 u32 tolm;
125 u32 remapbase;
126 u32 remaplimit;
127 const struct e7xxx_dev_info *dev_info;
128};
129
130
131struct e7xxx_dev_info {
132 u16 err_dev;
133 const char *ctl_name;
134};
135
136
137struct e7xxx_error_info {
138 u8 dram_ferr;
139 u8 dram_nerr;
140 u32 dram_celog_add;
141 u16 dram_celog_syndrome;
142 u32 dram_uelog_add;
143};
144
145static const struct e7xxx_dev_info e7xxx_devs[] = {
146 [E7500] = {
147 .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
148 .ctl_name = "E7500"},
149 [E7501] = {
150 .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
151 .ctl_name = "E7501"},
152 [E7505] = {
153 .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
154 .ctl_name = "E7505"},
155 [E7205] = {
156 .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
157 .ctl_name = "E7205"},
158};
159
160
161/* FIXME - is this valid for both SECDED and S4ECD4ED? */
162static inline int e7xxx_find_channel(u16 syndrome)
163{
164 debugf3("MC: " __FILE__ ": %s()\n", __func__);
165
166 if ((syndrome & 0xff00) == 0)
167 return 0;
168 if ((syndrome & 0x00ff) == 0)
169 return 1;
170 if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
171 return 0;
172 return 1;
173}
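The decision table above assumes the low syndrome byte belongs to channel 0 and the high byte to channel 1. A small user-space sketch exercising the same logic (illustrative values only):

#include <assert.h>
#include <stdint.h>

static int find_channel(uint16_t syndrome)
{
	if ((syndrome & 0xff00) == 0)
		return 0;
	if ((syndrome & 0x00ff) == 0)
		return 1;
	if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
		return 0;
	return 1;
}

int main(void)
{
	assert(find_channel(0x0004) == 0);	/* bits only in low byte   */
	assert(find_channel(0x4000) == 1);	/* bits only in high byte  */
	assert(find_channel(0x0104) == 0);	/* mixed, high nibble clear */
	return 0;
}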
174
175
176static unsigned long
177ctl_page_to_phys(struct mem_ctl_info *mci, unsigned long page)
178{
179 u32 remap;
180 struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info;
181
182 debugf3("MC: " __FILE__ ": %s()\n", __func__);
183
184 if ((page < pvt->tolm) ||
185 ((page >= 0x100000) && (page < pvt->remapbase)))
186 return page;
187 remap = (page - pvt->tolm) + pvt->remapbase;
188 if (remap < pvt->remaplimit)
189 return remap;
190 printk(KERN_ERR "Invalid page %lx - out of range\n", page);
191 return pvt->tolm - 1;
192}
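In other words, pages below tolm (or in the gap up to 4 GiB) map one-to-one, while pages displaced by the MMIO hole reappear at remapbase. A worked user-space example with hypothetical register values, mirroring the arithmetic above:

#include <stdio.h>

int main(void)
{
	/* Hypothetical values, in 4 KiB pages: 3 GiB tolm, remap at 4 GiB. */
	unsigned long tolm = 0xC0000;		/* 3 GiB / 4 KiB */
	unsigned long remapbase = 0x100000;	/* 4 GiB / 4 KiB */
	unsigned long remaplimit = 0x140000;
	unsigned long page = 0xD0000;		/* falls inside the hole */

	if (page < tolm || (page >= 0x100000 && page < remapbase))
		printf("page %#lx maps 1:1\n", page);
	else if ((page - tolm) + remapbase < remaplimit)
		printf("page %#lx remaps to %#lx\n", page,
		       (page - tolm) + remapbase);
	return 0;
}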
193
194
195static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
196{
197 u32 error_1b, page;
198 u16 syndrome;
199 int row;
200 int channel;
201
202 debugf3("MC: " __FILE__ ": %s()\n", __func__);
203
204 /* read the error address */
205 error_1b = info->dram_celog_add;
206 /* FIXME - should use PAGE_SHIFT */
207 page = error_1b >> 6; /* convert the address to 4k page */
208 /* read the syndrome */
209 syndrome = info->dram_celog_syndrome;
210 /* FIXME - check for -1 */
211 row = edac_mc_find_csrow_by_page(mci, page);
212 /* convert syndrome to channel */
213 channel = e7xxx_find_channel(syndrome);
214 edac_mc_handle_ce(mci, page, 0, syndrome, row, channel,
215 "e7xxx CE");
216}
217
218
219static void process_ce_no_info(struct mem_ctl_info *mci)
220{
221 debugf3("MC: " __FILE__ ": %s()\n", __func__);
222 edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
223}
224
225
226static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
227{
228 u32 error_2b, block_page;
229 int row;
230
231 debugf3("MC: " __FILE__ ": %s()\n", __func__);
232
233 /* read the error address */
234 error_2b = info->dram_uelog_add;
235 /* FIXME - should use PAGE_SHIFT */
236 block_page = error_2b >> 6; /* convert to 4k address */
237 row = edac_mc_find_csrow_by_page(mci, block_page);
238 edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
239}
240
241
242static void process_ue_no_info(struct mem_ctl_info *mci)
243{
244 debugf3("MC: " __FILE__ ": %s()\n", __func__);
245 edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
246}
247
248
249static void e7xxx_get_error_info (struct mem_ctl_info *mci,
250 struct e7xxx_error_info *info)
251{
252 struct e7xxx_pvt *pvt;
253
254 pvt = (struct e7xxx_pvt *) mci->pvt_info;
255 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR,
256 &info->dram_ferr);
257 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR,
258 &info->dram_nerr);
259
260 if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
261 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
262 &info->dram_celog_add);
263 pci_read_config_word(pvt->bridge_ck,
264 E7XXX_DRAM_CELOG_SYNDROME, &info->dram_celog_syndrome);
265 }
266
267 if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
268 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
269 &info->dram_uelog_add);
270
271 if (info->dram_ferr & 3)
272 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03,
273 0x03);
274
275 if (info->dram_nerr & 3)
276 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03,
277 0x03);
278}
279
280
281static int e7xxx_process_error_info (struct mem_ctl_info *mci,
282 struct e7xxx_error_info *info, int handle_errors)
283{
284 int error_found;
285
286 error_found = 0;
287
288 /* decode and report errors */
289 if (info->dram_ferr & 1) { /* check first error correctable */
290 error_found = 1;
291
292 if (handle_errors)
293 process_ce(mci, info);
294 }
295
296 if (info->dram_ferr & 2) { /* check first error uncorrectable */
297 error_found = 1;
298
299 if (handle_errors)
300 process_ue(mci, info);
301 }
302
303 if (info->dram_nerr & 1) { /* check next error correctable */
304 error_found = 1;
305
306 if (handle_errors) {
307 if (info->dram_ferr & 1)
308 process_ce_no_info(mci);
309 else
310 process_ce(mci, info);
311 }
312 }
313
314 if (info->dram_nerr & 2) { /* check next error uncorrectable */
315 error_found = 1;
316
317 if (handle_errors) {
318 if (info->dram_ferr & 2)
319 process_ue_no_info(mci);
320 else
321 process_ue(mci, info);
322 }
323 }
324
325 return error_found;
326}
327
328
329static void e7xxx_check(struct mem_ctl_info *mci)
330{
331 struct e7xxx_error_info info;
332
333 debugf3("MC: " __FILE__ ": %s()\n", __func__);
334 e7xxx_get_error_info(mci, &info);
335 e7xxx_process_error_info(mci, &info, 1);
336}
337
338
339static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
340{
341 int rc = -ENODEV;
342 int index;
343 u16 pci_data;
344 struct mem_ctl_info *mci = NULL;
345 struct e7xxx_pvt *pvt = NULL;
346 u32 drc;
347 int drc_chan = 1; /* Number of channels 0=1chan,1=2chan */
348 int drc_drbg = 1; /* DRB granularity 0=32mb,1=64mb */
349 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
350 u32 dra;
351 unsigned long last_cumul_size;
352
353
354 debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
355
356 /* need to find out the number of channels */
357 pci_read_config_dword(pdev, E7XXX_DRC, &drc);
358 /* only e7501 can be single channel */
359 if (dev_idx == E7501) {
360 drc_chan = ((drc >> 22) & 0x1);
361 drc_drbg = (drc >> 18) & 0x3;
362 }
363 drc_ddim = (drc >> 20) & 0x3;
364
365 mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
366
367 if (mci == NULL) {
368 rc = -ENOMEM;
369 goto fail;
370 }
371
372 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
373
374 mci->mtype_cap = MEM_FLAG_RDDR;
375 mci->edac_ctl_cap =
376 EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED;
377 /* FIXME - what if different memory types are in different csrows? */
378 mci->mod_name = BS_MOD_STR;
379 mci->mod_ver = "$Revision: 1.5.2.9 $";
380 mci->pdev = pdev;
381
382 debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
383 pvt = (struct e7xxx_pvt *) mci->pvt_info;
384 pvt->dev_info = &e7xxx_devs[dev_idx];
385 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
386 pvt->dev_info->err_dev,
387 pvt->bridge_ck);
388 if (!pvt->bridge_ck) {
389 printk(KERN_ERR
390		       "MC: error reporting device not found: "
391		       "vendor %x device 0x%x (broken BIOS?)\n",
392 PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
393 goto fail;
394 }
395
396 debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
397 mci->ctl_name = pvt->dev_info->ctl_name;
398
399 mci->edac_check = e7xxx_check;
400 mci->ctl_page_to_phys = ctl_page_to_phys;
401
402 /* find out the device types */
403 pci_read_config_dword(pdev, E7XXX_DRA, &dra);
404
405 /*
406 * The dram row boundary (DRB) reg values are boundary address
407 * for each DRAM row with a granularity of 32 or 64MB (single/dual
408 * channel operation). DRB regs are cumulative; therefore DRB7 will
409 * contain the total memory contained in all eight rows.
410 */
411 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
412 u8 value;
413 u32 cumul_size;
414 /* mem_dev 0=x8, 1=x4 */
415 int mem_dev = (dra >> (index * 4 + 3)) & 0x1;
416 struct csrow_info *csrow = &mci->csrows[index];
417
418 pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value);
419 /* convert a 64 or 32 MiB DRB to a page size. */
420 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
421 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
422 __func__, index, cumul_size);
423 if (cumul_size == last_cumul_size)
424 continue; /* not populated */
425
426 csrow->first_page = last_cumul_size;
427 csrow->last_page = cumul_size - 1;
428 csrow->nr_pages = cumul_size - last_cumul_size;
429 last_cumul_size = cumul_size;
430 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
431 csrow->mtype = MEM_RDDR; /* only one type supported */
432 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
433
434 /*
435 * if single channel or x8 devices then SECDED
436 * if dual channel and x4 then S4ECD4ED
437 */
438 if (drc_ddim) {
439 if (drc_chan && mem_dev) {
440 csrow->edac_mode = EDAC_S4ECD4ED;
441 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
442 } else {
443 csrow->edac_mode = EDAC_SECDED;
444 mci->edac_cap |= EDAC_FLAG_SECDED;
445 }
446 } else
447 csrow->edac_mode = EDAC_NONE;
448 }
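Because the DRB boundaries are cumulative, a csrow's size is the difference between consecutive values, and two equal values mean an unpopulated row. A user-space sketch of the same arithmetic, assuming hypothetical DRB contents, 64 MiB granularity and 4 KiB pages:

#include <stdio.h>

int main(void)
{
	/* Hypothetical cumulative DRB values: rows 0,1 = 512 MiB each. */
	unsigned char drb[8] = { 8, 16, 16, 16, 16, 16, 16, 16 };
	unsigned long last = 0;
	int drc_drbg = 1;		/* 64 MiB granularity */
	int shift = 25 + drc_drbg - 12;	/* 12 == log2(4 KiB page) */

	for (int i = 0; i < 8; i++) {
		unsigned long cumul = (unsigned long)drb[i] << shift;
		if (cumul == last)
			continue;	/* row not populated */
		printf("csrow%d: pages %#lx-%#lx (%lu pages)\n",
		       i, last, cumul - 1, cumul - last);
		last = cumul;
	}
	return 0;
}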
449
450 mci->edac_cap |= EDAC_FLAG_NONE;
451
452 debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
453 __func__);
454 /* load the top of low memory, remap base, and remap limit vars */
455 pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data);
456 pvt->tolm = ((u32) pci_data) << 4;
457 pci_read_config_word(mci->pdev, E7XXX_REMAPBASE, &pci_data);
458 pvt->remapbase = ((u32) pci_data) << 14;
459 pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data);
460 pvt->remaplimit = ((u32) pci_data) << 14;
461	printk(KERN_INFO "tolm = %x, remapbase = %x, remaplimit = %x\n",
462	       pvt->tolm, pvt->remapbase, pvt->remaplimit);
463
464 /* clear any pending errors, or initial state bits */
465 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
466 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
467
468 if (edac_mc_add_mc(mci) != 0) {
469 debugf3("MC: " __FILE__
470 ": %s(): failed edac_mc_add_mc()\n",
471 __func__);
472 goto fail;
473 }
474
475 /* get this far and it's successful */
476 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
477 return 0;
478
479fail:
480 if (mci != NULL) {
481		if (pvt != NULL && pvt->bridge_ck)
482 pci_dev_put(pvt->bridge_ck);
483 edac_mc_free(mci);
484 }
485
486 return rc;
487}
488
489/* returns 0 on success, or negative on error */
490static int __devinit
491e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
492{
493 debugf0("MC: " __FILE__ ": %s()\n", __func__);
494
495 /* wake up and enable device */
496 return pci_enable_device(pdev) ?
497 -EIO : e7xxx_probe1(pdev, ent->driver_data);
498}
499
500
501static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
502{
503 struct mem_ctl_info *mci;
504 struct e7xxx_pvt *pvt;
505
506 debugf0(__FILE__ ": %s()\n", __func__);
507
508	if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) &&
509	    (edac_mc_del_mc(mci) == 0)) {
510 pvt = (struct e7xxx_pvt *) mci->pvt_info;
511 pci_dev_put(pvt->bridge_ck);
512 edac_mc_free(mci);
513 }
514}
515
516
517static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
518 {PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
519 E7205},
520 {PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
521 E7500},
522 {PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
523 E7501},
524 {PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
525 E7505},
526 {0,} /* 0 terminated list. */
527};
528
529MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
530
531
532static struct pci_driver e7xxx_driver = {
533 .name = BS_MOD_STR,
534 .probe = e7xxx_init_one,
535 .remove = __devexit_p(e7xxx_remove_one),
536 .id_table = e7xxx_pci_tbl,
537};
538
539
540static int __init e7xxx_init(void)
541{
542 return pci_register_driver(&e7xxx_driver);
543}
544
545
546static void __exit e7xxx_exit(void)
547{
548 pci_unregister_driver(&e7xxx_driver);
549}
550
551module_init(e7xxx_init);
552module_exit(e7xxx_exit);
553
554
555MODULE_LICENSE("GPL");
556MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
557	      "Based on work by Dan Hollis et al");
558MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
new file mode 100644
index 000000000000..4be9bd0a1267
--- /dev/null
+++ b/drivers/edac/edac_mc.c
@@ -0,0 +1,2209 @@
1/*
2 * edac_mc kernel module
3 * (C) 2005 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Thayne Harbaugh
8 * Based on work by Dan Hollis <goemon at anime dot net> and others.
9 * http://www.anime.net/~goemon/linux-ecc/
10 *
11 * Modified by Dave Peterson and Doug Thompson
12 *
13 */
14
15
16#include <linux/config.h>
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/proc_fs.h>
20#include <linux/kernel.h>
21#include <linux/types.h>
22#include <linux/smp.h>
23#include <linux/init.h>
24#include <linux/sysctl.h>
25#include <linux/highmem.h>
26#include <linux/timer.h>
27#include <linux/slab.h>
28#include <linux/jiffies.h>
29#include <linux/spinlock.h>
30#include <linux/list.h>
31#include <linux/sysdev.h>
32#include <linux/ctype.h>
33
34#include <asm/uaccess.h>
35#include <asm/page.h>
36#include <asm/edac.h>
37
38#include "edac_mc.h"
39
40#define EDAC_MC_VERSION "edac_mc Ver: 2.0.0 " __DATE__
41
42#ifdef CONFIG_EDAC_DEBUG
43/* Values of 0 to 4 will generate output */
44int edac_debug_level = 1;
45EXPORT_SYMBOL(edac_debug_level);
46#endif
47
48/* EDAC controls, settable by module parameter and sysfs */
49static int log_ue = 1;
50static int log_ce = 1;
51static int panic_on_ue = 1;
52static int poll_msec = 1000;
53
54static int check_pci_parity = 0; /* default: do not check PCI parity */
55static int panic_on_pci_parity; /* default no panic on PCI Parity */
56static atomic_t pci_parity_count = ATOMIC_INIT(0);
57
58/* lock to memory controller's control array */
59static DECLARE_MUTEX(mem_ctls_mutex);
60static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
61
62/* Structure of the whitelist and blacklist arrays */
63struct edac_pci_device_list {
64 unsigned int vendor; /* Vendor ID */
65	unsigned int device;	/* Device ID */
66};
67
68
69#define MAX_LISTED_PCI_DEVICES 32
70
71/* List of PCI devices (vendor-id:device-id) that should be skipped */
72static struct edac_pci_device_list pci_blacklist[MAX_LISTED_PCI_DEVICES];
73static int pci_blacklist_count;
74
75/* List of PCI devices (vendor-id:device-id) that should be scanned */
76static struct edac_pci_device_list pci_whitelist[MAX_LISTED_PCI_DEVICES];
77static int pci_whitelist_count;
78
79/* START sysfs data and methods */
80
81static const char *mem_types[] = {
82 [MEM_EMPTY] = "Empty",
83 [MEM_RESERVED] = "Reserved",
84 [MEM_UNKNOWN] = "Unknown",
85 [MEM_FPM] = "FPM",
86 [MEM_EDO] = "EDO",
87 [MEM_BEDO] = "BEDO",
88 [MEM_SDR] = "Unbuffered-SDR",
89 [MEM_RDR] = "Registered-SDR",
90 [MEM_DDR] = "Unbuffered-DDR",
91 [MEM_RDDR] = "Registered-DDR",
92 [MEM_RMBS] = "RMBS"
93};
94
95static const char *dev_types[] = {
96 [DEV_UNKNOWN] = "Unknown",
97 [DEV_X1] = "x1",
98 [DEV_X2] = "x2",
99 [DEV_X4] = "x4",
100 [DEV_X8] = "x8",
101 [DEV_X16] = "x16",
102 [DEV_X32] = "x32",
103 [DEV_X64] = "x64"
104};
105
106static const char *edac_caps[] = {
107 [EDAC_UNKNOWN] = "Unknown",
108 [EDAC_NONE] = "None",
109 [EDAC_RESERVED] = "Reserved",
110 [EDAC_PARITY] = "PARITY",
111 [EDAC_EC] = "EC",
112 [EDAC_SECDED] = "SECDED",
113 [EDAC_S2ECD2ED] = "S2ECD2ED",
114 [EDAC_S4ECD4ED] = "S4ECD4ED",
115 [EDAC_S8ECD8ED] = "S8ECD8ED",
116 [EDAC_S16ECD16ED] = "S16ECD16ED"
117};
118
119
120/* sysfs object: /sys/devices/system/edac */
121static struct sysdev_class edac_class = {
122 set_kset_name("edac"),
123};
124
125/* sysfs objects:
126 * /sys/devices/system/edac/mc
127 * /sys/devices/system/edac/pci
128 */
129static struct kobject edac_memctrl_kobj;
130static struct kobject edac_pci_kobj;
131
132/*
133 * /sys/devices/system/edac/mc;
134 * data structures and methods
135 */
136static ssize_t memctrl_string_show(void *ptr, char *buffer)
137{
138 char *value = (char*) ptr;
139 return sprintf(buffer, "%s\n", value);
140}
141
142static ssize_t memctrl_int_show(void *ptr, char *buffer)
143{
144 int *value = (int*) ptr;
145 return sprintf(buffer, "%d\n", *value);
146}
147
148static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
149{
150 int *value = (int*) ptr;
151
152 if (isdigit(*buffer))
153 *value = simple_strtoul(buffer, NULL, 0);
154
155 return count;
156}
157
158struct memctrl_dev_attribute {
159 struct attribute attr;
160 void *value;
161 ssize_t (*show)(void *,char *);
162 ssize_t (*store)(void *, const char *, size_t);
163};
164
165/* Set of show/store abstract level functions for memory control object */
166static ssize_t
167memctrl_dev_show(struct kobject *kobj, struct attribute *attr, char *buffer)
168{
169 struct memctrl_dev_attribute *memctrl_dev;
170 memctrl_dev = (struct memctrl_dev_attribute*)attr;
171
172 if (memctrl_dev->show)
173 return memctrl_dev->show(memctrl_dev->value, buffer);
174 return -EIO;
175}
176
177static ssize_t
178memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
179 const char *buffer, size_t count)
180{
181 struct memctrl_dev_attribute *memctrl_dev;
182 memctrl_dev = (struct memctrl_dev_attribute*)attr;
183
184 if (memctrl_dev->store)
185 return memctrl_dev->store(memctrl_dev->value, buffer, count);
186 return -EIO;
187}
188
189static struct sysfs_ops memctrlfs_ops = {
190 .show = memctrl_dev_show,
191 .store = memctrl_dev_store
192};
193
194#define MEMCTRL_ATTR(_name,_mode,_show,_store) \
195struct memctrl_dev_attribute attr_##_name = { \
196 .attr = {.name = __stringify(_name), .mode = _mode }, \
197 .value = &_name, \
198 .show = _show, \
199 .store = _store, \
200};
201
202#define MEMCTRL_STRING_ATTR(_name,_data,_mode,_show,_store) \
203struct memctrl_dev_attribute attr_##_name = { \
204 .attr = {.name = __stringify(_name), .mode = _mode }, \
205 .value = _data, \
206 .show = _show, \
207 .store = _store, \
208};
209
210/* mc_version string attribute */
211MEMCTRL_STRING_ATTR(mc_version,EDAC_MC_VERSION,S_IRUGO,memctrl_string_show,NULL);
212
213/* memory controller control files */
214MEMCTRL_ATTR(panic_on_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
215MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
216MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
217MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
218
219
220/* Base Attributes of the memory ECC object */
221static struct memctrl_dev_attribute *memctrl_attr[] = {
222 &attr_panic_on_ue,
223 &attr_log_ue,
224 &attr_log_ce,
225 &attr_poll_msec,
226 &attr_mc_version,
227 NULL,
228};
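For reference, each MEMCTRL_ATTR() line above expands to a static attribute object bound to the module-scope variable of the same name; the first invocation is equivalent to:

struct memctrl_dev_attribute attr_panic_on_ue = {
	.attr = { .name = "panic_on_ue", .mode = S_IRUGO | S_IWUSR },
	.value = &panic_on_ue,
	.show = memctrl_int_show,
	.store = memctrl_int_store,
};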
229
230/* Main MC kobject release() function */
231static void edac_memctrl_master_release(struct kobject *kobj)
232{
233 debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
234}
235
236static struct kobj_type ktype_memctrl = {
237 .release = edac_memctrl_master_release,
238 .sysfs_ops = &memctrlfs_ops,
239 .default_attrs = (struct attribute **) memctrl_attr,
240};
241
242
243/* Initialize the main sysfs entries for edac:
244 * /sys/devices/system/edac
245 *
246 * and children
247 *
248 * Return: 0 SUCCESS
249 * !0 FAILURE
250 */
251static int edac_sysfs_memctrl_setup(void)
252{
253 int err=0;
254
255 debugf1("MC: " __FILE__ ": %s()\n", __func__);
256
257 /* create the /sys/devices/system/edac directory */
258 err = sysdev_class_register(&edac_class);
259 if (!err) {
260 /* Init the MC's kobject */
261 memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj));
262 kobject_init(&edac_memctrl_kobj);
263
264 edac_memctrl_kobj.parent = &edac_class.kset.kobj;
265 edac_memctrl_kobj.ktype = &ktype_memctrl;
266
267 /* generate sysfs "..../edac/mc" */
268 err = kobject_set_name(&edac_memctrl_kobj,"mc");
269 if (!err) {
270 /* FIXME: maybe new sysdev_create_subdir() */
271 err = kobject_register(&edac_memctrl_kobj);
272 if (err) {
273 debugf1("Failed to register '.../edac/mc'\n");
274 } else {
275 debugf1("Registered '.../edac/mc' kobject\n");
276 }
277 }
278 } else {
279		debugf1(KERN_WARNING __FILE__ ": %s() error=%d\n", __func__, err);
280 }
281
282 return err;
283}
284
285/*
286 * MC teardown:
287 * the '..../edac/mc' kobject followed by '..../edac' itself
288 */
289static void edac_sysfs_memctrl_teardown(void)
290{
291 debugf0("MC: " __FILE__ ": %s()\n", __func__);
292
293 /* Unregister the MC's kobject */
294 kobject_unregister(&edac_memctrl_kobj);
295
296 /* release the master edac mc kobject */
297 kobject_put(&edac_memctrl_kobj);
298
299 /* Unregister the 'edac' object */
300 sysdev_class_unregister(&edac_class);
301}
302
303/*
304 * /sys/devices/system/edac/pci;
305 * data structures and methods
306 */
307
308struct list_control {
309 struct edac_pci_device_list *list;
310 int *count;
311};
312
313/* Output the list as: vendor_id:device_id<,vendor_id:device_id> */
314static ssize_t edac_pci_list_string_show(void *ptr, char *buffer)
315{
316 struct list_control *listctl;
317 struct edac_pci_device_list *list;
318 char *p = buffer;
319 int len=0;
320 int i;
321
322 listctl = ptr;
323 list = listctl->list;
324
325 for (i = 0; i < *(listctl->count); i++, list++ ) {
326 if (len > 0)
327 len += snprintf(p + len, (PAGE_SIZE-len), ",");
328
329 len += snprintf(p + len,
330 (PAGE_SIZE-len),
331 "%x:%x",
332 list->vendor,list->device);
333 }
334
335 len += snprintf(p + len,(PAGE_SIZE-len), "\n");
336
337 return (ssize_t) len;
338}
339
340/*
341 * parse_one_device()
342 * Scan the string from *s to *e looking for one 'vendor:device' tuple,
343 * where each field is a hex value.
344 *
345 * Return 0 if an entry is NOT found.
346 * Return 1 if an entry is found,
347 * and fill in *vendor_id and *device_id with the values found.
348 *
349 * In both cases, make sure *s has been moved forward toward *e.
350 */
351static int parse_one_device(const char **s,const char **e,
352 unsigned int *vendor_id, unsigned int *device_id)
353{
354 const char *runner, *p;
355
356 /* if null byte, we are done */
357 if (!**s) {
358 (*s)++; /* keep *s moving */
359 return 0;
360 }
361
362 /* skip over newlines & whitespace */
363 if ((**s == '\n') || isspace(**s)) {
364 (*s)++;
365 return 0;
366 }
367
368 if (!isxdigit(**s)) {
369 (*s)++;
370 return 0;
371 }
372
373 /* parse vendor_id */
374 runner = *s;
375 while (runner < *e) {
376 /* scan for vendor:device delimiter */
377 if (*runner == ':') {
378 *vendor_id = simple_strtol((char*) *s, (char**) &p, 16);
379 runner = p + 1;
380 break;
381 }
382 runner++;
383 }
384
385 if (!isxdigit(*runner)) {
386 *s = ++runner;
387 return 0;
388 }
389
390 /* parse device_id */
391 if (runner < *e) {
392 *device_id = simple_strtol((char*)runner, (char**)&p, 16);
393 runner = p;
394 }
395
396 *s = runner;
397
398 return 1;
399}
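The store path below simply loops this parser over the incoming buffer, one 'vendor:device' pair per call. A user-space sketch of equivalent parsing over a sample string (strtoul stands in for the kernel's simple_strtol):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *s = "8086:2541,8086:1a30\n";
	const char *e = s + strlen(s);

	while (s < e) {
		char *p;
		unsigned long vendor = strtoul(s, &p, 16);
		if (p == s || *p != ':') {	/* no hex digits or no ':' */
			s++;
			continue;
		}
		unsigned long device = strtoul(p + 1, &p, 16);
		printf("vendor %04lx device %04lx\n", vendor, device);
		s = p;
	}
	return 0;
}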
400
401static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer,
402 size_t count)
403{
404 struct list_control *listctl;
405 struct edac_pci_device_list *list;
406 unsigned int vendor_id, device_id;
407 const char *s, *e;
408 int *index;
409
410 s = (char*)buffer;
411 e = s + count;
412
413 listctl = ptr;
414 list = listctl->list;
415 index = listctl->count;
416
417 *index = 0;
418 while (*index < MAX_LISTED_PCI_DEVICES) {
419
420 if (parse_one_device(&s,&e,&vendor_id,&device_id)) {
421 list[ *index ].vendor = vendor_id;
422 list[ *index ].device = device_id;
423 (*index)++;
424 }
425
426		/* check whether all data has been consumed */
427 if (s >= e)
428 break;
429 }
430
431 return count;
432}
433
434static ssize_t edac_pci_int_show(void *ptr, char *buffer)
435{
436 int *value = ptr;
437 return sprintf(buffer,"%d\n",*value);
438}
439
440static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
441{
442 int *value = ptr;
443
444 if (isdigit(*buffer))
445 *value = simple_strtoul(buffer,NULL,0);
446
447 return count;
448}
449
450struct edac_pci_dev_attribute {
451 struct attribute attr;
452 void *value;
453 ssize_t (*show)(void *,char *);
454 ssize_t (*store)(void *, const char *,size_t);
455};
456
457/* Set of show/store abstract level functions for PCI Parity object */
458static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
459 char *buffer)
460{
461 struct edac_pci_dev_attribute *edac_pci_dev;
462	edac_pci_dev = (struct edac_pci_dev_attribute*)attr;
463
464 if (edac_pci_dev->show)
465 return edac_pci_dev->show(edac_pci_dev->value, buffer);
466 return -EIO;
467}
468
469static ssize_t edac_pci_dev_store(struct kobject *kobj, struct attribute *attr,
470 const char *buffer, size_t count)
471{
472 struct edac_pci_dev_attribute *edac_pci_dev;
473	edac_pci_dev = (struct edac_pci_dev_attribute*)attr;
474
475	if (edac_pci_dev->store)
476 return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
477 return -EIO;
478}
479
480static struct sysfs_ops edac_pci_sysfs_ops = {
481 .show = edac_pci_dev_show,
482 .store = edac_pci_dev_store
483};
484
485
486#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \
487struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
488 .attr = {.name = __stringify(_name), .mode = _mode }, \
489 .value = &_name, \
490 .show = _show, \
491 .store = _store, \
492};
493
494#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store) \
495struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
496 .attr = {.name = __stringify(_name), .mode = _mode }, \
497 .value = _data, \
498 .show = _show, \
499 .store = _store, \
500};
501
502static struct list_control pci_whitelist_control = {
503 .list = pci_whitelist,
504 .count = &pci_whitelist_count
505};
506
507static struct list_control pci_blacklist_control = {
508 .list = pci_blacklist,
509 .count = &pci_blacklist_count
510};
511
512/* whitelist attribute */
513EDAC_PCI_STRING_ATTR(pci_parity_whitelist,
514 &pci_whitelist_control,
515 S_IRUGO|S_IWUSR,
516 edac_pci_list_string_show,
517 edac_pci_list_string_store);
518
519EDAC_PCI_STRING_ATTR(pci_parity_blacklist,
520 &pci_blacklist_control,
521 S_IRUGO|S_IWUSR,
522 edac_pci_list_string_show,
523 edac_pci_list_string_store);
524
525/* PCI Parity control files */
526EDAC_PCI_ATTR(check_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
527EDAC_PCI_ATTR(panic_on_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
528EDAC_PCI_ATTR(pci_parity_count,S_IRUGO,edac_pci_int_show,NULL);
529
530/* Base Attributes of the memory ECC object */
531static struct edac_pci_dev_attribute *edac_pci_attr[] = {
532 &edac_pci_attr_check_pci_parity,
533 &edac_pci_attr_panic_on_pci_parity,
534 &edac_pci_attr_pci_parity_count,
535 &edac_pci_attr_pci_parity_whitelist,
536 &edac_pci_attr_pci_parity_blacklist,
537 NULL,
538};
539
540/* No memory to release */
541static void edac_pci_release(struct kobject *kobj)
542{
543 debugf1("EDAC PCI: " __FILE__ ": %s()\n", __func__);
544}
545
546static struct kobj_type ktype_edac_pci = {
547 .release = edac_pci_release,
548 .sysfs_ops = &edac_pci_sysfs_ops,
549 .default_attrs = (struct attribute **) edac_pci_attr,
550};
551
552/*
553 * edac_sysfs_pci_setup()
554 *
555 */
556static int edac_sysfs_pci_setup(void)
557{
558 int err;
559
560 debugf1("MC: " __FILE__ ": %s()\n", __func__);
561
562 memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj));
563
564 kobject_init(&edac_pci_kobj);
565 edac_pci_kobj.parent = &edac_class.kset.kobj;
566 edac_pci_kobj.ktype = &ktype_edac_pci;
567
568 err = kobject_set_name(&edac_pci_kobj, "pci");
569 if (!err) {
570		/* Instantiate the pci object */
571 /* FIXME: maybe new sysdev_create_subdir() */
572 err = kobject_register(&edac_pci_kobj);
573 if (err)
574 debugf1("Failed to register '.../edac/pci'\n");
575 else
576 debugf1("Registered '.../edac/pci' kobject\n");
577 }
578 return err;
579}
580
581
582static void edac_sysfs_pci_teardown(void)
583{
584 debugf0("MC: " __FILE__ ": %s()\n", __func__);
585
586 kobject_unregister(&edac_pci_kobj);
587 kobject_put(&edac_pci_kobj);
588}
589
590/* EDAC sysfs CSROW data structures and methods */
591
592/* Set of more detailed csrow<id> attribute show/store functions */
593static ssize_t csrow_ch0_dimm_label_show(struct csrow_info *csrow, char *data)
594{
595 ssize_t size = 0;
596
597 if (csrow->nr_channels > 0) {
598 size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n",
599 csrow->channels[0].label);
600 }
601 return size;
602}
603
604static ssize_t csrow_ch1_dimm_label_show(struct csrow_info *csrow, char *data)
605{
606 ssize_t size = 0;
607
608	if (csrow->nr_channels > 1) {
609 size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
610 csrow->channels[1].label);
611 }
612 return size;
613}
614
615static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow,
616 const char *data, size_t size)
617{
618 ssize_t max_size = 0;
619
620 if (csrow->nr_channels > 0) {
621 max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
622 strncpy(csrow->channels[0].label, data, max_size);
623 csrow->channels[0].label[max_size] = '\0';
624 }
625 return size;
626}
627
628static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow,
629 const char *data, size_t size)
630{
631 ssize_t max_size = 0;
632
633 if (csrow->nr_channels > 1) {
634 max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
635 strncpy(csrow->channels[1].label, data, max_size);
636 csrow->channels[1].label[max_size] = '\0';
637 }
638	return size;
639}
640
641static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data)
642{
643 return sprintf(data,"%u\n", csrow->ue_count);
644}
645
646static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data)
647{
648 return sprintf(data,"%u\n", csrow->ce_count);
649}
650
651static ssize_t csrow_ch0_ce_count_show(struct csrow_info *csrow, char *data)
652{
653 ssize_t size = 0;
654
655 if (csrow->nr_channels > 0) {
656 size = sprintf(data,"%u\n", csrow->channels[0].ce_count);
657 }
658 return size;
659}
660
661static ssize_t csrow_ch1_ce_count_show(struct csrow_info *csrow, char *data)
662{
663 ssize_t size = 0;
664
665 if (csrow->nr_channels > 1) {
666 size = sprintf(data,"%u\n", csrow->channels[1].ce_count);
667 }
668 return size;
669}
670
671static ssize_t csrow_size_show(struct csrow_info *csrow, char *data)
672{
673 return sprintf(data,"%u\n", PAGES_TO_MiB(csrow->nr_pages));
674}
675
676static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data)
677{
678 return sprintf(data,"%s\n", mem_types[csrow->mtype]);
679}
680
681static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data)
682{
683 return sprintf(data,"%s\n", dev_types[csrow->dtype]);
684}
685
686static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data)
687{
688 return sprintf(data,"%s\n", edac_caps[csrow->edac_mode]);
689}
690
691struct csrowdev_attribute {
692 struct attribute attr;
693 ssize_t (*show)(struct csrow_info *,char *);
694 ssize_t (*store)(struct csrow_info *, const char *,size_t);
695};
696
697#define to_csrow(k) container_of(k, struct csrow_info, kobj)
698#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
699
700/* Set of show/store higher level functions for csrow objects */
701static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr,
702 char *buffer)
703{
704 struct csrow_info *csrow = to_csrow(kobj);
705 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
706
707 if (csrowdev_attr->show)
708 return csrowdev_attr->show(csrow, buffer);
709 return -EIO;
710}
711
712static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
713 const char *buffer, size_t count)
714{
715 struct csrow_info *csrow = to_csrow(kobj);
716 struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr);
717
718 if (csrowdev_attr->store)
719 return csrowdev_attr->store(csrow, buffer, count);
720 return -EIO;
721}
722
723static struct sysfs_ops csrowfs_ops = {
724 .show = csrowdev_show,
725 .store = csrowdev_store
726};
727
728#define CSROWDEV_ATTR(_name,_mode,_show,_store) \
729struct csrowdev_attribute attr_##_name = { \
730 .attr = {.name = __stringify(_name), .mode = _mode }, \
731 .show = _show, \
732 .store = _store, \
733};
734
735/* csrow<id> attribute files */
736CSROWDEV_ATTR(size_mb,S_IRUGO,csrow_size_show,NULL);
737CSROWDEV_ATTR(dev_type,S_IRUGO,csrow_dev_type_show,NULL);
738CSROWDEV_ATTR(mem_type,S_IRUGO,csrow_mem_type_show,NULL);
739CSROWDEV_ATTR(edac_mode,S_IRUGO,csrow_edac_mode_show,NULL);
740CSROWDEV_ATTR(ue_count,S_IRUGO,csrow_ue_count_show,NULL);
741CSROWDEV_ATTR(ce_count,S_IRUGO,csrow_ce_count_show,NULL);
742CSROWDEV_ATTR(ch0_ce_count,S_IRUGO,csrow_ch0_ce_count_show,NULL);
743CSROWDEV_ATTR(ch1_ce_count,S_IRUGO,csrow_ch1_ce_count_show,NULL);
744
745/* control/attribute files */
746CSROWDEV_ATTR(ch0_dimm_label,S_IRUGO|S_IWUSR,
747 csrow_ch0_dimm_label_show,
748 csrow_ch0_dimm_label_store);
749CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR,
750 csrow_ch1_dimm_label_show,
751 csrow_ch1_dimm_label_store);
752
753
754/* Attributes of the CSROW<id> object */
755static struct csrowdev_attribute *csrow_attr[] = {
756 &attr_dev_type,
757 &attr_mem_type,
758 &attr_edac_mode,
759 &attr_size_mb,
760 &attr_ue_count,
761 &attr_ce_count,
762 &attr_ch0_ce_count,
763 &attr_ch1_ce_count,
764 &attr_ch0_dimm_label,
765 &attr_ch1_dimm_label,
766 NULL,
767};
768
769
770/* No memory to release */
771static void edac_csrow_instance_release(struct kobject *kobj)
772{
773 debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
774}
775
776static struct kobj_type ktype_csrow = {
777 .release = edac_csrow_instance_release,
778 .sysfs_ops = &csrowfs_ops,
779 .default_attrs = (struct attribute **) csrow_attr,
780};
781
782/* Create a CSROW object under the specified edac_mc_device */
783static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
784 struct csrow_info *csrow, int index )
785{
786 int err = 0;
787
788 debugf0("MC: " __FILE__ ": %s()\n", __func__);
789
790 memset(&csrow->kobj, 0, sizeof(csrow->kobj));
791
792 /* generate ..../edac/mc/mc<id>/csrow<index> */
793
794 kobject_init(&csrow->kobj);
795 csrow->kobj.parent = edac_mci_kobj;
796 csrow->kobj.ktype = &ktype_csrow;
797
798 /* name this instance of csrow<id> */
799 err = kobject_set_name(&csrow->kobj,"csrow%d",index);
800 if (!err) {
801	/* Instantiate the csrow object */
802 err = kobject_register(&csrow->kobj);
803 if (err)
804 debugf0("Failed to register CSROW%d\n",index);
805 else
806 debugf0("Registered CSROW%d\n",index);
807 }
808
809 return err;
810}
811
812/* sysfs data structures and methods for the MCI kobjects */
813
814static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
815 const char *data, size_t count )
816{
817 int row, chan;
818
819 mci->ue_noinfo_count = 0;
820 mci->ce_noinfo_count = 0;
821 mci->ue_count = 0;
822 mci->ce_count = 0;
823 for (row = 0; row < mci->nr_csrows; row++) {
824 struct csrow_info *ri = &mci->csrows[row];
825
826 ri->ue_count = 0;
827 ri->ce_count = 0;
828 for (chan = 0; chan < ri->nr_channels; chan++)
829 ri->channels[chan].ce_count = 0;
830 }
831 mci->start_time = jiffies;
832
833 return count;
834}
835
836static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
837{
838 return sprintf(data,"%d\n", mci->ue_count);
839}
840
841static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
842{
843 return sprintf(data,"%d\n", mci->ce_count);
844}
845
846static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
847{
848 return sprintf(data,"%d\n", mci->ce_noinfo_count);
849}
850
851static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
852{
853 return sprintf(data,"%d\n", mci->ue_noinfo_count);
854}
855
856static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
857{
858 return sprintf(data,"%ld\n", (jiffies - mci->start_time) / HZ);
859}
860
861static ssize_t mci_mod_name_show(struct mem_ctl_info *mci, char *data)
862{
863 return sprintf(data,"%s %s\n", mci->mod_name, mci->mod_ver);
864}
865
866static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
867{
868 return sprintf(data,"%s\n", mci->ctl_name);
869}
870
871static int mci_output_edac_cap(char *buf, unsigned long edac_cap)
872{
873 char *p = buf;
874 int bit_idx;
875
876	for (bit_idx = 0; bit_idx < ARRAY_SIZE(edac_caps); bit_idx++) {
877 if ((edac_cap >> bit_idx) & 0x1)
878 p += sprintf(p, "%s ", edac_caps[bit_idx]);
879 }
880
881 return p - buf;
882}
883
884static ssize_t mci_edac_capability_show(struct mem_ctl_info *mci, char *data)
885{
886 char *p = data;
887
888 p += mci_output_edac_cap(p,mci->edac_ctl_cap);
889 p += sprintf(p, "\n");
890
891 return p - data;
892}
893
894static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci,
895 char *data)
896{
897 char *p = data;
898
899 p += mci_output_edac_cap(p,mci->edac_cap);
900 p += sprintf(p, "\n");
901
902 return p - data;
903}
904
905static int mci_output_mtype_cap(char *buf, unsigned long mtype_cap)
906{
907 char *p = buf;
908 int bit_idx;
909
910	for (bit_idx = 0; bit_idx < ARRAY_SIZE(mem_types); bit_idx++) {
911 if ((mtype_cap >> bit_idx) & 0x1)
912 p += sprintf(p, "%s ", mem_types[bit_idx]);
913 }
914
915 return p - buf;
916}
917
918static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, char *data)
919{
920 char *p = data;
921
922 p += mci_output_mtype_cap(p,mci->mtype_cap);
923 p += sprintf(p, "\n");
924
925 return p - data;
926}
927
928static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
929{
930 int total_pages, csrow_idx;
931
932 for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
933 csrow_idx++) {
934 struct csrow_info *csrow = &mci->csrows[csrow_idx];
935
936 if (!csrow->nr_pages)
937 continue;
938 total_pages += csrow->nr_pages;
939 }
940
941 return sprintf(data,"%u\n", PAGES_TO_MiB(total_pages));
942}
943
944struct mcidev_attribute {
945 struct attribute attr;
946 ssize_t (*show)(struct mem_ctl_info *,char *);
947 ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
948};
949
950#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
951#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr)
952
953static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
954 char *buffer)
955{
956 struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
957 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
958
959 if (mcidev_attr->show)
960 return mcidev_attr->show(mem_ctl_info, buffer);
961 return -EIO;
962}
963
964static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
965 const char *buffer, size_t count)
966{
967 struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
968 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
969
970 if (mcidev_attr->store)
971 return mcidev_attr->store(mem_ctl_info, buffer, count);
972 return -EIO;
973}
974
975static struct sysfs_ops mci_ops = {
976 .show = mcidev_show,
977 .store = mcidev_store
978};
979
980#define MCIDEV_ATTR(_name,_mode,_show,_store) \
981struct mcidev_attribute mci_attr_##_name = { \
982 .attr = {.name = __stringify(_name), .mode = _mode }, \
983 .show = _show, \
984 .store = _store, \
985};
986
987/* Control file */
988MCIDEV_ATTR(reset_counters,S_IWUSR,NULL,mci_reset_counters_store);
989
990/* Attribute files */
991MCIDEV_ATTR(mc_name,S_IRUGO,mci_ctl_name_show,NULL);
992MCIDEV_ATTR(module_name,S_IRUGO,mci_mod_name_show,NULL);
993MCIDEV_ATTR(edac_capability,S_IRUGO,mci_edac_capability_show,NULL);
994MCIDEV_ATTR(size_mb,S_IRUGO,mci_size_mb_show,NULL);
995MCIDEV_ATTR(seconds_since_reset,S_IRUGO,mci_seconds_show,NULL);
996MCIDEV_ATTR(ue_noinfo_count,S_IRUGO,mci_ue_noinfo_show,NULL);
997MCIDEV_ATTR(ce_noinfo_count,S_IRUGO,mci_ce_noinfo_show,NULL);
998MCIDEV_ATTR(ue_count,S_IRUGO,mci_ue_count_show,NULL);
999MCIDEV_ATTR(ce_count,S_IRUGO,mci_ce_count_show,NULL);
1000MCIDEV_ATTR(edac_current_capability,S_IRUGO,
1001 mci_edac_current_capability_show,NULL);
1002MCIDEV_ATTR(supported_mem_type,S_IRUGO,
1003 mci_supported_mem_type_show,NULL);
1004
1005
1006static struct mcidev_attribute *mci_attr[] = {
1007 &mci_attr_reset_counters,
1008 &mci_attr_module_name,
1009 &mci_attr_mc_name,
1010 &mci_attr_edac_capability,
1011 &mci_attr_edac_current_capability,
1012 &mci_attr_supported_mem_type,
1013 &mci_attr_size_mb,
1014 &mci_attr_seconds_since_reset,
1015 &mci_attr_ue_noinfo_count,
1016 &mci_attr_ce_noinfo_count,
1017 &mci_attr_ue_count,
1018 &mci_attr_ce_count,
1019 NULL
1020};
1021
1022
1023/*
1024 * Release of a MC controlling instance
1025 */
1026static void edac_mci_instance_release(struct kobject *kobj)
1027{
1028 struct mem_ctl_info *mci;
1029 mci = container_of(kobj,struct mem_ctl_info,edac_mci_kobj);
1030
1031 debugf0("MC: " __FILE__ ": %s() idx=%d calling kfree\n",
1032 __func__, mci->mc_idx);
1033
1034 kfree(mci);
1035}
1036
1037static struct kobj_type ktype_mci = {
1038 .release = edac_mci_instance_release,
1039 .sysfs_ops = &mci_ops,
1040 .default_attrs = (struct attribute **) mci_attr,
1041};
1042
1043#define EDAC_DEVICE_SYMLINK "device"
1044
1045/*
1046 * Create a new Memory Controller kobject instance,
1047 * mc<id> under the 'mc' directory
1048 *
1049 * Return:
1050 * 0 Success
1051 * !0 Failure
1052 */
1053static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1054{
1055 int i;
1056 int err;
1057 struct csrow_info *csrow;
1058 struct kobject *edac_mci_kobj=&mci->edac_mci_kobj;
1059
1060 debugf0("MC: " __FILE__ ": %s() idx=%d\n", __func__, mci->mc_idx);
1061
1062 memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj));
1063 kobject_init(edac_mci_kobj);
1064
1065 /* set the name of the mc<id> object */
1066 err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx);
1067 if (err)
1068 return err;
1069
1070 /* link to our parent the '..../edac/mc' object */
1071 edac_mci_kobj->parent = &edac_memctrl_kobj;
1072 edac_mci_kobj->ktype = &ktype_mci;
1073
1074 /* register the mc<id> kobject */
1075 err = kobject_register(edac_mci_kobj);
1076 if (err)
1077 return err;
1078
1079 /* create a symlink for the device */
1080 err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj,
1081 EDAC_DEVICE_SYMLINK);
1082 if (err) {
1083 kobject_unregister(edac_mci_kobj);
1084 return err;
1085 }
1086
1087 /* Make directories for each CSROW object
1088 * under the mc<id> kobject
1089 */
1090 for (i = 0; i < mci->nr_csrows; i++) {
1091
1092 csrow = &mci->csrows[i];
1093
1094 /* Only expose populated CSROWs */
1095 if (csrow->nr_pages > 0) {
1096 err = edac_create_csrow_object(edac_mci_kobj,csrow,i);
1097 if (err)
1098 goto fail;
1099 }
1100 }
1101
1102 /* Mark this MCI instance as having sysfs entries */
1103 mci->sysfs_active = MCI_SYSFS_ACTIVE;
1104
1105 return 0;
1106
1107
1108	/* CSROW error: back out what has already been registered */
1109fail:
1110	for (i--; i >= 0; i--) {
1111		if (mci->csrows[i].nr_pages > 0) {
1112 kobject_unregister(&mci->csrows[i].kobj);
1113 kobject_put(&mci->csrows[i].kobj);
1114 }
1115 }
1116
1117 kobject_unregister(edac_mci_kobj);
1118 kobject_put(edac_mci_kobj);
1119
1120 return err;
1121}
1122
1123/*
1124 * remove a Memory Controller instance
1125 */
1126static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
1127{
1128 int i;
1129
1130 debugf0("MC: " __FILE__ ": %s()\n", __func__);
1131
1132 /* remove all csrow kobjects */
1133 for (i = 0; i < mci->nr_csrows; i++) {
1134 if (mci->csrows[i].nr_pages > 0) {
1135 kobject_unregister(&mci->csrows[i].kobj);
1136 kobject_put(&mci->csrows[i].kobj);
1137 }
1138 }
1139
1140 sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
1141
1142 kobject_unregister(&mci->edac_mci_kobj);
1143 kobject_put(&mci->edac_mci_kobj);
1144}
1145
1146/* END OF sysfs data and methods */
1147
1148#ifdef CONFIG_EDAC_DEBUG
1149
1150EXPORT_SYMBOL(edac_mc_dump_channel);
1151
1152void edac_mc_dump_channel(struct channel_info *chan)
1153{
1154 debugf4("\tchannel = %p\n", chan);
1155 debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
1156 debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
1157 debugf4("\tchannel->label = '%s'\n", chan->label);
1158 debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
1159}
1160
1161
1162EXPORT_SYMBOL(edac_mc_dump_csrow);
1163
1164void edac_mc_dump_csrow(struct csrow_info *csrow)
1165{
1166 debugf4("\tcsrow = %p\n", csrow);
1167 debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
1168 debugf4("\tcsrow->first_page = 0x%lx\n",
1169 csrow->first_page);
1170 debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
1171 debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
1172 debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
1173 debugf4("\tcsrow->nr_channels = %d\n",
1174 csrow->nr_channels);
1175 debugf4("\tcsrow->channels = %p\n", csrow->channels);
1176 debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
1177}
1178
1179
1180EXPORT_SYMBOL(edac_mc_dump_mci);
1181
1182void edac_mc_dump_mci(struct mem_ctl_info *mci)
1183{
1184 debugf3("\tmci = %p\n", mci);
1185 debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
1186 debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
1187 debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
1188 debugf4("\tmci->edac_check = %p\n", mci->edac_check);
1189 debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
1190 mci->nr_csrows, mci->csrows);
1191 debugf3("\tpdev = %p\n", mci->pdev);
1192 debugf3("\tmod_name:ctl_name = %s:%s\n",
1193 mci->mod_name, mci->ctl_name);
1194 debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
1195}
1196
1197
1198#endif /* CONFIG_EDAC_DEBUG */
1199
1200/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
1201 * Adjust 'ptr' so that its alignment is at least as stringent as what the
1202 * compiler would provide for X and return the aligned result.
1203 *
1204 * Since 'align' is always a power of two here, the compiler can still
1205 * reduce this whole function to a simple round-up of the value of 'ptr'.
1206 */
1207static inline char *align_ptr(void *ptr, unsigned size)
1208{
1209 unsigned align, r;
1210
1211 /* Here we assume that the alignment of a "long long" is the most
1212 * stringent alignment that the compiler will ever provide by default.
1213 * As far as I know, this is a reasonable assumption.
1214 */
1215 if (size > sizeof(long))
1216 align = sizeof(long long);
1217 else if (size > sizeof(int))
1218 align = sizeof(long);
1219 else if (size > sizeof(short))
1220 align = sizeof(int);
1221 else if (size > sizeof(char))
1222 align = sizeof(short);
1223 else
1224 return (char *) ptr;
1225
1226	r = (unsigned long) ptr % align;
1227
1228 if (r == 0)
1229 return (char *) ptr;
1230
1231 return (char *) (((unsigned long) ptr) + align - r);
1232}
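With that fix, the adjustment rounds the pointer's offset up to the next multiple of the chosen alignment. A quick user-space check of the rounding:

#include <stdio.h>

static unsigned long align_up(unsigned long off, unsigned align)
{
	unsigned long r = off % align;
	return r ? off + align - r : off;
}

int main(void)
{
	/* e.g. an 8-byte-aligned item placed after a 13-byte header */
	printf("%lu\n", align_up(13, 8));	/* prints 16 */
	printf("%lu\n", align_up(16, 8));	/* prints 16 */
	return 0;
}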
1233
1234
1235EXPORT_SYMBOL(edac_mc_alloc);
1236
1237/**
1238 * edac_mc_alloc: Allocate a struct mem_ctl_info structure
1239 * @size_pvt: size of private storage needed
1240 * @nr_csrows: Number of CSROWS needed for this MC
1241 * @nr_chans: Number of channels for the MC
1242 *
1243 * Everything is kmalloc'ed as one big chunk - more efficient.
1244 * Only can be used if all structures have the same lifetime - otherwise
1245 * you have to allocate and initialize your own structures.
1246 *
1247 * Use edac_mc_free() to free mc structures allocated by this function.
1248 *
1249 * Returns:
1250 * NULL allocation failed
1251 * struct mem_ctl_info pointer
1252 */
1253struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
1254 unsigned nr_chans)
1255{
1256 struct mem_ctl_info *mci;
1257 struct csrow_info *csi, *csrow;
1258 struct channel_info *chi, *chp, *chan;
1259 void *pvt;
1260 unsigned size;
1261 int row, chn;
1262
1263 /* Figure out the offsets of the various items from the start of an mc
1264 * structure. We want the alignment of each item to be at least as
1265 * stringent as what the compiler would provide if we could simply
1266 * hardcode everything into a single struct.
1267 */
1268 mci = (struct mem_ctl_info *) 0;
1269 csi = (struct csrow_info *)align_ptr(&mci[1], sizeof(*csi));
1270 chi = (struct channel_info *)
1271 align_ptr(&csi[nr_csrows], sizeof(*chi));
1272 pvt = align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
1273 size = ((unsigned long) pvt) + sz_pvt;
1274
1275 if ((mci = kmalloc(size, GFP_KERNEL)) == NULL)
1276 return NULL;
1277
1278 /* Adjust pointers so they point within the memory we just allocated
1279 * rather than an imaginary chunk of memory located at address 0.
1280 */
1281 csi = (struct csrow_info *) (((char *) mci) + ((unsigned long) csi));
1282 chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
1283 pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;
1284
1285 memset(mci, 0, size); /* clear all fields */
1286
1287 mci->csrows = csi;
1288 mci->pvt_info = pvt;
1289 mci->nr_csrows = nr_csrows;
1290
1291 for (row = 0; row < nr_csrows; row++) {
1292 csrow = &csi[row];
1293 csrow->csrow_idx = row;
1294 csrow->mci = mci;
1295 csrow->nr_channels = nr_chans;
1296 chp = &chi[row * nr_chans];
1297 csrow->channels = chp;
1298
1299 for (chn = 0; chn < nr_chans; chn++) {
1300 chan = &chp[chn];
1301 chan->chan_idx = chn;
1302 chan->csrow = csrow;
1303 }
1304 }
1305
1306 return mci;
1307}
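The offsets-from-NULL trick above sizes and carves a single kmalloc'd chunk for the mci header, csrow array, channel array and private data. A simplified user-space sketch of the same layout technique (the alignment step is omitted for brevity; types and sizes are made up):

#include <stdio.h>
#include <stdlib.h>

struct hdr { int nr_rows; struct row *rows; void *pvt; };
struct row { int idx; };

int main(void)
{
	unsigned nr_rows = 4, pvt_sz = 32;

	/* Compute offsets against an imaginary chunk at address 0,
	 * exactly as edac_mc_alloc() does. */
	struct hdr *h0 = NULL;
	struct row *r0 = (struct row *)&h0[1];
	char *p0 = (char *)&r0[nr_rows];
	size_t total = (size_t)p0 + pvt_sz;

	/* One allocation; rebase the offsets into the real chunk. */
	struct hdr *h = calloc(1, total);
	if (!h)
		return 1;
	h->rows = (struct row *)((char *)h + (size_t)r0);
	h->pvt = (char *)h + (size_t)p0;
	h->nr_rows = nr_rows;
	printf("one chunk of %zu bytes, rows at +%zu, pvt at +%zu\n",
	       total, (size_t)r0, (size_t)p0);
	free(h);
	return 0;
}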
1308
1309
1310EXPORT_SYMBOL(edac_mc_free);
1311
1312/**
1313 * edac_mc_free: Free a previously allocated 'mci' structure
1314 * @mci: pointer to a struct mem_ctl_info structure
1315 *
1316 * Free up a previously allocated mci structure
1317 * A MCI structure can be in 2 states after being allocated
1318 * by edac_mc_alloc().
1319 * 1) Allocated in a MC driver's probe, but not yet committed
1320 * 2) Allocated and committed, by a call to edac_mc_add_mc()
1321 * edac_mc_add_mc() is the function that adds the sysfs entries
1322 * thus, this free function must determine which state the 'mci'
1323 * structure is in, then either free it directly or
1324 * perform kobject cleanup by calling edac_remove_sysfs_mci_device().
1325 *
1326 * VOID Return
1327 */
1328void edac_mc_free(struct mem_ctl_info *mci)
1329{
1330 /* only if sysfs entries for this mci instance exist
1331 * do we remove them and defer the actual kfree via
1332 * the kobject 'release()' callback.
1333 *
1334 * Otherwise, do a straight kfree now.
1335 */
1336 if (mci->sysfs_active == MCI_SYSFS_ACTIVE)
1337 edac_remove_sysfs_mci_device(mci);
1338 else
1339 kfree(mci);
1340}
1341
1342
1343
1344EXPORT_SYMBOL(edac_mc_find_mci_by_pdev);
1345
1346struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev)
1347{
1348 struct mem_ctl_info *mci;
1349 struct list_head *item;
1350
1351 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1352
1353 list_for_each(item, &mc_devices) {
1354 mci = list_entry(item, struct mem_ctl_info, link);
1355
1356 if (mci->pdev == pdev)
1357 return mci;
1358 }
1359
1360 return NULL;
1361}
1362
1363static int add_mc_to_global_list (struct mem_ctl_info *mci)
1364{
1365 struct list_head *item, *insert_before;
1366 struct mem_ctl_info *p;
1367 int i;
1368
1369 if (list_empty(&mc_devices)) {
1370 mci->mc_idx = 0;
1371 insert_before = &mc_devices;
1372 } else {
1373 if (edac_mc_find_mci_by_pdev(mci->pdev)) {
1374 printk(KERN_WARNING
1375 "EDAC MC: %s (%s) %s %s already assigned %d\n",
1376 mci->pdev->dev.bus_id, pci_name(mci->pdev),
1377 mci->mod_name, mci->ctl_name, mci->mc_idx);
1378 return 1;
1379 }
1380
1381 insert_before = NULL;
1382 i = 0;
1383
1384 list_for_each(item, &mc_devices) {
1385 p = list_entry(item, struct mem_ctl_info, link);
1386
1387 if (p->mc_idx != i) {
1388 insert_before = item;
1389 break;
1390 }
1391
1392 i++;
1393 }
1394
1395 mci->mc_idx = i;
1396
1397 if (insert_before == NULL)
1398 insert_before = &mc_devices;
1399 }
1400
1401 list_add_tail_rcu(&mci->link, insert_before);
1402 return 0;
1403}
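/*
 * Editorial note: add_mc_to_global_list() reuses the first gap in the
 * mc_idx sequence.  Worked example: if the global list currently holds
 * controllers with indices {0, 1, 3} (index 2 was deleted earlier), the
 * loop stops at the entry whose index is 3, assigns mc_idx = 2 to the
 * new controller, and inserts it before that entry, keeping the list
 * sorted by index.
 */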
1404
1405
1406
1407EXPORT_SYMBOL(edac_mc_add_mc);
1408
1409/**
1410 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list
1411 * @mci: pointer to the mci structure to be added to the list
1412 *
1413 * Return:
1414 * 0 Success
1415 * !0 Failure
1416 */
1417
1418/* FIXME - should a warning be printed if no error detection? correction? */
1419int edac_mc_add_mc(struct mem_ctl_info *mci)
1420{
1421 int rc = 1;
1422
1423 debugf0("MC: " __FILE__ ": %s()\n", __func__);
1424#ifdef CONFIG_EDAC_DEBUG
1425 if (edac_debug_level >= 3)
1426 edac_mc_dump_mci(mci);
1427 if (edac_debug_level >= 4) {
1428 int i;
1429
1430 for (i = 0; i < mci->nr_csrows; i++) {
1431 int j;
1432 edac_mc_dump_csrow(&mci->csrows[i]);
1433 for (j = 0; j < mci->csrows[i].nr_channels; j++)
1434 edac_mc_dump_channel(&mci->csrows[i].
1435 channels[j]);
1436 }
1437 }
1438#endif
1439 down(&mem_ctls_mutex);
1440
1441 if (add_mc_to_global_list(mci))
1442 goto finish;
1443
1444 /* set load time so that error rate can be tracked */
1445 mci->start_time = jiffies;
1446
1447 if (edac_create_sysfs_mci_device(mci)) {
1448 printk(KERN_WARNING
1449 "EDAC MC%d: failed to create sysfs device\n",
1450 mci->mc_idx);
1451 /* FIXME - should there be an error code and unwind? */
1452 goto finish;
1453 }
1454
1455 /* Report action taken */
1456 printk(KERN_INFO
1457 "EDAC MC%d: Giving out device to %s %s: PCI %s\n",
1458 mci->mc_idx, mci->mod_name, mci->ctl_name,
1459 pci_name(mci->pdev));
1460
1461
1462 rc = 0;
1463
1464finish:
1465 up(&mem_ctls_mutex);
1466 return rc;
1467}
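/*
 * Editorial sketch of the expected probe-time call sequence (it mirrors
 * what the chipset drivers later in this patch actually do; "my_check"
 * is a hypothetical polling routine):
 *
 *	mci = edac_mc_alloc(sizeof(*pvt), nr_csrows, nr_chans);
 *	if (mci == NULL)
 *		return -ENOMEM;
 *	mci->pdev = pdev;
 *	mci->edac_check = my_check;
 *	(... fill in the csrow_info entries ...)
 *	if (edac_mc_add_mc(mci)) {
 *		edac_mc_free(mci);	(not yet committed: plain kfree)
 *		return -ENODEV;
 *	}
 */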
1468
1469
1470
1471static void complete_mc_list_del (struct rcu_head *head)
1472{
1473 struct mem_ctl_info *mci;
1474
1475 mci = container_of(head, struct mem_ctl_info, rcu);
1476 INIT_LIST_HEAD(&mci->link);
1477 complete(&mci->complete);
1478}
1479
1480static void del_mc_from_global_list (struct mem_ctl_info *mci)
1481{
1482 list_del_rcu(&mci->link);
1483 init_completion(&mci->complete);
1484 call_rcu(&mci->rcu, complete_mc_list_del);
1485 wait_for_completion(&mci->complete);
1486}
1487
1488EXPORT_SYMBOL(edac_mc_del_mc);
1489
1490/**
1491 * edac_mc_del_mc: Remove the specified mci structure from global list
1492 * @mci: Pointer to struct mem_ctl_info structure
1493 *
1494 * Returns:
1495 * 0 Success
1496 * 1 Failure
1497 */
1498int edac_mc_del_mc(struct mem_ctl_info *mci)
1499{
1500 int rc = 1;
1501
1502 debugf0("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
1503 down(&mem_ctls_mutex);
1504 del_mc_from_global_list(mci);
1505 printk(KERN_INFO
1506 "EDAC MC%d: Removed device %d for %s %s: PCI %s\n",
1507 mci->mc_idx, mci->mc_idx, mci->mod_name, mci->ctl_name,
1508 pci_name(mci->pdev));
1509 rc = 0;
1510 up(&mem_ctls_mutex);
1511
1512 return rc;
1513}
1514
1515
1516EXPORT_SYMBOL(edac_mc_scrub_block);
1517
1518void edac_mc_scrub_block(unsigned long page, unsigned long offset,
1519 u32 size)
1520{
1521 struct page *pg;
1522 void *virt_addr;
1523 unsigned long flags = 0;
1524
1525 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1526
1527 /* ECC error page was not in our memory. Ignore it. */
1528 if(!pfn_valid(page))
1529 return;
1530
1531 /* Find the actual page structure then map it and fix */
1532 pg = pfn_to_page(page);
1533
1534 if (PageHighMem(pg))
1535 local_irq_save(flags);
1536
1537 virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
1538
1539 /* Perform architecture specific atomic scrub operation */
1540 atomic_scrub(virt_addr + offset, size);
1541
1542 /* Unmap and complete */
1543 kunmap_atomic(virt_addr, KM_BOUNCE_READ);
1544
1545 if (PageHighMem(pg))
1546 local_irq_restore(flags);
1547}
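/*
 * Editorial sketch: atomic_scrub() is supplied per-architecture.  On
 * i386-class hardware it can be approximated by a locked add of zero,
 * which forces the memory controller to read, correct and write back
 * each word (assumptions: 4-byte-aligned buffer, size in bytes):
 *
 *	static inline void atomic_scrub(void *va, u32 size)
 *	{
 *		unsigned long *p = va;
 *		u32 i;
 *
 *		for (i = 0; i < size / 4; i++, p++)
 *			asm volatile("lock; addl $0, %0" : "+m" (*p));
 *	}
 */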
1548
1549
1550/* FIXME - should return -1 */
1551EXPORT_SYMBOL(edac_mc_find_csrow_by_page);
1552
1553int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
1554 unsigned long page)
1555{
1556 struct csrow_info *csrows = mci->csrows;
1557 int row, i;
1558
1559 debugf1("MC%d: " __FILE__ ": %s(): 0x%lx\n", mci->mc_idx, __func__,
1560 page);
1561 row = -1;
1562
1563 for (i = 0; i < mci->nr_csrows; i++) {
1564 struct csrow_info *csrow = &csrows[i];
1565
1566 if (csrow->nr_pages == 0)
1567 continue;
1568
1569 debugf3("MC%d: " __FILE__
1570 ": %s(): first(0x%lx) page(0x%lx)"
1571 " last(0x%lx) mask(0x%lx)\n", mci->mc_idx,
1572 __func__, csrow->first_page, page,
1573 csrow->last_page, csrow->page_mask);
1574
1575 if ((page >= csrow->first_page) &&
1576 (page <= csrow->last_page) &&
1577 ((page & csrow->page_mask) ==
1578 (csrow->first_page & csrow->page_mask))) {
1579 row = i;
1580 break;
1581 }
1582 }
1583
1584 if (row == -1)
1585 printk(KERN_ERR
1586 "EDAC MC%d: could not look up page error address %lx\n",
1587 mci->mc_idx, (unsigned long) page);
1588
1589 return row;
1590}
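/*
 * Editorial worked example for the interleaving test above: suppose a
 * csrow has first_page = 0x100, last_page = 0x2ff and page_mask = 0x1
 * (2-way interleave on the low page bit).  Page 0x1fe is in range and
 * satisfies (0x1fe & 0x1) == (0x100 & 0x1), so it maps to this csrow;
 * page 0x1ff fails the mask test and belongs to the partner row.
 */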
1591
1592
1593EXPORT_SYMBOL(edac_mc_handle_ce);
1594
1595/* FIXME - settable log (warning/emerg) levels */
1596/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
1597void edac_mc_handle_ce(struct mem_ctl_info *mci,
1598 unsigned long page_frame_number,
1599 unsigned long offset_in_page,
1600 unsigned long syndrome, int row, int channel,
1601 const char *msg)
1602{
1603 unsigned long remapped_page;
1604
1605 debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
1606
1607 /* FIXME - maybe make panic on INTERNAL ERROR an option */
1608 if (row >= mci->nr_csrows || row < 0) {
1609 /* something is wrong */
1610 printk(KERN_ERR
1611 "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
1612 mci->mc_idx, row, mci->nr_csrows);
1613 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1614 return;
1615 }
1616 if (channel >= mci->csrows[row].nr_channels || channel < 0) {
1617 /* something is wrong */
1618 printk(KERN_ERR
1619 "EDAC MC%d: INTERNAL ERROR: channel out of range "
1620 "(%d >= %d)\n",
1621 mci->mc_idx, channel, mci->csrows[row].nr_channels);
1622 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1623 return;
1624 }
1625
1626 if (log_ce)
1627 /* FIXME - put in DIMM location */
1628 printk(KERN_WARNING
1629 "EDAC MC%d: CE page 0x%lx, offset 0x%lx,"
1630 " grain %d, syndrome 0x%lx, row %d, channel %d,"
1631 " label \"%s\": %s\n", mci->mc_idx,
1632 page_frame_number, offset_in_page,
1633 mci->csrows[row].grain, syndrome, row, channel,
1634 mci->csrows[row].channels[channel].label, msg);
1635
1636 mci->ce_count++;
1637 mci->csrows[row].ce_count++;
1638 mci->csrows[row].channels[channel].ce_count++;
1639
1640 if (mci->scrub_mode & SCRUB_SW_SRC) {
1641 /*
1642 * Some MC's can remap memory so that it is still available
1643 * at a different address when PCI devices map into memory.
1644 * MC's that can't do this lose the memory where PCI devices
1645		 * are mapped. This mapping is MC dependent and so we call
1646 * back into the MC driver for it to map the MC page to
1647 * a physical (CPU) page which can then be mapped to a virtual
1648 * page - which can then be scrubbed.
1649 */
1650 remapped_page = mci->ctl_page_to_phys ?
1651 mci->ctl_page_to_phys(mci, page_frame_number) :
1652 page_frame_number;
1653
1654 edac_mc_scrub_block(remapped_page, offset_in_page,
1655 mci->csrows[row].grain);
1656 }
1657}
1658
1659
1660EXPORT_SYMBOL(edac_mc_handle_ce_no_info);
1661
1662void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
1663 const char *msg)
1664{
1665 if (log_ce)
1666 printk(KERN_WARNING
1667 "EDAC MC%d: CE - no information available: %s\n",
1668 mci->mc_idx, msg);
1669 mci->ce_noinfo_count++;
1670 mci->ce_count++;
1671}
1672
1673
1674EXPORT_SYMBOL(edac_mc_handle_ue);
1675
1676void edac_mc_handle_ue(struct mem_ctl_info *mci,
1677 unsigned long page_frame_number,
1678 unsigned long offset_in_page, int row,
1679 const char *msg)
1680{
1681 int len = EDAC_MC_LABEL_LEN * 4;
1682 char labels[len + 1];
1683 char *pos = labels;
1684 int chan;
1685 int chars;
1686
1687 debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
1688
1689 /* FIXME - maybe make panic on INTERNAL ERROR an option */
1690 if (row >= mci->nr_csrows || row < 0) {
1691 /* something is wrong */
1692 printk(KERN_ERR
1693 "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
1694 mci->mc_idx, row, mci->nr_csrows);
1695 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
1696 return;
1697 }
1698
1699 chars = snprintf(pos, len + 1, "%s",
1700 mci->csrows[row].channels[0].label);
1701 len -= chars;
1702 pos += chars;
1703 for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
1704 chan++) {
1705 chars = snprintf(pos, len + 1, ":%s",
1706 mci->csrows[row].channels[chan].label);
1707 len -= chars;
1708 pos += chars;
1709 }
1710
1711 if (log_ue)
1712 printk(KERN_EMERG
1713 "EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d,"
1714 " labels \"%s\": %s\n", mci->mc_idx,
1715 page_frame_number, offset_in_page,
1716 mci->csrows[row].grain, row, labels, msg);
1717
1718 if (panic_on_ue)
1719 panic
1720 ("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d,"
1721 " labels \"%s\": %s\n", mci->mc_idx,
1722 page_frame_number, offset_in_page,
1723 mci->csrows[row].grain, row, labels, msg);
1724
1725 mci->ue_count++;
1726 mci->csrows[row].ue_count++;
1727}
1728
1729
1730EXPORT_SYMBOL(edac_mc_handle_ue_no_info);
1731
1732void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
1733 const char *msg)
1734{
1735 if (panic_on_ue)
1736 panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
1737
1738 if (log_ue)
1739 printk(KERN_WARNING
1740 "EDAC MC%d: UE - no information available: %s\n",
1741 mci->mc_idx, msg);
1742 mci->ue_noinfo_count++;
1743 mci->ue_count++;
1744}
1745
1746
1747#ifdef CONFIG_PCI
1748
1749static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
1750{
1751 int where;
1752 u16 status;
1753
1754 where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
1755 pci_read_config_word(dev, where, &status);
1756
1757 	/* If we get back 0xFFFF then we must suspect that the card has been
1758 	   pulled but the Linux PCI layer has not yet finished cleaning up.
1759 	   We don't want to report on such devices. */
1760
1761 if (status == 0xFFFF) {
1762 u32 sanity;
1763 pci_read_config_dword(dev, 0, &sanity);
1764 if (sanity == 0xFFFFFFFF)
1765 return 0;
1766 }
1767 status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
1768 PCI_STATUS_PARITY;
1769
1770 if (status)
1771 /* reset only the bits we are interested in */
1772 pci_write_config_word(dev, where, status);
1773
1774 return status;
1775}
1776
1777typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
1778
1779/* Clear any PCI parity errors logged by this device. */
1780static void edac_pci_dev_parity_clear( struct pci_dev *dev )
1781{
1782 u8 header_type;
1783
1784 get_pci_parity_status(dev, 0);
1785
1786 /* read the device TYPE, looking for bridges */
1787 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
1788
1789 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
1790 get_pci_parity_status(dev, 1);
1791}
1792
1793/*
1794 * PCI Parity polling
1795 *
1796 */
1797static void edac_pci_dev_parity_test(struct pci_dev *dev)
1798{
1799 u16 status;
1800 u8 header_type;
1801
1802 /* read the STATUS register on this device
1803 */
1804 status = get_pci_parity_status(dev, 0);
1805
1806 debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id );
1807
1808 /* check the status reg for errors */
1809 if (status) {
1810 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1811 printk(KERN_CRIT
1812 "EDAC PCI- "
1813 "Signaled System Error on %s\n",
1814 pci_name (dev));
1815
1816 if (status & (PCI_STATUS_PARITY)) {
1817 printk(KERN_CRIT
1818 "EDAC PCI- "
1819 "Master Data Parity Error on %s\n",
1820 pci_name (dev));
1821
1822 atomic_inc(&pci_parity_count);
1823 }
1824
1825 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1826 printk(KERN_CRIT
1827 "EDAC PCI- "
1828 "Detected Parity Error on %s\n",
1829 pci_name (dev));
1830
1831 atomic_inc(&pci_parity_count);
1832 }
1833 }
1834
1835 /* read the device TYPE, looking for bridges */
1836 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
1837
1838 debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id );
1839
1840 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
1841 /* On bridges, need to examine secondary status register */
1842 status = get_pci_parity_status(dev, 1);
1843
1844 debugf2("PCI SEC_STATUS= 0x%04x %s\n",
1845 status, dev->dev.bus_id );
1846
1847 /* check the secondary status reg for errors */
1848 if (status) {
1849 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1850 printk(KERN_CRIT
1851 "EDAC PCI-Bridge- "
1852 "Signaled System Error on %s\n",
1853 pci_name (dev));
1854
1855 if (status & (PCI_STATUS_PARITY)) {
1856 printk(KERN_CRIT
1857 "EDAC PCI-Bridge- "
1858 "Master Data Parity Error on %s\n",
1859 pci_name (dev));
1860
1861 atomic_inc(&pci_parity_count);
1862 }
1863
1864 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1865 printk(KERN_CRIT
1866 "EDAC PCI-Bridge- "
1867 "Detected Parity Error on %s\n",
1868 pci_name (dev));
1869
1870 atomic_inc(&pci_parity_count);
1871 }
1872 }
1873 }
1874}
1875
1876/*
1877 * check_dev_on_list: Scan for a PCI device on a white/black list
1878 * @list: an EDAC &edac_pci_device_list white/black list pointer
1879 * @free_index: index of next free entry on the list
1880 * @pci_dev: PCI Device pointer
1881 *
1882 * See if the list contains the device.
1883 *
1884 * Returns: 0 not found
1885 * 1 found on list
1886 */
1887static int check_dev_on_list(struct edac_pci_device_list *list, int free_index,
1888 struct pci_dev *dev)
1889{
1890 int i;
1891 int rc = 0; /* Assume not found */
1892 unsigned short vendor=dev->vendor;
1893 unsigned short device=dev->device;
1894
1895 /* Scan the list, looking for a vendor/device match
1896 */
1897 for (i = 0; i < free_index; i++, list++ ) {
1898 if ( (list->vendor == vendor ) &&
1899 (list->device == device )) {
1900 rc = 1;
1901 break;
1902 }
1903 }
1904
1905 return rc;
1906}
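/*
 * Editorial sketch: each list entry pairs a PCI vendor with a device
 * ID (check_dev_on_list() reads list->vendor and list->device above).
 * A hypothetical one-entry whitelist covering the Intel 82860 host
 * bridge might be scanned like this (field order assumed):
 *
 *	struct edac_pci_device_list wl[] = {
 *		{ PCI_VENDOR_ID_INTEL, 0x2531 },
 *	};
 *
 *	if (check_dev_on_list(wl, 1, dev))
 *		edac_pci_dev_parity_test(dev);
 */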
1907
1908/*
1909 * pci_dev parity list iterator
1910 * Scan the PCI device list for one iteration, looking for SERRORs
1911 * Master Parity ERRORS or Parity ERRORs on primary or secondary devices
1912 */
1913static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
1914{
1915 struct pci_dev *dev=NULL;
1916
1917 /* request for kernel access to the next PCI device, if any,
1918 * and while we are looking at it have its reference count
1919 * bumped until we are done with it
1920 */
1921 while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1922
1923 /* if whitelist exists then it has priority, so only scan those
1924 * devices on the whitelist
1925 */
1926 if (pci_whitelist_count > 0 ) {
1927 if (check_dev_on_list(pci_whitelist,
1928 pci_whitelist_count, dev))
1929 fn(dev);
1930 } else {
1931 /*
1932 			 * if no whitelist, then check if this device is
1933 * blacklisted
1934 */
1935 if (!check_dev_on_list(pci_blacklist,
1936 pci_blacklist_count, dev))
1937 fn(dev);
1938 }
1939 }
1940}
1941
1942static void do_pci_parity_check(void)
1943{
1944 unsigned long flags;
1945 int before_count;
1946
1947 debugf3("MC: " __FILE__ ": %s()\n", __func__);
1948
1949 if (!check_pci_parity)
1950 return;
1951
1952 before_count = atomic_read(&pci_parity_count);
1953
1954 /* scan all PCI devices looking for a Parity Error on devices and
1955 * bridges
1956 */
1957 local_irq_save(flags);
1958 edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
1959 local_irq_restore(flags);
1960
1961 /* Only if operator has selected panic on PCI Error */
1962 if (panic_on_pci_parity) {
1963 /* If the count is different 'after' from 'before' */
1964 if (before_count != atomic_read(&pci_parity_count))
1965 panic("EDAC: PCI Parity Error");
1966 }
1967}
1968
1969
1970static inline void clear_pci_parity_errors(void)
1971{
1972 /* Clear any PCI bus parity errors that devices initially have logged
1973 * in their registers.
1974 */
1975 edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
1976}
1977
1978
1979#else /* CONFIG_PCI */
1980
1981
1982static inline void do_pci_parity_check(void)
1983{
1984 /* no-op */
1985}
1986
1987
1988static inline void clear_pci_parity_errors(void)
1989{
1990 /* no-op */
1991}
1992
1993
1994#endif /* CONFIG_PCI */
1995
1996/*
1997 * Iterate over all MC instances and check for ECC, et al, errors
1998 */
1999static inline void check_mc_devices (void)
2000{
2001 unsigned long flags;
2002 struct list_head *item;
2003 struct mem_ctl_info *mci;
2004
2005 debugf3("MC: " __FILE__ ": %s()\n", __func__);
2006
2007 /* during poll, have interrupts off */
2008 local_irq_save(flags);
2009
2010 list_for_each(item, &mc_devices) {
2011 mci = list_entry(item, struct mem_ctl_info, link);
2012
2013 if (mci->edac_check != NULL)
2014 mci->edac_check(mci);
2015 }
2016
2017 local_irq_restore(flags);
2018}
2019
2020
2021/*
2022 * Check MC status every poll_msec.
2023 * Check PCI status every poll_msec as well.
2024 *
2025 * This is where the work gets done for edac.
2026 *
2027 * SMP safe, doesn't use NMI, and auto-rate-limits.
2028 */
2029static void do_edac_check(void)
2030{
2031
2032 debugf3("MC: " __FILE__ ": %s()\n", __func__);
2033
2034 check_mc_devices();
2035
2036 do_pci_parity_check();
2037}
2038
2039
2040/*
2041 * EDAC thread state information
2042 */
2043struct bs_thread_info
2044{
2045 struct task_struct *task;
2046 struct completion *event;
2047 char *name;
2048 void (*run)(void);
2049};
2050
2051static struct bs_thread_info bs_thread;
2052
2053/*
2054 * edac_kernel_thread
2055 * This is the kernel thread that processes edac operations
2056 * in a normal thread environment
2057 */
2058static int edac_kernel_thread(void *arg)
2059{
2060 struct bs_thread_info *thread = (struct bs_thread_info *) arg;
2061
2062 /* detach thread */
2063 daemonize(thread->name);
2064
2065 current->exit_signal = SIGCHLD;
2066 allow_signal(SIGKILL);
2067 thread->task = current;
2068
2069 /* indicate to starting task we have started */
2070 complete(thread->event);
2071
2072 /* loop forever, until we are told to stop */
2073 while(thread->run != NULL) {
2074 void (*run)(void);
2075
2076 /* call the function to check the memory controllers */
2077 run = thread->run;
2078 if (run)
2079 run();
2080
2081 if (signal_pending(current))
2082 flush_signals(current);
2083
2084 		/* ensure we are interruptible */
2085 set_current_state(TASK_INTERRUPTIBLE);
2086
2087 		/* go to sleep for the interval */
2088 schedule_timeout((HZ * poll_msec) / 1000);
2089 try_to_freeze();
2090 }
2091
2092 /* notify waiter that we are exiting */
2093 complete(thread->event);
2094
2095 return 0;
2096}
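/*
 * Editorial note: daemonize() plus signal-based shutdown is the older
 * kernel-thread idiom.  On kernels providing <linux/kthread.h> the same
 * loop would normally be written with the kthread API (a sketch, not
 * the code merged here):
 *
 *	static int edac_kernel_thread(void *arg)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_edac_check();
 *			msleep_interruptible(poll_msec);
 *		}
 *		return 0;
 *	}
 *
 *	task = kthread_run(edac_kernel_thread, NULL, "kedac");
 *	(... then on module exit ...)
 *	kthread_stop(task);
 */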
2097
2098/*
2099 * edac_mc_init
2100 * module initialization entry point
2101 */
2102static int __init edac_mc_init(void)
2103{
2104 int ret;
2105 struct completion event;
2106
2107 printk(KERN_INFO "MC: " __FILE__ " version " EDAC_MC_VERSION "\n");
2108
2109 /*
2110 * Harvest and clear any boot/initialization PCI parity errors
2111 *
2112 * FIXME: This only clears errors logged by devices present at time of
2113 * module initialization. We should also do an initial clear
2114 * of each newly hotplugged device.
2115 */
2116 clear_pci_parity_errors();
2117
2118 /* perform check for first time to harvest boot leftovers */
2119 do_edac_check();
2120
2121 	/* Create the MC sysfs entries */
2122 if (edac_sysfs_memctrl_setup()) {
2123 printk(KERN_ERR "EDAC MC: Error initializing sysfs code\n");
2124 return -ENODEV;
2125 }
2126
2127 /* Create the PCI parity sysfs entries */
2128 if (edac_sysfs_pci_setup()) {
2129 edac_sysfs_memctrl_teardown();
2130 printk(KERN_ERR "EDAC PCI: Error initializing sysfs code\n");
2131 return -ENODEV;
2132 }
2133
2134 /* Create our kernel thread */
2135 init_completion(&event);
2136 bs_thread.event = &event;
2137 bs_thread.name = "kedac";
2138 bs_thread.run = do_edac_check;
2139
2140 /* create our kernel thread */
2141 ret = kernel_thread(edac_kernel_thread, &bs_thread, CLONE_KERNEL);
2142 if (ret < 0) {
2143 /* remove the sysfs entries */
2144 edac_sysfs_memctrl_teardown();
2145 edac_sysfs_pci_teardown();
2146 return -ENOMEM;
2147 }
2148
2149 	/* wait for our kernel thread's ack that it is up and running */
2150 wait_for_completion(&event);
2151
2152 return 0;
2153}
2154
2155
2156/*
2157 * edac_mc_exit()
2158 *	module exit/termination function
2159 */
2160static void __exit edac_mc_exit(void)
2161{
2162 struct completion event;
2163
2164 debugf0("MC: " __FILE__ ": %s()\n", __func__);
2165
2166 init_completion(&event);
2167 bs_thread.event = &event;
2168
2169 /* As soon as ->run is set to NULL, the task could disappear,
2170 * so we need to hold tasklist_lock until we have sent the signal
2171 */
2172 read_lock(&tasklist_lock);
2173 bs_thread.run = NULL;
2174 send_sig(SIGKILL, bs_thread.task, 1);
2175 read_unlock(&tasklist_lock);
2176 wait_for_completion(&event);
2177
2178 /* tear down the sysfs device */
2179 edac_sysfs_memctrl_teardown();
2180 edac_sysfs_pci_teardown();
2181}
2182
2183
2184
2185
2186module_init(edac_mc_init);
2187module_exit(edac_mc_exit);
2188
2189MODULE_LICENSE("GPL");
2190MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
2191	      "Based on work by Dan Hollis et al");
2192MODULE_DESCRIPTION("Core library routines for MC reporting");
2193
2194module_param(panic_on_ue, int, 0644);
2195MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
2196module_param(check_pci_parity, int, 0644);
2197MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on");
2198module_param(panic_on_pci_parity, int, 0644);
2199MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on");
2200module_param(log_ue, int, 0644);
2201MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on");
2202module_param(log_ce, int, 0644);
2203MODULE_PARM_DESC(log_ce, "Log correctable error to console: 0=off 1=on");
2204module_param(poll_msec, int, 0644);
2205MODULE_PARM_DESC(poll_msec, "Polling period in milliseconds");
2206#ifdef CONFIG_EDAC_DEBUG
2207module_param(edac_debug_level, int, 0644);
2208MODULE_PARM_DESC(edac_debug_level, "Debug level");
2209#endif
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
new file mode 100644
index 000000000000..75ecf484a43a
--- /dev/null
+++ b/drivers/edac/edac_mc.h
@@ -0,0 +1,448 @@
1/*
2 * MC kernel module
3 * (C) 2003 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Thayne Harbaugh
8 * Based on work by Dan Hollis <goemon at anime dot net> and others.
9 * http://www.anime.net/~goemon/linux-ecc/
10 *
11 * NMI handling support added by
12 * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
13 *
14 * $Id: edac_mc.h,v 1.4.2.10 2005/10/05 00:43:44 dsp_llnl Exp $
15 *
16 */
17
18
19#ifndef _EDAC_MC_H_
20#define _EDAC_MC_H_
21
22
23#include <linux/config.h>
24#include <linux/kernel.h>
25#include <linux/types.h>
26#include <linux/module.h>
27#include <linux/spinlock.h>
28#include <linux/smp.h>
29#include <linux/pci.h>
30#include <linux/time.h>
31#include <linux/nmi.h>
32#include <linux/rcupdate.h>
33#include <linux/completion.h>
34#include <linux/kobject.h>
35
36
37#define EDAC_MC_LABEL_LEN 31
38#define MC_PROC_NAME_MAX_LEN 7
39
40#if PAGE_SHIFT < 20
41#define PAGES_TO_MiB( pages ) ( ( pages ) >> ( 20 - PAGE_SHIFT ) )
42#else /* PAGE_SHIFT > 20 */
43#define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) )
44#endif
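/*
 * Editorial worked example: with 4KiB pages (PAGE_SHIFT == 12) the
 * macro reduces to (pages >> 8), so PAGES_TO_MiB(0x40000) == 0x400:
 * 262144 pages * 4KiB = 1024 MiB.
 */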
45
46#ifdef CONFIG_EDAC_DEBUG
47extern int edac_debug_level;
48#define edac_debug_printk(level, fmt, args...) \
49do { if (level <= edac_debug_level) printk(KERN_DEBUG fmt, ##args); } while(0)
50#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
51#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
52#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
53#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
54#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
55#else /* !CONFIG_EDAC_DEBUG */
56#define debugf0( ... )
57#define debugf1( ... )
58#define debugf2( ... )
59#define debugf3( ... )
60#define debugf4( ... )
61#endif /* !CONFIG_EDAC_DEBUG */
62
63
64#define bs_xstr(s) bs_str(s)
65#define bs_str(s) #s
66#define BS_MOD_STR bs_xstr(KBUILD_BASENAME)
67
68#define BIT(x) (1 << (x))
69
70#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, PCI_DEVICE_ID_ ## vend ## _ ## dev
71
72/* memory devices */
73enum dev_type {
74 DEV_UNKNOWN = 0,
75 DEV_X1,
76 DEV_X2,
77 DEV_X4,
78 DEV_X8,
79 DEV_X16,
80 DEV_X32, /* Do these parts exist? */
81 DEV_X64 /* Do these parts exist? */
82};
83
84#define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN)
85#define DEV_FLAG_X1 BIT(DEV_X1)
86#define DEV_FLAG_X2 BIT(DEV_X2)
87#define DEV_FLAG_X4 BIT(DEV_X4)
88#define DEV_FLAG_X8 BIT(DEV_X8)
89#define DEV_FLAG_X16 BIT(DEV_X16)
90#define DEV_FLAG_X32 BIT(DEV_X32)
91#define DEV_FLAG_X64 BIT(DEV_X64)
92
93/* memory types */
94enum mem_type {
95 MEM_EMPTY = 0, /* Empty csrow */
96 MEM_RESERVED, /* Reserved csrow type */
97 MEM_UNKNOWN, /* Unknown csrow type */
98 MEM_FPM, /* Fast page mode */
99 MEM_EDO, /* Extended data out */
100 MEM_BEDO, /* Burst Extended data out */
101 MEM_SDR, /* Single data rate SDRAM */
102 MEM_RDR, /* Registered single data rate SDRAM */
103 MEM_DDR, /* Double data rate SDRAM */
104 MEM_RDDR, /* Registered Double data rate SDRAM */
105 MEM_RMBS /* Rambus DRAM */
106};
107
108#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
109#define MEM_FLAG_RESERVED BIT(MEM_RESERVED)
110#define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN)
111#define MEM_FLAG_FPM BIT(MEM_FPM)
112#define MEM_FLAG_EDO BIT(MEM_EDO)
113#define MEM_FLAG_BEDO BIT(MEM_BEDO)
114#define MEM_FLAG_SDR BIT(MEM_SDR)
115#define MEM_FLAG_RDR BIT(MEM_RDR)
116#define MEM_FLAG_DDR BIT(MEM_DDR)
117#define MEM_FLAG_RDDR BIT(MEM_RDDR)
118#define MEM_FLAG_RMBS BIT(MEM_RMBS)
119
120
121/* chipset Error Detection and Correction capabilities and mode */
122enum edac_type {
123 EDAC_UNKNOWN = 0, /* Unknown if ECC is available */
124	EDAC_NONE,	/* Doesn't support ECC */
125 EDAC_RESERVED, /* Reserved ECC type */
126 EDAC_PARITY, /* Detects parity errors */
127 EDAC_EC, /* Error Checking - no correction */
128 EDAC_SECDED, /* Single bit error correction, Double detection */
129 EDAC_S2ECD2ED, /* Chipkill x2 devices - do these exist? */
130 EDAC_S4ECD4ED, /* Chipkill x4 devices */
131 EDAC_S8ECD8ED, /* Chipkill x8 devices */
132 EDAC_S16ECD16ED, /* Chipkill x16 devices */
133};
134
135#define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN)
136#define EDAC_FLAG_NONE BIT(EDAC_NONE)
137#define EDAC_FLAG_PARITY BIT(EDAC_PARITY)
138#define EDAC_FLAG_EC BIT(EDAC_EC)
139#define EDAC_FLAG_SECDED BIT(EDAC_SECDED)
140#define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED)
141#define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED)
142#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED)
143#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED)
144
145
146/* scrubbing capabilities */
147enum scrub_type {
148 SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */
149 SCRUB_NONE, /* No scrubber */
150 SCRUB_SW_PROG, /* SW progressive (sequential) scrubbing */
151 SCRUB_SW_SRC, /* Software scrub only errors */
152 SCRUB_SW_PROG_SRC, /* Progressive software scrub from an error */
153 SCRUB_SW_TUNABLE, /* Software scrub frequency is tunable */
154 SCRUB_HW_PROG, /* HW progressive (sequential) scrubbing */
155 SCRUB_HW_SRC, /* Hardware scrub only errors */
156 SCRUB_HW_PROG_SRC, /* Progressive hardware scrub from an error */
157 SCRUB_HW_TUNABLE /* Hardware scrub frequency is tunable */
158};
159
160#define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG)
161#define SCRUB_FLAG_SW_SRC	BIT(SCRUB_SW_SRC)
162#define SCRUB_FLAG_SW_PROG_SRC	BIT(SCRUB_SW_PROG_SRC)
163#define SCRUB_FLAG_SW_TUN	BIT(SCRUB_SW_TUNABLE)
164#define SCRUB_FLAG_HW_PROG	BIT(SCRUB_HW_PROG)
165#define SCRUB_FLAG_HW_SRC	BIT(SCRUB_HW_SRC)
166#define SCRUB_FLAG_HW_PROG_SRC	BIT(SCRUB_HW_PROG_SRC)
167#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE)
168
169enum mci_sysfs_status {
170 MCI_SYSFS_INACTIVE = 0, /* sysfs entries NOT registered */
171 MCI_SYSFS_ACTIVE /* sysfs entries ARE registered */
172};
173
174/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
175
176/*
177 * There are several things to be aware of that aren't at all obvious:
178 *
179 *
180 * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
181 *
182 * These are some of the many terms that are thrown about that don't always
183 * mean what people think they mean (Inconceivable!). In the interest of
184 * creating a common ground for discussion, terms and their definitions
185 * will be established.
186 *
187 * Memory devices: The individual chip on a memory stick. These devices
188 * commonly output 4 and 8 bits each. Grouping several
189 * of these in parallel provides 64 bits which is common
190 * for a memory stick.
191 *
192 * Memory Stick:	A printed circuit board that aggregates multiple
193 *			memory devices in parallel.  This is the atomic
194 *			memory component that is purchasable by Joe consumer
195 * and loaded into a memory socket.
196 *
197 * Socket: A physical connector on the motherboard that accepts
198 * a single memory stick.
199 *
200 * Channel: Set of memory devices on a memory stick that must be
201 * grouped in parallel with one or more additional
202 * channels from other memory sticks. This parallel
203 *			grouping of the output from multiple channels is
204 *			necessary for the smallest granularity of memory access.
205 * Some memory controllers are capable of single channel -
206 * which means that memory sticks can be loaded
207 * individually. Other memory controllers are only
208 * capable of dual channel - which means that memory
209 * sticks must be loaded as pairs (see "socket set").
210 *
211 * Chip-select row:	All of the memory devices that are selected together
212 * for a single, minimum grain of memory access.
213 * This selects all of the parallel memory devices across
214 * all of the parallel channels. Common chip-select rows
215 * for single channel are 64 bits, for dual channel 128
216 * bits.
217 *
218 * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memory.
219 *			Motherboards commonly drive two chip-select pins to
220 *			a memory stick. A single-ranked stick will occupy
221 * only one of those rows. The other will be unused.
222 *
223 * Double-Ranked stick: A double-ranked stick has two chip-select rows which
224 * access different sets of memory devices. The two
225 * rows cannot be accessed concurrently.
226 *
227 * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
228 * A double-sided stick has two chip-select rows which
229 * access different sets of memory devices. The two
230 * rows cannot be accessed concurrently. "Double-sided"
231 * is irrespective of the memory devices being mounted
232 * on both sides of the memory stick.
233 *
234 * Socket set:		All of the memory sticks that are required for
235 * a single memory access or all of the memory sticks
236 * spanned by a chip-select row. A single socket set
237 * has two chip-select rows and if double-sided sticks
238 * are used these will occupy those chip-select rows.
239 *
240 * Bank: This term is avoided because it is unclear when
241 * needing to distinguish between chip-select rows and
242 * socket sets.
243 *
244 * Controller pages:
245 *
246 * Physical pages:
247 *
248 * Virtual pages:
249 *
250 *
251 * STRUCTURE ORGANIZATION AND CHOICES
252 *
253 *
254 *
255 * PS - I enjoyed writing all that about as much as you enjoyed reading it.
256 */
257
258
259struct channel_info {
260 int chan_idx; /* channel index */
261 u32 ce_count; /* Correctable Errors for this CHANNEL */
262 char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
263 struct csrow_info *csrow; /* the parent */
264};
265
266
267struct csrow_info {
268 unsigned long first_page; /* first page number in dimm */
269 unsigned long last_page; /* last page number in dimm */
270 unsigned long page_mask; /* used for interleaving -
271 0UL for non intlv */
272 u32 nr_pages; /* number of pages in csrow */
273 u32 grain; /* granularity of reported error in bytes */
274 int csrow_idx; /* the chip-select row */
275 enum dev_type dtype; /* memory device type */
276 u32 ue_count; /* Uncorrectable Errors for this csrow */
277 u32 ce_count; /* Correctable Errors for this csrow */
278 enum mem_type mtype; /* memory csrow type */
279 enum edac_type edac_mode; /* EDAC mode for this csrow */
280 struct mem_ctl_info *mci; /* the parent */
281
282 struct kobject kobj; /* sysfs kobject for this csrow */
283
284 /* FIXME the number of CHANNELs might need to become dynamic */
285 u32 nr_channels;
286 struct channel_info *channels;
287};
288
289
290struct mem_ctl_info {
291 struct list_head link; /* for global list of mem_ctl_info structs */
292 unsigned long mtype_cap; /* memory types supported by mc */
293 unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */
294 unsigned long edac_cap; /* configuration capabilities - this is
295 closely related to edac_ctl_cap. The
296 difference is that the controller
297 may be capable of s4ecd4ed which would
298 be listed in edac_ctl_cap, but if
299 channels aren't capable of s4ecd4ed then the
300 edac_cap would not have that capability. */
301 unsigned long scrub_cap; /* chipset scrub capabilities */
302 enum scrub_type scrub_mode; /* current scrub mode */
303
304 enum mci_sysfs_status sysfs_active; /* status of sysfs */
305
306 /* pointer to edac checking routine */
307 void (*edac_check) (struct mem_ctl_info * mci);
308 /*
309 * Remaps memory pages: controller pages to physical pages.
310 * For most MC's, this will be NULL.
311 */
312 /* FIXME - why not send the phys page to begin with? */
313 unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
314 unsigned long page);
315 int mc_idx;
316 int nr_csrows;
317 struct csrow_info *csrows;
318 /*
319 * FIXME - what about controllers on other busses? - IDs must be
320 * unique. pdev pointer should be sufficiently unique, but
321 * BUS:SLOT.FUNC numbers may not be unique.
322 */
323 struct pci_dev *pdev;
324 const char *mod_name;
325 const char *mod_ver;
326 const char *ctl_name;
327 char proc_name[MC_PROC_NAME_MAX_LEN + 1];
328 void *pvt_info;
329 u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */
330 u32 ce_noinfo_count; /* Correctable Errors w/o info */
331 u32 ue_count; /* Total Uncorrectable Errors for this MC */
332 u32 ce_count; /* Total Correctable Errors for this MC */
333 unsigned long start_time; /* mci load start time (in jiffies) */
334
335 /* this stuff is for safe removal of mc devices from global list while
336 * NMI handlers may be traversing list
337 */
338 struct rcu_head rcu;
339 struct completion complete;
340
341 /* edac sysfs device control */
342 struct kobject edac_mci_kobj;
343};
344
345
346
347/* write all or some bits in a byte-register*/
348static inline void pci_write_bits8(struct pci_dev *pdev, int offset,
349 u8 value, u8 mask)
350{
351 if (mask != 0xff) {
352 u8 buf;
353 pci_read_config_byte(pdev, offset, &buf);
354 value &= mask;
355 buf &= ~mask;
356 value |= buf;
357 }
358 pci_write_config_byte(pdev, offset, value);
359}
360
361
362/* write all or some bits in a word-register*/
363static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
364 u16 value, u16 mask)
365{
366 if (mask != 0xffff) {
367 u16 buf;
368 pci_read_config_word(pdev, offset, &buf);
369 value &= mask;
370 buf &= ~mask;
371 value |= buf;
372 }
373 pci_write_config_word(pdev, offset, value);
374}
375
376
377/* write all or some bits in a dword-register*/
378static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
379 u32 value, u32 mask)
380{
381	if (mask != 0xffffffff) {
382 u32 buf;
383 pci_read_config_dword(pdev, offset, &buf);
384 value &= mask;
385 buf &= ~mask;
386 value |= buf;
387 }
388 pci_write_config_dword(pdev, offset, value);
389}
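/*
 * Editorial usage note: these helpers read-modify-write the register so
 * only the bits selected by the mask are changed.  The chipset drivers
 * below use them to clear write-one-to-clear error bits, e.g.
 *
 *	pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
 *
 * which asserts only the two low error bits while the remaining bits of
 * ERRSTS are written back with the values just read.
 */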
390
391
392#ifdef CONFIG_EDAC_DEBUG
393void edac_mc_dump_channel(struct channel_info *chan);
394void edac_mc_dump_mci(struct mem_ctl_info *mci);
395void edac_mc_dump_csrow(struct csrow_info *csrow);
396#endif /* CONFIG_EDAC_DEBUG */
397
398extern int edac_mc_add_mc(struct mem_ctl_info *mci);
399extern int edac_mc_del_mc(struct mem_ctl_info *mci);
400
401extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
402 unsigned long page);
403
404extern struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev
405 *pdev);
406
407extern void edac_mc_scrub_block(unsigned long page,
408 unsigned long offset, u32 size);
409
410/*
411 * The no info errors are used when error overflows are reported.
412 * There are a limited number of error logging registers that can
413 * be exhausted.  When all registers are exhausted and an additional
414 * error occurs then an error overflow register records that an
415 * error occurred and the type of error, but doesn't have any
416 * further information. The ce/ue versions make for cleaner
417 * reporting logic and function interface - reduces conditional
418 * statement clutter and extra function arguments.
419 */
420extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
421 unsigned long page_frame_number,
422 unsigned long offset_in_page,
423 unsigned long syndrome,
424 int row, int channel, const char *msg);
425
426extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
427 const char *msg);
428
429extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
430 unsigned long page_frame_number,
431 unsigned long offset_in_page,
432 int row, const char *msg);
433
434extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
435 const char *msg);
436
437/*
438 * This kmalloc's and initializes all the structures.
439 * Can't be used if all structures don't have the same lifetime.
440 */
441extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt,
442 unsigned nr_csrows, unsigned nr_chans);
443
444/* Free an mc previously allocated by edac_mc_alloc() */
445extern void edac_mc_free(struct mem_ctl_info *mci);
446
447
448#endif /* _EDAC_MC_H_ */
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
new file mode 100644
index 000000000000..52596e75f9c2
--- /dev/null
+++ b/drivers/edac/i82860_edac.c
@@ -0,0 +1,299 @@
1/*
2 * Intel 82860 Memory Controller kernel module
3 * (C) 2005 Red Hat (http://www.redhat.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Ben Woodard <woodard@redhat.com>
8 * shamelessly copied from and based upon the edac_i82875 driver
9 * by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
10 */
11
12
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/pci.h>
17#include <linux/pci_ids.h>
18#include <linux/slab.h>
19#include "edac_mc.h"
20
21
22#ifndef PCI_DEVICE_ID_INTEL_82860_0
23#define PCI_DEVICE_ID_INTEL_82860_0 0x2531
24#endif /* PCI_DEVICE_ID_INTEL_82860_0 */
25
26#define I82860_MCHCFG 0x50
27#define I82860_GBA 0x60
28#define I82860_GBA_MASK 0x7FF
29#define I82860_GBA_SHIFT 24
30#define I82860_ERRSTS 0xC8
31#define I82860_EAP 0xE4
32#define I82860_DERRCTL_STS 0xE2
33
34enum i82860_chips {
35 I82860 = 0,
36};
37
38struct i82860_dev_info {
39 const char *ctl_name;
40};
41
42struct i82860_error_info {
43 u16 errsts;
44 u32 eap;
45 u16 derrsyn;
46 u16 errsts2;
47};
48
49static const struct i82860_dev_info i82860_devs[] = {
50 [I82860] = {
51 .ctl_name = "i82860"},
52};
53
54static struct pci_dev *mci_pdev = NULL;	/* init dev: in case the AGP code
55						   has already registered a driver */
56
57static int i82860_registered = 1;
58
59static void i82860_get_error_info (struct mem_ctl_info *mci,
60 struct i82860_error_info *info)
61{
62 /*
63 * This is a mess because there is no atomic way to read all the
64	 * registers at once, and the registers can change underneath us,
65	 * with a CE being overwritten by a UE.
66 */
67 pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts);
68 pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
69 pci_read_config_word(mci->pdev, I82860_DERRCTL_STS, &info->derrsyn);
70 pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts2);
71
72 pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
73
74 /*
75 * If the error is the same for both reads then the first set of reads
76 * is valid. If there is a change then there is a CE no info and the
77 * second set of reads is valid and should be UE info.
78 */
79 if (!(info->errsts2 & 0x0003))
80 return;
81 if ((info->errsts ^ info->errsts2) & 0x0003) {
82 pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
83 pci_read_config_word(mci->pdev, I82860_DERRCTL_STS,
84 &info->derrsyn);
85 }
86}
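/*
 * Editorial worked example of the double-read strategy above: if a UE
 * lands between the two ERRSTS reads, errsts reports the earlier CE
 * while errsts2 reports the UE.  The XOR test in the caller then logs
 * a no-info CE and trusts the re-read EAP/DERRCTL_STS values as the
 * address and syndrome of the UE.
 */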
87
88static int i82860_process_error_info (struct mem_ctl_info *mci,
89 struct i82860_error_info *info, int handle_errors)
90{
91 int row;
92
93 if (!(info->errsts2 & 0x0003))
94 return 0;
95
96 if (!handle_errors)
97 return 1;
98
99 if ((info->errsts ^ info->errsts2) & 0x0003) {
100 edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
101 info->errsts = info->errsts2;
102 }
103
104 info->eap >>= PAGE_SHIFT;
105 row = edac_mc_find_csrow_by_page(mci, info->eap);
106
107 if (info->errsts & 0x0002)
108 edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
109 else
110 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
111 				  0, "i82860 CE");
112
113 return 1;
114}
115
116static void i82860_check(struct mem_ctl_info *mci)
117{
118 struct i82860_error_info info;
119
120 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
121 i82860_get_error_info(mci, &info);
122 i82860_process_error_info(mci, &info, 1);
123}
124
125static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
126{
127 int rc = -ENODEV;
128 int index;
129 struct mem_ctl_info *mci = NULL;
130 unsigned long last_cumul_size;
131
132 u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
133
134 /* RDRAM has channels but these don't map onto the abstractions that
135 edac uses.
136 The device groups from the GRA registers seem to map reasonably
137 well onto the notion of a chip select row.
138 	   There are 16 GRA registers, and since they map to physical
139 	   devices rather than to channels, we are going to model one
140 	   channel per device group.
141 */
142 mci = edac_mc_alloc(0, 16, 1);
143 if (!mci)
144 return -ENOMEM;
145
146 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
147
148 mci->pdev = pdev;
149 mci->mtype_cap = MEM_FLAG_DDR;
150
151
152 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
153	/* I'm not sure about this, but I think all RDRAM is SECDED */
154 mci->edac_cap = EDAC_FLAG_SECDED;
155 /* adjust FLAGS */
156
157 mci->mod_name = BS_MOD_STR;
158 mci->mod_ver = "$Revision: 1.1.2.6 $";
159 mci->ctl_name = i82860_devs[dev_idx].ctl_name;
160 mci->edac_check = i82860_check;
161 mci->ctl_page_to_phys = NULL;
162
163 pci_read_config_word(mci->pdev, I82860_MCHCFG, &mchcfg_ddim);
164 mchcfg_ddim = mchcfg_ddim & 0x180;
165
166 /*
167 * The group row boundary (GRA) reg values are boundary address
168 * for each DRAM row with a granularity of 16MB. GRA regs are
169 	 * cumulative; therefore GRA15 will contain the total memory contained
170 	 * in all sixteen rows.
171 */
172 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
173 u16 value;
174 u32 cumul_size;
175 struct csrow_info *csrow = &mci->csrows[index];
176
177 pci_read_config_word(mci->pdev, I82860_GBA + index * 2,
178 &value);
179
180 cumul_size = (value & I82860_GBA_MASK) <<
181 (I82860_GBA_SHIFT - PAGE_SHIFT);
182 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
183 __func__, index, cumul_size);
184 if (cumul_size == last_cumul_size)
185 continue; /* not populated */
186
187 csrow->first_page = last_cumul_size;
188 csrow->last_page = cumul_size - 1;
189 csrow->nr_pages = cumul_size - last_cumul_size;
190 last_cumul_size = cumul_size;
191 		csrow->grain = 1 << 12;	/* I82860_EAP has 4KiB resolution */
192 csrow->mtype = MEM_RMBS;
193 csrow->dtype = DEV_UNKNOWN;
194 csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
195 }
196
197 /* clear counters */
198 pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
199
200 if (edac_mc_add_mc(mci)) {
201 debugf3("MC: " __FILE__
202 ": %s(): failed edac_mc_add_mc()\n",
203 __func__);
204 edac_mc_free(mci);
205 } else {
206 /* get this far and it's successful */
207 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
208 rc = 0;
209 }
210 return rc;
211}
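/*
 * Editorial worked example for the cumulative-boundary loop above: if
 * GBA0 reads 0x10 and GBA1 reads 0x20, then with 16MiB granularity and
 * 4KiB pages row 0 spans pages 0x0-0xffff (256MiB) and row 1 spans
 * pages 0x10000-0x1ffff; a register equal to its predecessor marks an
 * unpopulated row and is skipped.
 */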
212
213/* returns count (>= 0), or negative on error */
214static int __devinit i82860_init_one(struct pci_dev *pdev,
215 const struct pci_device_id *ent)
216{
217 int rc;
218
219 debugf0("MC: " __FILE__ ": %s()\n", __func__);
220
221 printk(KERN_INFO "i82860 init one\n");
222 if(pci_enable_device(pdev) < 0)
223 return -EIO;
224 rc = i82860_probe1(pdev, ent->driver_data);
225 if(rc == 0)
226 mci_pdev = pci_dev_get(pdev);
227 return rc;
228}
229
230static void __devexit i82860_remove_one(struct pci_dev *pdev)
231{
232 struct mem_ctl_info *mci;
233
234 debugf0(__FILE__ ": %s()\n", __func__);
235
236 mci = edac_mc_find_mci_by_pdev(pdev);
237 if ((mci != NULL) && (edac_mc_del_mc(mci) == 0))
238 edac_mc_free(mci);
239}
240
241static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
242 {PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
243 I82860},
244 {0,} /* 0 terminated list. */
245};
246
247MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
248
249static struct pci_driver i82860_driver = {
250 .name = BS_MOD_STR,
251 .probe = i82860_init_one,
252 .remove = __devexit_p(i82860_remove_one),
253 .id_table = i82860_pci_tbl,
254};
255
256static int __init i82860_init(void)
257{
258 int pci_rc;
259
260 debugf3("MC: " __FILE__ ": %s()\n", __func__);
261 if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
262 return pci_rc;
263
264 if (!mci_pdev) {
265 i82860_registered = 0;
266 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
267 PCI_DEVICE_ID_INTEL_82860_0, NULL);
268 if (mci_pdev == NULL) {
269 debugf0("860 pci_get_device fail\n");
270 return -ENODEV;
271 }
272 pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
273 if (pci_rc < 0) {
274 debugf0("860 init fail\n");
275 pci_dev_put(mci_pdev);
276 return -ENODEV;
277 }
278 }
279 return 0;
280}
281
282static void __exit i82860_exit(void)
283{
284 debugf3("MC: " __FILE__ ": %s()\n", __func__);
285
286 pci_unregister_driver(&i82860_driver);
287 if (!i82860_registered) {
288 i82860_remove_one(mci_pdev);
289 pci_dev_put(mci_pdev);
290 }
291}
292
293module_init(i82860_init);
294module_exit(i82860_exit);
295
296MODULE_LICENSE("GPL");
297MODULE_AUTHOR
298    ("Red Hat Inc. (http://www.redhat.com) Ben Woodard <woodard@redhat.com>");
299MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
new file mode 100644
index 000000000000..009c08fe5d69
--- /dev/null
+++ b/drivers/edac/i82875p_edac.c
@@ -0,0 +1,532 @@
1/*
2 * Intel D82875P Memory Controller kernel module
3 * (C) 2003 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Thayne Harbaugh
8 * Contributors:
9 * Wang Zhenyu at intel.com
10 *
11 * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
12 *
13 * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
14 */
15
16
17#include <linux/config.h>
18#include <linux/module.h>
19#include <linux/init.h>
20
21#include <linux/pci.h>
22#include <linux/pci_ids.h>
23
24#include <linux/slab.h>
25
26#include "edac_mc.h"
27
28
29#ifndef PCI_DEVICE_ID_INTEL_82875_0
30#define PCI_DEVICE_ID_INTEL_82875_0 0x2578
31#endif /* PCI_DEVICE_ID_INTEL_82875_0 */
32
33#ifndef PCI_DEVICE_ID_INTEL_82875_6
34#define PCI_DEVICE_ID_INTEL_82875_6 0x257e
35#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
36
37
38/* four csrows in dual channel, eight in single channel */
39#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
40
41
42/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
43#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
44 *
45 * 31:12 block address
46 * 11:0 reserved
47 */
48
49#define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
50 *
51 * 7:0 DRAM ECC Syndrome
52 */
53
54#define I82875P_DES 0x5d /* DRAM Error Status (8b)
55 *
56 * 7:1 reserved
57 * 0 Error channel 0/1
58 */
59
60#define I82875P_ERRSTS 0xc8 /* Error Status Register (16b)
61 *
62 * 15:10 reserved
63 * 9 non-DRAM lock error (ndlock)
64 * 8 Sftwr Generated SMI
65 * 7 ECC UE
66 * 6 reserved
67 * 5 MCH detects unimplemented cycle
68 * 4 AGP access outside GA
69 * 3 Invalid AGP access
70 * 2 Invalid GA translation table
71 * 1 Unsupported AGP command
72 * 0 ECC CE
73 */
74
75#define I82875P_ERRCMD 0xca /* Error Command (16b)
76 *
77 * 15:10 reserved
78 * 9 SERR on non-DRAM lock
79 * 8 SERR on ECC UE
80 * 7 SERR on ECC CE
81 * 6 target abort on high exception
82 * 5 detect unimplemented cyc
83 * 4 AGP access outside of GA
84 * 3 SERR on invalid AGP access
85 * 2 invalid translation table
86 * 1 SERR on unsupported AGP command
87 * 0 reserved
88 */
89
90
91/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
92#define I82875P_PCICMD6 0x04 /* PCI Command Register (16b)
93 *
94 * 15:10 reserved
95 * 9 fast back-to-back - ro 0
96 * 8 SERR enable - ro 0
97 * 7 addr/data stepping - ro 0
98 * 6 parity err enable - ro 0
99 * 5 VGA palette snoop - ro 0
100 * 4 mem wr & invalidate - ro 0
101 * 3 special cycle - ro 0
102 * 2 bus master - ro 0
103 * 1 mem access dev6 - 0(dis),1(en)
104 * 0 IO access dev3 - 0(dis),1(en)
105 */
106
107#define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b)
108 *
109 * 31:12 mem base addr [31:12]
110 * 11:4 address mask - ro 0
111 * 3 prefetchable - ro 0(non),1(pre)
112 * 2:1 mem type - ro 0
113 * 0 mem space - ro 0
114 */
115
116/* Intel 82875p MMIO register space - device 0 function 0 - MMR space */
117
118#define I82875P_DRB_SHIFT 26 /* 64MiB grain */
119#define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8)
120 *
121 * 7 reserved
122 * 6:0 64MiB row boundary addr
123 */
124
125#define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8)
126 *
127 * 7 reserved
128 * 6:4 row attr row 1
129 * 3 reserved
130 * 2:0 row attr row 0
131 *
132 * 000 = 4KiB
133 * 001 = 8KiB
134 * 010 = 16KiB
135 * 011 = 32KiB
136 */
137
138#define I82875P_DRC 0x68 /* DRAM Controller Mode (32b)
139 *
140 * 31:30 reserved
141 * 29 init complete
142 * 28:23 reserved
143 * 22:21 nr chan 00=1,01=2
144 * 20 reserved
145 * 19:18 Data Integ Mode 00=none,01=ecc
146 * 17:11 reserved
147 * 10:8 refresh mode
148 * 7 reserved
149 * 6:4 mode select
150 * 3:2 reserved
151 * 1:0 DRAM type 01=DDR
152 */
153
154
155enum i82875p_chips {
156 I82875P = 0,
157};
158
159
160struct i82875p_pvt {
161 struct pci_dev *ovrfl_pdev;
162 void *ovrfl_window;
163};
164
165
166struct i82875p_dev_info {
167 const char *ctl_name;
168};
169
170
171struct i82875p_error_info {
172 u16 errsts;
173 u32 eap;
174 u8 des;
175 u8 derrsyn;
176 u16 errsts2;
177};
178
179
180static const struct i82875p_dev_info i82875p_devs[] = {
181 [I82875P] = {
182 .ctl_name = "i82875p"},
183};
184
185static struct pci_dev *mci_pdev = NULL;	/* init dev: in case the AGP code
186						   has already registered a driver */
187static int i82875p_registered = 1;
188
189static void i82875p_get_error_info (struct mem_ctl_info *mci,
190 struct i82875p_error_info *info)
191{
192 /*
193 * This is a mess because there is no atomic way to read all the
194	 * registers at once, and the registers can change underneath us,
195	 * with a CE being overwritten by a UE.
196 */
197 pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts);
198 pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
199 pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
200 pci_read_config_byte(mci->pdev, I82875P_DERRSYN, &info->derrsyn);
201 pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts2);
202
203 pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
204
205 /*
206	 * If the error is the same for both reads then
207 * the first set of reads is valid. If there is a change then
208 * there is a CE no info and the second set of reads is valid
209 * and should be UE info.
210 */
211 if (!(info->errsts2 & 0x0081))
212 return;
213 if ((info->errsts ^ info->errsts2) & 0x0081) {
214 pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
215 pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
216 pci_read_config_byte(mci->pdev, I82875P_DERRSYN,
217 &info->derrsyn);
218 }
219}
220
221static int i82875p_process_error_info (struct mem_ctl_info *mci,
222 struct i82875p_error_info *info, int handle_errors)
223{
224 int row, multi_chan;
225
226 multi_chan = mci->csrows[0].nr_channels - 1;
227
228 if (!(info->errsts2 & 0x0081))
229 return 0;
230
231 if (!handle_errors)
232 return 1;
233
234 if ((info->errsts ^ info->errsts2) & 0x0081) {
235 edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
236 info->errsts = info->errsts2;
237 }
238
239 info->eap >>= PAGE_SHIFT;
240 row = edac_mc_find_csrow_by_page(mci, info->eap);
241
242 if (info->errsts & 0x0080)
243 edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
244 else
245 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
246 multi_chan ? (info->des & 0x1) : 0,
247 "i82875p CE");
248
249 return 1;
250}
251
252
253static void i82875p_check(struct mem_ctl_info *mci)
254{
255 struct i82875p_error_info info;
256
257 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
258 i82875p_get_error_info(mci, &info);
259 i82875p_process_error_info(mci, &info, 1);
260}
261
262
263#ifdef CONFIG_PROC_FS
264extern int pci_proc_attach_device(struct pci_dev *);
265#endif
266
267static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
268{
269 int rc = -ENODEV;
270 int index;
271 struct mem_ctl_info *mci = NULL;
272 struct i82875p_pvt *pvt = NULL;
273 unsigned long last_cumul_size;
274 struct pci_dev *ovrfl_pdev;
275 void __iomem *ovrfl_window = NULL;
276
277 u32 drc;
278 u32 drc_chan; /* Number of channels 0=1chan,1=2chan */
279 u32 nr_chans;
280 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
281
282 debugf0("MC: " __FILE__ ": %s()\n", __func__);
283
284 ovrfl_pdev = pci_find_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
285
286 if (!ovrfl_pdev) {
287 /*
288 * Intel tells BIOS developers to hide device 6 which
289 * configures the overflow device access containing
290 * the DRBs - this is where we expose device 6.
291 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
292 */
293 pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
294 ovrfl_pdev =
295 pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
296 if (!ovrfl_pdev)
297 goto fail;
298 }
299#ifdef CONFIG_PROC_FS
300 if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) {
301 printk(KERN_ERR "MC: " __FILE__
302 ": %s(): Failed to attach overflow device\n",
303 __func__);
304 goto fail;
305 }
306#endif /* CONFIG_PROC_FS */
307 if (pci_enable_device(ovrfl_pdev)) {
308 printk(KERN_ERR "MC: " __FILE__
309 ": %s(): Failed to enable overflow device\n",
310 __func__);
311 goto fail;
312 }
313
314 if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) {
315#ifdef CORRECT_BIOS
316 goto fail;
317#endif
318 }
319 /* cache is irrelevant for PCI bus reads/writes */
320 ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0),
321 pci_resource_len(ovrfl_pdev, 0));
322
323 if (!ovrfl_window) {
324 printk(KERN_ERR "MC: " __FILE__
325 ": %s(): Failed to ioremap bar6\n", __func__);
326 goto fail;
327 }
328
329 /* need to find out the number of channels */
330 drc = readl(ovrfl_window + I82875P_DRC);
331 drc_chan = ((drc >> 21) & 0x1);
332 nr_chans = drc_chan + 1;
333 drc_ddim = (drc >> 18) & 0x1;
334
335 mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
336 nr_chans);
337
338 if (!mci) {
339 rc = -ENOMEM;
340 goto fail;
341 }
342
343 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
344
345 mci->pdev = pdev;
346 mci->mtype_cap = MEM_FLAG_DDR;
347
348 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
349 mci->edac_cap = EDAC_FLAG_UNKNOWN;
350 /* adjust FLAGS */
351
352 mci->mod_name = BS_MOD_STR;
353 mci->mod_ver = "$Revision: 1.5.2.11 $";
354 mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
355 mci->edac_check = i82875p_check;
356 mci->ctl_page_to_phys = NULL;
357
358 debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
359
360 pvt = (struct i82875p_pvt *) mci->pvt_info;
361 pvt->ovrfl_pdev = ovrfl_pdev;
362 pvt->ovrfl_window = ovrfl_window;
363
364 /*
365 * The dram row boundary (DRB) reg values are boundary address
366 * for each DRAM row with a granularity of 32 or 64MB (single/dual
367 * channel operation). DRB regs are cumulative; therefore DRB7 will
368 * contain the total memory contained in all eight rows.
369 */
370 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
371 u8 value;
372 u32 cumul_size;
373 struct csrow_info *csrow = &mci->csrows[index];
374
375 value = readb(ovrfl_window + I82875P_DRB + index);
376 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
377 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
378 __func__, index, cumul_size);
379 if (cumul_size == last_cumul_size)
380 continue; /* not populated */
381
382 csrow->first_page = last_cumul_size;
383 csrow->last_page = cumul_size - 1;
384 csrow->nr_pages = cumul_size - last_cumul_size;
385 last_cumul_size = cumul_size;
386		csrow->grain = 1 << 12;	/* I82875P_EAP has 4KiB resolution */
387 csrow->mtype = MEM_DDR;
388 csrow->dtype = DEV_UNKNOWN;
389 csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
390 }
391
392 /* clear counters */
393 pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
394
395 if (edac_mc_add_mc(mci)) {
396 debugf3("MC: " __FILE__
397 ": %s(): failed edac_mc_add_mc()\n", __func__);
398 goto fail;
399 }
400
401 /* get this far and it's successful */
402 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
403 return 0;
404
405 fail:
406 if (mci)
407 edac_mc_free(mci);
408
409 if (ovrfl_window)
410 iounmap(ovrfl_window);
411
412 if (ovrfl_pdev) {
413 pci_release_regions(ovrfl_pdev);
414 pci_disable_device(ovrfl_pdev);
415 }
416
417 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
418 return rc;
419}
420
421
422/* returns count (>= 0), or negative on error */
423static int __devinit i82875p_init_one(struct pci_dev *pdev,
424 const struct pci_device_id *ent)
425{
426 int rc;
427
428 debugf0("MC: " __FILE__ ": %s()\n", __func__);
429
430 printk(KERN_INFO "i82875p init one\n");
431 if(pci_enable_device(pdev) < 0)
432 return -EIO;
433 rc = i82875p_probe1(pdev, ent->driver_data);
434 if (mci_pdev == NULL)
435 mci_pdev = pci_dev_get(pdev);
436 return rc;
437}
438
439
440static void __devexit i82875p_remove_one(struct pci_dev *pdev)
441{
442 struct mem_ctl_info *mci;
443 struct i82875p_pvt *pvt = NULL;
444
445 debugf0(__FILE__ ": %s()\n", __func__);
446
447 if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
448 return;
449
450 pvt = (struct i82875p_pvt *) mci->pvt_info;
451 if (pvt->ovrfl_window)
452 iounmap(pvt->ovrfl_window);
453
454 if (pvt->ovrfl_pdev) {
455#ifdef CORRECT_BIOS
456 pci_release_regions(pvt->ovrfl_pdev);
457#endif /*CORRECT_BIOS */
458 pci_disable_device(pvt->ovrfl_pdev);
459 pci_dev_put(pvt->ovrfl_pdev);
460 }
461
462 if (edac_mc_del_mc(mci))
463 return;
464
465 edac_mc_free(mci);
466}
467
468
469static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
470 {PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
471 I82875P},
472 {0,} /* 0 terminated list. */
473};
474
475MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
476
477
478static struct pci_driver i82875p_driver = {
479 .name = BS_MOD_STR,
480 .probe = i82875p_init_one,
481 .remove = __devexit_p(i82875p_remove_one),
482 .id_table = i82875p_pci_tbl,
483};
484
485
486static int __init i82875p_init(void)
487{
488 int pci_rc;
489
490 debugf3("MC: " __FILE__ ": %s()\n", __func__);
491 pci_rc = pci_register_driver(&i82875p_driver);
492 if (pci_rc < 0)
493 return pci_rc;
494 if (mci_pdev == NULL) {
495 i82875p_registered = 0;
496 mci_pdev =
497 pci_get_device(PCI_VENDOR_ID_INTEL,
498 PCI_DEVICE_ID_INTEL_82875_0, NULL);
499 if (!mci_pdev) {
500 debugf0("875p pci_get_device fail\n");
501 return -ENODEV;
502 }
503 pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
504 if (pci_rc < 0) {
505 debugf0("875p init fail\n");
506 pci_dev_put(mci_pdev);
507 return -ENODEV;
508 }
509 }
510 return 0;
511}
512
513
514static void __exit i82875p_exit(void)
515{
516 debugf3("MC: " __FILE__ ": %s()\n", __func__);
517
518 pci_unregister_driver(&i82875p_driver);
519 if (!i82875p_registered) {
520 i82875p_remove_one(mci_pdev);
521 pci_dev_put(mci_pdev);
522 }
523}
524
525
526module_init(i82875p_init);
527module_exit(i82875p_exit);
528
529
530MODULE_LICENSE("GPL");
531MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
532MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
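The cumulative DRB arithmetic in i82875p_probe1() above is easiest to check with concrete numbers. Below is a minimal userspace sketch of the same loop with hypothetical DRB readings; it assumes I82875P_DRB_SHIFT is 26 and PAGE_SHIFT is 12, so one DRB unit is 64 MiB, i.e. value << 14 pages of 4 KiB.

#include <stdio.h>

#define DRB_SHIFT	26	/* assumed I82875P_DRB_SHIFT */
#define PG_SHIFT	12	/* assumed PAGE_SHIFT */

int main(void)
{
	/* hypothetical cumulative DRB readings: rows 0 and 1 hold
	 * 512 MiB each, rows 2..7 are unpopulated (value repeats) */
	unsigned char drb[8] = { 8, 16, 16, 16, 16, 16, 16, 16 };
	unsigned long last_cumul_size = 0;
	int index;

	for (index = 0; index < 8; index++) {
		unsigned long cumul_size =
			(unsigned long) drb[index] << (DRB_SHIFT - PG_SHIFT);

		if (cumul_size == last_cumul_size)
			continue;	/* not populated */
		printf("row %d: first_page %#lx last_page %#lx (%lu pages)\n",
		       index, last_cumul_size, cumul_size - 1,
		       cumul_size - last_cumul_size);
		last_cumul_size = cumul_size;
	}
	return 0;
}

Row 0 comes out as pages 0x0..0x1ffff and row 1 as 0x20000..0x3ffff, matching the first_page/last_page assignments in the probe loop.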
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
new file mode 100644
index 000000000000..e90892831b90
--- /dev/null
+++ b/drivers/edac/r82600_edac.c
@@ -0,0 +1,407 @@
1/*
2 * Radisys 82600 Embedded chipset Memory Controller kernel module
3 * (C) 2005 EADS Astrium
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Tim Small <tim@buttersideup.com>, based on work by Thayne
8 * Harbaugh, Dan Hollis <goemon at anime dot net> and others.
9 *
10 * $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $
11 *
12 * Written with reference to 82600 High Integration Dual PCI System
13 * Controller Data Book:
14 * http://www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
15 * references to this document given in []
16 */
17
18#include <linux/config.h>
19#include <linux/module.h>
20#include <linux/init.h>
21
22#include <linux/pci.h>
23#include <linux/pci_ids.h>
24
25#include <linux/slab.h>
26
27#include "edac_mc.h"
28
29/* Radisys say "The 82600 integrates a main memory SDRAM controller that
30 * supports up to four banks of memory. The four banks can support a mix of
31 * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
32 * each of which can be any size from 16MB to 512MB. Both registered (control
33 * signals buffered) and unbuffered DIMM types are supported. Mixing of
34 * registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs
35 * is not allowed. The 82600 SDRAM interface operates at the same frequency as
36 * the CPU bus, 66MHz, 100MHz or 133MHz."
37 */
38
39#define R82600_NR_CSROWS 4
40#define R82600_NR_CHANS 1
41#define R82600_NR_DIMMS 4
42
43#define R82600_BRIDGE_ID 0x8200
44
45/* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */
46#define R82600_DRAMC 0x57 /* Various SDRAM related control bits
47 * all bits are R/W
48 *
49 * 7 SDRAM ISA Hole Enable
50 * 6 Flash Page Mode Enable
51 * 5 ECC Enable: 1=ECC 0=noECC
52 * 4 DRAM DIMM Type: 1=
53 * 3 BIOS Alias Disable
54 * 2 SDRAM BIOS Flash Write Enable
55 * 1:0 SDRAM Refresh Rate: 00=Disabled
56 * 01=7.8usec (256Mbit SDRAMs)
57 * 10=15.6us 11=125usec
58 */
59
60#define R82600_SDRAMC 0x76 /* "SDRAM Control Register"
61 * More SDRAM related control bits
62 * all bits are R/W
63 *
64 * 15:8 Reserved.
65 *
66 * 7:5 Special SDRAM Mode Select
67 *
68 * 4 Force ECC
69 *
70 * 1=Drive ECC bits to 0 during
71 * write cycles (i.e. ECC test mode)
72 *
73 * 0=Normal ECC functioning
74 *
75 * 3 Enhanced Paging Enable
76 *
77 * 2 CAS# Latency 0=3clks 1=2clks
78 *
79 * 1 RAS# to CAS# Delay 0=3 1=2
80 *
81 * 0 RAS# Precharge 0=3 1=2
82 */
83
84#define R82600_EAP 0x80 /* ECC Error Address Pointer Register
85 *
86 * 31 Disable Hardware Scrubbing (RW)
87 * 0=Scrub on corrected read
88 * 1=Don't scrub on corrected read
89 *
90 * 30:12 Error Address Pointer (RO)
91 * Upper 19 bits of error address
92 *
93 * 11:4 Syndrome Bits (RO)
94 *
95 * 3 BSERR# on multibit error (RW)
96 * 1=enable 0=disable
97 *
98	 *  2    NMI on Single Bit Error (RW)
99	 *        1=NMI triggered by SBE n.b. other
100	 *        prerequisites
101 * 0=NMI not triggered
102 *
103 * 1 MBE (R/WC)
104 * read 1=MBE at EAP (see above)
105 * read 0=no MBE, or SBE occurred first
106 * write 1=Clear MBE status (must also
107 * clear SBE)
108 * write 0=NOP
109 *
110	 *  0    SBE (R/WC)
111 * read 1=SBE at EAP (see above)
112 * read 0=no SBE, or MBE occurred first
113 * write 1=Clear SBE status (must also
114 * clear MBE)
115 * write 0=NOP
116 */
117
118#define R82600_DRBA	0x60	/* + 0x60..0x63 SDRAM Row Boundary Address
119 * Registers
120 *
121 * 7:0 Address lines 30:24 - upper limit of
122 * each row [p57]
123 */
124
125struct r82600_error_info {
126 u32 eapr;
127};
128
129
130static unsigned int disable_hardware_scrub = 0;
131
132
133static void r82600_get_error_info (struct mem_ctl_info *mci,
134 struct r82600_error_info *info)
135{
136 pci_read_config_dword(mci->pdev, R82600_EAP, &info->eapr);
137
138 if (info->eapr & BIT(0))
139 /* Clear error to allow next error to be reported [p.62] */
140 pci_write_bits32(mci->pdev, R82600_EAP,
141				 ((u32) BIT(0) | (u32) BIT(1)),
142				 ((u32) BIT(0) | (u32) BIT(1)));
143
144 if (info->eapr & BIT(1))
145 /* Clear error to allow next error to be reported [p.62] */
146 pci_write_bits32(mci->pdev, R82600_EAP,
147				 ((u32) BIT(0) | (u32) BIT(1)),
148				 ((u32) BIT(0) | (u32) BIT(1)));
149}
150
151
152static int r82600_process_error_info (struct mem_ctl_info *mci,
153 struct r82600_error_info *info, int handle_errors)
154{
155 int error_found;
156 u32 eapaddr, page;
157 u32 syndrome;
158
159 error_found = 0;
160
161 /* bits 30:12 store the upper 19 bits of the 32 bit error address */
162	eapaddr = ((info->eapr >> 12) & 0x7FFFF) << 13;
163 /* Syndrome in bits 11:4 [p.62] */
164 syndrome = (info->eapr >> 4) & 0xFF;
165
166	/* the R82600 reports at coarser than page   *
167	 * granularity (upper 19 bits only)          */
168 page = eapaddr >> PAGE_SHIFT;
169
170 if (info->eapr & BIT(0)) { /* CE? */
171 error_found = 1;
172
173 if (handle_errors)
174 edac_mc_handle_ce(
175 mci, page, 0, /* not avail */
176 syndrome,
177 edac_mc_find_csrow_by_page(mci, page),
178 0, /* channel */
179 mci->ctl_name);
180 }
181
182 if (info->eapr & BIT(1)) { /* UE? */
183 error_found = 1;
184
185 if (handle_errors)
186 /* 82600 doesn't give enough info */
187 edac_mc_handle_ue(mci, page, 0,
188 edac_mc_find_csrow_by_page(mci, page),
189 mci->ctl_name);
190 }
191
192 return error_found;
193}
194
195static void r82600_check(struct mem_ctl_info *mci)
196{
197 struct r82600_error_info info;
198
199 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
200 r82600_get_error_info(mci, &info);
201 r82600_process_error_info(mci, &info, 1);
202}
203
204static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
205{
206 int rc = -ENODEV;
207 int index;
208 struct mem_ctl_info *mci = NULL;
209 u8 dramcr;
210 u32 ecc_on;
211 u32 reg_sdram;
212 u32 eapr;
213 u32 scrub_disabled;
214 u32 sdram_refresh_rate;
215 u32 row_high_limit_last = 0;
216 u32 eap_init_bits;
217
218 debugf0("MC: " __FILE__ ": %s()\n", __func__);
219
220
221 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
222 pci_read_config_dword(pdev, R82600_EAP, &eapr);
223
224 ecc_on = dramcr & BIT(5);
225 reg_sdram = dramcr & BIT(4);
226 scrub_disabled = eapr & BIT(31);
227 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
228
229 debugf2("MC: " __FILE__ ": %s(): sdram refresh rate = %#0x\n",
230 __func__, sdram_refresh_rate);
231
232 debugf2("MC: " __FILE__ ": %s(): DRAMC register = %#0x\n", __func__,
233 dramcr);
234
235 mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
236
237 if (mci == NULL) {
238 rc = -ENOMEM;
239 goto fail;
240 }
241
242 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
243
244 mci->pdev = pdev;
245 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
246
247 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
248	/* FIXME: work out whether the chip leads have been    *
249	 * used for COM2 instead on this board [MA6?] MAYBE:   */
250
251 /* On the R82600, the pins for memory bits 72:65 - i.e. the *
252 * EC bits are shared with the pins for COM2 (!), so if COM2 *
253 * is enabled, we assume COM2 is wired up, and thus no EDAC *
254 * is possible. */
255 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
256 if (ecc_on) {
257 if (scrub_disabled)
258 debugf3("MC: " __FILE__ ": %s(): mci = %p - "
259 "Scrubbing disabled! EAP: %#0x\n", __func__,
260 mci, eapr);
261 } else
262 mci->edac_cap = EDAC_FLAG_NONE;
263
264 mci->mod_name = BS_MOD_STR;
265 mci->mod_ver = "$Revision: 1.1.2.6 $";
266 mci->ctl_name = "R82600";
267 mci->edac_check = r82600_check;
268 mci->ctl_page_to_phys = NULL;
269
270 for (index = 0; index < mci->nr_csrows; index++) {
271 struct csrow_info *csrow = &mci->csrows[index];
272		u8 drbar;	/* SDRAM Row Boundary Address Register */
273 u32 row_high_limit;
274 u32 row_base;
275
276 /* find the DRAM Chip Select Base address and mask */
277 pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar);
278
279 debugf1("MC%d: " __FILE__ ": %s() Row=%d DRBA = %#0x\n",
280 mci->mc_idx, __func__, index, drbar);
281
282 row_high_limit = ((u32) drbar << 24);
283/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
284
285 debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
286			"Boundary Address=%#0x, Last = %#0x\n",
287 mci->mc_idx, __func__, index, row_high_limit,
288 row_high_limit_last);
289
290 /* Empty row [p.57] */
291 if (row_high_limit == row_high_limit_last)
292 continue;
293
294 row_base = row_high_limit_last;
295
296 csrow->first_page = row_base >> PAGE_SHIFT;
297 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
298 csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
299 /* Error address is top 19 bits - so granularity is *
300 * 14 bits */
301 csrow->grain = 1 << 14;
302 csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
303 /* FIXME - check that this is unknowable with this chipset */
304 csrow->dtype = DEV_UNKNOWN;
305
306 /* Mode is global on 82600 */
307 csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
308 row_high_limit_last = row_high_limit;
309 }
310
311 /* clear counters */
312 /* FIXME should we? */
313
314 if (edac_mc_add_mc(mci)) {
315 debugf3("MC: " __FILE__
316 ": %s(): failed edac_mc_add_mc()\n", __func__);
317 goto fail;
318 }
319
320 /* get this far and it's successful */
321
322 /* Clear error flags to allow next error to be reported [p.62] */
323 /* Test systems seem to always have the UE flag raised on boot */
324
325	eap_init_bits = BIT(0) | BIT(1);
326 if (disable_hardware_scrub) {
327 eap_init_bits |= BIT(31);
328 debugf3("MC: " __FILE__ ": %s(): Disabling Hardware Scrub "
329 "(scrub on error)\n", __func__);
330 }
331
332 pci_write_bits32(mci->pdev, R82600_EAP, eap_init_bits,
333 eap_init_bits);
334
335 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
336 return 0;
337
338fail:
339 if (mci)
340 edac_mc_free(mci);
341
342 return rc;
343}
344
345/* returns count (>= 0), or negative on error */
346static int __devinit r82600_init_one(struct pci_dev *pdev,
347 const struct pci_device_id *ent)
348{
349 debugf0("MC: " __FILE__ ": %s()\n", __func__);
350
351 /* don't need to call pci_device_enable() */
352 return r82600_probe1(pdev, ent->driver_data);
353}
354
355
356static void __devexit r82600_remove_one(struct pci_dev *pdev)
357{
358 struct mem_ctl_info *mci;
359
360 debugf0(__FILE__ ": %s()\n", __func__);
361
362 if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) &&
363 !edac_mc_del_mc(mci))
364 edac_mc_free(mci);
365}
366
367
368static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
369 {PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)},
370 {0,} /* 0 terminated list. */
371};
372
373MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
374
375
376static struct pci_driver r82600_driver = {
377 .name = BS_MOD_STR,
378 .probe = r82600_init_one,
379 .remove = __devexit_p(r82600_remove_one),
380 .id_table = r82600_pci_tbl,
381};
382
383
384static int __init r82600_init(void)
385{
386 return pci_register_driver(&r82600_driver);
387}
388
389
390static void __exit r82600_exit(void)
391{
392 pci_unregister_driver(&r82600_driver);
393}
394
395
396module_init(r82600_init);
397module_exit(r82600_exit);
398
399
400MODULE_LICENSE("GPL");
401MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
402 "on behalf of EADS Astrium");
403MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
404
405module_param(disable_hardware_scrub, bool, 0644);
406MODULE_PARM_DESC(disable_hardware_scrub,
407 "If set, disable the chipset's automatic scrub for CEs");
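Decoded in isolation, the EAP layout documented above gives a quick sanity check of the shifts used in r82600_process_error_info(). A minimal sketch with a hypothetical register value (SBE flag set, syndrome 0x5a, error address 0x01ffe000):

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	/* hypothetical EAP reading: address in bits 30:12,
	 * syndrome in bits 11:4, SBE in bit 0 */
	unsigned long eapr = (0xfffUL << 12) | (0x5aUL << 4) | BIT(0);

	unsigned long addr = ((eapr >> 12) & 0x7FFFF) << 13; /* upper 19 bits */
	unsigned long synd = (eapr >> 4) & 0xFF;

	printf("addr=%#lx syndrome=%#lx SBE=%lu MBE=%lu\n",
	       addr, synd, eapr & BIT(0), (eapr & BIT(1)) >> 1);
	return 0;	/* addr=0x1ffe000 syndrome=0x5a SBE=1 MBE=0 */
}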
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index ca99979c868a..8b3515f394a6 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -8,6 +8,7 @@
8 * completion notification. 8 * completion notification.
9 */ 9 */
10 10
11#include <asm/types.h>
11#include <asm/atomic.h> 12#include <asm/atomic.h>
12 13
13#include <linux/blkdev.h> 14#include <linux/blkdev.h>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1421941487c4..626508afe1b1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -7,6 +7,7 @@ menu "Network device support"
7 7
8config NETDEVICES 8config NETDEVICES
9 depends on NET 9 depends on NET
10 default y if UML
10 bool "Network device support" 11 bool "Network device support"
11 ---help--- 12 ---help---
12 You can say N here if you don't intend to connect your Linux box to 13 You can say N here if you don't intend to connect your Linux box to
@@ -1914,6 +1915,15 @@ config E1000_NAPI
1914 1915
1915 If in doubt, say N. 1916 If in doubt, say N.
1916 1917
1918config E1000_DISABLE_PACKET_SPLIT
1919 bool "Disable Packet Split for PCI express adapters"
1920 depends on E1000
1921 help
1922 Say Y here if you want to use the legacy receive path for PCI express
1923	  hardware.
1924
1925 If in doubt, say N.
1926
1917source "drivers/net/ixp2000/Kconfig" 1927source "drivers/net/ixp2000/Kconfig"
1918 1928
1919config MYRI_SBUS 1929config MYRI_SBUS
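As with any Kconfig bool, the new option surfaces as a CONFIG_E1000_DISABLE_PACKET_SPLIT preprocessor symbol that the driver can test at build time. A runnable sketch of the gating pattern (the symbol is normally defined by Kbuild, not by hand):

#include <stdio.h>

/* #define CONFIG_E1000_DISABLE_PACKET_SPLIT 1  -- set via Kconfig/Kbuild */

int main(void)
{
#ifdef CONFIG_E1000_DISABLE_PACKET_SPLIT
	puts("legacy single-buffer receive path");
#else
	puts("packet-split receive path for PCI Express adapters");
#endif
	return 0;
}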
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index dde631f8f685..6e295fce5c6f 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -335,6 +335,30 @@ static inline void cas_mask_intr(struct cas *cp)
335 cas_disable_irq(cp, i); 335 cas_disable_irq(cp, i);
336} 336}
337 337
338static inline void cas_buffer_init(cas_page_t *cp)
339{
340 struct page *page = cp->buffer;
341 atomic_set((atomic_t *)&page->lru.next, 1);
342}
343
344static inline int cas_buffer_count(cas_page_t *cp)
345{
346 struct page *page = cp->buffer;
347 return atomic_read((atomic_t *)&page->lru.next);
348}
349
350static inline void cas_buffer_inc(cas_page_t *cp)
351{
352 struct page *page = cp->buffer;
353 atomic_inc((atomic_t *)&page->lru.next);
354}
355
356static inline void cas_buffer_dec(cas_page_t *cp)
357{
358 struct page *page = cp->buffer;
359 atomic_dec((atomic_t *)&page->lru.next);
360}
361
338static void cas_enable_irq(struct cas *cp, const int ring) 362static void cas_enable_irq(struct cas *cp, const int ring)
339{ 363{
340 if (ring == 0) { /* all but TX_DONE */ 364 if (ring == 0) { /* all but TX_DONE */
@@ -472,6 +496,7 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
472{ 496{
473 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, 497 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
474 PCI_DMA_FROMDEVICE); 498 PCI_DMA_FROMDEVICE);
499 cas_buffer_dec(page);
475 __free_pages(page->buffer, cp->page_order); 500 __free_pages(page->buffer, cp->page_order);
476 kfree(page); 501 kfree(page);
477 return 0; 502 return 0;
@@ -501,6 +526,7 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
501 page->buffer = alloc_pages(flags, cp->page_order); 526 page->buffer = alloc_pages(flags, cp->page_order);
502 if (!page->buffer) 527 if (!page->buffer)
503 goto page_err; 528 goto page_err;
529 cas_buffer_init(page);
504 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0, 530 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
505 cp->page_size, PCI_DMA_FROMDEVICE); 531 cp->page_size, PCI_DMA_FROMDEVICE);
506 return page; 532 return page;
@@ -579,7 +605,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
579 list_for_each_safe(elem, tmp, &list) { 605 list_for_each_safe(elem, tmp, &list) {
580 cas_page_t *page = list_entry(elem, cas_page_t, list); 606 cas_page_t *page = list_entry(elem, cas_page_t, list);
581 607
582 if (page_count(page->buffer) > 1) 608 if (cas_buffer_count(page) > 1)
583 continue; 609 continue;
584 610
585 list_del(elem); 611 list_del(elem);
@@ -1347,7 +1373,7 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1347 cas_page_t *page = cp->rx_pages[1][index]; 1373 cas_page_t *page = cp->rx_pages[1][index];
1348 cas_page_t *new; 1374 cas_page_t *new;
1349 1375
1350 if (page_count(page->buffer) == 1) 1376 if (cas_buffer_count(page) == 1)
1351 return page; 1377 return page;
1352 1378
1353 new = cas_page_dequeue(cp); 1379 new = cas_page_dequeue(cp);
@@ -1367,7 +1393,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1367 cas_page_t **page1 = cp->rx_pages[1]; 1393 cas_page_t **page1 = cp->rx_pages[1];
1368 1394
1369 /* swap if buffer is in use */ 1395 /* swap if buffer is in use */
1370 if (page_count(page0[index]->buffer) > 1) { 1396 if (cas_buffer_count(page0[index]) > 1) {
1371 cas_page_t *new = cas_page_spare(cp, index); 1397 cas_page_t *new = cas_page_spare(cp, index);
1372 if (new) { 1398 if (new) {
1373 page1[index] = page0[index]; 1399 page1[index] = page0[index];
@@ -2039,6 +2065,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
2039 skb->len += hlen - swivel; 2065 skb->len += hlen - swivel;
2040 2066
2041 get_page(page->buffer); 2067 get_page(page->buffer);
2068 cas_buffer_inc(page);
2042 frag->page = page->buffer; 2069 frag->page = page->buffer;
2043 frag->page_offset = off; 2070 frag->page_offset = off;
2044 frag->size = hlen - swivel; 2071 frag->size = hlen - swivel;
@@ -2063,6 +2090,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
2063 frag++; 2090 frag++;
2064 2091
2065 get_page(page->buffer); 2092 get_page(page->buffer);
2093 cas_buffer_inc(page);
2066 frag->page = page->buffer; 2094 frag->page = page->buffer;
2067 frag->page_offset = 0; 2095 frag->page_offset = 0;
2068 frag->size = hlen; 2096 frag->size = hlen;
@@ -2225,7 +2253,7 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2225 released = 0; 2253 released = 0;
2226 while (entry != last) { 2254 while (entry != last) {
2227 /* make a new buffer if it's still in use */ 2255 /* make a new buffer if it's still in use */
2228 if (page_count(page[entry]->buffer) > 1) { 2256 if (cas_buffer_count(page[entry]) > 1) {
2229 cas_page_t *new = cas_page_dequeue(cp); 2257 cas_page_t *new = cas_page_dequeue(cp);
2230 if (!new) { 2258 if (!new) {
2231 /* let the timer know that we need to 2259 /* let the timer know that we need to
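The cas_buffer_* helpers added above keep a driver-private use count in the otherwise unused page->lru.next word, so the driver can tell when an RX page it shared with the stack via get_page() is exclusively its own again. A hedged userspace sketch of the idea, with an ordinary C11 atomic standing in for the struct page field:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct rx_page {
	atomic_int users;	/* 1 == owned only by the driver */
};

static void rx_page_init(struct rx_page *p) { atomic_init(&p->users, 1); }
static void rx_page_get(struct rx_page *p)  { atomic_fetch_add(&p->users, 1); }
static void rx_page_put(struct rx_page *p)  { atomic_fetch_sub(&p->users, 1); }

/* recycle the buffer only when nobody else still holds a reference */
static bool rx_page_reusable(struct rx_page *p)
{
	return atomic_load(&p->users) == 1;
}

int main(void)
{
	struct rx_page p;

	rx_page_init(&p);
	rx_page_get(&p);	/* page attached to an skb fragment */
	printf("reusable: %d\n", rx_page_reusable(&p));	/* 0 */
	rx_page_put(&p);	/* fragment consumed */
	printf("reusable: %d\n", rx_page_reusable(&p));	/* 1 */
	return 0;
}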
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index d252297e4db0..5cedc81786e3 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -121,7 +121,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
121 struct e1000_adapter *adapter = netdev_priv(netdev); 121 struct e1000_adapter *adapter = netdev_priv(netdev);
122 struct e1000_hw *hw = &adapter->hw; 122 struct e1000_hw *hw = &adapter->hw;
123 123
124 if(hw->media_type == e1000_media_type_copper) { 124 if (hw->media_type == e1000_media_type_copper) {
125 125
126 ecmd->supported = (SUPPORTED_10baseT_Half | 126 ecmd->supported = (SUPPORTED_10baseT_Half |
127 SUPPORTED_10baseT_Full | 127 SUPPORTED_10baseT_Full |
@@ -133,7 +133,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
133 133
134 ecmd->advertising = ADVERTISED_TP; 134 ecmd->advertising = ADVERTISED_TP;
135 135
136 if(hw->autoneg == 1) { 136 if (hw->autoneg == 1) {
137 ecmd->advertising |= ADVERTISED_Autoneg; 137 ecmd->advertising |= ADVERTISED_Autoneg;
138 138
139 /* the e1000 autoneg seems to match ethtool nicely */ 139 /* the e1000 autoneg seems to match ethtool nicely */
@@ -144,7 +144,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
144 ecmd->port = PORT_TP; 144 ecmd->port = PORT_TP;
145 ecmd->phy_address = hw->phy_addr; 145 ecmd->phy_address = hw->phy_addr;
146 146
147 if(hw->mac_type == e1000_82543) 147 if (hw->mac_type == e1000_82543)
148 ecmd->transceiver = XCVR_EXTERNAL; 148 ecmd->transceiver = XCVR_EXTERNAL;
149 else 149 else
150 ecmd->transceiver = XCVR_INTERNAL; 150 ecmd->transceiver = XCVR_INTERNAL;
@@ -160,13 +160,13 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
160 160
161 ecmd->port = PORT_FIBRE; 161 ecmd->port = PORT_FIBRE;
162 162
163 if(hw->mac_type >= e1000_82545) 163 if (hw->mac_type >= e1000_82545)
164 ecmd->transceiver = XCVR_INTERNAL; 164 ecmd->transceiver = XCVR_INTERNAL;
165 else 165 else
166 ecmd->transceiver = XCVR_EXTERNAL; 166 ecmd->transceiver = XCVR_EXTERNAL;
167 } 167 }
168 168
169 if(netif_carrier_ok(adapter->netdev)) { 169 if (netif_carrier_ok(adapter->netdev)) {
170 170
171 e1000_get_speed_and_duplex(hw, &adapter->link_speed, 171 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
172 &adapter->link_duplex); 172 &adapter->link_duplex);
@@ -175,7 +175,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
175	/* unfortunately FULL_DUPLEX != DUPLEX_FULL 175	/* unfortunately FULL_DUPLEX != DUPLEX_FULL
176 * and HALF_DUPLEX != DUPLEX_HALF */ 176 * and HALF_DUPLEX != DUPLEX_HALF */
177 177
178 if(adapter->link_duplex == FULL_DUPLEX) 178 if (adapter->link_duplex == FULL_DUPLEX)
179 ecmd->duplex = DUPLEX_FULL; 179 ecmd->duplex = DUPLEX_FULL;
180 else 180 else
181 ecmd->duplex = DUPLEX_HALF; 181 ecmd->duplex = DUPLEX_HALF;
@@ -205,11 +205,11 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
205 205
206 if (ecmd->autoneg == AUTONEG_ENABLE) { 206 if (ecmd->autoneg == AUTONEG_ENABLE) {
207 hw->autoneg = 1; 207 hw->autoneg = 1;
208 if(hw->media_type == e1000_media_type_fiber) 208 if (hw->media_type == e1000_media_type_fiber)
209 hw->autoneg_advertised = ADVERTISED_1000baseT_Full | 209 hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
210 ADVERTISED_FIBRE | 210 ADVERTISED_FIBRE |
211 ADVERTISED_Autoneg; 211 ADVERTISED_Autoneg;
212 else 212 else
213 hw->autoneg_advertised = ADVERTISED_10baseT_Half | 213 hw->autoneg_advertised = ADVERTISED_10baseT_Half |
214 ADVERTISED_10baseT_Full | 214 ADVERTISED_10baseT_Full |
215 ADVERTISED_100baseT_Half | 215 ADVERTISED_100baseT_Half |
@@ -219,12 +219,12 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
219 ADVERTISED_TP; 219 ADVERTISED_TP;
220 ecmd->advertising = hw->autoneg_advertised; 220 ecmd->advertising = hw->autoneg_advertised;
221 } else 221 } else
222 if(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) 222 if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
223 return -EINVAL; 223 return -EINVAL;
224 224
225 /* reset the link */ 225 /* reset the link */
226 226
227 if(netif_running(adapter->netdev)) { 227 if (netif_running(adapter->netdev)) {
228 e1000_down(adapter); 228 e1000_down(adapter);
229 e1000_reset(adapter); 229 e1000_reset(adapter);
230 e1000_up(adapter); 230 e1000_up(adapter);
@@ -241,14 +241,14 @@ e1000_get_pauseparam(struct net_device *netdev,
241 struct e1000_adapter *adapter = netdev_priv(netdev); 241 struct e1000_adapter *adapter = netdev_priv(netdev);
242 struct e1000_hw *hw = &adapter->hw; 242 struct e1000_hw *hw = &adapter->hw;
243 243
244 pause->autoneg = 244 pause->autoneg =
245 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); 245 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
246 246
247 if(hw->fc == e1000_fc_rx_pause) 247 if (hw->fc == e1000_fc_rx_pause)
248 pause->rx_pause = 1; 248 pause->rx_pause = 1;
249 else if(hw->fc == e1000_fc_tx_pause) 249 else if (hw->fc == e1000_fc_tx_pause)
250 pause->tx_pause = 1; 250 pause->tx_pause = 1;
251 else if(hw->fc == e1000_fc_full) { 251 else if (hw->fc == e1000_fc_full) {
252 pause->rx_pause = 1; 252 pause->rx_pause = 1;
253 pause->tx_pause = 1; 253 pause->tx_pause = 1;
254 } 254 }
@@ -260,31 +260,30 @@ e1000_set_pauseparam(struct net_device *netdev,
260{ 260{
261 struct e1000_adapter *adapter = netdev_priv(netdev); 261 struct e1000_adapter *adapter = netdev_priv(netdev);
262 struct e1000_hw *hw = &adapter->hw; 262 struct e1000_hw *hw = &adapter->hw;
263 263
264 adapter->fc_autoneg = pause->autoneg; 264 adapter->fc_autoneg = pause->autoneg;
265 265
266 if(pause->rx_pause && pause->tx_pause) 266 if (pause->rx_pause && pause->tx_pause)
267 hw->fc = e1000_fc_full; 267 hw->fc = e1000_fc_full;
268 else if(pause->rx_pause && !pause->tx_pause) 268 else if (pause->rx_pause && !pause->tx_pause)
269 hw->fc = e1000_fc_rx_pause; 269 hw->fc = e1000_fc_rx_pause;
270 else if(!pause->rx_pause && pause->tx_pause) 270 else if (!pause->rx_pause && pause->tx_pause)
271 hw->fc = e1000_fc_tx_pause; 271 hw->fc = e1000_fc_tx_pause;
272 else if(!pause->rx_pause && !pause->tx_pause) 272 else if (!pause->rx_pause && !pause->tx_pause)
273 hw->fc = e1000_fc_none; 273 hw->fc = e1000_fc_none;
274 274
275 hw->original_fc = hw->fc; 275 hw->original_fc = hw->fc;
276 276
277 if(adapter->fc_autoneg == AUTONEG_ENABLE) { 277 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
278 if(netif_running(adapter->netdev)) { 278 if (netif_running(adapter->netdev)) {
279 e1000_down(adapter); 279 e1000_down(adapter);
280 e1000_up(adapter); 280 e1000_up(adapter);
281 } else 281 } else
282 e1000_reset(adapter); 282 e1000_reset(adapter);
283 } 283 } else
284 else
285 return ((hw->media_type == e1000_media_type_fiber) ? 284 return ((hw->media_type == e1000_media_type_fiber) ?
286 e1000_setup_link(hw) : e1000_force_mac_fc(hw)); 285 e1000_setup_link(hw) : e1000_force_mac_fc(hw));
287 286
288 return 0; 287 return 0;
289} 288}
290 289
@@ -301,14 +300,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
301 struct e1000_adapter *adapter = netdev_priv(netdev); 300 struct e1000_adapter *adapter = netdev_priv(netdev);
302 adapter->rx_csum = data; 301 adapter->rx_csum = data;
303 302
304 if(netif_running(netdev)) { 303 if (netif_running(netdev)) {
305 e1000_down(adapter); 304 e1000_down(adapter);
306 e1000_up(adapter); 305 e1000_up(adapter);
307 } else 306 } else
308 e1000_reset(adapter); 307 e1000_reset(adapter);
309 return 0; 308 return 0;
310} 309}
311 310
312static uint32_t 311static uint32_t
313e1000_get_tx_csum(struct net_device *netdev) 312e1000_get_tx_csum(struct net_device *netdev)
314{ 313{
@@ -320,7 +319,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
320{ 319{
321 struct e1000_adapter *adapter = netdev_priv(netdev); 320 struct e1000_adapter *adapter = netdev_priv(netdev);
322 321
323 if(adapter->hw.mac_type < e1000_82543) { 322 if (adapter->hw.mac_type < e1000_82543) {
324 if (!data) 323 if (!data)
325 return -EINVAL; 324 return -EINVAL;
326 return 0; 325 return 0;
@@ -339,8 +338,8 @@ static int
339e1000_set_tso(struct net_device *netdev, uint32_t data) 338e1000_set_tso(struct net_device *netdev, uint32_t data)
340{ 339{
341 struct e1000_adapter *adapter = netdev_priv(netdev); 340 struct e1000_adapter *adapter = netdev_priv(netdev);
342 if((adapter->hw.mac_type < e1000_82544) || 341 if ((adapter->hw.mac_type < e1000_82544) ||
343 (adapter->hw.mac_type == e1000_82547)) 342 (adapter->hw.mac_type == e1000_82547))
344 return data ? -EINVAL : 0; 343 return data ? -EINVAL : 0;
345 344
346 if (data) 345 if (data)
@@ -348,7 +347,7 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
348 else 347 else
349 netdev->features &= ~NETIF_F_TSO; 348 netdev->features &= ~NETIF_F_TSO;
350 return 0; 349 return 0;
351} 350}
352#endif /* NETIF_F_TSO */ 351#endif /* NETIF_F_TSO */
353 352
354static uint32_t 353static uint32_t
@@ -365,7 +364,7 @@ e1000_set_msglevel(struct net_device *netdev, uint32_t data)
365 adapter->msg_enable = data; 364 adapter->msg_enable = data;
366} 365}
367 366
368static int 367static int
369e1000_get_regs_len(struct net_device *netdev) 368e1000_get_regs_len(struct net_device *netdev)
370{ 369{
371#define E1000_REGS_LEN 32 370#define E1000_REGS_LEN 32
@@ -401,7 +400,7 @@ e1000_get_regs(struct net_device *netdev,
401 regs_buff[11] = E1000_READ_REG(hw, TIDV); 400 regs_buff[11] = E1000_READ_REG(hw, TIDV);
402 401
403 regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */ 402 regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */
404 if(hw->phy_type == e1000_phy_igp) { 403 if (hw->phy_type == e1000_phy_igp) {
405 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 404 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
406 IGP01E1000_PHY_AGC_A); 405 IGP01E1000_PHY_AGC_A);
407 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & 406 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
@@ -455,7 +454,7 @@ e1000_get_regs(struct net_device *netdev,
455 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); 454 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
456 regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */ 455 regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */
457 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ 456 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
458 if(hw->mac_type >= e1000_82540 && 457 if (hw->mac_type >= e1000_82540 &&
459 hw->media_type == e1000_media_type_copper) { 458 hw->media_type == e1000_media_type_copper) {
460 regs_buff[26] = E1000_READ_REG(hw, MANC); 459 regs_buff[26] = E1000_READ_REG(hw, MANC);
461 } 460 }
@@ -479,7 +478,7 @@ e1000_get_eeprom(struct net_device *netdev,
479 int ret_val = 0; 478 int ret_val = 0;
480 uint16_t i; 479 uint16_t i;
481 480
482 if(eeprom->len == 0) 481 if (eeprom->len == 0)
483 return -EINVAL; 482 return -EINVAL;
484 483
485 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 484 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -489,16 +488,16 @@ e1000_get_eeprom(struct net_device *netdev,
489 488
490 eeprom_buff = kmalloc(sizeof(uint16_t) * 489 eeprom_buff = kmalloc(sizeof(uint16_t) *
491 (last_word - first_word + 1), GFP_KERNEL); 490 (last_word - first_word + 1), GFP_KERNEL);
492 if(!eeprom_buff) 491 if (!eeprom_buff)
493 return -ENOMEM; 492 return -ENOMEM;
494 493
495 if(hw->eeprom.type == e1000_eeprom_spi) 494 if (hw->eeprom.type == e1000_eeprom_spi)
496 ret_val = e1000_read_eeprom(hw, first_word, 495 ret_val = e1000_read_eeprom(hw, first_word,
497 last_word - first_word + 1, 496 last_word - first_word + 1,
498 eeprom_buff); 497 eeprom_buff);
499 else { 498 else {
500 for (i = 0; i < last_word - first_word + 1; i++) 499 for (i = 0; i < last_word - first_word + 1; i++)
501 if((ret_val = e1000_read_eeprom(hw, first_word + i, 1, 500 if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
502 &eeprom_buff[i]))) 501 &eeprom_buff[i])))
503 break; 502 break;
504 } 503 }
@@ -525,10 +524,10 @@ e1000_set_eeprom(struct net_device *netdev,
525 int max_len, first_word, last_word, ret_val = 0; 524 int max_len, first_word, last_word, ret_val = 0;
526 uint16_t i; 525 uint16_t i;
527 526
528 if(eeprom->len == 0) 527 if (eeprom->len == 0)
529 return -EOPNOTSUPP; 528 return -EOPNOTSUPP;
530 529
531 if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 530 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
532 return -EFAULT; 531 return -EFAULT;
533 532
534 max_len = hw->eeprom.word_size * 2; 533 max_len = hw->eeprom.word_size * 2;
@@ -536,19 +535,19 @@ e1000_set_eeprom(struct net_device *netdev,
536 first_word = eeprom->offset >> 1; 535 first_word = eeprom->offset >> 1;
537 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 536 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
538 eeprom_buff = kmalloc(max_len, GFP_KERNEL); 537 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
539 if(!eeprom_buff) 538 if (!eeprom_buff)
540 return -ENOMEM; 539 return -ENOMEM;
541 540
542 ptr = (void *)eeprom_buff; 541 ptr = (void *)eeprom_buff;
543 542
544 if(eeprom->offset & 1) { 543 if (eeprom->offset & 1) {
545 /* need read/modify/write of first changed EEPROM word */ 544 /* need read/modify/write of first changed EEPROM word */
546 /* only the second byte of the word is being modified */ 545 /* only the second byte of the word is being modified */
547 ret_val = e1000_read_eeprom(hw, first_word, 1, 546 ret_val = e1000_read_eeprom(hw, first_word, 1,
548 &eeprom_buff[0]); 547 &eeprom_buff[0]);
549 ptr++; 548 ptr++;
550 } 549 }
551 if(((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { 550 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
552 /* need read/modify/write of last changed EEPROM word */ 551 /* need read/modify/write of last changed EEPROM word */
553 /* only the first byte of the word is being modified */ 552 /* only the first byte of the word is being modified */
554 ret_val = e1000_read_eeprom(hw, last_word, 1, 553 ret_val = e1000_read_eeprom(hw, last_word, 1,
@@ -567,9 +566,9 @@ e1000_set_eeprom(struct net_device *netdev,
567 ret_val = e1000_write_eeprom(hw, first_word, 566 ret_val = e1000_write_eeprom(hw, first_word,
568 last_word - first_word + 1, eeprom_buff); 567 last_word - first_word + 1, eeprom_buff);
569 568
570 /* Update the checksum over the first part of the EEPROM if needed 569 /* Update the checksum over the first part of the EEPROM if needed
571	 * and flush shadow RAM for 82573 controllers */ 570	 * and flush shadow RAM for 82573 controllers */
572 if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) || 571 if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
573 (hw->mac_type == e1000_82573))) 572 (hw->mac_type == e1000_82573)))
574 e1000_update_eeprom_checksum(hw); 573 e1000_update_eeprom_checksum(hw);
575 574
@@ -633,7 +632,7 @@ e1000_get_ringparam(struct net_device *netdev,
633 ring->rx_jumbo_pending = 0; 632 ring->rx_jumbo_pending = 0;
634} 633}
635 634
636static int 635static int
637e1000_set_ringparam(struct net_device *netdev, 636e1000_set_ringparam(struct net_device *netdev,
638 struct ethtool_ringparam *ring) 637 struct ethtool_ringparam *ring)
639{ 638{
@@ -670,25 +669,25 @@ e1000_set_ringparam(struct net_device *netdev,
670 txdr = adapter->tx_ring; 669 txdr = adapter->tx_ring;
671 rxdr = adapter->rx_ring; 670 rxdr = adapter->rx_ring;
672 671
673 if((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 672 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
674 return -EINVAL; 673 return -EINVAL;
675 674
676 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); 675 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
677 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? 676 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
678 E1000_MAX_RXD : E1000_MAX_82544_RXD)); 677 E1000_MAX_RXD : E1000_MAX_82544_RXD));
679 E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); 678 E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
680 679
681 txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD); 680 txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
682 txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ? 681 txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
683 E1000_MAX_TXD : E1000_MAX_82544_TXD)); 682 E1000_MAX_TXD : E1000_MAX_82544_TXD));
684 E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); 683 E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
685 684
686 for (i = 0; i < adapter->num_tx_queues; i++) 685 for (i = 0; i < adapter->num_tx_queues; i++)
687 txdr[i].count = txdr->count; 686 txdr[i].count = txdr->count;
688 for (i = 0; i < adapter->num_rx_queues; i++) 687 for (i = 0; i < adapter->num_rx_queues; i++)
689 rxdr[i].count = rxdr->count; 688 rxdr[i].count = rxdr->count;
690 689
691 if(netif_running(adapter->netdev)) { 690 if (netif_running(adapter->netdev)) {
692 /* Try to get new resources before deleting old */ 691 /* Try to get new resources before deleting old */
693 if ((err = e1000_setup_all_rx_resources(adapter))) 692 if ((err = e1000_setup_all_rx_resources(adapter)))
694 goto err_setup_rx; 693 goto err_setup_rx;
@@ -708,7 +707,7 @@ e1000_set_ringparam(struct net_device *netdev,
708 kfree(rx_old); 707 kfree(rx_old);
709 adapter->rx_ring = rx_new; 708 adapter->rx_ring = rx_new;
710 adapter->tx_ring = tx_new; 709 adapter->tx_ring = tx_new;
711 if((err = e1000_up(adapter))) 710 if ((err = e1000_up(adapter)))
712 return err; 711 return err;
713 } 712 }
714 713
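The descriptor-count sanitising in e1000_set_ringparam() above is a clamp to the hardware's limits followed by a round-up to the required descriptor multiple. The same pattern in isolation, with illustrative limits rather than the driver's exact E1000_* constants (the round-up mirrors how a power-of-two E1000_ROUNDUP would behave):

#include <stdio.h>

#define MIN_RXD		80	/* illustrative, not E1000_MIN_RXD */
#define MAX_RXD		4096	/* illustrative, not E1000_MAX_RXD */
#define REQ_MULTIPLE	8	/* must be a power of two */

/* round i up to the next multiple of size */
#define ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))

int main(void)
{
	unsigned int requested = 100, count = requested;

	if (count < MIN_RXD)
		count = MIN_RXD;
	if (count > MAX_RXD)
		count = MAX_RXD;
	ROUNDUP(count, REQ_MULTIPLE);

	printf("requested %u -> using %u descriptors\n", requested, count);
	return 0;	/* prints: requested 100 -> using 104 descriptors */
}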
@@ -727,10 +726,10 @@ err_setup_rx:
727 uint32_t pat, value; \ 726 uint32_t pat, value; \
728 uint32_t test[] = \ 727 uint32_t test[] = \
729 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ 728 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
730 for(pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \ 729 for (pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \
731 E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \ 730 E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \
732 value = E1000_READ_REG(&adapter->hw, R); \ 731 value = E1000_READ_REG(&adapter->hw, R); \
733 if(value != (test[pat] & W & M)) { \ 732 if (value != (test[pat] & W & M)) { \
734 DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \ 733 DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \
735 "0x%08X expected 0x%08X\n", \ 734 "0x%08X expected 0x%08X\n", \
736 E1000_##R, value, (test[pat] & W & M)); \ 735 E1000_##R, value, (test[pat] & W & M)); \
@@ -746,7 +745,7 @@ err_setup_rx:
746 uint32_t value; \ 745 uint32_t value; \
747 E1000_WRITE_REG(&adapter->hw, R, W & M); \ 746 E1000_WRITE_REG(&adapter->hw, R, W & M); \
748 value = E1000_READ_REG(&adapter->hw, R); \ 747 value = E1000_READ_REG(&adapter->hw, R); \
749 if((W & M) != (value & M)) { \ 748 if ((W & M) != (value & M)) { \
750 DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ 749 DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
751 "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \ 750 "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \
752 *data = (adapter->hw.mac_type < e1000_82543) ? \ 751 *data = (adapter->hw.mac_type < e1000_82543) ? \
@@ -782,7 +781,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
782 value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle); 781 value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle);
783 E1000_WRITE_REG(&adapter->hw, STATUS, toggle); 782 E1000_WRITE_REG(&adapter->hw, STATUS, toggle);
784 after = E1000_READ_REG(&adapter->hw, STATUS) & toggle; 783 after = E1000_READ_REG(&adapter->hw, STATUS) & toggle;
785 if(value != after) { 784 if (value != after) {
786 DPRINTK(DRV, ERR, "failed STATUS register test got: " 785 DPRINTK(DRV, ERR, "failed STATUS register test got: "
787 "0x%08X expected: 0x%08X\n", after, value); 786 "0x%08X expected: 0x%08X\n", after, value);
788 *data = 1; 787 *data = 1;
@@ -810,7 +809,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
810 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB); 809 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
811 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); 810 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
812 811
813 if(adapter->hw.mac_type >= e1000_82543) { 812 if (adapter->hw.mac_type >= e1000_82543) {
814 813
815 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF); 814 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
816 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 815 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
@@ -818,7 +817,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
818 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 817 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
819 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); 818 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
820 819
821 for(i = 0; i < E1000_RAR_ENTRIES; i++) { 820 for (i = 0; i < E1000_RAR_ENTRIES; i++) {
822 REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF, 821 REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
823 0xFFFFFFFF); 822 0xFFFFFFFF);
824 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, 823 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
@@ -834,7 +833,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
834 833
835 } 834 }
836 835
837 for(i = 0; i < E1000_MC_TBL_SIZE; i++) 836 for (i = 0; i < E1000_MC_TBL_SIZE; i++)
838 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); 837 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
839 838
840 *data = 0; 839 *data = 0;
@@ -850,8 +849,8 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
850 849
851 *data = 0; 850 *data = 0;
852 /* Read and add up the contents of the EEPROM */ 851 /* Read and add up the contents of the EEPROM */
853 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 852 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
854 if((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) { 853 if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
855 *data = 1; 854 *data = 1;
856 break; 855 break;
857 } 856 }
@@ -859,7 +858,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
859 } 858 }
860 859
861 /* If Checksum is not Correct return error else test passed */ 860 /* If Checksum is not Correct return error else test passed */
862 if((checksum != (uint16_t) EEPROM_SUM) && !(*data)) 861 if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
863 *data = 2; 862 *data = 2;
864 863
865 return *data; 864 return *data;
@@ -888,9 +887,9 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
888 *data = 0; 887 *data = 0;
889 888
890 /* Hook up test interrupt handler just for this test */ 889 /* Hook up test interrupt handler just for this test */
891 if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) { 890 if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
892 shared_int = FALSE; 891 shared_int = FALSE;
893 } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ, 892 } else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
894 netdev->name, netdev)){ 893 netdev->name, netdev)){
895 *data = 1; 894 *data = 1;
896 return -1; 895 return -1;
@@ -901,12 +900,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
901 msec_delay(10); 900 msec_delay(10);
902 901
903 /* Test each interrupt */ 902 /* Test each interrupt */
904 for(; i < 10; i++) { 903 for (; i < 10; i++) {
905 904
906 /* Interrupt to test */ 905 /* Interrupt to test */
907 mask = 1 << i; 906 mask = 1 << i;
908 907
909 if(!shared_int) { 908 if (!shared_int) {
910 /* Disable the interrupt to be reported in 909 /* Disable the interrupt to be reported in
911 * the cause register and then force the same 910 * the cause register and then force the same
912 * interrupt and see if one gets posted. If 911 * interrupt and see if one gets posted. If
@@ -917,8 +916,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
917 E1000_WRITE_REG(&adapter->hw, IMC, mask); 916 E1000_WRITE_REG(&adapter->hw, IMC, mask);
918 E1000_WRITE_REG(&adapter->hw, ICS, mask); 917 E1000_WRITE_REG(&adapter->hw, ICS, mask);
919 msec_delay(10); 918 msec_delay(10);
920 919
921 if(adapter->test_icr & mask) { 920 if (adapter->test_icr & mask) {
922 *data = 3; 921 *data = 3;
923 break; 922 break;
924 } 923 }
@@ -935,12 +934,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
935 E1000_WRITE_REG(&adapter->hw, ICS, mask); 934 E1000_WRITE_REG(&adapter->hw, ICS, mask);
936 msec_delay(10); 935 msec_delay(10);
937 936
938 if(!(adapter->test_icr & mask)) { 937 if (!(adapter->test_icr & mask)) {
939 *data = 4; 938 *data = 4;
940 break; 939 break;
941 } 940 }
942 941
943 if(!shared_int) { 942 if (!shared_int) {
944 /* Disable the other interrupts to be reported in 943 /* Disable the other interrupts to be reported in
945 * the cause register and then force the other 944 * the cause register and then force the other
946 * interrupts and see if any get posted. If 945 * interrupts and see if any get posted. If
@@ -952,7 +951,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
952 E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF); 951 E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
953 msec_delay(10); 952 msec_delay(10);
954 953
955 if(adapter->test_icr) { 954 if (adapter->test_icr) {
956 *data = 5; 955 *data = 5;
957 break; 956 break;
958 } 957 }
@@ -977,24 +976,24 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
977 struct pci_dev *pdev = adapter->pdev; 976 struct pci_dev *pdev = adapter->pdev;
978 int i; 977 int i;
979 978
980 if(txdr->desc && txdr->buffer_info) { 979 if (txdr->desc && txdr->buffer_info) {
981 for(i = 0; i < txdr->count; i++) { 980 for (i = 0; i < txdr->count; i++) {
982 if(txdr->buffer_info[i].dma) 981 if (txdr->buffer_info[i].dma)
983 pci_unmap_single(pdev, txdr->buffer_info[i].dma, 982 pci_unmap_single(pdev, txdr->buffer_info[i].dma,
984 txdr->buffer_info[i].length, 983 txdr->buffer_info[i].length,
985 PCI_DMA_TODEVICE); 984 PCI_DMA_TODEVICE);
986 if(txdr->buffer_info[i].skb) 985 if (txdr->buffer_info[i].skb)
987 dev_kfree_skb(txdr->buffer_info[i].skb); 986 dev_kfree_skb(txdr->buffer_info[i].skb);
988 } 987 }
989 } 988 }
990 989
991 if(rxdr->desc && rxdr->buffer_info) { 990 if (rxdr->desc && rxdr->buffer_info) {
992 for(i = 0; i < rxdr->count; i++) { 991 for (i = 0; i < rxdr->count; i++) {
993 if(rxdr->buffer_info[i].dma) 992 if (rxdr->buffer_info[i].dma)
994 pci_unmap_single(pdev, rxdr->buffer_info[i].dma, 993 pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
995 rxdr->buffer_info[i].length, 994 rxdr->buffer_info[i].length,
996 PCI_DMA_FROMDEVICE); 995 PCI_DMA_FROMDEVICE);
997 if(rxdr->buffer_info[i].skb) 996 if (rxdr->buffer_info[i].skb)
998 dev_kfree_skb(rxdr->buffer_info[i].skb); 997 dev_kfree_skb(rxdr->buffer_info[i].skb);
999 } 998 }
1000 } 999 }
@@ -1027,11 +1026,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1027 1026
1028 /* Setup Tx descriptor ring and Tx buffers */ 1027 /* Setup Tx descriptor ring and Tx buffers */
1029 1028
1030 if(!txdr->count) 1029 if (!txdr->count)
1031 txdr->count = E1000_DEFAULT_TXD; 1030 txdr->count = E1000_DEFAULT_TXD;
1032 1031
1033 size = txdr->count * sizeof(struct e1000_buffer); 1032 size = txdr->count * sizeof(struct e1000_buffer);
1034 if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) { 1033 if (!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
1035 ret_val = 1; 1034 ret_val = 1;
1036 goto err_nomem; 1035 goto err_nomem;
1037 } 1036 }
@@ -1039,7 +1038,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1039 1038
1040 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 1039 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1041 E1000_ROUNDUP(txdr->size, 4096); 1040 E1000_ROUNDUP(txdr->size, 4096);
1042 if(!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) { 1041 if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
1043 ret_val = 2; 1042 ret_val = 2;
1044 goto err_nomem; 1043 goto err_nomem;
1045 } 1044 }
@@ -1058,12 +1057,12 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1058 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | 1057 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1059 E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); 1058 E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1060 1059
1061 for(i = 0; i < txdr->count; i++) { 1060 for (i = 0; i < txdr->count; i++) {
1062 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); 1061 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
1063 struct sk_buff *skb; 1062 struct sk_buff *skb;
1064 unsigned int size = 1024; 1063 unsigned int size = 1024;
1065 1064
1066 if(!(skb = alloc_skb(size, GFP_KERNEL))) { 1065 if (!(skb = alloc_skb(size, GFP_KERNEL))) {
1067 ret_val = 3; 1066 ret_val = 3;
1068 goto err_nomem; 1067 goto err_nomem;
1069 } 1068 }
@@ -1083,18 +1082,18 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1083 1082
1084 /* Setup Rx descriptor ring and Rx buffers */ 1083 /* Setup Rx descriptor ring and Rx buffers */
1085 1084
1086 if(!rxdr->count) 1085 if (!rxdr->count)
1087 rxdr->count = E1000_DEFAULT_RXD; 1086 rxdr->count = E1000_DEFAULT_RXD;
1088 1087
1089 size = rxdr->count * sizeof(struct e1000_buffer); 1088 size = rxdr->count * sizeof(struct e1000_buffer);
1090 if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) { 1089 if (!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
1091 ret_val = 4; 1090 ret_val = 4;
1092 goto err_nomem; 1091 goto err_nomem;
1093 } 1092 }
1094 memset(rxdr->buffer_info, 0, size); 1093 memset(rxdr->buffer_info, 0, size);
1095 1094
1096 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1095 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
1097 if(!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) { 1096 if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
1098 ret_val = 5; 1097 ret_val = 5;
1099 goto err_nomem; 1098 goto err_nomem;
1100 } 1099 }
@@ -1114,11 +1113,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1114 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); 1113 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1115 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 1114 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1116 1115
1117 for(i = 0; i < rxdr->count; i++) { 1116 for (i = 0; i < rxdr->count; i++) {
1118 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); 1117 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
1119 struct sk_buff *skb; 1118 struct sk_buff *skb;
1120 1119
1121 if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, 1120 if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
1122 GFP_KERNEL))) { 1121 GFP_KERNEL))) {
1123 ret_val = 6; 1122 ret_val = 6;
1124 goto err_nomem; 1123 goto err_nomem;
@@ -1227,15 +1226,15 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
1227 1226
1228 /* Check Phy Configuration */ 1227 /* Check Phy Configuration */
1229 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg); 1228 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
1230 if(phy_reg != 0x4100) 1229 if (phy_reg != 0x4100)
1231 return 9; 1230 return 9;
1232 1231
1233 e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); 1232 e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
1234 if(phy_reg != 0x0070) 1233 if (phy_reg != 0x0070)
1235 return 10; 1234 return 10;
1236 1235
1237 e1000_read_phy_reg(&adapter->hw, 29, &phy_reg); 1236 e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
1238 if(phy_reg != 0x001A) 1237 if (phy_reg != 0x001A)
1239 return 11; 1238 return 11;
1240 1239
1241 return 0; 1240 return 0;
@@ -1249,7 +1248,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1249 1248
1250 adapter->hw.autoneg = FALSE; 1249 adapter->hw.autoneg = FALSE;
1251 1250
1252 if(adapter->hw.phy_type == e1000_phy_m88) { 1251 if (adapter->hw.phy_type == e1000_phy_m88) {
1253 /* Auto-MDI/MDIX Off */ 1252 /* Auto-MDI/MDIX Off */
1254 e1000_write_phy_reg(&adapter->hw, 1253 e1000_write_phy_reg(&adapter->hw,
1255 M88E1000_PHY_SPEC_CTRL, 0x0808); 1254 M88E1000_PHY_SPEC_CTRL, 0x0808);
@@ -1269,14 +1268,14 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1269 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1268 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1270 E1000_CTRL_FD); /* Force Duplex to FULL */ 1269 E1000_CTRL_FD); /* Force Duplex to FULL */
1271 1270
1272 if(adapter->hw.media_type == e1000_media_type_copper && 1271 if (adapter->hw.media_type == e1000_media_type_copper &&
1273 adapter->hw.phy_type == e1000_phy_m88) { 1272 adapter->hw.phy_type == e1000_phy_m88) {
1274 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1273 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1275 } else { 1274 } else {
1276 /* Set the ILOS bit on the fiber Nic is half 1275 /* Set the ILOS bit on the fiber Nic is half
1277 * duplex link is detected. */ 1276 * duplex link is detected. */
1278 stat_reg = E1000_READ_REG(&adapter->hw, STATUS); 1277 stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
1279 if((stat_reg & E1000_STATUS_FD) == 0) 1278 if ((stat_reg & E1000_STATUS_FD) == 0)
1280 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); 1279 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
1281 } 1280 }
1282 1281
@@ -1285,7 +1284,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1285 /* Disable the receiver on the PHY so when a cable is plugged in, the 1284 /* Disable the receiver on the PHY so when a cable is plugged in, the
1286 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 1285 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
1287 */ 1286 */
1288 if(adapter->hw.phy_type == e1000_phy_m88) 1287 if (adapter->hw.phy_type == e1000_phy_m88)
1289 e1000_phy_disable_receiver(adapter); 1288 e1000_phy_disable_receiver(adapter);
1290 1289
1291 udelay(500); 1290 udelay(500);
@@ -1301,14 +1300,14 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
1301 1300
1302 switch (adapter->hw.mac_type) { 1301 switch (adapter->hw.mac_type) {
1303 case e1000_82543: 1302 case e1000_82543:
1304 if(adapter->hw.media_type == e1000_media_type_copper) { 1303 if (adapter->hw.media_type == e1000_media_type_copper) {
1305 /* Attempt to setup Loopback mode on Non-integrated PHY. 1304 /* Attempt to setup Loopback mode on Non-integrated PHY.
1306 * Some PHY registers get corrupted at random, so 1305 * Some PHY registers get corrupted at random, so
1307 * attempt this 10 times. 1306 * attempt this 10 times.
1308 */ 1307 */
1309 while(e1000_nonintegrated_phy_loopback(adapter) && 1308 while (e1000_nonintegrated_phy_loopback(adapter) &&
1310 count++ < 10); 1309 count++ < 10);
1311 if(count < 11) 1310 if (count < 11)
1312 return 0; 1311 return 0;
1313 } 1312 }
1314 break; 1313 break;
@@ -1430,8 +1429,8 @@ static int
1430e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1429e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1431{ 1430{
1432 frame_size &= ~1; 1431 frame_size &= ~1;
1433 if(*(skb->data + 3) == 0xFF) { 1432 if (*(skb->data + 3) == 0xFF) {
1434 if((*(skb->data + frame_size / 2 + 10) == 0xBE) && 1433 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1435 (*(skb->data + frame_size / 2 + 12) == 0xAF)) { 1434 (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
1436 return 0; 1435 return 0;
1437 } 1436 }
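
The check above samples three marker bytes that the transmit side planted in each test frame (e1000_create_lbtest_frame). A standalone sketch of the same predicate; the offsets and marker values are taken directly from the hunk:

#include <stdint.h>

/* Returns 0 when a received loopback frame carries the expected
 * markers: 0xFF at offset 3, and 0xBE/0xAF just past the midpoint.
 * frame_size is forced even, exactly as in the driver. */
static int check_lbtest_frame(const uint8_t *data, unsigned int frame_size)
{
        frame_size &= ~1u;
        if (data[3] == 0xFF &&
            data[frame_size / 2 + 10] == 0xBE &&
            data[frame_size / 2 + 12] == 0xAF)
                return 0;
        return 13;      /* the driver's mis-compare code */
}
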
@@ -1450,53 +1449,53 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
1450 1449
1451 E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1); 1450 E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
1452 1451
1453 /* Calculate the loop count based on the largest descriptor ring 1452 /* Calculate the loop count based on the largest descriptor ring
1454 * The idea is to wrap the largest ring a number of times using 64 1453 * The idea is to wrap the largest ring a number of times using 64
1455 * send/receive pairs during each loop 1454 * send/receive pairs during each loop
1456 */ 1455 */
1457 1456
1458 if(rxdr->count <= txdr->count) 1457 if (rxdr->count <= txdr->count)
1459 lc = ((txdr->count / 64) * 2) + 1; 1458 lc = ((txdr->count / 64) * 2) + 1;
1460 else 1459 else
1461 lc = ((rxdr->count / 64) * 2) + 1; 1460 lc = ((rxdr->count / 64) * 2) + 1;
1462 1461
1463 k = l = 0; 1462 k = l = 0;
1464 for(j = 0; j <= lc; j++) { /* loop count loop */ 1463 for (j = 0; j <= lc; j++) { /* loop count loop */
1465 for(i = 0; i < 64; i++) { /* send the packets */ 1464 for (i = 0; i < 64; i++) { /* send the packets */
1466 e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1465 e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1467 1024); 1466 1024);
1468 pci_dma_sync_single_for_device(pdev, 1467 pci_dma_sync_single_for_device(pdev,
1469 txdr->buffer_info[k].dma, 1468 txdr->buffer_info[k].dma,
1470 txdr->buffer_info[k].length, 1469 txdr->buffer_info[k].length,
1471 PCI_DMA_TODEVICE); 1470 PCI_DMA_TODEVICE);
1472 if(unlikely(++k == txdr->count)) k = 0; 1471 if (unlikely(++k == txdr->count)) k = 0;
1473 } 1472 }
1474 E1000_WRITE_REG(&adapter->hw, TDT, k); 1473 E1000_WRITE_REG(&adapter->hw, TDT, k);
1475 msec_delay(200); 1474 msec_delay(200);
1476 time = jiffies; /* set the start time for the receive */ 1475 time = jiffies; /* set the start time for the receive */
1477 good_cnt = 0; 1476 good_cnt = 0;
1478 do { /* receive the sent packets */ 1477 do { /* receive the sent packets */
1479 pci_dma_sync_single_for_cpu(pdev, 1478 pci_dma_sync_single_for_cpu(pdev,
1480 rxdr->buffer_info[l].dma, 1479 rxdr->buffer_info[l].dma,
1481 rxdr->buffer_info[l].length, 1480 rxdr->buffer_info[l].length,
1482 PCI_DMA_FROMDEVICE); 1481 PCI_DMA_FROMDEVICE);
1483 1482
1484 ret_val = e1000_check_lbtest_frame( 1483 ret_val = e1000_check_lbtest_frame(
1485 rxdr->buffer_info[l].skb, 1484 rxdr->buffer_info[l].skb,
1486 1024); 1485 1024);
1487 if(!ret_val) 1486 if (!ret_val)
1488 good_cnt++; 1487 good_cnt++;
1489 if(unlikely(++l == rxdr->count)) l = 0; 1488 if (unlikely(++l == rxdr->count)) l = 0;
1490 /* time + 20 msecs (200 msecs on 2.4) is more than 1489 /* time + 20 msecs (200 msecs on 2.4) is more than
1491 * enough time to complete the receives, if it's 1490 * enough time to complete the receives, if it's
1492 * exceeded, break and error off 1491 * exceeded, break and error off
1493 */ 1492 */
1494 } while (good_cnt < 64 && jiffies < (time + 20)); 1493 } while (good_cnt < 64 && jiffies < (time + 20));
1495 if(good_cnt != 64) { 1494 if (good_cnt != 64) {
1496 ret_val = 13; /* ret_val is the same as mis-compare */ 1495 ret_val = 13; /* ret_val is the same as mis-compare */
1497 break; 1496 break;
1498 } 1497 }
1499 if(jiffies >= (time + 2)) { 1498 if (jiffies >= (time + 2)) {
1500 ret_val = 14; /* error code for time out error */ 1499 ret_val = 14; /* error code for time out error */
1501 break; 1500 break;
1502 } 1501 }
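
For reference, the loop-count formula above sizes the test so the larger of the two rings wraps roughly twice, 64 send/receive pairs per pass. A minimal sketch with an illustrative worked value:

/* lc = ((largest ring / 64) * 2) + 1 passes of 64 frames each;
 * e.g. 256-entry rings give ((256 / 64) * 2) + 1 = 9 passes. */
static int loopback_pass_count(int tx_count, int rx_count)
{
        int largest = (rx_count <= tx_count) ? tx_count : rx_count;
        return ((largest / 64) * 2) + 1;
}
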
@@ -1549,17 +1548,17 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
1549 *data = 1; 1548 *data = 1;
1550 } else { 1549 } else {
1551 e1000_check_for_link(&adapter->hw); 1550 e1000_check_for_link(&adapter->hw);
1552 if(adapter->hw.autoneg) /* if auto_neg is set wait for it */ 1551 if (adapter->hw.autoneg) /* if auto_neg is set wait for it */
1553 msec_delay(4000); 1552 msec_delay(4000);
1554 1553
1555 if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { 1554 if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
1556 *data = 1; 1555 *data = 1;
1557 } 1556 }
1558 } 1557 }
1559 return *data; 1558 return *data;
1560} 1559}
1561 1560
1562static int 1561static int
1563e1000_diag_test_count(struct net_device *netdev) 1562e1000_diag_test_count(struct net_device *netdev)
1564{ 1563{
1565 return E1000_TEST_LEN; 1564 return E1000_TEST_LEN;
@@ -1572,7 +1571,7 @@ e1000_diag_test(struct net_device *netdev,
1572 struct e1000_adapter *adapter = netdev_priv(netdev); 1571 struct e1000_adapter *adapter = netdev_priv(netdev);
1573 boolean_t if_running = netif_running(netdev); 1572 boolean_t if_running = netif_running(netdev);
1574 1573
1575 if(eth_test->flags == ETH_TEST_FL_OFFLINE) { 1574 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1576 /* Offline tests */ 1575 /* Offline tests */
1577 1576
1578 /* save speed, duplex, autoneg settings */ 1577 /* save speed, duplex, autoneg settings */
@@ -1582,27 +1581,27 @@ e1000_diag_test(struct net_device *netdev,
1582 1581
1583 /* Link test performed before hardware reset so autoneg doesn't 1582 /* Link test performed before hardware reset so autoneg doesn't
1584 * interfere with test result */ 1583 * interfere with test result */
1585 if(e1000_link_test(adapter, &data[4])) 1584 if (e1000_link_test(adapter, &data[4]))
1586 eth_test->flags |= ETH_TEST_FL_FAILED; 1585 eth_test->flags |= ETH_TEST_FL_FAILED;
1587 1586
1588 if(if_running) 1587 if (if_running)
1589 e1000_down(adapter); 1588 e1000_down(adapter);
1590 else 1589 else
1591 e1000_reset(adapter); 1590 e1000_reset(adapter);
1592 1591
1593 if(e1000_reg_test(adapter, &data[0])) 1592 if (e1000_reg_test(adapter, &data[0]))
1594 eth_test->flags |= ETH_TEST_FL_FAILED; 1593 eth_test->flags |= ETH_TEST_FL_FAILED;
1595 1594
1596 e1000_reset(adapter); 1595 e1000_reset(adapter);
1597 if(e1000_eeprom_test(adapter, &data[1])) 1596 if (e1000_eeprom_test(adapter, &data[1]))
1598 eth_test->flags |= ETH_TEST_FL_FAILED; 1597 eth_test->flags |= ETH_TEST_FL_FAILED;
1599 1598
1600 e1000_reset(adapter); 1599 e1000_reset(adapter);
1601 if(e1000_intr_test(adapter, &data[2])) 1600 if (e1000_intr_test(adapter, &data[2]))
1602 eth_test->flags |= ETH_TEST_FL_FAILED; 1601 eth_test->flags |= ETH_TEST_FL_FAILED;
1603 1602
1604 e1000_reset(adapter); 1603 e1000_reset(adapter);
1605 if(e1000_loopback_test(adapter, &data[3])) 1604 if (e1000_loopback_test(adapter, &data[3]))
1606 eth_test->flags |= ETH_TEST_FL_FAILED; 1605 eth_test->flags |= ETH_TEST_FL_FAILED;
1607 1606
1608 /* restore speed, duplex, autoneg settings */ 1607 /* restore speed, duplex, autoneg settings */
@@ -1611,11 +1610,11 @@ e1000_diag_test(struct net_device *netdev,
1611 adapter->hw.autoneg = autoneg; 1610 adapter->hw.autoneg = autoneg;
1612 1611
1613 e1000_reset(adapter); 1612 e1000_reset(adapter);
1614 if(if_running) 1613 if (if_running)
1615 e1000_up(adapter); 1614 e1000_up(adapter);
1616 } else { 1615 } else {
1617 /* Online tests */ 1616 /* Online tests */
1618 if(e1000_link_test(adapter, &data[4])) 1617 if (e1000_link_test(adapter, &data[4]))
1619 eth_test->flags |= ETH_TEST_FL_FAILED; 1618 eth_test->flags |= ETH_TEST_FL_FAILED;
1620 1619
1621 /* Offline tests aren't run; pass by default */ 1620 /* Offline tests aren't run; pass by default */
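
The offline branch above fills the five ethtool result slots in a fixed order; only the link test also runs online. A sketch of the slot layout — the enum names are invented for illustration, only the indices come from the code:

enum diag_result_index {
        DIAG_REG      = 0,      /* e1000_reg_test      -> data[0] */
        DIAG_EEPROM   = 1,      /* e1000_eeprom_test   -> data[1] */
        DIAG_INTR     = 2,      /* e1000_intr_test     -> data[2] */
        DIAG_LOOPBACK = 3,      /* e1000_loopback_test -> data[3] */
        DIAG_LINK     = 4,      /* e1000_link_test     -> data[4] */
};
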
@@ -1633,7 +1632,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1633 struct e1000_adapter *adapter = netdev_priv(netdev); 1632 struct e1000_adapter *adapter = netdev_priv(netdev);
1634 struct e1000_hw *hw = &adapter->hw; 1633 struct e1000_hw *hw = &adapter->hw;
1635 1634
1636 switch(adapter->hw.device_id) { 1635 switch (adapter->hw.device_id) {
1637 case E1000_DEV_ID_82542: 1636 case E1000_DEV_ID_82542:
1638 case E1000_DEV_ID_82543GC_FIBER: 1637 case E1000_DEV_ID_82543GC_FIBER:
1639 case E1000_DEV_ID_82543GC_COPPER: 1638 case E1000_DEV_ID_82543GC_COPPER:
@@ -1649,7 +1648,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1649 case E1000_DEV_ID_82546GB_FIBER: 1648 case E1000_DEV_ID_82546GB_FIBER:
1650 case E1000_DEV_ID_82571EB_FIBER: 1649 case E1000_DEV_ID_82571EB_FIBER:
1651 /* Wake events only supported on port A for dual fiber */ 1650 /* Wake events only supported on port A for dual fiber */
1652 if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) { 1651 if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
1653 wol->supported = 0; 1652 wol->supported = 0;
1654 wol->wolopts = 0; 1653 wol->wolopts = 0;
1655 return; 1654 return;
@@ -1661,13 +1660,13 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1661 WAKE_BCAST | WAKE_MAGIC; 1660 WAKE_BCAST | WAKE_MAGIC;
1662 1661
1663 wol->wolopts = 0; 1662 wol->wolopts = 0;
1664 if(adapter->wol & E1000_WUFC_EX) 1663 if (adapter->wol & E1000_WUFC_EX)
1665 wol->wolopts |= WAKE_UCAST; 1664 wol->wolopts |= WAKE_UCAST;
1666 if(adapter->wol & E1000_WUFC_MC) 1665 if (adapter->wol & E1000_WUFC_MC)
1667 wol->wolopts |= WAKE_MCAST; 1666 wol->wolopts |= WAKE_MCAST;
1668 if(adapter->wol & E1000_WUFC_BC) 1667 if (adapter->wol & E1000_WUFC_BC)
1669 wol->wolopts |= WAKE_BCAST; 1668 wol->wolopts |= WAKE_BCAST;
1670 if(adapter->wol & E1000_WUFC_MAG) 1669 if (adapter->wol & E1000_WUFC_MAG)
1671 wol->wolopts |= WAKE_MAGIC; 1670 wol->wolopts |= WAKE_MAGIC;
1672 return; 1671 return;
1673 } 1672 }
@@ -1679,7 +1678,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1679 struct e1000_adapter *adapter = netdev_priv(netdev); 1678 struct e1000_adapter *adapter = netdev_priv(netdev);
1680 struct e1000_hw *hw = &adapter->hw; 1679 struct e1000_hw *hw = &adapter->hw;
1681 1680
1682 switch(adapter->hw.device_id) { 1681 switch (adapter->hw.device_id) {
1683 case E1000_DEV_ID_82542: 1682 case E1000_DEV_ID_82542:
1684 case E1000_DEV_ID_82543GC_FIBER: 1683 case E1000_DEV_ID_82543GC_FIBER:
1685 case E1000_DEV_ID_82543GC_COPPER: 1684 case E1000_DEV_ID_82543GC_COPPER:
@@ -1693,23 +1692,23 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1693 case E1000_DEV_ID_82546GB_FIBER: 1692 case E1000_DEV_ID_82546GB_FIBER:
1694 case E1000_DEV_ID_82571EB_FIBER: 1693 case E1000_DEV_ID_82571EB_FIBER:
1695 /* Wake events only supported on port A for dual fiber */ 1694 /* Wake events only supported on port A for dual fiber */
1696 if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) 1695 if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
1697 return wol->wolopts ? -EOPNOTSUPP : 0; 1696 return wol->wolopts ? -EOPNOTSUPP : 0;
1698 /* Fall Through */ 1697 /* Fall Through */
1699 1698
1700 default: 1699 default:
1701 if(wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1700 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1702 return -EOPNOTSUPP; 1701 return -EOPNOTSUPP;
1703 1702
1704 adapter->wol = 0; 1703 adapter->wol = 0;
1705 1704
1706 if(wol->wolopts & WAKE_UCAST) 1705 if (wol->wolopts & WAKE_UCAST)
1707 adapter->wol |= E1000_WUFC_EX; 1706 adapter->wol |= E1000_WUFC_EX;
1708 if(wol->wolopts & WAKE_MCAST) 1707 if (wol->wolopts & WAKE_MCAST)
1709 adapter->wol |= E1000_WUFC_MC; 1708 adapter->wol |= E1000_WUFC_MC;
1710 if(wol->wolopts & WAKE_BCAST) 1709 if (wol->wolopts & WAKE_BCAST)
1711 adapter->wol |= E1000_WUFC_BC; 1710 adapter->wol |= E1000_WUFC_BC;
1712 if(wol->wolopts & WAKE_MAGIC) 1711 if (wol->wolopts & WAKE_MAGIC)
1713 adapter->wol |= E1000_WUFC_MAG; 1712 adapter->wol |= E1000_WUFC_MAG;
1714 } 1713 }
1715 1714
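
get_wol and set_wol above are a straight translation between the ethtool WAKE_* options and the hardware's wakeup filter control (WUFC) bits. A self-contained sketch of the forward mapping; the WUFC values shown are illustrative stand-ins for the e1000_hw.h definitions:

#include <stdint.h>

#define WUFC_EX    0x4          /* directed (unicast) */
#define WUFC_MC    0x8          /* multicast */
#define WUFC_BC    0x10         /* broadcast */
#define WUFC_MAG   0x2          /* magic packet */

#define WAKE_UCAST 0x2          /* ethtool option bits */
#define WAKE_MCAST 0x4
#define WAKE_BCAST 0x8
#define WAKE_MAGIC 0x20

static uint32_t wufc_to_wolopts(uint32_t wufc)
{
        uint32_t wolopts = 0;
        if (wufc & WUFC_EX)  wolopts |= WAKE_UCAST;
        if (wufc & WUFC_MC)  wolopts |= WAKE_MCAST;
        if (wufc & WUFC_BC)  wolopts |= WAKE_BCAST;
        if (wufc & WUFC_MAG) wolopts |= WAKE_MAGIC;
        return wolopts;
}

set_wol inverts this mapping after rejecting WAKE_PHY, WAKE_ARP and WAKE_MAGICSECURE, which this hardware cannot filter on.
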
@@ -1727,7 +1726,7 @@ e1000_led_blink_callback(unsigned long data)
1727{ 1726{
1728 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 1727 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1729 1728
1730 if(test_and_change_bit(E1000_LED_ON, &adapter->led_status)) 1729 if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
1731 e1000_led_off(&adapter->hw); 1730 e1000_led_off(&adapter->hw);
1732 else 1731 else
1733 e1000_led_on(&adapter->hw); 1732 e1000_led_on(&adapter->hw);
@@ -1740,11 +1739,11 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
1740{ 1739{
1741 struct e1000_adapter *adapter = netdev_priv(netdev); 1740 struct e1000_adapter *adapter = netdev_priv(netdev);
1742 1741
1743 if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ)) 1742 if (!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
1744 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ); 1743 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
1745 1744
1746 if(adapter->hw.mac_type < e1000_82571) { 1745 if (adapter->hw.mac_type < e1000_82571) {
1747 if(!adapter->blink_timer.function) { 1746 if (!adapter->blink_timer.function) {
1748 init_timer(&adapter->blink_timer); 1747 init_timer(&adapter->blink_timer);
1749 adapter->blink_timer.function = e1000_led_blink_callback; 1748 adapter->blink_timer.function = e1000_led_blink_callback;
1750 adapter->blink_timer.data = (unsigned long) adapter; 1749 adapter->blink_timer.data = (unsigned long) adapter;
@@ -1782,21 +1781,21 @@ static int
1782e1000_nway_reset(struct net_device *netdev) 1781e1000_nway_reset(struct net_device *netdev)
1783{ 1782{
1784 struct e1000_adapter *adapter = netdev_priv(netdev); 1783 struct e1000_adapter *adapter = netdev_priv(netdev);
1785 if(netif_running(netdev)) { 1784 if (netif_running(netdev)) {
1786 e1000_down(adapter); 1785 e1000_down(adapter);
1787 e1000_up(adapter); 1786 e1000_up(adapter);
1788 } 1787 }
1789 return 0; 1788 return 0;
1790} 1789}
1791 1790
1792static int 1791static int
1793e1000_get_stats_count(struct net_device *netdev) 1792e1000_get_stats_count(struct net_device *netdev)
1794{ 1793{
1795 return E1000_STATS_LEN; 1794 return E1000_STATS_LEN;
1796} 1795}
1797 1796
1798static void 1797static void
1799e1000_get_ethtool_stats(struct net_device *netdev, 1798e1000_get_ethtool_stats(struct net_device *netdev,
1800 struct ethtool_stats *stats, uint64_t *data) 1799 struct ethtool_stats *stats, uint64_t *data)
1801{ 1800{
1802 struct e1000_adapter *adapter = netdev_priv(netdev); 1801 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1830,7 +1829,7 @@ e1000_get_ethtool_stats(struct net_device *netdev,
1830/* BUG_ON(i != E1000_STATS_LEN); */ 1829/* BUG_ON(i != E1000_STATS_LEN); */
1831} 1830}
1832 1831
1833static void 1832static void
1834e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) 1833e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
1835{ 1834{
1836#ifdef CONFIG_E1000_MQ 1835#ifdef CONFIG_E1000_MQ
@@ -1839,9 +1838,9 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
1839 uint8_t *p = data; 1838 uint8_t *p = data;
1840 int i; 1839 int i;
1841 1840
1842 switch(stringset) { 1841 switch (stringset) {
1843 case ETH_SS_TEST: 1842 case ETH_SS_TEST:
1844 memcpy(data, *e1000_gstrings_test, 1843 memcpy(data, *e1000_gstrings_test,
1845 E1000_TEST_LEN*ETH_GSTRING_LEN); 1844 E1000_TEST_LEN*ETH_GSTRING_LEN);
1846 break; 1845 break;
1847 case ETH_SS_STATS: 1846 case ETH_SS_STATS:
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 2437d362ff63..beeec0fbbeac 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -1600,10 +1600,10 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
1600 if(ret_val) 1600 if(ret_val)
1601 return ret_val; 1601 return ret_val;
1602 1602
1603 /* Read the MII 1000Base-T Control Register (Address 9). */ 1603 /* Read the MII 1000Base-T Control Register (Address 9). */
1604 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); 1604 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
1605 if(ret_val) 1605 if(ret_val)
1606 return ret_val; 1606 return ret_val;
1607 1607
1608 /* Need to parse both autoneg_advertised and fc and set up 1608 /* Need to parse both autoneg_advertised and fc and set up
1609 * the appropriate PHY registers. First we will parse for 1609 * the appropriate PHY registers. First we will parse for
@@ -3916,7 +3916,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
3916 } 3916 }
3917 } 3917 }
3918 3918
3919 if(eeprom->use_eerd == TRUE) { 3919 if (eeprom->use_eerd == TRUE) {
3920 ret_val = e1000_read_eeprom_eerd(hw, offset, words, data); 3920 ret_val = e1000_read_eeprom_eerd(hw, offset, words, data);
3921 if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || 3921 if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
3922 (hw->mac_type != e1000_82573)) 3922 (hw->mac_type != e1000_82573))
@@ -4423,7 +4423,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
4423 return -E1000_ERR_EEPROM; 4423 return -E1000_ERR_EEPROM;
4424 } 4424 }
4425 4425
4426 /* If STM opcode located in bits 15:8 of flop, reset firmware */ 4426 /* If STM opcode located in bits 15:8 of flop, reset firmware */
4427 if ((flop & 0xFF00) == E1000_STM_OPCODE) { 4427 if ((flop & 0xFF00) == E1000_STM_OPCODE) {
4428 E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET); 4428 E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
4429 } 4429 }
@@ -4431,7 +4431,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
4431 /* Perform the flash update */ 4431 /* Perform the flash update */
4432 E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD); 4432 E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);
4433 4433
4434 for (i=0; i < attempts; i++) { 4434 for (i=0; i < attempts; i++) {
4435 eecd = E1000_READ_REG(hw, EECD); 4435 eecd = E1000_READ_REG(hw, EECD);
4436 if ((eecd & E1000_EECD_FLUPD) == 0) { 4436 if ((eecd & E1000_EECD_FLUPD) == 0) {
4437 break; 4437 break;
@@ -4504,6 +4504,7 @@ e1000_read_mac_addr(struct e1000_hw * hw)
4504 hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF); 4504 hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
4505 hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8); 4505 hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
4506 } 4506 }
4507
4507 switch (hw->mac_type) { 4508 switch (hw->mac_type) {
4508 default: 4509 default:
4509 break; 4510 break;
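
The function touched above, e1000_read_mac_addr, unpacks the station address from 16-bit EEPROM words, low byte first. A standalone sketch of that unpacking:

#include <stdint.h>

/* Each EEPROM word holds two address octets, little-endian, so a
 * 6-byte MAC comes out of three words. */
static void unpack_mac(const uint16_t words[3], uint8_t mac[6])
{
        int i;
        for (i = 0; i < 3; i++) {
                mac[2 * i]     = (uint8_t)(words[i] & 0x00FF);
                mac[2 * i + 1] = (uint8_t)(words[i] >> 8);
        }
}
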
@@ -6840,7 +6841,8 @@ int32_t
6840e1000_check_phy_reset_block(struct e1000_hw *hw) 6841e1000_check_phy_reset_block(struct e1000_hw *hw)
6841{ 6842{
6842 uint32_t manc = 0; 6843 uint32_t manc = 0;
6843 if(hw->mac_type > e1000_82547_rev_2) 6844
6845 if (hw->mac_type > e1000_82547_rev_2)
6844 manc = E1000_READ_REG(hw, MANC); 6846 manc = E1000_READ_REG(hw, MANC);
6845 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? 6847 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
6846 E1000_BLK_PHY_RESET : E1000_SUCCESS; 6848 E1000_BLK_PHY_RESET : E1000_SUCCESS;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 0b8f6f2b774b..f1219dd9dbac 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -377,6 +377,7 @@ int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
377void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); 377void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
378 378
379/* Filters (multicast, vlan, receive) */ 379/* Filters (multicast, vlan, receive) */
380void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
380uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); 381uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
381void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); 382void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
382void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); 383void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
@@ -401,7 +402,9 @@ void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
401void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); 402void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
402/* Port I/O is only supported on 82544 and newer */ 403/* Port I/O is only supported on 82544 and newer */
403uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port); 404uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
405uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
404void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); 406void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
407void e1000_enable_pciex_master(struct e1000_hw *hw);
405int32_t e1000_disable_pciex_master(struct e1000_hw *hw); 408int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
406int32_t e1000_get_software_semaphore(struct e1000_hw *hw); 409int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
407void e1000_release_software_semaphore(struct e1000_hw *hw); 410void e1000_release_software_semaphore(struct e1000_hw *hw);
@@ -899,14 +902,14 @@ struct e1000_ffvt_entry {
899#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ 902#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
900#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ 903#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
901#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ 904#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
902#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ 905#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
903#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ 906#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
904#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ 907#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
905#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ 908#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
906#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ 909#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
907#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ 910#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
908#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ 911#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
909#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ 912#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
910#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ 913#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
911#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ 914#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
912#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ 915#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
@@ -1761,7 +1764,6 @@ struct e1000_hw {
1761#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 1764#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
1762#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. 1765#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
1763 still to be processed. */ 1766 still to be processed. */
1764
1765/* Transmit Configuration Word */ 1767/* Transmit Configuration Word */
1766#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ 1768#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
1767#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ 1769#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index d0a5d1656c5f..31e332935e5a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -29,11 +29,71 @@
29#include "e1000.h" 29#include "e1000.h"
30 30
31/* Change Log 31/* Change Log
32 * 6.0.58 4/20/05 32 * 6.3.9 12/16/2005
33 * o Accepted ethtool cleanup patch from Stephen Hemminger 33 * o incorporate fix for recycled skbs from IBM LTC
34 * 6.0.44+ 2/15/05 34 * 6.3.7 11/18/2005
35 * o applied Anton's patch to resolve tx hang in hardware 35 * o Honor eeprom setting for enabling/disabling Wake On Lan
36 * o Applied Andrew Mortons patch - e1000 stops working after resume 36 * 6.3.5 11/17/2005
37 * o Fix memory leak in rx ring handling for PCI Express adapters
38 * 6.3.4 11/8/05
39 * o Patch from Jesper Juhl to remove redundant NULL checks for kfree
40 * 6.3.2 9/20/05
41 * o Render logic that sets/resets DRV_LOAD as inline functions to
42 * avoid code replication. If f/w is AMT then set DRV_LOAD only when
43 * network interface is open.
44 * o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
 45 * o Adjust PBA partitioning for Jumbo frames using MTU size and not
46 * rx_buffer_len
47 * 6.3.1 9/19/05
48 * o Use adapter->tx_timeout_factor in Tx Hung Detect logic
 49 * (e1000_clean_tx_irq)
50 * o Support for 8086:10B5 device (Quad Port)
51 * 6.2.14 9/15/05
52 * o In AMT enabled configurations, set/reset DRV_LOAD bit on interface
53 * open/close
54 * 6.2.13 9/14/05
55 * o Invoke e1000_check_mng_mode only for 8257x controllers since it
56 * accesses the FWSM that is not supported in other controllers
57 * 6.2.12 9/9/05
58 * o Add support for device id E1000_DEV_ID_82546GB_QUAD_COPPER
59 * o set RCTL:SECRC only for controllers newer than 82543.
60 * o When the n/w interface comes down reset DRV_LOAD bit to notify f/w.
61 * This code was moved from e1000_remove to e1000_close
62 * 6.2.10 9/6/05
 63 * o Fix error in updating RDT in e1000_alloc_rx_buffers[_ps] -- off by one.
64 * o Enable fc by default on 82573 controllers (do not read eeprom)
65 * o Fix rx_errors statistic not to include missed_packet_count
66 * o Fix rx_dropped statistic not to include missed_packet_count
 67 * (Padraig Brady)
68 * 6.2.9 8/30/05
 69 * o Remove call to update statistics from the controller in e1000_get_stats
70 * 6.2.8 8/30/05
71 * o Improved algorithm for rx buffer allocation/rdt update
72 * o Flow control watermarks relative to rx PBA size
73 * o Simplified 'Tx Hung' detect logic
74 * 6.2.7 8/17/05
75 * o Report rx buffer allocation failures and tx timeout counts in stats
76 * 6.2.6 8/16/05
77 * o Implement workaround for controller erratum -- linear non-tso packet
78 * following a TSO gets written back prematurely
79 * 6.2.5 8/15/05
80 * o Set netdev->tx_queue_len based on link speed/duplex settings.
81 * o Fix net_stats.rx_fifo_errors <p@draigBrady.com>
82 * o Do not power off PHY if SoL/IDER session is active
83 * 6.2.4 8/10/05
84 * o Fix loopback test setup/cleanup for 82571/3 controllers
85 * o Fix parsing of outgoing packets (e1000_transfer_dhcp_info) to treat
86 * all packets as raw
87 * o Prevent operations that will cause the PHY to be reset if SoL/IDER
88 * sessions are active and log a message
89 * 6.2.2 7/21/05
90 * o used fixed size descriptors for all MTU sizes, reduces memory load
91 * 6.1.2 4/13/05
92 * o Fixed ethtool diagnostics
93 * o Enabled flow control to take default eeprom settings
94 * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
95 * calls, one from mii_ioctl and other from within update_stats while
96 * processing MIIREG ioctl.
37 */ 97 */
38 98
39char e1000_driver_name[] = "e1000"; 99char e1000_driver_name[] = "e1000";
@@ -295,7 +355,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
295static inline void 355static inline void
296e1000_irq_enable(struct e1000_adapter *adapter) 356e1000_irq_enable(struct e1000_adapter *adapter)
297{ 357{
298 if(likely(atomic_dec_and_test(&adapter->irq_sem))) { 358 if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
299 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); 359 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
300 E1000_WRITE_FLUSH(&adapter->hw); 360 E1000_WRITE_FLUSH(&adapter->hw);
301 } 361 }
@@ -307,17 +367,17 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
307 struct net_device *netdev = adapter->netdev; 367 struct net_device *netdev = adapter->netdev;
308 uint16_t vid = adapter->hw.mng_cookie.vlan_id; 368 uint16_t vid = adapter->hw.mng_cookie.vlan_id;
309 uint16_t old_vid = adapter->mng_vlan_id; 369 uint16_t old_vid = adapter->mng_vlan_id;
310 if(adapter->vlgrp) { 370 if (adapter->vlgrp) {
311 if(!adapter->vlgrp->vlan_devices[vid]) { 371 if (!adapter->vlgrp->vlan_devices[vid]) {
312 if(adapter->hw.mng_cookie.status & 372 if (adapter->hw.mng_cookie.status &
313 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 373 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
314 e1000_vlan_rx_add_vid(netdev, vid); 374 e1000_vlan_rx_add_vid(netdev, vid);
315 adapter->mng_vlan_id = vid; 375 adapter->mng_vlan_id = vid;
316 } else 376 } else
317 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 377 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
318 378
319 if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 379 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
320 (vid != old_vid) && 380 (vid != old_vid) &&
321 !adapter->vlgrp->vlan_devices[old_vid]) 381 !adapter->vlgrp->vlan_devices[old_vid])
322 e1000_vlan_rx_kill_vid(netdev, old_vid); 382 e1000_vlan_rx_kill_vid(netdev, old_vid);
323 } 383 }
@@ -401,10 +461,10 @@ e1000_up(struct e1000_adapter *adapter)
401 /* hardware has been reset, we need to reload some things */ 461 /* hardware has been reset, we need to reload some things */
402 462
403 /* Reset the PHY if it was previously powered down */ 463 /* Reset the PHY if it was previously powered down */
404 if(adapter->hw.media_type == e1000_media_type_copper) { 464 if (adapter->hw.media_type == e1000_media_type_copper) {
405 uint16_t mii_reg; 465 uint16_t mii_reg;
406 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 466 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
407 if(mii_reg & MII_CR_POWER_DOWN) 467 if (mii_reg & MII_CR_POWER_DOWN)
408 e1000_phy_reset(&adapter->hw); 468 e1000_phy_reset(&adapter->hw);
409 } 469 }
410 470
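
The copper branch above checks whether the PHY was left powered down before bringing the link up. A one-line sketch; MII_CR_POWER_DOWN is bit 11 of the MII control register in the standard layout (the value is an assumption, the real definition lives in the e1000 headers):

#include <stdint.h>

#define MII_CR_POWER_DOWN 0x0800        /* assumed bit-11 encoding */

/* Nonzero means the PHY needs e1000_phy_reset() before link-up. */
static int phy_needs_reset(uint16_t mii_ctrl)
{
        return (mii_ctrl & MII_CR_POWER_DOWN) != 0;
}
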
@@ -425,16 +485,16 @@ e1000_up(struct e1000_adapter *adapter)
425 } 485 }
426 486
427#ifdef CONFIG_PCI_MSI 487#ifdef CONFIG_PCI_MSI
428 if(adapter->hw.mac_type > e1000_82547_rev_2) { 488 if (adapter->hw.mac_type > e1000_82547_rev_2) {
429 adapter->have_msi = TRUE; 489 adapter->have_msi = TRUE;
430 if((err = pci_enable_msi(adapter->pdev))) { 490 if ((err = pci_enable_msi(adapter->pdev))) {
431 DPRINTK(PROBE, ERR, 491 DPRINTK(PROBE, ERR,
432 "Unable to allocate MSI interrupt Error: %d\n", err); 492 "Unable to allocate MSI interrupt Error: %d\n", err);
433 adapter->have_msi = FALSE; 493 adapter->have_msi = FALSE;
434 } 494 }
435 } 495 }
436#endif 496#endif
437 if((err = request_irq(adapter->pdev->irq, &e1000_intr, 497 if ((err = request_irq(adapter->pdev->irq, &e1000_intr,
438 SA_SHIRQ | SA_SAMPLE_RANDOM, 498 SA_SHIRQ | SA_SAMPLE_RANDOM,
439 netdev->name, netdev))) { 499 netdev->name, netdev))) {
440 DPRINTK(PROBE, ERR, 500 DPRINTK(PROBE, ERR,
@@ -471,7 +531,7 @@ e1000_down(struct e1000_adapter *adapter)
471#endif 531#endif
472 free_irq(adapter->pdev->irq, netdev); 532 free_irq(adapter->pdev->irq, netdev);
473#ifdef CONFIG_PCI_MSI 533#ifdef CONFIG_PCI_MSI
474 if(adapter->hw.mac_type > e1000_82547_rev_2 && 534 if (adapter->hw.mac_type > e1000_82547_rev_2 &&
475 adapter->have_msi == TRUE) 535 adapter->have_msi == TRUE)
476 pci_disable_msi(adapter->pdev); 536 pci_disable_msi(adapter->pdev);
477#endif 537#endif
@@ -537,12 +597,12 @@ e1000_reset(struct e1000_adapter *adapter)
537 break; 597 break;
538 } 598 }
539 599
540 if((adapter->hw.mac_type != e1000_82573) && 600 if ((adapter->hw.mac_type != e1000_82573) &&
541 (adapter->netdev->mtu > E1000_RXBUFFER_8192)) 601 (adapter->netdev->mtu > E1000_RXBUFFER_8192))
542 pba -= 8; /* allocate more FIFO for Tx */ 602 pba -= 8; /* allocate more FIFO for Tx */
543 603
544 604
545 if(adapter->hw.mac_type == e1000_82547) { 605 if (adapter->hw.mac_type == e1000_82547) {
546 adapter->tx_fifo_head = 0; 606 adapter->tx_fifo_head = 0;
547 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 607 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
548 adapter->tx_fifo_size = 608 adapter->tx_fifo_size =
@@ -565,9 +625,9 @@ e1000_reset(struct e1000_adapter *adapter)
565 625
566 /* Allow time for pending master requests to run */ 626 /* Allow time for pending master requests to run */
567 e1000_reset_hw(&adapter->hw); 627 e1000_reset_hw(&adapter->hw);
568 if(adapter->hw.mac_type >= e1000_82544) 628 if (adapter->hw.mac_type >= e1000_82544)
569 E1000_WRITE_REG(&adapter->hw, WUC, 0); 629 E1000_WRITE_REG(&adapter->hw, WUC, 0);
570 if(e1000_init_hw(&adapter->hw)) 630 if (e1000_init_hw(&adapter->hw))
571 DPRINTK(PROBE, ERR, "Hardware Error\n"); 631 DPRINTK(PROBE, ERR, "Hardware Error\n");
572 e1000_update_mng_vlan(adapter); 632 e1000_update_mng_vlan(adapter);
573 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 633 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -606,26 +666,26 @@ e1000_probe(struct pci_dev *pdev,
606 int i, err, pci_using_dac; 666 int i, err, pci_using_dac;
607 uint16_t eeprom_data; 667 uint16_t eeprom_data;
608 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 668 uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
609 if((err = pci_enable_device(pdev))) 669 if ((err = pci_enable_device(pdev)))
610 return err; 670 return err;
611 671
612 if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { 672 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
613 pci_using_dac = 1; 673 pci_using_dac = 1;
614 } else { 674 } else {
615 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 675 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
616 E1000_ERR("No usable DMA configuration, aborting\n"); 676 E1000_ERR("No usable DMA configuration, aborting\n");
617 return err; 677 return err;
618 } 678 }
619 pci_using_dac = 0; 679 pci_using_dac = 0;
620 } 680 }
621 681
622 if((err = pci_request_regions(pdev, e1000_driver_name))) 682 if ((err = pci_request_regions(pdev, e1000_driver_name)))
623 return err; 683 return err;
624 684
625 pci_set_master(pdev); 685 pci_set_master(pdev);
626 686
627 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 687 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
628 if(!netdev) { 688 if (!netdev) {
629 err = -ENOMEM; 689 err = -ENOMEM;
630 goto err_alloc_etherdev; 690 goto err_alloc_etherdev;
631 } 691 }
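
The probe path above prefers a 64-bit DMA mask and falls back to 32-bit, remembering the result so NETIF_F_HIGHDMA can be set later. A standalone sketch of the pattern, with try_set_mask standing in for pci_set_dma_mask():

#include <stdint.h>

static int pick_dma_mask(int (*try_set_mask)(uint64_t), int *using_dac)
{
        if (try_set_mask(0xFFFFFFFFFFFFFFFFull) == 0) {
                *using_dac = 1;         /* device can DMA above 4 GB */
                return 0;
        }
        if (try_set_mask(0xFFFFFFFFull) == 0) {
                *using_dac = 0;         /* constrain buffers below 4 GB */
                return 0;
        }
        return -1;                      /* no usable DMA configuration */
}
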
@@ -644,15 +704,15 @@ e1000_probe(struct pci_dev *pdev,
644 mmio_len = pci_resource_len(pdev, BAR_0); 704 mmio_len = pci_resource_len(pdev, BAR_0);
645 705
646 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 706 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
647 if(!adapter->hw.hw_addr) { 707 if (!adapter->hw.hw_addr) {
648 err = -EIO; 708 err = -EIO;
649 goto err_ioremap; 709 goto err_ioremap;
650 } 710 }
651 711
652 for(i = BAR_1; i <= BAR_5; i++) { 712 for (i = BAR_1; i <= BAR_5; i++) {
653 if(pci_resource_len(pdev, i) == 0) 713 if (pci_resource_len(pdev, i) == 0)
654 continue; 714 continue;
655 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) { 715 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
656 adapter->hw.io_base = pci_resource_start(pdev, i); 716 adapter->hw.io_base = pci_resource_start(pdev, i);
657 break; 717 break;
658 } 718 }
@@ -689,13 +749,13 @@ e1000_probe(struct pci_dev *pdev,
689 749
690 /* setup the private structure */ 750 /* setup the private structure */
691 751
692 if((err = e1000_sw_init(adapter))) 752 if ((err = e1000_sw_init(adapter)))
693 goto err_sw_init; 753 goto err_sw_init;
694 754
695 if((err = e1000_check_phy_reset_block(&adapter->hw))) 755 if ((err = e1000_check_phy_reset_block(&adapter->hw)))
696 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); 756 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
697 757
698 if(adapter->hw.mac_type >= e1000_82543) { 758 if (adapter->hw.mac_type >= e1000_82543) {
699 netdev->features = NETIF_F_SG | 759 netdev->features = NETIF_F_SG |
700 NETIF_F_HW_CSUM | 760 NETIF_F_HW_CSUM |
701 NETIF_F_HW_VLAN_TX | 761 NETIF_F_HW_VLAN_TX |
@@ -704,16 +764,16 @@ e1000_probe(struct pci_dev *pdev,
704 } 764 }
705 765
706#ifdef NETIF_F_TSO 766#ifdef NETIF_F_TSO
707 if((adapter->hw.mac_type >= e1000_82544) && 767 if ((adapter->hw.mac_type >= e1000_82544) &&
708 (adapter->hw.mac_type != e1000_82547)) 768 (adapter->hw.mac_type != e1000_82547))
709 netdev->features |= NETIF_F_TSO; 769 netdev->features |= NETIF_F_TSO;
710 770
711#ifdef NETIF_F_TSO_IPV6 771#ifdef NETIF_F_TSO_IPV6
712 if(adapter->hw.mac_type > e1000_82547_rev_2) 772 if (adapter->hw.mac_type > e1000_82547_rev_2)
713 netdev->features |= NETIF_F_TSO_IPV6; 773 netdev->features |= NETIF_F_TSO_IPV6;
714#endif 774#endif
715#endif 775#endif
716 if(pci_using_dac) 776 if (pci_using_dac)
717 netdev->features |= NETIF_F_HIGHDMA; 777 netdev->features |= NETIF_F_HIGHDMA;
718 778
719 /* hard_start_xmit is safe against parallel locking */ 779 /* hard_start_xmit is safe against parallel locking */
@@ -721,14 +781,14 @@ e1000_probe(struct pci_dev *pdev,
721 781
722 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); 782 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
723 783
724 /* before reading the EEPROM, reset the controller to 784 /* before reading the EEPROM, reset the controller to
725 * put the device in a known good starting state */ 785 * put the device in a known good starting state */
726 786
727 e1000_reset_hw(&adapter->hw); 787 e1000_reset_hw(&adapter->hw);
728 788
729 /* make sure the EEPROM is good */ 789 /* make sure the EEPROM is good */
730 790
731 if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) { 791 if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
732 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); 792 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
733 err = -EIO; 793 err = -EIO;
734 goto err_eeprom; 794 goto err_eeprom;
@@ -736,12 +796,12 @@ e1000_probe(struct pci_dev *pdev,
736 796
737 /* copy the MAC address out of the EEPROM */ 797 /* copy the MAC address out of the EEPROM */
738 798
739 if(e1000_read_mac_addr(&adapter->hw)) 799 if (e1000_read_mac_addr(&adapter->hw))
740 DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); 800 DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
741 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 801 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
742 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); 802 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
743 803
744 if(!is_valid_ether_addr(netdev->perm_addr)) { 804 if (!is_valid_ether_addr(netdev->perm_addr)) {
745 DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); 805 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
746 err = -EIO; 806 err = -EIO;
747 goto err_eeprom; 807 goto err_eeprom;
@@ -781,7 +841,7 @@ e1000_probe(struct pci_dev *pdev,
781 * enable the ACPI Magic Packet filter 841 * enable the ACPI Magic Packet filter
782 */ 842 */
783 843
784 switch(adapter->hw.mac_type) { 844 switch (adapter->hw.mac_type) {
785 case e1000_82542_rev2_0: 845 case e1000_82542_rev2_0:
786 case e1000_82542_rev2_1: 846 case e1000_82542_rev2_1:
787 case e1000_82543: 847 case e1000_82543:
@@ -794,7 +854,7 @@ e1000_probe(struct pci_dev *pdev,
794 case e1000_82546: 854 case e1000_82546:
795 case e1000_82546_rev_3: 855 case e1000_82546_rev_3:
796 case e1000_82571: 856 case e1000_82571:
797 if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ 857 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
798 e1000_read_eeprom(&adapter->hw, 858 e1000_read_eeprom(&adapter->hw,
799 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 859 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
800 break; 860 break;
@@ -805,7 +865,7 @@ e1000_probe(struct pci_dev *pdev,
805 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 865 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
806 break; 866 break;
807 } 867 }
808 if(eeprom_data & eeprom_apme_mask) 868 if (eeprom_data & eeprom_apme_mask)
809 adapter->wol |= E1000_WUFC_MAG; 869 adapter->wol |= E1000_WUFC_MAG;
810 870
811 /* print bus type/speed/width info */ 871 /* print bus type/speed/width info */
@@ -840,7 +900,7 @@ e1000_probe(struct pci_dev *pdev,
840 e1000_get_hw_control(adapter); 900 e1000_get_hw_control(adapter);
841 901
842 strcpy(netdev->name, "eth%d"); 902 strcpy(netdev->name, "eth%d");
843 if((err = register_netdev(netdev))) 903 if ((err = register_netdev(netdev)))
844 goto err_register; 904 goto err_register;
845 905
846 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); 906 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
@@ -881,10 +941,10 @@ e1000_remove(struct pci_dev *pdev)
881 941
882 flush_scheduled_work(); 942 flush_scheduled_work();
883 943
884 if(adapter->hw.mac_type >= e1000_82540 && 944 if (adapter->hw.mac_type >= e1000_82540 &&
885 adapter->hw.media_type == e1000_media_type_copper) { 945 adapter->hw.media_type == e1000_media_type_copper) {
886 manc = E1000_READ_REG(&adapter->hw, MANC); 946 manc = E1000_READ_REG(&adapter->hw, MANC);
887 if(manc & E1000_MANC_SMBUS_EN) { 947 if (manc & E1000_MANC_SMBUS_EN) {
888 manc |= E1000_MANC_ARP_EN; 948 manc |= E1000_MANC_ARP_EN;
889 E1000_WRITE_REG(&adapter->hw, MANC, manc); 949 E1000_WRITE_REG(&adapter->hw, MANC, manc);
890 } 950 }
@@ -900,7 +960,7 @@ e1000_remove(struct pci_dev *pdev)
900 __dev_put(&adapter->polling_netdev[i]); 960 __dev_put(&adapter->polling_netdev[i]);
901#endif 961#endif
902 962
903 if(!e1000_check_phy_reset_block(&adapter->hw)) 963 if (!e1000_check_phy_reset_block(&adapter->hw))
904 e1000_phy_hw_reset(&adapter->hw); 964 e1000_phy_hw_reset(&adapter->hw);
905 965
906 kfree(adapter->tx_ring); 966 kfree(adapter->tx_ring);
@@ -959,19 +1019,19 @@ e1000_sw_init(struct e1000_adapter *adapter)
959 1019
960 /* identify the MAC */ 1020 /* identify the MAC */
961 1021
962 if(e1000_set_mac_type(hw)) { 1022 if (e1000_set_mac_type(hw)) {
963 DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); 1023 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
964 return -EIO; 1024 return -EIO;
965 } 1025 }
966 1026
967 /* initialize eeprom parameters */ 1027 /* initialize eeprom parameters */
968 1028
969 if(e1000_init_eeprom_params(hw)) { 1029 if (e1000_init_eeprom_params(hw)) {
970 E1000_ERR("EEPROM initialization failed\n"); 1030 E1000_ERR("EEPROM initialization failed\n");
971 return -EIO; 1031 return -EIO;
972 } 1032 }
973 1033
974 switch(hw->mac_type) { 1034 switch (hw->mac_type) {
975 default: 1035 default:
976 break; 1036 break;
977 case e1000_82541: 1037 case e1000_82541:
@@ -990,7 +1050,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
990 1050
991 /* Copper options */ 1051 /* Copper options */
992 1052
993 if(hw->media_type == e1000_media_type_copper) { 1053 if (hw->media_type == e1000_media_type_copper) {
994 hw->mdix = AUTO_ALL_MODES; 1054 hw->mdix = AUTO_ALL_MODES;
995 hw->disable_polarity_correction = FALSE; 1055 hw->disable_polarity_correction = FALSE;
996 hw->master_slave = E1000_MASTER_SLAVE; 1056 hw->master_slave = E1000_MASTER_SLAVE;
@@ -1166,10 +1226,10 @@ e1000_open(struct net_device *netdev)
1166 if ((err = e1000_setup_all_rx_resources(adapter))) 1226 if ((err = e1000_setup_all_rx_resources(adapter)))
1167 goto err_setup_rx; 1227 goto err_setup_rx;
1168 1228
1169 if((err = e1000_up(adapter))) 1229 if ((err = e1000_up(adapter)))
1170 goto err_up; 1230 goto err_up;
1171 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 1231 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1172 if((adapter->hw.mng_cookie.status & 1232 if ((adapter->hw.mng_cookie.status &
1173 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1233 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1174 e1000_update_mng_vlan(adapter); 1234 e1000_update_mng_vlan(adapter);
1175 } 1235 }
@@ -1214,7 +1274,7 @@ e1000_close(struct net_device *netdev)
1214 e1000_free_all_tx_resources(adapter); 1274 e1000_free_all_tx_resources(adapter);
1215 e1000_free_all_rx_resources(adapter); 1275 e1000_free_all_rx_resources(adapter);
1216 1276
1217 if((adapter->hw.mng_cookie.status & 1277 if ((adapter->hw.mng_cookie.status &
1218 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1278 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1219 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1279 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1220 } 1280 }
@@ -1269,7 +1329,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1269 size = sizeof(struct e1000_buffer) * txdr->count; 1329 size = sizeof(struct e1000_buffer) * txdr->count;
1270 1330
1271 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); 1331 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
1272 if(!txdr->buffer_info) { 1332 if (!txdr->buffer_info) {
1273 DPRINTK(PROBE, ERR, 1333 DPRINTK(PROBE, ERR,
1274 "Unable to allocate memory for the transmit descriptor ring\n"); 1334 "Unable to allocate memory for the transmit descriptor ring\n");
1275 return -ENOMEM; 1335 return -ENOMEM;
@@ -1282,7 +1342,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1282 E1000_ROUNDUP(txdr->size, 4096); 1342 E1000_ROUNDUP(txdr->size, 4096);
1283 1343
1284 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1344 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1285 if(!txdr->desc) { 1345 if (!txdr->desc) {
1286setup_tx_desc_die: 1346setup_tx_desc_die:
1287 vfree(txdr->buffer_info); 1347 vfree(txdr->buffer_info);
1288 DPRINTK(PROBE, ERR, 1348 DPRINTK(PROBE, ERR,
@@ -1298,8 +1358,8 @@ setup_tx_desc_die:
1298 "at %p\n", txdr->size, txdr->desc); 1358 "at %p\n", txdr->size, txdr->desc);
1299 /* Try again, without freeing the previous */ 1359 /* Try again, without freeing the previous */
1300 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1360 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1301 if(!txdr->desc) {
1302 /* Failed allocation, critical failure */ 1361 /* Failed allocation, critical failure */
1362 if (!txdr->desc) {
1303 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1363 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1304 goto setup_tx_desc_die; 1364 goto setup_tx_desc_die;
1305 } 1365 }
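
The reordered lines above belong to an allocation-retry trick: when the first DMA buffer comes back unusable, the driver deliberately keeps it allocated while asking for a second one, so the allocator cannot hand the same region back; only then is the first freed. A generic sketch under that reading, with alloc/release/usable standing in for pci_alloc_consistent, pci_free_consistent and the driver's placement check:

#include <stddef.h>

static void *retry_alloc(void *(*alloc)(size_t), void (*release)(void *),
                         int (*usable)(void *), size_t size)
{
        void *first = alloc(size);
        void *second;

        if (!first)
                return NULL;
        if (usable(first))
                return first;
        second = alloc(size);   /* `first` is still held on purpose */
        release(first);
        if (second && !usable(second)) {
                release(second);        /* critical failure in the driver */
                second = NULL;
        }
        return second;
}
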
@@ -1499,7 +1559,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1499 1559
1500 size = sizeof(struct e1000_ps_page) * rxdr->count; 1560 size = sizeof(struct e1000_ps_page) * rxdr->count;
1501 rxdr->ps_page = kmalloc(size, GFP_KERNEL); 1561 rxdr->ps_page = kmalloc(size, GFP_KERNEL);
1502 if(!rxdr->ps_page) { 1562 if (!rxdr->ps_page) {
1503 vfree(rxdr->buffer_info); 1563 vfree(rxdr->buffer_info);
1504 DPRINTK(PROBE, ERR, 1564 DPRINTK(PROBE, ERR,
1505 "Unable to allocate memory for the receive descriptor ring\n"); 1565 "Unable to allocate memory for the receive descriptor ring\n");
@@ -1509,7 +1569,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1509 1569
1510 size = sizeof(struct e1000_ps_page_dma) * rxdr->count; 1570 size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
1511 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); 1571 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
1512 if(!rxdr->ps_page_dma) { 1572 if (!rxdr->ps_page_dma) {
1513 vfree(rxdr->buffer_info); 1573 vfree(rxdr->buffer_info);
1514 kfree(rxdr->ps_page); 1574 kfree(rxdr->ps_page);
1515 DPRINTK(PROBE, ERR, 1575 DPRINTK(PROBE, ERR,
@@ -1518,7 +1578,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1518 } 1578 }
1519 memset(rxdr->ps_page_dma, 0, size); 1579 memset(rxdr->ps_page_dma, 0, size);
1520 1580
1521 if(adapter->hw.mac_type <= e1000_82547_rev_2) 1581 if (adapter->hw.mac_type <= e1000_82547_rev_2)
1522 desc_len = sizeof(struct e1000_rx_desc); 1582 desc_len = sizeof(struct e1000_rx_desc);
1523 else 1583 else
1524 desc_len = sizeof(union e1000_rx_desc_packet_split); 1584 desc_len = sizeof(union e1000_rx_desc_packet_split);
@@ -1621,7 +1681,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1621{ 1681{
1622 uint32_t rctl, rfctl; 1682 uint32_t rctl, rfctl;
1623 uint32_t psrctl = 0; 1683 uint32_t psrctl = 0;
1624#ifdef CONFIG_E1000_PACKET_SPLIT 1684#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1625 uint32_t pages = 0; 1685 uint32_t pages = 0;
1626#endif 1686#endif
1627 1687
@@ -1647,32 +1707,17 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1647 rctl |= E1000_RCTL_LPE; 1707 rctl |= E1000_RCTL_LPE;
1648 1708
1649 /* Setup buffer sizes */ 1709 /* Setup buffer sizes */
1650 if(adapter->hw.mac_type >= e1000_82571) { 1710 if (adapter->hw.mac_type >= e1000_82571) {
1651 /* We can now specify buffers in 1K increments. 1711 /* We can now specify buffers in 1K increments.
1652 * BSIZE and BSEX are ignored in this case. */ 1712 * BSIZE and BSEX are ignored in this case. */
1653 rctl |= adapter->rx_buffer_len << 0x11; 1713 rctl |= adapter->rx_buffer_len << 0x11;
1654 } else { 1714 } else {
1655 rctl &= ~E1000_RCTL_SZ_4096; 1715 rctl &= ~E1000_RCTL_SZ_4096;
1656 rctl |= E1000_RCTL_BSEX; 1716 rctl &= ~E1000_RCTL_BSEX;
1657 switch (adapter->rx_buffer_len) { 1717 rctl |= E1000_RCTL_SZ_2048;
1658 case E1000_RXBUFFER_2048:
1659 default:
1660 rctl |= E1000_RCTL_SZ_2048;
1661 rctl &= ~E1000_RCTL_BSEX;
1662 break;
1663 case E1000_RXBUFFER_4096:
1664 rctl |= E1000_RCTL_SZ_4096;
1665 break;
1666 case E1000_RXBUFFER_8192:
1667 rctl |= E1000_RCTL_SZ_8192;
1668 break;
1669 case E1000_RXBUFFER_16384:
1670 rctl |= E1000_RCTL_SZ_16384;
1671 break;
1672 }
1673 } 1718 }
1674 1719
1675#ifdef CONFIG_E1000_PACKET_SPLIT 1720#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1676 /* 82571 and greater support packet-split where the protocol 1721 /* 82571 and greater support packet-split where the protocol
1677 * header is placed in skb->data and the packet data is 1722 * header is placed in skb->data and the packet data is
1678 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. 1723 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
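
The rewritten buffer-size block above drops the 4096/8192/16384 cases: pre-82571 MACs are now always given 2048-byte receive buffers, while 82571 and newer encode the size directly. Shifting the byte count left by 0x11 (17) amounts to writing the size in 1 KB units into the field at bit 27, which is why BSIZE/BSEX are ignored there. A sketch with illustrative constants:

#include <stdint.h>

#define RCTL_SZ_2048 0x0                /* stand-ins for e1000_hw.h values */
#define RCTL_BSEX    0x02000000

static uint32_t rctl_size_bits(int is_82571_or_newer, uint32_t buf_len)
{
        if (is_82571_or_newer)
                return buf_len << 0x11; /* e.g. 2048 -> 2 (KB) at bit 27 */
        return RCTL_SZ_2048;            /* BSEX and SZ_* bits left clear */
}
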
@@ -1696,7 +1741,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1696 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); 1741 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
1697 1742
1698 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; 1743 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
1699 1744
1700 psrctl |= adapter->rx_ps_bsize0 >> 1745 psrctl |= adapter->rx_ps_bsize0 >>
1701 E1000_PSRCTL_BSIZE0_SHIFT; 1746 E1000_PSRCTL_BSIZE0_SHIFT;
1702 1747
@@ -1758,7 +1803,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1758 1803
1759 if (hw->mac_type >= e1000_82540) { 1804 if (hw->mac_type >= e1000_82540) {
1760 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); 1805 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
1761 if(adapter->itr > 1) 1806 if (adapter->itr > 1)
1762 E1000_WRITE_REG(hw, ITR, 1807 E1000_WRITE_REG(hw, ITR,
1763 1000000000 / (adapter->itr * 256)); 1808 1000000000 / (adapter->itr * 256));
1764 } 1809 }
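
The ITR write above converts a target interrupt rate into the register's units, namely the interval between interrupts counted in 256 ns increments. Worked through as a sketch:

#include <stdint.h>

/* 1e9 ns per second / (rate * 256 ns per unit) = interval in units.
 * e.g. itr = 8000 ints/s -> 1000000000 / (8000 * 256) = 488 units,
 * i.e. about 125 us between interrupts. Assumes rate > 0. */
static uint32_t itr_units(uint32_t ints_per_sec)
{
        return 1000000000u / (ints_per_sec * 256u);
}
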
@@ -1847,13 +1892,13 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1847 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1892 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1848 if (hw->mac_type >= e1000_82543) { 1893 if (hw->mac_type >= e1000_82543) {
1849 rxcsum = E1000_READ_REG(hw, RXCSUM); 1894 rxcsum = E1000_READ_REG(hw, RXCSUM);
1850 if(adapter->rx_csum == TRUE) { 1895 if (adapter->rx_csum == TRUE) {
1851 rxcsum |= E1000_RXCSUM_TUOFL; 1896 rxcsum |= E1000_RXCSUM_TUOFL;
1852 1897
1853 /* Enable 82571 IPv4 payload checksum for UDP fragments 1898 /* Enable 82571 IPv4 payload checksum for UDP fragments
1854 * Must be used in conjunction with packet-split. */ 1899 * Must be used in conjunction with packet-split. */
1855 if ((hw->mac_type >= e1000_82571) && 1900 if ((hw->mac_type >= e1000_82571) &&
1856 (adapter->rx_ps_pages)) { 1901 (adapter->rx_ps_pages)) {
1857 rxcsum |= E1000_RXCSUM_IPPCSE; 1902 rxcsum |= E1000_RXCSUM_IPPCSE;
1858 } 1903 }
1859 } else { 1904 } else {
@@ -1915,7 +1960,7 @@ static inline void
1915e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1960e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1916 struct e1000_buffer *buffer_info) 1961 struct e1000_buffer *buffer_info)
1917{ 1962{
1918 if(buffer_info->dma) { 1963 if (buffer_info->dma) {
1919 pci_unmap_page(adapter->pdev, 1964 pci_unmap_page(adapter->pdev,
1920 buffer_info->dma, 1965 buffer_info->dma,
1921 buffer_info->length, 1966 buffer_info->length,
@@ -1942,7 +1987,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter,
1942 1987
1943 /* Free all the Tx ring sk_buffs */ 1988 /* Free all the Tx ring sk_buffs */
1944 1989
1945 for(i = 0; i < tx_ring->count; i++) { 1990 for (i = 0; i < tx_ring->count; i++) {
1946 buffer_info = &tx_ring->buffer_info[i]; 1991 buffer_info = &tx_ring->buffer_info[i];
1947 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1992 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1948 } 1993 }
@@ -2038,10 +2083,9 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
2038 unsigned int i, j; 2083 unsigned int i, j;
2039 2084
2040 /* Free all the Rx ring sk_buffs */ 2085 /* Free all the Rx ring sk_buffs */
2041 2086 for (i = 0; i < rx_ring->count; i++) {
2042 for(i = 0; i < rx_ring->count; i++) {
2043 buffer_info = &rx_ring->buffer_info[i]; 2087 buffer_info = &rx_ring->buffer_info[i];
2044 if(buffer_info->skb) { 2088 if (buffer_info->skb) {
2045 pci_unmap_single(pdev, 2089 pci_unmap_single(pdev,
2046 buffer_info->dma, 2090 buffer_info->dma,
2047 buffer_info->length, 2091 buffer_info->length,
@@ -2122,7 +2166,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
2122 E1000_WRITE_FLUSH(&adapter->hw); 2166 E1000_WRITE_FLUSH(&adapter->hw);
2123 mdelay(5); 2167 mdelay(5);
2124 2168
2125 if(netif_running(netdev)) 2169 if (netif_running(netdev))
2126 e1000_clean_all_rx_rings(adapter); 2170 e1000_clean_all_rx_rings(adapter);
2127} 2171}
2128 2172
@@ -2138,13 +2182,13 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
2138 E1000_WRITE_FLUSH(&adapter->hw); 2182 E1000_WRITE_FLUSH(&adapter->hw);
2139 mdelay(5); 2183 mdelay(5);
2140 2184
2141 if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) 2185 if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
2142 e1000_pci_set_mwi(&adapter->hw); 2186 e1000_pci_set_mwi(&adapter->hw);
2143 2187
2144 if(netif_running(netdev)) { 2188 if (netif_running(netdev)) {
2145 e1000_configure_rx(adapter);
2146 /* No need to loop, because 82542 supports only 1 queue */ 2189 /* No need to loop, because 82542 supports only 1 queue */
2147 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2190 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2191 e1000_configure_rx(adapter);
2148 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); 2192 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2149 } 2193 }
2150} 2194}
@@ -2163,12 +2207,12 @@ e1000_set_mac(struct net_device *netdev, void *p)
2163 struct e1000_adapter *adapter = netdev_priv(netdev); 2207 struct e1000_adapter *adapter = netdev_priv(netdev);
2164 struct sockaddr *addr = p; 2208 struct sockaddr *addr = p;
2165 2209
2166 if(!is_valid_ether_addr(addr->sa_data)) 2210 if (!is_valid_ether_addr(addr->sa_data))
2167 return -EADDRNOTAVAIL; 2211 return -EADDRNOTAVAIL;
2168 2212
2169 /* 82542 2.0 needs to be in reset to write receive address registers */ 2213 /* 82542 2.0 needs to be in reset to write receive address registers */
2170 2214
2171 if(adapter->hw.mac_type == e1000_82542_rev2_0) 2215 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2172 e1000_enter_82542_rst(adapter); 2216 e1000_enter_82542_rst(adapter);
2173 2217
2174 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2218 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -2182,17 +2226,17 @@ e1000_set_mac(struct net_device *netdev, void *p)
2182 /* activate the work around */ 2226 /* activate the work around */
2183 adapter->hw.laa_is_present = 1; 2227 adapter->hw.laa_is_present = 1;
2184 2228
2185 /* Hold a copy of the LAA in RAR[14] This is done so that 2229 /* Hold a copy of the LAA in RAR[14] This is done so that
2186 * between the time RAR[0] gets clobbered and the time it 2230 * between the time RAR[0] gets clobbered and the time it
2187 * gets fixed (in e1000_watchdog), the actual LAA is in one 2231 * gets fixed (in e1000_watchdog), the actual LAA is in one
2188 * of the RARs and no incoming packets directed to this port 2232 * of the RARs and no incoming packets directed to this port
 2189 * are dropped. Eventually the LAA will be in RAR[0] and 2233 * are dropped. Eventually the LAA will be in RAR[0] and
2190 * RAR[14] */ 2234 * RAR[14] */
2191 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 2235 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
2192 E1000_RAR_ENTRIES - 1); 2236 E1000_RAR_ENTRIES - 1);
2193 } 2237 }
2194 2238
2195 if(adapter->hw.mac_type == e1000_82542_rev2_0) 2239 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2196 e1000_leave_82542_rst(adapter); 2240 e1000_leave_82542_rst(adapter);
2197 2241
2198 return 0; 2242 return 0;
@@ -2226,9 +2270,9 @@ e1000_set_multi(struct net_device *netdev)
2226 2270
2227 rctl = E1000_READ_REG(hw, RCTL); 2271 rctl = E1000_READ_REG(hw, RCTL);
2228 2272
2229 if(netdev->flags & IFF_PROMISC) { 2273 if (netdev->flags & IFF_PROMISC) {
2230 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2274 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2231 } else if(netdev->flags & IFF_ALLMULTI) { 2275 } else if (netdev->flags & IFF_ALLMULTI) {
2232 rctl |= E1000_RCTL_MPE; 2276 rctl |= E1000_RCTL_MPE;
2233 rctl &= ~E1000_RCTL_UPE; 2277 rctl &= ~E1000_RCTL_UPE;
2234 } else { 2278 } else {
@@ -2239,7 +2283,7 @@ e1000_set_multi(struct net_device *netdev)
2239 2283
2240 /* 82542 2.0 needs to be in reset to write receive address registers */ 2284 /* 82542 2.0 needs to be in reset to write receive address registers */
2241 2285
2242 if(hw->mac_type == e1000_82542_rev2_0) 2286 if (hw->mac_type == e1000_82542_rev2_0)
2243 e1000_enter_82542_rst(adapter); 2287 e1000_enter_82542_rst(adapter);
2244 2288
2245 /* load the first 14 multicast address into the exact filters 1-14 2289 /* load the first 14 multicast address into the exact filters 1-14
@@ -2249,7 +2293,7 @@ e1000_set_multi(struct net_device *netdev)
2249 */ 2293 */
2250 mc_ptr = netdev->mc_list; 2294 mc_ptr = netdev->mc_list;
2251 2295
2252 for(i = 1; i < rar_entries; i++) { 2296 for (i = 1; i < rar_entries; i++) {
2253 if (mc_ptr) { 2297 if (mc_ptr) {
2254 e1000_rar_set(hw, mc_ptr->dmi_addr, i); 2298 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
2255 mc_ptr = mc_ptr->next; 2299 mc_ptr = mc_ptr->next;
@@ -2261,17 +2305,17 @@ e1000_set_multi(struct net_device *netdev)
2261 2305
2262 /* clear the old settings from the multicast hash table */ 2306 /* clear the old settings from the multicast hash table */
2263 2307
2264 for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) 2308 for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
2265 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 2309 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
2266 2310
2267 /* load any remaining addresses into the hash table */ 2311 /* load any remaining addresses into the hash table */
2268 2312
2269 for(; mc_ptr; mc_ptr = mc_ptr->next) { 2313 for (; mc_ptr; mc_ptr = mc_ptr->next) {
2270 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr); 2314 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
2271 e1000_mta_set(hw, hash_value); 2315 e1000_mta_set(hw, hash_value);
2272 } 2316 }
2273 2317
2274 if(hw->mac_type == e1000_82542_rev2_0) 2318 if (hw->mac_type == e1000_82542_rev2_0)
2275 e1000_leave_82542_rst(adapter); 2319 e1000_leave_82542_rst(adapter);
2276} 2320}
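
The hunk above shows the standard e1000 multicast strategy: the first addresses go into the exact-match receive address registers (RAR[1..14], with RAR[0] reserved for the station address) and everything that does not fit lands in the multicast hash table (MTA). A minimal standalone sketch of that split follows, with stand-in sizes and a placeholder hash rather than the driver's e1000_hash_mc_addr():

    #include <stdint.h>

    #define NUM_EXACT_FILTERS 14    /* RAR[1..14]; RAR[0] holds the MAC */
    #define HASH_TABLE_BITS   4096  /* MTA: 128 x 32-bit registers */

    struct mc_addr { uint8_t addr[6]; struct mc_addr *next; };

    /* Placeholder hash; the real e1000_hash_mc_addr() folds selected
     * address bits depending on the configured filter granularity. */
    static unsigned hash_mc(const uint8_t *a)
    {
            return ((a[4] << 4) | (a[5] >> 4)) % HASH_TABLE_BITS;
    }

    static void program_multicast(struct mc_addr *list,
                                  uint8_t exact[][6], uint32_t mta[128])
    {
            int i, b;

            /* Exact-match slots first; unused slots must be cleared. */
            for (i = 0; i < NUM_EXACT_FILTERS; i++) {
                    for (b = 0; b < 6; b++)
                            exact[i][b] = list ? list->addr[b] : 0;
                    if (list)
                            list = list->next;
            }
            /* Remaining addresses set one bit each in the hash table. */
            for (i = 0; i < HASH_TABLE_BITS / 32; i++)
                    mta[i] = 0;
            for (; list; list = list->next) {
                    unsigned h = hash_mc(list->addr);
                    mta[h >> 5] |= 1u << (h & 31);
            }
    }

Hash filtering is lossy (several addresses can share a bit), which is why the exact slots are spent first.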
2277 2321
@@ -2297,8 +2341,8 @@ e1000_82547_tx_fifo_stall(unsigned long data)
2297 struct net_device *netdev = adapter->netdev; 2341 struct net_device *netdev = adapter->netdev;
2298 uint32_t tctl; 2342 uint32_t tctl;
2299 2343
2300 if(atomic_read(&adapter->tx_fifo_stall)) { 2344 if (atomic_read(&adapter->tx_fifo_stall)) {
2301 if((E1000_READ_REG(&adapter->hw, TDT) == 2345 if ((E1000_READ_REG(&adapter->hw, TDT) ==
2302 E1000_READ_REG(&adapter->hw, TDH)) && 2346 E1000_READ_REG(&adapter->hw, TDH)) &&
2303 (E1000_READ_REG(&adapter->hw, TDFT) == 2347 (E1000_READ_REG(&adapter->hw, TDFT) ==
2304 E1000_READ_REG(&adapter->hw, TDFH)) && 2348 E1000_READ_REG(&adapter->hw, TDFH)) &&
@@ -2350,18 +2394,18 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2350 e1000_check_for_link(&adapter->hw); 2394 e1000_check_for_link(&adapter->hw);
2351 if (adapter->hw.mac_type == e1000_82573) { 2395 if (adapter->hw.mac_type == e1000_82573) {
2352 e1000_enable_tx_pkt_filtering(&adapter->hw); 2396 e1000_enable_tx_pkt_filtering(&adapter->hw);
2353 if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) 2397 if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
2354 e1000_update_mng_vlan(adapter); 2398 e1000_update_mng_vlan(adapter);
2355 } 2399 }
2356 2400
2357 if((adapter->hw.media_type == e1000_media_type_internal_serdes) && 2401 if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
2358 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) 2402 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
2359 link = !adapter->hw.serdes_link_down; 2403 link = !adapter->hw.serdes_link_down;
2360 else 2404 else
2361 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; 2405 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
2362 2406
2363 if(link) { 2407 if (link) {
2364 if(!netif_carrier_ok(netdev)) { 2408 if (!netif_carrier_ok(netdev)) {
2365 e1000_get_speed_and_duplex(&adapter->hw, 2409 e1000_get_speed_and_duplex(&adapter->hw,
2366 &adapter->link_speed, 2410 &adapter->link_speed,
2367 &adapter->link_duplex); 2411 &adapter->link_duplex);
@@ -2392,7 +2436,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2392 adapter->smartspeed = 0; 2436 adapter->smartspeed = 0;
2393 } 2437 }
2394 } else { 2438 } else {
2395 if(netif_carrier_ok(netdev)) { 2439 if (netif_carrier_ok(netdev)) {
2396 adapter->link_speed = 0; 2440 adapter->link_speed = 0;
2397 adapter->link_duplex = 0; 2441 adapter->link_duplex = 0;
2398 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 2442 DPRINTK(LINK, INFO, "NIC Link is Down\n");
@@ -2432,12 +2476,12 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2432 } 2476 }
2433 2477
2434 /* Dynamic mode for Interrupt Throttle Rate (ITR) */ 2478 /* Dynamic mode for Interrupt Throttle Rate (ITR) */
2435 if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { 2479 if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
2436 /* Symmetric Tx/Rx gets a reduced ITR=2000; totally 2480 /* Symmetric Tx/Rx gets a reduced ITR=2000; totally
2437 * asymmetrical Tx or Rx gets ITR=8000; everyone 2481 * asymmetrical Tx or Rx gets ITR=8000; everyone
2438 * else is between 2000 and 8000. */ 2482 * else is between 2000 and 8000. */
2439 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; 2483 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
2440 uint32_t dif = (adapter->gotcl > adapter->gorcl ? 2484 uint32_t dif = (adapter->gotcl > adapter->gorcl ?
2441 adapter->gotcl - adapter->gorcl : 2485 adapter->gotcl - adapter->gorcl :
2442 adapter->gorcl - adapter->gotcl) / 10000; 2486 adapter->gorcl - adapter->gotcl) / 10000;
2443 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2487 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
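
The arithmetic above is the whole of the dynamic ITR heuristic: goc measures total traffic in 10 KB units, dif measures the Tx/Rx imbalance in the same units, and the target interrupt rate scales linearly from 2000 (symmetric load) toward 8000 (fully one-sided). A self-contained sketch with two worked inputs (values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t dynamic_itr(uint32_t gotcl, uint32_t gorcl)
    {
            uint32_t goc = (gotcl + gorcl) / 10000;
            uint32_t dif = (gotcl > gorcl ? gotcl - gorcl
                                          : gorcl - gotcl) / 10000;
            /* dif/goc is 0 for symmetric load, ~1 for one-sided load. */
            return goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
    }

    int main(void)
    {
            printf("%u\n", dynamic_itr(500000, 500000)); /* -> 2000 */
            printf("%u\n", dynamic_itr(900000, 100000)); /* -> 6800 */
            return 0;
    }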
@@ -2450,7 +2494,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2450 /* Force detection of hung controller every watchdog period */ 2494 /* Force detection of hung controller every watchdog period */
2451 adapter->detect_tx_hung = TRUE; 2495 adapter->detect_tx_hung = TRUE;
2452 2496
2453 /* With 82571 controllers, LAA may be overwritten due to controller 2497 /* With 82571 controllers, LAA may be overwritten due to controller
2454 * reset from the other port. Set the appropriate LAA in RAR[0] */ 2498 * reset from the other port. Set the appropriate LAA in RAR[0] */
2455 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) 2499 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
2456 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); 2500 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
@@ -2479,7 +2523,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2479 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 2523 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
2480 int err; 2524 int err;
2481 2525
2482 if(skb_shinfo(skb)->tso_size) { 2526 if (skb_shinfo(skb)->tso_size) {
2483 if (skb_header_cloned(skb)) { 2527 if (skb_header_cloned(skb)) {
2484 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2528 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2485 if (err) 2529 if (err)
@@ -2488,7 +2532,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2488 2532
2489 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 2533 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2490 mss = skb_shinfo(skb)->tso_size; 2534 mss = skb_shinfo(skb)->tso_size;
2491 if(skb->protocol == ntohs(ETH_P_IP)) { 2535 if (skb->protocol == ntohs(ETH_P_IP)) {
2492 skb->nh.iph->tot_len = 0; 2536 skb->nh.iph->tot_len = 0;
2493 skb->nh.iph->check = 0; 2537 skb->nh.iph->check = 0;
2494 skb->h.th->check = 2538 skb->h.th->check =
@@ -2500,7 +2544,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2500 cmd_length = E1000_TXD_CMD_IP; 2544 cmd_length = E1000_TXD_CMD_IP;
2501 ipcse = skb->h.raw - skb->data - 1; 2545 ipcse = skb->h.raw - skb->data - 1;
2502#ifdef NETIF_F_TSO_IPV6 2546#ifdef NETIF_F_TSO_IPV6
2503 } else if(skb->protocol == ntohs(ETH_P_IPV6)) { 2547 } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
2504 skb->nh.ipv6h->payload_len = 0; 2548 skb->nh.ipv6h->payload_len = 0;
2505 skb->h.th->check = 2549 skb->h.th->check =
2506 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, 2550 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
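
For TSO the driver zeroes the IP total length and seeds the TCP checksum with a pseudo-header sum that deliberately omits the length, so hardware can finish the checksum for each segment it cuts. Below is a freestanding sketch of that pseudo-header fold per RFC 1071, taking byte-array inputs; it is not the kernel's csum_tcpudp_magic()/csum_ipv6_magic(), which is what the code above actually calls:

    #include <stdint.h>

    static uint16_t pseudo_hdr_csum(const uint8_t s[4], const uint8_t d[4],
                                    uint8_t proto)
    {
            uint32_t sum = 0;

            /* Fold the IPv4 pseudo-header as big-endian 16-bit words;
             * the TCP length is deliberately left out. */
            sum += (s[0] << 8) | s[1];
            sum += (s[2] << 8) | s[3];
            sum += (d[0] << 8) | d[1];
            sum += (d[2] << 8) | d[3];
            sum += proto;               /* zero byte + protocol halfword */
            while (sum >> 16)           /* end-around carry */
                    sum = (sum & 0xffff) + (sum >> 16);
            /* The driver stores the complement, so hardware can add the
             * per-segment length and complete the sum. */
            return (uint16_t)~sum;
    }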
@@ -2555,7 +2599,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2555 unsigned int i; 2599 unsigned int i;
2556 uint8_t css; 2600 uint8_t css;
2557 2601
2558 if(likely(skb->ip_summed == CHECKSUM_HW)) { 2602 if (likely(skb->ip_summed == CHECKSUM_HW)) {
2559 css = skb->h.raw - skb->data; 2603 css = skb->h.raw - skb->data;
2560 2604
2561 i = tx_ring->next_to_use; 2605 i = tx_ring->next_to_use;
@@ -2595,7 +2639,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2595 2639
2596 i = tx_ring->next_to_use; 2640 i = tx_ring->next_to_use;
2597 2641
2598 while(len) { 2642 while (len) {
2599 buffer_info = &tx_ring->buffer_info[i]; 2643 buffer_info = &tx_ring->buffer_info[i];
2600 size = min(len, max_per_txd); 2644 size = min(len, max_per_txd);
2601#ifdef NETIF_F_TSO 2645#ifdef NETIF_F_TSO
@@ -2611,7 +2655,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2611 2655
2612 /* Workaround for premature desc write-backs 2656 /* Workaround for premature desc write-backs
2613 * in TSO mode. Append 4-byte sentinel desc */ 2657 * in TSO mode. Append 4-byte sentinel desc */
2614 if(unlikely(mss && !nr_frags && size == len && size > 8)) 2658 if (unlikely(mss && !nr_frags && size == len && size > 8))
2615 size -= 4; 2659 size -= 4;
2616#endif 2660#endif
2617 /* work-around for errata 10: it applies 2661 /* work-around for errata 10: it applies
@@ -2619,13 +2663,13 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2619 * The fix is to make sure that the first descriptor of a 2663 * The fix is to make sure that the first descriptor of a
2620 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2664 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2621 */ 2665 */
2622 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 2666 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2623 (size > 2015) && count == 0)) 2667 (size > 2015) && count == 0))
2624 size = 2015; 2668 size = 2015;
2625 2669
2626 /* Workaround for potential 82544 hang in PCI-X. Avoid 2670 /* Workaround for potential 82544 hang in PCI-X. Avoid
2627 * terminating buffers within evenly-aligned dwords. */ 2671 * terminating buffers within evenly-aligned dwords. */
2628 if(unlikely(adapter->pcix_82544 && 2672 if (unlikely(adapter->pcix_82544 &&
2629 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2673 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2630 size > 4)) 2674 size > 4))
2631 size -= 4; 2675 size -= 4;
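
The 82544 workaround above shaves 4 bytes off any DMA buffer that would otherwise end on an even dword boundary; the shaved remainder is simply picked up as its own descriptor on the next loop pass. The predicate in isolation, as a hypothetical helper using the same test as the driver:

    #include <stdint.h>

    static unsigned adjust_for_82544(uintptr_t buf, unsigned offset,
                                     unsigned size)
    {
            uintptr_t last = buf + offset + size - 1;

            /* Ends on an even dword boundary (bit 2 of the last byte's
             * address clear)? Shave 4 bytes so the remainder becomes its
             * own descriptor, but only when a sane chunk is left. */
            if (!(last & 4) && size > 4)
                    size -= 4;
            return size;
    }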
@@ -2641,29 +2685,29 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2641 len -= size; 2685 len -= size;
2642 offset += size; 2686 offset += size;
2643 count++; 2687 count++;
2644 if(unlikely(++i == tx_ring->count)) i = 0; 2688 if (unlikely(++i == tx_ring->count)) i = 0;
2645 } 2689 }
2646 2690
2647 for(f = 0; f < nr_frags; f++) { 2691 for (f = 0; f < nr_frags; f++) {
2648 struct skb_frag_struct *frag; 2692 struct skb_frag_struct *frag;
2649 2693
2650 frag = &skb_shinfo(skb)->frags[f]; 2694 frag = &skb_shinfo(skb)->frags[f];
2651 len = frag->size; 2695 len = frag->size;
2652 offset = frag->page_offset; 2696 offset = frag->page_offset;
2653 2697
2654 while(len) { 2698 while (len) {
2655 buffer_info = &tx_ring->buffer_info[i]; 2699 buffer_info = &tx_ring->buffer_info[i];
2656 size = min(len, max_per_txd); 2700 size = min(len, max_per_txd);
2657#ifdef NETIF_F_TSO 2701#ifdef NETIF_F_TSO
2658 /* Workaround for premature desc write-backs 2702 /* Workaround for premature desc write-backs
2659 * in TSO mode. Append 4-byte sentinel desc */ 2703 * in TSO mode. Append 4-byte sentinel desc */
2660 if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 2704 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2661 size -= 4; 2705 size -= 4;
2662#endif 2706#endif
2663 /* Workaround for potential 82544 hang in PCI-X. 2707 /* Workaround for potential 82544 hang in PCI-X.
2664 * Avoid terminating buffers within evenly-aligned 2708 * Avoid terminating buffers within evenly-aligned
2665 * dwords. */ 2709 * dwords. */
2666 if(unlikely(adapter->pcix_82544 && 2710 if (unlikely(adapter->pcix_82544 &&
2667 !((unsigned long)(frag->page+offset+size-1) & 4) && 2711 !((unsigned long)(frag->page+offset+size-1) & 4) &&
2668 size > 4)) 2712 size > 4))
2669 size -= 4; 2713 size -= 4;
@@ -2680,7 +2724,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2680 len -= size; 2724 len -= size;
2681 offset += size; 2725 offset += size;
2682 count++; 2726 count++;
2683 if(unlikely(++i == tx_ring->count)) i = 0; 2727 if (unlikely(++i == tx_ring->count)) i = 0;
2684 } 2728 }
2685 } 2729 }
2686 2730
@@ -2700,35 +2744,35 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2700 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2744 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2701 unsigned int i; 2745 unsigned int i;
2702 2746
2703 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2747 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2704 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2748 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2705 E1000_TXD_CMD_TSE; 2749 E1000_TXD_CMD_TSE;
2706 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2750 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2707 2751
2708 if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2752 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2709 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2753 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2710 } 2754 }
2711 2755
2712 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2756 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2713 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2757 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2714 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2758 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2715 } 2759 }
2716 2760
2717 if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { 2761 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2718 txd_lower |= E1000_TXD_CMD_VLE; 2762 txd_lower |= E1000_TXD_CMD_VLE;
2719 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 2763 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2720 } 2764 }
2721 2765
2722 i = tx_ring->next_to_use; 2766 i = tx_ring->next_to_use;
2723 2767
2724 while(count--) { 2768 while (count--) {
2725 buffer_info = &tx_ring->buffer_info[i]; 2769 buffer_info = &tx_ring->buffer_info[i];
2726 tx_desc = E1000_TX_DESC(*tx_ring, i); 2770 tx_desc = E1000_TX_DESC(*tx_ring, i);
2727 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 2771 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2728 tx_desc->lower.data = 2772 tx_desc->lower.data =
2729 cpu_to_le32(txd_lower | buffer_info->length); 2773 cpu_to_le32(txd_lower | buffer_info->length);
2730 tx_desc->upper.data = cpu_to_le32(txd_upper); 2774 tx_desc->upper.data = cpu_to_le32(txd_upper);
2731 if(unlikely(++i == tx_ring->count)) i = 0; 2775 if (unlikely(++i == tx_ring->count)) i = 0;
2732 } 2776 }
2733 2777
2734 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 2778 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
@@ -2763,20 +2807,20 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
2763 2807
2764 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR); 2808 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
2765 2809
2766 if(adapter->link_duplex != HALF_DUPLEX) 2810 if (adapter->link_duplex != HALF_DUPLEX)
2767 goto no_fifo_stall_required; 2811 goto no_fifo_stall_required;
2768 2812
2769 if(atomic_read(&adapter->tx_fifo_stall)) 2813 if (atomic_read(&adapter->tx_fifo_stall))
2770 return 1; 2814 return 1;
2771 2815
2772 if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 2816 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
2773 atomic_set(&adapter->tx_fifo_stall, 1); 2817 atomic_set(&adapter->tx_fifo_stall, 1);
2774 return 1; 2818 return 1;
2775 } 2819 }
2776 2820
2777no_fifo_stall_required: 2821no_fifo_stall_required:
2778 adapter->tx_fifo_head += skb_fifo_len; 2822 adapter->tx_fifo_head += skb_fifo_len;
2779 if(adapter->tx_fifo_head >= adapter->tx_fifo_size) 2823 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
2780 adapter->tx_fifo_head -= adapter->tx_fifo_size; 2824 adapter->tx_fifo_head -= adapter->tx_fifo_size;
2781 return 0; 2825 return 0;
2782} 2826}
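
The 82547 workaround above tracks the Tx FIFO head in software: frame lengths are rounded up to the FIFO header granularity, accumulated against a wrap point, and a stall is flagged when a frame would cross the hardware's wrap boundary in half duplex. A rough model of that accounting, assuming free space is size minus head and using stand-in constants:

    #include <stdint.h>

    #define FIFO_HDR      16      /* stand-in for E1000_FIFO_HDR */
    #define FIFO_PAD      64      /* stand-in for E1000_82547_PAD_LEN */
    #define ROUNDUP(x, y) (((x) + ((y) - 1)) & ~((y) - 1))

    struct fifo_state {
            uint32_t head;        /* software copy of the FIFO head */
            uint32_t size;        /* total FIFO bytes */
            int      stalled;
    };

    /* Returns nonzero when the caller must stall the queue. */
    static int fifo_admit(struct fifo_state *f, uint32_t skb_len)
    {
            uint32_t fifo_len = ROUNDUP(skb_len + FIFO_HDR, FIFO_HDR);
            uint32_t space = f->size - f->head;  /* assumed layout */

            if (f->stalled)
                    return 1;           /* wait for the FIFO to drain */
            if (fifo_len >= FIFO_PAD + space) {
                    f->stalled = 1;     /* frame would wrap: stall Tx */
                    return 1;
            }
            f->head += fifo_len;
            if (f->head >= f->size)
                    f->head -= f->size; /* wrap the software head */
            return 0;                   /* safe to queue */
    }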
@@ -2787,27 +2831,27 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
2787{ 2831{
2788 struct e1000_hw *hw = &adapter->hw; 2832 struct e1000_hw *hw = &adapter->hw;
2789 uint16_t length, offset; 2833 uint16_t length, offset;
2790 if(vlan_tx_tag_present(skb)) { 2834 if (vlan_tx_tag_present(skb)) {
2791 if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 2835 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
2792 ( adapter->hw.mng_cookie.status & 2836 ( adapter->hw.mng_cookie.status &
2793 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) 2837 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
2794 return 0; 2838 return 0;
2795 } 2839 }
2796 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { 2840 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
2797 struct ethhdr *eth = (struct ethhdr *) skb->data; 2841 struct ethhdr *eth = (struct ethhdr *) skb->data;
2798 if((htons(ETH_P_IP) == eth->h_proto)) { 2842 if ((htons(ETH_P_IP) == eth->h_proto)) {
2799 const struct iphdr *ip = 2843 const struct iphdr *ip =
2800 (struct iphdr *)((uint8_t *)skb->data+14); 2844 (struct iphdr *)((uint8_t *)skb->data+14);
2801 if(IPPROTO_UDP == ip->protocol) { 2845 if (IPPROTO_UDP == ip->protocol) {
2802 struct udphdr *udp = 2846 struct udphdr *udp =
2803 (struct udphdr *)((uint8_t *)ip + 2847 (struct udphdr *)((uint8_t *)ip +
2804 (ip->ihl << 2)); 2848 (ip->ihl << 2));
2805 if(ntohs(udp->dest) == 67) { 2849 if (ntohs(udp->dest) == 67) {
2806 offset = (uint8_t *)udp + 8 - skb->data; 2850 offset = (uint8_t *)udp + 8 - skb->data;
2807 length = skb->len - offset; 2851 length = skb->len - offset;
2808 2852
2809 return e1000_mng_write_dhcp_info(hw, 2853 return e1000_mng_write_dhcp_info(hw,
2810 (uint8_t *)udp + 8, 2854 (uint8_t *)udp + 8,
2811 length); 2855 length);
2812 } 2856 }
2813 } 2857 }
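
e1000_transfer_dhcp_info() walks Ethernet -> IPv4 -> UDP and hands the payload to the management firmware when the destination port is 67 (BOOTP/DHCP server). The same header walk as a freestanding predicate over a raw buffer, with minimal bounds checks and no kernel structs:

    #include <stdint.h>

    static int is_dhcp_to_server(const uint8_t *pkt, unsigned len)
    {
            const uint8_t *ip, *udp;
            unsigned ihl, dport;

            if (len < 14 + 20 + 8)          /* eth + min IP + UDP hdr */
                    return 0;
            if (pkt[12] != 0x08 || pkt[13] != 0x00)
                    return 0;               /* EtherType 0x0800 = IPv4 */
            ip = pkt + 14;
            ihl = (ip[0] & 0x0f) * 4;       /* header length in bytes */
            if (ihl < 20 || ip[9] != 17)    /* 17 = IPPROTO_UDP */
                    return 0;
            udp = ip + ihl;
            dport = (udp[2] << 8) | udp[3]; /* big-endian dest port */
            return dport == 67;             /* BOOTP/DHCP server port */
    }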
@@ -2830,7 +2874,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2830 unsigned int nr_frags = 0; 2874 unsigned int nr_frags = 0;
2831 unsigned int mss = 0; 2875 unsigned int mss = 0;
2832 int count = 0; 2876 int count = 0;
2833 int tso; 2877 int tso;
2834 unsigned int f; 2878 unsigned int f;
2835 len -= skb->data_len; 2879 len -= skb->data_len;
2836 2880
@@ -2853,7 +2897,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2853 * 4 = ceil(buffer len/mss). To make sure we don't 2897 * 4 = ceil(buffer len/mss). To make sure we don't
2854 * overrun the FIFO, adjust the max buffer len if mss 2898 * overrun the FIFO, adjust the max buffer len if mss
2855 * drops. */ 2899 * drops. */
2856 if(mss) { 2900 if (mss) {
2857 uint8_t hdr_len; 2901 uint8_t hdr_len;
2858 max_per_txd = min(mss << 2, max_per_txd); 2902 max_per_txd = min(mss << 2, max_per_txd);
2859 max_txd_pwr = fls(max_per_txd) - 1; 2903 max_txd_pwr = fls(max_per_txd) - 1;
@@ -2876,12 +2920,12 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2876 } 2920 }
2877 } 2921 }
2878 2922
2879 if((mss) || (skb->ip_summed == CHECKSUM_HW))
2880 /* reserve a descriptor for the offload context */ 2923 /* reserve a descriptor for the offload context */
2924 if ((mss) || (skb->ip_summed == CHECKSUM_HW))
2881 count++; 2925 count++;
2882 count++; 2926 count++;
2883#else 2927#else
2884 if(skb->ip_summed == CHECKSUM_HW) 2928 if (skb->ip_summed == CHECKSUM_HW)
2885 count++; 2929 count++;
2886#endif 2930#endif
2887 2931
@@ -2894,24 +2938,24 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2894 2938
2895 count += TXD_USE_COUNT(len, max_txd_pwr); 2939 count += TXD_USE_COUNT(len, max_txd_pwr);
2896 2940
2897 if(adapter->pcix_82544) 2941 if (adapter->pcix_82544)
2898 count++; 2942 count++;
2899 2943
2900 /* work-around for errata 10: it applies to all controllers 2944 /* work-around for errata 10: it applies to all controllers
2901 * in PCI-X mode, so add one more descriptor to the count 2945 * in PCI-X mode, so add one more descriptor to the count
2902 */ 2946 */
2903 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 2947 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2904 (len > 2015))) 2948 (len > 2015)))
2905 count++; 2949 count++;
2906 2950
2907 nr_frags = skb_shinfo(skb)->nr_frags; 2951 nr_frags = skb_shinfo(skb)->nr_frags;
2908 for(f = 0; f < nr_frags; f++) 2952 for (f = 0; f < nr_frags; f++)
2909 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, 2953 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
2910 max_txd_pwr); 2954 max_txd_pwr);
2911 if(adapter->pcix_82544) 2955 if (adapter->pcix_82544)
2912 count += nr_frags; 2956 count += nr_frags;
2913 2957
2914 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) 2958 if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
2915 e1000_transfer_dhcp_info(adapter, skb); 2959 e1000_transfer_dhcp_info(adapter, skb);
2916 2960
2917 local_irq_save(flags); 2961 local_irq_save(flags);
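
The count accumulated above budgets descriptors before the Tx lock is taken: each linear chunk and each fragment needs roughly ceil(len / 2^max_txd_pwr) descriptors, plus worst-case padding for the PCI-X errata splits. A rough model of that math (TXD_USE_COUNT itself is a driver macro and may round up more aggressively than the exact ceiling computed here):

    static unsigned txd_use_count(unsigned len, unsigned max_txd_pwr)
    {
            /* exact ceil(len / 2^max_txd_pwr) */
            return (len >> max_txd_pwr) +
                   !!(len & ((1u << max_txd_pwr) - 1));
    }

    static unsigned estimate_descs(unsigned headlen,
                                   const unsigned *frag_len,
                                   unsigned nr_frags, unsigned max_txd_pwr,
                                   int pcix_82544)
    {
            unsigned f, count = txd_use_count(headlen, max_txd_pwr);

            for (f = 0; f < nr_frags; f++)
                    count += txd_use_count(frag_len[f], max_txd_pwr);
            if (pcix_82544)
                    count += 1 + nr_frags; /* each buffer may split once */
            return count;
    }

Overestimating here only costs an early NETDEV_TX_BUSY; underestimating would risk overrunning the ring, so the rounding always errs upward.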
@@ -2929,8 +2973,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2929 return NETDEV_TX_BUSY; 2973 return NETDEV_TX_BUSY;
2930 } 2974 }
2931 2975
2932 if(unlikely(adapter->hw.mac_type == e1000_82547)) { 2976 if (unlikely(adapter->hw.mac_type == e1000_82547)) {
2933 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 2977 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
2934 netif_stop_queue(netdev); 2978 netif_stop_queue(netdev);
2935 mod_timer(&adapter->tx_fifo_stall_timer, jiffies); 2979 mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
2936 spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 2980 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
@@ -2938,13 +2982,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2938 } 2982 }
2939 } 2983 }
2940 2984
2941 if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { 2985 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
2942 tx_flags |= E1000_TX_FLAGS_VLAN; 2986 tx_flags |= E1000_TX_FLAGS_VLAN;
2943 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 2987 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
2944 } 2988 }
2945 2989
2946 first = tx_ring->next_to_use; 2990 first = tx_ring->next_to_use;
2947 2991
2948 tso = e1000_tso(adapter, tx_ring, skb); 2992 tso = e1000_tso(adapter, tx_ring, skb);
2949 if (tso < 0) { 2993 if (tso < 0) {
2950 dev_kfree_skb_any(skb); 2994 dev_kfree_skb_any(skb);
@@ -3033,9 +3077,9 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3033 struct e1000_adapter *adapter = netdev_priv(netdev); 3077 struct e1000_adapter *adapter = netdev_priv(netdev);
3034 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3078 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3035 3079
3036 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3080 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3037 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3081 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3038 DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); 3082 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
3039 return -EINVAL; 3083 return -EINVAL;
3040 } 3084 }
3041 3085
@@ -3083,7 +3127,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3083 3127
3084 netdev->mtu = new_mtu; 3128 netdev->mtu = new_mtu;
3085 3129
3086 if(netif_running(netdev)) { 3130 if (netif_running(netdev)) {
3087 e1000_down(adapter); 3131 e1000_down(adapter);
3088 e1000_up(adapter); 3132 e1000_up(adapter);
3089 } 3133 }
@@ -3170,7 +3214,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3170 hw->collision_delta = E1000_READ_REG(hw, COLC); 3214 hw->collision_delta = E1000_READ_REG(hw, COLC);
3171 adapter->stats.colc += hw->collision_delta; 3215 adapter->stats.colc += hw->collision_delta;
3172 3216
3173 if(hw->mac_type >= e1000_82543) { 3217 if (hw->mac_type >= e1000_82543) {
3174 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC); 3218 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
3175 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC); 3219 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
3176 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS); 3220 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
@@ -3178,7 +3222,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3178 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); 3222 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
3179 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); 3223 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
3180 } 3224 }
3181 if(hw->mac_type > e1000_82547_rev_2) { 3225 if (hw->mac_type > e1000_82547_rev_2) {
3182 adapter->stats.iac += E1000_READ_REG(hw, IAC); 3226 adapter->stats.iac += E1000_READ_REG(hw, IAC);
3183 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); 3227 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
3184 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); 3228 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
@@ -3222,14 +3266,14 @@ e1000_update_stats(struct e1000_adapter *adapter)
3222 3266
3223 /* Phy Stats */ 3267 /* Phy Stats */
3224 3268
3225 if(hw->media_type == e1000_media_type_copper) { 3269 if (hw->media_type == e1000_media_type_copper) {
3226 if((adapter->link_speed == SPEED_1000) && 3270 if ((adapter->link_speed == SPEED_1000) &&
3227 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3271 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3228 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3272 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3229 adapter->phy_stats.idle_errors += phy_tmp; 3273 adapter->phy_stats.idle_errors += phy_tmp;
3230 } 3274 }
3231 3275
3232 if((hw->mac_type <= e1000_82546) && 3276 if ((hw->mac_type <= e1000_82546) &&
3233 (hw->phy_type == e1000_phy_m88) && 3277 (hw->phy_type == e1000_phy_m88) &&
3234 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3278 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3235 adapter->phy_stats.receive_errors += phy_tmp; 3279 adapter->phy_stats.receive_errors += phy_tmp;
@@ -3294,7 +3338,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3294 return IRQ_NONE; /* Not our interrupt */ 3338 return IRQ_NONE; /* Not our interrupt */
3295 } 3339 }
3296 3340
3297 if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3341 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3298 hw->get_link_status = 1; 3342 hw->get_link_status = 1;
3299 mod_timer(&adapter->watchdog_timer, jiffies); 3343 mod_timer(&adapter->watchdog_timer, jiffies);
3300 } 3344 }
@@ -3326,26 +3370,26 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3326 3370
3327#else /* if !CONFIG_E1000_NAPI */ 3371#else /* if !CONFIG_E1000_NAPI */
3328 /* Writing IMC and IMS is needed for 82547. 3372 /* Writing IMC and IMS is needed for 82547.
3329 Due to Hub Link bus being occupied, an interrupt 3373 * Due to Hub Link bus being occupied, an interrupt
3330 de-assertion message cannot be sent. 3374 * de-assertion message cannot be sent.
3331 When an interrupt assertion message is generated later, 3375 * When an interrupt assertion message is generated later,
3332 two messages are re-ordered and sent out. 3376 * two messages are re-ordered and sent out.
3333 That causes APIC to think 82547 is in de-assertion 3377 * That causes APIC to think 82547 is in de-assertion
3334 state, while 82547 is in assertion state, resulting 3378 * state, while 82547 is in assertion state, resulting
3335 in deadlock. Writing IMC forces 82547 into 3379 * in deadlock. Writing IMC forces 82547 into
3336 de-assertion state. 3380 * de-assertion state.
3337 */ 3381 */
3338 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){ 3382 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
3339 atomic_inc(&adapter->irq_sem); 3383 atomic_inc(&adapter->irq_sem);
3340 E1000_WRITE_REG(hw, IMC, ~0); 3384 E1000_WRITE_REG(hw, IMC, ~0);
3341 } 3385 }
3342 3386
3343 for(i = 0; i < E1000_MAX_INTR; i++) 3387 for (i = 0; i < E1000_MAX_INTR; i++)
3344 if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & 3388 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3345 !e1000_clean_tx_irq(adapter, adapter->tx_ring))) 3389 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3346 break; 3390 break;
3347 3391
3348 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) 3392 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
3349 e1000_irq_enable(adapter); 3393 e1000_irq_enable(adapter);
3350 3394
3351#endif /* CONFIG_E1000_NAPI */ 3395#endif /* CONFIG_E1000_NAPI */
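
In the non-NAPI path above, the interrupt handler drains the rings in a bounded loop (E1000_MAX_INTR passes) so one interrupt can service a burst without monopolizing the CPU. The control-flow skeleton, with the clean routines reduced to callbacks:

    #define MAX_INTR_PASSES 10  /* stand-in for E1000_MAX_INTR */

    static void isr_service(int (*clean_rx)(void *),
                            int (*clean_tx)(void *), void *ctx)
    {
            int i;

            for (i = 0; i < MAX_INTR_PASSES; i++) {
                    int rx_more = clean_rx(ctx); /* nonzero: did work */
                    int tx_more = clean_tx(ctx);

                    if (!rx_more && !tx_more)
                            break;      /* rings drained: stop early */
            }
    }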
@@ -3397,9 +3441,9 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3397 3441
3398 *budget -= work_done; 3442 *budget -= work_done;
3399 poll_dev->quota -= work_done; 3443 poll_dev->quota -= work_done;
3400 3444
3401 /* If no Tx and not enough Rx work done, exit the polling mode */ 3445 /* If no Tx and not enough Rx work done, exit the polling mode */
3402 if((!tx_cleaned && (work_done == 0)) || 3446 if ((!tx_cleaned && (work_done == 0)) ||
3403 !netif_running(adapter->netdev)) { 3447 !netif_running(adapter->netdev)) {
3404quit_polling: 3448quit_polling:
3405 netif_rx_complete(poll_dev); 3449 netif_rx_complete(poll_dev);
@@ -3431,7 +3475,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3431 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3475 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3432 3476
3433 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { 3477 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
3434 for(cleaned = FALSE; !cleaned; ) { 3478 for (cleaned = FALSE; !cleaned; ) {
3435 tx_desc = E1000_TX_DESC(*tx_ring, i); 3479 tx_desc = E1000_TX_DESC(*tx_ring, i);
3436 buffer_info = &tx_ring->buffer_info[i]; 3480 buffer_info = &tx_ring->buffer_info[i];
3437 cleaned = (i == eop); 3481 cleaned = (i == eop);
@@ -3442,7 +3486,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3442 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3486 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3443 memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); 3487 memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
3444 3488
3445 if(unlikely(++i == tx_ring->count)) i = 0; 3489 if (unlikely(++i == tx_ring->count)) i = 0;
3446 } 3490 }
3447 3491
3448#ifdef CONFIG_E1000_MQ 3492#ifdef CONFIG_E1000_MQ
@@ -3457,7 +3501,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3457 3501
3458 spin_lock(&tx_ring->tx_lock); 3502 spin_lock(&tx_ring->tx_lock);
3459 3503
3460 if(unlikely(cleaned && netif_queue_stopped(netdev) && 3504 if (unlikely(cleaned && netif_queue_stopped(netdev) &&
3461 netif_carrier_ok(netdev))) 3505 netif_carrier_ok(netdev)))
3462 netif_wake_queue(netdev); 3506 netif_wake_queue(netdev);
3463 3507
@@ -3519,21 +3563,21 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
3519 skb->ip_summed = CHECKSUM_NONE; 3563 skb->ip_summed = CHECKSUM_NONE;
3520 3564
3521 /* 82543 or newer only */ 3565 /* 82543 or newer only */
3522 if(unlikely(adapter->hw.mac_type < e1000_82543)) return; 3566 if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
3523 /* Ignore Checksum bit is set */ 3567 /* Ignore Checksum bit is set */
3524 if(unlikely(status & E1000_RXD_STAT_IXSM)) return; 3568 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3525 /* TCP/UDP checksum error bit is set */ 3569 /* TCP/UDP checksum error bit is set */
3526 if(unlikely(errors & E1000_RXD_ERR_TCPE)) { 3570 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3527 /* let the stack verify checksum errors */ 3571 /* let the stack verify checksum errors */
3528 adapter->hw_csum_err++; 3572 adapter->hw_csum_err++;
3529 return; 3573 return;
3530 } 3574 }
3531 /* TCP/UDP Checksum has not been calculated */ 3575 /* TCP/UDP Checksum has not been calculated */
3532 if(adapter->hw.mac_type <= e1000_82547_rev_2) { 3576 if (adapter->hw.mac_type <= e1000_82547_rev_2) {
3533 if(!(status & E1000_RXD_STAT_TCPCS)) 3577 if (!(status & E1000_RXD_STAT_TCPCS))
3534 return; 3578 return;
3535 } else { 3579 } else {
3536 if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) 3580 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
3537 return; 3581 return;
3538 } 3582 }
3539 /* It must be a TCP or UDP packet with a valid checksum */ 3583 /* It must be a TCP or UDP packet with a valid checksum */
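
The receive checksum logic is a short decision ladder: an "ignore checksum" bit or a hardware-reported error always wins, pre-82547 MACs only vouch for TCP, and newer MACs vouch for TCP or UDP. The same ladder as a pure function (the bit values below are illustrative stand-ins for the RXD status/error definitions):

    enum csum_verdict { CSUM_NONE, CSUM_UNNECESSARY, CSUM_HW_ERROR };

    #define STAT_IXSM   0x04  /* ignore checksum indication */
    #define STAT_TCPCS  0x20  /* TCP checksum calculated */
    #define STAT_UDPCS  0x10  /* UDP checksum calculated (newer MACs) */
    #define ERR_TCPE    0x20  /* TCP/UDP checksum error */

    static enum csum_verdict rx_csum(unsigned status, unsigned errors,
                                     int new_mac /* post-82547 */)
    {
            if (status & STAT_IXSM)
                    return CSUM_NONE;       /* hardware says: don't trust */
            if (errors & ERR_TCPE)
                    return CSUM_HW_ERROR;   /* count it, stack verifies */
            if (!new_mac)
                    return (status & STAT_TCPCS) ? CSUM_UNNECESSARY
                                                 : CSUM_NONE;
            return (status & (STAT_TCPCS | STAT_UDPCS)) ? CSUM_UNNECESSARY
                                                        : CSUM_NONE;
    }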
@@ -3569,9 +3613,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3569{ 3613{
3570 struct net_device *netdev = adapter->netdev; 3614 struct net_device *netdev = adapter->netdev;
3571 struct pci_dev *pdev = adapter->pdev; 3615 struct pci_dev *pdev = adapter->pdev;
3572 struct e1000_rx_desc *rx_desc; 3616 struct e1000_rx_desc *rx_desc, *next_rxd;
3573 struct e1000_buffer *buffer_info; 3617 struct e1000_buffer *buffer_info, *next_buffer;
3574 struct sk_buff *skb;
3575 unsigned long flags; 3618 unsigned long flags;
3576 uint32_t length; 3619 uint32_t length;
3577 uint8_t last_byte; 3620 uint8_t last_byte;
@@ -3581,16 +3624,25 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3581 3624
3582 i = rx_ring->next_to_clean; 3625 i = rx_ring->next_to_clean;
3583 rx_desc = E1000_RX_DESC(*rx_ring, i); 3626 rx_desc = E1000_RX_DESC(*rx_ring, i);
3627 buffer_info = &rx_ring->buffer_info[i];
3584 3628
3585 while(rx_desc->status & E1000_RXD_STAT_DD) { 3629 while (rx_desc->status & E1000_RXD_STAT_DD) {
3586 buffer_info = &rx_ring->buffer_info[i]; 3630 struct sk_buff *skb, *next_skb;
3587 u8 status; 3631 u8 status;
3588#ifdef CONFIG_E1000_NAPI 3632#ifdef CONFIG_E1000_NAPI
3589 if(*work_done >= work_to_do) 3633 if (*work_done >= work_to_do)
3590 break; 3634 break;
3591 (*work_done)++; 3635 (*work_done)++;
3592#endif 3636#endif
3593 status = rx_desc->status; 3637 status = rx_desc->status;
3638 skb = buffer_info->skb;
3639 buffer_info->skb = NULL;
3640
3641 if (++i == rx_ring->count) i = 0;
3642 next_rxd = E1000_RX_DESC(*rx_ring, i);
3643 next_buffer = &rx_ring->buffer_info[i];
3644 next_skb = next_buffer->skb;
3645
3594 cleaned = TRUE; 3646 cleaned = TRUE;
3595 cleaned_count++; 3647 cleaned_count++;
3596 pci_unmap_single(pdev, 3648 pci_unmap_single(pdev,
@@ -3598,20 +3650,50 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3598 buffer_info->length, 3650 buffer_info->length,
3599 PCI_DMA_FROMDEVICE); 3651 PCI_DMA_FROMDEVICE);
3600 3652
3601 skb = buffer_info->skb;
3602 length = le16_to_cpu(rx_desc->length); 3653 length = le16_to_cpu(rx_desc->length);
3603 3654
3604 if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) { 3655 skb_put(skb, length);
3605 /* All receives must fit into a single buffer */ 3656
3606 E1000_DBG("%s: Receive packet consumed multiple" 3657 if (!(status & E1000_RXD_STAT_EOP)) {
3607 " buffers\n", netdev->name); 3658 if (!rx_ring->rx_skb_top) {
3608 dev_kfree_skb_irq(skb); 3659 rx_ring->rx_skb_top = skb;
3660 rx_ring->rx_skb_top->len = length;
3661 rx_ring->rx_skb_prev = skb;
3662 } else {
3663 if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
3664 rx_ring->rx_skb_prev->next = skb;
3665 skb->prev = rx_ring->rx_skb_prev;
3666 } else {
3667 skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
3668 }
3669 rx_ring->rx_skb_prev = skb;
3670 rx_ring->rx_skb_top->data_len += length;
3671 }
3609 goto next_desc; 3672 goto next_desc;
3673 } else {
3674 if (rx_ring->rx_skb_top) {
3675 if (skb_shinfo(rx_ring->rx_skb_top)
3676 ->frag_list) {
3677 rx_ring->rx_skb_prev->next = skb;
3678 skb->prev = rx_ring->rx_skb_prev;
3679 } else
3680 skb_shinfo(rx_ring->rx_skb_top)
3681 ->frag_list = skb;
3682
3683 rx_ring->rx_skb_top->data_len += length;
3684 rx_ring->rx_skb_top->len +=
3685 rx_ring->rx_skb_top->data_len;
3686
3687 skb = rx_ring->rx_skb_top;
3688 multi_descriptor = TRUE;
3689 rx_ring->rx_skb_top = NULL;
3690 rx_ring->rx_skb_prev = NULL;
3691 }
3610 } 3692 }
3611 3693
3612 if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { 3694 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
3613 last_byte = *(skb->data + length - 1); 3695 last_byte = *(skb->data + length - 1);
3614 if(TBI_ACCEPT(&adapter->hw, rx_desc->status, 3696 if (TBI_ACCEPT(&adapter->hw, status,
3615 rx_desc->errors, length, last_byte)) { 3697 rx_desc->errors, length, last_byte)) {
3616 spin_lock_irqsave(&adapter->stats_lock, flags); 3698 spin_lock_irqsave(&adapter->stats_lock, flags);
3617 e1000_tbi_adjust_stats(&adapter->hw, 3699 e1000_tbi_adjust_stats(&adapter->hw,
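
The new multi-descriptor path above keeps the first buffer as the head skb and hangs continuation buffers off its frag_list, accumulating data_len until the EOP descriptor arrives. A stripped-down model of that assembly with stand-in types (real skbs track len and data_len separately; here each buffer carries one length):

    #include <stddef.h>

    struct buf {
            unsigned len;
            struct buf *next;       /* chains continuation buffers */
    };

    struct frame_ctx {
            struct buf *top;        /* head of the in-progress frame */
            struct buf *prev;       /* last appended buffer */
            unsigned total_len;
    };

    /* Feed one descriptor's buffer; returns the frame on EOP. */
    static struct buf *rx_assemble(struct frame_ctx *c, struct buf *b,
                                   int eop)
    {
            if (!c->top) {
                    c->top = c->prev = b;   /* first piece becomes head */
                    c->total_len = b->len;
            } else {
                    c->prev->next = b;      /* append to the chain */
                    c->prev = b;
                    c->total_len += b->len;
            }
            if (!eop)
                    return NULL;            /* frame still spans buffers */
            c->top->len = c->total_len;     /* head reflects whole frame */
            b = c->top;
            c->top = c->prev = NULL;
            return b;
    }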
@@ -3656,9 +3738,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3656 (uint32_t)(status) | 3738 (uint32_t)(status) |
3657 ((uint32_t)(rx_desc->errors) << 24), 3739 ((uint32_t)(rx_desc->errors) << 24),
3658 rx_desc->csum, skb); 3740 rx_desc->csum, skb);
3741
3659 skb->protocol = eth_type_trans(skb, netdev); 3742 skb->protocol = eth_type_trans(skb, netdev);
3660#ifdef CONFIG_E1000_NAPI 3743#ifdef CONFIG_E1000_NAPI
3661 if(unlikely(adapter->vlgrp && 3744 if (unlikely(adapter->vlgrp &&
3662 (status & E1000_RXD_STAT_VP))) { 3745 (status & E1000_RXD_STAT_VP))) {
3663 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3746 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3664 le16_to_cpu(rx_desc->special) & 3747 le16_to_cpu(rx_desc->special) &
@@ -3667,8 +3750,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3667 netif_receive_skb(skb); 3750 netif_receive_skb(skb);
3668 } 3751 }
3669#else /* CONFIG_E1000_NAPI */ 3752#else /* CONFIG_E1000_NAPI */
3670 if(unlikely(adapter->vlgrp && 3753 if (unlikely(adapter->vlgrp &&
3671 (rx_desc->status & E1000_RXD_STAT_VP))) { 3754 (status & E1000_RXD_STAT_VP))) {
3672 vlan_hwaccel_rx(skb, adapter->vlgrp, 3755 vlan_hwaccel_rx(skb, adapter->vlgrp,
3673 le16_to_cpu(rx_desc->special) & 3756 le16_to_cpu(rx_desc->special) &
3674 E1000_RXD_SPC_VLAN_MASK); 3757 E1000_RXD_SPC_VLAN_MASK);
@@ -3691,6 +3774,8 @@ next_desc:
3691 cleaned_count = 0; 3774 cleaned_count = 0;
3692 } 3775 }
3693 3776
3777 rx_desc = next_rxd;
3778 buffer_info = next_buffer;
3694 } 3779 }
3695 rx_ring->next_to_clean = i; 3780 rx_ring->next_to_clean = i;
3696 3781
@@ -3716,13 +3801,13 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3716 struct e1000_rx_ring *rx_ring) 3801 struct e1000_rx_ring *rx_ring)
3717#endif 3802#endif
3718{ 3803{
3719 union e1000_rx_desc_packet_split *rx_desc; 3804 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
3720 struct net_device *netdev = adapter->netdev; 3805 struct net_device *netdev = adapter->netdev;
3721 struct pci_dev *pdev = adapter->pdev; 3806 struct pci_dev *pdev = adapter->pdev;
3722 struct e1000_buffer *buffer_info; 3807 struct e1000_buffer *buffer_info, *next_buffer;
3723 struct e1000_ps_page *ps_page; 3808 struct e1000_ps_page *ps_page;
3724 struct e1000_ps_page_dma *ps_page_dma; 3809 struct e1000_ps_page_dma *ps_page_dma;
3725 struct sk_buff *skb; 3810 struct sk_buff *skb, *next_skb;
3726 unsigned int i, j; 3811 unsigned int i, j;
3727 uint32_t length, staterr; 3812 uint32_t length, staterr;
3728 int cleaned_count = 0; 3813 int cleaned_count = 0;
@@ -3731,39 +3816,44 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3731 i = rx_ring->next_to_clean; 3816 i = rx_ring->next_to_clean;
3732 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 3817 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3733 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3818 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3819 buffer_info = &rx_ring->buffer_info[i];
3734 3820
3735 while(staterr & E1000_RXD_STAT_DD) { 3821 while (staterr & E1000_RXD_STAT_DD) {
3736 buffer_info = &rx_ring->buffer_info[i];
3737 ps_page = &rx_ring->ps_page[i]; 3822 ps_page = &rx_ring->ps_page[i];
3738 ps_page_dma = &rx_ring->ps_page_dma[i]; 3823 ps_page_dma = &rx_ring->ps_page_dma[i];
3739#ifdef CONFIG_E1000_NAPI 3824#ifdef CONFIG_E1000_NAPI
3740 if(unlikely(*work_done >= work_to_do)) 3825 if (unlikely(*work_done >= work_to_do))
3741 break; 3826 break;
3742 (*work_done)++; 3827 (*work_done)++;
3743#endif 3828#endif
3829 skb = buffer_info->skb;
3830
3831 if (++i == rx_ring->count) i = 0;
3832 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
3833 next_buffer = &rx_ring->buffer_info[i];
3834 next_skb = next_buffer->skb;
3835
3744 cleaned = TRUE; 3836 cleaned = TRUE;
3745 cleaned_count++; 3837 cleaned_count++;
3746 pci_unmap_single(pdev, buffer_info->dma, 3838 pci_unmap_single(pdev, buffer_info->dma,
3747 buffer_info->length, 3839 buffer_info->length,
3748 PCI_DMA_FROMDEVICE); 3840 PCI_DMA_FROMDEVICE);
3749 3841
3750 skb = buffer_info->skb; 3842 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
3751
3752 if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
3753 E1000_DBG("%s: Packet Split buffers didn't pick up" 3843 E1000_DBG("%s: Packet Split buffers didn't pick up"
3754 " the full packet\n", netdev->name); 3844 " the full packet\n", netdev->name);
3755 dev_kfree_skb_irq(skb); 3845 dev_kfree_skb_irq(skb);
3756 goto next_desc; 3846 goto next_desc;
3757 } 3847 }
3758 3848
3759 if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 3849 if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3760 dev_kfree_skb_irq(skb); 3850 dev_kfree_skb_irq(skb);
3761 goto next_desc; 3851 goto next_desc;
3762 } 3852 }
3763 3853
3764 length = le16_to_cpu(rx_desc->wb.middle.length0); 3854 length = le16_to_cpu(rx_desc->wb.middle.length0);
3765 3855
3766 if(unlikely(!length)) { 3856 if (unlikely(!length)) {
3767 E1000_DBG("%s: Last part of the packet spanning" 3857 E1000_DBG("%s: Last part of the packet spanning"
3768 " multiple descriptors\n", netdev->name); 3858 " multiple descriptors\n", netdev->name);
3769 dev_kfree_skb_irq(skb); 3859 dev_kfree_skb_irq(skb);
@@ -3773,8 +3863,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3773 /* Good Receive */ 3863 /* Good Receive */
3774 skb_put(skb, length); 3864 skb_put(skb, length);
3775 3865
3776 for(j = 0; j < adapter->rx_ps_pages; j++) { 3866 for (j = 0; j < adapter->rx_ps_pages; j++) {
3777 if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) 3867 if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
3778 break; 3868 break;
3779 3869
3780 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], 3870 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
@@ -3794,15 +3884,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3794 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); 3884 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
3795 skb->protocol = eth_type_trans(skb, netdev); 3885 skb->protocol = eth_type_trans(skb, netdev);
3796 3886
3797 if(likely(rx_desc->wb.upper.header_status & 3887 if (likely(rx_desc->wb.upper.header_status &
3798 E1000_RXDPS_HDRSTAT_HDRSP)) { 3888 E1000_RXDPS_HDRSTAT_HDRSP))
3799 adapter->rx_hdr_split++; 3889 adapter->rx_hdr_split++;
3800#ifdef HAVE_RX_ZERO_COPY
3801 skb_shinfo(skb)->zero_copy = TRUE;
3802#endif
3803 }
3804#ifdef CONFIG_E1000_NAPI 3890#ifdef CONFIG_E1000_NAPI
3805 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 3891 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3806 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3892 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3807 le16_to_cpu(rx_desc->wb.middle.vlan) & 3893 le16_to_cpu(rx_desc->wb.middle.vlan) &
3808 E1000_RXD_SPC_VLAN_MASK); 3894 E1000_RXD_SPC_VLAN_MASK);
@@ -3810,7 +3896,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3810 netif_receive_skb(skb); 3896 netif_receive_skb(skb);
3811 } 3897 }
3812#else /* CONFIG_E1000_NAPI */ 3898#else /* CONFIG_E1000_NAPI */
3813 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 3899 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3814 vlan_hwaccel_rx(skb, adapter->vlgrp, 3900 vlan_hwaccel_rx(skb, adapter->vlgrp,
3815 le16_to_cpu(rx_desc->wb.middle.vlan) & 3901 le16_to_cpu(rx_desc->wb.middle.vlan) &
3816 E1000_RXD_SPC_VLAN_MASK); 3902 E1000_RXD_SPC_VLAN_MASK);
@@ -3834,6 +3920,9 @@ next_desc:
3834 cleaned_count = 0; 3920 cleaned_count = 0;
3835 } 3921 }
3836 3922
3923 rx_desc = next_rxd;
3924 buffer_info = next_buffer;
3925
3837 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3926 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3838 } 3927 }
3839 rx_ring->next_to_clean = i; 3928 rx_ring->next_to_clean = i;
@@ -3875,7 +3964,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3875 } 3964 }
3876 3965
3877 3966
3878 if(unlikely(!skb)) { 3967 if (unlikely(!skb)) {
3879 /* Better luck next round */ 3968 /* Better luck next round */
3880 adapter->alloc_rx_buff_failed++; 3969 adapter->alloc_rx_buff_failed++;
3881 break; 3970 break;
@@ -3940,20 +4029,23 @@ map_skb:
3940 rx_desc = E1000_RX_DESC(*rx_ring, i); 4029 rx_desc = E1000_RX_DESC(*rx_ring, i);
3941 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4030 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3942 4031
3943 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { 4032 if (unlikely(++i == rx_ring->count))
3944 /* Force memory writes to complete before letting h/w 4033 i = 0;
3945 * know there are new descriptors to fetch. (Only
3946 * applicable for weak-ordered memory model archs,
3947 * such as IA-64). */
3948 wmb();
3949 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
3950 }
3951
3952 if(unlikely(++i == rx_ring->count)) i = 0;
3953 buffer_info = &rx_ring->buffer_info[i]; 4034 buffer_info = &rx_ring->buffer_info[i];
3954 } 4035 }
3955 4036
3956 rx_ring->next_to_use = i; 4037 if (likely(rx_ring->next_to_use != i)) {
4038 rx_ring->next_to_use = i;
4039 if (unlikely(i-- == 0))
4040 i = (rx_ring->count - 1);
4041
4042 /* Force memory writes to complete before letting h/w
4043 * know there are new descriptors to fetch. (Only
4044 * applicable for weak-ordered memory model archs,
4045 * such as IA-64). */
4046 wmb();
4047 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4048 }
3957} 4049}
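
The restructured tail update above batches descriptor refills: the loop only advances a local index, and a single wmb() plus one RDT write publish the whole batch, with the tail backed up one slot so it points at the last valid descriptor. A sketch of that publish step, with writel()/wmb() modeled by a volatile store and a compiler builtin:

    #include <stdint.h>

    #define barrier_wmb() __sync_synchronize()  /* stand-in for wmb() */

    static void publish_rx_tail(volatile uint32_t *rdt_reg,
                                unsigned *next_to_use, unsigned i,
                                unsigned count)
    {
            if (*next_to_use == i)
                    return;                 /* nothing new was filled */
            *next_to_use = i;
            i = (i == 0) ? count - 1 : i - 1;  /* tail = last valid desc */
            /* Make descriptor writes visible before the doorbell; this
             * matters on weakly-ordered archs such as IA-64. */
            barrier_wmb();
            *rdt_reg = i;                   /* models writel(i, ...rdt) */
    }

One register write per refill batch replaces one write every E1000_RX_BUFFER_WRITE descriptors, which trims MMIO traffic on the hot path.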
3958 4050
3959/** 4051/**
@@ -3983,13 +4075,15 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3983 while (cleaned_count--) { 4075 while (cleaned_count--) {
3984 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 4076 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3985 4077
3986 for(j = 0; j < PS_PAGE_BUFFERS; j++) { 4078 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
3987 if (j < adapter->rx_ps_pages) { 4079 if (j < adapter->rx_ps_pages) {
3988 if (likely(!ps_page->ps_page[j])) { 4080 if (likely(!ps_page->ps_page[j])) {
3989 ps_page->ps_page[j] = 4081 ps_page->ps_page[j] =
3990 alloc_page(GFP_ATOMIC); 4082 alloc_page(GFP_ATOMIC);
3991 if (unlikely(!ps_page->ps_page[j])) 4083 if (unlikely(!ps_page->ps_page[j])) {
4084 adapter->alloc_rx_buff_failed++;
3992 goto no_buffers; 4085 goto no_buffers;
4086 }
3993 ps_page_dma->ps_page_dma[j] = 4087 ps_page_dma->ps_page_dma[j] =
3994 pci_map_page(pdev, 4088 pci_map_page(pdev,
3995 ps_page->ps_page[j], 4089 ps_page->ps_page[j],
@@ -3997,7 +4091,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3997 PCI_DMA_FROMDEVICE); 4091 PCI_DMA_FROMDEVICE);
3998 } 4092 }
3999 /* Refresh the desc even if buffer_addrs didn't 4093 /* Refresh the desc even if buffer_addrs didn't
4000 * change because each write-back erases 4094 * change because each write-back erases
4001 * this info. 4095 * this info.
4002 */ 4096 */
4003 rx_desc->read.buffer_addr[j+1] = 4097 rx_desc->read.buffer_addr[j+1] =
@@ -4008,8 +4102,10 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4008 4102
4009 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); 4103 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
4010 4104
4011 if(unlikely(!skb)) 4105 if (unlikely(!skb)) {
4106 adapter->alloc_rx_buff_failed++;
4012 break; 4107 break;
4108 }
4013 4109
4014 /* Make buffer alignment 2 beyond a 16 byte boundary 4110 /* Make buffer alignment 2 beyond a 16 byte boundary
4015 * this will result in a 16 byte aligned IP header after 4111 * this will result in a 16 byte aligned IP header after
@@ -4027,27 +4123,28 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4027 4123
4028 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 4124 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
4029 4125
4030 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { 4126 if (unlikely(++i == rx_ring->count)) i = 0;
4031 /* Force memory writes to complete before letting h/w
4032 * know there are new descriptors to fetch. (Only
4033 * applicable for weak-ordered memory model archs,
4034 * such as IA-64). */
4035 wmb();
4036 /* Hardware increments by 16 bytes, but packet split
4037 * descriptors are 32 bytes...so we increment tail
4038 * twice as much.
4039 */
4040 writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
4041 }
4042
4043 if(unlikely(++i == rx_ring->count)) i = 0;
4044 buffer_info = &rx_ring->buffer_info[i]; 4127 buffer_info = &rx_ring->buffer_info[i];
4045 ps_page = &rx_ring->ps_page[i]; 4128 ps_page = &rx_ring->ps_page[i];
4046 ps_page_dma = &rx_ring->ps_page_dma[i]; 4129 ps_page_dma = &rx_ring->ps_page_dma[i];
4047 } 4130 }
4048 4131
4049no_buffers: 4132no_buffers:
4050 rx_ring->next_to_use = i; 4133 if (likely(rx_ring->next_to_use != i)) {
4134 rx_ring->next_to_use = i;
4135 if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
4136
4137 /* Force memory writes to complete before letting h/w
4138 * know there are new descriptors to fetch. (Only
4139 * applicable for weak-ordered memory model archs,
4140 * such as IA-64). */
4141 wmb();
4142 /* Hardware increments by 16 bytes, but packet split
4143 * descriptors are 32 bytes...so we increment tail
4144 * twice as much.
4145 */
4146 writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
4147 }
4051} 4148}
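
The packet-split refill gets the same deferred doorbell, with one twist carried over from the old code: packet-split descriptors are 32 bytes while the tail register counts 16-byte units, so the published index is doubled. Sketch under the same stand-ins as above:

    static void publish_rx_tail_ps(volatile uint32_t *rdt_reg,
                                   unsigned *next_to_use, unsigned i,
                                   unsigned count)
    {
            if (*next_to_use == i)
                    return;
            *next_to_use = i;
            i = (i == 0) ? count - 1 : i - 1;
            __sync_synchronize();       /* stand-in for wmb() */
            *rdt_reg = i << 1;          /* 32-byte descs: tail in 16B units */
    }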
4052 4149
4053/** 4150/**
@@ -4061,24 +4158,24 @@ e1000_smartspeed(struct e1000_adapter *adapter)
4061 uint16_t phy_status; 4158 uint16_t phy_status;
4062 uint16_t phy_ctrl; 4159 uint16_t phy_ctrl;
4063 4160
4064 if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || 4161 if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
4065 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) 4162 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
4066 return; 4163 return;
4067 4164
4068 if(adapter->smartspeed == 0) { 4165 if (adapter->smartspeed == 0) {
4069 /* If Master/Slave config fault is asserted twice, 4166 /* If Master/Slave config fault is asserted twice,
4070 * we assume back-to-back faults */ 4167 * we assume back-to-back faults */
4071 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); 4168 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
4072 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4169 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4073 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); 4170 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
4074 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4171 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4075 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); 4172 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
4076 if(phy_ctrl & CR_1000T_MS_ENABLE) { 4173 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4077 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4174 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4078 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, 4175 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
4079 phy_ctrl); 4176 phy_ctrl);
4080 adapter->smartspeed++; 4177 adapter->smartspeed++;
4081 if(!e1000_phy_setup_autoneg(&adapter->hw) && 4178 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
4082 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, 4179 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
4083 &phy_ctrl)) { 4180 &phy_ctrl)) {
4084 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4181 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
@@ -4088,12 +4185,12 @@ e1000_smartspeed(struct e1000_adapter *adapter)
4088 } 4185 }
4089 } 4186 }
4090 return; 4187 return;
4091 } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4188 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4092 /* If still no link, perhaps using 2/3 pair cable */ 4189 /* If still no link, perhaps using 2/3 pair cable */
4093 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); 4190 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
4094 phy_ctrl |= CR_1000T_MS_ENABLE; 4191 phy_ctrl |= CR_1000T_MS_ENABLE;
4095 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl); 4192 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
4096 if(!e1000_phy_setup_autoneg(&adapter->hw) && 4193 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
4097 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) { 4194 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
4098 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4195 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4099 MII_CR_RESTART_AUTO_NEG); 4196 MII_CR_RESTART_AUTO_NEG);
@@ -4101,7 +4198,7 @@ e1000_smartspeed(struct e1000_adapter *adapter)
4101 } 4198 }
4102 } 4199 }
4103 /* Restart process after E1000_SMARTSPEED_MAX iterations */ 4200 /* Restart process after E1000_SMARTSPEED_MAX iterations */
4104 if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4201 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4105 adapter->smartspeed = 0; 4202 adapter->smartspeed = 0;
4106} 4203}
4107 4204
@@ -4142,7 +4239,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4142 uint16_t spddplx; 4239 uint16_t spddplx;
4143 unsigned long flags; 4240 unsigned long flags;
4144 4241
4145 if(adapter->hw.media_type != e1000_media_type_copper) 4242 if (adapter->hw.media_type != e1000_media_type_copper)
4146 return -EOPNOTSUPP; 4243 return -EOPNOTSUPP;
4147 4244
4148 switch (cmd) { 4245 switch (cmd) {
@@ -4150,10 +4247,10 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4150 data->phy_id = adapter->hw.phy_addr; 4247 data->phy_id = adapter->hw.phy_addr;
4151 break; 4248 break;
4152 case SIOCGMIIREG: 4249 case SIOCGMIIREG:
4153 if(!capable(CAP_NET_ADMIN)) 4250 if (!capable(CAP_NET_ADMIN))
4154 return -EPERM; 4251 return -EPERM;
4155 spin_lock_irqsave(&adapter->stats_lock, flags); 4252 spin_lock_irqsave(&adapter->stats_lock, flags);
4156 if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, 4253 if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
4157 &data->val_out)) { 4254 &data->val_out)) {
4158 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4255 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4159 return -EIO; 4256 return -EIO;
@@ -4161,23 +4258,23 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4161 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4258 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4162 break; 4259 break;
4163 case SIOCSMIIREG: 4260 case SIOCSMIIREG:
4164 if(!capable(CAP_NET_ADMIN)) 4261 if (!capable(CAP_NET_ADMIN))
4165 return -EPERM; 4262 return -EPERM;
4166 if(data->reg_num & ~(0x1F)) 4263 if (data->reg_num & ~(0x1F))
4167 return -EFAULT; 4264 return -EFAULT;
4168 mii_reg = data->val_in; 4265 mii_reg = data->val_in;
4169 spin_lock_irqsave(&adapter->stats_lock, flags); 4266 spin_lock_irqsave(&adapter->stats_lock, flags);
4170 if(e1000_write_phy_reg(&adapter->hw, data->reg_num, 4267 if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
4171 mii_reg)) { 4268 mii_reg)) {
4172 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4269 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4173 return -EIO; 4270 return -EIO;
4174 } 4271 }
4175 if(adapter->hw.phy_type == e1000_phy_m88) { 4272 if (adapter->hw.phy_type == e1000_phy_m88) {
4176 switch (data->reg_num) { 4273 switch (data->reg_num) {
4177 case PHY_CTRL: 4274 case PHY_CTRL:
4178 if(mii_reg & MII_CR_POWER_DOWN) 4275 if (mii_reg & MII_CR_POWER_DOWN)
4179 break; 4276 break;
4180 if(mii_reg & MII_CR_AUTO_NEG_EN) { 4277 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4181 adapter->hw.autoneg = 1; 4278 adapter->hw.autoneg = 1;
4182 adapter->hw.autoneg_advertised = 0x2F; 4279 adapter->hw.autoneg_advertised = 0x2F;
4183 } else { 4280 } else {
@@ -4192,14 +4289,14 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4192 HALF_DUPLEX; 4289 HALF_DUPLEX;
4193 retval = e1000_set_spd_dplx(adapter, 4290 retval = e1000_set_spd_dplx(adapter,
4194 spddplx); 4291 spddplx);
4195 if(retval) { 4292 if (retval) {
4196 spin_unlock_irqrestore( 4293 spin_unlock_irqrestore(
4197 &adapter->stats_lock, 4294 &adapter->stats_lock,
4198 flags); 4295 flags);
4199 return retval; 4296 return retval;
4200 } 4297 }
4201 } 4298 }
4202 if(netif_running(adapter->netdev)) { 4299 if (netif_running(adapter->netdev)) {
4203 e1000_down(adapter); 4300 e1000_down(adapter);
4204 e1000_up(adapter); 4301 e1000_up(adapter);
4205 } else 4302 } else
@@ -4207,7 +4304,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4207 break; 4304 break;
4208 case M88E1000_PHY_SPEC_CTRL: 4305 case M88E1000_PHY_SPEC_CTRL:
4209 case M88E1000_EXT_PHY_SPEC_CTRL: 4306 case M88E1000_EXT_PHY_SPEC_CTRL:
4210 if(e1000_phy_reset(&adapter->hw)) { 4307 if (e1000_phy_reset(&adapter->hw)) {
4211 spin_unlock_irqrestore( 4308 spin_unlock_irqrestore(
4212 &adapter->stats_lock, flags); 4309 &adapter->stats_lock, flags);
4213 return -EIO; 4310 return -EIO;
@@ -4217,9 +4314,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4217 } else { 4314 } else {
4218 switch (data->reg_num) { 4315 switch (data->reg_num) {
4219 case PHY_CTRL: 4316 case PHY_CTRL:
4220 if(mii_reg & MII_CR_POWER_DOWN) 4317 if (mii_reg & MII_CR_POWER_DOWN)
4221 break; 4318 break;
4222 if(netif_running(adapter->netdev)) { 4319 if (netif_running(adapter->netdev)) {
4223 e1000_down(adapter); 4320 e1000_down(adapter);
4224 e1000_up(adapter); 4321 e1000_up(adapter);
4225 } else 4322 } else
@@ -4241,7 +4338,7 @@ e1000_pci_set_mwi(struct e1000_hw *hw)
4241 struct e1000_adapter *adapter = hw->back; 4338 struct e1000_adapter *adapter = hw->back;
4242 int ret_val = pci_set_mwi(adapter->pdev); 4339 int ret_val = pci_set_mwi(adapter->pdev);
4243 4340
4244 if(ret_val) 4341 if (ret_val)
4245 DPRINTK(PROBE, ERR, "Error in setting MWI\n"); 4342 DPRINTK(PROBE, ERR, "Error in setting MWI\n");
4246} 4343}
4247 4344
@@ -4290,7 +4387,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4290 e1000_irq_disable(adapter); 4387 e1000_irq_disable(adapter);
4291 adapter->vlgrp = grp; 4388 adapter->vlgrp = grp;
4292 4389
4293 if(grp) { 4390 if (grp) {
4294 /* enable VLAN tag insert/strip */ 4391 /* enable VLAN tag insert/strip */
4295 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 4392 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4296 ctrl |= E1000_CTRL_VME; 4393 ctrl |= E1000_CTRL_VME;
@@ -4312,7 +4409,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4312 rctl = E1000_READ_REG(&adapter->hw, RCTL); 4409 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4313 rctl &= ~E1000_RCTL_VFE; 4410 rctl &= ~E1000_RCTL_VFE;
4314 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 4411 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4315 if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) { 4412 if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
4316 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 4413 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4317 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 4414 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4318 } 4415 }
@@ -4326,9 +4423,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
4326{ 4423{
4327 struct e1000_adapter *adapter = netdev_priv(netdev); 4424 struct e1000_adapter *adapter = netdev_priv(netdev);
4328 uint32_t vfta, index; 4425 uint32_t vfta, index;
4329 if((adapter->hw.mng_cookie.status & 4426
4330 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4427 if ((adapter->hw.mng_cookie.status &
4331 (vid == adapter->mng_vlan_id)) 4428 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4429 (vid == adapter->mng_vlan_id))
4332 return; 4430 return;
4333 /* add VID to filter table */ 4431 /* add VID to filter table */
4334 index = (vid >> 5) & 0x7F; 4432 index = (vid >> 5) & 0x7F;
@@ -4345,13 +4443,13 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
4345 4443
4346 e1000_irq_disable(adapter); 4444 e1000_irq_disable(adapter);
4347 4445
4348 if(adapter->vlgrp) 4446 if (adapter->vlgrp)
4349 adapter->vlgrp->vlan_devices[vid] = NULL; 4447 adapter->vlgrp->vlan_devices[vid] = NULL;
4350 4448
4351 e1000_irq_enable(adapter); 4449 e1000_irq_enable(adapter);
4352 4450
4353 if((adapter->hw.mng_cookie.status & 4451 if ((adapter->hw.mng_cookie.status &
4354 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4452 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4355 (vid == adapter->mng_vlan_id)) { 4453 (vid == adapter->mng_vlan_id)) {
4356 /* release control to f/w */ 4454 /* release control to f/w */
4357 e1000_release_hw_control(adapter); 4455 e1000_release_hw_control(adapter);
@@ -4370,10 +4468,10 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
4370{ 4468{
4371 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); 4469 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
4372 4470
4373 if(adapter->vlgrp) { 4471 if (adapter->vlgrp) {
4374 uint16_t vid; 4472 uint16_t vid;
4375 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 4473 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
4376 if(!adapter->vlgrp->vlan_devices[vid]) 4474 if (!adapter->vlgrp->vlan_devices[vid])
4377 continue; 4475 continue;
4378 e1000_vlan_rx_add_vid(adapter->netdev, vid); 4476 e1000_vlan_rx_add_vid(adapter->netdev, vid);
4379 } 4477 }
@@ -4386,13 +4484,13 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
4386 adapter->hw.autoneg = 0; 4484 adapter->hw.autoneg = 0;
4387 4485
 4388 /* Fiber NICs only allow 1000 Mbps Full duplex */ 4486 /* Fiber NICs only allow 1000 Mbps Full duplex */
4389 if((adapter->hw.media_type == e1000_media_type_fiber) && 4487 if ((adapter->hw.media_type == e1000_media_type_fiber) &&
4390 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 4488 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
4391 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); 4489 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
4392 return -EINVAL; 4490 return -EINVAL;
4393 } 4491 }
4394 4492
4395 switch(spddplx) { 4493 switch (spddplx) {
4396 case SPEED_10 + DUPLEX_HALF: 4494 case SPEED_10 + DUPLEX_HALF:
4397 adapter->hw.forced_speed_duplex = e1000_10_half; 4495 adapter->hw.forced_speed_duplex = e1000_10_half;
4398 break; 4496 break;
@@ -4418,6 +4516,54 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
4418} 4516}
4419 4517
4420#ifdef CONFIG_PM 4518#ifdef CONFIG_PM
 4519/* these functions save and restore 16 or 64 dwords (64 or 256 bytes) of config
4520 * space versus the 64 bytes that pci_[save|restore]_state handle
4521 */
4522#define PCIE_CONFIG_SPACE_LEN 256
4523#define PCI_CONFIG_SPACE_LEN 64
4524static int
4525e1000_pci_save_state(struct e1000_adapter *adapter)
4526{
4527 struct pci_dev *dev = adapter->pdev;
4528 int size;
4529 int i;
4530 if (adapter->hw.mac_type >= e1000_82571)
4531 size = PCIE_CONFIG_SPACE_LEN;
4532 else
4533 size = PCI_CONFIG_SPACE_LEN;
4534
4535 WARN_ON(adapter->config_space != NULL);
4536
4537 adapter->config_space = kmalloc(size, GFP_KERNEL);
4538 if (!adapter->config_space) {
4539 DPRINTK(PROBE, ERR, "unable to allocate %d bytes\n", size);
4540 return -ENOMEM;
4541 }
4542 for (i = 0; i < (size / 4); i++)
4543 pci_read_config_dword(dev, i * 4, &adapter->config_space[i]);
4544 return 0;
4545}
4546
4547static void
4548e1000_pci_restore_state(struct e1000_adapter *adapter)
4549{
4550 struct pci_dev *dev = adapter->pdev;
4551 int size;
4552 int i;
4553 if (adapter->config_space == NULL)
4554 return;
4555 if (adapter->hw.mac_type >= e1000_82571)
4556 size = PCIE_CONFIG_SPACE_LEN;
4557 else
4558 size = PCI_CONFIG_SPACE_LEN;
4559 for (i = 0; i < (size / 4); i++)
4560 pci_write_config_dword(dev, i * 4, adapter->config_space[i]);
4561 kfree(adapter->config_space);
4562 adapter->config_space = NULL;
4563 return;
4564}
4565#endif /* CONFIG_PM */
4566
4421static int 4567static int
4422e1000_suspend(struct pci_dev *pdev, pm_message_t state) 4568e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4423{ 4569{
@@ -4429,25 +4575,33 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4429 4575
4430 netif_device_detach(netdev); 4576 netif_device_detach(netdev);
4431 4577
4432 if(netif_running(netdev)) 4578 if (netif_running(netdev))
4433 e1000_down(adapter); 4579 e1000_down(adapter);
4434 4580
4581#ifdef CONFIG_PM
4582 /* implement our own version of pci_save_state(pdev) because pci
 4583 * express adapters have larger 256-byte config spaces */
4584 retval = e1000_pci_save_state(adapter);
4585 if (retval)
4586 return retval;
4587#endif
4588
4435 status = E1000_READ_REG(&adapter->hw, STATUS); 4589 status = E1000_READ_REG(&adapter->hw, STATUS);
4436 if(status & E1000_STATUS_LU) 4590 if (status & E1000_STATUS_LU)
4437 wufc &= ~E1000_WUFC_LNKC; 4591 wufc &= ~E1000_WUFC_LNKC;
4438 4592
4439 if(wufc) { 4593 if (wufc) {
4440 e1000_setup_rctl(adapter); 4594 e1000_setup_rctl(adapter);
4441 e1000_set_multi(netdev); 4595 e1000_set_multi(netdev);
4442 4596
4443 /* turn on all-multi mode if wake on multicast is enabled */ 4597 /* turn on all-multi mode if wake on multicast is enabled */
4444 if(adapter->wol & E1000_WUFC_MC) { 4598 if (adapter->wol & E1000_WUFC_MC) {
4445 rctl = E1000_READ_REG(&adapter->hw, RCTL); 4599 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4446 rctl |= E1000_RCTL_MPE; 4600 rctl |= E1000_RCTL_MPE;
4447 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 4601 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4448 } 4602 }
4449 4603
4450 if(adapter->hw.mac_type >= e1000_82540) { 4604 if (adapter->hw.mac_type >= e1000_82540) {
4451 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 4605 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4452 /* advertise wake from D3Cold */ 4606 /* advertise wake from D3Cold */
4453 #define E1000_CTRL_ADVD3WUC 0x00100000 4607 #define E1000_CTRL_ADVD3WUC 0x00100000
@@ -4458,7 +4612,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4458 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); 4612 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
4459 } 4613 }
4460 4614
4461 if(adapter->hw.media_type == e1000_media_type_fiber || 4615 if (adapter->hw.media_type == e1000_media_type_fiber ||
4462 adapter->hw.media_type == e1000_media_type_internal_serdes) { 4616 adapter->hw.media_type == e1000_media_type_internal_serdes) {
4463 /* keep the laser running in D3 */ 4617 /* keep the laser running in D3 */
4464 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); 4618 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
@@ -4488,12 +4642,10 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4488 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); 4642 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4489 } 4643 }
4490 4644
4491 pci_save_state(pdev); 4645 if (adapter->hw.mac_type >= e1000_82540 &&
4492
4493 if(adapter->hw.mac_type >= e1000_82540 &&
4494 adapter->hw.media_type == e1000_media_type_copper) { 4646 adapter->hw.media_type == e1000_media_type_copper) {
4495 manc = E1000_READ_REG(&adapter->hw, MANC); 4647 manc = E1000_READ_REG(&adapter->hw, MANC);
4496 if(manc & E1000_MANC_SMBUS_EN) { 4648 if (manc & E1000_MANC_SMBUS_EN) {
4497 manc |= E1000_MANC_ARP_EN; 4649 manc |= E1000_MANC_ARP_EN;
4498 E1000_WRITE_REG(&adapter->hw, MANC, manc); 4650 E1000_WRITE_REG(&adapter->hw, MANC, manc);
4499 retval = pci_enable_wake(pdev, PCI_D3hot, 1); 4651 retval = pci_enable_wake(pdev, PCI_D3hot, 1);
@@ -4518,6 +4670,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4518 return 0; 4670 return 0;
4519} 4671}
4520 4672
4673#ifdef CONFIG_PM
4521static int 4674static int
4522e1000_resume(struct pci_dev *pdev) 4675e1000_resume(struct pci_dev *pdev)
4523{ 4676{
@@ -4529,6 +4682,7 @@ e1000_resume(struct pci_dev *pdev)
4529 retval = pci_set_power_state(pdev, PCI_D0); 4682 retval = pci_set_power_state(pdev, PCI_D0);
4530 if (retval) 4683 if (retval)
4531 DPRINTK(PROBE, ERR, "Error in setting power state\n"); 4684 DPRINTK(PROBE, ERR, "Error in setting power state\n");
4685 e1000_pci_restore_state(adapter);
4532 ret_val = pci_enable_device(pdev); 4686 ret_val = pci_enable_device(pdev);
4533 pci_set_master(pdev); 4687 pci_set_master(pdev);
4534 4688
@@ -4542,12 +4696,12 @@ e1000_resume(struct pci_dev *pdev)
4542 e1000_reset(adapter); 4696 e1000_reset(adapter);
4543 E1000_WRITE_REG(&adapter->hw, WUS, ~0); 4697 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
4544 4698
4545 if(netif_running(netdev)) 4699 if (netif_running(netdev))
4546 e1000_up(adapter); 4700 e1000_up(adapter);
4547 4701
4548 netif_device_attach(netdev); 4702 netif_device_attach(netdev);
4549 4703
4550 if(adapter->hw.mac_type >= e1000_82540 && 4704 if (adapter->hw.mac_type >= e1000_82540 &&
4551 adapter->hw.media_type == e1000_media_type_copper) { 4705 adapter->hw.media_type == e1000_media_type_copper) {
4552 manc = E1000_READ_REG(&adapter->hw, MANC); 4706 manc = E1000_READ_REG(&adapter->hw, MANC);
4553 manc &= ~(E1000_MANC_ARP_EN); 4707 manc &= ~(E1000_MANC_ARP_EN);
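The e1000_pci_save_state()/e1000_pci_restore_state() pair added above exists because, at this point in history, the generic pci_save_state() preserved only the first 64 bytes of config space, while PCI Express functions expose 256 bytes. A minimal, self-contained sketch of the same dword-at-a-time pattern (the helper names and the explicit length parameter are illustrative, not the driver's API):

#include <linux/pci.h>
#include <linux/slab.h>

/* Save the first 'len' bytes (a multiple of 4) of config space. */
static u32 *cfg_space_save(struct pci_dev *dev, int len)
{
	u32 *buf = kmalloc(len, GFP_KERNEL);
	int i;

	if (!buf)
		return NULL;
	for (i = 0; i < len / 4; i++)
		pci_read_config_dword(dev, i * 4, &buf[i]);
	return buf;
}

/* Write the saved dwords back; the caller frees the buffer. */
static void cfg_space_restore(struct pci_dev *dev, u32 *buf, int len)
{
	int i;

	for (i = 0; i < len / 4; i++)
		pci_write_config_dword(dev, i * 4, buf[i]);
}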
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index aac64de61437..9790db974dc1 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -47,7 +47,7 @@
47 BUG(); \ 47 BUG(); \
48 } else { \ 48 } else { \
49 msleep(x); \ 49 msleep(x); \
50 } } while(0) 50 } } while (0)
51 51
52/* Some workarounds require millisecond delays and are run during interrupt 52/* Some workarounds require millisecond delays and are run during interrupt
53 * context. Most notably, when establishing link, the phy may need tweaking 53 * context. Most notably, when establishing link, the phy may need tweaking
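The osdep comment above is the reason the driver keeps separate delay macros: msleep() may schedule and is therefore forbidden in interrupt context, so interrupt-time workarounds must busy-wait instead. A hedged sketch of that split (the helper name is invented, and the header providing in_interrupt() varies across kernel versions):

#include <linux/delay.h>
#include <linux/hardirq.h>

/* Sleep when we may, spin when we must. */
static inline void ms_delay_anywhere(unsigned int ms)
{
	if (in_interrupt())
		mdelay(ms);	/* atomic context: busy-wait */
	else
		msleep(ms);	/* process context: let the CPU go */
}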
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 0a7918c62557..3768d83cd577 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -227,7 +227,7 @@ static int __devinit
227e1000_validate_option(int *value, struct e1000_option *opt, 227e1000_validate_option(int *value, struct e1000_option *opt,
228 struct e1000_adapter *adapter) 228 struct e1000_adapter *adapter)
229{ 229{
230 if(*value == OPTION_UNSET) { 230 if (*value == OPTION_UNSET) {
231 *value = opt->def; 231 *value = opt->def;
232 return 0; 232 return 0;
233 } 233 }
@@ -244,7 +244,7 @@ e1000_validate_option(int *value, struct e1000_option *opt,
244 } 244 }
245 break; 245 break;
246 case range_option: 246 case range_option:
247 if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 247 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
248 DPRINTK(PROBE, INFO, 248 DPRINTK(PROBE, INFO,
249 "%s set to %i\n", opt->name, *value); 249 "%s set to %i\n", opt->name, *value);
250 return 0; 250 return 0;
@@ -254,10 +254,10 @@ e1000_validate_option(int *value, struct e1000_option *opt,
254 int i; 254 int i;
255 struct e1000_opt_list *ent; 255 struct e1000_opt_list *ent;
256 256
257 for(i = 0; i < opt->arg.l.nr; i++) { 257 for (i = 0; i < opt->arg.l.nr; i++) {
258 ent = &opt->arg.l.p[i]; 258 ent = &opt->arg.l.p[i];
259 if(*value == ent->i) { 259 if (*value == ent->i) {
260 if(ent->str[0] != '\0') 260 if (ent->str[0] != '\0')
261 DPRINTK(PROBE, INFO, "%s\n", ent->str); 261 DPRINTK(PROBE, INFO, "%s\n", ent->str);
262 return 0; 262 return 0;
263 } 263 }
@@ -291,7 +291,7 @@ void __devinit
291e1000_check_options(struct e1000_adapter *adapter) 291e1000_check_options(struct e1000_adapter *adapter)
292{ 292{
293 int bd = adapter->bd_number; 293 int bd = adapter->bd_number;
294 if(bd >= E1000_MAX_NIC) { 294 if (bd >= E1000_MAX_NIC) {
295 DPRINTK(PROBE, NOTICE, 295 DPRINTK(PROBE, NOTICE,
296 "Warning: no configuration for board #%i\n", bd); 296 "Warning: no configuration for board #%i\n", bd);
297 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); 297 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
@@ -315,7 +315,7 @@ e1000_check_options(struct e1000_adapter *adapter)
315 if (num_TxDescriptors > bd) { 315 if (num_TxDescriptors > bd) {
316 tx_ring->count = TxDescriptors[bd]; 316 tx_ring->count = TxDescriptors[bd];
317 e1000_validate_option(&tx_ring->count, &opt, adapter); 317 e1000_validate_option(&tx_ring->count, &opt, adapter);
318 E1000_ROUNDUP(tx_ring->count, 318 E1000_ROUNDUP(tx_ring->count,
319 REQ_TX_DESCRIPTOR_MULTIPLE); 319 REQ_TX_DESCRIPTOR_MULTIPLE);
320 } else { 320 } else {
321 tx_ring->count = opt.def; 321 tx_ring->count = opt.def;
@@ -341,7 +341,7 @@ e1000_check_options(struct e1000_adapter *adapter)
341 if (num_RxDescriptors > bd) { 341 if (num_RxDescriptors > bd) {
342 rx_ring->count = RxDescriptors[bd]; 342 rx_ring->count = RxDescriptors[bd];
343 e1000_validate_option(&rx_ring->count, &opt, adapter); 343 e1000_validate_option(&rx_ring->count, &opt, adapter);
344 E1000_ROUNDUP(rx_ring->count, 344 E1000_ROUNDUP(rx_ring->count,
345 REQ_RX_DESCRIPTOR_MULTIPLE); 345 REQ_RX_DESCRIPTOR_MULTIPLE);
346 } else { 346 } else {
347 rx_ring->count = opt.def; 347 rx_ring->count = opt.def;
@@ -403,7 +403,7 @@ e1000_check_options(struct e1000_adapter *adapter)
403 403
404 if (num_TxIntDelay > bd) { 404 if (num_TxIntDelay > bd) {
405 adapter->tx_int_delay = TxIntDelay[bd]; 405 adapter->tx_int_delay = TxIntDelay[bd];
406 e1000_validate_option(&adapter->tx_int_delay, &opt, 406 e1000_validate_option(&adapter->tx_int_delay, &opt,
407 adapter); 407 adapter);
408 } else { 408 } else {
409 adapter->tx_int_delay = opt.def; 409 adapter->tx_int_delay = opt.def;
@@ -421,7 +421,7 @@ e1000_check_options(struct e1000_adapter *adapter)
421 421
422 if (num_TxAbsIntDelay > bd) { 422 if (num_TxAbsIntDelay > bd) {
423 adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; 423 adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
424 e1000_validate_option(&adapter->tx_abs_int_delay, &opt, 424 e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
425 adapter); 425 adapter);
426 } else { 426 } else {
427 adapter->tx_abs_int_delay = opt.def; 427 adapter->tx_abs_int_delay = opt.def;
@@ -439,7 +439,7 @@ e1000_check_options(struct e1000_adapter *adapter)
439 439
440 if (num_RxIntDelay > bd) { 440 if (num_RxIntDelay > bd) {
441 adapter->rx_int_delay = RxIntDelay[bd]; 441 adapter->rx_int_delay = RxIntDelay[bd];
442 e1000_validate_option(&adapter->rx_int_delay, &opt, 442 e1000_validate_option(&adapter->rx_int_delay, &opt,
443 adapter); 443 adapter);
444 } else { 444 } else {
445 adapter->rx_int_delay = opt.def; 445 adapter->rx_int_delay = opt.def;
@@ -457,7 +457,7 @@ e1000_check_options(struct e1000_adapter *adapter)
457 457
458 if (num_RxAbsIntDelay > bd) { 458 if (num_RxAbsIntDelay > bd) {
459 adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; 459 adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
460 e1000_validate_option(&adapter->rx_abs_int_delay, &opt, 460 e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
461 adapter); 461 adapter);
462 } else { 462 } else {
463 adapter->rx_abs_int_delay = opt.def; 463 adapter->rx_abs_int_delay = opt.def;
@@ -475,17 +475,17 @@ e1000_check_options(struct e1000_adapter *adapter)
475 475
476 if (num_InterruptThrottleRate > bd) { 476 if (num_InterruptThrottleRate > bd) {
477 adapter->itr = InterruptThrottleRate[bd]; 477 adapter->itr = InterruptThrottleRate[bd];
478 switch(adapter->itr) { 478 switch (adapter->itr) {
479 case 0: 479 case 0:
480 DPRINTK(PROBE, INFO, "%s turned off\n", 480 DPRINTK(PROBE, INFO, "%s turned off\n",
481 opt.name); 481 opt.name);
482 break; 482 break;
483 case 1: 483 case 1:
484 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 484 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
485 opt.name); 485 opt.name);
486 break; 486 break;
487 default: 487 default:
488 e1000_validate_option(&adapter->itr, &opt, 488 e1000_validate_option(&adapter->itr, &opt,
489 adapter); 489 adapter);
490 break; 490 break;
491 } 491 }
@@ -494,7 +494,7 @@ e1000_check_options(struct e1000_adapter *adapter)
494 } 494 }
495 } 495 }
496 496
497 switch(adapter->hw.media_type) { 497 switch (adapter->hw.media_type) {
498 case e1000_media_type_fiber: 498 case e1000_media_type_fiber:
499 case e1000_media_type_internal_serdes: 499 case e1000_media_type_internal_serdes:
500 e1000_check_fiber_options(adapter); 500 e1000_check_fiber_options(adapter);
@@ -518,17 +518,17 @@ static void __devinit
518e1000_check_fiber_options(struct e1000_adapter *adapter) 518e1000_check_fiber_options(struct e1000_adapter *adapter)
519{ 519{
520 int bd = adapter->bd_number; 520 int bd = adapter->bd_number;
521 if(num_Speed > bd) { 521 if (num_Speed > bd) {
522 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " 522 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
523 "parameter ignored\n"); 523 "parameter ignored\n");
524 } 524 }
525 525
526 if(num_Duplex > bd) { 526 if (num_Duplex > bd) {
527 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " 527 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
528 "parameter ignored\n"); 528 "parameter ignored\n");
529 } 529 }
530 530
531 if((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { 531 if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
532 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " 532 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
533 "not valid for fiber adapters, " 533 "not valid for fiber adapters, "
534 "parameter ignored\n"); 534 "parameter ignored\n");
@@ -598,7 +598,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
598 } 598 }
599 } 599 }
600 600
601 if((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { 601 if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
602 DPRINTK(PROBE, INFO, 602 DPRINTK(PROBE, INFO,
603 "AutoNeg specified along with Speed or Duplex, " 603 "AutoNeg specified along with Speed or Duplex, "
604 "parameter ignored\n"); 604 "parameter ignored\n");
@@ -659,7 +659,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
659 switch (speed + dplx) { 659 switch (speed + dplx) {
660 case 0: 660 case 0:
661 adapter->hw.autoneg = adapter->fc_autoneg = 1; 661 adapter->hw.autoneg = adapter->fc_autoneg = 1;
662 if((num_Speed > bd) && (speed != 0 || dplx != 0)) 662 if ((num_Speed > bd) && (speed != 0 || dplx != 0))
663 DPRINTK(PROBE, INFO, 663 DPRINTK(PROBE, INFO,
664 "Speed and duplex autonegotiation enabled\n"); 664 "Speed and duplex autonegotiation enabled\n");
665 break; 665 break;
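e1000_validate_option() above dispatches on an option type (enable, range, list) and falls back to the compiled-in default when a per-board module parameter is out of bounds. The core of the range case reduces to this pattern (struct and names simplified for illustration):

/* Illustrative reduction of the range-validation case. */
struct int_option {
	const char *name;
	int min, max, def;
};

static int validate_range(int *value, const struct int_option *opt)
{
	if (*value >= opt->min && *value <= opt->max)
		return 0;		/* accepted as given */
	*value = opt->def;		/* out of range: use the default */
	return -1;
}

Note that InterruptThrottleRate layers two magic values (0 = off, 1 = dynamic) in front of the same range check, which is why the hunk above special-cases them before calling the validator.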
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index d9ce8c549416..bc36edff2058 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2595,15 +2595,11 @@ static int __init serial8250_init(void)
2595 if (ret) 2595 if (ret)
2596 goto out; 2596 goto out;
2597 2597
2598 ret = platform_driver_register(&serial8250_isa_driver);
2599 if (ret)
2600 goto unreg_uart_drv;
2601
2602 serial8250_isa_devs = platform_device_alloc("serial8250", 2598 serial8250_isa_devs = platform_device_alloc("serial8250",
2603 PLAT8250_DEV_LEGACY); 2599 PLAT8250_DEV_LEGACY);
2604 if (!serial8250_isa_devs) { 2600 if (!serial8250_isa_devs) {
2605 ret = -ENOMEM; 2601 ret = -ENOMEM;
2606 goto unreg_plat_drv; 2602 goto unreg_uart_drv;
2607 } 2603 }
2608 2604
2609 ret = platform_device_add(serial8250_isa_devs); 2605 ret = platform_device_add(serial8250_isa_devs);
@@ -2612,12 +2608,13 @@ static int __init serial8250_init(void)
2612 2608
2613 serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev); 2609 serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev);
2614 2610
2615 goto out; 2611 ret = platform_driver_register(&serial8250_isa_driver);
2612 if (ret == 0)
2613 goto out;
2616 2614
2615 platform_device_del(serial8250_isa_devs);
2617 put_dev: 2616 put_dev:
2618 platform_device_put(serial8250_isa_devs); 2617 platform_device_put(serial8250_isa_devs);
2619 unreg_plat_drv:
2620 platform_driver_unregister(&serial8250_isa_driver);
2621 unreg_uart_drv: 2618 unreg_uart_drv:
2622 uart_unregister_driver(&serial8250_reg); 2619 uart_unregister_driver(&serial8250_reg);
2623 out: 2620 out:
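The serial8250_init() change above moves platform_driver_register() after the legacy platform device is created, and rewrites the failure ladder so each step undoes exactly what has succeeded, in reverse order. The resulting shape, with placeholder objects, looks like this:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>

/* Placeholders: a real driver defines and fills these elsewhere. */
static struct uart_driver example_uart_driver;
static struct platform_driver example_isa_driver;
static struct platform_device *example_devs;

static int __init example_init(void)
{
	int ret;

	ret = uart_register_driver(&example_uart_driver);
	if (ret)
		goto out;

	example_devs = platform_device_alloc("example", -1);
	if (!example_devs) {
		ret = -ENOMEM;
		goto unreg_uart_drv;
	}

	ret = platform_device_add(example_devs);
	if (ret)
		goto put_dev;

	/* register the driver last, so probe() can see the device */
	ret = platform_driver_register(&example_isa_driver);
	if (ret == 0)
		goto out;

	platform_device_del(example_devs);
put_dev:
	platform_device_put(example_devs);
unreg_uart_drv:
	uart_unregister_driver(&example_uart_driver);
out:
	return ret;
}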
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 589fb076654a..2a912153321e 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -940,6 +940,7 @@ enum pci_board_num_t {
940 pbn_b2_bt_2_921600, 940 pbn_b2_bt_2_921600,
941 pbn_b2_bt_4_921600, 941 pbn_b2_bt_4_921600,
942 942
943 pbn_b3_2_115200,
943 pbn_b3_4_115200, 944 pbn_b3_4_115200,
944 pbn_b3_8_115200, 945 pbn_b3_8_115200,
945 946
@@ -1311,6 +1312,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
1311 .uart_offset = 8, 1312 .uart_offset = 8,
1312 }, 1313 },
1313 1314
1315 [pbn_b3_2_115200] = {
1316 .flags = FL_BASE3,
1317 .num_ports = 2,
1318 .base_baud = 115200,
1319 .uart_offset = 8,
1320 },
1314 [pbn_b3_4_115200] = { 1321 [pbn_b3_4_115200] = {
1315 .flags = FL_BASE3, 1322 .flags = FL_BASE3,
1316 .num_ports = 4, 1323 .num_ports = 4,
@@ -2272,6 +2279,9 @@ static struct pci_device_id serial_pci_tbl[] = {
2272 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2279 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2273 pbn_nec_nile4 }, 2280 pbn_nec_nile4 },
2274 2281
2282 { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2,
2283 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2284 pbn_b3_2_115200 },
2275 { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM4, 2285 { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM4,
2276 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2286 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2277 pbn_b3_4_115200 }, 2287 pbn_b3_4_115200 },
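Adding a PCI serial card in this file is purely data-driven: one pciserial_board entry describing the port layout, plus one pci_device_id row pointing at it, exactly as the pbn_b3_2_115200 hunks show. A self-contained sketch of the same two-table idea (every type, name, and ID below is invented for illustration):

/* Sketch of the board-layout table plus the ID-to-board match table. */
struct board_layout {
	unsigned int num_ports;
	unsigned int base_baud;
	unsigned int uart_offset;
};

enum { brd_acme_2_115200 };

static const struct board_layout boards[] = {
	[brd_acme_2_115200] = {
		.num_ports   = 2,
		.base_baud   = 115200,
		.uart_offset = 8,
	},
};

struct id_match {
	unsigned short vendor, device;
	unsigned int board;		/* index into boards[] */
};

static const struct id_match id_table[] = {
	{ 0x1234 /* vendor */, 0x0002 /* device */, brd_acme_2_115200 },
};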
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 5e7199f7b59c..9fd1925de361 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -301,7 +301,7 @@ config SERIAL_AT91_TTYAT
301 depends on SERIAL_AT91=y 301 depends on SERIAL_AT91=y
302 help 302 help
303 Say Y here if you wish to have the five internal AT91RM9200 UARTs 303 Say Y here if you wish to have the five internal AT91RM9200 UARTs
304 appear as /dev/ttyAT0-4 (major 240, minor 0-4) instead of the 304 appear as /dev/ttyAT0-4 (major 204, minor 154-158) instead of the
305 normal /dev/ttyS0-4 (major 4, minor 64-68). This is necessary if 305 normal /dev/ttyS0-4 (major 4, minor 64-68). This is necessary if
306 you also want other UARTs, such as external 8250/16C550 compatible 306 you also want other UARTs, such as external 8250/16C550 compatible
307 UARTs. 307 UARTs.
diff --git a/drivers/serial/at91_serial.c b/drivers/serial/at91_serial.c
index 0e206063d685..2113feb75c39 100644
--- a/drivers/serial/at91_serial.c
+++ b/drivers/serial/at91_serial.c
@@ -222,8 +222,6 @@ static void at91_rx_chars(struct uart_port *port, struct pt_regs *regs)
222 while (status & (AT91_US_RXRDY)) { 222 while (status & (AT91_US_RXRDY)) {
223 ch = UART_GET_CHAR(port); 223 ch = UART_GET_CHAR(port);
224 224
225 if (tty->flip.count >= TTY_FLIPBUF_SIZE)
226 goto ignore_char;
227 port->icount.rx++; 225 port->icount.rx++;
228 226
229 flg = TTY_NORMAL; 227 flg = TTY_NORMAL;
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 5468e5a767e2..43e67d6c29d4 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -6,7 +6,7 @@
6 * driver for that. 6 * driver for that.
7 * 7 *
8 * 8 *
9 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. 9 * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of version 2 of the GNU General Public License 12 * under the terms of version 2 of the GNU General Public License
@@ -829,8 +829,8 @@ static int __init sn_sal_module_init(void)
829 misc.name = DEVICE_NAME_DYNAMIC; 829 misc.name = DEVICE_NAME_DYNAMIC;
830 retval = misc_register(&misc); 830 retval = misc_register(&misc);
831 if (retval != 0) { 831 if (retval != 0) {
832 printk 832 printk(KERN_WARNING "Failed to register console "
833 ("Failed to register console device using misc_register.\n"); 833 "device using misc_register.\n");
834 return -ENODEV; 834 return -ENODEV;
835 } 835 }
836 sal_console_uart.major = MISC_MAJOR; 836 sal_console_uart.major = MISC_MAJOR;
@@ -942,88 +942,75 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
942{ 942{
943 unsigned long flags = 0; 943 unsigned long flags = 0;
944 struct sn_cons_port *port = &sal_console_port; 944 struct sn_cons_port *port = &sal_console_port;
945#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
946 static int stole_lock = 0; 945 static int stole_lock = 0;
947#endif
948 946
949 BUG_ON(!port->sc_is_asynch); 947 BUG_ON(!port->sc_is_asynch);
950 948
951 /* We can't look at the xmit buffer if we're not registered with serial core 949 /* We can't look at the xmit buffer if we're not registered with serial core
952 * yet. So only do the fancy recovery after registering 950 * yet. So only do the fancy recovery after registering
953 */ 951 */
954 if (port->sc_port.info) { 952 if (!port->sc_port.info) {
955 953 /* Not yet registered with serial core - simple case */
956 /* somebody really wants this output, might be an 954 puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
957 * oops, kdb, panic, etc. make sure they get it. */ 955 return;
958#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) 956 }
959 if (spin_is_locked(&port->sc_port.lock)) {
960 int lhead = port->sc_port.info->xmit.head;
961 int ltail = port->sc_port.info->xmit.tail;
962 int counter, got_lock = 0;
963 957
964 /* 958 /* somebody really wants this output, might be an
965 * We attempt to determine if someone has died with the 959 * oops, kdb, panic, etc. make sure they get it. */
966 * lock. We wait ~20 secs after the head and tail ptrs 960 if (spin_is_locked(&port->sc_port.lock)) {
967 * stop moving and assume the lock holder is not functional 961 int lhead = port->sc_port.info->xmit.head;
968 * and plow ahead. If the lock is freed within the time out 962 int ltail = port->sc_port.info->xmit.tail;
969 * period we re-get the lock and go ahead normally. We also 963 int counter, got_lock = 0;
970 * remember if we have plowed ahead so that we don't have 964
 971 * to wait out the time out period again - the assumption 965 /*
972 * is that we will time out again. 966 * We attempt to determine if someone has died with the
973 */ 967 * lock. We wait ~20 secs after the head and tail ptrs
968 * stop moving and assume the lock holder is not functional
969 * and plow ahead. If the lock is freed within the time out
970 * period we re-get the lock and go ahead normally. We also
971 * remember if we have plowed ahead so that we don't have
 972 * to wait out the time out period again - the assumption
973 * is that we will time out again.
974 */
974 975
975 for (counter = 0; counter < 150; mdelay(125), counter++) { 976 for (counter = 0; counter < 150; mdelay(125), counter++) {
976 if (!spin_is_locked(&port->sc_port.lock) 977 if (!spin_is_locked(&port->sc_port.lock)
977 || stole_lock) { 978 || stole_lock) {
978 if (!stole_lock) { 979 if (!stole_lock) {
979 spin_lock_irqsave(&port-> 980 spin_lock_irqsave(&port->sc_port.lock,
980 sc_port.lock, 981 flags);
981 flags); 982 got_lock = 1;
982 got_lock = 1;
983 }
984 break;
985 } else {
986 /* still locked */
987 if ((lhead !=
988 port->sc_port.info->xmit.head)
989 || (ltail !=
990 port->sc_port.info->xmit.
991 tail)) {
992 lhead =
993 port->sc_port.info->xmit.
994 head;
995 ltail =
996 port->sc_port.info->xmit.
997 tail;
998 counter = 0;
999 }
1000 } 983 }
1001 } 984 break;
1002 /* flush anything in the serial core xmit buffer, raw */
1003 sn_transmit_chars(port, 1);
1004 if (got_lock) {
1005 spin_unlock_irqrestore(&port->sc_port.lock,
1006 flags);
1007 stole_lock = 0;
1008 } else { 985 } else {
1009 /* fell thru */ 986 /* still locked */
1010 stole_lock = 1; 987 if ((lhead != port->sc_port.info->xmit.head)
988 || (ltail !=
989 port->sc_port.info->xmit.tail)) {
990 lhead =
991 port->sc_port.info->xmit.head;
992 ltail =
993 port->sc_port.info->xmit.tail;
994 counter = 0;
995 }
1011 } 996 }
1012 puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); 997 }
1013 } else { 998 /* flush anything in the serial core xmit buffer, raw */
1014 stole_lock = 0; 999 sn_transmit_chars(port, 1);
1015#endif 1000 if (got_lock) {
1016 spin_lock_irqsave(&port->sc_port.lock, flags);
1017 sn_transmit_chars(port, 1);
1018 spin_unlock_irqrestore(&port->sc_port.lock, flags); 1001 spin_unlock_irqrestore(&port->sc_port.lock, flags);
1019 1002 stole_lock = 0;
1020 puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); 1003 } else {
1021#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) 1004 /* fell thru */
1005 stole_lock = 1;
1022 } 1006 }
1023#endif 1007 puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
1024 } 1008 } else {
1025 else { 1009 stole_lock = 0;
1026 /* Not yet registered with serial core - simple case */ 1010 spin_lock_irqsave(&port->sc_port.lock, flags);
1011 sn_transmit_chars(port, 1);
1012 spin_unlock_irqrestore(&port->sc_port.lock, flags);
1013
1027 puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); 1014 puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
1028 } 1015 }
1029} 1016}
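The long comment in sn_sal_console_write() describes a deliberate last-resort policy: if the port lock looks abandoned (no xmit head/tail movement for roughly 20 seconds of polling), the console path steals it rather than lose an oops or panic message. A reduced sketch of that watch-and-steal loop (the single progress counter stands in for the xmit head/tail pair; the function name is illustrative):

#include <linux/spinlock.h>
#include <linux/delay.h>

/*
 * Poll for up to ~20 s (150 * 125 ms). Return 1 with the lock held if
 * the owner released it; return 0 if the owner looks dead, in which
 * case the caller plows ahead without the lock.
 */
static int console_lock_or_steal(spinlock_t *lock, volatile int *progress,
				 unsigned long *flags)
{
	int last = *progress;
	int counter;

	for (counter = 0; counter < 150; mdelay(125), counter++) {
		if (!spin_is_locked(lock)) {
			spin_lock_irqsave(lock, *flags);
			return 1;
		}
		if (*progress != last) {
			/* owner is alive and making progress: keep waiting */
			last = *progress;
			counter = 0;
		}
	}
	return 0;
}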
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index 5fc4a62173d9..fa4ae94243c2 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -34,6 +34,7 @@ sunserial_console_termios(struct console *con)
34 char *mode_prop = "ttyX-mode"; 34 char *mode_prop = "ttyX-mode";
35 char *cd_prop = "ttyX-ignore-cd"; 35 char *cd_prop = "ttyX-ignore-cd";
36 char *dtr_prop = "ttyX-rts-dtr-off"; 36 char *dtr_prop = "ttyX-rts-dtr-off";
37 char *ssp_console_modes_prop = "ssp-console-modes";
37 int baud, bits, stop, cflag; 38 int baud, bits, stop, cflag;
38 char parity; 39 char parity;
39 int carrier = 0; 40 int carrier = 0;
@@ -43,14 +44,39 @@ sunserial_console_termios(struct console *con)
43 if (!serial_console) 44 if (!serial_console)
44 return; 45 return;
45 46
46 if (serial_console == 1) { 47 switch (serial_console) {
48 case PROMDEV_OTTYA:
47 mode_prop[3] = 'a'; 49 mode_prop[3] = 'a';
48 cd_prop[3] = 'a'; 50 cd_prop[3] = 'a';
49 dtr_prop[3] = 'a'; 51 dtr_prop[3] = 'a';
50 } else { 52 break;
53
54 case PROMDEV_OTTYB:
51 mode_prop[3] = 'b'; 55 mode_prop[3] = 'b';
52 cd_prop[3] = 'b'; 56 cd_prop[3] = 'b';
53 dtr_prop[3] = 'b'; 57 dtr_prop[3] = 'b';
58 break;
59
60 case PROMDEV_ORSC:
61
62 nd = prom_pathtoinode("rsc");
63 if (!nd) {
64 strcpy(mode, "115200,8,n,1,-");
65 goto no_options;
66 }
67
68 if (!prom_node_has_property(nd, ssp_console_modes_prop)) {
69 strcpy(mode, "115200,8,n,1,-");
70 goto no_options;
71 }
72
73 memset(mode, 0, sizeof(mode));
74 prom_getstring(nd, ssp_console_modes_prop, mode, sizeof(mode));
75 goto no_options;
76
77 default:
78 strcpy(mode, "9600,8,n,1,-");
79 goto no_options;
54 } 80 }
55 81
56 topnd = prom_getchild(prom_root_node); 82 topnd = prom_getchild(prom_root_node);
@@ -110,6 +136,10 @@ no_options:
110 case 9600: cflag |= B9600; break; 136 case 9600: cflag |= B9600; break;
111 case 19200: cflag |= B19200; break; 137 case 19200: cflag |= B19200; break;
112 case 38400: cflag |= B38400; break; 138 case 38400: cflag |= B38400; break;
139 case 57600: cflag |= B57600; break;
140 case 115200: cflag |= B115200; break;
141 case 230400: cflag |= B230400; break;
142 case 460800: cflag |= B460800; break;
113 default: baud = 9600; cflag |= B9600; break; 143 default: baud = 9600; cflag |= B9600; break;
114 } 144 }
115 145
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 7e773ff76c61..8bcaebcc0ad7 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -897,9 +897,6 @@ static int sunsab_console_setup(struct console *con, char *options)
897 897
898 sunserial_console_termios(con); 898 sunserial_console_termios(con);
899 899
900 /* Firmware console speed is limited to 150-->38400 baud so
901 * this hackish cflag thing is OK.
902 */
903 switch (con->cflag & CBAUD) { 900 switch (con->cflag & CBAUD) {
904 case B150: baud = 150; break; 901 case B150: baud = 150; break;
905 case B300: baud = 300; break; 902 case B300: baud = 300; break;
@@ -910,6 +907,10 @@ static int sunsab_console_setup(struct console *con, char *options)
910 default: case B9600: baud = 9600; break; 907 default: case B9600: baud = 9600; break;
911 case B19200: baud = 19200; break; 908 case B19200: baud = 19200; break;
912 case B38400: baud = 38400; break; 909 case B38400: baud = 38400; break;
910 case B57600: baud = 57600; break;
911 case B115200: baud = 115200; break;
912 case B230400: baud = 230400; break;
913 case B460800: baud = 460800; break;
913 }; 914 };
914 915
915 /* 916 /*