aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-04-30 11:42:45 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-04-30 11:42:45 -0400
commit5a5a1bf099d6942399ea0b34a62e5f0bc4c5c36e (patch)
treedf094aa1544281ec0894eee48ad60c9d000a18ba
parent74c7d2f5200a340ae6655e9adcf990381e387937 (diff)
parent5379f8c0d72cab43bbe6d974ceb3ad84dddc2b8e (diff)
Merge branch 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 RAS changes from Ingo Molnar:

 - Add an Intel CMCI hotplug fix

 - Add AMD family 16h EDAC support

 - Make the AMD MCE banks code more flexible for virtual environments

* 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  amd64_edac: Add Family 16h support
  x86/mce: Rework cmci_rediscover() to play well with CPU hotplug
  x86, MCE, AMD: Use MCG_CAP MSR to find out number of banks on AMD
  x86, MCE, AMD: Replace shared_bank array with is_shared_bank() helper
-rw-r--r--arch/x86/include/asm/mce.h4
-rw-r--r--arch/x86/kernel/amd_nb.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c39
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_intel.c25
-rw-r--r--drivers/edac/amd64_edac.c65
-rw-r--r--drivers/edac/amd64_edac.h4
-rw-r--r--include/linux/pci_ids.h2
8 files changed, 103 insertions, 41 deletions
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index f4076af1f4ed..fa5f71e021d5 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -146,13 +146,13 @@ DECLARE_PER_CPU(struct device *, mce_device);
146void mce_intel_feature_init(struct cpuinfo_x86 *c); 146void mce_intel_feature_init(struct cpuinfo_x86 *c);
147void cmci_clear(void); 147void cmci_clear(void);
148void cmci_reenable(void); 148void cmci_reenable(void);
149void cmci_rediscover(int dying); 149void cmci_rediscover(void);
150void cmci_recheck(void); 150void cmci_recheck(void);
151#else 151#else
152static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { } 152static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
153static inline void cmci_clear(void) {} 153static inline void cmci_clear(void) {}
154static inline void cmci_reenable(void) {} 154static inline void cmci_reenable(void) {}
155static inline void cmci_rediscover(int dying) {} 155static inline void cmci_rediscover(void) {}
156static inline void cmci_recheck(void) {} 156static inline void cmci_recheck(void) {}
157#endif 157#endif
158 158
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 3684129be947..3048ded1b598 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,12 +20,14 @@ const struct pci_device_id amd_nb_misc_ids[] = {
20 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, 20 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
21 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) }, 21 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
22 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) }, 22 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
23 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
23 {} 24 {}
24}; 25};
25EXPORT_SYMBOL(amd_nb_misc_ids); 26EXPORT_SYMBOL(amd_nb_misc_ids);
26 27
27static const struct pci_device_id amd_nb_link_ids[] = { 28static const struct pci_device_id amd_nb_link_ids[] = {
28 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, 29 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
30 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
29 {} 31 {}
30}; 32};
31 33
@@ -81,7 +83,6 @@ int amd_cache_northbridges(void)
81 next_northbridge(link, amd_nb_link_ids); 83 next_northbridge(link, amd_nb_link_ids);
82 } 84 }
83 85
84 /* some CPU families (e.g. family 0x11) do not support GART */
85 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 || 86 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
86 boot_cpu_data.x86 == 0x15) 87 boot_cpu_data.x86 == 0x15)
87 amd_northbridges.flags |= AMD_NB_GART; 88 amd_northbridges.flags |= AMD_NB_GART;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7bc126346ace..9239504b41cb 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2358,7 +2358,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2358 2358
2359 if (action == CPU_POST_DEAD) { 2359 if (action == CPU_POST_DEAD) {
2360 /* intentionally ignoring frozen here */ 2360 /* intentionally ignoring frozen here */
2361 cmci_rediscover(cpu); 2361 cmci_rediscover();
2362 } 2362 }
2363 2363
2364 return NOTIFY_OK; 2364 return NOTIFY_OK;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 1ac581f38dfa..9cb52767999a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -33,7 +33,6 @@
33#include <asm/mce.h> 33#include <asm/mce.h>
34#include <asm/msr.h> 34#include <asm/msr.h>
35 35
36#define NR_BANKS 6
37#define NR_BLOCKS 9 36#define NR_BLOCKS 9
38#define THRESHOLD_MAX 0xFFF 37#define THRESHOLD_MAX 0xFFF
39#define INT_TYPE_APIC 0x00020000 38#define INT_TYPE_APIC 0x00020000
@@ -57,12 +56,7 @@ static const char * const th_names[] = {
57 "execution_unit", 56 "execution_unit",
58}; 57};
59 58
60static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks); 59static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
61
62static unsigned char shared_bank[NR_BANKS] = {
63 0, 0, 0, 0, 1
64};
65
66static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ 60static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
67 61
68static void amd_threshold_interrupt(void); 62static void amd_threshold_interrupt(void);
@@ -79,6 +73,12 @@ struct thresh_restart {
79 u16 old_limit; 73 u16 old_limit;
80}; 74};
81 75
76static inline bool is_shared_bank(int bank)
77{
78 /* Bank 4 is for northbridge reporting and is thus shared */
79 return (bank == 4);
80}
81
82static const char * const bank4_names(struct threshold_block *b) 82static const char * const bank4_names(struct threshold_block *b)
83{ 83{
84 switch (b->address) { 84 switch (b->address) {
@@ -214,7 +214,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
214 unsigned int bank, block; 214 unsigned int bank, block;
215 int offset = -1; 215 int offset = -1;
216 216
217 for (bank = 0; bank < NR_BANKS; ++bank) { 217 for (bank = 0; bank < mca_cfg.banks; ++bank) {
218 for (block = 0; block < NR_BLOCKS; ++block) { 218 for (block = 0; block < NR_BLOCKS; ++block) {
219 if (block == 0) 219 if (block == 0)
220 address = MSR_IA32_MC0_MISC + bank * 4; 220 address = MSR_IA32_MC0_MISC + bank * 4;
@@ -276,7 +276,7 @@ static void amd_threshold_interrupt(void)
276 mce_setup(&m); 276 mce_setup(&m);
277 277
278 /* assume first bank caused it */ 278 /* assume first bank caused it */
279 for (bank = 0; bank < NR_BANKS; ++bank) { 279 for (bank = 0; bank < mca_cfg.banks; ++bank) {
280 if (!(per_cpu(bank_map, m.cpu) & (1 << bank))) 280 if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
281 continue; 281 continue;
282 for (block = 0; block < NR_BLOCKS; ++block) { 282 for (block = 0; block < NR_BLOCKS; ++block) {
@@ -467,7 +467,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
467 u32 low, high; 467 u32 low, high;
468 int err; 468 int err;
469 469
470 if ((bank >= NR_BANKS) || (block >= NR_BLOCKS)) 470 if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
471 return 0; 471 return 0;
472 472
473 if (rdmsr_safe_on_cpu(cpu, address, &low, &high)) 473 if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
@@ -575,7 +575,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
575 const char *name = th_names[bank]; 575 const char *name = th_names[bank];
576 int err = 0; 576 int err = 0;
577 577
578 if (shared_bank[bank]) { 578 if (is_shared_bank(bank)) {
579 nb = node_to_amd_nb(amd_get_nb_id(cpu)); 579 nb = node_to_amd_nb(amd_get_nb_id(cpu));
580 580
581 /* threshold descriptor already initialized on this node? */ 581 /* threshold descriptor already initialized on this node? */
@@ -609,7 +609,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
609 609
610 per_cpu(threshold_banks, cpu)[bank] = b; 610 per_cpu(threshold_banks, cpu)[bank] = b;
611 611
612 if (shared_bank[bank]) { 612 if (is_shared_bank(bank)) {
613 atomic_set(&b->cpus, 1); 613 atomic_set(&b->cpus, 1);
614 614
615 /* nb is already initialized, see above */ 615 /* nb is already initialized, see above */
@@ -635,9 +635,17 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
635static __cpuinit int threshold_create_device(unsigned int cpu) 635static __cpuinit int threshold_create_device(unsigned int cpu)
636{ 636{
637 unsigned int bank; 637 unsigned int bank;
638 struct threshold_bank **bp;
638 int err = 0; 639 int err = 0;
639 640
640 for (bank = 0; bank < NR_BANKS; ++bank) { 641 bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
642 GFP_KERNEL);
643 if (!bp)
644 return -ENOMEM;
645
646 per_cpu(threshold_banks, cpu) = bp;
647
648 for (bank = 0; bank < mca_cfg.banks; ++bank) {
641 if (!(per_cpu(bank_map, cpu) & (1 << bank))) 649 if (!(per_cpu(bank_map, cpu) & (1 << bank)))
642 continue; 650 continue;
643 err = threshold_create_bank(cpu, bank); 651 err = threshold_create_bank(cpu, bank);
@@ -691,7 +699,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
691 if (!b->blocks) 699 if (!b->blocks)
692 goto free_out; 700 goto free_out;
693 701
694 if (shared_bank[bank]) { 702 if (is_shared_bank(bank)) {
695 if (!atomic_dec_and_test(&b->cpus)) { 703 if (!atomic_dec_and_test(&b->cpus)) {
696 __threshold_remove_blocks(b); 704 __threshold_remove_blocks(b);
697 per_cpu(threshold_banks, cpu)[bank] = NULL; 705 per_cpu(threshold_banks, cpu)[bank] = NULL;
@@ -719,11 +727,12 @@ static void threshold_remove_device(unsigned int cpu)
719{ 727{
720 unsigned int bank; 728 unsigned int bank;
721 729
722 for (bank = 0; bank < NR_BANKS; ++bank) { 730 for (bank = 0; bank < mca_cfg.banks; ++bank) {
723 if (!(per_cpu(bank_map, cpu) & (1 << bank))) 731 if (!(per_cpu(bank_map, cpu) & (1 << bank)))
724 continue; 732 continue;
725 threshold_remove_bank(cpu, bank); 733 threshold_remove_bank(cpu, bank);
726 } 734 }
735 kfree(per_cpu(threshold_banks, cpu));
727} 736}
728 737
729/* get notified when a cpu comes on/off */ 738/* get notified when a cpu comes on/off */
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 402c454fbff0..ae1697c2afe3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -285,39 +285,24 @@ void cmci_clear(void)
285 raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); 285 raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
286} 286}
287 287
288static long cmci_rediscover_work_func(void *arg) 288static void cmci_rediscover_work_func(void *arg)
289{ 289{
290 int banks; 290 int banks;
291 291
292 /* Recheck banks in case CPUs don't all have the same */ 292 /* Recheck banks in case CPUs don't all have the same */
293 if (cmci_supported(&banks)) 293 if (cmci_supported(&banks))
294 cmci_discover(banks); 294 cmci_discover(banks);
295
296 return 0;
297} 295}
298 296
299/* 297/* After a CPU went down cycle through all the others and rediscover */
300 * After a CPU went down cycle through all the others and rediscover 298void cmci_rediscover(void)
301 * Must run in process context.
302 */
303void cmci_rediscover(int dying)
304{ 299{
305 int cpu, banks; 300 int banks;
306 301
307 if (!cmci_supported(&banks)) 302 if (!cmci_supported(&banks))
308 return; 303 return;
309 304
310 for_each_online_cpu(cpu) { 305 on_each_cpu(cmci_rediscover_work_func, NULL, 1);
311 if (cpu == dying)
312 continue;
313
314 if (cpu == smp_processor_id()) {
315 cmci_rediscover_work_func(NULL);
316 continue;
317 }
318
319 work_on_cpu(cpu, cmci_rediscover_work_func, NULL);
320 }
321} 306}
322 307
323/* 308/*
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index e1d13c463c90..8b6a0343c220 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -98,6 +98,7 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
98 * 98 *
99 * F15h: we select which DCT we access using F1x10C[DctCfgSel] 99 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
100 * 100 *
101 * F16h: has only 1 DCT
101 */ 102 */
102static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, 103static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
103 const char *func) 104 const char *func)
@@ -340,6 +341,27 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
340 base_bits = GENMASK(21, 31) | GENMASK(9, 15); 341 base_bits = GENMASK(21, 31) | GENMASK(9, 15);
341 mask_bits = GENMASK(21, 29) | GENMASK(9, 15); 342 mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
342 addr_shift = 4; 343 addr_shift = 4;
344
345 /*
346 * F16h needs two addr_shift values: 8 for high and 6 for low
347 * (cf. F16h BKDG).
348 */
349 } else if (boot_cpu_data.x86 == 0x16) {
350 csbase = pvt->csels[dct].csbases[csrow];
351 csmask = pvt->csels[dct].csmasks[csrow >> 1];
352
353 *base = (csbase & GENMASK(5, 15)) << 6;
354 *base |= (csbase & GENMASK(19, 30)) << 8;
355
356 *mask = ~0ULL;
357 /* poke holes for the csmask */
358 *mask &= ~((GENMASK(5, 15) << 6) |
359 (GENMASK(19, 30) << 8));
360
361 *mask |= (csmask & GENMASK(5, 15)) << 6;
362 *mask |= (csmask & GENMASK(19, 30)) << 8;
363
364 return;
343 } else { 365 } else {
344 csbase = pvt->csels[dct].csbases[csrow]; 366 csbase = pvt->csels[dct].csbases[csrow];
345 csmask = pvt->csels[dct].csmasks[csrow >> 1]; 367 csmask = pvt->csels[dct].csmasks[csrow >> 1];
@@ -1150,6 +1172,21 @@ static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1150 return ddr3_cs_size(cs_mode, false); 1172 return ddr3_cs_size(cs_mode, false);
1151} 1173}
1152 1174
1175/*
1176 * F16h has only limited cs_modes
1177 */
1178static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1179 unsigned cs_mode)
1180{
1181 WARN_ON(cs_mode > 12);
1182
1183 if (cs_mode == 6 || cs_mode == 8 ||
1184 cs_mode == 9 || cs_mode == 12)
1185 return -1;
1186 else
1187 return ddr3_cs_size(cs_mode, false);
1188}
1189
1153static void read_dram_ctl_register(struct amd64_pvt *pvt) 1190static void read_dram_ctl_register(struct amd64_pvt *pvt)
1154{ 1191{
1155 1192
@@ -1587,6 +1624,17 @@ static struct amd64_family_type amd64_family_types[] = {
1587 .read_dct_pci_cfg = f15_read_dct_pci_cfg, 1624 .read_dct_pci_cfg = f15_read_dct_pci_cfg,
1588 } 1625 }
1589 }, 1626 },
1627 [F16_CPUS] = {
1628 .ctl_name = "F16h",
1629 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
1630 .f3_id = PCI_DEVICE_ID_AMD_16H_NB_F3,
1631 .ops = {
1632 .early_channel_count = f1x_early_channel_count,
1633 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1634 .dbam_to_cs = f16_dbam_to_chip_select,
1635 .read_dct_pci_cfg = f10_read_dct_pci_cfg,
1636 }
1637 },
1590}; 1638};
1591 1639
1592/* 1640/*
@@ -1939,7 +1987,9 @@ static void read_mc_regs(struct amd64_pvt *pvt)
1939 1987
1940 if (c->x86 >= 0x10) { 1988 if (c->x86 >= 0x10) {
1941 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); 1989 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
1942 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1); 1990 if (c->x86 != 0x16)
1991 /* F16h has only DCT0 */
1992 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
1943 1993
1944 /* F10h, revD and later can do x8 ECC too */ 1994 /* F10h, revD and later can do x8 ECC too */
1945 if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25)) 1995 if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
@@ -2356,6 +2406,11 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
2356 pvt->ops = &amd64_family_types[F15_CPUS].ops; 2406 pvt->ops = &amd64_family_types[F15_CPUS].ops;
2357 break; 2407 break;
2358 2408
2409 case 0x16:
2410 fam_type = &amd64_family_types[F16_CPUS];
2411 pvt->ops = &amd64_family_types[F16_CPUS].ops;
2412 break;
2413
2359 default: 2414 default:
2360 amd64_err("Unsupported family!\n"); 2415 amd64_err("Unsupported family!\n");
2361 return NULL; 2416 return NULL;
@@ -2581,6 +2636,14 @@ static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
2581 .class = 0, 2636 .class = 0,
2582 .class_mask = 0, 2637 .class_mask = 0,
2583 }, 2638 },
2639 {
2640 .vendor = PCI_VENDOR_ID_AMD,
2641 .device = PCI_DEVICE_ID_AMD_16H_NB_F2,
2642 .subvendor = PCI_ANY_ID,
2643 .subdevice = PCI_ANY_ID,
2644 .class = 0,
2645 .class_mask = 0,
2646 },
2584 2647
2585 {0, } 2648 {0, }
2586}; 2649};
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 35637d83f235..2c6f113bae2b 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -172,7 +172,8 @@
172 */ 172 */
173#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 173#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
174#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 174#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
175 175#define PCI_DEVICE_ID_AMD_16H_NB_F1 0x1531
176#define PCI_DEVICE_ID_AMD_16H_NB_F2 0x1532
176 177
177/* 178/*
178 * Function 1 - Address Map 179 * Function 1 - Address Map
@@ -296,6 +297,7 @@ enum amd_families {
296 K8_CPUS = 0, 297 K8_CPUS = 0,
297 F10_CPUS, 298 F10_CPUS,
298 F15_CPUS, 299 F15_CPUS,
300 F16_CPUS,
299 NUM_FAMILIES, 301 NUM_FAMILIES,
300}; 302};
301 303
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a80f9e6ce9e5..2b85c521f737 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -524,6 +524,8 @@
524#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603 524#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603
525#define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604 525#define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604
526#define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605 526#define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605
527#define PCI_DEVICE_ID_AMD_16H_NB_F3 0x1533
528#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534
527#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 529#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
528#define PCI_DEVICE_ID_AMD_LANCE 0x2000 530#define PCI_DEVICE_ID_AMD_LANCE 0x2000
529#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 531#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001