Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86/amd_iommu.h         3
-rw-r--r--  include/asm-x86/amd_iommu_types.h  64
-rw-r--r--  include/asm-x86/bitops.h           10
-rw-r--r--  include/asm-x86/dma-mapping.h      87
-rw-r--r--  include/asm-x86/gart.h              2
-rw-r--r--  include/asm-x86/iommu.h             1
-rw-r--r--  include/linux/dma-mapping.h        12
-rw-r--r--  include/linux/iommu-helper.h       16
-rw-r--r--  include/linux/pci_ids.h            10
9 files changed, 177 insertions(+), 28 deletions(-)
diff --git a/include/asm-x86/amd_iommu.h b/include/asm-x86/amd_iommu.h
index 783f43e58052..041d0db7da27 100644
--- a/include/asm-x86/amd_iommu.h
+++ b/include/asm-x86/amd_iommu.h
@@ -20,10 +20,13 @@
 #ifndef ASM_X86__AMD_IOMMU_H
 #define ASM_X86__AMD_IOMMU_H
 
+#include <linux/irqreturn.h>
+
 #ifdef CONFIG_AMD_IOMMU
 extern int amd_iommu_init(void);
 extern int amd_iommu_init_dma_ops(void);
 extern void amd_iommu_detect(void);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 #else
 static inline int amd_iommu_init(void) { return -ENODEV; }
 static inline void amd_iommu_detect(void) { }
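The header now pulls in <linux/irqreturn.h> because the new amd_iommu_int_handler() returns irqreturn_t; its prototype matches the irq_handler_t type that request_irq() expects. A minimal sketch of how such a handler gets wired to an MSI vector; the flag, name, and setup path are illustrative assumptions, not taken from this patch:

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	static int example_enable_iommu_irq(struct pci_dev *pdev)
	{
		int ret = pci_enable_msi(pdev);	/* allocate an MSI vector */
		if (ret)
			return ret;

		/* amd_iommu_int_handler(int, void *) matches irq_handler_t */
		ret = request_irq(pdev->irq, amd_iommu_int_handler,
				  IRQF_SHARED, "example-amd-iommu", pdev);
		if (ret)
			pci_disable_msi(pdev);
		return ret;
	}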
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
index 1ffa4e53c989..b3085869a17b 100644
--- a/include/asm-x86/amd_iommu_types.h
+++ b/include/asm-x86/amd_iommu_types.h
@@ -37,6 +37,7 @@
 /* Capability offsets used by the driver */
 #define MMIO_CAP_HDR_OFFSET	0x00
 #define MMIO_RANGE_OFFSET	0x0c
+#define MMIO_MISC_OFFSET	0x10
 
 /* Masks, shifts and macros to parse the device range capability */
 #define MMIO_RANGE_LD_MASK	0xff000000
@@ -48,6 +49,7 @@
 #define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
 #define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
 #define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
+#define MMIO_MSI_NUM(x)	((x) & 0x1f)
 
 /* Flag masks for the AMD IOMMU exclusion range */
 #define MMIO_EXCL_ENABLE_MASK 0x01ULL
@@ -69,6 +71,25 @@
 /* MMIO status bits */
 #define MMIO_STATUS_COM_WAIT_INT_MASK	0x04
 
+/* event logging constants */
+#define EVENT_ENTRY_SIZE	0x10
+#define EVENT_TYPE_SHIFT	28
+#define EVENT_TYPE_MASK		0xf
+#define EVENT_TYPE_ILL_DEV	0x1
+#define EVENT_TYPE_IO_FAULT	0x2
+#define EVENT_TYPE_DEV_TAB_ERR	0x3
+#define EVENT_TYPE_PAGE_TAB_ERR	0x4
+#define EVENT_TYPE_ILL_CMD	0x5
+#define EVENT_TYPE_CMD_HARD_ERR	0x6
+#define EVENT_TYPE_IOTLB_INV_TO	0x7
+#define EVENT_TYPE_INV_DEV_REQ	0x8
+#define EVENT_DEVID_MASK	0xffff
+#define EVENT_DEVID_SHIFT	0
+#define EVENT_DOMID_MASK	0xffff
+#define EVENT_DOMID_SHIFT	0
+#define EVENT_FLAGS_MASK	0xfff
+#define EVENT_FLAGS_SHIFT	0x10
+
 /* feature control bits */
 #define CONTROL_IOMMU_EN        0x00ULL
 #define CONTROL_HT_TUN_EN       0x01ULL
@@ -109,6 +130,8 @@
 #define DEV_ENTRY_NMI_PASS      0xba
 #define DEV_ENTRY_LINT0_PASS    0xbe
 #define DEV_ENTRY_LINT1_PASS    0xbf
+#define DEV_ENTRY_MODE_MASK	0x07
+#define DEV_ENTRY_MODE_SHIFT	0x09
 
 /* constants to configure the command buffer */
 #define CMD_BUFFER_SIZE 8192
@@ -116,6 +139,10 @@
 #define MMIO_CMD_SIZE_SHIFT 56
 #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
 
+/* constants for event buffer handling */
+#define EVT_BUFFER_SIZE		8192 /* 512 entries */
+#define EVT_LEN_MASK		(0x9ULL << 56)
+
 #define PAGE_MODE_1_LEVEL 0x01
 #define PAGE_MODE_2_LEVEL 0x02
 #define PAGE_MODE_3_LEVEL 0x03
@@ -134,6 +161,7 @@
 #define IOMMU_MAP_SIZE_L3 (1ULL << 39)
 
 #define IOMMU_PTE_P  (1ULL << 0)
+#define IOMMU_PTE_TV (1ULL << 1)
 #define IOMMU_PTE_U  (1ULL << 59)
 #define IOMMU_PTE_FC (1ULL << 60)
 #define IOMMU_PTE_IR (1ULL << 61)
@@ -159,6 +187,9 @@
 
 #define MAX_DOMAIN_ID 65536
 
+/* FIXME: move this macro to <linux/pci.h> */
+#define PCI_BUS(x) (((x) >> 8) & 0xff)
+
 /*
  * This structure contains generic data for IOMMU protection domains
  * independent of their use.
@@ -196,6 +227,15 @@ struct dma_ops_domain {
 	 * just calculate its address in constant time.
 	 */
 	u64 **pte_pages;
+
+	/* This will be set to true when TLB needs to be flushed */
+	bool need_flush;
+
+	/*
+	 * if this is a preallocated domain, keep the device for which it was
+	 * preallocated in this variable
+	 */
+	u16 target_dev;
 };
 
 /*
@@ -208,8 +248,9 @@ struct amd_iommu {
 	/* locks the accesses to the hardware */
 	spinlock_t lock;
 
-	/* device id of this IOMMU */
-	u16 devid;
+	/* Pointer to PCI device of this IOMMU */
+	struct pci_dev *dev;
+
 	/*
 	 * Capability pointer. There could be more than one IOMMU per PCI
 	 * device function if there are more than one AMD IOMMU capability
@@ -225,6 +266,9 @@ struct amd_iommu {
 	/* capabilities of that IOMMU read from ACPI */
 	u32 cap;
 
+	/* pci domain of this IOMMU */
+	u16 pci_seg;
+
 	/* first device this IOMMU handles. read from PCI */
 	u16 first_device;
 	/* last device this IOMMU handles. read from PCI */
@@ -240,9 +284,19 @@ struct amd_iommu {
 	/* size of command buffer */
 	u32 cmd_buf_size;
 
+	/* event buffer virtual address */
+	u8 *evt_buf;
+	/* size of event buffer */
+	u32 evt_buf_size;
+	/* MSI number for event interrupt */
+	u16 evt_msi_num;
+
 	/* if one, we need to send a completion wait command */
 	int need_sync;
 
+	/* true if interrupts for this IOMMU are already enabled */
+	bool int_enabled;
+
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
 };
@@ -322,6 +376,12 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
 /* will be 1 if device isolation is enabled */
 extern int amd_iommu_isolate;
 
+/*
+ * If true, the addresses will be flushed on unmap time, not when
+ * they are reused
+ */
+extern bool amd_iommu_unmap_flush;
+
 /* takes a PCI device id and prints it out in a readable form */
 static inline void print_devid(u16 devid, int nl)
 {
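With the EVENT_* constants in place, decoding one 16-byte event log entry is a matter of masking and shifting its first two 32-bit words. A sketch of a hypothetical decoder (the layout follows the masks and shifts above; the function name and message text are illustrative):

	static void example_decode_event(u32 *event)
	{
		int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
		u16 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
		u16 domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
		u16 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;

		if (type == EVENT_TYPE_IO_FAULT)
			printk(KERN_ERR "IO_PAGE_FAULT dev=%02x:%02x.%x "
			       "domain=%04x flags=%03x\n",
			       PCI_BUS(devid), PCI_SLOT(devid),
			       PCI_FUNC(devid), domid, flags);
	}

Note how the new PCI_BUS() macro pairs with the existing PCI_SLOT()/PCI_FUNC() from <linux/pci.h> to break a 16-bit device id apart.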
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 61989b93b475..451a74762bd4 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -424,16 +424,6 @@ static inline int fls(int x)
 
 #undef ADDR
 
-static inline void set_bit_string(unsigned long *bitmap,
-				  unsigned long i, int len)
-{
-	unsigned long end = i + len;
-	while (i < end) {
-		__set_bit(i, bitmap);
-		i++;
-	}
-}
-
 #ifdef __KERNEL__
 
 #include <asm-generic/bitops/sched.h>
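The open-coded set_bit_string() loop disappears here; the iommu-helper changes further down in this diff declare iommu_area_reserve(), which covers the same bit-range reservation for the IOMMU users. The call-site conversion, roughly (names taken from the declarations in this patch):

	/* before: set every bit in [start, start + pages) by hand */
	set_bit_string(bitmap, start, pages);

	/* after: the iommu-helper equivalent */
	iommu_area_reserve(bitmap, start, pages);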
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index 5d200e78bd81..219c33d6361c 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -9,12 +9,12 @@
 #include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
+#include <asm-generic/dma-coherent.h>
 
 extern dma_addr_t bad_dma_address;
 extern int iommu_merge;
-extern struct device fallback_dev;
+extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
-extern int force_iommu;
 
 struct dma_mapping_ops {
 	int (*mapping_error)(struct device *dev,
@@ -25,9 +25,6 @@ struct dma_mapping_ops {
 			     void *vaddr, dma_addr_t dma_handle);
 	dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
 				 size_t size, int direction);
-	/* like map_single, but doesn't check the device mask */
-	dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t ptr,
-				 size_t size, int direction);
 	void (*unmap_single)(struct device *dev, dma_addr_t addr,
 			     size_t size, int direction);
 	void (*sync_single_for_cpu)(struct device *hwdev,
@@ -68,7 +65,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 		return dma_ops;
 	else
 		return dev->archdata.dma_ops;
-#endif
+#endif /* ASM_X86__DMA_MAPPING_H */
 }
 
 /* Make sure we keep the same behaviour */
@@ -87,17 +84,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle);
-
+#define dma_is_consistent(d, h)	(1)
 
 extern int dma_supported(struct device *hwdev, u64 mask);
 extern int dma_set_mask(struct device *dev, u64 mask);
 
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_addr, gfp_t flag);
+
 static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
 	       int direction)
@@ -247,7 +241,68 @@ static inline int dma_get_cache_alignment(void)
 	return boot_cpu_data.x86_clflush_size;
 }
 
-#define dma_is_consistent(d, h)	(1)
+static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
+						    gfp_t gfp)
+{
+	unsigned long dma_mask = 0;
 
-#include <asm-generic/dma-coherent.h>
-#endif /* ASM_X86__DMA_MAPPING_H */
+	dma_mask = dev->coherent_dma_mask;
+	if (!dma_mask)
+		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
+
+	return dma_mask;
+}
+
+static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
+{
+#ifdef CONFIG_X86_64
+	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
+
+	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+		gfp |= GFP_DMA32;
+#endif
+	return gfp;
+}
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp)
+{
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	void *memory;
+
+	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+
+	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+		return memory;
+
+	if (!dev) {
+		dev = &x86_dma_fallback_dev;
+		gfp |= GFP_DMA;
+	}
+
+	if (!is_device_dma_capable(dev))
+		return NULL;
+
+	if (!ops->alloc_coherent)
+		return NULL;
+
+	return ops->alloc_coherent(dev, size, dma_handle,
+				   dma_alloc_coherent_gfp_flags(dev, gfp));
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t bus)
+{
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
+	WARN_ON(irqs_disabled());	/* for portability */
+
+	if (dma_release_from_coherent(dev, get_order(size), vaddr))
+		return;
+
+	if (ops->free_coherent)
+		ops->free_coherent(dev, size, vaddr, bus);
+}
+
+#endif
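dma_alloc_coherent()/dma_free_coherent() become inline wrappers that try the per-device coherent pool first, fall back to x86_dma_fallback_dev when no device is given, and only then call into the dma_mapping_ops backend. The calling convention is unchanged; a minimal usage sketch (device and size are illustrative):

	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(&pdev->dev, 4096, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;
	/* the device DMAs via dma_handle, the CPU uses cpu_addr */
	dma_free_coherent(&pdev->dev, 4096, cpu_addr, dma_handle);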
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
index baa54faba892..605edb39ef9e 100644
--- a/include/asm-x86/gart.h
+++ b/include/asm-x86/gart.h
@@ -29,6 +29,8 @@ extern int fix_aperture;
 #define AMD64_GARTCACHECTL	0x9c
 #define AMD64_GARTEN		(1<<0)
 
+extern int agp_amd64_init(void);
+
 static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
 {
 	u32 tmp, ctl;
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index e86f44148c66..546ad3110fea 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -6,6 +6,7 @@ extern void no_iommu_init(void);
 extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
+extern int dmar_disabled;
 
 extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len);
 
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 952e0f857ac9..ba9114ec5d3a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -48,6 +48,11 @@ static inline int is_device_dma_capable(struct device *dev)
 	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
 }
 
+static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
+{
+	return addr + size <= mask;
+}
+
 #ifdef CONFIG_HAS_DMA
 #include <asm/dma-mapping.h>
 #else
@@ -58,6 +63,13 @@ static inline int is_device_dma_capable(struct device *dev)
 #define dma_sync_single		dma_sync_single_for_cpu
 #define dma_sync_sg		dma_sync_sg_for_cpu
 
+static inline u64 dma_get_mask(struct device *dev)
+{
+	if (dev && dev->dma_mask && *dev->dma_mask)
+		return *dev->dma_mask;
+	return DMA_32BIT_MASK;
+}
+
 extern u64 dma_get_required_mask(struct device *dev);
 
 static inline unsigned int dma_get_max_seg_size(struct device *dev)
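is_buffer_dma_capable() centralizes the "does this buffer end below the device's mask" test, and dma_get_mask() supplies a safe 32-bit default when no mask is set. A sketch of the check a no-IOMMU map_single path can now make (the function name is hypothetical; bad_dma_address is the extern from the x86 header above):

	static dma_addr_t example_nommu_map(struct device *dev,
					    phys_addr_t paddr, size_t size)
	{
		dma_addr_t addr = paddr;	/* 1:1 without an IOMMU */

		if (!is_buffer_dma_capable(dma_get_mask(dev), addr, size))
			return bad_dma_address;	/* caller must bounce */
		return addr;
	}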
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index c975caf75385..a6d0586e2bf7 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -1,6 +1,20 @@
+#ifndef _LINUX_IOMMU_HELPER_H
+#define _LINUX_IOMMU_HELPER_H
+
+static inline unsigned long iommu_device_max_index(unsigned long size,
+						   unsigned long offset,
+						   u64 dma_mask)
+{
+	if (size + offset > dma_mask)
+		return dma_mask - offset + 1;
+	else
+		return size;
+}
+
 extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
 				  unsigned long shift,
 				  unsigned long boundary_size);
+extern void iommu_area_reserve(unsigned long *map, unsigned long i, int len);
 extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
 				      unsigned long start, unsigned int nr,
 				      unsigned long shift,
@@ -8,3 +22,5 @@ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
 				      unsigned long align_mask);
 extern void iommu_area_free(unsigned long *map, unsigned long start,
 			    unsigned int nr);
+
+#endif
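iommu_device_max_index() clamps an aperture size so that index + offset can never exceed the device's DMA mask. Two worked cases (values illustrative):

	unsigned long max;

	/* size 0x10000, offset 0x1000, 16-bit mask: 0x10000 + 0x1000 > 0xffff,
	 * so only 0xffff - 0x1000 + 1 = 0xf000 indexes are usable */
	max = iommu_device_max_index(0x10000, 0x1000, 0xffff);      /* 0xf000 */

	/* the same aperture under a 32-bit mask fits entirely */
	max = iommu_device_max_index(0x10000, 0x1000, 0xffffffff);  /* 0x10000 */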
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index f1624b396754..c114103af987 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -497,6 +497,16 @@
 #define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP	0x1101
 #define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL	0x1102
 #define PCI_DEVICE_ID_AMD_K8_NB_MISC	0x1103
+#define PCI_DEVICE_ID_AMD_10H_NB_HT	0x1200
+#define PCI_DEVICE_ID_AMD_10H_NB_MAP	0x1201
+#define PCI_DEVICE_ID_AMD_10H_NB_DRAM	0x1202
+#define PCI_DEVICE_ID_AMD_10H_NB_MISC	0x1203
+#define PCI_DEVICE_ID_AMD_10H_NB_LINK	0x1204
+#define PCI_DEVICE_ID_AMD_11H_NB_HT	0x1300
+#define PCI_DEVICE_ID_AMD_11H_NB_MAP	0x1301
+#define PCI_DEVICE_ID_AMD_11H_NB_DRAM	0x1302
+#define PCI_DEVICE_ID_AMD_11H_NB_MISC	0x1303
+#define PCI_DEVICE_ID_AMD_11H_NB_LINK	0x1304
 #define PCI_DEVICE_ID_AMD_LANCE		0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001
 #define PCI_DEVICE_ID_AMD_SCSI		0x2020
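The new family 10h/11h northbridge IDs follow the existing K8 naming and slot into the usual probe helpers; an illustrative lookup:

	#include <linux/pci.h>

	/* find the first family-10h northbridge MISC function, if any */
	struct pci_dev *nb = pci_get_device(PCI_VENDOR_ID_AMD,
					    PCI_DEVICE_ID_AMD_10H_NB_MISC, NULL);
	if (nb)
		pci_dev_put(nb);	/* drop the reference when done */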