aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndi Kleen <ak@suse.de>2006-03-25 10:30:22 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-03-25 12:10:55 -0500
commitf2d3efedbecc04dc348d723e4c90b46731b3bb48 (patch)
tree982c7838a97a5c2420de392e5a36f49eaa1778b0
parentf083a329e63d471a5e9238e837772b1b76c218db (diff)
[PATCH] x86_64: Implement early DMI scanning
There are more and more cases where we need to know DMI information early to work around bugs. i386 already had early DMI scanning, but x86-64 didn't. Implement this now. This required some cleanup in the i386 code.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--arch/i386/kernel/dmi_scan.c1
-rw-r--r--arch/x86_64/kernel/setup.c16
-rw-r--r--arch/x86_64/mm/init.c27
-rw-r--r--include/asm-i386/dmi.h11
-rw-r--r--include/asm-x86_64/dmi.h27
-rw-r--r--include/asm-x86_64/io.h8
6 files changed, 78 insertions, 12 deletions
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
index d2dfd9c8d691..ebc8dc116c43 100644
--- a/arch/i386/kernel/dmi_scan.c
+++ b/arch/i386/kernel/dmi_scan.c
@@ -5,6 +5,7 @@
5#include <linux/dmi.h> 5#include <linux/dmi.h>
6#include <linux/bootmem.h> 6#include <linux/bootmem.h>
7#include <linux/slab.h> 7#include <linux/slab.h>
8#include <asm/dmi.h>
8 9
9static char * __init dmi_string(struct dmi_header *dm, u8 s) 10static char * __init dmi_string(struct dmi_header *dm, u8 s)
10{ 11{
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 49f285871df5..8acf5313e835 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -68,6 +68,7 @@
68#include <asm/swiotlb.h> 68#include <asm/swiotlb.h>
69#include <asm/sections.h> 69#include <asm/sections.h>
70#include <asm/gart-mapping.h> 70#include <asm/gart-mapping.h>
71#include <asm/dmi.h>
71 72
72/* 73/*
73 * Machine setup.. 74 * Machine setup..
@@ -92,6 +93,12 @@ int bootloader_type;
92 93
93unsigned long saved_video_mode; 94unsigned long saved_video_mode;
94 95
96/*
97 * Early DMI memory
98 */
99int dmi_alloc_index;
100char dmi_alloc_data[DMI_MAX_DATA];
101
95/* 102/*
96 * Setup options 103 * Setup options
97 */ 104 */
@@ -620,6 +627,8 @@ void __init setup_arch(char **cmdline_p)
620 627
621 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT)); 628 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
622 629
630 dmi_scan_machine();
631
623 zap_low_mappings(0); 632 zap_low_mappings(0);
624 633
625#ifdef CONFIG_ACPI 634#ifdef CONFIG_ACPI
@@ -1412,10 +1421,3 @@ struct seq_operations cpuinfo_op = {
1412 .show = show_cpuinfo, 1421 .show = show_cpuinfo,
1413}; 1422};
1414 1423
1415static int __init run_dmi_scan(void)
1416{
1417 dmi_scan_machine();
1418 return 0;
1419}
1420core_initcall(run_dmi_scan);
1421
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 675a45691338..54c7f5975b44 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -225,6 +225,33 @@ static __meminit void unmap_low_page(int i)
225 ti->allocated = 0; 225 ti->allocated = 0;
226} 226}
227 227
228/* Must run before zap_low_mappings */
229__init void *early_ioremap(unsigned long addr, unsigned long size)
230{
231 unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
232
233 /* actually usually some more */
234 if (size >= LARGE_PAGE_SIZE) {
235 printk("SMBIOS area too long %lu\n", size);
236 return NULL;
237 }
238 set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
239 map += LARGE_PAGE_SIZE;
240 set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
241 __flush_tlb();
242 return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
243}
244
245/* To avoid virtual aliases later */
246__init void early_iounmap(void *addr, unsigned long size)
247{
248 if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
249 printk("early_iounmap: bad address %p\n", addr);
250 set_pmd(temp_mappings[0].pmd, __pmd(0));
251 set_pmd(temp_mappings[1].pmd, __pmd(0));
252 __flush_tlb();
253}
254
228static void __meminit 255static void __meminit
229phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end) 256phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
230{ 257{
diff --git a/include/asm-i386/dmi.h b/include/asm-i386/dmi.h
new file mode 100644
index 000000000000..38d4eeb7fc7e
--- /dev/null
+++ b/include/asm-i386/dmi.h
@@ -0,0 +1,11 @@
1#ifndef _ASM_DMI_H
2#define _ASM_DMI_H 1
3
4#include <asm/io.h>
5
6/* Use early IO mappings for DMI because it's initialized early */
7#define dmi_ioremap bt_ioremap
8#define dmi_iounmap bt_iounmap
9#define dmi_alloc alloc_bootmem
10
11#endif
diff --git a/include/asm-x86_64/dmi.h b/include/asm-x86_64/dmi.h
new file mode 100644
index 000000000000..93b2b15d4325
--- /dev/null
+++ b/include/asm-x86_64/dmi.h
@@ -0,0 +1,27 @@
1#ifndef _ASM_DMI_H
2#define _ASM_DMI_H 1
3
4#include <asm/io.h>
5
6extern void *dmi_ioremap(unsigned long addr, unsigned long size);
7extern void dmi_iounmap(void *addr, unsigned long size);
8
9#define DMI_MAX_DATA 2048
10
11extern int dmi_alloc_index;
12extern char dmi_alloc_data[DMI_MAX_DATA];
13
14/* This is so early that there is no good way to allocate dynamic memory.
15 Allocate data in an BSS array. */
16static inline void *dmi_alloc(unsigned len)
17{
18 int idx = dmi_alloc_index;
19 if ((dmi_alloc_index += len) > DMI_MAX_DATA)
20 return NULL;
21 return dmi_alloc_data + idx;
22}
23
24#define dmi_ioremap early_ioremap
25#define dmi_iounmap early_iounmap
26
27#endif
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index a85fe8370820..ac12bda3bb1f 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -135,6 +135,9 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
135 return __ioremap(offset, size, 0); 135 return __ioremap(offset, size, 0);
136} 136}
137 137
138extern void *early_ioremap(unsigned long addr, unsigned long size);
139extern void early_iounmap(void *addr, unsigned long size);
140
138/* 141/*
139 * This one maps high address device memory and turns off caching for that area. 142 * This one maps high address device memory and turns off caching for that area.
140 * it's useful if some control registers are in such an area and write combining 143 * it's useful if some control registers are in such an area and write combining
@@ -143,11 +146,6 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
143extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); 146extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
144extern void iounmap(volatile void __iomem *addr); 147extern void iounmap(volatile void __iomem *addr);
145 148
146/* Use normal IO mappings for DMI */
147#define dmi_ioremap ioremap
148#define dmi_iounmap(x,l) iounmap(x)
149#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
150
151/* 149/*
152 * ISA I/O bus memory addresses are 1:1 with the physical address. 150 * ISA I/O bus memory addresses are 1:1 with the physical address.
153 */ 151 */