Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/aperture_64.c      3
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c    3
-rw-r--r--  arch/x86/kernel/e820_32.c         26
-rw-r--r--  arch/x86/kernel/e820_64.c         27
-rw-r--r--  arch/x86/kernel/head_32.S          2
-rw-r--r--  arch/x86/kernel/pci-dma_64.c       5
-rw-r--r--  arch/x86/kernel/quirks.c           2
-rw-r--r--  arch/x86/kernel/reboot.c          18
-rw-r--r--  arch/x86/kernel/setup64.c         14
9 files changed, 87 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 608152a2a05e..00df126169b4 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -18,6 +18,7 @@
 #include <linux/pci.h>
 #include <linux/bitops.h>
 #include <linux/ioport.h>
+#include <linux/suspend.h>
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/gart.h>
@@ -76,6 +77,8 @@ static u32 __init allocate_aperture(void)
 	printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
 			aper_size >> 10, __pa(p));
 	insert_aperture_resource((u32)__pa(p), aper_size);
+	register_nosave_region((u32)__pa(p) >> PAGE_SHIFT,
+				(u32)__pa(p+aper_size) >> PAGE_SHIFT);
 
 	return (u32)__pa(p);
 }
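
The aperture_64.c hook is there because the GART aperture is punched out of ordinary RAM and remapped by the IOMMU, so its contents are meaningless to a hibernation image; registering it as a nosave region keeps swsusp from saving and restoring those pages. register_nosave_region() (declared in <linux/suspend.h>) takes a page-frame range; as a minimal sketch of what the new call computes:

	/* Sketch only: the nosave range spans the aperture's page frames. */
	unsigned long start_pfn = (u32)__pa(p) >> PAGE_SHIFT;
	unsigned long end_pfn   = (u32)__pa(p + aper_size) >> PAGE_SHIFT;
	register_nosave_region(start_pfn, end_pfn);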
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index be83336fddba..a6450b3ae759 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -711,7 +711,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 		trim_size = end_pfn;
 		trim_size <<= PAGE_SHIFT;
 		trim_size -= trim_start;
-		add_memory_region(trim_start, trim_size, E820_RESERVED);
+		update_memory_range(trim_start, trim_size, E820_RAM,
+				    E820_RESERVED);
 		update_e820();
 		return 1;
 	}
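
The mtrr/main.c hunk changes the semantics of the trim: add_memory_region() appended a new E820_RESERVED entry on top of whatever was already in the map, whereas update_memory_range() only retypes the parts of existing E820_RAM entries that fall inside the trimmed window, leaving ACPI and already-reserved entries untouched. A hypothetical example (addresses invented for illustration), for a machine whose MTRRs cover only the first 3.5 GiB of a 4 GiB RAM entry:

	/*
	 * before sanitizing: 0000000000-0100000000 usable
	 * after the call:    0000000000-00e0000000 usable
	 *                    00e0000000-0100000000 reserved
	 * (the overlap is resolved by the sanitize pass in update_e820())
	 */
	update_memory_range(0xe0000000ULL, 0x20000000ULL, E820_RAM, E820_RESERVED);
	update_e820();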
diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
index 4e16ef4a2659..80444c5c9b14 100644
--- a/arch/x86/kernel/e820_32.c
+++ b/arch/x86/kernel/e820_32.c
@@ -749,6 +749,32 @@ static int __init parse_memmap(char *arg)
 	return 0;
 }
 early_param("memmap", parse_memmap);
+void __init update_memory_range(u64 start, u64 size, unsigned old_type,
+				unsigned new_type)
+{
+	int i;
+
+	BUG_ON(old_type == new_type);
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		u64 final_start, final_end;
+		if (ei->type != old_type)
+			continue;
+		/* totally covered? */
+		if (ei->addr >= start && ei->size <= size) {
+			ei->type = new_type;
+			continue;
+		}
+		/* partially covered */
+		final_start = max(start, ei->addr);
+		final_end = min(start + size, ei->addr + ei->size);
+		if (final_start >= final_end)
+			continue;
+		add_memory_region(final_start, final_end - final_start,
+				  new_type);
+	}
+}
 void __init update_e820(void)
 {
 	u8 nr_map;
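
One caveat worth flagging in the new helper: the "totally covered?" test compares the entry's size against the range's size rather than comparing end addresses, so an entry that starts inside the range but extends past its end would still be retyped wholesale. A stricter containment test (a sketch, not part of this patch) would be:

	/* entry lies entirely inside [start, start + size)? */
	if (ei->addr >= start && ei->addr + ei->size <= start + size) {
		ei->type = new_type;
		continue;
	}

This is harmless for the one caller added here, since the MTRR trim always passes a range that runs to the end of RAM, but any future caller with an interior range would want the tighter check.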
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index 9f65b4cc323c..9be697126013 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -744,6 +744,33 @@ void __init finish_e820_parsing(void)
 	}
 }
 
+void __init update_memory_range(u64 start, u64 size, unsigned old_type,
+				unsigned new_type)
+{
+	int i;
+
+	BUG_ON(old_type == new_type);
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		u64 final_start, final_end;
+		if (ei->type != old_type)
+			continue;
+		/* totally covered? */
+		if (ei->addr >= start && ei->size <= size) {
+			ei->type = new_type;
+			continue;
+		}
+		/* partially covered */
+		final_start = max(start, ei->addr);
+		final_end = min(start + size, ei->addr + ei->size);
+		if (final_start >= final_end)
+			continue;
+		add_memory_region(final_start, final_end - final_start,
+				  new_type);
+	}
+}
+
 void __init update_e820(void)
 {
 	u8 nr_map;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index fd8ca53943a8..74d87ea85b5c 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -657,7 +657,7 @@ int_msg:
 	.asciz "Unknown interrupt or fault at EIP %p %p %p\n"
 
 fault_msg:
-	.ascii \
+	.asciz \
 /* fault info: */	"BUG: Int %d: CR2 %p\n" \
 /* pusha regs: */	" EDI %p ESI %p EBP %p ESP %p\n" \
 			" EBX %p EDX %p ECX %p EAX %p\n" \
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
index a82473d192a3..375cb2bc45be 100644
--- a/arch/x86/kernel/pci-dma_64.c
+++ b/arch/x86/kernel/pci-dma_64.c
@@ -53,11 +53,6 @@ dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
 	int node;
 
 	node = dev_to_node(dev);
-	if (node == -1)
-		node = numa_node_id();
-
-	if (node < first_node(node_online_map))
-		node = first_node(node_online_map);
 
 	page = alloc_pages_node(node, gfp, order);
 	return page ? page_address(page) : NULL;
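
The removed pci-dma_64.c fixups appear to be redundant because alloc_pages_node() already treats a negative node id as "current node", so the raw dev_to_node() result can be passed straight through. Paraphrasing the include/linux/gfp.h helper of this era (a sketch from memory, not part of the patch):

	static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						    unsigned int order)
	{
		if (unlikely(order >= MAX_ORDER))
			return NULL;

		/* Unknown node is current node */
		if (nid < 0)
			nid = numa_node_id();

		return __alloc_pages(gfp_mask, order,
				     NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
	}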
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index c47208fc5932..d89a648fe710 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -363,6 +363,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
 			nvidia_force_enable_hpet);
 
 /* LPC bridges */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
+			nvidia_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
 			nvidia_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 55ceb8cdef75..484c4a80d38a 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -152,6 +152,24 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "0WF810"),
 		},
 	},
+	{	/* Handle problems with rebooting on Dell Optiplex 745's DFF */
+		.callback = set_bios_reboot,
+		.ident = "Dell OptiPlex 745",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+			DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
+		},
+	},
+	{	/* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */
+		.callback = set_bios_reboot,
+		.ident = "Dell OptiPlex 745",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+			DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
+		},
+	},
 	{	/* Handle problems with rebooting on Dell 2400's */
 		.callback = set_bios_reboot,
 		.ident = "Dell PowerEdge 2400",
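
The two new reboot.c entries extend the existing DMI quirk table to OptiPlex 745 variants (boards 0MM599 and 0KW626) that hang with the default keyboard-controller reboot. They reuse the set_bios_reboot() callback defined earlier in the same file, which (paraphrased from memory, so treat as a sketch) just flips the reboot method:

	static int __init set_bios_reboot(const struct dmi_system_id *d)
	{
		if (reboot_type != BOOT_BIOS) {
			reboot_type = BOOT_BIOS;
			printk(KERN_INFO "%s series board detected. "
			       "Selecting BIOS-method for reboots.\n", d->ident);
		}
		return 0;
	}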
diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c
index 309366f8f603..e24c45677094 100644
--- a/arch/x86/kernel/setup64.c
+++ b/arch/x86/kernel/setup64.c
@@ -142,14 +142,16 @@ void __init setup_per_cpu_areas(void)
 	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
 	for_each_cpu_mask (i, cpu_possible_map) {
 		char *ptr;
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+		ptr = alloc_bootmem_pages(size);
+#else
+		int node = early_cpu_to_node(i);
 
-		if (!NODE_DATA(early_cpu_to_node(i))) {
-			printk("cpu with no node %d, num_online_nodes %d\n",
-			       i, num_online_nodes());
+		if (!node_online(node) || !NODE_DATA(node))
 			ptr = alloc_bootmem_pages(size);
-		} else {
-			ptr = alloc_bootmem_pages_node(NODE_DATA(early_cpu_to_node(i)), size);
-		}
+		else
+			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
+#endif
 		if (!ptr)
 			panic("Cannot allocate cpu data for CPU %d\n", i);
 		cpu_pda(i)->data_offset = ptr - __per_cpu_start;