Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/acpi/boot.c            |  8
-rw-r--r--  arch/i386/kernel/acpi/earlyquirk.c      |  8
-rw-r--r--  arch/i386/kernel/io_apic.c              |  4
-rw-r--r--  arch/i386/pci/irq.c                     |  4
-rw-r--r--  arch/ia64/Kconfig                       |  9
-rw-r--r--  arch/ia64/hp/sim/Kconfig                |  4
-rw-r--r--  arch/ia64/hp/sim/hpsim_irq.c            |  2
-rw-r--r--  arch/ia64/kernel/iosapic.c              |  6
-rw-r--r--  arch/ia64/kernel/irq.c                  |  4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c             |  4
-rw-r--r--  arch/ia64/kernel/irq_lsapic.c           |  2
-rw-r--r--  arch/ia64/mm/hugetlbpage.c              |  4
-rw-r--r--  arch/ia64/sn/kernel/bte.c               |  9
-rw-r--r--  arch/ia64/sn/kernel/irq.c               |  2
-rw-r--r--  arch/powerpc/Kconfig                    |  2
-rwxr-xr-x  arch/powerpc/boot/wrapper               |  4
-rw-r--r--  arch/powerpc/boot/zImage.lds.S          |  5
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c        | 47
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c           |  8
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c  | 41
-rw-r--r--  arch/x86_64/boot/setup.S                |  5
-rw-r--r--  arch/x86_64/ia32/ptrace32.c             |  2
-rw-r--r--  arch/x86_64/kernel/e820.c               |  4
-rw-r--r--  arch/x86_64/kernel/early-quirks.c       |  8
-rw-r--r--  arch/x86_64/kernel/io_apic.c            |  4
-rw-r--r--  arch/x86_64/kernel/process.c            |  3
-rw-r--r--  arch/x86_64/kernel/smp.c                |  3
-rw-r--r--  arch/x86_64/kernel/time.c               | 11
-rw-r--r--  arch/x86_64/kernel/vsyscall.c           | 45
-rw-r--r--  arch/x86_64/mm/init.c                   | 15
-rw-r--r--  arch/x86_64/pci/mmconfig.c              | 32
31 files changed, 191 insertions(+), 118 deletions(-)
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 22e4c466e5a3..d12fb97a5337 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -82,6 +82,7 @@ EXPORT_SYMBOL(acpi_strict);
 acpi_interrupt_flags acpi_sci_flags __initdata;
 int acpi_sci_override_gsi __initdata;
 int acpi_skip_timer_override __initdata;
+int acpi_use_timer_override __initdata;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -1300,6 +1301,13 @@ static int __init parse_acpi_skip_timer_override(char *arg)
 	return 0;
 }
 early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
+
+static int __init parse_acpi_use_timer_override(char *arg)
+{
+	acpi_use_timer_override = 1;
+	return 0;
+}
+early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
 #endif /* CONFIG_X86_IO_APIC */
 
 static int __init setup_acpi_sci(char *s)
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index fe799b11ac0a..c9841692bb7c 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -27,11 +27,17 @@ static int __init check_bridge(int vendor, int device)
 #ifdef CONFIG_ACPI
 	/* According to Nvidia all timer overrides are bogus unless HPET
 	   is enabled. */
-	if (vendor == PCI_VENDOR_ID_NVIDIA) {
+	if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
 		nvidia_hpet_detected = 0;
 		acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
 		if (nvidia_hpet_detected == 0) {
 			acpi_skip_timer_override = 1;
+			printk(KERN_INFO "Nvidia board "
+			       "detected. Ignoring ACPI "
+			       "timer override.\n");
+			printk(KERN_INFO "If you got timer trouble "
+			       "try acpi_use_timer_override\n");
+
 		}
 	}
 #endif
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index ad84bc2802a6..3b7a63e0ed1a 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -1287,9 +1287,11 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 			trigger == IOAPIC_LEVEL)
 		set_irq_chip_and_handler_name(irq, &ioapic_chip,
 					 handle_fasteoi_irq, "fasteoi");
-	else
+	else {
+		irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
 		set_irq_chip_and_handler_name(irq, &ioapic_chip,
 					 handle_edge_irq, "edge");
+	}
 	set_intr_gate(vector, interrupt[irq]);
 }
 
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index dbc4aae91959..69163998adeb 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -255,13 +255,13 @@ static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
  */
 static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
 {
-	static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
+	static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
 	return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
 }
 
 static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
 {
-	static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
+	static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
 	write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
 	return 1;
 }
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 14682396f7f7..683b12c6f76c 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -484,6 +484,15 @@ source "net/Kconfig"
 
 source "drivers/Kconfig"
 
+config MSPEC
+	tristate "Memory special operations driver"
+	depends on IA64
+	select IA64_UNCACHED_ALLOCATOR
+	help
+	  If you have an ia64 and you want to enable memory special
+	  operations support (formerly known as fetchop), say Y here,
+	  otherwise say N.
+
 source "fs/Kconfig"
 
 source "lib/Kconfig"
diff --git a/arch/ia64/hp/sim/Kconfig b/arch/ia64/hp/sim/Kconfig
index 18ccb1266e18..f92306bbedb8 100644
--- a/arch/ia64/hp/sim/Kconfig
+++ b/arch/ia64/hp/sim/Kconfig
@@ -13,8 +13,8 @@ config HP_SIMSERIAL_CONSOLE
 	depends on HP_SIMSERIAL
 
 config HP_SIMSCSI
-	tristate "Simulated SCSI disk"
-	depends on SCSI
+	bool "Simulated SCSI disk"
+	depends on SCSI=y
 
 endmenu
 
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index 8145547bb52d..c2f58ff364e7 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -27,7 +27,7 @@ hpsim_set_affinity_noop (unsigned int a, cpumask_t b)
 }
 
 static struct hw_interrupt_type irq_type_hp_sim = {
-	.typename = "hpsim",
+	.name = "hpsim",
 	.startup = hpsim_irq_startup,
 	.shutdown = hpsim_irq_noop,
 	.enable = hpsim_irq_noop,
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 9bf15fefa7e4..60d64950e3c2 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -426,7 +426,7 @@ iosapic_end_level_irq (unsigned int irq)
 #define iosapic_ack_level_irq		nop
 
 struct hw_interrupt_type irq_type_iosapic_level = {
-	.typename = "IO-SAPIC-level",
+	.name = "IO-SAPIC-level",
 	.startup = iosapic_startup_level_irq,
 	.shutdown = iosapic_shutdown_level_irq,
 	.enable = iosapic_enable_level_irq,
@@ -473,7 +473,7 @@ iosapic_ack_edge_irq (unsigned int irq)
 #define iosapic_end_edge_irq		nop
 
 struct hw_interrupt_type irq_type_iosapic_edge = {
-	.typename = "IO-SAPIC-edge",
+	.name = "IO-SAPIC-edge",
 	.startup = iosapic_startup_edge_irq,
 	.shutdown = iosapic_disable_edge_irq,
 	.enable = iosapic_enable_edge_irq,
@@ -664,7 +664,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 		printk(KERN_WARNING
 		       "%s: changing vector %d from %s to %s\n",
 		       __FUNCTION__, vector,
-		       idesc->chip->typename, irq_type->typename);
+		       idesc->chip->name, irq_type->name);
 		idesc->chip = irq_type;
 	}
 	return 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index f07c0864b0b4..54d55e4d64f7 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 	}
 #endif
-	seq_printf(p, " %14s", irq_desc[i].chip->typename);
+	seq_printf(p, " %14s", irq_desc[i].chip->name);
 	seq_printf(p, " %s", action->name);
 
 	for (action=action->next; action; action = action->next)
@@ -197,7 +197,7 @@ void fixup_irqs(void)
 			struct pt_regs *old_regs = set_irq_regs(NULL);
 
 			vectors_in_migration[irq]=0;
-			__do_IRQ(irq);
+			generic_handle_irq(irq);
 			set_irq_regs(old_regs);
 		}
 	}
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 9c6dafa2d0df..ba3ba8bc50be 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -186,7 +186,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 		ia64_setreg(_IA64_REG_CR_TPR, vector);
 		ia64_srlz_d();
 
-		__do_IRQ(local_vector_to_irq(vector));
+		generic_handle_irq(local_vector_to_irq(vector));
 
 		/*
 		 * Disable interrupts and send EOI:
@@ -242,7 +242,7 @@ void ia64_process_pending_intr(void)
 		 * Probably could shared code.
 		 */
 		vectors_in_migration[local_vector_to_irq(vector)]=0;
-		__do_IRQ(local_vector_to_irq(vector));
+		generic_handle_irq(local_vector_to_irq(vector));
 		set_irq_regs(old_regs);
 
 		/*
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c
index 1ab58b09f3d7..c2f07beb1759 100644
--- a/arch/ia64/kernel/irq_lsapic.c
+++ b/arch/ia64/kernel/irq_lsapic.c
@@ -34,7 +34,7 @@ static int lsapic_retrigger(unsigned int irq)
 }
 
 struct hw_interrupt_type irq_type_ia64_lsapic = {
-	.typename = "LSAPIC",
+	.name = "LSAPIC",
 	.startup = lsapic_noop_startup,
 	.shutdown = lsapic_noop,
 	.enable = lsapic_noop,
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index eee5c1cfbe32..f3a9585e98a8 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -70,8 +70,10 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
  * Don't actually need to do any preparation, but need to make sure
  * the address is in the right region.
  */
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
 {
+	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+		return -EINVAL;
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
 	if (addr & ~HPAGE_MASK)
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index 7f73ad4408aa..ff1c55601178 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -381,14 +381,13 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
 		 * bcopy to the destination.
 		 */
 
-		/* Add the leader from source */
-		headBteLen = len + (src & L1_CACHE_MASK);
-		/* Add the trailing bytes from footer. */
-		headBteLen += L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
-		headBteSource = src & ~L1_CACHE_MASK;
 		headBcopySrcOffset = src & L1_CACHE_MASK;
 		headBcopyDest = dest;
 		headBcopyLen = len;
+
+		headBteSource = src - headBcopySrcOffset;
+		/* Add the leading and trailing bytes from source */
+		headBteLen = L1_CACHE_ALIGN(len + headBcopySrcOffset);
 	}
 
 	if (headBcopyLen > 0) {
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 7bb6ad188ba3..0b49459a878a 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -201,7 +201,7 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
 }
 
 struct hw_interrupt_type irq_type_sn = {
-	.typename = "SN hub",
+	.name = "SN hub",
 	.startup = sn_startup_irq,
 	.shutdown = sn_shutdown_irq,
 	.enable = sn_enable_irq,
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2bd9b7fb0f6c..0673dbedb241 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -740,7 +740,7 @@ config ARCH_SPARSEMEM_ENABLE
 
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
-	depends on SMP && PPC_PSERIES
+	depends on (SMP && PPC_PSERIES) || PPC_CELL
 
 config ARCH_POPULATES_NODE_MAP
 	def_bool y
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index eab7318729e9..b5fb1fee76f8 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -179,11 +179,11 @@ if [ -z "$cacheit" ]; then
 fi
 
 if [ -n "$initrd" ]; then
-    addsec $tmp "$initrd" initrd
+    addsec $tmp "$initrd" $isection
 fi
 
 if [ -n "$dtb" ]; then
-    addsec $tmp "$dtb" dtb
+    addsec $tmp "$dtb" .kernel:dtb
 fi
 
 if [ "$platform" != "miboot" ]; then
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 4b6bb3ffe3dc..4be3c6414b04 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -21,6 +21,11 @@ SECTIONS
     __got2_end = .;
   }
 
+  . = ALIGN(8);
+  _dtb_start = .;
+  .kernel:dtb : { *(.kernel:dtb) }
+  _dtb_end = .;
+
   . = ALIGN(4096);
   _vmlinux_start = .;
   .kernel:vmlinux.strip : { *(.kernel:vmlinux.strip) }
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 1442b63a75da..6f6fc977cb39 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -72,6 +72,10 @@
 #define VALIDATE_BUF_SIZE 4096
 #define RTAS_MSG_MAXLEN   64
 
+/* Quirk - RTAS requires 4k list length and block size */
+#define RTAS_BLKLIST_LENGTH 4096
+#define RTAS_BLK_SIZE 4096
+
 struct flash_block {
 	char *data;
 	unsigned long length;
@@ -83,7 +87,7 @@ struct flash_block {
  * into a version/length and translate the pointers
  * to absolute.
  */
-#define FLASH_BLOCKS_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct flash_block))
+#define FLASH_BLOCKS_PER_NODE ((RTAS_BLKLIST_LENGTH - 16) / sizeof(struct flash_block))
 struct flash_block_list {
 	unsigned long num_blocks;
 	struct flash_block_list *next;
@@ -96,6 +100,9 @@ struct flash_block_list_header { /* just the header of flash_block_list */
 
 static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
 
+/* Use slab cache to guarantee 4k alignment */
+static kmem_cache_t *flash_block_cache = NULL;
+
 #define FLASH_BLOCK_LIST_VERSION (1UL)
 
 /* Local copy of the flash block list.
@@ -153,7 +160,7 @@ static int flash_list_valid(struct flash_block_list *flist)
 			return FLASH_IMG_NULL_DATA;
 		}
 		block_size = f->blocks[i].length;
-		if (block_size <= 0 || block_size > PAGE_SIZE) {
+		if (block_size <= 0 || block_size > RTAS_BLK_SIZE) {
 			return FLASH_IMG_BAD_LEN;
 		}
 		image_size += block_size;
@@ -177,9 +184,9 @@ static void free_flash_list(struct flash_block_list *f)
 
 	while (f) {
 		for (i = 0; i < f->num_blocks; i++)
-			free_page((unsigned long)(f->blocks[i].data));
+			kmem_cache_free(flash_block_cache, f->blocks[i].data);
 		next = f->next;
-		free_page((unsigned long)f);
+		kmem_cache_free(flash_block_cache, f);
 		f = next;
 	}
 }
@@ -278,6 +285,12 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf,
 	return msglen;
 }
 
+/* constructor for flash_block_cache */
+void rtas_block_ctor(void *ptr, kmem_cache_t *cache, unsigned long flags)
+{
+	memset(ptr, 0, RTAS_BLK_SIZE);
+}
+
 /* We could be much more efficient here. But to keep this function
  * simple we allocate a page to the block list no matter how small the
  * count is. If the system is low on memory it will be just as well
@@ -302,7 +315,7 @@ static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
 	 * proc file
 	 */
 	if (uf->flist == NULL) {
-		uf->flist = (struct flash_block_list *) get_zeroed_page(GFP_KERNEL);
+		uf->flist = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
 		if (!uf->flist)
 			return -ENOMEM;
 	}
@@ -313,21 +326,21 @@ static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
 	next_free = fl->num_blocks;
 	if (next_free == FLASH_BLOCKS_PER_NODE) {
 		/* Need to allocate another block_list */
-		fl->next = (struct flash_block_list *)get_zeroed_page(GFP_KERNEL);
+		fl->next = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
 		if (!fl->next)
 			return -ENOMEM;
 		fl = fl->next;
 		next_free = 0;
 	}
 
-	if (count > PAGE_SIZE)
-		count = PAGE_SIZE;
-	p = (char *)get_zeroed_page(GFP_KERNEL);
+	if (count > RTAS_BLK_SIZE)
+		count = RTAS_BLK_SIZE;
+	p = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 
 	if(copy_from_user(p, buffer, count)) {
-		free_page((unsigned long)p);
+		kmem_cache_free(flash_block_cache, p);
 		return -EFAULT;
 	}
 	fl->blocks[next_free].data = p;
@@ -791,6 +804,16 @@ int __init rtas_flash_init(void)
 		goto cleanup;
 
 	rtas_flash_term_hook = rtas_flash_firmware;
+
+	flash_block_cache = kmem_cache_create("rtas_flash_cache",
+				RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
+				rtas_block_ctor, NULL);
+	if (!flash_block_cache) {
+		printk(KERN_ERR "%s: failed to create block cache\n",
+				__FUNCTION__);
+		rc = -ENOMEM;
+		goto cleanup;
+	}
 	return 0;
 
 cleanup:
@@ -805,6 +828,10 @@ cleanup:
 void __exit rtas_flash_cleanup(void)
 {
 	rtas_flash_term_hook = NULL;
+
+	if (flash_block_cache)
+		kmem_cache_destroy(flash_block_cache);
+
 	remove_flash_pde(firmware_flash_pde);
 	remove_flash_pde(firmware_update_pde);
 	remove_flash_pde(validate_pde);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index fd68b74c07c3..506d89768d45 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -491,11 +491,15 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 	return 0;
 }
 
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
 {
 	int err = 0;
 
-	if ( (addr+len) < addr )
+	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+		return -EINVAL;
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
 		return -EINVAL;
 
 	if (addr < 0x100000000UL)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index d0fb959e3ef1..7aa809d5a244 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -655,14 +655,19 @@ static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
 
 	for (i=0; i < 3; i++) {
 		ret = of_irq_map_one(np, i, &oirq);
-		if (ret)
+		if (ret) {
+			pr_debug("spu_new: failed to get irq %d\n", i);
 			goto err;
-
+		}
 		ret = -EINVAL;
+		pr_debug("  irq %d no 0x%x on %s\n", i, oirq.specifier[0],
+			 oirq.controller->full_name);
 		spu->irqs[i] = irq_create_of_mapping(oirq.controller,
 					oirq.specifier, oirq.size);
-		if (spu->irqs[i] == NO_IRQ)
+		if (spu->irqs[i] == NO_IRQ) {
+			pr_debug("spu_new: failed to map it !\n");
 			goto err;
+		}
 	}
 	return 0;
 
@@ -681,7 +686,7 @@ static int spu_map_resource(struct device_node *node, int nr,
 	struct resource resource = { };
 	int ret;
 
-	ret = of_address_to_resource(node, 0, &resource);
+	ret = of_address_to_resource(node, nr, &resource);
 	if (ret)
 		goto out;
 
@@ -704,22 +709,42 @@ static int __init spu_map_device(struct spu *spu, struct device_node *node)
 
 	ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
 			       &spu->local_store_phys);
-	if (ret)
+	if (ret) {
+		pr_debug("spu_new: failed to map %s resource 0\n",
+			 node->full_name);
 		goto out;
+	}
 	ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
 			       &spu->problem_phys);
-	if (ret)
+	if (ret) {
+		pr_debug("spu_new: failed to map %s resource 1\n",
+			 node->full_name);
 		goto out_unmap;
+	}
 	ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
 			       NULL);
-	if (ret)
+	if (ret) {
+		pr_debug("spu_new: failed to map %s resource 2\n",
+			 node->full_name);
 		goto out_unmap;
+	}
 
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
 		ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
 				       NULL);
-	if (ret)
+	if (ret) {
+		pr_debug("spu_new: failed to map %s resource 3\n",
+			 node->full_name);
 		goto out_unmap;
+	}
+	pr_debug("spu_new: %s maps:\n", node->full_name);
+	pr_debug("  local store   : 0x%016lx -> 0x%p\n",
+		 spu->local_store_phys, spu->local_store);
+	pr_debug("  problem state : 0x%016lx -> 0x%p\n",
+		 spu->problem_phys, spu->problem);
+	pr_debug("  priv2         : 0x%p\n", spu->priv2);
+	pr_debug("  priv1         : 0x%p\n", spu->priv1);
+
 	return 0;
 
 out_unmap:
diff --git a/arch/x86_64/boot/setup.S b/arch/x86_64/boot/setup.S
index c3bfd223ab49..770940cc0108 100644
--- a/arch/x86_64/boot/setup.S
+++ b/arch/x86_64/boot/setup.S
@@ -836,13 +836,12 @@ gdt:
 	.word	0x9200			# data read/write
 	.word	0x00CF			# granularity = 4096, 386
 					#  (+5th nibble of limit)
+gdt_end:
 idt_48:
 	.word	0			# idt limit = 0
 	.word	0, 0			# idt base = 0L
 gdt_48:
-	.word	0x8000			# gdt limit=2048,
-					#  256 GDT entries
-
+	.word	gdt_end-gdt-1		# gdt limit
 	.word	0, 0			# gdt base (filled in later)
 
 # Include video setup & detection code
diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c
index 3a7561d4703e..04566fe5de49 100644
--- a/arch/x86_64/ia32/ptrace32.c
+++ b/arch/x86_64/ia32/ptrace32.c
@@ -244,6 +244,8 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
 	case PTRACE_DETACH:
 	case PTRACE_SYSCALL:
 	case PTRACE_SETOPTIONS:
+	case PTRACE_SET_THREAD_AREA:
+	case PTRACE_GET_THREAD_AREA:
 		return sys_ptrace(request, pid, addr, data);
 
 	default:
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index a75c829c2b02..6fe191c58084 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -278,7 +278,7 @@ e820_register_active_regions(int nid, unsigned long start_pfn,
 				>> PAGE_SHIFT;
 
 		/* Skip map entries smaller than a page */
-		if (ei_startpfn > ei_endpfn)
+		if (ei_startpfn >= ei_endpfn)
 			continue;
 
 		/* Check if end_pfn_map should be updated */
@@ -594,7 +594,9 @@ static int __init parse_memmap_opt(char *p)
 			 * size before original memory map is
 			 * reset.
 			 */
+			e820_register_active_regions(0, 0, -1UL);
 			saved_max_pfn = e820_end_of_ram();
+			remove_all_active_ranges();
 #endif
 			end_pfn_map = 0;
 			e820.nr_map = 0;
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 2b1245d86258..68273bff58cc 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -45,7 +45,13 @@ static void nvidia_bugs(void)
 	/*
 	 * All timer overrides on Nvidia are
 	 * wrong unless HPET is enabled.
+	 * Unfortunately that's not true on many Asus boards.
+	 * We don't know yet how to detect this automatically, but
+	 * at least allow a command line override.
 	 */
+	if (acpi_use_timer_override)
+		return;
+
 	nvidia_hpet_detected = 0;
 	acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
 	if (nvidia_hpet_detected == 0) {
@@ -53,6 +59,8 @@ static void nvidia_bugs(void)
 		printk(KERN_INFO "Nvidia board "
 		       "detected. Ignoring ACPI "
 		       "timer override.\n");
+		printk(KERN_INFO "If you got timer trouble "
+		       "try acpi_use_timer_override\n");
 	}
 #endif
 	/* RED-PEN skip them on mptables too? */
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 41bfc49301ad..14654e682411 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -790,9 +790,11 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 			trigger == IOAPIC_LEVEL)
 		set_irq_chip_and_handler_name(irq, &ioapic_chip,
 					      handle_fasteoi_irq, "fasteoi");
-	else
+	else {
+		irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
 		set_irq_chip_and_handler_name(irq, &ioapic_chip,
 					      handle_edge_irq, "edge");
+	}
 }
 
 static void __init setup_IO_APIC_irqs(void)
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 49f7fac6229e..f6226055d53d 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -88,9 +88,8 @@ void enter_idle(void)
 
 static void __exit_idle(void)
 {
-	if (read_pda(isidle) == 0)
+	if (test_and_clear_bit_pda(0, isidle) == 0)
 		return;
-	write_pda(isidle, 0);
 	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
 }
 
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 4f67697f5036..9f74c883568c 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -376,9 +376,8 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
 	/* prevent preemption and reschedule on another processor */
 	int me = get_cpu();
 	if (cpu == me) {
-		WARN_ON(1);
 		put_cpu();
-		return -EBUSY;
+		return 0;
 	}
 	spin_lock_bh(&call_lock);
 	__smp_call_function_single(cpu, func, info, nonatomic, wait);
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 88722f11ca13..e3ef544d2cfb 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -876,15 +876,6 @@ static struct irqaction irq0 = {
 	timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL
 };
 
-static int __cpuinit
-time_cpu_notifier(struct notifier_block *nb, unsigned long action, void *hcpu)
-{
-	unsigned cpu = (unsigned long) hcpu;
-	if (action == CPU_ONLINE)
-		vsyscall_set_cpu(cpu);
-	return NOTIFY_DONE;
-}
-
 void __init time_init(void)
 {
 	if (nohpet)
@@ -925,8 +916,6 @@ void __init time_init(void)
 	vxtime.last_tsc = get_cycles_sync();
 	set_cyc2ns_scale(cpu_khz);
 	setup_irq(0, &irq0);
-	hotcpu_notifier(time_cpu_notifier, 0);
-	time_cpu_notifier(NULL, CPU_ONLINE, (void *)(long)smp_processor_id());
 
 #ifndef CONFIG_SMP
 	time_init_gtod();
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index a98b460af6a1..a730bacecb0b 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -27,6 +27,9 @@
 #include <linux/jiffies.h>
 #include <linux/sysctl.h>
 #include <linux/getcpu.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/notifier.h>
 
 #include <asm/vsyscall.h>
 #include <asm/pgtable.h>
@@ -243,32 +246,17 @@ static ctl_table kernel_root_table2[] = {
 
 #endif
 
-static void __cpuinit write_rdtscp_cb(void *info)
-{
-	write_rdtscp_aux((unsigned long)info);
-}
-
-void __cpuinit vsyscall_set_cpu(int cpu)
+/* Assume __initcall executes before all user space. Hopefully kmod
+   doesn't violate that. We'll find out if it does. */
+static void __cpuinit vsyscall_set_cpu(int cpu)
 {
 	unsigned long *d;
 	unsigned long node = 0;
 #ifdef CONFIG_NUMA
 	node = cpu_to_node[cpu];
 #endif
-	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) {
-		void *info = (void *)((node << 12) | cpu);
-		/* Can happen on preemptive kernel */
-		if (get_cpu() == cpu)
-			write_rdtscp_cb(info);
-#ifdef CONFIG_SMP
-		else {
-			/* the notifier is unfortunately not executed on the
-			 target CPU */
-			smp_call_function_single(cpu,write_rdtscp_cb,info,0,1);
-		}
-#endif
-		put_cpu();
-	}
+	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
+		write_rdtscp_aux((node << 12) | cpu);
 
 	/* Store cpu number in limit so that it can be loaded quickly
 	   in user space in vgetcpu.
@@ -280,6 +268,21 @@ void __cpuinit vsyscall_set_cpu(int cpu)
 	*d |= (node >> 4) << 48;
 }
 
+static void __cpuinit cpu_vsyscall_init(void *arg)
+{
+	/* preemption should be already off */
+	vsyscall_set_cpu(raw_smp_processor_id());
+}
+
+static int __cpuinit
+cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
+{
+	long cpu = (long)arg;
+	if (action == CPU_ONLINE)
+		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+	return NOTIFY_DONE;
+}
+
 static void __init map_vsyscall(void)
 {
 	extern char __vsyscall_0;
@@ -299,6 +302,8 @@ static int __init vsyscall_init(void)
 #ifdef CONFIG_SYSCTL
 	register_sysctl_table(kernel_root_table2, 0);
 #endif
+	on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
+	hotcpu_notifier(cpu_vsyscall_notifier, 0);
 	return 0;
 }
 
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 971dc1181e69..f1f977aafae1 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -655,9 +655,22 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 
 void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
 {
-	/* Should check here against the e820 map to avoid double free */
 #ifdef CONFIG_NUMA
 	int nid = phys_to_nid(phys);
+#endif
+	unsigned long pfn = phys >> PAGE_SHIFT;
+	if (pfn >= end_pfn) {
+		/* This can happen with kdump kernels when accessing firmware
+		   tables. */
+		if (pfn < end_pfn_map)
+			return;
+		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
+				phys, len);
+		return;
+	}
+
+	/* Should check here against the e820 map to avoid double free */
+#ifdef CONFIG_NUMA
 	reserve_bootmem_node(NODE_DATA(nid), phys, len);
 #else
 	reserve_bootmem(phys, len);
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index e61093b34c26..f8b6b2800a62 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -163,37 +163,6 @@ static __init void unreachable_devices(void)
 	}
 }
 
-static __init void pci_mmcfg_insert_resources(void)
-{
-#define PCI_MMCFG_RESOURCE_NAME_LEN 19
-	int i;
-	struct resource *res;
-	char *names;
-	unsigned num_buses;
-
-	res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res),
-			pci_mmcfg_config_num, GFP_KERNEL);
-
-	if (!res) {
-		printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n");
-		return;
-	}
-
-	names = (void *)&res[pci_mmcfg_config_num];
-	for (i = 0; i < pci_mmcfg_config_num; i++, res++) {
-		num_buses = pci_mmcfg_config[i].end_bus_number -
-			    pci_mmcfg_config[i].start_bus_number + 1;
-		res->name = names;
-		snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %u",
-			 pci_mmcfg_config[i].pci_segment_group_number);
-		res->start = pci_mmcfg_config[i].base_address;
-		res->end = res->start + (num_buses << 20) - 1;
-		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-		insert_resource(&iomem_resource, res);
-		names += PCI_MMCFG_RESOURCE_NAME_LEN;
-	}
-}
-
 void __init pci_mmcfg_init(int type)
 {
 	int i;
@@ -237,7 +206,6 @@ void __init pci_mmcfg_init(int type)
237 } 206 }
238 207
239 unreachable_devices(); 208 unreachable_devices();
240 pci_mmcfg_insert_resources();
241 209
242 raw_pci_ops = &pci_mmcfg; 210 raw_pci_ops = &pci_mmcfg;
243 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; 211 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;