author     Trond Myklebust <Trond.Myklebust@netapp.com>  2007-02-13 01:43:25 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2007-02-13 01:43:25 -0500
commit     d9bc125caf592b7d081021f32ce5b717efdf70c8 (patch)
tree       263b7066ba22ddce21db610c0300f6eaac6f2064 /arch/x86_64
parent     43d78ef2ba5bec26d0315859e8324bfc0be23766 (diff)
parent     ec2f9d1331f658433411c58077871e1eef4ee1b4 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
Conflicts:

        net/sunrpc/auth_gss/gss_krb5_crypto.c
        net/sunrpc/auth_gss/gss_spkm3_token.c
        net/sunrpc/clnt.c

Merge with mainline and fix conflicts.
Diffstat (limited to 'arch/x86_64')
 arch/x86_64/Kconfig               |   4
 arch/x86_64/ia32/ia32_binfmt.c    |   4
 arch/x86_64/ia32/ia32entry.S      |   2
 arch/x86_64/ia32/sys_ia32.c       |  66
 arch/x86_64/ia32/syscall32.c      |  59
 arch/x86_64/kernel/early-quirks.c |   4
 arch/x86_64/kernel/genapic.c      |   4
 arch/x86_64/kernel/head64.c       |   4
 arch/x86_64/kernel/io_apic.c      |  17
 arch/x86_64/kernel/mce.c          |   2
 arch/x86_64/kernel/mce_amd.c      |   2
 arch/x86_64/kernel/mpparse.c      |   2
 arch/x86_64/kernel/pci-swiotlb.c  |   2
 arch/x86_64/kernel/setup.c        |   6
 arch/x86_64/kernel/time.c         |  18
 arch/x86_64/kernel/vmlinux.lds.S  |   4
 arch/x86_64/mm/fault.c            |  21
 arch/x86_64/mm/srat.c             |  48
 arch/x86_64/pci/mmconfig.c        |  29
 19 files changed, 95 insertions(+), 203 deletions(-)
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index d4275537b25b..02dd39457bcf 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -44,6 +44,10 @@ config MMU
         bool
         default y
 
+config ZONE_DMA
+        bool
+        default y
+
 config ISA
         bool
 
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 5ce0bd486bbf..6efe04f3cbca 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -300,12 +300,10 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
         bprm->loader += stack_base;
         bprm->exec += stack_base;
 
-        mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+        mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
         if (!mpnt)
                 return -ENOMEM;
 
-        memset(mpnt, 0, sizeof(*mpnt));
-
         down_write(&mm->mmap_sem);
         {
                 mpnt->vm_mm = mm;
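
The hunk above folds the kmem_cache_alloc()-plus-memset() pair into a single kmem_cache_zalloc() call, which returns memory that is already zeroed. A minimal sketch of the equivalence; the cache and struct names are placeholders, not taken from this patch:

/* Sketch only: "my_cache" and "struct foo" are hypothetical. */
struct foo *obj;

/* Before: allocate, then zero by hand (two steps). */
obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
if (!obj)
        return -ENOMEM;
memset(obj, 0, sizeof(*obj));

/* After: one call, memory comes back pre-zeroed. */
obj = kmem_cache_zalloc(my_cache, GFP_KERNEL);
if (!obj)
        return -ENOMEM;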
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index b4aa875e175b..5f32cf4de5fb 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -515,7 +515,7 @@ ia32_sys_call_table:
         .quad sys32_vm86_warning        /* vm86old */
         .quad compat_sys_wait4
         .quad sys_swapoff               /* 115 */
-        .quad sys32_sysinfo
+        .quad compat_sys_sysinfo
         .quad sys32_ipc
         .quad sys_fsync
         .quad stub32_sigreturn
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index c9bac3af29d6..200fdde18d96 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -523,72 +523,6 @@ sys32_sysfs(int option, u32 arg1, u32 arg2)
         return sys_sysfs(option, arg1, arg2);
 }
 
-struct sysinfo32 {
-        s32 uptime;
-        u32 loads[3];
-        u32 totalram;
-        u32 freeram;
-        u32 sharedram;
-        u32 bufferram;
-        u32 totalswap;
-        u32 freeswap;
-        unsigned short procs;
-        unsigned short pad;
-        u32 totalhigh;
-        u32 freehigh;
-        u32 mem_unit;
-        char _f[20-2*sizeof(u32)-sizeof(int)];
-};
-
-asmlinkage long
-sys32_sysinfo(struct sysinfo32 __user *info)
-{
-        struct sysinfo s;
-        int ret;
-        mm_segment_t old_fs = get_fs ();
-        int bitcount = 0;
-
-        set_fs (KERNEL_DS);
-        ret = sys_sysinfo((struct sysinfo __user *)&s);
-        set_fs (old_fs);
-
-        /* Check to see if any memory value is too large for 32-bit and scale
-         * down if needed
-         */
-        if ((s.totalram >> 32) || (s.totalswap >> 32)) {
-                while (s.mem_unit < PAGE_SIZE) {
-                        s.mem_unit <<= 1;
-                        bitcount++;
-                }
-                s.totalram >>= bitcount;
-                s.freeram >>= bitcount;
-                s.sharedram >>= bitcount;
-                s.bufferram >>= bitcount;
-                s.totalswap >>= bitcount;
-                s.freeswap >>= bitcount;
-                s.totalhigh >>= bitcount;
-                s.freehigh >>= bitcount;
-        }
-
-        if (!access_ok(VERIFY_WRITE, info, sizeof(struct sysinfo32)) ||
-            __put_user (s.uptime, &info->uptime) ||
-            __put_user (s.loads[0], &info->loads[0]) ||
-            __put_user (s.loads[1], &info->loads[1]) ||
-            __put_user (s.loads[2], &info->loads[2]) ||
-            __put_user (s.totalram, &info->totalram) ||
-            __put_user (s.freeram, &info->freeram) ||
-            __put_user (s.sharedram, &info->sharedram) ||
-            __put_user (s.bufferram, &info->bufferram) ||
-            __put_user (s.totalswap, &info->totalswap) ||
-            __put_user (s.freeswap, &info->freeswap) ||
-            __put_user (s.procs, &info->procs) ||
-            __put_user (s.totalhigh, &info->totalhigh) ||
-            __put_user (s.freehigh, &info->freehigh) ||
-            __put_user (s.mem_unit, &info->mem_unit))
-                return -EFAULT;
-        return 0;
-}
-
 asmlinkage long
 sys32_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval)
 {
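
The deleted sys32_sysinfo() duplicated work the generic compat layer already does; the ia32entry.S hunk above points syscall slot 116 at compat_sys_sysinfo instead. The one non-obvious piece of the dropped code is the overflow handling: when a 64-bit byte count cannot fit the 32-bit struct fields, it is re-expressed in larger mem_unit chunks. A standalone sketch of that idea (userspace illustration with simplified types, not kernel code):

#include <stdint.h>

#define PAGE_SIZE 4096u

/*
 * Mirror of the scaling loop in the deleted wrapper: if any counter
 * overflows 32 bits, grow mem_unit up to PAGE_SIZE and shift the
 * counters down by the same number of bits.
 */
static void scale_for_32bit(uint64_t *totalram, uint64_t *totalswap,
                            uint32_t *mem_unit)
{
        unsigned int bitcount = 0;

        if ((*totalram >> 32) || (*totalswap >> 32)) {
                while (*mem_unit < PAGE_SIZE) {
                        *mem_unit <<= 1;
                        bitcount++;
                }
                *totalram >>= bitcount;
                *totalswap >>= bitcount;
        }
}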
diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
index 59f1fa155915..568ff0df89e7 100644
--- a/arch/x86_64/ia32/syscall32.c
+++ b/arch/x86_64/ia32/syscall32.c
@@ -18,68 +18,34 @@ extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
 extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
 extern int sysctl_vsyscall32;
 
-char *syscall32_page;
+static struct page *syscall32_pages[1];
 static int use_sysenter = -1;
 
-static struct page *
-syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
-{
-        struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
-        get_page(p);
-        return p;
-}
-
-/* Prevent VMA merging */
-static void syscall32_vma_close(struct vm_area_struct *vma)
-{
-}
-
-static struct vm_operations_struct syscall32_vm_ops = {
-        .close = syscall32_vma_close,
-        .nopage = syscall32_nopage,
-};
-
 struct linux_binprm;
 
 /* Setup a VMA at program startup for the vsyscall page */
 int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
 {
-        int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
-        struct vm_area_struct *vma;
         struct mm_struct *mm = current->mm;
         int ret;
 
-        vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-        if (!vma)
-                return -ENOMEM;
-
-        memset(vma, 0, sizeof(struct vm_area_struct));
-        /* Could randomize here */
-        vma->vm_start = VSYSCALL32_BASE;
-        vma->vm_end = VSYSCALL32_END;
-        /* MAYWRITE to allow gdb to COW and set breakpoints */
-        vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
+        down_write(&mm->mmap_sem);
         /*
+         * MAYWRITE to allow gdb to COW and set breakpoints
+         *
          * Make sure the vDSO gets into every core dump.
          * Dumping its contents makes post-mortem fully interpretable later
          * without matching up the same kernel and hardware config to see
          * what PC values meant.
          */
-        vma->vm_flags |= VM_ALWAYSDUMP;
-        vma->vm_flags |= mm->def_flags;
-        vma->vm_page_prot = protection_map[vma->vm_flags & 7];
-        vma->vm_ops = &syscall32_vm_ops;
-        vma->vm_mm = mm;
-
-        down_write(&mm->mmap_sem);
-        if ((ret = insert_vm_struct(mm, vma))) {
-                up_write(&mm->mmap_sem);
-                kmem_cache_free(vm_area_cachep, vma);
-                return ret;
-        }
-        mm->total_vm += npages;
+        /* Could randomize here */
+        ret = install_special_mapping(mm, VSYSCALL32_BASE, PAGE_SIZE,
+                                      VM_READ|VM_EXEC|
+                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+                                      VM_ALWAYSDUMP,
+                                      syscall32_pages);
         up_write(&mm->mmap_sem);
-        return 0;
+        return ret;
 }
 
 const char *arch_vma_name(struct vm_area_struct *vma)
@@ -92,9 +58,10 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 
 static int __init init_syscall32(void)
 {
-        syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
+        char *syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
         if (!syscall32_page)
                 panic("Cannot allocate syscall32 page");
+        syscall32_pages[0] = virt_to_page(syscall32_page);
         if (use_sysenter > 0) {
                 memcpy(syscall32_page, syscall32_sysenter,
                        syscall32_sysenter_end - syscall32_sysenter);
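
The rewrite above retires the hand-built VMA, its nopage handler, and the close hook that existed only to prevent VMA merging, in favour of install_special_mapping(). A minimal sketch of the pattern, assuming the 2.6.20-era signature; my_pages, MY_BASE and the function names are illustrative, not from this patch:

static struct page *my_pages[1];        /* backing page for the mapping */

/* Boot time: allocate the page once and record its struct page. */
static int __init my_page_init(void)
{
        char *page = (char *)get_zeroed_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;
        my_pages[0] = virt_to_page(page);
        return 0;
}

/* Exec time: publish the page at a fixed address in the new mm. */
static int my_setup_pages(struct mm_struct *mm)
{
        int ret;

        down_write(&mm->mmap_sem);
        ret = install_special_mapping(mm, MY_BASE, PAGE_SIZE,
                                      VM_READ|VM_EXEC|
                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
                                      VM_ALWAYSDUMP,
                                      my_pages);
        up_write(&mm->mmap_sem);
        return ret;
}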
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 49802f1bee94..bd30d138113f 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -32,7 +32,7 @@ static void via_bugs(void)
 
 static int nvidia_hpet_detected __initdata;
 
-static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+static int __init nvidia_hpet_check(struct acpi_table_header *header)
 {
         nvidia_hpet_detected = 1;
         return 0;
@@ -53,7 +53,7 @@ static void nvidia_bugs(void)
                 return;
 
         nvidia_hpet_detected = 0;
-        acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+        acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
         if (nvidia_hpet_detected == 0) {
                 acpi_skip_timer_override = 1;
                 printk(KERN_INFO "Nvidia board "
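
Both hunks above track the ACPICA table-manager rework: acpi_table_parse() is now keyed by a four-character signature string (ACPI_SIG_HPET) rather than an enum, and the handler receives a pointer to the already-mapped table header instead of a (phys, size) pair. A sketch of the new callback shape, with an illustrative handler name:

static int __init my_hpet_check(struct acpi_table_header *header)
{
        /* The table is mapped for us; header->length gives its size. */
        return 0;
}

/* Registration keys on the signature string: */
/*      acpi_table_parse(ACPI_SIG_HPET, my_hpet_check);  */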
diff --git a/arch/x86_64/kernel/genapic.c b/arch/x86_64/kernel/genapic.c
index b007433f96bb..0b3603adf56d 100644
--- a/arch/x86_64/kernel/genapic.c
+++ b/arch/x86_64/kernel/genapic.c
@@ -58,8 +58,8 @@ void __init clustered_apic_check(void)
          * Some x86_64 machines use physical APIC mode regardless of how many
          * procs/clusters are present (x86_64 ES7000 is an example).
          */
-        if (acpi_fadt.revision > FADT2_REVISION_ID)
-                if (acpi_fadt.force_apic_physical_destination_mode) {
+        if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID)
+                if (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) {
                         genapic = &apic_cluster;
                         goto print;
                 }
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index cc230b93cd1c..5f197b0a330a 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -34,8 +34,6 @@ static void __init clear_bss(void)
 #define OLD_CL_BASE_ADDR        0x90000
 #define OLD_CL_OFFSET           0x90022
 
-extern char saved_command_line[];
-
 static void __init copy_bootdata(char *real_mode_data)
 {
         int new_data;
@@ -50,7 +48,7 @@ static void __init copy_bootdata(char *real_mode_data)
                 new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
         }
         command_line = (char *) ((u64)(new_data));
-        memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
+        memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
 }
 
 void __init x86_64_start_kernel(char * real_mode_data)
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index d7bad90a5ad8..6be6730acb5c 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -1956,24 +1956,31 @@ static struct irq_chip msi_chip = {
         .retrigger      = ioapic_retrigger_irq,
 };
 
-int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 {
         struct msi_msg msg;
-        int ret;
+        int irq, ret;
+        irq = create_irq();
+        if (irq < 0)
+                return irq;
+
+        set_irq_msi(irq, desc);
         ret = msi_compose_msg(dev, irq, &msg);
-        if (ret < 0)
+        if (ret < 0) {
+                destroy_irq(irq);
                 return ret;
+        }
 
         write_msi_msg(irq, &msg);
 
         set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
 
-        return 0;
+        return irq;
 }
 
 void arch_teardown_msi_irq(unsigned int irq)
 {
-        return;
+        destroy_irq(irq);
 }
 
 #endif /* CONFIG_PCI_MSI */
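
The MSI hook now owns the vector: arch_setup_msi_irq() calls create_irq() itself, returns the allocated irq number (or a negative errno), backs out with destroy_irq() if composing the message fails, and arch_teardown_msi_irq() releases the vector symmetrically. A sketch of how a caller might drive this pairing; the wrapper names are illustrative only:

/* Illustrative caller; the error path mirrors the hunk above. */
static int my_enable_msi(struct pci_dev *dev, struct msi_desc *desc,
                         unsigned int *irqp)
{
        int irq = arch_setup_msi_irq(dev, desc);

        if (irq < 0)
                return irq;             /* create_irq() or compose failed */

        *irqp = irq;                    /* caller keeps it for teardown */
        return 0;
}

static void my_disable_msi(unsigned int irq)
{
        arch_teardown_msi_irq(irq);     /* destroy_irq() runs inside */
}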
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index ac085038af29..bdb54a2c9f18 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -516,7 +516,7 @@ static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned
         }
 }
 
-static struct file_operations mce_chrdev_ops = {
+static const struct file_operations mce_chrdev_ops = {
         .read = mce_read,
         .ioctl = mce_ioctl,
 };
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index fa09debad4b7..93c707257637 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -401,7 +401,6 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
         b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
         if (!b)
                 return -ENOMEM;
-        memset(b, 0, sizeof(struct threshold_block));
 
         b->block = block;
         b->bank = bank;
@@ -490,7 +489,6 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                 err = -ENOMEM;
                 goto out;
         }
-        memset(b, 0, sizeof(struct threshold_bank));
 
         kobject_set_name(&b->kobj, "threshold_bank%i", bank);
         b->kobj.parent = &per_cpu(device_mce, cpu).kobj;
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 08072568847d..50dd8bef850e 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -798,7 +798,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
                 return gsi;
 
         /* Don't set up the ACPI SCI because it's already set up */
-        if (acpi_fadt.sci_int == gsi)
+        if (acpi_gbl_FADT.sci_interrupt == gsi)
                 return gsi;
 
         ioapic = mp_find_ioapic(gsi);
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 697f0aa794b9..eb18be5a6569 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -29,7 +29,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
         .dma_supported = NULL,
 };
 
-void pci_swiotlb_init(void)
+void __init pci_swiotlb_init(void)
 {
         /* don't initialize swiotlb if iommu=off (no_iommu=1) */
         if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index af425a8049fb..60477244d1a3 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(edid_info);
 
 extern int root_mountflags;
 
-char command_line[COMMAND_LINE_SIZE];
+char __initdata command_line[COMMAND_LINE_SIZE];
 
 struct resource standard_io_resources[] = {
         { .name = "dma1", .start = 0x00, .end = 0x1f,
@@ -343,7 +343,7 @@ static void discover_ebda(void)
 
 void __init setup_arch(char **cmdline_p)
 {
-        printk(KERN_INFO "Command line: %s\n", saved_command_line);
+        printk(KERN_INFO "Command line: %s\n", boot_command_line);
 
         ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
         screen_info = SCREEN_INFO;
@@ -373,7 +373,7 @@ void __init setup_arch(char **cmdline_p)
 
         early_identify_cpu(&boot_cpu_data);
 
-        strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
+        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
         *cmdline_p = command_line;
 
         parse_early_param();
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 5cc76d0d331f..335cc91c49b7 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -498,7 +498,7 @@ static unsigned long get_cmos_time(void)
 {
         unsigned int year, mon, day, hour, min, sec;
         unsigned long flags;
-        unsigned extyear = 0;
+        unsigned century = 0;
 
         spin_lock_irqsave(&rtc_lock, flags);
 
@@ -510,9 +510,9 @@ static unsigned long get_cmos_time(void)
                 mon = CMOS_READ(RTC_MONTH);
                 year = CMOS_READ(RTC_YEAR);
 #ifdef CONFIG_ACPI
-                if (acpi_fadt.revision >= FADT2_REVISION_ID &&
-                    acpi_fadt.century)
-                        extyear = CMOS_READ(acpi_fadt.century);
+                if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+                    acpi_gbl_FADT.century)
+                        century = CMOS_READ(acpi_gbl_FADT.century);
 #endif
         } while (sec != CMOS_READ(RTC_SECONDS));
 
@@ -530,10 +530,10 @@ static unsigned long get_cmos_time(void)
         BCD_TO_BIN(mon);
         BCD_TO_BIN(year);
 
-        if (extyear) {
-                BCD_TO_BIN(extyear);
-                year += extyear;
-                printk(KERN_INFO "Extended CMOS year: %d\n", extyear);
+        if (century) {
+                BCD_TO_BIN(century);
+                year += century * 100;
+                printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
         } else {
                 /*
                  * x86-64 systems only exists since 2002.
@@ -954,7 +954,7 @@ __cpuinit int unsynchronized_tsc(void)
         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
 #ifdef CONFIG_ACPI
                 /* But TSC doesn't tick in C3 so don't use it there */
-                if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 1000)
+                if (acpi_gbl_FADT.header.length > 0 && acpi_gbl_FADT.C3latency < 1000)
                         return 1;
 #endif
                 return 0;
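
Most of the time.c changes are mechanical renames to the new ACPICA FADT layout (acpi_gbl_FADT, header.revision, header.length, C3latency), but the third hunk fixes real arithmetic: the FADT-designated CMOS register holds a century count (e.g. BCD 0x20 for the years 2000-2099), so it must be scaled by 100 before being added to the two-digit RTC year. A worked example, using the kernel's BCD_TO_BIN definition:

#include <stdio.h>

#define BCD_TO_BIN(val) ((val) = ((val) & 15) + ((val) >> 4) * 10)

int main(void)
{
        unsigned int year = 0x07;       /* RTC_YEAR register, BCD "07" */
        unsigned int century = 0x20;    /* century register,  BCD "20" */

        BCD_TO_BIN(year);               /* -> 7  */
        BCD_TO_BIN(century);            /* -> 20 */

        /* Old code: year += century        -> 27   (wrong)  */
        /* New code: year += century * 100  -> 2007 (right)  */
        year += century * 100;

        printf("%u\n", year);           /* prints 2007 */
        return 0;
}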
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 1e54ddf2338d..c360c4225244 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -192,10 +192,14 @@ SECTIONS
            from .altinstructions and .eh_frame */
   .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
   .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
+
+#ifdef CONFIG_BLK_DEV_INITRD
   . = ALIGN(4096);
   __initramfs_start = .;
   .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
   __initramfs_end = .;
+#endif
+
   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
   __per_cpu_start = .;
   .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index a65fc6f1dcaf..49e8cf2e06f8 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -69,27 +69,6 @@ static inline int notify_page_fault(enum die_val val, const char *str,
         return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
 }
 
-void bust_spinlocks(int yes)
-{
-        int loglevel_save = console_loglevel;
-        if (yes) {
-                oops_in_progress = 1;
-        } else {
-#ifdef CONFIG_VT
-                unblank_screen();
-#endif
-                oops_in_progress = 0;
-                /*
-                 * OK, the message is on the console.  Now we call printk()
-                 * without oops_in_progress set so that printk will give klogd
-                 * a poke.  Hold onto your hats...
-                 */
-                console_loglevel = 15;  /* NMI oopser may have shut the console up */
-                printk(" ");
-                console_loglevel = loglevel_save;
-        }
-}
-
 /* Sometimes the CPU reports invalid exceptions on prefetch.
    Check that here and ignore.
    Opcode checker based on code by Richard Brunner */
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 1087e150a218..2efe215fc76a 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -101,7 +101,7 @@ static __init inline int srat_disabled(void)
 static __init int slit_valid(struct acpi_table_slit *slit)
 {
         int i, j;
-        int d = slit->localities;
+        int d = slit->locality_count;
         for (i = 0; i < d; i++) {
                 for (j = 0; j < d; j++) {
                         u8 val = slit->entry[d*i + j];
@@ -127,18 +127,18 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 
 /* Callback for Proximity Domain -> LAPIC mapping */
 void __init
-acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 {
         int pxm, node;
         if (srat_disabled())
                 return;
-        if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+        if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
                 bad_srat();
                 return;
         }
-        if (pa->flags.enabled == 0)
+        if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
                 return;
-        pxm = pa->proximity_domain;
+        pxm = pa->proximity_domain_lo;
         node = setup_node(pxm);
         if (node < 0) {
                 printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -254,21 +254,21 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
         /* Looks good */
 
         if (nd->start == nd->end) {
                 nd->start = start;
                 nd->end = end;
                 changed = 1;
         } else {
                 if (nd->start == end) {
                         nd->start = start;
                         changed = 1;
                 }
                 if (nd->end == start) {
                         nd->end = end;
                         changed = 1;
                 }
                 if (!changed)
                         printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
         }
 
         ret = update_end_of_memory(nd->end);
 
@@ -279,7 +279,7 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
 
 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
 void __init
-acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
+acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
         struct bootnode *nd, oldnode;
         unsigned long start, end;
@@ -288,16 +288,17 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 
         if (srat_disabled())
                 return;
-        if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
+        if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
                 bad_srat();
                 return;
         }
-        if (ma->flags.enabled == 0)
+        if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
                 return;
-        if (ma->flags.hot_pluggable && !save_add_info())
+
+        if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
                 return;
-        start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
-        end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
+        start = ma->base_address;
+        end = start + ma->length;
         pxm = ma->proximity_domain;
         node = setup_node(pxm);
         if (node < 0) {
@@ -337,7 +338,8 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
         push_node_boundaries(node, nd->start >> PAGE_SHIFT,
                                                 nd->end >> PAGE_SHIFT);
 
-        if (ma->flags.hot_pluggable && (reserve_hotadd(node, start, end) < 0)) {
+        if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) &&
+            (reserve_hotadd(node, start, end) < 0)) {
                 /* Ignore hotadd region. Undo damage */
                 printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
                 *nd = oldnode;
@@ -394,7 +396,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 
         /* First clean up the node list */
         for (i = 0; i < MAX_NUMNODES; i++) {
                 cutoff_node(i, start, end);
                 if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
                         unparse_node(i);
                         node_set_offline(i);
@@ -426,7 +428,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
                 if (!node_online(i))
                         setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 
         for (i = 0; i < NR_CPUS; i++) {
                 if (cpu_to_node[i] == NUMA_NO_NODE)
                         continue;
                 if (!node_isset(cpu_to_node[i], nodes_parsed))
@@ -461,7 +463,7 @@ int __node_distance(int a, int b)
 
         if (!acpi_slit)
                 return a == b ? 10 : 20;
-        index = acpi_slit->localities * node_to_pxm(a);
+        index = acpi_slit->locality_count * node_to_pxm(a);
         return acpi_slit->entry[index + node_to_pxm(b)];
 }
 
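
The srat.c conversion follows one pattern throughout: the old ACPI structs exposed C bitfields and split 64-bit addresses into lo/hi halves, while the ACPICA replacements use a plain flags word tested against mask constants plus native u64 address fields. A simplified before/after sketch (struct layouts abbreviated, not the full ACPICA definitions):

/* Before (abbreviated): bitfields and split addresses. */
struct old_mem_affinity {
        struct { unsigned enabled:1, hot_pluggable:1; } flags;
        u32 base_addr_lo, base_addr_hi;
};
/*      if (ma->flags.enabled == 0) return;
 *      start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
 */

/* After (abbreviated): flags word, mask constants, native u64. */
#define ACPI_SRAT_MEM_ENABLED           (1)     /* bit 0 */
#define ACPI_SRAT_MEM_HOT_PLUGGABLE     (1<<1)  /* bit 1 */
struct new_mem_affinity {
        u32 flags;
        u64 base_address;
};
/*      if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) return;
 *      start = ma->base_address;
 */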
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index f8b6b2800a62..faabb6e87f12 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -1,6 +1,6 @@
 /*
  * mmconfig.c - Low-level direct PCI config space access via MMCONFIG
  *
  * This is an 64bit optimized version that always keeps the full mmconfig
  * space mapped. This allows lockless config space operation.
  */
@@ -25,7 +25,7 @@ static DECLARE_BITMAP(fallback_slots, 32*MAX_CHECK_BUS);
 
 /* Static virtual mapping of the MMCONFIG aperture */
 struct mmcfg_virt {
-        struct acpi_table_mcfg_config *cfg;
+        struct acpi_mcfg_allocation *cfg;
         char __iomem *virt;
 };
 static struct mmcfg_virt *pci_mmcfg_virt;
@@ -33,14 +33,14 @@ static struct mmcfg_virt *pci_mmcfg_virt;
 static char __iomem *get_virt(unsigned int seg, unsigned bus)
 {
         int cfg_num = -1;
-        struct acpi_table_mcfg_config *cfg;
+        struct acpi_mcfg_allocation *cfg;
 
         while (1) {
                 ++cfg_num;
                 if (cfg_num >= pci_mmcfg_config_num)
                         break;
                 cfg = pci_mmcfg_virt[cfg_num].cfg;
-                if (cfg->pci_segment_group_number != seg)
+                if (cfg->pci_segment != seg)
                         continue;
                 if ((cfg->start_bus_number <= bus) &&
                     (cfg->end_bus_number >= bus))
@@ -52,7 +52,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
            this applies to all busses. */
         cfg = &pci_mmcfg_config[0];
         if (pci_mmcfg_config_num == 1 &&
-            cfg->pci_segment_group_number == 0 &&
+            cfg->pci_segment == 0 &&
             (cfg->start_bus_number | cfg->end_bus_number) == 0)
                 return pci_mmcfg_virt[0].virt;
 
@@ -170,19 +170,19 @@ void __init pci_mmcfg_init(int type)
         if ((pci_probe & PCI_PROBE_MMCONF) == 0)
                 return;
 
-        acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
+        acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
         if ((pci_mmcfg_config_num == 0) ||
             (pci_mmcfg_config == NULL) ||
-            (pci_mmcfg_config[0].base_address == 0))
+            (pci_mmcfg_config[0].address == 0))
                 return;
 
         /* Only do this check when type 1 works. If it doesn't work
            assume we run on a Mac and always use MCFG */
-        if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address,
-                        pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
+        if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
+                        pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
                         E820_RESERVED)) {
-                printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
-                        pci_mmcfg_config[0].base_address);
+                printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
+                        (unsigned long)pci_mmcfg_config[0].address);
                 printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
                 return;
         }
@@ -194,15 +194,16 @@ void __init pci_mmcfg_init(int type)
         }
         for (i = 0; i < pci_mmcfg_config_num; ++i) {
                 pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
-                pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
+                pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].address,
                                                          MMCONFIG_APER_MAX);
                 if (!pci_mmcfg_virt[i].virt) {
                         printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
                                         "segment %d\n",
-                                pci_mmcfg_config[i].pci_segment_group_number);
+                                pci_mmcfg_config[i].pci_segment);
                         return;
                 }
-                printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address);
+                printk(KERN_INFO "PCI: Using MMCONFIG at %lx\n",
+                       (unsigned long)pci_mmcfg_config[i].address);
         }
 
         unreachable_devices();
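
One detail in the final two hunks is worth calling out: the MCFG base address moved from a 32-bit base_address field to a 64-bit address field, so the old %x printk specifier no longer matches its argument. The fix prints through an unsigned long cast, which is 64 bits on x86_64, with %lx. A sketch of the pitfall:

u64 addr = pci_mmcfg_config[0].address;

/* Mismatched: %x expects an unsigned int, but a u64 is passed. */
/*      printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", addr);  */

/* Correct on x86_64, where long is 64 bits: */
printk(KERN_INFO "PCI: Using MMCONFIG at %lx\n", (unsigned long)addr);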