author	Paul Mackerras <paulus@samba.org>	2006-07-31 20:37:25 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-07-31 20:37:25 -0400
commit	57cad8084e0837e0f2c97da789ec9b3f36809be9 (patch)
tree	e9c790afb4286f78cb08d9664f58baa7e876fe55 /arch/x86_64/kernel
parent	cb18bd40030c879cd93fef02fd579f74dbab473d (diff)
parent	49b1e3ea19b1c95c2f012b8331ffb3b169e4c042 (diff)
Merge branch 'merge'
Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--	arch/x86_64/kernel/e820.c		|  1
-rw-r--r--	arch/x86_64/kernel/early_printk.c	|  2
-rw-r--r--	arch/x86_64/kernel/machine_kexec.c	| 13
-rw-r--r--	arch/x86_64/kernel/mce.c		| 12
-rw-r--r--	arch/x86_64/kernel/mce_amd.c		| 21
-rw-r--r--	arch/x86_64/kernel/pci-calgary.c	| 83
-rw-r--r--	arch/x86_64/kernel/pci-swiotlb.c	|  5
-rw-r--r--	arch/x86_64/kernel/setup.c		|  2
-rw-r--r--	arch/x86_64/kernel/smpboot.c		|  4
-rw-r--r--	arch/x86_64/kernel/tce.c		| 10
-rw-r--r--	arch/x86_64/kernel/time.c		| 18
-rw-r--r--	arch/x86_64/kernel/traps.c		| 28
12 files changed, 110 insertions, 89 deletions
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index b8eee4c7888b..e56c2adf57a4 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -17,6 +17,7 @@
 #include <linux/kexec.h>
 #include <linux/module.h>
 
+#include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/e820.h>
 #include <asm/proto.h>
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
index b93ef5b51980..140051e07fa6 100644
--- a/arch/x86_64/kernel/early_printk.c
+++ b/arch/x86_64/kernel/early_printk.c
@@ -2,7 +2,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/string.h>
-#include <linux/tty.h>
+#include <linux/screen_info.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/fcntl.h>
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c
index 83fb24a02821..106076b370fc 100644
--- a/arch/x86_64/kernel/machine_kexec.c
+++ b/arch/x86_64/kernel/machine_kexec.c
@@ -207,14 +207,11 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 	__flush_tlb();
 
 
-	/* The segment registers are funny things, they are
-	 * automatically loaded from a table, in memory wherever you
-	 * set them to a specific selector, but this table is never
-	 * accessed again unless you set the segment to a different selector.
-	 *
-	 * The more common model are caches where the behide
-	 * the scenes work is done, but is also dropped at arbitrary
-	 * times.
+	/* The segment registers are funny things, they have both a
+	 * visible and an invisible part.  Whenever the visible part is
+	 * set to a specific selector, the invisible part is loaded
+	 * with from a table in memory.  At no other time is the
+	 * descriptor table in memory accessed.
 	 *
 	 * I take advantage of this here by force loading the
 	 * segments, before I zap the gdt with an invalid value.
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 88845674c661..4e017fb30fb3 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -615,7 +615,7 @@ static __cpuinit int mce_create_device(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static __cpuinit void mce_remove_device(unsigned int cpu)
+static void mce_remove_device(unsigned int cpu)
 {
 	int i;
 
@@ -626,10 +626,9 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
 	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_check_interval);
 	sysdev_unregister(&per_cpu(device_mce,cpu));
 }
-#endif
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static __cpuinit int
+static int
 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -638,18 +637,17 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_ONLINE:
 		mce_create_device(cpu);
 		break;
-#ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
 		mce_remove_device(cpu);
 		break;
-#endif
 	}
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata mce_cpu_notifier = {
+static struct notifier_block mce_cpu_notifier = {
 	.notifier_call = mce_cpu_callback,
 };
+#endif
 
 static __init int mce_init_device(void)
 {
@@ -664,7 +662,7 @@ static __init int mce_init_device(void)
 		mce_create_device(i);
 	}
 
-	register_cpu_notifier(&mce_cpu_notifier);
+	register_hotcpu_notifier(&mce_cpu_notifier);
 	misc_register(&mce_log_device);
 	return err;
 }
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index 335200aa2737..883fe747f64c 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -558,7 +558,7 @@ out:
  * of shared sysfs dir/files, and rest of the cores will be symlinked to it.
  */
 
-static __cpuinit void deallocate_threshold_block(unsigned int cpu,
+static void deallocate_threshold_block(unsigned int cpu,
 						 unsigned int bank)
 {
 	struct threshold_block *pos = NULL;
@@ -578,7 +578,7 @@ static __cpuinit void deallocate_threshold_block(unsigned int cpu,
 	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
 }
 
-static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank)
+static void threshold_remove_bank(unsigned int cpu, int bank)
 {
 	int i = 0;
 	struct threshold_bank *b;
@@ -597,7 +597,7 @@ static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank)
 	/* sibling symlink */
 	if (shared_bank[bank] && b->blocks->cpu != cpu) {
 		sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
-		per_cpu(threshold_banks, i)[bank] = NULL;
+		per_cpu(threshold_banks, cpu)[bank] = NULL;
 		return;
 	}
 
@@ -618,7 +618,7 @@ free_out:
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
 }
 
-static __cpuinit void threshold_remove_device(unsigned int cpu)
+static void threshold_remove_device(unsigned int cpu)
 {
 	unsigned int bank;
 
@@ -629,14 +629,8 @@ static __cpuinit void threshold_remove_device(unsigned int cpu)
 	}
 }
 
-#else /* !CONFIG_HOTPLUG_CPU */
-static void threshold_remove_device(unsigned int cpu)
-{
-}
-#endif
-
 /* get notified when a cpu comes on/off */
-static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
+static int threshold_cpu_callback(struct notifier_block *nfb,
 					    unsigned long action, void *hcpu)
 {
 	/* cpu was unsigned int to begin with */
@@ -659,9 +653,10 @@ static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
+static struct notifier_block threshold_cpu_notifier = {
 	.notifier_call = threshold_cpu_callback,
 };
+#endif /* CONFIG_HOTPLUG_CPU */
 
 static __init int threshold_init_device(void)
 {
@@ -673,7 +668,7 @@ static __init int threshold_init_device(void)
 		if (err)
 			return err;
 	}
-	register_cpu_notifier(&threshold_cpu_notifier);
+	register_hotcpu_notifier(&threshold_cpu_notifier);
 	return 0;
 }
 
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
index d91cb843f54d..146924ba5df5 100644
--- a/arch/x86_64/kernel/pci-calgary.c
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -1,9 +1,11 @@
 /*
  * Derived from arch/powerpc/kernel/iommu.c
  *
- * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
- * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
+ * Copyright (C) IBM Corporation, 2006
  *
+ * Author: Jon Mason <jdmason@us.ibm.com>
+ * Author: Muli Ben-Yehuda <muli@il.ibm.com>
+
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
@@ -83,7 +85,8 @@
 #define CSR_AGENT_MASK		0xffe0ffff
 
 #define MAX_NUM_OF_PHBS		8 /* how many PHBs in total? */
-#define MAX_PHB_BUS_NUM		(MAX_NUM_OF_PHBS * 2) /* max dev->bus->number */
+#define MAX_NUM_CHASSIS		8 /* max number of chassis */
+#define MAX_PHB_BUS_NUM		(MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2) /* max dev->bus->number */
 #define PHBS_PER_CALGARY	4
 
 /* register offsets in Calgary's internal register space */
@@ -108,7 +111,8 @@ static const unsigned long phb_offsets[] = {
 	0xB000 /* PHB3 */
 };
 
-void* tce_table_kva[MAX_NUM_OF_PHBS * MAX_NUMNODES];
+static char bus_to_phb[MAX_PHB_BUS_NUM];
+void* tce_table_kva[MAX_PHB_BUS_NUM];
 unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
 static int translate_empty_slots __read_mostly = 0;
 static int calgary_detected __read_mostly = 0;
@@ -117,7 +121,7 @@ static int calgary_detected __read_mostly = 0;
  * the bitmap of PHBs the user requested that we disable
  * translation on.
  */
-static DECLARE_BITMAP(translation_disabled, MAX_NUMNODES * MAX_PHB_BUS_NUM);
+static DECLARE_BITMAP(translation_disabled, MAX_PHB_BUS_NUM);
 
 static void tce_cache_blast(struct iommu_table *tbl);
 
@@ -450,7 +454,7 @@ static struct dma_mapping_ops calgary_dma_ops = {
 
 static inline int busno_to_phbid(unsigned char num)
 {
-	return bus_to_phb(num) % PHBS_PER_CALGARY;
+	return bus_to_phb[num];
 }
 
 static inline unsigned long split_queue_offset(unsigned char num)
@@ -810,7 +814,7 @@ static int __init calgary_init(void)
 	int i, ret = -ENODEV;
 	struct pci_dev *dev = NULL;
 
-	for (i = 0; i <= num_online_nodes() * MAX_NUM_OF_PHBS; i++) {
+	for (i = 0; i < MAX_PHB_BUS_NUM; i++) {
 		dev = pci_get_device(PCI_VENDOR_ID_IBM,
 				     PCI_DEVICE_ID_IBM_CALGARY,
 				     dev);
@@ -820,7 +824,7 @@ static int __init calgary_init(void)
 			calgary_init_one_nontraslated(dev);
 			continue;
 		}
-		if (!tce_table_kva[i] && !translate_empty_slots) {
+		if (!tce_table_kva[dev->bus->number] && !translate_empty_slots) {
 			pci_dev_put(dev);
 			continue;
 		}
@@ -840,7 +844,7 @@ error:
 			pci_dev_put(dev);
 			continue;
 		}
-		if (!tce_table_kva[i] && !translate_empty_slots)
+		if (!tce_table_kva[dev->bus->number] && !translate_empty_slots)
 			continue;
 		calgary_disable_translation(dev);
 		calgary_free_tar(dev);
@@ -874,9 +878,10 @@ static inline int __init determine_tce_table_size(u64 ram)
 void __init detect_calgary(void)
 {
 	u32 val;
-	int bus, table_idx;
+	int bus;
 	void *tbl;
-	int detected = 0;
+	int calgary_found = 0;
+	int phb = -1;
 
 	/*
 	 * if the user specified iommu=off or iommu=soft or we found
@@ -887,38 +892,46 @@ void __init detect_calgary(void)
 
 	specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);
 
-	for (bus = 0, table_idx = 0;
-	     bus <= num_online_nodes() * MAX_PHB_BUS_NUM;
-	     bus++) {
-		BUG_ON(bus > MAX_NUMNODES * MAX_PHB_BUS_NUM);
+	for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
+		int dev;
+
+		tce_table_kva[bus] = NULL;
+		bus_to_phb[bus] = -1;
+
 		if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY)
 			continue;
+
+		/*
+		 * There are 4 PHBs per Calgary chip. Set phb to which phb (0-3)
+		 * it is connected to releative to the clagary chip.
+		 */
+		phb = (phb + 1) % PHBS_PER_CALGARY;
+
 		if (test_bit(bus, translation_disabled)) {
 			printk(KERN_INFO "Calgary: translation is disabled for "
 			       "PHB 0x%x\n", bus);
 			/* skip this phb, don't allocate a tbl for it */
-			tce_table_kva[table_idx] = NULL;
-			table_idx++;
 			continue;
 		}
 		/*
-		 * scan the first slot of the PCI bus to see if there
-		 * are any devices present
+		 * Scan the slots of the PCI bus to see if there is a device present.
+		 * The parent bus will be the zero-ith device, so start at 1.
 		 */
-		val = read_pci_config(bus, 1, 0, 0);
-		if (val != 0xffffffff || translate_empty_slots) {
-			tbl = alloc_tce_table();
-			if (!tbl)
-				goto cleanup;
-			detected = 1;
-		} else
-			tbl = NULL;
-
-		tce_table_kva[table_idx] = tbl;
-		table_idx++;
+		for (dev = 1; dev < 8; dev++) {
+			val = read_pci_config(bus, dev, 0, 0);
+			if (val != 0xffffffff || translate_empty_slots) {
+				tbl = alloc_tce_table();
+				if (!tbl)
+					goto cleanup;
+				tce_table_kva[bus] = tbl;
+				bus_to_phb[bus] = phb;
+				calgary_found = 1;
+				break;
+			}
+		}
 	}
 
-	if (detected) {
+	if (calgary_found) {
 		iommu_detected = 1;
 		calgary_detected = 1;
 		printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. "
@@ -927,9 +940,9 @@ void __init detect_calgary(void)
 	return;
 
 cleanup:
-	for (--table_idx; table_idx >= 0; --table_idx)
-		if (tce_table_kva[table_idx])
-			free_tce_table(tce_table_kva[table_idx]);
+	for (--bus; bus >= 0; --bus)
+		if (tce_table_kva[bus])
+			free_tce_table(tce_table_kva[bus]);
 }
 
 int __init calgary_iommu_init(void)
@@ -1000,7 +1013,7 @@ static int __init calgary_parse_options(char *p)
 		if (p == endp)
 			break;
 
-		if (bridge <= (num_online_nodes() * MAX_PHB_BUS_NUM)) {
+		if (bridge < MAX_PHB_BUS_NUM) {
 			printk(KERN_INFO "Calgary: disabling "
 			       "translation for PHB 0x%x\n", bridge);
 			set_bit(bridge, translation_disabled);
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index ebdb77fe2057..6a55f87ba97f 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -31,9 +31,10 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 void pci_swiotlb_init(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
-	if (!iommu_detected && !no_iommu &&
-	    (end_pfn > MAX_DMA32_PFN || force_iommu))
+	if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
 	       swiotlb = 1;
+	if (swiotlb_force)
+		swiotlb = 1;
 	if (swiotlb) {
 		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
 		swiotlb_init();
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 0925518b58d0..8a099ff1f8bc 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <linux/user.h>
 #include <linux/a.out.h>
-#include <linux/tty.h>
+#include <linux/screen_info.h>
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/init.h>
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index b7c705969791..975380207b46 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -771,12 +771,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 	unsigned long start_rip;
 	struct create_idle c_idle = {
 		.cpu = cpu,
-		.done = COMPLETION_INITIALIZER(c_idle.done),
+		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};
 	DECLARE_WORK(work, do_fork_idle, &c_idle);
 
-	lockdep_set_class(&c_idle.done.wait.lock, &waitqueue_lock_key);
-
 	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
 	if (!cpu_gdt_descr[cpu].address &&
 	    !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
diff --git a/arch/x86_64/kernel/tce.c b/arch/x86_64/kernel/tce.c
index 8d4c67f61b8e..5530dda3f27a 100644
--- a/arch/x86_64/kernel/tce.c
+++ b/arch/x86_64/kernel/tce.c
@@ -1,8 +1,10 @@
 /*
  * Derived from arch/powerpc/platforms/pseries/iommu.c
  *
- * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
- * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Jon Mason <jdmason@us.ibm.com>
+ * Author: Muli Ben-Yehuda <muli@il.ibm.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -94,7 +96,6 @@ static inline unsigned int table_size_to_number_of_entries(unsigned char size)
 static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
 {
 	unsigned int bitmapsz;
-	unsigned int tce_table_index;
 	unsigned long bmppages;
 	int ret;
 
@@ -103,8 +104,7 @@ static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
 	/* set the tce table size - measured in entries */
 	tbl->it_size = table_size_to_number_of_entries(specified_table_size);
 
-	tce_table_index = bus_to_phb(tbl->it_busno);
-	tbl->it_base = (unsigned long)tce_table_kva[tce_table_index];
+	tbl->it_base = (unsigned long)tce_table_kva[dev->bus->number];
 	if (!tbl->it_base) {
 		printk(KERN_ERR "Calgary: iommu_table_setparms: "
 		       "no table allocated?!\n");
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index b9ff75992c16..7a9b18224182 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -28,6 +28,7 @@
 #include <linux/acpi.h>
 #ifdef CONFIG_ACPI
 #include <acpi/achware.h>	/* for PM timer frequency */
+#include <acpi/acpi_bus.h>
 #endif
 #include <asm/8253pit.h>
 #include <asm/pgtable.h>
@@ -193,7 +194,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 	   is just accounted to the spinlock function.
 	   Better would be to write these functions in assembler again
 	   and check exactly. */
-	if (in_lock_functions(pc)) {
+	if (!user_mode(regs) && in_lock_functions(pc)) {
 		char *v = *(char **)regs->rsp;
 		if ((v >= _stext && v <= _etext) ||
 		    (v >= _sinittext && v <= _einittext) ||
@@ -953,11 +954,18 @@ __cpuinit int unsynchronized_tsc(void)
 #ifdef CONFIG_SMP
 	if (apic_is_clustered_box())
 		return 1;
-	/* Intel systems are normally all synchronized. Exceptions
-	   are handled in the check above. */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		return 0;
 #endif
+	/* Most intel systems have synchronized TSCs except for
+	   multi node systems */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+#ifdef CONFIG_ACPI
+		/* But TSC doesn't tick in C3 so don't use it there */
+		if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 100)
+			return 1;
+#endif
+		return 0;
+	}
+
 	/* Assume multi socket systems are not synchronized */
 	return num_present_cpus() > 1;
 }
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 79d05c482072..4e9938dee060 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -76,13 +76,13 @@ int register_die_notifier(struct notifier_block *nb)
 	vmalloc_sync_all();
 	return atomic_notifier_chain_register(&die_chain, nb);
 }
-EXPORT_SYMBOL(register_die_notifier);
+EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
 
 int unregister_die_notifier(struct notifier_block *nb)
 {
 	return atomic_notifier_chain_unregister(&die_chain, nb);
 }
-EXPORT_SYMBOL(unregister_die_notifier);
+EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
 
 static inline void conditional_sti(struct pt_regs *regs)
 {
@@ -254,7 +254,6 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
 {
 	const unsigned cpu = safe_smp_processor_id();
 	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
-	int i = 11;
 	unsigned used = 0;
 
 	printk("\nCall Trace:\n");
@@ -275,11 +274,20 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
 			if (unwind_init_blocked(&info, tsk) == 0)
 				unw_ret = show_trace_unwind(&info, NULL);
 		}
-		if (unw_ret > 0) {
-			if (call_trace > 0)
+		if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
+#ifdef CONFIG_STACK_UNWIND
+			unsigned long rip = info.regs.rip;
+			print_symbol("DWARF2 unwinder stuck at %s\n", rip);
+			if (call_trace == 1) {
+				printk("Leftover inexact backtrace:\n");
+				stack = (unsigned long *)info.regs.rsp;
+			} else if (call_trace > 1)
 				return;
-			printk("Legacy call trace:");
-			i = 18;
+			else
+				printk("Full inexact backtrace again:\n");
+#else
+			printk("Inexact backtrace:\n");
+#endif
 		}
 	}
 
@@ -521,7 +529,7 @@ void __kprobes oops_end(unsigned long flags)
 	/* Nest count reaches zero, release the lock. */
 	spin_unlock_irqrestore(&die_lock, flags);
 	if (panic_on_oops)
-		panic("Oops");
+		panic("Fatal exception: panic_on_oops");
 }
 
 void __kprobes __die(const char * str, struct pt_regs * regs, long err)
@@ -1118,8 +1126,10 @@ static int __init call_trace_setup(char *s)
 		call_trace = -1;
 	else if (strcmp(s, "both") == 0)
 		call_trace = 0;
-	else if (strcmp(s, "new") == 0)
+	else if (strcmp(s, "newfallback") == 0)
 		call_trace = 1;
+	else if (strcmp(s, "new") == 0)
+		call_trace = 2;
 	return 1;
 }
 __setup("call_trace=", call_trace_setup);