author		Ingo Molnar <mingo@elte.hu>	2010-07-21 15:43:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-07-21 15:43:06 -0400
commit		9dcdbf7a33d9018ac5d45debcf261be648bdd56a
tree		bbcc1a018f11ff76cd7ce174ef3ffe2c02da07ee /arch/x86
parent		cc5edb0eb9ce892b530e34a5d110382483587942
parent		cd5b8f8755a89a57fc8c408d284b8b613f090345
Merge branch 'linus' into perf/core
Merge reason: Pick up the latest perf fixes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/x86_init.h   |  2
-rw-r--r--  arch/x86/kernel/apic/apic.c       |  2
-rw-r--r--  arch/x86/kernel/early-quirks.c    | 18
-rw-r--r--  arch/x86/kernel/kprobes.c         |  2
-rw-r--r--  arch/x86/kernel/mrst.c            |  7
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c  |  4
-rw-r--r--  arch/x86/kernel/quirks.c          |  5
-rw-r--r--  arch/x86/kernel/setup_percpu.c    | 17
-rw-r--r--  arch/x86/kernel/x86_init.c        |  7
-rw-r--r--  arch/x86/kvm/mmu.c                |  4
-rw-r--r--  arch/x86/kvm/vmx.c                |  7
-rw-r--r--  arch/x86/mm/pat_rbtree.c          | 34
-rw-r--r--  arch/x86/pci/i386.c               |  1
-rw-r--r--  arch/x86/pci/mrst.c               |  7
14 files changed, 61 insertions(+), 56 deletions(-)
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 519b54327d75..baa579c8e038 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -142,6 +142,7 @@ struct x86_cpuinit_ops {
  * @set_wallclock: set time back to HW clock
  * @is_untracked_pat_range exclude from PAT logic
  * @nmi_init enable NMI on cpus
+ * @i8042_detect pre-detect if i8042 controller exists
  */
 struct x86_platform_ops {
 	unsigned long (*calibrate_tsc)(void);
@@ -150,6 +151,7 @@ struct x86_platform_ops {
 	void (*iommu_shutdown)(void);
 	bool (*is_untracked_pat_range)(u64 start, u64 end);
 	void (*nmi_init)(void);
+	int (*i8042_detect)(void);
 };
 
 extern struct x86_init_ops x86_init;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index c02cc692985c..a96489ee6cab 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -921,7 +921,7 @@ void disable_local_APIC(void)
 	unsigned int value;
 
 	/* APIC hasn't been mapped yet */
-	if (!apic_phys)
+	if (!x2apic_mode && !apic_phys)
 		return;
 
 	clear_local_APIC();
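Note: the extra x2apic_mode test matters because in x2APIC mode the local APIC is reached through MSRs rather than the fixmap MMIO window, so apic_phys can legitimately be zero while the APIC still needs to be cleared. A minimal sketch of the two access paths (illustrative only, not an excerpt from apic.c):

/* Sketch: why no MMIO mapping is required in x2APIC mode. */
static void sketch_apic_write(u32 reg, u32 val)
{
	if (x2apic_mode)
		/* x2APIC: registers live in MSR space at APIC_BASE_MSR + (reg >> 4) */
		wrmsr(APIC_BASE_MSR + (reg >> 4), val, 0);
	else
		/* xAPIC: registers live in the fixmap-mapped MMIO page */
		native_apic_mem_write(reg, val);
}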
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index ebdb85cf2686..e5cc7e82e60d 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -18,6 +18,7 @@
 #include <asm/apic.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
+#include <asm/hpet.h>
 
 static void __init fix_hypertransport_config(int num, int slot, int func)
 {
@@ -191,6 +192,21 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 }
 #endif
 
+/*
+ * Force the read back of the CMP register in hpet_next_event()
+ * to work around the problem that the CMP register write seems to be
+ * delayed. See hpet_next_event() for details.
+ *
+ * We do this on all SMBUS incarnations for now until we have more
+ * information about the affected chipsets.
+ */
+static void __init ati_hpet_bugs(int num, int slot, int func)
+{
+#ifdef CONFIG_HPET_TIMER
+	hpet_readback_cmp = 1;
+#endif
+}
+
 #define QFLAG_APPLY_ONCE 0x1
 #define QFLAG_APPLIED 0x2
 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -220,6 +236,8 @@ static struct chipset early_qrk[] __initdata = {
 	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
 	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
 	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
+	{ PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_hpet_bugs },
 	{}
 };
 
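Note: the new early_qrk[] entry uses PCI_ANY_ID for the device ID, so every ATI SMBus incarnation picks up ati_hpet_bugs() regardless of revision. Roughly, the early-quirk scan matches an entry like this (a paraphrase of the matching done in this file, not the literal code):

/* Paraphrased matching logic for one early_qrk[] entry. */
static bool quirk_matches(const struct chipset *q,
			  u16 vendor, u16 device, u16 class)
{
	if (q->vendor != PCI_ANY_ID && q->vendor != vendor)
		return false;
	if (q->device != PCI_ANY_ID && q->device != device)
		return false;
	/* class_mask selects which class bits must match */
	if ((q->class ^ class) & q->class_mask)
		return false;
	return true;	/* caller then invokes q->f(num, slot, func) */
}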
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 175f85ceace3..1bfb6cf4dd55 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -642,8 +642,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 			/* Skip cs, ip, orig_ax and gs. */	\
 			"	subl $16, %esp\n"	\
 			"	pushl %fs\n"		\
-			"	pushl %ds\n"		\
 			"	pushl %es\n"		\
+			"	pushl %ds\n"		\
 			"	pushl %eax\n"		\
 			"	pushl %ebp\n"		\
 			"	pushl %edi\n"		\
diff --git a/arch/x86/kernel/mrst.c b/arch/x86/kernel/mrst.c
index e796448f0eb5..5915e0b33303 100644
--- a/arch/x86/kernel/mrst.c
+++ b/arch/x86/kernel/mrst.c
@@ -216,6 +216,12 @@ static void __init mrst_setup_boot_clock(void)
 	setup_boot_APIC_clock();
 };
 
+/* MID systems don't have i8042 controller */
+static int mrst_i8042_detect(void)
+{
+	return 0;
+}
+
 /*
  * Moorestown specific x86_init function overrides and early setup
  * calls.
@@ -233,6 +239,7 @@ void __init x86_mrst_early_setup(void)
 	x86_cpuinit.setup_percpu_clockev = mrst_setup_secondary_clock;
 
 	x86_platform.calibrate_tsc = mrst_calibrate_tsc;
+	x86_platform.i8042_detect = mrst_i8042_detect;
 	x86_init.pci.init = pci_mrst_init;
 	x86_init.pci.fixup_irqs = x86_init_noop;
 
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 0b96b5589f08..078d4ec1a9d9 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -110,7 +110,7 @@ int use_calgary __read_mostly = 0;
  * x3950 (PCIE): 8 chassis, 32 PHBs per chassis = 256
  * x3950 (PCIX): 8 chassis, 16 PHBs per chassis = 128
  */
-#define MAX_PHB_BUS_NUM 384
+#define MAX_PHB_BUS_NUM 256
 
 #define PHBS_PER_CALGARY 4
 
@@ -1056,8 +1056,6 @@ static int __init calgary_init_one(struct pci_dev *dev)
 	struct iommu_table *tbl;
 	int ret;
 
-	BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
-
 	bbar = busno_to_bbar(dev->bus->number);
 	ret = calgary_setup_tar(dev, bbar);
 	if (ret)
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index e72d3fc6547d..939b9e98245f 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -498,15 +498,10 @@ void force_hpet_resume(void)
  * See erratum #27 (Misinterpreted MSI Requests May Result in
  * Corrupted LPC DMA Data) in AMD Publication #46837,
  * "SB700 Family Product Errata", Rev. 1.0, March 2010.
- *
- * Also force the read back of the CMP register in hpet_next_event()
- * to work around the problem that the CMP register write seems to be
- * delayed. See hpet_next_event() for details.
  */
 static void force_disable_hpet_msi(struct pci_dev *unused)
 {
 	hpet_msi_disable = 1;
-	hpet_readback_cmp = 1;
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index de3b63ae3da2..a60df9ae6454 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -238,6 +238,15 @@ void __init setup_per_cpu_areas(void)
 #ifdef CONFIG_NUMA
 		per_cpu(x86_cpu_to_node_map, cpu) =
 			early_per_cpu_map(x86_cpu_to_node_map, cpu);
+		/*
+		 * Ensure that the boot cpu numa_node is correct when the boot
+		 * cpu is on a node that doesn't have memory installed.
+		 * Also cpu_up() will call cpu_to_node() for APs when
+		 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set
+		 * up later with c_init aka intel_init/amd_init.
+		 * So set them all (boot cpu and all APs).
+		 */
+		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
 #endif
 #endif
 		/*
@@ -257,14 +266,6 @@ void __init setup_per_cpu_areas(void)
 	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
 #endif
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
-	/*
-	 * make sure boot cpu numa_node is right, when boot cpu is on the
-	 * node that doesn't have mem installed
-	 */
-	set_cpu_numa_node(boot_cpu_id, early_cpu_to_node(boot_cpu_id));
-#endif
-
 	/* Setup node to cpumask map */
 	setup_node_to_cpumask_map();
 
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 61a1e8c7e19f..cd6da6bf3eca 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -5,6 +5,7 @@
  */
 #include <linux/init.h>
 #include <linux/ioport.h>
+#include <linux/module.h>
 
 #include <asm/bios_ebda.h>
 #include <asm/paravirt.h>
@@ -85,6 +86,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
 };
 
 static void default_nmi_init(void) { };
+static int default_i8042_detect(void) { return 1; };
 
 struct x86_platform_ops x86_platform = {
 	.calibrate_tsc = native_calibrate_tsc,
@@ -92,5 +94,8 @@ struct x86_platform_ops x86_platform = {
 	.set_wallclock = mach_set_rtc_mmss,
 	.iommu_shutdown = iommu_shutdown_noop,
 	.is_untracked_pat_range = is_ISA_range,
-	.nmi_init = default_nmi_init
+	.nmi_init = default_nmi_init,
+	.i8042_detect = default_i8042_detect
 };
+
+EXPORT_SYMBOL_GPL(x86_platform);
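Note: together with the x86_init.h and mrst.c changes above, every platform now has an i8042_detect() hook: the default says a controller may be present (returns 1), Moorestown says definitively there is none (returns 0), and x86_platform is exported so a modular i8042 driver can consult it. The actual call site is not part of this merge; a consumer would look roughly like this (function name is hypothetical):

/* Hypothetical consumer: bail out of i8042 probing early on MID platforms. */
static int example_i8042_platform_check(void)
{
	/* 0 == platform is certain there is no i8042 controller */
	if (x86_platform.i8042_detect() == 0)
		return -ENODEV;

	return 0;	/* fall back to the usual hardware probe */
}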
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a6f695d76928..b1ed0a1a5913 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1879,6 +1879,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			pgprintk("hfn old %lx new %lx\n",
 				 spte_to_pfn(*sptep), pfn);
 			rmap_remove(vcpu->kvm, sptep);
+			__set_spte(sptep, shadow_trap_nonpresent_pte);
+			kvm_flush_remote_tlbs(vcpu->kvm);
 		} else
 			was_rmapped = 1;
 	}
@@ -2924,7 +2926,7 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
 	return kvm_mmu_zap_page(kvm, page) + 1;
 }
 
-static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
+static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	struct kvm *kvm;
 	struct kvm *kvm_freed = NULL;
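Note: the mmu_shrink() signature change simply tracks the 2.6.35 shrinker API, which started passing the struct shrinker pointer to the callback so shared callbacks can tell their instances apart. The registration pattern such a callback has to match looks like this (names other than the shrinker API itself are illustrative):

static int example_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan) {
		/* try to free up to nr_to_scan cached objects here */
	}
	/* report (an estimate of) how many freeable objects remain */
	return 0;
}

static struct shrinker example_shrinker = {
	.shrink = example_shrink,
	.seeks = DEFAULT_SEEKS,
};

/* register_shrinker(&example_shrinker) at init,
 * unregister_shrinker(&example_shrinker) at teardown. */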
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 859a01a07dbf..ee03679efe78 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1744,18 +1744,15 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 			     (guest_tr_ar & ~AR_TYPE_MASK)
 			     | AR_TYPE_BUSY_64_TSS);
 	}
-	vcpu->arch.efer |= EFER_LMA;
-	vmx_set_efer(vcpu, vcpu->arch.efer);
+	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
 }
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.efer &= ~EFER_LMA;
-
 	vmcs_write32(VM_ENTRY_CONTROLS,
 		     vmcs_read32(VM_ENTRY_CONTROLS)
 		     & ~VM_ENTRY_IA32E_MODE);
-	vmx_set_efer(vcpu, vcpu->arch.efer);
+	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
 }
 
 #endif
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index f20eeec85a86..8acaddd0fb21 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -34,8 +34,7 @@
  * memtype_lock protects the rbtree.
  */
 
-static void memtype_rb_augment_cb(struct rb_node *node);
-static struct rb_root memtype_rbroot = RB_AUGMENT_ROOT(&memtype_rb_augment_cb);
+static struct rb_root memtype_rbroot = RB_ROOT;
 
 static int is_node_overlap(struct memtype *node, u64 start, u64 end)
 {
@@ -56,7 +55,7 @@ static u64 get_subtree_max_end(struct rb_node *node)
 }
 
 /* Update 'subtree_max_end' for a node, based on node and its children */
-static void update_node_max_end(struct rb_node *node)
+static void memtype_rb_augment_cb(struct rb_node *node, void *__unused)
 {
 	struct memtype *data;
 	u64 max_end, child_max_end;
@@ -78,25 +77,6 @@ static void update_node_max_end(struct rb_node *node)
 	data->subtree_max_end = max_end;
 }
 
-/* Update 'subtree_max_end' for a node and all its ancestors */
-static void update_path_max_end(struct rb_node *node)
-{
-	u64 old_max_end, new_max_end;
-
-	while (node) {
-		struct memtype *data = container_of(node, struct memtype, rb);
-
-		old_max_end = data->subtree_max_end;
-		update_node_max_end(node);
-		new_max_end = data->subtree_max_end;
-
-		if (new_max_end == old_max_end)
-			break;
-
-		node = rb_parent(node);
-	}
-}
-
 /* Find the first (lowest start addr) overlapping range from rb tree */
 static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
 				u64 start, u64 end)
@@ -190,12 +170,6 @@ failure:
 	return -EBUSY;
 }
 
-static void memtype_rb_augment_cb(struct rb_node *node)
-{
-	if (node)
-		update_path_max_end(node);
-}
-
 static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
 {
 	struct rb_node **node = &(root->rb_node);
@@ -213,6 +187,7 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
 
 	rb_link_node(&newdata->rb, parent, node);
 	rb_insert_color(&newdata->rb, root);
+	rb_augment_insert(&newdata->rb, memtype_rb_augment_cb, NULL);
 }
 
 int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
@@ -234,13 +209,16 @@ int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
 
 struct memtype *rbt_memtype_erase(u64 start, u64 end)
 {
+	struct rb_node *deepest;
 	struct memtype *data;
 
 	data = memtype_rb_exact_match(&memtype_rbroot, start, end);
 	if (!data)
 		goto out;
 
+	deepest = rb_augment_erase_begin(&data->rb);
 	rb_erase(&data->rb, &memtype_rbroot);
+	rb_augment_erase_end(deepest, memtype_rb_augment_cb, NULL);
 out:
 	return data;
 }
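Note: the net effect is to drop the home-grown propagation helpers and lean on the generic augmented-rbtree hooks of that kernel: rb_augment_insert() re-propagates subtree_max_end after an insert, and the rb_augment_erase_begin()/rb_augment_erase_end() pair brackets rb_erase() so the callback is replayed from the deepest node touched by rebalancing. The general shape of the pattern, with illustrative data-structure names around the real rb_augment_* calls used above:

struct range_node {
	struct rb_node rb;
	u64 start, end;
	u64 subtree_max_end;	/* augmented: max 'end' in this subtree */
};

/* Recompute the augmented field of one node from itself and its children. */
static void range_augment_cb(struct rb_node *node, void *unused)
{
	/* ... same job as memtype_rb_augment_cb() above ... */
}

static void range_insert(struct rb_root *root, struct range_node *new,
			 struct rb_node *parent, struct rb_node **link)
{
	rb_link_node(&new->rb, parent, link);
	rb_insert_color(&new->rb, root);
	rb_augment_insert(&new->rb, range_augment_cb, NULL);
}

static void range_erase(struct rb_root *root, struct range_node *victim)
{
	struct rb_node *deepest = rb_augment_erase_begin(&victim->rb);

	rb_erase(&victim->rb, root);
	rb_augment_erase_end(deepest, range_augment_cb, NULL);
}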
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 6fdb3ec30c31..55253095be84 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -184,6 +184,7 @@ static void __init pcibios_allocate_resources(int pass)
 					idx, r, disabled, pass);
 			if (pci_claim_resource(dev, idx) < 0) {
 				/* We'll assign a new address later */
+				dev->fw_addr[idx] = r->start;
 				r->end -= r->start;
 				r->start = 0;
 			}
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index 7ef3a2735df3..cb29191cee58 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -66,8 +66,9 @@ static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
 					  devfn, pos, 4, &pcie_cap))
 			return 0;
 
-		if (pcie_cap == 0xffffffff)
-			return 0;
+		if (PCI_EXT_CAP_ID(pcie_cap) == 0x0000 ||
+			PCI_EXT_CAP_ID(pcie_cap) == 0xffff)
+			break;
 
 		if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) {
 			raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
@@ -76,7 +77,7 @@ static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
 			return pos;
 		}
 
-		pos = pcie_cap >> 20;
+		pos = PCI_EXT_CAP_NEXT(pcie_cap);
 	}
 
 	return 0;
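Note: with this fix the loop follows the standard extended-capability walk: a header whose capability ID is all zeros or all ones terminates the list, and PCI_EXT_CAP_NEXT() (bits 31:20, masked down to a dword-aligned offset) yields the next position instead of the unmasked ">> 20". Outside early/raw accessors the PCI core already offers pci_find_ext_capability(); a generic version of the same walk looks roughly like this (sketch, not the mrst.c code):

#include <linux/pci.h>

/* Illustrative walk over PCI Express extended capabilities. */
static int example_find_ext_cap(struct pci_dev *dev, int cap)
{
	int pos = 0x100;	/* extended config space starts at offset 0x100 */
	u32 header;

	while (pos) {
		if (pci_read_config_dword(dev, pos, &header))
			break;
		/* 0x0000 / 0xffff capability IDs mean "nothing here" */
		if (PCI_EXT_CAP_ID(header) == 0x0000 ||
		    PCI_EXT_CAP_ID(header) == 0xffff)
			break;
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;
		pos = PCI_EXT_CAP_NEXT(header);
	}
	return 0;
}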