Diffstat (limited to 'arch/x86')

 arch/x86/include/asm/iomap.h             |  4
 arch/x86/include/asm/pci.h               |  6
 arch/x86/include/asm/tsc.h               |  2
 arch/x86/kernel/cpu/mcheck/mce_amd.c     |  4
 arch/x86/kernel/cpu/mcheck/therm_throt.c |  9
 arch/x86/kernel/cpu/perf_event.c         | 59
 arch/x86/kernel/cpu/perf_event_intel.c   | 15
 arch/x86/kernel/cpu/perf_event_p4.c      |  4
 arch/x86/kernel/trampoline.c             |  3
 arch/x86/kernel/tsc.c                    | 38
 arch/x86/mm/iomap_32.c                   |  6
 arch/x86/oprofile/nmi_int.c              | 22
 arch/x86/power/cpu.c                     |  2
 arch/x86/xen/platform-pci-unplug.c       | 18

14 files changed, 146 insertions, 46 deletions
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index f35eb45d6576..c4191b3b7056 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -26,11 +26,11 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr, enum km_type type);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
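For illustration, a minimal hypothetical caller (the pfn variable and the KM_USER0 slot are assumed here, not part of the patch) showing what the new __iomem annotation buys: sparse can now warn when the mapped pointer is dereferenced directly instead of being accessed through the MMIO helpers:

        void __iomem *vaddr;
        u32 val;

        /* map the pfn, access it via readl()/writel(), then unmap */
        vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL_WC);
        val = readl(vaddr);        /* not: val = *(u32 *)vaddr; */
        iounmap_atomic(vaddr, KM_USER0);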
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 404a880ea325..d395540ff894 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -27,6 +27,9 @@ extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
                                            int node);
 extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
 
+#ifdef CONFIG_PCI
+
+#ifdef CONFIG_PCI_DOMAINS
 static inline int pci_domain_nr(struct pci_bus *bus)
 {
         struct pci_sysdata *sd = bus->sysdata;
@@ -37,13 +40,12 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 {
         return pci_domain_nr(bus);
 }
-
+#endif
 
 /* Can be used to override the logic in pci_scan_bus for skipping
    already-configured bus numbers - to be used for buggy BIOSes
    or architectures with incomplete PCI setup by the loader */
 
-#ifdef CONFIG_PCI
 extern unsigned int pcibios_assign_all_busses(void);
 extern int pci_legacy_init(void);
 # ifdef CONFIG_ACPI
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index c0427295e8f5..1ca132fc0d03 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -59,5 +59,7 @@ extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 
 extern int notsc_setup(char *);
+extern void save_sched_clock_state(void);
+extern void restore_sched_clock_state(void);
 
 #endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 224392d8fe8c..5e975298fa81 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                 err = -ENOMEM;
                 goto out;
         }
-        if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+        if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
                 kfree(b);
                 err = -ENOMEM;
                 goto out;
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
         cpumask_setall(b->cpus);
 #else
-        cpumask_copy(b->cpus, c->llc_shared_map);
+        cpumask_set_cpu(cpu, b->cpus);
 #endif
 
         per_cpu(threshold_banks, cpu)[bank] = b;
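Worth noting: zalloc_cpumask_var() hands back a zeroed mask, so the cpumask_set_cpu() above starts from an empty set; with alloc_cpumask_var(), an off-stack mask could contain stale bits.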
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index c2a8b26d4fea..d9368eeda309 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level)
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
+                                unsigned int cpu)
 {
         int err;
-        struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
         err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
         if (err)
@@ -251,7 +252,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
         case CPU_UP_PREPARE:
         case CPU_UP_PREPARE_FROZEN:
                 mutex_lock(&therm_cpu_lock);
-                err = thermal_throttle_add_dev(sys_dev);
+                err = thermal_throttle_add_dev(sys_dev, cpu);
                 mutex_unlock(&therm_cpu_lock);
                 WARN_ON(err);
                 break;
@@ -287,7 +288,7 @@ static __init int thermal_throttle_init_device(void)
 #endif
         /* connect live CPUs to sysfs */
         for_each_online_cpu(cpu) {
-                err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+                err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
                 WARN_ON(err);
         }
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f2da20fda02d..3efdf2870a35 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                 /*
                  * event overflow
                  */
-                handled = 1;
+                handled++;
                 data.period = event->hw.last_period;
 
                 if (!x86_perf_event_set_period(event))
@@ -1200,12 +1200,20 @@ void perf_events_lapic_init(void)
         apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
+struct pmu_nmi_state {
+        unsigned int marked;
+        int handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
                          unsigned long cmd, void *__args)
 {
         struct die_args *args = __args;
-        struct pt_regs *regs;
+        unsigned int this_nmi;
+        int handled;
 
         if (!atomic_read(&active_events))
                 return NOTIFY_DONE;
@@ -1214,22 +1222,47 @@ perf_event_nmi_handler(struct notifier_block *self,
         case DIE_NMI:
         case DIE_NMI_IPI:
                 break;
-
+        case DIE_NMIUNKNOWN:
+                this_nmi = percpu_read(irq_stat.__nmi_count);
+                if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+                        /* let the kernel handle the unknown nmi */
+                        return NOTIFY_DONE;
+                /*
+                 * This one is a PMU back-to-back nmi. Two events
+                 * trigger 'simultaneously' raising two back-to-back
+                 * NMIs. If the first NMI handles both, the latter
+                 * will be empty and daze the CPU. So, we drop it to
+                 * avoid false-positive 'unknown nmi' messages.
+                 */
+                return NOTIFY_STOP;
         default:
                 return NOTIFY_DONE;
         }
 
-        regs = args->regs;
-
         apic_write(APIC_LVTPC, APIC_DM_NMI);
-        /*
-         * Can't rely on the handled return value to say it was our NMI, two
-         * events could trigger 'simultaneously' raising two back-to-back NMIs.
-         *
-         * If the first NMI handles both, the latter will be empty and daze
-         * the CPU.
-         */
-        x86_pmu.handle_irq(regs);
+
+        handled = x86_pmu.handle_irq(args->regs);
+        if (!handled)
+                return NOTIFY_DONE;
+
+        this_nmi = percpu_read(irq_stat.__nmi_count);
+        if ((handled > 1) ||
+                /* the next nmi could be a back-to-back nmi */
+            ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+             (__get_cpu_var(pmu_nmi).handled > 1))) {
+                /*
+                 * We could have two subsequent back-to-back nmis: The
+                 * first handles more than one counter, the 2nd
+                 * handles only one counter and the 3rd handles no
+                 * counter.
+                 *
+                 * This is the 2nd nmi because the previous was
+                 * handling more than one counter. We will mark the
+                 * next (3rd) and then drop it if unhandled.
+                 */
+                __get_cpu_var(pmu_nmi).marked = this_nmi + 1;
+                __get_cpu_var(pmu_nmi).handled = handled;
+        }
 
         return NOTIFY_STOP;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index d8d86d014008..ee05c90012d2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
         struct perf_sample_data data;
         struct cpu_hw_events *cpuc;
         int bit, loops;
-        u64 ack, status;
+        u64 status;
+        int handled = 0;
 
         perf_sample_data_init(&data, 0);
 
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
         loops = 0;
 again:
+        intel_pmu_ack_status(status);
         if (++loops > 100) {
                 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                 perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
         }
 
         inc_irq_stat(apic_perf_irqs);
-        ack = status;
 
         intel_pmu_lbr_read();
 
         /*
          * PEBS overflow sets bit 62 in the global status register
          */
-        if (__test_and_clear_bit(62, (unsigned long *)&status))
+        if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+                handled++;
                 x86_pmu.drain_pebs(regs);
+        }
 
         for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                 struct perf_event *event = cpuc->events[bit];
 
+                handled++;
+
                 if (!test_bit(bit, cpuc->active_mask))
                         continue;
 
@@ -761,8 +766,6 @@ again:
                 x86_pmu_stop(event);
         }
 
-        intel_pmu_ack_status(ack);
-
         /*
          * Repeat if there is more work to be done:
          */
@@ -772,7 +775,7 @@ again:
 
 done:
         intel_pmu_enable_all(0);
-        return 1;
+        return handled;
 }
 
 static struct event_constraint *
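Two things to note in this hunk. Moving intel_pmu_ack_status() to the top of the 'again:' loop acks exactly the status bits that were just read, so an overflow that lands while earlier bits are being processed re-raises the PMI instead of being acked without ever being handled. And returning the count of handled events, rather than a hard-coded 1, is what lets the perf_event_nmi_handler() change above distinguish genuine back-to-back PMU NMIs from truly unknown ones.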
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index febb12cea795..b560db3305be 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -497,6 +497,8 @@ static int p4_hw_config(struct perf_event *event)
                 event->hw.config |= event->attr.config &
                         (p4_config_pack_escr(P4_ESCR_MASK_HT) |
                          p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED));
+
+                event->hw.config &= ~P4_CCCR_FORCE_OVF;
         }
 
         rc = x86_setup_perfctr(event);
@@ -690,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
                 inc_irq_stat(apic_perf_irqs);
         }
 
-        return handled > 0;
+        return handled;
 }
 
 /*
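As with the Intel handler, p4_pmu_handle_irq() now reports how many counters it serviced; the NMI notifier uses that count to decide whether a subsequent 'unknown' NMI is really the second half of a back-to-back PMU NMI.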
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index a874495b3673..e2a595257390 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void)
         /* Copy kernel address range */
         clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
                         swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-                        min_t(unsigned long, KERNEL_PGD_PTRS,
-                              KERNEL_PGD_BOUNDARY));
+                        KERNEL_PGD_PTRS);
 
         /* Initialize low mappings */
         clone_pgd_range(trampoline_pg_dir,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ce8e50239332..d632934cb638 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -626,6 +626,44 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
         local_irq_restore(flags);
 }
 
+static unsigned long long cyc2ns_suspend;
+
+void save_sched_clock_state(void)
+{
+        if (!sched_clock_stable)
+                return;
+
+        cyc2ns_suspend = sched_clock();
+}
+
+/*
+ * Even on processors with invariant TSC, TSC gets reset in some the
+ * ACPI system sleep states. And in some systems BIOS seem to reinit TSC to
+ * arbitrary value (still sync'd across cpu's) during resume from such sleep
+ * states. To cope up with this, recompute the cyc2ns_offset for each cpu so
+ * that sched_clock() continues from the point where it was left off during
+ * suspend.
+ */
+void restore_sched_clock_state(void)
+{
+        unsigned long long offset;
+        unsigned long flags;
+        int cpu;
+
+        if (!sched_clock_stable)
+                return;
+
+        local_irq_save(flags);
+
+        __get_cpu_var(cyc2ns_offset) = 0;
+        offset = cyc2ns_suspend - sched_clock();
+
+        for_each_possible_cpu(cpu)
+                per_cpu(cyc2ns_offset, cpu) = offset;
+
+        local_irq_restore(flags);
+}
+
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
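For reference, the math behind the recomputation: per cpu, sched_clock() evaluates roughly (tsc * cyc2ns) >> CYC2NS_SCALE_FACTOR + cyc2ns_offset. With cyc2ns_offset temporarily zeroed, sched_clock() returns just the raw TSC term, so offset = cyc2ns_suspend - sched_clock() picks exactly the per-cpu offset that makes the post-resume clock continue from the value captured in save_sched_clock_state().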
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 84e236ce76ba..72fc70cf6184 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 /*
  * Map 'pfn' using fixed map 'type' and protections 'prot'
  */
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
         /*
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
         if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
                 prot = PAGE_KERNEL_UC_MINUS;
 
-        return kmap_atomic_prot_pfn(pfn, type, prot);
+        return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr, enum km_type type)
 {
         unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
         enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f6b48f6c5951..cfe4faabb0f6 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
         int error;
 
         error = sysdev_class_register(&oprofile_sysclass);
-        if (!error)
-                error = sysdev_register(&device_oprofile);
+        if (error)
+                return error;
+
+        error = sysdev_register(&device_oprofile);
+        if (error)
+                sysdev_class_unregister(&oprofile_sysclass);
+
         return error;
 }
 
@@ -580,8 +585,10 @@ static void exit_sysfs(void)
 }
 
 #else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
 #endif /* CONFIG_PM */
 
 static int __init p4_init(char **cpu_type)
@@ -695,6 +702,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
         char *cpu_type = NULL;
         int ret = 0;
 
+        using_nmi = 0;
+
         if (!cpu_has_apic)
                 return -ENODEV;
 
@@ -774,7 +783,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
         mux_init(ops);
 
-        init_sysfs();
+        ret = init_sysfs();
+        if (ret)
+                return ret;
+
         using_nmi = 1;
         printk(KERN_INFO "oprofile: using NMI interrupt.\n");
         return 0;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index e7e8c5f54956..87bb35e34ef1 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -113,6 +113,7 @@ static void __save_processor_state(struct saved_context *ctxt)
 void save_processor_state(void)
 {
         __save_processor_state(&saved_context);
+        save_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(save_processor_state);
@@ -229,6 +230,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 void restore_processor_state(void)
 {
         __restore_processor_state(&saved_context);
+        restore_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(restore_processor_state);
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 554c002a1e1a..0f456386cce5 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -72,13 +72,17 @@ void __init xen_unplug_emulated_devices(void)
 {
         int r;
 
+        /* user explicitly requested no unplug */
+        if (xen_emul_unplug & XEN_UNPLUG_NEVER)
+                return;
         /* check the version of the xen platform PCI device */
         r = check_platform_magic();
         /* If the version matches enable the Xen platform PCI driver.
-         * Also enable the Xen platform PCI driver if the version is really old
-         * and the user told us to ignore it. */
+         * Also enable the Xen platform PCI driver if the host does
+         * not support the unplug protocol (XEN_PLATFORM_ERR_MAGIC)
+         * but the user told us that unplugging is unnecessary. */
         if (r && !(r == XEN_PLATFORM_ERR_MAGIC &&
-                   (xen_emul_unplug & XEN_UNPLUG_IGNORE)))
+                   (xen_emul_unplug & XEN_UNPLUG_UNNECESSARY)))
                 return;
         /* Set the default value of xen_emul_unplug depending on whether or
          * not the Xen PV frontends and the Xen platform PCI driver have
@@ -99,7 +103,7 @@ void __init xen_unplug_emulated_devices(void)
                 }
         }
         /* Now unplug the emulated devices */
-        if (!(xen_emul_unplug & XEN_UNPLUG_IGNORE))
+        if (!(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY))
                 outw(xen_emul_unplug, XEN_IOPORT_UNPLUG);
         xen_platform_pci_unplug = xen_emul_unplug;
 }
@@ -125,8 +129,10 @@ static int __init parse_xen_emul_unplug(char *arg)
                         xen_emul_unplug |= XEN_UNPLUG_AUX_IDE_DISKS;
                 else if (!strncmp(p, "nics", l))
                         xen_emul_unplug |= XEN_UNPLUG_ALL_NICS;
-                else if (!strncmp(p, "ignore", l))
-                        xen_emul_unplug |= XEN_UNPLUG_IGNORE;
+                else if (!strncmp(p, "unnecessary", l))
+                        xen_emul_unplug |= XEN_UNPLUG_UNNECESSARY;
+                else if (!strncmp(p, "never", l))
+                        xen_emul_unplug |= XEN_UNPLUG_NEVER;
                 else
                         printk(KERN_WARNING "unrecognised option '%s' "
                                         "in parameter 'xen_emul_unplug'\n", p);
