author     Tejun Heo <tj@kernel.org>    2011-05-02 08:16:37 -0400
committer  Tejun Heo <tj@kernel.org>    2011-05-02 08:16:47 -0400
commit     ba67cf5cf2ce10ad86a212b70f8c7c75d93a5016
tree       70242f5927c6d6454bd352ff78f956cfc5238f59 /arch/x86/kernel
parent     aff364860aa105b2deacc6f21ec8ef524460e3fc
parent     2be19102b71c1a45d37fec50303791daa1a06869

Merge branch 'x86/urgent' into x86-mm
Merge reason: Pick up the following two fix commits.
2be19102b7: x86, NUMA: Fix empty memblk detection in numa_cleanup_meminfo()
765af22da8: x86-32, NUMA: Fix ACPI NUMA init broken by recent x86-64 change
Scheduled NUMA init 32/64bit unification changes depend on these.
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/aperture_64.c          |  2
 arch/x86/kernel/apic/io_apic.c         | 10
 arch/x86/kernel/apm_32.c               |  5
 arch/x86/kernel/cpu/amd.c              | 21
 arch/x86/kernel/cpu/perf_event.c       | 22
 arch/x86/kernel/cpu/perf_event_amd.c   | 22
 arch/x86/kernel/cpu/perf_event_intel.c | 38
 arch/x86/kernel/cpu/perf_event_p4.c    | 19
 arch/x86/kernel/devicetree.c           |  2
 arch/x86/kernel/pci-gart_64.c          |  9
 arch/x86/kernel/setup.c                |  5
 11 files changed, 126 insertions(+), 29 deletions(-)

diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 86d1ad4962a7..73fb469908c6 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -499,7 +499,7 @@ out:
 		 * Don't enable translation yet but enable GART IO and CPU
 		 * accesses and set DISTLBWALKPRB since GART table memory is UC.
 		 */
-		u32 ctl = DISTLBWALKPRB | aper_order << 1;
+		u32 ctl = aper_order << 1;
 
 		bus = amd_nb_bus_dev_ranges[i].bus;
 		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 68df09bba92e..45fd33d1fd3a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -128,8 +128,8 @@ static int __init parse_noapic(char *str)
 }
 early_param("noapic", parse_noapic);
 
-static int io_apic_setup_irq_pin_once(unsigned int irq, int node,
-				      struct io_apic_irq_attr *attr);
+static int io_apic_setup_irq_pin(unsigned int irq, int node,
+				 struct io_apic_irq_attr *attr);
 
 /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
 void mp_save_irq(struct mpc_intsrc *m)
@@ -3570,7 +3570,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 }
 #endif /* CONFIG_HT_IRQ */
 
-int
+static int
 io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
 {
 	struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
@@ -3585,8 +3585,8 @@ io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
 	return ret;
 }
 
-static int io_apic_setup_irq_pin_once(unsigned int irq, int node,
-				      struct io_apic_irq_attr *attr)
+int io_apic_setup_irq_pin_once(unsigned int irq, int node,
+			       struct io_apic_irq_attr *attr)
 {
 	unsigned int id = attr->ioapic, pin = attr->ioapic_pin;
 	int ret;
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 0b4be431c620..adee12e0da1f 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -228,6 +228,7 @@
 #include <linux/kthread.h>
 #include <linux/jiffies.h>
 #include <linux/acpi.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -1238,6 +1239,7 @@ static int suspend(int vetoable)
 
 	local_irq_disable();
 	sysdev_suspend(PMSG_SUSPEND);
+	syscore_suspend();
 
 	local_irq_enable();
 
@@ -1255,6 +1257,7 @@ static int suspend(int vetoable)
 		apm_error("suspend", err);
 	err = (err == APM_SUCCESS) ? 0 : -EIO;
 
+	syscore_resume();
 	sysdev_resume();
 	local_irq_enable();
 
@@ -1280,6 +1283,7 @@ static void standby(void)
 
 	local_irq_disable();
 	sysdev_suspend(PMSG_SUSPEND);
+	syscore_suspend();
 	local_irq_enable();
 
 	err = set_system_power_state(APM_STATE_STANDBY);
@@ -1287,6 +1291,7 @@ static void standby(void)
 		apm_error("standby", err);
 
 	local_irq_disable();
+	syscore_resume();
 	sysdev_resume();
 	local_irq_enable();
 
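
The apm_32.c hunks add syscore_suspend()/syscore_resume() calls next to the existing sysdev ones, so code registered through the then-new syscore interface also runs on APM suspend and standby. For reference, a minimal sketch of that interface as a module (the example_* names are hypothetical; syscore ops run one at a time with interrupts disabled):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>

/* runs after devices (and sysdevs) are suspended, with IRQs disabled */
static int example_syscore_suspend(void)
{
	/* save any late, interrupts-off state here; 0 means success */
	return 0;
}

/* runs before devices resume, again with IRQs disabled */
static void example_syscore_resume(void)
{
	/* restore whatever example_syscore_suspend() saved */
}

static struct syscore_ops example_syscore_ops = {
	.suspend = example_syscore_suspend,
	.resume  = example_syscore_resume,
};

static int __init example_init(void)
{
	register_syscore_ops(&example_syscore_ops);
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");
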
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 3ecece0217ef..bb9eb29a52dd 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -615,6 +615,25 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	/* As a rule processors have APIC timer running in deep C states */
 	if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400))
 		set_cpu_cap(c, X86_FEATURE_ARAT);
+
+	/*
+	 * Disable GART TLB Walk Errors on Fam10h. We do this here
+	 * because this is always needed when GART is enabled, even in a
+	 * kernel which has no MCE support built in.
+	 */
+	if (c->x86 == 0x10) {
+		/*
+		 * BIOS should disable GartTlbWlk Errors themself. If
+		 * it doesn't do it here as suggested by the BKDG.
+		 *
+		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
+		 */
+		u64 mask;
+
+		rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
+		mask |= (1 << 10);
+		wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+	}
 }
 
 #ifdef CONFIG_X86_32
@@ -679,7 +698,7 @@ cpu_dev_register(amd_cpu_dev);
  */
 
 const int amd_erratum_400[] =
-	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
+	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x0f, 0x4, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
 EXPORT_SYMBOL_GPL(amd_erratum_400);
 
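
The init_amd() hunk masks GartTlbWlkErr reporting by setting bit 10 in MC4's mask register whenever the BIOS forgot to. Whether that bit is already set can be checked from user space; a sketch, assuming MSR_AMD64_MCx_MASK(4) resolves to 0xc0010048 (as in msr-index.h of that era) and that the msr module is loaded:

#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_AMD64_MC4_MASK	0xc0010048	/* assumed: MSR_AMD64_MCx_MASK(4) */

int main(void)
{
	uint64_t mask;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");	/* needs root and modprobe msr */
		return 1;
	}
	/* the msr driver addresses MSRs by file offset */
	if (pread(fd, &mask, sizeof(mask), MSR_AMD64_MC4_MASK) != sizeof(mask)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("MC4_MASK = %#" PRIx64 ", GartTlbWlkErr %smasked\n",
	       mask, (mask & (1ULL << 10)) ? "" : "not ");
	close(fd);
	return 0;
}
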
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index eed3673a8656..e638689279d3 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -586,8 +586,12 @@ static int x86_setup_perfctr(struct perf_event *event)
 			return -EOPNOTSUPP;
 	}
 
+	/*
+	 * Do not allow config1 (extended registers) to propagate,
+	 * there's no sane user-space generalization yet:
+	 */
 	if (attr->type == PERF_TYPE_RAW)
-		return x86_pmu_extra_regs(event->attr.config, event);
+		return 0;
 
 	if (attr->type == PERF_TYPE_HW_CACHE)
 		return set_ext_hw_attr(hwc, event);
@@ -609,8 +613,8 @@ static int x86_setup_perfctr(struct perf_event *event)
 	/*
 	 * Branch tracing:
 	 */
-	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
-	    (hwc->sample_period == 1)) {
+	if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
+	    !attr->freq && hwc->sample_period == 1) {
 		/* BTS is not supported by this architecture. */
 		if (!x86_pmu.bts_active)
 			return -EOPNOTSUPP;
@@ -1284,6 +1288,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
+	/*
+	 * Some chipsets need to unmask the LVTPC in a particular spot
+	 * inside the nmi handler. As a result, the unmasking was pushed
+	 * into all the nmi handlers.
+	 *
+	 * This generic handler doesn't seem to have any issues where the
+	 * unmasking occurs so it was left at the top.
+	 */
+	apic_write(APIC_LVTPC, APIC_DM_NMI);
+
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		if (!test_bit(idx, cpuc->active_mask)) {
 			/*
@@ -1370,8 +1384,6 @@ perf_event_nmi_handler(struct notifier_block *self,
 		return NOTIFY_DONE;
 	}
 
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-
 	handled = x86_pmu.handle_irq(args->regs);
 	if (!handled)
 		return NOTIFY_DONE;
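
The tightened branch-tracing test means BTS is only selected for a fixed-period branch-instructions event with period 1; a frequency event (attr.freq set) now falls through to a normal counter. A user-space sketch of the request that still maps to BTS, using the usual raw perf_event_open(2) syscall wrapper (glibc provides none):

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
	attr.sample_period = 1;	/* fixed period of 1 ... */
	attr.freq = 0;		/* ... and not a frequency event: BTS candidate */

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this task, any CPU */
	if (fd < 0)
		perror("perf_event_open");	/* EOPNOTSUPP if BTS unavailable */
	else
		close(fd);
	return 0;
}
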
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 461f62bbd774..cf4e369cea67 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -8,7 +8,7 @@ static __initconst const u64 amd_hw_cache_event_ids
  [ C(L1D) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
-		[ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses */
+		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses */
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
@@ -427,7 +427,9 @@ static __initconst const struct x86_pmu amd_pmu = {
  *
  * Exceptions:
  *
+ * 0x000 FP PERF_CTL[3], PERF_CTL[5:3] (*)
  * 0x003 FP PERF_CTL[3]
+ * 0x004 FP PERF_CTL[3], PERF_CTL[5:3] (*)
  * 0x00B FP PERF_CTL[3]
  * 0x00D FP PERF_CTL[3]
  * 0x023 DE PERF_CTL[2:0]
@@ -448,6 +450,8 @@ static __initconst const struct x86_pmu amd_pmu = {
  * 0x0DF LS PERF_CTL[5:0]
  * 0x1D6 EX PERF_CTL[5:0]
  * 0x1D8 EX PERF_CTL[5:0]
+ *
+ * (*) depending on the umask all FPU counters may be used
  */
 
 static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
@@ -460,18 +464,28 @@ static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
 static struct event_constraint *
 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
-	unsigned int event_code = amd_get_event_code(&event->hw);
+	struct hw_perf_event *hwc = &event->hw;
+	unsigned int event_code = amd_get_event_code(hwc);
 
 	switch (event_code & AMD_EVENT_TYPE_MASK) {
 	case AMD_EVENT_FP:
 		switch (event_code) {
+		case 0x000:
+			if (!(hwc->config & 0x0000F000ULL))
+				break;
+			if (!(hwc->config & 0x00000F00ULL))
+				break;
+			return &amd_f15_PMC3;
+		case 0x004:
+			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
+				break;
+			return &amd_f15_PMC3;
 		case 0x003:
 		case 0x00B:
 		case 0x00D:
 			return &amd_f15_PMC3;
-		default:
-			return &amd_f15_PMC53;
 		}
+		return &amd_f15_PMC53;
 	case AMD_EVENT_LS:
 	case AMD_EVENT_DC:
 	case AMD_EVENT_EX_LS:
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 8fc2b2cee1da..e61539b07d2c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -25,7 +25,7 @@ struct intel_percore {
 /*
  * Intel PerfMon, used on Core and later.
  */
-static const u64 intel_perfmon_event_map[] =
+static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
 {
   [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
   [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
@@ -391,12 +391,12 @@ static __initconst const u64 nehalem_hw_cache_event_ids
 {
  [ C(L1D) ] = {
 	[ C(OP_READ) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
-		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
+		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
+		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
 	},
 	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
-		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
+		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
+		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
 	},
 	[ C(OP_PREFETCH) ] = {
 		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
@@ -933,6 +933,16 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
+	/*
+	 * Some chipsets need to unmask the LVTPC in a particular spot
+	 * inside the nmi handler. As a result, the unmasking was pushed
+	 * into all the nmi handlers.
+	 *
+	 * This handler doesn't seem to have any issues with the unmasking
+	 * so it was left at the top.
+	 */
+	apic_write(APIC_LVTPC, APIC_DM_NMI);
+
 	intel_pmu_disable_all();
 	handled = intel_pmu_drain_bts_buffer();
 	status = intel_pmu_get_status();
@@ -998,6 +1008,9 @@ intel_bts_constraints(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned int hw_event, bts_event;
 
+	if (event->attr.freq)
+		return NULL;
+
 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
@@ -1305,7 +1318,7 @@ static void intel_clovertown_quirks(void)
 	 * AJ106 could possibly be worked around by not allowing LBR
 	 *       usage from PEBS, including the fixup.
 	 * AJ68  could possibly be worked around by always programming
-	 * 	 a pebs_event_reset[0] value and coping with the lost events.
+	 *	 a pebs_event_reset[0] value and coping with the lost events.
 	 *
 	 * But taken together it might just make sense to not enable PEBS on
 	 * these chips.
@@ -1409,6 +1422,18 @@ static __init int intel_pmu_init(void)
 		x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
 		x86_pmu.extra_regs = intel_nehalem_extra_regs;
+
+		if (ebx & 0x40) {
+			/*
+			 * Erratum AAJ80 detected, we work it around by using
+			 * the BR_MISP_EXEC.ANY event. This will over-count
+			 * branch-misses, but it's still much better than the
+			 * architectural event which is often completely bogus:
+			 */
+			intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
+
+			pr_cont("erratum AAJ80 worked around, ");
+		}
 		pr_cont("Nehalem events, ");
 		break;
 
@@ -1425,6 +1450,7 @@ static __init int intel_pmu_init(void)
 
 	case 37: /* 32 nm nehalem, "Clarkdale" */
 	case 44: /* 32 nm nehalem, "Gulftown" */
+	case 47: /* 32 nm Xeon E7 */
 		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
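
In the Nehalem branch above, ebx holds EBX of CPUID leaf 0xA, where a set bit flags an architectural event as unavailable; bit 6 (0x40) covers branch-mispredict retired, which is how erratum AAJ80 is detected. A standalone probe of the same bit, assuming GCC's <cpuid.h>:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 when leaf 0xA is not supported */
	if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0xA not supported");
		return 1;
	}
	if (ebx & 0x40)	/* bit 6: branch-mispredict event unavailable */
		puts("branch-misses event flagged unavailable (AAJ80-style workaround applies)");
	else
		puts("architectural branch-misses event available");
	return 0;
}
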
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index c2520e178d32..e93fcd55fae1 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -947,14 +947,23 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		if (!x86_perf_event_set_period(event))
 			continue;
 		if (perf_event_overflow(event, 1, &data, regs))
-			p4_pmu_disable_event(event);
+			x86_pmu_stop(event, 0);
 	}
 
-	if (handled) {
-		/* p4 quirk: unmask it again */
-		apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
+	if (handled)
 		inc_irq_stat(apic_perf_irqs);
-	}
+
+	/*
+	 * When dealing with the unmasking of the LVTPC on P4 perf hw, it has
+	 * been observed that the OVF bit flag has to be cleared first _before_
+	 * the LVTPC can be unmasked.
+	 *
+	 * The reason is the NMI line will continue to be asserted while the OVF
+	 * bit is set. This causes a second NMI to generate if the LVTPC is
+	 * unmasked before the OVF bit is cleared, leading to unknown NMI
+	 * messages.
+	 */
+	apic_write(APIC_LVTPC, APIC_DM_NMI);
 
 	return handled;
 }
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 706a9fb46a58..e90f08458e6b 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -391,7 +391,7 @@ static int ioapic_xlate(struct irq_domain *id, const u32 *intspec, u32 intsize,
 
 	set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity);
 
-	return io_apic_setup_irq_pin(*out_hwirq, cpu_to_node(0), &attr);
+	return io_apic_setup_irq_pin_once(*out_hwirq, cpu_to_node(0), &attr);
 }
 
 static void __init ioapic_add_ofnode(struct device_node *np)
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 82ada01625b9..b117efd24f71 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -81,6 +81,9 @@ static u32 gart_unmapped_entry;
 #define AGPEXTERN
 #endif
 
+/* GART can only remap to physical addresses < 1TB */
+#define GART_MAX_PHYS_ADDR	(1ULL << 40)
+
 /* backdoor interface to AGP driver */
 AGPEXTERN int agp_memory_reserved;
 AGPEXTERN __u32 *agp_gatt_table;
@@ -212,9 +215,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 				size_t size, int dir, unsigned long align_mask)
 {
 	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
-	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
+	unsigned long iommu_page;
 	int i;
 
+	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
+		return bad_dma_addr;
+
+	iommu_page = alloc_iommu(dev, npages, align_mask);
 	if (iommu_page == -1) {
 		if (!nonforced_iommu(dev, phys_mem, size))
 			return phys_mem;
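
dma_map_area() now rejects any mapping that would cross GART_MAX_PHYS_ADDR, since the GART aperture can only point at 40-bit physical addresses. The bound itself is plain arithmetic; a self-contained sketch of the check (the gart_mappable() helper is hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GART_MAX_PHYS_ADDR	(1ULL << 40)	/* GART can only remap below 1TB */

/* hypothetical helper: true if [phys, phys + size) stays below the 1TB limit */
static bool gart_mappable(uint64_t phys, uint64_t size)
{
	return phys + size <= GART_MAX_PHYS_ADDR;
}

int main(void)
{
	printf("%d\n", gart_mappable(GART_MAX_PHYS_ADDR - 4096, 4096)); /* 1: fits */
	printf("%d\n", gart_mappable(GART_MAX_PHYS_ADDR - 4096, 8192)); /* 0: crosses 1TB */
	return 0;
}
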
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 5a0484a95ad6..4be9b398470e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -976,6 +976,11 @@ void __init setup_arch(char **cmdline_p)
 	paging_init();
 	x86_init.paging.pagetable_setup_done(swapper_pg_dir);
 
+	if (boot_cpu_data.cpuid_level >= 0) {
+		/* A CPU has %cr4 if and only if it has CPUID */
+		mmu_cr4_features = read_cr4();
+	}
+
 #ifdef CONFIG_X86_32
 	/* sync back kernel address range */
 	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,