author    Takashi Iwai <tiwai@suse.de>    2014-11-17 16:16:03 -0500
committer Takashi Iwai <tiwai@suse.de>    2014-11-17 16:16:03 -0500
commit    39ae97ea4b773be81bae9eec08ed1e5c53606c1a (patch)
tree      4d55635fb46a86b970c1491cc529eb2770bf3076 /arch/x86/kernel
parent    a358a0ef861dae6f8330fb034aaa43adae71ebc1 (diff)
parent    cf9a7f7823c67243da44da2ac47ca944a3108282 (diff)
Merge tag 'asoc-v3.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v3.18

As well as the usual driver fixes there are a few other things here. One is a fix for a race in DPCM which unfortunately has a rather large diffstat; this is the result of growing usage of the mainline code and hence more detailed testing, so I'm relatively happy. The other is a fix for non-DT machine driver matching following some of the componentization work, which is much more focused. Both have had a while to cook in -next.
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/boot.c                   16
-rw-r--r--  arch/x86/kernel/apb_timer.c                    2
-rw-r--r--  arch/x86/kernel/apic/apic.c                    4
-rw-r--r--  arch/x86/kernel/cpu/Makefile                   7
-rw-r--r--  arch/x86/kernel/cpu/intel.c                    5
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd_early.c     33
-rw-r--r--  arch/x86/kernel/cpu/microcode/core_early.c     2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c              14
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h               1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c       173
-rw-r--r--  arch/x86/kernel/entry_32.S                    15
-rw-r--r--  arch/x86/kernel/i8259.c                        3
-rw-r--r--  arch/x86/kernel/irqinit.c                      3
-rw-r--r--  arch/x86/kernel/setup.c                        2
-rw-r--r--  arch/x86/kernel/smpboot.c                     15
-rw-r--r--  arch/x86/kernel/tsc.c                          5
16 files changed, 72 insertions(+), 228 deletions(-)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b436fc735aa4..a142e77693e1 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -397,7 +397,7 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
 
 	/* Don't set up the ACPI SCI because it's already set up */
 	if (acpi_gbl_FADT.sci_interrupt == gsi)
-		return gsi;
+		return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC);
 
 	trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
 	polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
@@ -604,14 +604,18 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
 {
-	int irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
+	int irq;
 
-	if (irq >= 0) {
+	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
+		*irqp = gsi;
+	} else {
+		irq = mp_map_gsi_to_irq(gsi,
+					IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
+		if (irq < 0)
+			return -1;
 		*irqp = irq;
-		return 0;
 	}
-
-	return -1;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
 
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 5972b108f15a..b708738d016e 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -185,8 +185,6 @@ static void apbt_setup_irq(struct apbt_dev *adev)
 
 	irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
 	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
-	/* APB timer irqs are set up as mp_irqs, timer is edge type */
-	__irq_set_handler(adev->irq, handle_edge_irq, 0, "edge");
 }
 
 /* Should be called with per cpu */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 00853b254ab0..ba6cc041edb1 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1297,7 +1297,7 @@ void setup_local_APIC(void)
 	unsigned int value, queued;
 	int i, j, acked = 0;
 	unsigned long long tsc = 0, ntsc;
-	long long max_loops = cpu_khz;
+	long long max_loops = cpu_khz ? cpu_khz : 1000000;
 
 	if (cpu_has_tsc)
 		rdtscll(tsc);
@@ -1383,7 +1383,7 @@ void setup_local_APIC(void)
 			break;
 		}
 		if (queued) {
-			if (cpu_has_tsc) {
+			if (cpu_has_tsc && cpu_khz) {
 				rdtscll(ntsc);
 				max_loops = (cpu_khz << 10) - (ntsc - tsc);
 			} else
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 01d5453b5502..e27b49d7c922 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,9 +39,12 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
 endif
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_uncore_snb.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore_snbep.o perf_event_intel_uncore_nhmex.o
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o
+
+obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \
+                                          perf_event_intel_uncore_snb.o \
+                                          perf_event_intel_uncore_snbep.o \
+                                          perf_event_intel_uncore_nhmex.o
 endif
 
 
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1ef456273172..9cc6b6f25f42 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -213,12 +213,13 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_F00F_BUG
 	/*
-	 * All current models of Pentium and Pentium with MMX technology CPUs
+	 * All models of Pentium and Pentium with MMX technology CPUs
 	 * have the F0 0F bug, which lets nonprivileged users lock up the
 	 * system. Announce that the fault handler will be checking for it.
+	 * The Quark is also family 5, but does not have the same bug.
 	 */
 	clear_cpu_bug(c, X86_BUG_F00F);
-	if (!paravirt_enabled() && c->x86 == 5) {
+	if (!paravirt_enabled() && c->x86 == 5 && c->x86_model < 9) {
 		static int f00f_workaround_enabled;
 
 		set_cpu_bug(c, X86_BUG_F00F);
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
index 7aa1acc79789..06674473b0e6 100644
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
@@ -108,12 +108,13 @@ static size_t compute_container_size(u8 *data, u32 total_size)
  * load_microcode_amd() to save equivalent cpu table and microcode patches in
  * kernel heap memory.
  */
-static void apply_ucode_in_initrd(void *ucode, size_t size)
+static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
 {
 	struct equiv_cpu_entry *eq;
 	size_t *cont_sz;
 	u32 *header;
 	u8 *data, **cont;
+	u8 (*patch)[PATCH_MAX_SIZE];
 	u16 eq_id = 0;
 	int offset, left;
 	u32 rev, eax, ebx, ecx, edx;
@@ -123,10 +124,12 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
 	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
 	cont_sz = (size_t *)__pa_nodebug(&container_size);
 	cont    = (u8 **)__pa_nodebug(&container);
+	patch   = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
 #else
 	new_rev = &ucode_new_rev;
 	cont_sz = &container_size;
 	cont    = &container;
+	patch   = &amd_ucode_patch;
 #endif
 
 	data = ucode;
@@ -213,9 +216,9 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
 			rev = mc->hdr.patch_id;
 			*new_rev = rev;
 
-			/* save ucode patch */
-			memcpy(amd_ucode_patch, mc,
+			if (save_patch)
+				memcpy(patch, mc,
 			       min_t(u32, header[1], PATCH_MAX_SIZE));
 		}
 	}
 
@@ -246,7 +249,7 @@ void __init load_ucode_amd_bsp(void)
 	*data = cp.data;
 	*size = cp.size;
 
-	apply_ucode_in_initrd(cp.data, cp.size);
+	apply_ucode_in_initrd(cp.data, cp.size, true);
 }
 
 #ifdef CONFIG_X86_32
@@ -263,7 +266,7 @@ void load_ucode_amd_ap(void)
 	size_t *usize;
 	void **ucode;
 
-	mc = (struct microcode_amd *)__pa(amd_ucode_patch);
+	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
 	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
 		__apply_microcode_amd(mc);
 		return;
@@ -275,7 +278,7 @@ void load_ucode_amd_ap(void)
 	if (!*ucode || !*usize)
 		return;
 
-	apply_ucode_in_initrd(*ucode, *usize);
+	apply_ucode_in_initrd(*ucode, *usize, false);
 }
 
 static void __init collect_cpu_sig_on_bsp(void *arg)
@@ -339,7 +342,7 @@ void load_ucode_amd_ap(void)
 		 * AP has a different equivalence ID than BSP, looks like
 		 * mixed-steppings silicon so go through the ucode blob anew.
 		 */
-		apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size);
+		apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
 	}
 }
 #endif
@@ -347,7 +350,9 @@ void load_ucode_amd_ap(void)
 int __init save_microcode_in_initrd_amd(void)
 {
 	unsigned long cont;
+	int retval = 0;
 	enum ucode_state ret;
+	u8 *cont_va;
 	u32 eax;
 
 	if (!container)
@@ -355,13 +360,15 @@ int __init save_microcode_in_initrd_amd(void)
 
 #ifdef CONFIG_X86_32
 	get_bsp_sig();
 	cont = (unsigned long)container;
+	cont_va = __va(container);
 #else
 	/*
 	 * We need the physical address of the container for both bitness since
 	 * boot_params.hdr.ramdisk_image is a physical address.
 	 */
 	cont = __pa(container);
+	cont_va = container;
 #endif
 
 	/*
@@ -372,6 +379,8 @@ int __init save_microcode_in_initrd_amd(void)
 	if (relocated_ramdisk)
 		container = (u8 *)(__va(relocated_ramdisk) +
 				   (cont - boot_params.hdr.ramdisk_image));
+	else
+		container = cont_va;
 
 	if (ucode_new_rev)
 		pr_info("microcode: updated early to new patch_level=0x%08x\n",
@@ -382,7 +391,7 @@ int __init save_microcode_in_initrd_amd(void)
 
 	ret = load_microcode_amd(eax, container, container_size);
 	if (ret != UCODE_OK)
-		return -EINVAL;
+		retval = -EINVAL;
 
 	/*
 	 * This will be freed any msec now, stash patches for the current
@@ -391,5 +400,5 @@ int __init save_microcode_in_initrd_amd(void)
 	container = NULL;
 	container_size = 0;
 
-	return 0;
+	return retval;
 }
diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c
index 5f28a64e71ea..2c017f242a78 100644
--- a/arch/x86/kernel/cpu/microcode/core_early.c
+++ b/arch/x86/kernel/cpu/microcode/core_early.c
@@ -124,7 +124,7 @@ void __init load_ucode_bsp(void)
 static bool check_loader_disabled_ap(void)
 {
 #ifdef CONFIG_X86_32
-	return __pa_nodebug(dis_ucode_ldr);
+	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
 #else
 	return dis_ucode_ldr;
 #endif
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 1b8299dd3d91..143e5f5dc855 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -243,8 +243,9 @@ static bool check_hw_exists(void)
 
 msr_fail:
 	printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
-	printk(boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR
-	       "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new);
+	printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+	       boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
+	       reg, val_new);
 
 	return false;
 }
@@ -444,12 +445,6 @@ int x86_pmu_hw_config(struct perf_event *event)
 	if (event->attr.type == PERF_TYPE_RAW)
 		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
 
-	if (event->attr.sample_period && x86_pmu.limit_period) {
-		if (x86_pmu.limit_period(event, event->attr.sample_period) >
-				event->attr.sample_period)
-			return -EINVAL;
-	}
-
 	return x86_setup_perfctr(event);
 }
 
@@ -987,9 +982,6 @@ int x86_perf_event_set_period(struct perf_event *event)
 	if (left > x86_pmu.max_period)
 		left = x86_pmu.max_period;
 
-	if (x86_pmu.limit_period)
-		left = x86_pmu.limit_period(event, left);
-
 	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
 	/*
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index d98a34d435d7..fc5eb390b368 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -445,7 +445,6 @@ struct x86_pmu {
 	struct x86_pmu_quirk *quirks;
 	int perfctr_second_write;
 	bool late_ack;
-	unsigned (*limit_period)(struct perf_event *event, unsigned l);
 
 	/*
 	 * sysfs attrs
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index a73947c53b65..944bf019b74f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -220,15 +220,6 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	EVENT_CONSTRAINT_END
 };
 
-static struct event_constraint intel_bdw_event_constraints[] = {
-	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
-	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
-	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
-	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
-	INTEL_EVENT_CONSTRAINT(0xa3, 0x4),	/* CYCLE_ACTIVITY.* */
-	EVENT_CONSTRAINT_END
-};
-
 static u64 intel_pmu_event_map(int hw_event)
 {
 	return intel_perfmon_event_map[hw_event];
@@ -424,126 +415,6 @@ static __initconst const u64 snb_hw_cache_event_ids
 
 };
 
-static __initconst const u64 hsw_hw_cache_event_ids
-				[PERF_COUNT_HW_CACHE_MAX]
-				[PERF_COUNT_HW_CACHE_OP_MAX]
-				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D ) ] = {
-	[ C(OP_READ) ] = {
-		[ C(RESULT_ACCESS) ] = 0x81d0, 	/* MEM_UOPS_RETIRED.ALL_LOADS */
-		[ C(RESULT_MISS)   ] = 0x151, 	/* L1D.REPLACEMENT */
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = 0x82d0, 	/* MEM_UOPS_RETIRED.ALL_STORES */
-		[ C(RESULT_MISS)   ] = 0x0,
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0,
-		[ C(RESULT_MISS)   ] = 0x0,
-	},
- },
- [ C(L1I ) ] = {
-	[ C(OP_READ) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0,
-		[ C(RESULT_MISS)   ] = 0x280, 	/* ICACHE.MISSES */
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = -1,
-		[ C(RESULT_MISS)   ] = -1,
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0,
-		[ C(RESULT_MISS)   ] = 0x0,
-	},
- },
- [ C(LL  ) ] = {
-	[ C(OP_READ) ] = {
-		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
-		[ C(RESULT_ACCESS) ] = 0x1b7,
-		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
-		   L3_MISS|ANY_SNOOP */
-		[ C(RESULT_MISS)   ] = 0x1b7,
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = 0x1b7, 	/* OFFCORE_RESPONSE:ALL_RFO */
-		/* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
-		[ C(RESULT_MISS)   ] = 0x1b7,
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0,
-		[ C(RESULT_MISS)   ] = 0x0,
-	},
- },
- [ C(DTLB) ] = {
-	[ C(OP_READ) ] = {
-		[ C(RESULT_ACCESS) ] = 0x81d0, 	/* MEM_UOPS_RETIRED.ALL_LOADS */
-		[ C(RESULT_MISS)   ] = 0x108, 	/* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = 0x82d0, 	/* MEM_UOPS_RETIRED.ALL_STORES */
-		[ C(RESULT_MISS)   ] = 0x149, 	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0,
-		[ C(RESULT_MISS)   ] = 0x0,
-	},
- },
- [ C(ITLB) ] = {
-	[ C(OP_READ) ] = {
-		[ C(RESULT_ACCESS) ] = 0x6085, 	/* ITLB_MISSES.STLB_HIT */
-		[ C(RESULT_MISS)   ] = 0x185, 	/* ITLB_MISSES.MISS_CAUSES_A_WALK */
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = -1,
-		[ C(RESULT_MISS)   ] = -1,
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = -1,
-		[ C(RESULT_MISS)   ] = -1,
-	},
- },
- [ C(BPU ) ] = {
-	[ C(OP_READ) ] = {
-		[ C(RESULT_ACCESS) ] = 0xc4, 	/* BR_INST_RETIRED.ALL_BRANCHES */
-		[ C(RESULT_MISS)   ] = 0xc5, 	/* BR_MISP_RETIRED.ALL_BRANCHES */
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = -1,
-		[ C(RESULT_MISS)   ] = -1,
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = -1,
-		[ C(RESULT_MISS)   ] = -1,
-	},
- },
-};
-
-static __initconst const u64 hsw_hw_cache_extra_regs
-				[PERF_COUNT_HW_CACHE_MAX]
-				[PERF_COUNT_HW_CACHE_OP_MAX]
-				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(LL  ) ] = {
-	[ C(OP_READ) ] = {
-		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
-		[ C(RESULT_ACCESS) ] = 0x2d5,
-		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
-		   L3_MISS|ANY_SNOOP */
-		[ C(RESULT_MISS)   ] = 0x3fbc0202d5ull,
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = 0x122, 	/* OFFCORE_RESPONSE:ALL_RFO */
-		/* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
-		[ C(RESULT_MISS)   ] = 0x3fbc020122ull,
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0,
-		[ C(RESULT_MISS)   ] = 0x0,
-	},
- },
-};
-
 static __initconst const u64 westmere_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -2034,24 +1905,6 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	return c;
 }
 
-/*
- * Broadwell:
- * The INST_RETIRED.ALL period always needs to have lowest
- * 6bits cleared (BDM57). It shall not use a period smaller
- * than 100 (BDM11). We combine the two to enforce
- * a min-period of 128.
- */
-static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
-{
-	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
-			X86_CONFIG(.event=0xc0, .umask=0x01)) {
-		if (left < 128)
-			left = 128;
-		left &= ~0x3fu;
-	}
-	return left;
-}
-
 PMU_FORMAT_ATTR(event,	"config:0-7"	);
 PMU_FORMAT_ATTR(umask,	"config:8-15"	);
 PMU_FORMAT_ATTR(edge,	"config:18"	);
@@ -2692,8 +2545,8 @@ __init int intel_pmu_init(void)
 	case 69: /* 22nm Haswell ULT */
 	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
 		x86_pmu.late_ack = true;
-		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
-		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
 
 		intel_pmu_lbr_init_snb();
 
@@ -2712,28 +2565,6 @@ __init int intel_pmu_init(void)
 		pr_cont("Haswell events, ");
 		break;
 
-	case 61: /* 14nm Broadwell Core-M */
-		x86_pmu.late_ack = true;
-		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
-		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
-
-		intel_pmu_lbr_init_snb();
-
-		x86_pmu.event_constraints = intel_bdw_event_constraints;
-		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
-		x86_pmu.extra_regs = intel_snbep_extra_regs;
-		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-		/* all extra regs are per-cpu when HT is on */
-		x86_pmu.er_flags |= ERF_HAS_RSP_1;
-		x86_pmu.er_flags |= ERF_NO_HT_SHARING;
-
-		x86_pmu.hw_config = hsw_hw_config;
-		x86_pmu.get_event_constraints = hsw_get_event_constraints;
-		x86_pmu.cpu_events = hsw_events_attrs;
-		x86_pmu.limit_period = bdw_limit_period;
-		pr_cont("Broadwell events, ");
-		break;
-
 	default:
 		switch (x86_pmu.version) {
 		case 1:
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index b553ed89e5f5..344b63f18d14 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -447,15 +447,14 @@ sysenter_exit:
 sysenter_audit:
 	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
 	jnz syscall_trace_entry
-	addl $4,%esp
-	CFI_ADJUST_CFA_OFFSET -4
-	movl %esi,4(%esp)		/* 5th arg: 4th syscall arg */
-	movl %edx,(%esp)		/* 4th arg: 3rd syscall arg */
-	/* %ecx already in %ecx		   3rd arg: 2nd syscall arg */
-	movl %ebx,%edx			/* 2nd arg: 1st syscall arg */
-	/* %eax already in %eax		   1st arg: syscall number */
+	/* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
+	movl PT_EBX(%esp), %edx		/* ebx/a0: 2nd arg to audit */
+	/* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */
+	pushl_cfi PT_ESI(%esp)		/* a3: 5th arg */
+	pushl_cfi PT_EDX+4(%esp)	/* a2: 4th arg */
 	call __audit_syscall_entry
-	pushl_cfi %ebx
+	popl_cfi %ecx			/* get that remapped edx off the stack */
+	popl_cfi %ecx			/* get that remapped esi off the stack */
 	movl PT_EAX(%esp),%eax		/* reload syscall number */
 	jmp sysenter_do_call
 
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 8af817105e29..e7cc5370cd2f 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -111,8 +111,7 @@ static void make_8259A_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
 	io_apic_irqs &= ~(1<<irq);
-	irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
-				      i8259A_chip.name);
+	irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
 	enable_irq(irq);
 }
 
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 44f1ed42fdf2..4de73ee78361 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -70,7 +70,6 @@ int vector_used_by_percpu_irq(unsigned int vector)
 void __init init_ISA_irqs(void)
 {
 	struct irq_chip *chip = legacy_pic->chip;
-	const char *name = chip->name;
 	int i;
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
@@ -79,7 +78,7 @@ void __init init_ISA_irqs(void)
 	legacy_pic->init(0);
 
 	for (i = 0; i < nr_legacy_irqs(); i++)
-		irq_set_chip_and_handler_name(i, chip, handle_level_irq, name);
+		irq_set_chip_and_handler(i, chip, handle_level_irq);
 }
 
 void __init init_IRQ(void)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 235cfd39e0d7..ab08aa2276fb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1128,7 +1128,6 @@ void __init setup_arch(char **cmdline_p)
 	setup_real_mode();
 
 	memblock_set_current_limit(get_max_mapped());
-	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
 
 	/*
 	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
@@ -1159,6 +1158,7 @@ void __init setup_arch(char **cmdline_p)
 	early_acpi_boot_init();
 
 	initmem_init();
+	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
 
 	/*
 	 * Reserve memory for crash kernel after SRAT is parsed so that it
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2d5200e56357..668d8f2a8781 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -102,8 +102,6 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-static DEFINE_PER_CPU(struct completion, die_complete);
-
 atomic_t init_deasserted;
 
 /*
@@ -1305,10 +1303,14 @@ static void __ref remove_cpu_from_maps(int cpu)
 	numa_remove_cpu(cpu);
 }
 
+static DEFINE_PER_CPU(struct completion, die_complete);
+
 void cpu_disable_common(void)
 {
 	int cpu = smp_processor_id();
 
+	init_completion(&per_cpu(die_complete, smp_processor_id()));
+
 	remove_siblinginfo(cpu);
 
 	/* It's now safe to remove this processor from the online map */
@@ -1327,16 +1329,21 @@ int native_cpu_disable(void)
 		return ret;
 
 	clear_local_APIC();
-	init_completion(&per_cpu(die_complete, smp_processor_id()));
 	cpu_disable_common();
 
 	return 0;
 }
 
+void cpu_die_common(unsigned int cpu)
+{
+	wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);
+}
+
 void native_cpu_die(unsigned int cpu)
 {
 	/* We don't do anything here: idle task is faking death itself. */
-	wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);
+
+	cpu_die_common(cpu);
 
 	/* They ack this in play_dead() by setting CPU_DEAD */
 	if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index b6025f9e36c6..b7e50bba3bbb 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1166,14 +1166,17 @@ void __init tsc_init(void)
 
 	x86_init.timers.tsc_pre_init();
 
-	if (!cpu_has_tsc)
+	if (!cpu_has_tsc) {
+		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
 		return;
+	}
 
 	tsc_khz = x86_platform.calibrate_tsc();
 	cpu_khz = tsc_khz;
 
 	if (!tsc_khz) {
 		mark_tsc_unstable("could not calculate TSC khz");
+		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
 		return;
 	}
 