Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                           |   1
-rw-r--r--  arch/x86/include/asm/irq.h                 |   2
-rw-r--r--  arch/x86/include/asm/msr-index.h           |   1
-rw-r--r--  arch/x86/include/asm/perf_event.h          |  19
-rw-r--r--  arch/x86/include/asm/smp.h                 |   9
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c           |  21
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c  | 216
-rw-r--r--  arch/x86/kernel/dumpstack_32.c             |   6
-rw-r--r--  arch/x86/kernel/dumpstack_64.c             |   8
-rw-r--r--  arch/x86/kernel/irq_32.c                   |  12
-rw-r--r--  arch/x86/kernel/ptrace.c                   |  17
-rw-r--r--  arch/x86/kernel/reboot.c                   |   2
-rw-r--r--  arch/x86/kernel/setup.c                    |   2
-rw-r--r--  arch/x86/kernel/smp.c                      |  15
-rw-r--r--  arch/x86/kernel/smpboot.c                  |   1
-rw-r--r--  arch/x86/mm/highmem_32.c                   |   3
-rw-r--r--  arch/x86/mm/init_64.c                      |   1
-rw-r--r--  arch/x86/mm/iomap_32.c                     |   3
-rw-r--r--  arch/x86/oprofile/nmi_int.c                |   6
-rw-r--r--  arch/x86/oprofile/op_model_amd.c           | 146
-rw-r--r--  arch/x86/pci/i386.c                        |  17
-rw-r--r--  arch/x86/pci/irq.c                         |  11
-rw-r--r--  arch/x86/pci/mmconfig-shared.c             |   4
-rw-r--r--  arch/x86/xen/enlighten.c                   |   3
-rw-r--r--  arch/x86/xen/smp.c                         |   6
25 files changed, 334 insertions(+), 198 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index dfabfefc21c4..299fbc86f570 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -347,6 +347,7 @@ endif
 
 config X86_VSMP
 	bool "ScaleMP vSMP"
+	select PARAVIRT_GUEST
 	select PARAVIRT
 	depends on X86_64 && PCI
 	depends on X86_EXTENDED_PLATFORM
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 0bf5b0083650..13b0ebaa512f 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -21,10 +21,8 @@ static inline int irq_canonicalize(int irq)
 
 #ifdef CONFIG_X86_32
 extern void irq_ctx_init(int cpu);
-extern void irq_ctx_exit(int cpu);
 #else
 # define irq_ctx_init(cpu) do { } while (0)
-# define irq_ctx_exit(cpu) do { } while (0)
 #endif
 
 #define __ARCH_HAS_DO_SOFTIRQ
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 83c4bb1d917d..3ea3dc487047 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -121,6 +121,7 @@
 #define MSR_AMD64_IBSDCLINAD	0xc0011038
 #define MSR_AMD64_IBSDCPHYSAD	0xc0011039
 #define MSR_AMD64_IBSCTL	0xc001103a
+#define MSR_AMD64_IBSBRTARGET	0xc001103b
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE	0xc0010058
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 6e742cc4251b..550e26b1dbb3 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -111,17 +111,18 @@ union cpuid10_edx {
 #define X86_PMC_IDX_FIXED_BTS	(X86_PMC_IDX_FIXED + 16)
 
 /* IbsFetchCtl bits/masks */
 #define IBS_FETCH_RAND_EN	(1ULL<<57)
 #define IBS_FETCH_VAL		(1ULL<<49)
 #define IBS_FETCH_ENABLE	(1ULL<<48)
 #define IBS_FETCH_CNT		0xFFFF0000ULL
 #define IBS_FETCH_MAX_CNT	0x0000FFFFULL
 
 /* IbsOpCtl bits */
 #define IBS_OP_CNT_CTL		(1ULL<<19)
 #define IBS_OP_VAL		(1ULL<<18)
 #define IBS_OP_ENABLE		(1ULL<<17)
 #define IBS_OP_MAX_CNT		0x0000FFFFULL
+#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
 
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4cfc90824068..4c2f63c7fc1b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -50,7 +50,7 @@ struct smp_ops {
 	void (*smp_prepare_cpus)(unsigned max_cpus);
 	void (*smp_cpus_done)(unsigned max_cpus);
 
-	void (*smp_send_stop)(void);
+	void (*stop_other_cpus)(int wait);
 	void (*smp_send_reschedule)(int cpu);
 
 	int (*cpu_up)(unsigned cpu);
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
 
 static inline void smp_send_stop(void)
 {
-	smp_ops.smp_send_stop();
+	smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+	smp_ops.stop_other_cpus(1);
 }
 
 static inline void smp_prepare_boot_cpu(void)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index c1e8c7a51164..ed6310183efb 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -237,6 +237,7 @@ struct x86_pmu {
 	 * Intel DebugStore bits
 	 */
 	int		bts, pebs;
+	int		bts_active, pebs_active;
 	int		pebs_record_size;
 	void		(*drain_pebs)(struct pt_regs *regs);
 	struct event_constraint *pebs_constraints;
@@ -380,7 +381,7 @@ static void release_pmc_hardware(void) {}
 
 #endif
 
-static int reserve_ds_buffers(void);
+static void reserve_ds_buffers(void);
 static void release_ds_buffers(void);
 
 static void hw_perf_event_destroy(struct perf_event *event)
@@ -477,7 +478,7 @@ static int x86_setup_perfctr(struct perf_event *event)
 	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
 	    (hwc->sample_period == 1)) {
 		/* BTS is not supported by this architecture. */
-		if (!x86_pmu.bts)
+		if (!x86_pmu.bts_active)
 			return -EOPNOTSUPP;
 
 		/* BTS is currently only allowed for user-mode. */
@@ -496,12 +497,13 @@ static int x86_pmu_hw_config(struct perf_event *event)
 	int precise = 0;
 
 	/* Support for constant skid */
-	if (x86_pmu.pebs)
+	if (x86_pmu.pebs_active) {
 		precise++;
 
 		/* Support for IP fixup */
 		if (x86_pmu.lbr_nr)
 			precise++;
+	}
 
 	if (event->attr.precise_ip > precise)
 		return -EOPNOTSUPP;
@@ -543,11 +545,8 @@ static int __x86_pmu_event_init(struct perf_event *event)
 	if (atomic_read(&active_events) == 0) {
 		if (!reserve_pmc_hardware())
 			err = -EBUSY;
-		else {
-			err = reserve_ds_buffers();
-			if (err)
-				release_pmc_hardware();
-		}
+		else
+			reserve_ds_buffers();
 	}
 	if (!err)
 		atomic_inc(&active_events);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 4977f9c400e5..b7dcd9f2b8a0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -74,6 +74,107 @@ static void fini_debug_store_on_cpu(int cpu)
 	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
 }
 
+static int alloc_pebs_buffer(int cpu)
+{
+	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+	int node = cpu_to_node(cpu);
+	int max, thresh = 1; /* always use a single PEBS record */
+	void *buffer;
+
+	if (!x86_pmu.pebs)
+		return 0;
+
+	buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+	if (unlikely(!buffer))
+		return -ENOMEM;
+
+	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
+
+	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+	ds->pebs_index = ds->pebs_buffer_base;
+	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
+		max * x86_pmu.pebs_record_size;
+
+	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
+		thresh * x86_pmu.pebs_record_size;
+
+	return 0;
+}
+
+static void release_pebs_buffer(int cpu)
+{
+	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+	if (!ds || !x86_pmu.pebs)
+		return;
+
+	kfree((void *)(unsigned long)ds->pebs_buffer_base);
+	ds->pebs_buffer_base = 0;
+}
+
+static int alloc_bts_buffer(int cpu)
+{
+	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+	int node = cpu_to_node(cpu);
+	int max, thresh;
+	void *buffer;
+
+	if (!x86_pmu.bts)
+		return 0;
+
+	buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+	if (unlikely(!buffer))
+		return -ENOMEM;
+
+	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+	thresh = max / 16;
+
+	ds->bts_buffer_base = (u64)(unsigned long)buffer;
+	ds->bts_index = ds->bts_buffer_base;
+	ds->bts_absolute_maximum = ds->bts_buffer_base +
+		max * BTS_RECORD_SIZE;
+	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+		thresh * BTS_RECORD_SIZE;
+
+	return 0;
+}
+
+static void release_bts_buffer(int cpu)
+{
+	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+	if (!ds || !x86_pmu.bts)
+		return;
+
+	kfree((void *)(unsigned long)ds->bts_buffer_base);
+	ds->bts_buffer_base = 0;
+}
+
+static int alloc_ds_buffer(int cpu)
+{
+	int node = cpu_to_node(cpu);
+	struct debug_store *ds;
+
+	ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
+	if (unlikely(!ds))
+		return -ENOMEM;
+
+	per_cpu(cpu_hw_events, cpu).ds = ds;
+
+	return 0;
+}
+
+static void release_ds_buffer(int cpu)
+{
+	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+	if (!ds)
+		return;
+
+	per_cpu(cpu_hw_events, cpu).ds = NULL;
+	kfree(ds);
+}
+
 static void release_ds_buffers(void)
 {
 	int cpu;
@@ -82,93 +183,77 @@ static void release_ds_buffers(void)
 		return;
 
 	get_online_cpus();
-
 	for_each_online_cpu(cpu)
 		fini_debug_store_on_cpu(cpu);
 
 	for_each_possible_cpu(cpu) {
-		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
-		if (!ds)
-			continue;
-
-		per_cpu(cpu_hw_events, cpu).ds = NULL;
-
-		kfree((void *)(unsigned long)ds->pebs_buffer_base);
-		kfree((void *)(unsigned long)ds->bts_buffer_base);
-		kfree(ds);
+		release_pebs_buffer(cpu);
+		release_bts_buffer(cpu);
+		release_ds_buffer(cpu);
 	}
-
 	put_online_cpus();
 }
 
-static int reserve_ds_buffers(void)
+static void reserve_ds_buffers(void)
 {
-	int cpu, err = 0;
+	int bts_err = 0, pebs_err = 0;
+	int cpu;
+
+	x86_pmu.bts_active = 0;
+	x86_pmu.pebs_active = 0;
 
 	if (!x86_pmu.bts && !x86_pmu.pebs)
-		return 0;
+		return;
+
+	if (!x86_pmu.bts)
+		bts_err = 1;
+
+	if (!x86_pmu.pebs)
+		pebs_err = 1;
 
 	get_online_cpus();
 
 	for_each_possible_cpu(cpu) {
-		struct debug_store *ds;
-		void *buffer;
-		int max, thresh;
+		if (alloc_ds_buffer(cpu)) {
+			bts_err = 1;
+			pebs_err = 1;
+		}
+
+		if (!bts_err && alloc_bts_buffer(cpu))
+			bts_err = 1;
 
-		err = -ENOMEM;
-		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
-		if (unlikely(!ds))
+		if (!pebs_err && alloc_pebs_buffer(cpu))
+			pebs_err = 1;
+
+		if (bts_err && pebs_err)
 			break;
-		per_cpu(cpu_hw_events, cpu).ds = ds;
-
-		if (x86_pmu.bts) {
-			buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
-			if (unlikely(!buffer))
-				break;
-
-			max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
-			thresh = max / 16;
-
-			ds->bts_buffer_base = (u64)(unsigned long)buffer;
-			ds->bts_index = ds->bts_buffer_base;
-			ds->bts_absolute_maximum = ds->bts_buffer_base +
-				max * BTS_RECORD_SIZE;
-			ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
-				thresh * BTS_RECORD_SIZE;
-		}
+	}
 
-		if (x86_pmu.pebs) {
-			buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
-			if (unlikely(!buffer))
-				break;
-
-			max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
-
-			ds->pebs_buffer_base = (u64)(unsigned long)buffer;
-			ds->pebs_index = ds->pebs_buffer_base;
-			ds->pebs_absolute_maximum = ds->pebs_buffer_base +
-				max * x86_pmu.pebs_record_size;
-			/*
-			 * Always use single record PEBS
-			 */
-			ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
-				x86_pmu.pebs_record_size;
-		}
+	if (bts_err) {
+		for_each_possible_cpu(cpu)
+			release_bts_buffer(cpu);
+	}
 
-		err = 0;
+	if (pebs_err) {
+		for_each_possible_cpu(cpu)
+			release_pebs_buffer(cpu);
 	}
 
-	if (err)
-		release_ds_buffers();
-	else {
+	if (bts_err && pebs_err) {
+		for_each_possible_cpu(cpu)
+			release_ds_buffer(cpu);
+	} else {
+		if (x86_pmu.bts && !bts_err)
+			x86_pmu.bts_active = 1;
+
+		if (x86_pmu.pebs && !pebs_err)
+			x86_pmu.pebs_active = 1;
+
 		for_each_online_cpu(cpu)
 			init_debug_store_on_cpu(cpu);
 	}
 
 	put_online_cpus();
-
-	return err;
 }
173 258
174/* 259/*
@@ -233,7 +318,7 @@ static int intel_pmu_drain_bts_buffer(void)
 	if (!event)
 		return 0;
 
-	if (!ds)
+	if (!x86_pmu.bts_active)
 		return 0;
 
 	at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
@@ -503,7 +588,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 	struct pebs_record_core *at, *top;
 	int n;
 
-	if (!ds || !x86_pmu.pebs)
+	if (!x86_pmu.pebs_active)
 		return;
 
 	at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
@@ -545,7 +630,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 	u64 status = 0;
 	int bit, n;
 
-	if (!ds || !x86_pmu.pebs)
+	if (!x86_pmu.pebs_active)
 		return;
 
 	at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
@@ -630,9 +715,8 @@ static void intel_ds_init(void)
 
 #else /* CONFIG_CPU_SUP_INTEL */
 
-static int reserve_ds_buffers(void)
+static void reserve_ds_buffers(void)
 {
-	return 0;
 }
 
 static void release_ds_buffers(void)
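
A note on the reserve_ds_buffers() rewrite above: BTS and PEBS are now
independent failure domains -- each keeps its own error flag, buffers are
allocated per CPU, and only a feature that could not be set up is rolled
back, leaving the other one active. A minimal userspace sketch of the same
pattern, with hypothetical feature names and malloc() standing in for the
kernel helpers:

    #include <stdbool.h>
    #include <stdlib.h>

    #define NCPUS 4

    static void *feat_a[NCPUS], *feat_b[NCPUS];

    static void reserve_buffers(bool *a_active, bool *b_active)
    {
            bool a_err = false, b_err = false;
            int cpu;

            for (cpu = 0; cpu < NCPUS; cpu++) {
                    if (!a_err && !(feat_a[cpu] = malloc(4096)))
                            a_err = true;   /* disables only feature A */
                    if (!b_err && !(feat_b[cpu] = malloc(4096)))
                            b_err = true;   /* disables only feature B */
                    if (a_err && b_err)
                            break;          /* nothing left worth trying */
            }

            if (a_err) {                    /* roll back just the failed one */
                    for (cpu = 0; cpu < NCPUS; cpu++) {
                            free(feat_a[cpu]);      /* free(NULL) is a no-op */
                            feat_a[cpu] = NULL;
                    }
            }
            if (b_err) {
                    for (cpu = 0; cpu < NCPUS; cpu++) {
                            free(feat_b[cpu]);
                            feat_b[cpu] = NULL;
                    }
            }

            *a_active = !a_err;             /* the survivor stays usable */
            *b_active = !b_err;
    }
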
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 0f6376ffa2d9..1bc7f75a5bda 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -82,11 +82,11 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		if (kstack_end(stack))
 			break;
 		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-			printk("\n%s", log_lvl);
-		printk(" %08lx", *stack++);
+			printk(KERN_CONT "\n");
+		printk(KERN_CONT " %08lx", *stack++);
 		touch_nmi_watchdog();
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 57a21f11c791..6a340485249a 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -265,20 +265,20 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		if (stack >= irq_stack && stack <= irq_stack_end) {
 			if (stack == irq_stack_end) {
 				stack = (unsigned long *) (irq_stack_end[-1]);
-				printk(" <EOI> ");
+				printk(KERN_CONT " <EOI> ");
 			}
 		} else {
 		if (((long) stack & (THREAD_SIZE-1)) == 0)
 			break;
 		}
 		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-			printk("\n%s", log_lvl);
-		printk(" %016lx", *stack++);
+			printk(KERN_CONT "\n");
+		printk(KERN_CONT " %016lx", *stack++);
 		touch_nmi_watchdog();
 	}
 	preempt_enable();
 
-	printk("\n");
+	printk(KERN_CONT "\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 50fbbe60e507..64668dbf00a4 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -60,9 +60,6 @@ union irq_ctx {
 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
 
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, hardirq_stack, THREAD_SIZE);
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, softirq_stack, THREAD_SIZE);
-
 static void call_on_stack(void *func, void *stack)
 {
 	asm volatile("xchgl	%%ebx,%%esp	\n"
@@ -128,7 +125,7 @@ void __cpuinit irq_ctx_init(int cpu)
 	if (per_cpu(hardirq_ctx, cpu))
 		return;
 
-	irqctx = &per_cpu(hardirq_stack, cpu);
+	irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
@@ -137,7 +134,7 @@ void __cpuinit irq_ctx_init(int cpu)
 
 	per_cpu(hardirq_ctx, cpu) = irqctx;
 
-	irqctx = &per_cpu(softirq_stack, cpu);
+	irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
@@ -150,11 +147,6 @@ void __cpuinit irq_ctx_init(int cpu)
 	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
 }
 
-void irq_ctx_exit(int cpu)
-{
-	per_cpu(hardirq_ctx, cpu) = NULL;
-}
-
 asmlinkage void do_softirq(void)
 {
 	unsigned long flags;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 70c4872cd8aa..45892dc4b72a 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -801,7 +801,8 @@ void ptrace_disable(struct task_struct *child)
 static const struct user_regset_view user_x86_32_view; /* Initialized below. */
 #endif
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
 {
 	int ret;
 	unsigned long __user *datap = (unsigned long __user *)data;
@@ -812,8 +813,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		unsigned long tmp;
 
 		ret = -EIO;
-		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
-		    addr >= sizeof(struct user))
+		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
 			break;
 
 		tmp = 0;  /* Default return condition */
@@ -830,8 +830,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
 	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
 		ret = -EIO;
-		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
-		    addr >= sizeof(struct user))
+		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
 			break;
 
 		if (addr < sizeof(struct user_regs_struct))
@@ -888,17 +887,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
 	case PTRACE_GET_THREAD_AREA:
-		if (addr < 0)
+		if ((int) addr < 0)
 			return -EIO;
 		ret = do_get_thread_area(child, addr,
-					 (struct user_desc __user *) data);
+					 (struct user_desc __user *)data);
 		break;
 
 	case PTRACE_SET_THREAD_AREA:
-		if (addr < 0)
+		if ((int) addr < 0)
 			return -EIO;
 		ret = do_set_thread_area(child, addr,
-					 (struct user_desc __user *) data, 0);
+					 (struct user_desc __user *)data, 0);
 		break;
 #endif
 
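
A note on the arch_ptrace() hunks above: with addr and data now unsigned
long, the old "addr < 0" test is always false, so the PEEKUSR/POKEUSR range
checks collapse into a single "addr >= sizeof(struct user)" comparison, and
the thread-area cases cast back to int to recover a sign check. A tiny
standalone illustration of the pitfall (not from the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long addr = (unsigned long)-4;

            /* Prints 0: an unsigned value can never be negative. */
            printf("addr < 0      -> %d\n", addr < (unsigned long)0);

            /* Prints 1: the cast back to int recovers the sign. */
            printf("(int)addr < 0 -> %d\n", (int)addr < 0);
            return 0;
    }
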
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f7f53dcd3e0a..c495aa8d4815 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -635,7 +635,7 @@ void native_machine_shutdown(void)
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
 	 */
-	smp_send_stop();
+	stop_other_cpus();
 #endif
 
 	lapic_shutdown();
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 95a32746fbf9..21c6746338af 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -769,6 +769,8 @@ void __init setup_arch(char **cmdline_p)
 
 	x86_init.oem.arch_setup();
 
+	resource_alloc_from_bottom = 0;
+	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
 	setup_memory_map();
 	parse_setup_data();
 	/* update the e820_saved too */
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d801210945d6..513deac7228d 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(void)
 	irq_exit();
 }
 
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
 {
 	unsigned long flags;
-	unsigned long wait;
+	unsigned long timeout;
 
 	if (reboot_force)
 		return;
@@ -179,9 +179,12 @@ static void native_smp_send_stop(void)
 	if (num_online_cpus() > 1) {
 		apic->send_IPI_allbutself(REBOOT_VECTOR);
 
-		/* Don't wait longer than a second */
-		wait = USEC_PER_SEC;
-		while (num_online_cpus() > 1 && wait--)
+		/*
+		 * Don't wait longer than a second if the caller
+		 * didn't ask us to wait.
+		 */
+		timeout = USEC_PER_SEC;
+		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
 
@@ -227,7 +230,7 @@ struct smp_ops smp_ops = {
 	.smp_prepare_cpus	= native_smp_prepare_cpus,
 	.smp_cpus_done		= native_smp_cpus_done,
 
-	.smp_send_stop		= native_smp_send_stop,
+	.stop_other_cpus	= native_stop_other_cpus,
 	.smp_send_reschedule	= native_smp_send_reschedule,
 
 	.cpu_up			= native_cpu_up,
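
In native_stop_other_cpus() above, the "(wait || timeout--)" condition
relies on C's short-circuit evaluation, so both calling modes fall out of
one loop (condensed restatement, not new code):

    /* wait != 0: timeout-- is never evaluated -> spin until the     */
    /*            other CPUs have really gone offline.               */
    /* wait == 0: timeout counts down ~10^6 udelay(1) steps, i.e.    */
    /*            the old "give up after about a second" behaviour.  */
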
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6c7faecd9e4a..083e99d1b7df 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1373,7 +1373,6 @@ void play_dead_common(void)
 {
 	idle_task_exit();
 	reset_lazy_tlbstate();
-	irq_ctx_exit(raw_smp_processor_id());
 	c1e_remove_cpu(raw_smp_processor_id());
 
 	mb();
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index d723e369003c..b49962662101 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -74,7 +74,7 @@ void __kunmap_atomic(void *kvaddr)
 	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
 		int idx, type;
 
-		type = kmap_atomic_idx_pop();
+		type = kmap_atomic_idx();
 		idx = type + KM_TYPE_NR * smp_processor_id();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -87,6 +87,7 @@ void __kunmap_atomic(void *kvaddr)
 		 * attributes or becomes a protected page in a hypervisor.
 		 */
 		kpte_clear_flush(kmap_pte-idx, vaddr);
+		kmap_atomic_idx_pop();
 	}
 #ifdef CONFIG_DEBUG_HIGHMEM
 	else {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 84346200e783..71a59296af80 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -51,7 +51,6 @@
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
 #include <asm/init.h>
-#include <linux/bootmem.h>
 
 static int __init parse_direct_gbpages_off(char *arg)
 {
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 75a3d7f24a2c..7b179b499fa3 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -98,7 +98,7 @@ iounmap_atomic(void __iomem *kvaddr)
 	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
 		int idx, type;
 
-		type = kmap_atomic_idx_pop();
+		type = kmap_atomic_idx();
 		idx = type + KM_TYPE_NR * smp_processor_id();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -111,6 +111,7 @@ iounmap_atomic(void __iomem *kvaddr)
 		 * attributes or becomes a protected page in a hypervisor.
 		 */
 		kpte_clear_flush(kmap_pte-idx, vaddr);
+		kmap_atomic_idx_pop();
 	}
 
 	pagefault_enable();
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index bd1489c3ce09..4e8baad36d37 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -726,6 +726,12 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	case 0x11:
 		cpu_type = "x86-64/family11h";
 		break;
+	case 0x12:
+		cpu_type = "x86-64/family12h";
+		break;
+	case 0x14:
+		cpu_type = "x86-64/family14h";
+		break;
 	default:
 		return -ENODEV;
 	}
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 42fb46f83883..a011bcc0f943 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -48,17 +48,24 @@ static unsigned long reset_value[NUM_VIRT_COUNTERS];
 
 static u32 ibs_caps;
 
-struct op_ibs_config {
+struct ibs_config {
 	unsigned long op_enabled;
 	unsigned long fetch_enabled;
 	unsigned long max_cnt_fetch;
 	unsigned long max_cnt_op;
 	unsigned long rand_en;
 	unsigned long dispatched_ops;
+	unsigned long branch_target;
 };
 
-static struct op_ibs_config ibs_config;
-static u64 ibs_op_ctl;
+struct ibs_state {
+	u64		ibs_op_ctl;
+	int		branch_target;
+	unsigned long	sample_size;
+};
+
+static struct ibs_config ibs_config;
+static struct ibs_state ibs_state;
 
 /*
  * IBS cpuid feature detection
@@ -71,8 +78,16 @@ static u64 ibs_op_ctl;
  * bit 0 is used to indicate the existence of IBS.
  */
 #define IBS_CAPS_AVAIL			(1U<<0)
+#define IBS_CAPS_FETCHSAM		(1U<<1)
+#define IBS_CAPS_OPSAM			(1U<<2)
 #define IBS_CAPS_RDWROPCNT		(1U<<3)
 #define IBS_CAPS_OPCNT			(1U<<4)
+#define IBS_CAPS_BRNTRGT		(1U<<5)
+#define IBS_CAPS_OPCNTEXT		(1U<<6)
+
+#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
+					 | IBS_CAPS_FETCHSAM	\
+					 | IBS_CAPS_OPSAM)
 
 /*
  * IBS APIC setup
@@ -99,12 +114,12 @@ static u32 get_ibs_caps(void)
 	/* check IBS cpuid feature flags */
 	max_level = cpuid_eax(0x80000000);
 	if (max_level < IBS_CPUID_FEATURES)
-		return IBS_CAPS_AVAIL;
+		return IBS_CAPS_DEFAULT;
 
 	ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
 	if (!(ibs_caps & IBS_CAPS_AVAIL))
 		/* cpuid flags not valid */
-		return IBS_CAPS_AVAIL;
+		return IBS_CAPS_DEFAULT;
 
 	return ibs_caps;
 }
@@ -197,8 +212,8 @@ op_amd_handle_ibs(struct pt_regs * const regs,
 	rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
 	if (ctl & IBS_OP_VAL) {
 		rdmsrl(MSR_AMD64_IBSOPRIP, val);
-		oprofile_write_reserve(&entry, regs, val,
-				       IBS_OP_CODE, IBS_OP_SIZE);
+		oprofile_write_reserve(&entry, regs, val, IBS_OP_CODE,
+				       ibs_state.sample_size);
 		oprofile_add_data64(&entry, val);
 		rdmsrl(MSR_AMD64_IBSOPDATA, val);
 		oprofile_add_data64(&entry, val);
@@ -210,10 +225,14 @@ op_amd_handle_ibs(struct pt_regs * const regs,
 		oprofile_add_data64(&entry, val);
 		rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
 		oprofile_add_data64(&entry, val);
+		if (ibs_state.branch_target) {
+			rdmsrl(MSR_AMD64_IBSBRTARGET, val);
+			oprofile_add_data(&entry, (unsigned long)val);
+		}
 		oprofile_write_commit(&entry);
 
 		/* reenable the IRQ */
-		ctl = op_amd_randomize_ibs_op(ibs_op_ctl);
+		ctl = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
 		wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
 	}
 }
@@ -226,21 +245,32 @@ static inline void op_amd_start_ibs(void)
 	if (!ibs_caps)
 		return;
 
+	memset(&ibs_state, 0, sizeof(ibs_state));
+
+	/*
+	 * Note: Since the max count settings may out of range we
+	 * write back the actual used values so that userland can read
+	 * it.
+	 */
+
 	if (ibs_config.fetch_enabled) {
-		val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT;
+		val = ibs_config.max_cnt_fetch >> 4;
+		val = min(val, IBS_FETCH_MAX_CNT);
+		ibs_config.max_cnt_fetch = val << 4;
 		val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
 		val |= IBS_FETCH_ENABLE;
 		wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
 	}
 
 	if (ibs_config.op_enabled) {
-		ibs_op_ctl = ibs_config.max_cnt_op >> 4;
+		val = ibs_config.max_cnt_op >> 4;
 		if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
 			/*
 			 * IbsOpCurCnt not supported.  See
 			 * op_amd_randomize_ibs_op() for details.
 			 */
-			ibs_op_ctl = clamp(ibs_op_ctl, 0x0081ULL, 0xFF80ULL);
+			val = clamp(val, 0x0081ULL, 0xFF80ULL);
+			ibs_config.max_cnt_op = val << 4;
 		} else {
 			/*
 			 * The start value is randomized with a
@@ -248,13 +278,24 @@ static inline void op_amd_start_ibs(void)
 			 * with the half of the randomized range.  Also
 			 * avoid underflows.
 			 */
-			ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
-					 IBS_OP_MAX_CNT);
+			val += IBS_RANDOM_MAXCNT_OFFSET;
+			if (ibs_caps & IBS_CAPS_OPCNTEXT)
+				val = min(val, IBS_OP_MAX_CNT_EXT);
+			else
+				val = min(val, IBS_OP_MAX_CNT);
+			ibs_config.max_cnt_op =
+				(val - IBS_RANDOM_MAXCNT_OFFSET) << 4;
+		}
+		val = ((val & ~IBS_OP_MAX_CNT) << 4) | (val & IBS_OP_MAX_CNT);
+		val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
+		val |= IBS_OP_ENABLE;
+		ibs_state.ibs_op_ctl = val;
+		ibs_state.sample_size = IBS_OP_SIZE;
+		if (ibs_config.branch_target) {
+			ibs_state.branch_target = 1;
+			ibs_state.sample_size++;
 		}
-		if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
-			ibs_op_ctl |= IBS_OP_CNT_CTL;
-		ibs_op_ctl |= IBS_OP_ENABLE;
-		val = op_amd_randomize_ibs_op(ibs_op_ctl);
+		val = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
 		wrmsrl(MSR_AMD64_IBSOPCTL, val);
 	}
 }
@@ -281,29 +322,25 @@ static inline int eilvt_is_available(int offset)
 
 static inline int ibs_eilvt_valid(void)
 {
-	u64 val;
 	int offset;
+	u64 val;
 
 	rdmsrl(MSR_AMD64_IBSCTL, val);
+	offset = val & IBSCTL_LVT_OFFSET_MASK;
+
 	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
-		pr_err(FW_BUG "cpu %d, invalid IBS "
-		       "interrupt offset %d (MSR%08X=0x%016llx)",
-		       smp_processor_id(), offset,
-		       MSR_AMD64_IBSCTL, val);
+		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
+		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
 		return 0;
 	}
 
-	offset = val & IBSCTL_LVT_OFFSET_MASK;
-
-	if (eilvt_is_available(offset))
-		return !0;
-
-	pr_err(FW_BUG "cpu %d, IBS interrupt offset %d "
-	       "not available (MSR%08X=0x%016llx)",
-	       smp_processor_id(), offset,
-	       MSR_AMD64_IBSCTL, val);
+	if (!eilvt_is_available(offset)) {
+		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
+		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
+		return 0;
+	}
 
-	return 0;
+	return 1;
 }
 
 static inline int get_ibs_offset(void)
@@ -630,28 +667,33 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
 	/* model specific files */
 
 	/* setup some reasonable defaults */
+	memset(&ibs_config, 0, sizeof(ibs_config));
 	ibs_config.max_cnt_fetch = 250000;
-	ibs_config.fetch_enabled = 0;
 	ibs_config.max_cnt_op = 250000;
-	ibs_config.op_enabled = 0;
-	ibs_config.dispatched_ops = 0;
-
-	dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
-	oprofilefs_create_ulong(sb, dir, "enable",
-				&ibs_config.fetch_enabled);
-	oprofilefs_create_ulong(sb, dir, "max_count",
-				&ibs_config.max_cnt_fetch);
-	oprofilefs_create_ulong(sb, dir, "rand_enable",
-				&ibs_config.rand_en);
+
+	if (ibs_caps & IBS_CAPS_FETCHSAM) {
+		dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
+		oprofilefs_create_ulong(sb, dir, "enable",
+					&ibs_config.fetch_enabled);
+		oprofilefs_create_ulong(sb, dir, "max_count",
+					&ibs_config.max_cnt_fetch);
+		oprofilefs_create_ulong(sb, dir, "rand_enable",
+					&ibs_config.rand_en);
+	}
 
-	dir = oprofilefs_mkdir(sb, root, "ibs_op");
-	oprofilefs_create_ulong(sb, dir, "enable",
-				&ibs_config.op_enabled);
-	oprofilefs_create_ulong(sb, dir, "max_count",
-				&ibs_config.max_cnt_op);
-	if (ibs_caps & IBS_CAPS_OPCNT)
-		oprofilefs_create_ulong(sb, dir, "dispatched_ops",
-					&ibs_config.dispatched_ops);
+	if (ibs_caps & IBS_CAPS_OPSAM) {
+		dir = oprofilefs_mkdir(sb, root, "ibs_op");
+		oprofilefs_create_ulong(sb, dir, "enable",
+					&ibs_config.op_enabled);
+		oprofilefs_create_ulong(sb, dir, "max_count",
+					&ibs_config.max_cnt_op);
+		if (ibs_caps & IBS_CAPS_OPCNT)
+			oprofilefs_create_ulong(sb, dir, "dispatched_ops",
+						&ibs_config.dispatched_ops);
+		if (ibs_caps & IBS_CAPS_BRNTRGT)
+			oprofilefs_create_ulong(sb, dir, "branch_target",
+						&ibs_config.branch_target);
+	}
 
 	return 0;
 }
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 55253095be84..826140af3c3c 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -65,16 +65,21 @@ pcibios_align_resource(void *data, const struct resource *res,
 			resource_size_t size, resource_size_t align)
 {
 	struct pci_dev *dev = data;
-	resource_size_t start = res->start;
+	resource_size_t start = round_down(res->end - size + 1, align);
 
 	if (res->flags & IORESOURCE_IO) {
-		if (skip_isa_ioresource_align(dev))
-			return start;
-		if (start & 0x300)
-			start = (start + 0x3ff) & ~0x3ff;
+
+		/*
+		 * If we're avoiding ISA aliases, the largest contiguous I/O
+		 * port space is 256 bytes.  Clearing bits 9 and 10 preserves
+		 * all 256-byte and smaller alignments, so the result will
+		 * still be correctly aligned.
+		 */
+		if (!skip_isa_ioresource_align(dev))
+			start &= ~0x300;
 	} else if (res->flags & IORESOURCE_MEM) {
 		if (start < BIOS_END)
-			start = BIOS_END;
+			start = res->end;	/* fail; no space */
 	}
 	return start;
 }
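
The pcibios_align_resource() change above matches the series' switch to
top-down resource allocation: the candidate is now the highest aligned
address at which the request still fits below res->end, and for I/O ports
the 0x100 and 0x200 bits are cleared so the candidate lands in the
ISA-alias-free first 256 bytes of its 1 KB block without losing any
alignment of 256 bytes or less. A worked example with assumed numbers:

    /* size = 0x100, align = 0x100, res->end = 0x5fff:            */
    /*   round_down(0x5fff - 0x100 + 1, 0x100) = 0x5f00           */
    /*   0x5f00 & ~0x300                       = 0x5c00           */
    /* 0x5c00 is still 0x100-aligned and sits at offset 0 of its  */
    /* 1 KB block, clear of the aliased 0x100-0x3ff range.        */
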
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index f547ee05f715..9f9bfb705cf9 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -584,27 +584,28 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
 	case PCI_DEVICE_ID_INTEL_ICH9_3:
 	case PCI_DEVICE_ID_INTEL_ICH9_4:
 	case PCI_DEVICE_ID_INTEL_ICH9_5:
-	case PCI_DEVICE_ID_INTEL_TOLAPAI_0:
+	case PCI_DEVICE_ID_INTEL_EP80579_0:
 	case PCI_DEVICE_ID_INTEL_ICH10_0:
 	case PCI_DEVICE_ID_INTEL_ICH10_1:
 	case PCI_DEVICE_ID_INTEL_ICH10_2:
 	case PCI_DEVICE_ID_INTEL_ICH10_3:
+	case PCI_DEVICE_ID_INTEL_PATSBURG_LPC:
 		r->name = "PIIX/ICH";
 		r->get = pirq_piix_get;
 		r->set = pirq_piix_set;
 		return 1;
 	}
 
-	if ((device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN) &&
-		(device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX)) {
+	if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN) &&
+		(device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)) {
 		r->name = "PIIX/ICH";
 		r->get = pirq_piix_get;
 		r->set = pirq_piix_set;
 		return 1;
 	}
 
-	if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) &&
-		(device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) {
+	if ((device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN) &&
+		(device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)) {
 		r->name = "PIIX/ICH";
 		r->get = pirq_piix_get;
 		r->set = pirq_piix_set;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index a918553ebc75..e282886616a0 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -65,7 +65,6 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
 			      int end, u64 addr)
 {
 	struct pci_mmcfg_region *new;
-	int num_buses;
 	struct resource *res;
 
 	if (addr == 0)
@@ -82,10 +81,9 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
 
 	list_add_sorted(new);
 
-	num_buses = end - start + 1;
 	res = &new->res;
 	res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
-	res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
+	res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
 	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 	snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
 		 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 44ab12dc2a12..70ddeaeb1ef3 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -59,7 +59,6 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
-#include <asm/setup.h>
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
 
@@ -1016,7 +1015,7 @@ static void xen_reboot(int reason)
 	struct sched_shutdown r = { .reason = reason };
 
 #ifdef CONFIG_SMP
-	smp_send_stop();
+	stop_other_cpus();
 #endif
 
 	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 25f232b18a82..f4d010031465 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -400,9 +400,9 @@ static void stop_self(void *v)
 	BUG();
 }
 
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
 {
-	smp_call_function(stop_self, NULL, 0);
+	smp_call_function(stop_self, NULL, wait);
 }
 
 static void xen_smp_send_reschedule(int cpu)
@@ -470,7 +470,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
 	.cpu_disable = xen_cpu_disable,
 	.play_dead = xen_play_dead,
 
-	.smp_send_stop = xen_smp_send_stop,
+	.stop_other_cpus = xen_stop_other_cpus,
 	.smp_send_reschedule = xen_smp_send_reschedule,
 
 	.send_call_func_ipi = xen_smp_send_call_function_ipi,