Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                    1
-rw-r--r--  arch/x86/boot/compressed/misc.c     2
-rw-r--r--  arch/x86/include/asm/cpufeature.h   1
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h    4
-rw-r--r--  arch/x86/kernel/microcode_amd.c    24
-rw-r--r--  arch/x86/net/bpf_jit_comp.c        36
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c       2
-rw-r--r--  arch/x86/platform/uv/uv_irq.c       2
-rw-r--r--  arch/x86/xen/spinlock.c            27
9 files changed, 74 insertions(+), 25 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 864cc6e6ac8e..5bed94e189fa 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -360,7 +360,6 @@ config X86_NUMACHIP
 	depends on NUMA
 	depends on SMP
 	depends on X86_X2APIC
-	depends on !EDAC_AMD64
 	---help---
 	  Adds support for Numascale NumaChip large-SMP systems. Needed to
 	  enable more than ~168 cores.
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 3a19d04cebeb..7116dcba0c9e 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -321,6 +321,8 @@ static void parse_elf(void *output)
 		default: /* Ignore other PT_* */ break;
 		}
 	}
+
+	free(phdrs);
 }
 
 asmlinkage void decompress_kernel(void *rmode, memptr heap,
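
The hunk above plugs a memory leak in parse_elf(): the scratch array of program headers is allocated near the top of the function and, before this change, was never released after the loadable segments had been copied into place. A stand-alone sketch of the pattern, with illustrative types and names rather than the kernel's own:

    #include <stdlib.h>
    #include <string.h>

    struct phdr { long p_type, p_offset, p_paddr, p_filesz; }; /* illustrative */

    static void parse_segments(void *output, const struct phdr *src, int count)
    {
            /* scratch copy of the program header table */
            struct phdr *phdrs = malloc(count * sizeof(*phdrs));

            if (!phdrs)
                    return;
            memcpy(phdrs, src, count * sizeof(*phdrs));

            for (int i = 0; i < count; i++) {
                    /* ... copy segment i into output ... */
            }

            free(phdrs);    /* the line this hunk adds: release the scratch table */
    }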
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 17c5d4bdee5e..8d67d428b0f9 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -159,6 +159,7 @@
 #define X86_FEATURE_WDT         (6*32+13) /* Watchdog timer */
 #define X86_FEATURE_LWP         (6*32+15) /* Light Weight Profiling */
 #define X86_FEATURE_FMA4        (6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE         (6*32+17) /* translation cache extension */
 #define X86_FEATURE_NODEID_MSR  (6*32+19) /* NodeId MSR */
 #define X86_FEATURE_TBM         (6*32+21) /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT     (6*32+22) /* topology extensions CPUID leafs */
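
X86_FEATURE_TCE fills the previously unused slot 17 in capability word 6, which holds the AMD-defined flags from CPUID level 0x80000001 (ECX); the surrounding FMA4 (bit 16) and NODEID_MSR (bit 19) entries bracket it. A hedged sketch of how such a flag would typically be tested; no consumer of the bit is part of this diff:

    #include <linux/types.h>
    #include <asm/cpufeature.h>

    static bool tce_supported(void)
    {
            /* true when CPUID 0x80000001:ECX bit 17 is set on this system */
            return boot_cpu_has(X86_FEATURE_TCE);
    }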
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 54a13aaebc40..21f7385badb8 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -318,13 +318,13 @@ uv_gpa_in_mmr_space(unsigned long gpa)
 /* UV global physical address --> socket phys RAM */
 static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
 {
-	unsigned long paddr = gpa & uv_hub_info->gpa_mask;
+	unsigned long paddr;
 	unsigned long remap_base = uv_hub_info->lowmem_remap_base;
 	unsigned long remap_top = uv_hub_info->lowmem_remap_top;
 
 	gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
 		((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
-	gpa = gpa & uv_hub_info->gpa_mask;
+	paddr = gpa & uv_hub_info->gpa_mask;
 	if (paddr >= remap_base && paddr < remap_base + remap_top)
 		paddr -= remap_base;
 	return paddr;
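
The fix above is an ordering bug: paddr was initialized from gpa at its declaration, before the node bits had been folded back down, so the lowmem-remap comparison ran on a stale value. After the change the mask is applied only once gpa has been rewritten. A minimal stand-alone model of the corrected flow, with the hub parameters passed in explicitly (illustrative, not the UV hardware layout):

    unsigned long gpa_to_paddr(unsigned long gpa,
                               unsigned int m_shift, unsigned int n_lshift,
                               unsigned int m_val, unsigned long gpa_mask,
                               unsigned long remap_base, unsigned long remap_top)
    {
            unsigned long paddr;

            gpa = ((gpa << m_shift) >> m_shift) |   /* keep the offset bits  */
                  ((gpa >> n_lshift) << m_val);     /* refold the node bits  */
            paddr = gpa & gpa_mask;                 /* mask AFTER the refold */
            if (paddr >= remap_base && paddr < remap_base + remap_top)
                    paddr -= remap_base;
            return paddr;
    }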
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index fe86493f3ed1..ac0417be9131 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -311,13 +311,33 @@ out:
 	return state;
 }
 
+/*
+ * AMD microcode firmware naming convention, up to family 15h they are in
+ * the legacy file:
+ *
+ *    amd-ucode/microcode_amd.bin
+ *
+ * This legacy file is always smaller than 2K in size.
+ *
+ * Starting at family 15h they are in family specific firmware files:
+ *
+ *    amd-ucode/microcode_amd_fam15h.bin
+ *    amd-ucode/microcode_amd_fam16h.bin
+ *    ...
+ *
+ * These might be larger than 2K.
+ */
 static enum ucode_state request_microcode_amd(int cpu, struct device *device)
 {
-	const char *fw_name = "amd-ucode/microcode_amd.bin";
+	char fw_name[36] = "amd-ucode/microcode_amd.bin";
 	const struct firmware *fw;
 	enum ucode_state ret = UCODE_NFOUND;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	if (c->x86 >= 0x15)
+		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
 
-	if (request_firmware(&fw, fw_name, device)) {
+	if (request_firmware(&fw, (const char *)fw_name, device)) {
 		pr_err("failed to load file %s\n", fw_name);
 		goto out;
 	}
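
With the family-specific naming scheme, fw_name can no longer point at a string literal; it becomes a fixed buffer that is overwritten via snprintf() for family 0x15 and later. A stand-alone sketch of just the name selection (user-space, illustrative helper name):

    #include <stdio.h>

    static void ucode_container_name(char *buf, size_t len, unsigned int family)
    {
            snprintf(buf, len, "amd-ucode/microcode_amd.bin");  /* legacy default */
            if (family >= 0x15)                                 /* per-family file */
                    snprintf(buf, len,
                             "amd-ucode/microcode_amd_fam%.2xh.bin", family);
    }

    int main(void)
    {
            char name[36];

            ucode_container_name(name, sizeof(name), 0x14);
            puts(name);     /* amd-ucode/microcode_amd.bin        */
            ucode_container_name(name, sizeof(name), 0x15);
            puts(name);     /* amd-ucode/microcode_amd_fam15h.bin */
            return 0;
    }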
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 7b65f752c5f8..7c1b765ecc59 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -151,17 +151,18 @@ void bpf_jit_compile(struct sk_filter *fp)
 	cleanup_addr = proglen; /* epilogue address */
 
 	for (pass = 0; pass < 10; pass++) {
+		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
 		/* no prologue/epilogue for trivial filters (RET something) */
 		proglen = 0;
 		prog = temp;
 
-		if (seen) {
+		if (seen_or_pass0) {
 			EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
 			EMIT4(0x48, 0x83, 0xec, 96);	/* subq $96,%rsp */
 			/* note : must save %rbx in case bpf_error is hit */
-			if (seen & (SEEN_XREG | SEEN_DATAREF))
+			if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
 				EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
-			if (seen & SEEN_XREG)
+			if (seen_or_pass0 & SEEN_XREG)
 				CLEAR_X(); /* make sure we don't leak kernel memory */
 
 			/*
@@ -170,7 +171,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 			 * r9 = skb->len - skb->data_len
 			 * r8 = skb->data
 			 */
-			if (seen & SEEN_DATAREF) {
+			if (seen_or_pass0 & SEEN_DATAREF) {
 				if (offsetof(struct sk_buff, len) <= 127)
 					/* mov off8(%rdi),%r9d */
 					EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
@@ -260,9 +261,14 @@ void bpf_jit_compile(struct sk_filter *fp)
 			case BPF_S_ALU_DIV_X: /* A /= X; */
 				seen |= SEEN_XREG;
 				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
-				if (pc_ret0 != -1)
-					EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4));
-				else {
+				if (pc_ret0 > 0) {
+					/* addrs[pc_ret0 - 1] is start address of target
+					 * (addrs[i] - 4) is the address following this jmp
+					 * ("xor %edx,%edx; div %ebx" being 4 bytes long)
+					 */
+					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
+								(addrs[i] - 4));
+				} else {
 					EMIT_COND_JMP(X86_JNE, 2 + 5);
 					CLEAR_A();
 					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
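
The indexing change above follows from how addrs[] is filled: after each pass, addrs[i] holds the offset just past instruction i, so the start of instruction t is addrs[t - 1], and index 0 has no predecessor, hence the guard becoming pc_ret0 > 0 (a RET 0 at instruction 0 simply takes the fallback branch). The same correction appears in the BPF_S_LDX_B_MSH hunk further down. A tiny model of the convention:

    #include <stdio.h>

    int main(void)
    {
            unsigned int sizes[] = { 3, 4, 2, 5 }; /* illustrative insn lengths */
            unsigned int addrs[4], proglen = 0;

            for (int i = 0; i < 4; i++) {
                    proglen += sizes[i];
                    addrs[i] = proglen;     /* offset just PAST instruction i */
            }

            int t = 2;      /* jump target: instruction 2 */
            printf("start of insn %d = %u\n", t, addrs[t - 1]); /* prints 7 */
            return 0;
    }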
@@ -335,12 +341,12 @@ void bpf_jit_compile(struct sk_filter *fp)
 			}
 			/* fall through */
 			case BPF_S_RET_A:
-				if (seen) {
+				if (seen_or_pass0) {
 					if (i != flen - 1) {
 						EMIT_JMP(cleanup_addr - addrs[i]);
 						break;
 					}
-					if (seen & SEEN_XREG)
+					if (seen_or_pass0 & SEEN_XREG)
 						EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov -8(%rbp),%rbx */
 					EMIT1(0xc9);		/* leaveq */
 				}
@@ -483,8 +489,9 @@ common_load: seen |= SEEN_DATAREF;
 				goto common_load;
 			case BPF_S_LDX_B_MSH:
 				if ((int)K < 0) {
-					if (pc_ret0 != -1) {
-						EMIT_JMP(addrs[pc_ret0] - addrs[i]);
+					if (pc_ret0 > 0) {
+						/* addrs[pc_ret0 - 1] is the start address */
+						EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]);
 						break;
 					}
 					CLEAR_A();
@@ -599,13 +606,14 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 		 * use it to give the cleanup instruction(s) addr
 		 */
 		cleanup_addr = proglen - 1; /* ret */
-		if (seen)
+		if (seen_or_pass0)
 			cleanup_addr -= 1; /* leaveq */
-		if (seen & SEEN_XREG)
+		if (seen_or_pass0 & SEEN_XREG)
 			cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
 
 		if (image) {
-			WARN_ON(proglen != oldproglen);
+			if (proglen != oldproglen)
+				pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
 			break;
 		}
 		if (proglen == oldproglen) {
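
Taken together, the seen_or_pass0 changes make pass 0 assume the worst case (all of SEEN_XREG, SEEN_DATAREF and SEEN_MEM), so the first size estimate is an upper bound and later passes can only shrink the image; the WARN_ON() becomes a pr_err() that reports both lengths if convergence is nevertheless violated. A stand-alone sketch of that sizing discipline (illustrative emitter, not the kernel's):

    #include <stdio.h>

    #define SEEN_XREG    1
    #define SEEN_DATAREF 2
    #define SEEN_MEM     4

    static unsigned int emit_pass(unsigned int seen_or_pass0, unsigned int *seen)
    {
            unsigned int len = 16;                  /* filter body */

            if (seen_or_pass0)
                    len += 8;                       /* prologue/epilogue */
            if (seen_or_pass0 & SEEN_XREG)
                    len += 4;                       /* save %rbx, clear X */
            *seen = SEEN_XREG;                      /* what the body really used */
            return len;
    }

    int main(void)
    {
            unsigned int seen = 0, oldproglen = ~0u;

            for (int pass = 0; pass < 10; pass++) {
                    unsigned int sop0 = (pass == 0) ?
                            (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
                    unsigned int proglen = emit_pass(sop0, &seen);

                    printf("pass %d: %u bytes\n", pass, proglen);
                    if (proglen == oldproglen)
                            break;                  /* sizes converged */
                    oldproglen = proglen;
            }
            return 0;
    }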
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 9be4cff00a2d..3ae0e61abd23 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1851,6 +1851,8 @@ static void __init init_per_cpu_tunables(void)
 		bcp->cong_reps			= congested_reps;
 		bcp->cong_period		= congested_period;
 		bcp->clocks_per_100_usec	= usec_2_cycles(100);
+		spin_lock_init(&bcp->queue_lock);
+		spin_lock_init(&bcp->uvhub_lock);
 	}
 }
 
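The two spin_lock_init() calls set up locks embedded in the per-cpu control structure before anything can take them; a spinlock living inside runtime-allocated or per-cpu data has no static initializer, so it must be initialized explicitly (lockdep in particular needs this to register a lock class). A hedged kernel-style sketch of the idiom, with an illustrative structure name:

    #include <linux/spinlock.h>

    struct percpu_ctl {                     /* illustrative stand-in */
            spinlock_t queue_lock;
            spinlock_t uvhub_lock;
    };

    static void init_one(struct percpu_ctl *bcp)
    {
            spin_lock_init(&bcp->queue_lock);       /* must precede first use */
            spin_lock_init(&bcp->uvhub_lock);
    }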
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 374a05d8ad22..f25c2765a5c9 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -25,7 +25,7 @@ struct uv_irq_2_mmr_pnode{
 	int		irq;
 };
 
-static spinlock_t		uv_irq_lock;
+static DEFINE_SPINLOCK(uv_irq_lock);
 static struct rb_root		uv_irq_root;
 
 static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);
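
For a file-scope lock the same requirement is met at compile time: DEFINE_SPINLOCK() expands to a declaration with the __SPIN_LOCK_UNLOCKED initializer, whereas the bare "static spinlock_t uv_irq_lock;" it replaces was only zero-filled BSS and never properly initialized. A short sketch of the two equivalent spellings:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);           /* preferred */
    /* static spinlock_t example_lock = __SPIN_LOCK_UNLOCKED(example_lock); */

    static void touch(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);
            /* ... walk/update the protected structure ... */
            spin_unlock_irqrestore(&example_lock, flags);
    }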
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index cc9b1e182fcf..d69cc6c3f808 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -116,9 +116,26 @@ static inline void spin_time_accum_blocked(u64 start)
 }
 #endif	/* CONFIG_XEN_DEBUG_FS */
 
+/*
+ * Size struct xen_spinlock so it's the same as arch_spinlock_t.
+ */
+#if NR_CPUS < 256
+typedef u8 xen_spinners_t;
+# define inc_spinners(xl) \
+	asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
+# define dec_spinners(xl) \
+	asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
+#else
+typedef u16 xen_spinners_t;
+# define inc_spinners(xl) \
+	asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
+# define dec_spinners(xl) \
+	asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
+#endif
+
 struct xen_spinlock {
 	unsigned char lock;		/* 0 -> free; 1 -> locked */
-	unsigned short spinners;	/* count of waiting cpus */
+	xen_spinners_t spinners;	/* count of waiting cpus */
 };
 
 static int xen_spin_is_locked(struct arch_spinlock *lock)
@@ -164,8 +181,7 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
 
 	wmb();		/* set lock of interest before count */
 
-	asm(LOCK_PREFIX " incw %0"
-	    : "+m" (xl->spinners) : : "memory");
+	inc_spinners(xl);
 
 	return prev;
 }
@@ -176,8 +192,7 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
  */
 static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
 {
-	asm(LOCK_PREFIX " decw %0"
-	    : "+m" (xl->spinners) : : "memory");
+	dec_spinners(xl);
 	wmb();		/* decrement count before restoring lock */
 	__this_cpu_write(lock_spinners, prev);
 }
@@ -373,6 +388,8 @@ void xen_uninit_lock_cpu(int cpu)
 
 void __init xen_init_spinlocks(void)
 {
+	BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
+
 	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
 	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
 	pv_lock_ops.spin_lock = xen_spin_lock;
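
The spinlock.c changes exist because struct xen_spinlock is overlaid on the storage of the generic arch_spinlock_t: with NR_CPUS < 256 the generic ticket lock is only two bytes, so a one-byte lock field plus a two-byte spinners count no longer fits, and spinners shrinks to u8 (with the LOCK_PREFIX inc/dec asm switching from incw/decw to incb/decb to match the operand width). The BUILD_BUG_ON() added to xen_init_spinlocks() turns any future size mismatch into a build failure. A stand-alone model of the size constraint, using the C11 spelling of the kernel's compile-time assert and an illustrative two-byte ticket-lock stand-in:

    #include <stdint.h>

    #define NR_CPUS 64                          /* illustrative config value */

    #if NR_CPUS < 256
    typedef uint8_t xen_spinners_t;             /* 1 + 1 bytes: fits */
    #else
    typedef uint16_t xen_spinners_t;            /* 1 + 2 (+ padding): too big */
    #endif

    struct xen_spinlock {
            unsigned char lock;                 /* 0 -> free; 1 -> locked */
            xen_spinners_t spinners;            /* count of waiting cpus */
    };

    struct arch_spinlock { uint16_t head_tail; }; /* 2-byte ticket-lock model */

    _Static_assert(sizeof(struct xen_spinlock) <= sizeof(struct arch_spinlock),
                   "struct xen_spinlock must fit inside arch_spinlock_t");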