Diffstat (limited to 'arch/x86')

 -rw-r--r--  arch/x86/boot/compressed/aslr.c        | 34
 -rw-r--r--  arch/x86/boot/compressed/misc.c        |  3
 -rw-r--r--  arch/x86/boot/compressed/misc.h        |  6
 -rw-r--r--  arch/x86/crypto/aesni-intel_glue.c     |  4
 -rw-r--r--  arch/x86/include/asm/fpu-internal.h    |  2
 -rw-r--r--  arch/x86/include/asm/page_types.h      |  2
 -rw-r--r--  arch/x86/include/asm/pci_x86.h         |  2
 -rw-r--r--  arch/x86/include/uapi/asm/bootparam.h  |  1
 -rw-r--r--  arch/x86/kernel/acpi/boot.c            | 25
 -rw-r--r--  arch/x86/kernel/apic/apic_numachip.c   | 22
 -rw-r--r--  arch/x86/kernel/entry_64.S             | 18
 -rw-r--r--  arch/x86/kernel/module.c               | 10
 -rw-r--r--  arch/x86/kernel/setup.c                | 22
 -rw-r--r--  arch/x86/kernel/traps.c                |  4
 -rw-r--r--  arch/x86/kernel/xsave.c                |  7
 -rw-r--r--  arch/x86/kvm/i8259.c                   |  1
 -rw-r--r--  arch/x86/kvm/ioapic.c                  |  4
 -rw-r--r--  arch/x86/kvm/lapic.c                   |  3
 -rw-r--r--  arch/x86/kvm/vmx.c                     | 18
 -rw-r--r--  arch/x86/kvm/x86.c                     |  1
 -rw-r--r--  arch/x86/pci/common.c                  | 34
 -rw-r--r--  arch/x86/pci/intel_mid_pci.c           |  4
 -rw-r--r--  arch/x86/pci/irq.c                     | 15
 -rw-r--r--  arch/x86/vdso/vdso32/sigreturn.S       |  1

 24 files changed, 123 insertions, 120 deletions
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index 7083c16cccba..bb1376381985 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -14,13 +14,6 @@
 static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
         LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
 
-struct kaslr_setup_data {
-        __u64 next;
-        __u32 type;
-        __u32 len;
-        __u8 data[1];
-} kaslr_setup_data;
-
 #define I8254_PORT_CONTROL 0x43
 #define I8254_PORT_COUNTER0 0x40
 #define I8254_CMD_READBACK 0xC0
@@ -302,29 +295,7 @@ static unsigned long find_random_addr(unsigned long minimum,
         return slots_fetch_random();
 }
 
-static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled)
-{
-        struct setup_data *data;
-
-        kaslr_setup_data.type = SETUP_KASLR;
-        kaslr_setup_data.len = 1;
-        kaslr_setup_data.next = 0;
-        kaslr_setup_data.data[0] = enabled;
-
-        data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
-        while (data && data->next)
-                data = (struct setup_data *)(unsigned long)data->next;
-
-        if (data)
-                data->next = (unsigned long)&kaslr_setup_data;
-        else
-                params->hdr.setup_data = (unsigned long)&kaslr_setup_data;
-
-}
-
-unsigned char *choose_kernel_location(struct boot_params *params,
-                                      unsigned char *input,
+unsigned char *choose_kernel_location(unsigned char *input,
                                       unsigned long input_size,
                                       unsigned char *output,
                                       unsigned long output_size)
@@ -335,17 +306,14 @@ unsigned char *choose_kernel_location(struct boot_params *params,
 #ifdef CONFIG_HIBERNATION
         if (!cmdline_find_option_bool("kaslr")) {
                 debug_putstr("KASLR disabled by default...\n");
-                add_kaslr_setup_data(params, 0);
                 goto out;
         }
 #else
         if (cmdline_find_option_bool("nokaslr")) {
                 debug_putstr("KASLR disabled by cmdline...\n");
-                add_kaslr_setup_data(params, 0);
                 goto out;
         }
 #endif
-        add_kaslr_setup_data(params, 1);
 
         /* Record the various known unsafe memory ranges. */
         mem_avoid_init((unsigned long)input, input_size,
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 5903089c818f..a950864a64da 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -401,8 +401,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
          * the entire decompressed kernel plus relocation table, or the
          * entire decompressed kernel plus .bss and .brk sections.
          */
-        output = choose_kernel_location(real_mode, input_data, input_len,
-                                        output,
+        output = choose_kernel_location(input_data, input_len, output,
                                         output_len > run_size ? output_len
                                                               : run_size);
 
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index ee3576b2666b..04477d68403f 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -57,8 +57,7 @@ int cmdline_find_option_bool(const char *option);
 
 #if CONFIG_RANDOMIZE_BASE
 /* aslr.c */
-unsigned char *choose_kernel_location(struct boot_params *params,
-                                      unsigned char *input,
+unsigned char *choose_kernel_location(unsigned char *input,
                                       unsigned long input_size,
                                       unsigned char *output,
                                       unsigned long output_size);
@@ -66,8 +65,7 @@ unsigned char *choose_kernel_location(struct boot_params *params,
 bool has_cpuflag(int flag);
 #else
 static inline
-unsigned char *choose_kernel_location(struct boot_params *params,
-                                      unsigned char *input,
+unsigned char *choose_kernel_location(unsigned char *input,
                                       unsigned long input_size,
                                       unsigned char *output,
                                       unsigned long output_size)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 947c6bf52c33..54f60ab41c63 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1155,7 +1155,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
                 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                 if (!src)
                         return -ENOMEM;
-                assoc = (src + req->cryptlen + auth_tag_len);
+                assoc = (src + req->cryptlen);
                 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                 scatterwalk_map_and_copy(assoc, req->assoc, 0,
                         req->assoclen, 0);
@@ -1180,7 +1180,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
                 scatterwalk_done(&src_sg_walk, 0, 0);
                 scatterwalk_done(&assoc_sg_walk, 0, 0);
         } else {
-                scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
+                scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
                 kfree(src);
         }
         return retval;
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 0dbc08282291..72ba21a8b5fc 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -370,7 +370,7 @@ static inline void drop_fpu(struct task_struct *tsk)
         preempt_disable();
         tsk->thread.fpu_counter = 0;
         __drop_fpu(tsk);
-        clear_used_math();
+        clear_stopped_child_used_math(tsk);
         preempt_enable();
 }
 
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 95e11f79f123..f97fbe3abb67 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -51,8 +51,6 @@ extern int devmem_is_allowed(unsigned long pagenr);
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
 
-extern bool kaslr_enabled;
-
 static inline phys_addr_t get_max_mapped(void)
 {
         return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index fa1195dae425..164e3f8d3c3d 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
 extern int (*pcibios_enable_irq)(struct pci_dev *dev);
 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
 
+extern bool mp_should_keep_irq(struct device *dev);
+
 struct pci_raw_ops {
         int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
                     int reg, int len, u32 *val);
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 44e6dd7e36a2..225b0988043a 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -7,7 +7,6 @@
 #define SETUP_DTB 2
 #define SETUP_PCI 3
 #define SETUP_EFI 4
-#define SETUP_KASLR 5
 
 /* ram_size flags */
 #define RAMDISK_IMAGE_START_MASK 0x07FF
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 3d525c6124f6..803b684676ff 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1338,6 +1338,26 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
 }
 
 /*
+ * ACPI offers an alternative platform interface model that removes
+ * ACPI hardware requirements for platforms that do not implement
+ * the PC Architecture.
+ *
+ * We initialize the Hardware-reduced ACPI model here:
+ */
+static void __init acpi_reduced_hw_init(void)
+{
+        if (acpi_gbl_reduced_hardware) {
+                /*
+                 * Override x86_init functions and bypass legacy pic
+                 * in Hardware-reduced ACPI mode
+                 */
+                x86_init.timers.timer_init = x86_init_noop;
+                x86_init.irqs.pre_vector_init = x86_init_noop;
+                legacy_pic = &null_legacy_pic;
+        }
+}
+
+/*
  * If your system is blacklisted here, but you find that acpi=force
  * works for you, please contact linux-acpi@vger.kernel.org
  */
@@ -1536,6 +1556,11 @@ int __init early_acpi_boot_init(void)
          */
         early_acpi_process_madt();
 
+        /*
+         * Hardware-reduced ACPI mode initialization:
+         */
+        acpi_reduced_hw_init();
+
         return 0;
 }
 
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index c2fd21fed002..017149cded07 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -37,10 +37,12 @@ static const struct apic apic_numachip;
 static unsigned int get_apic_id(unsigned long x)
 {
         unsigned long value;
-        unsigned int id;
+        unsigned int id = (x >> 24) & 0xff;
 
-        rdmsrl(MSR_FAM10H_NODE_ID, value);
-        id = ((x >> 24) & 0xffU) | ((value << 2) & 0xff00U);
+        if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+                rdmsrl(MSR_FAM10H_NODE_ID, value);
+                id |= (value << 2) & 0xff00;
+        }
 
         return id;
 }
@@ -155,10 +157,18 @@ static int __init numachip_probe(void)
 
 static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 {
-        if (c->phys_proc_id != node) {
-                c->phys_proc_id = node;
-                per_cpu(cpu_llc_id, smp_processor_id()) = node;
+        u64 val;
+        u32 nodes = 1;
+
+        this_cpu_write(cpu_llc_id, node);
+
+        /* Account for nodes per socket in multi-core-module processors */
+        if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+                rdmsrl(MSR_FAM10H_NODE_ID, val);
+                nodes = ((val >> 3) & 7) + 1;
         }
+
+        c->phys_proc_id = node / nodes;
 }
 
 static int __init numachip_system_init(void)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1d74d161687c..2babb393915e 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -364,12 +364,21 @@ system_call_fastpath:
  * Has incomplete stack frame and undefined top of stack.
  */
 ret_from_sys_call:
-        testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-        jnz int_ret_from_sys_call_fixup /* Go the the slow path */
-
         LOCKDEP_SYS_EXIT
         DISABLE_INTERRUPTS(CLBR_NONE)
         TRACE_IRQS_OFF
+
+        /*
+         * We must check ti flags with interrupts (or at least preemption)
+         * off because we must *never* return to userspace without
+         * processing exit work that is enqueued if we're preempted here.
+         * In particular, returning to userspace with any of the one-shot
+         * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+         * very bad.
+         */
+        testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+        jnz int_ret_from_sys_call_fixup /* Go the the slow path */
+
         CFI_REMEMBER_STATE
         /*
          * sysretq will re-enable interrupts:
@@ -386,7 +395,7 @@ ret_from_sys_call:
 
 int_ret_from_sys_call_fixup:
         FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
-        jmp int_ret_from_sys_call
+        jmp int_ret_from_sys_call_irqs_off
 
 /* Do syscall tracing */
 tracesys:
@@ -432,6 +441,7 @@ tracesys_phase2:
 GLOBAL(int_ret_from_sys_call)
         DISABLE_INTERRUPTS(CLBR_NONE)
         TRACE_IRQS_OFF
+int_ret_from_sys_call_irqs_off:
         movl $_TIF_ALLWORK_MASK,%edi
         /* edi: mask to check */
 GLOBAL(int_with_check)
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 9bbb9b35c144..d1ac80b72c72 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -47,13 +47,21 @@ do { \
 
 #ifdef CONFIG_RANDOMIZE_BASE
 static unsigned long module_load_offset;
+static int randomize_modules = 1;
 
 /* Mutex protects the module_load_offset. */
 static DEFINE_MUTEX(module_kaslr_mutex);
 
+static int __init parse_nokaslr(char *p)
+{
+        randomize_modules = 0;
+        return 0;
+}
+early_param("nokaslr", parse_nokaslr);
+
 static unsigned long int get_module_load_offset(void)
 {
-        if (kaslr_enabled) {
+        if (randomize_modules) {
                 mutex_lock(&module_kaslr_mutex);
                 /*
                  * Calculate the module_load_offset the first time this
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 98dc9317286e..0a2421cca01f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -122,8 +122,6 @@
 unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
 
-bool __read_mostly kaslr_enabled = false;
-
 #ifdef CONFIG_DMI
 RESERVE_BRK(dmi_alloc, 65536);
 #endif
@@ -427,11 +425,6 @@ static void __init reserve_initrd(void)
 }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
-static void __init parse_kaslr_setup(u64 pa_data, u32 data_len)
-{
-        kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data));
-}
-
 static void __init parse_setup_data(void)
 {
         struct setup_data *data;
@@ -457,9 +450,6 @@ static void __init parse_setup_data(void)
                 case SETUP_EFI:
                         parse_efi_setup(pa_data, data_len);
                         break;
-                case SETUP_KASLR:
-                        parse_kaslr_setup(pa_data, data_len);
-                        break;
                 default:
                         break;
                 }
@@ -842,14 +832,10 @@ static void __init trim_low_memory_range(void)
 static int
 dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 {
-        if (kaslr_enabled)
-                pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
-                         (unsigned long)&_text - __START_KERNEL,
-                         __START_KERNEL,
-                         __START_KERNEL_map,
-                         MODULES_VADDR-1);
-        else
-                pr_emerg("Kernel Offset: disabled\n");
+        pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
+                 "(relocation range: 0x%lx-0x%lx)\n",
+                 (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
+                 __START_KERNEL_map, MODULES_VADDR-1);
 
         return 0;
 }
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 9d2073e2ecc9..4ff5d162ff9f 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -384,7 +384,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
                 goto exit;
         conditional_sti(regs);
 
-        if (!user_mode(regs))
+        if (!user_mode_vm(regs))
                 die("bounds", regs, error_code);
 
         if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
@@ -637,7 +637,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
          * then it's very likely the result of an icebp/int01 trap.
          * User wants a sigtrap for that.
          */
-        if (!dr6 && user_mode(regs))
+        if (!dr6 && user_mode_vm(regs))
                 user_icebp = 1;
 
         /* Catch kmemcheck conditions first of all! */
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 34f66e58a896..cdc6cf903078 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -379,7 +379,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
                  * thread's fpu state, reconstruct fxstate from the fsave
                  * header. Sanitize the copied state etc.
                  */
-                struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
+                struct fpu *fpu = &tsk->thread.fpu;
                 struct user_i387_ia32_struct env;
                 int err = 0;
 
@@ -393,14 +393,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
                  */
                 drop_fpu(tsk);
 
-                if (__copy_from_user(xsave, buf_fx, state_size) ||
+                if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
                     __copy_from_user(&env, buf, sizeof(env))) {
+                        fpu_finit(fpu);
                         err = -1;
                 } else {
                         sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
-                        set_used_math();
                 }
 
+                set_used_math();
                 if (use_eager_fpu()) {
                         preempt_disable();
                         math_state_restore();
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index cc31f7c06d3d..9541ba34126b 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -507,6 +507,7 @@ static int picdev_read(struct kvm_pic *s,
                 return -EOPNOTSUPP;
 
         if (len != 1) {
+                memset(val, 0, len);
                 pr_pic_unimpl("non byte read\n");
                 return 0;
         }
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index b1947e0f3e10..46d4449772bc 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
                 struct kvm_ioapic *ioapic, int vector, int trigger_mode)
 {
         int i;
+        struct kvm_lapic *apic = vcpu->arch.apic;
 
         for (i = 0; i < IOAPIC_NUM_PINS; i++) {
                 union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
                 kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
                 spin_lock(&ioapic->lock);
 
-                if (trigger_mode != IOAPIC_LEVEL_TRIG)
+                if (trigger_mode != IOAPIC_LEVEL_TRIG ||
+                    kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
                         continue;
 
                 ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bd4e34de24c7..4ee827d7bf36 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 
 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
 {
-        if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
-            kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+        if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
                 int trigger_mode;
                 if (apic_test_vector(vector, apic->regs + APIC_TMR))
                         trigger_mode = IOAPIC_LEVEL_TRIG;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f7b20b417a3a..ae4f6d35d19c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2168,7 +2168,10 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
 {
         unsigned long *msr_bitmap;
 
-        if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
+        if (is_guest_mode(vcpu))
+                msr_bitmap = vmx_msr_bitmap_nested;
+        else if (irqchip_in_kernel(vcpu->kvm) &&
+                 apic_x2apic_mode(vcpu->arch.apic)) {
                 if (is_long_mode(vcpu))
                         msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
                 else
@@ -2476,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
         if (enable_ept) {
                 /* nested EPT: emulate EPT also to L1 */
                 vmx->nested.nested_vmx_secondary_ctls_high |=
-                        SECONDARY_EXEC_ENABLE_EPT |
-                        SECONDARY_EXEC_UNRESTRICTED_GUEST;
+                        SECONDARY_EXEC_ENABLE_EPT;
                 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
                          VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
                          VMX_EPT_INVEPT_BIT;
@@ -2491,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
         } else
                 vmx->nested.nested_vmx_ept_caps = 0;
 
+        if (enable_unrestricted_guest)
+                vmx->nested.nested_vmx_secondary_ctls_high |=
+                        SECONDARY_EXEC_UNRESTRICTED_GUEST;
+
         /* miscellaneous data */
         rdmsr(MSR_IA32_VMX_MISC,
                 vmx->nested.nested_vmx_misc_low,
@@ -9218,9 +9224,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
         }
 
         if (cpu_has_vmx_msr_bitmap() &&
-            exec_control & CPU_BASED_USE_MSR_BITMAPS &&
-            nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) {
-                vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_nested));
+            exec_control & CPU_BASED_USE_MSR_BITMAPS) {
+                nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
+                /* MSR_BITMAP will be set by following vmx_set_efer. */
         } else
                 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bd7a70be41b3..32bf19ef3115 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2744,7 +2744,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
         case KVM_CAP_USER_NMI:
         case KVM_CAP_REINJECT_CONTROL:
         case KVM_CAP_IRQ_INJECT_STATUS:
-        case KVM_CAP_IRQFD:
         case KVM_CAP_IOEVENTFD:
         case KVM_CAP_IOEVENTFD_NO_LENGTH:
         case KVM_CAP_PIT2:
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 3d2612b68694..2fb384724ebb 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -513,31 +513,6 @@ void __init pcibios_set_cache_line_size(void)
         }
 }
 
-/*
- * Some device drivers assume dev->irq won't change after calling
- * pci_disable_device(). So delay releasing of IRQ resource to driver
- * unbinding time. Otherwise it will break PM subsystem and drivers
- * like xen-pciback etc.
- */
-static int pci_irq_notifier(struct notifier_block *nb, unsigned long action,
-                            void *data)
-{
-        struct pci_dev *dev = to_pci_dev(data);
-
-        if (action != BUS_NOTIFY_UNBOUND_DRIVER)
-                return NOTIFY_DONE;
-
-        if (pcibios_disable_irq)
-                pcibios_disable_irq(dev);
-
-        return NOTIFY_OK;
-}
-
-static struct notifier_block pci_irq_nb = {
-        .notifier_call = pci_irq_notifier,
-        .priority = INT_MIN,
-};
-
 int __init pcibios_init(void)
 {
         if (!raw_pci_ops) {
@@ -550,9 +525,6 @@ int __init pcibios_init(void)
 
         if (pci_bf_sort >= pci_force_bf)
                 pci_sort_breadthfirst();
-
-        bus_register_notifier(&pci_bus_type, &pci_irq_nb);
-
         return 0;
 }
 
@@ -711,6 +683,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
         return 0;
 }
 
+void pcibios_disable_device (struct pci_dev *dev)
+{
+        if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
+                pcibios_disable_irq(dev);
+}
+
 int pci_ext_cfg_avail(void)
 {
         if (raw_pci_ext_ops)
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index efb849323c74..852aa4c92da0 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
 
 static void intel_mid_pci_irq_disable(struct pci_dev *dev)
 {
-        if (dev->irq_managed && dev->irq > 0) {
+        if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
+            dev->irq > 0) {
                 mp_unmap_irq(dev->irq);
                 dev->irq_managed = 0;
-                dev->irq = 0;
         }
 }
 
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index e71b3dbd87b8..5dc6ca5e1741 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1256,9 +1256,22 @@ static int pirq_enable_irq(struct pci_dev *dev)
         return 0;
 }
 
+bool mp_should_keep_irq(struct device *dev)
+{
+        if (dev->power.is_prepared)
+                return true;
+#ifdef CONFIG_PM
+        if (dev->power.runtime_status == RPM_SUSPENDING)
+                return true;
+#endif
+
+        return false;
+}
+
 static void pirq_disable_irq(struct pci_dev *dev)
 {
-        if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) {
+        if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
+            dev->irq_managed && dev->irq) {
                 mp_unmap_irq(dev->irq);
                 dev->irq = 0;
                 dev->irq_managed = 0;
diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S
index 31776d0efc8c..d7ec4e251c0a 100644
--- a/arch/x86/vdso/vdso32/sigreturn.S
+++ b/arch/x86/vdso/vdso32/sigreturn.S
@@ -17,6 +17,7 @@
         .text
         .globl __kernel_sigreturn
         .type __kernel_sigreturn,@function
+        nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
         ALIGN
 __kernel_sigreturn:
 .LSTART_sigreturn:
