 Kbuild                            |  1
 arch/x86/boot/compressed/misc.c   | 29
 arch/x86/include/asm/calling.h    | 52
 arch/x86/include/asm/entry_arch.h | 19
 arch/x86/include/asm/segment.h    | 32
 arch/x86/kernel/asm-offsets_32.c  |  4
 arch/x86/kernel/cpu/intel.c       |  4
 arch/x86/kernel/entry_32.S        |  6
 arch/x86/kernel/entry_64.S        | 20
 arch/x86/kernel/quirks.c          |  2
 arch/x86/kernel/tlb_uv.c          | 11
 arch/x86/kernel/traps.c           |  1
 arch/x86/kernel/vm86_32.c         | 10
 include/linux/interrupt.h         |  1
 kernel/softirq.c                  |  2
 15 files changed, 100 insertions(+), 94 deletions(-)
diff --git a/Kbuild b/Kbuild
--- a/Kbuild
+++ b/Kbuild
@@ -53,6 +53,7 @@ targets += arch/$(SRCARCH)/kernel/asm-offsets.s
 # Default sed regexp - multiline due to syntax constraints
 define sed-y
 	"/^->/{s:->#\(.*\):/* \1 */:; \
+	s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 (\2) /* \3 */:; \
 	s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
 	s:->::; p;}"
 endef
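
The effect of the added rule, for illustration: purely numeric values (including negative ones) now come out parenthesized in the generated header, while symbolic values still fall through to the old rule. The entry name and comment below are invented for the example; only the transformation itself comes from the sed expression:

/* asm-offsets.s input line (hypothetical):  ->EXAMPLE_OFF $-24 offsetof(struct foo, bar) */
#define EXAMPLE_OFF (-24) /* offsetof(struct foo, bar) */
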
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 8f7bef8e9fff..23f315c9f215 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -229,18 +229,35 @@ void *memset(void *s, int c, size_t n)
 		ss[i] = c;
 	return s;
 }
-
+#ifdef CONFIG_X86_32
 void *memcpy(void *dest, const void *src, size_t n)
 {
-	int i;
-	const char *s = src;
-	char *d = dest;
+	int d0, d1, d2;
+	asm volatile(
+		"rep ; movsl\n\t"
+		"movl %4,%%ecx\n\t"
+		"rep ; movsb\n\t"
+		: "=&c" (d0), "=&D" (d1), "=&S" (d2)
+		: "0" (n >> 2), "g" (n & 3), "1" (dest), "2" (src)
+		: "memory");
 
-	for (i = 0; i < n; i++)
-		d[i] = s[i];
 	return dest;
 }
+#else
+void *memcpy(void *dest, const void *src, size_t n)
+{
+	long d0, d1, d2;
+	asm volatile(
+		"rep ; movsq\n\t"
+		"movq %4,%%rcx\n\t"
+		"rep ; movsb\n\t"
+		: "=&c" (d0), "=&D" (d1), "=&S" (d2)
+		: "0" (n >> 3), "g" (n & 7), "1" (dest), "2" (src)
+		: "memory");
 
+	return dest;
+}
+#endif
 
 static void error(char *x)
 {
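
As a rough, self-contained illustration of the 64-bit variant: the inline asm below is the same as in the hunk (copy 8-byte words with rep movsq, then the 0-7 byte tail with rep movsb), but the surrounding user-space harness (rep_memcpy, the buffers, main) is invented for this sketch and only builds on x86-64:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Same technique as the patched decompressor memcpy: quadwords first,
 * then the remaining 0-7 bytes. */
static void *rep_memcpy(void *dest, const void *src, size_t n)
{
	long d0, d1, d2;
	asm volatile(
		"rep ; movsq\n\t"
		"movq %4,%%rcx\n\t"
		"rep ; movsb\n\t"
		: "=&c" (d0), "=&D" (d1), "=&S" (d2)
		: "0" (n >> 3), "g" (n & 7), "1" (dest), "2" (src)
		: "memory");
	return dest;
}

int main(void)
{
	char src[13] = "hello, world";
	char dst[13] = { 0 };

	rep_memcpy(dst, src, sizeof(src));
	printf("%s (%s)\n", dst,
	       memcmp(dst, src, sizeof(src)) ? "FAIL" : "ok");
	return 0;
}
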
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 0e63c9a2a8d0..30af5a832163 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -48,36 +48,38 @@ For 32-bit we have the following conventions - kernel is built with
 
 
 /*
- * 64-bit system call stack frame layout defines and helpers,
- * for assembly code:
+ * 64-bit system call stack frame layout defines and helpers, for
+ * assembly code (note that the seemingly unnecessary parentheses
+ * are to prevent cpp from inserting spaces in expressions that get
+ * passed to macros):
  */
 
-#define R15		0
-#define R14		8
-#define R13		16
-#define R12		24
-#define RBP		32
-#define RBX		40
+#define R15		(0)
+#define R14		(8)
+#define R13		(16)
+#define R12		(24)
+#define RBP		(32)
+#define RBX		(40)
 
 /* arguments: interrupts/non tracing syscalls only save up to here: */
-#define R11		48
-#define R10		56
-#define R9		64
-#define R8		72
-#define RAX		80
-#define RCX		88
-#define RDX		96
-#define RSI		104
-#define RDI		112
-#define ORIG_RAX	120	/* + error_code */
+#define R11		(48)
+#define R10		(56)
+#define R9		(64)
+#define R8		(72)
+#define RAX		(80)
+#define RCX		(88)
+#define RDX		(96)
+#define RSI		(104)
+#define RDI		(112)
+#define ORIG_RAX	(120)	/* + error_code */
 /* end of arguments */
 
 /* cpu exception frame or undefined in case of fast syscall: */
-#define RIP		128
-#define CS		136
-#define EFLAGS		144
-#define RSP		152
-#define SS		160
+#define RIP		(128)
+#define CS		(136)
+#define EFLAGS		(144)
+#define RSP		(152)
+#define SS		(160)
 
 #define ARGOFFSET	R11
 #define SWFRAME		ORIG_RAX
@@ -111,7 +113,7 @@ For 32-bit we have the following conventions - kernel is built with
 	.endif
 	.endm
 
-#define ARG_SKIP	9*8
+#define ARG_SKIP	(9*8)
 
 	.macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \
 			    skipr8910=0, skiprdx=0
@@ -169,7 +171,7 @@ For 32-bit we have the following conventions - kernel is built with
 	.endif
 	.endm
 
-#define REST_SKIP	6*8
+#define REST_SKIP	(6*8)
 
 	.macro SAVE_REST
 	subq $REST_SKIP, %rsp
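
The new comment refers to cpp's habit of emitting a protective space when an expansion would otherwise glue two tokens together. A minimal user-level illustration of that behavior (the macro names here are invented, not taken from the patch):

#define BAD_OFF   -8     /* "base-BAD_OFF"  preprocesses to "base- -8": a space is inserted to avoid forming "--" */
#define GOOD_OFF  (-8)   /* "base-GOOD_OFF" preprocesses to "base-(-8)": no space needed                          */

Since gas macro arguments are whitespace-separated (as in the apicinterrupt invocations later in this diff), such a stray space can split what was meant to be a single argument.
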
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index b8e96a18676b..57650ab4a5f5 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -16,22 +16,11 @@ BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
 
-BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
+.irpc idx, "01234567"
+BUILD_INTERRUPT3(invalidate_interrupt\idx,
+		 (INVALIDATE_TLB_VECTOR_START)+\idx,
 		 smp_invalidate_interrupt)
+.endr
 #endif
 
 BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 14e0ed86a6f9..231f1c1d6607 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -73,31 +73,31 @@
 
 #define GDT_ENTRY_DEFAULT_USER_DS	15
 
-#define GDT_ENTRY_KERNEL_BASE	12
+#define GDT_ENTRY_KERNEL_BASE		(12)
 
-#define GDT_ENTRY_KERNEL_CS		(GDT_ENTRY_KERNEL_BASE + 0)
+#define GDT_ENTRY_KERNEL_CS		(GDT_ENTRY_KERNEL_BASE+0)
 
-#define GDT_ENTRY_KERNEL_DS		(GDT_ENTRY_KERNEL_BASE + 1)
+#define GDT_ENTRY_KERNEL_DS		(GDT_ENTRY_KERNEL_BASE+1)
 
-#define GDT_ENTRY_TSS			(GDT_ENTRY_KERNEL_BASE + 4)
-#define GDT_ENTRY_LDT			(GDT_ENTRY_KERNEL_BASE + 5)
+#define GDT_ENTRY_TSS			(GDT_ENTRY_KERNEL_BASE+4)
+#define GDT_ENTRY_LDT			(GDT_ENTRY_KERNEL_BASE+5)
 
-#define GDT_ENTRY_PNPBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 6)
-#define GDT_ENTRY_APMBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 11)
+#define GDT_ENTRY_PNPBIOS_BASE		(GDT_ENTRY_KERNEL_BASE+6)
+#define GDT_ENTRY_APMBIOS_BASE		(GDT_ENTRY_KERNEL_BASE+11)
 
-#define GDT_ENTRY_ESPFIX_SS		(GDT_ENTRY_KERNEL_BASE + 14)
-#define __ESPFIX_SS			(GDT_ENTRY_ESPFIX_SS * 8)
+#define GDT_ENTRY_ESPFIX_SS		(GDT_ENTRY_KERNEL_BASE+14)
+#define __ESPFIX_SS			(GDT_ENTRY_ESPFIX_SS*8)
 
-#define GDT_ENTRY_PERCPU		(GDT_ENTRY_KERNEL_BASE + 15)
+#define GDT_ENTRY_PERCPU		(GDT_ENTRY_KERNEL_BASE+15)
 #ifdef CONFIG_SMP
 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
 #else
 #define __KERNEL_PERCPU 0
 #endif
 
-#define GDT_ENTRY_STACK_CANARY		(GDT_ENTRY_KERNEL_BASE + 16)
+#define GDT_ENTRY_STACK_CANARY		(GDT_ENTRY_KERNEL_BASE+16)
 #ifdef CONFIG_CC_STACKPROTECTOR
-#define __KERNEL_STACK_CANARY		(GDT_ENTRY_STACK_CANARY * 8)
+#define __KERNEL_STACK_CANARY		(GDT_ENTRY_STACK_CANARY*8)
 #else
 #define __KERNEL_STACK_CANARY		0
 #endif
@@ -182,10 +182,10 @@
 
 #endif
 
-#define __KERNEL_CS	(GDT_ENTRY_KERNEL_CS * 8)
-#define __KERNEL_DS	(GDT_ENTRY_KERNEL_DS * 8)
-#define __USER_DS	(GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
-#define __USER_CS	(GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
+#define __KERNEL_CS	(GDT_ENTRY_KERNEL_CS*8)
+#define __KERNEL_DS	(GDT_ENTRY_KERNEL_DS*8)
+#define __USER_DS	(GDT_ENTRY_DEFAULT_USER_DS*8+3)
+#define __USER_CS	(GDT_ENTRY_DEFAULT_USER_CS*8+3)
 #ifndef CONFIG_PARAVIRT
 #define get_kernel_rpl()  0
 #endif
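
As a quick sanity check that dropping the spaces changes nothing, the selectors still evaluate to the familiar 32-bit values (GDT index times the 8-byte descriptor size, plus RPL 3 for user segments). This is just arithmetic on the constants above, except that GDT_ENTRY_DEFAULT_USER_CS (taken as 14) is not part of this hunk and is an assumption here:

/* __KERNEL_CS = (GDT_ENTRY_KERNEL_BASE+0)*8   = 12*8   =  96 = 0x60
 * __KERNEL_DS = (GDT_ENTRY_KERNEL_BASE+1)*8   = 13*8   = 104 = 0x68
 * __USER_CS   = GDT_ENTRY_DEFAULT_USER_CS*8+3 = 14*8+3 = 115 = 0x73
 * __USER_DS   = GDT_ENTRY_DEFAULT_USER_DS*8+3 = 15*8+3 = 123 = 0x7b */
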
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index dfdbf6403895..1a4088dda37a 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -99,9 +99,7 @@ void foo(void)
 
 	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
 	DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
-	DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
-	DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
-	DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
+	DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
 
 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
 
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 695f17731e23..d16c2c53d6bf 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -284,9 +284,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 	/* Don't do the funky fallback heuristics the AMD version employs
 	   for now. */
 	node = apicid_to_node[apicid];
-	if (node == NUMA_NO_NODE)
-		node = first_node(node_online_map);
-	else if (!node_online(node)) {
+	if (node == NUMA_NO_NODE || !node_online(node)) {
 		/* reuse the value from init_cpu_to_node() */
 		node = cpu_to_node(cpu);
 	}
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 9fb188d7bc76..59e175e89599 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -382,20 +382,20 @@ sysenter_past_esp:
 	 * enough kernel state to call TRACE_IRQS_OFF can be called - but
 	 * we immediately enable interrupts at that point anyway.
 	 */
-	pushl_cfi $(__USER_DS)
+	pushl_cfi $__USER_DS
 	/*CFI_REL_OFFSET ss, 0*/
 	pushl_cfi %ebp
 	CFI_REL_OFFSET esp, 0
 	pushfl_cfi
 	orl $X86_EFLAGS_IF, (%esp)
-	pushl_cfi $(__USER_CS)
+	pushl_cfi $__USER_CS
 	/*CFI_REL_OFFSET cs, 0*/
 	/*
 	 * Push current_thread_info()->sysenter_return to the stack.
 	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
 	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
 	 */
-	pushl_cfi (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
+	pushl_cfi (TI_sysenter_return-THREAD_SIZE_asm+8+4*4)(%esp)
 	CFI_REL_OFFSET eip, 0
 
 	pushl_cfi %eax
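
A worked form of the offset arithmetic in the comment, under the usual 32-bit layout assumption that thread_info sits at the base of the THREAD_SIZE-sized stack area and that copy_thread left esp0 eight bytes below its top:

/* %esp after the four pushes above        = stack_base + THREAD_SIZE - 8 - 4*4
 * &current_thread_info()->sysenter_return = stack_base + TI_sysenter_return
 * displacement against %esp               = TI_sysenter_return - THREAD_SIZE + 8 + 4*4
 * The hunk only changes the spelling of THREAD_SIZE to THREAD_SIZE_asm, the
 * constant exported via asm-offsets_32.c earlier in this diff. */
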
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index a7ae7fd1010f..fe2690d71c0c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -963,22 +963,10 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \
 	x86_platform_ipi smp_x86_platform_ipi
 
 #ifdef CONFIG_SMP
-apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
-	invalidate_interrupt0 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \
-	invalidate_interrupt1 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
-	invalidate_interrupt2 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
-	invalidate_interrupt3 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
-	invalidate_interrupt4 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \
-	invalidate_interrupt5 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \
-	invalidate_interrupt6 smp_invalidate_interrupt
-apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
-	invalidate_interrupt7 smp_invalidate_interrupt
+.irpc idx, "01234567"
+apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \
+	invalidate_interrupt\idx smp_invalidate_interrupt
+.endr
 #endif
 
 apicinterrupt THRESHOLD_APIC_VECTOR \
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 939b9e98245f..8bbe8c56916d 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -344,6 +344,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
 			 vt8237_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
 			 vt8237_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
+			 vt8237_force_enable_hpet);
 
 static void ati_force_hpet_resume(void)
 {
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 50ac949c7f1c..20ea20a39e2a 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -1001,10 +1001,10 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
 static ssize_t tunables_read(struct file *file, char __user *userbuf,
 				size_t count, loff_t *ppos)
 {
-	char buf[300];
+	char *buf;
 	int ret;
 
-	ret = snprintf(buf, 300, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
+	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
 		"max_bau_concurrent plugged_delay plugsb4reset",
 		"timeoutsb4reset ipi_reset_limit complete_threshold",
 		"congested_response_us congested_reps congested_period",
@@ -1012,7 +1012,12 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf,
 		timeoutsb4reset, ipi_reset_limit, complete_threshold,
 		congested_response_us, congested_reps, congested_period);
 
-	return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
+	kfree(buf);
+	return ret;
 }
 
 /*
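
One property of the replaced code worth keeping in mind: snprintf returns the length the output would have had, not the number of bytes actually stored, so passing its return value straight to simple_read_from_buffer is only safe while the text fits the fixed buffer; kasprintf sizes the allocation to fit and the new code measures with strlen(). A small user-space demonstration of the snprintf contract (the buffer and string are invented for the example):

#include <stdio.h>

int main(void)
{
	char small[8];

	/* Returns 10 (the full length), but only 7 characters plus the
	 * terminating NUL actually fit into small[]. */
	int would_be = snprintf(small, sizeof(small), "0123456789");

	printf("stored \"%s\", snprintf returned %d\n", small, would_be);
	return 0;
}
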
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index d43968503dd2..cb838ca42c96 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -575,6 +575,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 	if (regs->flags & X86_VM_MASK) {
 		handle_vm86_trap((struct kernel_vm86_regs *) regs,
 				error_code, 1);
+		preempt_conditional_cli(regs);
 		return;
 	}
 
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 5ffb5622f793..61fb98519622 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -551,8 +551,14 @@ cannot_handle:
 int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
 {
 	if (VMPI.is_vm86pus) {
-		if ((trapno == 3) || (trapno == 1))
-			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
+		if ((trapno == 3) || (trapno == 1)) {
+			KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
+			/* setting this flag forces the code in entry_32.S to
+			   call save_v86_state() and change the stack pointer
+			   to KVM86->regs32 */
+			set_thread_flag(TIF_IRET);
+			return 0;
+		}
 		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
 		return 0;
 	}
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 414328577ced..01b281646251 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -416,7 +416,6 @@ static inline void __raise_softirq_irqoff(unsigned int nr)
 
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
-extern void wakeup_softirqd(void);
 
 /* This is the worklist that queues up per-cpu softirq work.
  *
diff --git a/kernel/softirq.c b/kernel/softirq.c
index fc978889b194..f02a9dfa19bc 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -67,7 +67,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-void wakeup_softirqd(void)
+static void wakeup_softirqd(void)
 {
 	/* Interrupts are disabled: no need to stop preemption */
 	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
