diff options
author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-02-06 16:54:09 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-02-06 16:54:09 -0500 |
commit | 3e6bdf473f489664dac4d7511d26c7ac3dfdc748 (patch) | |
tree | 10cb2e928830b9de8bbc3f6dd47c18c24cd2affa /arch/x86/kernel | |
parent | 3d4d4582e5b3f67a68f2cf32fd5b70d8d80f119d (diff) | |
parent | 58d5d0d8dd52cbca988af24b5692a20b00285543 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
x86: fix deadlock, make pgd_lock irq-safe
virtio: fix trivial build bug
x86: fix mtrr trimming
x86: delay CPA self-test and repeat it
x86: fix 64-bit sections
generic: add __FINITDATA
x86: remove spurious ifdefs from pageattr.c
x86: mark the .rodata section also NX
x86: fix iret exception recovery on 64-bit
cpuidle: dubious one-bit signed bitfield in cpuidle.h
x86: fix sparse warnings in powernow-k8.c
x86: fix sparse error in traps_32.c
x86: trivial sparse/checkpatch in quirks.c
x86 ptrace: disallow null cs/ss
MAINTAINERS: RDC R-321x SoC maintainer
brk randomization: introduce CONFIG_COMPAT_BRK
brk: check the lower bound properly
x86: remove X2 workaround
x86: make spurious fault handler aware of large mappings
x86: make traps on entry code be debuggable in user space, 64-bit
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 1 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/mtrr/main.c | 19 | ||||
-rw-r--r-- | arch/x86/kernel/entry_64.S | 24 | ||||
-rw-r--r-- | arch/x86/kernel/head_64.S | 15 | ||||
-rw-r--r-- | arch/x86/kernel/ptrace.c | 25 | ||||
-rw-r--r-- | arch/x86/kernel/quirks.c | 26 | ||||
-rw-r--r-- | arch/x86/kernel/test_nx.c | 2 | ||||
-rw-r--r-- | arch/x86/kernel/traps_32.c | 15 |
8 files changed, 72 insertions, 55 deletions
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index a0522735dd9d..5affe91ca1e5 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -827,7 +827,6 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf | |||
827 | 827 | ||
828 | for (i = 0; i < data->acpi_data.state_count; i++) { | 828 | for (i = 0; i < data->acpi_data.state_count; i++) { |
829 | u32 index; | 829 | u32 index; |
830 | u32 hi = 0, lo = 0; | ||
831 | 830 | ||
832 | index = data->acpi_data.states[i].control & HW_PSTATE_MASK; | 831 | index = data->acpi_data.states[i].control & HW_PSTATE_MASK; |
833 | if (index > data->max_hw_pstate) { | 832 | if (index > data->max_hw_pstate) { |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 1e27b69a7a0e..b6e136f23d3d 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -659,7 +659,7 @@ static __init int amd_special_default_mtrr(void) | |||
659 | */ | 659 | */ |
660 | int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | 660 | int __init mtrr_trim_uncached_memory(unsigned long end_pfn) |
661 | { | 661 | { |
662 | unsigned long i, base, size, highest_addr = 0, def, dummy; | 662 | unsigned long i, base, size, highest_pfn = 0, def, dummy; |
663 | mtrr_type type; | 663 | mtrr_type type; |
664 | u64 trim_start, trim_size; | 664 | u64 trim_start, trim_size; |
665 | 665 | ||
@@ -682,28 +682,27 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
682 | mtrr_if->get(i, &base, &size, &type); | 682 | mtrr_if->get(i, &base, &size, &type); |
683 | if (type != MTRR_TYPE_WRBACK) | 683 | if (type != MTRR_TYPE_WRBACK) |
684 | continue; | 684 | continue; |
685 | base <<= PAGE_SHIFT; | 685 | if (highest_pfn < base + size) |
686 | size <<= PAGE_SHIFT; | 686 | highest_pfn = base + size; |
687 | if (highest_addr < base + size) | ||
688 | highest_addr = base + size; | ||
689 | } | 687 | } |
690 | 688 | ||
691 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ | 689 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ |
692 | if (!highest_addr) { | 690 | if (!highest_pfn) { |
693 | printk(KERN_WARNING "WARNING: strange, CPU MTRRs all blank?\n"); | 691 | printk(KERN_WARNING "WARNING: strange, CPU MTRRs all blank?\n"); |
694 | WARN_ON(1); | 692 | WARN_ON(1); |
695 | return 0; | 693 | return 0; |
696 | } | 694 | } |
697 | 695 | ||
698 | if ((highest_addr >> PAGE_SHIFT) < end_pfn) { | 696 | if (highest_pfn < end_pfn) { |
699 | printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover" | 697 | printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover" |
700 | " all of memory, losing %LdMB of RAM.\n", | 698 | " all of memory, losing %luMB of RAM.\n", |
701 | (((u64)end_pfn << PAGE_SHIFT) - highest_addr) >> 20); | 699 | (end_pfn - highest_pfn) >> (20 - PAGE_SHIFT)); |
702 | 700 | ||
703 | WARN_ON(1); | 701 | WARN_ON(1); |
704 | 702 | ||
705 | printk(KERN_INFO "update e820 for mtrr\n"); | 703 | printk(KERN_INFO "update e820 for mtrr\n"); |
706 | trim_start = highest_addr; | 704 | trim_start = highest_pfn; |
705 | trim_start <<= PAGE_SHIFT; | ||
707 | trim_size = end_pfn; | 706 | trim_size = end_pfn; |
708 | trim_size <<= PAGE_SHIFT; | 707 | trim_size <<= PAGE_SHIFT; |
709 | trim_size -= trim_start; | 708 | trim_size -= trim_start; |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index bea8474744ff..c7341e81941c 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -582,7 +582,6 @@ retint_restore_args: /* return to kernel space */ | |||
582 | TRACE_IRQS_IRETQ | 582 | TRACE_IRQS_IRETQ |
583 | restore_args: | 583 | restore_args: |
584 | RESTORE_ARGS 0,8,0 | 584 | RESTORE_ARGS 0,8,0 |
585 | iret_label: | ||
586 | #ifdef CONFIG_PARAVIRT | 585 | #ifdef CONFIG_PARAVIRT |
587 | INTERRUPT_RETURN | 586 | INTERRUPT_RETURN |
588 | #endif | 587 | #endif |
@@ -593,13 +592,22 @@ ENTRY(native_iret) | |||
593 | .quad native_iret, bad_iret | 592 | .quad native_iret, bad_iret |
594 | .previous | 593 | .previous |
595 | .section .fixup,"ax" | 594 | .section .fixup,"ax" |
596 | /* force a signal here? this matches i386 behaviour */ | ||
597 | /* running with kernel gs */ | ||
598 | bad_iret: | 595 | bad_iret: |
599 | movq $11,%rdi /* SIGSEGV */ | 596 | /* |
600 | TRACE_IRQS_ON | 597 | * The iret traps when the %cs or %ss being restored is bogus. |
601 | ENABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI)) | 598 | * We've lost the original trap vector and error code. |
602 | jmp do_exit | 599 | * #GPF is the most likely one to get for an invalid selector. |
600 | * So pretend we completed the iret and took the #GPF in user mode. | ||
601 | * | ||
602 | * We are now running with the kernel GS after exception recovery. | ||
603 | * But error_entry expects us to have user GS to match the user %cs, | ||
604 | * so swap back. | ||
605 | */ | ||
606 | pushq $0 | ||
607 | |||
608 | SWAPGS | ||
609 | jmp general_protection | ||
610 | |||
603 | .previous | 611 | .previous |
604 | 612 | ||
605 | /* edi: workmask, edx: work */ | 613 | /* edi: workmask, edx: work */ |
@@ -911,7 +919,7 @@ error_kernelspace: | |||
911 | iret run with kernel gs again, so don't set the user space flag. | 919 | iret run with kernel gs again, so don't set the user space flag. |
912 | B stepping K8s sometimes report an truncated RIP for IRET | 920 | B stepping K8s sometimes report an truncated RIP for IRET |
913 | exceptions returning to compat mode. Check for these here too. */ | 921 | exceptions returning to compat mode. Check for these here too. */ |
914 | leaq iret_label(%rip),%rbp | 922 | leaq native_iret(%rip),%rbp |
915 | cmpq %rbp,RIP(%rsp) | 923 | cmpq %rbp,RIP(%rsp) |
916 | je error_swapgs | 924 | je error_swapgs |
917 | movl %ebp,%ebp /* zero extend */ | 925 | movl %ebp,%ebp /* zero extend */ |
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 4f283ad215ec..09b38d539b09 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -250,18 +250,13 @@ ENTRY(secondary_startup_64) | |||
250 | lretq | 250 | lretq |
251 | 251 | ||
252 | /* SMP bootup changes these two */ | 252 | /* SMP bootup changes these two */ |
253 | #ifndef CONFIG_HOTPLUG_CPU | 253 | __CPUINITDATA |
254 | .pushsection .init.data | ||
255 | #endif | ||
256 | .align 8 | 254 | .align 8 |
257 | .globl initial_code | 255 | ENTRY(initial_code) |
258 | initial_code: | ||
259 | .quad x86_64_start_kernel | 256 | .quad x86_64_start_kernel |
260 | #ifndef CONFIG_HOTPLUG_CPU | 257 | __FINITDATA |
261 | .popsection | 258 | |
262 | #endif | 259 | ENTRY(init_rsp) |
263 | .globl init_rsp | ||
264 | init_rsp: | ||
265 | .quad init_thread_union+THREAD_SIZE-8 | 260 | .quad init_thread_union+THREAD_SIZE-8 |
266 | 261 | ||
267 | bad_address: | 262 | bad_address: |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 96286df1bb81..702c33efea84 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -103,9 +103,26 @@ static int set_segment_reg(struct task_struct *task, | |||
103 | if (invalid_selector(value)) | 103 | if (invalid_selector(value)) |
104 | return -EIO; | 104 | return -EIO; |
105 | 105 | ||
106 | if (offset != offsetof(struct user_regs_struct, gs)) | 106 | /* |
107 | * For %cs and %ss we cannot permit a null selector. | ||
108 | * We can permit a bogus selector as long as it has USER_RPL. | ||
109 | * Null selectors are fine for other segment registers, but | ||
110 | * we will never get back to user mode with invalid %cs or %ss | ||
111 | * and will take the trap in iret instead. Much code relies | ||
112 | * on user_mode() to distinguish a user trap frame (which can | ||
113 | * safely use invalid selectors) from a kernel trap frame. | ||
114 | */ | ||
115 | switch (offset) { | ||
116 | case offsetof(struct user_regs_struct, cs): | ||
117 | case offsetof(struct user_regs_struct, ss): | ||
118 | if (unlikely(value == 0)) | ||
119 | return -EIO; | ||
120 | |||
121 | default: | ||
107 | *pt_regs_access(task_pt_regs(task), offset) = value; | 122 | *pt_regs_access(task_pt_regs(task), offset) = value; |
108 | else { | 123 | break; |
124 | |||
125 | case offsetof(struct user_regs_struct, gs): | ||
109 | task->thread.gs = value; | 126 | task->thread.gs = value; |
110 | if (task == current) | 127 | if (task == current) |
111 | /* | 128 | /* |
@@ -227,12 +244,16 @@ static int set_segment_reg(struct task_struct *task, | |||
227 | * Can't actually change these in 64-bit mode. | 244 | * Can't actually change these in 64-bit mode. |
228 | */ | 245 | */ |
229 | case offsetof(struct user_regs_struct,cs): | 246 | case offsetof(struct user_regs_struct,cs): |
247 | if (unlikely(value == 0)) | ||
248 | return -EIO; | ||
230 | #ifdef CONFIG_IA32_EMULATION | 249 | #ifdef CONFIG_IA32_EMULATION |
231 | if (test_tsk_thread_flag(task, TIF_IA32)) | 250 | if (test_tsk_thread_flag(task, TIF_IA32)) |
232 | task_pt_regs(task)->cs = value; | 251 | task_pt_regs(task)->cs = value; |
233 | #endif | 252 | #endif |
234 | break; | 253 | break; |
235 | case offsetof(struct user_regs_struct,ss): | 254 | case offsetof(struct user_regs_struct,ss): |
255 | if (unlikely(value == 0)) | ||
256 | return -EIO; | ||
236 | #ifdef CONFIG_IA32_EMULATION | 257 | #ifdef CONFIG_IA32_EMULATION |
237 | if (test_tsk_thread_flag(task, TIF_IA32)) | 258 | if (test_tsk_thread_flag(task, TIF_IA32)) |
238 | task_pt_regs(task)->ss = value; | 259 | task_pt_regs(task)->ss = value; |
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 3cd7a2dcd4fe..6ba33ca8715a 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c | |||
@@ -380,19 +380,19 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367, | |||
380 | void force_hpet_resume(void) | 380 | void force_hpet_resume(void) |
381 | { | 381 | { |
382 | switch (force_hpet_resume_type) { | 382 | switch (force_hpet_resume_type) { |
383 | case ICH_FORCE_HPET_RESUME: | 383 | case ICH_FORCE_HPET_RESUME: |
384 | return ich_force_hpet_resume(); | 384 | ich_force_hpet_resume(); |
385 | 385 | return; | |
386 | case OLD_ICH_FORCE_HPET_RESUME: | 386 | case OLD_ICH_FORCE_HPET_RESUME: |
387 | return old_ich_force_hpet_resume(); | 387 | old_ich_force_hpet_resume(); |
388 | 388 | return; | |
389 | case VT8237_FORCE_HPET_RESUME: | 389 | case VT8237_FORCE_HPET_RESUME: |
390 | return vt8237_force_hpet_resume(); | 390 | vt8237_force_hpet_resume(); |
391 | 391 | return; | |
392 | case NVIDIA_FORCE_HPET_RESUME: | 392 | case NVIDIA_FORCE_HPET_RESUME: |
393 | return nvidia_force_hpet_resume(); | 393 | nvidia_force_hpet_resume(); |
394 | 394 | return; | |
395 | default: | 395 | default: |
396 | break; | 396 | break; |
397 | } | 397 | } |
398 | } | 398 | } |
diff --git a/arch/x86/kernel/test_nx.c b/arch/x86/kernel/test_nx.c index 36c100c323aa..10b8a6f69f84 100644 --- a/arch/x86/kernel/test_nx.c +++ b/arch/x86/kernel/test_nx.c | |||
@@ -139,7 +139,6 @@ static int test_NX(void) | |||
139 | * Until then, don't run them to avoid too many people getting scared | 139 | * Until then, don't run them to avoid too many people getting scared |
140 | * by the error message | 140 | * by the error message |
141 | */ | 141 | */ |
142 | #if 0 | ||
143 | 142 | ||
144 | #ifdef CONFIG_DEBUG_RODATA | 143 | #ifdef CONFIG_DEBUG_RODATA |
145 | /* Test 3: Check if the .rodata section is executable */ | 144 | /* Test 3: Check if the .rodata section is executable */ |
@@ -152,6 +151,7 @@ static int test_NX(void) | |||
152 | } | 151 | } |
153 | #endif | 152 | #endif |
154 | 153 | ||
154 | #if 0 | ||
155 | /* Test 4: Check if the .data section of a module is executable */ | 155 | /* Test 4: Check if the .data section of a module is executable */ |
156 | if (test_address(&test_data)) { | 156 | if (test_address(&test_data)) { |
157 | printk(KERN_ERR "test_nx: .data section is executable\n"); | 157 | printk(KERN_ERR "test_nx: .data section is executable\n"); |
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index 3cf72977d012..b22c01e05a18 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c | |||
@@ -1176,17 +1176,12 @@ void __init trap_init(void) | |||
1176 | #endif | 1176 | #endif |
1177 | set_trap_gate(19,&simd_coprocessor_error); | 1177 | set_trap_gate(19,&simd_coprocessor_error); |
1178 | 1178 | ||
1179 | /* | ||
1180 | * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned. | ||
1181 | * Generate a build-time error if the alignment is wrong. | ||
1182 | */ | ||
1183 | BUILD_BUG_ON(offsetof(struct task_struct, thread.i387.fxsave) & 15); | ||
1179 | if (cpu_has_fxsr) { | 1184 | if (cpu_has_fxsr) { |
1180 | /* | ||
1181 | * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned. | ||
1182 | * Generates a compile-time "error: zero width for bit-field" if | ||
1183 | * the alignment is wrong. | ||
1184 | */ | ||
1185 | struct fxsrAlignAssert { | ||
1186 | int _:!(offsetof(struct task_struct, | ||
1187 | thread.i387.fxsave) & 15); | ||
1188 | }; | ||
1189 | |||
1190 | printk(KERN_INFO "Enabling fast FPU save and restore... "); | 1185 | printk(KERN_INFO "Enabling fast FPU save and restore... "); |
1191 | set_in_cr4(X86_CR4_OSFXSR); | 1186 | set_in_cr4(X86_CR4_OSFXSR); |
1192 | printk("done.\n"); | 1187 | printk("done.\n"); |