author    Steven Whitehouse <swhiteho@redhat.com>  2006-09-28 08:29:59 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-09-28 08:29:59 -0400
commit    185a257f2f73bcd89050ad02da5bedbc28fc43fa
tree      5e32586114534ed3f2165614cba3d578f5d87307 /arch/i386
parent    3f1a9aaeffd8d1cbc5ab9776c45cbd66af1c9699
parent    a77c64c1a641950626181b4857abb701d8f38ccc

Merge branch 'master' into gfs2

Diffstat (limited to 'arch/i386'): 82 files changed, 3966 insertions(+), 2574 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index b2751eadbc56..3fd2f256f2be 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
| @@ -166,7 +166,6 @@ config X86_VISWS | |||
| 166 | 166 | ||
| 167 | config X86_GENERICARCH | 167 | config X86_GENERICARCH |
| 168 | bool "Generic architecture (Summit, bigsmp, ES7000, default)" | 168 | bool "Generic architecture (Summit, bigsmp, ES7000, default)" |
| 169 | depends on SMP | ||
| 170 | help | 169 | help |
| 171 | This option compiles in the Summit, bigsmp, ES7000, default subarchitectures. | 170 | This option compiles in the Summit, bigsmp, ES7000, default subarchitectures. |
| 172 | It is intended for a generic binary kernel. | 171 | It is intended for a generic binary kernel. |
| @@ -263,7 +262,7 @@ source "kernel/Kconfig.preempt" | |||
| 263 | 262 | ||
| 264 | config X86_UP_APIC | 263 | config X86_UP_APIC |
| 265 | bool "Local APIC support on uniprocessors" | 264 | bool "Local APIC support on uniprocessors" |
| 266 | depends on !SMP && !(X86_VISWS || X86_VOYAGER) | 265 | depends on !SMP && !(X86_VISWS || X86_VOYAGER || X86_GENERICARCH) |
| 267 | help | 266 | help |
| 268 | A local APIC (Advanced Programmable Interrupt Controller) is an | 267 | A local APIC (Advanced Programmable Interrupt Controller) is an |
| 269 | integrated interrupt controller in the CPU. If you have a single-CPU | 268 | integrated interrupt controller in the CPU. If you have a single-CPU |
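The help text above describes the local APIC as an interrupt controller integrated into the CPU, and the hunk widens the exclusion list for uniprocessor APIC support. For readers who want to see whether their processor actually advertises a local APIC, a minimal userspace sketch follows; it assumes CPUID leaf 1 reports the APIC feature in EDX bit 9, and it is only an illustration, not how the Kconfig dependency is evaluated.

```c
#include <stdio.h>

/* Query a CPUID leaf with inline assembly (i386/x86-64, GCC syntax). */
static void cpuid(unsigned int leaf, unsigned int *a, unsigned int *b,
                  unsigned int *c, unsigned int *d)
{
        __asm__ volatile("cpuid"
                         : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
                         : "a"(leaf));
}

int main(void)
{
        unsigned int a, b, c, d;

        cpuid(1, &a, &b, &c, &d);
        /* EDX bit 9 of leaf 1 is the local APIC feature flag. */
        printf("local APIC %s\n", (d & (1u << 9)) ? "present" : "absent");
        return 0;
}
```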
| @@ -288,12 +287,12 @@ config X86_UP_IOAPIC | |||
| 288 | 287 | ||
| 289 | config X86_LOCAL_APIC | 288 | config X86_LOCAL_APIC |
| 290 | bool | 289 | bool |
| 291 | depends on X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER) | 290 | depends on X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER) || X86_GENERICARCH |
| 292 | default y | 291 | default y |
| 293 | 292 | ||
| 294 | config X86_IO_APIC | 293 | config X86_IO_APIC |
| 295 | bool | 294 | bool |
| 296 | depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER)) | 295 | depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER)) || X86_GENERICARCH |
| 297 | default y | 296 | default y |
| 298 | 297 | ||
| 299 | config X86_VISWS_APIC | 298 | config X86_VISWS_APIC |
| @@ -402,6 +401,7 @@ config X86_REBOOTFIXUPS | |||
| 402 | 401 | ||
| 403 | config MICROCODE | 402 | config MICROCODE |
| 404 | tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support" | 403 | tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support" |
| 404 | select FW_LOADER | ||
| 405 | ---help--- | 405 | ---help--- |
| 406 | If you say Y here and also to "/dev file system support" in the | 406 | If you say Y here and also to "/dev file system support" in the |
| 407 | 'File systems' section, you will be able to update the microcode on | 407 | 'File systems' section, you will be able to update the microcode on |
| @@ -417,6 +417,11 @@ config MICROCODE | |||
| 417 | To compile this driver as a module, choose M here: the | 417 | To compile this driver as a module, choose M here: the |
| 418 | module will be called microcode. | 418 | module will be called microcode. |
| 419 | 419 | ||
| 420 | config MICROCODE_OLD_INTERFACE | ||
| 421 | bool | ||
| 422 | depends on MICROCODE | ||
| 423 | default y | ||
| 424 | |||
| 420 | config X86_MSR | 425 | config X86_MSR |
| 421 | tristate "/dev/cpu/*/msr - Model-specific register support" | 426 | tristate "/dev/cpu/*/msr - Model-specific register support" |
| 422 | help | 427 | help |
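The MICROCODE hunk now selects FW_LOADER (the driver can pull images through request_firmware()) while MICROCODE_OLD_INTERFACE keeps the original scheme in which userspace simply writes a microcode image to the /dev/cpu/microcode character device. A minimal sketch of that old interface is below; the input file name is a placeholder, the device path is the one named in the prompt above, and real systems normally rely on the microcode_ctl tool rather than code like this.

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        static char buf[1 << 20];
        ssize_t len;
        int in = open("microcode.bin", O_RDONLY);      /* hypothetical pre-converted image */
        int dev = open("/dev/cpu/microcode", O_WRONLY);

        if (in < 0 || dev < 0) {
                perror("open");
                return 1;
        }
        len = read(in, buf, sizeof(buf));
        /* The driver validates the blob and applies it to each CPU. */
        if (len <= 0 || write(dev, buf, len) != len) {
                perror("microcode update");
                return 1;
        }
        close(in);
        close(dev);
        return 0;
}
```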
| @@ -494,7 +499,7 @@ config HIGHMEM64G | |||
| 494 | endchoice | 499 | endchoice |
| 495 | 500 | ||
| 496 | choice | 501 | choice |
| 497 | depends on EXPERIMENTAL && !X86_PAE | 502 | depends on EXPERIMENTAL |
| 498 | prompt "Memory split" if EMBEDDED | 503 | prompt "Memory split" if EMBEDDED |
| 499 | default VMSPLIT_3G | 504 | default VMSPLIT_3G |
| 500 | help | 505 | help |
| @@ -516,6 +521,7 @@ choice | |||
| 516 | config VMSPLIT_3G | 521 | config VMSPLIT_3G |
| 517 | bool "3G/1G user/kernel split" | 522 | bool "3G/1G user/kernel split" |
| 518 | config VMSPLIT_3G_OPT | 523 | config VMSPLIT_3G_OPT |
| 524 | depends on !HIGHMEM | ||
| 519 | bool "3G/1G user/kernel split (for full 1G low memory)" | 525 | bool "3G/1G user/kernel split (for full 1G low memory)" |
| 520 | config VMSPLIT_2G | 526 | config VMSPLIT_2G |
| 521 | bool "2G/2G user/kernel split" | 527 | bool "2G/2G user/kernel split" |
| @@ -598,12 +604,10 @@ config ARCH_SELECT_MEMORY_MODEL | |||
| 598 | def_bool y | 604 | def_bool y |
| 599 | depends on ARCH_SPARSEMEM_ENABLE | 605 | depends on ARCH_SPARSEMEM_ENABLE |
| 600 | 606 | ||
| 601 | source "mm/Kconfig" | 607 | config ARCH_POPULATES_NODE_MAP |
| 608 | def_bool y | ||
| 602 | 609 | ||
| 603 | config HAVE_ARCH_EARLY_PFN_TO_NID | 610 | source "mm/Kconfig" |
| 604 | bool | ||
| 605 | default y | ||
| 606 | depends on NUMA | ||
| 607 | 611 | ||
| 608 | config HIGHPTE | 612 | config HIGHPTE |
| 609 | bool "Allocate 3rd-level pagetables from highmem" | 613 | bool "Allocate 3rd-level pagetables from highmem" |
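ARCH_POPULATES_NODE_MAP switches i386 over to the generic zone-initialisation path, which is why the old HAVE_ARCH_EARLY_PFN_TO_NID block disappears. The kernel-context sketch below shows the registration pattern that option implies, assuming the add_active_range()/free_area_init_nodes() API of this era; the placeholder pfn limit is marked as such, and the real arch code derives the ranges from the e820 map.

```c
/* Kernel-context sketch (not standalone); dma_limit_pfn is a placeholder. */
static void __init example_node_map_setup(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

        max_zone_pfns[ZONE_DMA]     = dma_limit_pfn;   /* placeholder name */
        max_zone_pfns[ZONE_NORMAL]  = max_low_pfn;     /* existing i386 global */
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;     /* existing i386 global */

        /* Register each usable RAM range; the real code walks the e820 map. */
        add_active_range(0, 0, max_low_pfn);

        /* Generic mm code then builds the node and zone maps from the ranges. */
        free_area_init_nodes(max_zone_pfns);
}
```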
| @@ -740,8 +744,7 @@ config SECCOMP | |||
| 740 | source kernel/Kconfig.hz | 744 | source kernel/Kconfig.hz |
| 741 | 745 | ||
| 742 | config KEXEC | 746 | config KEXEC |
| 743 | bool "kexec system call (EXPERIMENTAL)" | 747 | bool "kexec system call" |
| 744 | depends on EXPERIMENTAL | ||
| 745 | help | 748 | help |
| 746 | kexec is a system call that implements the ability to shutdown your | 749 | kexec is a system call that implements the ability to shutdown your |
| 747 | current kernel, and to start another kernel. It is like a reboot | 750 | current kernel, and to start another kernel. It is like a reboot |
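The KEXEC help describes a system call; in practice it is driven by the kexec-tools userspace, but a hedged sketch of the raw kexec_load syscall is below. The segment contents and entry point are left as placeholders (building a bootable image is exactly what kexec-tools does), the struct layout is assumed to match the kernel's struct kexec_segment on i386, and the fallback syscall number is an assumption for headers that predate the call.

```c
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SYS_kexec_load
#define SYS_kexec_load 283   /* assumed i386 syscall number for old headers */
#endif

/* Assumed to match the kernel's struct kexec_segment layout on i386. */
struct kexec_segment {
        const void *buf;     /* userspace buffer holding this segment */
        size_t bufsz;
        const void *mem;     /* physical destination address */
        size_t memsz;
};

int main(void)
{
        struct kexec_segment seg = { 0 };   /* placeholder: a real segment holds the new kernel */
        unsigned long entry = 0;            /* placeholder entry point */

        /* glibc of this era had no wrapper, so invoke the syscall directly. */
        if (syscall(SYS_kexec_load, entry, 1UL, &seg, 0UL) != 0)
                perror("kexec_load");
        return 0;
}
```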
| @@ -762,6 +765,13 @@ config CRASH_DUMP | |||
| 762 | depends on HIGHMEM | 765 | depends on HIGHMEM |
| 763 | help | 766 | help |
| 764 | Generate crash dump after being started by kexec. | 767 | Generate crash dump after being started by kexec. |
| 768 | This should be normally only set in special crash dump kernels | ||
| 769 | which are loaded in the main kernel with kexec-tools into | ||
| 770 | a specially reserved region and then later executed after | ||
| 771 | a crash by kdump/kexec. The crash dump kernel must be compiled | ||
| 772 | to a memory address not used by the main kernel or BIOS using | ||
| 773 | PHYSICAL_START. | ||
| 774 | For more details see Documentation/kdump/kdump.txt | ||
| 765 | 775 | ||
| 766 | config PHYSICAL_START | 776 | config PHYSICAL_START |
| 767 | hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP) | 777 | hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP) |
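The new CRASH_DUMP help explains that a capture kernel is loaded into a reserved region and executed after a crash. Inside that capture kernel the old kernel's memory appears as an ELF core at /proc/vmcore; a minimal sketch of saving it (what a plain cp or makedumpfile effectively does) follows, assuming the system was indeed booted as a kdump capture kernel and that the output path is writable.

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[1 << 16];
        ssize_t n;
        int in = open("/proc/vmcore", O_RDONLY);
        int out = open("/var/crash/vmcore", O_WRONLY | O_CREAT | O_TRUNC, 0600);

        if (in < 0 || out < 0) {
                perror("open");
                return 1;
        }
        /* Stream the crashed kernel's memory image to stable storage. */
        while ((n = read(in, buf, sizeof(buf))) > 0)
                if (write(out, buf, n) != n) {
                        perror("write");
                        return 1;
                }
        return 0;
}
```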
| @@ -794,6 +804,7 @@ config HOTPLUG_CPU | |||
| 794 | config COMPAT_VDSO | 804 | config COMPAT_VDSO |
| 795 | bool "Compat VDSO support" | 805 | bool "Compat VDSO support" |
| 796 | default y | 806 | default y |
| 807 | depends on !PARAVIRT | ||
| 797 | help | 808 | help |
| 798 | Map the VDSO to the predictable old-style address too. | 809 | Map the VDSO to the predictable old-style address too. |
| 799 | ---help--- | 810 | ---help--- |
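COMPAT_VDSO keeps the vDSO mapped at the old fixed address for binaries that hard-code it; newer userspace is expected to locate it through the ELF auxiliary vector instead. A small sketch of that lookup is below, assuming the i386 convention that AT_SYSINFO carries the vsyscall entry point, that AT_SYSINFO_EHDR carries the vDSO base, and that environ still points at the original process stack so the auxiliary vector sits just past it.

```c
#include <elf.h>
#include <stdio.h>

extern char **environ;

int main(void)
{
        char **p = environ;
        Elf32_auxv_t *aux;

        /* Skip past the environment strings to reach the auxiliary vector. */
        while (*p)
                p++;
        for (aux = (Elf32_auxv_t *)(p + 1); aux->a_type != AT_NULL; aux++) {
                if (aux->a_type == AT_SYSINFO)
                        printf("vsyscall entry: %#lx\n", (unsigned long)aux->a_un.a_val);
                if (aux->a_type == AT_SYSINFO_EHDR)
                        printf("vDSO base:      %#lx\n", (unsigned long)aux->a_un.a_val);
        }
        return 0;
}
```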
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 3e4adb1e2244..7cc0b189b82b 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
| @@ -46,6 +46,14 @@ cflags-y += -ffreestanding | |||
| 46 | # a lot more stack due to the lack of sharing of stacklots: | 46 | # a lot more stack due to the lack of sharing of stacklots: |
| 47 | CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;) | 47 | CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;) |
| 48 | 48 | ||
| 49 | # do binutils support CFI? | ||
| 50 | cflags-y += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,) | ||
| 51 | AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,) | ||
| 52 | |||
| 53 | # is .cfi_signal_frame supported too? | ||
| 54 | cflags-y += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,) | ||
| 55 | AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,) | ||
| 56 | |||
| 49 | CFLAGS += $(cflags-y) | 57 | CFLAGS += $(cflags-y) |
| 50 | 58 | ||
| 51 | # Default subarch .c files | 59 | # Default subarch .c files |
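The Makefile hunk probes the assembler with as-instr and, when the probe succeeds, adds -DCONFIG_AS_CFI=1 (and the matching define for .cfi_signal_frame) to the compile flags. A sketch of how such a define is typically consumed, in the style of the dwarf2 helper macros, is below; treat the exact macro names as illustrative.

```c
/* Illustrative dwarf2-style wrappers: emit CFI annotations only when the
 * build-time probe said the assembler understands them. */
#ifdef CONFIG_AS_CFI
#define CFI_STARTPROC            .cfi_startproc
#define CFI_ENDPROC              .cfi_endproc
#define CFI_ADJUST_CFA_OFFSET(n) .cfi_adjust_cfa_offset n
#else
/* Older binutils: expand to nothing so the same .S files still assemble. */
#define CFI_STARTPROC
#define CFI_ENDPROC
#define CFI_ADJUST_CFA_OFFSET(n)
#endif
```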
diff --git a/arch/i386/boot/edd.S b/arch/i386/boot/edd.S
index 4b84ea216f2b..34321368011a 100644
--- a/arch/i386/boot/edd.S
+++ b/arch/i386/boot/edd.S
| @@ -15,42 +15,95 @@ | |||
| 15 | #include <asm/setup.h> | 15 | #include <asm/setup.h> |
| 16 | 16 | ||
| 17 | #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) | 17 | #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) |
| 18 | |||
| 19 | # It is assumed that %ds == INITSEG here | ||
| 20 | |||
| 18 | movb $0, (EDD_MBR_SIG_NR_BUF) | 21 | movb $0, (EDD_MBR_SIG_NR_BUF) |
| 19 | movb $0, (EDDNR) | 22 | movb $0, (EDDNR) |
| 20 | 23 | ||
| 21 | # Check the command line for two options: | 24 | # Check the command line for options: |
| 22 | # edd=of disables EDD completely (edd=off) | 25 | # edd=of disables EDD completely (edd=off) |
| 23 | # edd=sk skips the MBR test (edd=skipmbr) | 26 | # edd=sk skips the MBR test (edd=skipmbr) |
| 27 | # edd=on re-enables EDD (edd=on) | ||
| 28 | |||
| 24 | pushl %esi | 29 | pushl %esi |
| 25 | cmpl $0, %cs:cmd_line_ptr | 30 | movw $edd_mbr_sig_start, %di # Default to edd=on |
| 26 | jz done_cl | 31 | |
| 27 | movl %cs:(cmd_line_ptr), %esi | 32 | movl %cs:(cmd_line_ptr), %esi |
| 28 | # ds:esi has the pointer to the command line now | 33 | andl %esi, %esi |
| 29 | movl $(COMMAND_LINE_SIZE-7), %ecx | 34 | jz old_cl # Old boot protocol? |
| 30 | # loop through kernel command line one byte at a time | 35 | |
| 31 | cl_loop: | 36 | # Convert to a real-mode pointer in fs:si |
| 32 | cmpl $EDD_CL_EQUALS, (%si) | 37 | movl %esi, %eax |
| 38 | shrl $4, %eax | ||
| 39 | movw %ax, %fs | ||
| 40 | andw $0xf, %si | ||
| 41 | jmp have_cl_pointer | ||
| 42 | |||
| 43 | # Old-style boot protocol? | ||
| 44 | old_cl: | ||
| 45 | push %ds # aka INITSEG | ||
| 46 | pop %fs | ||
| 47 | |||
| 48 | cmpw $0xa33f, (0x20) | ||
| 49 | jne done_cl # No command line at all? | ||
| 50 | movw (0x22), %si # Pointer relative to INITSEG | ||
| 51 | |||
| 52 | # fs:si has the pointer to the command line now | ||
| 53 | have_cl_pointer: | ||
| 54 | |||
| 55 | # Loop through kernel command line one byte at a time. Just in | ||
| 56 | # case the loader is buggy and failed to null-terminate the command line | ||
| 57 | # terminate if we get close enough to the end of the segment that we | ||
| 58 | # cannot fit "edd=XX"... | ||
| 59 | cl_atspace: | ||
| 60 | cmpw $-5, %si # Watch for segment wraparound | ||
| 61 | jae done_cl | ||
| 62 | movl %fs:(%si), %eax | ||
| 63 | andb %al, %al # End of line? | ||
| 64 | jz done_cl | ||
| 65 | cmpl $EDD_CL_EQUALS, %eax | ||
| 33 | jz found_edd_equals | 66 | jz found_edd_equals |
| 34 | incl %esi | 67 | cmpb $0x20, %al # <= space consider whitespace |
| 35 | loop cl_loop | 68 | ja cl_skipword |
| 36 | jmp done_cl | 69 | incw %si |
| 70 | jmp cl_atspace | ||
| 71 | |||
| 72 | cl_skipword: | ||
| 73 | cmpw $-5, %si # Watch for segment wraparound | ||
| 74 | jae done_cl | ||
| 75 | movb %fs:(%si), %al # End of string? | ||
| 76 | andb %al, %al | ||
| 77 | jz done_cl | ||
| 78 | cmpb $0x20, %al | ||
| 79 | jbe cl_atspace | ||
| 80 | incw %si | ||
| 81 | jmp cl_skipword | ||
| 82 | |||
| 37 | found_edd_equals: | 83 | found_edd_equals: |
| 38 | # only looking at first two characters after equals | 84 | # only looking at first two characters after equals |
| 39 | addl $4, %esi | 85 | # late overrides early on the command line, so keep going after finding something |
| 40 | cmpw $EDD_CL_OFF, (%si) # edd=of | 86 | movw %fs:4(%si), %ax |
| 41 | jz do_edd_off | 87 | cmpw $EDD_CL_OFF, %ax # edd=of |
| 42 | cmpw $EDD_CL_SKIP, (%si) # edd=sk | 88 | je do_edd_off |
| 43 | jz do_edd_skipmbr | 89 | cmpw $EDD_CL_SKIP, %ax # edd=sk |
| 44 | jmp done_cl | 90 | je do_edd_skipmbr |
| 91 | cmpw $EDD_CL_ON, %ax # edd=on | ||
| 92 | je do_edd_on | ||
| 93 | jmp cl_skipword | ||
| 45 | do_edd_skipmbr: | 94 | do_edd_skipmbr: |
| 46 | popl %esi | 95 | movw $edd_start, %di |
| 47 | jmp edd_start | 96 | jmp cl_skipword |
| 48 | do_edd_off: | 97 | do_edd_off: |
| 49 | popl %esi | 98 | movw $edd_done, %di |
| 50 | jmp edd_done | 99 | jmp cl_skipword |
| 100 | do_edd_on: | ||
| 101 | movw $edd_mbr_sig_start, %di | ||
| 102 | jmp cl_skipword | ||
| 103 | |||
| 51 | done_cl: | 104 | done_cl: |
| 52 | popl %esi | 105 | popl %esi |
| 53 | 106 | jmpw *%di | |
| 54 | 107 | ||
| 55 | # Read the first sector of each BIOS disk device and store the 4-byte signature | 108 | # Read the first sector of each BIOS disk device and store the 4-byte signature |
| 56 | edd_mbr_sig_start: | 109 | edd_mbr_sig_start: |
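The rewritten loop above scans the kernel command line for edd= in real-mode assembly: it honours the new boot-protocol pointer, falls back to the old INITSEG-relative pointer, and lets a later option override an earlier one. A C rendering of the same policy may make the control flow easier to follow; it is illustrative only (the real code runs in 16-bit mode without libc, and additionally requires the option to start a whitespace-delimited word, which strstr does not check).

```c
#include <string.h>

enum edd_mode { EDD_ON, EDD_SKIPMBR, EDD_OFF };

/* Same policy as the assembly: default to "on", the last edd= option wins,
 * and only the first two characters after '=' are examined. */
static enum edd_mode parse_edd(const char *cmdline)
{
        enum edd_mode mode = EDD_ON;
        const char *p = cmdline;

        while ((p = strstr(p, "edd=")) != NULL) {
                p += 4;
                if (strncmp(p, "of", 2) == 0)        /* edd=off */
                        mode = EDD_OFF;
                else if (strncmp(p, "sk", 2) == 0)   /* edd=skipmbr */
                        mode = EDD_SKIPMBR;
                else if (strncmp(p, "on", 2) == 0)   /* edd=on */
                        mode = EDD_ON;
        }
        return mode;
}
```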
diff --git a/arch/i386/boot/setup.S b/arch/i386/boot/setup.S
index d2b684cd620a..3aec4538a113 100644
--- a/arch/i386/boot/setup.S
+++ b/arch/i386/boot/setup.S
| @@ -494,12 +494,12 @@ no_voyager: | |||
| 494 | movw %cs, %ax # aka SETUPSEG | 494 | movw %cs, %ax # aka SETUPSEG |
| 495 | subw $DELTA_INITSEG, %ax # aka INITSEG | 495 | subw $DELTA_INITSEG, %ax # aka INITSEG |
| 496 | movw %ax, %ds | 496 | movw %ax, %ds |
| 497 | movw $0, (0x1ff) # default is no pointing device | 497 | movb $0, (0x1ff) # default is no pointing device |
| 498 | int $0x11 # int 0x11: equipment list | 498 | int $0x11 # int 0x11: equipment list |
| 499 | testb $0x04, %al # check if mouse installed | 499 | testb $0x04, %al # check if mouse installed |
| 500 | jz no_psmouse | 500 | jz no_psmouse |
| 501 | 501 | ||
| 502 | movw $0xAA, (0x1ff) # device present | 502 | movb $0xAA, (0x1ff) # device present |
| 503 | no_psmouse: | 503 | no_psmouse: |
| 504 | 504 | ||
| 505 | #if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE) | 505 | #if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE) |
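The setup.S change replaces a 16-bit store with an 8-bit one because the pointing-device flag at offset 0x1ff of the boot-parameter page is a single byte; a word store there also touches offset 0x200. A tiny illustration of the difference, with the page modelled as a plain byte array, is:

```c
unsigned char param_page[4096];

void mark_mouse_present(void)
{
        /* What the old movw did: writes param_page[0x1ff] AND param_page[0x200].
         * *(unsigned short *)&param_page[0x1ff] = 0xAA;
         */

        /* What the fixed movb does: touches only the intended byte. */
        param_page[0x1ff] = 0xAA;
}
```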
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 89ebb7a316ab..1a29bfa26d0c 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
| @@ -1,41 +1,51 @@ | |||
| 1 | # | 1 | # |
| 2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
| 3 | # Linux kernel version: 2.6.18-git5 | ||
| 4 | # Tue Sep 26 09:30:47 2006 | ||
| 3 | # | 5 | # |
| 4 | CONFIG_X86_32=y | 6 | CONFIG_X86_32=y |
| 7 | CONFIG_GENERIC_TIME=y | ||
| 8 | CONFIG_LOCKDEP_SUPPORT=y | ||
| 9 | CONFIG_STACKTRACE_SUPPORT=y | ||
| 5 | CONFIG_SEMAPHORE_SLEEPERS=y | 10 | CONFIG_SEMAPHORE_SLEEPERS=y |
| 6 | CONFIG_X86=y | 11 | CONFIG_X86=y |
| 7 | CONFIG_MMU=y | 12 | CONFIG_MMU=y |
| 8 | CONFIG_GENERIC_ISA_DMA=y | 13 | CONFIG_GENERIC_ISA_DMA=y |
| 9 | CONFIG_GENERIC_IOMAP=y | 14 | CONFIG_GENERIC_IOMAP=y |
| 15 | CONFIG_GENERIC_HWEIGHT=y | ||
| 10 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y | 16 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y |
| 11 | CONFIG_DMI=y | 17 | CONFIG_DMI=y |
| 18 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
| 12 | 19 | ||
| 13 | # | 20 | # |
| 14 | # Code maturity level options | 21 | # Code maturity level options |
| 15 | # | 22 | # |
| 16 | CONFIG_EXPERIMENTAL=y | 23 | CONFIG_EXPERIMENTAL=y |
| 17 | CONFIG_BROKEN_ON_SMP=y | 24 | CONFIG_LOCK_KERNEL=y |
| 18 | CONFIG_INIT_ENV_ARG_LIMIT=32 | 25 | CONFIG_INIT_ENV_ARG_LIMIT=32 |
| 19 | 26 | ||
| 20 | # | 27 | # |
| 21 | # General setup | 28 | # General setup |
| 22 | # | 29 | # |
| 23 | CONFIG_LOCALVERSION="" | 30 | CONFIG_LOCALVERSION="" |
| 24 | # CONFIG_LOCALVERSION_AUTO is not set | 31 | CONFIG_LOCALVERSION_AUTO=y |
| 25 | CONFIG_SWAP=y | 32 | CONFIG_SWAP=y |
| 26 | CONFIG_SYSVIPC=y | 33 | CONFIG_SYSVIPC=y |
| 27 | # CONFIG_POSIX_MQUEUE is not set | 34 | CONFIG_POSIX_MQUEUE=y |
| 28 | # CONFIG_BSD_PROCESS_ACCT is not set | 35 | # CONFIG_BSD_PROCESS_ACCT is not set |
| 29 | CONFIG_SYSCTL=y | 36 | # CONFIG_TASKSTATS is not set |
| 30 | # CONFIG_AUDIT is not set | 37 | # CONFIG_AUDIT is not set |
| 31 | CONFIG_IKCONFIG=y | 38 | CONFIG_IKCONFIG=y |
| 32 | CONFIG_IKCONFIG_PROC=y | 39 | CONFIG_IKCONFIG_PROC=y |
| 40 | # CONFIG_CPUSETS is not set | ||
| 41 | # CONFIG_RELAY is not set | ||
| 33 | CONFIG_INITRAMFS_SOURCE="" | 42 | CONFIG_INITRAMFS_SOURCE="" |
| 34 | CONFIG_UID16=y | ||
| 35 | CONFIG_VM86=y | ||
| 36 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | 43 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y |
| 37 | # CONFIG_EMBEDDED is not set | 44 | # CONFIG_EMBEDDED is not set |
| 45 | CONFIG_UID16=y | ||
| 46 | CONFIG_SYSCTL=y | ||
| 38 | CONFIG_KALLSYMS=y | 47 | CONFIG_KALLSYMS=y |
| 48 | CONFIG_KALLSYMS_ALL=y | ||
| 39 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | 49 | # CONFIG_KALLSYMS_EXTRA_PASS is not set |
| 40 | CONFIG_HOTPLUG=y | 50 | CONFIG_HOTPLUG=y |
| 41 | CONFIG_PRINTK=y | 51 | CONFIG_PRINTK=y |
| @@ -45,11 +55,9 @@ CONFIG_BASE_FULL=y | |||
| 45 | CONFIG_FUTEX=y | 55 | CONFIG_FUTEX=y |
| 46 | CONFIG_EPOLL=y | 56 | CONFIG_EPOLL=y |
| 47 | CONFIG_SHMEM=y | 57 | CONFIG_SHMEM=y |
| 48 | CONFIG_CC_ALIGN_FUNCTIONS=0 | ||
| 49 | CONFIG_CC_ALIGN_LABELS=0 | ||
| 50 | CONFIG_CC_ALIGN_LOOPS=0 | ||
| 51 | CONFIG_CC_ALIGN_JUMPS=0 | ||
| 52 | CONFIG_SLAB=y | 58 | CONFIG_SLAB=y |
| 59 | CONFIG_VM_EVENT_COUNTERS=y | ||
| 60 | CONFIG_RT_MUTEXES=y | ||
| 53 | # CONFIG_TINY_SHMEM is not set | 61 | # CONFIG_TINY_SHMEM is not set |
| 54 | CONFIG_BASE_SMALL=0 | 62 | CONFIG_BASE_SMALL=0 |
| 55 | # CONFIG_SLOB is not set | 63 | # CONFIG_SLOB is not set |
| @@ -60,41 +68,45 @@ CONFIG_BASE_SMALL=0 | |||
| 60 | CONFIG_MODULES=y | 68 | CONFIG_MODULES=y |
| 61 | CONFIG_MODULE_UNLOAD=y | 69 | CONFIG_MODULE_UNLOAD=y |
| 62 | CONFIG_MODULE_FORCE_UNLOAD=y | 70 | CONFIG_MODULE_FORCE_UNLOAD=y |
| 63 | CONFIG_OBSOLETE_MODPARM=y | ||
| 64 | # CONFIG_MODVERSIONS is not set | 71 | # CONFIG_MODVERSIONS is not set |
| 65 | # CONFIG_MODULE_SRCVERSION_ALL is not set | 72 | # CONFIG_MODULE_SRCVERSION_ALL is not set |
| 66 | # CONFIG_KMOD is not set | 73 | # CONFIG_KMOD is not set |
| 74 | CONFIG_STOP_MACHINE=y | ||
| 67 | 75 | ||
| 68 | # | 76 | # |
| 69 | # Block layer | 77 | # Block layer |
| 70 | # | 78 | # |
| 71 | # CONFIG_LBD is not set | 79 | CONFIG_LBD=y |
| 80 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
| 81 | # CONFIG_LSF is not set | ||
| 72 | 82 | ||
| 73 | # | 83 | # |
| 74 | # IO Schedulers | 84 | # IO Schedulers |
| 75 | # | 85 | # |
| 76 | CONFIG_IOSCHED_NOOP=y | 86 | CONFIG_IOSCHED_NOOP=y |
| 77 | # CONFIG_IOSCHED_AS is not set | 87 | CONFIG_IOSCHED_AS=y |
| 78 | # CONFIG_IOSCHED_DEADLINE is not set | 88 | CONFIG_IOSCHED_DEADLINE=y |
| 79 | CONFIG_IOSCHED_CFQ=y | 89 | CONFIG_IOSCHED_CFQ=y |
| 80 | # CONFIG_DEFAULT_AS is not set | 90 | CONFIG_DEFAULT_AS=y |
| 81 | # CONFIG_DEFAULT_DEADLINE is not set | 91 | # CONFIG_DEFAULT_DEADLINE is not set |
| 82 | CONFIG_DEFAULT_CFQ=y | 92 | # CONFIG_DEFAULT_CFQ is not set |
| 83 | # CONFIG_DEFAULT_NOOP is not set | 93 | # CONFIG_DEFAULT_NOOP is not set |
| 84 | CONFIG_DEFAULT_IOSCHED="cfq" | 94 | CONFIG_DEFAULT_IOSCHED="anticipatory" |
| 85 | 95 | ||
| 86 | # | 96 | # |
| 87 | # Processor type and features | 97 | # Processor type and features |
| 88 | # | 98 | # |
| 89 | CONFIG_X86_PC=y | 99 | CONFIG_SMP=y |
| 100 | # CONFIG_X86_PC is not set | ||
| 90 | # CONFIG_X86_ELAN is not set | 101 | # CONFIG_X86_ELAN is not set |
| 91 | # CONFIG_X86_VOYAGER is not set | 102 | # CONFIG_X86_VOYAGER is not set |
| 92 | # CONFIG_X86_NUMAQ is not set | 103 | # CONFIG_X86_NUMAQ is not set |
| 93 | # CONFIG_X86_SUMMIT is not set | 104 | # CONFIG_X86_SUMMIT is not set |
| 94 | # CONFIG_X86_BIGSMP is not set | 105 | # CONFIG_X86_BIGSMP is not set |
| 95 | # CONFIG_X86_VISWS is not set | 106 | # CONFIG_X86_VISWS is not set |
| 96 | # CONFIG_X86_GENERICARCH is not set | 107 | CONFIG_X86_GENERICARCH=y |
| 97 | # CONFIG_X86_ES7000 is not set | 108 | # CONFIG_X86_ES7000 is not set |
| 109 | CONFIG_X86_CYCLONE_TIMER=y | ||
| 98 | # CONFIG_M386 is not set | 110 | # CONFIG_M386 is not set |
| 99 | # CONFIG_M486 is not set | 111 | # CONFIG_M486 is not set |
| 100 | # CONFIG_M586 is not set | 112 | # CONFIG_M586 is not set |
| @@ -102,11 +114,11 @@ CONFIG_X86_PC=y | |||
| 102 | # CONFIG_M586MMX is not set | 114 | # CONFIG_M586MMX is not set |
| 103 | # CONFIG_M686 is not set | 115 | # CONFIG_M686 is not set |
| 104 | # CONFIG_MPENTIUMII is not set | 116 | # CONFIG_MPENTIUMII is not set |
| 105 | # CONFIG_MPENTIUMIII is not set | 117 | CONFIG_MPENTIUMIII=y |
| 106 | # CONFIG_MPENTIUMM is not set | 118 | # CONFIG_MPENTIUMM is not set |
| 107 | # CONFIG_MPENTIUM4 is not set | 119 | # CONFIG_MPENTIUM4 is not set |
| 108 | # CONFIG_MK6 is not set | 120 | # CONFIG_MK6 is not set |
| 109 | CONFIG_MK7=y | 121 | # CONFIG_MK7 is not set |
| 110 | # CONFIG_MK8 is not set | 122 | # CONFIG_MK8 is not set |
| 111 | # CONFIG_MCRUSOE is not set | 123 | # CONFIG_MCRUSOE is not set |
| 112 | # CONFIG_MEFFICEON is not set | 124 | # CONFIG_MEFFICEON is not set |
| @@ -117,10 +129,10 @@ CONFIG_MK7=y | |||
| 117 | # CONFIG_MGEODE_LX is not set | 129 | # CONFIG_MGEODE_LX is not set |
| 118 | # CONFIG_MCYRIXIII is not set | 130 | # CONFIG_MCYRIXIII is not set |
| 119 | # CONFIG_MVIAC3_2 is not set | 131 | # CONFIG_MVIAC3_2 is not set |
| 120 | # CONFIG_X86_GENERIC is not set | 132 | CONFIG_X86_GENERIC=y |
| 121 | CONFIG_X86_CMPXCHG=y | 133 | CONFIG_X86_CMPXCHG=y |
| 122 | CONFIG_X86_XADD=y | 134 | CONFIG_X86_XADD=y |
| 123 | CONFIG_X86_L1_CACHE_SHIFT=6 | 135 | CONFIG_X86_L1_CACHE_SHIFT=7 |
| 124 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | 136 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y |
| 125 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 137 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
| 126 | CONFIG_X86_WP_WORKS_OK=y | 138 | CONFIG_X86_WP_WORKS_OK=y |
| @@ -131,26 +143,28 @@ CONFIG_X86_CMPXCHG64=y | |||
| 131 | CONFIG_X86_GOOD_APIC=y | 143 | CONFIG_X86_GOOD_APIC=y |
| 132 | CONFIG_X86_INTEL_USERCOPY=y | 144 | CONFIG_X86_INTEL_USERCOPY=y |
| 133 | CONFIG_X86_USE_PPRO_CHECKSUM=y | 145 | CONFIG_X86_USE_PPRO_CHECKSUM=y |
| 134 | CONFIG_X86_USE_3DNOW=y | ||
| 135 | CONFIG_X86_TSC=y | 146 | CONFIG_X86_TSC=y |
| 136 | # CONFIG_HPET_TIMER is not set | 147 | CONFIG_HPET_TIMER=y |
| 137 | # CONFIG_SMP is not set | 148 | CONFIG_HPET_EMULATE_RTC=y |
| 138 | CONFIG_PREEMPT_NONE=y | 149 | CONFIG_NR_CPUS=32 |
| 139 | # CONFIG_PREEMPT_VOLUNTARY is not set | 150 | CONFIG_SCHED_SMT=y |
| 151 | CONFIG_SCHED_MC=y | ||
| 152 | # CONFIG_PREEMPT_NONE is not set | ||
| 153 | CONFIG_PREEMPT_VOLUNTARY=y | ||
| 140 | # CONFIG_PREEMPT is not set | 154 | # CONFIG_PREEMPT is not set |
| 141 | CONFIG_X86_UP_APIC=y | 155 | CONFIG_PREEMPT_BKL=y |
| 142 | CONFIG_X86_UP_IOAPIC=y | ||
| 143 | CONFIG_X86_LOCAL_APIC=y | 156 | CONFIG_X86_LOCAL_APIC=y |
| 144 | CONFIG_X86_IO_APIC=y | 157 | CONFIG_X86_IO_APIC=y |
| 145 | CONFIG_X86_MCE=y | 158 | CONFIG_X86_MCE=y |
| 146 | CONFIG_X86_MCE_NONFATAL=y | 159 | CONFIG_X86_MCE_NONFATAL=y |
| 147 | # CONFIG_X86_MCE_P4THERMAL is not set | 160 | CONFIG_X86_MCE_P4THERMAL=y |
| 161 | CONFIG_VM86=y | ||
| 148 | # CONFIG_TOSHIBA is not set | 162 | # CONFIG_TOSHIBA is not set |
| 149 | # CONFIG_I8K is not set | 163 | # CONFIG_I8K is not set |
| 150 | # CONFIG_X86_REBOOTFIXUPS is not set | 164 | # CONFIG_X86_REBOOTFIXUPS is not set |
| 151 | # CONFIG_MICROCODE is not set | 165 | CONFIG_MICROCODE=y |
| 152 | # CONFIG_X86_MSR is not set | 166 | CONFIG_X86_MSR=y |
| 153 | # CONFIG_X86_CPUID is not set | 167 | CONFIG_X86_CPUID=y |
| 154 | 168 | ||
| 155 | # | 169 | # |
| 156 | # Firmware Drivers | 170 | # Firmware Drivers |
| @@ -158,68 +172,67 @@ CONFIG_X86_MCE_NONFATAL=y | |||
| 158 | # CONFIG_EDD is not set | 172 | # CONFIG_EDD is not set |
| 159 | # CONFIG_DELL_RBU is not set | 173 | # CONFIG_DELL_RBU is not set |
| 160 | # CONFIG_DCDBAS is not set | 174 | # CONFIG_DCDBAS is not set |
| 161 | CONFIG_NOHIGHMEM=y | 175 | # CONFIG_NOHIGHMEM is not set |
| 162 | # CONFIG_HIGHMEM4G is not set | 176 | CONFIG_HIGHMEM4G=y |
| 163 | # CONFIG_HIGHMEM64G is not set | 177 | # CONFIG_HIGHMEM64G is not set |
| 164 | CONFIG_VMSPLIT_3G=y | ||
| 165 | # CONFIG_VMSPLIT_3G_OPT is not set | ||
| 166 | # CONFIG_VMSPLIT_2G is not set | ||
| 167 | # CONFIG_VMSPLIT_1G is not set | ||
| 168 | CONFIG_PAGE_OFFSET=0xC0000000 | 178 | CONFIG_PAGE_OFFSET=0xC0000000 |
| 169 | CONFIG_ARCH_FLATMEM_ENABLE=y | 179 | CONFIG_HIGHMEM=y |
| 170 | CONFIG_ARCH_SPARSEMEM_ENABLE=y | ||
| 171 | CONFIG_ARCH_SELECT_MEMORY_MODEL=y | ||
| 172 | CONFIG_SELECT_MEMORY_MODEL=y | 180 | CONFIG_SELECT_MEMORY_MODEL=y |
| 173 | CONFIG_FLATMEM_MANUAL=y | 181 | CONFIG_FLATMEM_MANUAL=y |
| 174 | # CONFIG_DISCONTIGMEM_MANUAL is not set | 182 | # CONFIG_DISCONTIGMEM_MANUAL is not set |
| 175 | # CONFIG_SPARSEMEM_MANUAL is not set | 183 | # CONFIG_SPARSEMEM_MANUAL is not set |
| 176 | CONFIG_FLATMEM=y | 184 | CONFIG_FLATMEM=y |
| 177 | CONFIG_FLAT_NODE_MEM_MAP=y | 185 | CONFIG_FLAT_NODE_MEM_MAP=y |
| 178 | CONFIG_SPARSEMEM_STATIC=y | 186 | # CONFIG_SPARSEMEM_STATIC is not set |
| 179 | CONFIG_SPLIT_PTLOCK_CPUS=4 | 187 | CONFIG_SPLIT_PTLOCK_CPUS=4 |
| 188 | CONFIG_RESOURCES_64BIT=y | ||
| 189 | # CONFIG_HIGHPTE is not set | ||
| 180 | # CONFIG_MATH_EMULATION is not set | 190 | # CONFIG_MATH_EMULATION is not set |
| 181 | CONFIG_MTRR=y | 191 | CONFIG_MTRR=y |
| 182 | # CONFIG_EFI is not set | 192 | # CONFIG_EFI is not set |
| 193 | # CONFIG_IRQBALANCE is not set | ||
| 183 | CONFIG_REGPARM=y | 194 | CONFIG_REGPARM=y |
| 184 | # CONFIG_SECCOMP is not set | 195 | CONFIG_SECCOMP=y |
| 185 | CONFIG_HZ_100=y | 196 | # CONFIG_HZ_100 is not set |
| 186 | # CONFIG_HZ_250 is not set | 197 | CONFIG_HZ_250=y |
| 187 | # CONFIG_HZ_1000 is not set | 198 | # CONFIG_HZ_1000 is not set |
| 188 | CONFIG_HZ=100 | 199 | CONFIG_HZ=250 |
| 189 | # CONFIG_KEXEC is not set | 200 | # CONFIG_KEXEC is not set |
| 201 | # CONFIG_CRASH_DUMP is not set | ||
| 190 | CONFIG_PHYSICAL_START=0x100000 | 202 | CONFIG_PHYSICAL_START=0x100000 |
| 191 | CONFIG_DOUBLEFAULT=y | 203 | # CONFIG_HOTPLUG_CPU is not set |
| 204 | CONFIG_COMPAT_VDSO=y | ||
| 205 | CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y | ||
| 192 | 206 | ||
| 193 | # | 207 | # |
| 194 | # Power management options (ACPI, APM) | 208 | # Power management options (ACPI, APM) |
| 195 | # | 209 | # |
| 196 | CONFIG_PM=y | 210 | CONFIG_PM=y |
| 197 | # CONFIG_PM_LEGACY is not set | 211 | CONFIG_PM_LEGACY=y |
| 198 | # CONFIG_PM_DEBUG is not set | 212 | # CONFIG_PM_DEBUG is not set |
| 199 | CONFIG_SOFTWARE_SUSPEND=y | ||
| 200 | CONFIG_PM_STD_PARTITION="" | ||
| 201 | 213 | ||
| 202 | # | 214 | # |
| 203 | # ACPI (Advanced Configuration and Power Interface) Support | 215 | # ACPI (Advanced Configuration and Power Interface) Support |
| 204 | # | 216 | # |
| 205 | CONFIG_ACPI=y | 217 | CONFIG_ACPI=y |
| 206 | # CONFIG_ACPI_SLEEP is not set | 218 | CONFIG_ACPI_AC=y |
| 207 | # CONFIG_ACPI_AC is not set | 219 | CONFIG_ACPI_BATTERY=y |
| 208 | # CONFIG_ACPI_BATTERY is not set | 220 | CONFIG_ACPI_BUTTON=y |
| 209 | # CONFIG_ACPI_BUTTON is not set | ||
| 210 | # CONFIG_ACPI_VIDEO is not set | 221 | # CONFIG_ACPI_VIDEO is not set |
| 211 | # CONFIG_ACPI_HOTKEY is not set | 222 | # CONFIG_ACPI_HOTKEY is not set |
| 212 | # CONFIG_ACPI_FAN is not set | 223 | CONFIG_ACPI_FAN=y |
| 213 | # CONFIG_ACPI_PROCESSOR is not set | 224 | # CONFIG_ACPI_DOCK is not set |
| 225 | CONFIG_ACPI_PROCESSOR=y | ||
| 226 | CONFIG_ACPI_THERMAL=y | ||
| 214 | # CONFIG_ACPI_ASUS is not set | 227 | # CONFIG_ACPI_ASUS is not set |
| 215 | # CONFIG_ACPI_IBM is not set | 228 | # CONFIG_ACPI_IBM is not set |
| 216 | # CONFIG_ACPI_TOSHIBA is not set | 229 | # CONFIG_ACPI_TOSHIBA is not set |
| 217 | CONFIG_ACPI_BLACKLIST_YEAR=0 | 230 | CONFIG_ACPI_BLACKLIST_YEAR=2001 |
| 218 | # CONFIG_ACPI_DEBUG is not set | 231 | CONFIG_ACPI_DEBUG=y |
| 219 | CONFIG_ACPI_EC=y | 232 | CONFIG_ACPI_EC=y |
| 220 | CONFIG_ACPI_POWER=y | 233 | CONFIG_ACPI_POWER=y |
| 221 | CONFIG_ACPI_SYSTEM=y | 234 | CONFIG_ACPI_SYSTEM=y |
| 222 | # CONFIG_X86_PM_TIMER is not set | 235 | CONFIG_X86_PM_TIMER=y |
| 223 | # CONFIG_ACPI_CONTAINER is not set | 236 | # CONFIG_ACPI_CONTAINER is not set |
| 224 | 237 | ||
| 225 | # | 238 | # |
| @@ -230,7 +243,41 @@ CONFIG_ACPI_SYSTEM=y | |||
| 230 | # | 243 | # |
| 231 | # CPU Frequency scaling | 244 | # CPU Frequency scaling |
| 232 | # | 245 | # |
| 233 | # CONFIG_CPU_FREQ is not set | 246 | CONFIG_CPU_FREQ=y |
| 247 | CONFIG_CPU_FREQ_TABLE=y | ||
| 248 | CONFIG_CPU_FREQ_DEBUG=y | ||
| 249 | CONFIG_CPU_FREQ_STAT=y | ||
| 250 | # CONFIG_CPU_FREQ_STAT_DETAILS is not set | ||
| 251 | CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y | ||
| 252 | # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set | ||
| 253 | CONFIG_CPU_FREQ_GOV_PERFORMANCE=y | ||
| 254 | # CONFIG_CPU_FREQ_GOV_POWERSAVE is not set | ||
| 255 | CONFIG_CPU_FREQ_GOV_USERSPACE=y | ||
| 256 | CONFIG_CPU_FREQ_GOV_ONDEMAND=y | ||
| 257 | # CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set | ||
| 258 | |||
| 259 | # | ||
| 260 | # CPUFreq processor drivers | ||
| 261 | # | ||
| 262 | CONFIG_X86_ACPI_CPUFREQ=y | ||
| 263 | # CONFIG_X86_POWERNOW_K6 is not set | ||
| 264 | # CONFIG_X86_POWERNOW_K7 is not set | ||
| 265 | CONFIG_X86_POWERNOW_K8=y | ||
| 266 | CONFIG_X86_POWERNOW_K8_ACPI=y | ||
| 267 | # CONFIG_X86_GX_SUSPMOD is not set | ||
| 268 | # CONFIG_X86_SPEEDSTEP_CENTRINO is not set | ||
| 269 | # CONFIG_X86_SPEEDSTEP_ICH is not set | ||
| 270 | # CONFIG_X86_SPEEDSTEP_SMI is not set | ||
| 271 | # CONFIG_X86_P4_CLOCKMOD is not set | ||
| 272 | # CONFIG_X86_CPUFREQ_NFORCE2 is not set | ||
| 273 | # CONFIG_X86_LONGRUN is not set | ||
| 274 | # CONFIG_X86_LONGHAUL is not set | ||
| 275 | |||
| 276 | # | ||
| 277 | # shared options | ||
| 278 | # | ||
| 279 | CONFIG_X86_ACPI_CPUFREQ_PROC_INTF=y | ||
| 280 | # CONFIG_X86_SPEEDSTEP_LIB is not set | ||
| 234 | 281 | ||
| 235 | # | 282 | # |
| 236 | # Bus options (PCI, PCMCIA, EISA, MCA, ISA) | 283 | # Bus options (PCI, PCMCIA, EISA, MCA, ISA) |
| @@ -244,12 +291,13 @@ CONFIG_PCI_BIOS=y | |||
| 244 | CONFIG_PCI_DIRECT=y | 291 | CONFIG_PCI_DIRECT=y |
| 245 | CONFIG_PCI_MMCONFIG=y | 292 | CONFIG_PCI_MMCONFIG=y |
| 246 | # CONFIG_PCIEPORTBUS is not set | 293 | # CONFIG_PCIEPORTBUS is not set |
| 247 | # CONFIG_PCI_MSI is not set | 294 | CONFIG_PCI_MSI=y |
| 248 | # CONFIG_PCI_LEGACY_PROC is not set | 295 | # CONFIG_PCI_DEBUG is not set |
| 249 | CONFIG_ISA_DMA_API=y | 296 | CONFIG_ISA_DMA_API=y |
| 250 | # CONFIG_ISA is not set | 297 | # CONFIG_ISA is not set |
| 251 | # CONFIG_MCA is not set | 298 | # CONFIG_MCA is not set |
| 252 | # CONFIG_SCx200 is not set | 299 | # CONFIG_SCx200 is not set |
| 300 | CONFIG_K8_NB=y | ||
| 253 | 301 | ||
| 254 | # | 302 | # |
| 255 | # PCCARD (PCMCIA/CardBus) support | 303 | # PCCARD (PCMCIA/CardBus) support |
| @@ -278,93 +326,54 @@ CONFIG_NET=y | |||
| 278 | # | 326 | # |
| 279 | # CONFIG_NETDEBUG is not set | 327 | # CONFIG_NETDEBUG is not set |
| 280 | CONFIG_PACKET=y | 328 | CONFIG_PACKET=y |
| 281 | CONFIG_PACKET_MMAP=y | 329 | # CONFIG_PACKET_MMAP is not set |
| 282 | CONFIG_UNIX=y | 330 | CONFIG_UNIX=y |
| 331 | CONFIG_XFRM=y | ||
| 332 | # CONFIG_XFRM_USER is not set | ||
| 333 | # CONFIG_XFRM_SUB_POLICY is not set | ||
| 283 | # CONFIG_NET_KEY is not set | 334 | # CONFIG_NET_KEY is not set |
| 284 | CONFIG_INET=y | 335 | CONFIG_INET=y |
| 285 | # CONFIG_IP_MULTICAST is not set | 336 | CONFIG_IP_MULTICAST=y |
| 286 | # CONFIG_IP_ADVANCED_ROUTER is not set | 337 | # CONFIG_IP_ADVANCED_ROUTER is not set |
| 287 | CONFIG_IP_FIB_HASH=y | 338 | CONFIG_IP_FIB_HASH=y |
| 288 | # CONFIG_IP_PNP is not set | 339 | CONFIG_IP_PNP=y |
| 340 | CONFIG_IP_PNP_DHCP=y | ||
| 341 | # CONFIG_IP_PNP_BOOTP is not set | ||
| 342 | # CONFIG_IP_PNP_RARP is not set | ||
| 289 | # CONFIG_NET_IPIP is not set | 343 | # CONFIG_NET_IPIP is not set |
| 290 | # CONFIG_NET_IPGRE is not set | 344 | # CONFIG_NET_IPGRE is not set |
| 345 | # CONFIG_IP_MROUTE is not set | ||
| 291 | # CONFIG_ARPD is not set | 346 | # CONFIG_ARPD is not set |
| 292 | # CONFIG_SYN_COOKIES is not set | 347 | # CONFIG_SYN_COOKIES is not set |
| 293 | # CONFIG_INET_AH is not set | 348 | # CONFIG_INET_AH is not set |
| 294 | # CONFIG_INET_ESP is not set | 349 | # CONFIG_INET_ESP is not set |
| 295 | # CONFIG_INET_IPCOMP is not set | 350 | # CONFIG_INET_IPCOMP is not set |
| 351 | # CONFIG_INET_XFRM_TUNNEL is not set | ||
| 296 | # CONFIG_INET_TUNNEL is not set | 352 | # CONFIG_INET_TUNNEL is not set |
| 297 | # CONFIG_INET_DIAG is not set | 353 | CONFIG_INET_XFRM_MODE_TRANSPORT=y |
| 354 | CONFIG_INET_XFRM_MODE_TUNNEL=y | ||
| 355 | CONFIG_INET_DIAG=y | ||
| 356 | CONFIG_INET_TCP_DIAG=y | ||
| 298 | # CONFIG_TCP_CONG_ADVANCED is not set | 357 | # CONFIG_TCP_CONG_ADVANCED is not set |
| 299 | CONFIG_TCP_CONG_BIC=y | 358 | CONFIG_TCP_CONG_CUBIC=y |
| 300 | 359 | CONFIG_DEFAULT_TCP_CONG="cubic" | |
| 301 | # | 360 | CONFIG_IPV6=y |
| 302 | # IP: Virtual Server Configuration | 361 | # CONFIG_IPV6_PRIVACY is not set |
| 303 | # | 362 | # CONFIG_IPV6_ROUTER_PREF is not set |
| 304 | # CONFIG_IP_VS is not set | 363 | # CONFIG_INET6_AH is not set |
| 305 | # CONFIG_IPV6 is not set | 364 | # CONFIG_INET6_ESP is not set |
| 306 | CONFIG_NETFILTER=y | 365 | # CONFIG_INET6_IPCOMP is not set |
| 307 | # CONFIG_NETFILTER_DEBUG is not set | 366 | # CONFIG_IPV6_MIP6 is not set |
| 308 | 367 | # CONFIG_INET6_XFRM_TUNNEL is not set | |
| 309 | # | 368 | # CONFIG_INET6_TUNNEL is not set |
| 310 | # Core Netfilter Configuration | 369 | CONFIG_INET6_XFRM_MODE_TRANSPORT=y |
| 311 | # | 370 | CONFIG_INET6_XFRM_MODE_TUNNEL=y |
| 312 | # CONFIG_NETFILTER_NETLINK is not set | 371 | # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set |
| 313 | CONFIG_NETFILTER_XTABLES=y | 372 | # CONFIG_IPV6_TUNNEL is not set |
| 314 | # CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set | 373 | # CONFIG_IPV6_SUBTREES is not set |
| 315 | # CONFIG_NETFILTER_XT_TARGET_MARK is not set | 374 | # CONFIG_IPV6_MULTIPLE_TABLES is not set |
| 316 | # CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set | 375 | # CONFIG_NETWORK_SECMARK is not set |
| 317 | # CONFIG_NETFILTER_XT_MATCH_COMMENT is not set | 376 | # CONFIG_NETFILTER is not set |
| 318 | # CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set | ||
| 319 | # CONFIG_NETFILTER_XT_MATCH_DCCP is not set | ||
| 320 | # CONFIG_NETFILTER_XT_MATCH_HELPER is not set | ||
| 321 | # CONFIG_NETFILTER_XT_MATCH_LENGTH is not set | ||
| 322 | CONFIG_NETFILTER_XT_MATCH_LIMIT=y | ||
| 323 | CONFIG_NETFILTER_XT_MATCH_MAC=y | ||
| 324 | # CONFIG_NETFILTER_XT_MATCH_MARK is not set | ||
| 325 | # CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set | ||
| 326 | # CONFIG_NETFILTER_XT_MATCH_REALM is not set | ||
| 327 | # CONFIG_NETFILTER_XT_MATCH_SCTP is not set | ||
| 328 | CONFIG_NETFILTER_XT_MATCH_STATE=y | ||
| 329 | # CONFIG_NETFILTER_XT_MATCH_STRING is not set | ||
| 330 | # CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set | ||
| 331 | |||
| 332 | # | ||
| 333 | # IP: Netfilter Configuration | ||
| 334 | # | ||
| 335 | CONFIG_IP_NF_CONNTRACK=y | ||
| 336 | # CONFIG_IP_NF_CT_ACCT is not set | ||
| 337 | # CONFIG_IP_NF_CONNTRACK_MARK is not set | ||
| 338 | # CONFIG_IP_NF_CONNTRACK_EVENTS is not set | ||
| 339 | # CONFIG_IP_NF_CT_PROTO_SCTP is not set | ||
| 340 | CONFIG_IP_NF_FTP=y | ||
| 341 | # CONFIG_IP_NF_IRC is not set | ||
| 342 | # CONFIG_IP_NF_NETBIOS_NS is not set | ||
| 343 | # CONFIG_IP_NF_TFTP is not set | ||
| 344 | # CONFIG_IP_NF_AMANDA is not set | ||
| 345 | # CONFIG_IP_NF_PPTP is not set | ||
| 346 | # CONFIG_IP_NF_QUEUE is not set | ||
| 347 | CONFIG_IP_NF_IPTABLES=y | ||
| 348 | # CONFIG_IP_NF_MATCH_IPRANGE is not set | ||
| 349 | # CONFIG_IP_NF_MATCH_MULTIPORT is not set | ||
| 350 | # CONFIG_IP_NF_MATCH_TOS is not set | ||
| 351 | # CONFIG_IP_NF_MATCH_RECENT is not set | ||
| 352 | # CONFIG_IP_NF_MATCH_ECN is not set | ||
| 353 | # CONFIG_IP_NF_MATCH_DSCP is not set | ||
| 354 | # CONFIG_IP_NF_MATCH_AH_ESP is not set | ||
| 355 | # CONFIG_IP_NF_MATCH_TTL is not set | ||
| 356 | # CONFIG_IP_NF_MATCH_OWNER is not set | ||
| 357 | # CONFIG_IP_NF_MATCH_ADDRTYPE is not set | ||
| 358 | # CONFIG_IP_NF_MATCH_HASHLIMIT is not set | ||
| 359 | CONFIG_IP_NF_FILTER=y | ||
| 360 | # CONFIG_IP_NF_TARGET_REJECT is not set | ||
| 361 | CONFIG_IP_NF_TARGET_LOG=y | ||
| 362 | # CONFIG_IP_NF_TARGET_ULOG is not set | ||
| 363 | # CONFIG_IP_NF_TARGET_TCPMSS is not set | ||
| 364 | # CONFIG_IP_NF_NAT is not set | ||
| 365 | # CONFIG_IP_NF_MANGLE is not set | ||
| 366 | # CONFIG_IP_NF_RAW is not set | ||
| 367 | # CONFIG_IP_NF_ARPTABLES is not set | ||
| 368 | 377 | ||
| 369 | # | 378 | # |
| 370 | # DCCP Configuration (EXPERIMENTAL) | 379 | # DCCP Configuration (EXPERIMENTAL) |
| @@ -389,7 +398,6 @@ CONFIG_IP_NF_TARGET_LOG=y | |||
| 389 | # CONFIG_ATALK is not set | 398 | # CONFIG_ATALK is not set |
| 390 | # CONFIG_X25 is not set | 399 | # CONFIG_X25 is not set |
| 391 | # CONFIG_LAPB is not set | 400 | # CONFIG_LAPB is not set |
| 392 | # CONFIG_NET_DIVERT is not set | ||
| 393 | # CONFIG_ECONET is not set | 401 | # CONFIG_ECONET is not set |
| 394 | # CONFIG_WAN_ROUTER is not set | 402 | # CONFIG_WAN_ROUTER is not set |
| 395 | 403 | ||
| @@ -402,6 +410,7 @@ CONFIG_IP_NF_TARGET_LOG=y | |||
| 402 | # Network testing | 410 | # Network testing |
| 403 | # | 411 | # |
| 404 | # CONFIG_NET_PKTGEN is not set | 412 | # CONFIG_NET_PKTGEN is not set |
| 413 | # CONFIG_NET_TCPPROBE is not set | ||
| 405 | # CONFIG_HAMRADIO is not set | 414 | # CONFIG_HAMRADIO is not set |
| 406 | # CONFIG_IRDA is not set | 415 | # CONFIG_IRDA is not set |
| 407 | # CONFIG_BT is not set | 416 | # CONFIG_BT is not set |
| @@ -416,7 +425,9 @@ CONFIG_IP_NF_TARGET_LOG=y | |||
| 416 | # | 425 | # |
| 417 | CONFIG_STANDALONE=y | 426 | CONFIG_STANDALONE=y |
| 418 | CONFIG_PREVENT_FIRMWARE_BUILD=y | 427 | CONFIG_PREVENT_FIRMWARE_BUILD=y |
| 419 | # CONFIG_FW_LOADER is not set | 428 | CONFIG_FW_LOADER=y |
| 429 | # CONFIG_DEBUG_DRIVER is not set | ||
| 430 | # CONFIG_SYS_HYPERVISOR is not set | ||
| 420 | 431 | ||
| 421 | # | 432 | # |
| 422 | # Connector - unified userspace <-> kernelspace linker | 433 | # Connector - unified userspace <-> kernelspace linker |
| @@ -431,13 +442,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y | |||
| 431 | # | 442 | # |
| 432 | # Parallel port support | 443 | # Parallel port support |
| 433 | # | 444 | # |
| 434 | CONFIG_PARPORT=y | 445 | # CONFIG_PARPORT is not set |
| 435 | CONFIG_PARPORT_PC=y | ||
| 436 | # CONFIG_PARPORT_SERIAL is not set | ||
| 437 | # CONFIG_PARPORT_PC_FIFO is not set | ||
| 438 | # CONFIG_PARPORT_PC_SUPERIO is not set | ||
| 439 | # CONFIG_PARPORT_GSC is not set | ||
| 440 | CONFIG_PARPORT_1284=y | ||
| 441 | 446 | ||
| 442 | # | 447 | # |
| 443 | # Plug and Play support | 448 | # Plug and Play support |
| @@ -447,8 +452,7 @@ CONFIG_PARPORT_1284=y | |||
| 447 | # | 452 | # |
| 448 | # Block devices | 453 | # Block devices |
| 449 | # | 454 | # |
| 450 | # CONFIG_BLK_DEV_FD is not set | 455 | CONFIG_BLK_DEV_FD=y |
| 451 | # CONFIG_PARIDE is not set | ||
| 452 | # CONFIG_BLK_CPQ_DA is not set | 456 | # CONFIG_BLK_CPQ_DA is not set |
| 453 | # CONFIG_BLK_CPQ_CISS_DA is not set | 457 | # CONFIG_BLK_CPQ_CISS_DA is not set |
| 454 | # CONFIG_BLK_DEV_DAC960 is not set | 458 | # CONFIG_BLK_DEV_DAC960 is not set |
| @@ -459,8 +463,11 @@ CONFIG_BLK_DEV_LOOP=y | |||
| 459 | # CONFIG_BLK_DEV_NBD is not set | 463 | # CONFIG_BLK_DEV_NBD is not set |
| 460 | # CONFIG_BLK_DEV_SX8 is not set | 464 | # CONFIG_BLK_DEV_SX8 is not set |
| 461 | # CONFIG_BLK_DEV_UB is not set | 465 | # CONFIG_BLK_DEV_UB is not set |
| 462 | # CONFIG_BLK_DEV_RAM is not set | 466 | CONFIG_BLK_DEV_RAM=y |
| 463 | CONFIG_BLK_DEV_RAM_COUNT=16 | 467 | CONFIG_BLK_DEV_RAM_COUNT=16 |
| 468 | CONFIG_BLK_DEV_RAM_SIZE=4096 | ||
| 469 | CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 | ||
| 470 | CONFIG_BLK_DEV_INITRD=y | ||
| 464 | # CONFIG_CDROM_PKTCDVD is not set | 471 | # CONFIG_CDROM_PKTCDVD is not set |
| 465 | # CONFIG_ATA_OVER_ETH is not set | 472 | # CONFIG_ATA_OVER_ETH is not set |
| 466 | 473 | ||
| @@ -476,7 +483,7 @@ CONFIG_BLK_DEV_IDE=y | |||
| 476 | # CONFIG_BLK_DEV_IDE_SATA is not set | 483 | # CONFIG_BLK_DEV_IDE_SATA is not set |
| 477 | # CONFIG_BLK_DEV_HD_IDE is not set | 484 | # CONFIG_BLK_DEV_HD_IDE is not set |
| 478 | CONFIG_BLK_DEV_IDEDISK=y | 485 | CONFIG_BLK_DEV_IDEDISK=y |
| 479 | # CONFIG_IDEDISK_MULTI_MODE is not set | 486 | CONFIG_IDEDISK_MULTI_MODE=y |
| 480 | CONFIG_BLK_DEV_IDECD=y | 487 | CONFIG_BLK_DEV_IDECD=y |
| 481 | # CONFIG_BLK_DEV_IDETAPE is not set | 488 | # CONFIG_BLK_DEV_IDETAPE is not set |
| 482 | # CONFIG_BLK_DEV_IDEFLOPPY is not set | 489 | # CONFIG_BLK_DEV_IDEFLOPPY is not set |
| @@ -486,10 +493,10 @@ CONFIG_BLK_DEV_IDECD=y | |||
| 486 | # | 493 | # |
| 487 | # IDE chipset support/bugfixes | 494 | # IDE chipset support/bugfixes |
| 488 | # | 495 | # |
| 489 | # CONFIG_IDE_GENERIC is not set | 496 | CONFIG_IDE_GENERIC=y |
| 490 | # CONFIG_BLK_DEV_CMD640 is not set | 497 | # CONFIG_BLK_DEV_CMD640 is not set |
| 491 | CONFIG_BLK_DEV_IDEPCI=y | 498 | CONFIG_BLK_DEV_IDEPCI=y |
| 492 | CONFIG_IDEPCI_SHARE_IRQ=y | 499 | # CONFIG_IDEPCI_SHARE_IRQ is not set |
| 493 | # CONFIG_BLK_DEV_OFFBOARD is not set | 500 | # CONFIG_BLK_DEV_OFFBOARD is not set |
| 494 | # CONFIG_BLK_DEV_GENERIC is not set | 501 | # CONFIG_BLK_DEV_GENERIC is not set |
| 495 | # CONFIG_BLK_DEV_OPTI621 is not set | 502 | # CONFIG_BLK_DEV_OPTI621 is not set |
| @@ -500,7 +507,7 @@ CONFIG_IDEDMA_PCI_AUTO=y | |||
| 500 | # CONFIG_IDEDMA_ONLYDISK is not set | 507 | # CONFIG_IDEDMA_ONLYDISK is not set |
| 501 | # CONFIG_BLK_DEV_AEC62XX is not set | 508 | # CONFIG_BLK_DEV_AEC62XX is not set |
| 502 | # CONFIG_BLK_DEV_ALI15X3 is not set | 509 | # CONFIG_BLK_DEV_ALI15X3 is not set |
| 503 | # CONFIG_BLK_DEV_AMD74XX is not set | 510 | CONFIG_BLK_DEV_AMD74XX=y |
| 504 | # CONFIG_BLK_DEV_ATIIXP is not set | 511 | # CONFIG_BLK_DEV_ATIIXP is not set |
| 505 | # CONFIG_BLK_DEV_CMD64X is not set | 512 | # CONFIG_BLK_DEV_CMD64X is not set |
| 506 | # CONFIG_BLK_DEV_TRIFLEX is not set | 513 | # CONFIG_BLK_DEV_TRIFLEX is not set |
| @@ -511,7 +518,7 @@ CONFIG_IDEDMA_PCI_AUTO=y | |||
| 511 | # CONFIG_BLK_DEV_HPT34X is not set | 518 | # CONFIG_BLK_DEV_HPT34X is not set |
| 512 | # CONFIG_BLK_DEV_HPT366 is not set | 519 | # CONFIG_BLK_DEV_HPT366 is not set |
| 513 | # CONFIG_BLK_DEV_SC1200 is not set | 520 | # CONFIG_BLK_DEV_SC1200 is not set |
| 514 | # CONFIG_BLK_DEV_PIIX is not set | 521 | CONFIG_BLK_DEV_PIIX=y |
| 515 | # CONFIG_BLK_DEV_IT821X is not set | 522 | # CONFIG_BLK_DEV_IT821X is not set |
| 516 | # CONFIG_BLK_DEV_NS87415 is not set | 523 | # CONFIG_BLK_DEV_NS87415 is not set |
| 517 | # CONFIG_BLK_DEV_PDC202XX_OLD is not set | 524 | # CONFIG_BLK_DEV_PDC202XX_OLD is not set |
| @@ -521,7 +528,7 @@ CONFIG_IDEDMA_PCI_AUTO=y | |||
| 521 | # CONFIG_BLK_DEV_SIS5513 is not set | 528 | # CONFIG_BLK_DEV_SIS5513 is not set |
| 522 | # CONFIG_BLK_DEV_SLC90E66 is not set | 529 | # CONFIG_BLK_DEV_SLC90E66 is not set |
| 523 | # CONFIG_BLK_DEV_TRM290 is not set | 530 | # CONFIG_BLK_DEV_TRM290 is not set |
| 524 | CONFIG_BLK_DEV_VIA82CXXX=y | 531 | # CONFIG_BLK_DEV_VIA82CXXX is not set |
| 525 | # CONFIG_IDE_ARM is not set | 532 | # CONFIG_IDE_ARM is not set |
| 526 | CONFIG_BLK_DEV_IDEDMA=y | 533 | CONFIG_BLK_DEV_IDEDMA=y |
| 527 | # CONFIG_IDEDMA_IVB is not set | 534 | # CONFIG_IDEDMA_IVB is not set |
| @@ -533,6 +540,7 @@ CONFIG_IDEDMA_AUTO=y | |||
| 533 | # | 540 | # |
| 534 | # CONFIG_RAID_ATTRS is not set | 541 | # CONFIG_RAID_ATTRS is not set |
| 535 | CONFIG_SCSI=y | 542 | CONFIG_SCSI=y |
| 543 | CONFIG_SCSI_NETLINK=y | ||
| 536 | # CONFIG_SCSI_PROC_FS is not set | 544 | # CONFIG_SCSI_PROC_FS is not set |
| 537 | 545 | ||
| 538 | # | 546 | # |
| @@ -541,8 +549,9 @@ CONFIG_SCSI=y | |||
| 541 | CONFIG_BLK_DEV_SD=y | 549 | CONFIG_BLK_DEV_SD=y |
| 542 | # CONFIG_CHR_DEV_ST is not set | 550 | # CONFIG_CHR_DEV_ST is not set |
| 543 | # CONFIG_CHR_DEV_OSST is not set | 551 | # CONFIG_CHR_DEV_OSST is not set |
| 544 | # CONFIG_BLK_DEV_SR is not set | 552 | CONFIG_BLK_DEV_SR=y |
| 545 | # CONFIG_CHR_DEV_SG is not set | 553 | # CONFIG_BLK_DEV_SR_VENDOR is not set |
| 554 | CONFIG_CHR_DEV_SG=y | ||
| 546 | # CONFIG_CHR_DEV_SCH is not set | 555 | # CONFIG_CHR_DEV_SCH is not set |
| 547 | 556 | ||
| 548 | # | 557 | # |
| @@ -553,29 +562,44 @@ CONFIG_BLK_DEV_SD=y | |||
| 553 | # CONFIG_SCSI_LOGGING is not set | 562 | # CONFIG_SCSI_LOGGING is not set |
| 554 | 563 | ||
| 555 | # | 564 | # |
| 556 | # SCSI Transport Attributes | 565 | # SCSI Transports |
| 557 | # | 566 | # |
| 558 | # CONFIG_SCSI_SPI_ATTRS is not set | 567 | CONFIG_SCSI_SPI_ATTRS=y |
| 559 | # CONFIG_SCSI_FC_ATTRS is not set | 568 | CONFIG_SCSI_FC_ATTRS=y |
| 560 | # CONFIG_SCSI_ISCSI_ATTRS is not set | 569 | # CONFIG_SCSI_ISCSI_ATTRS is not set |
| 561 | # CONFIG_SCSI_SAS_ATTRS is not set | 570 | # CONFIG_SCSI_SAS_ATTRS is not set |
| 571 | # CONFIG_SCSI_SAS_LIBSAS is not set | ||
| 562 | 572 | ||
| 563 | # | 573 | # |
| 564 | # SCSI low-level drivers | 574 | # SCSI low-level drivers |
| 565 | # | 575 | # |
| 566 | # CONFIG_ISCSI_TCP is not set | 576 | # CONFIG_ISCSI_TCP is not set |
| 567 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set | 577 | CONFIG_BLK_DEV_3W_XXXX_RAID=y |
| 568 | # CONFIG_SCSI_3W_9XXX is not set | 578 | # CONFIG_SCSI_3W_9XXX is not set |
| 569 | # CONFIG_SCSI_ACARD is not set | 579 | # CONFIG_SCSI_ACARD is not set |
| 570 | # CONFIG_SCSI_AACRAID is not set | 580 | # CONFIG_SCSI_AACRAID is not set |
| 571 | # CONFIG_SCSI_AIC7XXX is not set | 581 | CONFIG_SCSI_AIC7XXX=y |
| 582 | CONFIG_AIC7XXX_CMDS_PER_DEVICE=32 | ||
| 583 | CONFIG_AIC7XXX_RESET_DELAY_MS=5000 | ||
| 584 | CONFIG_AIC7XXX_DEBUG_ENABLE=y | ||
| 585 | CONFIG_AIC7XXX_DEBUG_MASK=0 | ||
| 586 | CONFIG_AIC7XXX_REG_PRETTY_PRINT=y | ||
| 572 | # CONFIG_SCSI_AIC7XXX_OLD is not set | 587 | # CONFIG_SCSI_AIC7XXX_OLD is not set |
| 573 | # CONFIG_SCSI_AIC79XX is not set | 588 | CONFIG_SCSI_AIC79XX=y |
| 589 | CONFIG_AIC79XX_CMDS_PER_DEVICE=32 | ||
| 590 | CONFIG_AIC79XX_RESET_DELAY_MS=4000 | ||
| 591 | # CONFIG_AIC79XX_ENABLE_RD_STRM is not set | ||
| 592 | # CONFIG_AIC79XX_DEBUG_ENABLE is not set | ||
| 593 | CONFIG_AIC79XX_DEBUG_MASK=0 | ||
| 594 | # CONFIG_AIC79XX_REG_PRETTY_PRINT is not set | ||
| 595 | # CONFIG_SCSI_AIC94XX is not set | ||
| 574 | # CONFIG_SCSI_DPT_I2O is not set | 596 | # CONFIG_SCSI_DPT_I2O is not set |
| 597 | # CONFIG_SCSI_ADVANSYS is not set | ||
| 598 | # CONFIG_SCSI_ARCMSR is not set | ||
| 575 | # CONFIG_MEGARAID_NEWGEN is not set | 599 | # CONFIG_MEGARAID_NEWGEN is not set |
| 576 | # CONFIG_MEGARAID_LEGACY is not set | 600 | # CONFIG_MEGARAID_LEGACY is not set |
| 577 | # CONFIG_MEGARAID_SAS is not set | 601 | # CONFIG_MEGARAID_SAS is not set |
| 578 | # CONFIG_SCSI_SATA is not set | 602 | # CONFIG_SCSI_HPTIOP is not set |
| 579 | # CONFIG_SCSI_BUSLOGIC is not set | 603 | # CONFIG_SCSI_BUSLOGIC is not set |
| 580 | # CONFIG_SCSI_DMX3191D is not set | 604 | # CONFIG_SCSI_DMX3191D is not set |
| 581 | # CONFIG_SCSI_EATA is not set | 605 | # CONFIG_SCSI_EATA is not set |
| @@ -584,11 +608,9 @@ CONFIG_BLK_DEV_SD=y | |||
| 584 | # CONFIG_SCSI_IPS is not set | 608 | # CONFIG_SCSI_IPS is not set |
| 585 | # CONFIG_SCSI_INITIO is not set | 609 | # CONFIG_SCSI_INITIO is not set |
| 586 | # CONFIG_SCSI_INIA100 is not set | 610 | # CONFIG_SCSI_INIA100 is not set |
| 587 | # CONFIG_SCSI_PPA is not set | 611 | # CONFIG_SCSI_STEX is not set |
| 588 | # CONFIG_SCSI_IMM is not set | ||
| 589 | # CONFIG_SCSI_SYM53C8XX_2 is not set | 612 | # CONFIG_SCSI_SYM53C8XX_2 is not set |
| 590 | # CONFIG_SCSI_IPR is not set | 613 | # CONFIG_SCSI_IPR is not set |
| 591 | # CONFIG_SCSI_QLOGIC_FC is not set | ||
| 592 | # CONFIG_SCSI_QLOGIC_1280 is not set | 614 | # CONFIG_SCSI_QLOGIC_1280 is not set |
| 593 | # CONFIG_SCSI_QLA_FC is not set | 615 | # CONFIG_SCSI_QLA_FC is not set |
| 594 | # CONFIG_SCSI_LPFC is not set | 616 | # CONFIG_SCSI_LPFC is not set |
| @@ -598,22 +620,114 @@ CONFIG_BLK_DEV_SD=y | |||
| 598 | # CONFIG_SCSI_DEBUG is not set | 620 | # CONFIG_SCSI_DEBUG is not set |
| 599 | 621 | ||
| 600 | # | 622 | # |
| 623 | # Serial ATA (prod) and Parallel ATA (experimental) drivers | ||
| 624 | # | ||
| 625 | CONFIG_ATA=y | ||
| 626 | CONFIG_SATA_AHCI=y | ||
| 627 | CONFIG_SATA_SVW=y | ||
| 628 | CONFIG_ATA_PIIX=y | ||
| 629 | # CONFIG_SATA_MV is not set | ||
| 630 | CONFIG_SATA_NV=y | ||
| 631 | # CONFIG_PDC_ADMA is not set | ||
| 632 | # CONFIG_SATA_QSTOR is not set | ||
| 633 | # CONFIG_SATA_PROMISE is not set | ||
| 634 | # CONFIG_SATA_SX4 is not set | ||
| 635 | CONFIG_SATA_SIL=y | ||
| 636 | # CONFIG_SATA_SIL24 is not set | ||
| 637 | # CONFIG_SATA_SIS is not set | ||
| 638 | # CONFIG_SATA_ULI is not set | ||
| 639 | CONFIG_SATA_VIA=y | ||
| 640 | # CONFIG_SATA_VITESSE is not set | ||
| 641 | CONFIG_SATA_INTEL_COMBINED=y | ||
| 642 | # CONFIG_PATA_ALI is not set | ||
| 643 | # CONFIG_PATA_AMD is not set | ||
| 644 | # CONFIG_PATA_ARTOP is not set | ||
| 645 | # CONFIG_PATA_ATIIXP is not set | ||
| 646 | # CONFIG_PATA_CMD64X is not set | ||
| 647 | # CONFIG_PATA_CS5520 is not set | ||
| 648 | # CONFIG_PATA_CS5530 is not set | ||
| 649 | # CONFIG_PATA_CS5535 is not set | ||
| 650 | # CONFIG_PATA_CYPRESS is not set | ||
| 651 | # CONFIG_PATA_EFAR is not set | ||
| 652 | # CONFIG_ATA_GENERIC is not set | ||
| 653 | # CONFIG_PATA_HPT366 is not set | ||
| 654 | # CONFIG_PATA_HPT37X is not set | ||
| 655 | # CONFIG_PATA_HPT3X2N is not set | ||
| 656 | # CONFIG_PATA_HPT3X3 is not set | ||
| 657 | # CONFIG_PATA_IT821X is not set | ||
| 658 | # CONFIG_PATA_JMICRON is not set | ||
| 659 | # CONFIG_PATA_LEGACY is not set | ||
| 660 | # CONFIG_PATA_TRIFLEX is not set | ||
| 661 | # CONFIG_PATA_MPIIX is not set | ||
| 662 | # CONFIG_PATA_OLDPIIX is not set | ||
| 663 | # CONFIG_PATA_NETCELL is not set | ||
| 664 | # CONFIG_PATA_NS87410 is not set | ||
| 665 | # CONFIG_PATA_OPTI is not set | ||
| 666 | # CONFIG_PATA_OPTIDMA is not set | ||
| 667 | # CONFIG_PATA_PDC_OLD is not set | ||
| 668 | # CONFIG_PATA_QDI is not set | ||
| 669 | # CONFIG_PATA_RADISYS is not set | ||
| 670 | # CONFIG_PATA_RZ1000 is not set | ||
| 671 | # CONFIG_PATA_SC1200 is not set | ||
| 672 | # CONFIG_PATA_SERVERWORKS is not set | ||
| 673 | # CONFIG_PATA_PDC2027X is not set | ||
| 674 | # CONFIG_PATA_SIL680 is not set | ||
| 675 | # CONFIG_PATA_SIS is not set | ||
| 676 | # CONFIG_PATA_VIA is not set | ||
| 677 | # CONFIG_PATA_WINBOND is not set | ||
| 678 | |||
| 679 | # | ||
| 601 | # Multi-device support (RAID and LVM) | 680 | # Multi-device support (RAID and LVM) |
| 602 | # | 681 | # |
| 603 | # CONFIG_MD is not set | 682 | CONFIG_MD=y |
| 683 | # CONFIG_BLK_DEV_MD is not set | ||
| 684 | CONFIG_BLK_DEV_DM=y | ||
| 685 | # CONFIG_DM_CRYPT is not set | ||
| 686 | # CONFIG_DM_SNAPSHOT is not set | ||
| 687 | # CONFIG_DM_MIRROR is not set | ||
| 688 | # CONFIG_DM_ZERO is not set | ||
| 689 | # CONFIG_DM_MULTIPATH is not set | ||
| 604 | 690 | ||
| 605 | # | 691 | # |
| 606 | # Fusion MPT device support | 692 | # Fusion MPT device support |
| 607 | # | 693 | # |
| 608 | # CONFIG_FUSION is not set | 694 | CONFIG_FUSION=y |
| 609 | # CONFIG_FUSION_SPI is not set | 695 | CONFIG_FUSION_SPI=y |
| 610 | # CONFIG_FUSION_FC is not set | 696 | # CONFIG_FUSION_FC is not set |
| 611 | # CONFIG_FUSION_SAS is not set | 697 | # CONFIG_FUSION_SAS is not set |
| 698 | CONFIG_FUSION_MAX_SGE=128 | ||
| 699 | # CONFIG_FUSION_CTL is not set | ||
| 612 | 700 | ||
| 613 | # | 701 | # |
| 614 | # IEEE 1394 (FireWire) support | 702 | # IEEE 1394 (FireWire) support |
| 615 | # | 703 | # |
| 616 | # CONFIG_IEEE1394 is not set | 704 | CONFIG_IEEE1394=y |
| 705 | |||
| 706 | # | ||
| 707 | # Subsystem Options | ||
| 708 | # | ||
| 709 | # CONFIG_IEEE1394_VERBOSEDEBUG is not set | ||
| 710 | # CONFIG_IEEE1394_OUI_DB is not set | ||
| 711 | # CONFIG_IEEE1394_EXTRA_CONFIG_ROMS is not set | ||
| 712 | # CONFIG_IEEE1394_EXPORT_FULL_API is not set | ||
| 713 | |||
| 714 | # | ||
| 715 | # Device Drivers | ||
| 716 | # | ||
| 717 | |||
| 718 | # | ||
| 719 | # Texas Instruments PCILynx requires I2C | ||
| 720 | # | ||
| 721 | CONFIG_IEEE1394_OHCI1394=y | ||
| 722 | |||
| 723 | # | ||
| 724 | # Protocol Drivers | ||
| 725 | # | ||
| 726 | # CONFIG_IEEE1394_VIDEO1394 is not set | ||
| 727 | # CONFIG_IEEE1394_SBP2 is not set | ||
| 728 | # CONFIG_IEEE1394_ETH1394 is not set | ||
| 729 | # CONFIG_IEEE1394_DV1394 is not set | ||
| 730 | CONFIG_IEEE1394_RAWIO=y | ||
| 617 | 731 | ||
| 618 | # | 732 | # |
| 619 | # I2O device support | 733 | # I2O device support |
| @@ -652,46 +766,63 @@ CONFIG_MII=y | |||
| 652 | # | 766 | # |
| 653 | # Tulip family network device support | 767 | # Tulip family network device support |
| 654 | # | 768 | # |
| 655 | # CONFIG_NET_TULIP is not set | 769 | CONFIG_NET_TULIP=y |
| 770 | # CONFIG_DE2104X is not set | ||
| 771 | CONFIG_TULIP=y | ||
| 772 | # CONFIG_TULIP_MWI is not set | ||
| 773 | # CONFIG_TULIP_MMIO is not set | ||
| 774 | # CONFIG_TULIP_NAPI is not set | ||
| 775 | # CONFIG_DE4X5 is not set | ||
| 776 | # CONFIG_WINBOND_840 is not set | ||
| 777 | # CONFIG_DM9102 is not set | ||
| 778 | # CONFIG_ULI526X is not set | ||
| 656 | # CONFIG_HP100 is not set | 779 | # CONFIG_HP100 is not set |
| 657 | CONFIG_NET_PCI=y | 780 | CONFIG_NET_PCI=y |
| 658 | # CONFIG_PCNET32 is not set | 781 | # CONFIG_PCNET32 is not set |
| 659 | # CONFIG_AMD8111_ETH is not set | 782 | # CONFIG_AMD8111_ETH is not set |
| 660 | # CONFIG_ADAPTEC_STARFIRE is not set | 783 | # CONFIG_ADAPTEC_STARFIRE is not set |
| 661 | # CONFIG_B44 is not set | 784 | CONFIG_B44=y |
| 662 | # CONFIG_FORCEDETH is not set | 785 | CONFIG_FORCEDETH=y |
| 786 | # CONFIG_FORCEDETH_NAPI is not set | ||
| 663 | # CONFIG_DGRS is not set | 787 | # CONFIG_DGRS is not set |
| 664 | # CONFIG_EEPRO100 is not set | 788 | # CONFIG_EEPRO100 is not set |
| 665 | CONFIG_E100=y | 789 | CONFIG_E100=y |
| 666 | # CONFIG_FEALNX is not set | 790 | # CONFIG_FEALNX is not set |
| 667 | # CONFIG_NATSEMI is not set | 791 | # CONFIG_NATSEMI is not set |
| 668 | # CONFIG_NE2K_PCI is not set | 792 | # CONFIG_NE2K_PCI is not set |
| 669 | # CONFIG_8139CP is not set | 793 | CONFIG_8139CP=y |
| 670 | # CONFIG_8139TOO is not set | 794 | CONFIG_8139TOO=y |
| 795 | # CONFIG_8139TOO_PIO is not set | ||
| 796 | # CONFIG_8139TOO_TUNE_TWISTER is not set | ||
| 797 | # CONFIG_8139TOO_8129 is not set | ||
| 798 | # CONFIG_8139_OLD_RX_RESET is not set | ||
| 671 | # CONFIG_SIS900 is not set | 799 | # CONFIG_SIS900 is not set |
| 672 | # CONFIG_EPIC100 is not set | 800 | # CONFIG_EPIC100 is not set |
| 673 | # CONFIG_SUNDANCE is not set | 801 | # CONFIG_SUNDANCE is not set |
| 674 | # CONFIG_TLAN is not set | 802 | # CONFIG_TLAN is not set |
| 675 | # CONFIG_VIA_RHINE is not set | 803 | # CONFIG_VIA_RHINE is not set |
| 676 | # CONFIG_NET_POCKET is not set | ||
| 677 | 804 | ||
| 678 | # | 805 | # |
| 679 | # Ethernet (1000 Mbit) | 806 | # Ethernet (1000 Mbit) |
| 680 | # | 807 | # |
| 681 | # CONFIG_ACENIC is not set | 808 | # CONFIG_ACENIC is not set |
| 682 | # CONFIG_DL2K is not set | 809 | # CONFIG_DL2K is not set |
| 683 | # CONFIG_E1000 is not set | 810 | CONFIG_E1000=y |
| 811 | # CONFIG_E1000_NAPI is not set | ||
| 812 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 684 | # CONFIG_NS83820 is not set | 813 | # CONFIG_NS83820 is not set |
| 685 | # CONFIG_HAMACHI is not set | 814 | # CONFIG_HAMACHI is not set |
| 686 | # CONFIG_YELLOWFIN is not set | 815 | # CONFIG_YELLOWFIN is not set |
| 687 | # CONFIG_R8169 is not set | 816 | CONFIG_R8169=y |
| 817 | # CONFIG_R8169_NAPI is not set | ||
| 688 | # CONFIG_SIS190 is not set | 818 | # CONFIG_SIS190 is not set |
| 689 | # CONFIG_SKGE is not set | 819 | # CONFIG_SKGE is not set |
| 690 | # CONFIG_SKY2 is not set | 820 | CONFIG_SKY2=y |
| 691 | # CONFIG_SK98LIN is not set | 821 | # CONFIG_SK98LIN is not set |
| 692 | # CONFIG_VIA_VELOCITY is not set | 822 | # CONFIG_VIA_VELOCITY is not set |
| 693 | # CONFIG_TIGON3 is not set | 823 | CONFIG_TIGON3=y |
| 694 | # CONFIG_BNX2 is not set | 824 | CONFIG_BNX2=y |
| 825 | # CONFIG_QLA3XXX is not set | ||
| 695 | 826 | ||
| 696 | # | 827 | # |
| 697 | # Ethernet (10000 Mbit) | 828 | # Ethernet (10000 Mbit) |
| @@ -699,6 +830,7 @@ CONFIG_E100=y | |||
| 699 | # CONFIG_CHELSIO_T1 is not set | 830 | # CONFIG_CHELSIO_T1 is not set |
| 700 | # CONFIG_IXGB is not set | 831 | # CONFIG_IXGB is not set |
| 701 | # CONFIG_S2IO is not set | 832 | # CONFIG_S2IO is not set |
| 833 | # CONFIG_MYRI10GE is not set | ||
| 702 | 834 | ||
| 703 | # | 835 | # |
| 704 | # Token Ring devices | 836 | # Token Ring devices |
| @@ -716,14 +848,15 @@ CONFIG_E100=y | |||
| 716 | # CONFIG_WAN is not set | 848 | # CONFIG_WAN is not set |
| 717 | # CONFIG_FDDI is not set | 849 | # CONFIG_FDDI is not set |
| 718 | # CONFIG_HIPPI is not set | 850 | # CONFIG_HIPPI is not set |
| 719 | # CONFIG_PLIP is not set | ||
| 720 | # CONFIG_PPP is not set | 851 | # CONFIG_PPP is not set |
| 721 | # CONFIG_SLIP is not set | 852 | # CONFIG_SLIP is not set |
| 722 | # CONFIG_NET_FC is not set | 853 | # CONFIG_NET_FC is not set |
| 723 | # CONFIG_SHAPER is not set | 854 | # CONFIG_SHAPER is not set |
| 724 | # CONFIG_NETCONSOLE is not set | 855 | CONFIG_NETCONSOLE=y |
| 725 | # CONFIG_NETPOLL is not set | 856 | CONFIG_NETPOLL=y |
| 726 | # CONFIG_NET_POLL_CONTROLLER is not set | 857 | # CONFIG_NETPOLL_RX is not set |
| 858 | # CONFIG_NETPOLL_TRAP is not set | ||
| 859 | CONFIG_NET_POLL_CONTROLLER=y | ||
| 727 | 860 | ||
| 728 | # | 861 | # |
| 729 | # ISDN subsystem | 862 | # ISDN subsystem |
| @@ -745,8 +878,8 @@ CONFIG_INPUT=y | |||
| 745 | # | 878 | # |
| 746 | CONFIG_INPUT_MOUSEDEV=y | 879 | CONFIG_INPUT_MOUSEDEV=y |
| 747 | CONFIG_INPUT_MOUSEDEV_PSAUX=y | 880 | CONFIG_INPUT_MOUSEDEV_PSAUX=y |
| 748 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=1280 | 881 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 |
| 749 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1024 | 882 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 |
| 750 | # CONFIG_INPUT_JOYDEV is not set | 883 | # CONFIG_INPUT_JOYDEV is not set |
| 751 | # CONFIG_INPUT_TSDEV is not set | 884 | # CONFIG_INPUT_TSDEV is not set |
| 752 | CONFIG_INPUT_EVDEV=y | 885 | CONFIG_INPUT_EVDEV=y |
| @@ -776,7 +909,6 @@ CONFIG_SERIO=y | |||
| 776 | CONFIG_SERIO_I8042=y | 909 | CONFIG_SERIO_I8042=y |
| 777 | # CONFIG_SERIO_SERPORT is not set | 910 | # CONFIG_SERIO_SERPORT is not set |
| 778 | # CONFIG_SERIO_CT82C710 is not set | 911 | # CONFIG_SERIO_CT82C710 is not set |
| 779 | # CONFIG_SERIO_PARKBD is not set | ||
| 780 | # CONFIG_SERIO_PCIPS2 is not set | 912 | # CONFIG_SERIO_PCIPS2 is not set |
| 781 | CONFIG_SERIO_LIBPS2=y | 913 | CONFIG_SERIO_LIBPS2=y |
| 782 | # CONFIG_SERIO_RAW is not set | 914 | # CONFIG_SERIO_RAW is not set |
| @@ -788,14 +920,15 @@ CONFIG_SERIO_LIBPS2=y | |||
| 788 | CONFIG_VT=y | 920 | CONFIG_VT=y |
| 789 | CONFIG_VT_CONSOLE=y | 921 | CONFIG_VT_CONSOLE=y |
| 790 | CONFIG_HW_CONSOLE=y | 922 | CONFIG_HW_CONSOLE=y |
| 923 | # CONFIG_VT_HW_CONSOLE_BINDING is not set | ||
| 791 | # CONFIG_SERIAL_NONSTANDARD is not set | 924 | # CONFIG_SERIAL_NONSTANDARD is not set |
| 792 | 925 | ||
| 793 | # | 926 | # |
| 794 | # Serial drivers | 927 | # Serial drivers |
| 795 | # | 928 | # |
| 796 | CONFIG_SERIAL_8250=y | 929 | CONFIG_SERIAL_8250=y |
| 797 | # CONFIG_SERIAL_8250_CONSOLE is not set | 930 | CONFIG_SERIAL_8250_CONSOLE=y |
| 798 | # CONFIG_SERIAL_8250_ACPI is not set | 931 | CONFIG_SERIAL_8250_PCI=y |
| 799 | CONFIG_SERIAL_8250_NR_UARTS=4 | 932 | CONFIG_SERIAL_8250_NR_UARTS=4 |
| 800 | CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | 933 | CONFIG_SERIAL_8250_RUNTIME_UARTS=4 |
| 801 | # CONFIG_SERIAL_8250_EXTENDED is not set | 934 | # CONFIG_SERIAL_8250_EXTENDED is not set |
| @@ -804,14 +937,11 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | |||
| 804 | # Non-8250 serial port support | 937 | # Non-8250 serial port support |
| 805 | # | 938 | # |
| 806 | CONFIG_SERIAL_CORE=y | 939 | CONFIG_SERIAL_CORE=y |
| 940 | CONFIG_SERIAL_CORE_CONSOLE=y | ||
| 807 | # CONFIG_SERIAL_JSM is not set | 941 | # CONFIG_SERIAL_JSM is not set |
| 808 | CONFIG_UNIX98_PTYS=y | 942 | CONFIG_UNIX98_PTYS=y |
| 809 | CONFIG_LEGACY_PTYS=y | 943 | CONFIG_LEGACY_PTYS=y |
| 810 | CONFIG_LEGACY_PTY_COUNT=256 | 944 | CONFIG_LEGACY_PTY_COUNT=256 |
| 811 | CONFIG_PRINTER=y | ||
| 812 | # CONFIG_LP_CONSOLE is not set | ||
| 813 | # CONFIG_PPDEV is not set | ||
| 814 | # CONFIG_TIPAR is not set | ||
| 815 | 945 | ||
| 816 | # | 946 | # |
| 817 | # IPMI | 947 | # IPMI |
| @@ -822,8 +952,12 @@ CONFIG_PRINTER=y | |||
| 822 | # Watchdog Cards | 952 | # Watchdog Cards |
| 823 | # | 953 | # |
| 824 | # CONFIG_WATCHDOG is not set | 954 | # CONFIG_WATCHDOG is not set |
| 825 | # CONFIG_HW_RANDOM is not set | 955 | CONFIG_HW_RANDOM=y |
| 826 | CONFIG_NVRAM=y | 956 | CONFIG_HW_RANDOM_INTEL=y |
| 957 | CONFIG_HW_RANDOM_AMD=y | ||
| 958 | CONFIG_HW_RANDOM_GEODE=y | ||
| 959 | CONFIG_HW_RANDOM_VIA=y | ||
| 960 | # CONFIG_NVRAM is not set | ||
| 827 | CONFIG_RTC=y | 961 | CONFIG_RTC=y |
| 828 | # CONFIG_DTLK is not set | 962 | # CONFIG_DTLK is not set |
| 829 | # CONFIG_R3964 is not set | 963 | # CONFIG_R3964 is not set |
| @@ -833,31 +967,28 @@ CONFIG_RTC=y | |||
| 833 | # | 967 | # |
| 834 | # Ftape, the floppy tape device driver | 968 | # Ftape, the floppy tape device driver |
| 835 | # | 969 | # |
| 836 | # CONFIG_FTAPE is not set | ||
| 837 | CONFIG_AGP=y | 970 | CONFIG_AGP=y |
| 838 | # CONFIG_AGP_ALI is not set | 971 | # CONFIG_AGP_ALI is not set |
| 839 | # CONFIG_AGP_ATI is not set | 972 | # CONFIG_AGP_ATI is not set |
| 840 | # CONFIG_AGP_AMD is not set | 973 | # CONFIG_AGP_AMD is not set |
| 841 | # CONFIG_AGP_AMD64 is not set | 974 | CONFIG_AGP_AMD64=y |
| 842 | # CONFIG_AGP_INTEL is not set | 975 | CONFIG_AGP_INTEL=y |
| 843 | # CONFIG_AGP_NVIDIA is not set | 976 | # CONFIG_AGP_NVIDIA is not set |
| 844 | # CONFIG_AGP_SIS is not set | 977 | # CONFIG_AGP_SIS is not set |
| 845 | # CONFIG_AGP_SWORKS is not set | 978 | # CONFIG_AGP_SWORKS is not set |
| 846 | CONFIG_AGP_VIA=y | 979 | # CONFIG_AGP_VIA is not set |
| 847 | # CONFIG_AGP_EFFICEON is not set | 980 | # CONFIG_AGP_EFFICEON is not set |
| 848 | CONFIG_DRM=y | 981 | # CONFIG_DRM is not set |
| 849 | # CONFIG_DRM_TDFX is not set | ||
| 850 | # CONFIG_DRM_R128 is not set | ||
| 851 | CONFIG_DRM_RADEON=y | ||
| 852 | # CONFIG_DRM_MGA is not set | ||
| 853 | # CONFIG_DRM_SIS is not set | ||
| 854 | # CONFIG_DRM_VIA is not set | ||
| 855 | # CONFIG_DRM_SAVAGE is not set | ||
| 856 | # CONFIG_MWAVE is not set | 982 | # CONFIG_MWAVE is not set |
| 983 | # CONFIG_PC8736x_GPIO is not set | ||
| 984 | # CONFIG_NSC_GPIO is not set | ||
| 857 | # CONFIG_CS5535_GPIO is not set | 985 | # CONFIG_CS5535_GPIO is not set |
| 858 | # CONFIG_RAW_DRIVER is not set | 986 | CONFIG_RAW_DRIVER=y |
| 859 | # CONFIG_HPET is not set | 987 | CONFIG_MAX_RAW_DEVS=256 |
| 860 | # CONFIG_HANGCHECK_TIMER is not set | 988 | CONFIG_HPET=y |
| 989 | # CONFIG_HPET_RTC_IRQ is not set | ||
| 990 | CONFIG_HPET_MMAP=y | ||
| 991 | CONFIG_HANGCHECK_TIMER=y | ||
| 861 | 992 | ||
| 862 | # | 993 | # |
| 863 | # TPM devices | 994 | # TPM devices |
| @@ -868,59 +999,7 @@ CONFIG_DRM_RADEON=y | |||
| 868 | # | 999 | # |
| 869 | # I2C support | 1000 | # I2C support |
| 870 | # | 1001 | # |
| 871 | CONFIG_I2C=y | 1002 | # CONFIG_I2C is not set |
| 872 | CONFIG_I2C_CHARDEV=y | ||
| 873 | |||
| 874 | # | ||
| 875 | # I2C Algorithms | ||
| 876 | # | ||
| 877 | CONFIG_I2C_ALGOBIT=y | ||
| 878 | # CONFIG_I2C_ALGOPCF is not set | ||
| 879 | # CONFIG_I2C_ALGOPCA is not set | ||
| 880 | |||
| 881 | # | ||
| 882 | # I2C Hardware Bus support | ||
| 883 | # | ||
| 884 | # CONFIG_I2C_ALI1535 is not set | ||
| 885 | # CONFIG_I2C_ALI1563 is not set | ||
| 886 | # CONFIG_I2C_ALI15X3 is not set | ||
| 887 | # CONFIG_I2C_AMD756 is not set | ||
| 888 | # CONFIG_I2C_AMD8111 is not set | ||
| 889 | # CONFIG_I2C_I801 is not set | ||
| 890 | # CONFIG_I2C_I810 is not set | ||
| 891 | # CONFIG_I2C_PIIX4 is not set | ||
| 892 | CONFIG_I2C_ISA=y | ||
| 893 | # CONFIG_I2C_NFORCE2 is not set | ||
| 894 | # CONFIG_I2C_PARPORT is not set | ||
| 895 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
| 896 | # CONFIG_I2C_PROSAVAGE is not set | ||
| 897 | # CONFIG_I2C_SAVAGE4 is not set | ||
| 898 | # CONFIG_SCx200_ACB is not set | ||
| 899 | # CONFIG_I2C_SIS5595 is not set | ||
| 900 | # CONFIG_I2C_SIS630 is not set | ||
| 901 | # CONFIG_I2C_SIS96X is not set | ||
| 902 | # CONFIG_I2C_STUB is not set | ||
| 903 | # CONFIG_I2C_VIA is not set | ||
| 904 | CONFIG_I2C_VIAPRO=y | ||
| 905 | # CONFIG_I2C_VOODOO3 is not set | ||
| 906 | # CONFIG_I2C_PCA_ISA is not set | ||
| 907 | |||
| 908 | # | ||
| 909 | # Miscellaneous I2C Chip support | ||
| 910 | # | ||
| 911 | # CONFIG_SENSORS_DS1337 is not set | ||
| 912 | # CONFIG_SENSORS_DS1374 is not set | ||
| 913 | # CONFIG_SENSORS_EEPROM is not set | ||
| 914 | # CONFIG_SENSORS_PCF8574 is not set | ||
| 915 | # CONFIG_SENSORS_PCA9539 is not set | ||
| 916 | # CONFIG_SENSORS_PCF8591 is not set | ||
| 917 | # CONFIG_SENSORS_RTC8564 is not set | ||
| 918 | # CONFIG_SENSORS_MAX6875 is not set | ||
| 919 | # CONFIG_RTC_X1205_I2C is not set | ||
| 920 | # CONFIG_I2C_DEBUG_CORE is not set | ||
| 921 | # CONFIG_I2C_DEBUG_ALGO is not set | ||
| 922 | # CONFIG_I2C_DEBUG_BUS is not set | ||
| 923 | # CONFIG_I2C_DEBUG_CHIP is not set | ||
| 924 | 1003 | ||
| 925 | # | 1004 | # |
| 926 | # SPI support | 1005 | # SPI support |
| @@ -931,51 +1010,12 @@ CONFIG_I2C_VIAPRO=y | |||
| 931 | # | 1010 | # |
| 932 | # Dallas's 1-wire bus | 1011 | # Dallas's 1-wire bus |
| 933 | # | 1012 | # |
| 934 | # CONFIG_W1 is not set | ||
| 935 | 1013 | ||
| 936 | # | 1014 | # |
| 937 | # Hardware Monitoring support | 1015 | # Hardware Monitoring support |
| 938 | # | 1016 | # |
| 939 | CONFIG_HWMON=y | 1017 | # CONFIG_HWMON is not set |
| 940 | CONFIG_HWMON_VID=y | 1018 | # CONFIG_HWMON_VID is not set |
| 941 | # CONFIG_SENSORS_ADM1021 is not set | ||
| 942 | # CONFIG_SENSORS_ADM1025 is not set | ||
| 943 | # CONFIG_SENSORS_ADM1026 is not set | ||
| 944 | # CONFIG_SENSORS_ADM1031 is not set | ||
| 945 | # CONFIG_SENSORS_ADM9240 is not set | ||
| 946 | # CONFIG_SENSORS_ASB100 is not set | ||
| 947 | # CONFIG_SENSORS_ATXP1 is not set | ||
| 948 | # CONFIG_SENSORS_DS1621 is not set | ||
| 949 | # CONFIG_SENSORS_F71805F is not set | ||
| 950 | # CONFIG_SENSORS_FSCHER is not set | ||
| 951 | # CONFIG_SENSORS_FSCPOS is not set | ||
| 952 | # CONFIG_SENSORS_GL518SM is not set | ||
| 953 | # CONFIG_SENSORS_GL520SM is not set | ||
| 954 | CONFIG_SENSORS_IT87=y | ||
| 955 | # CONFIG_SENSORS_LM63 is not set | ||
| 956 | # CONFIG_SENSORS_LM75 is not set | ||
| 957 | # CONFIG_SENSORS_LM77 is not set | ||
| 958 | # CONFIG_SENSORS_LM78 is not set | ||
| 959 | # CONFIG_SENSORS_LM80 is not set | ||
| 960 | # CONFIG_SENSORS_LM83 is not set | ||
| 961 | # CONFIG_SENSORS_LM85 is not set | ||
| 962 | # CONFIG_SENSORS_LM87 is not set | ||
| 963 | # CONFIG_SENSORS_LM90 is not set | ||
| 964 | # CONFIG_SENSORS_LM92 is not set | ||
| 965 | # CONFIG_SENSORS_MAX1619 is not set | ||
| 966 | # CONFIG_SENSORS_PC87360 is not set | ||
| 967 | # CONFIG_SENSORS_SIS5595 is not set | ||
| 968 | # CONFIG_SENSORS_SMSC47M1 is not set | ||
| 969 | # CONFIG_SENSORS_SMSC47B397 is not set | ||
| 970 | # CONFIG_SENSORS_VIA686A is not set | ||
| 971 | # CONFIG_SENSORS_VT8231 is not set | ||
| 972 | # CONFIG_SENSORS_W83781D is not set | ||
| 973 | # CONFIG_SENSORS_W83792D is not set | ||
| 974 | # CONFIG_SENSORS_W83L785TS is not set | ||
| 975 | # CONFIG_SENSORS_W83627HF is not set | ||
| 976 | # CONFIG_SENSORS_W83627EHF is not set | ||
| 977 | # CONFIG_SENSORS_HDAPS is not set | ||
| 978 | # CONFIG_HWMON_DEBUG_CHIP is not set | ||
| 979 | 1019 | ||
| 980 | # | 1020 | # |
| 981 | # Misc devices | 1021 | # Misc devices |
| @@ -983,117 +1023,31 @@ CONFIG_SENSORS_IT87=y | |||
| 983 | # CONFIG_IBM_ASM is not set | 1023 | # CONFIG_IBM_ASM is not set |
| 984 | 1024 | ||
| 985 | # | 1025 | # |
| 986 | # Multimedia Capabilities Port drivers | ||
| 987 | # | ||
| 988 | |||
| 989 | # | ||
| 990 | # Multimedia devices | 1026 | # Multimedia devices |
| 991 | # | 1027 | # |
| 992 | CONFIG_VIDEO_DEV=y | 1028 | # CONFIG_VIDEO_DEV is not set |
| 993 | 1029 | CONFIG_VIDEO_V4L2=y | |
| 994 | # | ||
| 995 | # Video For Linux | ||
| 996 | # | ||
| 997 | |||
| 998 | # | ||
| 999 | # Video Adapters | ||
| 1000 | # | ||
| 1001 | # CONFIG_VIDEO_ADV_DEBUG is not set | ||
| 1002 | # CONFIG_VIDEO_BT848 is not set | ||
| 1003 | # CONFIG_VIDEO_BWQCAM is not set | ||
| 1004 | # CONFIG_VIDEO_CQCAM is not set | ||
| 1005 | # CONFIG_VIDEO_W9966 is not set | ||
| 1006 | # CONFIG_VIDEO_CPIA is not set | ||
| 1007 | # CONFIG_VIDEO_SAA5246A is not set | ||
| 1008 | # CONFIG_VIDEO_SAA5249 is not set | ||
| 1009 | # CONFIG_TUNER_3036 is not set | ||
| 1010 | # CONFIG_VIDEO_STRADIS is not set | ||
| 1011 | # CONFIG_VIDEO_ZORAN is not set | ||
| 1012 | CONFIG_VIDEO_SAA7134=y | ||
| 1013 | # CONFIG_VIDEO_SAA7134_ALSA is not set | ||
| 1014 | # CONFIG_VIDEO_MXB is not set | ||
| 1015 | # CONFIG_VIDEO_DPC is not set | ||
| 1016 | # CONFIG_VIDEO_HEXIUM_ORION is not set | ||
| 1017 | # CONFIG_VIDEO_HEXIUM_GEMINI is not set | ||
| 1018 | # CONFIG_VIDEO_CX88 is not set | ||
| 1019 | # CONFIG_VIDEO_EM28XX is not set | ||
| 1020 | # CONFIG_VIDEO_OVCAMCHIP is not set | ||
| 1021 | # CONFIG_VIDEO_AUDIO_DECODER is not set | ||
| 1022 | # CONFIG_VIDEO_DECODER is not set | ||
| 1023 | |||
| 1024 | # | ||
| 1025 | # Radio Adapters | ||
| 1026 | # | ||
| 1027 | # CONFIG_RADIO_GEMTEK_PCI is not set | ||
| 1028 | # CONFIG_RADIO_MAXIRADIO is not set | ||
| 1029 | # CONFIG_RADIO_MAESTRO is not set | ||
| 1030 | 1030 | ||
| 1031 | # | 1031 | # |
| 1032 | # Digital Video Broadcasting Devices | 1032 | # Digital Video Broadcasting Devices |
| 1033 | # | 1033 | # |
| 1034 | # CONFIG_DVB is not set | 1034 | # CONFIG_DVB is not set |
| 1035 | CONFIG_VIDEO_TUNER=y | 1035 | # CONFIG_USB_DABUSB is not set |
| 1036 | CONFIG_VIDEO_BUF=y | ||
| 1037 | CONFIG_VIDEO_IR=y | ||
| 1038 | 1036 | ||
| 1039 | # | 1037 | # |
| 1040 | # Graphics support | 1038 | # Graphics support |
| 1041 | # | 1039 | # |
| 1042 | CONFIG_FB=y | 1040 | CONFIG_FIRMWARE_EDID=y |
| 1043 | CONFIG_FB_CFB_FILLRECT=y | 1041 | # CONFIG_FB is not set |
| 1044 | CONFIG_FB_CFB_COPYAREA=y | ||
| 1045 | CONFIG_FB_CFB_IMAGEBLIT=y | ||
| 1046 | # CONFIG_FB_MACMODES is not set | ||
| 1047 | CONFIG_FB_MODE_HELPERS=y | ||
| 1048 | # CONFIG_FB_TILEBLITTING is not set | ||
| 1049 | # CONFIG_FB_CIRRUS is not set | ||
| 1050 | # CONFIG_FB_PM2 is not set | ||
| 1051 | # CONFIG_FB_CYBER2000 is not set | ||
| 1052 | # CONFIG_FB_ARC is not set | ||
| 1053 | # CONFIG_FB_ASILIANT is not set | ||
| 1054 | # CONFIG_FB_IMSTT is not set | ||
| 1055 | # CONFIG_FB_VGA16 is not set | ||
| 1056 | # CONFIG_FB_VESA is not set | ||
| 1057 | CONFIG_VIDEO_SELECT=y | ||
| 1058 | # CONFIG_FB_HGA is not set | ||
| 1059 | # CONFIG_FB_S1D13XXX is not set | ||
| 1060 | # CONFIG_FB_NVIDIA is not set | ||
| 1061 | # CONFIG_FB_RIVA is not set | ||
| 1062 | # CONFIG_FB_I810 is not set | ||
| 1063 | # CONFIG_FB_INTEL is not set | ||
| 1064 | # CONFIG_FB_MATROX is not set | ||
| 1065 | # CONFIG_FB_RADEON_OLD is not set | ||
| 1066 | CONFIG_FB_RADEON=y | ||
| 1067 | CONFIG_FB_RADEON_I2C=y | ||
| 1068 | # CONFIG_FB_RADEON_DEBUG is not set | ||
| 1069 | # CONFIG_FB_ATY128 is not set | ||
| 1070 | # CONFIG_FB_ATY is not set | ||
| 1071 | # CONFIG_FB_SAVAGE is not set | ||
| 1072 | # CONFIG_FB_SIS is not set | ||
| 1073 | # CONFIG_FB_NEOMAGIC is not set | ||
| 1074 | # CONFIG_FB_KYRO is not set | ||
| 1075 | # CONFIG_FB_3DFX is not set | ||
| 1076 | # CONFIG_FB_VOODOO1 is not set | ||
| 1077 | # CONFIG_FB_CYBLA is not set | ||
| 1078 | # CONFIG_FB_TRIDENT is not set | ||
| 1079 | # CONFIG_FB_GEODE is not set | ||
| 1080 | # CONFIG_FB_VIRTUAL is not set | ||
| 1081 | 1042 | ||
| 1082 | # | 1043 | # |
| 1083 | # Console display driver support | 1044 | # Console display driver support |
| 1084 | # | 1045 | # |
| 1085 | CONFIG_VGA_CONSOLE=y | 1046 | CONFIG_VGA_CONSOLE=y |
| 1047 | CONFIG_VGACON_SOFT_SCROLLBACK=y | ||
| 1048 | CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=128 | ||
| 1049 | CONFIG_VIDEO_SELECT=y | ||
| 1086 | CONFIG_DUMMY_CONSOLE=y | 1050 | CONFIG_DUMMY_CONSOLE=y |
| 1087 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
| 1088 | # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set | ||
| 1089 | # CONFIG_FONTS is not set | ||
| 1090 | CONFIG_FONT_8x8=y | ||
| 1091 | CONFIG_FONT_8x16=y | ||
| 1092 | |||
| 1093 | # | ||
| 1094 | # Logo configuration | ||
| 1095 | # | ||
| 1096 | # CONFIG_LOGO is not set | ||
| 1097 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | 1051 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set |
| 1098 | 1052 | ||
| 1099 | # | 1053 | # |
| @@ -1104,97 +1058,30 @@ CONFIG_SOUND=y | |||
| 1104 | # | 1058 | # |
| 1105 | # Advanced Linux Sound Architecture | 1059 | # Advanced Linux Sound Architecture |
| 1106 | # | 1060 | # |
| 1107 | CONFIG_SND=y | 1061 | # CONFIG_SND is not set |
| 1108 | CONFIG_SND_TIMER=y | ||
| 1109 | CONFIG_SND_PCM=y | ||
| 1110 | CONFIG_SND_RAWMIDI=y | ||
| 1111 | CONFIG_SND_SEQUENCER=y | ||
| 1112 | # CONFIG_SND_SEQ_DUMMY is not set | ||
| 1113 | # CONFIG_SND_MIXER_OSS is not set | ||
| 1114 | # CONFIG_SND_PCM_OSS is not set | ||
| 1115 | # CONFIG_SND_SEQUENCER_OSS is not set | ||
| 1116 | CONFIG_SND_RTCTIMER=y | ||
| 1117 | CONFIG_SND_SEQ_RTCTIMER_DEFAULT=y | ||
| 1118 | # CONFIG_SND_DYNAMIC_MINORS is not set | ||
| 1119 | # CONFIG_SND_SUPPORT_OLD_API is not set | ||
| 1120 | # CONFIG_SND_VERBOSE_PRINTK is not set | ||
| 1121 | # CONFIG_SND_DEBUG is not set | ||
| 1122 | |||
| 1123 | # | ||
| 1124 | # Generic devices | ||
| 1125 | # | ||
| 1126 | CONFIG_SND_MPU401_UART=y | ||
| 1127 | CONFIG_SND_AC97_CODEC=y | ||
| 1128 | CONFIG_SND_AC97_BUS=y | ||
| 1129 | # CONFIG_SND_DUMMY is not set | ||
| 1130 | # CONFIG_SND_VIRMIDI is not set | ||
| 1131 | # CONFIG_SND_MTPAV is not set | ||
| 1132 | # CONFIG_SND_SERIAL_U16550 is not set | ||
| 1133 | # CONFIG_SND_MPU401 is not set | ||
| 1134 | |||
| 1135 | # | ||
| 1136 | # PCI devices | ||
| 1137 | # | ||
| 1138 | # CONFIG_SND_AD1889 is not set | ||
| 1139 | # CONFIG_SND_ALS4000 is not set | ||
| 1140 | # CONFIG_SND_ALI5451 is not set | ||
| 1141 | # CONFIG_SND_ATIIXP is not set | ||
| 1142 | # CONFIG_SND_ATIIXP_MODEM is not set | ||
| 1143 | # CONFIG_SND_AU8810 is not set | ||
| 1144 | # CONFIG_SND_AU8820 is not set | ||
| 1145 | # CONFIG_SND_AU8830 is not set | ||
| 1146 | # CONFIG_SND_AZT3328 is not set | ||
| 1147 | # CONFIG_SND_BT87X is not set | ||
| 1148 | # CONFIG_SND_CA0106 is not set | ||
| 1149 | # CONFIG_SND_CMIPCI is not set | ||
| 1150 | # CONFIG_SND_CS4281 is not set | ||
| 1151 | # CONFIG_SND_CS46XX is not set | ||
| 1152 | # CONFIG_SND_CS5535AUDIO is not set | ||
| 1153 | # CONFIG_SND_EMU10K1 is not set | ||
| 1154 | # CONFIG_SND_EMU10K1X is not set | ||
| 1155 | # CONFIG_SND_ENS1370 is not set | ||
| 1156 | # CONFIG_SND_ENS1371 is not set | ||
| 1157 | # CONFIG_SND_ES1938 is not set | ||
| 1158 | # CONFIG_SND_ES1968 is not set | ||
| 1159 | # CONFIG_SND_FM801 is not set | ||
| 1160 | # CONFIG_SND_HDA_INTEL is not set | ||
| 1161 | # CONFIG_SND_HDSP is not set | ||
| 1162 | # CONFIG_SND_HDSPM is not set | ||
| 1163 | # CONFIG_SND_ICE1712 is not set | ||
| 1164 | # CONFIG_SND_ICE1724 is not set | ||
| 1165 | # CONFIG_SND_INTEL8X0 is not set | ||
| 1166 | # CONFIG_SND_INTEL8X0M is not set | ||
| 1167 | # CONFIG_SND_KORG1212 is not set | ||
| 1168 | # CONFIG_SND_MAESTRO3 is not set | ||
| 1169 | # CONFIG_SND_MIXART is not set | ||
| 1170 | # CONFIG_SND_NM256 is not set | ||
| 1171 | # CONFIG_SND_PCXHR is not set | ||
| 1172 | # CONFIG_SND_RME32 is not set | ||
| 1173 | # CONFIG_SND_RME96 is not set | ||
| 1174 | # CONFIG_SND_RME9652 is not set | ||
| 1175 | # CONFIG_SND_SONICVIBES is not set | ||
| 1176 | # CONFIG_SND_TRIDENT is not set | ||
| 1177 | CONFIG_SND_VIA82XX=y | ||
| 1178 | # CONFIG_SND_VIA82XX_MODEM is not set | ||
| 1179 | # CONFIG_SND_VX222 is not set | ||
| 1180 | # CONFIG_SND_YMFPCI is not set | ||
| 1181 | |||
| 1182 | # | ||
| 1183 | # USB devices | ||
| 1184 | # | ||
| 1185 | # CONFIG_SND_USB_AUDIO is not set | ||
| 1186 | # CONFIG_SND_USB_USX2Y is not set | ||
| 1187 | 1062 | ||
| 1188 | # | 1063 | # |
| 1189 | # Open Sound System | 1064 | # Open Sound System |
| 1190 | # | 1065 | # |
| 1191 | # CONFIG_SOUND_PRIME is not set | 1066 | CONFIG_SOUND_PRIME=y |
| 1067 | CONFIG_OSS_OBSOLETE_DRIVER=y | ||
| 1068 | # CONFIG_SOUND_BT878 is not set | ||
| 1069 | # CONFIG_SOUND_EMU10K1 is not set | ||
| 1070 | # CONFIG_SOUND_FUSION is not set | ||
| 1071 | # CONFIG_SOUND_ES1371 is not set | ||
| 1072 | CONFIG_SOUND_ICH=y | ||
| 1073 | # CONFIG_SOUND_TRIDENT is not set | ||
| 1074 | # CONFIG_SOUND_MSNDCLAS is not set | ||
| 1075 | # CONFIG_SOUND_MSNDPIN is not set | ||
| 1076 | # CONFIG_SOUND_VIA82CXXX is not set | ||
| 1077 | # CONFIG_SOUND_OSS is not set | ||
| 1192 | 1078 | ||
| 1193 | # | 1079 | # |
| 1194 | # USB support | 1080 | # USB support |
| 1195 | # | 1081 | # |
| 1196 | CONFIG_USB_ARCH_HAS_HCD=y | 1082 | CONFIG_USB_ARCH_HAS_HCD=y |
| 1197 | CONFIG_USB_ARCH_HAS_OHCI=y | 1083 | CONFIG_USB_ARCH_HAS_OHCI=y |
| 1084 | CONFIG_USB_ARCH_HAS_EHCI=y | ||
| 1198 | CONFIG_USB=y | 1085 | CONFIG_USB=y |
| 1199 | # CONFIG_USB_DEBUG is not set | 1086 | # CONFIG_USB_DEBUG is not set |
| 1200 | 1087 | ||
| @@ -1213,17 +1100,19 @@ CONFIG_USB_DEVICEFS=y | |||
| 1213 | CONFIG_USB_EHCI_HCD=y | 1100 | CONFIG_USB_EHCI_HCD=y |
| 1214 | # CONFIG_USB_EHCI_SPLIT_ISO is not set | 1101 | # CONFIG_USB_EHCI_SPLIT_ISO is not set |
| 1215 | # CONFIG_USB_EHCI_ROOT_HUB_TT is not set | 1102 | # CONFIG_USB_EHCI_ROOT_HUB_TT is not set |
| 1103 | # CONFIG_USB_EHCI_TT_NEWSCHED is not set | ||
| 1216 | # CONFIG_USB_ISP116X_HCD is not set | 1104 | # CONFIG_USB_ISP116X_HCD is not set |
| 1217 | # CONFIG_USB_OHCI_HCD is not set | 1105 | CONFIG_USB_OHCI_HCD=y |
| 1106 | # CONFIG_USB_OHCI_BIG_ENDIAN is not set | ||
| 1107 | CONFIG_USB_OHCI_LITTLE_ENDIAN=y | ||
| 1218 | CONFIG_USB_UHCI_HCD=y | 1108 | CONFIG_USB_UHCI_HCD=y |
| 1219 | # CONFIG_USB_SL811_HCD is not set | 1109 | # CONFIG_USB_SL811_HCD is not set |
| 1220 | 1110 | ||
| 1221 | # | 1111 | # |
| 1222 | # USB Device Class drivers | 1112 | # USB Device Class drivers |
| 1223 | # | 1113 | # |
| 1224 | # CONFIG_OBSOLETE_OSS_USB_DRIVER is not set | ||
| 1225 | # CONFIG_USB_ACM is not set | 1114 | # CONFIG_USB_ACM is not set |
| 1226 | # CONFIG_USB_PRINTER is not set | 1115 | CONFIG_USB_PRINTER=y |
| 1227 | 1116 | ||
| 1228 | # | 1117 | # |
| 1229 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' | 1118 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' |
| @@ -1248,21 +1137,17 @@ CONFIG_USB_STORAGE=y | |||
| 1248 | # | 1137 | # |
| 1249 | # USB Input Devices | 1138 | # USB Input Devices |
| 1250 | # | 1139 | # |
| 1251 | # CONFIG_USB_HID is not set | 1140 | CONFIG_USB_HID=y |
| 1252 | 1141 | CONFIG_USB_HIDINPUT=y | |
| 1253 | # | 1142 | # CONFIG_USB_HIDINPUT_POWERBOOK is not set |
| 1254 | # USB HID Boot Protocol drivers | 1143 | # CONFIG_HID_FF is not set |
| 1255 | # | 1144 | # CONFIG_USB_HIDDEV is not set |
| 1256 | # CONFIG_USB_KBD is not set | ||
| 1257 | # CONFIG_USB_MOUSE is not set | ||
| 1258 | # CONFIG_USB_AIPTEK is not set | 1145 | # CONFIG_USB_AIPTEK is not set |
| 1259 | # CONFIG_USB_WACOM is not set | 1146 | # CONFIG_USB_WACOM is not set |
| 1260 | # CONFIG_USB_ACECAD is not set | 1147 | # CONFIG_USB_ACECAD is not set |
| 1261 | # CONFIG_USB_KBTAB is not set | 1148 | # CONFIG_USB_KBTAB is not set |
| 1262 | # CONFIG_USB_POWERMATE is not set | 1149 | # CONFIG_USB_POWERMATE is not set |
| 1263 | # CONFIG_USB_MTOUCH is not set | 1150 | # CONFIG_USB_TOUCHSCREEN is not set |
| 1264 | # CONFIG_USB_ITMTOUCH is not set | ||
| 1265 | # CONFIG_USB_EGALAX is not set | ||
| 1266 | # CONFIG_USB_YEALINK is not set | 1151 | # CONFIG_USB_YEALINK is not set |
| 1267 | # CONFIG_USB_XPAD is not set | 1152 | # CONFIG_USB_XPAD is not set |
| 1268 | # CONFIG_USB_ATI_REMOTE is not set | 1153 | # CONFIG_USB_ATI_REMOTE is not set |
| @@ -1277,21 +1162,6 @@ CONFIG_USB_STORAGE=y | |||
| 1277 | # CONFIG_USB_MICROTEK is not set | 1162 | # CONFIG_USB_MICROTEK is not set |
| 1278 | 1163 | ||
| 1279 | # | 1164 | # |
| 1280 | # USB Multimedia devices | ||
| 1281 | # | ||
| 1282 | # CONFIG_USB_DABUSB is not set | ||
| 1283 | # CONFIG_USB_VICAM is not set | ||
| 1284 | # CONFIG_USB_DSBR is not set | ||
| 1285 | # CONFIG_USB_ET61X251 is not set | ||
| 1286 | # CONFIG_USB_IBMCAM is not set | ||
| 1287 | # CONFIG_USB_KONICAWC is not set | ||
| 1288 | # CONFIG_USB_OV511 is not set | ||
| 1289 | # CONFIG_USB_SE401 is not set | ||
| 1290 | # CONFIG_USB_SN9C102 is not set | ||
| 1291 | # CONFIG_USB_STV680 is not set | ||
| 1292 | # CONFIG_USB_PWC is not set | ||
| 1293 | |||
| 1294 | # | ||
| 1295 | # USB Network Adapters | 1165 | # USB Network Adapters |
| 1296 | # | 1166 | # |
| 1297 | # CONFIG_USB_CATC is not set | 1167 | # CONFIG_USB_CATC is not set |
| @@ -1299,12 +1169,11 @@ CONFIG_USB_STORAGE=y | |||
| 1299 | # CONFIG_USB_PEGASUS is not set | 1169 | # CONFIG_USB_PEGASUS is not set |
| 1300 | # CONFIG_USB_RTL8150 is not set | 1170 | # CONFIG_USB_RTL8150 is not set |
| 1301 | # CONFIG_USB_USBNET is not set | 1171 | # CONFIG_USB_USBNET is not set |
| 1302 | # CONFIG_USB_MON is not set | 1172 | CONFIG_USB_MON=y |
| 1303 | 1173 | ||
| 1304 | # | 1174 | # |
| 1305 | # USB port drivers | 1175 | # USB port drivers |
| 1306 | # | 1176 | # |
| 1307 | # CONFIG_USB_USS720 is not set | ||
| 1308 | 1177 | ||
| 1309 | # | 1178 | # |
| 1310 | # USB Serial Converter support | 1179 | # USB Serial Converter support |
| @@ -1321,10 +1190,12 @@ CONFIG_USB_STORAGE=y | |||
| 1321 | # CONFIG_USB_LEGOTOWER is not set | 1190 | # CONFIG_USB_LEGOTOWER is not set |
| 1322 | # CONFIG_USB_LCD is not set | 1191 | # CONFIG_USB_LCD is not set |
| 1323 | # CONFIG_USB_LED is not set | 1192 | # CONFIG_USB_LED is not set |
| 1193 | # CONFIG_USB_CYPRESS_CY7C63 is not set | ||
| 1324 | # CONFIG_USB_CYTHERM is not set | 1194 | # CONFIG_USB_CYTHERM is not set |
| 1325 | # CONFIG_USB_PHIDGETKIT is not set | 1195 | # CONFIG_USB_PHIDGETKIT is not set |
| 1326 | # CONFIG_USB_PHIDGETSERVO is not set | 1196 | # CONFIG_USB_PHIDGETSERVO is not set |
| 1327 | # CONFIG_USB_IDMOUSE is not set | 1197 | # CONFIG_USB_IDMOUSE is not set |
| 1198 | # CONFIG_USB_APPLEDISPLAY is not set | ||
| 1328 | # CONFIG_USB_SISUSBVGA is not set | 1199 | # CONFIG_USB_SISUSBVGA is not set |
| 1329 | # CONFIG_USB_LD is not set | 1200 | # CONFIG_USB_LD is not set |
| 1330 | # CONFIG_USB_TEST is not set | 1201 | # CONFIG_USB_TEST is not set |
| @@ -1344,56 +1215,96 @@ CONFIG_USB_STORAGE=y | |||
| 1344 | # CONFIG_MMC is not set | 1215 | # CONFIG_MMC is not set |
| 1345 | 1216 | ||
| 1346 | # | 1217 | # |
| 1218 | # LED devices | ||
| 1219 | # | ||
| 1220 | # CONFIG_NEW_LEDS is not set | ||
| 1221 | |||
| 1222 | # | ||
| 1223 | # LED drivers | ||
| 1224 | # | ||
| 1225 | |||
| 1226 | # | ||
| 1227 | # LED Triggers | ||
| 1228 | # | ||
| 1229 | |||
| 1230 | # | ||
| 1347 | # InfiniBand support | 1231 | # InfiniBand support |
| 1348 | # | 1232 | # |
| 1349 | # CONFIG_INFINIBAND is not set | 1233 | # CONFIG_INFINIBAND is not set |
| 1350 | 1234 | ||
| 1351 | # | 1235 | # |
| 1352 | # SN Devices | 1236 | # EDAC - error detection and reporting (RAS) (EXPERIMENTAL) |
| 1237 | # | ||
| 1238 | # CONFIG_EDAC is not set | ||
| 1239 | |||
| 1240 | # | ||
| 1241 | # Real Time Clock | ||
| 1353 | # | 1242 | # |
| 1243 | # CONFIG_RTC_CLASS is not set | ||
| 1354 | 1244 | ||
| 1355 | # | 1245 | # |
| 1356 | # EDAC - error detection and reporting (RAS) | 1246 | # DMA Engine support |
| 1247 | # | ||
| 1248 | # CONFIG_DMA_ENGINE is not set | ||
| 1249 | |||
| 1250 | # | ||
| 1251 | # DMA Clients | ||
| 1252 | # | ||
| 1253 | |||
| 1254 | # | ||
| 1255 | # DMA Devices | ||
| 1357 | # | 1256 | # |
| 1358 | # CONFIG_EDAC is not set | ||
| 1359 | 1257 | ||
| 1360 | # | 1258 | # |
| 1361 | # File systems | 1259 | # File systems |
| 1362 | # | 1260 | # |
| 1363 | CONFIG_EXT2_FS=y | 1261 | CONFIG_EXT2_FS=y |
| 1364 | # CONFIG_EXT2_FS_XATTR is not set | 1262 | CONFIG_EXT2_FS_XATTR=y |
| 1263 | CONFIG_EXT2_FS_POSIX_ACL=y | ||
| 1264 | # CONFIG_EXT2_FS_SECURITY is not set | ||
| 1365 | # CONFIG_EXT2_FS_XIP is not set | 1265 | # CONFIG_EXT2_FS_XIP is not set |
| 1366 | # CONFIG_EXT3_FS is not set | 1266 | CONFIG_EXT3_FS=y |
| 1367 | # CONFIG_REISERFS_FS is not set | 1267 | CONFIG_EXT3_FS_XATTR=y |
| 1268 | CONFIG_EXT3_FS_POSIX_ACL=y | ||
| 1269 | # CONFIG_EXT3_FS_SECURITY is not set | ||
| 1270 | CONFIG_JBD=y | ||
| 1271 | # CONFIG_JBD_DEBUG is not set | ||
| 1272 | CONFIG_FS_MBCACHE=y | ||
| 1273 | CONFIG_REISERFS_FS=y | ||
| 1274 | # CONFIG_REISERFS_CHECK is not set | ||
| 1275 | # CONFIG_REISERFS_PROC_INFO is not set | ||
| 1276 | CONFIG_REISERFS_FS_XATTR=y | ||
| 1277 | CONFIG_REISERFS_FS_POSIX_ACL=y | ||
| 1278 | # CONFIG_REISERFS_FS_SECURITY is not set | ||
| 1368 | # CONFIG_JFS_FS is not set | 1279 | # CONFIG_JFS_FS is not set |
| 1369 | # CONFIG_FS_POSIX_ACL is not set | 1280 | CONFIG_FS_POSIX_ACL=y |
| 1370 | # CONFIG_XFS_FS is not set | 1281 | # CONFIG_XFS_FS is not set |
| 1371 | # CONFIG_OCFS2_FS is not set | 1282 | # CONFIG_OCFS2_FS is not set |
| 1372 | # CONFIG_MINIX_FS is not set | 1283 | # CONFIG_MINIX_FS is not set |
| 1373 | # CONFIG_ROMFS_FS is not set | 1284 | # CONFIG_ROMFS_FS is not set |
| 1374 | # CONFIG_INOTIFY is not set | 1285 | CONFIG_INOTIFY=y |
| 1286 | CONFIG_INOTIFY_USER=y | ||
| 1375 | # CONFIG_QUOTA is not set | 1287 | # CONFIG_QUOTA is not set |
| 1376 | CONFIG_DNOTIFY=y | 1288 | CONFIG_DNOTIFY=y |
| 1377 | # CONFIG_AUTOFS_FS is not set | 1289 | # CONFIG_AUTOFS_FS is not set |
| 1378 | # CONFIG_AUTOFS4_FS is not set | 1290 | CONFIG_AUTOFS4_FS=y |
| 1379 | # CONFIG_FUSE_FS is not set | 1291 | # CONFIG_FUSE_FS is not set |
| 1380 | 1292 | ||
| 1381 | # | 1293 | # |
| 1382 | # CD-ROM/DVD Filesystems | 1294 | # CD-ROM/DVD Filesystems |
| 1383 | # | 1295 | # |
| 1384 | CONFIG_ISO9660_FS=y | 1296 | CONFIG_ISO9660_FS=y |
| 1385 | CONFIG_JOLIET=y | 1297 | # CONFIG_JOLIET is not set |
| 1386 | CONFIG_ZISOFS=y | 1298 | # CONFIG_ZISOFS is not set |
| 1387 | CONFIG_ZISOFS_FS=y | ||
| 1388 | # CONFIG_UDF_FS is not set | 1299 | # CONFIG_UDF_FS is not set |
| 1389 | 1300 | ||
| 1390 | # | 1301 | # |
| 1391 | # DOS/FAT/NT Filesystems | 1302 | # DOS/FAT/NT Filesystems |
| 1392 | # | 1303 | # |
| 1393 | CONFIG_FAT_FS=y | 1304 | CONFIG_FAT_FS=y |
| 1394 | # CONFIG_MSDOS_FS is not set | 1305 | CONFIG_MSDOS_FS=y |
| 1395 | CONFIG_VFAT_FS=y | 1306 | CONFIG_VFAT_FS=y |
| 1396 | CONFIG_FAT_DEFAULT_CODEPAGE=850 | 1307 | CONFIG_FAT_DEFAULT_CODEPAGE=437 |
| 1397 | CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" | 1308 | CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" |
| 1398 | # CONFIG_NTFS_FS is not set | 1309 | # CONFIG_NTFS_FS is not set |
| 1399 | 1310 | ||
| @@ -1404,10 +1315,9 @@ CONFIG_PROC_FS=y | |||
| 1404 | CONFIG_PROC_KCORE=y | 1315 | CONFIG_PROC_KCORE=y |
| 1405 | CONFIG_SYSFS=y | 1316 | CONFIG_SYSFS=y |
| 1406 | CONFIG_TMPFS=y | 1317 | CONFIG_TMPFS=y |
| 1407 | # CONFIG_HUGETLBFS is not set | 1318 | CONFIG_HUGETLBFS=y |
| 1408 | # CONFIG_HUGETLB_PAGE is not set | 1319 | CONFIG_HUGETLB_PAGE=y |
| 1409 | CONFIG_RAMFS=y | 1320 | CONFIG_RAMFS=y |
| 1410 | # CONFIG_RELAYFS_FS is not set | ||
| 1411 | # CONFIG_CONFIGFS_FS is not set | 1321 | # CONFIG_CONFIGFS_FS is not set |
| 1412 | 1322 | ||
| 1413 | # | 1323 | # |
| @@ -1430,13 +1340,26 @@ CONFIG_RAMFS=y | |||
| 1430 | # | 1340 | # |
| 1431 | # Network File Systems | 1341 | # Network File Systems |
| 1432 | # | 1342 | # |
| 1433 | # CONFIG_NFS_FS is not set | 1343 | CONFIG_NFS_FS=y |
| 1434 | # CONFIG_NFSD is not set | 1344 | CONFIG_NFS_V3=y |
| 1345 | # CONFIG_NFS_V3_ACL is not set | ||
| 1346 | # CONFIG_NFS_V4 is not set | ||
| 1347 | # CONFIG_NFS_DIRECTIO is not set | ||
| 1348 | CONFIG_NFSD=y | ||
| 1349 | CONFIG_NFSD_V3=y | ||
| 1350 | # CONFIG_NFSD_V3_ACL is not set | ||
| 1351 | # CONFIG_NFSD_V4 is not set | ||
| 1352 | CONFIG_NFSD_TCP=y | ||
| 1353 | CONFIG_ROOT_NFS=y | ||
| 1354 | CONFIG_LOCKD=y | ||
| 1355 | CONFIG_LOCKD_V4=y | ||
| 1356 | CONFIG_EXPORTFS=y | ||
| 1357 | CONFIG_NFS_COMMON=y | ||
| 1358 | CONFIG_SUNRPC=y | ||
| 1359 | # CONFIG_RPCSEC_GSS_KRB5 is not set | ||
| 1360 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | ||
| 1435 | # CONFIG_SMB_FS is not set | 1361 | # CONFIG_SMB_FS is not set |
| 1436 | CONFIG_CIFS=y | 1362 | # CONFIG_CIFS is not set |
| 1437 | # CONFIG_CIFS_STATS is not set | ||
| 1438 | # CONFIG_CIFS_XATTR is not set | ||
| 1439 | # CONFIG_CIFS_EXPERIMENTAL is not set | ||
| 1440 | # CONFIG_NCP_FS is not set | 1363 | # CONFIG_NCP_FS is not set |
| 1441 | # CONFIG_CODA_FS is not set | 1364 | # CONFIG_CODA_FS is not set |
| 1442 | # CONFIG_AFS_FS is not set | 1365 | # CONFIG_AFS_FS is not set |
| @@ -1445,33 +1368,18 @@ CONFIG_CIFS=y | |||
| 1445 | # | 1368 | # |
| 1446 | # Partition Types | 1369 | # Partition Types |
| 1447 | # | 1370 | # |
| 1448 | CONFIG_PARTITION_ADVANCED=y | 1371 | # CONFIG_PARTITION_ADVANCED is not set |
| 1449 | # CONFIG_ACORN_PARTITION is not set | ||
| 1450 | # CONFIG_OSF_PARTITION is not set | ||
| 1451 | # CONFIG_AMIGA_PARTITION is not set | ||
| 1452 | # CONFIG_ATARI_PARTITION is not set | ||
| 1453 | # CONFIG_MAC_PARTITION is not set | ||
| 1454 | CONFIG_MSDOS_PARTITION=y | 1372 | CONFIG_MSDOS_PARTITION=y |
| 1455 | # CONFIG_BSD_DISKLABEL is not set | ||
| 1456 | # CONFIG_MINIX_SUBPARTITION is not set | ||
| 1457 | # CONFIG_SOLARIS_X86_PARTITION is not set | ||
| 1458 | # CONFIG_UNIXWARE_DISKLABEL is not set | ||
| 1459 | # CONFIG_LDM_PARTITION is not set | ||
| 1460 | # CONFIG_SGI_PARTITION is not set | ||
| 1461 | # CONFIG_ULTRIX_PARTITION is not set | ||
| 1462 | # CONFIG_SUN_PARTITION is not set | ||
| 1463 | # CONFIG_KARMA_PARTITION is not set | ||
| 1464 | # CONFIG_EFI_PARTITION is not set | ||
| 1465 | 1373 | ||
| 1466 | # | 1374 | # |
| 1467 | # Native Language Support | 1375 | # Native Language Support |
| 1468 | # | 1376 | # |
| 1469 | CONFIG_NLS=y | 1377 | CONFIG_NLS=y |
| 1470 | CONFIG_NLS_DEFAULT="iso8859-15" | 1378 | CONFIG_NLS_DEFAULT="iso8859-1" |
| 1471 | # CONFIG_NLS_CODEPAGE_437 is not set | 1379 | CONFIG_NLS_CODEPAGE_437=y |
| 1472 | # CONFIG_NLS_CODEPAGE_737 is not set | 1380 | # CONFIG_NLS_CODEPAGE_737 is not set |
| 1473 | # CONFIG_NLS_CODEPAGE_775 is not set | 1381 | # CONFIG_NLS_CODEPAGE_775 is not set |
| 1474 | CONFIG_NLS_CODEPAGE_850=y | 1382 | # CONFIG_NLS_CODEPAGE_850 is not set |
| 1475 | # CONFIG_NLS_CODEPAGE_852 is not set | 1383 | # CONFIG_NLS_CODEPAGE_852 is not set |
| 1476 | # CONFIG_NLS_CODEPAGE_855 is not set | 1384 | # CONFIG_NLS_CODEPAGE_855 is not set |
| 1477 | # CONFIG_NLS_CODEPAGE_857 is not set | 1385 | # CONFIG_NLS_CODEPAGE_857 is not set |
| @@ -1491,7 +1399,7 @@ CONFIG_NLS_CODEPAGE_850=y | |||
| 1491 | # CONFIG_NLS_ISO8859_8 is not set | 1399 | # CONFIG_NLS_ISO8859_8 is not set |
| 1492 | # CONFIG_NLS_CODEPAGE_1250 is not set | 1400 | # CONFIG_NLS_CODEPAGE_1250 is not set |
| 1493 | # CONFIG_NLS_CODEPAGE_1251 is not set | 1401 | # CONFIG_NLS_CODEPAGE_1251 is not set |
| 1494 | # CONFIG_NLS_ASCII is not set | 1402 | CONFIG_NLS_ASCII=y |
| 1495 | CONFIG_NLS_ISO8859_1=y | 1403 | CONFIG_NLS_ISO8859_1=y |
| 1496 | # CONFIG_NLS_ISO8859_2 is not set | 1404 | # CONFIG_NLS_ISO8859_2 is not set |
| 1497 | # CONFIG_NLS_ISO8859_3 is not set | 1405 | # CONFIG_NLS_ISO8859_3 is not set |
| @@ -1510,20 +1418,50 @@ CONFIG_NLS_UTF8=y | |||
| 1510 | # | 1418 | # |
| 1511 | # Instrumentation Support | 1419 | # Instrumentation Support |
| 1512 | # | 1420 | # |
| 1513 | # CONFIG_PROFILING is not set | 1421 | CONFIG_PROFILING=y |
| 1514 | # CONFIG_KPROBES is not set | 1422 | CONFIG_OPROFILE=y |
| 1423 | CONFIG_KPROBES=y | ||
| 1515 | 1424 | ||
| 1516 | # | 1425 | # |
| 1517 | # Kernel hacking | 1426 | # Kernel hacking |
| 1518 | # | 1427 | # |
| 1428 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | ||
| 1519 | # CONFIG_PRINTK_TIME is not set | 1429 | # CONFIG_PRINTK_TIME is not set |
| 1520 | CONFIG_MAGIC_SYSRQ=y | 1430 | CONFIG_MAGIC_SYSRQ=y |
| 1521 | # CONFIG_DEBUG_KERNEL is not set | 1431 | CONFIG_UNUSED_SYMBOLS=y |
| 1522 | CONFIG_LOG_BUF_SHIFT=14 | 1432 | CONFIG_DEBUG_KERNEL=y |
| 1433 | CONFIG_LOG_BUF_SHIFT=18 | ||
| 1434 | CONFIG_DETECT_SOFTLOCKUP=y | ||
| 1435 | # CONFIG_SCHEDSTATS is not set | ||
| 1436 | # CONFIG_DEBUG_SLAB is not set | ||
| 1437 | # CONFIG_DEBUG_RT_MUTEXES is not set | ||
| 1438 | # CONFIG_RT_MUTEX_TESTER is not set | ||
| 1439 | # CONFIG_DEBUG_SPINLOCK is not set | ||
| 1440 | # CONFIG_DEBUG_MUTEXES is not set | ||
| 1441 | # CONFIG_DEBUG_RWSEMS is not set | ||
| 1442 | # CONFIG_DEBUG_LOCK_ALLOC is not set | ||
| 1443 | # CONFIG_PROVE_LOCKING is not set | ||
| 1444 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | ||
| 1445 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | ||
| 1446 | # CONFIG_DEBUG_KOBJECT is not set | ||
| 1447 | # CONFIG_DEBUG_HIGHMEM is not set | ||
| 1523 | CONFIG_DEBUG_BUGVERBOSE=y | 1448 | CONFIG_DEBUG_BUGVERBOSE=y |
| 1449 | # CONFIG_DEBUG_INFO is not set | ||
| 1450 | # CONFIG_DEBUG_FS is not set | ||
| 1451 | # CONFIG_DEBUG_VM is not set | ||
| 1452 | # CONFIG_FRAME_POINTER is not set | ||
| 1453 | CONFIG_UNWIND_INFO=y | ||
| 1454 | CONFIG_STACK_UNWIND=y | ||
| 1455 | # CONFIG_FORCED_INLINING is not set | ||
| 1456 | # CONFIG_RCU_TORTURE_TEST is not set | ||
| 1524 | CONFIG_EARLY_PRINTK=y | 1457 | CONFIG_EARLY_PRINTK=y |
| 1458 | CONFIG_DEBUG_STACKOVERFLOW=y | ||
| 1459 | # CONFIG_DEBUG_STACK_USAGE is not set | ||
| 1460 | # CONFIG_DEBUG_RODATA is not set | ||
| 1461 | # CONFIG_4KSTACKS is not set | ||
| 1525 | CONFIG_X86_FIND_SMP_CONFIG=y | 1462 | CONFIG_X86_FIND_SMP_CONFIG=y |
| 1526 | CONFIG_X86_MPPARSE=y | 1463 | CONFIG_X86_MPPARSE=y |
| 1464 | CONFIG_DOUBLEFAULT=y | ||
| 1527 | 1465 | ||
| 1528 | # | 1466 | # |
| 1529 | # Security options | 1467 | # Security options |
| @@ -1537,10 +1475,6 @@ CONFIG_X86_MPPARSE=y | |||
| 1537 | # CONFIG_CRYPTO is not set | 1475 | # CONFIG_CRYPTO is not set |
| 1538 | 1476 | ||
| 1539 | # | 1477 | # |
| 1540 | # Hardware crypto devices | ||
| 1541 | # | ||
| 1542 | |||
| 1543 | # | ||
| 1544 | # Library routines | 1478 | # Library routines |
| 1545 | # | 1479 | # |
| 1546 | # CONFIG_CRC_CCITT is not set | 1480 | # CONFIG_CRC_CCITT is not set |
| @@ -1548,7 +1482,12 @@ CONFIG_X86_MPPARSE=y | |||
| 1548 | CONFIG_CRC32=y | 1482 | CONFIG_CRC32=y |
| 1549 | # CONFIG_LIBCRC32C is not set | 1483 | # CONFIG_LIBCRC32C is not set |
| 1550 | CONFIG_ZLIB_INFLATE=y | 1484 | CONFIG_ZLIB_INFLATE=y |
| 1485 | CONFIG_PLIST=y | ||
| 1551 | CONFIG_GENERIC_HARDIRQS=y | 1486 | CONFIG_GENERIC_HARDIRQS=y |
| 1552 | CONFIG_GENERIC_IRQ_PROBE=y | 1487 | CONFIG_GENERIC_IRQ_PROBE=y |
| 1488 | CONFIG_GENERIC_PENDING_IRQ=y | ||
| 1489 | CONFIG_X86_SMP=y | ||
| 1490 | CONFIG_X86_HT=y | ||
| 1553 | CONFIG_X86_BIOS_REBOOT=y | 1491 | CONFIG_X86_BIOS_REBOOT=y |
| 1492 | CONFIG_X86_TRAMPOLINE=y | ||
| 1554 | CONFIG_KTIME_SCALAR=y | 1493 | CONFIG_KTIME_SCALAR=y |
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile index 5427a842e841..1a884b6e6e5c 100644 --- a/arch/i386/kernel/Makefile +++ b/arch/i386/kernel/Makefile | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | extra-y := head.o init_task.o vmlinux.lds | 5 | extra-y := head.o init_task.o vmlinux.lds |
| 6 | 6 | ||
| 7 | obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \ | 7 | obj-y := process.o signal.o entry.o traps.o irq.o \ |
| 8 | ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \ | 8 | ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \ |
| 9 | pci-dma.o i386_ksyms.o i387.o bootflag.o \ | 9 | pci-dma.o i386_ksyms.o i387.o bootflag.o \ |
| 10 | quirks.o i8237.o topology.o alternative.o i8253.o tsc.o | 10 | quirks.o i8237.o topology.o alternative.o i8253.o tsc.o |
| @@ -81,4 +81,5 @@ $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \ | |||
| 81 | $(call if_changed,syscall) | 81 | $(call if_changed,syscall) |
| 82 | 82 | ||
| 83 | k8-y += ../../x86_64/kernel/k8.o | 83 | k8-y += ../../x86_64/kernel/k8.o |
| 84 | stacktrace-y += ../../x86_64/kernel/stacktrace.o | ||
| 84 | 85 | ||
diff --git a/arch/i386/kernel/acpi/Makefile b/arch/i386/kernel/acpi/Makefile index 7e9ac99354f4..7f7be01f44e6 100644 --- a/arch/i386/kernel/acpi/Makefile +++ b/arch/i386/kernel/acpi/Makefile | |||
| @@ -1,5 +1,7 @@ | |||
| 1 | obj-$(CONFIG_ACPI) += boot.o | 1 | obj-$(CONFIG_ACPI) += boot.o |
| 2 | ifneq ($(CONFIG_PCI),) | ||
| 2 | obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o | 3 | obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o |
| 4 | endif | ||
| 3 | obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o | 5 | obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o |
| 4 | 6 | ||
| 5 | ifneq ($(CONFIG_ACPI_PROCESSOR),) | 7 | ifneq ($(CONFIG_ACPI_PROCESSOR),) |
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c index ee003bc0e8b1..1aaea6ab8c46 100644 --- a/arch/i386/kernel/acpi/boot.c +++ b/arch/i386/kernel/acpi/boot.c | |||
| @@ -26,9 +26,12 @@ | |||
| 26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
| 27 | #include <linux/acpi.h> | 27 | #include <linux/acpi.h> |
| 28 | #include <linux/efi.h> | 28 | #include <linux/efi.h> |
| 29 | #include <linux/cpumask.h> | ||
| 29 | #include <linux/module.h> | 30 | #include <linux/module.h> |
| 30 | #include <linux/dmi.h> | 31 | #include <linux/dmi.h> |
| 31 | #include <linux/irq.h> | 32 | #include <linux/irq.h> |
| 33 | #include <linux/bootmem.h> | ||
| 34 | #include <linux/ioport.h> | ||
| 32 | 35 | ||
| 33 | #include <asm/pgtable.h> | 36 | #include <asm/pgtable.h> |
| 34 | #include <asm/io_apic.h> | 37 | #include <asm/io_apic.h> |
| @@ -36,11 +39,17 @@ | |||
| 36 | #include <asm/io.h> | 39 | #include <asm/io.h> |
| 37 | #include <asm/mpspec.h> | 40 | #include <asm/mpspec.h> |
| 38 | 41 | ||
| 39 | #ifdef CONFIG_X86_64 | 42 | static int __initdata acpi_force = 0; |
| 40 | 43 | ||
| 41 | extern void __init clustered_apic_check(void); | 44 | #ifdef CONFIG_ACPI |
| 45 | int acpi_disabled = 0; | ||
| 46 | #else | ||
| 47 | int acpi_disabled = 1; | ||
| 48 | #endif | ||
| 49 | EXPORT_SYMBOL(acpi_disabled); | ||
| 50 | |||
| 51 | #ifdef CONFIG_X86_64 | ||
| 42 | 52 | ||
| 43 | extern int gsi_irq_sharing(int gsi); | ||
| 44 | #include <asm/proto.h> | 53 | #include <asm/proto.h> |
| 45 | 54 | ||
| 46 | static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; } | 55 | static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; } |
| @@ -506,16 +515,76 @@ EXPORT_SYMBOL(acpi_register_gsi); | |||
| 506 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | 515 | #ifdef CONFIG_ACPI_HOTPLUG_CPU |
| 507 | int acpi_map_lsapic(acpi_handle handle, int *pcpu) | 516 | int acpi_map_lsapic(acpi_handle handle, int *pcpu) |
| 508 | { | 517 | { |
| 509 | /* TBD */ | 518 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 510 | return -EINVAL; | 519 | union acpi_object *obj; |
| 520 | struct acpi_table_lapic *lapic; | ||
| 521 | cpumask_t tmp_map, new_map; | ||
| 522 | u8 physid; | ||
| 523 | int cpu; | ||
| 524 | |||
| 525 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) | ||
| 526 | return -EINVAL; | ||
| 527 | |||
| 528 | if (!buffer.length || !buffer.pointer) | ||
| 529 | return -EINVAL; | ||
| 530 | |||
| 531 | obj = buffer.pointer; | ||
| 532 | if (obj->type != ACPI_TYPE_BUFFER || | ||
| 533 | obj->buffer.length < sizeof(*lapic)) { | ||
| 534 | kfree(buffer.pointer); | ||
| 535 | return -EINVAL; | ||
| 536 | } | ||
| 537 | |||
| 538 | lapic = (struct acpi_table_lapic *)obj->buffer.pointer; | ||
| 539 | |||
| 540 | if ((lapic->header.type != ACPI_MADT_LAPIC) || | ||
| 541 | (!lapic->flags.enabled)) { | ||
| 542 | kfree(buffer.pointer); | ||
| 543 | return -EINVAL; | ||
| 544 | } | ||
| 545 | |||
| 546 | physid = lapic->id; | ||
| 547 | |||
| 548 | kfree(buffer.pointer); | ||
| 549 | buffer.length = ACPI_ALLOCATE_BUFFER; | ||
| 550 | buffer.pointer = NULL; | ||
| 551 | |||
| 552 | tmp_map = cpu_present_map; | ||
| 553 | mp_register_lapic(physid, lapic->flags.enabled); | ||
| 554 | |||
| 555 | /* | ||
| 556 | * If mp_register_lapic successfully generates a new logical cpu | ||
| 557 | * number, then the following will get us exactly what was mapped | ||
| 558 | */ | ||
| 559 | cpus_andnot(new_map, cpu_present_map, tmp_map); | ||
| 560 | if (cpus_empty(new_map)) { | ||
| 561 | printk ("Unable to map lapic to logical cpu number\n"); | ||
| 562 | return -EINVAL; | ||
| 563 | } | ||
| 564 | |||
| 565 | cpu = first_cpu(new_map); | ||
| 566 | |||
| 567 | *pcpu = cpu; | ||
| 568 | return 0; | ||
| 511 | } | 569 | } |
| 512 | 570 | ||
| 513 | EXPORT_SYMBOL(acpi_map_lsapic); | 571 | EXPORT_SYMBOL(acpi_map_lsapic); |
| 514 | 572 | ||
| 515 | int acpi_unmap_lsapic(int cpu) | 573 | int acpi_unmap_lsapic(int cpu) |
| 516 | { | 574 | { |
| 517 | /* TBD */ | 575 | int i; |
| 518 | return -EINVAL; | 576 | |
| 577 | for_each_possible_cpu(i) { | ||
| 578 | if (x86_acpiid_to_apicid[i] == x86_cpu_to_apicid[cpu]) { | ||
| 579 | x86_acpiid_to_apicid[i] = -1; | ||
| 580 | break; | ||
| 581 | } | ||
| 582 | } | ||
| 583 | x86_cpu_to_apicid[cpu] = -1; | ||
| 584 | cpu_clear(cpu, cpu_present_map); | ||
| 585 | num_processors--; | ||
| 586 | |||
| 587 | return (0); | ||
| 519 | } | 588 | } |
| 520 | 589 | ||
| 521 | EXPORT_SYMBOL(acpi_unmap_lsapic); | 590 | EXPORT_SYMBOL(acpi_unmap_lsapic); |
| @@ -579,6 +648,8 @@ static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size) | |||
| 579 | static int __init acpi_parse_hpet(unsigned long phys, unsigned long size) | 648 | static int __init acpi_parse_hpet(unsigned long phys, unsigned long size) |
| 580 | { | 649 | { |
| 581 | struct acpi_table_hpet *hpet_tbl; | 650 | struct acpi_table_hpet *hpet_tbl; |
| 651 | struct resource *hpet_res; | ||
| 652 | resource_size_t res_start; | ||
| 582 | 653 | ||
| 583 | if (!phys || !size) | 654 | if (!phys || !size) |
| 584 | return -EINVAL; | 655 | return -EINVAL; |
| @@ -594,12 +665,26 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size) | |||
| 594 | "memory.\n"); | 665 | "memory.\n"); |
| 595 | return -1; | 666 | return -1; |
| 596 | } | 667 | } |
| 668 | |||
| 669 | #define HPET_RESOURCE_NAME_SIZE 9 | ||
| 670 | hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE); | ||
| 671 | if (hpet_res) { | ||
| 672 | memset(hpet_res, 0, sizeof(*hpet_res)); | ||
| 673 | hpet_res->name = (void *)&hpet_res[1]; | ||
| 674 | hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | ||
| 675 | snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, | ||
| 676 | "HPET %u", hpet_tbl->number); | ||
| 677 | hpet_res->end = (1 * 1024) - 1; | ||
| 678 | } | ||
| 679 | |||
| 597 | #ifdef CONFIG_X86_64 | 680 | #ifdef CONFIG_X86_64 |
| 598 | vxtime.hpet_address = hpet_tbl->addr.addrl | | 681 | vxtime.hpet_address = hpet_tbl->addr.addrl | |
| 599 | ((long)hpet_tbl->addr.addrh << 32); | 682 | ((long)hpet_tbl->addr.addrh << 32); |
| 600 | 683 | ||
| 601 | printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", | 684 | printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", |
| 602 | hpet_tbl->id, vxtime.hpet_address); | 685 | hpet_tbl->id, vxtime.hpet_address); |
| 686 | |||
| 687 | res_start = vxtime.hpet_address; | ||
| 603 | #else /* X86 */ | 688 | #else /* X86 */ |
| 604 | { | 689 | { |
| 605 | extern unsigned long hpet_address; | 690 | extern unsigned long hpet_address; |
| @@ -607,9 +692,17 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size) | |||
| 607 | hpet_address = hpet_tbl->addr.addrl; | 692 | hpet_address = hpet_tbl->addr.addrl; |
| 608 | printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", | 693 | printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", |
| 609 | hpet_tbl->id, hpet_address); | 694 | hpet_tbl->id, hpet_address); |
| 695 | |||
| 696 | res_start = hpet_address; | ||
| 610 | } | 697 | } |
| 611 | #endif /* X86 */ | 698 | #endif /* X86 */ |
| 612 | 699 | ||
| 700 | if (hpet_res) { | ||
| 701 | hpet_res->start = res_start; | ||
| 702 | hpet_res->end += res_start; | ||
| 703 | insert_resource(&iomem_resource, hpet_res); | ||
| 704 | } | ||
| 705 | |||
| 613 | return 0; | 706 | return 0; |
| 614 | } | 707 | } |
| 615 | #else | 708 | #else |
| @@ -860,8 +953,6 @@ static void __init acpi_process_madt(void) | |||
| 860 | return; | 953 | return; |
| 861 | } | 954 | } |
| 862 | 955 | ||
| 863 | extern int acpi_force; | ||
| 864 | |||
| 865 | #ifdef __i386__ | 956 | #ifdef __i386__ |
| 866 | 957 | ||
| 867 | static int __init disable_acpi_irq(struct dmi_system_id *d) | 958 | static int __init disable_acpi_irq(struct dmi_system_id *d) |
| @@ -1163,3 +1254,75 @@ int __init acpi_boot_init(void) | |||
| 1163 | 1254 | ||
| 1164 | return 0; | 1255 | return 0; |
| 1165 | } | 1256 | } |
| 1257 | |||
| 1258 | static int __init parse_acpi(char *arg) | ||
| 1259 | { | ||
| 1260 | if (!arg) | ||
| 1261 | return -EINVAL; | ||
| 1262 | |||
| 1263 | /* "acpi=off" disables both ACPI table parsing and interpreter */ | ||
| 1264 | if (strcmp(arg, "off") == 0) { | ||
| 1265 | disable_acpi(); | ||
| 1266 | } | ||
| 1267 | /* acpi=force to over-ride black-list */ | ||
| 1268 | else if (strcmp(arg, "force") == 0) { | ||
| 1269 | acpi_force = 1; | ||
| 1270 | acpi_ht = 1; | ||
| 1271 | acpi_disabled = 0; | ||
| 1272 | } | ||
| 1273 | /* acpi=strict disables out-of-spec workarounds */ | ||
| 1274 | else if (strcmp(arg, "strict") == 0) { | ||
| 1275 | acpi_strict = 1; | ||
| 1276 | } | ||
| 1277 | /* Limit ACPI just to boot-time to enable HT */ | ||
| 1278 | else if (strcmp(arg, "ht") == 0) { | ||
| 1279 | if (!acpi_force) | ||
| 1280 | disable_acpi(); | ||
| 1281 | acpi_ht = 1; | ||
| 1282 | } | ||
| 1283 | /* "acpi=noirq" disables ACPI interrupt routing */ | ||
| 1284 | else if (strcmp(arg, "noirq") == 0) { | ||
| 1285 | acpi_noirq_set(); | ||
| 1286 | } else { | ||
| 1287 | /* Core will printk when we return error. */ | ||
| 1288 | return -EINVAL; | ||
| 1289 | } | ||
| 1290 | return 0; | ||
| 1291 | } | ||
| 1292 | early_param("acpi", parse_acpi); | ||
| 1293 | |||
| 1294 | /* FIXME: Using pci= for an ACPI parameter is a travesty. */ | ||
| 1295 | static int __init parse_pci(char *arg) | ||
| 1296 | { | ||
| 1297 | if (arg && strcmp(arg, "noacpi") == 0) | ||
| 1298 | acpi_disable_pci(); | ||
| 1299 | return 0; | ||
| 1300 | } | ||
| 1301 | early_param("pci", parse_pci); | ||
| 1302 | |||
| 1303 | #ifdef CONFIG_X86_IO_APIC | ||
| 1304 | static int __init parse_acpi_skip_timer_override(char *arg) | ||
| 1305 | { | ||
| 1306 | acpi_skip_timer_override = 1; | ||
| 1307 | return 0; | ||
| 1308 | } | ||
| 1309 | early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override); | ||
| 1310 | #endif /* CONFIG_X86_IO_APIC */ | ||
| 1311 | |||
| 1312 | static int __init setup_acpi_sci(char *s) | ||
| 1313 | { | ||
| 1314 | if (!s) | ||
| 1315 | return -EINVAL; | ||
| 1316 | if (!strcmp(s, "edge")) | ||
| 1317 | acpi_sci_flags.trigger = 1; | ||
| 1318 | else if (!strcmp(s, "level")) | ||
| 1319 | acpi_sci_flags.trigger = 3; | ||
| 1320 | else if (!strcmp(s, "high")) | ||
| 1321 | acpi_sci_flags.polarity = 1; | ||
| 1322 | else if (!strcmp(s, "low")) | ||
| 1323 | acpi_sci_flags.polarity = 3; | ||
| 1324 | else | ||
| 1325 | return -EINVAL; | ||
| 1326 | return 0; | ||
| 1327 | } | ||
| 1328 | early_param("acpi_sci", setup_acpi_sci); | ||
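The hunks above move the `acpi=`, `pci=noacpi`, `acpi_skip_timer_override` and `acpi_sci=` handling into `early_param()` callbacks so these flags are settled before ACPI table parsing starts. As a point of reference only, here is a minimal sketch of that registration pattern; the `example` option and `example_flag` variable are hypothetical and not part of this patch.

```c
#include <linux/init.h>
#include <linux/string.h>

static int example_flag __initdata;		/* hypothetical flag, for illustration only */

/* Runs from parse_early_param(), well before the ordinary __setup() handlers. */
static int __init parse_example(char *arg)
{
	if (!arg)
		return -EINVAL;			/* core code prints the error for us */
	if (!strcmp(arg, "on"))
		example_flag = 1;
	else if (!strcmp(arg, "off"))
		example_flag = 0;
	else
		return -EINVAL;
	return 0;
}
early_param("example", parse_example);		/* matches "example=on" on the kernel command line */
```

Booting with `example=on` would then set the flag during early setup, the same way `acpi=force` or `acpi_sci=level` take effect in the hunks above.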
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c index 1649a175a206..fe799b11ac0a 100644 --- a/arch/i386/kernel/acpi/earlyquirk.c +++ b/arch/i386/kernel/acpi/earlyquirk.c | |||
| @@ -48,7 +48,11 @@ void __init check_acpi_pci(void) | |||
| 48 | int num, slot, func; | 48 | int num, slot, func; |
| 49 | 49 | ||
| 50 | /* Assume the machine supports type 1. If not it will | 50 | /* Assume the machine supports type 1. If not it will |
| 51 | always read ffffffff and should not have any side effect. */ | 51 | always read ffffffff and should not have any side effect. |
| 52 | Actually a few buggy systems can machine check. Allow the user | ||
| 53 | to disable it by command line option at least -AK */ | ||
| 54 | if (!early_pci_allowed()) | ||
| 55 | return; | ||
| 52 | 56 | ||
| 53 | /* Poor man's PCI discovery */ | 57 | /* Poor man's PCI discovery */ |
| 54 | for (num = 0; num < 32; num++) { | 58 | for (num = 0; num < 32; num++) { |
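The comment added above warns that blindly issuing type 1 configuration reads can machine-check a few buggy systems, which is why the probe now bails out unless `early_pci_allowed()` permits it. For context, a sketch of what a "poor man's" type 1 config-space read looks like (ports 0xCF8/0xCFC); this mirrors the kernel's early PCI helpers in spirit but is written here purely as an illustration, not as code from this patch.

```c
#include <asm/io.h>
#include <linux/types.h>

/*
 * Type 1 access: write a bus/device/function/register address to 0xCF8,
 * then read the dword back from 0xCFC.  A read of 0xffffffff usually
 * means "nothing there" -- except on the buggy machines noted above.
 */
static u32 read_pci_config_dword(u8 bus, u8 slot, u8 func, u8 offset)
{
	u32 addr = 0x80000000u | (bus << 16) | (slot << 11) |
		   (func << 8) | (offset & 0xfc);

	outl(addr, 0xcf8);
	return inl(0xcfc);
}
```

The loop that follows the guard performs reads of this kind across buses, slots and functions to identify chipsets that need early ACPI quirks.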
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index 8c844d07862f..90faae5c5d30 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c | |||
| @@ -52,7 +52,18 @@ static cpumask_t timer_bcast_ipi; | |||
| 52 | /* | 52 | /* |
| 53 | * Knob to control our willingness to enable the local APIC. | 53 | * Knob to control our willingness to enable the local APIC. |
| 54 | */ | 54 | */ |
| 55 | int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */ | 55 | static int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */ |
| 56 | |||
| 57 | static inline void lapic_disable(void) | ||
| 58 | { | ||
| 59 | enable_local_apic = -1; | ||
| 60 | clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); | ||
| 61 | } | ||
| 62 | |||
| 63 | static inline void lapic_enable(void) | ||
| 64 | { | ||
| 65 | enable_local_apic = 1; | ||
| 66 | } | ||
| 56 | 67 | ||
| 57 | /* | 68 | /* |
| 58 | * Debug level | 69 | * Debug level |
| @@ -586,8 +597,7 @@ void __devinit setup_local_APIC(void) | |||
| 586 | printk("No ESR for 82489DX.\n"); | 597 | printk("No ESR for 82489DX.\n"); |
| 587 | } | 598 | } |
| 588 | 599 | ||
| 589 | if (nmi_watchdog == NMI_LOCAL_APIC) | 600 | setup_apic_nmi_watchdog(NULL); |
| 590 | setup_apic_nmi_watchdog(); | ||
| 591 | apic_pm_activate(); | 601 | apic_pm_activate(); |
| 592 | } | 602 | } |
| 593 | 603 | ||
| @@ -1373,3 +1383,18 @@ int __init APIC_init_uniprocessor (void) | |||
| 1373 | 1383 | ||
| 1374 | return 0; | 1384 | return 0; |
| 1375 | } | 1385 | } |
| 1386 | |||
| 1387 | static int __init parse_lapic(char *arg) | ||
| 1388 | { | ||
| 1389 | lapic_enable(); | ||
| 1390 | return 0; | ||
| 1391 | } | ||
| 1392 | early_param("lapic", parse_lapic); | ||
| 1393 | |||
| 1394 | static int __init parse_nolapic(char *arg) | ||
| 1395 | { | ||
| 1396 | lapic_disable(); | ||
| 1397 | return 0; | ||
| 1398 | } | ||
| 1399 | early_param("nolapic", parse_nolapic); | ||
| 1400 | |||
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c index 8591f2fa920c..ff9ce4b5eaa8 100644 --- a/arch/i386/kernel/apm.c +++ b/arch/i386/kernel/apm.c | |||
| @@ -1154,9 +1154,11 @@ out: | |||
| 1154 | 1154 | ||
| 1155 | static void set_time(void) | 1155 | static void set_time(void) |
| 1156 | { | 1156 | { |
| 1157 | struct timespec ts; | ||
| 1157 | if (got_clock_diff) { /* Must know time zone in order to set clock */ | 1158 | if (got_clock_diff) { /* Must know time zone in order to set clock */ |
| 1158 | xtime.tv_sec = get_cmos_time() + clock_cmos_diff; | 1159 | ts.tv_sec = get_cmos_time() + clock_cmos_diff; |
| 1159 | xtime.tv_nsec = 0; | 1160 | ts.tv_nsec = 0; |
| 1161 | do_settimeofday(&ts); | ||
| 1160 | } | 1162 | } |
| 1161 | } | 1163 | } |
| 1162 | 1164 | ||
| @@ -1232,13 +1234,8 @@ static int suspend(int vetoable) | |||
| 1232 | restore_processor_state(); | 1234 | restore_processor_state(); |
| 1233 | 1235 | ||
| 1234 | local_irq_disable(); | 1236 | local_irq_disable(); |
| 1235 | write_seqlock(&xtime_lock); | ||
| 1236 | spin_lock(&i8253_lock); | ||
| 1237 | reinit_timer(); | ||
| 1238 | set_time(); | 1237 | set_time(); |
| 1239 | 1238 | reinit_timer(); | |
| 1240 | spin_unlock(&i8253_lock); | ||
| 1241 | write_sequnlock(&xtime_lock); | ||
| 1242 | 1239 | ||
| 1243 | if (err == APM_NO_ERROR) | 1240 | if (err == APM_NO_ERROR) |
| 1244 | err = APM_SUCCESS; | 1241 | err = APM_SUCCESS; |
| @@ -1365,9 +1362,7 @@ static void check_events(void) | |||
| 1365 | ignore_bounce = 1; | 1362 | ignore_bounce = 1; |
| 1366 | if ((event != APM_NORMAL_RESUME) | 1363 | if ((event != APM_NORMAL_RESUME) |
| 1367 | || (ignore_normal_resume == 0)) { | 1364 | || (ignore_normal_resume == 0)) { |
| 1368 | write_seqlock_irq(&xtime_lock); | ||
| 1369 | set_time(); | 1365 | set_time(); |
| 1370 | write_sequnlock_irq(&xtime_lock); | ||
| 1371 | device_resume(); | 1366 | device_resume(); |
| 1372 | pm_send_all(PM_RESUME, (void *)0); | 1367 | pm_send_all(PM_RESUME, (void *)0); |
| 1373 | queue_event(event, NULL); | 1368 | queue_event(event, NULL); |
| @@ -1383,9 +1378,7 @@ static void check_events(void) | |||
| 1383 | break; | 1378 | break; |
| 1384 | 1379 | ||
| 1385 | case APM_UPDATE_TIME: | 1380 | case APM_UPDATE_TIME: |
| 1386 | write_seqlock_irq(&xtime_lock); | ||
| 1387 | set_time(); | 1381 | set_time(); |
| 1388 | write_sequnlock_irq(&xtime_lock); | ||
| 1389 | break; | 1382 | break; |
| 1390 | 1383 | ||
| 1391 | case APM_CRITICAL_SUSPEND: | 1384 | case APM_CRITICAL_SUSPEND: |
| @@ -2339,6 +2332,7 @@ static int __init apm_init(void) | |||
| 2339 | ret = kernel_thread(apm, NULL, CLONE_KERNEL | SIGCHLD); | 2332 | ret = kernel_thread(apm, NULL, CLONE_KERNEL | SIGCHLD); |
| 2340 | if (ret < 0) { | 2333 | if (ret < 0) { |
| 2341 | printk(KERN_ERR "apm: disabled - Unable to start kernel thread.\n"); | 2334 | printk(KERN_ERR "apm: disabled - Unable to start kernel thread.\n"); |
| 2335 | remove_proc_entry("apm", NULL); | ||
| 2342 | return -ENOMEM; | 2336 | return -ENOMEM; |
| 2343 | } | 2337 | } |
| 2344 | 2338 | ||
| @@ -2348,7 +2342,13 @@ static int __init apm_init(void) | |||
| 2348 | return 0; | 2342 | return 0; |
| 2349 | } | 2343 | } |
| 2350 | 2344 | ||
| 2351 | misc_register(&apm_device); | 2345 | /* |
| 2346 | * Note we don't actually care if the misc_device cannot be registered. | ||
| 2347 | * This driver can do its job without it, even if userspace can't | ||
| 2348 | * control it. Just log the error. | ||
| 2349 | */ | ||
| 2350 | if (misc_register(&apm_device)) | ||
| 2351 | printk(KERN_WARNING "apm: Could not register misc device.\n"); | ||
| 2352 | 2352 | ||
| 2353 | if (HZ != 100) | 2353 | if (HZ != 100) |
| 2354 | idle_period = (idle_period * HZ) / 100; | 2354 | idle_period = (idle_period * HZ) / 100; |
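The apm.c hunks stop writing `xtime` directly under `xtime_lock` and instead fill a `struct timespec` and hand it to `do_settimeofday()`, which does its own locking and timekeeping bookkeeping. A rough userspace analogue of "CMOS time plus a cached offset, then set the clock in one call" is sketched below; `clock_settime()` is the real POSIX call (it needs CAP_SYS_TIME), while `read_cmos_seconds()` is a made-up stand-in for the driver's `get_cmos_time()`.

```c
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for get_cmos_time(): pretend the RTC holds UTC. */
static time_t read_cmos_seconds(void)
{
	return time(NULL);
}

int main(void)
{
	long clock_cmos_diff = 0;   /* cached zone/offset, as in the driver */
	struct timespec ts;

	ts.tv_sec  = read_cmos_seconds() + clock_cmos_diff;
	ts.tv_nsec = 0;

	/* One call updates the system clock; no caller-side locking needed. */
	if (clock_settime(CLOCK_REALTIME, &ts) != 0)
		perror("clock_settime (needs CAP_SYS_TIME)");
	else
		printf("clock set to %ld\n", (long)ts.tv_sec);
	return 0;
}
```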
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c index e6a2d6b80cda..e4758095d87a 100644 --- a/arch/i386/kernel/cpu/amd.c +++ b/arch/i386/kernel/cpu/amd.c | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | extern void vide(void); | 22 | extern void vide(void); |
| 23 | __asm__(".align 4\nvide: ret"); | 23 | __asm__(".align 4\nvide: ret"); |
| 24 | 24 | ||
| 25 | static void __init init_amd(struct cpuinfo_x86 *c) | 25 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) |
| 26 | { | 26 | { |
| 27 | u32 l, h; | 27 | u32 l, h; |
| 28 | int mbytes = num_physpages >> (20-PAGE_SHIFT); | 28 | int mbytes = num_physpages >> (20-PAGE_SHIFT); |
| @@ -246,7 +246,7 @@ static void __init init_amd(struct cpuinfo_x86 *c) | |||
| 246 | num_cache_leaves = 3; | 246 | num_cache_leaves = 3; |
| 247 | } | 247 | } |
| 248 | 248 | ||
| 249 | static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size) | 249 | static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size) |
| 250 | { | 250 | { |
| 251 | /* AMD errata T13 (order #21922) */ | 251 | /* AMD errata T13 (order #21922) */ |
| 252 | if ((c->x86 == 6)) { | 252 | if ((c->x86 == 6)) { |
| @@ -259,7 +259,7 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size) | |||
| 259 | return size; | 259 | return size; |
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | static struct cpu_dev amd_cpu_dev __initdata = { | 262 | static struct cpu_dev amd_cpu_dev __cpuinitdata = { |
| 263 | .c_vendor = "AMD", | 263 | .c_vendor = "AMD", |
| 264 | .c_ident = { "AuthenticAMD" }, | 264 | .c_ident = { "AuthenticAMD" }, |
| 265 | .c_models = { | 265 | .c_models = { |
| @@ -275,7 +275,6 @@ static struct cpu_dev amd_cpu_dev __initdata = { | |||
| 275 | }, | 275 | }, |
| 276 | }, | 276 | }, |
| 277 | .c_init = init_amd, | 277 | .c_init = init_amd, |
| 278 | .c_identify = generic_identify, | ||
| 279 | .c_size_cache = amd_size_cache, | 278 | .c_size_cache = amd_size_cache, |
| 280 | }; | 279 | }; |
| 281 | 280 | ||
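The `__init` to `__cpuinit` churn in this and the following files moves CPU-setup code into a section that can be retained when CPU hotplug is configured, and discarded after boot otherwise. Underneath, such markers are section-placement attributes. The toy below only illustrates that mechanism with an invented macro and section name; it is not the kernel's actual `__cpuinit` definition, which also depends on CONFIG_HOTPLUG_CPU.

```c
#include <stdio.h>

/* Illustrative only: place a function into a named section so the effect
 * is visible with "objdump -h" on the resulting binary. */
#define demo_cpuinit __attribute__((noinline, section(".demo.cpuinit.text")))

static void demo_cpuinit init_fictional_cpu(void)
{
	puts("per-CPU init work");
}

int main(void)
{
	init_fictional_cpu();
	return 0;
}
```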
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c index bd75629dd262..8c25047975c0 100644 --- a/arch/i386/kernel/cpu/centaur.c +++ b/arch/i386/kernel/cpu/centaur.c | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | 9 | ||
| 10 | #ifdef CONFIG_X86_OOSTORE | 10 | #ifdef CONFIG_X86_OOSTORE |
| 11 | 11 | ||
| 12 | static u32 __init power2(u32 x) | 12 | static u32 __cpuinit power2(u32 x) |
| 13 | { | 13 | { |
| 14 | u32 s=1; | 14 | u32 s=1; |
| 15 | while(s<=x) | 15 | while(s<=x) |
| @@ -22,7 +22,7 @@ static u32 __init power2(u32 x) | |||
| 22 | * Set up an actual MCR | 22 | * Set up an actual MCR |
| 23 | */ | 23 | */ |
| 24 | 24 | ||
| 25 | static void __init centaur_mcr_insert(int reg, u32 base, u32 size, int key) | 25 | static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key) |
| 26 | { | 26 | { |
| 27 | u32 lo, hi; | 27 | u32 lo, hi; |
| 28 | 28 | ||
| @@ -40,7 +40,7 @@ static void __init centaur_mcr_insert(int reg, u32 base, u32 size, int key) | |||
| 40 | * Shortcut: We know you can't put 4Gig of RAM on a winchip | 40 | * Shortcut: We know you can't put 4Gig of RAM on a winchip |
| 41 | */ | 41 | */ |
| 42 | 42 | ||
| 43 | static u32 __init ramtop(void) /* 16388 */ | 43 | static u32 __cpuinit ramtop(void) /* 16388 */ |
| 44 | { | 44 | { |
| 45 | int i; | 45 | int i; |
| 46 | u32 top = 0; | 46 | u32 top = 0; |
| @@ -91,7 +91,7 @@ static u32 __init ramtop(void) /* 16388 */ | |||
| 91 | * Compute a set of MCR's to give maximum coverage | 91 | * Compute a set of MCR's to give maximum coverage |
| 92 | */ | 92 | */ |
| 93 | 93 | ||
| 94 | static int __init centaur_mcr_compute(int nr, int key) | 94 | static int __cpuinit centaur_mcr_compute(int nr, int key) |
| 95 | { | 95 | { |
| 96 | u32 mem = ramtop(); | 96 | u32 mem = ramtop(); |
| 97 | u32 root = power2(mem); | 97 | u32 root = power2(mem); |
| @@ -166,7 +166,7 @@ static int __init centaur_mcr_compute(int nr, int key) | |||
| 166 | return ct; | 166 | return ct; |
| 167 | } | 167 | } |
| 168 | 168 | ||
| 169 | static void __init centaur_create_optimal_mcr(void) | 169 | static void __cpuinit centaur_create_optimal_mcr(void) |
| 170 | { | 170 | { |
| 171 | int i; | 171 | int i; |
| 172 | /* | 172 | /* |
| @@ -189,7 +189,7 @@ static void __init centaur_create_optimal_mcr(void) | |||
| 189 | wrmsr(MSR_IDT_MCR0+i, 0, 0); | 189 | wrmsr(MSR_IDT_MCR0+i, 0, 0); |
| 190 | } | 190 | } |
| 191 | 191 | ||
| 192 | static void __init winchip2_create_optimal_mcr(void) | 192 | static void __cpuinit winchip2_create_optimal_mcr(void) |
| 193 | { | 193 | { |
| 194 | u32 lo, hi; | 194 | u32 lo, hi; |
| 195 | int i; | 195 | int i; |
| @@ -227,7 +227,7 @@ static void __init winchip2_create_optimal_mcr(void) | |||
| 227 | * Handle the MCR key on the Winchip 2. | 227 | * Handle the MCR key on the Winchip 2. |
| 228 | */ | 228 | */ |
| 229 | 229 | ||
| 230 | static void __init winchip2_unprotect_mcr(void) | 230 | static void __cpuinit winchip2_unprotect_mcr(void) |
| 231 | { | 231 | { |
| 232 | u32 lo, hi; | 232 | u32 lo, hi; |
| 233 | u32 key; | 233 | u32 key; |
| @@ -239,7 +239,7 @@ static void __init winchip2_unprotect_mcr(void) | |||
| 239 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | 239 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); |
| 240 | } | 240 | } |
| 241 | 241 | ||
| 242 | static void __init winchip2_protect_mcr(void) | 242 | static void __cpuinit winchip2_protect_mcr(void) |
| 243 | { | 243 | { |
| 244 | u32 lo, hi; | 244 | u32 lo, hi; |
| 245 | 245 | ||
| @@ -257,7 +257,7 @@ static void __init winchip2_protect_mcr(void) | |||
| 257 | #define RNG_ENABLED (1 << 3) | 257 | #define RNG_ENABLED (1 << 3) |
| 258 | #define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */ | 258 | #define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */ |
| 259 | 259 | ||
| 260 | static void __init init_c3(struct cpuinfo_x86 *c) | 260 | static void __cpuinit init_c3(struct cpuinfo_x86 *c) |
| 261 | { | 261 | { |
| 262 | u32 lo, hi; | 262 | u32 lo, hi; |
| 263 | 263 | ||
| @@ -303,7 +303,7 @@ static void __init init_c3(struct cpuinfo_x86 *c) | |||
| 303 | display_cacheinfo(c); | 303 | display_cacheinfo(c); |
| 304 | } | 304 | } |
| 305 | 305 | ||
| 306 | static void __init init_centaur(struct cpuinfo_x86 *c) | 306 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) |
| 307 | { | 307 | { |
| 308 | enum { | 308 | enum { |
| 309 | ECX8=1<<1, | 309 | ECX8=1<<1, |
| @@ -442,7 +442,7 @@ static void __init init_centaur(struct cpuinfo_x86 *c) | |||
| 442 | } | 442 | } |
| 443 | } | 443 | } |
| 444 | 444 | ||
| 445 | static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size) | 445 | static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size) |
| 446 | { | 446 | { |
| 447 | /* VIA C3 CPUs (670-68F) need further shifting. */ | 447 | /* VIA C3 CPUs (670-68F) need further shifting. */ |
| 448 | if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8))) | 448 | if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8))) |
| @@ -457,7 +457,7 @@ static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size | |||
| 457 | return size; | 457 | return size; |
| 458 | } | 458 | } |
| 459 | 459 | ||
| 460 | static struct cpu_dev centaur_cpu_dev __initdata = { | 460 | static struct cpu_dev centaur_cpu_dev __cpuinitdata = { |
| 461 | .c_vendor = "Centaur", | 461 | .c_vendor = "Centaur", |
| 462 | .c_ident = { "CentaurHauls" }, | 462 | .c_ident = { "CentaurHauls" }, |
| 463 | .c_init = init_centaur, | 463 | .c_init = init_centaur, |
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index 70c87de582c7..2799baaadf45 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c | |||
| @@ -36,7 +36,7 @@ struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; | |||
| 36 | 36 | ||
| 37 | extern int disable_pse; | 37 | extern int disable_pse; |
| 38 | 38 | ||
| 39 | static void default_init(struct cpuinfo_x86 * c) | 39 | static void __cpuinit default_init(struct cpuinfo_x86 * c) |
| 40 | { | 40 | { |
| 41 | /* Not much we can do here... */ | 41 | /* Not much we can do here... */ |
| 42 | /* Check if at least it has cpuid */ | 42 | /* Check if at least it has cpuid */ |
| @@ -49,7 +49,7 @@ static void default_init(struct cpuinfo_x86 * c) | |||
| 49 | } | 49 | } |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | static struct cpu_dev default_cpu = { | 52 | static struct cpu_dev __cpuinitdata default_cpu = { |
| 53 | .c_init = default_init, | 53 | .c_init = default_init, |
| 54 | .c_vendor = "Unknown", | 54 | .c_vendor = "Unknown", |
| 55 | }; | 55 | }; |
| @@ -265,7 +265,7 @@ static void __init early_cpu_detect(void) | |||
| 265 | } | 265 | } |
| 266 | } | 266 | } |
| 267 | 267 | ||
| 268 | void __cpuinit generic_identify(struct cpuinfo_x86 * c) | 268 | static void __cpuinit generic_identify(struct cpuinfo_x86 * c) |
| 269 | { | 269 | { |
| 270 | u32 tfms, xlvl; | 270 | u32 tfms, xlvl; |
| 271 | int ebx; | 271 | int ebx; |
| @@ -675,7 +675,7 @@ old_gdt: | |||
| 675 | #endif | 675 | #endif |
| 676 | 676 | ||
| 677 | /* Clear %fs and %gs. */ | 677 | /* Clear %fs and %gs. */ |
| 678 | asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs"); | 678 | asm volatile ("movl %0, %%fs; movl %0, %%gs" : : "r" (0)); |
| 679 | 679 | ||
| 680 | /* Clear all 6 debug registers: */ | 680 | /* Clear all 6 debug registers: */ |
| 681 | set_debugreg(0, 0); | 681 | set_debugreg(0, 0); |
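The common.c asm change is about operand constraints: the old sequence zeroed `%eax` behind the compiler's back, while the new form feeds a zero in through an `"r"` input operand so the compiler both knows about the register use and chooses which register carries the zero. The harmless x86-only sketch below reuses one input operand across two instructions in the same way (it deliberately avoids segment registers); the variable names are just for the demo.

```c
#include <stdio.h>

int main(void)
{
	unsigned int zero = 0, a = 1, b = 2;

	/* Same pattern as the %fs/%gs clear: one "r" input (%2) is read by two
	 * instructions, and the compiler picks the registers involved. */
	asm volatile("movl %2, %0\n\t"
	             "movl %2, %1"
	             : "=&r" (a), "=&r" (b)
	             : "r" (zero));

	printf("a=%u b=%u\n", a, b);   /* prints a=0 b=0 */
	return 0;
}
```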
diff --git a/arch/i386/kernel/cpu/cpu.h b/arch/i386/kernel/cpu/cpu.h index 5a1d4f163e84..2f6432cef6ff 100644 --- a/arch/i386/kernel/cpu/cpu.h +++ b/arch/i386/kernel/cpu/cpu.h | |||
| @@ -24,7 +24,5 @@ extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM]; | |||
| 24 | extern int get_model_name(struct cpuinfo_x86 *c); | 24 | extern int get_model_name(struct cpuinfo_x86 *c); |
| 25 | extern void display_cacheinfo(struct cpuinfo_x86 *c); | 25 | extern void display_cacheinfo(struct cpuinfo_x86 *c); |
| 26 | 26 | ||
| 27 | extern void generic_identify(struct cpuinfo_x86 * c); | ||
| 28 | |||
| 29 | extern void early_intel_workaround(struct cpuinfo_x86 *c); | 27 | extern void early_intel_workaround(struct cpuinfo_x86 *c); |
| 30 | 28 | ||
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c index f03b7f94c304..c0c3b59de32c 100644 --- a/arch/i386/kernel/cpu/cyrix.c +++ b/arch/i386/kernel/cpu/cyrix.c | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | /* | 12 | /* |
| 13 | * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU | 13 | * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU |
| 14 | */ | 14 | */ |
| 15 | static void __init do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | 15 | static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) |
| 16 | { | 16 | { |
| 17 | unsigned char ccr2, ccr3; | 17 | unsigned char ccr2, ccr3; |
| 18 | unsigned long flags; | 18 | unsigned long flags; |
| @@ -52,25 +52,25 @@ static void __init do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | |||
| 52 | * Actually since bugs.h doesn't even reference this perhaps someone should | 52 | * Actually since bugs.h doesn't even reference this perhaps someone should |
| 53 | * fix the documentation ??? | 53 | * fix the documentation ??? |
| 54 | */ | 54 | */ |
| 55 | static unsigned char Cx86_dir0_msb __initdata = 0; | 55 | static unsigned char Cx86_dir0_msb __cpuinitdata = 0; |
| 56 | 56 | ||
| 57 | static char Cx86_model[][9] __initdata = { | 57 | static char Cx86_model[][9] __cpuinitdata = { |
| 58 | "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", | 58 | "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", |
| 59 | "M II ", "Unknown" | 59 | "M II ", "Unknown" |
| 60 | }; | 60 | }; |
| 61 | static char Cx486_name[][5] __initdata = { | 61 | static char Cx486_name[][5] __cpuinitdata = { |
| 62 | "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", | 62 | "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", |
| 63 | "SRx2", "DRx2" | 63 | "SRx2", "DRx2" |
| 64 | }; | 64 | }; |
| 65 | static char Cx486S_name[][4] __initdata = { | 65 | static char Cx486S_name[][4] __cpuinitdata = { |
| 66 | "S", "S2", "Se", "S2e" | 66 | "S", "S2", "Se", "S2e" |
| 67 | }; | 67 | }; |
| 68 | static char Cx486D_name[][4] __initdata = { | 68 | static char Cx486D_name[][4] __cpuinitdata = { |
| 69 | "DX", "DX2", "?", "?", "?", "DX4" | 69 | "DX", "DX2", "?", "?", "?", "DX4" |
| 70 | }; | 70 | }; |
| 71 | static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock"; | 71 | static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock"; |
| 72 | static char cyrix_model_mult1[] __initdata = "12??43"; | 72 | static char cyrix_model_mult1[] __cpuinitdata = "12??43"; |
| 73 | static char cyrix_model_mult2[] __initdata = "12233445"; | 73 | static char cyrix_model_mult2[] __cpuinitdata = "12233445"; |
| 74 | 74 | ||
| 75 | /* | 75 | /* |
| 76 | * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old | 76 | * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old |
| @@ -82,7 +82,7 @@ static char cyrix_model_mult2[] __initdata = "12233445"; | |||
| 82 | 82 | ||
| 83 | extern void calibrate_delay(void) __init; | 83 | extern void calibrate_delay(void) __init; |
| 84 | 84 | ||
| 85 | static void __init check_cx686_slop(struct cpuinfo_x86 *c) | 85 | static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c) |
| 86 | { | 86 | { |
| 87 | unsigned long flags; | 87 | unsigned long flags; |
| 88 | 88 | ||
| @@ -107,7 +107,7 @@ static void __init check_cx686_slop(struct cpuinfo_x86 *c) | |||
| 107 | } | 107 | } |
| 108 | 108 | ||
| 109 | 109 | ||
| 110 | static void __init set_cx86_reorder(void) | 110 | static void __cpuinit set_cx86_reorder(void) |
| 111 | { | 111 | { |
| 112 | u8 ccr3; | 112 | u8 ccr3; |
| 113 | 113 | ||
| @@ -122,7 +122,7 @@ static void __init set_cx86_reorder(void) | |||
| 122 | setCx86(CX86_CCR3, ccr3); | 122 | setCx86(CX86_CCR3, ccr3); |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | static void __init set_cx86_memwb(void) | 125 | static void __cpuinit set_cx86_memwb(void) |
| 126 | { | 126 | { |
| 127 | u32 cr0; | 127 | u32 cr0; |
| 128 | 128 | ||
| @@ -137,7 +137,7 @@ static void __init set_cx86_memwb(void) | |||
| 137 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 ); | 137 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 ); |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | static void __init set_cx86_inc(void) | 140 | static void __cpuinit set_cx86_inc(void) |
| 141 | { | 141 | { |
| 142 | unsigned char ccr3; | 142 | unsigned char ccr3; |
| 143 | 143 | ||
| @@ -158,7 +158,7 @@ static void __init set_cx86_inc(void) | |||
| 158 | * Configure later MediaGX and/or Geode processor. | 158 | * Configure later MediaGX and/or Geode processor. |
| 159 | */ | 159 | */ |
| 160 | 160 | ||
| 161 | static void __init geode_configure(void) | 161 | static void __cpuinit geode_configure(void) |
| 162 | { | 162 | { |
| 163 | unsigned long flags; | 163 | unsigned long flags; |
| 164 | u8 ccr3, ccr4; | 164 | u8 ccr3, ccr4; |
| @@ -184,14 +184,14 @@ static void __init geode_configure(void) | |||
| 184 | 184 | ||
| 185 | 185 | ||
| 186 | #ifdef CONFIG_PCI | 186 | #ifdef CONFIG_PCI |
| 187 | static struct pci_device_id __initdata cyrix_55x0[] = { | 187 | static struct pci_device_id __cpuinitdata cyrix_55x0[] = { |
| 188 | { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510) }, | 188 | { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510) }, |
| 189 | { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520) }, | 189 | { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520) }, |
| 190 | { }, | 190 | { }, |
| 191 | }; | 191 | }; |
| 192 | #endif | 192 | #endif |
| 193 | 193 | ||
| 194 | static void __init init_cyrix(struct cpuinfo_x86 *c) | 194 | static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) |
| 195 | { | 195 | { |
| 196 | unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0; | 196 | unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0; |
| 197 | char *buf = c->x86_model_id; | 197 | char *buf = c->x86_model_id; |
| @@ -346,7 +346,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c) | |||
| 346 | /* | 346 | /* |
| 347 | * Handle National Semiconductor branded processors | 347 | * Handle National Semiconductor branded processors |
| 348 | */ | 348 | */ |
| 349 | static void __init init_nsc(struct cpuinfo_x86 *c) | 349 | static void __cpuinit init_nsc(struct cpuinfo_x86 *c) |
| 350 | { | 350 | { |
| 351 | /* There may be GX1 processors in the wild that are branded | 351 | /* There may be GX1 processors in the wild that are branded |
| 352 | * NSC and not Cyrix. | 352 | * NSC and not Cyrix. |
| @@ -394,7 +394,7 @@ static inline int test_cyrix_52div(void) | |||
| 394 | return (unsigned char) (test >> 8) == 0x02; | 394 | return (unsigned char) (test >> 8) == 0x02; |
| 395 | } | 395 | } |
| 396 | 396 | ||
| 397 | static void cyrix_identify(struct cpuinfo_x86 * c) | 397 | static void __cpuinit cyrix_identify(struct cpuinfo_x86 * c) |
| 398 | { | 398 | { |
| 399 | /* Detect Cyrix with disabled CPUID */ | 399 | /* Detect Cyrix with disabled CPUID */ |
| 400 | if ( c->x86 == 4 && test_cyrix_52div() ) { | 400 | if ( c->x86 == 4 && test_cyrix_52div() ) { |
| @@ -427,10 +427,9 @@ static void cyrix_identify(struct cpuinfo_x86 * c) | |||
| 427 | local_irq_restore(flags); | 427 | local_irq_restore(flags); |
| 428 | } | 428 | } |
| 429 | } | 429 | } |
| 430 | generic_identify(c); | ||
| 431 | } | 430 | } |
| 432 | 431 | ||
| 433 | static struct cpu_dev cyrix_cpu_dev __initdata = { | 432 | static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { |
| 434 | .c_vendor = "Cyrix", | 433 | .c_vendor = "Cyrix", |
| 435 | .c_ident = { "CyrixInstead" }, | 434 | .c_ident = { "CyrixInstead" }, |
| 436 | .c_init = init_cyrix, | 435 | .c_init = init_cyrix, |
| @@ -453,11 +452,10 @@ static int __init cyrix_exit_cpu(void) | |||
| 453 | 452 | ||
| 454 | late_initcall(cyrix_exit_cpu); | 453 | late_initcall(cyrix_exit_cpu); |
| 455 | 454 | ||
| 456 | static struct cpu_dev nsc_cpu_dev __initdata = { | 455 | static struct cpu_dev nsc_cpu_dev __cpuinitdata = { |
| 457 | .c_vendor = "NSC", | 456 | .c_vendor = "NSC", |
| 458 | .c_ident = { "Geode by NSC" }, | 457 | .c_ident = { "Geode by NSC" }, |
| 459 | .c_init = init_nsc, | 458 | .c_init = init_nsc, |
| 460 | .c_identify = generic_identify, | ||
| 461 | }; | 459 | }; |
| 462 | 460 | ||
| 463 | int __init nsc_init_cpu(void) | 461 | int __init nsc_init_cpu(void) |
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c index 5a2e270924b1..94a95aa5227e 100644 --- a/arch/i386/kernel/cpu/intel.c +++ b/arch/i386/kernel/cpu/intel.c | |||
| @@ -198,7 +198,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | 200 | ||
| 201 | static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size) | 201 | static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size) |
| 202 | { | 202 | { |
| 203 | /* Intel PIII Tualatin. This comes in two flavours. | 203 | /* Intel PIII Tualatin. This comes in two flavours. |
| 204 | * One has 256kb of cache, the other 512. We have no way | 204 | * One has 256kb of cache, the other 512. We have no way |
| @@ -263,7 +263,6 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = { | |||
| 263 | }, | 263 | }, |
| 264 | }, | 264 | }, |
| 265 | .c_init = init_intel, | 265 | .c_init = init_intel, |
| 266 | .c_identify = generic_identify, | ||
| 267 | .c_size_cache = intel_size_cache, | 266 | .c_size_cache = intel_size_cache, |
| 268 | }; | 267 | }; |
| 269 | 268 | ||
diff --git a/arch/i386/kernel/cpu/mcheck/Makefile b/arch/i386/kernel/cpu/mcheck/Makefile index 30808f3d6715..f1ebe1c1c17a 100644 --- a/arch/i386/kernel/cpu/mcheck/Makefile +++ b/arch/i386/kernel/cpu/mcheck/Makefile | |||
| @@ -1,2 +1,2 @@ | |||
| 1 | obj-y = mce.o k7.o p4.o p5.o p6.o winchip.o | 1 | obj-y = mce.o k7.o p4.o p5.o p6.o winchip.o therm_throt.o |
| 2 | obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o | 2 | obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o |
diff --git a/arch/i386/kernel/cpu/mcheck/p4.c b/arch/i386/kernel/cpu/mcheck/p4.c index b95f1b3d53aa..504434a46011 100644 --- a/arch/i386/kernel/cpu/mcheck/p4.c +++ b/arch/i386/kernel/cpu/mcheck/p4.c | |||
| @@ -13,6 +13,8 @@ | |||
| 13 | #include <asm/msr.h> | 13 | #include <asm/msr.h> |
| 14 | #include <asm/apic.h> | 14 | #include <asm/apic.h> |
| 15 | 15 | ||
| 16 | #include <asm/therm_throt.h> | ||
| 17 | |||
| 16 | #include "mce.h" | 18 | #include "mce.h" |
| 17 | 19 | ||
| 18 | /* as supported by the P4/Xeon family */ | 20 | /* as supported by the P4/Xeon family */ |
| @@ -44,25 +46,12 @@ static void unexpected_thermal_interrupt(struct pt_regs *regs) | |||
| 44 | /* P4/Xeon Thermal transition interrupt handler */ | 46 | /* P4/Xeon Thermal transition interrupt handler */ |
| 45 | static void intel_thermal_interrupt(struct pt_regs *regs) | 47 | static void intel_thermal_interrupt(struct pt_regs *regs) |
| 46 | { | 48 | { |
| 47 | u32 l, h; | 49 | __u64 msr_val; |
| 48 | unsigned int cpu = smp_processor_id(); | ||
| 49 | static unsigned long next[NR_CPUS]; | ||
| 50 | 50 | ||
| 51 | ack_APIC_irq(); | 51 | ack_APIC_irq(); |
| 52 | 52 | ||
| 53 | if (time_after(next[cpu], jiffies)) | 53 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); |
| 54 | return; | 54 | therm_throt_process(msr_val & 0x1); |
| 55 | |||
| 56 | next[cpu] = jiffies + HZ*5; | ||
| 57 | rdmsr(MSR_IA32_THERM_STATUS, l, h); | ||
| 58 | if (l & 0x1) { | ||
| 59 | printk(KERN_EMERG "CPU%d: Temperature above threshold\n", cpu); | ||
| 60 | printk(KERN_EMERG "CPU%d: Running in modulated clock mode\n", | ||
| 61 | cpu); | ||
| 62 | add_taint(TAINT_MACHINE_CHECK); | ||
| 63 | } else { | ||
| 64 | printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu); | ||
| 65 | } | ||
| 66 | } | 55 | } |
| 67 | 56 | ||
| 68 | /* Thermal interrupt handler for this CPU setup */ | 57 | /* Thermal interrupt handler for this CPU setup */ |
| @@ -122,10 +111,13 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 122 | 111 | ||
| 123 | rdmsr (MSR_IA32_MISC_ENABLE, l, h); | 112 | rdmsr (MSR_IA32_MISC_ENABLE, l, h); |
| 124 | wrmsr (MSR_IA32_MISC_ENABLE, l | (1<<3), h); | 113 | wrmsr (MSR_IA32_MISC_ENABLE, l | (1<<3), h); |
| 125 | 114 | ||
| 126 | l = apic_read (APIC_LVTTHMR); | 115 | l = apic_read (APIC_LVTTHMR); |
| 127 | apic_write_around (APIC_LVTTHMR, l & ~APIC_LVT_MASKED); | 116 | apic_write_around (APIC_LVTTHMR, l & ~APIC_LVT_MASKED); |
| 128 | printk (KERN_INFO "CPU%d: Thermal monitoring enabled\n", cpu); | 117 | printk (KERN_INFO "CPU%d: Thermal monitoring enabled\n", cpu); |
| 118 | |||
| 119 | /* enable thermal throttle processing */ | ||
| 120 | atomic_set(&therm_throt_en, 1); | ||
| 129 | return; | 121 | return; |
| 130 | } | 122 | } |
| 131 | #endif /* CONFIG_X86_MCE_P4THERMAL */ | 123 | #endif /* CONFIG_X86_MCE_P4THERMAL */ |
diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c new file mode 100644 index 000000000000..4f43047de406 --- /dev/null +++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c | |||
| @@ -0,0 +1,180 @@ | |||
| 1 | /* | ||
| 2 | * linux/arch/i386/kernel/cpu/mcheck/therm_throt.c | ||
| 3 | * | ||
| 4 | * Thermal throttle event support code (such as syslog messaging and rate | ||
| 5 | * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c). | ||
| 6 | * This allows consistent reporting of CPU thermal throttle events. | ||
| 7 | * | ||
| 8 | * Maintains a counter in /sys that keeps track of the number of thermal | ||
| 9 | * events, such that the user knows how bad the thermal problem might be | ||
| 10 | * (since the logging to syslog and mcelog is rate limited). | ||
| 11 | * | ||
| 12 | * Author: Dmitriy Zavin (dmitriyz@google.com) | ||
| 13 | * | ||
| 14 | * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c. | ||
| 15 | * Inspired by Ross Biro's and Al Borchers' counter code. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/percpu.h> | ||
| 19 | #include <linux/sysdev.h> | ||
| 20 | #include <linux/cpu.h> | ||
| 21 | #include <asm/cpu.h> | ||
| 22 | #include <linux/notifier.h> | ||
| 23 | #include <asm/therm_throt.h> | ||
| 24 | |||
| 25 | /* How long to wait between reporting thermal events */ | ||
| 26 | #define CHECK_INTERVAL (300 * HZ) | ||
| 27 | |||
| 28 | static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES; | ||
| 29 | static DEFINE_PER_CPU(unsigned long, thermal_throttle_count); | ||
| 30 | atomic_t therm_throt_en = ATOMIC_INIT(0); | ||
| 31 | |||
| 32 | #ifdef CONFIG_SYSFS | ||
| 33 | #define define_therm_throt_sysdev_one_ro(_name) \ | ||
| 34 | static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL) | ||
| 35 | |||
| 36 | #define define_therm_throt_sysdev_show_func(name) \ | ||
| 37 | static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \ | ||
| 38 | char *buf) \ | ||
| 39 | { \ | ||
| 40 | unsigned int cpu = dev->id; \ | ||
| 41 | ssize_t ret; \ | ||
| 42 | \ | ||
| 43 | preempt_disable(); /* CPU hotplug */ \ | ||
| 44 | if (cpu_online(cpu)) \ | ||
| 45 | ret = sprintf(buf, "%lu\n", \ | ||
| 46 | per_cpu(thermal_throttle_##name, cpu)); \ | ||
| 47 | else \ | ||
| 48 | ret = 0; \ | ||
| 49 | preempt_enable(); \ | ||
| 50 | \ | ||
| 51 | return ret; \ | ||
| 52 | } | ||
| 53 | |||
| 54 | define_therm_throt_sysdev_show_func(count); | ||
| 55 | define_therm_throt_sysdev_one_ro(count); | ||
| 56 | |||
| 57 | static struct attribute *thermal_throttle_attrs[] = { | ||
| 58 | &attr_count.attr, | ||
| 59 | NULL | ||
| 60 | }; | ||
| 61 | |||
| 62 | static struct attribute_group thermal_throttle_attr_group = { | ||
| 63 | .attrs = thermal_throttle_attrs, | ||
| 64 | .name = "thermal_throttle" | ||
| 65 | }; | ||
| 66 | #endif /* CONFIG_SYSFS */ | ||
| 67 | |||
| 68 | /*** | ||
| 69 | * therm_throt_process - Process thermal throttling event from interrupt | ||
| 70 | * @curr: Whether the condition is current or not (boolean), since the | ||
| 71 | * thermal interrupt normally gets called both when the thermal | ||
| 72 | * event begins and once the event has ended. | ||
| 73 | * | ||
| 74 | * This function is called by the thermal interrupt after the | ||
| 75 | * IRQ has been acknowledged. | ||
| 76 | * | ||
| 77 | * It will take care of rate limiting and printing messages to the syslog. | ||
| 78 | * | ||
| 79 | * Returns: 0 : Event should NOT be further logged, i.e. still in | ||
| 80 | * "timeout" from previous log message. | ||
| 81 | * 1 : Event should be logged further, and a message has been | ||
| 82 | * printed to the syslog. | ||
| 83 | */ | ||
| 84 | int therm_throt_process(int curr) | ||
| 85 | { | ||
| 86 | unsigned int cpu = smp_processor_id(); | ||
| 87 | __u64 tmp_jiffs = get_jiffies_64(); | ||
| 88 | |||
| 89 | if (curr) | ||
| 90 | __get_cpu_var(thermal_throttle_count)++; | ||
| 91 | |||
| 92 | if (time_before64(tmp_jiffs, __get_cpu_var(next_check))) | ||
| 93 | return 0; | ||
| 94 | |||
| 95 | __get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL; | ||
| 96 | |||
| 97 | /* if we just entered the thermal event */ | ||
| 98 | if (curr) { | ||
| 99 | printk(KERN_CRIT "CPU%d: Temperature above threshold, " | ||
| 100 | "cpu clock throttled (total events = %lu)\n", cpu, | ||
| 101 | __get_cpu_var(thermal_throttle_count)); | ||
| 102 | |||
| 103 | add_taint(TAINT_MACHINE_CHECK); | ||
| 104 | } else { | ||
| 105 | printk(KERN_CRIT "CPU%d: Temperature/speed normal\n", cpu); | ||
| 106 | } | ||
| 107 | |||
| 108 | return 1; | ||
| 109 | } | ||
| 110 | |||
| 111 | #ifdef CONFIG_SYSFS | ||
| 112 | /* Add/Remove thermal_throttle interface for CPU device */ | ||
| 113 | static __cpuinit int thermal_throttle_add_dev(struct sys_device * sys_dev) | ||
| 114 | { | ||
| 115 | sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group); | ||
| 116 | return 0; | ||
| 117 | } | ||
| 118 | |||
| 119 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 120 | static __cpuinit int thermal_throttle_remove_dev(struct sys_device * sys_dev) | ||
| 121 | { | ||
| 122 | sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group); | ||
| 123 | return 0; | ||
| 124 | } | ||
| 125 | |||
| 126 | /* Mutex protecting device creation against CPU hotplug */ | ||
| 127 | static DEFINE_MUTEX(therm_cpu_lock); | ||
| 128 | |||
| 129 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | ||
| 130 | static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb, | ||
| 131 | unsigned long action, | ||
| 132 | void *hcpu) | ||
| 133 | { | ||
| 134 | unsigned int cpu = (unsigned long)hcpu; | ||
| 135 | struct sys_device *sys_dev; | ||
| 136 | |||
| 137 | sys_dev = get_cpu_sysdev(cpu); | ||
| 138 | mutex_lock(&therm_cpu_lock); | ||
| 139 | switch (action) { | ||
| 140 | case CPU_ONLINE: | ||
| 141 | thermal_throttle_add_dev(sys_dev); | ||
| 142 | break; | ||
| 143 | case CPU_DEAD: | ||
| 144 | thermal_throttle_remove_dev(sys_dev); | ||
| 145 | break; | ||
| 146 | } | ||
| 147 | mutex_unlock(&therm_cpu_lock); | ||
| 148 | return NOTIFY_OK; | ||
| 149 | } | ||
| 150 | |||
| 151 | static struct notifier_block thermal_throttle_cpu_notifier = | ||
| 152 | { | ||
| 153 | .notifier_call = thermal_throttle_cpu_callback, | ||
| 154 | }; | ||
| 155 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
| 156 | |||
| 157 | static __init int thermal_throttle_init_device(void) | ||
| 158 | { | ||
| 159 | unsigned int cpu = 0; | ||
| 160 | |||
| 161 | if (!atomic_read(&therm_throt_en)) | ||
| 162 | return 0; | ||
| 163 | |||
| 164 | register_hotcpu_notifier(&thermal_throttle_cpu_notifier); | ||
| 165 | |||
| 166 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 167 | mutex_lock(&therm_cpu_lock); | ||
| 168 | #endif | ||
| 169 | /* connect live CPUs to sysfs */ | ||
| 170 | for_each_online_cpu(cpu) | ||
| 171 | thermal_throttle_add_dev(get_cpu_sysdev(cpu)); | ||
| 172 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 173 | mutex_unlock(&therm_cpu_lock); | ||
| 174 | #endif | ||
| 175 | |||
| 176 | return 0; | ||
| 177 | } | ||
| 178 | |||
| 179 | device_initcall(thermal_throttle_init_device); | ||
| 180 | #endif /* CONFIG_SYSFS */ | ||
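The heart of the new therm_throt.c is the rate limiting in `therm_throt_process()`: every event bumps a per-CPU counter, but a syslog line is emitted at most once per `CHECK_INTERVAL` (300 seconds worth of jiffies). The standalone sketch below replays that logic with a fake jiffies counter and a single CPU so it can be compiled and stepped through outside the kernel; `DEMO_HZ` and the function name are demo inventions.

```c
#include <stdio.h>

#define DEMO_HZ        100
#define CHECK_INTERVAL (300 * DEMO_HZ)   /* mirrors the kernel constant */

static unsigned long long jiffies;       /* fake clock for the demo */
static unsigned long long next_check;
static unsigned long      throttle_count;

/* Returns 1 if a message was logged, 0 if still inside the rate limit. */
static int demo_therm_throt_process(int curr)
{
	if (curr)
		throttle_count++;

	if (jiffies < next_check)
		return 0;

	next_check = jiffies + CHECK_INTERVAL;
	if (curr)
		printf("CPU0: temperature above threshold, clock throttled "
		       "(total events = %lu)\n", throttle_count);
	else
		printf("CPU0: temperature/speed normal\n");
	return 1;
}

int main(void)
{
	/* Fire a throttle event every second for ten minutes of fake time. */
	for (jiffies = 0; jiffies < 600ULL * DEMO_HZ; jiffies += DEMO_HZ)
		demo_therm_throt_process(1);

	/* Only two messages appear (at t=0 and t=300s); the counter still
	 * records all 600 events. */
	printf("events counted: %lu\n", throttle_count);
	return 0;
}
```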
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c index 169ac8e0db68..0b61eed8bbd8 100644 --- a/arch/i386/kernel/cpu/mtrr/generic.c +++ b/arch/i386/kernel/cpu/mtrr/generic.c | |||
| @@ -243,7 +243,7 @@ static DEFINE_SPINLOCK(set_atomicity_lock); | |||
| 243 | * has been called. | 243 | * has been called. |
| 244 | */ | 244 | */ |
| 245 | 245 | ||
| 246 | static void prepare_set(void) | 246 | static void prepare_set(void) __acquires(set_atomicity_lock) |
| 247 | { | 247 | { |
| 248 | unsigned long cr0; | 248 | unsigned long cr0; |
| 249 | 249 | ||
| @@ -274,7 +274,7 @@ static void prepare_set(void) | |||
| 274 | mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi); | 274 | mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi); |
| 275 | } | 275 | } |
| 276 | 276 | ||
| 277 | static void post_set(void) | 277 | static void post_set(void) __releases(set_atomicity_lock) |
| 278 | { | 278 | { |
| 279 | /* Flush TLBs (no need to flush caches - they are disabled) */ | 279 | /* Flush TLBs (no need to flush caches - they are disabled) */ |
| 280 | __flush_tlb(); | 280 | __flush_tlb(); |
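`prepare_set()` and `post_set()` gain `__acquires()`/`__releases()` annotations so sparse can check that the pair balances around `set_atomicity_lock`; a normal compile sees them as nothing. The self-contained sketch below uses the usual definition pattern (attribute only under `__CHECKER__`, empty otherwise) around a pthread mutex; the lock name, macro names and helpers are illustrative, not the kernel's. Build with `gcc -pthread`.

```c
#include <pthread.h>
#include <stdio.h>

/* Typical pattern: only sparse (__CHECKER__) sees the context attribute. */
#ifdef __CHECKER__
# define __demo_acquires(x) __attribute__((context(x, 0, 1)))
# define __demo_releases(x) __attribute__((context(x, 1, 0)))
#else
# define __demo_acquires(x)
# define __demo_releases(x)
#endif

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_counter;

static void demo_prepare(void) __demo_acquires(demo_lock);
static void demo_post(void) __demo_releases(demo_lock);

static void demo_prepare(void)
{
	pthread_mutex_lock(&demo_lock);   /* caller must later run demo_post() */
}

static void demo_post(void)
{
	pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
	demo_prepare();
	shared_counter++;
	demo_post();
	printf("counter=%d\n", shared_counter);
	return 0;
}
```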
diff --git a/arch/i386/kernel/cpu/nexgen.c b/arch/i386/kernel/cpu/nexgen.c index ad87fa58058d..8bf23cc80c63 100644 --- a/arch/i386/kernel/cpu/nexgen.c +++ b/arch/i386/kernel/cpu/nexgen.c | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | * to have CPUID. (Thanks to Herbert Oppmann) | 10 | * to have CPUID. (Thanks to Herbert Oppmann) |
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | static int __init deep_magic_nexgen_probe(void) | 13 | static int __cpuinit deep_magic_nexgen_probe(void) |
| 14 | { | 14 | { |
| 15 | int ret; | 15 | int ret; |
| 16 | 16 | ||
| @@ -27,21 +27,20 @@ static int __init deep_magic_nexgen_probe(void) | |||
| 27 | return ret; | 27 | return ret; |
| 28 | } | 28 | } |
| 29 | 29 | ||
| 30 | static void __init init_nexgen(struct cpuinfo_x86 * c) | 30 | static void __cpuinit init_nexgen(struct cpuinfo_x86 * c) |
| 31 | { | 31 | { |
| 32 | c->x86_cache_size = 256; /* A few had 1 MB... */ | 32 | c->x86_cache_size = 256; /* A few had 1 MB... */ |
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | static void __init nexgen_identify(struct cpuinfo_x86 * c) | 35 | static void __cpuinit nexgen_identify(struct cpuinfo_x86 * c) |
| 36 | { | 36 | { |
| 37 | /* Detect NexGen with old hypercode */ | 37 | /* Detect NexGen with old hypercode */ |
| 38 | if ( deep_magic_nexgen_probe() ) { | 38 | if ( deep_magic_nexgen_probe() ) { |
| 39 | strcpy(c->x86_vendor_id, "NexGenDriven"); | 39 | strcpy(c->x86_vendor_id, "NexGenDriven"); |
| 40 | } | 40 | } |
| 41 | generic_identify(c); | ||
| 42 | } | 41 | } |
| 43 | 42 | ||
| 44 | static struct cpu_dev nexgen_cpu_dev __initdata = { | 43 | static struct cpu_dev nexgen_cpu_dev __cpuinitdata = { |
| 45 | .c_vendor = "Nexgen", | 44 | .c_vendor = "Nexgen", |
| 46 | .c_ident = { "NexGenDriven" }, | 45 | .c_ident = { "NexGenDriven" }, |
| 47 | .c_models = { | 46 | .c_models = { |
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c index f54a15268ed7..76aac088a323 100644 --- a/arch/i386/kernel/cpu/proc.c +++ b/arch/i386/kernel/cpu/proc.c | |||
| @@ -46,8 +46,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
| 46 | 46 | ||
| 47 | /* Intel-defined (#2) */ | 47 | /* Intel-defined (#2) */ |
| 48 | "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est", | 48 | "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est", |
| 49 | "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL, | 49 | "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, |
| 50 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 50 | NULL, NULL, "dca", NULL, NULL, NULL, NULL, NULL, |
| 51 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 51 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
| 52 | 52 | ||
| 53 | /* VIA/Cyrix/Centaur-defined */ | 53 | /* VIA/Cyrix/Centaur-defined */ |
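The proc.c hunk simply names two previously reserved CPUID leaf-1 ECX bits, "ssse3" (bit 9) and "dca" (bit 18), so /proc/cpuinfo can report them. The same bits can be checked from userspace with the CPUID instruction; the snippet below uses GCC's `<cpuid.h>` helper and hand-defined bit masks.

```c
#include <stdio.h>
#include <cpuid.h>

#define ECX_SSSE3 (1u << 9)    /* leaf 1, ECX bit 9  */
#define ECX_DCA   (1u << 18)   /* leaf 1, ECX bit 18 */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 1 not supported\n");
		return 1;
	}

	printf("ssse3: %s\n", (ecx & ECX_SSSE3) ? "yes" : "no");
	printf("dca:   %s\n", (ecx & ECX_DCA)   ? "yes" : "no");
	return 0;
}
```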
diff --git a/arch/i386/kernel/cpu/rise.c b/arch/i386/kernel/cpu/rise.c index d08d5a2811c8..9317f7414989 100644 --- a/arch/i386/kernel/cpu/rise.c +++ b/arch/i386/kernel/cpu/rise.c | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | #include "cpu.h" | 6 | #include "cpu.h" |
| 7 | 7 | ||
| 8 | static void __init init_rise(struct cpuinfo_x86 *c) | 8 | static void __cpuinit init_rise(struct cpuinfo_x86 *c) |
| 9 | { | 9 | { |
| 10 | printk("CPU: Rise iDragon"); | 10 | printk("CPU: Rise iDragon"); |
| 11 | if (c->x86_model > 2) | 11 | if (c->x86_model > 2) |
| @@ -28,7 +28,7 @@ static void __init init_rise(struct cpuinfo_x86 *c) | |||
| 28 | set_bit(X86_FEATURE_CX8, c->x86_capability); | 28 | set_bit(X86_FEATURE_CX8, c->x86_capability); |
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | static struct cpu_dev rise_cpu_dev __initdata = { | 31 | static struct cpu_dev rise_cpu_dev __cpuinitdata = { |
| 32 | .c_vendor = "Rise", | 32 | .c_vendor = "Rise", |
| 33 | .c_ident = { "RiseRiseRise" }, | 33 | .c_ident = { "RiseRiseRise" }, |
| 34 | .c_models = { | 34 | .c_models = { |
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c index 7214c9b577ab..4056fb7d2cdf 100644 --- a/arch/i386/kernel/cpu/transmeta.c +++ b/arch/i386/kernel/cpu/transmeta.c | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | #include <asm/msr.h> | 5 | #include <asm/msr.h> |
| 6 | #include "cpu.h" | 6 | #include "cpu.h" |
| 7 | 7 | ||
| 8 | static void __init init_transmeta(struct cpuinfo_x86 *c) | 8 | static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) |
| 9 | { | 9 | { |
| 10 | unsigned int cap_mask, uk, max, dummy; | 10 | unsigned int cap_mask, uk, max, dummy; |
| 11 | unsigned int cms_rev1, cms_rev2; | 11 | unsigned int cms_rev1, cms_rev2; |
| @@ -85,10 +85,9 @@ static void __init init_transmeta(struct cpuinfo_x86 *c) | |||
| 85 | #endif | 85 | #endif |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | static void __init transmeta_identify(struct cpuinfo_x86 * c) | 88 | static void __cpuinit transmeta_identify(struct cpuinfo_x86 * c) |
| 89 | { | 89 | { |
| 90 | u32 xlvl; | 90 | u32 xlvl; |
| 91 | generic_identify(c); | ||
| 92 | 91 | ||
| 93 | /* Transmeta-defined flags: level 0x80860001 */ | 92 | /* Transmeta-defined flags: level 0x80860001 */ |
| 94 | xlvl = cpuid_eax(0x80860000); | 93 | xlvl = cpuid_eax(0x80860000); |
| @@ -98,7 +97,7 @@ static void __init transmeta_identify(struct cpuinfo_x86 * c) | |||
| 98 | } | 97 | } |
| 99 | } | 98 | } |
| 100 | 99 | ||
| 101 | static struct cpu_dev transmeta_cpu_dev __initdata = { | 100 | static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { |
| 102 | .c_vendor = "Transmeta", | 101 | .c_vendor = "Transmeta", |
| 103 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, | 102 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, |
| 104 | .c_init = init_transmeta, | 103 | .c_init = init_transmeta, |
diff --git a/arch/i386/kernel/cpu/umc.c b/arch/i386/kernel/cpu/umc.c index 2cd988f6dc55..1bf3f87e9c5b 100644 --- a/arch/i386/kernel/cpu/umc.c +++ b/arch/i386/kernel/cpu/umc.c | |||
| @@ -5,12 +5,8 @@ | |||
| 5 | 5 | ||
| 6 | /* UMC chips appear to be only either 386 or 486, so no special init takes place. | 6 | /* UMC chips appear to be only either 386 or 486, so no special init takes place. |
| 7 | */ | 7 | */ |
| 8 | static void __init init_umc(struct cpuinfo_x86 * c) | ||
| 9 | { | ||
| 10 | |||
| 11 | } | ||
| 12 | 8 | ||
| 13 | static struct cpu_dev umc_cpu_dev __initdata = { | 9 | static struct cpu_dev umc_cpu_dev __cpuinitdata = { |
| 14 | .c_vendor = "UMC", | 10 | .c_vendor = "UMC", |
| 15 | .c_ident = { "UMC UMC UMC" }, | 11 | .c_ident = { "UMC UMC UMC" }, |
| 16 | .c_models = { | 12 | .c_models = { |
| @@ -21,7 +17,6 @@ static struct cpu_dev umc_cpu_dev __initdata = { | |||
| 21 | } | 17 | } |
| 22 | }, | 18 | }, |
| 23 | }, | 19 | }, |
| 24 | .c_init = init_umc, | ||
| 25 | }; | 20 | }; |
| 26 | 21 | ||
| 27 | int __init umc_init_cpu(void) | 22 | int __init umc_init_cpu(void) |
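Dropping the empty `init_umc()` (and the explicit `.c_identify = generic_identify` lines in the other vendor files) relies on the `cpu_dev` callbacks being optional: the common identification code only invokes a callback when it is non-NULL. That guarded function-pointer pattern is shown below with an invented miniature of the structure; nothing here is the kernel's real type.

```c
#include <stdio.h>
#include <stddef.h>

/* Invented miniature of the cpu_dev idea: every callback is optional. */
struct demo_cpu_dev {
	const char *vendor;
	void (*c_init)(void);
	void (*c_identify)(void);
};

static void vendor_init(void) { puts("vendor-specific init"); }

static const struct demo_cpu_dev devs[] = {
	{ .vendor = "WithInit", .c_init = vendor_init },
	{ .vendor = "WithoutInit" },          /* no callbacks at all */
};

int main(void)
{
	for (size_t i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
		printf("%s:\n", devs[i].vendor);
		if (devs[i].c_identify)
			devs[i].c_identify();     /* skipped when NULL */
		if (devs[i].c_init)
			devs[i].c_init();
	}
	return 0;
}
```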
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c index 5b96f038367f..67d297dc1003 100644 --- a/arch/i386/kernel/crash.c +++ b/arch/i386/kernel/crash.c | |||
| @@ -22,6 +22,8 @@ | |||
| 22 | #include <asm/nmi.h> | 22 | #include <asm/nmi.h> |
| 23 | #include <asm/hw_irq.h> | 23 | #include <asm/hw_irq.h> |
| 24 | #include <asm/apic.h> | 24 | #include <asm/apic.h> |
| 25 | #include <asm/kdebug.h> | ||
| 26 | |||
| 25 | #include <mach_ipi.h> | 27 | #include <mach_ipi.h> |
| 26 | 28 | ||
| 27 | 29 | ||
| @@ -93,16 +95,25 @@ static void crash_save_self(struct pt_regs *regs) | |||
| 93 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) | 95 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) |
| 94 | static atomic_t waiting_for_crash_ipi; | 96 | static atomic_t waiting_for_crash_ipi; |
| 95 | 97 | ||
| 96 | static int crash_nmi_callback(struct pt_regs *regs, int cpu) | 98 | static int crash_nmi_callback(struct notifier_block *self, |
| 99 | unsigned long val, void *data) | ||
| 97 | { | 100 | { |
| 101 | struct pt_regs *regs; | ||
| 98 | struct pt_regs fixed_regs; | 102 | struct pt_regs fixed_regs; |
| 103 | int cpu; | ||
| 104 | |||
| 105 | if (val != DIE_NMI_IPI) | ||
| 106 | return NOTIFY_OK; | ||
| 107 | |||
| 108 | regs = ((struct die_args *)data)->regs; | ||
| 109 | cpu = raw_smp_processor_id(); | ||
| 99 | 110 | ||
| 100 | /* Don't do anything if this handler is invoked on crashing cpu. | 111 | /* Don't do anything if this handler is invoked on crashing cpu. |
| 101 | * Otherwise, system will completely hang. Crashing cpu can get | 112 | * Otherwise, system will completely hang. Crashing cpu can get |
| 102 | * an NMI if system was initially booted with nmi_watchdog parameter. | 113 | * an NMI if system was initially booted with nmi_watchdog parameter. |
| 103 | */ | 114 | */ |
| 104 | if (cpu == crashing_cpu) | 115 | if (cpu == crashing_cpu) |
| 105 | return 1; | 116 | return NOTIFY_STOP; |
| 106 | local_irq_disable(); | 117 | local_irq_disable(); |
| 107 | 118 | ||
| 108 | if (!user_mode_vm(regs)) { | 119 | if (!user_mode_vm(regs)) { |
| @@ -125,13 +136,18 @@ static void smp_send_nmi_allbutself(void) | |||
| 125 | send_IPI_allbutself(NMI_VECTOR); | 136 | send_IPI_allbutself(NMI_VECTOR); |
| 126 | } | 137 | } |
| 127 | 138 | ||
| 139 | static struct notifier_block crash_nmi_nb = { | ||
| 140 | .notifier_call = crash_nmi_callback, | ||
| 141 | }; | ||
| 142 | |||
| 128 | static void nmi_shootdown_cpus(void) | 143 | static void nmi_shootdown_cpus(void) |
| 129 | { | 144 | { |
| 130 | unsigned long msecs; | 145 | unsigned long msecs; |
| 131 | 146 | ||
| 132 | atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); | 147 | atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); |
| 133 | /* Would it be better to replace the trap vector here? */ | 148 | /* Would it be better to replace the trap vector here? */ |
| 134 | set_nmi_callback(crash_nmi_callback); | 149 | if (register_die_notifier(&crash_nmi_nb)) |
| 150 | return; /* return what? */ | ||
| 135 | /* Ensure the new callback function is set before sending | 151 | /* Ensure the new callback function is set before sending |
| 136 | * out the NMI | 152 | * out the NMI |
| 137 | */ | 153 | */ |
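crash.c stops using the dedicated `set_nmi_callback()` hook and instead registers a `notifier_block` on the die-notifier chain, filtering on `DIE_NMI_IPI` and returning `NOTIFY_STOP` from the crashing CPU. The generic chain idea, a list of callbacks where each one may pass the event on or stop it, can be modelled in a few lines of plain C; every name below is invented for the demo and does not match the kernel's notifier API.

```c
#include <stdio.h>

#define DEMO_NOTIFY_OK   0
#define DEMO_NOTIFY_STOP 1

struct demo_notifier_block {
	int (*call)(struct demo_notifier_block *self, unsigned long event, void *data);
	struct demo_notifier_block *next;
};

static struct demo_notifier_block *chain_head;

static void demo_register(struct demo_notifier_block *nb)
{
	nb->next = chain_head;
	chain_head = nb;
}

/* Walk the chain until someone claims the event with NOTIFY_STOP. */
static int demo_call_chain(unsigned long event, void *data)
{
	for (struct demo_notifier_block *nb = chain_head; nb; nb = nb->next)
		if (nb->call(nb, event, data) == DEMO_NOTIFY_STOP)
			return DEMO_NOTIFY_STOP;
	return DEMO_NOTIFY_OK;
}

#define DEMO_DIE_NMI_IPI 7   /* arbitrary event code for the demo */

static int demo_crash_nmi_callback(struct demo_notifier_block *self,
				   unsigned long event, void *data)
{
	if (event != DEMO_DIE_NMI_IPI)
		return DEMO_NOTIFY_OK;        /* not our event, let others see it */
	puts("crash handler: saving registers, halting this CPU");
	return DEMO_NOTIFY_STOP;
}

static struct demo_notifier_block crash_nb = { .call = demo_crash_nmi_callback };

int main(void)
{
	demo_register(&crash_nb);
	demo_call_chain(3, NULL);                 /* ignored by the handler */
	demo_call_chain(DEMO_DIE_NMI_IPI, NULL);  /* handled and stopped */
	return 0;
}
```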
diff --git a/arch/i386/kernel/efi_stub.S b/arch/i386/kernel/efi_stub.S index d3ee73a3eee3..ef00bb77d7e4 100644 --- a/arch/i386/kernel/efi_stub.S +++ b/arch/i386/kernel/efi_stub.S | |||
| @@ -7,7 +7,6 @@ | |||
| 7 | 7 | ||
| 8 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
| 9 | #include <asm/page.h> | 9 | #include <asm/page.h> |
| 10 | #include <asm/pgtable.h> | ||
| 11 | 10 | ||
| 12 | /* | 11 | /* |
| 13 | * efi_call_phys(void *, ...) is a function with variable parameters. | 12 | * efi_call_phys(void *, ...) is a function with variable parameters. |
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index 87f9f60b803b..5a63d6fdb70e 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S | |||
| @@ -76,8 +76,15 @@ DF_MASK = 0x00000400 | |||
| 76 | NT_MASK = 0x00004000 | 76 | NT_MASK = 0x00004000 |
| 77 | VM_MASK = 0x00020000 | 77 | VM_MASK = 0x00020000 |
| 78 | 78 | ||
| 79 | /* These are replaced by paravirtualization */ | ||
| 80 | #define DISABLE_INTERRUPTS cli | ||
| 81 | #define ENABLE_INTERRUPTS sti | ||
| 82 | #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit | ||
| 83 | #define INTERRUPT_RETURN iret | ||
| 84 | #define GET_CR0_INTO_EAX movl %cr0, %eax | ||
| 85 | |||
| 79 | #ifdef CONFIG_PREEMPT | 86 | #ifdef CONFIG_PREEMPT |
| 80 | #define preempt_stop cli; TRACE_IRQS_OFF | 87 | #define preempt_stop DISABLE_INTERRUPTS; TRACE_IRQS_OFF |
| 81 | #else | 88 | #else |
| 82 | #define preempt_stop | 89 | #define preempt_stop |
| 83 | #define resume_kernel restore_nocheck | 90 | #define resume_kernel restore_nocheck |
| @@ -176,18 +183,21 @@ VM_MASK = 0x00020000 | |||
| 176 | 183 | ||
| 177 | #define RING0_INT_FRAME \ | 184 | #define RING0_INT_FRAME \ |
| 178 | CFI_STARTPROC simple;\ | 185 | CFI_STARTPROC simple;\ |
| 186 | CFI_SIGNAL_FRAME;\ | ||
| 179 | CFI_DEF_CFA esp, 3*4;\ | 187 | CFI_DEF_CFA esp, 3*4;\ |
| 180 | /*CFI_OFFSET cs, -2*4;*/\ | 188 | /*CFI_OFFSET cs, -2*4;*/\ |
| 181 | CFI_OFFSET eip, -3*4 | 189 | CFI_OFFSET eip, -3*4 |
| 182 | 190 | ||
| 183 | #define RING0_EC_FRAME \ | 191 | #define RING0_EC_FRAME \ |
| 184 | CFI_STARTPROC simple;\ | 192 | CFI_STARTPROC simple;\ |
| 193 | CFI_SIGNAL_FRAME;\ | ||
| 185 | CFI_DEF_CFA esp, 4*4;\ | 194 | CFI_DEF_CFA esp, 4*4;\ |
| 186 | /*CFI_OFFSET cs, -2*4;*/\ | 195 | /*CFI_OFFSET cs, -2*4;*/\ |
| 187 | CFI_OFFSET eip, -3*4 | 196 | CFI_OFFSET eip, -3*4 |
| 188 | 197 | ||
| 189 | #define RING0_PTREGS_FRAME \ | 198 | #define RING0_PTREGS_FRAME \ |
| 190 | CFI_STARTPROC simple;\ | 199 | CFI_STARTPROC simple;\ |
| 200 | CFI_SIGNAL_FRAME;\ | ||
| 191 | CFI_DEF_CFA esp, OLDESP-EBX;\ | 201 | CFI_DEF_CFA esp, OLDESP-EBX;\ |
| 192 | /*CFI_OFFSET cs, CS-OLDESP;*/\ | 202 | /*CFI_OFFSET cs, CS-OLDESP;*/\ |
| 193 | CFI_OFFSET eip, EIP-OLDESP;\ | 203 | CFI_OFFSET eip, EIP-OLDESP;\ |
| @@ -233,10 +243,11 @@ ret_from_intr: | |||
| 233 | check_userspace: | 243 | check_userspace: |
| 234 | movl EFLAGS(%esp), %eax # mix EFLAGS and CS | 244 | movl EFLAGS(%esp), %eax # mix EFLAGS and CS |
| 235 | movb CS(%esp), %al | 245 | movb CS(%esp), %al |
| 236 | testl $(VM_MASK | 3), %eax | 246 | andl $(VM_MASK | SEGMENT_RPL_MASK), %eax |
| 237 | jz resume_kernel | 247 | cmpl $USER_RPL, %eax |
| 248 | jb resume_kernel # not returning to v8086 or userspace | ||
| 238 | ENTRY(resume_userspace) | 249 | ENTRY(resume_userspace) |
| 239 | cli # make sure we don't miss an interrupt | 250 | DISABLE_INTERRUPTS # make sure we don't miss an interrupt |
| 240 | # setting need_resched or sigpending | 251 | # setting need_resched or sigpending |
| 241 | # between sampling and the iret | 252 | # between sampling and the iret |
| 242 | movl TI_flags(%ebp), %ecx | 253 | movl TI_flags(%ebp), %ecx |
| @@ -247,7 +258,7 @@ ENTRY(resume_userspace) | |||
| 247 | 258 | ||
| 248 | #ifdef CONFIG_PREEMPT | 259 | #ifdef CONFIG_PREEMPT |
| 249 | ENTRY(resume_kernel) | 260 | ENTRY(resume_kernel) |
| 250 | cli | 261 | DISABLE_INTERRUPTS |
| 251 | cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? | 262 | cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? |
| 252 | jnz restore_nocheck | 263 | jnz restore_nocheck |
| 253 | need_resched: | 264 | need_resched: |
| @@ -267,6 +278,7 @@ need_resched: | |||
| 267 | # sysenter call handler stub | 278 | # sysenter call handler stub |
| 268 | ENTRY(sysenter_entry) | 279 | ENTRY(sysenter_entry) |
| 269 | CFI_STARTPROC simple | 280 | CFI_STARTPROC simple |
| 281 | CFI_SIGNAL_FRAME | ||
| 270 | CFI_DEF_CFA esp, 0 | 282 | CFI_DEF_CFA esp, 0 |
| 271 | CFI_REGISTER esp, ebp | 283 | CFI_REGISTER esp, ebp |
| 272 | movl TSS_sysenter_esp0(%esp),%esp | 284 | movl TSS_sysenter_esp0(%esp),%esp |
| @@ -275,7 +287,7 @@ sysenter_past_esp: | |||
| 275 | * No need to follow this irqs on/off section: the syscall | 287 | * No need to follow this irqs on/off section: the syscall |
| 276 | * disabled irqs and here we enable it straight after entry: | 288 | * disabled irqs and here we enable it straight after entry: |
| 277 | */ | 289 | */ |
| 278 | sti | 290 | ENABLE_INTERRUPTS |
| 279 | pushl $(__USER_DS) | 291 | pushl $(__USER_DS) |
| 280 | CFI_ADJUST_CFA_OFFSET 4 | 292 | CFI_ADJUST_CFA_OFFSET 4 |
| 281 | /*CFI_REL_OFFSET ss, 0*/ | 293 | /*CFI_REL_OFFSET ss, 0*/ |
| @@ -320,7 +332,7 @@ sysenter_past_esp: | |||
| 320 | jae syscall_badsys | 332 | jae syscall_badsys |
| 321 | call *sys_call_table(,%eax,4) | 333 | call *sys_call_table(,%eax,4) |
| 322 | movl %eax,EAX(%esp) | 334 | movl %eax,EAX(%esp) |
| 323 | cli | 335 | DISABLE_INTERRUPTS |
| 324 | TRACE_IRQS_OFF | 336 | TRACE_IRQS_OFF |
| 325 | movl TI_flags(%ebp), %ecx | 337 | movl TI_flags(%ebp), %ecx |
| 326 | testw $_TIF_ALLWORK_MASK, %cx | 338 | testw $_TIF_ALLWORK_MASK, %cx |
| @@ -330,8 +342,7 @@ sysenter_past_esp: | |||
| 330 | movl OLDESP(%esp), %ecx | 342 | movl OLDESP(%esp), %ecx |
| 331 | xorl %ebp,%ebp | 343 | xorl %ebp,%ebp |
| 332 | TRACE_IRQS_ON | 344 | TRACE_IRQS_ON |
| 333 | sti | 345 | ENABLE_INTERRUPTS_SYSEXIT |
| 334 | sysexit | ||
| 335 | CFI_ENDPROC | 346 | CFI_ENDPROC |
| 336 | 347 | ||
| 337 | 348 | ||
| @@ -356,7 +367,7 @@ syscall_call: | |||
| 356 | call *sys_call_table(,%eax,4) | 367 | call *sys_call_table(,%eax,4) |
| 357 | movl %eax,EAX(%esp) # store the return value | 368 | movl %eax,EAX(%esp) # store the return value |
| 358 | syscall_exit: | 369 | syscall_exit: |
| 359 | cli # make sure we don't miss an interrupt | 370 | DISABLE_INTERRUPTS # make sure we don't miss an interrupt |
| 360 | # setting need_resched or sigpending | 371 | # setting need_resched or sigpending |
| 361 | # between sampling and the iret | 372 | # between sampling and the iret |
| 362 | TRACE_IRQS_OFF | 373 | TRACE_IRQS_OFF |
| @@ -371,8 +382,8 @@ restore_all: | |||
| 371 | # See comments in process.c:copy_thread() for details. | 382 | # See comments in process.c:copy_thread() for details. |
| 372 | movb OLDSS(%esp), %ah | 383 | movb OLDSS(%esp), %ah |
| 373 | movb CS(%esp), %al | 384 | movb CS(%esp), %al |
| 374 | andl $(VM_MASK | (4 << 8) | 3), %eax | 385 | andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax |
| 375 | cmpl $((4 << 8) | 3), %eax | 386 | cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax |
| 376 | CFI_REMEMBER_STATE | 387 | CFI_REMEMBER_STATE |
| 377 | je ldt_ss # returning to user-space with LDT SS | 388 | je ldt_ss # returning to user-space with LDT SS |
| 378 | restore_nocheck: | 389 | restore_nocheck: |
| @@ -381,11 +392,11 @@ restore_nocheck_notrace: | |||
| 381 | RESTORE_REGS | 392 | RESTORE_REGS |
| 382 | addl $4, %esp | 393 | addl $4, %esp |
| 383 | CFI_ADJUST_CFA_OFFSET -4 | 394 | CFI_ADJUST_CFA_OFFSET -4 |
| 384 | 1: iret | 395 | 1: INTERRUPT_RETURN |
| 385 | .section .fixup,"ax" | 396 | .section .fixup,"ax" |
| 386 | iret_exc: | 397 | iret_exc: |
| 387 | TRACE_IRQS_ON | 398 | TRACE_IRQS_ON |
| 388 | sti | 399 | ENABLE_INTERRUPTS |
| 389 | pushl $0 # no error code | 400 | pushl $0 # no error code |
| 390 | pushl $do_iret_error | 401 | pushl $do_iret_error |
| 391 | jmp error_code | 402 | jmp error_code |
| @@ -409,7 +420,7 @@ ldt_ss: | |||
| 409 | * dosemu and wine happy. */ | 420 | * dosemu and wine happy. */ |
| 410 | subl $8, %esp # reserve space for switch16 pointer | 421 | subl $8, %esp # reserve space for switch16 pointer |
| 411 | CFI_ADJUST_CFA_OFFSET 8 | 422 | CFI_ADJUST_CFA_OFFSET 8 |
| 412 | cli | 423 | DISABLE_INTERRUPTS |
| 413 | TRACE_IRQS_OFF | 424 | TRACE_IRQS_OFF |
| 414 | movl %esp, %eax | 425 | movl %esp, %eax |
| 415 | /* Set up the 16bit stack frame with switch32 pointer on top, | 426 | /* Set up the 16bit stack frame with switch32 pointer on top, |
| @@ -419,7 +430,7 @@ ldt_ss: | |||
| 419 | TRACE_IRQS_IRET | 430 | TRACE_IRQS_IRET |
| 420 | RESTORE_REGS | 431 | RESTORE_REGS |
| 421 | lss 20+4(%esp), %esp # switch to 16bit stack | 432 | lss 20+4(%esp), %esp # switch to 16bit stack |
| 422 | 1: iret | 433 | 1: INTERRUPT_RETURN |
| 423 | .section __ex_table,"a" | 434 | .section __ex_table,"a" |
| 424 | .align 4 | 435 | .align 4 |
| 425 | .long 1b,iret_exc | 436 | .long 1b,iret_exc |
| @@ -434,7 +445,7 @@ work_pending: | |||
| 434 | jz work_notifysig | 445 | jz work_notifysig |
| 435 | work_resched: | 446 | work_resched: |
| 436 | call schedule | 447 | call schedule |
| 437 | cli # make sure we don't miss an interrupt | 448 | DISABLE_INTERRUPTS # make sure we don't miss an interrupt |
| 438 | # setting need_resched or sigpending | 449 | # setting need_resched or sigpending |
| 439 | # between sampling and the iret | 450 | # between sampling and the iret |
| 440 | TRACE_IRQS_OFF | 451 | TRACE_IRQS_OFF |
| @@ -490,7 +501,7 @@ syscall_exit_work: | |||
| 490 | testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl | 501 | testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl |
| 491 | jz work_pending | 502 | jz work_pending |
| 492 | TRACE_IRQS_ON | 503 | TRACE_IRQS_ON |
| 493 | sti # could let do_syscall_trace() call | 504 | ENABLE_INTERRUPTS # could let do_syscall_trace() call |
| 494 | # schedule() instead | 505 | # schedule() instead |
| 495 | movl %esp, %eax | 506 | movl %esp, %eax |
| 496 | movl $1, %edx | 507 | movl $1, %edx |
| @@ -591,11 +602,9 @@ ENTRY(name) \ | |||
| 591 | /* The include is where all of the SMP etc. interrupts come from */ | 602 | /* The include is where all of the SMP etc. interrupts come from */ |
| 592 | #include "entry_arch.h" | 603 | #include "entry_arch.h" |
| 593 | 604 | ||
| 594 | ENTRY(divide_error) | 605 | KPROBE_ENTRY(page_fault) |
| 595 | RING0_INT_FRAME | 606 | RING0_EC_FRAME |
| 596 | pushl $0 # no error code | 607 | pushl $do_page_fault |
| 597 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 598 | pushl $do_divide_error | ||
| 599 | CFI_ADJUST_CFA_OFFSET 4 | 608 | CFI_ADJUST_CFA_OFFSET 4 |
| 600 | ALIGN | 609 | ALIGN |
| 601 | error_code: | 610 | error_code: |
| @@ -645,6 +654,7 @@ error_code: | |||
| 645 | call *%edi | 654 | call *%edi |
| 646 | jmp ret_from_exception | 655 | jmp ret_from_exception |
| 647 | CFI_ENDPROC | 656 | CFI_ENDPROC |
| 657 | KPROBE_END(page_fault) | ||
| 648 | 658 | ||
| 649 | ENTRY(coprocessor_error) | 659 | ENTRY(coprocessor_error) |
| 650 | RING0_INT_FRAME | 660 | RING0_INT_FRAME |
| @@ -669,7 +679,7 @@ ENTRY(device_not_available) | |||
| 669 | pushl $-1 # mark this as an int | 679 | pushl $-1 # mark this as an int |
| 670 | CFI_ADJUST_CFA_OFFSET 4 | 680 | CFI_ADJUST_CFA_OFFSET 4 |
| 671 | SAVE_ALL | 681 | SAVE_ALL |
| 672 | movl %cr0, %eax | 682 | GET_CR0_INTO_EAX |
| 673 | testl $0x4, %eax # EM (math emulation bit) | 683 | testl $0x4, %eax # EM (math emulation bit) |
| 674 | jne device_not_available_emulate | 684 | jne device_not_available_emulate |
| 675 | preempt_stop | 685 | preempt_stop |
| @@ -702,9 +712,15 @@ device_not_available_emulate: | |||
| 702 | jne ok; \ | 712 | jne ok; \ |
| 703 | label: \ | 713 | label: \ |
| 704 | movl TSS_sysenter_esp0+offset(%esp),%esp; \ | 714 | movl TSS_sysenter_esp0+offset(%esp),%esp; \ |
| 715 | CFI_DEF_CFA esp, 0; \ | ||
| 716 | CFI_UNDEFINED eip; \ | ||
| 705 | pushfl; \ | 717 | pushfl; \ |
| 718 | CFI_ADJUST_CFA_OFFSET 4; \ | ||
| 706 | pushl $__KERNEL_CS; \ | 719 | pushl $__KERNEL_CS; \ |
| 707 | pushl $sysenter_past_esp | 720 | CFI_ADJUST_CFA_OFFSET 4; \ |
| 721 | pushl $sysenter_past_esp; \ | ||
| 722 | CFI_ADJUST_CFA_OFFSET 4; \ | ||
| 723 | CFI_REL_OFFSET eip, 0 | ||
| 708 | 724 | ||
| 709 | KPROBE_ENTRY(debug) | 725 | KPROBE_ENTRY(debug) |
| 710 | RING0_INT_FRAME | 726 | RING0_INT_FRAME |
| @@ -720,7 +736,8 @@ debug_stack_correct: | |||
| 720 | call do_debug | 736 | call do_debug |
| 721 | jmp ret_from_exception | 737 | jmp ret_from_exception |
| 722 | CFI_ENDPROC | 738 | CFI_ENDPROC |
| 723 | .previous .text | 739 | KPROBE_END(debug) |
| 740 | |||
| 724 | /* | 741 | /* |
| 725 | * NMI is doubly nasty. It can happen _while_ we're handling | 742 | * NMI is doubly nasty. It can happen _while_ we're handling |
| 726 | * a debug fault, and the debug fault hasn't yet been able to | 743 | * a debug fault, and the debug fault hasn't yet been able to |
| @@ -729,7 +746,7 @@ debug_stack_correct: | |||
| 729 | * check whether we got an NMI on the debug path where the debug | 746 | * check whether we got an NMI on the debug path where the debug |
| 730 | * fault happened on the sysenter path. | 747 | * fault happened on the sysenter path. |
| 731 | */ | 748 | */ |
| 732 | ENTRY(nmi) | 749 | KPROBE_ENTRY(nmi) |
| 733 | RING0_INT_FRAME | 750 | RING0_INT_FRAME |
| 734 | pushl %eax | 751 | pushl %eax |
| 735 | CFI_ADJUST_CFA_OFFSET 4 | 752 | CFI_ADJUST_CFA_OFFSET 4 |
| @@ -754,6 +771,7 @@ ENTRY(nmi) | |||
| 754 | cmpl $sysenter_entry,12(%esp) | 771 | cmpl $sysenter_entry,12(%esp) |
| 755 | je nmi_debug_stack_check | 772 | je nmi_debug_stack_check |
| 756 | nmi_stack_correct: | 773 | nmi_stack_correct: |
| 774 | /* We have a RING0_INT_FRAME here */ | ||
| 757 | pushl %eax | 775 | pushl %eax |
| 758 | CFI_ADJUST_CFA_OFFSET 4 | 776 | CFI_ADJUST_CFA_OFFSET 4 |
| 759 | SAVE_ALL | 777 | SAVE_ALL |
| @@ -764,9 +782,12 @@ nmi_stack_correct: | |||
| 764 | CFI_ENDPROC | 782 | CFI_ENDPROC |
| 765 | 783 | ||
| 766 | nmi_stack_fixup: | 784 | nmi_stack_fixup: |
| 785 | RING0_INT_FRAME | ||
| 767 | FIX_STACK(12,nmi_stack_correct, 1) | 786 | FIX_STACK(12,nmi_stack_correct, 1) |
| 768 | jmp nmi_stack_correct | 787 | jmp nmi_stack_correct |
| 788 | |||
| 769 | nmi_debug_stack_check: | 789 | nmi_debug_stack_check: |
| 790 | /* We have a RING0_INT_FRAME here */ | ||
| 770 | cmpw $__KERNEL_CS,16(%esp) | 791 | cmpw $__KERNEL_CS,16(%esp) |
| 771 | jne nmi_stack_correct | 792 | jne nmi_stack_correct |
| 772 | cmpl $debug,(%esp) | 793 | cmpl $debug,(%esp) |
| @@ -777,8 +798,10 @@ nmi_debug_stack_check: | |||
| 777 | jmp nmi_stack_correct | 798 | jmp nmi_stack_correct |
| 778 | 799 | ||
| 779 | nmi_16bit_stack: | 800 | nmi_16bit_stack: |
| 780 | RING0_INT_FRAME | 801 | /* We have a RING0_INT_FRAME here. |
| 781 | /* create the pointer to lss back */ | 802 | * |
| 803 | * create the pointer to lss back | ||
| 804 | */ | ||
| 782 | pushl %ss | 805 | pushl %ss |
| 783 | CFI_ADJUST_CFA_OFFSET 4 | 806 | CFI_ADJUST_CFA_OFFSET 4 |
| 784 | pushl %esp | 807 | pushl %esp |
| @@ -799,12 +822,13 @@ nmi_16bit_stack: | |||
| 799 | call do_nmi | 822 | call do_nmi |
| 800 | RESTORE_REGS | 823 | RESTORE_REGS |
| 801 | lss 12+4(%esp), %esp # back to 16bit stack | 824 | lss 12+4(%esp), %esp # back to 16bit stack |
| 802 | 1: iret | 825 | 1: INTERRUPT_RETURN |
| 803 | CFI_ENDPROC | 826 | CFI_ENDPROC |
| 804 | .section __ex_table,"a" | 827 | .section __ex_table,"a" |
| 805 | .align 4 | 828 | .align 4 |
| 806 | .long 1b,iret_exc | 829 | .long 1b,iret_exc |
| 807 | .previous | 830 | .previous |
| 831 | KPROBE_END(nmi) | ||
| 808 | 832 | ||
| 809 | KPROBE_ENTRY(int3) | 833 | KPROBE_ENTRY(int3) |
| 810 | RING0_INT_FRAME | 834 | RING0_INT_FRAME |
| @@ -816,7 +840,7 @@ KPROBE_ENTRY(int3) | |||
| 816 | call do_int3 | 840 | call do_int3 |
| 817 | jmp ret_from_exception | 841 | jmp ret_from_exception |
| 818 | CFI_ENDPROC | 842 | CFI_ENDPROC |
| 819 | .previous .text | 843 | KPROBE_END(int3) |
| 820 | 844 | ||
| 821 | ENTRY(overflow) | 845 | ENTRY(overflow) |
| 822 | RING0_INT_FRAME | 846 | RING0_INT_FRAME |
| @@ -881,7 +905,7 @@ KPROBE_ENTRY(general_protection) | |||
| 881 | CFI_ADJUST_CFA_OFFSET 4 | 905 | CFI_ADJUST_CFA_OFFSET 4 |
| 882 | jmp error_code | 906 | jmp error_code |
| 883 | CFI_ENDPROC | 907 | CFI_ENDPROC |
| 884 | .previous .text | 908 | KPROBE_END(general_protection) |
| 885 | 909 | ||
| 886 | ENTRY(alignment_check) | 910 | ENTRY(alignment_check) |
| 887 | RING0_EC_FRAME | 911 | RING0_EC_FRAME |
| @@ -890,13 +914,14 @@ ENTRY(alignment_check) | |||
| 890 | jmp error_code | 914 | jmp error_code |
| 891 | CFI_ENDPROC | 915 | CFI_ENDPROC |
| 892 | 916 | ||
| 893 | KPROBE_ENTRY(page_fault) | 917 | ENTRY(divide_error) |
| 894 | RING0_EC_FRAME | 918 | RING0_INT_FRAME |
| 895 | pushl $do_page_fault | 919 | pushl $0 # no error code |
| 920 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 921 | pushl $do_divide_error | ||
| 896 | CFI_ADJUST_CFA_OFFSET 4 | 922 | CFI_ADJUST_CFA_OFFSET 4 |
| 897 | jmp error_code | 923 | jmp error_code |
| 898 | CFI_ENDPROC | 924 | CFI_ENDPROC |
| 899 | .previous .text | ||
| 900 | 925 | ||
| 901 | #ifdef CONFIG_X86_MCE | 926 | #ifdef CONFIG_X86_MCE |
| 902 | ENTRY(machine_check) | 927 | ENTRY(machine_check) |
| @@ -949,6 +974,19 @@ ENTRY(arch_unwind_init_running) | |||
| 949 | ENDPROC(arch_unwind_init_running) | 974 | ENDPROC(arch_unwind_init_running) |
| 950 | #endif | 975 | #endif |
| 951 | 976 | ||
| 977 | ENTRY(kernel_thread_helper) | ||
| 978 | pushl $0 # fake return address for unwinder | ||
| 979 | CFI_STARTPROC | ||
| 980 | movl %edx,%eax | ||
| 981 | push %edx | ||
| 982 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 983 | call *%ebx | ||
| 984 | push %eax | ||
| 985 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 986 | call do_exit | ||
| 987 | CFI_ENDPROC | ||
| 988 | ENDPROC(kernel_thread_helper) | ||
| 989 | |||
| 952 | .section .rodata,"a" | 990 | .section .rodata,"a" |
| 953 | #include "syscall_table.S" | 991 | #include "syscall_table.S" |
| 954 | 992 | ||
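The entry.S handlers that used to drop back to the regular text section with a bare ".previous .text" now close with KPROBE_END(name), pairing with the KPROBE_ENTRY(name) that opened them. As a minimal sketch only (the in-tree macros also emit symbol-size and unwind annotations), such a pair amounts to:

    #ifdef CONFIG_KPROBES
    /* put the handler body in .kprobes.text so kprobes refuses to patch it */
    # define KPROBE_ENTRY(name)                  \
            .pushsection .kprobes.text, "ax";    \
            ENTRY(name)
    /* close the handler and return to the previously active section */
    # define KPROBE_END(name)        .popsection
    #else
    # define KPROBE_ENTRY(name)      ENTRY(name)
    # define KPROBE_END(name)
    #endif

Bracketing debug, nmi, int3 and general_protection this way keeps those paths un-probeable without repeating the section switch by hand at every handler.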
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index a6b8bd89aa27..be9d883c62ce 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
| @@ -371,8 +371,65 @@ rp_sidt: | |||
| 371 | addl $8,%edi | 371 | addl $8,%edi |
| 372 | dec %ecx | 372 | dec %ecx |
| 373 | jne rp_sidt | 373 | jne rp_sidt |
| 374 | |||
| 375 | .macro set_early_handler handler,trapno | ||
| 376 | lea \handler,%edx | ||
| 377 | movl $(__KERNEL_CS << 16),%eax | ||
| 378 | movw %dx,%ax | ||
| 379 | movw $0x8E00,%dx /* interrupt gate - dpl=0, present */ | ||
| 380 | lea idt_table,%edi | ||
| 381 | movl %eax,8*\trapno(%edi) | ||
| 382 | movl %edx,8*\trapno+4(%edi) | ||
| 383 | .endm | ||
| 384 | |||
| 385 | set_early_handler handler=early_divide_err,trapno=0 | ||
| 386 | set_early_handler handler=early_illegal_opcode,trapno=6 | ||
| 387 | set_early_handler handler=early_protection_fault,trapno=13 | ||
| 388 | set_early_handler handler=early_page_fault,trapno=14 | ||
| 389 | |||
| 374 | ret | 390 | ret |
| 375 | 391 | ||
| 392 | early_divide_err: | ||
| 393 | xor %edx,%edx | ||
| 394 | pushl $0 /* fake errcode */ | ||
| 395 | jmp early_fault | ||
| 396 | |||
| 397 | early_illegal_opcode: | ||
| 398 | movl $6,%edx | ||
| 399 | pushl $0 /* fake errcode */ | ||
| 400 | jmp early_fault | ||
| 401 | |||
| 402 | early_protection_fault: | ||
| 403 | movl $13,%edx | ||
| 404 | jmp early_fault | ||
| 405 | |||
| 406 | early_page_fault: | ||
| 407 | movl $14,%edx | ||
| 408 | jmp early_fault | ||
| 409 | |||
| 410 | early_fault: | ||
| 411 | cld | ||
| 412 | #ifdef CONFIG_PRINTK | ||
| 413 | movl $(__KERNEL_DS),%eax | ||
| 414 | movl %eax,%ds | ||
| 415 | movl %eax,%es | ||
| 416 | cmpl $2,early_recursion_flag | ||
| 417 | je hlt_loop | ||
| 418 | incl early_recursion_flag | ||
| 419 | movl %cr2,%eax | ||
| 420 | pushl %eax | ||
| 421 | pushl %edx /* trapno */ | ||
| 422 | pushl $fault_msg | ||
| 423 | #ifdef CONFIG_EARLY_PRINTK | ||
| 424 | call early_printk | ||
| 425 | #else | ||
| 426 | call printk | ||
| 427 | #endif | ||
| 428 | #endif | ||
| 429 | hlt_loop: | ||
| 430 | hlt | ||
| 431 | jmp hlt_loop | ||
| 432 | |||
| 376 | /* This is the default interrupt "handler" :-) */ | 433 | /* This is the default interrupt "handler" :-) */ |
| 377 | ALIGN | 434 | ALIGN |
| 378 | ignore_int: | 435 | ignore_int: |
| @@ -386,6 +443,9 @@ ignore_int: | |||
| 386 | movl $(__KERNEL_DS),%eax | 443 | movl $(__KERNEL_DS),%eax |
| 387 | movl %eax,%ds | 444 | movl %eax,%ds |
| 388 | movl %eax,%es | 445 | movl %eax,%es |
| 446 | cmpl $2,early_recursion_flag | ||
| 447 | je hlt_loop | ||
| 448 | incl early_recursion_flag | ||
| 389 | pushl 16(%esp) | 449 | pushl 16(%esp) |
| 390 | pushl 24(%esp) | 450 | pushl 24(%esp) |
| 391 | pushl 32(%esp) | 451 | pushl 32(%esp) |
| @@ -431,9 +491,16 @@ ENTRY(stack_start) | |||
| 431 | 491 | ||
| 432 | ready: .byte 0 | 492 | ready: .byte 0 |
| 433 | 493 | ||
| 494 | early_recursion_flag: | ||
| 495 | .long 0 | ||
| 496 | |||
| 434 | int_msg: | 497 | int_msg: |
| 435 | .asciz "Unknown interrupt or fault at EIP %p %p %p\n" | 498 | .asciz "Unknown interrupt or fault at EIP %p %p %p\n" |
| 436 | 499 | ||
| 500 | fault_msg: | ||
| 501 | .ascii "Int %d: CR2 %p err %p EIP %p CS %p flags %p\n" | ||
| 502 | .asciz "Stack: %p %p %p %p %p %p %p %p\n" | ||
| 503 | |||
| 437 | /* | 504 | /* |
| 438 | * The IDT and GDT 'descriptors' are a strange 48-bit object | 505 | * The IDT and GDT 'descriptors' are a strange 48-bit object |
| 439 | * only used by the lidt and lgdt instructions. They are not | 506 | * only used by the lidt and lgdt instructions. They are not |
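The set_early_handler macro above assembles each early IDT slot by hand: after the movw shuffles, %eax holds the kernel code selector in its top half plus the low 16 bits of the handler address, while %edx holds the high 16 bits of the address together with the 0x8E00 gate-type bits. The same descriptor layout written in C, purely as an illustration (make_early_gate is not a function in this tree):

    #include <stdint.h>

    /* i386 interrupt gate: present, DPL 0, 32-bit gate (type byte 0x8E) */
    struct idt_gate {
            uint32_t lo;    /* selector in bits 31..16 | handler offset 15..0 */
            uint32_t hi;    /* handler offset 31..16   | 0x8E00               */
    };

    static struct idt_gate make_early_gate(uint32_t handler, uint16_t selector)
    {
            struct idt_gate g;

            g.lo = ((uint32_t)selector << 16) | (handler & 0xffffu);
            g.hi = (handler & 0xffff0000u) | 0x8e00u;
            return g;
    }

Writing the two words straight into idt_table at 8*trapno is safe this early because rp_sidt has just filled every slot with the ignore_int stub.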
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index d4756d154f47..ea5f4e7958d8 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
| @@ -45,6 +45,8 @@ static void end_8259A_irq (unsigned int irq) | |||
| 45 | 45 | ||
| 46 | #define shutdown_8259A_irq disable_8259A_irq | 46 | #define shutdown_8259A_irq disable_8259A_irq |
| 47 | 47 | ||
| 48 | static int i8259A_auto_eoi; | ||
| 49 | |||
| 48 | static void mask_and_ack_8259A(unsigned int); | 50 | static void mask_and_ack_8259A(unsigned int); |
| 49 | 51 | ||
| 50 | unsigned int startup_8259A_irq(unsigned int irq) | 52 | unsigned int startup_8259A_irq(unsigned int irq) |
| @@ -253,7 +255,7 @@ static void save_ELCR(char *trigger) | |||
| 253 | 255 | ||
| 254 | static int i8259A_resume(struct sys_device *dev) | 256 | static int i8259A_resume(struct sys_device *dev) |
| 255 | { | 257 | { |
| 256 | init_8259A(0); | 258 | init_8259A(i8259A_auto_eoi); |
| 257 | restore_ELCR(irq_trigger); | 259 | restore_ELCR(irq_trigger); |
| 258 | return 0; | 260 | return 0; |
| 259 | } | 261 | } |
| @@ -301,6 +303,8 @@ void init_8259A(int auto_eoi) | |||
| 301 | { | 303 | { |
| 302 | unsigned long flags; | 304 | unsigned long flags; |
| 303 | 305 | ||
| 306 | i8259A_auto_eoi = auto_eoi; | ||
| 307 | |||
| 304 | spin_lock_irqsave(&i8259A_lock, flags); | 308 | spin_lock_irqsave(&i8259A_lock, flags); |
| 305 | 309 | ||
| 306 | outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ | 310 | outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ |
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 4fb32c551fe0..fd0df75cfbda 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
| @@ -40,6 +40,7 @@ | |||
| 40 | #include <asm/nmi.h> | 40 | #include <asm/nmi.h> |
| 41 | 41 | ||
| 42 | #include <mach_apic.h> | 42 | #include <mach_apic.h> |
| 43 | #include <mach_apicdef.h> | ||
| 43 | 44 | ||
| 44 | #include "io_ports.h" | 45 | #include "io_ports.h" |
| 45 | 46 | ||
| @@ -65,7 +66,7 @@ int sis_apic_bug = -1; | |||
| 65 | */ | 66 | */ |
| 66 | int nr_ioapic_registers[MAX_IO_APICS]; | 67 | int nr_ioapic_registers[MAX_IO_APICS]; |
| 67 | 68 | ||
| 68 | int disable_timer_pin_1 __initdata; | 69 | static int disable_timer_pin_1 __initdata; |
| 69 | 70 | ||
| 70 | /* | 71 | /* |
| 71 | * Rough estimation of how many shared IRQs there are, can | 72 | * Rough estimation of how many shared IRQs there are, can |
| @@ -93,6 +94,34 @@ int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1}; | |||
| 93 | #define vector_to_irq(vector) (vector) | 94 | #define vector_to_irq(vector) (vector) |
| 94 | #endif | 95 | #endif |
| 95 | 96 | ||
| 97 | |||
| 98 | union entry_union { | ||
| 99 | struct { u32 w1, w2; }; | ||
| 100 | struct IO_APIC_route_entry entry; | ||
| 101 | }; | ||
| 102 | |||
| 103 | static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) | ||
| 104 | { | ||
| 105 | union entry_union eu; | ||
| 106 | unsigned long flags; | ||
| 107 | spin_lock_irqsave(&ioapic_lock, flags); | ||
| 108 | eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); | ||
| 109 | eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); | ||
| 110 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
| 111 | return eu.entry; | ||
| 112 | } | ||
| 113 | |||
| 114 | static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) | ||
| 115 | { | ||
| 116 | unsigned long flags; | ||
| 117 | union entry_union eu; | ||
| 118 | eu.entry = e; | ||
| 119 | spin_lock_irqsave(&ioapic_lock, flags); | ||
| 120 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); | ||
| 121 | io_apic_write(apic, 0x11 + 2*pin, eu.w2); | ||
| 122 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
| 123 | } | ||
| 124 | |||
| 96 | /* | 125 | /* |
| 97 | * The common case is 1:1 IRQ<->pin mappings. Sometimes there are | 126 | * The common case is 1:1 IRQ<->pin mappings. Sometimes there are |
| 98 | * shared ISA-space IRQs, so we have to support them. We are super | 127 | * shared ISA-space IRQs, so we have to support them. We are super |
| @@ -200,13 +229,9 @@ static void unmask_IO_APIC_irq (unsigned int irq) | |||
| 200 | static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) | 229 | static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) |
| 201 | { | 230 | { |
| 202 | struct IO_APIC_route_entry entry; | 231 | struct IO_APIC_route_entry entry; |
| 203 | unsigned long flags; | ||
| 204 | 232 | ||
| 205 | /* Check delivery_mode to be sure we're not clearing an SMI pin */ | 233 | /* Check delivery_mode to be sure we're not clearing an SMI pin */ |
| 206 | spin_lock_irqsave(&ioapic_lock, flags); | 234 | entry = ioapic_read_entry(apic, pin); |
| 207 | *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin); | ||
| 208 | *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin); | ||
| 209 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
| 210 | if (entry.delivery_mode == dest_SMI) | 235 | if (entry.delivery_mode == dest_SMI) |
| 211 | return; | 236 | return; |
| 212 | 237 | ||
| @@ -215,10 +240,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) | |||
| 215 | */ | 240 | */ |
| 216 | memset(&entry, 0, sizeof(entry)); | 241 | memset(&entry, 0, sizeof(entry)); |
| 217 | entry.mask = 1; | 242 | entry.mask = 1; |
| 218 | spin_lock_irqsave(&ioapic_lock, flags); | 243 | ioapic_write_entry(apic, pin, entry); |
| 219 | io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0)); | ||
| 220 | io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1)); | ||
| 221 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
| 222 | } | 244 | } |
| 223 | 245 | ||
| 224 | static void clear_IO_APIC (void) | 246 | static void clear_IO_APIC (void) |
| @@ -1283,9 +1305,8 @@ static void __init setup_IO_APIC_irqs(void) | |||
| 1283 | if (!apic && (irq < 16)) | 1305 | if (!apic && (irq < 16)) |
| 1284 | disable_8259A_irq(irq); | 1306 | disable_8259A_irq(irq); |
| 1285 | } | 1307 | } |
| 1308 | ioapic_write_entry(apic, pin, entry); | ||
| 1286 | spin_lock_irqsave(&ioapic_lock, flags); | 1309 | spin_lock_irqsave(&ioapic_lock, flags); |
| 1287 | io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1)); | ||
| 1288 | io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0)); | ||
| 1289 | set_native_irq_info(irq, TARGET_CPUS); | 1310 | set_native_irq_info(irq, TARGET_CPUS); |
| 1290 | spin_unlock_irqrestore(&ioapic_lock, flags); | 1311 | spin_unlock_irqrestore(&ioapic_lock, flags); |
| 1291 | } | 1312 | } |
| @@ -1301,7 +1322,6 @@ static void __init setup_IO_APIC_irqs(void) | |||
| 1301 | static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector) | 1322 | static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector) |
| 1302 | { | 1323 | { |
| 1303 | struct IO_APIC_route_entry entry; | 1324 | struct IO_APIC_route_entry entry; |
| 1304 | unsigned long flags; | ||
| 1305 | 1325 | ||
| 1306 | memset(&entry,0,sizeof(entry)); | 1326 | memset(&entry,0,sizeof(entry)); |
| 1307 | 1327 | ||
| @@ -1331,10 +1351,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in | |||
| 1331 | /* | 1351 | /* |
| 1332 | * Add it to the IO-APIC irq-routing table: | 1352 | * Add it to the IO-APIC irq-routing table: |
| 1333 | */ | 1353 | */ |
| 1334 | spin_lock_irqsave(&ioapic_lock, flags); | 1354 | ioapic_write_entry(apic, pin, entry); |
| 1335 | io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1)); | ||
| 1336 | io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0)); | ||
| 1337 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
| 1338 | 1355 | ||
| 1339 | enable_8259A_irq(0); | 1356 | enable_8259A_irq(0); |
| 1340 | } | 1357 | } |
| @@ -1444,10 +1461,7 @@ void __init print_IO_APIC(void) | |||
| 1444 | for (i = 0; i <= reg_01.bits.entries; i++) { | 1461 | for (i = 0; i <= reg_01.bits.entries; i++) { |
| 1445 | struct IO_APIC_route_entry entry; | 1462 | struct IO_APIC_route_entry entry; |
| 1446 | 1463 | ||
| 1447 | spin_lock_irqsave(&ioapic_lock, flags); | 1464 | entry = ioapic_read_entry(apic, i); |
| 1448 | *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2); | ||
| 1449 | *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2); | ||
| 1450 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
| 1451 | 1465 | ||
| 1452 | printk(KERN_DEBUG " %02x %03X %02X ", | 1466 | printk(KERN_DEBUG " %02x %03X %02X ", |
| 1453 | i, | 1467 | i, |
| @@ -1666,10 +1680,7 @@ static void __init enable_IO_APIC(void) | |||
| 1666 | /* See if any of the pins is in ExtINT mode */ | 1680 | /* See if any of the pins is in ExtINT mode */ |
| 1667 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { | 1681 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { |
| 1668 | struct IO_APIC_route_entry entry; | 1682 | struct IO_APIC_route_entry entry; |
| 1669 | spin_lock_irqsave(&ioapic_lock, flags); | 1683 | entry = ioapic_read_entry(apic, pin); |
| 1670 | *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin); | ||
| 1671 | *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin); | ||
| 1672 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
| 1673 | 1684 | ||
| 1674 | 1685 | ||
| 1675 | /* If the interrupt line is enabled and in ExtInt mode | 1686 | /* If the interrupt line is enabled and in ExtInt mode |
| @@ -1726,7 +1737,6 @@ void disable_IO_APIC(void) | |||
| 1726 | */ | 1737 | */ |
| 1727 | if (ioapic_i8259.pin != -1) { | 1738 | if (ioapic_i8259.pin != -1) { |
| 1728 | struct IO_APIC_route_entry entry; | 1739 | struct IO_APIC_route_entry entry; |
| 1729 | unsigned long flags; | ||
| 1730 | 1740 | ||
| 1731 | memset(&entry, 0, sizeof(entry)); | 1741 | memset(&entry, 0, sizeof(entry)); |
| 1732 | entry.mask = 0; /* Enabled */ | 1742 | entry.mask = 0; /* Enabled */ |
| @@ -1743,12 +1753,7 @@ void disable_IO_APIC(void) | |||
| 1743 | /* | 1753 | /* |
| 1744 | * Add it to the IO-APIC irq-routing table: | 1754 | * Add it to the IO-APIC irq-routing table: |
| 1745 | */ | 1755 | */ |
| 1746 | spin_lock_irqsave(&ioapic_lock, flags); | 1756 | ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); |
| 1747 | io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin, | ||
| 1748 | *(((int *)&entry)+1)); | ||
| 1749 | io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin, | ||
| 1750 | *(((int *)&entry)+0)); | ||
| 1751 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
| 1752 | } | 1757 | } |
| 1753 | disconnect_bsp_APIC(ioapic_i8259.pin != -1); | 1758 | disconnect_bsp_APIC(ioapic_i8259.pin != -1); |
| 1754 | } | 1759 | } |
| @@ -2213,17 +2218,13 @@ static inline void unlock_ExtINT_logic(void) | |||
| 2213 | int apic, pin, i; | 2218 | int apic, pin, i; |
| 2214 | struct IO_APIC_route_entry entry0, entry1; | 2219 | struct IO_APIC_route_entry entry0, entry1; |
| 2215 | unsigned char save_control, save_freq_select; | 2220 | unsigned char save_control, save_freq_select; |
| 2216 | unsigned long flags; | ||
| 2217 | 2221 | ||
| 2218 | pin = find_isa_irq_pin(8, mp_INT); | 2222 | pin = find_isa_irq_pin(8, mp_INT); |
| 2219 | apic = find_isa_irq_apic(8, mp_INT); | 2223 | apic = find_isa_irq_apic(8, mp_INT); |
| 2220 | if (pin == -1) | 2224 | if (pin == -1) |
| 2221 | return; | 2225 | return; |
| 2222 | 2226 | ||
| 2223 | spin_lock_irqsave(&ioapic_lock, flags); | 2227 | entry0 = ioapic_read_entry(apic, pin); |
| 2224 | *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin); | ||
| 2225 | *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin); | ||
| 2226 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
| 2227 | clear_IO_APIC_pin(apic, pin); | 2228 | clear_IO_APIC_pin(apic, pin); |
| 2228 | 2229 | ||
| 2229 | memset(&entry1, 0, sizeof(entry1)); | 2230 | memset(&entry1, 0, sizeof(entry1)); |
| @@ -2236,10 +2237,7 @@ static inline void unlock_ExtINT_logic(void) | |||
| 2236 | entry1.trigger = 0; | 2237 | entry1.trigger = 0; |
| 2237 | entry1.vector = 0; | 2238 | entry1.vector = 0; |
| 2238 | 2239 | ||
| 2239 | spin_lock_irqsave(&ioapic_lock, flags); | 2240 | ioapic_write_entry(apic, pin, entry1); |
| 2240 | io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1)); | ||
| 2241 | io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0)); | ||
| 2242 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
| 2243 | 2241 | ||
| 2244 | save_control = CMOS_READ(RTC_CONTROL); | 2242 | save_control = CMOS_READ(RTC_CONTROL); |
| 2245 | save_freq_select = CMOS_READ(RTC_FREQ_SELECT); | 2243 | save_freq_select = CMOS_READ(RTC_FREQ_SELECT); |
| @@ -2258,10 +2256,7 @@ static inline void unlock_ExtINT_logic(void) | |||
| 2258 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); | 2256 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); |
| 2259 | clear_IO_APIC_pin(apic, pin); | 2257 | clear_IO_APIC_pin(apic, pin); |
| 2260 | 2258 | ||
| 2261 | spin_lock_irqsave(&ioapic_lock, flags); | 2259 | ioapic_write_entry(apic, pin, entry0); |
| 2262 | io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1)); | ||
| 2263 | io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0)); | ||
| 2264 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
| 2265 | } | 2260 | } |
| 2266 | 2261 | ||
| 2267 | int timer_uses_ioapic_pin_0; | 2262 | int timer_uses_ioapic_pin_0; |
| @@ -2461,17 +2456,12 @@ static int ioapic_suspend(struct sys_device *dev, pm_message_t state) | |||
| 2461 | { | 2456 | { |
| 2462 | struct IO_APIC_route_entry *entry; | 2457 | struct IO_APIC_route_entry *entry; |
| 2463 | struct sysfs_ioapic_data *data; | 2458 | struct sysfs_ioapic_data *data; |
| 2464 | unsigned long flags; | ||
| 2465 | int i; | 2459 | int i; |
| 2466 | 2460 | ||
| 2467 | data = container_of(dev, struct sysfs_ioapic_data, dev); | 2461 | data = container_of(dev, struct sysfs_ioapic_data, dev); |
| 2468 | entry = data->entry; | 2462 | entry = data->entry; |
| 2469 | spin_lock_irqsave(&ioapic_lock, flags); | 2463 | for (i = 0; i < nr_ioapic_registers[dev->id]; i ++) |
| 2470 | for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) { | 2464 | entry[i] = ioapic_read_entry(dev->id, i); |
| 2471 | *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i); | ||
| 2472 | *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i); | ||
| 2473 | } | ||
| 2474 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
| 2475 | 2465 | ||
| 2476 | return 0; | 2466 | return 0; |
| 2477 | } | 2467 | } |
| @@ -2493,11 +2483,9 @@ static int ioapic_resume(struct sys_device *dev) | |||
| 2493 | reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid; | 2483 | reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid; |
| 2494 | io_apic_write(dev->id, 0, reg_00.raw); | 2484 | io_apic_write(dev->id, 0, reg_00.raw); |
| 2495 | } | 2485 | } |
| 2496 | for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) { | ||
| 2497 | io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1)); | ||
| 2498 | io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0)); | ||
| 2499 | } | ||
| 2500 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2486 | spin_unlock_irqrestore(&ioapic_lock, flags); |
| 2487 | for (i = 0; i < nr_ioapic_registers[dev->id]; i ++) | ||
| 2488 | ioapic_write_entry(dev->id, i, entry[i]); | ||
| 2501 | 2489 | ||
| 2502 | return 0; | 2490 | return 0; |
| 2503 | } | 2491 | } |
| @@ -2694,9 +2682,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a | |||
| 2694 | if (!ioapic && (irq < 16)) | 2682 | if (!ioapic && (irq < 16)) |
| 2695 | disable_8259A_irq(irq); | 2683 | disable_8259A_irq(irq); |
| 2696 | 2684 | ||
| 2685 | ioapic_write_entry(ioapic, pin, entry); | ||
| 2697 | spin_lock_irqsave(&ioapic_lock, flags); | 2686 | spin_lock_irqsave(&ioapic_lock, flags); |
| 2698 | io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1)); | ||
| 2699 | io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0)); | ||
| 2700 | set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS); | 2687 | set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS); |
| 2701 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2688 | spin_unlock_irqrestore(&ioapic_lock, flags); |
| 2702 | 2689 | ||
| @@ -2704,3 +2691,25 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a | |||
| 2704 | } | 2691 | } |
| 2705 | 2692 | ||
| 2706 | #endif /* CONFIG_ACPI */ | 2693 | #endif /* CONFIG_ACPI */ |
| 2694 | |||
| 2695 | static int __init parse_disable_timer_pin_1(char *arg) | ||
| 2696 | { | ||
| 2697 | disable_timer_pin_1 = 1; | ||
| 2698 | return 0; | ||
| 2699 | } | ||
| 2700 | early_param("disable_timer_pin_1", parse_disable_timer_pin_1); | ||
| 2701 | |||
| 2702 | static int __init parse_enable_timer_pin_1(char *arg) | ||
| 2703 | { | ||
| 2704 | disable_timer_pin_1 = -1; | ||
| 2705 | return 0; | ||
| 2706 | } | ||
| 2707 | early_param("enable_timer_pin_1", parse_enable_timer_pin_1); | ||
| 2708 | |||
| 2709 | static int __init parse_noapic(char *arg) | ||
| 2710 | { | ||
| 2711 | /* disable IO-APIC */ | ||
| 2712 | disable_ioapic_setup(); | ||
| 2713 | return 0; | ||
| 2714 | } | ||
| 2715 | early_param("noapic", parse_noapic); | ||
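The tail of the io_apic.c diff turns three boot options into early_param handlers wired to the now-static disable_timer_pin_1 flag and to disable_ioapic_setup(). Their spelling on the kernel command line is unchanged; the annotations below are short summaries for illustration only:

    noapic                  # skip IO-APIC setup and stay on the legacy 8259A path
    disable_timer_pin_1     # do not route the timer interrupt through IO-APIC pin 1
    enable_timer_pin_1      # force pin 1 back on where a platform default would clear it

Being early_param handlers, they are parsed during early setup, before the IO-APIC code reads the flag.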
diff --git a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c
index 6b1ae6ba76f0..91966bafb3dc 100644
--- a/arch/i386/kernel/machine_kexec.c
+++ b/arch/i386/kernel/machine_kexec.c
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
| 10 | #include <linux/kexec.h> | 10 | #include <linux/kexec.h> |
| 11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
| 12 | #include <linux/init.h> | ||
| 12 | #include <asm/pgtable.h> | 13 | #include <asm/pgtable.h> |
| 13 | #include <asm/pgalloc.h> | 14 | #include <asm/pgalloc.h> |
| 14 | #include <asm/tlbflush.h> | 15 | #include <asm/tlbflush.h> |
| @@ -20,70 +21,13 @@ | |||
| 20 | #include <asm/system.h> | 21 | #include <asm/system.h> |
| 21 | 22 | ||
| 22 | #define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE))) | 23 | #define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE))) |
| 23 | 24 | static u32 kexec_pgd[1024] PAGE_ALIGNED; | |
| 24 | #define L0_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | 25 | #ifdef CONFIG_X86_PAE |
| 25 | #define L1_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | 26 | static u32 kexec_pmd0[1024] PAGE_ALIGNED; |
| 26 | #define L2_ATTR (_PAGE_PRESENT) | 27 | static u32 kexec_pmd1[1024] PAGE_ALIGNED; |
| 27 | |||
| 28 | #define LEVEL0_SIZE (1UL << 12UL) | ||
| 29 | |||
| 30 | #ifndef CONFIG_X86_PAE | ||
| 31 | #define LEVEL1_SIZE (1UL << 22UL) | ||
| 32 | static u32 pgtable_level1[1024] PAGE_ALIGNED; | ||
| 33 | |||
| 34 | static void identity_map_page(unsigned long address) | ||
| 35 | { | ||
| 36 | unsigned long level1_index, level2_index; | ||
| 37 | u32 *pgtable_level2; | ||
| 38 | |||
| 39 | /* Find the current page table */ | ||
| 40 | pgtable_level2 = __va(read_cr3()); | ||
| 41 | |||
| 42 | /* Find the indexes of the physical address to identity map */ | ||
| 43 | level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE; | ||
| 44 | level2_index = address / LEVEL1_SIZE; | ||
| 45 | |||
| 46 | /* Identity map the page table entry */ | ||
| 47 | pgtable_level1[level1_index] = address | L0_ATTR; | ||
| 48 | pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR; | ||
| 49 | |||
| 50 | /* Flush the tlb so the new mapping takes effect. | ||
| 51 | * Global tlb entries are not flushed but that is not an issue. | ||
| 52 | */ | ||
| 53 | load_cr3(pgtable_level2); | ||
| 54 | } | ||
| 55 | |||
| 56 | #else | ||
| 57 | #define LEVEL1_SIZE (1UL << 21UL) | ||
| 58 | #define LEVEL2_SIZE (1UL << 30UL) | ||
| 59 | static u64 pgtable_level1[512] PAGE_ALIGNED; | ||
| 60 | static u64 pgtable_level2[512] PAGE_ALIGNED; | ||
| 61 | |||
| 62 | static void identity_map_page(unsigned long address) | ||
| 63 | { | ||
| 64 | unsigned long level1_index, level2_index, level3_index; | ||
| 65 | u64 *pgtable_level3; | ||
| 66 | |||
| 67 | /* Find the current page table */ | ||
| 68 | pgtable_level3 = __va(read_cr3()); | ||
| 69 | |||
| 70 | /* Find the indexes of the physical address to identity map */ | ||
| 71 | level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE; | ||
| 72 | level2_index = (address % LEVEL2_SIZE)/LEVEL1_SIZE; | ||
| 73 | level3_index = address / LEVEL2_SIZE; | ||
| 74 | |||
| 75 | /* Identity map the page table entry */ | ||
| 76 | pgtable_level1[level1_index] = address | L0_ATTR; | ||
| 77 | pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR; | ||
| 78 | set_64bit(&pgtable_level3[level3_index], | ||
| 79 | __pa(pgtable_level2) | L2_ATTR); | ||
| 80 | |||
| 81 | /* Flush the tlb so the new mapping takes effect. | ||
| 82 | * Global tlb entries are not flushed but that is not an issue. | ||
| 83 | */ | ||
| 84 | load_cr3(pgtable_level3); | ||
| 85 | } | ||
| 86 | #endif | 28 | #endif |
| 29 | static u32 kexec_pte0[1024] PAGE_ALIGNED; | ||
| 30 | static u32 kexec_pte1[1024] PAGE_ALIGNED; | ||
| 87 | 31 | ||
| 88 | static void set_idt(void *newidt, __u16 limit) | 32 | static void set_idt(void *newidt, __u16 limit) |
| 89 | { | 33 | { |
| @@ -127,16 +71,6 @@ static void load_segments(void) | |||
| 127 | #undef __STR | 71 | #undef __STR |
| 128 | } | 72 | } |
| 129 | 73 | ||
| 130 | typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)( | ||
| 131 | unsigned long indirection_page, | ||
| 132 | unsigned long reboot_code_buffer, | ||
| 133 | unsigned long start_address, | ||
| 134 | unsigned int has_pae) ATTRIB_NORET; | ||
| 135 | |||
| 136 | extern const unsigned char relocate_new_kernel[]; | ||
| 137 | extern void relocate_new_kernel_end(void); | ||
| 138 | extern const unsigned int relocate_new_kernel_size; | ||
| 139 | |||
| 140 | /* | 74 | /* |
| 141 | * A architecture hook called to validate the | 75 | * A architecture hook called to validate the |
| 142 | * proposed image and prepare the control pages | 76 | * proposed image and prepare the control pages |
| @@ -169,25 +103,29 @@ void machine_kexec_cleanup(struct kimage *image) | |||
| 169 | */ | 103 | */ |
| 170 | NORET_TYPE void machine_kexec(struct kimage *image) | 104 | NORET_TYPE void machine_kexec(struct kimage *image) |
| 171 | { | 105 | { |
| 172 | unsigned long page_list; | 106 | unsigned long page_list[PAGES_NR]; |
| 173 | unsigned long reboot_code_buffer; | 107 | void *control_page; |
| 174 | |||
| 175 | relocate_new_kernel_t rnk; | ||
| 176 | 108 | ||
| 177 | /* Interrupts aren't acceptable while we reboot */ | 109 | /* Interrupts aren't acceptable while we reboot */ |
| 178 | local_irq_disable(); | 110 | local_irq_disable(); |
| 179 | 111 | ||
| 180 | /* Compute some offsets */ | 112 | control_page = page_address(image->control_code_page); |
| 181 | reboot_code_buffer = page_to_pfn(image->control_code_page) | 113 | memcpy(control_page, relocate_kernel, PAGE_SIZE); |
| 182 | << PAGE_SHIFT; | 114 | |
| 183 | page_list = image->head; | 115 | page_list[PA_CONTROL_PAGE] = __pa(control_page); |
| 184 | 116 | page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel; | |
| 185 | /* Set up an identity mapping for the reboot_code_buffer */ | 117 | page_list[PA_PGD] = __pa(kexec_pgd); |
| 186 | identity_map_page(reboot_code_buffer); | 118 | page_list[VA_PGD] = (unsigned long)kexec_pgd; |
| 187 | 119 | #ifdef CONFIG_X86_PAE | |
| 188 | /* copy it out */ | 120 | page_list[PA_PMD_0] = __pa(kexec_pmd0); |
| 189 | memcpy((void *)reboot_code_buffer, relocate_new_kernel, | 121 | page_list[VA_PMD_0] = (unsigned long)kexec_pmd0; |
| 190 | relocate_new_kernel_size); | 122 | page_list[PA_PMD_1] = __pa(kexec_pmd1); |
| 123 | page_list[VA_PMD_1] = (unsigned long)kexec_pmd1; | ||
| 124 | #endif | ||
| 125 | page_list[PA_PTE_0] = __pa(kexec_pte0); | ||
| 126 | page_list[VA_PTE_0] = (unsigned long)kexec_pte0; | ||
| 127 | page_list[PA_PTE_1] = __pa(kexec_pte1); | ||
| 128 | page_list[VA_PTE_1] = (unsigned long)kexec_pte1; | ||
| 191 | 129 | ||
| 192 | /* The segment registers are funny things, they have both a | 130 | /* The segment registers are funny things, they have both a |
| 193 | * visible and an invisible part. Whenever the visible part is | 131 | * visible and an invisible part. Whenever the visible part is |
| @@ -206,6 +144,28 @@ NORET_TYPE void machine_kexec(struct kimage *image) | |||
| 206 | set_idt(phys_to_virt(0),0); | 144 | set_idt(phys_to_virt(0),0); |
| 207 | 145 | ||
| 208 | /* now call it */ | 146 | /* now call it */ |
| 209 | rnk = (relocate_new_kernel_t) reboot_code_buffer; | 147 | relocate_kernel((unsigned long)image->head, (unsigned long)page_list, |
| 210 | (*rnk)(page_list, reboot_code_buffer, image->start, cpu_has_pae); | 148 | image->start, cpu_has_pae); |
| 149 | } | ||
| 150 | |||
| 151 | /* crashkernel=size@addr specifies the location to reserve for | ||
| 152 | * a crash kernel. By reserving this memory we guarantee | ||
| 153 | * that linux never sets it up as a DMA target. | ||
| 154 | * Useful for holding code to do something appropriate | ||
| 155 | * after a kernel panic. | ||
| 156 | */ | ||
| 157 | static int __init parse_crashkernel(char *arg) | ||
| 158 | { | ||
| 159 | unsigned long size, base; | ||
| 160 | size = memparse(arg, &arg); | ||
| 161 | if (*arg == '@') { | ||
| 162 | base = memparse(arg+1, &arg); | ||
| 163 | /* FIXME: Do I want a sanity check | ||
| 164 | * to validate the memory range? | ||
| 165 | */ | ||
| 166 | crashk_res.start = base; | ||
| 167 | crashk_res.end = base + size - 1; | ||
| 168 | } | ||
| 169 | return 0; | ||
| 211 | } | 170 | } |
| 171 | early_param("crashkernel", parse_crashkernel); | ||
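parse_crashkernel gives i386 the usual crashkernel=size@addr syntax; both numbers go through memparse, so the K/M/G suffixes work. An illustrative reservation:

    crashkernel=64M@16M     # reserve 64 MB starting at the 16 MB physical mark

The parsed range lands in crashk_res, keeping that memory out of the running kernel's hands (and out of DMA, as the comment above notes) so a kdump kernel loaded there with kexec stays intact until a panic hands control to it.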
diff --git a/arch/i386/kernel/mca.c b/arch/i386/kernel/mca.c
index cd5456f14af4..eb57a851789d 100644
--- a/arch/i386/kernel/mca.c
+++ b/arch/i386/kernel/mca.c
| @@ -42,6 +42,7 @@ | |||
| 42 | #include <linux/errno.h> | 42 | #include <linux/errno.h> |
| 43 | #include <linux/kernel.h> | 43 | #include <linux/kernel.h> |
| 44 | #include <linux/mca.h> | 44 | #include <linux/mca.h> |
| 45 | #include <linux/kprobes.h> | ||
| 45 | #include <asm/system.h> | 46 | #include <asm/system.h> |
| 46 | #include <asm/io.h> | 47 | #include <asm/io.h> |
| 47 | #include <linux/proc_fs.h> | 48 | #include <linux/proc_fs.h> |
| @@ -414,7 +415,8 @@ subsys_initcall(mca_init); | |||
| 414 | 415 | ||
| 415 | /*--------------------------------------------------------------------*/ | 416 | /*--------------------------------------------------------------------*/ |
| 416 | 417 | ||
| 417 | static void mca_handle_nmi_device(struct mca_device *mca_dev, int check_flag) | 418 | static __kprobes void |
| 419 | mca_handle_nmi_device(struct mca_device *mca_dev, int check_flag) | ||
| 418 | { | 420 | { |
| 419 | int slot = mca_dev->slot; | 421 | int slot = mca_dev->slot; |
| 420 | 422 | ||
| @@ -444,7 +446,7 @@ static void mca_handle_nmi_device(struct mca_device *mca_dev, int check_flag) | |||
| 444 | 446 | ||
| 445 | /*--------------------------------------------------------------------*/ | 447 | /*--------------------------------------------------------------------*/ |
| 446 | 448 | ||
| 447 | static int mca_handle_nmi_callback(struct device *dev, void *data) | 449 | static int __kprobes mca_handle_nmi_callback(struct device *dev, void *data) |
| 448 | { | 450 | { |
| 449 | struct mca_device *mca_dev = to_mca_device(dev); | 451 | struct mca_device *mca_dev = to_mca_device(dev); |
| 450 | unsigned char pos5; | 452 | unsigned char pos5; |
| @@ -462,7 +464,7 @@ static int mca_handle_nmi_callback(struct device *dev, void *data) | |||
| 462 | return 0; | 464 | return 0; |
| 463 | } | 465 | } |
| 464 | 466 | ||
| 465 | void mca_handle_nmi(void) | 467 | void __kprobes mca_handle_nmi(void) |
| 466 | { | 468 | { |
| 467 | /* First try - scan the various adapters and see if a specific | 469 | /* First try - scan the various adapters and see if a specific |
| 468 | * adapter was responsible for the error. | 470 | * adapter was responsible for the error. |
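Tagging mca_handle_nmi and its helpers __kprobes keeps the MCA side of NMI handling out of kprobes' reach, mirroring the KPROBE_ENTRY/KPROBE_END bracketing of the nmi stub in entry.S. A minimal sketch of the annotation on an ordinary C function (nmi_path_helper is made up for illustration):

    #include <linux/kprobes.h>

    /* __kprobes moves the function into .kprobes.text; register_kprobe()
     * rejects addresses inside that section, so nothing on this path can
     * be swapped for a breakpoint while an NMI is being serviced. */
    static void __kprobes nmi_path_helper(void)
    {
            /* work that must never hit a kprobe breakpoint */
    }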
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 40b44cc0d14b..9b9479768d5e 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
| @@ -2,6 +2,7 @@ | |||
| 2 | * Intel CPU Microcode Update Driver for Linux | 2 | * Intel CPU Microcode Update Driver for Linux |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2000-2004 Tigran Aivazian | 4 | * Copyright (C) 2000-2004 Tigran Aivazian |
| 5 | * 2006 Shaohua Li <shaohua.li@intel.com> | ||
| 5 | * | 6 | * |
| 6 | * This driver allows to upgrade microcode on Intel processors | 7 | * This driver allows to upgrade microcode on Intel processors |
| 7 | * belonging to IA-32 family - PentiumPro, Pentium II, | 8 | * belonging to IA-32 family - PentiumPro, Pentium II, |
| @@ -82,6 +83,9 @@ | |||
| 82 | #include <linux/spinlock.h> | 83 | #include <linux/spinlock.h> |
| 83 | #include <linux/mm.h> | 84 | #include <linux/mm.h> |
| 84 | #include <linux/mutex.h> | 85 | #include <linux/mutex.h> |
| 86 | #include <linux/cpu.h> | ||
| 87 | #include <linux/firmware.h> | ||
| 88 | #include <linux/platform_device.h> | ||
| 85 | 89 | ||
| 86 | #include <asm/msr.h> | 90 | #include <asm/msr.h> |
| 87 | #include <asm/uaccess.h> | 91 | #include <asm/uaccess.h> |
| @@ -91,9 +95,6 @@ MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver"); | |||
| 91 | MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>"); | 95 | MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>"); |
| 92 | MODULE_LICENSE("GPL"); | 96 | MODULE_LICENSE("GPL"); |
| 93 | 97 | ||
| 94 | static int verbose; | ||
| 95 | module_param(verbose, int, 0644); | ||
| 96 | |||
| 97 | #define MICROCODE_VERSION "1.14a" | 98 | #define MICROCODE_VERSION "1.14a" |
| 98 | 99 | ||
| 99 | #define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */ | 100 | #define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */ |
| @@ -120,55 +121,40 @@ static DEFINE_SPINLOCK(microcode_update_lock); | |||
| 120 | /* no concurrent ->write()s are allowed on /dev/cpu/microcode */ | 121 | /* no concurrent ->write()s are allowed on /dev/cpu/microcode */ |
| 121 | static DEFINE_MUTEX(microcode_mutex); | 122 | static DEFINE_MUTEX(microcode_mutex); |
| 122 | 123 | ||
| 123 | static void __user *user_buffer; /* user area microcode data buffer */ | ||
| 124 | static unsigned int user_buffer_size; /* it's size */ | ||
| 125 | |||
| 126 | typedef enum mc_error_code { | ||
| 127 | MC_SUCCESS = 0, | ||
| 128 | MC_IGNORED = 1, | ||
| 129 | MC_NOTFOUND = 2, | ||
| 130 | MC_MARKED = 3, | ||
| 131 | MC_ALLOCATED = 4, | ||
| 132 | } mc_error_code_t; | ||
| 133 | |||
| 134 | static struct ucode_cpu_info { | 124 | static struct ucode_cpu_info { |
| 125 | int valid; | ||
| 135 | unsigned int sig; | 126 | unsigned int sig; |
| 136 | unsigned int pf, orig_pf; | 127 | unsigned int pf; |
| 137 | unsigned int rev; | 128 | unsigned int rev; |
| 138 | unsigned int cksum; | ||
| 139 | mc_error_code_t err; | ||
| 140 | microcode_t *mc; | 129 | microcode_t *mc; |
| 141 | } ucode_cpu_info[NR_CPUS]; | 130 | } ucode_cpu_info[NR_CPUS]; |
| 142 | |||
| 143 | static int microcode_open (struct inode *unused1, struct file *unused2) | ||
| 144 | { | ||
| 145 | return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; | ||
| 146 | } | ||
| 147 | 131 | ||
| 148 | static void collect_cpu_info (void *unused) | 132 | static void collect_cpu_info(int cpu_num) |
| 149 | { | 133 | { |
| 150 | int cpu_num = smp_processor_id(); | ||
| 151 | struct cpuinfo_x86 *c = cpu_data + cpu_num; | 134 | struct cpuinfo_x86 *c = cpu_data + cpu_num; |
| 152 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; | 135 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; |
| 153 | unsigned int val[2]; | 136 | unsigned int val[2]; |
| 154 | 137 | ||
| 155 | uci->sig = uci->pf = uci->rev = uci->cksum = 0; | 138 | /* We should bind the task to the CPU */ |
| 156 | uci->err = MC_NOTFOUND; | 139 | BUG_ON(raw_smp_processor_id() != cpu_num); |
| 140 | uci->pf = uci->rev = 0; | ||
| 157 | uci->mc = NULL; | 141 | uci->mc = NULL; |
| 142 | uci->valid = 1; | ||
| 158 | 143 | ||
| 159 | if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || | 144 | if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || |
| 160 | cpu_has(c, X86_FEATURE_IA64)) { | 145 | cpu_has(c, X86_FEATURE_IA64)) { |
| 161 | printk(KERN_ERR "microcode: CPU%d not a capable Intel processor\n", cpu_num); | 146 | printk(KERN_ERR "microcode: CPU%d not a capable Intel " |
| 147 | "processor\n", cpu_num); | ||
| 148 | uci->valid = 0; | ||
| 162 | return; | 149 | return; |
| 163 | } else { | 150 | } |
| 164 | uci->sig = cpuid_eax(0x00000001); | ||
| 165 | 151 | ||
| 166 | if ((c->x86_model >= 5) || (c->x86 > 6)) { | 152 | uci->sig = cpuid_eax(0x00000001); |
| 167 | /* get processor flags from MSR 0x17 */ | 153 | |
| 168 | rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); | 154 | if ((c->x86_model >= 5) || (c->x86 > 6)) { |
| 169 | uci->pf = 1 << ((val[1] >> 18) & 7); | 155 | /* get processor flags from MSR 0x17 */ |
| 170 | } | 156 | rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); |
| 171 | uci->orig_pf = uci->pf; | 157 | uci->pf = 1 << ((val[1] >> 18) & 7); |
| 172 | } | 158 | } |
| 173 | 159 | ||
| 174 | wrmsr(MSR_IA32_UCODE_REV, 0, 0); | 160 | wrmsr(MSR_IA32_UCODE_REV, 0, 0); |
| @@ -180,218 +166,159 @@ static void collect_cpu_info (void *unused) | |||
| 180 | uci->sig, uci->pf, uci->rev); | 166 | uci->sig, uci->pf, uci->rev); |
| 181 | } | 167 | } |
| 182 | 168 | ||
| 183 | static inline void mark_microcode_update (int cpu_num, microcode_header_t *mc_header, int sig, int pf, int cksum) | 169 | static inline int microcode_update_match(int cpu_num, |
| 170 | microcode_header_t *mc_header, int sig, int pf) | ||
| 184 | { | 171 | { |
| 185 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; | 172 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; |
| 186 | 173 | ||
| 187 | pr_debug("Microcode Found.\n"); | 174 | if (!sigmatch(sig, uci->sig, pf, uci->pf) |
| 188 | pr_debug(" Header Revision 0x%x\n", mc_header->hdrver); | 175 | || mc_header->rev <= uci->rev) |
| 189 | pr_debug(" Loader Revision 0x%x\n", mc_header->ldrver); | 176 | return 0; |
| 190 | pr_debug(" Revision 0x%x \n", mc_header->rev); | 177 | return 1; |
| 191 | pr_debug(" Date %x/%x/%x\n", | ||
| 192 | ((mc_header->date >> 24 ) & 0xff), | ||
| 193 | ((mc_header->date >> 16 ) & 0xff), | ||
| 194 | (mc_header->date & 0xFFFF)); | ||
| 195 | pr_debug(" Signature 0x%x\n", sig); | ||
| 196 | pr_debug(" Type 0x%x Family 0x%x Model 0x%x Stepping 0x%x\n", | ||
| 197 | ((sig >> 12) & 0x3), | ||
| 198 | ((sig >> 8) & 0xf), | ||
| 199 | ((sig >> 4) & 0xf), | ||
| 200 | ((sig & 0xf))); | ||
| 201 | pr_debug(" Processor Flags 0x%x\n", pf); | ||
| 202 | pr_debug(" Checksum 0x%x\n", cksum); | ||
| 203 | |||
| 204 | if (mc_header->rev < uci->rev) { | ||
| 205 | if (uci->err == MC_NOTFOUND) { | ||
| 206 | uci->err = MC_IGNORED; | ||
| 207 | uci->cksum = mc_header->rev; | ||
| 208 | } else if (uci->err == MC_IGNORED && uci->cksum < mc_header->rev) | ||
| 209 | uci->cksum = mc_header->rev; | ||
| 210 | } else if (mc_header->rev == uci->rev) { | ||
| 211 | if (uci->err < MC_MARKED) { | ||
| 212 | /* notify the caller of success on this cpu */ | ||
| 213 | uci->err = MC_SUCCESS; | ||
| 214 | } | ||
| 215 | } else if (uci->err != MC_ALLOCATED || mc_header->rev > uci->mc->hdr.rev) { | ||
| 216 | pr_debug("microcode: CPU%d found a matching microcode update with " | ||
| 217 | " revision 0x%x (current=0x%x)\n", cpu_num, mc_header->rev, uci->rev); | ||
| 218 | uci->cksum = cksum; | ||
| 219 | uci->pf = pf; /* keep the original mc pf for cksum calculation */ | ||
| 220 | uci->err = MC_MARKED; /* found the match */ | ||
| 221 | for_each_online_cpu(cpu_num) { | ||
| 222 | if (ucode_cpu_info + cpu_num != uci | ||
| 223 | && ucode_cpu_info[cpu_num].mc == uci->mc) { | ||
| 224 | uci->mc = NULL; | ||
| 225 | break; | ||
| 226 | } | ||
| 227 | } | ||
| 228 | if (uci->mc != NULL) { | ||
| 229 | vfree(uci->mc); | ||
| 230 | uci->mc = NULL; | ||
| 231 | } | ||
| 232 | } | ||
| 233 | return; | ||
| 234 | } | 178 | } |
| 235 | 179 | ||
| 236 | static int find_matching_ucodes (void) | 180 | static int microcode_sanity_check(void *mc) |
| 237 | { | 181 | { |
| 238 | int cursor = 0; | 182 | microcode_header_t *mc_header = mc; |
| 239 | int error = 0; | 183 | struct extended_sigtable *ext_header = NULL; |
| 240 | 184 | struct extended_signature *ext_sig; | |
| 241 | while (cursor + MC_HEADER_SIZE < user_buffer_size) { | 185 | unsigned long total_size, data_size, ext_table_size; |
| 242 | microcode_header_t mc_header; | 186 | int sum, orig_sum, ext_sigcount = 0, i; |
| 243 | void *newmc = NULL; | 187 | |
| 244 | int i, sum, cpu_num, allocated_flag, total_size, data_size, ext_table_size; | 188 | total_size = get_totalsize(mc_header); |
| 189 | data_size = get_datasize(mc_header); | ||
| 190 | if (data_size + MC_HEADER_SIZE > total_size) { | ||
| 191 | printk(KERN_ERR "microcode: error! " | ||
| 192 | "Bad data size in microcode data file\n"); | ||
| 193 | return -EINVAL; | ||
| 194 | } | ||
| 245 | 195 | ||
| 246 | if (copy_from_user(&mc_header, user_buffer + cursor, MC_HEADER_SIZE)) { | 196 | if (mc_header->ldrver != 1 || mc_header->hdrver != 1) { |
| 247 | printk(KERN_ERR "microcode: error! Can not read user data\n"); | 197 | printk(KERN_ERR "microcode: error! " |
| 248 | error = -EFAULT; | 198 | "Unknown microcode update format\n"); |
| 249 | goto out; | 199 | return -EINVAL; |
| 200 | } | ||
| 201 | ext_table_size = total_size - (MC_HEADER_SIZE + data_size); | ||
| 202 | if (ext_table_size) { | ||
| 203 | if ((ext_table_size < EXT_HEADER_SIZE) | ||
| 204 | || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) { | ||
| 205 | printk(KERN_ERR "microcode: error! " | ||
| 206 | "Small exttable size in microcode data file\n"); | ||
| 207 | return -EINVAL; | ||
| 250 | } | 208 | } |
| 251 | 209 | ext_header = mc + MC_HEADER_SIZE + data_size; | |
| 252 | total_size = get_totalsize(&mc_header); | 210 | if (ext_table_size != exttable_size(ext_header)) { |
| 253 | if ((cursor + total_size > user_buffer_size) || (total_size < DEFAULT_UCODE_TOTALSIZE)) { | 211 | printk(KERN_ERR "microcode: error! " |
| 254 | printk(KERN_ERR "microcode: error! Bad data in microcode data file\n"); | 212 | "Bad exttable size in microcode data file\n"); |
| 255 | error = -EINVAL; | 213 | return -EFAULT; |
| 256 | goto out; | ||
| 257 | } | 214 | } |
| 215 | ext_sigcount = ext_header->count; | ||
| 216 | } | ||
| 258 | 217 | ||
| 259 | data_size = get_datasize(&mc_header); | 218 | /* check extended table checksum */ |
| 260 | if ((data_size + MC_HEADER_SIZE > total_size) || (data_size < DEFAULT_UCODE_DATASIZE)) { | 219 | if (ext_table_size) { |
| 261 | printk(KERN_ERR "microcode: error! Bad data in microcode data file\n"); | 220 | int ext_table_sum = 0; |
| 262 | error = -EINVAL; | 221 | int *ext_tablep = (int *)ext_header; |
| 263 | goto out; | 222 | |
| 223 | i = ext_table_size / DWSIZE; | ||
| 224 | while (i--) | ||
| 225 | ext_table_sum += ext_tablep[i]; | ||
| 226 | if (ext_table_sum) { | ||
| 227 | printk(KERN_WARNING "microcode: aborting, " | ||
| 228 | "bad extended signature table checksum\n"); | ||
| 229 | return -EINVAL; | ||
| 264 | } | 230 | } |
| 231 | } | ||
| 265 | 232 | ||
| 266 | if (mc_header.ldrver != 1 || mc_header.hdrver != 1) { | 233 | /* calculate the checksum */ |
| 267 | printk(KERN_ERR "microcode: error! Unknown microcode update format\n"); | 234 | orig_sum = 0; |
| 268 | error = -EINVAL; | 235 | i = (MC_HEADER_SIZE + data_size) / DWSIZE; |
| 269 | goto out; | 236 | while (i--) |
| 237 | orig_sum += ((int *)mc)[i]; | ||
| 238 | if (orig_sum) { | ||
| 239 | printk(KERN_ERR "microcode: aborting, bad checksum\n"); | ||
| 240 | return -EINVAL; | ||
| 241 | } | ||
| 242 | if (!ext_table_size) | ||
| 243 | return 0; | ||
| 244 | /* check extended signature checksum */ | ||
| 245 | for (i = 0; i < ext_sigcount; i++) { | ||
| 246 | ext_sig = (struct extended_signature *)((void *)ext_header | ||
| 247 | + EXT_HEADER_SIZE + EXT_SIGNATURE_SIZE * i); | ||
| 248 | sum = orig_sum | ||
| 249 | - (mc_header->sig + mc_header->pf + mc_header->cksum) | ||
| 250 | + (ext_sig->sig + ext_sig->pf + ext_sig->cksum); | ||
| 251 | if (sum) { | ||
| 252 | printk(KERN_ERR "microcode: aborting, bad checksum\n"); | ||
| 253 | return -EINVAL; | ||
| 270 | } | 254 | } |
| 255 | } | ||
| 256 | return 0; | ||
| 257 | } | ||
| 271 | 258 | ||
| 272 | for_each_online_cpu(cpu_num) { | 259 | /* |
| 273 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; | 260 | * return 0 - no update found |
| 274 | 261 | * return 1 - found update | |
| 275 | if (sigmatch(mc_header.sig, uci->sig, mc_header.pf, uci->orig_pf)) | 262 | * return < 0 - error |
| 276 | mark_microcode_update(cpu_num, &mc_header, mc_header.sig, mc_header.pf, mc_header.cksum); | 263 | */ |
| 277 | } | 264 | static int get_maching_microcode(void *mc, int cpu) |
| 265 | { | ||
| 266 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | ||
| 267 | microcode_header_t *mc_header = mc; | ||
| 268 | struct extended_sigtable *ext_header; | ||
| 269 | unsigned long total_size = get_totalsize(mc_header); | ||
| 270 | int ext_sigcount, i; | ||
| 271 | struct extended_signature *ext_sig; | ||
| 272 | void *new_mc; | ||
| 273 | |||
| 274 | if (microcode_update_match(cpu, mc_header, | ||
| 275 | mc_header->sig, mc_header->pf)) | ||
| 276 | goto find; | ||
| 277 | |||
| 278 | if (total_size <= get_datasize(mc_header) + MC_HEADER_SIZE) | ||
| 279 | return 0; | ||
| 280 | |||
| 281 | ext_header = (struct extended_sigtable *)(mc + | ||
| 282 | get_datasize(mc_header) + MC_HEADER_SIZE); | ||
| 283 | ext_sigcount = ext_header->count; | ||
| 284 | ext_sig = (struct extended_signature *)((void *)ext_header | ||
| 285 | + EXT_HEADER_SIZE); | ||
| 286 | for (i = 0; i < ext_sigcount; i++) { | ||
| 287 | if (microcode_update_match(cpu, mc_header, | ||
| 288 | ext_sig->sig, ext_sig->pf)) | ||
| 289 | goto find; | ||
| 290 | ext_sig++; | ||
| 291 | } | ||
| 292 | return 0; | ||
| 293 | find: | ||
| 294 | pr_debug("microcode: CPU %d found a matching microcode update with" | ||
| 295 | " version 0x%x (current=0x%x)\n", cpu, mc_header->rev,uci->rev); | ||
| 296 | new_mc = vmalloc(total_size); | ||
| 297 | if (!new_mc) { | ||
| 298 | printk(KERN_ERR "microcode: error! Can not allocate memory\n"); | ||
| 299 | return -ENOMEM; | ||
| 300 | } | ||
| 278 | 301 | ||
| 279 | ext_table_size = total_size - (MC_HEADER_SIZE + data_size); | 302 | /* free previous update file */ |
| 280 | if (ext_table_size) { | 303 | vfree(uci->mc); |
| 281 | struct extended_sigtable ext_header; | ||
| 282 | struct extended_signature ext_sig; | ||
| 283 | int ext_sigcount; | ||
| 284 | 304 | ||
| 285 | if ((ext_table_size < EXT_HEADER_SIZE) | 305 | memcpy(new_mc, mc, total_size); |
| 286 | || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) { | 306 | uci->mc = new_mc; |
| 287 | printk(KERN_ERR "microcode: error! Bad data in microcode data file\n"); | 307 | return 1; |
| 288 | error = -EINVAL; | ||
| 289 | goto out; | ||
| 290 | } | ||
| 291 | if (copy_from_user(&ext_header, user_buffer + cursor | ||
| 292 | + MC_HEADER_SIZE + data_size, EXT_HEADER_SIZE)) { | ||
| 293 | printk(KERN_ERR "microcode: error! Can not read user data\n"); | ||
| 294 | error = -EFAULT; | ||
| 295 | goto out; | ||
| 296 | } | ||
| 297 | if (ext_table_size != exttable_size(&ext_header)) { | ||
| 298 | printk(KERN_ERR "microcode: error! Bad data in microcode data file\n"); | ||
| 299 | error = -EFAULT; | ||
| 300 | goto out; | ||
| 301 | } | ||
| 302 | |||
| 303 | ext_sigcount = ext_header.count; | ||
| 304 | |||
| 305 | for (i = 0; i < ext_sigcount; i++) { | ||
| 306 | if (copy_from_user(&ext_sig, user_buffer + cursor + MC_HEADER_SIZE + data_size + EXT_HEADER_SIZE | ||
| 307 | + EXT_SIGNATURE_SIZE * i, EXT_SIGNATURE_SIZE)) { | ||
| 308 | printk(KERN_ERR "microcode: error! Can not read user data\n"); | ||
| 309 | error = -EFAULT; | ||
| 310 | goto out; | ||
| 311 | } | ||
| 312 | for_each_online_cpu(cpu_num) { | ||
| 313 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; | ||
| 314 | |||
| 315 | if (sigmatch(ext_sig.sig, uci->sig, ext_sig.pf, uci->orig_pf)) { | ||
| 316 | mark_microcode_update(cpu_num, &mc_header, ext_sig.sig, ext_sig.pf, ext_sig.cksum); | ||
| 317 | } | ||
| 318 | } | ||
| 319 | } | ||
| 320 | } | ||
| 321 | /* now check if any cpu has matched */ | ||
| 322 | allocated_flag = 0; | ||
| 323 | sum = 0; | ||
| 324 | for_each_online_cpu(cpu_num) { | ||
| 325 | if (ucode_cpu_info[cpu_num].err == MC_MARKED) { | ||
| 326 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; | ||
| 327 | if (!allocated_flag) { | ||
| 328 | allocated_flag = 1; | ||
| 329 | newmc = vmalloc(total_size); | ||
| 330 | if (!newmc) { | ||
| 331 | printk(KERN_ERR "microcode: error! Can not allocate memory\n"); | ||
| 332 | error = -ENOMEM; | ||
| 333 | goto out; | ||
| 334 | } | ||
| 335 | if (copy_from_user(newmc + MC_HEADER_SIZE, | ||
| 336 | user_buffer + cursor + MC_HEADER_SIZE, | ||
| 337 | total_size - MC_HEADER_SIZE)) { | ||
| 338 | printk(KERN_ERR "microcode: error! Can not read user data\n"); | ||
| 339 | vfree(newmc); | ||
| 340 | error = -EFAULT; | ||
| 341 | goto out; | ||
| 342 | } | ||
| 343 | memcpy(newmc, &mc_header, MC_HEADER_SIZE); | ||
| 344 | /* check extended table checksum */ | ||
| 345 | if (ext_table_size) { | ||
| 346 | int ext_table_sum = 0; | ||
| 347 | int * ext_tablep = (((void *) newmc) + MC_HEADER_SIZE + data_size); | ||
| 348 | i = ext_table_size / DWSIZE; | ||
| 349 | while (i--) ext_table_sum += ext_tablep[i]; | ||
| 350 | if (ext_table_sum) { | ||
| 351 | printk(KERN_WARNING "microcode: aborting, bad extended signature table checksum\n"); | ||
| 352 | vfree(newmc); | ||
| 353 | error = -EINVAL; | ||
| 354 | goto out; | ||
| 355 | } | ||
| 356 | } | ||
| 357 | |||
| 358 | /* calculate the checksum */ | ||
| 359 | i = (MC_HEADER_SIZE + data_size) / DWSIZE; | ||
| 360 | while (i--) sum += ((int *)newmc)[i]; | ||
| 361 | sum -= (mc_header.sig + mc_header.pf + mc_header.cksum); | ||
| 362 | } | ||
| 363 | ucode_cpu_info[cpu_num].mc = newmc; | ||
| 364 | ucode_cpu_info[cpu_num].err = MC_ALLOCATED; /* mc updated */ | ||
| 365 | if (sum + uci->sig + uci->pf + uci->cksum != 0) { | ||
| 366 | printk(KERN_ERR "microcode: CPU%d aborting, bad checksum\n", cpu_num); | ||
| 367 | error = -EINVAL; | ||
| 368 | goto out; | ||
| 369 | } | ||
| 370 | } | ||
| 371 | } | ||
| 372 | cursor += total_size; /* goto the next update patch */ | ||
| 373 | } /* end of while */ | ||
| 374 | out: | ||
| 375 | return error; | ||
| 376 | } | 308 | } |
| 377 | 309 | ||
| 378 | static void do_update_one (void * unused) | 310 | static void apply_microcode(int cpu) |
| 379 | { | 311 | { |
| 380 | unsigned long flags; | 312 | unsigned long flags; |
| 381 | unsigned int val[2]; | 313 | unsigned int val[2]; |
| 382 | int cpu_num = smp_processor_id(); | 314 | int cpu_num = raw_smp_processor_id(); |
| 383 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; | 315 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; |
| 384 | 316 | ||
| 385 | if (uci->mc == NULL) { | 317 | /* We should bind the task to the CPU */ |
| 386 | if (verbose) { | 318 | BUG_ON(cpu_num != cpu); |
| 387 | if (uci->err == MC_SUCCESS) | 319 | |
| 388 | printk(KERN_INFO "microcode: CPU%d already at revision 0x%x\n", | 320 | if (uci->mc == NULL) |
| 389 | cpu_num, uci->rev); | ||
| 390 | else | ||
| 391 | printk(KERN_INFO "microcode: No new microcode data for CPU%d\n", cpu_num); | ||
| 392 | } | ||
| 393 | return; | 321 | return; |
| 394 | } | ||
| 395 | 322 | ||
| 396 | /* serialize access to the physical write to MSR 0x79 */ | 323 | /* serialize access to the physical write to MSR 0x79 */ |
| 397 | spin_lock_irqsave(µcode_update_lock, flags); | 324 | spin_lock_irqsave(µcode_update_lock, flags); |
| @@ -408,68 +335,107 @@ static void do_update_one (void * unused) | |||
| 408 | /* get the current revision from MSR 0x8B */ | 335 | /* get the current revision from MSR 0x8B */ |
| 409 | rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); | 336 | rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); |
| 410 | 337 | ||
| 411 | /* notify the caller of success on this cpu */ | ||
| 412 | uci->err = MC_SUCCESS; | ||
| 413 | spin_unlock_irqrestore(µcode_update_lock, flags); | 338 | spin_unlock_irqrestore(µcode_update_lock, flags); |
| 414 | printk(KERN_INFO "microcode: CPU%d updated from revision " | 339 | if (val[1] != uci->mc->hdr.rev) { |
| 340 | printk(KERN_ERR "microcode: CPU%d updated from revision " | ||
| 341 | "0x%x to 0x%x failed\n", cpu_num, uci->rev, val[1]); | ||
| 342 | return; | ||
| 343 | } | ||
| 344 | pr_debug("microcode: CPU%d updated from revision " | ||
| 415 | "0x%x to 0x%x, date = %08x \n", | 345 | "0x%x to 0x%x, date = %08x \n", |
| 416 | cpu_num, uci->rev, val[1], uci->mc->hdr.date); | 346 | cpu_num, uci->rev, val[1], uci->mc->hdr.date); |
| 417 | return; | 347 | uci->rev = val[1]; |
| 418 | } | 348 | } |
| 419 | 349 | ||
| 420 | static int do_microcode_update (void) | 350 | #ifdef CONFIG_MICROCODE_OLD_INTERFACE |
| 421 | { | 351 | static void __user *user_buffer; /* user area microcode data buffer */ |
| 422 | int i, error; | 352 | static unsigned int user_buffer_size; /* it's size */ |
| 423 | 353 | ||
| 424 | if (on_each_cpu(collect_cpu_info, NULL, 1, 1) != 0) { | 354 | static long get_next_ucode(void **mc, long offset) |
| 425 | printk(KERN_ERR "microcode: Error! Could not run on all processors\n"); | 355 | { |
| 426 | error = -EIO; | 356 | microcode_header_t mc_header; |
| 427 | goto out; | 357 | unsigned long total_size; |
| 358 | |||
| 359 | /* No more data */ | ||
| 360 | if (offset >= user_buffer_size) | ||
| 361 | return 0; | ||
| 362 | if (copy_from_user(&mc_header, user_buffer + offset, MC_HEADER_SIZE)) { | ||
| 363 | printk(KERN_ERR "microcode: error! Can not read user data\n"); | ||
| 364 | return -EFAULT; | ||
| 428 | } | 365 | } |
| 429 | 366 | total_size = get_totalsize(&mc_header); | |
| 430 | if ((error = find_matching_ucodes())) { | 367 | if (offset + total_size > user_buffer_size) { |
| 431 | printk(KERN_ERR "microcode: Error in the microcode data\n"); | 368 | printk(KERN_ERR "microcode: error! Bad total size in microcode " |
| 432 | goto out_free; | 369 | "data file\n"); |
| 370 | return -EINVAL; | ||
| 433 | } | 371 | } |
| 434 | 372 | *mc = vmalloc(total_size); | |
| 435 | if (on_each_cpu(do_update_one, NULL, 1, 1) != 0) { | 373 | if (!*mc) |
| 436 | printk(KERN_ERR "microcode: Error! Could not run on all processors\n"); | 374 | return -ENOMEM; |
| 437 | error = -EIO; | 375 | if (copy_from_user(*mc, user_buffer + offset, total_size)) { |
| 376 | printk(KERN_ERR "microcode: error! Can not read user data\n"); | ||
| 377 | vfree(*mc); | ||
| 378 | return -EFAULT; | ||
| 438 | } | 379 | } |
| 380 | return offset + total_size; | ||
| 381 | } | ||
| 382 | |||
| 383 | static int do_microcode_update (void) | ||
| 384 | { | ||
| 385 | long cursor = 0; | ||
| 386 | int error = 0; | ||
| 387 | void *new_mc; | ||
| 388 | int cpu; | ||
| 389 | cpumask_t old; | ||
| 390 | |||
| 391 | old = current->cpus_allowed; | ||
| 439 | 392 | ||
| 440 | out_free: | 393 | while ((cursor = get_next_ucode(&new_mc, cursor)) > 0) { |
| 441 | for_each_online_cpu(i) { | 394 | error = microcode_sanity_check(new_mc); |
| 442 | if (ucode_cpu_info[i].mc) { | 395 | if (error) |
| 443 | int j; | 396 | goto out; |
| 444 | void *tmp = ucode_cpu_info[i].mc; | 397 | /* |
| 445 | vfree(tmp); | 398 | * It's possible the data file has multiple matching ucode, |
| 446 | for_each_online_cpu(j) { | 399 | * lets keep searching till the latest version |
| 447 | if (ucode_cpu_info[j].mc == tmp) | 400 | */ |
| 448 | ucode_cpu_info[j].mc = NULL; | 401 | for_each_online_cpu(cpu) { |
| 449 | } | 402 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
| 403 | |||
| 404 | if (!uci->valid) | ||
| 405 | continue; | ||
| 406 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
| 407 | error = get_maching_microcode(new_mc, cpu); | ||
| 408 | if (error < 0) | ||
| 409 | goto out; | ||
| 410 | if (error == 1) | ||
| 411 | apply_microcode(cpu); | ||
| 450 | } | 412 | } |
| 451 | if (ucode_cpu_info[i].err == MC_IGNORED && verbose) | 413 | vfree(new_mc); |
| 452 | printk(KERN_WARNING "microcode: CPU%d not 'upgrading' to earlier revision" | ||
| 453 | " 0x%x (current=0x%x)\n", i, ucode_cpu_info[i].cksum, ucode_cpu_info[i].rev); | ||
| 454 | } | 414 | } |
| 455 | out: | 415 | out: |
| 416 | if (cursor > 0) | ||
| 417 | vfree(new_mc); | ||
| 418 | if (cursor < 0) | ||
| 419 | error = cursor; | ||
| 420 | set_cpus_allowed(current, old); | ||
| 456 | return error; | 421 | return error; |
| 457 | } | 422 | } |
| 458 | 423 | ||
| 424 | static int microcode_open (struct inode *unused1, struct file *unused2) | ||
| 425 | { | ||
| 426 | return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; | ||
| 427 | } | ||
| 428 | |||
| 459 | static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos) | 429 | static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos) |
| 460 | { | 430 | { |
| 461 | ssize_t ret; | 431 | ssize_t ret; |
| 462 | 432 | ||
| 463 | if (len < DEFAULT_UCODE_TOTALSIZE) { | ||
| 464 | printk(KERN_ERR "microcode: not enough data\n"); | ||
| 465 | return -EINVAL; | ||
| 466 | } | ||
| 467 | |||
| 468 | if ((len >> PAGE_SHIFT) > num_physpages) { | 433 | if ((len >> PAGE_SHIFT) > num_physpages) { |
| 469 | printk(KERN_ERR "microcode: too much data (max %ld pages)\n", num_physpages); | 434 | printk(KERN_ERR "microcode: too much data (max %ld pages)\n", num_physpages); |
| 470 | return -EINVAL; | 435 | return -EINVAL; |
| 471 | } | 436 | } |
| 472 | 437 | ||
| 438 | lock_cpu_hotplug(); | ||
| 473 | mutex_lock(&microcode_mutex); | 439 | mutex_lock(&microcode_mutex); |
| 474 | 440 | ||
| 475 | user_buffer = (void __user *) buf; | 441 | user_buffer = (void __user *) buf; |
| @@ -480,6 +446,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_ | |||
| 480 | ret = (ssize_t)len; | 446 | ret = (ssize_t)len; |
| 481 | 447 | ||
| 482 | mutex_unlock(&microcode_mutex); | 448 | mutex_unlock(&microcode_mutex); |
| 449 | unlock_cpu_hotplug(); | ||
| 483 | 450 | ||
| 484 | return ret; | 451 | return ret; |
| 485 | } | 452 | } |
| @@ -496,7 +463,7 @@ static struct miscdevice microcode_dev = { | |||
| 496 | .fops = &microcode_fops, | 463 | .fops = &microcode_fops, |
| 497 | }; | 464 | }; |
| 498 | 465 | ||
| 499 | static int __init microcode_init (void) | 466 | static int __init microcode_dev_init (void) |
| 500 | { | 467 | { |
| 501 | int error; | 468 | int error; |
| 502 | 469 | ||
| @@ -508,6 +475,280 @@ static int __init microcode_init (void) | |||
| 508 | return error; | 475 | return error; |
| 509 | } | 476 | } |
| 510 | 477 | ||
| 478 | return 0; | ||
| 479 | } | ||
| 480 | |||
| 481 | static void __exit microcode_dev_exit (void) | ||
| 482 | { | ||
| 483 | misc_deregister(&microcode_dev); | ||
| 484 | } | ||
| 485 | |||
| 486 | MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); | ||
| 487 | #else | ||
| 488 | #define microcode_dev_init() 0 | ||
| 489 | #define microcode_dev_exit() do { } while(0) | ||
| 490 | #endif | ||
| 491 | |||
| 492 | static long get_next_ucode_from_buffer(void **mc, void *buf, | ||
| 493 | unsigned long size, long offset) | ||
| 494 | { | ||
| 495 | microcode_header_t *mc_header; | ||
| 496 | unsigned long total_size; | ||
| 497 | |||
| 498 | /* No more data */ | ||
| 499 | if (offset >= size) | ||
| 500 | return 0; | ||
| 501 | mc_header = (microcode_header_t *)(buf + offset); | ||
| 502 | total_size = get_totalsize(mc_header); | ||
| 503 | |||
| 504 | if (offset + total_size > size) { | ||
| 505 | printk(KERN_ERR "microcode: error! Bad data in microcode data file\n"); | ||
| 506 | return -EINVAL; | ||
| 507 | } | ||
| 508 | |||
| 509 | *mc = vmalloc(total_size); | ||
| 510 | if (!*mc) { | ||
| 511 | printk(KERN_ERR "microcode: error! Can not allocate memory\n"); | ||
| 512 | return -ENOMEM; | ||
| 513 | } | ||
| 514 | memcpy(*mc, buf + offset, total_size); | ||
| 515 | return offset + total_size; | ||
| 516 | } | ||
| 517 | |||
| 518 | /* fake device for request_firmware */ | ||
| 519 | static struct platform_device *microcode_pdev; | ||
| 520 | |||
| 521 | static int cpu_request_microcode(int cpu) | ||
| 522 | { | ||
| 523 | char name[30]; | ||
| 524 | struct cpuinfo_x86 *c = cpu_data + cpu; | ||
| 525 | const struct firmware *firmware; | ||
| 526 | void *buf; | ||
| 527 | unsigned long size; | ||
| 528 | long offset = 0; | ||
| 529 | int error; | ||
| 530 | void *mc; | ||
| 531 | |||
| 532 | /* We should bind the task to the CPU */ | ||
| 533 | BUG_ON(cpu != raw_smp_processor_id()); | ||
| 534 | sprintf(name,"intel-ucode/%02x-%02x-%02x", | ||
| 535 | c->x86, c->x86_model, c->x86_mask); | ||
| 536 | error = request_firmware(&firmware, name, &microcode_pdev->dev); | ||
| 537 | if (error) { | ||
| 538 | pr_debug("ucode data file %s load failed\n", name); | ||
| 539 | return error; | ||
| 540 | } | ||
| 541 | buf = (void *)firmware->data; | ||
| 542 | size = firmware->size; | ||
| 543 | while ((offset = get_next_ucode_from_buffer(&mc, buf, size, offset)) | ||
| 544 | > 0) { | ||
| 545 | error = microcode_sanity_check(mc); | ||
| 546 | if (error) | ||
| 547 | break; | ||
| 548 | error = get_maching_microcode(mc, cpu); | ||
| 549 | if (error < 0) | ||
| 550 | break; | ||
| 551 | /* | ||
| 552 | * It's possible the data file has multiple matching ucode, | ||
| 553 | * lets keep searching till the latest version | ||
| 554 | */ | ||
| 555 | if (error == 1) { | ||
| 556 | apply_microcode(cpu); | ||
| 557 | error = 0; | ||
| 558 | } | ||
| 559 | vfree(mc); | ||
| 560 | } | ||
| 561 | if (offset > 0) | ||
| 562 | vfree(mc); | ||
| 563 | if (offset < 0) | ||
| 564 | error = offset; | ||
| 565 | release_firmware(firmware); | ||
| 566 | |||
| 567 | return error; | ||
| 568 | } | ||
| 569 | |||
| 570 | static void microcode_init_cpu(int cpu) | ||
| 571 | { | ||
| 572 | cpumask_t old; | ||
| 573 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | ||
| 574 | |||
| 575 | old = current->cpus_allowed; | ||
| 576 | |||
| 577 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
| 578 | mutex_lock(&microcode_mutex); | ||
| 579 | collect_cpu_info(cpu); | ||
| 580 | if (uci->valid) | ||
| 581 | cpu_request_microcode(cpu); | ||
| 582 | mutex_unlock(&microcode_mutex); | ||
| 583 | set_cpus_allowed(current, old); | ||
| 584 | } | ||
| 585 | |||
| 586 | static void microcode_fini_cpu(int cpu) | ||
| 587 | { | ||
| 588 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | ||
| 589 | |||
| 590 | mutex_lock(&microcode_mutex); | ||
| 591 | uci->valid = 0; | ||
| 592 | vfree(uci->mc); | ||
| 593 | uci->mc = NULL; | ||
| 594 | mutex_unlock(&microcode_mutex); | ||
| 595 | } | ||
| 596 | |||
| 597 | static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz) | ||
| 598 | { | ||
| 599 | struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; | ||
| 600 | char *end; | ||
| 601 | unsigned long val = simple_strtoul(buf, &end, 0); | ||
| 602 | int err = 0; | ||
| 603 | int cpu = dev->id; | ||
| 604 | |||
| 605 | if (end == buf) | ||
| 606 | return -EINVAL; | ||
| 607 | if (val == 1) { | ||
| 608 | cpumask_t old; | ||
| 609 | |||
| 610 | old = current->cpus_allowed; | ||
| 611 | |||
| 612 | lock_cpu_hotplug(); | ||
| 613 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
| 614 | |||
| 615 | mutex_lock(&microcode_mutex); | ||
| 616 | if (uci->valid) | ||
| 617 | err = cpu_request_microcode(cpu); | ||
| 618 | mutex_unlock(&microcode_mutex); | ||
| 619 | unlock_cpu_hotplug(); | ||
| 620 | set_cpus_allowed(current, old); | ||
| 621 | } | ||
| 622 | if (err) | ||
| 623 | return err; | ||
| 624 | return sz; | ||
| 625 | } | ||
| 626 | |||
| 627 | static ssize_t version_show(struct sys_device *dev, char *buf) | ||
| 628 | { | ||
| 629 | struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; | ||
| 630 | |||
| 631 | return sprintf(buf, "0x%x\n", uci->rev); | ||
| 632 | } | ||
| 633 | |||
| 634 | static ssize_t pf_show(struct sys_device *dev, char *buf) | ||
| 635 | { | ||
| 636 | struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; | ||
| 637 | |||
| 638 | return sprintf(buf, "0x%x\n", uci->pf); | ||
| 639 | } | ||
| 640 | |||
| 641 | static SYSDEV_ATTR(reload, 0200, NULL, reload_store); | ||
| 642 | static SYSDEV_ATTR(version, 0400, version_show, NULL); | ||
| 643 | static SYSDEV_ATTR(processor_flags, 0400, pf_show, NULL); | ||
| 644 | |||
| 645 | static struct attribute *mc_default_attrs[] = { | ||
| 646 | &attr_reload.attr, | ||
| 647 | &attr_version.attr, | ||
| 648 | &attr_processor_flags.attr, | ||
| 649 | NULL | ||
| 650 | }; | ||
| 651 | |||
| 652 | static struct attribute_group mc_attr_group = { | ||
| 653 | .attrs = mc_default_attrs, | ||
| 654 | .name = "microcode", | ||
| 655 | }; | ||
| 656 | |||
| 657 | static int mc_sysdev_add(struct sys_device *sys_dev) | ||
| 658 | { | ||
| 659 | int cpu = sys_dev->id; | ||
| 660 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | ||
| 661 | |||
| 662 | if (!cpu_online(cpu)) | ||
| 663 | return 0; | ||
| 664 | pr_debug("Microcode:CPU %d added\n", cpu); | ||
| 665 | memset(uci, 0, sizeof(*uci)); | ||
| 666 | sysfs_create_group(&sys_dev->kobj, &mc_attr_group); | ||
| 667 | |||
| 668 | microcode_init_cpu(cpu); | ||
| 669 | return 0; | ||
| 670 | } | ||
| 671 | |||
| 672 | static int mc_sysdev_remove(struct sys_device *sys_dev) | ||
| 673 | { | ||
| 674 | int cpu = sys_dev->id; | ||
| 675 | |||
| 676 | if (!cpu_online(cpu)) | ||
| 677 | return 0; | ||
| 678 | pr_debug("Microcode:CPU %d removed\n", cpu); | ||
| 679 | microcode_fini_cpu(cpu); | ||
| 680 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); | ||
| 681 | return 0; | ||
| 682 | } | ||
| 683 | |||
| 684 | static int mc_sysdev_resume(struct sys_device *dev) | ||
| 685 | { | ||
| 686 | int cpu = dev->id; | ||
| 687 | |||
| 688 | if (!cpu_online(cpu)) | ||
| 689 | return 0; | ||
| 690 | pr_debug("Microcode:CPU %d resumed\n", cpu); | ||
| 691 | /* only CPU 0 will apply ucode here */ | ||
| 692 | apply_microcode(0); | ||
| 693 | return 0; | ||
| 694 | } | ||
| 695 | |||
| 696 | static struct sysdev_driver mc_sysdev_driver = { | ||
| 697 | .add = mc_sysdev_add, | ||
| 698 | .remove = mc_sysdev_remove, | ||
| 699 | .resume = mc_sysdev_resume, | ||
| 700 | }; | ||
| 701 | |||
| 702 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 703 | static __cpuinit int | ||
| 704 | mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) | ||
| 705 | { | ||
| 706 | unsigned int cpu = (unsigned long)hcpu; | ||
| 707 | struct sys_device *sys_dev; | ||
| 708 | |||
| 709 | sys_dev = get_cpu_sysdev(cpu); | ||
| 710 | switch (action) { | ||
| 711 | case CPU_ONLINE: | ||
| 712 | case CPU_DOWN_FAILED: | ||
| 713 | mc_sysdev_add(sys_dev); | ||
| 714 | break; | ||
| 715 | case CPU_DOWN_PREPARE: | ||
| 716 | mc_sysdev_remove(sys_dev); | ||
| 717 | break; | ||
| 718 | } | ||
| 719 | return NOTIFY_OK; | ||
| 720 | } | ||
| 721 | |||
| 722 | static struct notifier_block mc_cpu_notifier = { | ||
| 723 | .notifier_call = mc_cpu_callback, | ||
| 724 | }; | ||
| 725 | #endif | ||
| 726 | |||
| 727 | static int __init microcode_init (void) | ||
| 728 | { | ||
| 729 | int error; | ||
| 730 | |||
| 731 | error = microcode_dev_init(); | ||
| 732 | if (error) | ||
| 733 | return error; | ||
| 734 | microcode_pdev = platform_device_register_simple("microcode", -1, | ||
| 735 | NULL, 0); | ||
| 736 | if (IS_ERR(microcode_pdev)) { | ||
| 737 | microcode_dev_exit(); | ||
| 738 | return PTR_ERR(microcode_pdev); | ||
| 739 | } | ||
| 740 | |||
| 741 | lock_cpu_hotplug(); | ||
| 742 | error = sysdev_driver_register(&cpu_sysdev_class, &mc_sysdev_driver); | ||
| 743 | unlock_cpu_hotplug(); | ||
| 744 | if (error) { | ||
| 745 | microcode_dev_exit(); | ||
| 746 | platform_device_unregister(microcode_pdev); | ||
| 747 | return error; | ||
| 748 | } | ||
| 749 | |||
| 750 | register_hotcpu_notifier(&mc_cpu_notifier); | ||
| 751 | |||
| 511 | printk(KERN_INFO | 752 | printk(KERN_INFO |
| 512 | "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@veritas.com>\n"); | 753 | "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@veritas.com>\n"); |
| 513 | return 0; | 754 | return 0; |
| @@ -515,9 +756,16 @@ static int __init microcode_init (void) | |||
| 515 | 756 | ||
| 516 | static void __exit microcode_exit (void) | 757 | static void __exit microcode_exit (void) |
| 517 | { | 758 | { |
| 518 | misc_deregister(&microcode_dev); | 759 | microcode_dev_exit(); |
| 760 | |||
| 761 | unregister_hotcpu_notifier(&mc_cpu_notifier); | ||
| 762 | |||
| 763 | lock_cpu_hotplug(); | ||
| 764 | sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver); | ||
| 765 | unlock_cpu_hotplug(); | ||
| 766 | |||
| 767 | platform_device_unregister(microcode_pdev); | ||
| 519 | } | 768 | } |
| 520 | 769 | ||
| 521 | module_init(microcode_init) | 770 | module_init(microcode_init) |
| 522 | module_exit(microcode_exit) | 771 | module_exit(microcode_exit) |
| 523 | MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); | ||
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c index a70b5fa0ef06..442aaf8c77eb 100644 --- a/arch/i386/kernel/mpparse.c +++ b/arch/i386/kernel/mpparse.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <asm/io_apic.h> | 30 | #include <asm/io_apic.h> |
| 31 | 31 | ||
| 32 | #include <mach_apic.h> | 32 | #include <mach_apic.h> |
| 33 | #include <mach_apicdef.h> | ||
| 33 | #include <mach_mpparse.h> | 34 | #include <mach_mpparse.h> |
| 34 | #include <bios_ebda.h> | 35 | #include <bios_ebda.h> |
| 35 | 36 | ||
| @@ -68,7 +69,7 @@ unsigned int def_to_bigsmp = 0; | |||
| 68 | /* Processor that is doing the boot up */ | 69 | /* Processor that is doing the boot up */ |
| 69 | unsigned int boot_cpu_physical_apicid = -1U; | 70 | unsigned int boot_cpu_physical_apicid = -1U; |
| 70 | /* Internal processor count */ | 71 | /* Internal processor count */ |
| 71 | static unsigned int __devinitdata num_processors; | 72 | unsigned int __cpuinitdata num_processors; |
| 72 | 73 | ||
| 73 | /* Bitmask of physically existing CPUs */ | 74 | /* Bitmask of physically existing CPUs */ |
| 74 | physid_mask_t phys_cpu_present_map; | 75 | physid_mask_t phys_cpu_present_map; |
| @@ -228,12 +229,14 @@ static void __init MP_bus_info (struct mpc_config_bus *m) | |||
| 228 | 229 | ||
| 229 | mpc_oem_bus_info(m, str, translation_table[mpc_record]); | 230 | mpc_oem_bus_info(m, str, translation_table[mpc_record]); |
| 230 | 231 | ||
| 232 | #if MAX_MP_BUSSES < 256 | ||
| 231 | if (m->mpc_busid >= MAX_MP_BUSSES) { | 233 | if (m->mpc_busid >= MAX_MP_BUSSES) { |
| 232 | printk(KERN_WARNING "MP table busid value (%d) for bustype %s " | 234 | printk(KERN_WARNING "MP table busid value (%d) for bustype %s " |
| 233 | " is too large, max. supported is %d\n", | 235 | " is too large, max. supported is %d\n", |
| 234 | m->mpc_busid, str, MAX_MP_BUSSES - 1); | 236 | m->mpc_busid, str, MAX_MP_BUSSES - 1); |
| 235 | return; | 237 | return; |
| 236 | } | 238 | } |
| 239 | #endif | ||
| 237 | 240 | ||
| 238 | if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { | 241 | if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { |
| 239 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; | 242 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; |
| @@ -293,19 +296,6 @@ static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) | |||
| 293 | m->mpc_irqtype, m->mpc_irqflag & 3, | 296 | m->mpc_irqtype, m->mpc_irqflag & 3, |
| 294 | (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid, | 297 | (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid, |
| 295 | m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); | 298 | m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); |
| 296 | /* | ||
| 297 | * Well it seems all SMP boards in existence | ||
| 298 | * use ExtINT/LVT1 == LINT0 and | ||
| 299 | * NMI/LVT2 == LINT1 - the following check | ||
| 300 | * will show us if this assumptions is false. | ||
| 301 | * Until then we do not have to add baggage. | ||
| 302 | */ | ||
| 303 | if ((m->mpc_irqtype == mp_ExtINT) && | ||
| 304 | (m->mpc_destapiclint != 0)) | ||
| 305 | BUG(); | ||
| 306 | if ((m->mpc_irqtype == mp_NMI) && | ||
| 307 | (m->mpc_destapiclint != 1)) | ||
| 308 | BUG(); | ||
| 309 | } | 299 | } |
| 310 | 300 | ||
| 311 | #ifdef CONFIG_X86_NUMAQ | 301 | #ifdef CONFIG_X86_NUMAQ |
| @@ -822,8 +812,7 @@ int es7000_plat; | |||
| 822 | 812 | ||
| 823 | #ifdef CONFIG_ACPI | 813 | #ifdef CONFIG_ACPI |
| 824 | 814 | ||
| 825 | void __init mp_register_lapic_address ( | 815 | void __init mp_register_lapic_address(u64 address) |
| 826 | u64 address) | ||
| 827 | { | 816 | { |
| 828 | mp_lapic_addr = (unsigned long) address; | 817 | mp_lapic_addr = (unsigned long) address; |
| 829 | 818 | ||
| @@ -835,13 +824,10 @@ void __init mp_register_lapic_address ( | |||
| 835 | Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); | 824 | Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); |
| 836 | } | 825 | } |
| 837 | 826 | ||
| 838 | 827 | void __devinit mp_register_lapic (u8 id, u8 enabled) | |
| 839 | void __devinit mp_register_lapic ( | ||
| 840 | u8 id, | ||
| 841 | u8 enabled) | ||
| 842 | { | 828 | { |
| 843 | struct mpc_config_processor processor; | 829 | struct mpc_config_processor processor; |
| 844 | int boot_cpu = 0; | 830 | int boot_cpu = 0; |
| 845 | 831 | ||
| 846 | if (MAX_APICS - id <= 0) { | 832 | if (MAX_APICS - id <= 0) { |
| 847 | printk(KERN_WARNING "Processor #%d invalid (max %d)\n", | 833 | printk(KERN_WARNING "Processor #%d invalid (max %d)\n", |
| @@ -878,11 +864,9 @@ static struct mp_ioapic_routing { | |||
| 878 | u32 pin_programmed[4]; | 864 | u32 pin_programmed[4]; |
| 879 | } mp_ioapic_routing[MAX_IO_APICS]; | 865 | } mp_ioapic_routing[MAX_IO_APICS]; |
| 880 | 866 | ||
| 881 | 867 | static int mp_find_ioapic (int gsi) | |
| 882 | static int mp_find_ioapic ( | ||
| 883 | int gsi) | ||
| 884 | { | 868 | { |
| 885 | int i = 0; | 869 | int i = 0; |
| 886 | 870 | ||
| 887 | /* Find the IOAPIC that manages this GSI. */ | 871 | /* Find the IOAPIC that manages this GSI. */ |
| 888 | for (i = 0; i < nr_ioapics; i++) { | 872 | for (i = 0; i < nr_ioapics; i++) { |
| @@ -895,15 +879,11 @@ static int mp_find_ioapic ( | |||
| 895 | 879 | ||
| 896 | return -1; | 880 | return -1; |
| 897 | } | 881 | } |
| 898 | |||
| 899 | 882 | ||
| 900 | void __init mp_register_ioapic ( | 883 | void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base) |
| 901 | u8 id, | ||
| 902 | u32 address, | ||
| 903 | u32 gsi_base) | ||
| 904 | { | 884 | { |
| 905 | int idx = 0; | 885 | int idx = 0; |
| 906 | int tmpid; | 886 | int tmpid; |
| 907 | 887 | ||
| 908 | if (nr_ioapics >= MAX_IO_APICS) { | 888 | if (nr_ioapics >= MAX_IO_APICS) { |
| 909 | printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " | 889 | printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " |
| @@ -949,16 +929,10 @@ void __init mp_register_ioapic ( | |||
| 949 | mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, | 929 | mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, |
| 950 | mp_ioapic_routing[idx].gsi_base, | 930 | mp_ioapic_routing[idx].gsi_base, |
| 951 | mp_ioapic_routing[idx].gsi_end); | 931 | mp_ioapic_routing[idx].gsi_end); |
| 952 | |||
| 953 | return; | ||
| 954 | } | 932 | } |
| 955 | 933 | ||
| 956 | 934 | void __init | |
| 957 | void __init mp_override_legacy_irq ( | 935 | mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) |
| 958 | u8 bus_irq, | ||
| 959 | u8 polarity, | ||
| 960 | u8 trigger, | ||
| 961 | u32 gsi) | ||
| 962 | { | 936 | { |
| 963 | struct mpc_config_intsrc intsrc; | 937 | struct mpc_config_intsrc intsrc; |
| 964 | int ioapic = -1; | 938 | int ioapic = -1; |
| @@ -996,15 +970,13 @@ void __init mp_override_legacy_irq ( | |||
| 996 | mp_irqs[mp_irq_entries] = intsrc; | 970 | mp_irqs[mp_irq_entries] = intsrc; |
| 997 | if (++mp_irq_entries == MAX_IRQ_SOURCES) | 971 | if (++mp_irq_entries == MAX_IRQ_SOURCES) |
| 998 | panic("Max # of irq sources exceeded!\n"); | 972 | panic("Max # of irq sources exceeded!\n"); |
| 999 | |||
| 1000 | return; | ||
| 1001 | } | 973 | } |
| 1002 | 974 | ||
| 1003 | void __init mp_config_acpi_legacy_irqs (void) | 975 | void __init mp_config_acpi_legacy_irqs (void) |
| 1004 | { | 976 | { |
| 1005 | struct mpc_config_intsrc intsrc; | 977 | struct mpc_config_intsrc intsrc; |
| 1006 | int i = 0; | 978 | int i = 0; |
| 1007 | int ioapic = -1; | 979 | int ioapic = -1; |
| 1008 | 980 | ||
| 1009 | /* | 981 | /* |
| 1010 | * Fabricate the legacy ISA bus (bus #31). | 982 | * Fabricate the legacy ISA bus (bus #31). |
| @@ -1073,12 +1045,12 @@ void __init mp_config_acpi_legacy_irqs (void) | |||
| 1073 | 1045 | ||
| 1074 | #define MAX_GSI_NUM 4096 | 1046 | #define MAX_GSI_NUM 4096 |
| 1075 | 1047 | ||
| 1076 | int mp_register_gsi (u32 gsi, int triggering, int polarity) | 1048 | int mp_register_gsi(u32 gsi, int triggering, int polarity) |
| 1077 | { | 1049 | { |
| 1078 | int ioapic = -1; | 1050 | int ioapic = -1; |
| 1079 | int ioapic_pin = 0; | 1051 | int ioapic_pin = 0; |
| 1080 | int idx, bit = 0; | 1052 | int idx, bit = 0; |
| 1081 | static int pci_irq = 16; | 1053 | static int pci_irq = 16; |
| 1082 | /* | 1054 | /* |
| 1083 | * Mapping between Global System Interrups, which | 1055 | * Mapping between Global System Interrups, which |
| 1084 | * represent all possible interrupts, and IRQs | 1056 | * represent all possible interrupts, and IRQs |
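The `#if MAX_MP_BUSSES < 256` guard added around the busid range check above is presumably there because the MP-table bus id is an 8-bit field: in configurations where MAX_MP_BUSSES is 256 or larger the comparison can never be true, and gcc's -Wextra reports an always-false comparison. The stand-alone illustration below uses a hypothetical limit of 260 and a made-up helper just to show the pattern.

```c
/*
 * Illustration of the "#if MAX_MP_BUSSES < 256" guard added above.
 * Assumption: the MP-table bus id is an unsigned char (8 bits), so a
 * ">= MAX_MP_BUSSES" check is meaningless once the limit reaches 256;
 * gcc -Wextra would flag the comparison as always false.
 */
#include <stdio.h>

#define MAX_MP_BUSSES 260	/* hypothetical large-bus configuration */

static void check_busid(unsigned char busid)
{
#if MAX_MP_BUSSES < 256
	if (busid >= MAX_MP_BUSSES) {	/* only compiled in when it can trigger */
		printf("busid %u out of range\n", busid);
		return;
	}
#endif
	printf("busid %u accepted\n", busid);
}

int main(void)
{
	check_busid(200);
	return 0;
}
```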
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index acb351478e42..dbda706fdd14 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c | |||
| @@ -21,83 +21,174 @@ | |||
| 21 | #include <linux/sysdev.h> | 21 | #include <linux/sysdev.h> |
| 22 | #include <linux/sysctl.h> | 22 | #include <linux/sysctl.h> |
| 23 | #include <linux/percpu.h> | 23 | #include <linux/percpu.h> |
| 24 | #include <linux/dmi.h> | ||
| 25 | #include <linux/kprobes.h> | ||
| 24 | 26 | ||
| 25 | #include <asm/smp.h> | 27 | #include <asm/smp.h> |
| 26 | #include <asm/nmi.h> | 28 | #include <asm/nmi.h> |
| 29 | #include <asm/kdebug.h> | ||
| 27 | #include <asm/intel_arch_perfmon.h> | 30 | #include <asm/intel_arch_perfmon.h> |
| 28 | 31 | ||
| 29 | #include "mach_traps.h" | 32 | #include "mach_traps.h" |
| 30 | 33 | ||
| 31 | unsigned int nmi_watchdog = NMI_NONE; | 34 | /* perfctr_nmi_owner tracks the ownership of the perfctr registers: |
| 32 | extern int unknown_nmi_panic; | 35 | * evtsel_nmi_owner tracks the ownership of the event selection |
| 33 | static unsigned int nmi_hz = HZ; | 36 | * - different performance counters/ event selection may be reserved for |
| 34 | static unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */ | 37 | * different subsystems this reservation system just tries to coordinate |
| 35 | static unsigned int nmi_p4_cccr_val; | 38 | * things a little |
| 36 | extern void show_registers(struct pt_regs *regs); | 39 | */ |
| 40 | static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner); | ||
| 41 | static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]); | ||
| 37 | 42 | ||
| 38 | /* | 43 | /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's |
| 39 | * lapic_nmi_owner tracks the ownership of the lapic NMI hardware: | 44 | * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now) |
| 40 | * - it may be reserved by some other driver, or not | ||
| 41 | * - when not reserved by some other driver, it may be used for | ||
| 42 | * the NMI watchdog, or not | ||
| 43 | * | ||
| 44 | * This is maintained separately from nmi_active because the NMI | ||
| 45 | * watchdog may also be driven from the I/O APIC timer. | ||
| 46 | */ | 45 | */ |
| 47 | static DEFINE_SPINLOCK(lapic_nmi_owner_lock); | 46 | #define NMI_MAX_COUNTER_BITS 66 |
| 48 | static unsigned int lapic_nmi_owner; | ||
| 49 | #define LAPIC_NMI_WATCHDOG (1<<0) | ||
| 50 | #define LAPIC_NMI_RESERVED (1<<1) | ||
| 51 | 47 | ||
| 52 | /* nmi_active: | 48 | /* nmi_active: |
| 53 | * +1: the lapic NMI watchdog is active, but can be disabled | 49 | * >0: the lapic NMI watchdog is active, but can be disabled |
| 54 | * 0: the lapic NMI watchdog has not been set up, and cannot | 50 | * <0: the lapic NMI watchdog has not been set up, and cannot |
| 55 | * be enabled | 51 | * be enabled |
| 56 | * -1: the lapic NMI watchdog is disabled, but can be enabled | 52 | * 0: the lapic NMI watchdog is disabled, but can be enabled |
| 57 | */ | 53 | */ |
| 58 | int nmi_active; | 54 | atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */ |
| 59 | 55 | ||
| 60 | #define K7_EVNTSEL_ENABLE (1 << 22) | 56 | unsigned int nmi_watchdog = NMI_DEFAULT; |
| 61 | #define K7_EVNTSEL_INT (1 << 20) | 57 | static unsigned int nmi_hz = HZ; |
| 62 | #define K7_EVNTSEL_OS (1 << 17) | ||
| 63 | #define K7_EVNTSEL_USR (1 << 16) | ||
| 64 | #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 | ||
| 65 | #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING | ||
| 66 | 58 | ||
| 67 | #define P6_EVNTSEL0_ENABLE (1 << 22) | 59 | struct nmi_watchdog_ctlblk { |
| 68 | #define P6_EVNTSEL_INT (1 << 20) | 60 | int enabled; |
| 69 | #define P6_EVNTSEL_OS (1 << 17) | 61 | u64 check_bit; |
| 70 | #define P6_EVNTSEL_USR (1 << 16) | 62 | unsigned int cccr_msr; |
| 71 | #define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79 | 63 | unsigned int perfctr_msr; /* the MSR to reset in NMI handler */ |
| 72 | #define P6_NMI_EVENT P6_EVENT_CPU_CLOCKS_NOT_HALTED | 64 | unsigned int evntsel_msr; /* the MSR to select the events to handle */ |
| 65 | }; | ||
| 66 | static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk); | ||
| 73 | 67 | ||
| 74 | #define MSR_P4_MISC_ENABLE 0x1A0 | 68 | /* local prototypes */ |
| 75 | #define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7) | 69 | static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu); |
| 76 | #define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12) | ||
| 77 | #define MSR_P4_PERFCTR0 0x300 | ||
| 78 | #define MSR_P4_CCCR0 0x360 | ||
| 79 | #define P4_ESCR_EVENT_SELECT(N) ((N)<<25) | ||
| 80 | #define P4_ESCR_OS (1<<3) | ||
| 81 | #define P4_ESCR_USR (1<<2) | ||
| 82 | #define P4_CCCR_OVF_PMI0 (1<<26) | ||
| 83 | #define P4_CCCR_OVF_PMI1 (1<<27) | ||
| 84 | #define P4_CCCR_THRESHOLD(N) ((N)<<20) | ||
| 85 | #define P4_CCCR_COMPLEMENT (1<<19) | ||
| 86 | #define P4_CCCR_COMPARE (1<<18) | ||
| 87 | #define P4_CCCR_REQUIRED (3<<16) | ||
| 88 | #define P4_CCCR_ESCR_SELECT(N) ((N)<<13) | ||
| 89 | #define P4_CCCR_ENABLE (1<<12) | ||
| 90 | /* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter | ||
| 91 | CRU_ESCR0 (with any non-null event selector) through a complemented | ||
| 92 | max threshold. [IA32-Vol3, Section 14.9.9] */ | ||
| 93 | #define MSR_P4_IQ_COUNTER0 0x30C | ||
| 94 | #define P4_NMI_CRU_ESCR0 (P4_ESCR_EVENT_SELECT(0x3F)|P4_ESCR_OS|P4_ESCR_USR) | ||
| 95 | #define P4_NMI_IQ_CCCR0 \ | ||
| 96 | (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ | ||
| 97 | P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) | ||
| 98 | 70 | ||
| 99 | #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL | 71 | extern void show_registers(struct pt_regs *regs); |
| 100 | #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK | 72 | extern int unknown_nmi_panic; |
| 73 | |||
| 74 | /* converts an msr to an appropriate reservation bit */ | ||
| 75 | static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) | ||
| 76 | { | ||
| 77 | /* returns the bit offset of the performance counter register */ | ||
| 78 | switch (boot_cpu_data.x86_vendor) { | ||
| 79 | case X86_VENDOR_AMD: | ||
| 80 | return (msr - MSR_K7_PERFCTR0); | ||
| 81 | case X86_VENDOR_INTEL: | ||
| 82 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | ||
| 83 | return (msr - MSR_ARCH_PERFMON_PERFCTR0); | ||
| 84 | |||
| 85 | switch (boot_cpu_data.x86) { | ||
| 86 | case 6: | ||
| 87 | return (msr - MSR_P6_PERFCTR0); | ||
| 88 | case 15: | ||
| 89 | return (msr - MSR_P4_BPU_PERFCTR0); | ||
| 90 | } | ||
| 91 | } | ||
| 92 | return 0; | ||
| 93 | } | ||
| 94 | |||
| 95 | /* converts an msr to an appropriate reservation bit */ | ||
| 96 | static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) | ||
| 97 | { | ||
| 98 | /* returns the bit offset of the event selection register */ | ||
| 99 | switch (boot_cpu_data.x86_vendor) { | ||
| 100 | case X86_VENDOR_AMD: | ||
| 101 | return (msr - MSR_K7_EVNTSEL0); | ||
| 102 | case X86_VENDOR_INTEL: | ||
| 103 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | ||
| 104 | return (msr - MSR_ARCH_PERFMON_EVENTSEL0); | ||
| 105 | |||
| 106 | switch (boot_cpu_data.x86) { | ||
| 107 | case 6: | ||
| 108 | return (msr - MSR_P6_EVNTSEL0); | ||
| 109 | case 15: | ||
| 110 | return (msr - MSR_P4_BSU_ESCR0); | ||
| 111 | } | ||
| 112 | } | ||
| 113 | return 0; | ||
| 114 | } | ||
| 115 | |||
| 116 | /* checks for a bit availability (hack for oprofile) */ | ||
| 117 | int avail_to_resrv_perfctr_nmi_bit(unsigned int counter) | ||
| 118 | { | ||
| 119 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
| 120 | |||
| 121 | return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner))); | ||
| 122 | } | ||
| 123 | |||
| 124 | /* checks the an msr for availability */ | ||
| 125 | int avail_to_resrv_perfctr_nmi(unsigned int msr) | ||
| 126 | { | ||
| 127 | unsigned int counter; | ||
| 128 | |||
| 129 | counter = nmi_perfctr_msr_to_bit(msr); | ||
| 130 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
| 131 | |||
| 132 | return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner))); | ||
| 133 | } | ||
| 134 | |||
| 135 | int reserve_perfctr_nmi(unsigned int msr) | ||
| 136 | { | ||
| 137 | unsigned int counter; | ||
| 138 | |||
| 139 | counter = nmi_perfctr_msr_to_bit(msr); | ||
| 140 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
| 141 | |||
| 142 | if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner))) | ||
| 143 | return 1; | ||
| 144 | return 0; | ||
| 145 | } | ||
| 146 | |||
| 147 | void release_perfctr_nmi(unsigned int msr) | ||
| 148 | { | ||
| 149 | unsigned int counter; | ||
| 150 | |||
| 151 | counter = nmi_perfctr_msr_to_bit(msr); | ||
| 152 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
| 153 | |||
| 154 | clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner)); | ||
| 155 | } | ||
| 156 | |||
| 157 | int reserve_evntsel_nmi(unsigned int msr) | ||
| 158 | { | ||
| 159 | unsigned int counter; | ||
| 160 | |||
| 161 | counter = nmi_evntsel_msr_to_bit(msr); | ||
| 162 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
| 163 | |||
| 164 | if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0])) | ||
| 165 | return 1; | ||
| 166 | return 0; | ||
| 167 | } | ||
| 168 | |||
| 169 | void release_evntsel_nmi(unsigned int msr) | ||
| 170 | { | ||
| 171 | unsigned int counter; | ||
| 172 | |||
| 173 | counter = nmi_evntsel_msr_to_bit(msr); | ||
| 174 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
| 175 | |||
| 176 | clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]); | ||
| 177 | } | ||
| 178 | |||
| 179 | static __cpuinit inline int nmi_known_cpu(void) | ||
| 180 | { | ||
| 181 | switch (boot_cpu_data.x86_vendor) { | ||
| 182 | case X86_VENDOR_AMD: | ||
| 183 | return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6)); | ||
| 184 | case X86_VENDOR_INTEL: | ||
| 185 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | ||
| 186 | return 1; | ||
| 187 | else | ||
| 188 | return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6)); | ||
| 189 | } | ||
| 190 | return 0; | ||
| 191 | } | ||
| 101 | 192 | ||
| 102 | #ifdef CONFIG_SMP | 193 | #ifdef CONFIG_SMP |
| 103 | /* The performance counters used by NMI_LOCAL_APIC don't trigger when | 194 | /* The performance counters used by NMI_LOCAL_APIC don't trigger when |
| @@ -125,7 +216,18 @@ static int __init check_nmi_watchdog(void) | |||
| 125 | unsigned int *prev_nmi_count; | 216 | unsigned int *prev_nmi_count; |
| 126 | int cpu; | 217 | int cpu; |
| 127 | 218 | ||
| 128 | if (nmi_watchdog == NMI_NONE) | 219 | /* Enable NMI watchdog for newer systems. |
| 220 | Actually it should be safe for most systems before 2004 too except | ||
| 221 | for some IBM systems that corrupt registers when NMI happens | ||
| 222 | during SMM. Unfortunately we don't have more exact information | ||
| 223 | on these and use this coarse check. */ | ||
| 224 | if (nmi_watchdog == NMI_DEFAULT && dmi_get_year(DMI_BIOS_DATE) >= 2004) | ||
| 225 | nmi_watchdog = NMI_LOCAL_APIC; | ||
| 226 | |||
| 227 | if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT)) | ||
| 228 | return 0; | ||
| 229 | |||
| 230 | if (!atomic_read(&nmi_active)) | ||
| 129 | return 0; | 231 | return 0; |
| 130 | 232 | ||
| 131 | prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL); | 233 | prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL); |
| @@ -149,25 +251,45 @@ static int __init check_nmi_watchdog(void) | |||
| 149 | if (!cpu_isset(cpu, cpu_callin_map)) | 251 | if (!cpu_isset(cpu, cpu_callin_map)) |
| 150 | continue; | 252 | continue; |
| 151 | #endif | 253 | #endif |
| 254 | if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled) | ||
| 255 | continue; | ||
| 152 | if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { | 256 | if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { |
| 153 | endflag = 1; | ||
| 154 | printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n", | 257 | printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n", |
| 155 | cpu, | 258 | cpu, |
| 156 | prev_nmi_count[cpu], | 259 | prev_nmi_count[cpu], |
| 157 | nmi_count(cpu)); | 260 | nmi_count(cpu)); |
| 158 | nmi_active = 0; | 261 | per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0; |
| 159 | lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG; | 262 | atomic_dec(&nmi_active); |
| 160 | kfree(prev_nmi_count); | ||
| 161 | return -1; | ||
| 162 | } | 263 | } |
| 163 | } | 264 | } |
| 265 | if (!atomic_read(&nmi_active)) { | ||
| 266 | kfree(prev_nmi_count); | ||
| 267 | atomic_set(&nmi_active, -1); | ||
| 268 | return -1; | ||
| 269 | } | ||
| 164 | endflag = 1; | 270 | endflag = 1; |
| 165 | printk("OK.\n"); | 271 | printk("OK.\n"); |
| 166 | 272 | ||
| 167 | /* now that we know it works we can reduce NMI frequency to | 273 | /* now that we know it works we can reduce NMI frequency to |
| 168 | something more reasonable; makes a difference in some configs */ | 274 | something more reasonable; makes a difference in some configs */ |
| 169 | if (nmi_watchdog == NMI_LOCAL_APIC) | 275 | if (nmi_watchdog == NMI_LOCAL_APIC) { |
| 276 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
| 277 | |||
| 170 | nmi_hz = 1; | 278 | nmi_hz = 1; |
| 279 | /* | ||
| 280 | * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter | ||
| 281 | * are writable, with higher bits sign extending from bit 31. | ||
| 282 | * So, we can only program the counter with 31 bit values and | ||
| 283 | * 32nd bit should be 1, for 33.. to be 1. | ||
| 284 | * Find the appropriate nmi_hz | ||
| 285 | */ | ||
| 286 | if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0 && | ||
| 287 | ((u64)cpu_khz * 1000) > 0x7fffffffULL) { | ||
| 288 | u64 count = (u64)cpu_khz * 1000; | ||
| 289 | do_div(count, 0x7fffffffUL); | ||
| 290 | nmi_hz = count + 1; | ||
| 291 | } | ||
| 292 | } | ||
| 171 | 293 | ||
| 172 | kfree(prev_nmi_count); | 294 | kfree(prev_nmi_count); |
| 173 | return 0; | 295 | return 0; |
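The nmi_hz recalculation added in the hunk above keeps the programmed count within the 31 writable bits of an architectural perfmon counter. A worked example of the arithmetic follows; it is a stand-alone user-space sketch, and the 3 GHz clock is only an illustration.

```c
/*
 * Worked example (user-space sketch, not kernel code) of the nmi_hz
 * adjustment above: with ARCH_PERFMON only 31 bits of the counter are
 * programmable, so the per-tick count cpu_khz * 1000 / nmi_hz must stay
 * below 0x7fffffff. The 3 GHz clock below is purely illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long cpu_khz = 3000000ULL;	/* a 3 GHz CPU */
	unsigned long long count = cpu_khz * 1000;	/* cycles per second */
	unsigned int nmi_hz = 1;

	if (count > 0x7fffffffULL)
		nmi_hz = count / 0x7fffffffULL + 1;	/* 3e9 / (2^31 - 1) -> 2 */

	printf("nmi_hz = %u, per-tick count = %llu\n",
	       nmi_hz, count / nmi_hz);			/* 1500000000 < 2^31 */
	return 0;
}
```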
| @@ -181,124 +303,70 @@ static int __init setup_nmi_watchdog(char *str) | |||
| 181 | 303 | ||
| 182 | get_option(&str, &nmi); | 304 | get_option(&str, &nmi); |
| 183 | 305 | ||
| 184 | if (nmi >= NMI_INVALID) | 306 | if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE)) |
| 185 | return 0; | 307 | return 0; |
| 186 | if (nmi == NMI_NONE) | ||
| 187 | nmi_watchdog = nmi; | ||
| 188 | /* | 308 | /* |
| 189 | * If any other x86 CPU has a local APIC, then | 309 | * If any other x86 CPU has a local APIC, then |
| 190 | * please test the NMI stuff there and send me the | 310 | * please test the NMI stuff there and send me the |
| 191 | * missing bits. Right now Intel P6/P4 and AMD K7 only. | 311 | * missing bits. Right now Intel P6/P4 and AMD K7 only. |
| 192 | */ | 312 | */ |
| 193 | if ((nmi == NMI_LOCAL_APIC) && | 313 | if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0)) |
| 194 | (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | 314 | return 0; /* no lapic support */ |
| 195 | (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15)) | 315 | nmi_watchdog = nmi; |
| 196 | nmi_watchdog = nmi; | ||
| 197 | if ((nmi == NMI_LOCAL_APIC) && | ||
| 198 | (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && | ||
| 199 | (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15)) | ||
| 200 | nmi_watchdog = nmi; | ||
| 201 | /* | ||
| 202 | * We can enable the IO-APIC watchdog | ||
| 203 | * unconditionally. | ||
| 204 | */ | ||
| 205 | if (nmi == NMI_IO_APIC) { | ||
| 206 | nmi_active = 1; | ||
| 207 | nmi_watchdog = nmi; | ||
| 208 | } | ||
| 209 | return 1; | 316 | return 1; |
| 210 | } | 317 | } |
| 211 | 318 | ||
| 212 | __setup("nmi_watchdog=", setup_nmi_watchdog); | 319 | __setup("nmi_watchdog=", setup_nmi_watchdog); |
| 213 | 320 | ||
| 214 | static void disable_intel_arch_watchdog(void); | ||
| 215 | |||
| 216 | static void disable_lapic_nmi_watchdog(void) | 321 | static void disable_lapic_nmi_watchdog(void) |
| 217 | { | 322 | { |
| 218 | if (nmi_active <= 0) | 323 | BUG_ON(nmi_watchdog != NMI_LOCAL_APIC); |
| 324 | |||
| 325 | if (atomic_read(&nmi_active) <= 0) | ||
| 219 | return; | 326 | return; |
| 220 | switch (boot_cpu_data.x86_vendor) { | ||
| 221 | case X86_VENDOR_AMD: | ||
| 222 | wrmsr(MSR_K7_EVNTSEL0, 0, 0); | ||
| 223 | break; | ||
| 224 | case X86_VENDOR_INTEL: | ||
| 225 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { | ||
| 226 | disable_intel_arch_watchdog(); | ||
| 227 | break; | ||
| 228 | } | ||
| 229 | switch (boot_cpu_data.x86) { | ||
| 230 | case 6: | ||
| 231 | if (boot_cpu_data.x86_model > 0xd) | ||
| 232 | break; | ||
| 233 | 327 | ||
| 234 | wrmsr(MSR_P6_EVNTSEL0, 0, 0); | 328 | on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1); |
| 235 | break; | ||
| 236 | case 15: | ||
| 237 | if (boot_cpu_data.x86_model > 0x4) | ||
| 238 | break; | ||
| 239 | 329 | ||
| 240 | wrmsr(MSR_P4_IQ_CCCR0, 0, 0); | 330 | BUG_ON(atomic_read(&nmi_active) != 0); |
| 241 | wrmsr(MSR_P4_CRU_ESCR0, 0, 0); | ||
| 242 | break; | ||
| 243 | } | ||
| 244 | break; | ||
| 245 | } | ||
| 246 | nmi_active = -1; | ||
| 247 | /* tell do_nmi() and others that we're not active any more */ | ||
| 248 | nmi_watchdog = 0; | ||
| 249 | } | 331 | } |
| 250 | 332 | ||
| 251 | static void enable_lapic_nmi_watchdog(void) | 333 | static void enable_lapic_nmi_watchdog(void) |
| 252 | { | 334 | { |
| 253 | if (nmi_active < 0) { | 335 | BUG_ON(nmi_watchdog != NMI_LOCAL_APIC); |
| 254 | nmi_watchdog = NMI_LOCAL_APIC; | ||
| 255 | setup_apic_nmi_watchdog(); | ||
| 256 | } | ||
| 257 | } | ||
| 258 | 336 | ||
| 259 | int reserve_lapic_nmi(void) | 337 | /* are we already enabled */ |
| 260 | { | 338 | if (atomic_read(&nmi_active) != 0) |
| 261 | unsigned int old_owner; | 339 | return; |
| 262 | |||
| 263 | spin_lock(&lapic_nmi_owner_lock); | ||
| 264 | old_owner = lapic_nmi_owner; | ||
| 265 | lapic_nmi_owner |= LAPIC_NMI_RESERVED; | ||
| 266 | spin_unlock(&lapic_nmi_owner_lock); | ||
| 267 | if (old_owner & LAPIC_NMI_RESERVED) | ||
| 268 | return -EBUSY; | ||
| 269 | if (old_owner & LAPIC_NMI_WATCHDOG) | ||
| 270 | disable_lapic_nmi_watchdog(); | ||
| 271 | return 0; | ||
| 272 | } | ||
| 273 | 340 | ||
| 274 | void release_lapic_nmi(void) | 341 | /* are we lapic aware */ |
| 275 | { | 342 | if (nmi_known_cpu() <= 0) |
| 276 | unsigned int new_owner; | 343 | return; |
| 277 | 344 | ||
| 278 | spin_lock(&lapic_nmi_owner_lock); | 345 | on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1); |
| 279 | new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED; | 346 | touch_nmi_watchdog(); |
| 280 | lapic_nmi_owner = new_owner; | ||
| 281 | spin_unlock(&lapic_nmi_owner_lock); | ||
| 282 | if (new_owner & LAPIC_NMI_WATCHDOG) | ||
| 283 | enable_lapic_nmi_watchdog(); | ||
| 284 | } | 347 | } |
| 285 | 348 | ||
| 286 | void disable_timer_nmi_watchdog(void) | 349 | void disable_timer_nmi_watchdog(void) |
| 287 | { | 350 | { |
| 288 | if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0)) | 351 | BUG_ON(nmi_watchdog != NMI_IO_APIC); |
| 352 | |||
| 353 | if (atomic_read(&nmi_active) <= 0) | ||
| 289 | return; | 354 | return; |
| 290 | 355 | ||
| 291 | unset_nmi_callback(); | 356 | disable_irq(0); |
| 292 | nmi_active = -1; | 357 | on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1); |
| 293 | nmi_watchdog = NMI_NONE; | 358 | |
| 359 | BUG_ON(atomic_read(&nmi_active) != 0); | ||
| 294 | } | 360 | } |
| 295 | 361 | ||
| 296 | void enable_timer_nmi_watchdog(void) | 362 | void enable_timer_nmi_watchdog(void) |
| 297 | { | 363 | { |
| 298 | if (nmi_active < 0) { | 364 | BUG_ON(nmi_watchdog != NMI_IO_APIC); |
| 299 | nmi_watchdog = NMI_IO_APIC; | 365 | |
| 366 | if (atomic_read(&nmi_active) == 0) { | ||
| 300 | touch_nmi_watchdog(); | 367 | touch_nmi_watchdog(); |
| 301 | nmi_active = 1; | 368 | on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1); |
| 369 | enable_irq(0); | ||
| 302 | } | 370 | } |
| 303 | } | 371 | } |
| 304 | 372 | ||
| @@ -308,15 +376,20 @@ static int nmi_pm_active; /* nmi_active before suspend */ | |||
| 308 | 376 | ||
| 309 | static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state) | 377 | static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state) |
| 310 | { | 378 | { |
| 311 | nmi_pm_active = nmi_active; | 379 | /* only CPU0 goes here, other CPUs should be offline */ |
| 312 | disable_lapic_nmi_watchdog(); | 380 | nmi_pm_active = atomic_read(&nmi_active); |
| 381 | stop_apic_nmi_watchdog(NULL); | ||
| 382 | BUG_ON(atomic_read(&nmi_active) != 0); | ||
| 313 | return 0; | 383 | return 0; |
| 314 | } | 384 | } |
| 315 | 385 | ||
| 316 | static int lapic_nmi_resume(struct sys_device *dev) | 386 | static int lapic_nmi_resume(struct sys_device *dev) |
| 317 | { | 387 | { |
| 318 | if (nmi_pm_active > 0) | 388 | /* only CPU0 goes here, other CPUs should be offline */ |
| 319 | enable_lapic_nmi_watchdog(); | 389 | if (nmi_pm_active > 0) { |
| 390 | setup_apic_nmi_watchdog(NULL); | ||
| 391 | touch_nmi_watchdog(); | ||
| 392 | } | ||
| 320 | return 0; | 393 | return 0; |
| 321 | } | 394 | } |
| 322 | 395 | ||
| @@ -336,7 +409,13 @@ static int __init init_lapic_nmi_sysfs(void) | |||
| 336 | { | 409 | { |
| 337 | int error; | 410 | int error; |
| 338 | 411 | ||
| 339 | if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC) | 412 | /* should really be a BUG_ON but b/c this is an |
| 413 | * init call, it just doesn't work. -dcz | ||
| 414 | */ | ||
| 415 | if (nmi_watchdog != NMI_LOCAL_APIC) | ||
| 416 | return 0; | ||
| 417 | |||
| 418 | if ( atomic_read(&nmi_active) < 0 ) | ||
| 340 | return 0; | 419 | return 0; |
| 341 | 420 | ||
| 342 | error = sysdev_class_register(&nmi_sysclass); | 421 | error = sysdev_class_register(&nmi_sysclass); |
| @@ -354,138 +433,269 @@ late_initcall(init_lapic_nmi_sysfs); | |||
| 354 | * Original code written by Keith Owens. | 433 | * Original code written by Keith Owens. |
| 355 | */ | 434 | */ |
| 356 | 435 | ||
| 357 | static void clear_msr_range(unsigned int base, unsigned int n) | 436 | static void write_watchdog_counter(unsigned int perfctr_msr, const char *descr) |
| 358 | { | ||
| 359 | unsigned int i; | ||
| 360 | |||
| 361 | for(i = 0; i < n; ++i) | ||
| 362 | wrmsr(base+i, 0, 0); | ||
| 363 | } | ||
| 364 | |||
| 365 | static void write_watchdog_counter(const char *descr) | ||
| 366 | { | 437 | { |
| 367 | u64 count = (u64)cpu_khz * 1000; | 438 | u64 count = (u64)cpu_khz * 1000; |
| 368 | 439 | ||
| 369 | do_div(count, nmi_hz); | 440 | do_div(count, nmi_hz); |
| 370 | if(descr) | 441 | if(descr) |
| 371 | Dprintk("setting %s to -0x%08Lx\n", descr, count); | 442 | Dprintk("setting %s to -0x%08Lx\n", descr, count); |
| 372 | wrmsrl(nmi_perfctr_msr, 0 - count); | 443 | wrmsrl(perfctr_msr, 0 - count); |
| 373 | } | 444 | } |
| 374 | 445 | ||
| 375 | static void setup_k7_watchdog(void) | 446 | /* Note that these events don't tick when the CPU idles. This means |
| 447 | the frequency varies with CPU load. */ | ||
| 448 | |||
| 449 | #define K7_EVNTSEL_ENABLE (1 << 22) | ||
| 450 | #define K7_EVNTSEL_INT (1 << 20) | ||
| 451 | #define K7_EVNTSEL_OS (1 << 17) | ||
| 452 | #define K7_EVNTSEL_USR (1 << 16) | ||
| 453 | #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 | ||
| 454 | #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING | ||
| 455 | |||
| 456 | static int setup_k7_watchdog(void) | ||
| 376 | { | 457 | { |
| 458 | unsigned int perfctr_msr, evntsel_msr; | ||
| 377 | unsigned int evntsel; | 459 | unsigned int evntsel; |
| 460 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
| 461 | |||
| 462 | perfctr_msr = MSR_K7_PERFCTR0; | ||
| 463 | evntsel_msr = MSR_K7_EVNTSEL0; | ||
| 464 | if (!reserve_perfctr_nmi(perfctr_msr)) | ||
| 465 | goto fail; | ||
| 378 | 466 | ||
| 379 | nmi_perfctr_msr = MSR_K7_PERFCTR0; | 467 | if (!reserve_evntsel_nmi(evntsel_msr)) |
| 468 | goto fail1; | ||
| 380 | 469 | ||
| 381 | clear_msr_range(MSR_K7_EVNTSEL0, 4); | 470 | wrmsrl(perfctr_msr, 0UL); |
| 382 | clear_msr_range(MSR_K7_PERFCTR0, 4); | ||
| 383 | 471 | ||
| 384 | evntsel = K7_EVNTSEL_INT | 472 | evntsel = K7_EVNTSEL_INT |
| 385 | | K7_EVNTSEL_OS | 473 | | K7_EVNTSEL_OS |
| 386 | | K7_EVNTSEL_USR | 474 | | K7_EVNTSEL_USR |
| 387 | | K7_NMI_EVENT; | 475 | | K7_NMI_EVENT; |
| 388 | 476 | ||
| 389 | wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); | 477 | /* setup the timer */ |
| 390 | write_watchdog_counter("K7_PERFCTR0"); | 478 | wrmsr(evntsel_msr, evntsel, 0); |
| 479 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0"); | ||
| 391 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 480 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
| 392 | evntsel |= K7_EVNTSEL_ENABLE; | 481 | evntsel |= K7_EVNTSEL_ENABLE; |
| 393 | wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); | 482 | wrmsr(evntsel_msr, evntsel, 0); |
| 483 | |||
| 484 | wd->perfctr_msr = perfctr_msr; | ||
| 485 | wd->evntsel_msr = evntsel_msr; | ||
| 486 | wd->cccr_msr = 0; //unused | ||
| 487 | wd->check_bit = 1ULL<<63; | ||
| 488 | return 1; | ||
| 489 | fail1: | ||
| 490 | release_perfctr_nmi(perfctr_msr); | ||
| 491 | fail: | ||
| 492 | return 0; | ||
| 493 | } | ||
| 494 | |||
| 495 | static void stop_k7_watchdog(void) | ||
| 496 | { | ||
| 497 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
| 498 | |||
| 499 | wrmsr(wd->evntsel_msr, 0, 0); | ||
| 500 | |||
| 501 | release_evntsel_nmi(wd->evntsel_msr); | ||
| 502 | release_perfctr_nmi(wd->perfctr_msr); | ||
| 394 | } | 503 | } |
| 395 | 504 | ||
| 396 | static void setup_p6_watchdog(void) | 505 | #define P6_EVNTSEL0_ENABLE (1 << 22) |
| 506 | #define P6_EVNTSEL_INT (1 << 20) | ||
| 507 | #define P6_EVNTSEL_OS (1 << 17) | ||
| 508 | #define P6_EVNTSEL_USR (1 << 16) | ||
| 509 | #define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79 | ||
| 510 | #define P6_NMI_EVENT P6_EVENT_CPU_CLOCKS_NOT_HALTED | ||
| 511 | |||
| 512 | static int setup_p6_watchdog(void) | ||
| 397 | { | 513 | { |
| 514 | unsigned int perfctr_msr, evntsel_msr; | ||
| 398 | unsigned int evntsel; | 515 | unsigned int evntsel; |
| 516 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
| 517 | |||
| 518 | perfctr_msr = MSR_P6_PERFCTR0; | ||
| 519 | evntsel_msr = MSR_P6_EVNTSEL0; | ||
| 520 | if (!reserve_perfctr_nmi(perfctr_msr)) | ||
| 521 | goto fail; | ||
| 399 | 522 | ||
| 400 | nmi_perfctr_msr = MSR_P6_PERFCTR0; | 523 | if (!reserve_evntsel_nmi(evntsel_msr)) |
| 524 | goto fail1; | ||
| 401 | 525 | ||
| 402 | clear_msr_range(MSR_P6_EVNTSEL0, 2); | 526 | wrmsrl(perfctr_msr, 0UL); |
| 403 | clear_msr_range(MSR_P6_PERFCTR0, 2); | ||
| 404 | 527 | ||
| 405 | evntsel = P6_EVNTSEL_INT | 528 | evntsel = P6_EVNTSEL_INT |
| 406 | | P6_EVNTSEL_OS | 529 | | P6_EVNTSEL_OS |
| 407 | | P6_EVNTSEL_USR | 530 | | P6_EVNTSEL_USR |
| 408 | | P6_NMI_EVENT; | 531 | | P6_NMI_EVENT; |
| 409 | 532 | ||
| 410 | wrmsr(MSR_P6_EVNTSEL0, evntsel, 0); | 533 | /* setup the timer */ |
| 411 | write_watchdog_counter("P6_PERFCTR0"); | 534 | wrmsr(evntsel_msr, evntsel, 0); |
| 535 | write_watchdog_counter(perfctr_msr, "P6_PERFCTR0"); | ||
| 412 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 536 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
| 413 | evntsel |= P6_EVNTSEL0_ENABLE; | 537 | evntsel |= P6_EVNTSEL0_ENABLE; |
| 414 | wrmsr(MSR_P6_EVNTSEL0, evntsel, 0); | 538 | wrmsr(evntsel_msr, evntsel, 0); |
| 539 | |||
| 540 | wd->perfctr_msr = perfctr_msr; | ||
| 541 | wd->evntsel_msr = evntsel_msr; | ||
| 542 | wd->cccr_msr = 0; //unused | ||
| 543 | wd->check_bit = 1ULL<<39; | ||
| 544 | return 1; | ||
| 545 | fail1: | ||
| 546 | release_perfctr_nmi(perfctr_msr); | ||
| 547 | fail: | ||
| 548 | return 0; | ||
| 549 | } | ||
| 550 | |||
| 551 | static void stop_p6_watchdog(void) | ||
| 552 | { | ||
| 553 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
| 554 | |||
| 555 | wrmsr(wd->evntsel_msr, 0, 0); | ||
| 556 | |||
| 557 | release_evntsel_nmi(wd->evntsel_msr); | ||
| 558 | release_perfctr_nmi(wd->perfctr_msr); | ||
| 415 | } | 559 | } |
| 416 | 560 | ||
| 561 | /* Note that these events don't tick when the CPU idles. This means | ||
| 562 | the frequency varies with CPU load. */ | ||
| 563 | |||
| 564 | #define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7) | ||
| 565 | #define P4_ESCR_EVENT_SELECT(N) ((N)<<25) | ||
| 566 | #define P4_ESCR_OS (1<<3) | ||
| 567 | #define P4_ESCR_USR (1<<2) | ||
| 568 | #define P4_CCCR_OVF_PMI0 (1<<26) | ||
| 569 | #define P4_CCCR_OVF_PMI1 (1<<27) | ||
| 570 | #define P4_CCCR_THRESHOLD(N) ((N)<<20) | ||
| 571 | #define P4_CCCR_COMPLEMENT (1<<19) | ||
| 572 | #define P4_CCCR_COMPARE (1<<18) | ||
| 573 | #define P4_CCCR_REQUIRED (3<<16) | ||
| 574 | #define P4_CCCR_ESCR_SELECT(N) ((N)<<13) | ||
| 575 | #define P4_CCCR_ENABLE (1<<12) | ||
| 576 | #define P4_CCCR_OVF (1<<31) | ||
| 577 | /* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter | ||
| 578 | CRU_ESCR0 (with any non-null event selector) through a complemented | ||
| 579 | max threshold. [IA32-Vol3, Section 14.9.9] */ | ||
| 580 | |||
| 417 | static int setup_p4_watchdog(void) | 581 | static int setup_p4_watchdog(void) |
| 418 | { | 582 | { |
| 583 | unsigned int perfctr_msr, evntsel_msr, cccr_msr; | ||
| 584 | unsigned int evntsel, cccr_val; | ||
| 419 | unsigned int misc_enable, dummy; | 585 | unsigned int misc_enable, dummy; |
| 586 | unsigned int ht_num; | ||
| 587 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
| 420 | 588 | ||
| 421 | rdmsr(MSR_P4_MISC_ENABLE, misc_enable, dummy); | 589 | rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy); |
| 422 | if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL)) | 590 | if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL)) |
| 423 | return 0; | 591 | return 0; |
| 424 | 592 | ||
| 425 | nmi_perfctr_msr = MSR_P4_IQ_COUNTER0; | ||
| 426 | nmi_p4_cccr_val = P4_NMI_IQ_CCCR0; | ||
| 427 | #ifdef CONFIG_SMP | 593 | #ifdef CONFIG_SMP |
| 428 | if (smp_num_siblings == 2) | 594 | /* detect which hyperthread we are on */ |
| 429 | nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1; | 595 | if (smp_num_siblings == 2) { |
| 596 | unsigned int ebx, apicid; | ||
| 597 | |||
| 598 | ebx = cpuid_ebx(1); | ||
| 599 | apicid = (ebx >> 24) & 0xff; | ||
| 600 | ht_num = apicid & 1; | ||
| 601 | } else | ||
| 430 | #endif | 602 | #endif |
| 603 | ht_num = 0; | ||
| 431 | 604 | ||
| 432 | if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL)) | 605 | /* performance counters are shared resources |
| 433 | clear_msr_range(0x3F1, 2); | 606 | * assign each hyperthread its own set |
| 434 | /* MSR 0x3F0 seems to have a default value of 0xFC00, but current | 607 | * (re-use the ESCR0 register, seems safe |
| 435 | docs doesn't fully define it, so leave it alone for now. */ | 608 | * and keeps the cccr_val the same) |
| 436 | if (boot_cpu_data.x86_model >= 0x3) { | 609 | */ |
| 437 | /* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */ | 610 | if (!ht_num) { |
| 438 | clear_msr_range(0x3A0, 26); | 611 | /* logical cpu 0 */ |
| 439 | clear_msr_range(0x3BC, 3); | 612 | perfctr_msr = MSR_P4_IQ_PERFCTR0; |
| 613 | evntsel_msr = MSR_P4_CRU_ESCR0; | ||
| 614 | cccr_msr = MSR_P4_IQ_CCCR0; | ||
| 615 | cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); | ||
| 440 | } else { | 616 | } else { |
| 441 | clear_msr_range(0x3A0, 31); | 617 | /* logical cpu 1 */ |
| 618 | perfctr_msr = MSR_P4_IQ_PERFCTR1; | ||
| 619 | evntsel_msr = MSR_P4_CRU_ESCR0; | ||
| 620 | cccr_msr = MSR_P4_IQ_CCCR1; | ||
| 621 | cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4); | ||
| 442 | } | 622 | } |
| 443 | clear_msr_range(0x3C0, 6); | 623 | |
| 444 | clear_msr_range(0x3C8, 6); | 624 | if (!reserve_perfctr_nmi(perfctr_msr)) |
| 445 | clear_msr_range(0x3E0, 2); | 625 | goto fail; |
| 446 | clear_msr_range(MSR_P4_CCCR0, 18); | 626 | |
| 447 | clear_msr_range(MSR_P4_PERFCTR0, 18); | 627 | if (!reserve_evntsel_nmi(evntsel_msr)) |
| 448 | 628 | goto fail1; | |
| 449 | wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0); | 629 | |
| 450 | wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0); | 630 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) |
| 451 | write_watchdog_counter("P4_IQ_COUNTER0"); | 631 | | P4_ESCR_OS |
| 632 | | P4_ESCR_USR; | ||
| 633 | |||
| 634 | cccr_val |= P4_CCCR_THRESHOLD(15) | ||
| 635 | | P4_CCCR_COMPLEMENT | ||
| 636 | | P4_CCCR_COMPARE | ||
| 637 | | P4_CCCR_REQUIRED; | ||
| 638 | |||
| 639 | wrmsr(evntsel_msr, evntsel, 0); | ||
| 640 | wrmsr(cccr_msr, cccr_val, 0); | ||
| 641 | write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0"); | ||
| 452 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 642 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
| 453 | wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); | 643 | cccr_val |= P4_CCCR_ENABLE; |
| 644 | wrmsr(cccr_msr, cccr_val, 0); | ||
| 645 | wd->perfctr_msr = perfctr_msr; | ||
| 646 | wd->evntsel_msr = evntsel_msr; | ||
| 647 | wd->cccr_msr = cccr_msr; | ||
| 648 | wd->check_bit = 1ULL<<39; | ||
| 454 | return 1; | 649 | return 1; |
| 650 | fail1: | ||
| 651 | release_perfctr_nmi(perfctr_msr); | ||
| 652 | fail: | ||
| 653 | return 0; | ||
| 455 | } | 654 | } |
| 456 | 655 | ||
| 457 | static void disable_intel_arch_watchdog(void) | 656 | static void stop_p4_watchdog(void) |
| 458 | { | 657 | { |
| 459 | unsigned ebx; | 658 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); |
| 460 | 659 | ||
| 461 | /* | 660 | wrmsr(wd->cccr_msr, 0, 0); |
| 462 | * Check whether the Architectural PerfMon supports | 661 | wrmsr(wd->evntsel_msr, 0, 0); |
| 463 | * Unhalted Core Cycles Event or not. | 662 | |
| 464 | * NOTE: Corresponding bit = 0 in ebp indicates event present. | 663 | release_evntsel_nmi(wd->evntsel_msr); |
| 465 | */ | 664 | release_perfctr_nmi(wd->perfctr_msr); |
| 466 | ebx = cpuid_ebx(10); | ||
| 467 | if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) | ||
| 468 | wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0); | ||
| 469 | } | 665 | } |
| 470 | 666 | ||
| 667 | #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL | ||
| 668 | #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK | ||
| 669 | |||
| 471 | static int setup_intel_arch_watchdog(void) | 670 | static int setup_intel_arch_watchdog(void) |
| 472 | { | 671 | { |
| 672 | unsigned int ebx; | ||
| 673 | union cpuid10_eax eax; | ||
| 674 | unsigned int unused; | ||
| 675 | unsigned int perfctr_msr, evntsel_msr; | ||
| 473 | unsigned int evntsel; | 676 | unsigned int evntsel; |
| 474 | unsigned ebx; | 677 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); |
| 475 | 678 | ||
| 476 | /* | 679 | /* |
| 477 | * Check whether the Architectural PerfMon supports | 680 | * Check whether the Architectural PerfMon supports |
| 478 | * Unhalted Core Cycles Event or not. | 681 | * Unhalted Core Cycles Event or not. |
| 479 | * NOTE: Corresponding bit = 0 in ebp indicates event present. | 682 | * NOTE: Corresponding bit = 0 in ebx indicates event present. |
| 480 | */ | 683 | */ |
| 481 | ebx = cpuid_ebx(10); | 684 | cpuid(10, &(eax.full), &ebx, &unused, &unused); |
| 482 | if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) | 685 | if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || |
| 483 | return 0; | 686 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) |
| 687 | goto fail; | ||
| 688 | |||
| 689 | perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0; | ||
| 690 | evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0; | ||
| 484 | 691 | ||
| 485 | nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0; | 692 | if (!reserve_perfctr_nmi(perfctr_msr)) |
| 693 | goto fail; | ||
| 486 | 694 | ||
| 487 | clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2); | 695 | if (!reserve_evntsel_nmi(evntsel_msr)) |
| 488 | clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2); | 696 | goto fail1; |
| 697 | |||
| 698 | wrmsrl(perfctr_msr, 0UL); | ||
| 489 | 699 | ||
| 490 | evntsel = ARCH_PERFMON_EVENTSEL_INT | 700 | evntsel = ARCH_PERFMON_EVENTSEL_INT |
| 491 | | ARCH_PERFMON_EVENTSEL_OS | 701 | | ARCH_PERFMON_EVENTSEL_OS |
| @@ -493,51 +703,145 @@ static int setup_intel_arch_watchdog(void) | |||
| 493 | | ARCH_PERFMON_NMI_EVENT_SEL | 703 | | ARCH_PERFMON_NMI_EVENT_SEL |
| 494 | | ARCH_PERFMON_NMI_EVENT_UMASK; | 704 | | ARCH_PERFMON_NMI_EVENT_UMASK; |
| 495 | 705 | ||
| 496 | wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0); | 706 | /* setup the timer */ |
| 497 | write_watchdog_counter("INTEL_ARCH_PERFCTR0"); | 707 | wrmsr(evntsel_msr, evntsel, 0); |
| 708 | write_watchdog_counter(perfctr_msr, "INTEL_ARCH_PERFCTR0"); | ||
| 498 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 709 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
| 499 | evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 710 | evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; |
| 500 | wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0); | 711 | wrmsr(evntsel_msr, evntsel, 0); |
| 712 | |||
| 713 | wd->perfctr_msr = perfctr_msr; | ||
| 714 | wd->evntsel_msr = evntsel_msr; | ||
| 715 | wd->cccr_msr = 0; //unused | ||
| 716 | wd->check_bit = 1ULL << (eax.split.bit_width - 1); | ||
| 501 | return 1; | 717 | return 1; |
| 718 | fail1: | ||
| 719 | release_perfctr_nmi(perfctr_msr); | ||
| 720 | fail: | ||
| 721 | return 0; | ||
| 502 | } | 722 | } |
| 503 | 723 | ||
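The architectural-perfmon path in setup_intel_arch_watchdog() above probes CPUID leaf 0xA before touching any counter: EAX reports the perfmon version, counter count and bit width, and a clear bit 0 in EBX means the unhalted-core-cycles event is actually implemented. A minimal user-space sketch of that same probe, using GCC's &lt;cpuid.h&gt;; it only reads CPUID, and is an illustration of the check rather than kernel code:

```c
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx)) {
        puts("CPUID leaf 0x0a not supported");
        return 1;
    }

    unsigned int version     = eax & 0xff;          /* perfmon version ID */
    unsigned int num_ctrs    = (eax >> 8) & 0xff;   /* general-purpose counters */
    unsigned int ctr_width   = (eax >> 16) & 0xff;  /* counter bit width */
    unsigned int mask_length = (eax >> 24) & 0xff;  /* valid bits in the EBX vector */

    /* bit 0 of EBX set means "unhalted core cycles" is NOT available */
    int cycles_ok = (mask_length >= 1) && !(ebx & 1);

    printf("version=%u counters=%u width=%u core-cycles event %savailable\n",
           version, num_ctrs, ctr_width, cycles_ok ? "" : "un");
    return 0;
}
```

This mirrors the hunk's two-part test (mask_length must cover the event index, and the matching EBX bit must be clear) and the `check_bit = 1ULL << (bit_width - 1)` derivation it performs afterwards.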
| 504 | void setup_apic_nmi_watchdog (void) | 724 | static void stop_intel_arch_watchdog(void) |
| 505 | { | 725 | { |
| 506 | switch (boot_cpu_data.x86_vendor) { | 726 | unsigned int ebx; |
| 507 | case X86_VENDOR_AMD: | 727 | union cpuid10_eax eax; |
| 508 | if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15) | 728 | unsigned int unused; |
| 509 | return; | 729 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); |
| 510 | setup_k7_watchdog(); | 730 | |
| 511 | break; | 731 | /* |
| 512 | case X86_VENDOR_INTEL: | 732 | * Check whether the Architectural PerfMon supports |
| 513 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { | 733 | * Unhalted Core Cycles Event or not. |
| 514 | if (!setup_intel_arch_watchdog()) | 734 | * NOTE: Corresponding bit = 0 in ebx indicates event present. |
| 735 | */ | ||
| 736 | cpuid(10, &(eax.full), &ebx, &unused, &unused); | ||
| 737 | if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | ||
| 738 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) | ||
| 739 | return; | ||
| 740 | |||
| 741 | wrmsr(wd->evntsel_msr, 0, 0); | ||
| 742 | release_evntsel_nmi(wd->evntsel_msr); | ||
| 743 | release_perfctr_nmi(wd->perfctr_msr); | ||
| 744 | } | ||
| 745 | |||
| 746 | void setup_apic_nmi_watchdog (void *unused) | ||
| 747 | { | ||
| 748 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
| 749 | |||
| 750 | /* only support LOCAL and IO APICs for now */ | ||
| 751 | if ((nmi_watchdog != NMI_LOCAL_APIC) && | ||
| 752 | (nmi_watchdog != NMI_IO_APIC)) | ||
| 753 | return; | ||
| 754 | |||
| 755 | if (wd->enabled == 1) | ||
| 756 | return; | ||
| 757 | |||
| 758 | /* cheap hack to support suspend/resume */ | ||
| 759 | /* if cpu0 is not active neither should the other cpus */ | ||
| 760 | if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0)) | ||
| 761 | return; | ||
| 762 | |||
| 763 | if (nmi_watchdog == NMI_LOCAL_APIC) { | ||
| 764 | switch (boot_cpu_data.x86_vendor) { | ||
| 765 | case X86_VENDOR_AMD: | ||
| 766 | if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15) | ||
| 515 | return; | 767 | return; |
| 516 | break; | 768 | if (!setup_k7_watchdog()) |
| 517 | } | ||
| 518 | switch (boot_cpu_data.x86) { | ||
| 519 | case 6: | ||
| 520 | if (boot_cpu_data.x86_model > 0xd) | ||
| 521 | return; | 769 | return; |
| 522 | |||
| 523 | setup_p6_watchdog(); | ||
| 524 | break; | 770 | break; |
| 525 | case 15: | 771 | case X86_VENDOR_INTEL: |
| 526 | if (boot_cpu_data.x86_model > 0x4) | 772 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { |
| 527 | return; | 773 | if (!setup_intel_arch_watchdog()) |
| 774 | return; | ||
| 775 | break; | ||
| 776 | } | ||
| 777 | switch (boot_cpu_data.x86) { | ||
| 778 | case 6: | ||
| 779 | if (boot_cpu_data.x86_model > 0xd) | ||
| 780 | return; | ||
| 781 | |||
| 782 | if (!setup_p6_watchdog()) | ||
| 783 | return; | ||
| 784 | break; | ||
| 785 | case 15: | ||
| 786 | if (boot_cpu_data.x86_model > 0x4) | ||
| 787 | return; | ||
| 528 | 788 | ||
| 529 | if (!setup_p4_watchdog()) | 789 | if (!setup_p4_watchdog()) |
| 790 | return; | ||
| 791 | break; | ||
| 792 | default: | ||
| 530 | return; | 793 | return; |
| 794 | } | ||
| 531 | break; | 795 | break; |
| 532 | default: | 796 | default: |
| 533 | return; | 797 | return; |
| 534 | } | 798 | } |
| 535 | break; | 799 | } |
| 536 | default: | 800 | wd->enabled = 1; |
| 801 | atomic_inc(&nmi_active); | ||
| 802 | } | ||
| 803 | |||
| 804 | void stop_apic_nmi_watchdog(void *unused) | ||
| 805 | { | ||
| 806 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
| 807 | |||
| 808 | /* only support LOCAL and IO APICs for now */ | ||
| 809 | if ((nmi_watchdog != NMI_LOCAL_APIC) && | ||
| 810 | (nmi_watchdog != NMI_IO_APIC)) | ||
| 811 | return; | ||
| 812 | |||
| 813 | if (wd->enabled == 0) | ||
| 537 | return; | 814 | return; |
| 815 | |||
| 816 | if (nmi_watchdog == NMI_LOCAL_APIC) { | ||
| 817 | switch (boot_cpu_data.x86_vendor) { | ||
| 818 | case X86_VENDOR_AMD: | ||
| 819 | stop_k7_watchdog(); | ||
| 820 | break; | ||
| 821 | case X86_VENDOR_INTEL: | ||
| 822 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { | ||
| 823 | stop_intel_arch_watchdog(); | ||
| 824 | break; | ||
| 825 | } | ||
| 826 | switch (boot_cpu_data.x86) { | ||
| 827 | case 6: | ||
| 828 | if (boot_cpu_data.x86_model > 0xd) | ||
| 829 | break; | ||
| 830 | stop_p6_watchdog(); | ||
| 831 | break; | ||
| 832 | case 15: | ||
| 833 | if (boot_cpu_data.x86_model > 0x4) | ||
| 834 | break; | ||
| 835 | stop_p4_watchdog(); | ||
| 836 | break; | ||
| 837 | } | ||
| 838 | break; | ||
| 839 | default: | ||
| 840 | return; | ||
| 841 | } | ||
| 538 | } | 842 | } |
| 539 | lapic_nmi_owner = LAPIC_NMI_WATCHDOG; | 843 | wd->enabled = 0; |
| 540 | nmi_active = 1; | 844 | atomic_dec(&nmi_active); |
| 541 | } | 845 | } |
| 542 | 846 | ||
| 543 | /* | 847 | /* |
| @@ -579,7 +883,7 @@ EXPORT_SYMBOL(touch_nmi_watchdog); | |||
| 579 | 883 | ||
| 580 | extern void die_nmi(struct pt_regs *, const char *msg); | 884 | extern void die_nmi(struct pt_regs *, const char *msg); |
| 581 | 885 | ||
| 582 | void nmi_watchdog_tick (struct pt_regs * regs) | 886 | __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) |
| 583 | { | 887 | { |
| 584 | 888 | ||
| 585 | /* | 889 | /* |
| @@ -588,11 +892,23 @@ void nmi_watchdog_tick (struct pt_regs * regs) | |||
| 588 | * smp_processor_id(). | 892 | * smp_processor_id(). |
| 589 | */ | 893 | */ |
| 590 | unsigned int sum; | 894 | unsigned int sum; |
| 895 | int touched = 0; | ||
| 591 | int cpu = smp_processor_id(); | 896 | int cpu = smp_processor_id(); |
| 897 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
| 898 | u64 dummy; | ||
| 899 | int rc=0; | ||
| 900 | |||
| 901 | /* check for other users first */ | ||
| 902 | if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) | ||
| 903 | == NOTIFY_STOP) { | ||
| 904 | rc = 1; | ||
| 905 | touched = 1; | ||
| 906 | } | ||
| 592 | 907 | ||
| 593 | sum = per_cpu(irq_stat, cpu).apic_timer_irqs; | 908 | sum = per_cpu(irq_stat, cpu).apic_timer_irqs; |
| 594 | 909 | ||
| 595 | if (last_irq_sums[cpu] == sum) { | 910 | /* if the apic timer isn't firing, this cpu isn't doing much */ |
| 911 | if (!touched && last_irq_sums[cpu] == sum) { | ||
| 596 | /* | 912 | /* |
| 597 | * Ayiee, looks like this CPU is stuck ... | 913 | * Ayiee, looks like this CPU is stuck ... |
| 598 | * wait a few IRQs (5 seconds) before doing the oops ... | 914 | * wait a few IRQs (5 seconds) before doing the oops ... |
| @@ -607,27 +923,59 @@ void nmi_watchdog_tick (struct pt_regs * regs) | |||
| 607 | last_irq_sums[cpu] = sum; | 923 | last_irq_sums[cpu] = sum; |
| 608 | alert_counter[cpu] = 0; | 924 | alert_counter[cpu] = 0; |
| 609 | } | 925 | } |
| 610 | if (nmi_perfctr_msr) { | 926 | /* see if the nmi watchdog went off */ |
| 611 | if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) { | 927 | if (wd->enabled) { |
| 612 | /* | 928 | if (nmi_watchdog == NMI_LOCAL_APIC) { |
| 613 | * P4 quirks: | 929 | rdmsrl(wd->perfctr_msr, dummy); |
| 614 | * - An overflown perfctr will assert its interrupt | 930 | if (dummy & wd->check_bit){ |
| 615 | * until the OVF flag in its CCCR is cleared. | 931 | /* this wasn't a watchdog timer interrupt */ |
| 616 | * - LVTPC is masked on interrupt and must be | 932 | goto done; |
| 617 | * unmasked by the LVTPC handler. | 933 | } |
| 934 | |||
| 935 | /* only Intel P4 uses the cccr msr */ | ||
| 936 | if (wd->cccr_msr != 0) { | ||
| 937 | /* | ||
| 938 | * P4 quirks: | ||
| 939 | * - An overflown perfctr will assert its interrupt | ||
| 940 | * until the OVF flag in its CCCR is cleared. | ||
| 941 | * - LVTPC is masked on interrupt and must be | ||
| 942 | * unmasked by the LVTPC handler. | ||
| 943 | */ | ||
| 944 | rdmsrl(wd->cccr_msr, dummy); | ||
| 945 | dummy &= ~P4_CCCR_OVF; | ||
| 946 | wrmsrl(wd->cccr_msr, dummy); | ||
| 947 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
| 948 | } | ||
| 949 | else if (wd->perfctr_msr == MSR_P6_PERFCTR0 || | ||
| 950 | wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) { | ||
| 951 | /* P6 based Pentium M need to re-unmask | ||
| 952 | * the apic vector but it doesn't hurt | ||
| 953 | * other P6 variant. | ||
| 954 | * ArchPerfom/Core Duo also needs this */ | ||
| 955 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
| 956 | } | ||
| 957 | /* start the cycle over again */ | ||
| 958 | write_watchdog_counter(wd->perfctr_msr, NULL); | ||
| 959 | rc = 1; | ||
| 960 | } else if (nmi_watchdog == NMI_IO_APIC) { | ||
| 961 | /* don't know how to accurately check for this. | ||
| 962 | * just assume it was a watchdog timer interrupt | ||
| 963 | * This matches the old behaviour. | ||
| 618 | */ | 964 | */ |
| 619 | wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); | 965 | rc = 1; |
| 620 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
| 621 | } | 966 | } |
| 622 | else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 || | ||
| 623 | nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) { | ||
| 624 | /* Only P6 based Pentium M need to re-unmask | ||
| 625 | * the apic vector but it doesn't hurt | ||
| 626 | * other P6 variant */ | ||
| 627 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
| 628 | } | ||
| 629 | write_watchdog_counter(NULL); | ||
| 630 | } | 967 | } |
| 968 | done: | ||
| 969 | return rc; | ||
| 970 | } | ||
| 971 | |||
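The reworked nmi_watchdog_tick() above decides whether the NMI came from its own counter by preloading the counter with a negative count and testing the top bit on entry: while the count is still climbing toward zero the bit stays set and the NMI must have had another source; once the counter wraps, the bit clears and the period has elapsed, so the handler reloads it. A small self-contained sketch of that arithmetic, with plain integers standing in for the MSR and the 40-bit width matching the `1ULL << 39` check bit the P4 path installs:

```c
#include <stdio.h>
#include <stdint.h>

#define CTR_BITS   40                         /* P4 IQ counter width */
#define CTR_MASK   ((1ULL << CTR_BITS) - 1)
#define CHECK_BIT  (1ULL << (CTR_BITS - 1))   /* same idea as wd->check_bit */

int main(void)
{
    uint64_t period = 1000;                   /* events per watchdog period */
    uint64_t ctr = (0 - period) & CTR_MASK;   /* preload with -period */

    for (uint64_t tick = 0; tick <= period; tick++) {
        if (!(ctr & CHECK_BIT)) {             /* top bit cleared: counter wrapped */
            printf("overflow after %llu events -> watchdog NMI\n",
                   (unsigned long long)tick);
            ctr = (0 - period) & CTR_MASK;    /* start the cycle over again */
            break;
        }
        ctr = (ctr + 1) & CTR_MASK;           /* hardware increments per event */
    }
    return 0;
}
```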
| 972 | int do_nmi_callback(struct pt_regs * regs, int cpu) | ||
| 973 | { | ||
| 974 | #ifdef CONFIG_SYSCTL | ||
| 975 | if (unknown_nmi_panic) | ||
| 976 | return unknown_nmi_panic_callback(regs, cpu); | ||
| 977 | #endif | ||
| 978 | return 0; | ||
| 631 | } | 979 | } |
| 632 | 980 | ||
| 633 | #ifdef CONFIG_SYSCTL | 981 | #ifdef CONFIG_SYSCTL |
| @@ -637,36 +985,46 @@ static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu) | |||
| 637 | unsigned char reason = get_nmi_reason(); | 985 | unsigned char reason = get_nmi_reason(); |
| 638 | char buf[64]; | 986 | char buf[64]; |
| 639 | 987 | ||
| 640 | if (!(reason & 0xc0)) { | 988 | sprintf(buf, "NMI received for unknown reason %02x\n", reason); |
| 641 | sprintf(buf, "NMI received for unknown reason %02x\n", reason); | 989 | die_nmi(regs, buf); |
| 642 | die_nmi(regs, buf); | ||
| 643 | } | ||
| 644 | return 0; | 990 | return 0; |
| 645 | } | 991 | } |
| 646 | 992 | ||
| 647 | /* | 993 | /* |
| 648 | * proc handler for /proc/sys/kernel/unknown_nmi_panic | 994 | * proc handler for /proc/sys/kernel/nmi |
| 649 | */ | 995 | */ |
| 650 | int proc_unknown_nmi_panic(ctl_table *table, int write, struct file *file, | 996 | int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file, |
| 651 | void __user *buffer, size_t *length, loff_t *ppos) | 997 | void __user *buffer, size_t *length, loff_t *ppos) |
| 652 | { | 998 | { |
| 653 | int old_state; | 999 | int old_state; |
| 654 | 1000 | ||
| 655 | old_state = unknown_nmi_panic; | 1001 | nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0; |
| 1002 | old_state = nmi_watchdog_enabled; | ||
| 656 | proc_dointvec(table, write, file, buffer, length, ppos); | 1003 | proc_dointvec(table, write, file, buffer, length, ppos); |
| 657 | if (!!old_state == !!unknown_nmi_panic) | 1004 | if (!!old_state == !!nmi_watchdog_enabled) |
| 658 | return 0; | 1005 | return 0; |
| 659 | 1006 | ||
| 660 | if (unknown_nmi_panic) { | 1007 | if (atomic_read(&nmi_active) < 0) { |
| 661 | if (reserve_lapic_nmi() < 0) { | 1008 | printk( KERN_WARNING "NMI watchdog is permanently disabled\n"); |
| 662 | unknown_nmi_panic = 0; | 1009 | return -EIO; |
| 663 | return -EBUSY; | 1010 | } |
| 664 | } else { | 1011 | |
| 665 | set_nmi_callback(unknown_nmi_panic_callback); | 1012 | if (nmi_watchdog == NMI_DEFAULT) { |
| 666 | } | 1013 | if (nmi_known_cpu() > 0) |
| 1014 | nmi_watchdog = NMI_LOCAL_APIC; | ||
| 1015 | else | ||
| 1016 | nmi_watchdog = NMI_IO_APIC; | ||
| 1017 | } | ||
| 1018 | |||
| 1019 | if (nmi_watchdog == NMI_LOCAL_APIC) { | ||
| 1020 | if (nmi_watchdog_enabled) | ||
| 1021 | enable_lapic_nmi_watchdog(); | ||
| 1022 | else | ||
| 1023 | disable_lapic_nmi_watchdog(); | ||
| 667 | } else { | 1024 | } else { |
| 668 | release_lapic_nmi(); | 1025 | printk( KERN_WARNING |
| 669 | unset_nmi_callback(); | 1026 | "NMI watchdog doesn't know what hardware to touch\n"); |
| 1027 | return -EIO; | ||
| 670 | } | 1028 | } |
| 671 | return 0; | 1029 | return 0; |
| 672 | } | 1030 | } |
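The proc handler above replaces the old unknown_nmi_panic knob with a run-time switch for the LAPIC watchdog, returning -EIO when the watchdog is permanently disabled or has no hardware to drive. A small user-space sketch that flips it, assuming the sysctl is registered under the path the hunk's comment names (/proc/sys/kernel/nmi); the exact file name depends on how the sysctl table registers it:

```c
#include <stdio.h>

int main(int argc, char **argv)
{
    const char *val = (argc > 1) ? argv[1] : "1";   /* "1" enable, "0" disable */
    FILE *f = fopen("/proc/sys/kernel/nmi", "w");   /* path per the comment above */

    if (!f) {
        perror("/proc/sys/kernel/nmi");
        return 1;
    }
    fprintf(f, "%s\n", val);
    if (fclose(f) != 0) {       /* a rejected write (e.g. -EIO) surfaces here */
        perror("write");
        return 1;
    }
    return 0;
}
```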
| @@ -675,7 +1033,11 @@ int proc_unknown_nmi_panic(ctl_table *table, int write, struct file *file, | |||
| 675 | 1033 | ||
| 676 | EXPORT_SYMBOL(nmi_active); | 1034 | EXPORT_SYMBOL(nmi_active); |
| 677 | EXPORT_SYMBOL(nmi_watchdog); | 1035 | EXPORT_SYMBOL(nmi_watchdog); |
| 678 | EXPORT_SYMBOL(reserve_lapic_nmi); | 1036 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi); |
| 679 | EXPORT_SYMBOL(release_lapic_nmi); | 1037 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); |
| 1038 | EXPORT_SYMBOL(reserve_perfctr_nmi); | ||
| 1039 | EXPORT_SYMBOL(release_perfctr_nmi); | ||
| 1040 | EXPORT_SYMBOL(reserve_evntsel_nmi); | ||
| 1041 | EXPORT_SYMBOL(release_evntsel_nmi); | ||
| 680 | EXPORT_SYMBOL(disable_timer_nmi_watchdog); | 1042 | EXPORT_SYMBOL(disable_timer_nmi_watchdog); |
| 681 | EXPORT_SYMBOL(enable_timer_nmi_watchdog); | 1043 | EXPORT_SYMBOL(enable_timer_nmi_watchdog); |
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index 8657c739656a..8c190ca7ae44 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | #include <linux/kallsyms.h> | 37 | #include <linux/kallsyms.h> |
| 38 | #include <linux/ptrace.h> | 38 | #include <linux/ptrace.h> |
| 39 | #include <linux/random.h> | 39 | #include <linux/random.h> |
| 40 | #include <linux/personality.h> | ||
| 40 | 41 | ||
| 41 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
| 42 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
| @@ -320,15 +321,6 @@ void show_regs(struct pt_regs * regs) | |||
| 320 | * the "args". | 321 | * the "args". |
| 321 | */ | 322 | */ |
| 322 | extern void kernel_thread_helper(void); | 323 | extern void kernel_thread_helper(void); |
| 323 | __asm__(".section .text\n" | ||
| 324 | ".align 4\n" | ||
| 325 | "kernel_thread_helper:\n\t" | ||
| 326 | "movl %edx,%eax\n\t" | ||
| 327 | "pushl %edx\n\t" | ||
| 328 | "call *%ebx\n\t" | ||
| 329 | "pushl %eax\n\t" | ||
| 330 | "call do_exit\n" | ||
| 331 | ".previous"); | ||
| 332 | 324 | ||
| 333 | /* | 325 | /* |
| 334 | * Create a kernel thread | 326 | * Create a kernel thread |
| @@ -346,7 +338,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |||
| 346 | regs.xes = __USER_DS; | 338 | regs.xes = __USER_DS; |
| 347 | regs.orig_eax = -1; | 339 | regs.orig_eax = -1; |
| 348 | regs.eip = (unsigned long) kernel_thread_helper; | 340 | regs.eip = (unsigned long) kernel_thread_helper; |
| 349 | regs.xcs = __KERNEL_CS; | 341 | regs.xcs = __KERNEL_CS | get_kernel_rpl(); |
| 350 | regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2; | 342 | regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2; |
| 351 | 343 | ||
| 352 | /* Ok, create the new process.. */ | 344 | /* Ok, create the new process.. */ |
| @@ -905,7 +897,7 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *u_info) | |||
| 905 | 897 | ||
| 906 | unsigned long arch_align_stack(unsigned long sp) | 898 | unsigned long arch_align_stack(unsigned long sp) |
| 907 | { | 899 | { |
| 908 | if (randomize_va_space) | 900 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) |
| 909 | sp -= get_random_int() % 8192; | 901 | sp -= get_random_int() % 8192; |
| 910 | return sp & ~0xf; | 902 | return sp & ~0xf; |
| 911 | } | 903 | } |
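The arch_align_stack() change above makes the stack-gap randomization honor the ADDR_NO_RANDOMIZE personality bit in addition to the global randomize_va_space setting. From user space that bit is toggled through personality(2); a minimal sketch, assuming glibc's &lt;sys/personality.h&gt; provides ADDR_NO_RANDOMIZE:

```c
#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
    /* read the current persona without changing it */
    int old = personality(0xffffffff);
    if (old == -1) {
        perror("personality");
        return 1;
    }

    printf("stack randomization currently %s\n",
           (old & ADDR_NO_RANDOMIZE) ? "disabled" : "enabled");

    /* set ADDR_NO_RANDOMIZE; a subsequent execve() would then see an
     * unrandomized stack gap under the change shown above */
    if (personality(old | ADDR_NO_RANDOMIZE) == -1) {
        perror("personality");
        return 1;
    }
    return 0;
}
```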
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c index d3db03f4085d..775f50e9395b 100644 --- a/arch/i386/kernel/ptrace.c +++ b/arch/i386/kernel/ptrace.c | |||
| @@ -185,17 +185,17 @@ static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_ | |||
| 185 | return addr; | 185 | return addr; |
| 186 | } | 186 | } |
| 187 | 187 | ||
| 188 | static inline int is_at_popf(struct task_struct *child, struct pt_regs *regs) | 188 | static inline int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) |
| 189 | { | 189 | { |
| 190 | int i, copied; | 190 | int i, copied; |
| 191 | unsigned char opcode[16]; | 191 | unsigned char opcode[15]; |
| 192 | unsigned long addr = convert_eip_to_linear(child, regs); | 192 | unsigned long addr = convert_eip_to_linear(child, regs); |
| 193 | 193 | ||
| 194 | copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); | 194 | copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); |
| 195 | for (i = 0; i < copied; i++) { | 195 | for (i = 0; i < copied; i++) { |
| 196 | switch (opcode[i]) { | 196 | switch (opcode[i]) { |
| 197 | /* popf */ | 197 | /* popf and iret */ |
| 198 | case 0x9d: | 198 | case 0x9d: case 0xcf: |
| 199 | return 1; | 199 | return 1; |
| 200 | /* opcode and address size prefixes */ | 200 | /* opcode and address size prefixes */ |
| 201 | case 0x66: case 0x67: | 201 | case 0x66: case 0x67: |
| @@ -247,7 +247,7 @@ static void set_singlestep(struct task_struct *child) | |||
| 247 | * don't mark it as being "us" that set it, so that we | 247 | * don't mark it as being "us" that set it, so that we |
| 248 | * won't clear it by hand later. | 248 | * won't clear it by hand later. |
| 249 | */ | 249 | */ |
| 250 | if (is_at_popf(child, regs)) | 250 | if (is_setting_trap_flag(child, regs)) |
| 251 | return; | 251 | return; |
| 252 | 252 | ||
| 253 | child->ptrace |= PT_DTRACE; | 253 | child->ptrace |= PT_DTRACE; |
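The ptrace hunk widens the single-step check so that, besides popf (0x9d), an iret (0xcf) at the instruction pointer also counts as "the task is about to set TF itself", after skipping size prefixes. A small self-contained sketch of that scan over a raw byte buffer; the prefix list is abbreviated to the two prefixes visible in the hunk, whereas the kernel function handles more:

```c
#include <stdio.h>

/* Return 1 if the bytes start with popf or iret, possibly preceded
 * by 0x66/0x67 operand/address-size prefixes. */
static int sets_trap_flag(const unsigned char *op, int len)
{
    for (int i = 0; i < len; i++) {
        switch (op[i]) {
        case 0x9d:               /* popf */
        case 0xcf:               /* iret */
            return 1;
        case 0x66: case 0x67:    /* size prefixes: keep scanning */
            continue;
        default:
            return 0;            /* anything else cannot set TF this way */
        }
    }
    return 0;
}

int main(void)
{
    unsigned char a[] = { 0x66, 0x9d };  /* 16-bit popf */
    unsigned char b[] = { 0xcf };        /* iret */
    unsigned char c[] = { 0x90 };        /* nop */

    printf("%d %d %d\n", sets_trap_flag(a, 2), sets_trap_flag(b, 1),
           sets_trap_flag(c, 1));        /* prints: 1 1 0 */
    return 0;
}
```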
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c index 54cfeabbc5e4..84278e0093a2 100644 --- a/arch/i386/kernel/reboot.c +++ b/arch/i386/kernel/reboot.c | |||
| @@ -145,14 +145,10 @@ real_mode_gdt_entries [3] = | |||
| 145 | 0x000092000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ | 145 | 0x000092000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ |
| 146 | }; | 146 | }; |
| 147 | 147 | ||
| 148 | static struct | 148 | static struct Xgt_desc_struct |
| 149 | { | 149 | real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries }, |
| 150 | unsigned short size __attribute__ ((packed)); | 150 | real_mode_idt = { 0x3ff, 0 }, |
| 151 | unsigned long long * base __attribute__ ((packed)); | 151 | no_idt = { 0, 0 }; |
| 152 | } | ||
| 153 | real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries }, | ||
| 154 | real_mode_idt = { 0x3ff, NULL }, | ||
| 155 | no_idt = { 0, NULL }; | ||
| 156 | 152 | ||
| 157 | 153 | ||
| 158 | /* This is 16-bit protected mode code to disable paging and the cache, | 154 | /* This is 16-bit protected mode code to disable paging and the cache, |
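The reboot.c hunk folds three hand-rolled packed structs into the kernel's existing Xgt_desc_struct type; the layout both describe is the 6-byte pseudo-descriptor that lgdt/lidt consume. A sketch of that layout and why the packing matters (illustrative only, not the kernel's definition):

```c
#include <stdio.h>
#include <stdint.h>

/* lgdt/lidt pseudo-descriptor: a 16-bit limit immediately followed by a
 * 32-bit linear base. Without "packed" the compiler would pad after the
 * limit and the descriptor loaded into the CPU would be malformed. */
struct pseudo_desc {
    uint16_t limit;
    uint32_t base;
} __attribute__((packed));

int main(void)
{
    printf("sizeof = %zu (must be 6)\n", sizeof(struct pseudo_desc));
    return 0;
}
```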
diff --git a/arch/i386/kernel/relocate_kernel.S b/arch/i386/kernel/relocate_kernel.S index d312616effa1..f151d6fae462 100644 --- a/arch/i386/kernel/relocate_kernel.S +++ b/arch/i386/kernel/relocate_kernel.S | |||
| @@ -7,16 +7,138 @@ | |||
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/linkage.h> | 9 | #include <linux/linkage.h> |
| 10 | #include <asm/page.h> | ||
| 11 | #include <asm/kexec.h> | ||
| 12 | |||
| 13 | /* | ||
| 14 | * Must be relocatable PIC code callable as a C function | ||
| 15 | */ | ||
| 16 | |||
| 17 | #define PTR(x) (x << 2) | ||
| 18 | #define PAGE_ALIGNED (1 << PAGE_SHIFT) | ||
| 19 | #define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */ | ||
| 20 | #define PAE_PGD_ATTR 0x01 /* _PAGE_PRESENT */ | ||
| 21 | |||
| 22 | .text | ||
| 23 | .align PAGE_ALIGNED | ||
| 24 | .globl relocate_kernel | ||
| 25 | relocate_kernel: | ||
| 26 | movl 8(%esp), %ebp /* list of pages */ | ||
| 27 | |||
| 28 | #ifdef CONFIG_X86_PAE | ||
| 29 | /* map the control page at its virtual address */ | ||
| 30 | |||
| 31 | movl PTR(VA_PGD)(%ebp), %edi | ||
| 32 | movl PTR(VA_CONTROL_PAGE)(%ebp), %eax | ||
| 33 | andl $0xc0000000, %eax | ||
| 34 | shrl $27, %eax | ||
| 35 | addl %edi, %eax | ||
| 36 | |||
| 37 | movl PTR(PA_PMD_0)(%ebp), %edx | ||
| 38 | orl $PAE_PGD_ATTR, %edx | ||
| 39 | movl %edx, (%eax) | ||
| 40 | |||
| 41 | movl PTR(VA_PMD_0)(%ebp), %edi | ||
| 42 | movl PTR(VA_CONTROL_PAGE)(%ebp), %eax | ||
| 43 | andl $0x3fe00000, %eax | ||
| 44 | shrl $18, %eax | ||
| 45 | addl %edi, %eax | ||
| 46 | |||
| 47 | movl PTR(PA_PTE_0)(%ebp), %edx | ||
| 48 | orl $PAGE_ATTR, %edx | ||
| 49 | movl %edx, (%eax) | ||
| 50 | |||
| 51 | movl PTR(VA_PTE_0)(%ebp), %edi | ||
| 52 | movl PTR(VA_CONTROL_PAGE)(%ebp), %eax | ||
| 53 | andl $0x001ff000, %eax | ||
| 54 | shrl $9, %eax | ||
| 55 | addl %edi, %eax | ||
| 56 | |||
| 57 | movl PTR(PA_CONTROL_PAGE)(%ebp), %edx | ||
| 58 | orl $PAGE_ATTR, %edx | ||
| 59 | movl %edx, (%eax) | ||
| 60 | |||
| 61 | /* identity map the control page at its physical address */ | ||
| 62 | |||
| 63 | movl PTR(VA_PGD)(%ebp), %edi | ||
| 64 | movl PTR(PA_CONTROL_PAGE)(%ebp), %eax | ||
| 65 | andl $0xc0000000, %eax | ||
| 66 | shrl $27, %eax | ||
| 67 | addl %edi, %eax | ||
| 68 | |||
| 69 | movl PTR(PA_PMD_1)(%ebp), %edx | ||
| 70 | orl $PAE_PGD_ATTR, %edx | ||
| 71 | movl %edx, (%eax) | ||
| 72 | |||
| 73 | movl PTR(VA_PMD_1)(%ebp), %edi | ||
| 74 | movl PTR(PA_CONTROL_PAGE)(%ebp), %eax | ||
| 75 | andl $0x3fe00000, %eax | ||
| 76 | shrl $18, %eax | ||
| 77 | addl %edi, %eax | ||
| 78 | |||
| 79 | movl PTR(PA_PTE_1)(%ebp), %edx | ||
| 80 | orl $PAGE_ATTR, %edx | ||
| 81 | movl %edx, (%eax) | ||
| 82 | |||
| 83 | movl PTR(VA_PTE_1)(%ebp), %edi | ||
| 84 | movl PTR(PA_CONTROL_PAGE)(%ebp), %eax | ||
| 85 | andl $0x001ff000, %eax | ||
| 86 | shrl $9, %eax | ||
| 87 | addl %edi, %eax | ||
| 88 | |||
| 89 | movl PTR(PA_CONTROL_PAGE)(%ebp), %edx | ||
| 90 | orl $PAGE_ATTR, %edx | ||
| 91 | movl %edx, (%eax) | ||
| 92 | #else | ||
| 93 | /* map the control page at its virtual address */ | ||
| 94 | |||
| 95 | movl PTR(VA_PGD)(%ebp), %edi | ||
| 96 | movl PTR(VA_CONTROL_PAGE)(%ebp), %eax | ||
| 97 | andl $0xffc00000, %eax | ||
| 98 | shrl $20, %eax | ||
| 99 | addl %edi, %eax | ||
| 100 | |||
| 101 | movl PTR(PA_PTE_0)(%ebp), %edx | ||
| 102 | orl $PAGE_ATTR, %edx | ||
| 103 | movl %edx, (%eax) | ||
| 104 | |||
| 105 | movl PTR(VA_PTE_0)(%ebp), %edi | ||
| 106 | movl PTR(VA_CONTROL_PAGE)(%ebp), %eax | ||
| 107 | andl $0x003ff000, %eax | ||
| 108 | shrl $10, %eax | ||
| 109 | addl %edi, %eax | ||
| 110 | |||
| 111 | movl PTR(PA_CONTROL_PAGE)(%ebp), %edx | ||
| 112 | orl $PAGE_ATTR, %edx | ||
| 113 | movl %edx, (%eax) | ||
| 114 | |||
| 115 | /* identity map the control page at its physical address */ | ||
| 116 | |||
| 117 | movl PTR(VA_PGD)(%ebp), %edi | ||
| 118 | movl PTR(PA_CONTROL_PAGE)(%ebp), %eax | ||
| 119 | andl $0xffc00000, %eax | ||
| 120 | shrl $20, %eax | ||
| 121 | addl %edi, %eax | ||
| 122 | |||
| 123 | movl PTR(PA_PTE_1)(%ebp), %edx | ||
| 124 | orl $PAGE_ATTR, %edx | ||
| 125 | movl %edx, (%eax) | ||
| 126 | |||
| 127 | movl PTR(VA_PTE_1)(%ebp), %edi | ||
| 128 | movl PTR(PA_CONTROL_PAGE)(%ebp), %eax | ||
| 129 | andl $0x003ff000, %eax | ||
| 130 | shrl $10, %eax | ||
| 131 | addl %edi, %eax | ||
| 132 | |||
| 133 | movl PTR(PA_CONTROL_PAGE)(%ebp), %edx | ||
| 134 | orl $PAGE_ATTR, %edx | ||
| 135 | movl %edx, (%eax) | ||
| 136 | #endif | ||
| 10 | 137 | ||
| 11 | /* | ||
| 12 | * Must be relocatable PIC code callable as a C function, that once | ||
| 13 | * it starts can not use the previous processes stack. | ||
| 14 | */ | ||
| 15 | .globl relocate_new_kernel | ||
| 16 | relocate_new_kernel: | 138 | relocate_new_kernel: |
| 17 | /* read the arguments and say goodbye to the stack */ | 139 | /* read the arguments and say goodbye to the stack */ |
| 18 | movl 4(%esp), %ebx /* page_list */ | 140 | movl 4(%esp), %ebx /* page_list */ |
| 19 | movl 8(%esp), %ebp /* reboot_code_buffer */ | 141 | movl 8(%esp), %ebp /* list of pages */ |
| 20 | movl 12(%esp), %edx /* start address */ | 142 | movl 12(%esp), %edx /* start address */ |
| 21 | movl 16(%esp), %ecx /* cpu_has_pae */ | 143 | movl 16(%esp), %ecx /* cpu_has_pae */ |
| 22 | 144 | ||
| @@ -24,11 +146,26 @@ relocate_new_kernel: | |||
| 24 | pushl $0 | 146 | pushl $0 |
| 25 | popfl | 147 | popfl |
| 26 | 148 | ||
| 27 | /* set a new stack at the bottom of our page... */ | 149 | /* get physical address of control page now */ |
| 28 | lea 4096(%ebp), %esp | 150 | /* this is impossible after page table switch */ |
| 151 | movl PTR(PA_CONTROL_PAGE)(%ebp), %edi | ||
| 29 | 152 | ||
| 30 | /* store the parameters back on the stack */ | 153 | /* switch to new set of page tables */ |
| 31 | pushl %edx /* store the start address */ | 154 | movl PTR(PA_PGD)(%ebp), %eax |
| 155 | movl %eax, %cr3 | ||
| 156 | |||
| 157 | /* setup a new stack at the end of the physical control page */ | ||
| 158 | lea 4096(%edi), %esp | ||
| 159 | |||
| 160 | /* jump to identity mapped page */ | ||
| 161 | movl %edi, %eax | ||
| 162 | addl $(identity_mapped - relocate_kernel), %eax | ||
| 163 | pushl %eax | ||
| 164 | ret | ||
| 165 | |||
| 166 | identity_mapped: | ||
| 167 | /* store the start address on the stack */ | ||
| 168 | pushl %edx | ||
| 32 | 169 | ||
| 33 | /* Set cr0 to a known state: | 170 | /* Set cr0 to a known state: |
| 34 | * 31 0 == Paging disabled | 171 | * 31 0 == Paging disabled |
| @@ -113,8 +250,3 @@ relocate_new_kernel: | |||
| 113 | xorl %edi, %edi | 250 | xorl %edi, %edi |
| 114 | xorl %ebp, %ebp | 251 | xorl %ebp, %ebp |
| 115 | ret | 252 | ret |
| 116 | relocate_new_kernel_end: | ||
| 117 | |||
| 118 | .globl relocate_new_kernel_size | ||
| 119 | relocate_new_kernel_size: | ||
| 120 | .long relocate_new_kernel_end - relocate_new_kernel | ||
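The non-PAE branch of the new relocate_kernel code indexes the page directory and page table by masking and shifting the virtual address; because each entry is 4 bytes, the shifts produce a byte offset rather than an entry index (`andl $0xffc00000; shrl $20` is the top 10 bits times 4). A small C sketch of the same arithmetic for 32-bit two-level paging (the PAE branch uses the 2/9/9 split with 8-byte entries instead):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t va = 0xc0100000;   /* example kernel virtual address */

    /* entry indices: top 10 bits select the PGD slot, next 10 the PTE slot */
    uint32_t pgd_index = va >> 22;
    uint32_t pte_index = (va >> 12) & 0x3ff;

    /* byte offsets into the tables, as the assembly computes them:
     * (va & 0xffc00000) >> 20  ==  pgd_index * 4
     * (va & 0x003ff000) >> 10  ==  pte_index * 4 */
    uint32_t pgd_off = (va & 0xffc00000) >> 20;
    uint32_t pte_off = (va & 0x003ff000) >> 10;

    printf("pgd index %u (offset %u), pte index %u (offset %u)\n",
           pgd_index, pgd_off, pte_index, pte_off);
    return 0;
}
```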
diff --git a/arch/i386/kernel/semaphore.c b/arch/i386/kernel/semaphore.c deleted file mode 100644 index 98352c374c76..000000000000 --- a/arch/i386/kernel/semaphore.c +++ /dev/null | |||
| @@ -1,134 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * i386 semaphore implementation. | ||
| 3 | * | ||
| 4 | * (C) Copyright 1999 Linus Torvalds | ||
| 5 | * | ||
| 6 | * Portions Copyright 1999 Red Hat, Inc. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * as published by the Free Software Foundation; either version | ||
| 11 | * 2 of the License, or (at your option) any later version. | ||
| 12 | * | ||
| 13 | * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org> | ||
| 14 | */ | ||
| 15 | #include <asm/semaphore.h> | ||
| 16 | |||
| 17 | /* | ||
| 18 | * The semaphore operations have a special calling sequence that | ||
| 19 | * allow us to do a simpler in-line version of them. These routines | ||
| 20 | * need to convert that sequence back into the C sequence when | ||
| 21 | * there is contention on the semaphore. | ||
| 22 | * | ||
| 23 | * %eax contains the semaphore pointer on entry. Save the C-clobbered | ||
| 24 | * registers (%eax, %edx and %ecx) except %eax which is either a return | ||
| 25 | * value or just clobbered.. | ||
| 26 | */ | ||
| 27 | asm( | ||
| 28 | ".section .sched.text\n" | ||
| 29 | ".align 4\n" | ||
| 30 | ".globl __down_failed\n" | ||
| 31 | "__down_failed:\n\t" | ||
| 32 | #if defined(CONFIG_FRAME_POINTER) | ||
| 33 | "pushl %ebp\n\t" | ||
| 34 | "movl %esp,%ebp\n\t" | ||
| 35 | #endif | ||
| 36 | "pushl %edx\n\t" | ||
| 37 | "pushl %ecx\n\t" | ||
| 38 | "call __down\n\t" | ||
| 39 | "popl %ecx\n\t" | ||
| 40 | "popl %edx\n\t" | ||
| 41 | #if defined(CONFIG_FRAME_POINTER) | ||
| 42 | "movl %ebp,%esp\n\t" | ||
| 43 | "popl %ebp\n\t" | ||
| 44 | #endif | ||
| 45 | "ret" | ||
| 46 | ); | ||
| 47 | |||
| 48 | asm( | ||
| 49 | ".section .sched.text\n" | ||
| 50 | ".align 4\n" | ||
| 51 | ".globl __down_failed_interruptible\n" | ||
| 52 | "__down_failed_interruptible:\n\t" | ||
| 53 | #if defined(CONFIG_FRAME_POINTER) | ||
| 54 | "pushl %ebp\n\t" | ||
| 55 | "movl %esp,%ebp\n\t" | ||
| 56 | #endif | ||
| 57 | "pushl %edx\n\t" | ||
| 58 | "pushl %ecx\n\t" | ||
| 59 | "call __down_interruptible\n\t" | ||
| 60 | "popl %ecx\n\t" | ||
| 61 | "popl %edx\n\t" | ||
| 62 | #if defined(CONFIG_FRAME_POINTER) | ||
| 63 | "movl %ebp,%esp\n\t" | ||
| 64 | "popl %ebp\n\t" | ||
| 65 | #endif | ||
| 66 | "ret" | ||
| 67 | ); | ||
| 68 | |||
| 69 | asm( | ||
| 70 | ".section .sched.text\n" | ||
| 71 | ".align 4\n" | ||
| 72 | ".globl __down_failed_trylock\n" | ||
| 73 | "__down_failed_trylock:\n\t" | ||
| 74 | #if defined(CONFIG_FRAME_POINTER) | ||
| 75 | "pushl %ebp\n\t" | ||
| 76 | "movl %esp,%ebp\n\t" | ||
| 77 | #endif | ||
| 78 | "pushl %edx\n\t" | ||
| 79 | "pushl %ecx\n\t" | ||
| 80 | "call __down_trylock\n\t" | ||
| 81 | "popl %ecx\n\t" | ||
| 82 | "popl %edx\n\t" | ||
| 83 | #if defined(CONFIG_FRAME_POINTER) | ||
| 84 | "movl %ebp,%esp\n\t" | ||
| 85 | "popl %ebp\n\t" | ||
| 86 | #endif | ||
| 87 | "ret" | ||
| 88 | ); | ||
| 89 | |||
| 90 | asm( | ||
| 91 | ".section .sched.text\n" | ||
| 92 | ".align 4\n" | ||
| 93 | ".globl __up_wakeup\n" | ||
| 94 | "__up_wakeup:\n\t" | ||
| 95 | "pushl %edx\n\t" | ||
| 96 | "pushl %ecx\n\t" | ||
| 97 | "call __up\n\t" | ||
| 98 | "popl %ecx\n\t" | ||
| 99 | "popl %edx\n\t" | ||
| 100 | "ret" | ||
| 101 | ); | ||
| 102 | |||
| 103 | /* | ||
| 104 | * rw spinlock fallbacks | ||
| 105 | */ | ||
| 106 | #if defined(CONFIG_SMP) | ||
| 107 | asm( | ||
| 108 | ".section .sched.text\n" | ||
| 109 | ".align 4\n" | ||
| 110 | ".globl __write_lock_failed\n" | ||
| 111 | "__write_lock_failed:\n\t" | ||
| 112 | LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",(%eax)\n" | ||
| 113 | "1: rep; nop\n\t" | ||
| 114 | "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" | ||
| 115 | "jne 1b\n\t" | ||
| 116 | LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" | ||
| 117 | "jnz __write_lock_failed\n\t" | ||
| 118 | "ret" | ||
| 119 | ); | ||
| 120 | |||
| 121 | asm( | ||
| 122 | ".section .sched.text\n" | ||
| 123 | ".align 4\n" | ||
| 124 | ".globl __read_lock_failed\n" | ||
| 125 | "__read_lock_failed:\n\t" | ||
| 126 | LOCK_PREFIX "incl (%eax)\n" | ||
| 127 | "1: rep; nop\n\t" | ||
| 128 | "cmpl $1,(%eax)\n\t" | ||
| 129 | "js 1b\n\t" | ||
| 130 | LOCK_PREFIX "decl (%eax)\n\t" | ||
| 131 | "js __read_lock_failed\n\t" | ||
| 132 | "ret" | ||
| 133 | ); | ||
| 134 | #endif | ||
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index f1682206d304..814cdebf7377 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c | |||
| @@ -53,6 +53,7 @@ | |||
| 53 | #include <asm/apic.h> | 53 | #include <asm/apic.h> |
| 54 | #include <asm/e820.h> | 54 | #include <asm/e820.h> |
| 55 | #include <asm/mpspec.h> | 55 | #include <asm/mpspec.h> |
| 56 | #include <asm/mmzone.h> | ||
| 56 | #include <asm/setup.h> | 57 | #include <asm/setup.h> |
| 57 | #include <asm/arch_hooks.h> | 58 | #include <asm/arch_hooks.h> |
| 58 | #include <asm/sections.h> | 59 | #include <asm/sections.h> |
| @@ -89,18 +90,6 @@ EXPORT_SYMBOL(boot_cpu_data); | |||
| 89 | 90 | ||
| 90 | unsigned long mmu_cr4_features; | 91 | unsigned long mmu_cr4_features; |
| 91 | 92 | ||
| 92 | #ifdef CONFIG_ACPI | ||
| 93 | int acpi_disabled = 0; | ||
| 94 | #else | ||
| 95 | int acpi_disabled = 1; | ||
| 96 | #endif | ||
| 97 | EXPORT_SYMBOL(acpi_disabled); | ||
| 98 | |||
| 99 | #ifdef CONFIG_ACPI | ||
| 100 | int __initdata acpi_force = 0; | ||
| 101 | extern acpi_interrupt_flags acpi_sci_flags; | ||
| 102 | #endif | ||
| 103 | |||
| 104 | /* for MCA, but anyone else can use it if they want */ | 93 | /* for MCA, but anyone else can use it if they want */ |
| 105 | unsigned int machine_id; | 94 | unsigned int machine_id; |
| 106 | #ifdef CONFIG_MCA | 95 | #ifdef CONFIG_MCA |
| @@ -148,7 +137,6 @@ EXPORT_SYMBOL(ist_info); | |||
| 148 | struct e820map e820; | 137 | struct e820map e820; |
| 149 | 138 | ||
| 150 | extern void early_cpu_init(void); | 139 | extern void early_cpu_init(void); |
| 151 | extern void generic_apic_probe(char *); | ||
| 152 | extern int root_mountflags; | 140 | extern int root_mountflags; |
| 153 | 141 | ||
| 154 | unsigned long saved_videomode; | 142 | unsigned long saved_videomode; |
| @@ -700,238 +688,150 @@ static inline void copy_edd(void) | |||
| 700 | } | 688 | } |
| 701 | #endif | 689 | #endif |
| 702 | 690 | ||
| 703 | static void __init parse_cmdline_early (char ** cmdline_p) | 691 | static int __initdata user_defined_memmap = 0; |
| 704 | { | ||
| 705 | char c = ' ', *to = command_line, *from = saved_command_line; | ||
| 706 | int len = 0; | ||
| 707 | int userdef = 0; | ||
| 708 | 692 | ||
| 709 | /* Save unparsed command line copy for /proc/cmdline */ | 693 | /* |
| 710 | saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; | 694 | * "mem=nopentium" disables the 4MB page tables. |
| 695 | * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM | ||
| 696 | * to <mem>, overriding the bios size. | ||
| 697 | * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from | ||
| 698 | * <start> to <start>+<mem>, overriding the bios size. | ||
| 699 | * | ||
| 700 | * HPA tells me bootloaders need to parse mem=, so no new | ||
| 701 | * option should be mem= [also see Documentation/i386/boot.txt] | ||
| 702 | */ | ||
| 703 | static int __init parse_mem(char *arg) | ||
| 704 | { | ||
| 705 | if (!arg) | ||
| 706 | return -EINVAL; | ||
| 711 | 707 | ||
| 712 | for (;;) { | 708 | if (strcmp(arg, "nopentium") == 0) { |
| 713 | if (c != ' ') | 709 | clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability); |
| 714 | goto next_char; | 710 | disable_pse = 1; |
| 715 | /* | 711 | } else { |
| 716 | * "mem=nopentium" disables the 4MB page tables. | 712 | /* If the user specifies memory size, we |
| 717 | * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM | 713 | * limit the BIOS-provided memory map to |
| 718 | * to <mem>, overriding the bios size. | 714 | * that size. exactmap can be used to specify |
| 719 | * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from | 715 | * the exact map. mem=number can be used to |
| 720 | * <start> to <start>+<mem>, overriding the bios size. | 716 | * trim the existing memory map. |
| 721 | * | ||
| 722 | * HPA tells me bootloaders need to parse mem=, so no new | ||
| 723 | * option should be mem= [also see Documentation/i386/boot.txt] | ||
| 724 | */ | 717 | */ |
| 725 | if (!memcmp(from, "mem=", 4)) { | 718 | unsigned long long mem_size; |
| 726 | if (to != command_line) | ||
| 727 | to--; | ||
| 728 | if (!memcmp(from+4, "nopentium", 9)) { | ||
| 729 | from += 9+4; | ||
| 730 | clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability); | ||
| 731 | disable_pse = 1; | ||
| 732 | } else { | ||
| 733 | /* If the user specifies memory size, we | ||
| 734 | * limit the BIOS-provided memory map to | ||
| 735 | * that size. exactmap can be used to specify | ||
| 736 | * the exact map. mem=number can be used to | ||
| 737 | * trim the existing memory map. | ||
| 738 | */ | ||
| 739 | unsigned long long mem_size; | ||
| 740 | |||
| 741 | mem_size = memparse(from+4, &from); | ||
| 742 | limit_regions(mem_size); | ||
| 743 | userdef=1; | ||
| 744 | } | ||
| 745 | } | ||
| 746 | |||
| 747 | else if (!memcmp(from, "memmap=", 7)) { | ||
| 748 | if (to != command_line) | ||
| 749 | to--; | ||
| 750 | if (!memcmp(from+7, "exactmap", 8)) { | ||
| 751 | #ifdef CONFIG_CRASH_DUMP | ||
| 752 | /* If we are doing a crash dump, we | ||
| 753 | * still need to know the real mem | ||
| 754 | * size before original memory map is | ||
| 755 | * reset. | ||
| 756 | */ | ||
| 757 | find_max_pfn(); | ||
| 758 | saved_max_pfn = max_pfn; | ||
| 759 | #endif | ||
| 760 | from += 8+7; | ||
| 761 | e820.nr_map = 0; | ||
| 762 | userdef = 1; | ||
| 763 | } else { | ||
| 764 | /* If the user specifies memory size, we | ||
| 765 | * limit the BIOS-provided memory map to | ||
| 766 | * that size. exactmap can be used to specify | ||
| 767 | * the exact map. mem=number can be used to | ||
| 768 | * trim the existing memory map. | ||
| 769 | */ | ||
| 770 | unsigned long long start_at, mem_size; | ||
| 771 | 719 | ||
| 772 | mem_size = memparse(from+7, &from); | 720 | mem_size = memparse(arg, &arg); |
| 773 | if (*from == '@') { | 721 | limit_regions(mem_size); |
| 774 | start_at = memparse(from+1, &from); | 722 | user_defined_memmap = 1; |
| 775 | add_memory_region(start_at, mem_size, E820_RAM); | 723 | } |
| 776 | } else if (*from == '#') { | 724 | return 0; |
| 777 | start_at = memparse(from+1, &from); | 725 | } |
| 778 | add_memory_region(start_at, mem_size, E820_ACPI); | 726 | early_param("mem", parse_mem); |
| 779 | } else if (*from == '$') { | ||
| 780 | start_at = memparse(from+1, &from); | ||
| 781 | add_memory_region(start_at, mem_size, E820_RESERVED); | ||
| 782 | } else { | ||
| 783 | limit_regions(mem_size); | ||
| 784 | userdef=1; | ||
| 785 | } | ||
| 786 | } | ||
| 787 | } | ||
| 788 | |||
| 789 | else if (!memcmp(from, "noexec=", 7)) | ||
| 790 | noexec_setup(from + 7); | ||
| 791 | 727 | ||
| 728 | static int __init parse_memmap(char *arg) | ||
| 729 | { | ||
| 730 | if (!arg) | ||
| 731 | return -EINVAL; | ||
| 792 | 732 | ||
| 793 | #ifdef CONFIG_X86_SMP | 733 | if (strcmp(arg, "exactmap") == 0) { |
| 794 | /* | 734 | #ifdef CONFIG_CRASH_DUMP |
| 795 | * If the BIOS enumerates physical processors before logical, | 735 | /* If we are doing a crash dump, we |
| 796 | * maxcpus=N at enumeration-time can be used to disable HT. | 736 | * still need to know the real mem |
| 737 | * size before original memory map is | ||
| 738 | * reset. | ||
| 797 | */ | 739 | */ |
| 798 | else if (!memcmp(from, "maxcpus=", 8)) { | 740 | find_max_pfn(); |
| 799 | extern unsigned int maxcpus; | 741 | saved_max_pfn = max_pfn; |
| 800 | |||
| 801 | maxcpus = simple_strtoul(from + 8, NULL, 0); | ||
| 802 | } | ||
| 803 | #endif | 742 | #endif |
| 804 | 743 | e820.nr_map = 0; | |
| 805 | #ifdef CONFIG_ACPI | 744 | user_defined_memmap = 1; |
| 806 | /* "acpi=off" disables both ACPI table parsing and interpreter */ | 745 | } else { |
| 807 | else if (!memcmp(from, "acpi=off", 8)) { | 746 | /* If the user specifies memory size, we |
| 808 | disable_acpi(); | 747 | * limit the BIOS-provided memory map to |
| 809 | } | 748 | * that size. exactmap can be used to specify |
| 810 | 749 | * the exact map. mem=number can be used to | |
| 811 | /* acpi=force to over-ride black-list */ | 750 | * trim the existing memory map. |
| 812 | else if (!memcmp(from, "acpi=force", 10)) { | 751 | */ |
| 813 | acpi_force = 1; | 752 | unsigned long long start_at, mem_size; |
| 814 | acpi_ht = 1; | 753 | |
| 815 | acpi_disabled = 0; | 754 | mem_size = memparse(arg, &arg); |
| 816 | } | 755 | if (*arg == '@') { |
| 817 | 756 | start_at = memparse(arg+1, &arg); | |
| 818 | /* acpi=strict disables out-of-spec workarounds */ | 757 | add_memory_region(start_at, mem_size, E820_RAM); |
| 819 | else if (!memcmp(from, "acpi=strict", 11)) { | 758 | } else if (*arg == '#') { |
| 820 | acpi_strict = 1; | 759 | start_at = memparse(arg+1, &arg); |
| 821 | } | 760 | add_memory_region(start_at, mem_size, E820_ACPI); |
| 822 | 761 | } else if (*arg == '$') { | |
| 823 | /* Limit ACPI just to boot-time to enable HT */ | 762 | start_at = memparse(arg+1, &arg); |
| 824 | else if (!memcmp(from, "acpi=ht", 7)) { | 763 | add_memory_region(start_at, mem_size, E820_RESERVED); |
| 825 | if (!acpi_force) | 764 | } else { |
| 826 | disable_acpi(); | 765 | limit_regions(mem_size); |
| 827 | acpi_ht = 1; | 766 | user_defined_memmap = 1; |
| 828 | } | ||
| 829 | |||
| 830 | /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */ | ||
| 831 | else if (!memcmp(from, "pci=noacpi", 10)) { | ||
| 832 | acpi_disable_pci(); | ||
| 833 | } | ||
| 834 | /* "acpi=noirq" disables ACPI interrupt routing */ | ||
| 835 | else if (!memcmp(from, "acpi=noirq", 10)) { | ||
| 836 | acpi_noirq_set(); | ||
| 837 | } | 767 | } |
| 768 | } | ||
| 769 | return 0; | ||
| 770 | } | ||
| 771 | early_param("memmap", parse_memmap); | ||
| 838 | 772 | ||
| 839 | else if (!memcmp(from, "acpi_sci=edge", 13)) | 773 | #ifdef CONFIG_PROC_VMCORE |
| 840 | acpi_sci_flags.trigger = 1; | 774 | /* elfcorehdr= specifies the location of elf core header |
| 841 | 775 | * stored by the crashed kernel. | |
| 842 | else if (!memcmp(from, "acpi_sci=level", 14)) | 776 | */ |
| 843 | acpi_sci_flags.trigger = 3; | 777 | static int __init parse_elfcorehdr(char *arg) |
| 844 | 778 | { | |
| 845 | else if (!memcmp(from, "acpi_sci=high", 13)) | 779 | if (!arg) |
| 846 | acpi_sci_flags.polarity = 1; | 780 | return -EINVAL; |
| 847 | 781 | ||
| 848 | else if (!memcmp(from, "acpi_sci=low", 12)) | 782 | elfcorehdr_addr = memparse(arg, &arg); |
| 849 | acpi_sci_flags.polarity = 3; | 783 | return 0; |
| 784 | } | ||
| 785 | early_param("elfcorehdr", parse_elfcorehdr); | ||
| 786 | #endif /* CONFIG_PROC_VMCORE */ | ||
| 850 | 787 | ||
| 851 | #ifdef CONFIG_X86_IO_APIC | 788 | /* |
| 852 | else if (!memcmp(from, "acpi_skip_timer_override", 24)) | 789 | * highmem=size forces highmem to be exactly 'size' bytes. |
| 853 | acpi_skip_timer_override = 1; | 790 | * This works even on boxes that have no highmem otherwise. |
| 791 | * This also works to reduce highmem size on bigger boxes. | ||
| 792 | */ | ||
| 793 | static int __init parse_highmem(char *arg) | ||
| 794 | { | ||
| 795 | if (!arg) | ||
| 796 | return -EINVAL; | ||
| 854 | 797 | ||
| 855 | if (!memcmp(from, "disable_timer_pin_1", 19)) | 798 | highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT; |
| 856 | disable_timer_pin_1 = 1; | 799 | return 0; |
| 857 | if (!memcmp(from, "enable_timer_pin_1", 18)) | 800 | } |
| 858 | disable_timer_pin_1 = -1; | 801 | early_param("highmem", parse_highmem); |
| 859 | 802 | ||
| 860 | /* disable IO-APIC */ | 803 | /* |
| 861 | else if (!memcmp(from, "noapic", 6)) | 804 | * vmalloc=size forces the vmalloc area to be exactly 'size' |
| 862 | disable_ioapic_setup(); | 805 | * bytes. This can be used to increase (or decrease) the |
| 863 | #endif /* CONFIG_X86_IO_APIC */ | 806 | * vmalloc area - the default is 128m. |
| 864 | #endif /* CONFIG_ACPI */ | 807 | */ |
| 808 | static int __init parse_vmalloc(char *arg) | ||
| 809 | { | ||
| 810 | if (!arg) | ||
| 811 | return -EINVAL; | ||
| 865 | 812 | ||
| 866 | #ifdef CONFIG_X86_LOCAL_APIC | 813 | __VMALLOC_RESERVE = memparse(arg, &arg); |
| 867 | /* enable local APIC */ | 814 | return 0; |
| 868 | else if (!memcmp(from, "lapic", 5)) | 815 | } |
| 869 | lapic_enable(); | 816 | early_param("vmalloc", parse_vmalloc); |
| 870 | 817 | ||
| 871 | /* disable local APIC */ | 818 | /* |
| 872 | else if (!memcmp(from, "nolapic", 6)) | 819 | * reservetop=size reserves a hole at the top of the kernel address space which |
| 873 | lapic_disable(); | 820 | * a hypervisor can load into later. Needed for dynamically loaded hypervisors, |
| 874 | #endif /* CONFIG_X86_LOCAL_APIC */ | 821 | * so relocating the fixmap can be done before paging initialization. |
| 822 | */ | ||
| 823 | static int __init parse_reservetop(char *arg) | ||
| 824 | { | ||
| 825 | unsigned long address; | ||
| 875 | 826 | ||
| 876 | #ifdef CONFIG_KEXEC | 827 | if (!arg) |
| 877 | /* crashkernel=size@addr specifies the location to reserve for | 828 | return -EINVAL; |
| 878 | * a crash kernel. By reserving this memory we guarantee | ||
| 879 | * that linux never set's it up as a DMA target. | ||
| 880 | * Useful for holding code to do something appropriate | ||
| 881 | * after a kernel panic. | ||
| 882 | */ | ||
| 883 | else if (!memcmp(from, "crashkernel=", 12)) { | ||
| 884 | unsigned long size, base; | ||
| 885 | size = memparse(from+12, &from); | ||
| 886 | if (*from == '@') { | ||
| 887 | base = memparse(from+1, &from); | ||
| 888 | /* FIXME: Do I want a sanity check | ||
| 889 | * to validate the memory range? | ||
| 890 | */ | ||
| 891 | crashk_res.start = base; | ||
| 892 | crashk_res.end = base + size - 1; | ||
| 893 | } | ||
| 894 | } | ||
| 895 | #endif | ||
| 896 | #ifdef CONFIG_PROC_VMCORE | ||
| 897 | /* elfcorehdr= specifies the location of elf core header | ||
| 898 | * stored by the crashed kernel. | ||
| 899 | */ | ||
| 900 | else if (!memcmp(from, "elfcorehdr=", 11)) | ||
| 901 | elfcorehdr_addr = memparse(from+11, &from); | ||
| 902 | #endif | ||
| 903 | 829 | ||
| 904 | /* | 830 | address = memparse(arg, &arg); |
| 905 | * highmem=size forces highmem to be exactly 'size' bytes. | 831 | reserve_top_address(address); |
| 906 | * This works even on boxes that have no highmem otherwise. | 832 | return 0; |
| 907 | * This also works to reduce highmem size on bigger boxes. | ||
| 908 | */ | ||
| 909 | else if (!memcmp(from, "highmem=", 8)) | ||
| 910 | highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT; | ||
| 911 | |||
| 912 | /* | ||
| 913 | * vmalloc=size forces the vmalloc area to be exactly 'size' | ||
| 914 | * bytes. This can be used to increase (or decrease) the | ||
| 915 | * vmalloc area - the default is 128m. | ||
| 916 | */ | ||
| 917 | else if (!memcmp(from, "vmalloc=", 8)) | ||
| 918 | __VMALLOC_RESERVE = memparse(from+8, &from); | ||
| 919 | |||
| 920 | next_char: | ||
| 921 | c = *(from++); | ||
| 922 | if (!c) | ||
| 923 | break; | ||
| 924 | if (COMMAND_LINE_SIZE <= ++len) | ||
| 925 | break; | ||
| 926 | *(to++) = c; | ||
| 927 | } | ||
| 928 | *to = '\0'; | ||
| 929 | *cmdline_p = command_line; | ||
| 930 | if (userdef) { | ||
| 931 | printk(KERN_INFO "user-defined physical RAM map:\n"); | ||
| 932 | print_memory_map("user"); | ||
| 933 | } | ||
| 934 | } | 833 | } |
| 834 | early_param("reservetop", parse_reservetop); | ||
| 935 | 835 | ||
| 936 | /* | 836 | /* |
| 937 | * Callback for efi_memory_walk. | 837 | * Callback for efi_memory_walk. |
| @@ -1170,6 +1070,14 @@ static unsigned long __init setup_memory(void) | |||
| 1170 | } | 1070 | } |
| 1171 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", | 1071 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", |
| 1172 | pages_to_mb(highend_pfn - highstart_pfn)); | 1072 | pages_to_mb(highend_pfn - highstart_pfn)); |
| 1073 | num_physpages = highend_pfn; | ||
| 1074 | high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; | ||
| 1075 | #else | ||
| 1076 | num_physpages = max_low_pfn; | ||
| 1077 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; | ||
| 1078 | #endif | ||
| 1079 | #ifdef CONFIG_FLATMEM | ||
| 1080 | max_mapnr = num_physpages; | ||
| 1173 | #endif | 1081 | #endif |
| 1174 | printk(KERN_NOTICE "%ldMB LOWMEM available.\n", | 1082 | printk(KERN_NOTICE "%ldMB LOWMEM available.\n", |
| 1175 | pages_to_mb(max_low_pfn)); | 1083 | pages_to_mb(max_low_pfn)); |
| @@ -1181,22 +1089,20 @@ static unsigned long __init setup_memory(void) | |||
| 1181 | 1089 | ||
| 1182 | void __init zone_sizes_init(void) | 1090 | void __init zone_sizes_init(void) |
| 1183 | { | 1091 | { |
| 1184 | unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; | ||
| 1185 | unsigned int max_dma, low; | ||
| 1186 | |||
| 1187 | max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; | ||
| 1188 | low = max_low_pfn; | ||
| 1189 | |||
| 1190 | if (low < max_dma) | ||
| 1191 | zones_size[ZONE_DMA] = low; | ||
| 1192 | else { | ||
| 1193 | zones_size[ZONE_DMA] = max_dma; | ||
| 1194 | zones_size[ZONE_NORMAL] = low - max_dma; | ||
| 1195 | #ifdef CONFIG_HIGHMEM | 1092 | #ifdef CONFIG_HIGHMEM |
| 1196 | zones_size[ZONE_HIGHMEM] = highend_pfn - low; | 1093 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { |
| 1094 | virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT, | ||
| 1095 | max_low_pfn, | ||
| 1096 | highend_pfn}; | ||
| 1097 | add_active_range(0, 0, highend_pfn); | ||
| 1098 | #else | ||
| 1099 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { | ||
| 1100 | virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT, | ||
| 1101 | max_low_pfn}; | ||
| 1102 | add_active_range(0, 0, max_low_pfn); | ||
| 1197 | #endif | 1103 | #endif |
| 1198 | } | 1104 | |
| 1199 | free_area_init(zones_size); | 1105 | free_area_init_nodes(max_zone_pfns); |
| 1200 | } | 1106 | } |
| 1201 | #else | 1107 | #else |
| 1202 | extern unsigned long __init setup_memory(void); | 1108 | extern unsigned long __init setup_memory(void); |
| @@ -1258,7 +1164,7 @@ void __init setup_bootmem_allocator(void) | |||
| 1258 | */ | 1164 | */ |
| 1259 | find_smp_config(); | 1165 | find_smp_config(); |
| 1260 | #endif | 1166 | #endif |
| 1261 | 1167 | numa_kva_reserve(); | |
| 1262 | #ifdef CONFIG_BLK_DEV_INITRD | 1168 | #ifdef CONFIG_BLK_DEV_INITRD |
| 1263 | if (LOADER_TYPE && INITRD_START) { | 1169 | if (LOADER_TYPE && INITRD_START) { |
| 1264 | if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { | 1170 | if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { |
| @@ -1499,17 +1405,15 @@ void __init setup_arch(char **cmdline_p) | |||
| 1499 | data_resource.start = virt_to_phys(_etext); | 1405 | data_resource.start = virt_to_phys(_etext); |
| 1500 | data_resource.end = virt_to_phys(_edata)-1; | 1406 | data_resource.end = virt_to_phys(_edata)-1; |
| 1501 | 1407 | ||
| 1502 | parse_cmdline_early(cmdline_p); | 1408 | parse_early_param(); |
| 1503 | 1409 | ||
| 1504 | #ifdef CONFIG_EARLY_PRINTK | 1410 | if (user_defined_memmap) { |
| 1505 | { | 1411 | printk(KERN_INFO "user-defined physical RAM map:\n"); |
| 1506 | char *s = strstr(*cmdline_p, "earlyprintk="); | 1412 | print_memory_map("user"); |
| 1507 | if (s) { | ||
| 1508 | setup_early_printk(strchr(s, '=') + 1); | ||
| 1509 | printk("early console enabled\n"); | ||
| 1510 | } | ||
| 1511 | } | 1413 | } |
| 1512 | #endif | 1414 | |
| 1415 | strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE); | ||
| 1416 | *cmdline_p = command_line; | ||
| 1513 | 1417 | ||
| 1514 | max_low_pfn = setup_memory(); | 1418 | max_low_pfn = setup_memory(); |
| 1515 | 1419 | ||
| @@ -1538,7 +1442,7 @@ void __init setup_arch(char **cmdline_p) | |||
| 1538 | dmi_scan_machine(); | 1442 | dmi_scan_machine(); |
| 1539 | 1443 | ||
| 1540 | #ifdef CONFIG_X86_GENERICARCH | 1444 | #ifdef CONFIG_X86_GENERICARCH |
| 1541 | generic_apic_probe(*cmdline_p); | 1445 | generic_apic_probe(); |
| 1542 | #endif | 1446 | #endif |
| 1543 | if (efi_enabled) | 1447 | if (efi_enabled) |
| 1544 | efi_map_memmap(); | 1448 | efi_map_memmap(); |
| @@ -1550,9 +1454,11 @@ void __init setup_arch(char **cmdline_p) | |||
| 1550 | acpi_boot_table_init(); | 1454 | acpi_boot_table_init(); |
| 1551 | #endif | 1455 | #endif |
| 1552 | 1456 | ||
| 1457 | #ifdef CONFIG_PCI | ||
| 1553 | #ifdef CONFIG_X86_IO_APIC | 1458 | #ifdef CONFIG_X86_IO_APIC |
| 1554 | check_acpi_pci(); /* Checks more than just ACPI actually */ | 1459 | check_acpi_pci(); /* Checks more than just ACPI actually */ |
| 1555 | #endif | 1460 | #endif |
| 1461 | #endif | ||
| 1556 | 1462 | ||
| 1557 | #ifdef CONFIG_ACPI | 1463 | #ifdef CONFIG_ACPI |
| 1558 | acpi_boot_init(); | 1464 | acpi_boot_init(); |
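The setup.c rework replaces the hand-rolled parse_cmdline_early() scanner with one early_param() handler per option, each typically leaning on memparse() for the k/K/m/M/g/G suffixes. A sketch of what an additional option would look like under that pattern; the option name "fooreserve" and the variable it fills are hypothetical, only the early_param()/memparse() usage follows the hunks above:

```c
#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical example of the early_param() pattern introduced above.
 * "fooreserve" and foo_reserve_size are made-up names for illustration. */
static unsigned long long foo_reserve_size;

static int __init parse_fooreserve(char *arg)
{
        if (!arg)
                return -EINVAL;

        /* memparse() understands size suffixes, so "fooreserve=16M" works */
        foo_reserve_size = memparse(arg, &arg);
        return 0;
}
early_param("fooreserve", parse_fooreserve);
```

On the command line this would be passed as `fooreserve=16M`, just as the converted options accept forms like `mem=512M`, `memmap=64M@16M` or `vmalloc=192M`.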
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c index c10789d7a9d3..465188e2d701 100644 --- a/arch/i386/kernel/smp.c +++ b/arch/i386/kernel/smp.c | |||
| @@ -634,3 +634,69 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs) | |||
| 634 | } | 634 | } |
| 635 | } | 635 | } |
| 636 | 636 | ||
| 637 | /* | ||
| 638 | * this function sends a 'generic call function' IPI to one other CPU | ||
| 639 | * in the system. | ||
| 640 | * | ||
| 641 | * cpu is a standard Linux logical CPU number. | ||
| 642 | */ | ||
| 643 | static void | ||
| 644 | __smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
| 645 | int nonatomic, int wait) | ||
| 646 | { | ||
| 647 | struct call_data_struct data; | ||
| 648 | int cpus = 1; | ||
| 649 | |||
| 650 | data.func = func; | ||
| 651 | data.info = info; | ||
| 652 | atomic_set(&data.started, 0); | ||
| 653 | data.wait = wait; | ||
| 654 | if (wait) | ||
| 655 | atomic_set(&data.finished, 0); | ||
| 656 | |||
| 657 | call_data = &data; | ||
| 658 | wmb(); | ||
| 659 | /* Send a message to the target CPU and wait for it to respond */ | ||
| 660 | send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR); | ||
| 661 | |||
| 662 | /* Wait for response */ | ||
| 663 | while (atomic_read(&data.started) != cpus) | ||
| 664 | cpu_relax(); | ||
| 665 | |||
| 666 | if (!wait) | ||
| 667 | return; | ||
| 668 | |||
| 669 | while (atomic_read(&data.finished) != cpus) | ||
| 670 | cpu_relax(); | ||
| 671 | } | ||
| 672 | |||
| 673 | /* | ||
| 674 | * smp_call_function_single - Run a function on another CPU | ||
| 675 | * @func: The function to run. This must be fast and non-blocking. | ||
| 676 | * @info: An arbitrary pointer to pass to the function. | ||
| 677 | * @nonatomic: Currently unused. | ||
| 678 | * @wait: If true, wait until function has completed on other CPUs. | ||
| 679 | * | ||
| 680 | * Returns 0 on success, else a negative status code. | ||
| 681 | * | ||
| 682 | * Does not return until the remote CPU is nearly ready to execute <func> | ||
| 683 | * or is or has executed. | ||
| 684 | */ | ||
| 685 | |||
| 686 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
| 687 | int nonatomic, int wait) | ||
| 688 | { | ||
| 689 | /* prevent preemption and reschedule on another processor */ | ||
| 690 | int me = get_cpu(); | ||
| 691 | if (cpu == me) { | ||
| 692 | WARN_ON(1); | ||
| 693 | put_cpu(); | ||
| 694 | return -EBUSY; | ||
| 695 | } | ||
| 696 | spin_lock_bh(&call_lock); | ||
| 697 | __smp_call_function_single(cpu, func, info, nonatomic, wait); | ||
| 698 | spin_unlock_bh(&call_lock); | ||
| 699 | put_cpu(); | ||
| 700 | return 0; | ||
| 701 | } | ||
| 702 | EXPORT_SYMBOL(smp_call_function_single); | ||
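The newly exported smp_call_function_single() runs a fast, non-blocking function on exactly one other CPU. A minimal, hypothetical caller might look like the sketch below; the target CPU number and the helper are illustrative and not part of this patch, and calling it for the current CPU would trip the WARN_ON above and return -EBUSY.

```c
#include <linux/smp.h>
#include <linux/kernel.h>

/* Runs on the remote CPU from IPI context: keep it short and non-blocking. */
static void say_hello(void *info)
{
	printk(KERN_INFO "hello from CPU %d (info=%p)\n",
	       smp_processor_id(), info);
}

static void example_poke_cpu_one(void)
{
	/* cpu 1 is an arbitrary example; wait=1 blocks until say_hello() returns */
	int ret = smp_call_function_single(1, say_hello, NULL, 0, 1);

	if (ret)
		printk(KERN_WARNING "example: cross-CPU call failed: %d\n", ret);
}
```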
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index f948419c888a..020d873b7d21 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c | |||
| @@ -177,6 +177,9 @@ static void __devinit smp_store_cpu_info(int id) | |||
| 177 | */ | 177 | */ |
| 178 | if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) { | 178 | if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) { |
| 179 | 179 | ||
| 180 | if (num_possible_cpus() == 1) | ||
| 181 | goto valid_k7; | ||
| 182 | |||
| 180 | /* Athlon 660/661 is valid. */ | 183 | /* Athlon 660/661 is valid. */ |
| 181 | if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1))) | 184 | if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1))) |
| 182 | goto valid_k7; | 185 | goto valid_k7; |
| @@ -642,9 +645,13 @@ static void map_cpu_to_logical_apicid(void) | |||
| 642 | { | 645 | { |
| 643 | int cpu = smp_processor_id(); | 646 | int cpu = smp_processor_id(); |
| 644 | int apicid = logical_smp_processor_id(); | 647 | int apicid = logical_smp_processor_id(); |
| 648 | int node = apicid_to_node(apicid); | ||
| 649 | |||
| 650 | if (!node_online(node)) | ||
| 651 | node = first_online_node; | ||
| 645 | 652 | ||
| 646 | cpu_2_logical_apicid[cpu] = apicid; | 653 | cpu_2_logical_apicid[cpu] = apicid; |
| 647 | map_cpu_to_node(cpu, apicid_to_node(apicid)); | 654 | map_cpu_to_node(cpu, node); |
| 648 | } | 655 | } |
| 649 | 656 | ||
| 650 | static void unmap_cpu_to_logical_apicid(int cpu) | 657 | static void unmap_cpu_to_logical_apicid(int cpu) |
| @@ -1372,7 +1379,8 @@ int __cpu_disable(void) | |||
| 1372 | */ | 1379 | */ |
| 1373 | if (cpu == 0) | 1380 | if (cpu == 0) |
| 1374 | return -EBUSY; | 1381 | return -EBUSY; |
| 1375 | 1382 | if (nmi_watchdog == NMI_LOCAL_APIC) | |
| 1383 | stop_apic_nmi_watchdog(NULL); | ||
| 1376 | clear_local_APIC(); | 1384 | clear_local_APIC(); |
| 1377 | /* Allow any queued timer interrupts to get serviced */ | 1385 | /* Allow any queued timer interrupts to get serviced */ |
| 1378 | local_irq_enable(); | 1386 | local_irq_enable(); |
| @@ -1486,3 +1494,16 @@ void __init smp_intr_init(void) | |||
| 1486 | /* IPI for generic function call */ | 1494 | /* IPI for generic function call */ |
| 1487 | set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | 1495 | set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); |
| 1488 | } | 1496 | } |
| 1497 | |||
| 1498 | /* | ||
| 1499 | * If the BIOS enumerates physical processors before logical, | ||
| 1500 | * maxcpus=N at enumeration-time can be used to disable HT. | ||
| 1501 | */ | ||
| 1502 | static int __init parse_maxcpus(char *arg) | ||
| 1503 | { | ||
| 1504 | extern unsigned int maxcpus; | ||
| 1505 | |||
| 1506 | maxcpus = simple_strtoul(arg, NULL, 0); | ||
| 1507 | return 0; | ||
| 1508 | } | ||
| 1509 | early_param("maxcpus", parse_maxcpus); | ||
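early_param() differs from __setup() in that the handler runs while the command line is first scanned, early enough to influence CPU enumeration; that is why "maxcpus=N" passed on the boot line can hide the sibling threads. A hedged sketch of the same registration pattern for a made-up option (the option name and variable are assumptions, not part of this patch):

```c
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static unsigned int example_limit __initdata = ~0U;

/* Called very early with the text after "example_limit="; returning 0
 * tells the parser the option was consumed. */
static int __init parse_example_limit(char *arg)
{
	if (!arg)
		return -EINVAL;
	example_limit = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("example_limit", parse_example_limit);
```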
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c index b1809c9a0899..32413122c4c2 100644 --- a/arch/i386/kernel/srat.c +++ b/arch/i386/kernel/srat.c | |||
| @@ -42,7 +42,7 @@ | |||
| 42 | #define PXM_BITMAP_LEN (MAX_PXM_DOMAINS / 8) | 42 | #define PXM_BITMAP_LEN (MAX_PXM_DOMAINS / 8) |
| 43 | static u8 pxm_bitmap[PXM_BITMAP_LEN]; /* bitmap of proximity domains */ | 43 | static u8 pxm_bitmap[PXM_BITMAP_LEN]; /* bitmap of proximity domains */ |
| 44 | 44 | ||
| 45 | #define MAX_CHUNKS_PER_NODE 4 | 45 | #define MAX_CHUNKS_PER_NODE 3 |
| 46 | #define MAXCHUNKS (MAX_CHUNKS_PER_NODE * MAX_NUMNODES) | 46 | #define MAXCHUNKS (MAX_CHUNKS_PER_NODE * MAX_NUMNODES) |
| 47 | struct node_memory_chunk_s { | 47 | struct node_memory_chunk_s { |
| 48 | unsigned long start_pfn; | 48 | unsigned long start_pfn; |
| @@ -54,8 +54,6 @@ struct node_memory_chunk_s { | |||
| 54 | static struct node_memory_chunk_s node_memory_chunk[MAXCHUNKS]; | 54 | static struct node_memory_chunk_s node_memory_chunk[MAXCHUNKS]; |
| 55 | 55 | ||
| 56 | static int num_memory_chunks; /* total number of memory chunks */ | 56 | static int num_memory_chunks; /* total number of memory chunks */ |
| 57 | static int zholes_size_init; | ||
| 58 | static unsigned long zholes_size[MAX_NUMNODES * MAX_NR_ZONES]; | ||
| 59 | 57 | ||
| 60 | extern void * boot_ioremap(unsigned long, unsigned long); | 58 | extern void * boot_ioremap(unsigned long, unsigned long); |
| 61 | 59 | ||
| @@ -135,50 +133,6 @@ static void __init parse_memory_affinity_structure (char *sratp) | |||
| 135 | "enabled and removable" : "enabled" ) ); | 133 | "enabled and removable" : "enabled" ) ); |
| 136 | } | 134 | } |
| 137 | 135 | ||
| 138 | #if MAX_NR_ZONES != 4 | ||
| 139 | #error "MAX_NR_ZONES != 4, chunk_to_zone requires review" | ||
| 140 | #endif | ||
| 141 | /* Take a chunk of pages from page frame cstart to cend and count the number | ||
| 142 | * of pages in each zone, returned via zones[]. | ||
| 143 | */ | ||
| 144 | static __init void chunk_to_zones(unsigned long cstart, unsigned long cend, | ||
| 145 | unsigned long *zones) | ||
| 146 | { | ||
| 147 | unsigned long max_dma; | ||
| 148 | extern unsigned long max_low_pfn; | ||
| 149 | |||
| 150 | int z; | ||
| 151 | unsigned long rend; | ||
| 152 | |||
| 153 | /* FIXME: MAX_DMA_ADDRESS and max_low_pfn are trying to provide | ||
| 154 | * similarly scoped information and should be handled in a consistant | ||
| 155 | * manner. | ||
| 156 | */ | ||
| 157 | max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; | ||
| 158 | |||
| 159 | /* Split the hole into the zones in which it falls. Repeatedly | ||
| 160 | * take the segment in which the remaining hole starts, round it | ||
| 161 | * to the end of that zone. | ||
| 162 | */ | ||
| 163 | memset(zones, 0, MAX_NR_ZONES * sizeof(long)); | ||
| 164 | while (cstart < cend) { | ||
| 165 | if (cstart < max_dma) { | ||
| 166 | z = ZONE_DMA; | ||
| 167 | rend = (cend < max_dma)? cend : max_dma; | ||
| 168 | |||
| 169 | } else if (cstart < max_low_pfn) { | ||
| 170 | z = ZONE_NORMAL; | ||
| 171 | rend = (cend < max_low_pfn)? cend : max_low_pfn; | ||
| 172 | |||
| 173 | } else { | ||
| 174 | z = ZONE_HIGHMEM; | ||
| 175 | rend = cend; | ||
| 176 | } | ||
| 177 | zones[z] += rend - cstart; | ||
| 178 | cstart = rend; | ||
| 179 | } | ||
| 180 | } | ||
| 181 | |||
| 182 | /* | 136 | /* |
| 183 | * The SRAT table always lists ascending addresses, so can always | 137 | * The SRAT table always lists ascending addresses, so can always |
| 184 | * assume that the first "start" address that you see is the real | 138 | * assume that the first "start" address that you see is the real |
| @@ -223,7 +177,6 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp) | |||
| 223 | 177 | ||
| 224 | memset(pxm_bitmap, 0, sizeof(pxm_bitmap)); /* init proximity domain bitmap */ | 178 | memset(pxm_bitmap, 0, sizeof(pxm_bitmap)); /* init proximity domain bitmap */ |
| 225 | memset(node_memory_chunk, 0, sizeof(node_memory_chunk)); | 179 | memset(node_memory_chunk, 0, sizeof(node_memory_chunk)); |
| 226 | memset(zholes_size, 0, sizeof(zholes_size)); | ||
| 227 | 180 | ||
| 228 | num_memory_chunks = 0; | 181 | num_memory_chunks = 0; |
| 229 | while (p < end) { | 182 | while (p < end) { |
| @@ -287,6 +240,7 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp) | |||
| 287 | printk("chunk %d nid %d start_pfn %08lx end_pfn %08lx\n", | 240 | printk("chunk %d nid %d start_pfn %08lx end_pfn %08lx\n", |
| 288 | j, chunk->nid, chunk->start_pfn, chunk->end_pfn); | 241 | j, chunk->nid, chunk->start_pfn, chunk->end_pfn); |
| 289 | node_read_chunk(chunk->nid, chunk); | 242 | node_read_chunk(chunk->nid, chunk); |
| 243 | add_active_range(chunk->nid, chunk->start_pfn, chunk->end_pfn); | ||
| 290 | } | 244 | } |
| 291 | 245 | ||
| 292 | for_each_online_node(nid) { | 246 | for_each_online_node(nid) { |
| @@ -395,57 +349,7 @@ int __init get_memcfg_from_srat(void) | |||
| 395 | return acpi20_parse_srat((struct acpi_table_srat *)header); | 349 | return acpi20_parse_srat((struct acpi_table_srat *)header); |
| 396 | } | 350 | } |
| 397 | out_err: | 351 | out_err: |
| 352 | remove_all_active_ranges(); | ||
| 398 | printk("failed to get NUMA memory information from SRAT table\n"); | 353 | printk("failed to get NUMA memory information from SRAT table\n"); |
| 399 | return 0; | 354 | return 0; |
| 400 | } | 355 | } |
| 401 | |||
| 402 | /* For each node run the memory list to determine whether there are | ||
| 403 | * any memory holes. For each hole determine which ZONE they fall | ||
| 404 | * into. | ||
| 405 | * | ||
| 406 | * NOTE#1: this requires knowledge of the zone boundries and so | ||
| 407 | * _cannot_ be performed before those are calculated in setup_memory. | ||
| 408 | * | ||
| 409 | * NOTE#2: we rely on the fact that the memory chunks are ordered by | ||
| 410 | * start pfn number during setup. | ||
| 411 | */ | ||
| 412 | static void __init get_zholes_init(void) | ||
| 413 | { | ||
| 414 | int nid; | ||
| 415 | int c; | ||
| 416 | int first; | ||
| 417 | unsigned long end = 0; | ||
| 418 | |||
| 419 | for_each_online_node(nid) { | ||
| 420 | first = 1; | ||
| 421 | for (c = 0; c < num_memory_chunks; c++){ | ||
| 422 | if (node_memory_chunk[c].nid == nid) { | ||
| 423 | if (first) { | ||
| 424 | end = node_memory_chunk[c].end_pfn; | ||
| 425 | first = 0; | ||
| 426 | |||
| 427 | } else { | ||
| 428 | /* Record any gap between this chunk | ||
| 429 | * and the previous chunk on this node | ||
| 430 | * against the zones it spans. | ||
| 431 | */ | ||
| 432 | chunk_to_zones(end, | ||
| 433 | node_memory_chunk[c].start_pfn, | ||
| 434 | &zholes_size[nid * MAX_NR_ZONES]); | ||
| 435 | } | ||
| 436 | } | ||
| 437 | } | ||
| 438 | } | ||
| 439 | } | ||
| 440 | |||
| 441 | unsigned long * __init get_zholes_size(int nid) | ||
| 442 | { | ||
| 443 | if (!zholes_size_init) { | ||
| 444 | zholes_size_init++; | ||
| 445 | get_zholes_init(); | ||
| 446 | } | ||
| 447 | if (nid >= MAX_NUMNODES || !node_online(nid)) | ||
| 448 | printk("%s: nid = %d is invalid/offline. num_online_nodes = %d", | ||
| 449 | __FUNCTION__, nid, num_online_nodes()); | ||
| 450 | return &zholes_size[nid * MAX_NR_ZONES]; | ||
| 451 | } | ||
diff --git a/arch/i386/kernel/stacktrace.c b/arch/i386/kernel/stacktrace.c deleted file mode 100644 index e62a037ab399..000000000000 --- a/arch/i386/kernel/stacktrace.c +++ /dev/null | |||
| @@ -1,98 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * arch/i386/kernel/stacktrace.c | ||
| 3 | * | ||
| 4 | * Stack trace management functions | ||
| 5 | * | ||
| 6 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
| 7 | */ | ||
| 8 | #include <linux/sched.h> | ||
| 9 | #include <linux/stacktrace.h> | ||
| 10 | |||
| 11 | static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) | ||
| 12 | { | ||
| 13 | return p > (void *)tinfo && | ||
| 14 | p < (void *)tinfo + THREAD_SIZE - 3; | ||
| 15 | } | ||
| 16 | |||
| 17 | /* | ||
| 18 | * Save stack-backtrace addresses into a stack_trace buffer: | ||
| 19 | */ | ||
| 20 | static inline unsigned long | ||
| 21 | save_context_stack(struct stack_trace *trace, unsigned int skip, | ||
| 22 | struct thread_info *tinfo, unsigned long *stack, | ||
| 23 | unsigned long ebp) | ||
| 24 | { | ||
| 25 | unsigned long addr; | ||
| 26 | |||
| 27 | #ifdef CONFIG_FRAME_POINTER | ||
| 28 | while (valid_stack_ptr(tinfo, (void *)ebp)) { | ||
| 29 | addr = *(unsigned long *)(ebp + 4); | ||
| 30 | if (!skip) | ||
| 31 | trace->entries[trace->nr_entries++] = addr; | ||
| 32 | else | ||
| 33 | skip--; | ||
| 34 | if (trace->nr_entries >= trace->max_entries) | ||
| 35 | break; | ||
| 36 | /* | ||
| 37 | * break out of recursive entries (such as | ||
| 38 | * end_of_stack_stop_unwind_function): | ||
| 39 | */ | ||
| 40 | if (ebp == *(unsigned long *)ebp) | ||
| 41 | break; | ||
| 42 | |||
| 43 | ebp = *(unsigned long *)ebp; | ||
| 44 | } | ||
| 45 | #else | ||
| 46 | while (valid_stack_ptr(tinfo, stack)) { | ||
| 47 | addr = *stack++; | ||
| 48 | if (__kernel_text_address(addr)) { | ||
| 49 | if (!skip) | ||
| 50 | trace->entries[trace->nr_entries++] = addr; | ||
| 51 | else | ||
| 52 | skip--; | ||
| 53 | if (trace->nr_entries >= trace->max_entries) | ||
| 54 | break; | ||
| 55 | } | ||
| 56 | } | ||
| 57 | #endif | ||
| 58 | |||
| 59 | return ebp; | ||
| 60 | } | ||
| 61 | |||
| 62 | /* | ||
| 63 | * Save stack-backtrace addresses into a stack_trace buffer. | ||
| 64 | * If all_contexts is set, all contexts (hardirq, softirq and process) | ||
| 65 | * are saved. If not set then only the current context is saved. | ||
| 66 | */ | ||
| 67 | void save_stack_trace(struct stack_trace *trace, | ||
| 68 | struct task_struct *task, int all_contexts, | ||
| 69 | unsigned int skip) | ||
| 70 | { | ||
| 71 | unsigned long ebp; | ||
| 72 | unsigned long *stack = &ebp; | ||
| 73 | |||
| 74 | WARN_ON(trace->nr_entries || !trace->max_entries); | ||
| 75 | |||
| 76 | if (!task || task == current) { | ||
| 77 | /* Grab ebp right from our regs: */ | ||
| 78 | asm ("movl %%ebp, %0" : "=r" (ebp)); | ||
| 79 | } else { | ||
| 80 | /* ebp is the last reg pushed by switch_to(): */ | ||
| 81 | ebp = *(unsigned long *) task->thread.esp; | ||
| 82 | } | ||
| 83 | |||
| 84 | while (1) { | ||
| 85 | struct thread_info *context = (struct thread_info *) | ||
| 86 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); | ||
| 87 | |||
| 88 | ebp = save_context_stack(trace, skip, context, stack, ebp); | ||
| 89 | stack = (unsigned long *)context->previous_esp; | ||
| 90 | if (!all_contexts || !stack || | ||
| 91 | trace->nr_entries >= trace->max_entries) | ||
| 92 | break; | ||
| 93 | trace->entries[trace->nr_entries++] = ULONG_MAX; | ||
| 94 | if (trace->nr_entries >= trace->max_entries) | ||
| 95 | break; | ||
| 96 | } | ||
| 97 | } | ||
| 98 | |||
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S index dd63d4775398..7e639f78b0b9 100644 --- a/arch/i386/kernel/syscall_table.S +++ b/arch/i386/kernel/syscall_table.S | |||
| @@ -317,3 +317,4 @@ ENTRY(sys_call_table) | |||
| 317 | .long sys_tee /* 315 */ | 317 | .long sys_tee /* 315 */ |
| 318 | .long sys_vmsplice | 318 | .long sys_vmsplice |
| 319 | .long sys_move_pages | 319 | .long sys_move_pages |
| 320 | .long sys_getcpu | ||
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c index edd00f6cee37..86944acfb647 100644 --- a/arch/i386/kernel/time.c +++ b/arch/i386/kernel/time.c | |||
| @@ -130,18 +130,33 @@ static int set_rtc_mmss(unsigned long nowtime) | |||
| 130 | 130 | ||
| 131 | int timer_ack; | 131 | int timer_ack; |
| 132 | 132 | ||
| 133 | #if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER) | ||
| 134 | unsigned long profile_pc(struct pt_regs *regs) | 133 | unsigned long profile_pc(struct pt_regs *regs) |
| 135 | { | 134 | { |
| 136 | unsigned long pc = instruction_pointer(regs); | 135 | unsigned long pc = instruction_pointer(regs); |
| 137 | 136 | ||
| 138 | if (!user_mode_vm(regs) && in_lock_functions(pc)) | 137 | #ifdef CONFIG_SMP |
| 138 | if (!user_mode_vm(regs) && in_lock_functions(pc)) { | ||
| 139 | #ifdef CONFIG_FRAME_POINTER | ||
| 139 | return *(unsigned long *)(regs->ebp + 4); | 140 | return *(unsigned long *)(regs->ebp + 4); |
| 140 | 141 | #else | |
| 142 | unsigned long *sp; | ||
| 143 | if ((regs->xcs & 3) == 0) | ||
| 144 | sp = (unsigned long *)®s->esp; | ||
| 145 | else | ||
| 146 | sp = (unsigned long *)regs->esp; | ||
| 147 | /* Return address is either directly at stack pointer | ||
| 148 | or above a saved eflags. Eflags has bits 22-31 zero, | ||
| 149 | kernel addresses don't. */ | ||
| 150 | if (sp[0] >> 22) | ||
| 151 | return sp[0]; | ||
| 152 | if (sp[1] >> 22) | ||
| 153 | return sp[1]; | ||
| 154 | #endif | ||
| 155 | } | ||
| 156 | #endif | ||
| 141 | return pc; | 157 | return pc; |
| 142 | } | 158 | } |
| 143 | EXPORT_SYMBOL(profile_pc); | 159 | EXPORT_SYMBOL(profile_pc); |
| 144 | #endif | ||
| 145 | 160 | ||
| 146 | /* | 161 | /* |
| 147 | * This is the same as the above, except we _also_ save the current | 162 | * This is the same as the above, except we _also_ save the current |
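The frame-pointer-less fallback above leans on an address-space property: kernel text sits above PAGE_OFFSET (0xC0000000 on the standard split), so any return address has bits 22-31 set, while a saved EFLAGS word always has those bits clear. A purely illustrative user-space check of that assumption (the sample values are invented):

```c
#include <stdio.h>

int main(void)
{
	unsigned long kernel_ret   = 0xc0123456UL;	/* typical kernel text address */
	unsigned long saved_eflags = 0x00000246UL;	/* IF, ZF, PF plus reserved bit 1 */

	printf("%#lx >> 22 = %#lx  -> taken as the caller's PC\n",
	       kernel_ret, kernel_ret >> 22);
	printf("%#lx >> 22 = %#lx  -> skipped as saved EFLAGS\n",
	       saved_eflags, saved_eflags >> 22);
	return 0;
}
```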
| @@ -270,16 +285,19 @@ void notify_arch_cmos_timer(void) | |||
| 270 | mod_timer(&sync_cmos_timer, jiffies + 1); | 285 | mod_timer(&sync_cmos_timer, jiffies + 1); |
| 271 | } | 286 | } |
| 272 | 287 | ||
| 273 | static long clock_cmos_diff, sleep_start; | 288 | static long clock_cmos_diff; |
| 289 | static unsigned long sleep_start; | ||
| 274 | 290 | ||
| 275 | static int timer_suspend(struct sys_device *dev, pm_message_t state) | 291 | static int timer_suspend(struct sys_device *dev, pm_message_t state) |
| 276 | { | 292 | { |
| 277 | /* | 293 | /* |
| 278 | * Estimate time zone so that set_time can update the clock | 294 | * Estimate time zone so that set_time can update the clock |
| 279 | */ | 295 | */ |
| 280 | clock_cmos_diff = -get_cmos_time(); | 296 | unsigned long ctime = get_cmos_time(); |
| 297 | |||
| 298 | clock_cmos_diff = -ctime; | ||
| 281 | clock_cmos_diff += get_seconds(); | 299 | clock_cmos_diff += get_seconds(); |
| 282 | sleep_start = get_cmos_time(); | 300 | sleep_start = ctime; |
| 283 | return 0; | 301 | return 0; |
| 284 | } | 302 | } |
| 285 | 303 | ||
| @@ -287,18 +305,29 @@ static int timer_resume(struct sys_device *dev) | |||
| 287 | { | 305 | { |
| 288 | unsigned long flags; | 306 | unsigned long flags; |
| 289 | unsigned long sec; | 307 | unsigned long sec; |
| 290 | unsigned long sleep_length; | 308 | unsigned long ctime = get_cmos_time(); |
| 291 | 309 | long sleep_length = (ctime - sleep_start) * HZ; | |
| 310 | struct timespec ts; | ||
| 311 | |||
| 312 | if (sleep_length < 0) { | ||
| 313 | printk(KERN_WARNING "CMOS clock skew detected in timer resume!\n"); | ||
| 314 | /* The time after the resume must not be earlier than the time | ||
| 315 | * before the suspend or some nasty things will happen | ||
| 316 | */ | ||
| 317 | sleep_length = 0; | ||
| 318 | ctime = sleep_start; | ||
| 319 | } | ||
| 292 | #ifdef CONFIG_HPET_TIMER | 320 | #ifdef CONFIG_HPET_TIMER |
| 293 | if (is_hpet_enabled()) | 321 | if (is_hpet_enabled()) |
| 294 | hpet_reenable(); | 322 | hpet_reenable(); |
| 295 | #endif | 323 | #endif |
| 296 | setup_pit_timer(); | 324 | setup_pit_timer(); |
| 297 | sec = get_cmos_time() + clock_cmos_diff; | 325 | |
| 298 | sleep_length = (get_cmos_time() - sleep_start) * HZ; | 326 | sec = ctime + clock_cmos_diff; |
| 327 | ts.tv_sec = sec; | ||
| 328 | ts.tv_nsec = 0; | ||
| 329 | do_settimeofday(&ts); | ||
| 299 | write_seqlock_irqsave(&xtime_lock, flags); | 330 | write_seqlock_irqsave(&xtime_lock, flags); |
| 300 | xtime.tv_sec = sec; | ||
| 301 | xtime.tv_nsec = 0; | ||
| 302 | jiffies_64 += sleep_length; | 331 | jiffies_64 += sleep_length; |
| 303 | wall_jiffies += sleep_length; | 332 | wall_jiffies += sleep_length; |
| 304 | write_sequnlock_irqrestore(&xtime_lock, flags); | 333 | write_sequnlock_irqrestore(&xtime_lock, flags); |
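The suspend/resume pair works in whole CMOS seconds: clock_cmos_diff captures the wall-clock-minus-CMOS offset at suspend, and on resume the elapsed CMOS seconds become a jiffies advance, clamped at zero if the CMOS clock appears to have run backwards. A toy calculation with invented timestamps (not taken from this patch):

```c
#include <stdio.h>

int main(void)
{
	const long hz = 250;				/* stand-in for HZ */
	unsigned long cmos_at_suspend = 1000000000UL;	/* get_cmos_time() at suspend */
	unsigned long wall_at_suspend = 1000000042UL;	/* get_seconds() at suspend */
	unsigned long cmos_at_resume  = 1000000123UL;	/* get_cmos_time() at resume */

	long clock_cmos_diff = (long)wall_at_suspend - (long)cmos_at_suspend;
	long sleep_length = (long)(cmos_at_resume - cmos_at_suspend) * hz;

	if (sleep_length < 0)				/* CMOS clock skew */
		sleep_length = 0;

	printf("restored wall time: %lu, jiffies advance: %ld\n",
	       cmos_at_resume + clock_cmos_diff, sleep_length);
	return 0;
}
```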
| @@ -334,10 +363,11 @@ extern void (*late_time_init)(void); | |||
| 334 | /* Duplicate of time_init() below, with hpet_enable part added */ | 363 | /* Duplicate of time_init() below, with hpet_enable part added */ |
| 335 | static void __init hpet_time_init(void) | 364 | static void __init hpet_time_init(void) |
| 336 | { | 365 | { |
| 337 | xtime.tv_sec = get_cmos_time(); | 366 | struct timespec ts; |
| 338 | xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); | 367 | ts.tv_sec = get_cmos_time(); |
| 339 | set_normalized_timespec(&wall_to_monotonic, | 368 | ts.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); |
| 340 | -xtime.tv_sec, -xtime.tv_nsec); | 369 | |
| 370 | do_settimeofday(&ts); | ||
| 341 | 371 | ||
| 342 | if ((hpet_enable() >= 0) && hpet_use_timer) { | 372 | if ((hpet_enable() >= 0) && hpet_use_timer) { |
| 343 | printk("Using HPET for base-timer\n"); | 373 | printk("Using HPET for base-timer\n"); |
| @@ -349,6 +379,7 @@ static void __init hpet_time_init(void) | |||
| 349 | 379 | ||
| 350 | void __init time_init(void) | 380 | void __init time_init(void) |
| 351 | { | 381 | { |
| 382 | struct timespec ts; | ||
| 352 | #ifdef CONFIG_HPET_TIMER | 383 | #ifdef CONFIG_HPET_TIMER |
| 353 | if (is_hpet_capable()) { | 384 | if (is_hpet_capable()) { |
| 354 | /* | 385 | /* |
| @@ -359,10 +390,10 @@ void __init time_init(void) | |||
| 359 | return; | 390 | return; |
| 360 | } | 391 | } |
| 361 | #endif | 392 | #endif |
| 362 | xtime.tv_sec = get_cmos_time(); | 393 | ts.tv_sec = get_cmos_time(); |
| 363 | xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); | 394 | ts.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); |
| 364 | set_normalized_timespec(&wall_to_monotonic, | 395 | |
| 365 | -xtime.tv_sec, -xtime.tv_nsec); | 396 | do_settimeofday(&ts); |
| 366 | 397 | ||
| 367 | time_init_hook(); | 398 | time_init_hook(); |
| 368 | } | 399 | } |
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c index 14a1376fedd1..6bf14a4e995e 100644 --- a/arch/i386/kernel/time_hpet.c +++ b/arch/i386/kernel/time_hpet.c | |||
| @@ -301,23 +301,25 @@ int hpet_rtc_timer_init(void) | |||
| 301 | hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; | 301 | hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; |
| 302 | 302 | ||
| 303 | local_irq_save(flags); | 303 | local_irq_save(flags); |
| 304 | |||
| 304 | cnt = hpet_readl(HPET_COUNTER); | 305 | cnt = hpet_readl(HPET_COUNTER); |
| 305 | cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq); | 306 | cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq); |
| 306 | hpet_writel(cnt, HPET_T1_CMP); | 307 | hpet_writel(cnt, HPET_T1_CMP); |
| 307 | hpet_t1_cmp = cnt; | 308 | hpet_t1_cmp = cnt; |
| 308 | local_irq_restore(flags); | ||
| 309 | 309 | ||
| 310 | cfg = hpet_readl(HPET_T1_CFG); | 310 | cfg = hpet_readl(HPET_T1_CFG); |
| 311 | cfg &= ~HPET_TN_PERIODIC; | 311 | cfg &= ~HPET_TN_PERIODIC; |
| 312 | cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; | 312 | cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; |
| 313 | hpet_writel(cfg, HPET_T1_CFG); | 313 | hpet_writel(cfg, HPET_T1_CFG); |
| 314 | 314 | ||
| 315 | local_irq_restore(flags); | ||
| 316 | |||
| 315 | return 1; | 317 | return 1; |
| 316 | } | 318 | } |
| 317 | 319 | ||
| 318 | static void hpet_rtc_timer_reinit(void) | 320 | static void hpet_rtc_timer_reinit(void) |
| 319 | { | 321 | { |
| 320 | unsigned int cfg, cnt; | 322 | unsigned int cfg, cnt, ticks_per_int, lost_ints; |
| 321 | 323 | ||
| 322 | if (unlikely(!(PIE_on | AIE_on | UIE_on))) { | 324 | if (unlikely(!(PIE_on | AIE_on | UIE_on))) { |
| 323 | cfg = hpet_readl(HPET_T1_CFG); | 325 | cfg = hpet_readl(HPET_T1_CFG); |
| @@ -332,10 +334,33 @@ static void hpet_rtc_timer_reinit(void) | |||
| 332 | hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; | 334 | hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; |
| 333 | 335 | ||
| 334 | /* It is more accurate to use the comparator value than current count.*/ | 336 | /* It is more accurate to use the comparator value than current count.*/ |
| 335 | cnt = hpet_t1_cmp; | 337 | ticks_per_int = hpet_tick * HZ / hpet_rtc_int_freq; |
| 336 | cnt += hpet_tick*HZ/hpet_rtc_int_freq; | 338 | hpet_t1_cmp += ticks_per_int; |
| 337 | hpet_writel(cnt, HPET_T1_CMP); | 339 | hpet_writel(hpet_t1_cmp, HPET_T1_CMP); |
| 338 | hpet_t1_cmp = cnt; | 340 | |
| 341 | /* | ||
| 342 | * If the interrupt handler was delayed too long, the write above tries | ||
| 343 | * to schedule the next interrupt in the past and the hardware would | ||
| 344 | * not interrupt until the counter had wrapped around. | ||
| 345 | * So we have to check that the comparator wasn't set to a past time. | ||
| 346 | */ | ||
| 347 | cnt = hpet_readl(HPET_COUNTER); | ||
| 348 | if (unlikely((int)(cnt - hpet_t1_cmp) > 0)) { | ||
| 349 | lost_ints = (cnt - hpet_t1_cmp) / ticks_per_int + 1; | ||
| 350 | /* Make sure that, even with the time needed to execute | ||
| 351 | * this code, the next scheduled interrupt has been moved | ||
| 352 | * back to the future: */ | ||
| 353 | lost_ints++; | ||
| 354 | |||
| 355 | hpet_t1_cmp += lost_ints * ticks_per_int; | ||
| 356 | hpet_writel(hpet_t1_cmp, HPET_T1_CMP); | ||
| 357 | |||
| 358 | if (PIE_on) | ||
| 359 | PIE_count += lost_ints; | ||
| 360 | |||
| 361 | printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", | ||
| 362 | hpet_rtc_int_freq); | ||
| 363 | } | ||
| 339 | } | 364 | } |
| 340 | 365 | ||
| 341 | /* | 366 | /* |
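The reinit path now recovers from a late RTC interrupt instead of waiting for the 32-bit HPET counter to wrap: if the counter has already passed the newly programmed comparator, it skips ahead by whole interrupt periods plus one extra period of slack. A small, self-contained illustration of that accounting with invented register values:

```c
#include <stdio.h>

int main(void)
{
	unsigned int ticks_per_int = 0x38000;	/* hpet_tick * HZ / hpet_rtc_int_freq */
	unsigned int t1_cmp  = 0x100000;	/* comparator value just written */
	unsigned int counter = 0x1a0000;	/* counter has already run past it */

	if ((int)(counter - t1_cmp) > 0) {
		unsigned int lost_ints = (counter - t1_cmp) / ticks_per_int + 1;

		lost_ints++;			/* slack for the code running right now */
		t1_cmp += lost_ints * ticks_per_int;
		printf("skipping %u periods, next comparator = %#x\n",
		       lost_ints, t1_cmp);
	}
	return 0;
}
```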
diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c index e2e281d4bcc8..07d6da36a825 100644 --- a/arch/i386/kernel/topology.c +++ b/arch/i386/kernel/topology.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
| 29 | #include <linux/smp.h> | 29 | #include <linux/smp.h> |
| 30 | #include <linux/nodemask.h> | 30 | #include <linux/nodemask.h> |
| 31 | #include <linux/mmzone.h> | ||
| 31 | #include <asm/cpu.h> | 32 | #include <asm/cpu.h> |
| 32 | 33 | ||
| 33 | static struct i386_cpu cpu_devices[NR_CPUS]; | 34 | static struct i386_cpu cpu_devices[NR_CPUS]; |
| @@ -55,34 +56,18 @@ EXPORT_SYMBOL(arch_register_cpu); | |||
| 55 | EXPORT_SYMBOL(arch_unregister_cpu); | 56 | EXPORT_SYMBOL(arch_unregister_cpu); |
| 56 | #endif /*CONFIG_HOTPLUG_CPU*/ | 57 | #endif /*CONFIG_HOTPLUG_CPU*/ |
| 57 | 58 | ||
| 58 | |||
| 59 | |||
| 60 | #ifdef CONFIG_NUMA | ||
| 61 | #include <linux/mmzone.h> | ||
| 62 | |||
| 63 | static int __init topology_init(void) | 59 | static int __init topology_init(void) |
| 64 | { | 60 | { |
| 65 | int i; | 61 | int i; |
| 66 | 62 | ||
| 63 | #ifdef CONFIG_NUMA | ||
| 67 | for_each_online_node(i) | 64 | for_each_online_node(i) |
| 68 | register_one_node(i); | 65 | register_one_node(i); |
| 66 | #endif /* CONFIG_NUMA */ | ||
| 69 | 67 | ||
| 70 | for_each_present_cpu(i) | 68 | for_each_present_cpu(i) |
| 71 | arch_register_cpu(i); | 69 | arch_register_cpu(i); |
| 72 | return 0; | 70 | return 0; |
| 73 | } | 71 | } |
| 74 | 72 | ||
| 75 | #else /* !CONFIG_NUMA */ | ||
| 76 | |||
| 77 | static int __init topology_init(void) | ||
| 78 | { | ||
| 79 | int i; | ||
| 80 | |||
| 81 | for_each_present_cpu(i) | ||
| 82 | arch_register_cpu(i); | ||
| 83 | return 0; | ||
| 84 | } | ||
| 85 | |||
| 86 | #endif /* CONFIG_NUMA */ | ||
| 87 | |||
| 88 | subsys_initcall(topology_init); | 73 | subsys_initcall(topology_init); |
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index 7e9edafffd8a..a13037fe0ee3 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/kprobes.h> | 28 | #include <linux/kprobes.h> |
| 29 | #include <linux/kexec.h> | 29 | #include <linux/kexec.h> |
| 30 | #include <linux/unwind.h> | 30 | #include <linux/unwind.h> |
| 31 | #include <linux/uaccess.h> | ||
| 31 | 32 | ||
| 32 | #ifdef CONFIG_EISA | 33 | #ifdef CONFIG_EISA |
| 33 | #include <linux/ioport.h> | 34 | #include <linux/ioport.h> |
| @@ -40,7 +41,6 @@ | |||
| 40 | 41 | ||
| 41 | #include <asm/processor.h> | 42 | #include <asm/processor.h> |
| 42 | #include <asm/system.h> | 43 | #include <asm/system.h> |
| 43 | #include <asm/uaccess.h> | ||
| 44 | #include <asm/io.h> | 44 | #include <asm/io.h> |
| 45 | #include <asm/atomic.h> | 45 | #include <asm/atomic.h> |
| 46 | #include <asm/debugreg.h> | 46 | #include <asm/debugreg.h> |
| @@ -51,6 +51,7 @@ | |||
| 51 | #include <asm/smp.h> | 51 | #include <asm/smp.h> |
| 52 | #include <asm/arch_hooks.h> | 52 | #include <asm/arch_hooks.h> |
| 53 | #include <asm/kdebug.h> | 53 | #include <asm/kdebug.h> |
| 54 | #include <asm/stacktrace.h> | ||
| 54 | 55 | ||
| 55 | #include <linux/module.h> | 56 | #include <linux/module.h> |
| 56 | 57 | ||
| @@ -118,26 +119,16 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) | |||
| 118 | p < (void *)tinfo + THREAD_SIZE - 3; | 119 | p < (void *)tinfo + THREAD_SIZE - 3; |
| 119 | } | 120 | } |
| 120 | 121 | ||
| 121 | /* | ||
| 122 | * Print one address/symbol entries per line. | ||
| 123 | */ | ||
| 124 | static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl) | ||
| 125 | { | ||
| 126 | printk(" [<%08lx>] ", addr); | ||
| 127 | |||
| 128 | print_symbol("%s\n", addr); | ||
| 129 | } | ||
| 130 | |||
| 131 | static inline unsigned long print_context_stack(struct thread_info *tinfo, | 122 | static inline unsigned long print_context_stack(struct thread_info *tinfo, |
| 132 | unsigned long *stack, unsigned long ebp, | 123 | unsigned long *stack, unsigned long ebp, |
| 133 | char *log_lvl) | 124 | struct stacktrace_ops *ops, void *data) |
| 134 | { | 125 | { |
| 135 | unsigned long addr; | 126 | unsigned long addr; |
| 136 | 127 | ||
| 137 | #ifdef CONFIG_FRAME_POINTER | 128 | #ifdef CONFIG_FRAME_POINTER |
| 138 | while (valid_stack_ptr(tinfo, (void *)ebp)) { | 129 | while (valid_stack_ptr(tinfo, (void *)ebp)) { |
| 139 | addr = *(unsigned long *)(ebp + 4); | 130 | addr = *(unsigned long *)(ebp + 4); |
| 140 | print_addr_and_symbol(addr, log_lvl); | 131 | ops->address(data, addr); |
| 141 | /* | 132 | /* |
| 142 | * break out of recursive entries (such as | 133 | * break out of recursive entries (such as |
| 143 | * end_of_stack_stop_unwind_function): | 134 | * end_of_stack_stop_unwind_function): |
| @@ -150,30 +141,37 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, | |||
| 150 | while (valid_stack_ptr(tinfo, stack)) { | 141 | while (valid_stack_ptr(tinfo, stack)) { |
| 151 | addr = *stack++; | 142 | addr = *stack++; |
| 152 | if (__kernel_text_address(addr)) | 143 | if (__kernel_text_address(addr)) |
| 153 | print_addr_and_symbol(addr, log_lvl); | 144 | ops->address(data, addr); |
| 154 | } | 145 | } |
| 155 | #endif | 146 | #endif |
| 156 | return ebp; | 147 | return ebp; |
| 157 | } | 148 | } |
| 158 | 149 | ||
| 150 | struct ops_and_data { | ||
| 151 | struct stacktrace_ops *ops; | ||
| 152 | void *data; | ||
| 153 | }; | ||
| 154 | |||
| 159 | static asmlinkage int | 155 | static asmlinkage int |
| 160 | show_trace_unwind(struct unwind_frame_info *info, void *log_lvl) | 156 | dump_trace_unwind(struct unwind_frame_info *info, void *data) |
| 161 | { | 157 | { |
| 158 | struct ops_and_data *oad = (struct ops_and_data *)data; | ||
| 162 | int n = 0; | 159 | int n = 0; |
| 163 | 160 | ||
| 164 | while (unwind(info) == 0 && UNW_PC(info)) { | 161 | while (unwind(info) == 0 && UNW_PC(info)) { |
| 165 | n++; | 162 | n++; |
| 166 | print_addr_and_symbol(UNW_PC(info), log_lvl); | 163 | oad->ops->address(oad->data, UNW_PC(info)); |
| 167 | if (arch_unw_user_mode(info)) | 164 | if (arch_unw_user_mode(info)) |
| 168 | break; | 165 | break; |
| 169 | } | 166 | } |
| 170 | return n; | 167 | return n; |
| 171 | } | 168 | } |
| 172 | 169 | ||
| 173 | static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, | 170 | void dump_trace(struct task_struct *task, struct pt_regs *regs, |
| 174 | unsigned long *stack, char *log_lvl) | 171 | unsigned long *stack, |
| 172 | struct stacktrace_ops *ops, void *data) | ||
| 175 | { | 173 | { |
| 176 | unsigned long ebp; | 174 | unsigned long ebp = 0; |
| 177 | 175 | ||
| 178 | if (!task) | 176 | if (!task) |
| 179 | task = current; | 177 | task = current; |
| @@ -181,54 +179,116 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, | |||
| 181 | if (call_trace >= 0) { | 179 | if (call_trace >= 0) { |
| 182 | int unw_ret = 0; | 180 | int unw_ret = 0; |
| 183 | struct unwind_frame_info info; | 181 | struct unwind_frame_info info; |
| 182 | struct ops_and_data oad = { .ops = ops, .data = data }; | ||
| 184 | 183 | ||
| 185 | if (regs) { | 184 | if (regs) { |
| 186 | if (unwind_init_frame_info(&info, task, regs) == 0) | 185 | if (unwind_init_frame_info(&info, task, regs) == 0) |
| 187 | unw_ret = show_trace_unwind(&info, log_lvl); | 186 | unw_ret = dump_trace_unwind(&info, &oad); |
| 188 | } else if (task == current) | 187 | } else if (task == current) |
| 189 | unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl); | 188 | unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad); |
| 190 | else { | 189 | else { |
| 191 | if (unwind_init_blocked(&info, task) == 0) | 190 | if (unwind_init_blocked(&info, task) == 0) |
| 192 | unw_ret = show_trace_unwind(&info, log_lvl); | 191 | unw_ret = dump_trace_unwind(&info, &oad); |
| 193 | } | 192 | } |
| 194 | if (unw_ret > 0) { | 193 | if (unw_ret > 0) { |
| 195 | if (call_trace == 1 && !arch_unw_user_mode(&info)) { | 194 | if (call_trace == 1 && !arch_unw_user_mode(&info)) { |
| 196 | print_symbol("DWARF2 unwinder stuck at %s\n", | 195 | ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n", |
| 197 | UNW_PC(&info)); | 196 | UNW_PC(&info)); |
| 198 | if (UNW_SP(&info) >= PAGE_OFFSET) { | 197 | if (UNW_SP(&info) >= PAGE_OFFSET) { |
| 199 | printk("Leftover inexact backtrace:\n"); | 198 | ops->warning(data, "Leftover inexact backtrace:\n"); |
| 200 | stack = (void *)UNW_SP(&info); | 199 | stack = (void *)UNW_SP(&info); |
| 200 | if (!stack) | ||
| 201 | return; | ||
| 202 | ebp = UNW_FP(&info); | ||
| 201 | } else | 203 | } else |
| 202 | printk("Full inexact backtrace again:\n"); | 204 | ops->warning(data, "Full inexact backtrace again:\n"); |
| 203 | } else if (call_trace >= 1) | 205 | } else if (call_trace >= 1) |
| 204 | return; | 206 | return; |
| 205 | else | 207 | else |
| 206 | printk("Full inexact backtrace again:\n"); | 208 | ops->warning(data, "Full inexact backtrace again:\n"); |
| 207 | } else | 209 | } else |
| 208 | printk("Inexact backtrace:\n"); | 210 | ops->warning(data, "Inexact backtrace:\n"); |
| 211 | } | ||
| 212 | if (!stack) { | ||
| 213 | unsigned long dummy; | ||
| 214 | stack = &dummy; | ||
| 215 | if (task && task != current) | ||
| 216 | stack = (unsigned long *)task->thread.esp; | ||
| 209 | } | 217 | } |
| 210 | 218 | ||
| 211 | if (task == current) { | 219 | #ifdef CONFIG_FRAME_POINTER |
| 212 | /* Grab ebp right from our regs */ | 220 | if (!ebp) { |
| 213 | asm ("movl %%ebp, %0" : "=r" (ebp) : ); | 221 | if (task == current) { |
| 214 | } else { | 222 | /* Grab ebp right from our regs */ |
| 215 | /* ebp is the last reg pushed by switch_to */ | 223 | asm ("movl %%ebp, %0" : "=r" (ebp) : ); |
| 216 | ebp = *(unsigned long *) task->thread.esp; | 224 | } else { |
| 225 | /* ebp is the last reg pushed by switch_to */ | ||
| 226 | ebp = *(unsigned long *) task->thread.esp; | ||
| 227 | } | ||
| 217 | } | 228 | } |
| 229 | #endif | ||
| 218 | 230 | ||
| 219 | while (1) { | 231 | while (1) { |
| 220 | struct thread_info *context; | 232 | struct thread_info *context; |
| 221 | context = (struct thread_info *) | 233 | context = (struct thread_info *) |
| 222 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); | 234 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); |
| 223 | ebp = print_context_stack(context, stack, ebp, log_lvl); | 235 | ebp = print_context_stack(context, stack, ebp, ops, data); |
| 236 | /* Should be after the line below, but somewhere | ||
| 237 | in early boot context comes out corrupted and we | ||
| 238 | can't reference it -AK */ | ||
| 239 | if (ops->stack(data, "IRQ") < 0) | ||
| 240 | break; | ||
| 224 | stack = (unsigned long*)context->previous_esp; | 241 | stack = (unsigned long*)context->previous_esp; |
| 225 | if (!stack) | 242 | if (!stack) |
| 226 | break; | 243 | break; |
| 227 | printk("%s =======================\n", log_lvl); | ||
| 228 | } | 244 | } |
| 229 | } | 245 | } |
| 246 | EXPORT_SYMBOL(dump_trace); | ||
| 247 | |||
| 248 | static void | ||
| 249 | print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) | ||
| 250 | { | ||
| 251 | printk(data); | ||
| 252 | print_symbol(msg, symbol); | ||
| 253 | printk("\n"); | ||
| 254 | } | ||
| 255 | |||
| 256 | static void print_trace_warning(void *data, char *msg) | ||
| 257 | { | ||
| 258 | printk("%s%s\n", (char *)data, msg); | ||
| 259 | } | ||
| 260 | |||
| 261 | static int print_trace_stack(void *data, char *name) | ||
| 262 | { | ||
| 263 | return 0; | ||
| 264 | } | ||
| 265 | |||
| 266 | /* | ||
| 267 | * Print one address/symbol entries per line. | ||
| 268 | */ | ||
| 269 | static void print_trace_address(void *data, unsigned long addr) | ||
| 270 | { | ||
| 271 | printk("%s [<%08lx>] ", (char *)data, addr); | ||
| 272 | print_symbol("%s\n", addr); | ||
| 273 | } | ||
| 274 | |||
| 275 | static struct stacktrace_ops print_trace_ops = { | ||
| 276 | .warning = print_trace_warning, | ||
| 277 | .warning_symbol = print_trace_warning_symbol, | ||
| 278 | .stack = print_trace_stack, | ||
| 279 | .address = print_trace_address, | ||
| 280 | }; | ||
| 281 | |||
| 282 | static void | ||
| 283 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, | ||
| 284 | unsigned long * stack, char *log_lvl) | ||
| 285 | { | ||
| 286 | dump_trace(task, regs, stack, &print_trace_ops, log_lvl); | ||
| 287 | printk("%s =======================\n", log_lvl); | ||
| 288 | } | ||
| 230 | 289 | ||
| 231 | void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack) | 290 | void show_trace(struct task_struct *task, struct pt_regs *regs, |
| 291 | unsigned long * stack) | ||
| 232 | { | 292 | { |
| 233 | show_trace_log_lvl(task, regs, stack, ""); | 293 | show_trace_log_lvl(task, regs, stack, ""); |
| 234 | } | 294 | } |
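With the walker factored out as dump_trace(), print_trace_ops above is just one consumer. Any other consumer only has to supply its own struct stacktrace_ops; the sketch below is hypothetical (not part of this patch) and simply counts the entries on the current stack instead of printing them:

```c
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/stacktrace.h>

static void count_warning(void *data, char *msg) { }
static void count_warning_symbol(void *data, char *msg, unsigned long sym) { }
static int count_stack(void *data, char *name) { return 0; }

static void count_address(void *data, unsigned long addr)
{
	(*(unsigned int *)data)++;
}

static struct stacktrace_ops count_ops = {
	.warning	= count_warning,
	.warning_symbol	= count_warning_symbol,
	.stack		= count_stack,
	.address	= count_address,
};

static unsigned int count_current_stack_entries(void)
{
	unsigned int n = 0;

	/* NULL regs and NULL stack: dump_trace() falls back to the current
	 * frame, exactly as show_trace() does above. */
	dump_trace(current, NULL, NULL, &count_ops, &n);
	return n;
}
```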
| @@ -291,8 +351,9 @@ void show_registers(struct pt_regs *regs) | |||
| 291 | ss = regs->xss & 0xffff; | 351 | ss = regs->xss & 0xffff; |
| 292 | } | 352 | } |
| 293 | print_modules(); | 353 | print_modules(); |
| 294 | printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n" | 354 | printk(KERN_EMERG "CPU: %d\n" |
| 295 | "EFLAGS: %08lx (%s %.*s) \n", | 355 | KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n" |
| 356 | KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n", | ||
| 296 | smp_processor_id(), 0xffff & regs->xcs, regs->eip, | 357 | smp_processor_id(), 0xffff & regs->xcs, regs->eip, |
| 297 | print_tainted(), regs->eflags, system_utsname.release, | 358 | print_tainted(), regs->eflags, system_utsname.release, |
| 298 | (int)strcspn(system_utsname.version, " "), | 359 | (int)strcspn(system_utsname.version, " "), |
| @@ -313,6 +374,8 @@ void show_registers(struct pt_regs *regs) | |||
| 313 | */ | 374 | */ |
| 314 | if (in_kernel) { | 375 | if (in_kernel) { |
| 315 | u8 __user *eip; | 376 | u8 __user *eip; |
| 377 | int code_bytes = 64; | ||
| 378 | unsigned char c; | ||
| 316 | 379 | ||
| 317 | printk("\n" KERN_EMERG "Stack: "); | 380 | printk("\n" KERN_EMERG "Stack: "); |
| 318 | show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG); | 381 | show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG); |
| @@ -320,9 +383,12 @@ void show_registers(struct pt_regs *regs) | |||
| 320 | printk(KERN_EMERG "Code: "); | 383 | printk(KERN_EMERG "Code: "); |
| 321 | 384 | ||
| 322 | eip = (u8 __user *)regs->eip - 43; | 385 | eip = (u8 __user *)regs->eip - 43; |
| 323 | for (i = 0; i < 64; i++, eip++) { | 386 | if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) { |
| 324 | unsigned char c; | 387 | /* try starting at EIP */ |
| 325 | 388 | eip = (u8 __user *)regs->eip; | |
| 389 | code_bytes = 32; | ||
| 390 | } | ||
| 391 | for (i = 0; i < code_bytes; i++, eip++) { | ||
| 326 | if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) { | 392 | if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) { |
| 327 | printk(" Bad EIP value."); | 393 | printk(" Bad EIP value."); |
| 328 | break; | 394 | break; |
| @@ -343,7 +409,7 @@ static void handle_BUG(struct pt_regs *regs) | |||
| 343 | 409 | ||
| 344 | if (eip < PAGE_OFFSET) | 410 | if (eip < PAGE_OFFSET) |
| 345 | return; | 411 | return; |
| 346 | if (__get_user(ud2, (unsigned short __user *)eip)) | 412 | if (probe_kernel_address((unsigned short __user *)eip, ud2)) |
| 347 | return; | 413 | return; |
| 348 | if (ud2 != 0x0b0f) | 414 | if (ud2 != 0x0b0f) |
| 349 | return; | 415 | return; |
| @@ -356,7 +422,8 @@ static void handle_BUG(struct pt_regs *regs) | |||
| 356 | char *file; | 422 | char *file; |
| 357 | char c; | 423 | char c; |
| 358 | 424 | ||
| 359 | if (__get_user(line, (unsigned short __user *)(eip + 2))) | 425 | if (probe_kernel_address((unsigned short __user *)(eip + 2), |
| 426 | line)) | ||
| 360 | break; | 427 | break; |
| 361 | if (__get_user(file, (char * __user *)(eip + 4)) || | 428 | if (__get_user(file, (char * __user *)(eip + 4)) || |
| 362 | (unsigned long)file < PAGE_OFFSET || __get_user(c, file)) | 429 | (unsigned long)file < PAGE_OFFSET || __get_user(c, file)) |
| @@ -629,18 +696,24 @@ gp_in_kernel: | |||
| 629 | } | 696 | } |
| 630 | } | 697 | } |
| 631 | 698 | ||
| 632 | static void mem_parity_error(unsigned char reason, struct pt_regs * regs) | 699 | static __kprobes void |
| 700 | mem_parity_error(unsigned char reason, struct pt_regs * regs) | ||
| 633 | { | 701 | { |
| 634 | printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying " | 702 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on " |
| 635 | "to continue\n"); | 703 | "CPU %d.\n", reason, smp_processor_id()); |
| 636 | printk(KERN_EMERG "You probably have a hardware problem with your RAM " | 704 | printk(KERN_EMERG "You probably have a hardware problem with your RAM " |
| 637 | "chips\n"); | 705 | "chips\n"); |
| 706 | if (panic_on_unrecovered_nmi) | ||
| 707 | panic("NMI: Not continuing"); | ||
| 708 | |||
| 709 | printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); | ||
| 638 | 710 | ||
| 639 | /* Clear and disable the memory parity error line. */ | 711 | /* Clear and disable the memory parity error line. */ |
| 640 | clear_mem_error(reason); | 712 | clear_mem_error(reason); |
| 641 | } | 713 | } |
| 642 | 714 | ||
| 643 | static void io_check_error(unsigned char reason, struct pt_regs * regs) | 715 | static __kprobes void |
| 716 | io_check_error(unsigned char reason, struct pt_regs * regs) | ||
| 644 | { | 717 | { |
| 645 | unsigned long i; | 718 | unsigned long i; |
| 646 | 719 | ||
| @@ -656,7 +729,8 @@ static void io_check_error(unsigned char reason, struct pt_regs * regs) | |||
| 656 | outb(reason, 0x61); | 729 | outb(reason, 0x61); |
| 657 | } | 730 | } |
| 658 | 731 | ||
| 659 | static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs) | 732 | static __kprobes void |
| 733 | unknown_nmi_error(unsigned char reason, struct pt_regs * regs) | ||
| 660 | { | 734 | { |
| 661 | #ifdef CONFIG_MCA | 735 | #ifdef CONFIG_MCA |
| 662 | /* Might actually be able to figure out what the guilty party | 736 | /* Might actually be able to figure out what the guilty party |
| @@ -666,15 +740,18 @@ static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs) | |||
| 666 | return; | 740 | return; |
| 667 | } | 741 | } |
| 668 | #endif | 742 | #endif |
| 669 | printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", | 743 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on " |
| 670 | reason, smp_processor_id()); | 744 | "CPU %d.\n", reason, smp_processor_id()); |
| 671 | printk("Dazed and confused, but trying to continue\n"); | 745 | printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n"); |
| 672 | printk("Do you have a strange power saving mode enabled?\n"); | 746 | if (panic_on_unrecovered_nmi) |
| 747 | panic("NMI: Not continuing"); | ||
| 748 | |||
| 749 | printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); | ||
| 673 | } | 750 | } |
| 674 | 751 | ||
| 675 | static DEFINE_SPINLOCK(nmi_print_lock); | 752 | static DEFINE_SPINLOCK(nmi_print_lock); |
| 676 | 753 | ||
| 677 | void die_nmi (struct pt_regs *regs, const char *msg) | 754 | void __kprobes die_nmi(struct pt_regs *regs, const char *msg) |
| 678 | { | 755 | { |
| 679 | if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) == | 756 | if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) == |
| 680 | NOTIFY_STOP) | 757 | NOTIFY_STOP) |
| @@ -706,7 +783,7 @@ void die_nmi (struct pt_regs *regs, const char *msg) | |||
| 706 | do_exit(SIGSEGV); | 783 | do_exit(SIGSEGV); |
| 707 | } | 784 | } |
| 708 | 785 | ||
| 709 | static void default_do_nmi(struct pt_regs * regs) | 786 | static __kprobes void default_do_nmi(struct pt_regs * regs) |
| 710 | { | 787 | { |
| 711 | unsigned char reason = 0; | 788 | unsigned char reason = 0; |
| 712 | 789 | ||
| @@ -723,12 +800,12 @@ static void default_do_nmi(struct pt_regs * regs) | |||
| 723 | * Ok, so this is none of the documented NMI sources, | 800 | * Ok, so this is none of the documented NMI sources, |
| 724 | * so it must be the NMI watchdog. | 801 | * so it must be the NMI watchdog. |
| 725 | */ | 802 | */ |
| 726 | if (nmi_watchdog) { | 803 | if (nmi_watchdog_tick(regs, reason)) |
| 727 | nmi_watchdog_tick(regs); | ||
| 728 | return; | 804 | return; |
| 729 | } | 805 | if (!do_nmi_callback(regs, smp_processor_id())) |
| 730 | #endif | 806 | #endif |
| 731 | unknown_nmi_error(reason, regs); | 807 | unknown_nmi_error(reason, regs); |
| 808 | |||
| 732 | return; | 809 | return; |
| 733 | } | 810 | } |
| 734 | if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) | 811 | if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) |
| @@ -744,14 +821,7 @@ static void default_do_nmi(struct pt_regs * regs) | |||
| 744 | reassert_nmi(); | 821 | reassert_nmi(); |
| 745 | } | 822 | } |
| 746 | 823 | ||
| 747 | static int dummy_nmi_callback(struct pt_regs * regs, int cpu) | 824 | fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code) |
| 748 | { | ||
| 749 | return 0; | ||
| 750 | } | ||
| 751 | |||
| 752 | static nmi_callback_t nmi_callback = dummy_nmi_callback; | ||
| 753 | |||
| 754 | fastcall void do_nmi(struct pt_regs * regs, long error_code) | ||
| 755 | { | 825 | { |
| 756 | int cpu; | 826 | int cpu; |
| 757 | 827 | ||
| @@ -761,25 +831,11 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code) | |||
| 761 | 831 | ||
| 762 | ++nmi_count(cpu); | 832 | ++nmi_count(cpu); |
| 763 | 833 | ||
| 764 | if (!rcu_dereference(nmi_callback)(regs, cpu)) | 834 | default_do_nmi(regs); |
| 765 | default_do_nmi(regs); | ||
| 766 | 835 | ||
| 767 | nmi_exit(); | 836 | nmi_exit(); |
| 768 | } | 837 | } |
| 769 | 838 | ||
| 770 | void set_nmi_callback(nmi_callback_t callback) | ||
| 771 | { | ||
| 772 | vmalloc_sync_all(); | ||
| 773 | rcu_assign_pointer(nmi_callback, callback); | ||
| 774 | } | ||
| 775 | EXPORT_SYMBOL_GPL(set_nmi_callback); | ||
| 776 | |||
| 777 | void unset_nmi_callback(void) | ||
| 778 | { | ||
| 779 | nmi_callback = dummy_nmi_callback; | ||
| 780 | } | ||
| 781 | EXPORT_SYMBOL_GPL(unset_nmi_callback); | ||
| 782 | |||
| 783 | #ifdef CONFIG_KPROBES | 839 | #ifdef CONFIG_KPROBES |
| 784 | fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code) | 840 | fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code) |
| 785 | { | 841 | { |
| @@ -1119,20 +1175,6 @@ void __init trap_init_f00f_bug(void) | |||
| 1119 | } | 1175 | } |
| 1120 | #endif | 1176 | #endif |
| 1121 | 1177 | ||
| 1122 | #define _set_gate(gate_addr,type,dpl,addr,seg) \ | ||
| 1123 | do { \ | ||
| 1124 | int __d0, __d1; \ | ||
| 1125 | __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \ | ||
| 1126 | "movw %4,%%dx\n\t" \ | ||
| 1127 | "movl %%eax,%0\n\t" \ | ||
| 1128 | "movl %%edx,%1" \ | ||
| 1129 | :"=m" (*((long *) (gate_addr))), \ | ||
| 1130 | "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \ | ||
| 1131 | :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \ | ||
| 1132 | "3" ((char *) (addr)),"2" ((seg) << 16)); \ | ||
| 1133 | } while (0) | ||
| 1134 | |||
| 1135 | |||
| 1136 | /* | 1178 | /* |
| 1137 | * This needs to use 'idt_table' rather than 'idt', and | 1179 | * This needs to use 'idt_table' rather than 'idt', and |
| 1138 | * thus use the _nonmapped_ version of the IDT, as the | 1180 | * thus use the _nonmapped_ version of the IDT, as the |
| @@ -1141,7 +1183,7 @@ do { \ | |||
| 1141 | */ | 1183 | */ |
| 1142 | void set_intr_gate(unsigned int n, void *addr) | 1184 | void set_intr_gate(unsigned int n, void *addr) |
| 1143 | { | 1185 | { |
| 1144 | _set_gate(idt_table+n,14,0,addr,__KERNEL_CS); | 1186 | _set_gate(n, DESCTYPE_INT, addr, __KERNEL_CS); |
| 1145 | } | 1187 | } |
| 1146 | 1188 | ||
| 1147 | /* | 1189 | /* |
| @@ -1149,22 +1191,22 @@ void set_intr_gate(unsigned int n, void *addr) | |||
| 1149 | */ | 1191 | */ |
| 1150 | static inline void set_system_intr_gate(unsigned int n, void *addr) | 1192 | static inline void set_system_intr_gate(unsigned int n, void *addr) |
| 1151 | { | 1193 | { |
| 1152 | _set_gate(idt_table+n, 14, 3, addr, __KERNEL_CS); | 1194 | _set_gate(n, DESCTYPE_INT | DESCTYPE_DPL3, addr, __KERNEL_CS); |
| 1153 | } | 1195 | } |
| 1154 | 1196 | ||
| 1155 | static void __init set_trap_gate(unsigned int n, void *addr) | 1197 | static void __init set_trap_gate(unsigned int n, void *addr) |
| 1156 | { | 1198 | { |
| 1157 | _set_gate(idt_table+n,15,0,addr,__KERNEL_CS); | 1199 | _set_gate(n, DESCTYPE_TRAP, addr, __KERNEL_CS); |
| 1158 | } | 1200 | } |
| 1159 | 1201 | ||
| 1160 | static void __init set_system_gate(unsigned int n, void *addr) | 1202 | static void __init set_system_gate(unsigned int n, void *addr) |
| 1161 | { | 1203 | { |
| 1162 | _set_gate(idt_table+n,15,3,addr,__KERNEL_CS); | 1204 | _set_gate(n, DESCTYPE_TRAP | DESCTYPE_DPL3, addr, __KERNEL_CS); |
| 1163 | } | 1205 | } |
| 1164 | 1206 | ||
| 1165 | static void __init set_task_gate(unsigned int n, unsigned int gdt_entry) | 1207 | static void __init set_task_gate(unsigned int n, unsigned int gdt_entry) |
| 1166 | { | 1208 | { |
| 1167 | _set_gate(idt_table+n,5,0,0,(gdt_entry<<3)); | 1209 | _set_gate(n, DESCTYPE_TASK, (void *)0, (gdt_entry<<3)); |
| 1168 | } | 1210 | } |
| 1169 | 1211 | ||
| 1170 | 1212 | ||
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c index 7e0d8dab2075..b8fa0a8b2e47 100644 --- a/arch/i386/kernel/tsc.c +++ b/arch/i386/kernel/tsc.c | |||
| @@ -192,7 +192,7 @@ int recalibrate_cpu_khz(void) | |||
| 192 | 192 | ||
| 193 | EXPORT_SYMBOL(recalibrate_cpu_khz); | 193 | EXPORT_SYMBOL(recalibrate_cpu_khz); |
| 194 | 194 | ||
| 195 | void tsc_init(void) | 195 | void __init tsc_init(void) |
| 196 | { | 196 | { |
| 197 | if (!cpu_has_tsc || tsc_disable) | 197 | if (!cpu_has_tsc || tsc_disable) |
| 198 | return; | 198 | return; |
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S index 2d4f1386e2b1..1e7ac1c44ddc 100644 --- a/arch/i386/kernel/vmlinux.lds.S +++ b/arch/i386/kernel/vmlinux.lds.S | |||
| @@ -13,6 +13,12 @@ OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") | |||
| 13 | OUTPUT_ARCH(i386) | 13 | OUTPUT_ARCH(i386) |
| 14 | ENTRY(phys_startup_32) | 14 | ENTRY(phys_startup_32) |
| 15 | jiffies = jiffies_64; | 15 | jiffies = jiffies_64; |
| 16 | |||
| 17 | PHDRS { | ||
| 18 | text PT_LOAD FLAGS(5); /* R_E */ | ||
| 19 | data PT_LOAD FLAGS(7); /* RWE */ | ||
| 20 | note PT_NOTE FLAGS(4); /* R__ */ | ||
| 21 | } | ||
| 16 | SECTIONS | 22 | SECTIONS |
| 17 | { | 23 | { |
| 18 | . = __KERNEL_START; | 24 | . = __KERNEL_START; |
| @@ -26,7 +32,7 @@ SECTIONS | |||
| 26 | KPROBES_TEXT | 32 | KPROBES_TEXT |
| 27 | *(.fixup) | 33 | *(.fixup) |
| 28 | *(.gnu.warning) | 34 | *(.gnu.warning) |
| 29 | } = 0x9090 | 35 | } :text = 0x9090 |
| 30 | 36 | ||
| 31 | _etext = .; /* End of text section */ | 37 | _etext = .; /* End of text section */ |
| 32 | 38 | ||
| @@ -48,7 +54,7 @@ SECTIONS | |||
| 48 | .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ | 54 | .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ |
| 49 | *(.data) | 55 | *(.data) |
| 50 | CONSTRUCTORS | 56 | CONSTRUCTORS |
| 51 | } | 57 | } :data |
| 52 | 58 | ||
| 53 | . = ALIGN(4096); | 59 | . = ALIGN(4096); |
| 54 | __nosave_begin = .; | 60 | __nosave_begin = .; |
| @@ -184,4 +190,6 @@ SECTIONS | |||
| 184 | STABS_DEBUG | 190 | STABS_DEBUG |
| 185 | 191 | ||
| 186 | DWARF_DEBUG | 192 | DWARF_DEBUG |
| 193 | |||
| 194 | NOTES | ||
| 187 | } | 195 | } |
diff --git a/arch/i386/lib/Makefile b/arch/i386/lib/Makefile index 914933e9ec3d..d86a548b8d54 100644 --- a/arch/i386/lib/Makefile +++ b/arch/i386/lib/Makefile | |||
| @@ -4,6 +4,6 @@ | |||
| 4 | 4 | ||
| 5 | 5 | ||
| 6 | lib-y = checksum.o delay.o usercopy.o getuser.o putuser.o memcpy.o strstr.o \ | 6 | lib-y = checksum.o delay.o usercopy.o getuser.o putuser.o memcpy.o strstr.o \ |
| 7 | bitops.o | 7 | bitops.o semaphore.o |
| 8 | 8 | ||
| 9 | lib-$(CONFIG_X86_USE_3DNOW) += mmx.o | 9 | lib-$(CONFIG_X86_USE_3DNOW) += mmx.o |
diff --git a/arch/i386/lib/semaphore.S b/arch/i386/lib/semaphore.S new file mode 100644 index 000000000000..01f80b5c45d2 --- /dev/null +++ b/arch/i386/lib/semaphore.S | |||
| @@ -0,0 +1,217 @@ | |||
| 1 | /* | ||
| 2 | * i386 semaphore implementation. | ||
| 3 | * | ||
| 4 | * (C) Copyright 1999 Linus Torvalds | ||
| 5 | * | ||
| 6 | * Portions Copyright 1999 Red Hat, Inc. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * as published by the Free Software Foundation; either version | ||
| 11 | * 2 of the License, or (at your option) any later version. | ||
| 12 | * | ||
| 13 | * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org> | ||
| 14 | */ | ||
| 15 | |||
| 16 | #include <linux/config.h> | ||
| 17 | #include <linux/linkage.h> | ||
| 18 | #include <asm/rwlock.h> | ||
| 19 | #include <asm/alternative-asm.i> | ||
| 20 | #include <asm/frame.i> | ||
| 21 | #include <asm/dwarf2.h> | ||
| 22 | |||
| 23 | /* | ||
| 24 | * The semaphore operations have a special calling sequence that | ||
| 25 | * allow us to do a simpler in-line version of them. These routines | ||
| 26 | * need to convert that sequence back into the C sequence when | ||
| 27 | * there is contention on the semaphore. | ||
| 28 | * | ||
| 29 | * %eax contains the semaphore pointer on entry. Save the C-clobbered | ||
| 30 | * registers (%eax, %edx and %ecx) except %eax which is either a return | ||
| 31 | * value or just clobbered. | ||
| 32 | */ | ||
| 33 | .section .sched.text | ||
| 34 | ENTRY(__down_failed) | ||
| 35 | CFI_STARTPROC | ||
| 36 | FRAME | ||
| 37 | pushl %edx | ||
| 38 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 39 | CFI_REL_OFFSET edx,0 | ||
| 40 | pushl %ecx | ||
| 41 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 42 | CFI_REL_OFFSET ecx,0 | ||
| 43 | call __down | ||
| 44 | popl %ecx | ||
| 45 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 46 | CFI_RESTORE ecx | ||
| 47 | popl %edx | ||
| 48 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 49 | CFI_RESTORE edx | ||
| 50 | ENDFRAME | ||
| 51 | ret | ||
| 52 | CFI_ENDPROC | ||
| 53 | END(__down_failed) | ||
| 54 | |||
| 55 | ENTRY(__down_failed_interruptible) | ||
| 56 | CFI_STARTPROC | ||
| 57 | FRAME | ||
| 58 | pushl %edx | ||
| 59 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 60 | CFI_REL_OFFSET edx,0 | ||
| 61 | pushl %ecx | ||
| 62 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 63 | CFI_REL_OFFSET ecx,0 | ||
| 64 | call __down_interruptible | ||
| 65 | popl %ecx | ||
| 66 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 67 | CFI_RESTORE ecx | ||
| 68 | popl %edx | ||
| 69 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 70 | CFI_RESTORE edx | ||
| 71 | ENDFRAME | ||
| 72 | ret | ||
| 73 | CFI_ENDPROC | ||
| 74 | END(__down_failed_interruptible) | ||
| 75 | |||
| 76 | ENTRY(__down_failed_trylock) | ||
| 77 | CFI_STARTPROC | ||
| 78 | FRAME | ||
| 79 | pushl %edx | ||
| 80 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 81 | CFI_REL_OFFSET edx,0 | ||
| 82 | pushl %ecx | ||
| 83 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 84 | CFI_REL_OFFSET ecx,0 | ||
| 85 | call __down_trylock | ||
| 86 | popl %ecx | ||
| 87 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 88 | CFI_RESTORE ecx | ||
| 89 | popl %edx | ||
| 90 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 91 | CFI_RESTORE edx | ||
| 92 | ENDFRAME | ||
| 93 | ret | ||
| 94 | CFI_ENDPROC | ||
| 95 | END(__down_failed_trylock) | ||
| 96 | |||
| 97 | ENTRY(__up_wakeup) | ||
| 98 | CFI_STARTPROC | ||
| 99 | FRAME | ||
| 100 | pushl %edx | ||
| 101 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 102 | CFI_REL_OFFSET edx,0 | ||
| 103 | pushl %ecx | ||
| 104 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 105 | CFI_REL_OFFSET ecx,0 | ||
| 106 | call __up | ||
| 107 | popl %ecx | ||
| 108 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 109 | CFI_RESTORE ecx | ||
| 110 | popl %edx | ||
| 111 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 112 | CFI_RESTORE edx | ||
| 113 | ENDFRAME | ||
| 114 | ret | ||
| 115 | CFI_ENDPROC | ||
| 116 | END(__up_wakeup) | ||
| 117 | |||
| 118 | /* | ||
| 119 | * rw spinlock fallbacks | ||
| 120 | */ | ||
| 121 | #ifdef CONFIG_SMP | ||
| 122 | ENTRY(__write_lock_failed) | ||
| 123 | CFI_STARTPROC simple | ||
| 124 | FRAME | ||
| 125 | 2: LOCK_PREFIX | ||
| 126 | addl $ RW_LOCK_BIAS,(%eax) | ||
| 127 | 1: rep; nop | ||
| 128 | cmpl $ RW_LOCK_BIAS,(%eax) | ||
| 129 | jne 1b | ||
| 130 | LOCK_PREFIX | ||
| 131 | subl $ RW_LOCK_BIAS,(%eax) | ||
| 132 | jnz 2b | ||
| 133 | ENDFRAME | ||
| 134 | ret | ||
| 135 | CFI_ENDPROC | ||
| 136 | END(__write_lock_failed) | ||
| 137 | |||
| 138 | ENTRY(__read_lock_failed) | ||
| 139 | CFI_STARTPROC | ||
| 140 | FRAME | ||
| 141 | 2: LOCK_PREFIX | ||
| 142 | incl (%eax) | ||
| 143 | 1: rep; nop | ||
| 144 | cmpl $1,(%eax) | ||
| 145 | js 1b | ||
| 146 | LOCK_PREFIX | ||
| 147 | decl (%eax) | ||
| 148 | js 2b | ||
| 149 | ENDFRAME | ||
| 150 | ret | ||
| 151 | CFI_ENDPROC | ||
| 152 | END(__read_lock_failed) | ||
| 153 | |||
| 154 | #endif | ||
| 155 | |||
| 156 | /* Fix up special calling conventions */ | ||
| 157 | ENTRY(call_rwsem_down_read_failed) | ||
| 158 | CFI_STARTPROC | ||
| 159 | push %ecx | ||
| 160 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 161 | CFI_REL_OFFSET ecx,0 | ||
| 162 | push %edx | ||
| 163 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 164 | CFI_REL_OFFSET edx,0 | ||
| 165 | call rwsem_down_read_failed | ||
| 166 | pop %edx | ||
| 167 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 168 | pop %ecx | ||
| 169 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 170 | ret | ||
| 171 | CFI_ENDPROC | ||
| 172 | END(call_rwsem_down_read_failed) | ||
| 173 | |||
| 174 | ENTRY(call_rwsem_down_write_failed) | ||
| 175 | CFI_STARTPROC | ||
| 176 | push %ecx | ||
| 177 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 178 | CFI_REL_OFFSET ecx,0 | ||
| 179 | calll rwsem_down_write_failed | ||
| 180 | pop %ecx | ||
| 181 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 182 | ret | ||
| 183 | CFI_ENDPROC | ||
| 184 | END(call_rwsem_down_write_failed) | ||
| 185 | |||
| 186 | ENTRY(call_rwsem_wake) | ||
| 187 | CFI_STARTPROC | ||
| 188 | decw %dx /* do nothing if still outstanding active readers */ | ||
| 189 | jnz 1f | ||
| 190 | push %ecx | ||
| 191 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 192 | CFI_REL_OFFSET ecx,0 | ||
| 193 | call rwsem_wake | ||
| 194 | pop %ecx | ||
| 195 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 196 | 1: ret | ||
| 197 | CFI_ENDPROC | ||
| 198 | END(call_rwsem_wake) | ||
| 199 | |||
| 200 | /* Fix up special calling conventions */ | ||
| 201 | ENTRY(call_rwsem_downgrade_wake) | ||
| 202 | CFI_STARTPROC | ||
| 203 | push %ecx | ||
| 204 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 205 | CFI_REL_OFFSET ecx,0 | ||
| 206 | push %edx | ||
| 207 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 208 | CFI_REL_OFFSET edx,0 | ||
| 209 | call rwsem_downgrade_wake | ||
| 210 | pop %edx | ||
| 211 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 212 | pop %ecx | ||
| 213 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 214 | ret | ||
| 215 | CFI_ENDPROC | ||
| 216 | END(call_rwsem_downgrade_wake) | ||
| 217 | |||
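
The stubs above exist because the semaphore fast path is open-coded at each call site and only the contended case drops into C. Below is a minimal sketch of such a fast path, assuming the register contract stated in the header comment (semaphore pointer in %eax, %ecx/%edx preserved by the stub) and assuming the count is the first field of struct semaphore; the authoritative inline version lives in include/asm-i386/semaphore.h, not here.

```c
/*
 * Hedged sketch of the uncontended down() fast path that pairs with
 * __down_failed above.  Not the kernel's exact implementation: the
 * structure layout and label scheme are simplified for illustration.
 */
struct semaphore;			/* count assumed to be the first member */

static inline void example_down(struct semaphore *sem)
{
	__asm__ __volatile__(
		"lock; decl (%%eax)\n\t"	/* atomically take one unit        */
		"jns 1f\n\t"			/* result >= 0: no contention      */
		"call __down_failed\n\t"	/* contended: asm stub -> __down() */
		"1:"
		: /* no outputs */
		: "a" (sem)			/* pointer in %eax, as the stub expects */
		: "memory");
}
```
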
diff --git a/arch/i386/mach-generic/bigsmp.c b/arch/i386/mach-generic/bigsmp.c index ef7a6e6fcb9f..33d9f93557ba 100644 --- a/arch/i386/mach-generic/bigsmp.c +++ b/arch/i386/mach-generic/bigsmp.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #define APIC_DEFINITION 1 | 5 | #define APIC_DEFINITION 1 |
| 6 | #include <linux/threads.h> | 6 | #include <linux/threads.h> |
| 7 | #include <linux/cpumask.h> | 7 | #include <linux/cpumask.h> |
| 8 | #include <asm/smp.h> | ||
| 8 | #include <asm/mpspec.h> | 9 | #include <asm/mpspec.h> |
| 9 | #include <asm/genapic.h> | 10 | #include <asm/genapic.h> |
| 10 | #include <asm/fixmap.h> | 11 | #include <asm/fixmap.h> |
diff --git a/arch/i386/mach-generic/es7000.c b/arch/i386/mach-generic/es7000.c index 845cdd0b3593..aa144d82334d 100644 --- a/arch/i386/mach-generic/es7000.c +++ b/arch/i386/mach-generic/es7000.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | #define APIC_DEFINITION 1 | 4 | #define APIC_DEFINITION 1 |
| 5 | #include <linux/threads.h> | 5 | #include <linux/threads.h> |
| 6 | #include <linux/cpumask.h> | 6 | #include <linux/cpumask.h> |
| 7 | #include <asm/smp.h> | ||
| 7 | #include <asm/mpspec.h> | 8 | #include <asm/mpspec.h> |
| 8 | #include <asm/genapic.h> | 9 | #include <asm/genapic.h> |
| 9 | #include <asm/fixmap.h> | 10 | #include <asm/fixmap.h> |
diff --git a/arch/i386/mach-generic/probe.c b/arch/i386/mach-generic/probe.c index bcd1bcfaa723..94b1fd9cbe3c 100644 --- a/arch/i386/mach-generic/probe.c +++ b/arch/i386/mach-generic/probe.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
| 10 | #include <linux/ctype.h> | 10 | #include <linux/ctype.h> |
| 11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
| 12 | #include <linux/errno.h> | ||
| 12 | #include <asm/fixmap.h> | 13 | #include <asm/fixmap.h> |
| 13 | #include <asm/mpspec.h> | 14 | #include <asm/mpspec.h> |
| 14 | #include <asm/apicdef.h> | 15 | #include <asm/apicdef.h> |
| @@ -29,7 +30,24 @@ struct genapic *apic_probe[] __initdata = { | |||
| 29 | NULL, | 30 | NULL, |
| 30 | }; | 31 | }; |
| 31 | 32 | ||
| 32 | static int cmdline_apic; | 33 | static int cmdline_apic __initdata; |
| 34 | static int __init parse_apic(char *arg) | ||
| 35 | { | ||
| 36 | int i; | ||
| 37 | |||
| 38 | if (!arg) | ||
| 39 | return -EINVAL; | ||
| 40 | |||
| 41 | for (i = 0; apic_probe[i]; i++) { | ||
| 42 | if (!strcmp(apic_probe[i]->name, arg)) { | ||
| 43 | genapic = apic_probe[i]; | ||
| 44 | cmdline_apic = 1; | ||
| 45 | return 0; | ||
| 46 | } | ||
| 47 | } | ||
| 48 | return -ENOENT; | ||
| 49 | } | ||
| 50 | early_param("apic", parse_apic); | ||
| 33 | 51 | ||
| 34 | void __init generic_bigsmp_probe(void) | 52 | void __init generic_bigsmp_probe(void) |
| 35 | { | 53 | { |
| @@ -48,40 +66,20 @@ void __init generic_bigsmp_probe(void) | |||
| 48 | } | 66 | } |
| 49 | } | 67 | } |
| 50 | 68 | ||
| 51 | void __init generic_apic_probe(char *command_line) | 69 | void __init generic_apic_probe(void) |
| 52 | { | 70 | { |
| 53 | char *s; | 71 | if (!cmdline_apic) { |
| 54 | int i; | 72 | int i; |
| 55 | int changed = 0; | 73 | for (i = 0; apic_probe[i]; i++) { |
| 56 | 74 | if (apic_probe[i]->probe()) { | |
| 57 | s = strstr(command_line, "apic="); | ||
| 58 | if (s && (s == command_line || isspace(s[-1]))) { | ||
| 59 | char *p = strchr(s, ' '), old; | ||
| 60 | if (!p) | ||
| 61 | p = strchr(s, '\0'); | ||
| 62 | old = *p; | ||
| 63 | *p = 0; | ||
| 64 | for (i = 0; !changed && apic_probe[i]; i++) { | ||
| 65 | if (!strcmp(apic_probe[i]->name, s+5)) { | ||
| 66 | changed = 1; | ||
| 67 | genapic = apic_probe[i]; | 75 | genapic = apic_probe[i]; |
| 76 | break; | ||
| 68 | } | 77 | } |
| 69 | } | 78 | } |
| 70 | if (!changed) | 79 | /* Not visible without early console */ |
| 71 | printk(KERN_ERR "Unknown genapic `%s' specified.\n", s); | 80 | if (!apic_probe[i]) |
| 72 | *p = old; | 81 | panic("Didn't find an APIC driver"); |
| 73 | cmdline_apic = changed; | ||
| 74 | } | ||
| 75 | for (i = 0; !changed && apic_probe[i]; i++) { | ||
| 76 | if (apic_probe[i]->probe()) { | ||
| 77 | changed = 1; | ||
| 78 | genapic = apic_probe[i]; | ||
| 79 | } | ||
| 80 | } | 82 | } |
| 81 | /* Not visible without early console */ | ||
| 82 | if (!changed) | ||
| 83 | panic("Didn't find an APIC driver"); | ||
| 84 | |||
| 85 | printk(KERN_INFO "Using APIC driver %s\n", genapic->name); | 83 | printk(KERN_INFO "Using APIC driver %s\n", genapic->name); |
| 86 | } | 84 | } |
| 87 | 85 | ||
| @@ -119,7 +117,9 @@ int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
| 119 | return 0; | 117 | return 0; |
| 120 | } | 118 | } |
| 121 | 119 | ||
| 120 | #ifdef CONFIG_SMP | ||
| 122 | int hard_smp_processor_id(void) | 121 | int hard_smp_processor_id(void) |
| 123 | { | 122 | { |
| 124 | return genapic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID)); | 123 | return genapic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID)); |
| 125 | } | 124 | } |
| 125 | #endif | ||
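
The strstr()-based command-line scan is gone; "apic=" is now handled through the generic early_param() hook, which hands the handler only the text after the '=' and expects 0 on success or a negative errno. A small sketch of the same pattern for a made-up parameter follows (the name and the modes are hypothetical; only the calling convention mirrors parse_apic() above).

```c
#include <linux/init.h>
#include <linux/string.h>
#include <linux/errno.h>

static int example_mode __initdata;

/* invoked very early, before the ordinary __setup() parameters are parsed */
static int __init parse_exampleparam(char *arg)
{
	if (!arg)
		return -EINVAL;		/* "exampleparam" given without a value */
	if (!strcmp(arg, "fast"))
		example_mode = 1;
	else if (!strcmp(arg, "slow"))
		example_mode = 0;
	else
		return -EINVAL;
	return 0;
}
early_param("exampleparam", parse_exampleparam);
```

With the real hook, booting with for example apic=bigsmp selects that driver directly and sets cmdline_apic, so generic_apic_probe() skips the probe loop.
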
diff --git a/arch/i386/mach-generic/summit.c b/arch/i386/mach-generic/summit.c index b73501ddd653..f7e5d66648dc 100644 --- a/arch/i386/mach-generic/summit.c +++ b/arch/i386/mach-generic/summit.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | #define APIC_DEFINITION 1 | 4 | #define APIC_DEFINITION 1 |
| 5 | #include <linux/threads.h> | 5 | #include <linux/threads.h> |
| 6 | #include <linux/cpumask.h> | 6 | #include <linux/cpumask.h> |
| 7 | #include <asm/smp.h> | ||
| 7 | #include <asm/mpspec.h> | 8 | #include <asm/mpspec.h> |
| 8 | #include <asm/genapic.h> | 9 | #include <asm/genapic.h> |
| 9 | #include <asm/fixmap.h> | 10 | #include <asm/fixmap.h> |
diff --git a/arch/i386/mach-voyager/voyager_thread.c b/arch/i386/mach-voyager/voyager_thread.c index 50f6de6ff64d..f39887359e8e 100644 --- a/arch/i386/mach-voyager/voyager_thread.c +++ b/arch/i386/mach-voyager/voyager_thread.c | |||
| @@ -130,7 +130,6 @@ thread(void *unused) | |||
| 130 | init_timer(&wakeup_timer); | 130 | init_timer(&wakeup_timer); |
| 131 | 131 | ||
| 132 | sigfillset(&current->blocked); | 132 |
| 133 | current->signal->tty = NULL; | ||
| 134 | 133 | ||
| 135 | printk(KERN_NOTICE "Voyager starting monitor thread\n"); | 134 | printk(KERN_NOTICE "Voyager starting monitor thread\n"); |
| 136 | 135 | ||
diff --git a/arch/i386/mm/boot_ioremap.c b/arch/i386/mm/boot_ioremap.c index 5d44f4f5ff59..4de11f508c3a 100644 --- a/arch/i386/mm/boot_ioremap.c +++ b/arch/i386/mm/boot_ioremap.c | |||
| @@ -29,8 +29,11 @@ | |||
| 29 | */ | 29 | */ |
| 30 | 30 | ||
| 31 | #define BOOT_PTE_PTRS (PTRS_PER_PTE*2) | 31 | #define BOOT_PTE_PTRS (PTRS_PER_PTE*2) |
| 32 | #define boot_pte_index(address) \ | 32 | |
| 33 | (((address) >> PAGE_SHIFT) & (BOOT_PTE_PTRS - 1)) | 33 | static unsigned long boot_pte_index(unsigned long vaddr) |
| 34 | { | ||
| 35 | return __pa(vaddr) >> PAGE_SHIFT; | ||
| 36 | } | ||
| 34 | 37 | ||
| 35 | static inline boot_pte_t* boot_vaddr_to_pte(void *address) | 38 | static inline boot_pte_t* boot_vaddr_to_pte(void *address) |
| 36 | { | 39 | { |
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c index 7c392dc553b8..51e3739dd227 100644 --- a/arch/i386/mm/discontig.c +++ b/arch/i386/mm/discontig.c | |||
| @@ -117,7 +117,8 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); | |||
| 117 | 117 | ||
| 118 | void *node_remap_end_vaddr[MAX_NUMNODES]; | 118 | void *node_remap_end_vaddr[MAX_NUMNODES]; |
| 119 | void *node_remap_alloc_vaddr[MAX_NUMNODES]; | 119 | void *node_remap_alloc_vaddr[MAX_NUMNODES]; |
| 120 | 120 | static unsigned long kva_start_pfn; | |
| 121 | static unsigned long kva_pages; | ||
| 121 | /* | 122 | /* |
| 122 | * FLAT - support for basic PC memory model with discontig enabled, essentially | 123 | * FLAT - support for basic PC memory model with discontig enabled, essentially |
| 123 | * a single node with all available processors in it with a flat | 124 | * a single node with all available processors in it with a flat |
| @@ -156,21 +157,6 @@ static void __init find_max_pfn_node(int nid) | |||
| 156 | BUG(); | 157 | BUG(); |
| 157 | } | 158 | } |
| 158 | 159 | ||
| 159 | /* Find the owning node for a pfn. */ | ||
| 160 | int early_pfn_to_nid(unsigned long pfn) | ||
| 161 | { | ||
| 162 | int nid; | ||
| 163 | |||
| 164 | for_each_node(nid) { | ||
| 165 | if (node_end_pfn[nid] == 0) | ||
| 166 | break; | ||
| 167 | if (node_start_pfn[nid] <= pfn && node_end_pfn[nid] >= pfn) | ||
| 168 | return nid; | ||
| 169 | } | ||
| 170 | |||
| 171 | return 0; | ||
| 172 | } | ||
| 173 | |||
| 174 | /* | 160 | /* |
| 175 | * Allocate memory for the pg_data_t for this node via a crude pre-bootmem | 161 | * Allocate memory for the pg_data_t for this node via a crude pre-bootmem |
| 176 | * method. For node zero take this from the bottom of memory, for | 162 | * method. For node zero take this from the bottom of memory, for |
| @@ -226,6 +212,8 @@ static unsigned long calculate_numa_remap_pages(void) | |||
| 226 | unsigned long pfn; | 212 | unsigned long pfn; |
| 227 | 213 | ||
| 228 | for_each_online_node(nid) { | 214 | for_each_online_node(nid) { |
| 215 | unsigned old_end_pfn = node_end_pfn[nid]; | ||
| 216 | |||
| 229 | /* | 217 | /* |
| 230 | * The acpi/srat node info can show hot-add memory zones | 218 |
| 231 | * where memory could be added but not currently present. | 219 | * where memory could be added but not currently present. |
| @@ -275,6 +263,7 @@ static unsigned long calculate_numa_remap_pages(void) | |||
| 275 | 263 | ||
| 276 | node_end_pfn[nid] -= size; | 264 | node_end_pfn[nid] -= size; |
| 277 | node_remap_start_pfn[nid] = node_end_pfn[nid]; | 265 | node_remap_start_pfn[nid] = node_end_pfn[nid]; |
| 266 | shrink_active_range(nid, old_end_pfn, node_end_pfn[nid]); | ||
| 278 | } | 267 | } |
| 279 | printk("Reserving total of %ld pages for numa KVA remap\n", | 268 | printk("Reserving total of %ld pages for numa KVA remap\n", |
| 280 | reserve_pages); | 269 | reserve_pages); |
| @@ -286,7 +275,6 @@ unsigned long __init setup_memory(void) | |||
| 286 | { | 275 | { |
| 287 | int nid; | 276 | int nid; |
| 288 | unsigned long system_start_pfn, system_max_low_pfn; | 277 | unsigned long system_start_pfn, system_max_low_pfn; |
| 289 | unsigned long reserve_pages; | ||
| 290 | 278 | ||
| 291 | /* | 279 | /* |
| 292 | * When mapping a NUMA machine we allocate the node_mem_map arrays | 280 | * When mapping a NUMA machine we allocate the node_mem_map arrays |
| @@ -298,14 +286,23 @@ unsigned long __init setup_memory(void) | |||
| 298 | find_max_pfn(); | 286 | find_max_pfn(); |
| 299 | get_memcfg_numa(); | 287 | get_memcfg_numa(); |
| 300 | 288 | ||
| 301 | reserve_pages = calculate_numa_remap_pages(); | 289 | kva_pages = calculate_numa_remap_pages(); |
| 302 | 290 | ||
| 303 | /* partially used pages are not usable - thus round upwards */ | 291 | /* partially used pages are not usable - thus round upwards */ |
| 304 | system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end); | 292 | system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end); |
| 305 | 293 | ||
| 306 | system_max_low_pfn = max_low_pfn = find_max_low_pfn() - reserve_pages; | 294 | kva_start_pfn = find_max_low_pfn() - kva_pages; |
| 307 | printk("reserve_pages = %ld find_max_low_pfn() ~ %ld\n", | 295 | |
| 308 | reserve_pages, max_low_pfn + reserve_pages); | 296 | #ifdef CONFIG_BLK_DEV_INITRD |
| 297 | /* Numa kva area is below the initrd */ | ||
| 298 | if (LOADER_TYPE && INITRD_START) | ||
| 299 | kva_start_pfn = PFN_DOWN(INITRD_START) - kva_pages; | ||
| 300 | #endif | ||
| 301 | kva_start_pfn -= kva_start_pfn & (PTRS_PER_PTE-1); | ||
| 302 | |||
| 303 | system_max_low_pfn = max_low_pfn = find_max_low_pfn(); | ||
| 304 | printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n", | ||
| 305 | kva_start_pfn, max_low_pfn); | ||
| 309 | printk("max_pfn = %ld\n", max_pfn); | 306 | printk("max_pfn = %ld\n", max_pfn); |
| 310 | #ifdef CONFIG_HIGHMEM | 307 | #ifdef CONFIG_HIGHMEM |
| 311 | highstart_pfn = highend_pfn = max_pfn; | 308 | highstart_pfn = highend_pfn = max_pfn; |
| @@ -313,6 +310,11 @@ unsigned long __init setup_memory(void) | |||
| 313 | highstart_pfn = system_max_low_pfn; | 310 | highstart_pfn = system_max_low_pfn; |
| 314 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", | 311 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", |
| 315 | pages_to_mb(highend_pfn - highstart_pfn)); | 312 | pages_to_mb(highend_pfn - highstart_pfn)); |
| 313 | num_physpages = highend_pfn; | ||
| 314 | high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; | ||
| 315 | #else | ||
| 316 | num_physpages = system_max_low_pfn; | ||
| 317 | high_memory = (void *) __va(system_max_low_pfn * PAGE_SIZE - 1) + 1; | ||
| 316 | #endif | 318 | #endif |
| 317 | printk(KERN_NOTICE "%ldMB LOWMEM available.\n", | 319 | printk(KERN_NOTICE "%ldMB LOWMEM available.\n", |
| 318 | pages_to_mb(system_max_low_pfn)); | 320 | pages_to_mb(system_max_low_pfn)); |
| @@ -323,7 +325,7 @@ unsigned long __init setup_memory(void) | |||
| 323 | (ulong) pfn_to_kaddr(max_low_pfn)); | 325 | (ulong) pfn_to_kaddr(max_low_pfn)); |
| 324 | for_each_online_node(nid) { | 326 | for_each_online_node(nid) { |
| 325 | node_remap_start_vaddr[nid] = pfn_to_kaddr( | 327 | node_remap_start_vaddr[nid] = pfn_to_kaddr( |
| 326 | highstart_pfn + node_remap_offset[nid]); | 328 | kva_start_pfn + node_remap_offset[nid]); |
| 327 | /* Init the node remap allocator */ | 329 | /* Init the node remap allocator */ |
| 328 | node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] + | 330 | node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] + |
| 329 | (node_remap_size[nid] * PAGE_SIZE); | 331 | (node_remap_size[nid] * PAGE_SIZE); |
| @@ -338,7 +340,6 @@ unsigned long __init setup_memory(void) | |||
| 338 | } | 340 | } |
| 339 | printk("High memory starts at vaddr %08lx\n", | 341 | printk("High memory starts at vaddr %08lx\n", |
| 340 | (ulong) pfn_to_kaddr(highstart_pfn)); | 342 | (ulong) pfn_to_kaddr(highstart_pfn)); |
| 341 | vmalloc_earlyreserve = reserve_pages * PAGE_SIZE; | ||
| 342 | for_each_online_node(nid) | 343 | for_each_online_node(nid) |
| 343 | find_max_pfn_node(nid); | 344 | find_max_pfn_node(nid); |
| 344 | 345 | ||
| @@ -348,48 +349,30 @@ unsigned long __init setup_memory(void) | |||
| 348 | return max_low_pfn; | 349 | return max_low_pfn; |
| 349 | } | 350 | } |
| 350 | 351 | ||
| 352 | void __init numa_kva_reserve(void) | ||
| 353 | { | ||
| 354 | reserve_bootmem(PFN_PHYS(kva_start_pfn),PFN_PHYS(kva_pages)); | ||
| 355 | } | ||
| 356 | |||
| 351 | void __init zone_sizes_init(void) | 357 | void __init zone_sizes_init(void) |
| 352 | { | 358 | { |
| 353 | int nid; | 359 | int nid; |
| 354 | 360 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { | |
| 355 | 361 | virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT, | |
| 356 | for_each_online_node(nid) { | 362 | max_low_pfn, |
| 357 | unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; | 363 | highend_pfn |
| 358 | unsigned long *zholes_size; | 364 | }; |
| 359 | unsigned int max_dma; | 365 | |
| 360 | 366 | /* If SRAT has not registered memory, register it now */ | |
| 361 | unsigned long low = max_low_pfn; | 367 | if (find_max_pfn_with_active_regions() == 0) { |
| 362 | unsigned long start = node_start_pfn[nid]; | 368 | for_each_online_node(nid) { |
| 363 | unsigned long high = node_end_pfn[nid]; | 369 | if (node_has_online_mem(nid)) |
| 364 | 370 | add_active_range(nid, node_start_pfn[nid], | |
| 365 | max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; | 371 | node_end_pfn[nid]); |
| 366 | |||
| 367 | if (node_has_online_mem(nid)){ | ||
| 368 | if (start > low) { | ||
| 369 | #ifdef CONFIG_HIGHMEM | ||
| 370 | BUG_ON(start > high); | ||
| 371 | zones_size[ZONE_HIGHMEM] = high - start; | ||
| 372 | #endif | ||
| 373 | } else { | ||
| 374 | if (low < max_dma) | ||
| 375 | zones_size[ZONE_DMA] = low; | ||
| 376 | else { | ||
| 377 | BUG_ON(max_dma > low); | ||
| 378 | BUG_ON(low > high); | ||
| 379 | zones_size[ZONE_DMA] = max_dma; | ||
| 380 | zones_size[ZONE_NORMAL] = low - max_dma; | ||
| 381 | #ifdef CONFIG_HIGHMEM | ||
| 382 | zones_size[ZONE_HIGHMEM] = high - low; | ||
| 383 | #endif | ||
| 384 | } | ||
| 385 | } | ||
| 386 | } | 372 | } |
| 387 | |||
| 388 | zholes_size = get_zholes_size(nid); | ||
| 389 | |||
| 390 | free_area_init_node(nid, NODE_DATA(nid), zones_size, start, | ||
| 391 | zholes_size); | ||
| 392 | } | 373 | } |
| 374 | |||
| 375 | free_area_init_nodes(max_zone_pfns); | ||
| 393 | return; | 376 | return; |
| 394 | } | 377 | } |
| 395 | 378 | ||
| @@ -409,7 +392,7 @@ void __init set_highmem_pages_init(int bad_ppro) | |||
| 409 | zone_end_pfn = zone_start_pfn + zone->spanned_pages; | 392 | zone_end_pfn = zone_start_pfn + zone->spanned_pages; |
| 410 | 393 | ||
| 411 | printk("Initializing %s for node %d (%08lx:%08lx)\n", | 394 | printk("Initializing %s for node %d (%08lx:%08lx)\n", |
| 412 | zone->name, zone->zone_pgdat->node_id, | 395 | zone->name, zone_to_nid(zone), |
| 413 | zone_start_pfn, zone_end_pfn); | 396 | zone_start_pfn, zone_end_pfn); |
| 414 | 397 | ||
| 415 | for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) { | 398 | for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) { |
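
zone_sizes_init() no longer builds per-node zones_size[] arrays by hand; it registers each node's pfn range as an "active range" and lets free_area_init_nodes() size ZONE_DMA/ZONE_NORMAL/ZONE_HIGHMEM from a single array of per-zone pfn limits. Here is a hedged sketch of that sequence for a single-node box with made-up pfn values (assumes CONFIG_HIGHMEM so ZONE_HIGHMEM exists).

```c
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

static void __init example_register_memory(void)
{
	/* illustrative limits: 16MB DMA, ~896MB lowmem, 1GB total */
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {
		[ZONE_DMA]	= 4096,
		[ZONE_NORMAL]	= 229376,
		[ZONE_HIGHMEM]	= 262144,
	};

	/* node 0 owns pfns [0, 262144); a NUMA box registers one range per node */
	add_active_range(0, 0, 262144);

	/* builds the node/zone structures from the ranges registered above */
	free_area_init_nodes(max_zone_pfns);
}
```
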
diff --git a/arch/i386/mm/extable.c b/arch/i386/mm/extable.c index de03c5430abc..0ce4f22a2635 100644 --- a/arch/i386/mm/extable.c +++ b/arch/i386/mm/extable.c | |||
| @@ -11,7 +11,7 @@ int fixup_exception(struct pt_regs *regs) | |||
| 11 | const struct exception_table_entry *fixup; | 11 | const struct exception_table_entry *fixup; |
| 12 | 12 | ||
| 13 | #ifdef CONFIG_PNPBIOS | 13 | #ifdef CONFIG_PNPBIOS |
| 14 | if (unlikely((regs->xcs & ~15) == (GDT_ENTRY_PNPBIOS_BASE << 3))) | 14 | if (unlikely(SEGMENT_IS_PNP_CODE(regs->xcs))) |
| 15 | { | 15 | { |
| 16 | extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp; | 16 | extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp; |
| 17 | extern u32 pnp_bios_is_utter_crap; | 17 | extern u32 pnp_bios_is_utter_crap; |
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c index f7279468323a..5e17a3f43b41 100644 --- a/arch/i386/mm/fault.c +++ b/arch/i386/mm/fault.c | |||
| @@ -27,21 +27,24 @@ | |||
| 27 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
| 28 | #include <asm/desc.h> | 28 | #include <asm/desc.h> |
| 29 | #include <asm/kdebug.h> | 29 | #include <asm/kdebug.h> |
| 30 | #include <asm/segment.h> | ||
| 30 | 31 | ||
| 31 | extern void die(const char *,struct pt_regs *,long); | 32 | extern void die(const char *,struct pt_regs *,long); |
| 32 | 33 | ||
| 33 | #ifdef CONFIG_KPROBES | 34 | static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); |
| 34 | ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); | 35 | |
| 35 | int register_page_fault_notifier(struct notifier_block *nb) | 36 | int register_page_fault_notifier(struct notifier_block *nb) |
| 36 | { | 37 | { |
| 37 | vmalloc_sync_all(); | 38 | vmalloc_sync_all(); |
| 38 | return atomic_notifier_chain_register(&notify_page_fault_chain, nb); | 39 |
| 39 | } | 40 | } |
| 41 | EXPORT_SYMBOL_GPL(register_page_fault_notifier); | ||
| 40 | 42 | ||
| 41 | int unregister_page_fault_notifier(struct notifier_block *nb) | 43 | int unregister_page_fault_notifier(struct notifier_block *nb) |
| 42 | { | 44 | { |
| 43 | return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb); | 45 |
| 44 | } | 46 | } |
| 47 | EXPORT_SYMBOL_GPL(unregister_page_fault_notifier); | ||
| 45 | 48 | ||
| 46 | static inline int notify_page_fault(enum die_val val, const char *str, | 49 | static inline int notify_page_fault(enum die_val val, const char *str, |
| 47 | struct pt_regs *regs, long err, int trap, int sig) | 50 | struct pt_regs *regs, long err, int trap, int sig) |
| @@ -55,14 +58,6 @@ static inline int notify_page_fault(enum die_val val, const char *str, | |||
| 55 | }; | 58 | }; |
| 56 | return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args); | 59 |
| 57 | } | 60 | } |
| 58 | #else | ||
| 59 | static inline int notify_page_fault(enum die_val val, const char *str, | ||
| 60 | struct pt_regs *regs, long err, int trap, int sig) | ||
| 61 | { | ||
| 62 | return NOTIFY_DONE; | ||
| 63 | } | ||
| 64 | #endif | ||
| 65 | |||
| 66 | 61 | ||
| 67 | /* | 62 | /* |
| 68 | * Unlock any spinlocks which will prevent us from getting the | 63 | * Unlock any spinlocks which will prevent us from getting the |
| @@ -119,10 +114,10 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs, | |||
| 119 | } | 114 | } |
| 120 | 115 | ||
| 121 | /* The standard kernel/user address space limit. */ | 116 | /* The standard kernel/user address space limit. */ |
| 122 | *eip_limit = (seg & 3) ? USER_DS.seg : KERNEL_DS.seg; | 117 | *eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg; |
| 123 | 118 | ||
| 124 | /* By far the most common cases. */ | 119 | /* By far the most common cases. */ |
| 125 | if (likely(seg == __USER_CS || seg == __KERNEL_CS)) | 120 | if (likely(SEGMENT_IS_FLAT_CODE(seg))) |
| 126 | return eip; | 121 | return eip; |
| 127 | 122 | ||
| 128 | /* Check the segment exists, is within the current LDT/GDT size, | 123 | /* Check the segment exists, is within the current LDT/GDT size, |
| @@ -436,11 +431,7 @@ good_area: | |||
| 436 | write = 0; | 431 | write = 0; |
| 437 | switch (error_code & 3) { | 432 | switch (error_code & 3) { |
| 438 | default: /* 3: write, present */ | 433 | default: /* 3: write, present */ |
| 439 | #ifdef TEST_VERIFY_AREA | 434 | /* fall through */ |
| 440 | if (regs->cs == KERNEL_CS) | ||
| 441 | printk("WP fault at %08lx\n", regs->eip); | ||
| 442 | #endif | ||
| 443 | /* fall through */ | ||
| 444 | case 2: /* write, not present */ | 435 | case 2: /* write, not present */ |
| 445 | if (!(vma->vm_flags & VM_WRITE)) | 436 | if (!(vma->vm_flags & VM_WRITE)) |
| 446 | goto bad_area; | 437 | goto bad_area; |
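
register_page_fault_notifier() and its unregister pair are now built unconditionally (not only for CONFIG_KPROBES) and exported GPL, so any module can hook DIE_PAGE_FAULT events. A hedged sketch of a client follows; the module and callback are hypothetical, and only the registration API and the die_args plumbing come from the code above.

```c
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <asm/kdebug.h>

static int example_pf_notify(struct notifier_block *nb,
			     unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_PAGE_FAULT)
		printk(KERN_DEBUG "page fault, eip=%08lx err=%ld\n",
		       args->regs->eip, args->err);
	return NOTIFY_DONE;		/* never claim the fault here */
}

static struct notifier_block example_pf_nb = {
	.notifier_call = example_pf_notify,
};

static int __init example_pf_init(void)
{
	/* registration also forces vmalloc_sync_all(), as seen above */
	return register_page_fault_notifier(&example_pf_nb);
}

static void __exit example_pf_exit(void)
{
	unregister_page_fault_notifier(&example_pf_nb);
}

module_init(example_pf_init);
module_exit(example_pf_exit);
MODULE_LICENSE("GPL");
```
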
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c index b6eb4dcb8777..ba44000b9069 100644 --- a/arch/i386/mm/highmem.c +++ b/arch/i386/mm/highmem.c | |||
| @@ -54,7 +54,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type) | |||
| 54 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | 54 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; |
| 55 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); | 55 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); |
| 56 | 56 | ||
| 57 | if (vaddr < FIXADDR_START) { // FIXME | 57 | if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) { |
| 58 | dec_preempt_count(); | 58 | dec_preempt_count(); |
| 59 | preempt_check_resched(); | 59 | preempt_check_resched(); |
| 60 | return; | 60 | return; |
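
The old check let any address below FIXADDR_START fall through; the new one explicitly recognizes direct-mapped lowmem, which is what kmap_atomic() returns for non-highmem pages. A short usage sketch shows why that path matters (the helper name is made up; the kmap_atomic/kunmap_atomic pairing is the standard API).

```c
#include <linux/highmem.h>
#include <linux/string.h>

static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	/* for a lowmem page this is just page_address(page); no fixmap slot is used */
	char *src = kmap_atomic(page, KM_USER0);

	memcpy(dst, src, len);

	/* must cope with plain lowmem addresses -- the range check above */
	kunmap_atomic(src, KM_USER0);
}
```
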
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index 89e8486aac34..4a5a914b3432 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c | |||
| @@ -435,16 +435,22 @@ u64 __supported_pte_mask __read_mostly = ~_PAGE_NX; | |||
| 435 | * on Enable | 435 | * on Enable |
| 436 | * off Disable | 436 | * off Disable |
| 437 | */ | 437 | */ |
| 438 | void __init noexec_setup(const char *str) | 438 | static int __init noexec_setup(char *str) |
| 439 | { | 439 | { |
| 440 | if (!strncmp(str, "on",2) && cpu_has_nx) { | 440 | if (!str || !strcmp(str, "on")) { |
| 441 | __supported_pte_mask |= _PAGE_NX; | 441 | if (cpu_has_nx) { |
| 442 | disable_nx = 0; | 442 | __supported_pte_mask |= _PAGE_NX; |
| 443 | } else if (!strncmp(str,"off",3)) { | 443 | disable_nx = 0; |
| 444 | } | ||
| 445 | } else if (!strcmp(str,"off")) { | ||
| 444 | disable_nx = 1; | 446 | disable_nx = 1; |
| 445 | __supported_pte_mask &= ~_PAGE_NX; | 447 | __supported_pte_mask &= ~_PAGE_NX; |
| 446 | } | 448 | } else |
| 449 | return -EINVAL; | ||
| 450 | |||
| 451 | return 0; | ||
| 447 | } | 452 | } |
| 453 | early_param("noexec", noexec_setup); | ||
| 448 | 454 | ||
| 449 | int nx_enabled = 0; | 455 | int nx_enabled = 0; |
| 450 | #ifdef CONFIG_X86_PAE | 456 | #ifdef CONFIG_X86_PAE |
| @@ -552,18 +558,6 @@ static void __init test_wp_bit(void) | |||
| 552 | } | 558 | } |
| 553 | } | 559 | } |
| 554 | 560 | ||
| 555 | static void __init set_max_mapnr_init(void) | ||
| 556 | { | ||
| 557 | #ifdef CONFIG_HIGHMEM | ||
| 558 | num_physpages = highend_pfn; | ||
| 559 | #else | ||
| 560 | num_physpages = max_low_pfn; | ||
| 561 | #endif | ||
| 562 | #ifdef CONFIG_FLATMEM | ||
| 563 | max_mapnr = num_physpages; | ||
| 564 | #endif | ||
| 565 | } | ||
| 566 | |||
| 567 | static struct kcore_list kcore_mem, kcore_vmalloc; | 561 | static struct kcore_list kcore_mem, kcore_vmalloc; |
| 568 | 562 | ||
| 569 | void __init mem_init(void) | 563 | void __init mem_init(void) |
| @@ -590,14 +584,6 @@ void __init mem_init(void) | |||
| 590 | } | 584 | } |
| 591 | #endif | 585 | #endif |
| 592 | 586 | ||
| 593 | set_max_mapnr_init(); | ||
| 594 | |||
| 595 | #ifdef CONFIG_HIGHMEM | ||
| 596 | high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; | ||
| 597 | #else | ||
| 598 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; | ||
| 599 | #endif | ||
| 600 | |||
| 601 | /* this will put all low memory onto the freelists */ | 587 | /* this will put all low memory onto the freelists */ |
| 602 | totalram_pages += free_all_bootmem(); | 588 | totalram_pages += free_all_bootmem(); |
| 603 | 589 | ||
| @@ -629,6 +615,48 @@ void __init mem_init(void) | |||
| 629 | (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)) | 615 | (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)) |
| 630 | ); | 616 | ); |
| 631 | 617 | ||
| 618 | #if 1 /* double-sanity-check paranoia */ | ||
| 619 | printk("virtual kernel memory layout:\n" | ||
| 620 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
| 621 | #ifdef CONFIG_HIGHMEM | ||
| 622 | " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
| 623 | #endif | ||
| 624 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
| 625 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
| 626 | " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
| 627 | " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
| 628 | " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", | ||
| 629 | FIXADDR_START, FIXADDR_TOP, | ||
| 630 | (FIXADDR_TOP - FIXADDR_START) >> 10, | ||
| 631 | |||
| 632 | #ifdef CONFIG_HIGHMEM | ||
| 633 | PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, | ||
| 634 | (LAST_PKMAP*PAGE_SIZE) >> 10, | ||
| 635 | #endif | ||
| 636 | |||
| 637 | VMALLOC_START, VMALLOC_END, | ||
| 638 | (VMALLOC_END - VMALLOC_START) >> 20, | ||
| 639 | |||
| 640 | (unsigned long)__va(0), (unsigned long)high_memory, | ||
| 641 | ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20, | ||
| 642 | |||
| 643 | (unsigned long)&__init_begin, (unsigned long)&__init_end, | ||
| 644 | ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10, | ||
| 645 | |||
| 646 | (unsigned long)&_etext, (unsigned long)&_edata, | ||
| 647 | ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, | ||
| 648 | |||
| 649 | (unsigned long)&_text, (unsigned long)&_etext, | ||
| 650 | ((unsigned long)&_etext - (unsigned long)&_text) >> 10); | ||
| 651 | |||
| 652 | #ifdef CONFIG_HIGHMEM | ||
| 653 | BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START); | ||
| 654 | BUG_ON(VMALLOC_END > PKMAP_BASE); | ||
| 655 | #endif | ||
| 656 | BUG_ON(VMALLOC_START > VMALLOC_END); | ||
| 657 | BUG_ON((unsigned long)high_memory > VMALLOC_START); | ||
| 658 | #endif /* double-sanity-check paranoia */ | ||
| 659 | |||
| 632 | #ifdef CONFIG_X86_PAE | 660 | #ifdef CONFIG_X86_PAE |
| 633 | if (!cpu_has_pae) | 661 | if (!cpu_has_pae) |
| 634 | panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!"); | 662 | panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!"); |
| @@ -657,7 +685,7 @@ void __init mem_init(void) | |||
| 657 | int arch_add_memory(int nid, u64 start, u64 size) | 685 | int arch_add_memory(int nid, u64 start, u64 size) |
| 658 | { | 686 | { |
| 659 | struct pglist_data *pgdata = &contig_page_data; | 687 | struct pglist_data *pgdata = &contig_page_data; |
| 660 | struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1; | 688 | struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM; |
| 661 | unsigned long start_pfn = start >> PAGE_SHIFT; | 689 | unsigned long start_pfn = start >> PAGE_SHIFT; |
| 662 | unsigned long nr_pages = size >> PAGE_SHIFT; | 690 | unsigned long nr_pages = size >> PAGE_SHIFT; |
| 663 | 691 | ||
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c index bd98768d8764..10126e3f8174 100644 --- a/arch/i386/mm/pgtable.c +++ b/arch/i386/mm/pgtable.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
| 13 | #include <linux/pagemap.h> | 13 | #include <linux/pagemap.h> |
| 14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
| 15 | #include <linux/module.h> | ||
| 15 | 16 | ||
| 16 | #include <asm/system.h> | 17 | #include <asm/system.h> |
| 17 | #include <asm/pgtable.h> | 18 | #include <asm/pgtable.h> |
| @@ -60,7 +61,9 @@ void show_mem(void) | |||
| 60 | printk(KERN_INFO "%lu pages writeback\n", | 61 | printk(KERN_INFO "%lu pages writeback\n", |
| 61 | global_page_state(NR_WRITEBACK)); | 62 | global_page_state(NR_WRITEBACK)); |
| 62 | printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED)); | 63 | printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED)); |
| 63 | printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB)); | 64 | printk(KERN_INFO "%lu pages slab\n", |
| 65 | global_page_state(NR_SLAB_RECLAIMABLE) + | ||
| 66 | global_page_state(NR_SLAB_UNRECLAIMABLE)); | ||
| 64 | printk(KERN_INFO "%lu pages pagetables\n", | 67 | printk(KERN_INFO "%lu pages pagetables\n", |
| 65 | global_page_state(NR_PAGETABLE)); | 68 | global_page_state(NR_PAGETABLE)); |
| 66 | } | 69 | } |
| @@ -137,6 +140,12 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) | |||
| 137 | __flush_tlb_one(vaddr); | 140 | __flush_tlb_one(vaddr); |
| 138 | } | 141 | } |
| 139 | 142 | ||
| 143 | static int fixmaps; | ||
| 144 | #ifndef CONFIG_COMPAT_VDSO | ||
| 145 | unsigned long __FIXADDR_TOP = 0xfffff000; | ||
| 146 | EXPORT_SYMBOL(__FIXADDR_TOP); | ||
| 147 | #endif | ||
| 148 | |||
| 140 | void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags) | 149 | void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags) |
| 141 | { | 150 | { |
| 142 | unsigned long address = __fix_to_virt(idx); | 151 | unsigned long address = __fix_to_virt(idx); |
| @@ -146,6 +155,25 @@ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags) | |||
| 146 | return; | 155 | return; |
| 147 | } | 156 | } |
| 148 | set_pte_pfn(address, phys >> PAGE_SHIFT, flags); | 157 | set_pte_pfn(address, phys >> PAGE_SHIFT, flags); |
| 158 | fixmaps++; | ||
| 159 | } | ||
| 160 | |||
| 161 | /** | ||
| 162 | * reserve_top_address - reserves a hole in the top of kernel address space | ||
| 163 | * @reserve - size of hole to reserve | ||
| 164 | * | ||
| 165 | * Can be used to relocate the fixmap area and poke a hole in the top | ||
| 166 | * of kernel address space to make room for a hypervisor. | ||
| 167 | */ | ||
| 168 | void reserve_top_address(unsigned long reserve) | ||
| 169 | { | ||
| 170 | BUG_ON(fixmaps > 0); | ||
| 171 | #ifdef CONFIG_COMPAT_VDSO | ||
| 172 | BUG_ON(reserve != 0); | ||
| 173 | #else | ||
| 174 | __FIXADDR_TOP = -reserve - PAGE_SIZE; | ||
| 175 | __VMALLOC_RESERVE += reserve; | ||
| 176 | #endif | ||
| 149 | } | 177 | } |
| 150 | 178 | ||
| 151 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | 179 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
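
reserve_top_address() lets a hypervisor carve a hole out of the top of the kernel address space by lowering __FIXADDR_TOP and growing the vmalloc reserve; the BUG_ON(fixmaps > 0) means it must run before the first __set_fixmap(). A hedged sketch of a caller follows (the hook name and the 64MB figure are invented for illustration).

```c
#include <linux/init.h>

/* declaration assumed here; the function is defined in arch/i386/mm/pgtable.c above */
extern void reserve_top_address(unsigned long reserve);

#define EXAMPLE_HYPERVISOR_HOLE	(64UL * 1024 * 1024)

void __init example_hypervisor_reserve(void)
{
	/* call very early in boot, before any fixmap entry is installed */
	reserve_top_address(EXAMPLE_HYPERVISOR_HOLE);
}
```
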
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c index 5f8dc8a21bd7..3700eef78743 100644 --- a/arch/i386/oprofile/nmi_int.c +++ b/arch/i386/oprofile/nmi_int.c | |||
| @@ -17,14 +17,15 @@ | |||
| 17 | #include <asm/nmi.h> | 17 | #include <asm/nmi.h> |
| 18 | #include <asm/msr.h> | 18 | #include <asm/msr.h> |
| 19 | #include <asm/apic.h> | 19 | #include <asm/apic.h> |
| 20 | #include <asm/kdebug.h> | ||
| 20 | 21 | ||
| 21 | #include "op_counter.h" | 22 | #include "op_counter.h" |
| 22 | #include "op_x86_model.h" | 23 | #include "op_x86_model.h" |
| 23 | 24 | ||
| 24 | static struct op_x86_model_spec const * model; | 25 | static struct op_x86_model_spec const * model; |
| 25 | static struct op_msrs cpu_msrs[NR_CPUS]; | 26 | static struct op_msrs cpu_msrs[NR_CPUS]; |
| 26 | static unsigned long saved_lvtpc[NR_CPUS]; | 27 | static unsigned long saved_lvtpc[NR_CPUS]; |
| 27 | 28 | ||
| 28 | static int nmi_start(void); | 29 | static int nmi_start(void); |
| 29 | static void nmi_stop(void); | 30 | static void nmi_stop(void); |
| 30 | 31 | ||
| @@ -82,13 +83,24 @@ static void exit_driverfs(void) | |||
| 82 | #define exit_driverfs() do { } while (0) | 83 | #define exit_driverfs() do { } while (0) |
| 83 | #endif /* CONFIG_PM */ | 84 | #endif /* CONFIG_PM */ |
| 84 | 85 | ||
| 85 | 86 | static int profile_exceptions_notify(struct notifier_block *self, | |
| 86 | static int nmi_callback(struct pt_regs * regs, int cpu) | 87 | unsigned long val, void *data) |
| 87 | { | 88 | { |
| 88 | return model->check_ctrs(regs, &cpu_msrs[cpu]); | 89 | struct die_args *args = (struct die_args *)data; |
| 90 | int ret = NOTIFY_DONE; | ||
| 91 | int cpu = smp_processor_id(); | ||
| 92 | |||
| 93 | switch(val) { | ||
| 94 | case DIE_NMI: | ||
| 95 | if (model->check_ctrs(args->regs, &cpu_msrs[cpu])) | ||
| 96 | ret = NOTIFY_STOP; | ||
| 97 | break; | ||
| 98 | default: | ||
| 99 | break; | ||
| 100 | } | ||
| 101 | return ret; | ||
| 89 | } | 102 | } |
| 90 | 103 | ||
| 91 | |||
| 92 | static void nmi_cpu_save_registers(struct op_msrs * msrs) | 104 | static void nmi_cpu_save_registers(struct op_msrs * msrs) |
| 93 | { | 105 | { |
| 94 | unsigned int const nr_ctrs = model->num_counters; | 106 | unsigned int const nr_ctrs = model->num_counters; |
| @@ -98,15 +110,19 @@ static void nmi_cpu_save_registers(struct op_msrs * msrs) | |||
| 98 | unsigned int i; | 110 | unsigned int i; |
| 99 | 111 | ||
| 100 | for (i = 0; i < nr_ctrs; ++i) { | 112 | for (i = 0; i < nr_ctrs; ++i) { |
| 101 | rdmsr(counters[i].addr, | 113 | if (counters[i].addr){ |
| 102 | counters[i].saved.low, | 114 | rdmsr(counters[i].addr, |
| 103 | counters[i].saved.high); | 115 | counters[i].saved.low, |
| 116 | counters[i].saved.high); | ||
| 117 | } | ||
| 104 | } | 118 | } |
| 105 | 119 | ||
| 106 | for (i = 0; i < nr_ctrls; ++i) { | 120 | for (i = 0; i < nr_ctrls; ++i) { |
| 107 | rdmsr(controls[i].addr, | 121 | if (controls[i].addr){ |
| 108 | controls[i].saved.low, | 122 | rdmsr(controls[i].addr, |
| 109 | controls[i].saved.high); | 123 | controls[i].saved.low, |
| 124 | controls[i].saved.high); | ||
| 125 | } | ||
| 110 | } | 126 | } |
| 111 | } | 127 | } |
| 112 | 128 | ||
| @@ -170,27 +186,29 @@ static void nmi_cpu_setup(void * dummy) | |||
| 170 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 186 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
| 171 | } | 187 | } |
| 172 | 188 | ||
| 189 | static struct notifier_block profile_exceptions_nb = { | ||
| 190 | .notifier_call = profile_exceptions_notify, | ||
| 191 | .next = NULL, | ||
| 192 | .priority = 0 | ||
| 193 | }; | ||
| 173 | 194 | ||
| 174 | static int nmi_setup(void) | 195 | static int nmi_setup(void) |
| 175 | { | 196 | { |
| 197 | int err=0; | ||
| 198 | |||
| 176 | if (!allocate_msrs()) | 199 | if (!allocate_msrs()) |
| 177 | return -ENOMEM; | 200 | return -ENOMEM; |
| 178 | 201 | ||
| 179 | /* We walk a thin line between law and rape here. | 202 | if ((err = register_die_notifier(&profile_exceptions_nb))){ |
| 180 | * We need to be careful to install our NMI handler | ||
| 181 | * without actually triggering any NMIs as this will | ||
| 182 | * break the core code horrifically. | ||
| 183 | */ | ||
| 184 | if (reserve_lapic_nmi() < 0) { | ||
| 185 | free_msrs(); | 203 | free_msrs(); |
| 186 | return -EBUSY; | 204 | return err; |
| 187 | } | 205 | } |
| 206 | |||
| 188 | /* We need to serialize save and setup for HT because the subset | 207 | /* We need to serialize save and setup for HT because the subset |
| 189 | * of msrs are distinct for save and setup operations | 208 | * of msrs are distinct for save and setup operations |
| 190 | */ | 209 | */ |
| 191 | on_each_cpu(nmi_save_registers, NULL, 0, 1); | 210 | on_each_cpu(nmi_save_registers, NULL, 0, 1); |
| 192 | on_each_cpu(nmi_cpu_setup, NULL, 0, 1); | 211 | on_each_cpu(nmi_cpu_setup, NULL, 0, 1); |
| 193 | set_nmi_callback(nmi_callback); | ||
| 194 | nmi_enabled = 1; | 212 | nmi_enabled = 1; |
| 195 | return 0; | 213 | return 0; |
| 196 | } | 214 | } |
| @@ -205,15 +223,19 @@ static void nmi_restore_registers(struct op_msrs * msrs) | |||
| 205 | unsigned int i; | 223 | unsigned int i; |
| 206 | 224 | ||
| 207 | for (i = 0; i < nr_ctrls; ++i) { | 225 | for (i = 0; i < nr_ctrls; ++i) { |
| 208 | wrmsr(controls[i].addr, | 226 | if (controls[i].addr){ |
| 209 | controls[i].saved.low, | 227 | wrmsr(controls[i].addr, |
| 210 | controls[i].saved.high); | 228 | controls[i].saved.low, |
| 229 | controls[i].saved.high); | ||
| 230 | } | ||
| 211 | } | 231 | } |
| 212 | 232 | ||
| 213 | for (i = 0; i < nr_ctrs; ++i) { | 233 | for (i = 0; i < nr_ctrs; ++i) { |
| 214 | wrmsr(counters[i].addr, | 234 | if (counters[i].addr){ |
| 215 | counters[i].saved.low, | 235 | wrmsr(counters[i].addr, |
| 216 | counters[i].saved.high); | 236 | counters[i].saved.low, |
| 237 | counters[i].saved.high); | ||
| 238 | } | ||
| 217 | } | 239 | } |
| 218 | } | 240 | } |
| 219 | 241 | ||
| @@ -234,6 +256,7 @@ static void nmi_cpu_shutdown(void * dummy) | |||
| 234 | apic_write(APIC_LVTPC, saved_lvtpc[cpu]); | 256 | apic_write(APIC_LVTPC, saved_lvtpc[cpu]); |
| 235 | apic_write(APIC_LVTERR, v); | 257 | apic_write(APIC_LVTERR, v); |
| 236 | nmi_restore_registers(msrs); | 258 | nmi_restore_registers(msrs); |
| 259 | model->shutdown(msrs); | ||
| 237 | } | 260 | } |
| 238 | 261 | ||
| 239 | 262 | ||
| @@ -241,8 +264,7 @@ static void nmi_shutdown(void) | |||
| 241 | { | 264 | { |
| 242 | nmi_enabled = 0; | 265 | nmi_enabled = 0; |
| 243 | on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1); | 266 | on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1); |
| 244 | unset_nmi_callback(); | 267 | unregister_die_notifier(&profile_exceptions_nb); |
| 245 | release_lapic_nmi(); | ||
| 246 | free_msrs(); | 268 | free_msrs(); |
| 247 | } | 269 | } |
| 248 | 270 | ||
| @@ -284,6 +306,14 @@ static int nmi_create_files(struct super_block * sb, struct dentry * root) | |||
| 284 | struct dentry * dir; | 306 | struct dentry * dir; |
| 285 | char buf[4]; | 307 | char buf[4]; |
| 286 | 308 | ||
| 309 | /* quick little hack to _not_ expose a counter if it is not | ||
| 310 | * available for use. This should protect the userspace app. | ||
| 311 | * NOTE: assumes 1:1 mapping here (that counters are organized | ||
| 312 | * sequentially in their struct assignment). | ||
| 313 | */ | ||
| 314 | if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i))) | ||
| 315 | continue; | ||
| 316 | |||
| 287 | snprintf(buf, sizeof(buf), "%d", i); | 317 | snprintf(buf, sizeof(buf), "%d", i); |
| 288 | dir = oprofilefs_mkdir(sb, root, buf); | 318 | dir = oprofilefs_mkdir(sb, root, buf); |
| 289 | oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled); | 319 | oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled); |
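
The private set_nmi_callback()/reserve_lapic_nmi() hooks are replaced by the generic die-notifier chain: the oprofile handler registers with register_die_notifier() and returns NOTIFY_STOP when it has consumed a DIE_NMI event, or NOTIFY_DONE so other users (such as the NMI watchdog) still see it. A minimal sketch of that pattern outside oprofile follows; the handler and its priority are illustrative only.

```c
#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

static int example_nmi_notify(struct notifier_block *self,
			      unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_NMI)
		return NOTIFY_DONE;

	/* inspect args->regs and decide whether this NMI belongs to us */
	if (!args->regs)
		return NOTIFY_DONE;

	return NOTIFY_STOP;		/* claim the NMI, stop further handling */
}

static struct notifier_block example_nmi_nb = {
	.notifier_call	= example_nmi_notify,
	.priority	= 0,
};

static int __init example_nmi_init(void)
{
	return register_die_notifier(&example_nmi_nb);
}
```
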
diff --git a/arch/i386/oprofile/nmi_timer_int.c b/arch/i386/oprofile/nmi_timer_int.c index 930a1127bb30..abf0ba52a635 100644 --- a/arch/i386/oprofile/nmi_timer_int.c +++ b/arch/i386/oprofile/nmi_timer_int.c | |||
| @@ -17,34 +17,49 @@ | |||
| 17 | #include <asm/nmi.h> | 17 | #include <asm/nmi.h> |
| 18 | #include <asm/apic.h> | 18 | #include <asm/apic.h> |
| 19 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
| 20 | #include <asm/kdebug.h> | ||
| 20 | 21 | ||
| 21 | static int nmi_timer_callback(struct pt_regs * regs, int cpu) | 22 | static int profile_timer_exceptions_notify(struct notifier_block *self, |
| 23 | unsigned long val, void *data) | ||
| 22 | { | 24 | { |
| 23 | oprofile_add_sample(regs, 0); | 25 | struct die_args *args = (struct die_args *)data; |
| 24 | return 1; | 26 | int ret = NOTIFY_DONE; |
| 27 | |||
| 28 | switch(val) { | ||
| 29 | case DIE_NMI: | ||
| 30 | oprofile_add_sample(args->regs, 0); | ||
| 31 | ret = NOTIFY_STOP; | ||
| 32 | break; | ||
| 33 | default: | ||
| 34 | break; | ||
| 35 | } | ||
| 36 | return ret; | ||
| 25 | } | 37 | } |
| 26 | 38 | ||
| 39 | static struct notifier_block profile_timer_exceptions_nb = { | ||
| 40 | .notifier_call = profile_timer_exceptions_notify, | ||
| 41 | .next = NULL, | ||
| 42 | .priority = 0 | ||
| 43 | }; | ||
| 44 | |||
| 27 | static int timer_start(void) | 45 | static int timer_start(void) |
| 28 | { | 46 | { |
| 29 | disable_timer_nmi_watchdog(); | 47 | if (register_die_notifier(&profile_timer_exceptions_nb)) |
| 30 | set_nmi_callback(nmi_timer_callback); | 48 | return 1; |
| 31 | return 0; | 49 | return 0; |
| 32 | } | 50 | } |
| 33 | 51 | ||
| 34 | 52 | ||
| 35 | static void timer_stop(void) | 53 | static void timer_stop(void) |
| 36 | { | 54 | { |
| 37 | enable_timer_nmi_watchdog(); | 55 | unregister_die_notifier(&profile_timer_exceptions_nb); |
| 38 | unset_nmi_callback(); | ||
| 39 | synchronize_sched(); /* Allow already-started NMIs to complete. */ | 56 | synchronize_sched(); /* Allow already-started NMIs to complete. */ |
| 40 | } | 57 | } |
| 41 | 58 | ||
| 42 | 59 | ||
| 43 | int __init op_nmi_timer_init(struct oprofile_operations * ops) | 60 | int __init op_nmi_timer_init(struct oprofile_operations * ops) |
| 44 | { | 61 | { |
| 45 | extern int nmi_active; | 62 | if ((nmi_watchdog != NMI_IO_APIC) || (atomic_read(&nmi_active) <= 0)) |
| 46 | |||
| 47 | if (nmi_active <= 0) | ||
| 48 | return -ENODEV; | 63 | return -ENODEV; |
| 49 | 64 | ||
| 50 | ops->start = timer_start; | 65 | ops->start = timer_start; |
diff --git a/arch/i386/oprofile/op_model_athlon.c b/arch/i386/oprofile/op_model_athlon.c index 693bdea4a52b..3057a19e4641 100644 --- a/arch/i386/oprofile/op_model_athlon.c +++ b/arch/i386/oprofile/op_model_athlon.c | |||
| @@ -21,10 +21,12 @@ | |||
| 21 | #define NUM_COUNTERS 4 | 21 | #define NUM_COUNTERS 4 |
| 22 | #define NUM_CONTROLS 4 | 22 | #define NUM_CONTROLS 4 |
| 23 | 23 | ||
| 24 | #define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0) | ||
| 24 | #define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0) | 25 | #define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0) |
| 25 | #define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1);} while (0) | 26 | #define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1);} while (0) |
| 26 | #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) | 27 | #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) |
| 27 | 28 | ||
| 29 | #define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0) | ||
| 28 | #define CTRL_READ(l,h,msrs,c) do {rdmsr(msrs->controls[(c)].addr, (l), (h));} while (0) | 30 | #define CTRL_READ(l,h,msrs,c) do {rdmsr(msrs->controls[(c)].addr, (l), (h));} while (0) |
| 29 | #define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0) | 31 | #define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0) |
| 30 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) | 32 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) |
| @@ -40,15 +42,21 @@ static unsigned long reset_value[NUM_COUNTERS]; | |||
| 40 | 42 | ||
| 41 | static void athlon_fill_in_addresses(struct op_msrs * const msrs) | 43 | static void athlon_fill_in_addresses(struct op_msrs * const msrs) |
| 42 | { | 44 | { |
| 43 | msrs->counters[0].addr = MSR_K7_PERFCTR0; | 45 | int i; |
| 44 | msrs->counters[1].addr = MSR_K7_PERFCTR1; | 46 | |
| 45 | msrs->counters[2].addr = MSR_K7_PERFCTR2; | 47 | for (i=0; i < NUM_COUNTERS; i++) { |
| 46 | msrs->counters[3].addr = MSR_K7_PERFCTR3; | 48 | if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) |
| 47 | 49 | msrs->counters[i].addr = MSR_K7_PERFCTR0 + i; | |
| 48 | msrs->controls[0].addr = MSR_K7_EVNTSEL0; | 50 | else |
| 49 | msrs->controls[1].addr = MSR_K7_EVNTSEL1; | 51 | msrs->counters[i].addr = 0; |
| 50 | msrs->controls[2].addr = MSR_K7_EVNTSEL2; | 52 | } |
| 51 | msrs->controls[3].addr = MSR_K7_EVNTSEL3; | 53 | |
| 54 | for (i=0; i < NUM_CONTROLS; i++) { | ||
| 55 | if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) | ||
| 56 | msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i; | ||
| 57 | else | ||
| 58 | msrs->controls[i].addr = 0; | ||
| 59 | } | ||
| 52 | } | 60 | } |
| 53 | 61 | ||
| 54 | 62 | ||
| @@ -59,19 +67,23 @@ static void athlon_setup_ctrs(struct op_msrs const * const msrs) | |||
| 59 | 67 | ||
| 60 | /* clear all counters */ | 68 | /* clear all counters */ |
| 61 | for (i = 0 ; i < NUM_CONTROLS; ++i) { | 69 | for (i = 0 ; i < NUM_CONTROLS; ++i) { |
| 70 | if (unlikely(!CTRL_IS_RESERVED(msrs,i))) | ||
| 71 | continue; | ||
| 62 | CTRL_READ(low, high, msrs, i); | 72 | CTRL_READ(low, high, msrs, i); |
| 63 | CTRL_CLEAR(low); | 73 | CTRL_CLEAR(low); |
| 64 | CTRL_WRITE(low, high, msrs, i); | 74 | CTRL_WRITE(low, high, msrs, i); |
| 65 | } | 75 | } |
| 66 | 76 | ||
| 67 | /* avoid a false detection of ctr overflows in NMI handler */ | 77 | /* avoid a false detection of ctr overflows in NMI handler */ |
| 68 | for (i = 0; i < NUM_COUNTERS; ++i) { | 78 | for (i = 0; i < NUM_COUNTERS; ++i) { |
| 79 | if (unlikely(!CTR_IS_RESERVED(msrs,i))) | ||
| 80 | continue; | ||
| 69 | CTR_WRITE(1, msrs, i); | 81 | CTR_WRITE(1, msrs, i); |
| 70 | } | 82 | } |
| 71 | 83 | ||
| 72 | /* enable active counters */ | 84 | /* enable active counters */ |
| 73 | for (i = 0; i < NUM_COUNTERS; ++i) { | 85 | for (i = 0; i < NUM_COUNTERS; ++i) { |
| 74 | if (counter_config[i].enabled) { | 86 | if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs,i))) { |
| 75 | reset_value[i] = counter_config[i].count; | 87 | reset_value[i] = counter_config[i].count; |
| 76 | 88 | ||
| 77 | CTR_WRITE(counter_config[i].count, msrs, i); | 89 | CTR_WRITE(counter_config[i].count, msrs, i); |
| @@ -98,6 +110,8 @@ static int athlon_check_ctrs(struct pt_regs * const regs, | |||
| 98 | int i; | 110 | int i; |
| 99 | 111 | ||
| 100 | for (i = 0 ; i < NUM_COUNTERS; ++i) { | 112 | for (i = 0 ; i < NUM_COUNTERS; ++i) { |
| 113 | if (!reset_value[i]) | ||
| 114 | continue; | ||
| 101 | CTR_READ(low, high, msrs, i); | 115 | CTR_READ(low, high, msrs, i); |
| 102 | if (CTR_OVERFLOWED(low)) { | 116 | if (CTR_OVERFLOWED(low)) { |
| 103 | oprofile_add_sample(regs, i); | 117 | oprofile_add_sample(regs, i); |
| @@ -132,12 +146,27 @@ static void athlon_stop(struct op_msrs const * const msrs) | |||
| 132 | /* Subtle: stop on all counters to avoid race with | 146 | /* Subtle: stop on all counters to avoid race with |
| 133 | * setting our pm callback */ | 147 | * setting our pm callback */ |
| 134 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | 148 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { |
| 149 | if (!reset_value[i]) | ||
| 150 | continue; | ||
| 135 | CTRL_READ(low, high, msrs, i); | 151 | CTRL_READ(low, high, msrs, i); |
| 136 | CTRL_SET_INACTIVE(low); | 152 | CTRL_SET_INACTIVE(low); |
| 137 | CTRL_WRITE(low, high, msrs, i); | 153 | CTRL_WRITE(low, high, msrs, i); |
| 138 | } | 154 | } |
| 139 | } | 155 | } |
| 140 | 156 | ||
| 157 | static void athlon_shutdown(struct op_msrs const * const msrs) | ||
| 158 | { | ||
| 159 | int i; | ||
| 160 | |||
| 161 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | ||
| 162 | if (CTR_IS_RESERVED(msrs,i)) | ||
| 163 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); | ||
| 164 | } | ||
| 165 | for (i = 0 ; i < NUM_CONTROLS ; ++i) { | ||
| 166 | if (CTRL_IS_RESERVED(msrs,i)) | ||
| 167 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); | ||
| 168 | } | ||
| 169 | } | ||
| 141 | 170 | ||
| 142 | struct op_x86_model_spec const op_athlon_spec = { | 171 | struct op_x86_model_spec const op_athlon_spec = { |
| 143 | .num_counters = NUM_COUNTERS, | 172 | .num_counters = NUM_COUNTERS, |
| @@ -146,5 +175,6 @@ struct op_x86_model_spec const op_athlon_spec = { | |||
| 146 | .setup_ctrs = &athlon_setup_ctrs, | 175 | .setup_ctrs = &athlon_setup_ctrs, |
| 147 | .check_ctrs = &athlon_check_ctrs, | 176 | .check_ctrs = &athlon_check_ctrs, |
| 148 | .start = &athlon_start, | 177 | .start = &athlon_start, |
| 149 | .stop = &athlon_stop | 178 | .stop = &athlon_stop, |
| 179 | .shutdown = &athlon_shutdown | ||
| 150 | }; | 180 | }; |
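
Counter and event-select MSRs are now claimed through the perfctr reservation API before they are touched, so oprofile and the NMI watchdog can no longer program the same hardware; anything that could not be reserved keeps addr == 0 and is skipped by the CTR_IS_RESERVED()/CTRL_IS_RESERVED() tests. A hedged sketch of the reserve/program/release discipline for one K7 counter follows (the wrapper functions are invented; the reservation calls and MSR names are the ones used above).

```c
#include <linux/errno.h>
#include <asm/nmi.h>
#include <asm/msr.h>

static int example_claim_counter0(void)
{
	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
		return -EBUSY;			/* e.g. the NMI watchdog owns it */

	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
		release_perfctr_nmi(MSR_K7_PERFCTR0);
		return -EBUSY;
	}

	wrmsr(MSR_K7_PERFCTR0, 0, 0);		/* safe to program only once reserved */
	return 0;
}

static void example_release_counter0(void)
{
	release_evntsel_nmi(MSR_K7_EVNTSEL0);
	release_perfctr_nmi(MSR_K7_PERFCTR0);
}
```
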
diff --git a/arch/i386/oprofile/op_model_p4.c b/arch/i386/oprofile/op_model_p4.c index 7c61d357b82b..47925927b12f 100644 --- a/arch/i386/oprofile/op_model_p4.c +++ b/arch/i386/oprofile/op_model_p4.c | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | #define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2) | 32 | #define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2) |
| 33 | 33 | ||
| 34 | static unsigned int num_counters = NUM_COUNTERS_NON_HT; | 34 | static unsigned int num_counters = NUM_COUNTERS_NON_HT; |
| 35 | 35 | static unsigned int num_controls = NUM_CONTROLS_NON_HT; | |
| 36 | 36 | ||
| 37 | /* this has to be checked dynamically since the | 37 | /* this has to be checked dynamically since the |
| 38 | hyper-threadedness of a chip is discovered at | 38 | hyper-threadedness of a chip is discovered at |
| @@ -40,8 +40,10 @@ static unsigned int num_counters = NUM_COUNTERS_NON_HT; | |||
| 40 | static inline void setup_num_counters(void) | 40 | static inline void setup_num_counters(void) |
| 41 | { | 41 | { |
| 42 | #ifdef CONFIG_SMP | 42 | #ifdef CONFIG_SMP |
| 43 | if (smp_num_siblings == 2) | 43 | if (smp_num_siblings == 2){ |
| 44 | num_counters = NUM_COUNTERS_HT2; | 44 | num_counters = NUM_COUNTERS_HT2; |
| 45 | num_controls = NUM_CONTROLS_HT2; | ||
| 46 | } | ||
| 45 | #endif | 47 | #endif |
| 46 | } | 48 | } |
| 47 | 49 | ||
| @@ -97,15 +99,6 @@ static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = { | |||
| 97 | 99 | ||
| 98 | #define NUM_UNUSED_CCCRS NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT | 100 | #define NUM_UNUSED_CCCRS NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT |
| 99 | 101 | ||
| 100 | /* All cccr we don't use. */ | ||
| 101 | static int p4_unused_cccr[NUM_UNUSED_CCCRS] = { | ||
| 102 | MSR_P4_BPU_CCCR1, MSR_P4_BPU_CCCR3, | ||
| 103 | MSR_P4_MS_CCCR1, MSR_P4_MS_CCCR3, | ||
| 104 | MSR_P4_FLAME_CCCR1, MSR_P4_FLAME_CCCR3, | ||
| 105 | MSR_P4_IQ_CCCR0, MSR_P4_IQ_CCCR1, | ||
| 106 | MSR_P4_IQ_CCCR2, MSR_P4_IQ_CCCR3 | ||
| 107 | }; | ||
| 108 | |||
| 109 | /* p4 event codes in libop/op_event.h are indices into this table. */ | 102 | /* p4 event codes in libop/op_event.h are indices into this table. */ |
| 110 | 103 | ||
| 111 | static struct p4_event_binding p4_events[NUM_EVENTS] = { | 104 | static struct p4_event_binding p4_events[NUM_EVENTS] = { |
| @@ -372,6 +365,8 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
| 372 | #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) | 365 | #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) |
| 373 | #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) | 366 | #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) |
| 374 | 367 | ||
| 368 | #define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0) | ||
| 369 | #define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0) | ||
| 375 | #define CTR_READ(l,h,i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0) | 370 | #define CTR_READ(l,h,i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0) |
| 376 | #define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0) | 371 | #define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0) |
| 377 | #define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) | 372 | #define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) |
| @@ -401,29 +396,34 @@ static unsigned long reset_value[NUM_COUNTERS_NON_HT]; | |||
| 401 | static void p4_fill_in_addresses(struct op_msrs * const msrs) | 396 | static void p4_fill_in_addresses(struct op_msrs * const msrs) |
| 402 | { | 397 | { |
| 403 | unsigned int i; | 398 | unsigned int i; |
| 404 | unsigned int addr, stag; | 399 | unsigned int addr, cccraddr, stag; |
| 405 | 400 | ||
| 406 | setup_num_counters(); | 401 | setup_num_counters(); |
| 407 | stag = get_stagger(); | 402 | stag = get_stagger(); |
| 408 | 403 | ||
| 409 | /* the counter registers we pay attention to */ | 404 | /* initialize some registers */ |
| 410 | for (i = 0; i < num_counters; ++i) { | 405 | for (i = 0; i < num_counters; ++i) { |
| 411 | msrs->counters[i].addr = | 406 | msrs->counters[i].addr = 0; |
| 412 | p4_counters[VIRT_CTR(stag, i)].counter_address; | ||
| 413 | } | 407 | } |
| 414 | 408 | for (i = 0; i < num_controls; ++i) { | |
| 415 | /* FIXME: bad feeling, we don't save the 10 counters we don't use. */ | 409 | msrs->controls[i].addr = 0; |
| 416 | |||
| 417 | /* 18 CCCR registers */ | ||
| 418 | for (i = 0, addr = MSR_P4_BPU_CCCR0 + stag; | ||
| 419 | addr <= MSR_P4_IQ_CCCR5; ++i, addr += addr_increment()) { | ||
| 420 | msrs->controls[i].addr = addr; | ||
| 421 | } | 410 | } |
| 422 | 411 | ||
| 412 | /* the counter & cccr registers we pay attention to */ | ||
| 413 | for (i = 0; i < num_counters; ++i) { | ||
| 414 | addr = p4_counters[VIRT_CTR(stag, i)].counter_address; | ||
| 415 | cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address; | ||
| 416 | if (reserve_perfctr_nmi(addr)){ | ||
| 417 | msrs->counters[i].addr = addr; | ||
| 418 | msrs->controls[i].addr = cccraddr; | ||
| 419 | } | ||
| 420 | } | ||
| 421 | |||
| 423 | /* 43 ESCR registers in three or four discontiguous groups | 422 |
| 424 | for (addr = MSR_P4_BSU_ESCR0 + stag; | 423 | for (addr = MSR_P4_BSU_ESCR0 + stag; |
| 425 | addr < MSR_P4_IQ_ESCR0; ++i, addr += addr_increment()) { | 424 | addr < MSR_P4_IQ_ESCR0; ++i, addr += addr_increment()) { |
| 426 | msrs->controls[i].addr = addr; | 425 | if (reserve_evntsel_nmi(addr)) |
| 426 | msrs->controls[i].addr = addr; | ||
| 427 | } | 427 | } |
| 428 | 428 | ||
| 429 | /* no IQ_ESCR0/1 on some models, we save BSU_ESCR0/1 a second time | 429 |
| @@ -431,47 +431,57 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs) | |||
| 431 | if (boot_cpu_data.x86_model >= 0x3) { | 431 | if (boot_cpu_data.x86_model >= 0x3) { |
| 432 | for (addr = MSR_P4_BSU_ESCR0 + stag; | 432 | for (addr = MSR_P4_BSU_ESCR0 + stag; |
| 433 | addr <= MSR_P4_BSU_ESCR1; ++i, addr += addr_increment()) { | 433 | addr <= MSR_P4_BSU_ESCR1; ++i, addr += addr_increment()) { |
| 434 | msrs->controls[i].addr = addr; | 434 | if (reserve_evntsel_nmi(addr)) |
| 435 | msrs->controls[i].addr = addr; | ||
| 435 | } | 436 | } |
| 436 | } else { | 437 | } else { |
| 437 | for (addr = MSR_P4_IQ_ESCR0 + stag; | 438 | for (addr = MSR_P4_IQ_ESCR0 + stag; |
| 438 | addr <= MSR_P4_IQ_ESCR1; ++i, addr += addr_increment()) { | 439 | addr <= MSR_P4_IQ_ESCR1; ++i, addr += addr_increment()) { |
| 439 | msrs->controls[i].addr = addr; | 440 | if (reserve_evntsel_nmi(addr)) |
| 441 | msrs->controls[i].addr = addr; | ||
| 440 | } | 442 | } |
| 441 | } | 443 | } |
| 442 | 444 | ||
| 443 | for (addr = MSR_P4_RAT_ESCR0 + stag; | 445 | for (addr = MSR_P4_RAT_ESCR0 + stag; |
| 444 | addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) { | 446 | addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) { |
| 445 | msrs->controls[i].addr = addr; | 447 | if (reserve_evntsel_nmi(addr)) |
| 448 | msrs->controls[i].addr = addr; | ||
| 446 | } | 449 | } |
| 447 | 450 | ||
| 448 | for (addr = MSR_P4_MS_ESCR0 + stag; | 451 | for (addr = MSR_P4_MS_ESCR0 + stag; |
| 449 | addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) { | 452 | addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) { |
| 450 | msrs->controls[i].addr = addr; | 453 | if (reserve_evntsel_nmi(addr)) |
| 454 | msrs->controls[i].addr = addr; | ||
| 451 | } | 455 | } |
| 452 | 456 | ||
| 453 | for (addr = MSR_P4_IX_ESCR0 + stag; | 457 | for (addr = MSR_P4_IX_ESCR0 + stag; |
| 454 | addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) { | 458 | addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) { |
| 455 | msrs->controls[i].addr = addr; | 459 | if (reserve_evntsel_nmi(addr)) |
| 460 | msrs->controls[i].addr = addr; | ||
| 456 | } | 461 | } |
| 457 | 462 | ||
| 458 | /* there are 2 remaining non-contiguously located ESCRs */ | 463 | /* there are 2 remaining non-contiguously located ESCRs */ |
| 459 | 464 | ||
| 460 | if (num_counters == NUM_COUNTERS_NON_HT) { | 465 | if (num_counters == NUM_COUNTERS_NON_HT) { |
| 461 | /* standard non-HT CPUs handle both remaining ESCRs*/ | 466 | /* standard non-HT CPUs handle both remaining ESCRs*/ |
| 462 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; | 467 | if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5)) |
| 463 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR4; | 468 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; |
| 469 | if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4)) | ||
| 470 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR4; | ||
| 464 | 471 | ||
| 465 | } else if (stag == 0) { | 472 | } else if (stag == 0) { |
| 466 | /* HT CPUs give the first remainder to the even thread, as | 473 | /* HT CPUs give the first remainder to the even thread, as |
| 467 | the 32nd control register */ | 474 | the 32nd control register */ |
| 468 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR4; | 475 | if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4)) |
| 476 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR4; | ||
| 469 | 477 | ||
| 470 | } else { | 478 | } else { |
| 471 | /* and two copies of the second to the odd thread, | 479 | /* and two copies of the second to the odd thread, |
| 472 | for the 22nd and 23rd control registers */ | 480 | for the 22nd and 23rd control registers */ |
| 473 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; | 481 | if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5)) { |
| 474 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; | 482 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; |
| 483 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; | ||
| 484 | } | ||
| 475 | } | 485 | } |
| 476 | } | 486 | } |
| 477 | 487 | ||
| @@ -544,7 +554,6 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs) | |||
| 544 | { | 554 | { |
| 545 | unsigned int i; | 555 | unsigned int i; |
| 546 | unsigned int low, high; | 556 | unsigned int low, high; |
| 547 | unsigned int addr; | ||
| 548 | unsigned int stag; | 557 | unsigned int stag; |
| 549 | 558 | ||
| 550 | stag = get_stagger(); | 559 | stag = get_stagger(); |
| @@ -557,59 +566,24 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs) | |||
| 557 | 566 | ||
| 558 | /* clear the cccrs we will use */ | 567 | /* clear the cccrs we will use */ |
| 559 | for (i = 0 ; i < num_counters ; i++) { | 568 | for (i = 0 ; i < num_counters ; i++) { |
| 569 | if (unlikely(!CTRL_IS_RESERVED(msrs,i))) | ||
| 570 | continue; | ||
| 560 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); | 571 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
| 561 | CCCR_CLEAR(low); | 572 | CCCR_CLEAR(low); |
| 562 | CCCR_SET_REQUIRED_BITS(low); | 573 | CCCR_SET_REQUIRED_BITS(low); |
| 563 | wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); | 574 | wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
| 564 | } | 575 | } |
| 565 | 576 | ||
| 566 | /* clear cccrs outside our concern */ | ||
| 567 | for (i = stag ; i < NUM_UNUSED_CCCRS ; i += addr_increment()) { | ||
| 568 | rdmsr(p4_unused_cccr[i], low, high); | ||
| 569 | CCCR_CLEAR(low); | ||
| 570 | CCCR_SET_REQUIRED_BITS(low); | ||
| 571 | wrmsr(p4_unused_cccr[i], low, high); | ||
| 572 | } | ||
| 573 | |||
| 574 | /* clear all escrs (including those outside our concern) */ | 577 | /* clear all escrs (including those outside our concern) */ |
| 575 | for (addr = MSR_P4_BSU_ESCR0 + stag; | 578 | for (i = num_counters; i < num_controls; i++) { |
| 576 | addr < MSR_P4_IQ_ESCR0; addr += addr_increment()) { | 579 | if (unlikely(!CTRL_IS_RESERVED(msrs,i))) |
| 577 | wrmsr(addr, 0, 0); | 580 | continue; |
| 578 | } | 581 | wrmsr(msrs->controls[i].addr, 0, 0); |
| 579 | |||
| 580 | /* On older models clear also MSR_P4_IQ_ESCR0/1 */ | ||
| 581 | if (boot_cpu_data.x86_model < 0x3) { | ||
| 582 | wrmsr(MSR_P4_IQ_ESCR0, 0, 0); | ||
| 583 | wrmsr(MSR_P4_IQ_ESCR1, 0, 0); | ||
| 584 | } | ||
| 585 | |||
| 586 | for (addr = MSR_P4_RAT_ESCR0 + stag; | ||
| 587 | addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) { | ||
| 588 | wrmsr(addr, 0, 0); | ||
| 589 | } | ||
| 590 | |||
| 591 | for (addr = MSR_P4_MS_ESCR0 + stag; | ||
| 592 | addr <= MSR_P4_TC_ESCR1; addr += addr_increment()){ | ||
| 593 | wrmsr(addr, 0, 0); | ||
| 594 | } | ||
| 595 | |||
| 596 | for (addr = MSR_P4_IX_ESCR0 + stag; | ||
| 597 | addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()){ | ||
| 598 | wrmsr(addr, 0, 0); | ||
| 599 | } | 582 | } |
| 600 | 583 | ||
| 601 | if (num_counters == NUM_COUNTERS_NON_HT) { | ||
| 602 | wrmsr(MSR_P4_CRU_ESCR4, 0, 0); | ||
| 603 | wrmsr(MSR_P4_CRU_ESCR5, 0, 0); | ||
| 604 | } else if (stag == 0) { | ||
| 605 | wrmsr(MSR_P4_CRU_ESCR4, 0, 0); | ||
| 606 | } else { | ||
| 607 | wrmsr(MSR_P4_CRU_ESCR5, 0, 0); | ||
| 608 | } | ||
| 609 | |||
| 610 | /* setup all counters */ | 584 | /* setup all counters */ |
| 611 | for (i = 0 ; i < num_counters ; ++i) { | 585 | for (i = 0 ; i < num_counters ; ++i) { |
| 612 | if (counter_config[i].enabled) { | 586 | if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs,i))) { |
| 613 | reset_value[i] = counter_config[i].count; | 587 | reset_value[i] = counter_config[i].count; |
| 614 | pmc_setup_one_p4_counter(i); | 588 | pmc_setup_one_p4_counter(i); |
| 615 | CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); | 589 | CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); |
| @@ -696,12 +670,32 @@ static void p4_stop(struct op_msrs const * const msrs) | |||
| 696 | stag = get_stagger(); | 670 | stag = get_stagger(); |
| 697 | 671 | ||
| 698 | for (i = 0; i < num_counters; ++i) { | 672 | for (i = 0; i < num_counters; ++i) { |
| 673 | if (!reset_value[i]) | ||
| 674 | continue; | ||
| 699 | CCCR_READ(low, high, VIRT_CTR(stag, i)); | 675 | CCCR_READ(low, high, VIRT_CTR(stag, i)); |
| 700 | CCCR_SET_DISABLE(low); | 676 | CCCR_SET_DISABLE(low); |
| 701 | CCCR_WRITE(low, high, VIRT_CTR(stag, i)); | 677 | CCCR_WRITE(low, high, VIRT_CTR(stag, i)); |
| 702 | } | 678 | } |
| 703 | } | 679 | } |
| 704 | 680 | ||
| 681 | static void p4_shutdown(struct op_msrs const * const msrs) | ||
| 682 | { | ||
| 683 | int i; | ||
| 684 | |||
| 685 | for (i = 0 ; i < num_counters ; ++i) { | ||
| 686 | if (CTR_IS_RESERVED(msrs,i)) | ||
| 687 | release_perfctr_nmi(msrs->counters[i].addr); | ||
| 688 | } | ||
| 689 | /* some of the control registers are specially reserved in | ||
| 690 | * conjunction with the counter registers (hence the starting offset). | ||
| 691 | * This saves a few bits. | ||
| 692 | */ | ||
| 693 | for (i = num_counters ; i < num_controls ; ++i) { | ||
| 694 | if (CTRL_IS_RESERVED(msrs,i)) | ||
| 695 | release_evntsel_nmi(msrs->controls[i].addr); | ||
| 696 | } | ||
| 697 | } | ||
| 698 | |||
| 705 | 699 | ||
| 706 | #ifdef CONFIG_SMP | 700 | #ifdef CONFIG_SMP |
| 707 | struct op_x86_model_spec const op_p4_ht2_spec = { | 701 | struct op_x86_model_spec const op_p4_ht2_spec = { |
| @@ -711,7 +705,8 @@ struct op_x86_model_spec const op_p4_ht2_spec = { | |||
| 711 | .setup_ctrs = &p4_setup_ctrs, | 705 | .setup_ctrs = &p4_setup_ctrs, |
| 712 | .check_ctrs = &p4_check_ctrs, | 706 | .check_ctrs = &p4_check_ctrs, |
| 713 | .start = &p4_start, | 707 | .start = &p4_start, |
| 714 | .stop = &p4_stop | 708 | .stop = &p4_stop, |
| 709 | .shutdown = &p4_shutdown | ||
| 715 | }; | 710 | }; |
| 716 | #endif | 711 | #endif |
| 717 | 712 | ||
| @@ -722,5 +717,6 @@ struct op_x86_model_spec const op_p4_spec = { | |||
| 722 | .setup_ctrs = &p4_setup_ctrs, | 717 | .setup_ctrs = &p4_setup_ctrs, |
| 723 | .check_ctrs = &p4_check_ctrs, | 718 | .check_ctrs = &p4_check_ctrs, |
| 724 | .start = &p4_start, | 719 | .start = &p4_start, |
| 725 | .stop = &p4_stop | 720 | .stop = &p4_stop, |
| 721 | .shutdown = &p4_shutdown | ||
| 726 | }; | 722 | }; |
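The substance of the op_model_p4.c change is a claim/check/release discipline: an MSR address is only recorded in msrs->counters[]/controls[] after reserve_perfctr_nmi()/reserve_evntsel_nmi() succeeds, a zero address then means "not ours", and the new p4_shutdown() hands every reservation back. Slots whose reservation failed (for example because the NMI watchdog already owns that counter) are simply skipped by later loops. The following is a minimal, self-contained sketch of that flow, assuming a plain bitmap in place of the kernel's allocator; claim_msr(), release_msr() and MSR_BASE are illustrative names, not kernel symbols.

#include <stdio.h>

#define NUM_COUNTERS 4
#define MSR_BASE     0x300                    /* illustrative base, not a real MSR */

struct msr_slot { unsigned int addr; };       /* addr == 0 => not reserved */
static struct msr_slot counters[NUM_COUNTERS];
static unsigned long reserved_bitmap;         /* stand-in for the NMI-watchdog allocator */

static int claim_msr(unsigned int msr)        /* models reserve_perfctr_nmi() */
{
        unsigned int bit = msr - MSR_BASE;
        if (reserved_bitmap & (1UL << bit))
                return 0;                     /* already owned, e.g. by the NMI watchdog */
        reserved_bitmap |= 1UL << bit;
        return 1;
}

static void release_msr(unsigned int msr)     /* models release_perfctr_nmi() */
{
        reserved_bitmap &= ~(1UL << (msr - MSR_BASE));
}

static void fill_in_addresses(void)
{
        for (int i = 0; i < NUM_COUNTERS; i++)
                counters[i].addr = claim_msr(MSR_BASE + i) ? MSR_BASE + i : 0;
}

static void shutdown(void)
{
        for (int i = 0; i < NUM_COUNTERS; i++)
                if (counters[i].addr)         /* CTR_IS_RESERVED() in the patch */
                        release_msr(counters[i].addr);
}

int main(void)
{
        claim_msr(MSR_BASE + 1);              /* pretend the watchdog already holds one MSR */
        fill_in_addresses();
        for (int i = 0; i < NUM_COUNTERS; i++)
                printf("counter %d -> %#x\n", i, counters[i].addr);
        shutdown();
        return 0;
}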
diff --git a/arch/i386/oprofile/op_model_ppro.c b/arch/i386/oprofile/op_model_ppro.c index 5c3ab4b027ad..f88e05ba8eb3 100644 --- a/arch/i386/oprofile/op_model_ppro.c +++ b/arch/i386/oprofile/op_model_ppro.c | |||
| @@ -22,10 +22,12 @@ | |||
| 22 | #define NUM_COUNTERS 2 | 22 | #define NUM_COUNTERS 2 |
| 23 | #define NUM_CONTROLS 2 | 23 | #define NUM_CONTROLS 2 |
| 24 | 24 | ||
| 25 | #define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0) | ||
| 25 | #define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0) | 26 | #define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0) |
| 26 | #define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), -1);} while (0) | 27 | #define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), -1);} while (0) |
| 27 | #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) | 28 | #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) |
| 28 | 29 | ||
| 30 | #define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0) | ||
| 29 | #define CTRL_READ(l,h,msrs,c) do {rdmsr((msrs->controls[(c)].addr), (l), (h));} while (0) | 31 | #define CTRL_READ(l,h,msrs,c) do {rdmsr((msrs->controls[(c)].addr), (l), (h));} while (0) |
| 30 | #define CTRL_WRITE(l,h,msrs,c) do {wrmsr((msrs->controls[(c)].addr), (l), (h));} while (0) | 32 | #define CTRL_WRITE(l,h,msrs,c) do {wrmsr((msrs->controls[(c)].addr), (l), (h));} while (0) |
| 31 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) | 33 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) |
| @@ -41,11 +43,21 @@ static unsigned long reset_value[NUM_COUNTERS]; | |||
| 41 | 43 | ||
| 42 | static void ppro_fill_in_addresses(struct op_msrs * const msrs) | 44 | static void ppro_fill_in_addresses(struct op_msrs * const msrs) |
| 43 | { | 45 | { |
| 44 | msrs->counters[0].addr = MSR_P6_PERFCTR0; | 46 | int i; |
| 45 | msrs->counters[1].addr = MSR_P6_PERFCTR1; | 47 | |
| 48 | for (i=0; i < NUM_COUNTERS; i++) { | ||
| 49 | if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i)) | ||
| 50 | msrs->counters[i].addr = MSR_P6_PERFCTR0 + i; | ||
| 51 | else | ||
| 52 | msrs->counters[i].addr = 0; | ||
| 53 | } | ||
| 46 | 54 | ||
| 47 | msrs->controls[0].addr = MSR_P6_EVNTSEL0; | 55 | for (i=0; i < NUM_CONTROLS; i++) { |
| 48 | msrs->controls[1].addr = MSR_P6_EVNTSEL1; | 56 | if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) |
| 57 | msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i; | ||
| 58 | else | ||
| 59 | msrs->controls[i].addr = 0; | ||
| 60 | } | ||
| 49 | } | 61 | } |
| 50 | 62 | ||
| 51 | 63 | ||
| @@ -56,6 +68,8 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) | |||
| 56 | 68 | ||
| 57 | /* clear all counters */ | 69 | /* clear all counters */ |
| 58 | for (i = 0 ; i < NUM_CONTROLS; ++i) { | 70 | for (i = 0 ; i < NUM_CONTROLS; ++i) { |
| 71 | if (unlikely(!CTRL_IS_RESERVED(msrs,i))) | ||
| 72 | continue; | ||
| 59 | CTRL_READ(low, high, msrs, i); | 73 | CTRL_READ(low, high, msrs, i); |
| 60 | CTRL_CLEAR(low); | 74 | CTRL_CLEAR(low); |
| 61 | CTRL_WRITE(low, high, msrs, i); | 75 | CTRL_WRITE(low, high, msrs, i); |
| @@ -63,12 +77,14 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) | |||
| 63 | 77 | ||
| 64 | /* avoid a false detection of ctr overflows in NMI handler */ | 78 | /* avoid a false detection of ctr overflows in NMI handler */ |
| 65 | for (i = 0; i < NUM_COUNTERS; ++i) { | 79 | for (i = 0; i < NUM_COUNTERS; ++i) { |
| 80 | if (unlikely(!CTR_IS_RESERVED(msrs,i))) | ||
| 81 | continue; | ||
| 66 | CTR_WRITE(1, msrs, i); | 82 | CTR_WRITE(1, msrs, i); |
| 67 | } | 83 | } |
| 68 | 84 | ||
| 69 | /* enable active counters */ | 85 | /* enable active counters */ |
| 70 | for (i = 0; i < NUM_COUNTERS; ++i) { | 86 | for (i = 0; i < NUM_COUNTERS; ++i) { |
| 71 | if (counter_config[i].enabled) { | 87 | if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs,i))) { |
| 72 | reset_value[i] = counter_config[i].count; | 88 | reset_value[i] = counter_config[i].count; |
| 73 | 89 | ||
| 74 | CTR_WRITE(counter_config[i].count, msrs, i); | 90 | CTR_WRITE(counter_config[i].count, msrs, i); |
| @@ -81,6 +97,8 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) | |||
| 81 | CTRL_SET_UM(low, counter_config[i].unit_mask); | 97 | CTRL_SET_UM(low, counter_config[i].unit_mask); |
| 82 | CTRL_SET_EVENT(low, counter_config[i].event); | 98 | CTRL_SET_EVENT(low, counter_config[i].event); |
| 83 | CTRL_WRITE(low, high, msrs, i); | 99 | CTRL_WRITE(low, high, msrs, i); |
| 100 | } else { | ||
| 101 | reset_value[i] = 0; | ||
| 84 | } | 102 | } |
| 85 | } | 103 | } |
| 86 | } | 104 | } |
| @@ -93,6 +111,8 @@ static int ppro_check_ctrs(struct pt_regs * const regs, | |||
| 93 | int i; | 111 | int i; |
| 94 | 112 | ||
| 95 | for (i = 0 ; i < NUM_COUNTERS; ++i) { | 113 | for (i = 0 ; i < NUM_COUNTERS; ++i) { |
| 114 | if (!reset_value[i]) | ||
| 115 | continue; | ||
| 96 | CTR_READ(low, high, msrs, i); | 116 | CTR_READ(low, high, msrs, i); |
| 97 | if (CTR_OVERFLOWED(low)) { | 117 | if (CTR_OVERFLOWED(low)) { |
| 98 | oprofile_add_sample(regs, i); | 118 | oprofile_add_sample(regs, i); |
| @@ -118,18 +138,38 @@ static int ppro_check_ctrs(struct pt_regs * const regs, | |||
| 118 | static void ppro_start(struct op_msrs const * const msrs) | 138 | static void ppro_start(struct op_msrs const * const msrs) |
| 119 | { | 139 | { |
| 120 | unsigned int low,high; | 140 | unsigned int low,high; |
| 121 | CTRL_READ(low, high, msrs, 0); | 141 | |
| 122 | CTRL_SET_ACTIVE(low); | 142 | if (reset_value[0]) { |
| 123 | CTRL_WRITE(low, high, msrs, 0); | 143 | CTRL_READ(low, high, msrs, 0); |
| 144 | CTRL_SET_ACTIVE(low); | ||
| 145 | CTRL_WRITE(low, high, msrs, 0); | ||
| 146 | } | ||
| 124 | } | 147 | } |
| 125 | 148 | ||
| 126 | 149 | ||
| 127 | static void ppro_stop(struct op_msrs const * const msrs) | 150 | static void ppro_stop(struct op_msrs const * const msrs) |
| 128 | { | 151 | { |
| 129 | unsigned int low,high; | 152 | unsigned int low,high; |
| 130 | CTRL_READ(low, high, msrs, 0); | 153 | |
| 131 | CTRL_SET_INACTIVE(low); | 154 | if (reset_value[0]) { |
| 132 | CTRL_WRITE(low, high, msrs, 0); | 155 | CTRL_READ(low, high, msrs, 0); |
| 156 | CTRL_SET_INACTIVE(low); | ||
| 157 | CTRL_WRITE(low, high, msrs, 0); | ||
| 158 | } | ||
| 159 | } | ||
| 160 | |||
| 161 | static void ppro_shutdown(struct op_msrs const * const msrs) | ||
| 162 | { | ||
| 163 | int i; | ||
| 164 | |||
| 165 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | ||
| 166 | if (CTR_IS_RESERVED(msrs,i)) | ||
| 167 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); | ||
| 168 | } | ||
| 169 | for (i = 0 ; i < NUM_CONTROLS ; ++i) { | ||
| 170 | if (CTRL_IS_RESERVED(msrs,i)) | ||
| 171 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); | ||
| 172 | } | ||
| 133 | } | 173 | } |
| 134 | 174 | ||
| 135 | 175 | ||
| @@ -140,5 +180,6 @@ struct op_x86_model_spec const op_ppro_spec = { | |||
| 140 | .setup_ctrs = &ppro_setup_ctrs, | 180 | .setup_ctrs = &ppro_setup_ctrs, |
| 141 | .check_ctrs = &ppro_check_ctrs, | 181 | .check_ctrs = &ppro_check_ctrs, |
| 142 | .start = &ppro_start, | 182 | .start = &ppro_start, |
| 143 | .stop = &ppro_stop | 183 | .stop = &ppro_stop, |
| 184 | .shutdown = &ppro_shutdown | ||
| 144 | }; | 185 | }; |
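op_model_ppro.c applies the same idea with a second guard: reset_value[i] stays 0 for any counter that was not both enabled and reserved, so ppro_start(), ppro_stop() and ppro_check_ctrs() can skip those slots without touching their MSRs. A small self-contained sketch of that guard, assuming invented names (enabled[], reserved[], setup_ctrs(), check_ctrs()) rather than the kernel's:

#include <stdio.h>

#define NUM_COUNTERS 2

static unsigned long reset_value[NUM_COUNTERS];
static int enabled[NUM_COUNTERS]  = { 1, 1 };
static int reserved[NUM_COUNTERS] = { 1, 0 };     /* counter 1 lost the reservation race */

static void setup_ctrs(void)
{
        /* a slot is only armed when it is both requested and owned by us */
        for (int i = 0; i < NUM_COUNTERS; i++)
                reset_value[i] = (enabled[i] && reserved[i]) ? 100000 : 0;
}

static void check_ctrs(void)
{
        for (int i = 0; i < NUM_COUNTERS; i++) {
                if (!reset_value[i])
                        continue;                 /* never ours: don't touch its MSRs */
                printf("would read and re-arm counter %d\n", i);
        }
}

int main(void)
{
        setup_ctrs();
        check_ctrs();
        return 0;
}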
diff --git a/arch/i386/oprofile/op_x86_model.h b/arch/i386/oprofile/op_x86_model.h index 123b7e90a9ee..abb1aa95b979 100644 --- a/arch/i386/oprofile/op_x86_model.h +++ b/arch/i386/oprofile/op_x86_model.h | |||
| @@ -40,6 +40,7 @@ struct op_x86_model_spec { | |||
| 40 | struct op_msrs const * const msrs); | 40 | struct op_msrs const * const msrs); |
| 41 | void (*start)(struct op_msrs const * const msrs); | 41 | void (*start)(struct op_msrs const * const msrs); |
| 42 | void (*stop)(struct op_msrs const * const msrs); | 42 | void (*stop)(struct op_msrs const * const msrs); |
| 43 | void (*shutdown)(struct op_msrs const * const msrs); | ||
| 43 | }; | 44 | }; |
| 44 | 45 | ||
| 45 | extern struct op_x86_model_spec const op_ppro_spec; | 46 | extern struct op_x86_model_spec const op_ppro_spec; |
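The new shutdown member in op_x86_model_spec gives the generic oprofile NMI driver a per-model hook for releasing the MSR reservations made in fill_in_addresses(). A hedged sketch of how such a hook table is typically consumed; the struct below is trimmed to the hooks relevant here, and the nmi_shutdown() wrapper with its NULL check is illustrative, not a quote of the kernel's caller:

#include <stdio.h>

struct op_msrs { int dummy; };                    /* opaque for this sketch */

struct op_x86_model_spec {
        void (*start)(struct op_msrs const *msrs);
        void (*stop)(struct op_msrs const *msrs);
        void (*shutdown)(struct op_msrs const *msrs);   /* new hook */
};

static void my_shutdown(struct op_msrs const *msrs)
{
        (void)msrs;
        puts("releasing reserved counter/event-select MSRs");
}

static const struct op_x86_model_spec model = { .shutdown = my_shutdown };

/* Sketch: the generic driver calls the hook when profiling is torn down. */
static void nmi_shutdown(const struct op_x86_model_spec *m, struct op_msrs const *msrs)
{
        if (m->shutdown)                          /* a model may not provide one */
                m->shutdown(msrs);
}

int main(void)
{
        struct op_msrs msrs = { 0 };
        nmi_shutdown(&model, &msrs);
        return 0;
}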
diff --git a/arch/i386/pci/Makefile b/arch/i386/pci/Makefile index 62ad75c57e6a..1594d2f55c8f 100644 --- a/arch/i386/pci/Makefile +++ b/arch/i386/pci/Makefile | |||
| @@ -11,4 +11,4 @@ pci-y += legacy.o irq.o | |||
| 11 | pci-$(CONFIG_X86_VISWS) := visws.o fixup.o | 11 | pci-$(CONFIG_X86_VISWS) := visws.o fixup.o |
| 12 | pci-$(CONFIG_X86_NUMAQ) := numa.o irq.o | 12 | pci-$(CONFIG_X86_NUMAQ) := numa.o irq.o |
| 13 | 13 | ||
| 14 | obj-y += $(pci-y) common.o | 14 | obj-y += $(pci-y) common.o early.o |
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c index 0a362e3aeac5..68bce194e688 100644 --- a/arch/i386/pci/common.c +++ b/arch/i386/pci/common.c | |||
| @@ -242,6 +242,10 @@ char * __devinit pcibios_setup(char *str) | |||
| 242 | acpi_noirq_set(); | 242 | acpi_noirq_set(); |
| 243 | return NULL; | 243 | return NULL; |
| 244 | } | 244 | } |
| 245 | else if (!strcmp(str, "noearly")) { | ||
| 246 | pci_probe |= PCI_PROBE_NOEARLY; | ||
| 247 | return NULL; | ||
| 248 | } | ||
| 245 | #ifndef CONFIG_X86_VISWS | 249 | #ifndef CONFIG_X86_VISWS |
| 246 | else if (!strcmp(str, "usepirqmask")) { | 250 | else if (!strcmp(str, "usepirqmask")) { |
| 247 | pci_probe |= PCI_USE_PIRQ_MASK; | 251 | pci_probe |= PCI_USE_PIRQ_MASK; |
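The new "pci=noearly" option only sets a flag bit; whether early (pre-subsystem) config-space access is actually used is decided later by early_pci_allowed() in early.c, which requires type 1 access to be permitted and the noearly bit to be clear. A tiny sketch of that gate, using the flag values from pci.h; early_allowed() is an illustrative stand-in for the real function:

#include <stdio.h>

#define PCI_PROBE_CONF1   0x0001
#define PCI_PROBE_NOEARLY 0x0010

/* Sketch of the test performed by early_pci_allowed(): early config
 * access needs conf1 to be permitted and "pci=noearly" not requested. */
static int early_allowed(unsigned int probe)
{
        return (probe & (PCI_PROBE_CONF1 | PCI_PROBE_NOEARLY)) == PCI_PROBE_CONF1;
}

int main(void)
{
        printf("default:      %d\n", early_allowed(PCI_PROBE_CONF1));
        printf("pci=noearly:  %d\n", early_allowed(PCI_PROBE_CONF1 | PCI_PROBE_NOEARLY));
        return 0;
}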
diff --git a/arch/i386/pci/direct.c b/arch/i386/pci/direct.c index 5d81fb510375..5acf0b4743cf 100644 --- a/arch/i386/pci/direct.c +++ b/arch/i386/pci/direct.c | |||
| @@ -254,7 +254,16 @@ static int __init pci_check_type2(void) | |||
| 254 | return works; | 254 | return works; |
| 255 | } | 255 | } |
| 256 | 256 | ||
| 257 | void __init pci_direct_init(void) | 257 | void __init pci_direct_init(int type) |
| 258 | { | ||
| 259 | printk(KERN_INFO "PCI: Using configuration type %d\n", type); | ||
| 260 | if (type == 1) | ||
| 261 | raw_pci_ops = &pci_direct_conf1; | ||
| 262 | else | ||
| 263 | raw_pci_ops = &pci_direct_conf2; | ||
| 264 | } | ||
| 265 | |||
| 266 | int __init pci_direct_probe(void) | ||
| 258 | { | 267 | { |
| 259 | struct resource *region, *region2; | 268 | struct resource *region, *region2; |
| 260 | 269 | ||
| @@ -264,19 +273,16 @@ void __init pci_direct_init(void) | |||
| 264 | if (!region) | 273 | if (!region) |
| 265 | goto type2; | 274 | goto type2; |
| 266 | 275 | ||
| 267 | if (pci_check_type1()) { | 276 | if (pci_check_type1()) |
| 268 | printk(KERN_INFO "PCI: Using configuration type 1\n"); | 277 | return 1; |
| 269 | raw_pci_ops = &pci_direct_conf1; | ||
| 270 | return; | ||
| 271 | } | ||
| 272 | release_resource(region); | 278 | release_resource(region); |
| 273 | 279 | ||
| 274 | type2: | 280 | type2: |
| 275 | if ((pci_probe & PCI_PROBE_CONF2) == 0) | 281 | if ((pci_probe & PCI_PROBE_CONF2) == 0) |
| 276 | return; | 282 | return 0; |
| 277 | region = request_region(0xCF8, 4, "PCI conf2"); | 283 | region = request_region(0xCF8, 4, "PCI conf2"); |
| 278 | if (!region) | 284 | if (!region) |
| 279 | return; | 285 | return 0; |
| 280 | region2 = request_region(0xC000, 0x1000, "PCI conf2"); | 286 | region2 = request_region(0xC000, 0x1000, "PCI conf2"); |
| 281 | if (!region2) | 287 | if (!region2) |
| 282 | goto fail2; | 288 | goto fail2; |
| @@ -284,10 +290,11 @@ void __init pci_direct_init(void) | |||
| 284 | if (pci_check_type2()) { | 290 | if (pci_check_type2()) { |
| 285 | printk(KERN_INFO "PCI: Using configuration type 2\n"); | 291 | printk(KERN_INFO "PCI: Using configuration type 2\n"); |
| 286 | raw_pci_ops = &pci_direct_conf2; | 292 | raw_pci_ops = &pci_direct_conf2; |
| 287 | return; | 293 | return 2; |
| 288 | } | 294 | } |
| 289 | 295 | ||
| 290 | release_resource(region2); | 296 | release_resource(region2); |
| 291 | fail2: | 297 | fail2: |
| 292 | release_resource(region); | 298 | release_resource(region); |
| 299 | return 0; | ||
| 293 | } | 300 | } |
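pci_direct_probe() now separates detection from commitment: it claims the config I/O ports with request_region(), reports 0, 1 or 2 depending on which mechanism answered, and leaves installing raw_pci_ops to pci_direct_init(type). A rough sketch of that contract, with request_ports(), release_ports() and type1_works() standing in for the real resource and probe helpers:

#include <stdio.h>

static int  request_ports(const char *name) { printf("claim %s\n", name); return 1; }
static void release_ports(const char *name) { printf("release %s\n", name); }
static int  type1_works(void)               { return 1; }   /* pretend mechanism 1 responds */

static int probe(void)
{
        if (!request_ports("PCI conf1 (0xCF8-0xCFF)"))
                return 0;
        if (type1_works())
                return 1;                     /* keep the region; mechanism chosen */
        release_ports("PCI conf1 (0xCF8-0xCFF)");
        return 0;                             /* caller may try mechanism 2 next */
}

int main(void)
{
        printf("config mechanism: %d\n", probe());
        return 0;
}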
diff --git a/arch/i386/pci/early.c b/arch/i386/pci/early.c new file mode 100644 index 000000000000..713d6c866cae --- /dev/null +++ b/arch/i386/pci/early.c | |||
| @@ -0,0 +1,52 @@ | |||
| 1 | #include <linux/kernel.h> | ||
| 2 | #include <linux/pci.h> | ||
| 3 | #include <asm/pci-direct.h> | ||
| 4 | #include <asm/io.h> | ||
| 5 | #include "pci.h" | ||
| 6 | |||
| 7 | /* Direct PCI access. This is used for PCI accesses in early boot before | ||
| 8 | the PCI subsystem works. */ | ||
| 9 | |||
| 10 | #define PDprintk(x...) | ||
| 11 | |||
| 12 | u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset) | ||
| 13 | { | ||
| 14 | u32 v; | ||
| 15 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); | ||
| 16 | v = inl(0xcfc); | ||
| 17 | if (v != 0xffffffff) | ||
| 18 | PDprintk("%x reading 4 from %x: %x\n", slot, offset, v); | ||
| 19 | return v; | ||
| 20 | } | ||
| 21 | |||
| 22 | u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset) | ||
| 23 | { | ||
| 24 | u8 v; | ||
| 25 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); | ||
| 26 | v = inb(0xcfc + (offset&3)); | ||
| 27 | PDprintk("%x reading 1 from %x: %x\n", slot, offset, v); | ||
| 28 | return v; | ||
| 29 | } | ||
| 30 | |||
| 31 | u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset) | ||
| 32 | { | ||
| 33 | u16 v; | ||
| 34 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); | ||
| 35 | v = inw(0xcfc + (offset&2)); | ||
| 36 | PDprintk("%x reading 2 from %x: %x\n", slot, offset, v); | ||
| 37 | return v; | ||
| 38 | } | ||
| 39 | |||
| 40 | void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, | ||
| 41 | u32 val) | ||
| 42 | { | ||
| 43 | PDprintk("%x writing to %x: %x\n", slot, offset, val); | ||
| 44 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); | ||
| 45 | outl(val, 0xcfc); | ||
| 46 | } | ||
| 47 | |||
| 48 | int early_pci_allowed(void) | ||
| 49 | { | ||
| 50 | return (pci_probe & (PCI_PROBE_CONF1|PCI_PROBE_NOEARLY)) == | ||
| 51 | PCI_PROBE_CONF1; | ||
| 52 | } | ||
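The early.c helpers implement PCI configuration mechanism #1 by hand: a 32-bit address word is written to port 0xCF8 and the data is then transferred through 0xCFC (plus the low offset bits for byte and word accesses). A sketch of the address encoding those outl() calls use; build_conf1_addr() is an illustrative helper, not a kernel function:

#include <stdint.h>
#include <stdio.h>

/* Encoding written to 0xCF8 by the helpers above:
 *   bit 31      enable
 *   bits 23-16  bus
 *   bits 15-11  device (slot)
 *   bits 10-8   function
 *   bits 7-0    register offset (dword-aligned for 32-bit reads)
 */
static uint32_t build_conf1_addr(uint8_t bus, uint8_t slot, uint8_t func, uint8_t offset)
{
        return 0x80000000u | ((uint32_t)bus << 16) | ((uint32_t)slot << 11)
               | ((uint32_t)func << 8) | offset;
}

int main(void)
{
        /* bus 0, device 0, function 0, vendor/device ID register (offset 0) */
        printf("CONFIG_ADDRESS = %#010x\n", build_conf1_addr(0, 0, 0, 0));
        return 0;
}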
diff --git a/arch/i386/pci/init.c b/arch/i386/pci/init.c index 51087a9d9172..d028e1b05c36 100644 --- a/arch/i386/pci/init.c +++ b/arch/i386/pci/init.c | |||
| @@ -6,8 +6,13 @@ | |||
| 6 | in the right sequence from here. */ | 6 | in the right sequence from here. */ |
| 7 | static __init int pci_access_init(void) | 7 | static __init int pci_access_init(void) |
| 8 | { | 8 | { |
| 9 | int type = 0; | ||
| 10 | |||
| 11 | #ifdef CONFIG_PCI_DIRECT | ||
| 12 | type = pci_direct_probe(); | ||
| 13 | #endif | ||
| 9 | #ifdef CONFIG_PCI_MMCONFIG | 14 | #ifdef CONFIG_PCI_MMCONFIG |
| 10 | pci_mmcfg_init(); | 15 | pci_mmcfg_init(type); |
| 11 | #endif | 16 | #endif |
| 12 | if (raw_pci_ops) | 17 | if (raw_pci_ops) |
| 13 | return 0; | 18 | return 0; |
| @@ -21,7 +26,7 @@ static __init int pci_access_init(void) | |||
| 21 | * fails. | 26 | * fails. |
| 22 | */ | 27 | */ |
| 23 | #ifdef CONFIG_PCI_DIRECT | 28 | #ifdef CONFIG_PCI_DIRECT |
| 24 | pci_direct_init(); | 29 | pci_direct_init(type); |
| 25 | #endif | 30 | #endif |
| 26 | return 0; | 31 | return 0; |
| 27 | } | 32 | } |
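pci_access_init() now probes the direct mechanism first, passes the result to pci_mmcfg_init() so its E820 sanity check can be restricted to the type-1 case, and falls back to pci_direct_init(type) only if MMCONFIG did not take over. A simplified sketch of that ordering; in the real code the "did MMCONFIG take over" test is whether raw_pci_ops was set, and the stand-in functions below are illustrative:

#include <stdio.h>

static int  direct_probe(void)    { return 1; }              /* type 1 detected */
static int  mmcfg_init(int type)  { (void)type; return 0; }  /* no usable MCFG table */
static void direct_init(int type) { printf("using config type %d\n", type); }

static int pci_access_init(void)
{
        int type = direct_probe();      /* detect, but do not commit yet */

        if (mmcfg_init(type))           /* MMCONFIG preferred when usable */
                return 0;

        if (type)                       /* fall back to the probed mechanism */
                direct_init(type);
        return 0;
}

int main(void) { return pci_access_init(); }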
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c index 972180f738d9..05be8db58a8c 100644 --- a/arch/i386/pci/mmconfig.c +++ b/arch/i386/pci/mmconfig.c | |||
| @@ -151,6 +151,38 @@ static struct pci_raw_ops pci_mmcfg = { | |||
| 151 | .write = pci_mmcfg_write, | 151 | .write = pci_mmcfg_write, |
| 152 | }; | 152 | }; |
| 153 | 153 | ||
| 154 | |||
| 155 | static __init void pci_mmcfg_insert_resources(void) | ||
| 156 | { | ||
| 157 | #define PCI_MMCFG_RESOURCE_NAME_LEN 19 | ||
| 158 | int i; | ||
| 159 | struct resource *res; | ||
| 160 | char *names; | ||
| 161 | unsigned num_buses; | ||
| 162 | |||
| 163 | res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res), | ||
| 164 | pci_mmcfg_config_num, GFP_KERNEL); | ||
| 165 | |||
| 166 | if (!res) { | ||
| 167 | printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n"); | ||
| 168 | return; | ||
| 169 | } | ||
| 170 | |||
| 171 | names = (void *)&res[pci_mmcfg_config_num]; | ||
| 172 | for (i = 0; i < pci_mmcfg_config_num; i++, res++) { | ||
| 173 | num_buses = pci_mmcfg_config[i].end_bus_number - | ||
| 174 | pci_mmcfg_config[i].start_bus_number + 1; | ||
| 175 | res->name = names; | ||
| 176 | snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %u", | ||
| 177 | pci_mmcfg_config[i].pci_segment_group_number); | ||
| 178 | res->start = pci_mmcfg_config[i].base_address; | ||
| 179 | res->end = res->start + (num_buses << 20) - 1; | ||
| 180 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | ||
| 181 | insert_resource(&iomem_resource, res); | ||
| 182 | names += PCI_MMCFG_RESOURCE_NAME_LEN; | ||
| 183 | } | ||
| 184 | } | ||
| 185 | |||
| 154 | /* K8 systems have some devices (typically in the builtin northbridge) | 186 | /* K8 systems have some devices (typically in the builtin northbridge) |
| 155 | that are only accessible using type1 | 187 | that are only accessible using type1 |
| 156 | Normally this can be expressed in the MCFG by not listing them | 188 | Normally this can be expressed in the MCFG by not listing them |
| @@ -187,7 +219,9 @@ static __init void unreachable_devices(void) | |||
| 187 | } | 219 | } |
| 188 | } | 220 | } |
| 189 | 221 | ||
| 190 | void __init pci_mmcfg_init(void) | 222 | |
| 223 | |||
| 224 | void __init pci_mmcfg_init(int type) | ||
| 191 | { | 225 | { |
| 192 | if ((pci_probe & PCI_PROBE_MMCONF) == 0) | 226 | if ((pci_probe & PCI_PROBE_MMCONF) == 0) |
| 193 | return; | 227 | return; |
| @@ -198,7 +232,9 @@ void __init pci_mmcfg_init(void) | |||
| 198 | (pci_mmcfg_config[0].base_address == 0)) | 232 | (pci_mmcfg_config[0].base_address == 0)) |
| 199 | return; | 233 | return; |
| 200 | 234 | ||
| 201 | if (!e820_all_mapped(pci_mmcfg_config[0].base_address, | 235 | /* Only do this check when type 1 works. If it doesn't work |
| 236 | assume we run on a Mac and always use MCFG */ | ||
| 237 | if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address, | ||
| 202 | pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN, | 238 | pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN, |
| 203 | E820_RESERVED)) { | 239 | E820_RESERVED)) { |
| 204 | printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n", | 240 | printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n", |
| @@ -212,4 +248,5 @@ void __init pci_mmcfg_init(void) | |||
| 212 | pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; | 248 | pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; |
| 213 | 249 | ||
| 214 | unreachable_devices(); | 250 | unreachable_devices(); |
| 251 | pci_mmcfg_insert_resources(); | ||
| 215 | } | 252 | } |
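pci_mmcfg_insert_resources() registers each MMCONFIG aperture in the iomem resource tree so it shows up in /proc/iomem and cannot be reallocated. It uses a single kcalloc() whose per-element size is sizeof(struct resource) plus a 19-byte name buffer, with the name strings stored directly behind the resource array, and each aperture spans (number of buses × 1 MiB). A user-space sketch of that single-allocation layout, assuming made-up types (struct res, make_mmcfg_resources()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_LEN 19          /* "PCI MMCONFIG %u" fits with room to spare */

struct res { const char *name; unsigned long start, end; };

/* One allocation carries num entries of struct res plus num NAME_LEN-byte
 * name buffers placed right behind them, as in the patch. */
static struct res *make_mmcfg_resources(unsigned num,
                                        const unsigned long *base,
                                        const unsigned *buses)
{
        struct res *r = calloc(num, sizeof(*r) + NAME_LEN);
        char *names = (char *)&r[num];

        for (unsigned i = 0; i < num; i++, names += NAME_LEN) {
                snprintf(names, NAME_LEN, "PCI MMCONFIG %u", i);
                r[i].name  = names;
                r[i].start = base[i];
                r[i].end   = base[i] + ((unsigned long)buses[i] << 20) - 1;
        }
        return r;
}

int main(void)
{
        unsigned long base[] = { 0xe0000000UL };
        unsigned buses[]     = { 256 };          /* full 256-bus segment */
        struct res *r = make_mmcfg_resources(1, base, buses);

        printf("%s: %#lx-%#lx\n", r[0].name, r[0].start, r[0].end);
        free(r);
        return 0;
}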
diff --git a/arch/i386/pci/pci.h b/arch/i386/pci/pci.h index bf4e79335388..1814f74569c6 100644 --- a/arch/i386/pci/pci.h +++ b/arch/i386/pci/pci.h | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #define PCI_PROBE_CONF2 0x0004 | 17 | #define PCI_PROBE_CONF2 0x0004 |
| 18 | #define PCI_PROBE_MMCONF 0x0008 | 18 | #define PCI_PROBE_MMCONF 0x0008 |
| 19 | #define PCI_PROBE_MASK 0x000f | 19 | #define PCI_PROBE_MASK 0x000f |
| 20 | #define PCI_PROBE_NOEARLY 0x0010 | ||
| 20 | 21 | ||
| 21 | #define PCI_NO_SORT 0x0100 | 22 | #define PCI_NO_SORT 0x0100 |
| 22 | #define PCI_BIOS_SORT 0x0200 | 23 | #define PCI_BIOS_SORT 0x0200 |
| @@ -81,7 +82,9 @@ extern int pci_conf1_write(unsigned int seg, unsigned int bus, | |||
| 81 | extern int pci_conf1_read(unsigned int seg, unsigned int bus, | 82 | extern int pci_conf1_read(unsigned int seg, unsigned int bus, |
| 82 | unsigned int devfn, int reg, int len, u32 *value); | 83 | unsigned int devfn, int reg, int len, u32 *value); |
| 83 | 84 | ||
| 84 | extern void pci_direct_init(void); | 85 | extern int pci_direct_probe(void); |
| 86 | extern void pci_direct_init(int type); | ||
| 85 | extern void pci_pcbios_init(void); | 87 | extern void pci_pcbios_init(void); |
| 86 | extern void pci_mmcfg_init(void); | 88 | extern void pci_mmcfg_init(int type); |
| 87 | extern void pcibios_sort(void); | 89 | extern void pcibios_sort(void); |
| 90 | |||
diff --git a/arch/i386/power/swsusp.S b/arch/i386/power/swsusp.S index c893b897217f..8a2b50a0aaad 100644 --- a/arch/i386/power/swsusp.S +++ b/arch/i386/power/swsusp.S | |||
| @@ -32,7 +32,7 @@ ENTRY(swsusp_arch_resume) | |||
| 32 | movl $swsusp_pg_dir-__PAGE_OFFSET, %ecx | 32 | movl $swsusp_pg_dir-__PAGE_OFFSET, %ecx |
| 33 | movl %ecx, %cr3 | 33 | movl %ecx, %cr3 |
| 34 | 34 | ||
| 35 | movl pagedir_nosave, %edx | 35 | movl restore_pblist, %edx |
| 36 | .p2align 4,,7 | 36 | .p2align 4,,7 |
| 37 | 37 | ||
| 38 | copy_loop: | 38 | copy_loop: |
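The swsusp.S change is a rename: the resume code walks restore_pblist instead of pagedir_nosave. The copy_loop that follows the load into %edx copies each saved page back over its original location and then follows the list's next pointer until it reaches NULL. A hedged C rendering of that walk; struct pbe mirrors the suspend code's page-backup entry of this era, but treat the layout and the demo in main() as illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

struct pbe {
        unsigned long address;        /* saved copy made at suspend time */
        unsigned long orig_address;   /* where the page originally lived */
        struct pbe *next;
};

static struct pbe *restore_pblist;    /* built by the suspend core */

/* C equivalent of the copy_loop in swsusp_arch_resume(). */
static void copy_back_pages(void)
{
        for (struct pbe *p = restore_pblist; p; p = p->next)
                memcpy((void *)p->orig_address, (void *)p->address, PAGE_SIZE);
}

int main(void)
{
        char *orig = calloc(1, PAGE_SIZE), *copy = malloc(PAGE_SIZE);
        struct pbe entry = { (unsigned long)copy, (unsigned long)orig, NULL };

        memset(copy, 'x', PAGE_SIZE);         /* pretend this is the saved image */
        restore_pblist = &entry;
        copy_back_pages();
        printf("restored byte: %c\n", orig[0]);
        free(orig);
        free(copy);
        return 0;
}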
