diff options
| author | Anton Altaparmakov <aia21@hera.kernel.org> | 2006-03-23 12:05:47 -0500 |
|---|---|---|
| committer | Anton Altaparmakov <aia21@hera.kernel.org> | 2006-03-23 12:05:47 -0500 |
| commit | a05ba4561fa3ad8b64a27577d0d38c190f60f762 (patch) | |
| tree | 5eb7561113e006b7bad0bef50dec6821962b1b36 /arch | |
| parent | 74293759002aa7db0179158c20676a034614577b (diff) | |
| parent | b0e6e962992b76580f4900b166a337bad7c1e81b (diff) | |
Merge branch 'master' of /home/aia21/ntfs-2.6/
Diffstat (limited to 'arch')
69 files changed, 823 insertions, 509 deletions
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c index 30deaf1b728a..b504def3e346 100644 --- a/arch/cris/kernel/irq.c +++ b/arch/cris/kernel/irq.c | |||
| @@ -52,9 +52,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 52 | 52 | ||
| 53 | if (i == 0) { | 53 | if (i == 0) { |
| 54 | seq_printf(p, " "); | 54 | seq_printf(p, " "); |
| 55 | for (j=0; j<NR_CPUS; j++) | 55 | for_each_online_cpu(j) |
| 56 | if (cpu_online(j)) | 56 | seq_printf(p, "CPU%d ",j); |
| 57 | seq_printf(p, "CPU%d ",j); | ||
| 58 | seq_putc(p, '\n'); | 57 | seq_putc(p, '\n'); |
| 59 | } | 58 | } |
| 60 | 59 | ||
| @@ -67,9 +66,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 67 | #ifndef CONFIG_SMP | 66 | #ifndef CONFIG_SMP |
| 68 | seq_printf(p, "%10u ", kstat_irqs(i)); | 67 | seq_printf(p, "%10u ", kstat_irqs(i)); |
| 69 | #else | 68 | #else |
| 70 | for (j = 0; j < NR_CPUS; j++) | 69 | for_each_online_cpu(j) |
| 71 | if (cpu_online(j)) | 70 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); |
| 72 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | ||
| 73 | #endif | 71 | #endif |
| 74 | seq_printf(p, " %14s", irq_desc[i].handler->typename); | 72 | seq_printf(p, " %14s", irq_desc[i].handler->typename); |
| 75 | seq_printf(p, " %s", action->name); | 73 | seq_printf(p, " %s", action->name); |
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c index 27ab4c30aac6..11fa326a8f62 100644 --- a/arch/frv/kernel/irq.c +++ b/arch/frv/kernel/irq.c | |||
| @@ -75,9 +75,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 75 | switch (i) { | 75 | switch (i) { |
| 76 | case 0: | 76 | case 0: |
| 77 | seq_printf(p, " "); | 77 | seq_printf(p, " "); |
| 78 | for (j = 0; j < NR_CPUS; j++) | 78 | for_each_online_cpu(j) |
| 79 | if (cpu_online(j)) | 79 | seq_printf(p, "CPU%d ",j); |
| 80 | seq_printf(p, "CPU%d ",j); | ||
| 81 | 80 | ||
| 82 | seq_putc(p, '\n'); | 81 | seq_putc(p, '\n'); |
| 83 | break; | 82 | break; |
| @@ -100,9 +99,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 100 | #ifndef CONFIG_SMP | 99 | #ifndef CONFIG_SMP |
| 101 | seq_printf(p, "%10u ", kstat_irqs(i)); | 100 | seq_printf(p, "%10u ", kstat_irqs(i)); |
| 102 | #else | 101 | #else |
| 103 | for (j = 0; j < NR_CPUS; j++) | 102 | for_each_online_cpu(j) |
| 104 | if (cpu_online(j)) | 103 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]); |
| 105 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]); | ||
| 106 | #endif | 104 | #endif |
| 107 | 105 | ||
| 108 | level = group->sources[ix]->level - frv_irq_levels; | 106 | level = group->sources[ix]->level - frv_irq_levels; |
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index 5b1a7d46d1d9..bfea1bedcbf2 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig | |||
| @@ -80,6 +80,7 @@ config X86_VOYAGER | |||
| 80 | 80 | ||
| 81 | config X86_NUMAQ | 81 | config X86_NUMAQ |
| 82 | bool "NUMAQ (IBM/Sequent)" | 82 | bool "NUMAQ (IBM/Sequent)" |
| 83 | select SMP | ||
| 83 | select NUMA | 84 | select NUMA |
| 84 | help | 85 | help |
| 85 | This option is used for getting Linux to run on a (IBM/Sequent) NUMA | 86 | This option is used for getting Linux to run on a (IBM/Sequent) NUMA |
| @@ -400,6 +401,7 @@ choice | |||
| 400 | 401 | ||
| 401 | config NOHIGHMEM | 402 | config NOHIGHMEM |
| 402 | bool "off" | 403 | bool "off" |
| 404 | depends on !X86_NUMAQ | ||
| 403 | ---help--- | 405 | ---help--- |
| 404 | Linux can use up to 64 Gigabytes of physical memory on x86 systems. | 406 | Linux can use up to 64 Gigabytes of physical memory on x86 systems. |
| 405 | However, the address space of 32-bit x86 processors is only 4 | 407 | However, the address space of 32-bit x86 processors is only 4 |
| @@ -436,6 +438,7 @@ config NOHIGHMEM | |||
| 436 | 438 | ||
| 437 | config HIGHMEM4G | 439 | config HIGHMEM4G |
| 438 | bool "4GB" | 440 | bool "4GB" |
| 441 | depends on !X86_NUMAQ | ||
| 439 | help | 442 | help |
| 440 | Select this if you have a 32-bit processor and between 1 and 4 | 443 | Select this if you have a 32-bit processor and between 1 and 4 |
| 441 | gigabytes of physical RAM. | 444 | gigabytes of physical RAM. |
| @@ -503,10 +506,6 @@ config NUMA | |||
| 503 | default n if X86_PC | 506 | default n if X86_PC |
| 504 | default y if (X86_NUMAQ || X86_SUMMIT) | 507 | default y if (X86_NUMAQ || X86_SUMMIT) |
| 505 | 508 | ||
| 506 | # Need comments to help the hapless user trying to turn on NUMA support | ||
| 507 | comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support" | ||
| 508 | depends on X86_NUMAQ && (!HIGHMEM64G || !SMP) | ||
| 509 | |||
| 510 | comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI" | 509 | comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI" |
| 511 | depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI) | 510 | depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI) |
| 512 | 511 | ||
| @@ -660,13 +659,18 @@ config BOOT_IOREMAP | |||
| 660 | default y | 659 | default y |
| 661 | 660 | ||
| 662 | config REGPARM | 661 | config REGPARM |
| 663 | bool "Use register arguments (EXPERIMENTAL)" | 662 | bool "Use register arguments" |
| 664 | depends on EXPERIMENTAL | 663 | default y |
| 665 | default n | ||
| 666 | help | 664 | help |
| 667 | Compile the kernel with -mregparm=3. This uses a different ABI | 665 | Compile the kernel with -mregparm=3. This instructs gcc to use |
| 668 | and passes the first three arguments of a function call in registers. | 666 | a more efficient function call ABI which passes the first three |
| 669 | This will probably break binary only modules. | 667 | arguments of a function call via registers, which results in denser |
| 668 | and faster code. | ||
| 669 | |||
| 670 | If this option is disabled, then the default ABI of passing | ||
| 671 | arguments via the stack is used. | ||
| 672 | |||
| 673 | If unsure, say Y. | ||
| 670 | 674 | ||
| 671 | config SECCOMP | 675 | config SECCOMP |
| 672 | bool "Enable seccomp to safely compute untrusted bytecode" | 676 | bool "Enable seccomp to safely compute untrusted bytecode" |
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug index bf32ecc9ad04..00108ba9a78d 100644 --- a/arch/i386/Kconfig.debug +++ b/arch/i386/Kconfig.debug | |||
| @@ -31,6 +31,15 @@ config DEBUG_STACK_USAGE | |||
| 31 | 31 | ||
| 32 | This option will slow down process creation somewhat. | 32 | This option will slow down process creation somewhat. |
| 33 | 33 | ||
| 34 | config STACK_BACKTRACE_COLS | ||
| 35 | int "Stack backtraces per line" if DEBUG_KERNEL | ||
| 36 | range 1 3 | ||
| 37 | default 2 | ||
| 38 | help | ||
| 39 | Selects how many stack backtrace entries per line to display. | ||
| 40 | |||
| 41 | This can save screen space when displaying traces. | ||
| 42 | |||
| 34 | comment "Page alloc debug is incompatible with Software Suspend on i386" | 43 | comment "Page alloc debug is incompatible with Software Suspend on i386" |
| 35 | depends on DEBUG_KERNEL && SOFTWARE_SUSPEND | 44 | depends on DEBUG_KERNEL && SOFTWARE_SUSPEND |
| 36 | 45 | ||
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile index 65656c033d70..5b9ed21216cf 100644 --- a/arch/i386/kernel/Makefile +++ b/arch/i386/kernel/Makefile | |||
| @@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds | |||
| 7 | obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \ | 7 | obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \ |
| 8 | ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \ | 8 | ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \ |
| 9 | pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \ | 9 | pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \ |
| 10 | quirks.o i8237.o topology.o | 10 | quirks.o i8237.o topology.o alternative.o |
| 11 | 11 | ||
| 12 | obj-y += cpu/ | 12 | obj-y += cpu/ |
| 13 | obj-y += timers/ | 13 | obj-y += timers/ |
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c new file mode 100644 index 000000000000..5cbd6f99fb2a --- /dev/null +++ b/arch/i386/kernel/alternative.c | |||
| @@ -0,0 +1,321 @@ | |||
| 1 | #include <linux/module.h> | ||
| 2 | #include <linux/spinlock.h> | ||
| 3 | #include <linux/list.h> | ||
| 4 | #include <asm/alternative.h> | ||
| 5 | #include <asm/sections.h> | ||
| 6 | |||
| 7 | #define DEBUG 0 | ||
| 8 | #if DEBUG | ||
| 9 | # define DPRINTK(fmt, args...) printk(fmt, args) | ||
| 10 | #else | ||
| 11 | # define DPRINTK(fmt, args...) | ||
| 12 | #endif | ||
| 13 | |||
| 14 | /* Use inline assembly to define this because the nops are defined | ||
| 15 | as inline assembly strings in the include files and we cannot | ||
| 16 | get them easily into strings. */ | ||
| 17 | asm("\t.data\nintelnops: " | ||
| 18 | GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 | ||
| 19 | GENERIC_NOP7 GENERIC_NOP8); | ||
| 20 | asm("\t.data\nk8nops: " | ||
| 21 | K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 | ||
| 22 | K8_NOP7 K8_NOP8); | ||
| 23 | asm("\t.data\nk7nops: " | ||
| 24 | K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 | ||
| 25 | K7_NOP7 K7_NOP8); | ||
| 26 | |||
| 27 | extern unsigned char intelnops[], k8nops[], k7nops[]; | ||
| 28 | static unsigned char *intel_nops[ASM_NOP_MAX+1] = { | ||
| 29 | NULL, | ||
| 30 | intelnops, | ||
| 31 | intelnops + 1, | ||
| 32 | intelnops + 1 + 2, | ||
| 33 | intelnops + 1 + 2 + 3, | ||
| 34 | intelnops + 1 + 2 + 3 + 4, | ||
| 35 | intelnops + 1 + 2 + 3 + 4 + 5, | ||
| 36 | intelnops + 1 + 2 + 3 + 4 + 5 + 6, | ||
| 37 | intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | ||
| 38 | }; | ||
| 39 | static unsigned char *k8_nops[ASM_NOP_MAX+1] = { | ||
| 40 | NULL, | ||
| 41 | k8nops, | ||
| 42 | k8nops + 1, | ||
| 43 | k8nops + 1 + 2, | ||
| 44 | k8nops + 1 + 2 + 3, | ||
| 45 | k8nops + 1 + 2 + 3 + 4, | ||
| 46 | k8nops + 1 + 2 + 3 + 4 + 5, | ||
| 47 | k8nops + 1 + 2 + 3 + 4 + 5 + 6, | ||
| 48 | k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | ||
| 49 | }; | ||
| 50 | static unsigned char *k7_nops[ASM_NOP_MAX+1] = { | ||
| 51 | NULL, | ||
| 52 | k7nops, | ||
| 53 | k7nops + 1, | ||
| 54 | k7nops + 1 + 2, | ||
| 55 | k7nops + 1 + 2 + 3, | ||
| 56 | k7nops + 1 + 2 + 3 + 4, | ||
| 57 | k7nops + 1 + 2 + 3 + 4 + 5, | ||
| 58 | k7nops + 1 + 2 + 3 + 4 + 5 + 6, | ||
| 59 | k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | ||
| 60 | }; | ||
| 61 | static struct nop { | ||
| 62 | int cpuid; | ||
| 63 | unsigned char **noptable; | ||
| 64 | } noptypes[] = { | ||
| 65 | { X86_FEATURE_K8, k8_nops }, | ||
| 66 | { X86_FEATURE_K7, k7_nops }, | ||
| 67 | { -1, NULL } | ||
| 68 | }; | ||
| 69 | |||
| 70 | |||
| 71 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; | ||
| 72 | extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[]; | ||
| 73 | extern u8 *__smp_locks[], *__smp_locks_end[]; | ||
| 74 | |||
| 75 | extern u8 __smp_alt_begin[], __smp_alt_end[]; | ||
| 76 | |||
| 77 | |||
| 78 | static unsigned char** find_nop_table(void) | ||
| 79 | { | ||
| 80 | unsigned char **noptable = intel_nops; | ||
| 81 | int i; | ||
| 82 | |||
| 83 | for (i = 0; noptypes[i].cpuid >= 0; i++) { | ||
| 84 | if (boot_cpu_has(noptypes[i].cpuid)) { | ||
| 85 | noptable = noptypes[i].noptable; | ||
| 86 | break; | ||
| 87 | } | ||
| 88 | } | ||
| 89 | return noptable; | ||
| 90 | } | ||
| 91 | |||
| 92 | /* Replace instructions with better alternatives for this CPU type. | ||
| 93 | This runs before SMP is initialized to avoid SMP problems with | ||
| 94 | self modifying code. This implies that assymetric systems where | ||
| 95 | APs have less capabilities than the boot processor are not handled. | ||
| 96 | Tough. Make sure you disable such features by hand. */ | ||
| 97 | |||
| 98 | void apply_alternatives(struct alt_instr *start, struct alt_instr *end) | ||
| 99 | { | ||
| 100 | unsigned char **noptable = find_nop_table(); | ||
| 101 | struct alt_instr *a; | ||
| 102 | int diff, i, k; | ||
| 103 | |||
| 104 | DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end); | ||
| 105 | for (a = start; a < end; a++) { | ||
| 106 | BUG_ON(a->replacementlen > a->instrlen); | ||
| 107 | if (!boot_cpu_has(a->cpuid)) | ||
| 108 | continue; | ||
| 109 | memcpy(a->instr, a->replacement, a->replacementlen); | ||
| 110 | diff = a->instrlen - a->replacementlen; | ||
| 111 | /* Pad the rest with nops */ | ||
| 112 | for (i = a->replacementlen; diff > 0; diff -= k, i += k) { | ||
| 113 | k = diff; | ||
| 114 | if (k > ASM_NOP_MAX) | ||
| 115 | k = ASM_NOP_MAX; | ||
| 116 | memcpy(a->instr + i, noptable[k], k); | ||
| 117 | } | ||
| 118 | } | ||
| 119 | } | ||
| 120 | |||
| 121 | static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end) | ||
| 122 | { | ||
| 123 | struct alt_instr *a; | ||
| 124 | |||
| 125 | DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end); | ||
| 126 | for (a = start; a < end; a++) { | ||
| 127 | memcpy(a->replacement + a->replacementlen, | ||
| 128 | a->instr, | ||
| 129 | a->instrlen); | ||
| 130 | } | ||
| 131 | } | ||
| 132 | |||
| 133 | static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end) | ||
| 134 | { | ||
| 135 | struct alt_instr *a; | ||
| 136 | |||
| 137 | for (a = start; a < end; a++) { | ||
| 138 | memcpy(a->instr, | ||
| 139 | a->replacement + a->replacementlen, | ||
| 140 | a->instrlen); | ||
| 141 | } | ||
| 142 | } | ||
| 143 | |||
| 144 | static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end) | ||
| 145 | { | ||
| 146 | u8 **ptr; | ||
| 147 | |||
| 148 | for (ptr = start; ptr < end; ptr++) { | ||
| 149 | if (*ptr < text) | ||
| 150 | continue; | ||
| 151 | if (*ptr > text_end) | ||
| 152 | continue; | ||
| 153 | **ptr = 0xf0; /* lock prefix */ | ||
| 154 | }; | ||
| 155 | } | ||
| 156 | |||
| 157 | static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end) | ||
| 158 | { | ||
| 159 | unsigned char **noptable = find_nop_table(); | ||
| 160 | u8 **ptr; | ||
| 161 | |||
| 162 | for (ptr = start; ptr < end; ptr++) { | ||
| 163 | if (*ptr < text) | ||
| 164 | continue; | ||
| 165 | if (*ptr > text_end) | ||
| 166 | continue; | ||
| 167 | **ptr = noptable[1][0]; | ||
| 168 | }; | ||
| 169 | } | ||
| 170 | |||
| 171 | struct smp_alt_module { | ||
| 172 | /* what is this ??? */ | ||
| 173 | struct module *mod; | ||
| 174 | char *name; | ||
| 175 | |||
| 176 | /* ptrs to lock prefixes */ | ||
| 177 | u8 **locks; | ||
| 178 | u8 **locks_end; | ||
| 179 | |||
| 180 | /* .text segment, needed to avoid patching init code ;) */ | ||
| 181 | u8 *text; | ||
| 182 | u8 *text_end; | ||
| 183 | |||
| 184 | struct list_head next; | ||
| 185 | }; | ||
| 186 | static LIST_HEAD(smp_alt_modules); | ||
| 187 | static DEFINE_SPINLOCK(smp_alt); | ||
| 188 | |||
| 189 | static int smp_alt_once = 0; | ||
| 190 | static int __init bootonly(char *str) | ||
| 191 | { | ||
| 192 | smp_alt_once = 1; | ||
| 193 | return 1; | ||
| 194 | } | ||
| 195 | __setup("smp-alt-boot", bootonly); | ||
| 196 | |||
| 197 | void alternatives_smp_module_add(struct module *mod, char *name, | ||
| 198 | void *locks, void *locks_end, | ||
| 199 | void *text, void *text_end) | ||
| 200 | { | ||
| 201 | struct smp_alt_module *smp; | ||
| 202 | unsigned long flags; | ||
| 203 | |||
| 204 | if (smp_alt_once) { | ||
| 205 | if (boot_cpu_has(X86_FEATURE_UP)) | ||
| 206 | alternatives_smp_unlock(locks, locks_end, | ||
| 207 | text, text_end); | ||
| 208 | return; | ||
| 209 | } | ||
| 210 | |||
| 211 | smp = kzalloc(sizeof(*smp), GFP_KERNEL); | ||
| 212 | if (NULL == smp) | ||
| 213 | return; /* we'll run the (safe but slow) SMP code then ... */ | ||
| 214 | |||
| 215 | smp->mod = mod; | ||
| 216 | smp->name = name; | ||
| 217 | smp->locks = locks; | ||
| 218 | smp->locks_end = locks_end; | ||
| 219 | smp->text = text; | ||
| 220 | smp->text_end = text_end; | ||
| 221 | DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n", | ||
| 222 | __FUNCTION__, smp->locks, smp->locks_end, | ||
| 223 | smp->text, smp->text_end, smp->name); | ||
| 224 | |||
| 225 | spin_lock_irqsave(&smp_alt, flags); | ||
| 226 | list_add_tail(&smp->next, &smp_alt_modules); | ||
| 227 | if (boot_cpu_has(X86_FEATURE_UP)) | ||
| 228 | alternatives_smp_unlock(smp->locks, smp->locks_end, | ||
| 229 | smp->text, smp->text_end); | ||
| 230 | spin_unlock_irqrestore(&smp_alt, flags); | ||
| 231 | } | ||
| 232 | |||
| 233 | void alternatives_smp_module_del(struct module *mod) | ||
| 234 | { | ||
| 235 | struct smp_alt_module *item; | ||
| 236 | unsigned long flags; | ||
| 237 | |||
| 238 | if (smp_alt_once) | ||
| 239 | return; | ||
| 240 | |||
| 241 | spin_lock_irqsave(&smp_alt, flags); | ||
| 242 | list_for_each_entry(item, &smp_alt_modules, next) { | ||
| 243 | if (mod != item->mod) | ||
| 244 | continue; | ||
| 245 | list_del(&item->next); | ||
| 246 | spin_unlock_irqrestore(&smp_alt, flags); | ||
| 247 | DPRINTK("%s: %s\n", __FUNCTION__, item->name); | ||
| 248 | kfree(item); | ||
| 249 | return; | ||
| 250 | } | ||
| 251 | spin_unlock_irqrestore(&smp_alt, flags); | ||
| 252 | } | ||
| 253 | |||
| 254 | void alternatives_smp_switch(int smp) | ||
| 255 | { | ||
| 256 | struct smp_alt_module *mod; | ||
| 257 | unsigned long flags; | ||
| 258 | |||
| 259 | if (smp_alt_once) | ||
| 260 | return; | ||
| 261 | BUG_ON(!smp && (num_online_cpus() > 1)); | ||
| 262 | |||
| 263 | spin_lock_irqsave(&smp_alt, flags); | ||
| 264 | if (smp) { | ||
| 265 | printk(KERN_INFO "SMP alternatives: switching to SMP code\n"); | ||
| 266 | clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); | ||
| 267 | clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); | ||
| 268 | alternatives_smp_apply(__smp_alt_instructions, | ||
| 269 | __smp_alt_instructions_end); | ||
| 270 | list_for_each_entry(mod, &smp_alt_modules, next) | ||
| 271 | alternatives_smp_lock(mod->locks, mod->locks_end, | ||
| 272 | mod->text, mod->text_end); | ||
| 273 | } else { | ||
| 274 | printk(KERN_INFO "SMP alternatives: switching to UP code\n"); | ||
| 275 | set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); | ||
| 276 | set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); | ||
| 277 | apply_alternatives(__smp_alt_instructions, | ||
| 278 | __smp_alt_instructions_end); | ||
| 279 | list_for_each_entry(mod, &smp_alt_modules, next) | ||
| 280 | alternatives_smp_unlock(mod->locks, mod->locks_end, | ||
| 281 | mod->text, mod->text_end); | ||
| 282 | } | ||
| 283 | spin_unlock_irqrestore(&smp_alt, flags); | ||
| 284 | } | ||
| 285 | |||
| 286 | void __init alternative_instructions(void) | ||
| 287 | { | ||
| 288 | apply_alternatives(__alt_instructions, __alt_instructions_end); | ||
| 289 | |||
| 290 | /* switch to patch-once-at-boottime-only mode and free the | ||
| 291 | * tables in case we know the number of CPUs will never ever | ||
| 292 | * change */ | ||
| 293 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 294 | if (num_possible_cpus() < 2) | ||
| 295 | smp_alt_once = 1; | ||
| 296 | #else | ||
| 297 | smp_alt_once = 1; | ||
| 298 | #endif | ||
| 299 | |||
| 300 | if (smp_alt_once) { | ||
| 301 | if (1 == num_possible_cpus()) { | ||
| 302 | printk(KERN_INFO "SMP alternatives: switching to UP code\n"); | ||
| 303 | set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); | ||
| 304 | set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); | ||
| 305 | apply_alternatives(__smp_alt_instructions, | ||
| 306 | __smp_alt_instructions_end); | ||
| 307 | alternatives_smp_unlock(__smp_locks, __smp_locks_end, | ||
| 308 | _text, _etext); | ||
| 309 | } | ||
| 310 | free_init_pages("SMP alternatives", | ||
| 311 | (unsigned long)__smp_alt_begin, | ||
| 312 | (unsigned long)__smp_alt_end); | ||
| 313 | } else { | ||
| 314 | alternatives_smp_save(__smp_alt_instructions, | ||
| 315 | __smp_alt_instructions_end); | ||
| 316 | alternatives_smp_module_add(NULL, "core kernel", | ||
| 317 | __smp_locks, __smp_locks_end, | ||
| 318 | _text, _etext); | ||
| 319 | alternatives_smp_switch(0); | ||
| 320 | } | ||
| 321 | } | ||
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index 776c90989e06..eb5279d23b7f 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <asm/i8253.h> | 38 | #include <asm/i8253.h> |
| 39 | 39 | ||
| 40 | #include <mach_apic.h> | 40 | #include <mach_apic.h> |
| 41 | #include <mach_apicdef.h> | ||
| 41 | #include <mach_ipi.h> | 42 | #include <mach_ipi.h> |
| 42 | 43 | ||
| 43 | #include "io_ports.h" | 44 | #include "io_ports.h" |
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c index f52669ecb93f..bd75629dd262 100644 --- a/arch/i386/kernel/cpu/centaur.c +++ b/arch/i386/kernel/cpu/centaur.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | #include <asm/processor.h> | 4 | #include <asm/processor.h> |
| 5 | #include <asm/msr.h> | 5 | #include <asm/msr.h> |
| 6 | #include <asm/e820.h> | 6 | #include <asm/e820.h> |
| 7 | #include <asm/mtrr.h> | ||
| 7 | #include "cpu.h" | 8 | #include "cpu.h" |
| 8 | 9 | ||
| 9 | #ifdef CONFIG_X86_OOSTORE | 10 | #ifdef CONFIG_X86_OOSTORE |
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index e6bd095ae108..7e3d6b6a4e96 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c | |||
| @@ -25,9 +25,10 @@ EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr); | |||
| 25 | DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); | 25 | DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); |
| 26 | EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); | 26 | EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); |
| 27 | 27 | ||
| 28 | static int cachesize_override __devinitdata = -1; | 28 | static int cachesize_override __cpuinitdata = -1; |
| 29 | static int disable_x86_fxsr __devinitdata = 0; | 29 | static int disable_x86_fxsr __cpuinitdata; |
| 30 | static int disable_x86_serial_nr __devinitdata = 1; | 30 | static int disable_x86_serial_nr __cpuinitdata = 1; |
| 31 | static int disable_x86_sep __cpuinitdata; | ||
| 31 | 32 | ||
| 32 | struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; | 33 | struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; |
| 33 | 34 | ||
| @@ -59,7 +60,7 @@ static int __init cachesize_setup(char *str) | |||
| 59 | } | 60 | } |
| 60 | __setup("cachesize=", cachesize_setup); | 61 | __setup("cachesize=", cachesize_setup); |
| 61 | 62 | ||
| 62 | int __devinit get_model_name(struct cpuinfo_x86 *c) | 63 | int __cpuinit get_model_name(struct cpuinfo_x86 *c) |
| 63 | { | 64 | { |
| 64 | unsigned int *v; | 65 | unsigned int *v; |
| 65 | char *p, *q; | 66 | char *p, *q; |
| @@ -89,7 +90,7 @@ int __devinit get_model_name(struct cpuinfo_x86 *c) | |||
| 89 | } | 90 | } |
| 90 | 91 | ||
| 91 | 92 | ||
| 92 | void __devinit display_cacheinfo(struct cpuinfo_x86 *c) | 93 | void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) |
| 93 | { | 94 | { |
| 94 | unsigned int n, dummy, ecx, edx, l2size; | 95 | unsigned int n, dummy, ecx, edx, l2size; |
| 95 | 96 | ||
| @@ -130,7 +131,7 @@ void __devinit display_cacheinfo(struct cpuinfo_x86 *c) | |||
| 130 | /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */ | 131 | /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */ |
| 131 | 132 | ||
| 132 | /* Look up CPU names by table lookup. */ | 133 | /* Look up CPU names by table lookup. */ |
| 133 | static char __devinit *table_lookup_model(struct cpuinfo_x86 *c) | 134 | static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) |
| 134 | { | 135 | { |
| 135 | struct cpu_model_info *info; | 136 | struct cpu_model_info *info; |
| 136 | 137 | ||
| @@ -151,7 +152,7 @@ static char __devinit *table_lookup_model(struct cpuinfo_x86 *c) | |||
| 151 | } | 152 | } |
| 152 | 153 | ||
| 153 | 154 | ||
| 154 | static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) | 155 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) |
| 155 | { | 156 | { |
| 156 | char *v = c->x86_vendor_id; | 157 | char *v = c->x86_vendor_id; |
| 157 | int i; | 158 | int i; |
| @@ -187,6 +188,14 @@ static int __init x86_fxsr_setup(char * s) | |||
| 187 | __setup("nofxsr", x86_fxsr_setup); | 188 | __setup("nofxsr", x86_fxsr_setup); |
| 188 | 189 | ||
| 189 | 190 | ||
| 191 | static int __init x86_sep_setup(char * s) | ||
| 192 | { | ||
| 193 | disable_x86_sep = 1; | ||
| 194 | return 1; | ||
| 195 | } | ||
| 196 | __setup("nosep", x86_sep_setup); | ||
| 197 | |||
| 198 | |||
| 190 | /* Standard macro to see if a specific flag is changeable */ | 199 | /* Standard macro to see if a specific flag is changeable */ |
| 191 | static inline int flag_is_changeable_p(u32 flag) | 200 | static inline int flag_is_changeable_p(u32 flag) |
| 192 | { | 201 | { |
| @@ -210,7 +219,7 @@ static inline int flag_is_changeable_p(u32 flag) | |||
| 210 | 219 | ||
| 211 | 220 | ||
| 212 | /* Probe for the CPUID instruction */ | 221 | /* Probe for the CPUID instruction */ |
| 213 | static int __devinit have_cpuid_p(void) | 222 | static int __cpuinit have_cpuid_p(void) |
| 214 | { | 223 | { |
| 215 | return flag_is_changeable_p(X86_EFLAGS_ID); | 224 | return flag_is_changeable_p(X86_EFLAGS_ID); |
| 216 | } | 225 | } |
| @@ -254,7 +263,7 @@ static void __init early_cpu_detect(void) | |||
| 254 | } | 263 | } |
| 255 | } | 264 | } |
| 256 | 265 | ||
| 257 | void __devinit generic_identify(struct cpuinfo_x86 * c) | 266 | void __cpuinit generic_identify(struct cpuinfo_x86 * c) |
| 258 | { | 267 | { |
| 259 | u32 tfms, xlvl; | 268 | u32 tfms, xlvl; |
| 260 | int junk; | 269 | int junk; |
| @@ -307,7 +316,7 @@ void __devinit generic_identify(struct cpuinfo_x86 * c) | |||
| 307 | #endif | 316 | #endif |
| 308 | } | 317 | } |
| 309 | 318 | ||
| 310 | static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | 319 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) |
| 311 | { | 320 | { |
| 312 | if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { | 321 | if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { |
| 313 | /* Disable processor serial number */ | 322 | /* Disable processor serial number */ |
| @@ -335,7 +344,7 @@ __setup("serialnumber", x86_serial_nr_setup); | |||
| 335 | /* | 344 | /* |
| 336 | * This does the hard work of actually picking apart the CPU stuff... | 345 | * This does the hard work of actually picking apart the CPU stuff... |
| 337 | */ | 346 | */ |
| 338 | void __devinit identify_cpu(struct cpuinfo_x86 *c) | 347 | void __cpuinit identify_cpu(struct cpuinfo_x86 *c) |
| 339 | { | 348 | { |
| 340 | int i; | 349 | int i; |
| 341 | 350 | ||
| @@ -405,6 +414,10 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c) | |||
| 405 | clear_bit(X86_FEATURE_XMM, c->x86_capability); | 414 | clear_bit(X86_FEATURE_XMM, c->x86_capability); |
| 406 | } | 415 | } |
| 407 | 416 | ||
| 417 | /* SEP disabled? */ | ||
| 418 | if (disable_x86_sep) | ||
| 419 | clear_bit(X86_FEATURE_SEP, c->x86_capability); | ||
| 420 | |||
| 408 | if (disable_pse) | 421 | if (disable_pse) |
| 409 | clear_bit(X86_FEATURE_PSE, c->x86_capability); | 422 | clear_bit(X86_FEATURE_PSE, c->x86_capability); |
| 410 | 423 | ||
| @@ -417,7 +430,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c) | |||
| 417 | else | 430 | else |
| 418 | /* Last resort... */ | 431 | /* Last resort... */ |
| 419 | sprintf(c->x86_model_id, "%02x/%02x", | 432 | sprintf(c->x86_model_id, "%02x/%02x", |
| 420 | c->x86_vendor, c->x86_model); | 433 | c->x86, c->x86_model); |
| 421 | } | 434 | } |
| 422 | 435 | ||
| 423 | /* Now the feature flags better reflect actual CPU features! */ | 436 | /* Now the feature flags better reflect actual CPU features! */ |
| @@ -453,7 +466,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c) | |||
| 453 | } | 466 | } |
| 454 | 467 | ||
| 455 | #ifdef CONFIG_X86_HT | 468 | #ifdef CONFIG_X86_HT |
| 456 | void __devinit detect_ht(struct cpuinfo_x86 *c) | 469 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) |
| 457 | { | 470 | { |
| 458 | u32 eax, ebx, ecx, edx; | 471 | u32 eax, ebx, ecx, edx; |
| 459 | int index_msb, core_bits; | 472 | int index_msb, core_bits; |
| @@ -500,7 +513,7 @@ void __devinit detect_ht(struct cpuinfo_x86 *c) | |||
| 500 | } | 513 | } |
| 501 | #endif | 514 | #endif |
| 502 | 515 | ||
| 503 | void __devinit print_cpu_info(struct cpuinfo_x86 *c) | 516 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) |
| 504 | { | 517 | { |
| 505 | char *vendor = NULL; | 518 | char *vendor = NULL; |
| 506 | 519 | ||
| @@ -523,7 +536,7 @@ void __devinit print_cpu_info(struct cpuinfo_x86 *c) | |||
| 523 | printk("\n"); | 536 | printk("\n"); |
| 524 | } | 537 | } |
| 525 | 538 | ||
| 526 | cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE; | 539 | cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; |
| 527 | 540 | ||
| 528 | /* This is hacky. :) | 541 | /* This is hacky. :) |
| 529 | * We're emulating future behavior. | 542 | * We're emulating future behavior. |
| @@ -570,7 +583,7 @@ void __init early_cpu_init(void) | |||
| 570 | * and IDT. We reload them nevertheless, this function acts as a | 583 | * and IDT. We reload them nevertheless, this function acts as a |
| 571 | * 'CPU state barrier', nothing should get across. | 584 | * 'CPU state barrier', nothing should get across. |
| 572 | */ | 585 | */ |
| 573 | void __devinit cpu_init(void) | 586 | void __cpuinit cpu_init(void) |
| 574 | { | 587 | { |
| 575 | int cpu = smp_processor_id(); | 588 | int cpu = smp_processor_id(); |
| 576 | struct tss_struct * t = &per_cpu(init_tss, cpu); | 589 | struct tss_struct * t = &per_cpu(init_tss, cpu); |
| @@ -670,7 +683,7 @@ void __devinit cpu_init(void) | |||
| 670 | } | 683 | } |
| 671 | 684 | ||
| 672 | #ifdef CONFIG_HOTPLUG_CPU | 685 | #ifdef CONFIG_HOTPLUG_CPU |
| 673 | void __devinit cpu_uninit(void) | 686 | void __cpuinit cpu_uninit(void) |
| 674 | { | 687 | { |
| 675 | int cpu = raw_smp_processor_id(); | 688 | int cpu = raw_smp_processor_id(); |
| 676 | cpu_clear(cpu, cpu_initialized); | 689 | cpu_clear(cpu, cpu_initialized); |
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c index e11a09207ec8..3d5110b65cc3 100644 --- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c | |||
| @@ -1145,9 +1145,7 @@ static int __cpuinit powernowk8_init(void) | |||
| 1145 | { | 1145 | { |
| 1146 | unsigned int i, supported_cpus = 0; | 1146 | unsigned int i, supported_cpus = 0; |
| 1147 | 1147 | ||
| 1148 | for (i=0; i<NR_CPUS; i++) { | 1148 | for_each_cpu(i) { |
| 1149 | if (!cpu_online(i)) | ||
| 1150 | continue; | ||
| 1151 | if (check_supported_cpu(i)) | 1149 | if (check_supported_cpu(i)) |
| 1152 | supported_cpus++; | 1150 | supported_cpus++; |
| 1153 | } | 1151 | } |
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c index 8c0120186b9f..5386b29bb5a5 100644 --- a/arch/i386/kernel/cpu/intel.c +++ b/arch/i386/kernel/cpu/intel.c | |||
| @@ -29,7 +29,7 @@ extern int trap_init_f00f_bug(void); | |||
| 29 | struct movsl_mask movsl_mask __read_mostly; | 29 | struct movsl_mask movsl_mask __read_mostly; |
| 30 | #endif | 30 | #endif |
| 31 | 31 | ||
| 32 | void __devinit early_intel_workaround(struct cpuinfo_x86 *c) | 32 | void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c) |
| 33 | { | 33 | { |
| 34 | if (c->x86_vendor != X86_VENDOR_INTEL) | 34 | if (c->x86_vendor != X86_VENDOR_INTEL) |
| 35 | return; | 35 | return; |
| @@ -44,7 +44,7 @@ void __devinit early_intel_workaround(struct cpuinfo_x86 *c) | |||
| 44 | * This is called before we do cpu ident work | 44 | * This is called before we do cpu ident work |
| 45 | */ | 45 | */ |
| 46 | 46 | ||
| 47 | int __devinit ppro_with_ram_bug(void) | 47 | int __cpuinit ppro_with_ram_bug(void) |
| 48 | { | 48 | { |
| 49 | /* Uses data from early_cpu_detect now */ | 49 | /* Uses data from early_cpu_detect now */ |
| 50 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | 50 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
| @@ -62,7 +62,7 @@ int __devinit ppro_with_ram_bug(void) | |||
| 62 | * P4 Xeon errata 037 workaround. | 62 | * P4 Xeon errata 037 workaround. |
| 63 | * Hardware prefetcher may cause stale data to be loaded into the cache. | 63 | * Hardware prefetcher may cause stale data to be loaded into the cache. |
| 64 | */ | 64 | */ |
| 65 | static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c) | 65 | static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c) |
| 66 | { | 66 | { |
| 67 | unsigned long lo, hi; | 67 | unsigned long lo, hi; |
| 68 | 68 | ||
| @@ -81,7 +81,7 @@ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c) | |||
| 81 | /* | 81 | /* |
| 82 | * find out the number of processor cores on the die | 82 | * find out the number of processor cores on the die |
| 83 | */ | 83 | */ |
| 84 | static int __devinit num_cpu_cores(struct cpuinfo_x86 *c) | 84 | static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c) |
| 85 | { | 85 | { |
| 86 | unsigned int eax, ebx, ecx, edx; | 86 | unsigned int eax, ebx, ecx, edx; |
| 87 | 87 | ||
| @@ -96,7 +96,7 @@ static int __devinit num_cpu_cores(struct cpuinfo_x86 *c) | |||
| 96 | return 1; | 96 | return 1; |
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | static void __devinit init_intel(struct cpuinfo_x86 *c) | 99 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) |
| 100 | { | 100 | { |
| 101 | unsigned int l2 = 0; | 101 | unsigned int l2 = 0; |
| 102 | char *p = NULL; | 102 | char *p = NULL; |
| @@ -205,7 +205,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size) | |||
| 205 | return size; | 205 | return size; |
| 206 | } | 206 | } |
| 207 | 207 | ||
| 208 | static struct cpu_dev intel_cpu_dev __devinitdata = { | 208 | static struct cpu_dev intel_cpu_dev __cpuinitdata = { |
| 209 | .c_vendor = "Intel", | 209 | .c_vendor = "Intel", |
| 210 | .c_ident = { "GenuineIntel" }, | 210 | .c_ident = { "GenuineIntel" }, |
| 211 | .c_models = { | 211 | .c_models = { |
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c index ffe58cee0c48..ce61921369e5 100644 --- a/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/arch/i386/kernel/cpu/intel_cacheinfo.c | |||
| @@ -174,7 +174,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
| 174 | unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ | 174 | unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ |
| 175 | unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ | 175 | unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ |
| 176 | 176 | ||
| 177 | if (c->cpuid_level > 4) { | 177 | if (c->cpuid_level > 3) { |
| 178 | static int is_initialized; | 178 | static int is_initialized; |
| 179 | 179 | ||
| 180 | if (is_initialized == 0) { | 180 | if (is_initialized == 0) { |
| @@ -330,7 +330,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
| 330 | } | 330 | } |
| 331 | } | 331 | } |
| 332 | } | 332 | } |
| 333 | static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | 333 | static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) |
| 334 | { | 334 | { |
| 335 | struct _cpuid4_info *this_leaf, *sibling_leaf; | 335 | struct _cpuid4_info *this_leaf, *sibling_leaf; |
| 336 | int sibling; | 336 | int sibling; |
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c index 89a85af33d28..5cfbd8011698 100644 --- a/arch/i386/kernel/cpu/proc.c +++ b/arch/i386/kernel/cpu/proc.c | |||
| @@ -40,7 +40,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
| 40 | /* Other (Linux-defined) */ | 40 | /* Other (Linux-defined) */ |
| 41 | "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", | 41 | "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", |
| 42 | NULL, NULL, NULL, NULL, | 42 | NULL, NULL, NULL, NULL, |
| 43 | "constant_tsc", NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 43 | "constant_tsc", "up", NULL, NULL, NULL, NULL, NULL, NULL, |
| 44 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 44 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
| 45 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 45 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
| 46 | 46 | ||
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c index d49dbe8dc96b..e3c5fca0aa8a 100644 --- a/arch/i386/kernel/crash.c +++ b/arch/i386/kernel/crash.c | |||
| @@ -105,7 +105,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu) | |||
| 105 | return 1; | 105 | return 1; |
| 106 | local_irq_disable(); | 106 | local_irq_disable(); |
| 107 | 107 | ||
| 108 | if (!user_mode(regs)) { | 108 | if (!user_mode_vm(regs)) { |
| 109 | crash_fixup_ss_esp(&fixed_regs, regs); | 109 | crash_fixup_ss_esp(&fixed_regs, regs); |
| 110 | regs = &fixed_regs; | 110 | regs = &fixed_regs; |
| 111 | } | 111 | } |
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index 4d704724b2f5..cfc683f153b9 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S | |||
| @@ -226,6 +226,10 @@ ENTRY(system_call) | |||
| 226 | pushl %eax # save orig_eax | 226 | pushl %eax # save orig_eax |
| 227 | SAVE_ALL | 227 | SAVE_ALL |
| 228 | GET_THREAD_INFO(%ebp) | 228 | GET_THREAD_INFO(%ebp) |
| 229 | testl $TF_MASK,EFLAGS(%esp) | ||
| 230 | jz no_singlestep | ||
| 231 | orl $_TIF_SINGLESTEP,TI_flags(%ebp) | ||
| 232 | no_singlestep: | ||
| 229 | # system call tracing in operation / emulation | 233 | # system call tracing in operation / emulation |
| 230 | /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ | 234 | /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ |
| 231 | testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp) | 235 | testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp) |
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S index e0b7c632efbc..3debc2e26542 100644 --- a/arch/i386/kernel/head.S +++ b/arch/i386/kernel/head.S | |||
| @@ -450,7 +450,6 @@ int_msg: | |||
| 450 | 450 | ||
| 451 | .globl boot_gdt_descr | 451 | .globl boot_gdt_descr |
| 452 | .globl idt_descr | 452 | .globl idt_descr |
| 453 | .globl cpu_gdt_descr | ||
| 454 | 453 | ||
| 455 | ALIGN | 454 | ALIGN |
| 456 | # early boot GDT descriptor (must use 1:1 address mapping) | 455 | # early boot GDT descriptor (must use 1:1 address mapping) |
| @@ -470,8 +469,6 @@ cpu_gdt_descr: | |||
| 470 | .word GDT_ENTRIES*8-1 | 469 | .word GDT_ENTRIES*8-1 |
| 471 | .long cpu_gdt_table | 470 | .long cpu_gdt_table |
| 472 | 471 | ||
| 473 | .fill NR_CPUS-1,8,0 # space for the other GDT descriptors | ||
| 474 | |||
| 475 | /* | 472 | /* |
| 476 | * The boot_gdt_table must mirror the equivalent in setup.S and is | 473 | * The boot_gdt_table must mirror the equivalent in setup.S and is |
| 477 | * used only for booting. | 474 | * used only for booting. |
| @@ -485,7 +482,7 @@ ENTRY(boot_gdt_table) | |||
| 485 | /* | 482 | /* |
| 486 | * The Global Descriptor Table contains 28 quadwords, per-CPU. | 483 | * The Global Descriptor Table contains 28 quadwords, per-CPU. |
| 487 | */ | 484 | */ |
| 488 | .align PAGE_SIZE_asm | 485 | .align L1_CACHE_BYTES |
| 489 | ENTRY(cpu_gdt_table) | 486 | ENTRY(cpu_gdt_table) |
| 490 | .quad 0x0000000000000000 /* NULL descriptor */ | 487 | .quad 0x0000000000000000 /* NULL descriptor */ |
| 491 | .quad 0x0000000000000000 /* 0x0b reserved */ | 488 | .quad 0x0000000000000000 /* 0x0b reserved */ |
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 39d9a5fa907e..311b4e7266f1 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c | |||
| @@ -351,8 +351,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold) | |||
| 351 | { | 351 | { |
| 352 | int i, j; | 352 | int i, j; |
| 353 | Dprintk("Rotating IRQs among CPUs.\n"); | 353 | Dprintk("Rotating IRQs among CPUs.\n"); |
| 354 | for (i = 0; i < NR_CPUS; i++) { | 354 | for_each_online_cpu(i) { |
| 355 | for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) { | 355 | for (j = 0; j < NR_IRQS; j++) { |
| 356 | if (!irq_desc[j].action) | 356 | if (!irq_desc[j].action) |
| 357 | continue; | 357 | continue; |
| 358 | /* Is it a significant load ? */ | 358 | /* Is it a significant load ? */ |
| @@ -381,7 +381,7 @@ static void do_irq_balance(void) | |||
| 381 | unsigned long imbalance = 0; | 381 | unsigned long imbalance = 0; |
| 382 | cpumask_t allowed_mask, target_cpu_mask, tmp; | 382 | cpumask_t allowed_mask, target_cpu_mask, tmp; |
| 383 | 383 | ||
| 384 | for (i = 0; i < NR_CPUS; i++) { | 384 | for_each_cpu(i) { |
| 385 | int package_index; | 385 | int package_index; |
| 386 | CPU_IRQ(i) = 0; | 386 | CPU_IRQ(i) = 0; |
| 387 | if (!cpu_online(i)) | 387 | if (!cpu_online(i)) |
| @@ -422,9 +422,7 @@ static void do_irq_balance(void) | |||
| 422 | } | 422 | } |
| 423 | } | 423 | } |
| 424 | /* Find the least loaded processor package */ | 424 | /* Find the least loaded processor package */ |
| 425 | for (i = 0; i < NR_CPUS; i++) { | 425 | for_each_online_cpu(i) { |
| 426 | if (!cpu_online(i)) | ||
| 427 | continue; | ||
| 428 | if (i != CPU_TO_PACKAGEINDEX(i)) | 426 | if (i != CPU_TO_PACKAGEINDEX(i)) |
| 429 | continue; | 427 | continue; |
| 430 | if (min_cpu_irq > CPU_IRQ(i)) { | 428 | if (min_cpu_irq > CPU_IRQ(i)) { |
| @@ -441,9 +439,7 @@ tryanothercpu: | |||
| 441 | */ | 439 | */ |
| 442 | tmp_cpu_irq = 0; | 440 | tmp_cpu_irq = 0; |
| 443 | tmp_loaded = -1; | 441 | tmp_loaded = -1; |
| 444 | for (i = 0; i < NR_CPUS; i++) { | 442 | for_each_online_cpu(i) { |
| 445 | if (!cpu_online(i)) | ||
| 446 | continue; | ||
| 447 | if (i != CPU_TO_PACKAGEINDEX(i)) | 443 | if (i != CPU_TO_PACKAGEINDEX(i)) |
| 448 | continue; | 444 | continue; |
| 449 | if (max_cpu_irq <= CPU_IRQ(i)) | 445 | if (max_cpu_irq <= CPU_IRQ(i)) |
| @@ -619,9 +615,7 @@ static int __init balanced_irq_init(void) | |||
| 619 | if (smp_num_siblings > 1 && !cpus_empty(tmp)) | 615 | if (smp_num_siblings > 1 && !cpus_empty(tmp)) |
| 620 | physical_balance = 1; | 616 | physical_balance = 1; |
| 621 | 617 | ||
| 622 | for (i = 0; i < NR_CPUS; i++) { | 618 | for_each_online_cpu(i) { |
| 623 | if (!cpu_online(i)) | ||
| 624 | continue; | ||
| 625 | irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); | 619 | irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); |
| 626 | irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); | 620 | irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); |
| 627 | if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) { | 621 | if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) { |
| @@ -638,9 +632,11 @@ static int __init balanced_irq_init(void) | |||
| 638 | else | 632 | else |
| 639 | printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq"); | 633 | printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq"); |
| 640 | failed: | 634 | failed: |
| 641 | for (i = 0; i < NR_CPUS; i++) { | 635 | for_each_cpu(i) { |
| 642 | kfree(irq_cpu_data[i].irq_delta); | 636 | kfree(irq_cpu_data[i].irq_delta); |
| 637 | irq_cpu_data[i].irq_delta = NULL; | ||
| 643 | kfree(irq_cpu_data[i].last_irq); | 638 | kfree(irq_cpu_data[i].last_irq); |
| 639 | irq_cpu_data[i].last_irq = NULL; | ||
| 644 | } | 640 | } |
| 645 | return 0; | 641 | return 0; |
| 646 | } | 642 | } |
| @@ -1761,7 +1757,8 @@ static void __init setup_ioapic_ids_from_mpc(void) | |||
| 1761 | * Don't check I/O APIC IDs for xAPIC systems. They have | 1757 | * Don't check I/O APIC IDs for xAPIC systems. They have |
| 1762 | * no meaning without the serial APIC bus. | 1758 | * no meaning without the serial APIC bus. |
| 1763 | */ | 1759 | */ |
| 1764 | if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 < 15)) | 1760 | if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
| 1761 | || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
| 1765 | return; | 1762 | return; |
| 1766 | /* | 1763 | /* |
| 1767 | * This is broken; anything with a real cpu count has to | 1764 | * This is broken; anything with a real cpu count has to |
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c index 694a13997637..7a59050242a7 100644 --- a/arch/i386/kernel/kprobes.c +++ b/arch/i386/kernel/kprobes.c | |||
| @@ -84,9 +84,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) | |||
| 84 | 84 | ||
| 85 | void __kprobes arch_remove_kprobe(struct kprobe *p) | 85 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
| 86 | { | 86 | { |
| 87 | down(&kprobe_mutex); | 87 | mutex_lock(&kprobe_mutex); |
| 88 | free_insn_slot(p->ainsn.insn); | 88 | free_insn_slot(p->ainsn.insn); |
| 89 | up(&kprobe_mutex); | 89 | mutex_unlock(&kprobe_mutex); |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) | 92 | static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) |
diff --git a/arch/i386/kernel/module.c b/arch/i386/kernel/module.c index 5149c8a621f0..470cf97e7cd3 100644 --- a/arch/i386/kernel/module.c +++ b/arch/i386/kernel/module.c | |||
| @@ -104,26 +104,38 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, | |||
| 104 | return -ENOEXEC; | 104 | return -ENOEXEC; |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | extern void apply_alternatives(void *start, void *end); | ||
| 108 | |||
| 109 | int module_finalize(const Elf_Ehdr *hdr, | 107 | int module_finalize(const Elf_Ehdr *hdr, |
| 110 | const Elf_Shdr *sechdrs, | 108 | const Elf_Shdr *sechdrs, |
| 111 | struct module *me) | 109 | struct module *me) |
| 112 | { | 110 | { |
| 113 | const Elf_Shdr *s; | 111 | const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL; |
| 114 | char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | 112 | char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; |
| 115 | 113 | ||
| 116 | /* look for .altinstructions to patch */ | ||
| 117 | for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { | 114 | for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { |
| 118 | void *seg; | 115 | if (!strcmp(".text", secstrings + s->sh_name)) |
| 119 | if (strcmp(".altinstructions", secstrings + s->sh_name)) | 116 | text = s; |
| 120 | continue; | 117 | if (!strcmp(".altinstructions", secstrings + s->sh_name)) |
| 121 | seg = (void *)s->sh_addr; | 118 | alt = s; |
| 122 | apply_alternatives(seg, seg + s->sh_size); | 119 | if (!strcmp(".smp_locks", secstrings + s->sh_name)) |
| 123 | } | 120 | locks= s; |
| 121 | } | ||
| 122 | |||
| 123 | if (alt) { | ||
| 124 | /* patch .altinstructions */ | ||
| 125 | void *aseg = (void *)alt->sh_addr; | ||
| 126 | apply_alternatives(aseg, aseg + alt->sh_size); | ||
| 127 | } | ||
| 128 | if (locks && text) { | ||
| 129 | void *lseg = (void *)locks->sh_addr; | ||
| 130 | void *tseg = (void *)text->sh_addr; | ||
| 131 | alternatives_smp_module_add(me, me->name, | ||
| 132 | lseg, lseg + locks->sh_size, | ||
| 133 | tseg, tseg + text->sh_size); | ||
| 134 | } | ||
| 124 | return 0; | 135 | return 0; |
| 125 | } | 136 | } |
| 126 | 137 | ||
| 127 | void module_arch_cleanup(struct module *mod) | 138 | void module_arch_cleanup(struct module *mod) |
| 128 | { | 139 | { |
| 140 | alternatives_smp_module_del(mod); | ||
| 129 | } | 141 | } |
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c index e6e2f43db85e..8d8aa9d1796d 100644 --- a/arch/i386/kernel/mpparse.c +++ b/arch/i386/kernel/mpparse.c | |||
| @@ -828,6 +828,8 @@ void __init find_smp_config (void) | |||
| 828 | smp_scan_config(address, 0x400); | 828 | smp_scan_config(address, 0x400); |
| 829 | } | 829 | } |
| 830 | 830 | ||
| 831 | int es7000_plat; | ||
| 832 | |||
| 831 | /* -------------------------------------------------------------------------- | 833 | /* -------------------------------------------------------------------------- |
| 832 | ACPI-based MP Configuration | 834 | ACPI-based MP Configuration |
| 833 | -------------------------------------------------------------------------- */ | 835 | -------------------------------------------------------------------------- */ |
| @@ -935,7 +937,8 @@ void __init mp_register_ioapic ( | |||
| 935 | mp_ioapics[idx].mpc_apicaddr = address; | 937 | mp_ioapics[idx].mpc_apicaddr = address; |
| 936 | 938 | ||
| 937 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); | 939 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); |
| 938 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15)) | 940 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
| 941 | && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
| 939 | tmpid = io_apic_get_unique_id(idx, id); | 942 | tmpid = io_apic_get_unique_id(idx, id); |
| 940 | else | 943 | else |
| 941 | tmpid = id; | 944 | tmpid = id; |
| @@ -1011,8 +1014,6 @@ void __init mp_override_legacy_irq ( | |||
| 1011 | return; | 1014 | return; |
| 1012 | } | 1015 | } |
| 1013 | 1016 | ||
| 1014 | int es7000_plat; | ||
| 1015 | |||
| 1016 | void __init mp_config_acpi_legacy_irqs (void) | 1017 | void __init mp_config_acpi_legacy_irqs (void) |
| 1017 | { | 1018 | { |
| 1018 | struct mpc_config_intsrc intsrc; | 1019 | struct mpc_config_intsrc intsrc; |
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index be87c5e2ee95..9074818b9473 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c | |||
| @@ -143,7 +143,7 @@ static int __init check_nmi_watchdog(void) | |||
| 143 | local_irq_enable(); | 143 | local_irq_enable(); |
| 144 | mdelay((10*1000)/nmi_hz); // wait 10 ticks | 144 | mdelay((10*1000)/nmi_hz); // wait 10 ticks |
| 145 | 145 | ||
| 146 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 146 | for_each_cpu(cpu) { |
| 147 | #ifdef CONFIG_SMP | 147 | #ifdef CONFIG_SMP |
| 148 | /* Check cpu_callin_map here because that is set | 148 | /* Check cpu_callin_map here because that is set |
| 149 | after the timer is started. */ | 149 | after the timer is started. */ |
| @@ -510,7 +510,7 @@ void touch_nmi_watchdog (void) | |||
| 510 | * Just reset the alert counters, (other CPUs might be | 510 | * Just reset the alert counters, (other CPUs might be |
| 511 | * spinning on locks we hold): | 511 | * spinning on locks we hold): |
| 512 | */ | 512 | */ |
| 513 | for (i = 0; i < NR_CPUS; i++) | 513 | for_each_cpu(i) |
| 514 | alert_counter[i] = 0; | 514 | alert_counter[i] = 0; |
| 515 | 515 | ||
| 516 | /* | 516 | /* |
| @@ -543,7 +543,7 @@ void nmi_watchdog_tick (struct pt_regs * regs) | |||
| 543 | /* | 543 | /* |
| 544 | * die_nmi will return ONLY if NOTIFY_STOP happens.. | 544 | * die_nmi will return ONLY if NOTIFY_STOP happens.. |
| 545 | */ | 545 | */ |
| 546 | die_nmi(regs, "NMI Watchdog detected LOCKUP"); | 546 | die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP"); |
| 547 | } else { | 547 | } else { |
| 548 | last_irq_sums[cpu] = sum; | 548 | last_irq_sums[cpu] = sum; |
| 549 | alert_counter[cpu] = 0; | 549 | alert_counter[cpu] = 0; |
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index 0480454ebffa..299e61674084 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c | |||
| @@ -295,7 +295,7 @@ void show_regs(struct pt_regs * regs) | |||
| 295 | printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); | 295 | printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); |
| 296 | print_symbol("EIP is at %s\n", regs->eip); | 296 | print_symbol("EIP is at %s\n", regs->eip); |
| 297 | 297 | ||
| 298 | if (user_mode(regs)) | 298 | if (user_mode_vm(regs)) |
| 299 | printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); | 299 | printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); |
| 300 | printk(" EFLAGS: %08lx %s (%s %.*s)\n", | 300 | printk(" EFLAGS: %08lx %s (%s %.*s)\n", |
| 301 | regs->eflags, print_tainted(), system_utsname.release, | 301 | regs->eflags, print_tainted(), system_utsname.release, |
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c index 5c1fb6aada5b..506462ef36a0 100644 --- a/arch/i386/kernel/ptrace.c +++ b/arch/i386/kernel/ptrace.c | |||
| @@ -34,10 +34,10 @@ | |||
| 34 | 34 | ||
| 35 | /* | 35 | /* |
| 36 | * Determines which flags the user has access to [1 = access, 0 = no access]. | 36 | * Determines which flags the user has access to [1 = access, 0 = no access]. |
| 37 | * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9). | 37 | * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), NT(14), IOPL(12-13), IF(9). |
| 38 | * Also masks reserved bits (31-22, 15, 5, 3, 1). | 38 | * Also masks reserved bits (31-22, 15, 5, 3, 1). |
| 39 | */ | 39 | */ |
| 40 | #define FLAG_MASK 0x00054dd5 | 40 | #define FLAG_MASK 0x00050dd5 |
| 41 | 41 | ||
| 42 | /* set's the trap flag. */ | 42 | /* set's the trap flag. */ |
| 43 | #define TRAP_FLAG 0x100 | 43 | #define TRAP_FLAG 0x100 |
diff --git a/arch/i386/kernel/semaphore.c b/arch/i386/kernel/semaphore.c index 7455ab643943..967dc74df9ee 100644 --- a/arch/i386/kernel/semaphore.c +++ b/arch/i386/kernel/semaphore.c | |||
| @@ -110,11 +110,11 @@ asm( | |||
| 110 | ".align 4\n" | 110 | ".align 4\n" |
| 111 | ".globl __write_lock_failed\n" | 111 | ".globl __write_lock_failed\n" |
| 112 | "__write_lock_failed:\n\t" | 112 | "__write_lock_failed:\n\t" |
| 113 | LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax)\n" | 113 | LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",(%eax)\n" |
| 114 | "1: rep; nop\n\t" | 114 | "1: rep; nop\n\t" |
| 115 | "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" | 115 | "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" |
| 116 | "jne 1b\n\t" | 116 | "jne 1b\n\t" |
| 117 | LOCK "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" | 117 | LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" |
| 118 | "jnz __write_lock_failed\n\t" | 118 | "jnz __write_lock_failed\n\t" |
| 119 | "ret" | 119 | "ret" |
| 120 | ); | 120 | ); |
| @@ -124,11 +124,11 @@ asm( | |||
| 124 | ".align 4\n" | 124 | ".align 4\n" |
| 125 | ".globl __read_lock_failed\n" | 125 | ".globl __read_lock_failed\n" |
| 126 | "__read_lock_failed:\n\t" | 126 | "__read_lock_failed:\n\t" |
| 127 | LOCK "incl (%eax)\n" | 127 | LOCK_PREFIX "incl (%eax)\n" |
| 128 | "1: rep; nop\n\t" | 128 | "1: rep; nop\n\t" |
| 129 | "cmpl $1,(%eax)\n\t" | 129 | "cmpl $1,(%eax)\n\t" |
| 130 | "js 1b\n\t" | 130 | "js 1b\n\t" |
| 131 | LOCK "decl (%eax)\n\t" | 131 | LOCK_PREFIX "decl (%eax)\n\t" |
| 132 | "js __read_lock_failed\n\t" | 132 | "js __read_lock_failed\n\t" |
| 133 | "ret" | 133 | "ret" |
| 134 | ); | 134 | ); |
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index ab62a9f4701e..2d8782960f41 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c | |||
| @@ -1377,101 +1377,6 @@ static void __init register_memory(void) | |||
| 1377 | pci_mem_start, gapstart, gapsize); | 1377 | pci_mem_start, gapstart, gapsize); |
| 1378 | } | 1378 | } |
| 1379 | 1379 | ||
| 1380 | /* Use inline assembly to define this because the nops are defined | ||
| 1381 | as inline assembly strings in the include files and we cannot | ||
| 1382 | get them easily into strings. */ | ||
| 1383 | asm("\t.data\nintelnops: " | ||
| 1384 | GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 | ||
| 1385 | GENERIC_NOP7 GENERIC_NOP8); | ||
| 1386 | asm("\t.data\nk8nops: " | ||
| 1387 | K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 | ||
| 1388 | K8_NOP7 K8_NOP8); | ||
| 1389 | asm("\t.data\nk7nops: " | ||
| 1390 | K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 | ||
| 1391 | K7_NOP7 K7_NOP8); | ||
| 1392 | |||
| 1393 | extern unsigned char intelnops[], k8nops[], k7nops[]; | ||
| 1394 | static unsigned char *intel_nops[ASM_NOP_MAX+1] = { | ||
| 1395 | NULL, | ||
| 1396 | intelnops, | ||
| 1397 | intelnops + 1, | ||
| 1398 | intelnops + 1 + 2, | ||
| 1399 | intelnops + 1 + 2 + 3, | ||
| 1400 | intelnops + 1 + 2 + 3 + 4, | ||
| 1401 | intelnops + 1 + 2 + 3 + 4 + 5, | ||
| 1402 | intelnops + 1 + 2 + 3 + 4 + 5 + 6, | ||
| 1403 | intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | ||
| 1404 | }; | ||
| 1405 | static unsigned char *k8_nops[ASM_NOP_MAX+1] = { | ||
| 1406 | NULL, | ||
| 1407 | k8nops, | ||
| 1408 | k8nops + 1, | ||
| 1409 | k8nops + 1 + 2, | ||
| 1410 | k8nops + 1 + 2 + 3, | ||
| 1411 | k8nops + 1 + 2 + 3 + 4, | ||
| 1412 | k8nops + 1 + 2 + 3 + 4 + 5, | ||
| 1413 | k8nops + 1 + 2 + 3 + 4 + 5 + 6, | ||
| 1414 | k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | ||
| 1415 | }; | ||
| 1416 | static unsigned char *k7_nops[ASM_NOP_MAX+1] = { | ||
| 1417 | NULL, | ||
| 1418 | k7nops, | ||
| 1419 | k7nops + 1, | ||
| 1420 | k7nops + 1 + 2, | ||
| 1421 | k7nops + 1 + 2 + 3, | ||
| 1422 | k7nops + 1 + 2 + 3 + 4, | ||
| 1423 | k7nops + 1 + 2 + 3 + 4 + 5, | ||
| 1424 | k7nops + 1 + 2 + 3 + 4 + 5 + 6, | ||
| 1425 | k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | ||
| 1426 | }; | ||
| 1427 | static struct nop { | ||
| 1428 | int cpuid; | ||
| 1429 | unsigned char **noptable; | ||
| 1430 | } noptypes[] = { | ||
| 1431 | { X86_FEATURE_K8, k8_nops }, | ||
| 1432 | { X86_FEATURE_K7, k7_nops }, | ||
| 1433 | { -1, NULL } | ||
| 1434 | }; | ||
| 1435 | |||
| 1436 | /* Replace instructions with better alternatives for this CPU type. | ||
| 1437 | |||
| 1438 | This runs before SMP is initialized to avoid SMP problems with | ||
| 1439 | self modifying code. This implies that assymetric systems where | ||
| 1440 | APs have less capabilities than the boot processor are not handled. | ||
| 1441 | Tough. Make sure you disable such features by hand. */ | ||
| 1442 | void apply_alternatives(void *start, void *end) | ||
| 1443 | { | ||
| 1444 | struct alt_instr *a; | ||
| 1445 | int diff, i, k; | ||
| 1446 | unsigned char **noptable = intel_nops; | ||
| 1447 | for (i = 0; noptypes[i].cpuid >= 0; i++) { | ||
| 1448 | if (boot_cpu_has(noptypes[i].cpuid)) { | ||
| 1449 | noptable = noptypes[i].noptable; | ||
| 1450 | break; | ||
| 1451 | } | ||
| 1452 | } | ||
| 1453 | for (a = start; (void *)a < end; a++) { | ||
| 1454 | if (!boot_cpu_has(a->cpuid)) | ||
| 1455 | continue; | ||
| 1456 | BUG_ON(a->replacementlen > a->instrlen); | ||
| 1457 | memcpy(a->instr, a->replacement, a->replacementlen); | ||
| 1458 | diff = a->instrlen - a->replacementlen; | ||
| 1459 | /* Pad the rest with nops */ | ||
| 1460 | for (i = a->replacementlen; diff > 0; diff -= k, i += k) { | ||
| 1461 | k = diff; | ||
| 1462 | if (k > ASM_NOP_MAX) | ||
| 1463 | k = ASM_NOP_MAX; | ||
| 1464 | memcpy(a->instr + i, noptable[k], k); | ||
| 1465 | } | ||
| 1466 | } | ||
| 1467 | } | ||
| 1468 | |||
| 1469 | void __init alternative_instructions(void) | ||
| 1470 | { | ||
| 1471 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; | ||
| 1472 | apply_alternatives(__alt_instructions, __alt_instructions_end); | ||
| 1473 | } | ||
| 1474 | |||
| 1475 | static char * __init machine_specific_memory_setup(void); | 1380 | static char * __init machine_specific_memory_setup(void); |
| 1476 | 1381 | ||
| 1477 | #ifdef CONFIG_MCA | 1382 | #ifdef CONFIG_MCA |
| @@ -1554,6 +1459,16 @@ void __init setup_arch(char **cmdline_p) | |||
| 1554 | 1459 | ||
| 1555 | parse_cmdline_early(cmdline_p); | 1460 | parse_cmdline_early(cmdline_p); |
| 1556 | 1461 | ||
| 1462 | #ifdef CONFIG_EARLY_PRINTK | ||
| 1463 | { | ||
| 1464 | char *s = strstr(*cmdline_p, "earlyprintk="); | ||
| 1465 | if (s) { | ||
| 1466 | setup_early_printk(strchr(s, '=') + 1); | ||
| 1467 | printk("early console enabled\n"); | ||
| 1468 | } | ||
| 1469 | } | ||
| 1470 | #endif | ||
| 1471 | |||
| 1557 | max_low_pfn = setup_memory(); | 1472 | max_low_pfn = setup_memory(); |
| 1558 | 1473 | ||
| 1559 | /* | 1474 | /* |
| @@ -1578,19 +1493,6 @@ void __init setup_arch(char **cmdline_p) | |||
| 1578 | * NOTE: at this point the bootmem allocator is fully available. | 1493 | * NOTE: at this point the bootmem allocator is fully available. |
| 1579 | */ | 1494 | */ |
| 1580 | 1495 | ||
| 1581 | #ifdef CONFIG_EARLY_PRINTK | ||
| 1582 | { | ||
| 1583 | char *s = strstr(*cmdline_p, "earlyprintk="); | ||
| 1584 | if (s) { | ||
| 1585 | extern void setup_early_printk(char *); | ||
| 1586 | |||
| 1587 | setup_early_printk(strchr(s, '=') + 1); | ||
| 1588 | printk("early console enabled\n"); | ||
| 1589 | } | ||
| 1590 | } | ||
| 1591 | #endif | ||
| 1592 | |||
| 1593 | |||
| 1594 | dmi_scan_machine(); | 1496 | dmi_scan_machine(); |
| 1595 | 1497 | ||
| 1596 | #ifdef CONFIG_X86_GENERICARCH | 1498 | #ifdef CONFIG_X86_GENERICARCH |
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c index 963616d364ec..5c352c3a9e7f 100644 --- a/arch/i386/kernel/signal.c +++ b/arch/i386/kernel/signal.c | |||
| @@ -123,7 +123,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax | |||
| 123 | err |= __get_user(tmp, &sc->seg); \ | 123 | err |= __get_user(tmp, &sc->seg); \ |
| 124 | loadsegment(seg,tmp); } | 124 | loadsegment(seg,tmp); } |
| 125 | 125 | ||
| 126 | #define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \ | 126 | #define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_RF | \ |
| 127 | X86_EFLAGS_OF | X86_EFLAGS_DF | \ | ||
| 127 | X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \ | 128 | X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \ |
| 128 | X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF) | 129 | X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF) |
| 129 | 130 | ||
| @@ -582,9 +583,6 @@ static void fastcall do_signal(struct pt_regs *regs) | |||
| 582 | if (!user_mode(regs)) | 583 | if (!user_mode(regs)) |
| 583 | return; | 584 | return; |
| 584 | 585 | ||
| 585 | if (try_to_freeze()) | ||
| 586 | goto no_signal; | ||
| 587 | |||
| 588 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 586 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
| 589 | oldset = ¤t->saved_sigmask; | 587 | oldset = ¤t->saved_sigmask; |
| 590 | else | 588 | else |
| @@ -613,7 +611,6 @@ static void fastcall do_signal(struct pt_regs *regs) | |||
| 613 | return; | 611 | return; |
| 614 | } | 612 | } |
| 615 | 613 | ||
| 616 | no_signal: | ||
| 617 | /* Did we come from a system call? */ | 614 | /* Did we come from a system call? */ |
| 618 | if (regs->orig_eax >= 0) { | 615 | if (regs->orig_eax >= 0) { |
| 619 | /* Restart the system call - no handlers present */ | 616 | /* Restart the system call - no handlers present */ |
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index 7007e1783797..4c470e99a742 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c | |||
| @@ -899,6 +899,7 @@ static int __devinit do_boot_cpu(int apicid, int cpu) | |||
| 899 | unsigned short nmi_high = 0, nmi_low = 0; | 899 | unsigned short nmi_high = 0, nmi_low = 0; |
| 900 | 900 | ||
| 901 | ++cpucount; | 901 | ++cpucount; |
| 902 | alternatives_smp_switch(1); | ||
| 902 | 903 | ||
| 903 | /* | 904 | /* |
| 904 | * We can't use kernel_thread since we must avoid to | 905 | * We can't use kernel_thread since we must avoid to |
| @@ -1368,6 +1369,8 @@ void __cpu_die(unsigned int cpu) | |||
| 1368 | /* They ack this in play_dead by setting CPU_DEAD */ | 1369 | /* They ack this in play_dead by setting CPU_DEAD */ |
| 1369 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { | 1370 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { |
| 1370 | printk ("CPU %d is now offline\n", cpu); | 1371 | printk ("CPU %d is now offline\n", cpu); |
| 1372 | if (1 == num_online_cpus()) | ||
| 1373 | alternatives_smp_switch(0); | ||
| 1371 | return; | 1374 | return; |
| 1372 | } | 1375 | } |
| 1373 | msleep(100); | 1376 | msleep(100); |
diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c index 67a0e1baa28b..296355292c7c 100644 --- a/arch/i386/kernel/topology.c +++ b/arch/i386/kernel/topology.c | |||
| @@ -41,6 +41,15 @@ int arch_register_cpu(int num){ | |||
| 41 | parent = &node_devices[node].node; | 41 | parent = &node_devices[node].node; |
| 42 | #endif /* CONFIG_NUMA */ | 42 | #endif /* CONFIG_NUMA */ |
| 43 | 43 | ||
| 44 | /* | ||
| 45 | * CPU0 cannot be offlined due to several | ||
| 46 | * restrictions and assumptions in kernel. This basically | ||
| 47 | * doesnt add a control file, one cannot attempt to offline | ||
| 48 | * BSP. | ||
| 49 | */ | ||
| 50 | if (!num) | ||
| 51 | cpu_devices[num].cpu.no_control = 1; | ||
| 52 | |||
| 44 | return register_cpu(&cpu_devices[num].cpu, num, parent); | 53 | return register_cpu(&cpu_devices[num].cpu, num, parent); |
| 45 | } | 54 | } |
| 46 | 55 | ||
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index b814dbdcc91e..de5386b01d38 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c | |||
| @@ -99,6 +99,8 @@ int register_die_notifier(struct notifier_block *nb) | |||
| 99 | { | 99 | { |
| 100 | int err = 0; | 100 | int err = 0; |
| 101 | unsigned long flags; | 101 | unsigned long flags; |
| 102 | |||
| 103 | vmalloc_sync_all(); | ||
| 102 | spin_lock_irqsave(&die_notifier_lock, flags); | 104 | spin_lock_irqsave(&die_notifier_lock, flags); |
| 103 | err = notifier_chain_register(&i386die_chain, nb); | 105 | err = notifier_chain_register(&i386die_chain, nb); |
| 104 | spin_unlock_irqrestore(&die_notifier_lock, flags); | 106 | spin_unlock_irqrestore(&die_notifier_lock, flags); |
| @@ -112,12 +114,30 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) | |||
| 112 | p < (void *)tinfo + THREAD_SIZE - 3; | 114 | p < (void *)tinfo + THREAD_SIZE - 3; |
| 113 | } | 115 | } |
| 114 | 116 | ||
| 115 | static void print_addr_and_symbol(unsigned long addr, char *log_lvl) | 117 | /* |
| 118 | * Print CONFIG_STACK_BACKTRACE_COLS address/symbol entries per line. | ||
| 119 | */ | ||
| 120 | static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl, | ||
| 121 | int printed) | ||
| 116 | { | 122 | { |
| 117 | printk(log_lvl); | 123 | if (!printed) |
| 124 | printk(log_lvl); | ||
| 125 | |||
| 126 | #if CONFIG_STACK_BACKTRACE_COLS == 1 | ||
| 118 | printk(" [<%08lx>] ", addr); | 127 | printk(" [<%08lx>] ", addr); |
| 128 | #else | ||
| 129 | printk(" <%08lx> ", addr); | ||
| 130 | #endif | ||
| 119 | print_symbol("%s", addr); | 131 | print_symbol("%s", addr); |
| 120 | printk("\n"); | 132 | |
| 133 | printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS; | ||
| 134 | |||
| 135 | if (printed) | ||
| 136 | printk(" "); | ||
| 137 | else | ||
| 138 | printk("\n"); | ||
| 139 | |||
| 140 | return printed; | ||
| 121 | } | 141 | } |
| 122 | 142 | ||
| 123 | static inline unsigned long print_context_stack(struct thread_info *tinfo, | 143 | static inline unsigned long print_context_stack(struct thread_info *tinfo, |
| @@ -125,20 +145,24 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, | |||
| 125 | char *log_lvl) | 145 | char *log_lvl) |
| 126 | { | 146 | { |
| 127 | unsigned long addr; | 147 | unsigned long addr; |
| 148 | int printed = 0; /* nr of entries already printed on current line */ | ||
| 128 | 149 | ||
| 129 | #ifdef CONFIG_FRAME_POINTER | 150 | #ifdef CONFIG_FRAME_POINTER |
| 130 | while (valid_stack_ptr(tinfo, (void *)ebp)) { | 151 | while (valid_stack_ptr(tinfo, (void *)ebp)) { |
| 131 | addr = *(unsigned long *)(ebp + 4); | 152 | addr = *(unsigned long *)(ebp + 4); |
| 132 | print_addr_and_symbol(addr, log_lvl); | 153 | printed = print_addr_and_symbol(addr, log_lvl, printed); |
| 133 | ebp = *(unsigned long *)ebp; | 154 | ebp = *(unsigned long *)ebp; |
| 134 | } | 155 | } |
| 135 | #else | 156 | #else |
| 136 | while (valid_stack_ptr(tinfo, stack)) { | 157 | while (valid_stack_ptr(tinfo, stack)) { |
| 137 | addr = *stack++; | 158 | addr = *stack++; |
| 138 | if (__kernel_text_address(addr)) | 159 | if (__kernel_text_address(addr)) |
| 139 | print_addr_and_symbol(addr, log_lvl); | 160 | printed = print_addr_and_symbol(addr, log_lvl, printed); |
| 140 | } | 161 | } |
| 141 | #endif | 162 | #endif |
| 163 | if (printed) | ||
| 164 | printk("\n"); | ||
| 165 | |||
| 142 | return ebp; | 166 | return ebp; |
| 143 | } | 167 | } |
| 144 | 168 | ||
| @@ -166,8 +190,7 @@ static void show_trace_log_lvl(struct task_struct *task, | |||
| 166 | stack = (unsigned long*)context->previous_esp; | 190 | stack = (unsigned long*)context->previous_esp; |
| 167 | if (!stack) | 191 | if (!stack) |
| 168 | break; | 192 | break; |
| 169 | printk(log_lvl); | 193 | printk("%s =======================\n", log_lvl); |
| 170 | printk(" =======================\n"); | ||
| 171 | } | 194 | } |
| 172 | } | 195 | } |
| 173 | 196 | ||
| @@ -194,21 +217,17 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp, | |||
| 194 | for(i = 0; i < kstack_depth_to_print; i++) { | 217 | for(i = 0; i < kstack_depth_to_print; i++) { |
| 195 | if (kstack_end(stack)) | 218 | if (kstack_end(stack)) |
| 196 | break; | 219 | break; |
| 197 | if (i && ((i % 8) == 0)) { | 220 | if (i && ((i % 8) == 0)) |
| 198 | printk("\n"); | 221 | printk("\n%s ", log_lvl); |
| 199 | printk(log_lvl); | ||
| 200 | printk(" "); | ||
| 201 | } | ||
| 202 | printk("%08lx ", *stack++); | 222 | printk("%08lx ", *stack++); |
| 203 | } | 223 | } |
| 204 | printk("\n"); | 224 | printk("\n%sCall Trace:\n", log_lvl); |
| 205 | printk(log_lvl); | ||
| 206 | printk("Call Trace:\n"); | ||
| 207 | show_trace_log_lvl(task, esp, log_lvl); | 225 | show_trace_log_lvl(task, esp, log_lvl); |
| 208 | } | 226 | } |
| 209 | 227 | ||
| 210 | void show_stack(struct task_struct *task, unsigned long *esp) | 228 | void show_stack(struct task_struct *task, unsigned long *esp) |
| 211 | { | 229 | { |
| 230 | printk(" "); | ||
| 212 | show_stack_log_lvl(task, esp, ""); | 231 | show_stack_log_lvl(task, esp, ""); |
| 213 | } | 232 | } |
| 214 | 233 | ||
| @@ -233,7 +252,7 @@ void show_registers(struct pt_regs *regs) | |||
| 233 | 252 | ||
| 234 | esp = (unsigned long) (®s->esp); | 253 | esp = (unsigned long) (®s->esp); |
| 235 | savesegment(ss, ss); | 254 | savesegment(ss, ss); |
| 236 | if (user_mode(regs)) { | 255 | if (user_mode_vm(regs)) { |
| 237 | in_kernel = 0; | 256 | in_kernel = 0; |
| 238 | esp = regs->esp; | 257 | esp = regs->esp; |
| 239 | ss = regs->xss & 0xffff; | 258 | ss = regs->xss & 0xffff; |
| @@ -333,6 +352,8 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
| 333 | static int die_counter; | 352 | static int die_counter; |
| 334 | unsigned long flags; | 353 | unsigned long flags; |
| 335 | 354 | ||
| 355 | oops_enter(); | ||
| 356 | |||
| 336 | if (die.lock_owner != raw_smp_processor_id()) { | 357 | if (die.lock_owner != raw_smp_processor_id()) { |
| 337 | console_verbose(); | 358 | console_verbose(); |
| 338 | spin_lock_irqsave(&die.lock, flags); | 359 | spin_lock_irqsave(&die.lock, flags); |
| @@ -385,6 +406,7 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
| 385 | ssleep(5); | 406 | ssleep(5); |
| 386 | panic("Fatal exception"); | 407 | panic("Fatal exception"); |
| 387 | } | 408 | } |
| 409 | oops_exit(); | ||
| 388 | do_exit(SIGSEGV); | 410 | do_exit(SIGSEGV); |
| 389 | } | 411 | } |
| 390 | 412 | ||
| @@ -623,7 +645,7 @@ void die_nmi (struct pt_regs *regs, const char *msg) | |||
| 623 | /* If we are in kernel we are probably nested up pretty bad | 645 | /* If we are in kernel we are probably nested up pretty bad |
| 624 | * and might aswell get out now while we still can. | 646 | * and might aswell get out now while we still can. |
| 625 | */ | 647 | */ |
| 626 | if (!user_mode(regs)) { | 648 | if (!user_mode_vm(regs)) { |
| 627 | current->thread.trap_no = 2; | 649 | current->thread.trap_no = 2; |
| 628 | crash_kexec(regs); | 650 | crash_kexec(regs); |
| 629 | } | 651 | } |
| @@ -694,6 +716,7 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code) | |||
| 694 | 716 | ||
| 695 | void set_nmi_callback(nmi_callback_t callback) | 717 | void set_nmi_callback(nmi_callback_t callback) |
| 696 | { | 718 | { |
| 719 | vmalloc_sync_all(); | ||
| 697 | rcu_assign_pointer(nmi_callback, callback); | 720 | rcu_assign_pointer(nmi_callback, callback); |
| 698 | } | 721 | } |
| 699 | EXPORT_SYMBOL_GPL(set_nmi_callback); | 722 | EXPORT_SYMBOL_GPL(set_nmi_callback); |
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S index 4710195b6b74..3f21c6f6466d 100644 --- a/arch/i386/kernel/vmlinux.lds.S +++ b/arch/i386/kernel/vmlinux.lds.S | |||
| @@ -68,6 +68,26 @@ SECTIONS | |||
| 68 | *(.data.init_task) | 68 | *(.data.init_task) |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | /* might get freed after init */ | ||
| 72 | . = ALIGN(4096); | ||
| 73 | __smp_alt_begin = .; | ||
| 74 | __smp_alt_instructions = .; | ||
| 75 | .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) { | ||
| 76 | *(.smp_altinstructions) | ||
| 77 | } | ||
| 78 | __smp_alt_instructions_end = .; | ||
| 79 | . = ALIGN(4); | ||
| 80 | __smp_locks = .; | ||
| 81 | .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { | ||
| 82 | *(.smp_locks) | ||
| 83 | } | ||
| 84 | __smp_locks_end = .; | ||
| 85 | .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) { | ||
| 86 | *(.smp_altinstr_replacement) | ||
| 87 | } | ||
| 88 | . = ALIGN(4096); | ||
| 89 | __smp_alt_end = .; | ||
| 90 | |||
| 71 | /* will be freed after init */ | 91 | /* will be freed after init */ |
| 72 | . = ALIGN(4096); /* Init code and data */ | 92 | . = ALIGN(4096); /* Init code and data */ |
| 73 | __init_begin = .; | 93 | __init_begin = .; |
diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S index 76b728159403..3b62baa6a371 100644 --- a/arch/i386/kernel/vsyscall-sysenter.S +++ b/arch/i386/kernel/vsyscall-sysenter.S | |||
| @@ -21,6 +21,9 @@ | |||
| 21 | * instruction clobbers %esp, the user's %esp won't even survive entry | 21 | * instruction clobbers %esp, the user's %esp won't even survive entry |
| 22 | * into the kernel. We store %esp in %ebp. Code in entry.S must fetch | 22 | * into the kernel. We store %esp in %ebp. Code in entry.S must fetch |
| 23 | * arg6 from the stack. | 23 | * arg6 from the stack. |
| 24 | * | ||
| 25 | * You can not use this vsyscall for the clone() syscall because the | ||
| 26 | * three dwords on the parent stack do not get copied to the child. | ||
| 24 | */ | 27 | */ |
| 25 | .text | 28 | .text |
| 26 | .globl __kernel_vsyscall | 29 | .globl __kernel_vsyscall |
diff --git a/arch/i386/mach-es7000/es7000.h b/arch/i386/mach-es7000/es7000.h index f1e3204f5dec..80566ca4a80a 100644 --- a/arch/i386/mach-es7000/es7000.h +++ b/arch/i386/mach-es7000/es7000.h | |||
| @@ -83,6 +83,7 @@ struct es7000_oem_table { | |||
| 83 | struct psai psai; | 83 | struct psai psai; |
| 84 | }; | 84 | }; |
| 85 | 85 | ||
| 86 | #ifdef CONFIG_ACPI | ||
| 86 | struct acpi_table_sdt { | 87 | struct acpi_table_sdt { |
| 87 | unsigned long pa; | 88 | unsigned long pa; |
| 88 | unsigned long count; | 89 | unsigned long count; |
| @@ -99,6 +100,9 @@ struct oem_table { | |||
| 99 | u32 OEMTableSize; | 100 | u32 OEMTableSize; |
| 100 | }; | 101 | }; |
| 101 | 102 | ||
| 103 | extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); | ||
| 104 | #endif | ||
| 105 | |||
| 102 | struct mip_reg { | 106 | struct mip_reg { |
| 103 | unsigned long long off_0; | 107 | unsigned long long off_0; |
| 104 | unsigned long long off_8; | 108 | unsigned long long off_8; |
| @@ -114,7 +118,6 @@ struct mip_reg { | |||
| 114 | #define MIP_FUNC(VALUE) (VALUE & 0xff) | 118 | #define MIP_FUNC(VALUE) (VALUE & 0xff) |
| 115 | 119 | ||
| 116 | extern int parse_unisys_oem (char *oemptr); | 120 | extern int parse_unisys_oem (char *oemptr); |
| 117 | extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); | ||
| 118 | extern void setup_unisys(void); | 121 | extern void setup_unisys(void); |
| 119 | extern int es7000_start_cpu(int cpu, unsigned long eip); | 122 | extern int es7000_start_cpu(int cpu, unsigned long eip); |
| 120 | extern void es7000_sw_apic(void); | 123 | extern void es7000_sw_apic(void); |
diff --git a/arch/i386/mach-es7000/es7000plat.c b/arch/i386/mach-es7000/es7000plat.c index a9ab0644f403..3d0fc853516d 100644 --- a/arch/i386/mach-es7000/es7000plat.c +++ b/arch/i386/mach-es7000/es7000plat.c | |||
| @@ -51,8 +51,6 @@ struct mip_reg *host_reg; | |||
| 51 | int mip_port; | 51 | int mip_port; |
| 52 | unsigned long mip_addr, host_addr; | 52 | unsigned long mip_addr, host_addr; |
| 53 | 53 | ||
| 54 | #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI) | ||
| 55 | |||
| 56 | /* | 54 | /* |
| 57 | * GSI override for ES7000 platforms. | 55 | * GSI override for ES7000 platforms. |
| 58 | */ | 56 | */ |
| @@ -76,8 +74,6 @@ es7000_rename_gsi(int ioapic, int gsi) | |||
| 76 | return gsi; | 74 | return gsi; |
| 77 | } | 75 | } |
| 78 | 76 | ||
| 79 | #endif /* (CONFIG_X86_IO_APIC) && (CONFIG_ACPI) */ | ||
| 80 | |||
| 81 | void __init | 77 | void __init |
| 82 | setup_unisys(void) | 78 | setup_unisys(void) |
| 83 | { | 79 | { |
| @@ -160,6 +156,7 @@ parse_unisys_oem (char *oemptr) | |||
| 160 | return es7000_plat; | 156 | return es7000_plat; |
| 161 | } | 157 | } |
| 162 | 158 | ||
| 159 | #ifdef CONFIG_ACPI | ||
| 163 | int __init | 160 | int __init |
| 164 | find_unisys_acpi_oem_table(unsigned long *oem_addr) | 161 | find_unisys_acpi_oem_table(unsigned long *oem_addr) |
| 165 | { | 162 | { |
| @@ -212,6 +209,7 @@ find_unisys_acpi_oem_table(unsigned long *oem_addr) | |||
| 212 | } | 209 | } |
| 213 | return -1; | 210 | return -1; |
| 214 | } | 211 | } |
| 212 | #endif | ||
| 215 | 213 | ||
| 216 | static void | 214 | static void |
| 217 | es7000_spin(int n) | 215 | es7000_spin(int n) |
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c index cf572d9a3b6e..7f0fcf219a26 100644 --- a/arch/i386/mm/fault.c +++ b/arch/i386/mm/fault.c | |||
| @@ -214,6 +214,68 @@ static noinline void force_sig_info_fault(int si_signo, int si_code, | |||
| 214 | 214 | ||
| 215 | fastcall void do_invalid_op(struct pt_regs *, unsigned long); | 215 | fastcall void do_invalid_op(struct pt_regs *, unsigned long); |
| 216 | 216 | ||
| 217 | static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) | ||
| 218 | { | ||
| 219 | unsigned index = pgd_index(address); | ||
| 220 | pgd_t *pgd_k; | ||
| 221 | pud_t *pud, *pud_k; | ||
| 222 | pmd_t *pmd, *pmd_k; | ||
| 223 | |||
| 224 | pgd += index; | ||
| 225 | pgd_k = init_mm.pgd + index; | ||
| 226 | |||
| 227 | if (!pgd_present(*pgd_k)) | ||
| 228 | return NULL; | ||
| 229 | |||
| 230 | /* | ||
| 231 | * set_pgd(pgd, *pgd_k); here would be useless on PAE | ||
| 232 | * and redundant with the set_pmd() on non-PAE. As would | ||
| 233 | * set_pud. | ||
| 234 | */ | ||
| 235 | |||
| 236 | pud = pud_offset(pgd, address); | ||
| 237 | pud_k = pud_offset(pgd_k, address); | ||
| 238 | if (!pud_present(*pud_k)) | ||
| 239 | return NULL; | ||
| 240 | |||
| 241 | pmd = pmd_offset(pud, address); | ||
| 242 | pmd_k = pmd_offset(pud_k, address); | ||
| 243 | if (!pmd_present(*pmd_k)) | ||
| 244 | return NULL; | ||
| 245 | if (!pmd_present(*pmd)) | ||
| 246 | set_pmd(pmd, *pmd_k); | ||
| 247 | else | ||
| 248 | BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); | ||
| 249 | return pmd_k; | ||
| 250 | } | ||
| 251 | |||
| 252 | /* | ||
| 253 | * Handle a fault on the vmalloc or module mapping area | ||
| 254 | * | ||
| 255 | * This assumes no large pages in there. | ||
| 256 | */ | ||
| 257 | static inline int vmalloc_fault(unsigned long address) | ||
| 258 | { | ||
| 259 | unsigned long pgd_paddr; | ||
| 260 | pmd_t *pmd_k; | ||
| 261 | pte_t *pte_k; | ||
| 262 | /* | ||
| 263 | * Synchronize this task's top level page-table | ||
| 264 | * with the 'reference' page table. | ||
| 265 | * | ||
| 266 | * Do _not_ use "current" here. We might be inside | ||
| 267 | * an interrupt in the middle of a task switch.. | ||
| 268 | */ | ||
| 269 | pgd_paddr = read_cr3(); | ||
| 270 | pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); | ||
| 271 | if (!pmd_k) | ||
| 272 | return -1; | ||
| 273 | pte_k = pte_offset_kernel(pmd_k, address); | ||
| 274 | if (!pte_present(*pte_k)) | ||
| 275 | return -1; | ||
| 276 | return 0; | ||
| 277 | } | ||
| 278 | |||
| 217 | /* | 279 | /* |
| 218 | * This routine handles page faults. It determines the address, | 280 | * This routine handles page faults. It determines the address, |
| 219 | * and the problem, and then passes it off to one of the appropriate | 281 | * and the problem, and then passes it off to one of the appropriate |
| @@ -223,6 +285,8 @@ fastcall void do_invalid_op(struct pt_regs *, unsigned long); | |||
| 223 | * bit 0 == 0 means no page found, 1 means protection fault | 285 | * bit 0 == 0 means no page found, 1 means protection fault |
| 224 | * bit 1 == 0 means read, 1 means write | 286 | * bit 1 == 0 means read, 1 means write |
| 225 | * bit 2 == 0 means kernel, 1 means user-mode | 287 | * bit 2 == 0 means kernel, 1 means user-mode |
| 288 | * bit 3 == 1 means use of reserved bit detected | ||
| 289 | * bit 4 == 1 means fault was an instruction fetch | ||
| 226 | */ | 290 | */ |
| 227 | fastcall void __kprobes do_page_fault(struct pt_regs *regs, | 291 | fastcall void __kprobes do_page_fault(struct pt_regs *regs, |
| 228 | unsigned long error_code) | 292 | unsigned long error_code) |
| @@ -237,13 +301,6 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, | |||
| 237 | /* get the address */ | 301 | /* get the address */ |
| 238 | address = read_cr2(); | 302 | address = read_cr2(); |
| 239 | 303 | ||
| 240 | if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, | ||
| 241 | SIGSEGV) == NOTIFY_STOP) | ||
| 242 | return; | ||
| 243 | /* It's safe to allow irq's after cr2 has been saved */ | ||
| 244 | if (regs->eflags & (X86_EFLAGS_IF|VM_MASK)) | ||
| 245 | local_irq_enable(); | ||
| 246 | |||
| 247 | tsk = current; | 304 | tsk = current; |
| 248 | 305 | ||
| 249 | si_code = SEGV_MAPERR; | 306 | si_code = SEGV_MAPERR; |
| @@ -259,17 +316,29 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, | |||
| 259 | * | 316 | * |
| 260 | * This verifies that the fault happens in kernel space | 317 | * This verifies that the fault happens in kernel space |
| 261 | * (error_code & 4) == 0, and that the fault was not a | 318 | * (error_code & 4) == 0, and that the fault was not a |
| 262 | * protection error (error_code & 1) == 0. | 319 | * protection error (error_code & 9) == 0. |
| 263 | */ | 320 | */ |
| 264 | if (unlikely(address >= TASK_SIZE)) { | 321 | if (unlikely(address >= TASK_SIZE)) { |
| 265 | if (!(error_code & 5)) | 322 | if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0) |
| 266 | goto vmalloc_fault; | 323 | return; |
| 267 | /* | 324 | if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, |
| 325 | SIGSEGV) == NOTIFY_STOP) | ||
| 326 | return; | ||
| 327 | /* | ||
| 268 | * Don't take the mm semaphore here. If we fixup a prefetch | 328 | * Don't take the mm semaphore here. If we fixup a prefetch |
| 269 | * fault we could otherwise deadlock. | 329 | * fault we could otherwise deadlock. |
| 270 | */ | 330 | */ |
| 271 | goto bad_area_nosemaphore; | 331 | goto bad_area_nosemaphore; |
| 272 | } | 332 | } |
| 333 | |||
| 334 | if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, | ||
| 335 | SIGSEGV) == NOTIFY_STOP) | ||
| 336 | return; | ||
| 337 | |||
| 338 | /* It's safe to allow irq's after cr2 has been saved and the vmalloc | ||
| 339 | fault has been handled. */ | ||
| 340 | if (regs->eflags & (X86_EFLAGS_IF|VM_MASK)) | ||
| 341 | local_irq_enable(); | ||
| 273 | 342 | ||
| 274 | mm = tsk->mm; | 343 | mm = tsk->mm; |
| 275 | 344 | ||
| @@ -440,24 +509,31 @@ no_context: | |||
| 440 | 509 | ||
| 441 | bust_spinlocks(1); | 510 | bust_spinlocks(1); |
| 442 | 511 | ||
| 443 | #ifdef CONFIG_X86_PAE | 512 | if (oops_may_print()) { |
| 444 | if (error_code & 16) { | 513 | #ifdef CONFIG_X86_PAE |
| 445 | pte_t *pte = lookup_address(address); | 514 | if (error_code & 16) { |
| 515 | pte_t *pte = lookup_address(address); | ||
| 446 | 516 | ||
| 447 | if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) | 517 | if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) |
| 448 | printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid); | 518 | printk(KERN_CRIT "kernel tried to execute " |
| 519 | "NX-protected page - exploit attempt? " | ||
| 520 | "(uid: %d)\n", current->uid); | ||
| 521 | } | ||
| 522 | #endif | ||
| 523 | if (address < PAGE_SIZE) | ||
| 524 | printk(KERN_ALERT "BUG: unable to handle kernel NULL " | ||
| 525 | "pointer dereference"); | ||
| 526 | else | ||
| 527 | printk(KERN_ALERT "BUG: unable to handle kernel paging" | ||
| 528 | " request"); | ||
| 529 | printk(" at virtual address %08lx\n",address); | ||
| 530 | printk(KERN_ALERT " printing eip:\n"); | ||
| 531 | printk("%08lx\n", regs->eip); | ||
| 449 | } | 532 | } |
| 450 | #endif | ||
| 451 | if (address < PAGE_SIZE) | ||
| 452 | printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); | ||
| 453 | else | ||
| 454 | printk(KERN_ALERT "Unable to handle kernel paging request"); | ||
| 455 | printk(" at virtual address %08lx\n",address); | ||
| 456 | printk(KERN_ALERT " printing eip:\n"); | ||
| 457 | printk("%08lx\n", regs->eip); | ||
| 458 | page = read_cr3(); | 533 | page = read_cr3(); |
| 459 | page = ((unsigned long *) __va(page))[address >> 22]; | 534 | page = ((unsigned long *) __va(page))[address >> 22]; |
| 460 | printk(KERN_ALERT "*pde = %08lx\n", page); | 535 | if (oops_may_print()) |
| 536 | printk(KERN_ALERT "*pde = %08lx\n", page); | ||
| 461 | /* | 537 | /* |
| 462 | * We must not directly access the pte in the highpte | 538 | * We must not directly access the pte in the highpte |
| 463 | * case, the page table might be allocated in highmem. | 539 | * case, the page table might be allocated in highmem. |
| @@ -465,7 +541,7 @@ no_context: | |||
| 465 | * it's allocated already. | 541 | * it's allocated already. |
| 466 | */ | 542 | */ |
| 467 | #ifndef CONFIG_HIGHPTE | 543 | #ifndef CONFIG_HIGHPTE |
| 468 | if (page & 1) { | 544 | if ((page & 1) && oops_may_print()) { |
| 469 | page &= PAGE_MASK; | 545 | page &= PAGE_MASK; |
| 470 | address &= 0x003ff000; | 546 | address &= 0x003ff000; |
| 471 | page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT]; | 547 | page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT]; |
| @@ -510,51 +586,41 @@ do_sigbus: | |||
| 510 | tsk->thread.error_code = error_code; | 586 | tsk->thread.error_code = error_code; |
| 511 | tsk->thread.trap_no = 14; | 587 | tsk->thread.trap_no = 14; |
| 512 | force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); | 588 | force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); |
| 513 | return; | 589 | } |
| 514 | |||
| 515 | vmalloc_fault: | ||
| 516 | { | ||
| 517 | /* | ||
| 518 | * Synchronize this task's top level page-table | ||
| 519 | * with the 'reference' page table. | ||
| 520 | * | ||
| 521 | * Do _not_ use "tsk" here. We might be inside | ||
| 522 | * an interrupt in the middle of a task switch.. | ||
| 523 | */ | ||
| 524 | int index = pgd_index(address); | ||
| 525 | unsigned long pgd_paddr; | ||
| 526 | pgd_t *pgd, *pgd_k; | ||
| 527 | pud_t *pud, *pud_k; | ||
| 528 | pmd_t *pmd, *pmd_k; | ||
| 529 | pte_t *pte_k; | ||
| 530 | |||
| 531 | pgd_paddr = read_cr3(); | ||
| 532 | pgd = index + (pgd_t *)__va(pgd_paddr); | ||
| 533 | pgd_k = init_mm.pgd + index; | ||
| 534 | |||
| 535 | if (!pgd_present(*pgd_k)) | ||
| 536 | goto no_context; | ||
| 537 | |||
| 538 | /* | ||
| 539 | * set_pgd(pgd, *pgd_k); here would be useless on PAE | ||
| 540 | * and redundant with the set_pmd() on non-PAE. As would | ||
| 541 | * set_pud. | ||
| 542 | */ | ||
| 543 | 590 | ||
| 544 | pud = pud_offset(pgd, address); | 591 | #ifndef CONFIG_X86_PAE |
| 545 | pud_k = pud_offset(pgd_k, address); | 592 | void vmalloc_sync_all(void) |
| 546 | if (!pud_present(*pud_k)) | 593 | { |
| 547 | goto no_context; | 594 | /* |
| 548 | 595 | * Note that races in the updates of insync and start aren't | |
| 549 | pmd = pmd_offset(pud, address); | 596 | * problematic: insync can only get set bits added, and updates to |
| 550 | pmd_k = pmd_offset(pud_k, address); | 597 | * start are only improving performance (without affecting correctness |
| 551 | if (!pmd_present(*pmd_k)) | 598 | * if undone). |
| 552 | goto no_context; | 599 | */ |
| 553 | set_pmd(pmd, *pmd_k); | 600 | static DECLARE_BITMAP(insync, PTRS_PER_PGD); |
| 601 | static unsigned long start = TASK_SIZE; | ||
| 602 | unsigned long address; | ||
| 554 | 603 | ||
| 555 | pte_k = pte_offset_kernel(pmd_k, address); | 604 | BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK); |
| 556 | if (!pte_present(*pte_k)) | 605 | for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) { |
| 557 | goto no_context; | 606 | if (!test_bit(pgd_index(address), insync)) { |
| 558 | return; | 607 | unsigned long flags; |
| 608 | struct page *page; | ||
| 609 | |||
| 610 | spin_lock_irqsave(&pgd_lock, flags); | ||
| 611 | for (page = pgd_list; page; page = | ||
| 612 | (struct page *)page->index) | ||
| 613 | if (!vmalloc_sync_one(page_address(page), | ||
| 614 | address)) { | ||
| 615 | BUG_ON(page != pgd_list); | ||
| 616 | break; | ||
| 617 | } | ||
| 618 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
| 619 | if (!page) | ||
| 620 | set_bit(pgd_index(address), insync); | ||
| 621 | } | ||
| 622 | if (address == start && test_bit(pgd_index(address), insync)) | ||
| 623 | start = address + PGDIR_SIZE; | ||
| 559 | } | 624 | } |
| 560 | } | 625 | } |
| 626 | #endif | ||
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index 7ba55a6e2dbc..9f66ac582a8b 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c | |||
| @@ -720,21 +720,6 @@ static int noinline do_test_wp_bit(void) | |||
| 720 | return flag; | 720 | return flag; |
| 721 | } | 721 | } |
| 722 | 722 | ||
| 723 | void free_initmem(void) | ||
| 724 | { | ||
| 725 | unsigned long addr; | ||
| 726 | |||
| 727 | addr = (unsigned long)(&__init_begin); | ||
| 728 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | ||
| 729 | ClearPageReserved(virt_to_page(addr)); | ||
| 730 | init_page_count(virt_to_page(addr)); | ||
| 731 | memset((void *)addr, 0xcc, PAGE_SIZE); | ||
| 732 | free_page(addr); | ||
| 733 | totalram_pages++; | ||
| 734 | } | ||
| 735 | printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10); | ||
| 736 | } | ||
| 737 | |||
| 738 | #ifdef CONFIG_DEBUG_RODATA | 723 | #ifdef CONFIG_DEBUG_RODATA |
| 739 | 724 | ||
| 740 | extern char __start_rodata, __end_rodata; | 725 | extern char __start_rodata, __end_rodata; |
| @@ -758,17 +743,31 @@ void mark_rodata_ro(void) | |||
| 758 | } | 743 | } |
| 759 | #endif | 744 | #endif |
| 760 | 745 | ||
| 746 | void free_init_pages(char *what, unsigned long begin, unsigned long end) | ||
| 747 | { | ||
| 748 | unsigned long addr; | ||
| 749 | |||
| 750 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | ||
| 751 | ClearPageReserved(virt_to_page(addr)); | ||
| 752 | init_page_count(virt_to_page(addr)); | ||
| 753 | memset((void *)addr, 0xcc, PAGE_SIZE); | ||
| 754 | free_page(addr); | ||
| 755 | totalram_pages++; | ||
| 756 | } | ||
| 757 | printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); | ||
| 758 | } | ||
| 759 | |||
| 760 | void free_initmem(void) | ||
| 761 | { | ||
| 762 | free_init_pages("unused kernel memory", | ||
| 763 | (unsigned long)(&__init_begin), | ||
| 764 | (unsigned long)(&__init_end)); | ||
| 765 | } | ||
| 761 | 766 | ||
| 762 | #ifdef CONFIG_BLK_DEV_INITRD | 767 | #ifdef CONFIG_BLK_DEV_INITRD |
| 763 | void free_initrd_mem(unsigned long start, unsigned long end) | 768 | void free_initrd_mem(unsigned long start, unsigned long end) |
| 764 | { | 769 | { |
| 765 | if (start < end) | 770 | free_init_pages("initrd memory", start, end); |
| 766 | printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | ||
| 767 | for (; start < end; start += PAGE_SIZE) { | ||
| 768 | ClearPageReserved(virt_to_page(start)); | ||
| 769 | init_page_count(virt_to_page(start)); | ||
| 770 | free_page(start); | ||
| 771 | totalram_pages++; | ||
| 772 | } | ||
| 773 | } | 771 | } |
| 774 | #endif | 772 | #endif |
| 773 | |||
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c index 0493e8b8ec49..1accce50c2c7 100644 --- a/arch/i386/oprofile/nmi_int.c +++ b/arch/i386/oprofile/nmi_int.c | |||
| @@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy) | |||
| 122 | static void free_msrs(void) | 122 | static void free_msrs(void) |
| 123 | { | 123 | { |
| 124 | int i; | 124 | int i; |
| 125 | for (i = 0; i < NR_CPUS; ++i) { | 125 | for_each_cpu(i) { |
| 126 | kfree(cpu_msrs[i].counters); | 126 | kfree(cpu_msrs[i].counters); |
| 127 | cpu_msrs[i].counters = NULL; | 127 | cpu_msrs[i].counters = NULL; |
| 128 | kfree(cpu_msrs[i].controls); | 128 | kfree(cpu_msrs[i].controls); |
| @@ -138,10 +138,7 @@ static int allocate_msrs(void) | |||
| 138 | size_t counters_size = sizeof(struct op_msr) * model->num_counters; | 138 | size_t counters_size = sizeof(struct op_msr) * model->num_counters; |
| 139 | 139 | ||
| 140 | int i; | 140 | int i; |
| 141 | for (i = 0; i < NR_CPUS; ++i) { | 141 | for_each_online_cpu(i) { |
| 142 | if (!cpu_online(i)) | ||
| 143 | continue; | ||
| 144 | |||
| 145 | cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL); | 142 | cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL); |
| 146 | if (!cpu_msrs[i].counters) { | 143 | if (!cpu_msrs[i].counters) { |
| 147 | success = 0; | 144 | success = 0; |
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index 626cdc83668b..0e5c6ae50228 100644 --- a/arch/ia64/hp/sim/simserial.c +++ b/arch/ia64/hp/sim/simserial.c | |||
| @@ -46,11 +46,6 @@ | |||
| 46 | #define KEYBOARD_INTR 3 /* must match with simulator! */ | 46 | #define KEYBOARD_INTR 3 /* must match with simulator! */ |
| 47 | 47 | ||
| 48 | #define NR_PORTS 1 /* only one port for now */ | 48 | #define NR_PORTS 1 /* only one port for now */ |
| 49 | #define SERIAL_INLINE 1 | ||
| 50 | |||
| 51 | #ifdef SERIAL_INLINE | ||
| 52 | #define _INLINE_ inline | ||
| 53 | #endif | ||
| 54 | 49 | ||
| 55 | #define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT) | 50 | #define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT) |
| 56 | 51 | ||
| @@ -237,7 +232,7 @@ static void rs_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 237 | local_irq_restore(flags); | 232 | local_irq_restore(flags); |
| 238 | } | 233 | } |
| 239 | 234 | ||
| 240 | static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done) | 235 | static void transmit_chars(struct async_struct *info, int *intr_done) |
| 241 | { | 236 | { |
| 242 | int count; | 237 | int count; |
| 243 | unsigned long flags; | 238 | unsigned long flags; |
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index 1ce63926a3c0..a4634b06f675 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c | |||
| @@ -37,9 +37,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 37 | 37 | ||
| 38 | if (i == 0) { | 38 | if (i == 0) { |
| 39 | seq_printf(p, " "); | 39 | seq_printf(p, " "); |
| 40 | for (j=0; j<NR_CPUS; j++) | 40 | for_each_online_cpu(j) |
| 41 | if (cpu_online(j)) | 41 | seq_printf(p, "CPU%d ",j); |
| 42 | seq_printf(p, "CPU%d ",j); | ||
| 43 | seq_putc(p, '\n'); | 42 | seq_putc(p, '\n'); |
| 44 | } | 43 | } |
| 45 | 44 | ||
| @@ -52,9 +51,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 52 | #ifndef CONFIG_SMP | 51 | #ifndef CONFIG_SMP |
| 53 | seq_printf(p, "%10u ", kstat_irqs(i)); | 52 | seq_printf(p, "%10u ", kstat_irqs(i)); |
| 54 | #else | 53 | #else |
| 55 | for (j = 0; j < NR_CPUS; j++) | 54 | for_each_online_cpu(j) |
| 56 | if (cpu_online(j)) | 55 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); |
| 57 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | ||
| 58 | #endif | 56 | #endif |
| 59 | seq_printf(p, " %14s", irq_desc[i].handler->typename); | 57 | seq_printf(p, " %14s", irq_desc[i].handler->typename); |
| 60 | seq_printf(p, " %s", action->name); | 58 | seq_printf(p, " %s", action->name); |
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c index 703cbc6dc9cc..15c16b62dff5 100644 --- a/arch/m68k/bvme6000/rtc.c +++ b/arch/m68k/bvme6000/rtc.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 19 | #include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */ | 19 | #include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */ |
| 20 | #include <linux/smp_lock.h> | 20 | #include <linux/smp_lock.h> |
| 21 | #include <linux/bcd.h> | ||
| 21 | #include <asm/bvme6000hw.h> | 22 | #include <asm/bvme6000hw.h> |
| 22 | 23 | ||
| 23 | #include <asm/io.h> | 24 | #include <asm/io.h> |
| @@ -32,9 +33,6 @@ | |||
| 32 | * ioctls. | 33 | * ioctls. |
| 33 | */ | 34 | */ |
| 34 | 35 | ||
| 35 | #define BCD2BIN(val) (((val)&15) + ((val)>>4)*10) | ||
| 36 | #define BIN2BCD(val) ((((val)/10)<<4) + (val)%10) | ||
| 37 | |||
| 38 | static unsigned char days_in_mo[] = | 36 | static unsigned char days_in_mo[] = |
| 39 | {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; | 37 | {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; |
| 40 | 38 | ||
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 7d93992e462c..3dd76b3d2967 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c | |||
| @@ -68,9 +68,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 68 | 68 | ||
| 69 | if (i == 0) { | 69 | if (i == 0) { |
| 70 | seq_printf(p, " "); | 70 | seq_printf(p, " "); |
| 71 | for (j=0; j<NR_CPUS; j++) | 71 | for_each_online_cpu(j) |
| 72 | if (cpu_online(j)) | 72 | seq_printf(p, "CPU%d ",j); |
| 73 | seq_printf(p, "CPU%d ",j); | ||
| 74 | seq_putc(p, '\n'); | 73 | seq_putc(p, '\n'); |
| 75 | } | 74 | } |
| 76 | 75 | ||
| @@ -83,9 +82,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 83 | #ifndef CONFIG_SMP | 82 | #ifndef CONFIG_SMP |
| 84 | seq_printf(p, "%10u ", kstat_irqs(i)); | 83 | seq_printf(p, "%10u ", kstat_irqs(i)); |
| 85 | #else | 84 | #else |
| 86 | for (j = 0; j < NR_CPUS; j++) | 85 | for_each_online_cpu(j) |
| 87 | if (cpu_online(j)) | 86 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); |
| 88 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | ||
| 89 | #endif | 87 | #endif |
| 90 | seq_printf(p, " %14s", irq_desc[i].handler->typename); | 88 | seq_printf(p, " %14s", irq_desc[i].handler->typename); |
| 91 | seq_printf(p, " %s", action->name); | 89 | seq_printf(p, " %s", action->name); |
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 06ed90752424..78d171bfa331 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
| @@ -167,8 +167,8 @@ int smp_call_function (void (*func) (void *info), void *info, int retry, | |||
| 167 | mb(); | 167 | mb(); |
| 168 | 168 | ||
| 169 | /* Send a message to all other CPUs and wait for them to respond */ | 169 | /* Send a message to all other CPUs and wait for them to respond */ |
| 170 | for (i = 0; i < NR_CPUS; i++) | 170 | for_each_online_cpu(i) |
| 171 | if (cpu_online(i) && i != cpu) | 171 | if (i != cpu) |
| 172 | core_send_ipi(i, SMP_CALL_FUNCTION); | 172 | core_send_ipi(i, SMP_CALL_FUNCTION); |
| 173 | 173 | ||
| 174 | /* Wait for response */ | 174 | /* Wait for response */ |
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 73e5e52781d8..2854ac4c9be1 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c | |||
| @@ -88,12 +88,9 @@ static inline int find_level(cpuid_t *cpunum, int irq) | |||
| 88 | { | 88 | { |
| 89 | int cpu, i; | 89 | int cpu, i; |
| 90 | 90 | ||
| 91 | for (cpu = 0; cpu <= NR_CPUS; cpu++) { | 91 | for_each_online_cpu(cpu) { |
| 92 | struct slice_data *si = cpu_data[cpu].data; | 92 | struct slice_data *si = cpu_data[cpu].data; |
| 93 | 93 | ||
| 94 | if (!cpu_online(cpu)) | ||
| 95 | continue; | ||
| 96 | |||
| 97 | for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++) | 94 | for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++) |
| 98 | if (si->level_to_irq[i] == irq) { | 95 | if (si->level_to_irq[i] == irq) { |
| 99 | *cpunum = cpu; | 96 | *cpunum = cpu; |
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 25564b7ca6bb..d6ac1c60a471 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c | |||
| @@ -298,8 +298,8 @@ send_IPI_allbutself(enum ipi_message_type op) | |||
| 298 | { | 298 | { |
| 299 | int i; | 299 | int i; |
| 300 | 300 | ||
| 301 | for (i = 0; i < NR_CPUS; i++) { | 301 | for_each_online_cpu(i) { |
| 302 | if (cpu_online(i) && i != smp_processor_id()) | 302 | if (i != smp_processor_id()) |
| 303 | send_IPI_single(i, op); | 303 | send_IPI_single(i, op); |
| 304 | } | 304 | } |
| 305 | } | 305 | } |
| @@ -643,14 +643,13 @@ int sys_cpus(int argc, char **argv) | |||
| 643 | if ( argc == 1 ){ | 643 | if ( argc == 1 ){ |
| 644 | 644 | ||
| 645 | #ifdef DUMP_MORE_STATE | 645 | #ifdef DUMP_MORE_STATE |
| 646 | for(i=0; i<NR_CPUS; i++) { | 646 | for_each_online_cpu(i) { |
| 647 | int cpus_per_line = 4; | 647 | int cpus_per_line = 4; |
| 648 | if(cpu_online(i)) { | 648 | |
| 649 | if (j++ % cpus_per_line) | 649 | if (j++ % cpus_per_line) |
| 650 | printk(" %3d",i); | 650 | printk(" %3d",i); |
| 651 | else | 651 | else |
| 652 | printk("\n %3d",i); | 652 | printk("\n %3d",i); |
| 653 | } | ||
| 654 | } | 653 | } |
| 655 | printk("\n"); | 654 | printk("\n"); |
| 656 | #else | 655 | #else |
| @@ -659,9 +658,7 @@ int sys_cpus(int argc, char **argv) | |||
| 659 | } else if((argc==2) && !(strcmp(argv[1],"-l"))) { | 658 | } else if((argc==2) && !(strcmp(argv[1],"-l"))) { |
| 660 | printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n"); | 659 | printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n"); |
| 661 | #ifdef DUMP_MORE_STATE | 660 | #ifdef DUMP_MORE_STATE |
| 662 | for(i=0;i<NR_CPUS;i++) { | 661 | for_each_online_cpu(i) { |
| 663 | if (!cpu_online(i)) | ||
| 664 | continue; | ||
| 665 | if (cpu_data[i].cpuid != NO_PROC_ID) { | 662 | if (cpu_data[i].cpuid != NO_PROC_ID) { |
| 666 | switch(cpu_data[i].state) { | 663 | switch(cpu_data[i].state) { |
| 667 | case STATE_RENDEZVOUS: | 664 | case STATE_RENDEZVOUS: |
| @@ -695,9 +692,7 @@ int sys_cpus(int argc, char **argv) | |||
| 695 | } else if ((argc==2) && !(strcmp(argv[1],"-s"))) { | 692 | } else if ((argc==2) && !(strcmp(argv[1],"-s"))) { |
| 696 | #ifdef DUMP_MORE_STATE | 693 | #ifdef DUMP_MORE_STATE |
| 697 | printk("\nCPUSTATE CPUID\n"); | 694 | printk("\nCPUSTATE CPUID\n"); |
| 698 | for (i=0;i<NR_CPUS;i++) { | 695 | for_each_online_cpu(i) { |
| 699 | if (!cpu_online(i)) | ||
| 700 | continue; | ||
| 701 | if (cpu_data[i].cpuid != NO_PROC_ID) { | 696 | if (cpu_data[i].cpuid != NO_PROC_ID) { |
| 702 | switch(cpu_data[i].state) { | 697 | switch(cpu_data[i].state) { |
| 703 | case STATE_RENDEZVOUS: | 698 | case STATE_RENDEZVOUS: |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 24dc8117b822..771a59cbd213 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
| @@ -135,9 +135,8 @@ skip: | |||
| 135 | #ifdef CONFIG_TAU_INT | 135 | #ifdef CONFIG_TAU_INT |
| 136 | if (tau_initialized){ | 136 | if (tau_initialized){ |
| 137 | seq_puts(p, "TAU: "); | 137 | seq_puts(p, "TAU: "); |
| 138 | for (j = 0; j < NR_CPUS; j++) | 138 | for_each_online_cpu(j) |
| 139 | if (cpu_online(j)) | 139 | seq_printf(p, "%10u ", tau_interrupts(j)); |
| 140 | seq_printf(p, "%10u ", tau_interrupts(j)); | ||
| 141 | seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); | 140 | seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); |
| 142 | } | 141 | } |
| 143 | #endif | 142 | #endif |
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 258039fb3016..cb1fe5878e8b 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c | |||
| @@ -81,9 +81,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) | |||
| 81 | 81 | ||
| 82 | void __kprobes arch_remove_kprobe(struct kprobe *p) | 82 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
| 83 | { | 83 | { |
| 84 | down(&kprobe_mutex); | 84 | mutex_lock(&kprobe_mutex); |
| 85 | free_insn_slot(p->ainsn.insn); | 85 | free_insn_slot(p->ainsn.insn); |
| 86 | up(&kprobe_mutex); | 86 | mutex_unlock(&kprobe_mutex); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | 89 | static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index be12041c0fc5..c1d62bf11f29 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
| @@ -162,9 +162,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
| 162 | #if defined(CONFIG_SMP) && defined(CONFIG_PPC32) | 162 | #if defined(CONFIG_SMP) && defined(CONFIG_PPC32) |
| 163 | unsigned long bogosum = 0; | 163 | unsigned long bogosum = 0; |
| 164 | int i; | 164 | int i; |
| 165 | for (i = 0; i < NR_CPUS; ++i) | 165 | for_each_online_cpu(i) |
| 166 | if (cpu_online(i)) | 166 | bogosum += loops_per_jiffy; |
| 167 | bogosum += loops_per_jiffy; | ||
| 168 | seq_printf(m, "total bogomips\t: %lu.%02lu\n", | 167 | seq_printf(m, "total bogomips\t: %lu.%02lu\n", |
| 169 | bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); | 168 | bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); |
| 170 | #endif /* CONFIG_SMP && CONFIG_PPC32 */ | 169 | #endif /* CONFIG_SMP && CONFIG_PPC32 */ |
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index db72a92943bf..dc2770df25b3 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
| @@ -272,9 +272,8 @@ int __init ppc_init(void) | |||
| 272 | if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); | 272 | if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); |
| 273 | 273 | ||
| 274 | /* register CPU devices */ | 274 | /* register CPU devices */ |
| 275 | for (i = 0; i < NR_CPUS; i++) | 275 | for_each_cpu(i) |
| 276 | if (cpu_possible(i)) | 276 | register_cpu(&cpu_devices[i], i, NULL); |
| 277 | register_cpu(&cpu_devices[i], i, NULL); | ||
| 278 | 277 | ||
| 279 | /* call platform init */ | 278 | /* call platform init */ |
| 280 | if (ppc_md.init != NULL) { | 279 | if (ppc_md.init != NULL) { |
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 6d64a9bf3474..1065d87fc279 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c | |||
| @@ -191,9 +191,7 @@ static void smp_psurge_message_pass(int target, int msg) | |||
| 191 | if (num_online_cpus() < 2) | 191 | if (num_online_cpus() < 2) |
| 192 | return; | 192 | return; |
| 193 | 193 | ||
| 194 | for (i = 0; i < NR_CPUS; i++) { | 194 | for_each_online_cpu(i) { |
| 195 | if (!cpu_online(i)) | ||
| 196 | continue; | ||
| 197 | if (target == MSG_ALL | 195 | if (target == MSG_ALL |
| 198 | || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) | 196 | || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) |
| 199 | || target == i) { | 197 | || target == i) { |
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c index c08ab432e958..53e9deacee82 100644 --- a/arch/ppc/kernel/setup.c +++ b/arch/ppc/kernel/setup.c | |||
| @@ -168,9 +168,8 @@ int show_cpuinfo(struct seq_file *m, void *v) | |||
| 168 | /* Show summary information */ | 168 | /* Show summary information */ |
| 169 | #ifdef CONFIG_SMP | 169 | #ifdef CONFIG_SMP |
| 170 | unsigned long bogosum = 0; | 170 | unsigned long bogosum = 0; |
| 171 | for (i = 0; i < NR_CPUS; ++i) | 171 | for_each_online_cpu(i) |
| 172 | if (cpu_online(i)) | 172 | bogosum += cpu_data[i].loops_per_jiffy; |
| 173 | bogosum += cpu_data[i].loops_per_jiffy; | ||
| 174 | seq_printf(m, "total bogomips\t: %lu.%02lu\n", | 173 | seq_printf(m, "total bogomips\t: %lu.%02lu\n", |
| 175 | bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); | 174 | bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); |
| 176 | #endif /* CONFIG_SMP */ | 175 | #endif /* CONFIG_SMP */ |
| @@ -712,9 +711,8 @@ int __init ppc_init(void) | |||
| 712 | if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); | 711 | if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); |
| 713 | 712 | ||
| 714 | /* register CPU devices */ | 713 | /* register CPU devices */ |
| 715 | for (i = 0; i < NR_CPUS; i++) | 714 | for_each_cpu(i) |
| 716 | if (cpu_possible(i)) | 715 | register_cpu(&cpu_devices[i], i, NULL); |
| 717 | register_cpu(&cpu_devices[i], i, NULL); | ||
| 718 | 716 | ||
| 719 | /* call platform init */ | 717 | /* call platform init */ |
| 720 | if (ppc_md.init != NULL) { | 718 | if (ppc_md.init != NULL) { |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 7dbe00c76c6b..d52d6d211d9f 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
| @@ -799,9 +799,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
| 799 | */ | 799 | */ |
| 800 | print_cpu_info(&S390_lowcore.cpu_data); | 800 | print_cpu_info(&S390_lowcore.cpu_data); |
| 801 | 801 | ||
| 802 | for(i = 0; i < NR_CPUS; i++) { | 802 | for_each_cpu(i) { |
| 803 | if (!cpu_possible(i)) | ||
| 804 | continue; | ||
| 805 | lowcore_ptr[i] = (struct _lowcore *) | 803 | lowcore_ptr[i] = (struct _lowcore *) |
| 806 | __get_free_pages(GFP_KERNEL|GFP_DMA, | 804 | __get_free_pages(GFP_KERNEL|GFP_DMA, |
| 807 | sizeof(void*) == 8 ? 1 : 0); | 805 | sizeof(void*) == 8 ? 1 : 0); |
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 6883c00728cb..b56e79632f24 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c | |||
| @@ -35,9 +35,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 35 | 35 | ||
| 36 | if (i == 0) { | 36 | if (i == 0) { |
| 37 | seq_puts(p, " "); | 37 | seq_puts(p, " "); |
| 38 | for (j=0; j<NR_CPUS; j++) | 38 | for_each_online_cpu(j) |
| 39 | if (cpu_online(j)) | 39 | seq_printf(p, "CPU%d ",j); |
| 40 | seq_printf(p, "CPU%d ",j); | ||
| 41 | seq_putc(p, '\n'); | 40 | seq_putc(p, '\n'); |
| 42 | } | 41 | } |
| 43 | 42 | ||
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index a067a34e0b64..c0e79843f580 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c | |||
| @@ -404,9 +404,8 @@ static int __init topology_init(void) | |||
| 404 | { | 404 | { |
| 405 | int cpu_id; | 405 | int cpu_id; |
| 406 | 406 | ||
| 407 | for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++) | 407 | for_each_cpu(cpu_id) |
| 408 | if (cpu_possible(cpu_id)) | 408 | register_cpu(&cpu[cpu_id], cpu_id, NULL); |
| 409 | register_cpu(&cpu[cpu_id], cpu_id, NULL); | ||
| 410 | 409 | ||
| 411 | return 0; | 410 | return 0; |
| 412 | } | 411 | } |
diff --git a/arch/sh64/kernel/irq.c b/arch/sh64/kernel/irq.c index 9fc2b71dbd84..d69879c0e063 100644 --- a/arch/sh64/kernel/irq.c +++ b/arch/sh64/kernel/irq.c | |||
| @@ -53,9 +53,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 53 | 53 | ||
| 54 | if (i == 0) { | 54 | if (i == 0) { |
| 55 | seq_puts(p, " "); | 55 | seq_puts(p, " "); |
| 56 | for (j=0; j<NR_CPUS; j++) | 56 | for_each_online_cpu(j) |
| 57 | if (cpu_online(j)) | 57 | seq_printf(p, "CPU%d ",j); |
| 58 | seq_printf(p, "CPU%d ",j); | ||
| 59 | seq_putc(p, '\n'); | 58 | seq_putc(p, '\n'); |
| 60 | } | 59 | } |
| 61 | 60 | ||
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c index 410b9a72aba9..4c60a6ef54a9 100644 --- a/arch/sparc/kernel/irq.c +++ b/arch/sparc/kernel/irq.c | |||
| @@ -184,9 +184,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 184 | #ifndef CONFIG_SMP | 184 | #ifndef CONFIG_SMP |
| 185 | seq_printf(p, "%10u ", kstat_irqs(i)); | 185 | seq_printf(p, "%10u ", kstat_irqs(i)); |
| 186 | #else | 186 | #else |
| 187 | for (j = 0; j < NR_CPUS; j++) { | 187 | for_each_online_cpu(j) { |
| 188 | if (cpu_online(j)) | 188 | seq_printf(p, "%10u ", |
| 189 | seq_printf(p, "%10u ", | ||
| 190 | kstat_cpu(cpu_logical_map(j)).irqs[i]); | 189 | kstat_cpu(cpu_logical_map(j)).irqs[i]); |
| 191 | } | 190 | } |
| 192 | #endif | 191 | #endif |
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c index c6e721d8f477..ea5682ce7031 100644 --- a/arch/sparc/kernel/smp.c +++ b/arch/sparc/kernel/smp.c | |||
| @@ -243,9 +243,8 @@ int setup_profiling_timer(unsigned int multiplier) | |||
| 243 | return -EINVAL; | 243 | return -EINVAL; |
| 244 | 244 | ||
| 245 | spin_lock_irqsave(&prof_setup_lock, flags); | 245 | spin_lock_irqsave(&prof_setup_lock, flags); |
| 246 | for(i = 0; i < NR_CPUS; i++) { | 246 | for_each_cpu(i) { |
| 247 | if (cpu_possible(i)) | 247 | load_profile_irq(i, lvl14_resolution / multiplier); |
| 248 | load_profile_irq(i, lvl14_resolution / multiplier); | ||
| 249 | prof_multiplier(i) = multiplier; | 248 | prof_multiplier(i) = multiplier; |
| 250 | } | 249 | } |
| 251 | spin_unlock_irqrestore(&prof_setup_lock, flags); | 250 | spin_unlock_irqrestore(&prof_setup_lock, flags); |
| @@ -273,13 +272,12 @@ void smp_bogo(struct seq_file *m) | |||
| 273 | { | 272 | { |
| 274 | int i; | 273 | int i; |
| 275 | 274 | ||
| 276 | for (i = 0; i < NR_CPUS; i++) { | 275 | for_each_online_cpu(i) { |
| 277 | if (cpu_online(i)) | 276 | seq_printf(m, |
| 278 | seq_printf(m, | 277 | "Cpu%dBogo\t: %lu.%02lu\n", |
| 279 | "Cpu%dBogo\t: %lu.%02lu\n", | 278 | i, |
| 280 | i, | 279 | cpu_data(i).udelay_val/(500000/HZ), |
| 281 | cpu_data(i).udelay_val/(500000/HZ), | 280 | (cpu_data(i).udelay_val/(5000/HZ))%100); |
| 282 | (cpu_data(i).udelay_val/(5000/HZ))%100); | ||
| 283 | } | 281 | } |
| 284 | } | 282 | } |
| 285 | 283 | ||
| @@ -288,8 +286,6 @@ void smp_info(struct seq_file *m) | |||
| 288 | int i; | 286 | int i; |
| 289 | 287 | ||
| 290 | seq_printf(m, "State:\n"); | 288 | seq_printf(m, "State:\n"); |
| 291 | for (i = 0; i < NR_CPUS; i++) { | 289 | for_each_online_cpu(i) |
| 292 | if (cpu_online(i)) | 290 | seq_printf(m, "CPU%d\t\t: online\n", i); |
| 293 | seq_printf(m, "CPU%d\t\t: online\n", i); | ||
| 294 | } | ||
| 295 | } | 291 | } |
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c index 52621348a56c..cea7fc6fc6e5 100644 --- a/arch/sparc/kernel/sun4d_irq.c +++ b/arch/sparc/kernel/sun4d_irq.c | |||
| @@ -103,11 +103,9 @@ found_it: seq_printf(p, "%3d: ", i); | |||
| 103 | #ifndef CONFIG_SMP | 103 | #ifndef CONFIG_SMP |
| 104 | seq_printf(p, "%10u ", kstat_irqs(i)); | 104 | seq_printf(p, "%10u ", kstat_irqs(i)); |
| 105 | #else | 105 | #else |
| 106 | for (x = 0; x < NR_CPUS; x++) { | 106 | for_each_online_cpu(x) |
| 107 | if (cpu_online(x)) | 107 | seq_printf(p, "%10u ", |
| 108 | seq_printf(p, "%10u ", | 108 | kstat_cpu(cpu_logical_map(x)).irqs[i]); |
| 109 | kstat_cpu(cpu_logical_map(x)).irqs[i]); | ||
| 110 | } | ||
| 111 | #endif | 109 | #endif |
| 112 | seq_printf(p, "%c %s", | 110 | seq_printf(p, "%c %s", |
| 113 | (action->flags & SA_INTERRUPT) ? '+' : ' ', | 111 | (action->flags & SA_INTERRUPT) ? '+' : ' ', |
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index 4219dd2ce3a2..41bb9596be48 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c | |||
| @@ -249,11 +249,9 @@ void __init smp4d_boot_cpus(void) | |||
| 249 | } else { | 249 | } else { |
| 250 | unsigned long bogosum = 0; | 250 | unsigned long bogosum = 0; |
| 251 | 251 | ||
| 252 | for(i = 0; i < NR_CPUS; i++) { | 252 | for_each_present_cpu(i) { |
| 253 | if (cpu_isset(i, cpu_present_map)) { | 253 | bogosum += cpu_data(i).udelay_val; |
| 254 | bogosum += cpu_data(i).udelay_val; | 254 | smp_highest_cpu = i; |
| 255 | smp_highest_cpu = i; | ||
| 256 | } | ||
| 257 | } | 255 | } |
| 258 | SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100)); | 256 | SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100)); |
| 259 | printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", | 257 | printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", |
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index fbbd8a474c4c..1dde312eebda 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c | |||
| @@ -218,10 +218,8 @@ void __init smp4m_boot_cpus(void) | |||
| 218 | cpu_present_map = cpumask_of_cpu(smp_processor_id()); | 218 | cpu_present_map = cpumask_of_cpu(smp_processor_id()); |
| 219 | } else { | 219 | } else { |
| 220 | unsigned long bogosum = 0; | 220 | unsigned long bogosum = 0; |
| 221 | for(i = 0; i < NR_CPUS; i++) { | 221 | for_each_present_cpu(i) |
| 222 | if (cpu_isset(i, cpu_present_map)) | 222 | bogosum += cpu_data(i).udelay_val; |
| 223 | bogosum += cpu_data(i).udelay_val; | ||
| 224 | } | ||
| 225 | printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", | 223 | printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", |
| 226 | cpucount + 1, | 224 | cpucount + 1, |
| 227 | bogosum/(500000/HZ), | 225 | bogosum/(500000/HZ), |
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 8c93ba655b33..e505a4125e35 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c | |||
| @@ -117,9 +117,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 117 | #ifndef CONFIG_SMP | 117 | #ifndef CONFIG_SMP |
| 118 | seq_printf(p, "%10u ", kstat_irqs(i)); | 118 | seq_printf(p, "%10u ", kstat_irqs(i)); |
| 119 | #else | 119 | #else |
| 120 | for (j = 0; j < NR_CPUS; j++) { | 120 | for_each_online_cpu(j) { |
| 121 | if (!cpu_online(j)) | ||
| 122 | continue; | ||
| 123 | seq_printf(p, "%10u ", | 121 | seq_printf(p, "%10u ", |
| 124 | kstat_cpu(j).irqs[i]); | 122 | kstat_cpu(j).irqs[i]); |
| 125 | } | 123 | } |
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 373a701c90a5..1b6e2ade1008 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
| @@ -57,25 +57,21 @@ void smp_info(struct seq_file *m) | |||
| 57 | int i; | 57 | int i; |
| 58 | 58 | ||
| 59 | seq_printf(m, "State:\n"); | 59 | seq_printf(m, "State:\n"); |
| 60 | for (i = 0; i < NR_CPUS; i++) { | 60 | for_each_online_cpu(i) |
| 61 | if (cpu_online(i)) | 61 | seq_printf(m, "CPU%d:\t\tonline\n", i); |
| 62 | seq_printf(m, | ||
| 63 | "CPU%d:\t\tonline\n", i); | ||
| 64 | } | ||
| 65 | } | 62 | } |
| 66 | 63 | ||
| 67 | void smp_bogo(struct seq_file *m) | 64 | void smp_bogo(struct seq_file *m) |
| 68 | { | 65 | { |
| 69 | int i; | 66 | int i; |
| 70 | 67 | ||
| 71 | for (i = 0; i < NR_CPUS; i++) | 68 | for_each_online_cpu(i) |
| 72 | if (cpu_online(i)) | 69 | seq_printf(m, |
| 73 | seq_printf(m, | 70 | "Cpu%dBogo\t: %lu.%02lu\n" |
| 74 | "Cpu%dBogo\t: %lu.%02lu\n" | 71 | "Cpu%dClkTck\t: %016lx\n", |
| 75 | "Cpu%dClkTck\t: %016lx\n", | 72 | i, cpu_data(i).udelay_val / (500000/HZ), |
| 76 | i, cpu_data(i).udelay_val / (500000/HZ), | 73 | (cpu_data(i).udelay_val / (5000/HZ)) % 100, |
| 77 | (cpu_data(i).udelay_val / (5000/HZ)) % 100, | 74 | i, cpu_data(i).clock_tick); |
| 78 | i, cpu_data(i).clock_tick); | ||
| 79 | } | 75 | } |
| 80 | 76 | ||
| 81 | void __init smp_store_cpu_info(int id) | 77 | void __init smp_store_cpu_info(int id) |
| @@ -1282,7 +1278,7 @@ int setup_profiling_timer(unsigned int multiplier) | |||
| 1282 | return -EINVAL; | 1278 | return -EINVAL; |
| 1283 | 1279 | ||
| 1284 | spin_lock_irqsave(&prof_setup_lock, flags); | 1280 | spin_lock_irqsave(&prof_setup_lock, flags); |
| 1285 | for (i = 0; i < NR_CPUS; i++) | 1281 | for_each_cpu(i) |
| 1286 | prof_multiplier(i) = multiplier; | 1282 | prof_multiplier(i) = multiplier; |
| 1287 | current_tick_offset = (timer_tick_offset / multiplier); | 1283 | current_tick_offset = (timer_tick_offset / multiplier); |
| 1288 | spin_unlock_irqrestore(&prof_setup_lock, flags); | 1284 | spin_unlock_irqrestore(&prof_setup_lock, flags); |
| @@ -1384,10 +1380,8 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
| 1384 | unsigned long bogosum = 0; | 1380 | unsigned long bogosum = 0; |
| 1385 | int i; | 1381 | int i; |
| 1386 | 1382 | ||
| 1387 | for (i = 0; i < NR_CPUS; i++) { | 1383 | for_each_online_cpu(i) |
| 1388 | if (cpu_online(i)) | 1384 | bogosum += cpu_data(i).udelay_val; |
| 1389 | bogosum += cpu_data(i).udelay_val; | ||
| 1390 | } | ||
| 1391 | printk("Total of %ld processors activated " | 1385 | printk("Total of %ld processors activated " |
| 1392 | "(%lu.%02lu BogoMIPS).\n", | 1386 | "(%lu.%02lu BogoMIPS).\n", |
| 1393 | (long) num_online_cpus(), | 1387 | (long) num_online_cpus(), |
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index ded63ee9c4fd..1539a8362b6f 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
| @@ -1828,8 +1828,8 @@ void __flush_tlb_all(void) | |||
| 1828 | void online_page(struct page *page) | 1828 | void online_page(struct page *page) |
| 1829 | { | 1829 | { |
| 1830 | ClearPageReserved(page); | 1830 | ClearPageReserved(page); |
| 1831 | set_page_count(page, 0); | 1831 | init_page_count(page); |
| 1832 | free_cold_page(page); | 1832 | __free_page(page); |
| 1833 | totalram_pages++; | 1833 | totalram_pages++; |
| 1834 | num_physpages++; | 1834 | num_physpages++; |
| 1835 | } | 1835 | } |
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 27cdf9164422..80c9c18aae94 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c | |||
| @@ -491,6 +491,16 @@ void __init check_bugs(void) | |||
| 491 | check_devanon(); | 491 | check_devanon(); |
| 492 | } | 492 | } |
| 493 | 493 | ||
| 494 | void apply_alternatives(void *start, void *end) | 494 | void apply_alternatives(struct alt_instr *start, struct alt_instr *end) |
| 495 | { | ||
| 496 | } | ||
| 497 | |||
| 498 | void alternatives_smp_module_add(struct module *mod, char *name, | ||
| 499 | void *locks, void *locks_end, | ||
| 500 | void *text, void *text_end) | ||
| 501 | { | ||
| 502 | } | ||
| 503 | |||
| 504 | void alternatives_smp_module_del(struct module *mod) | ||
| 495 | { | 505 | { |
| 496 | } | 506 | } |
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c index 6dffb498ccd7..a8a6aa70d695 100644 --- a/arch/x86_64/kernel/early_printk.c +++ b/arch/x86_64/kernel/early_printk.c | |||
| @@ -17,11 +17,8 @@ | |||
| 17 | #define VGABASE ((void __iomem *)0xffffffff800b8000UL) | 17 | #define VGABASE ((void __iomem *)0xffffffff800b8000UL) |
| 18 | #endif | 18 | #endif |
| 19 | 19 | ||
| 20 | #define MAX_YPOS max_ypos | ||
| 21 | #define MAX_XPOS max_xpos | ||
| 22 | |||
| 23 | static int max_ypos = 25, max_xpos = 80; | 20 | static int max_ypos = 25, max_xpos = 80; |
| 24 | static int current_ypos = 1, current_xpos = 0; | 21 | static int current_ypos = 25, current_xpos = 0; |
| 25 | 22 | ||
| 26 | static void early_vga_write(struct console *con, const char *str, unsigned n) | 23 | static void early_vga_write(struct console *con, const char *str, unsigned n) |
| 27 | { | 24 | { |
| @@ -29,26 +26,26 @@ static void early_vga_write(struct console *con, const char *str, unsigned n) | |||
| 29 | int i, k, j; | 26 | int i, k, j; |
| 30 | 27 | ||
| 31 | while ((c = *str++) != '\0' && n-- > 0) { | 28 | while ((c = *str++) != '\0' && n-- > 0) { |
| 32 | if (current_ypos >= MAX_YPOS) { | 29 | if (current_ypos >= max_ypos) { |
| 33 | /* scroll 1 line up */ | 30 | /* scroll 1 line up */ |
| 34 | for (k = 1, j = 0; k < MAX_YPOS; k++, j++) { | 31 | for (k = 1, j = 0; k < max_ypos; k++, j++) { |
| 35 | for (i = 0; i < MAX_XPOS; i++) { | 32 | for (i = 0; i < max_xpos; i++) { |
| 36 | writew(readw(VGABASE + 2*(MAX_XPOS*k + i)), | 33 | writew(readw(VGABASE+2*(max_xpos*k+i)), |
| 37 | VGABASE + 2*(MAX_XPOS*j + i)); | 34 | VGABASE + 2*(max_xpos*j + i)); |
| 38 | } | 35 | } |
| 39 | } | 36 | } |
| 40 | for (i = 0; i < MAX_XPOS; i++) | 37 | for (i = 0; i < max_xpos; i++) |
| 41 | writew(0x720, VGABASE + 2*(MAX_XPOS*j + i)); | 38 | writew(0x720, VGABASE + 2*(max_xpos*j + i)); |
| 42 | current_ypos = MAX_YPOS-1; | 39 | current_ypos = max_ypos-1; |
| 43 | } | 40 | } |
| 44 | if (c == '\n') { | 41 | if (c == '\n') { |
| 45 | current_xpos = 0; | 42 | current_xpos = 0; |
| 46 | current_ypos++; | 43 | current_ypos++; |
| 47 | } else if (c != '\r') { | 44 | } else if (c != '\r') { |
| 48 | writew(((0x7 << 8) | (unsigned short) c), | 45 | writew(((0x7 << 8) | (unsigned short) c), |
| 49 | VGABASE + 2*(MAX_XPOS*current_ypos + | 46 | VGABASE + 2*(max_xpos*current_ypos + |
| 50 | current_xpos++)); | 47 | current_xpos++)); |
| 51 | if (current_xpos >= MAX_XPOS) { | 48 | if (current_xpos >= max_xpos) { |
| 52 | current_xpos = 0; | 49 | current_xpos = 0; |
| 53 | current_ypos++; | 50 | current_ypos++; |
| 54 | } | 51 | } |
| @@ -244,6 +241,7 @@ int __init setup_early_printk(char *opt) | |||
| 244 | && SCREEN_INFO.orig_video_isVGA == 1) { | 241 | && SCREEN_INFO.orig_video_isVGA == 1) { |
| 245 | max_xpos = SCREEN_INFO.orig_video_cols; | 242 | max_xpos = SCREEN_INFO.orig_video_cols; |
| 246 | max_ypos = SCREEN_INFO.orig_video_lines; | 243 | max_ypos = SCREEN_INFO.orig_video_lines; |
| 244 | current_ypos = SCREEN_INFO.orig_y; | ||
| 247 | early_console = &early_vga_console; | 245 | early_console = &early_vga_console; |
| 248 | } else if (!strncmp(buf, "simnow", 6)) { | 246 | } else if (!strncmp(buf, "simnow", 6)) { |
| 249 | simnow_init(buf + 6); | 247 | simnow_init(buf + 6); |
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c index 30d2a1e545fe..d8bd0b345b1e 100644 --- a/arch/x86_64/kernel/irq.c +++ b/arch/x86_64/kernel/irq.c | |||
| @@ -38,9 +38,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 38 | 38 | ||
| 39 | if (i == 0) { | 39 | if (i == 0) { |
| 40 | seq_printf(p, " "); | 40 | seq_printf(p, " "); |
| 41 | for (j=0; j<NR_CPUS; j++) | 41 | for_each_online_cpu(j) |
| 42 | if (cpu_online(j)) | 42 | seq_printf(p, "CPU%d ",j); |
| 43 | seq_printf(p, "CPU%d ",j); | ||
| 44 | seq_putc(p, '\n'); | 43 | seq_putc(p, '\n'); |
| 45 | } | 44 | } |
| 46 | 45 | ||
| @@ -53,10 +52,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 53 | #ifndef CONFIG_SMP | 52 | #ifndef CONFIG_SMP |
| 54 | seq_printf(p, "%10u ", kstat_irqs(i)); | 53 | seq_printf(p, "%10u ", kstat_irqs(i)); |
| 55 | #else | 54 | #else |
| 56 | for (j=0; j<NR_CPUS; j++) | 55 | for_each_online_cpu(j) |
| 57 | if (cpu_online(j)) | 56 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); |
| 58 | seq_printf(p, "%10u ", | ||
| 59 | kstat_cpu(j).irqs[i]); | ||
| 60 | #endif | 57 | #endif |
| 61 | seq_printf(p, " %14s", irq_desc[i].handler->typename); | 58 | seq_printf(p, " %14s", irq_desc[i].handler->typename); |
| 62 | 59 | ||
| @@ -68,15 +65,13 @@ skip: | |||
| 68 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 65 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
| 69 | } else if (i == NR_IRQS) { | 66 | } else if (i == NR_IRQS) { |
| 70 | seq_printf(p, "NMI: "); | 67 | seq_printf(p, "NMI: "); |
| 71 | for (j = 0; j < NR_CPUS; j++) | 68 | for_each_online_cpu(j) |
| 72 | if (cpu_online(j)) | 69 | seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); |
| 73 | seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); | ||
| 74 | seq_putc(p, '\n'); | 70 | seq_putc(p, '\n'); |
| 75 | #ifdef CONFIG_X86_LOCAL_APIC | 71 | #ifdef CONFIG_X86_LOCAL_APIC |
| 76 | seq_printf(p, "LOC: "); | 72 | seq_printf(p, "LOC: "); |
| 77 | for (j = 0; j < NR_CPUS; j++) | 73 | for_each_online_cpu(j) |
| 78 | if (cpu_online(j)) | 74 | seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); |
| 79 | seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); | ||
| 80 | seq_putc(p, '\n'); | 75 | seq_putc(p, '\n'); |
| 81 | #endif | 76 | #endif |
| 82 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | 77 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); |
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index 8b866a8572cf..14f0ced613b6 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c | |||
| @@ -222,9 +222,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) | |||
| 222 | 222 | ||
| 223 | void __kprobes arch_remove_kprobe(struct kprobe *p) | 223 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
| 224 | { | 224 | { |
| 225 | down(&kprobe_mutex); | 225 | mutex_lock(&kprobe_mutex); |
| 226 | free_insn_slot(p->ainsn.insn); | 226 | free_insn_slot(p->ainsn.insn); |
| 227 | up(&kprobe_mutex); | 227 | mutex_unlock(&kprobe_mutex); |
| 228 | } | 228 | } |
| 229 | 229 | ||
| 230 | static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) | 230 | static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) |
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c index 5bf17e41cd2d..66c009e10bac 100644 --- a/arch/x86_64/kernel/nmi.c +++ b/arch/x86_64/kernel/nmi.c | |||
| @@ -162,9 +162,7 @@ int __init check_nmi_watchdog (void) | |||
| 162 | local_irq_enable(); | 162 | local_irq_enable(); |
| 163 | mdelay((10*1000)/nmi_hz); // wait 10 ticks | 163 | mdelay((10*1000)/nmi_hz); // wait 10 ticks |
| 164 | 164 | ||
| 165 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 165 | for_each_online_cpu(cpu) { |
| 166 | if (!cpu_online(cpu)) | ||
| 167 | continue; | ||
| 168 | if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) { | 166 | if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) { |
| 169 | endflag = 1; | 167 | endflag = 1; |
| 170 | printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n", | 168 | printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n", |
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c index 5876df116c92..e5f5ce7909a3 100644 --- a/arch/x86_64/kernel/signal.c +++ b/arch/x86_64/kernel/signal.c | |||
| @@ -443,9 +443,6 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
| 443 | if (!user_mode(regs)) | 443 | if (!user_mode(regs)) |
| 444 | return 1; | 444 | return 1; |
| 445 | 445 | ||
| 446 | if (try_to_freeze()) | ||
| 447 | goto no_signal; | ||
| 448 | |||
| 449 | if (!oldset) | 446 | if (!oldset) |
| 450 | oldset = &current->blocked; | 447 | oldset = &current->blocked; |
| 451 | 448 | ||
| @@ -463,7 +460,6 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
| 463 | return handle_signal(signr, &info, &ka, oldset, regs); | 460 | return handle_signal(signr, &info, &ka, oldset, regs); |
| 464 | } | 461 | } |
| 465 | 462 | ||
| 466 | no_signal: | ||
| 467 | /* Did we come from a system call? */ | 463 | /* Did we come from a system call? */ |
| 468 | if ((long)regs->orig_rax >= 0) { | 464 | if ((long)regs->orig_rax >= 0) { |
| 469 | /* Restart the system call - no handlers present */ | 465 | /* Restart the system call - no handlers present */ |
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index 4cbf6d91571f..51f9bed455fa 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c | |||
| @@ -83,9 +83,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 83 | 83 | ||
| 84 | if (i == 0) { | 84 | if (i == 0) { |
| 85 | seq_printf(p, " "); | 85 | seq_printf(p, " "); |
| 86 | for (j=0; j<NR_CPUS; j++) | 86 | for_each_online_cpu(j) |
| 87 | if (cpu_online(j)) | 87 | seq_printf(p, "CPU%d ",j); |
| 88 | seq_printf(p, "CPU%d ",j); | ||
| 89 | seq_putc(p, '\n'); | 88 | seq_putc(p, '\n'); |
| 90 | } | 89 | } |
| 91 | 90 | ||
| @@ -98,9 +97,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 98 | #ifndef CONFIG_SMP | 97 | #ifndef CONFIG_SMP |
| 99 | seq_printf(p, "%10u ", kstat_irqs(i)); | 98 | seq_printf(p, "%10u ", kstat_irqs(i)); |
| 100 | #else | 99 | #else |
| 101 | for (j = 0; j < NR_CPUS; j++) | 100 | for_each_online_cpu(j) |
| 102 | if (cpu_online(j)) | 101 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); |
| 103 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | ||
| 104 | #endif | 102 | #endif |
| 105 | seq_printf(p, " %14s", irq_desc[i].handler->typename); | 103 | seq_printf(p, " %14s", irq_desc[i].handler->typename); |
| 106 | seq_printf(p, " %s", action->name); | 104 | seq_printf(p, " %s", action->name); |
| @@ -113,9 +111,8 @@ skip: | |||
| 113 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 111 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
| 114 | } else if (i == NR_IRQS) { | 112 | } else if (i == NR_IRQS) { |
| 115 | seq_printf(p, "NMI: "); | 113 | seq_printf(p, "NMI: "); |
| 116 | for (j = 0; j < NR_CPUS; j++) | 114 | for_each_online_cpu(j) |
| 117 | if (cpu_online(j)) | 115 | seq_printf(p, "%10u ", nmi_count(j)); |
| 118 | seq_printf(p, "%10u ", nmi_count(j)); | ||
| 119 | seq_putc(p, '\n'); | 116 | seq_putc(p, '\n'); |
| 120 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | 117 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); |
| 121 | } | 118 | } |
diff --git a/arch/xtensa/platform-iss/console.c b/arch/xtensa/platform-iss/console.c index 94fdfe474ac1..2a580efb58ec 100644 --- a/arch/xtensa/platform-iss/console.c +++ b/arch/xtensa/platform-iss/console.c | |||
| @@ -31,10 +31,6 @@ | |||
| 31 | #include <linux/tty.h> | 31 | #include <linux/tty.h> |
| 32 | #include <linux/tty_flip.h> | 32 | #include <linux/tty_flip.h> |
| 33 | 33 | ||
| 34 | #ifdef SERIAL_INLINE | ||
| 35 | #define _INLINE_ inline | ||
| 36 | #endif | ||
| 37 | |||
| 38 | #define SERIAL_MAX_NUM_LINES 1 | 34 | #define SERIAL_MAX_NUM_LINES 1 |
| 39 | #define SERIAL_TIMER_VALUE (20 * HZ) | 35 | #define SERIAL_TIMER_VALUE (20 * HZ) |
| 40 | 36 | ||
