-rw-r--r--  Makefile                              |   7
-rw-r--r--  arch/x86/include/asm/cpufeature.h     |   1
-rw-r--r--  arch/x86/include/asm/hw_irq.h         |   3
-rw-r--r--  arch/x86/include/asm/msr-index.h      |   2
-rw-r--r--  arch/x86/include/asm/msr.h            |  16
-rw-r--r--  arch/x86/include/asm/processor.h      |   2
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c   |   5
-rw-r--r--  arch/x86/kernel/apic/bigsmp_32.c      |   5
-rw-r--r--  arch/x86/kernel/apic/io_apic.c        |  32
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c |   5
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c    |   5
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c    |   5
-rw-r--r--  arch/x86/kernel/cpu/amd.c             |  53
-rw-r--r--  arch/x86/kernel/cpu/intel.c           |   1
-rw-r--r--  arch/x86/kernel/cpuid.c               |   5
-rw-r--r--  arch/x86/kernel/e820.c                |   2
-rw-r--r--  arch/x86/kernel/msr.c                 |   4
-rw-r--r--  arch/x86/kernel/tsc.c                 |   1
-rw-r--r--  arch/x86/kernel/uv_irq.c              |   3
-rw-r--r--  arch/x86/lib/Makefile                 |   4
-rw-r--r--  arch/x86/lib/msr-smp.c                | 204
-rw-r--r--  arch/x86/lib/msr.c                    | 213
-rw-r--r--  arch/x86/mm/srat_32.c                 |   2
-rw-r--r--  arch/x86/mm/srat_64.c                 |   4
-rw-r--r--  arch/x86/tools/chkobjdump.awk         |   2
-rw-r--r--  arch/x86/tools/gen-insn-attr-x86.awk  |  10
-rw-r--r--  include/linux/decompress/mm.h         |   4
-rw-r--r--  include/linux/mm.h                    |   3
-rw-r--r--  init/initramfs.c                      |  10
-rw-r--r--  lib/decompress_bunzip2.c              |  10
-rw-r--r--  mm/page_alloc.c                       |   4

31 files changed, 307 insertions(+), 320 deletions(-)
diff --git a/Makefile b/Makefile
index 0ac5812f3b6d..e6b06cbeb47e 100644
--- a/Makefile
+++ b/Makefile
@@ -16,6 +16,13 @@ NAME = Man-Eating Seals of Antiquity
 # o  print "Entering directory ...";
 MAKEFLAGS += -rR --no-print-directory
 
+# Avoid funny character set dependencies
+unexport LC_ALL
+LC_CTYPE=C
+LC_COLLATE=C
+LC_NUMERIC=C
+export LC_CTYPE LC_COLLATE LC_NUMERIC
+
 # We are using a recursive build, so we need to do a little thinking
 # to get the ordering right.
 #
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 613700f27a4a..637e1ec963c3 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -153,6 +153,7 @@
 #define X86_FEATURE_SSE5	(6*32+11) /* SSE-5 */
 #define X86_FEATURE_SKINIT	(6*32+12) /* SKINIT/STGI instructions */
 #define X86_FEATURE_WDT		(6*32+13) /* Watchdog timer */
+#define X86_FEATURE_NODEID_MSR	(6*32+19) /* NodeId MSR */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 08c48a81841f..eeac829a0f44 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -103,7 +103,8 @@ extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
 extern void send_cleanup_vector(struct irq_cfg *);
 
 struct irq_desc;
-extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *);
+extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *,
+				      unsigned int *dest_id);
 extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
 extern void setup_ioapic_dest(void);
 
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4ffe09b2ad75..1cd58cdbc03f 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -12,6 +12,7 @@
 #define MSR_FS_BASE		0xc0000100 /* 64bit FS base */
 #define MSR_GS_BASE		0xc0000101 /* 64bit GS base */
 #define MSR_KERNEL_GS_BASE	0xc0000102 /* SwapGS GS shadow */
+#define MSR_TSC_AUX		0xc0000103 /* Auxiliary TSC */
 
 /* EFER bits: */
 #define _EFER_SCE		0  /* SYSCALL/SYSRET */
@@ -123,6 +124,7 @@
 #define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
 #define FAM10H_MMIO_CONF_BASE_MASK	0xfffffff
 #define FAM10H_MMIO_CONF_BASE_SHIFT	20
+#define MSR_FAM10H_NODE_ID		0xc001100c
 
 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1			0xc001001a
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 2d228fc9b4b7..c5bc4c2d33f5 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -27,6 +27,18 @@ struct msr {
 	};
 };
 
+struct msr_info {
+	u32 msr_no;
+	struct msr reg;
+	struct msr *msrs;
+	int err;
+};
+
+struct msr_regs_info {
+	u32 *regs;
+	int err;
+};
+
 static inline unsigned long long native_read_tscp(unsigned int *aux)
 {
 	unsigned long low, high;
@@ -240,9 +252,9 @@ do { \
 #define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val),		\
 					     (u32)((val) >> 32))
 
-#define write_tsc(val1, val2) wrmsr(0x10, (val1), (val2))
+#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
 
-#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
+#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
 
 struct msr *msrs_alloc(void);
 void msrs_free(struct msr *msrs);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6f8ec1c37e0a..fc801bab1b3b 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -181,7 +181,7 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
 			    unsigned int *ecx, unsigned int *edx)
 {
 	/* ecx is often an input as well as an output. */
-	asm("cpuid"
+	asm volatile("cpuid"
 	    : "=a" (*eax),
 	      "=b" (*ebx),
 	      "=c" (*ecx),
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index d0c99abc26c3..eacbd2b31d27 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -306,10 +306,7 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 		if (cpumask_test_cpu(cpu, cpu_online_mask))
 			break;
 	}
-	if (cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu);
-
-	return BAD_APICID;
+	return per_cpu(x86_cpu_to_apicid, cpu);
 }
 
 struct apic apic_physflat = {
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 38dcecfa5818..cb804c5091b9 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -131,10 +131,7 @@ static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 		if (cpumask_test_cpu(cpu, cpu_online_mask))
 			break;
 	}
-	if (cpu < nr_cpu_ids)
-		return bigsmp_cpu_to_logical_apicid(cpu);
-
-	return BAD_APICID;
+	return bigsmp_cpu_to_logical_apicid(cpu);
 }
 
 static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 11a5851f1f50..de00c4619a55 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2276,26 +2276,28 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
 
 /*
  * Either sets desc->affinity to a valid value, and returns
- * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
+ * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
  * leaves desc->affinity untouched.
  */
 unsigned int
-set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
+set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask,
+		  unsigned int *dest_id)
 {
 	struct irq_cfg *cfg;
 	unsigned int irq;
 
 	if (!cpumask_intersects(mask, cpu_online_mask))
-		return BAD_APICID;
+		return -1;
 
 	irq = desc->irq;
 	cfg = desc->chip_data;
 	if (assign_irq_vector(irq, cfg, mask))
-		return BAD_APICID;
+		return -1;
 
 	cpumask_copy(desc->affinity, mask);
 
-	return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
+	*dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
+	return 0;
 }
 
 static int
@@ -2311,12 +2313,11 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 	cfg = desc->chip_data;
 
 	spin_lock_irqsave(&ioapic_lock, flags);
-	dest = set_desc_affinity(desc, mask);
-	if (dest != BAD_APICID) {
+	ret = set_desc_affinity(desc, mask, &dest);
+	if (!ret) {
 		/* Only the high 8 bits are valid. */
 		dest = SET_APIC_LOGICAL_ID(dest);
 		__target_IO_APIC_irq(irq, dest, cfg);
-		ret = 0;
 	}
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 
@@ -3351,8 +3352,7 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 	struct msi_msg msg;
 	unsigned int dest;
 
-	dest = set_desc_affinity(desc, mask);
-	if (dest == BAD_APICID)
+	if (set_desc_affinity(desc, mask, &dest))
 		return -1;
 
 	cfg = desc->chip_data;
@@ -3384,8 +3384,7 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 	if (get_irte(irq, &irte))
 		return -1;
 
-	dest = set_desc_affinity(desc, mask);
-	if (dest == BAD_APICID)
+	if (set_desc_affinity(desc, mask, &dest))
 		return -1;
 
 	irte.vector = cfg->vector;
@@ -3567,8 +3566,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 	struct msi_msg msg;
 	unsigned int dest;
 
-	dest = set_desc_affinity(desc, mask);
-	if (dest == BAD_APICID)
+	if (set_desc_affinity(desc, mask, &dest))
 		return -1;
 
 	cfg = desc->chip_data;
@@ -3623,8 +3621,7 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 	struct msi_msg msg;
 	unsigned int dest;
 
-	dest = set_desc_affinity(desc, mask);
-	if (dest == BAD_APICID)
+	if (set_desc_affinity(desc, mask, &dest))
 		return -1;
 
 	cfg = desc->chip_data;
@@ -3730,8 +3727,7 @@ static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
 	struct irq_cfg *cfg;
 	unsigned int dest;
 
-	dest = set_desc_affinity(desc, mask);
-	if (dest == BAD_APICID)
+	if (set_desc_affinity(desc, mask, &dest))
 		return -1;
 
 	cfg = desc->chip_data;
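The hunks above convert every caller of set_desc_affinity() from the old "compare against BAD_APICID" pattern to an error-code return, with the APIC destination handed back through a pointer. A minimal sketch of the new calling convention, assuming a driver-style set_affinity handler; the function name example_set_affinity and the use of irq_to_desc() are illustrative, not part of this patch:

#include <linux/irq.h>
#include <linux/cpumask.h>
#include <asm/hw_irq.h>

/* Sketch only: how an affinity handler would use the new prototype. */
static int example_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int dest;

	/* Non-zero return: desc->affinity and dest are left untouched. */
	if (set_desc_affinity(desc, mask, &dest))
		return -1;

	/* On success, 'dest' holds the APIC destination to program. */
	/* ... write 'dest' into the interrupt's destination register ... */
	return 0;
}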
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index a5371ec36776..cf69c59f4910 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -148,10 +148,7 @@ x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 			break;
 	}
 
-	if (cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_logical_apicid, cpu);
-
-	return BAD_APICID;
+	return per_cpu(x86_cpu_to_logical_apicid, cpu);
 }
 
 static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index a8989aadc99a..8972f38c5ced 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -146,10 +146,7 @@ x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 			break;
 	}
 
-	if (cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu);
-
-	return BAD_APICID;
+	return per_cpu(x86_cpu_to_apicid, cpu);
 }
 
 static unsigned int x2apic_phys_get_apic_id(unsigned long x)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index b684bb303cbf..d56b0efb2057 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -225,10 +225,7 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 		if (cpumask_test_cpu(cpu, cpu_online_mask))
 			break;
 	}
-	if (cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu);
-
-	return BAD_APICID;
+	return per_cpu(x86_cpu_to_apicid, cpu);
 }
 
 static unsigned int x2apic_get_apic_id(unsigned long x)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 8dc3ea145c97..e485825130d2 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -254,59 +254,36 @@ static int __cpuinit nearby_node(int apicid)
 
 /*
  * Fixup core topology information for AMD multi-node processors.
- * Assumption 1: Number of cores in each internal node is the same.
- * Assumption 2: Mixed systems with both single-node and dual-node
- * processors are not supported.
+ * Assumption: Number of cores in each internal node is the same.
  */
 #ifdef CONFIG_X86_HT
 static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_PCI
-	u32 t, cpn;
-	u8 n, n_id;
+	unsigned long long value;
+	u32 nodes, cores_per_node;
 	int cpu = smp_processor_id();
 
+	if (!cpu_has(c, X86_FEATURE_NODEID_MSR))
+		return;
+
 	/* fixup topology information only once for a core */
 	if (cpu_has(c, X86_FEATURE_AMD_DCM))
 		return;
 
-	/* check for multi-node processor on boot cpu */
-	t = read_pci_config(0, 24, 3, 0xe8);
-	if (!(t & (1 << 29)))
+	rdmsrl(MSR_FAM10H_NODE_ID, value);
+
+	nodes = ((value >> 3) & 7) + 1;
+	if (nodes == 1)
 		return;
 
 	set_cpu_cap(c, X86_FEATURE_AMD_DCM);
+	cores_per_node = c->x86_max_cores / nodes;
 
-	/* cores per node: each internal node has half the number of cores */
-	cpn = c->x86_max_cores >> 1;
+	/* store NodeID, use llc_shared_map to store sibling info */
+	per_cpu(cpu_llc_id, cpu) = value & 7;
 
-	/* even-numbered NB_id of this dual-node processor */
-	n = c->phys_proc_id << 1;
-
-	/*
-	 * determine internal node id and assign cores fifty-fifty to
-	 * each node of the dual-node processor
-	 */
-	t = read_pci_config(0, 24 + n, 3, 0xe8);
-	n = (t>>30) & 0x3;
-	if (n == 0) {
-		if (c->cpu_core_id < cpn)
-			n_id = 0;
-		else
-			n_id = 1;
-	} else {
-		if (c->cpu_core_id < cpn)
-			n_id = 1;
-		else
-			n_id = 0;
-	}
-
-	/* compute entire NodeID, use llc_shared_map to store sibling info */
-	per_cpu(cpu_llc_id, cpu) = (c->phys_proc_id << 1) + n_id;
-
-	/* fixup core id to be in range from 0 to cpn */
-	c->cpu_core_id = c->cpu_core_id % cpn;
-#endif
+	/* fixup core id to be in range from 0 to (cores_per_node - 1) */
+	c->cpu_core_id = c->cpu_core_id % cores_per_node;
 }
 #endif
 
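For reference, the new amd_fixup_dcm() above derives the topology from MSR_FAM10H_NODE_ID instead of PCI config space. A minimal sketch of that decoding, assuming the same bit layout the patch relies on (bits [2:0] = this core's NodeId, bits [5:3] = NodesPerProcessor - 1); the variable names are illustrative only:

	u64 value;
	unsigned int node_id, nodes_per_processor;

	rdmsrl(MSR_FAM10H_NODE_ID, value);
	node_id = value & 7;				/* internal node of this core */
	nodes_per_processor = ((value >> 3) & 7) + 1;	/* 1 means a single-node part */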
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 9c31e8b09d2c..879666f4d871 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -70,7 +70,6 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-		set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
 		sched_clock_stable = 1;
 	}
 
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 7ef24a796992..cb27fd6136c9 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -187,7 +187,8 @@ static int __init cpuid_init(void)
 	int i, err = 0;
 	i = 0;
 
-	if (register_chrdev(CPUID_MAJOR, "cpu/cpuid", &cpuid_fops)) {
+	if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,
+			      "cpu/cpuid", &cpuid_fops)) {
 		printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
 		       CPUID_MAJOR);
 		err = -EBUSY;
@@ -216,7 +217,7 @@ out_class:
 	}
 	class_destroy(cpuid_class);
 out_chrdev:
-	unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
+	__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
 out:
 	return err;
 }
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index f50447d961c0..05ed7ab2ca48 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -724,7 +724,7 @@ core_initcall(e820_mark_nvs_memory);
 /*
  * Early reserved memory areas.
  */
-#define MAX_EARLY_RES 20
+#define MAX_EARLY_RES 32
 
 struct early_res {
 	u64 start, end;
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 572b07eee3f4..4bd93c9b2b27 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -246,7 +246,7 @@ static int __init msr_init(void)
 	int i, err = 0;
 	i = 0;
 
-	if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) {
+	if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) {
 		printk(KERN_ERR "msr: unable to get major %d for msr\n",
 		       MSR_MAJOR);
 		err = -EBUSY;
@@ -274,7 +274,7 @@ out_class:
 		msr_device_destroy(i);
 	class_destroy(msr_class);
 out_chrdev:
-	unregister_chrdev(MSR_MAJOR, "cpu/msr");
+	__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
 out:
 	return err;
 }
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index cd982f48e23e..597683aa5ba0 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -763,6 +763,7 @@ void mark_tsc_unstable(char *reason)
 {
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
+		sched_clock_stable = 0;
 		printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
 		/* Change only the rating, when not registered */
 		if (clocksource_tsc.mult)
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c
index 61d805df4c91..ece73d8e3240 100644
--- a/arch/x86/kernel/uv_irq.c
+++ b/arch/x86/kernel/uv_irq.c
@@ -215,8 +215,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
 	unsigned long mmr_offset;
 	unsigned mmr_pnode;
 
-	dest = set_desc_affinity(desc, mask);
-	if (dest == BAD_APICID)
+	if (set_desc_affinity(desc, mask, &dest))
 		return -1;
 
 	mmr_value = 0;
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 45b20e486c2f..cffd754f3039 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -14,7 +14,7 @@ $(obj)/inat.o: $(obj)/inat-tables.c
 
 clean-files := inat-tables.c
 
-obj-$(CONFIG_SMP) := msr.o
+obj-$(CONFIG_SMP) += msr-smp.o
 
 lib-y := delay.o
 lib-y += thunk_$(BITS).o
@@ -22,7 +22,7 @@ lib-y += usercopy_$(BITS).o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
 lib-$(CONFIG_KPROBES) += insn.o inat.o
 
-obj-y += msr-reg.o msr-reg-export.o
+obj-y += msr.o msr-reg.o msr-reg-export.o
 
 ifeq ($(CONFIG_X86_32),y)
         obj-y += atomic64_32.o
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
new file mode 100644
index 000000000000..a6b1b86d2253
--- /dev/null
+++ b/arch/x86/lib/msr-smp.c
@@ -0,0 +1,204 @@
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/smp.h>
+#include <asm/msr.h>
+
+static void __rdmsr_on_cpu(void *info)
+{
+	struct msr_info *rv = info;
+	struct msr *reg;
+	int this_cpu = raw_smp_processor_id();
+
+	if (rv->msrs)
+		reg = per_cpu_ptr(rv->msrs, this_cpu);
+	else
+		reg = &rv->reg;
+
+	rdmsr(rv->msr_no, reg->l, reg->h);
+}
+
+static void __wrmsr_on_cpu(void *info)
+{
+	struct msr_info *rv = info;
+	struct msr *reg;
+	int this_cpu = raw_smp_processor_id();
+
+	if (rv->msrs)
+		reg = per_cpu_ptr(rv->msrs, this_cpu);
+	else
+		reg = &rv->reg;
+
+	wrmsr(rv->msr_no, reg->l, reg->h);
+}
+
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+	*l = rv.reg.l;
+	*h = rv.reg.h;
+
+	return err;
+}
+EXPORT_SYMBOL(rdmsr_on_cpu);
+
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	rv.reg.l = l;
+	rv.reg.h = h;
+	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+
+	return err;
+}
+EXPORT_SYMBOL(wrmsr_on_cpu);
+
+static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
+			    struct msr *msrs,
+			    void (*msr_func) (void *info))
+{
+	struct msr_info rv;
+	int this_cpu;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msrs = msrs;
+	rv.msr_no = msr_no;
+
+	this_cpu = get_cpu();
+
+	if (cpumask_test_cpu(this_cpu, mask))
+		msr_func(&rv);
+
+	smp_call_function_many(mask, msr_func, &rv, 1);
+	put_cpu();
+}
+
+/* rdmsr on a bunch of CPUs
+ *
+ * @mask:	which CPUs
+ * @msr_no:	which MSR
+ * @msrs:	array of MSR values
+ *
+ */
+void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+{
+	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
+}
+EXPORT_SYMBOL(rdmsr_on_cpus);
+
+/*
+ * wrmsr on a bunch of CPUs
+ *
+ * @mask:	which CPUs
+ * @msr_no:	which MSR
+ * @msrs:	array of MSR values
+ *
+ */
+void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+{
+	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
+}
+EXPORT_SYMBOL(wrmsr_on_cpus);
+
+/* These "safe" variants are slower and should be used when the target MSR
+   may not actually exist. */
+static void __rdmsr_safe_on_cpu(void *info)
+{
+	struct msr_info *rv = info;
+
+	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
+}
+
+static void __wrmsr_safe_on_cpu(void *info)
+{
+	struct msr_info *rv = info;
+
+	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
+}
+
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
+	*l = rv.reg.l;
+	*h = rv.reg.h;
+
+	return err ? err : rv.err;
+}
+EXPORT_SYMBOL(rdmsr_safe_on_cpu);
+
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	rv.reg.l = l;
+	rv.reg.h = h;
+	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
+
+	return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsr_safe_on_cpu);
+
+/*
+ * These variants are significantly slower, but allows control over
+ * the entire 32-bit GPR set.
+ */
+static void __rdmsr_safe_regs_on_cpu(void *info)
+{
+	struct msr_regs_info *rv = info;
+
+	rv->err = rdmsr_safe_regs(rv->regs);
+}
+
+static void __wrmsr_safe_regs_on_cpu(void *info)
+{
+	struct msr_regs_info *rv = info;
+
+	rv->err = wrmsr_safe_regs(rv->regs);
+}
+
+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+{
+	int err;
+	struct msr_regs_info rv;
+
+	rv.regs = regs;
+	rv.err = -EIO;
+	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
+
+	return err ? err : rv.err;
+}
+EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
+
+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+{
+	int err;
+	struct msr_regs_info rv;
+
+	rv.regs = regs;
+	rv.err = -EIO;
+	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
+
+	return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
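The helpers moved into msr-smp.c keep the same external API that lib/msr.c exported before the split. A hedged usage sketch, assuming a caller that wants one MSR value from every online CPU; the function name example_dump_tsc_aux and the choice of MSR_TSC_AUX are purely illustrative and not part of this patch:

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/msr.h>

static void example_dump_tsc_aux(void)
{
	struct msr *msrs;
	int cpu;

	msrs = msrs_alloc();		/* per-CPU array of struct msr */
	if (!msrs)
		return;

	/* Fills each CPU's slot via the IPI-based helper above. */
	rdmsr_on_cpus(cpu_online_mask, MSR_TSC_AUX, msrs);

	for_each_online_cpu(cpu)
		pr_info("cpu %d: TSC_AUX = %u\n", cpu, per_cpu_ptr(msrs, cpu)->l);

	msrs_free(msrs);
}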
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 872834177937..8f8eebdca7d4 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -1,123 +1,7 @@
 #include <linux/module.h>
 #include <linux/preempt.h>
-#include <linux/smp.h>
 #include <asm/msr.h>
 
-struct msr_info {
-	u32 msr_no;
-	struct msr reg;
-	struct msr *msrs;
-	int err;
-};
-
-static void __rdmsr_on_cpu(void *info)
-{
-	struct msr_info *rv = info;
-	struct msr *reg;
-	int this_cpu = raw_smp_processor_id();
-
-	if (rv->msrs)
-		reg = per_cpu_ptr(rv->msrs, this_cpu);
-	else
-		reg = &rv->reg;
-
-	rdmsr(rv->msr_no, reg->l, reg->h);
-}
-
-static void __wrmsr_on_cpu(void *info)
-{
-	struct msr_info *rv = info;
-	struct msr *reg;
-	int this_cpu = raw_smp_processor_id();
-
-	if (rv->msrs)
-		reg = per_cpu_ptr(rv->msrs, this_cpu);
-	else
-		reg = &rv->reg;
-
-	wrmsr(rv->msr_no, reg->l, reg->h);
-}
-
-int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-	int err;
-	struct msr_info rv;
-
-	memset(&rv, 0, sizeof(rv));
-
-	rv.msr_no = msr_no;
-	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
-	*l = rv.reg.l;
-	*h = rv.reg.h;
-
-	return err;
-}
-EXPORT_SYMBOL(rdmsr_on_cpu);
-
-int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-	int err;
-	struct msr_info rv;
-
-	memset(&rv, 0, sizeof(rv));
-
-	rv.msr_no = msr_no;
-	rv.reg.l = l;
-	rv.reg.h = h;
-	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
-
-	return err;
-}
-EXPORT_SYMBOL(wrmsr_on_cpu);
-
-static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
-			    struct msr *msrs,
-			    void (*msr_func) (void *info))
-{
-	struct msr_info rv;
-	int this_cpu;
-
-	memset(&rv, 0, sizeof(rv));
-
-	rv.msrs = msrs;
-	rv.msr_no = msr_no;
-
-	this_cpu = get_cpu();
-
-	if (cpumask_test_cpu(this_cpu, mask))
-		msr_func(&rv);
-
-	smp_call_function_many(mask, msr_func, &rv, 1);
-	put_cpu();
-}
-
-/* rdmsr on a bunch of CPUs
- *
- * @mask:	which CPUs
- * @msr_no:	which MSR
- * @msrs:	array of MSR values
- *
- */
-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
-{
-	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
-}
-EXPORT_SYMBOL(rdmsr_on_cpus);
-
-/*
- * wrmsr on a bunch of CPUs
- *
- * @mask:	which CPUs
- * @msr_no:	which MSR
- * @msrs:	array of MSR values
- *
- */
-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
-{
-	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
-}
-EXPORT_SYMBOL(wrmsr_on_cpus);
-
 struct msr *msrs_alloc(void)
 {
 	struct msr *msrs = NULL;
@@ -137,100 +21,3 @@ void msrs_free(struct msr *msrs)
 	free_percpu(msrs);
 }
 EXPORT_SYMBOL(msrs_free);
-
-/* These "safe" variants are slower and should be used when the target MSR
-   may not actually exist. */
-static void __rdmsr_safe_on_cpu(void *info)
-{
-	struct msr_info *rv = info;
-
-	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
-}
-
-static void __wrmsr_safe_on_cpu(void *info)
-{
-	struct msr_info *rv = info;
-
-	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
-}
-
-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-	int err;
-	struct msr_info rv;
-
-	memset(&rv, 0, sizeof(rv));
-
-	rv.msr_no = msr_no;
-	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
-	*l = rv.reg.l;
-	*h = rv.reg.h;
-
-	return err ? err : rv.err;
-}
-EXPORT_SYMBOL(rdmsr_safe_on_cpu);
-
-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-	int err;
-	struct msr_info rv;
-
-	memset(&rv, 0, sizeof(rv));
-
-	rv.msr_no = msr_no;
-	rv.reg.l = l;
-	rv.reg.h = h;
-	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
-
-	return err ? err : rv.err;
-}
-EXPORT_SYMBOL(wrmsr_safe_on_cpu);
-
-/*
- * These variants are significantly slower, but allows control over
- * the entire 32-bit GPR set.
- */
-struct msr_regs_info {
-	u32 *regs;
-	int err;
-};
-
-static void __rdmsr_safe_regs_on_cpu(void *info)
-{
-	struct msr_regs_info *rv = info;
-
-	rv->err = rdmsr_safe_regs(rv->regs);
-}
-
-static void __wrmsr_safe_regs_on_cpu(void *info)
-{
-	struct msr_regs_info *rv = info;
-
-	rv->err = wrmsr_safe_regs(rv->regs);
-}
-
-int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
-{
-	int err;
-	struct msr_regs_info rv;
-
-	rv.regs = regs;
-	rv.err = -EIO;
-	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
-
-	return err ? err : rv.err;
-}
-EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
-
-int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
-{
-	int err;
-	struct msr_regs_info rv;
-
-	rv.regs = regs;
-	rv.err = -EIO;
-	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
-
-	return err ? err : rv.err;
-}
-EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 6f8aa33031c7..9324f13492d5 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -267,6 +267,8 @@ int __init get_memcfg_from_srat(void)
 		e820_register_active_regions(chunk->nid, chunk->start_pfn,
 					     min(chunk->end_pfn, max_pfn));
 	}
+	/* for out of order entries in SRAT */
+	sort_node_map();
 
 	for_each_online_node(nid) {
 		unsigned long start = node_start_pfn[nid];
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index d89075489664..a27124185fc1 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -317,7 +317,7 @@ static int __init nodes_cover_memory(const struct bootnode *nodes)
 		unsigned long s = nodes[i].start >> PAGE_SHIFT;
 		unsigned long e = nodes[i].end >> PAGE_SHIFT;
 		pxmram += e - s;
-		pxmram -= absent_pages_in_range(s, e);
+		pxmram -= __absent_pages_in_range(i, s, e);
 		if ((long)pxmram < 0)
 			pxmram = 0;
 	}
@@ -373,6 +373,8 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 	for_each_node_mask(i, nodes_parsed)
 		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
 						nodes[i].end >> PAGE_SHIFT);
+	/* for out of order entries in SRAT */
+	sort_node_map();
 	if (!nodes_cover_memory(nodes)) {
 		bad_srat();
 		return -1;
diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk
index 0d13cd9fdcff..5bbb5a33f220 100644
--- a/arch/x86/tools/chkobjdump.awk
+++ b/arch/x86/tools/chkobjdump.awk
@@ -9,7 +9,7 @@ BEGIN {
 }
 
 /^GNU/ {
-	split($4, ver, ".");
+	split($3, ver, ".");
 	if (ver[1] > od_ver ||
 	    (ver[1] == od_ver && ver[2] >= od_sver)) {
 		exit 1;
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index 7a6850683c34..eaf11f52fc0b 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -6,8 +6,6 @@
 
 # Awk implementation sanity check
 function check_awk_implement() {
-	if (!match("abc", "[[:lower:]]+"))
-		return "Your awk doesn't support charactor-class."
 	if (sprintf("%x", 0) != "0")
 		return "Your awk has a printf-format problem."
 	return ""
@@ -44,12 +42,12 @@ BEGIN {
 	delete gtable
 	delete atable
 
-	opnd_expr = "^[[:alpha:]/]"
+	opnd_expr = "^[A-Za-z/]"
 	ext_expr = "^\\("
 	sep_expr = "^\\|$"
-	group_expr = "^Grp[[:alnum:]]+"
+	group_expr = "^Grp[0-9A-Za-z]+"
 
-	imm_expr = "^[IJAO][[:lower:]]"
+	imm_expr = "^[IJAO][a-z]"
 	imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
 	imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
 	imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
@@ -62,7 +60,7 @@ BEGIN {
62 imm_flag["Ob"] = "INAT_MOFFSET" 60 imm_flag["Ob"] = "INAT_MOFFSET"
63 imm_flag["Ov"] = "INAT_MOFFSET" 61 imm_flag["Ov"] = "INAT_MOFFSET"
64 62
65 modrm_expr = "^([CDEGMNPQRSUVW/][[:lower:]]+|NTA|T[012])" 63 modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
66 force64_expr = "\\([df]64\\)" 64 force64_expr = "\\([df]64\\)"
67 rex_expr = "^REX(\\.[XRWB]+)*" 65 rex_expr = "^REX(\\.[XRWB]+)*"
68 fpu_expr = "^ESC" # TODO 66 fpu_expr = "^ESC" # TODO
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 12ff8c3f1d05..5032b9a31ae7 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -25,7 +25,7 @@ static void *malloc(int size)
 	void *p;
 
 	if (size < 0)
-		error("Malloc error");
+		return NULL;
 	if (!malloc_ptr)
 		malloc_ptr = free_mem_ptr;
 
@@ -35,7 +35,7 @@ static void *malloc(int size)
 	malloc_ptr += size;
 
 	if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
-		error("Out of memory");
+		return NULL;
 
 	malloc_count++;
 	return p;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 849b4a61bd8f..2265f28eb47a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1037,6 +1037,9 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn,
 extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
 					unsigned long end_pfn);
 extern void remove_all_active_ranges(void);
+void sort_node_map(void);
+unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
+						unsigned long end_pfn);
 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
 						unsigned long end_pfn);
 extern void get_pfn_range_for_nid(unsigned int nid,
diff --git a/init/initramfs.c b/init/initramfs.c
index 4c00edc59689..b37d34beb90b 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -413,7 +413,7 @@ static unsigned my_inptr; /* index of next byte to be processed in inbuf */
 
 static char * __init unpack_to_rootfs(char *buf, unsigned len)
 {
-	int written;
+	int written, res;
 	decompress_fn decompress;
 	const char *compress_name;
 	static __initdata char msg_buf[64];
@@ -445,10 +445,12 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len)
 	}
 	this_header = 0;
 	decompress = decompress_method(buf, len, &compress_name);
-	if (decompress)
-		decompress(buf, len, NULL, flush_buffer, NULL,
+	if (decompress) {
+		res = decompress(buf, len, NULL, flush_buffer, NULL,
 			   &my_inptr, error);
-	else if (compress_name) {
+		if (res)
+			error("decompressor failed");
+	} else if (compress_name) {
 		if (!message) {
 			snprintf(msg_buf, sizeof msg_buf,
 				 "compression method %s not configured",
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 76074209f9a2..a4e971dee102 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -637,6 +637,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
 
 	/* Allocate bunzip_data. Most fields initialize to zero. */
 	bd = *bdp = malloc(i);
+	if (!bd)
+		return RETVAL_OUT_OF_MEMORY;
 	memset(bd, 0, sizeof(struct bunzip_data));
 	/* Setup input buffer */
 	bd->inbuf = inbuf;
@@ -664,6 +666,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
 	bd->dbufSize = 100000*(i-BZh0);
 
 	bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
+	if (!bd->dbuf)
+		return RETVAL_OUT_OF_MEMORY;
 	return RETVAL_OK;
 }
 
@@ -686,7 +690,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
 
 	if (!outbuf) {
 		error("Could not allocate output bufer");
-		return -1;
+		return RETVAL_OUT_OF_MEMORY;
 	}
 	if (buf)
 		inbuf = buf;
@@ -694,6 +698,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
 		inbuf = malloc(BZIP2_IOBUF_SIZE);
 		if (!inbuf) {
 			error("Could not allocate input bufer");
+			i = RETVAL_OUT_OF_MEMORY;
 			goto exit_0;
 		}
 	i = start_bunzip(&bd, inbuf, len, fill);
@@ -720,11 +725,14 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
 	} else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) {
 		error("Compressed file ends unexpectedly");
 	}
+	if (!bd)
+		goto exit_1;
 	if (bd->dbuf)
 		large_free(bd->dbuf);
 	if (pos)
 		*pos = bd->inbufPos;
 	free(bd);
+exit_1:
 	if (!buf)
 		free(inbuf);
 exit_0:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 74af449b1f1d..4e869657cb51 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3579,7 +3579,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-static unsigned long __meminit __absent_pages_in_range(int nid,
+unsigned long __meminit __absent_pages_in_range(int nid,
 				unsigned long range_start_pfn,
 				unsigned long range_end_pfn)
 {
@@ -4108,7 +4108,7 @@ static int __init cmp_node_active_region(const void *a, const void *b)
 }
 
 /* sort the node_map by start_pfn */
-static void __init sort_node_map(void)
+void __init sort_node_map(void)
 {
 	sort(early_node_map, (size_t)nr_nodemap_entries,
 			sizeof(struct node_active_region),