-rw-r--r--  Documentation/feature-removal-schedule.txt  |  7
-rw-r--r--  Documentation/sysctl/kernel.txt             | 31
-rw-r--r--  arch/x86/include/asm/irq_vectors.h          |  2
-rw-r--r--  arch/x86/include/asm/msr.h                  |  3
-rw-r--r--  arch/x86/include/asm/trampoline.h           |  1
-rw-r--r--  arch/x86/kernel/aperture_64.c               | 11
-rw-r--r--  arch/x86/kernel/apic/apic.c                 |  2
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c  | 15
-rw-r--r--  arch/x86/kernel/cpu/amd.c                   |  2
-rw-r--r--  arch/x86/kernel/cpu/common.c                |  8
-rw-r--r--  arch/x86/kernel/cpu/intel.c                 |  2
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c       | 13
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c    | 20
-rw-r--r--  arch/x86/kernel/e820.c                      | 11
-rw-r--r--  arch/x86/kernel/head32.c                    |  2
-rw-r--r--  arch/x86/kernel/head64.c                    |  2
-rw-r--r--  arch/x86/kernel/mpparse.c                   |  3
-rw-r--r--  arch/x86/kernel/pci-dma.c                   |  5
-rw-r--r--  arch/x86/kernel/pci-gart_64.c               |  3
-rw-r--r--  arch/x86/kernel/setup.c                     | 13
-rw-r--r--  arch/x86/kernel/smpboot.c                   | 45
-rw-r--r--  arch/x86/kernel/trampoline.c                | 20
-rw-r--r--  arch/x86/lib/msr.c                          | 26
-rw-r--r--  arch/x86/mm/mmio-mod.c                      |  2
-rw-r--r--  drivers/char/nvram.c                        | 14
-rw-r--r--  drivers/edac/amd64_edac.c                   | 46
-rw-r--r--  mm/migrate.c                                |  2
27 files changed, 192 insertions(+), 119 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 2a4d77946c7d..eb2c138c277c 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -291,13 +291,6 @@ Who: Michael Buesch <mb@bu3sch.de>
 
 ---------------------------
 
-What:	usedac i386 kernel parameter
-When:	2.6.27
-Why:	replaced by allowdac and no dac combination
-Who:	Glauber Costa <gcosta@redhat.com>
-
----------------------------
-
 What:	print_fn_descriptor_symbol()
 When:	October 2009
 Why:	The %pF vsprintf format provides the same functionality in a
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 8f7a0e73ef44..3894eaa23486 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -19,6 +19,8 @@ Currently, these files might (depending on your configuration)
 show up in /proc/sys/kernel:
 - acpi_video_flags
 - acct
+- bootloader_type	     [ X86 only ]
+- bootloader_version	     [ X86 only ]
 - callhome		     [ S390 only ]
 - auto_msgmni
 - core_pattern
@@ -93,6 +95,35 @@ valid for 30 seconds.
 
 ==============================================================
 
+bootloader_type:
+
+x86 bootloader identification
+
+This gives the bootloader type number as indicated by the bootloader,
+shifted left by 4, and OR'd with the low four bits of the bootloader
+version.  The reason for this encoding is that this used to match the
+type_of_loader field in the kernel header; the encoding is kept for
+backwards compatibility.  That is, if the full bootloader type number
+is 0x15 and the full version number is 0x234, this file will contain
+the value 340 = 0x154.
+
+See the type_of_loader and ext_loader_type fields in
+Documentation/x86/boot.txt for additional information.
+
+==============================================================
+
+bootloader_version:
+
+x86 bootloader version
+
+The complete bootloader version number.  In the example above, this
+file will contain the value 564 = 0x234.
+
+See the type_of_loader and ext_loader_ver fields in
+Documentation/x86/boot.txt for additional information.
+
+==============================================================
+
 callhome:
 
 Controls the kernel's callhome behavior in case of a kernel panic.
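
The encoding documented above is easy to verify with a few lines of userspace C. This is only an illustration of the stated arithmetic, using the 0x15/0x234 values from the example in the text:

	#include <stdio.h>

	int main(void)
	{
		unsigned int type = 0x15;	/* full bootloader type number (from the example) */
		unsigned int version = 0x234;	/* full bootloader version number */

		/* bootloader_type: type shifted left by 4, OR'd with low 4 bits of version */
		unsigned int bl_type = (type << 4) | (version & 0xf);

		printf("bootloader_type    = %u (0x%x)\n", bl_type, bl_type);	/* 340 (0x154) */
		printf("bootloader_version = %u (0x%x)\n", version, version);	/* 564 (0x234) */
		return 0;
	}
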
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6a635bd39867..4611f085cd43 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -113,7 +113,7 @@
  */
 #define LOCAL_PENDING_VECTOR		0xec
 
-#define UV_BAU_MESSAGE			0xec
+#define UV_BAU_MESSAGE			0xea
 
 /*
  * Self IPI vector for machine checks
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 5bef931f8b14..2d228fc9b4b7 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -244,6 +244,9 @@ do { \
 
 #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
 
+struct msr *msrs_alloc(void);
+void msrs_free(struct msr *msrs);
+
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index 90f06c25221d..cb507bb05d79 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -16,7 +16,6 @@ extern unsigned long initial_code;
 extern unsigned long initial_gs;
 
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
-#define TRAMPOLINE_BASE 0x6000
 
 extern unsigned long setup_trampoline(void);
 extern void __init reserve_trampoline_memory(void);
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index e0dfb6856aa2..3704997e8b25 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -280,7 +280,8 @@ void __init early_gart_iommu_check(void)
 	 * or BIOS forget to put that in reserved.
 	 * try to update e820 to make that region as reserved.
 	 */
-	int i, fix, slot;
+	u32 agp_aper_base = 0, agp_aper_order = 0;
+	int i, fix, slot, valid_agp = 0;
 	u32 ctl;
 	u32 aper_size = 0, aper_order = 0, last_aper_order = 0;
 	u64 aper_base = 0, last_aper_base = 0;
@@ -290,6 +291,8 @@ void __init early_gart_iommu_check(void)
 		return;
 
 	/* This is mostly duplicate of iommu_hole_init */
+	agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp);
+
 	fix = 0;
 	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
 		int bus;
@@ -342,10 +345,10 @@ void __init early_gart_iommu_check(void)
 		}
 	}
 
-	if (!fix)
+	if (valid_agp)
 		return;
 
-	/* different nodes have different setting, disable them all at first*/
+	/* disable them all at first */
 	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
 		int bus;
 		int dev_base, dev_limit;
@@ -458,8 +461,6 @@ out:
 
 	if (aper_alloc) {
 		/* Got the aperture from the AGP bridge */
-	} else if (!valid_agp) {
-		/* Do nothing */
 	} else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) ||
 		   force_iommu ||
 		   valid_agp ||
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index efb2b9cd132c..aa57c079c98f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1341,7 +1341,7 @@ void enable_x2apic(void)
 
 	rdmsr(MSR_IA32_APICBASE, msr, msr2);
 	if (!(msr & X2APIC_ENABLE)) {
-		pr_info("Enabling x2apic\n");
+		printk_once(KERN_INFO "Enabling x2apic\n");
 		wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
 	}
 }
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index c965e5212714..468489b57aae 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -74,6 +74,7 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	unsigned int eax, ebx, ecx, edx, sub_index;
 	unsigned int ht_mask_width, core_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
+	static bool printed;
 
 	if (c->cpuid_level < 0xb)
 		return;
@@ -127,12 +128,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 
 	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 
-
-	printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-	       c->phys_proc_id);
-	if (c->x86_max_cores > 1)
-		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-		       c->cpu_core_id);
+	if (!printed) {
+		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+		       c->phys_proc_id);
+		if (c->x86_max_cores > 1)
+			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+			       c->cpu_core_id);
+		printed = 1;
+	}
 	return;
 #endif
 }
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7128b3799cec..8dc3ea145c97 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -375,8 +375,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 		node = nearby_node(apicid);
 	}
 	numa_set_node(cpu, node);
-
-	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
 #endif
 }
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 20399b7b0c3f..4868e4a951ee 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -427,6 +427,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_HT
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
+	static bool printed;
 
 	if (!cpu_has(c, X86_FEATURE_HT))
 		return;
@@ -442,7 +443,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	smp_num_siblings = (ebx & 0xff0000) >> 16;
 
 	if (smp_num_siblings == 1) {
-		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+		printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
 		goto out;
 	}
 
@@ -469,11 +470,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 			       ((1 << core_bits) - 1);
 
 out:
-	if ((c->x86_max_cores * smp_num_siblings) > 1) {
+	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
 		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
 		       c->phys_proc_id);
 		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
 		       c->cpu_core_id);
+		printed = 1;
 	}
 #endif
 }
@@ -1115,7 +1117,7 @@ void __cpuinit cpu_init(void)
 	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
 		panic("CPU#%d already initialized!\n", cpu);
 
-	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+	pr_debug("Initializing CPU#%d\n", cpu);
 
 	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
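
The hunks above use two flavors of the same "print once" idiom: printk_once() where a single message is involved, and a hand-rolled static bool where a whole block of printks has to be guarded together. For reference, the kernel's printk_once() expands to roughly the following (a simplified sketch, not the exact header text):

	#define printk_once(fmt, ...)			\
	({						\
		static bool __print_once;		\
							\
		if (!__print_once) {			\
			__print_once = true;		\
			printk(fmt, ##__VA_ARGS__);	\
		}					\
	})
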
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index c900b73f9224..9c31e8b09d2c 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -270,8 +270,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 		node = cpu_to_node(cpu);
 	}
 	numa_set_node(cpu, node);
-
-	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
 #endif
 }
 
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 0c06bca2a1dc..fc6c8ef92dcc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -507,18 +507,19 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	unsigned long num_threads_sharing;
-	int index_msb, i;
+	int index_msb, i, sibling;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
-		struct cpuinfo_x86 *d;
-		for_each_online_cpu(i) {
+		for_each_cpu(i, c->llc_shared_map) {
 			if (!per_cpu(ici_cpuid4_info, i))
 				continue;
-			d = &cpu_data(i);
 			this_leaf = CPUID4_INFO_IDX(i, index);
-			cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
-				     d->llc_shared_map);
+			for_each_cpu(sibling, c->llc_shared_map) {
+				if (!cpu_online(sibling))
+					continue;
+				set_bit(sibling, this_leaf->shared_cpu_map);
+			}
 		}
 		return;
 	}
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 4fef985fc221..81c499eceb21 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -256,6 +256,16 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
 	ack_APIC_irq();
 }
 
+/* Thermal monitoring depends on APIC, ACPI and clock modulation */
+static int intel_thermal_supported(struct cpuinfo_x86 *c)
+{
+	if (!cpu_has_apic)
+		return 0;
+	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
+		return 0;
+	return 1;
+}
+
 void __init mcheck_intel_therm_init(void)
 {
 	/*
@@ -263,8 +273,7 @@ void __init mcheck_intel_therm_init(void)
 	 * LVT value on BSP and use that value to restore APs' thermal LVT
 	 * entry BIOS programmed later
 	 */
-	if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) &&
-	    cpu_has(&boot_cpu_data, X86_FEATURE_ACC))
+	if (intel_thermal_supported(&boot_cpu_data))
 		lvtthmr_init = apic_read(APIC_LVTTHMR);
 }
 
@@ -274,8 +283,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
 	int tm2 = 0;
 	u32 l, h;
 
-	/* Thermal monitoring depends on ACPI and clock modulation*/
-	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
+	if (!intel_thermal_supported(c))
 		return;
 
 	/*
@@ -339,8 +347,8 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
 	l = apic_read(APIC_LVTTHMR);
 	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
 
-	printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
-	       cpu, tm2 ? "TM2" : "TM1");
+	printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
+		    tm2 ? "TM2" : "TM1");
 
 	/* enable thermal throttle processing */
 	atomic_set(&therm_throt_en, 1);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index d17d482a04f4..f50447d961c0 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -732,7 +732,16 @@ struct early_res {
 	char overlap_ok;
 };
 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
-	{ 0, PAGE_SIZE, "BIOS data page" },	/* BIOS data page */
+	{ 0, PAGE_SIZE, "BIOS data page", 1 },	/* BIOS data page */
+#ifdef CONFIG_X86_32
+	/*
+	 * But first pinch a few for the stack/trampoline stuff
+	 * FIXME: Don't need the extra page at 4K, but need to fix
+	 * trampoline before removing it. (see the GDT stuff)
+	 */
+	{ PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 },
+#endif
+
 	{}
 };
 
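
The new fourth initializer in these entries corresponds to the overlap_ok member visible at the top of the hunk, which marks a reservation that later reservations are allowed to overlap. For orientation, a sketch of the structure (only overlap_ok appears verbatim above; the other fields are assumptions inferred from the initializers):

	struct early_res {
		u64 start, end;		/* assumed: the reserved range, e.g. { 0, PAGE_SIZE, ... } */
		char name[16];		/* assumed size; holds e.g. "BIOS data page" */
		char overlap_ok;	/* shown above; the new ", 1" initializers set it */
	};
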
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 4f8e2507e8f3..5051b94c9069 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -29,8 +29,6 @@ static void __init i386_default_early_setup(void)
 
 void __init i386_start_kernel(void)
 {
-	reserve_trampoline_memory();
-
 	reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
 
 #ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 0b06cd778fd9..b5a9896ca1e7 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -98,8 +98,6 @@ void __init x86_64_start_reservations(char *real_mode_data)
 {
 	copy_bootdata(__va(real_mode_data));
 
-	reserve_trampoline_memory();
-
 	reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
 
 #ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 35a57c963df9..40b54ceb68b5 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -945,9 +945,6 @@ void __init early_reserve_e820_mpc_new(void)
 {
 	if (enable_update_mptable && alloc_mptable) {
 		u64 startt = 0;
-#ifdef CONFIG_X86_TRAMPOLINE
-		startt = TRAMPOLINE_BASE;
-#endif
 		mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4);
 	}
 }
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index afcc58b69c7c..fcc2f2bfa39c 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -120,11 +120,14 @@ static void __init dma32_free_bootmem(void)
 
 void __init pci_iommu_alloc(void)
 {
+	int use_swiotlb;
+
+	use_swiotlb = pci_swiotlb_init();
 #ifdef CONFIG_X86_64
 	/* free the range so iommu could get some range less than 4G */
 	dma32_free_bootmem();
 #endif
-	if (pci_swiotlb_init())
+	if (use_swiotlb)
 		return;
 
 	gart_iommu_hole_init();
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index e6a0d402f171..56c0e730d3fe 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -710,7 +710,8 @@ static void gart_iommu_shutdown(void)
 	struct pci_dev *dev;
 	int i;
 
-	if (no_agp)
+	/* don't shutdown it if there is AGP installed */
+	if (!no_agp)
 		return;
 
 	for (i = 0; i < num_k8_northbridges; i++) {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 946a311a25c9..f7b8b9894b22 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -73,6 +73,7 @@
 
 #include <asm/mtrr.h>
 #include <asm/apic.h>
+#include <asm/trampoline.h>
 #include <asm/e820.h>
 #include <asm/mpspec.h>
 #include <asm/setup.h>
@@ -875,6 +876,13 @@ void __init setup_arch(char **cmdline_p)
 
 	reserve_brk();
 
+	/*
+	 * Find and reserve possible boot-time SMP configuration:
+	 */
+	find_smp_config();
+
+	reserve_trampoline_memory();
+
 #ifdef CONFIG_ACPI_SLEEP
 	/*
 	 * Reserve low memory region for sleep support.
@@ -921,11 +929,6 @@ void __init setup_arch(char **cmdline_p)
 
 	early_acpi_boot_init();
 
-	/*
-	 * Find and reserve possible boot-time SMP configuration:
-	 */
-	find_smp_config();
-
 #ifdef CONFIG_ACPI_NUMA
 	/*
 	 * Parse SRAT to discover nodes.
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 29e6744f51e3..678d0b8c26f3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -671,6 +671,26 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
 	complete(&c_idle->done);
 }
 
+/* reduce the number of lines printed when booting a large cpu count system */
+static void __cpuinit announce_cpu(int cpu, int apicid)
+{
+	static int current_node = -1;
+	int node = cpu_to_node(cpu);
+
+	if (system_state == SYSTEM_BOOTING) {
+		if (node != current_node) {
+			if (current_node > (-1))
+				pr_cont(" Ok.\n");
+			current_node = node;
+			pr_info("Booting Node %3d, Processors ", node);
+		}
+		pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
+		return;
+	} else
+		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
+			node, cpu, apicid);
+}
+
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -737,9 +757,8 @@ do_rest:
 	/* start_ip had better be page-aligned! */
 	start_ip = setup_trampoline();
 
 	/* So we see what's up */
-	printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
-	       cpu, apicid, start_ip);
+	announce_cpu(cpu, apicid);
 
 	/*
 	 * This grunge runs the startup process for
@@ -788,21 +807,17 @@ do_rest:
 			udelay(100);
 		}
 
-	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
-		/* number CPUs logically, starting from 1 (BSP is 0) */
-		pr_debug("OK.\n");
-		printk(KERN_INFO "CPU%d: ", cpu);
-		print_cpu_info(&cpu_data(cpu));
-		pr_debug("CPU has booted.\n");
-	} else {
+	if (cpumask_test_cpu(cpu, cpu_callin_mask))
+		pr_debug("CPU%d: has booted.\n", cpu);
+	else {
 		boot_error = 1;
 		if (*((volatile unsigned char *)trampoline_base)
 				== 0xA5)
 			/* trampoline started but...? */
-			printk(KERN_ERR "Stuck ??\n");
+			pr_err("CPU%d: Stuck ??\n", cpu);
 		else
 			/* trampoline code not run */
-			printk(KERN_ERR "Not responding.\n");
+			pr_err("CPU%d: Not responding.\n", cpu);
 		if (apic->inquire_remote_apic)
 			apic->inquire_remote_apic(apicid);
 	}
@@ -1293,14 +1308,16 @@ void native_cpu_die(unsigned int cpu)
 	for (i = 0; i < 10; i++) {
 		/* They ack this in play_dead by setting CPU_DEAD */
 		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-			printk(KERN_INFO "CPU %d is now offline\n", cpu);
+			if (system_state == SYSTEM_RUNNING)
+				pr_info("CPU %u is now offline\n", cpu);
+
 			if (1 == num_online_cpus())
 				alternatives_smp_switch(0);
 			return;
 		}
 		msleep(100);
 	}
-	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+	pr_err("CPU %u didn't die...\n", cpu);
 }
 
 void play_dead_common(void)
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index cd022121cab6..c652ef62742d 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -12,21 +12,19 @@
 #endif
 
 /* ready for x86_64 and x86 */
-unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__trampinitdata trampoline_base;
 
 void __init reserve_trampoline_memory(void)
 {
-#ifdef CONFIG_X86_32
-	/*
-	 * But first pinch a few for the stack/trampoline stuff
-	 * FIXME: Don't need the extra page at 4K, but need to fix
-	 * trampoline before removing it. (see the GDT stuff)
-	 */
-	reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");
-#endif
+	unsigned long mem;
+
 	/* Has to be in very low memory so we can execute real-mode AP code. */
-	reserve_early(TRAMPOLINE_BASE, TRAMPOLINE_BASE + TRAMPOLINE_SIZE,
-		      "TRAMPOLINE");
+	mem = find_e820_area(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE);
+	if (mem == -1L)
+		panic("Cannot allocate trampoline\n");
+
+	trampoline_base = __va(mem);
+	reserve_early(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE");
 }
 
 /*
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 41628b104b9e..872834177937 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -7,7 +7,6 @@ struct msr_info {
 	u32 msr_no;
 	struct msr reg;
 	struct msr *msrs;
-	int off;
 	int err;
 };
 
@@ -18,7 +17,7 @@ static void __rdmsr_on_cpu(void *info)
 	int this_cpu = raw_smp_processor_id();
 
 	if (rv->msrs)
-		reg = &rv->msrs[this_cpu - rv->off];
+		reg = per_cpu_ptr(rv->msrs, this_cpu);
 	else
 		reg = &rv->reg;
 
@@ -32,7 +31,7 @@ static void __wrmsr_on_cpu(void *info)
 	int this_cpu = raw_smp_processor_id();
 
 	if (rv->msrs)
-		reg = &rv->msrs[this_cpu - rv->off];
+		reg = per_cpu_ptr(rv->msrs, this_cpu);
 	else
 		reg = &rv->reg;
 
@@ -80,7 +79,6 @@ static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
 
 	memset(&rv, 0, sizeof(rv));
 
-	rv.off = cpumask_first(mask);
 	rv.msrs = msrs;
 	rv.msr_no = msr_no;
 
@@ -120,6 +118,26 @@ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
 }
 EXPORT_SYMBOL(wrmsr_on_cpus);
 
+struct msr *msrs_alloc(void)
+{
+	struct msr *msrs = NULL;
+
+	msrs = alloc_percpu(struct msr);
+	if (!msrs) {
+		pr_warning("%s: error allocating msrs\n", __func__);
+		return NULL;
+	}
+
+	return msrs;
+}
+EXPORT_SYMBOL(msrs_alloc);
+
+void msrs_free(struct msr *msrs)
+{
+	free_percpu(msrs);
+}
+EXPORT_SYMBOL(msrs_free);
+
 /* These "safe" variants are slower and should be used when the target MSR
    may not actually exist. */
 static void __rdmsr_safe_on_cpu(void *info)
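
The switch from a caller-sized array (indexed by this_cpu - rv->off) to a percpu allocation means callers no longer size and index the buffer themselves. A minimal sketch of how a caller uses the new API (hypothetical driver code, mirroring the amd64_edac conversion further below):

	#include <linux/cpumask.h>
	#include <asm/msr.h>

	static struct msr *msrs;

	static int example_read(const struct cpumask *mask)
	{
		int cpu;

		msrs = msrs_alloc();	/* one struct msr per CPU */
		if (!msrs)
			return -ENOMEM;

		rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

		for_each_cpu(cpu, mask) {
			struct msr *reg = per_cpu_ptr(msrs, cpu);
			/* reg->l, reg->h and reg->q hold the value read on 'cpu' */
		}

		msrs_free(msrs);
		return 0;
	}
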
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 4c765e9c4664..34a3291ca103 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -20,7 +20,7 @@
  * Derived from the read-mod example from relay-examples by Tom Zanussi.
  */
 
-#define pr_fmt(fmt) "mmiotrace: "
+#define pr_fmt(fmt) "mmiotrace: " fmt
 
 #define DEBUG 1
 
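
This one-word fix matters because pr_info() and friends expand to printk(KERN_INFO pr_fmt(fmt), ...): if pr_fmt() discards its argument, the caller's whole format string is dropped and every message in the file prints as a bare "mmiotrace: " prefix. The effect is easy to reproduce with C string-literal concatenation (a userspace illustration, not kernel code):

	#include <stdio.h>

	#define pr_fmt_broken(fmt)	"mmiotrace: "		/* old macro: drops fmt entirely */
	#define pr_fmt_fixed(fmt)	"mmiotrace: " fmt	/* new macro: concatenates it    */

	int main(void)
	{
		printf(pr_fmt_broken("enabled.\n"));	/* prints "mmiotrace: " -- no message, no newline */
		printf(pr_fmt_fixed("enabled.\n"));	/* prints "mmiotrace: enabled." plus newline      */
		return 0;
	}
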
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 4008e2ce73c1..fdbcc9fd6d31 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -264,10 +264,16 @@ static ssize_t nvram_write(struct file *file, const char __user *buf,
 	unsigned char contents[NVRAM_BYTES];
 	unsigned i = *ppos;
 	unsigned char *tmp;
-	int len;
 
-	len = (NVRAM_BYTES - i) < count ? (NVRAM_BYTES - i) : count;
-	if (copy_from_user(contents, buf, len))
+	if (i >= NVRAM_BYTES)
+		return 0;	/* Past EOF */
+
+	if (count > NVRAM_BYTES - i)
+		count = NVRAM_BYTES - i;
+	if (count > NVRAM_BYTES)
+		return -EFAULT;	/* Can't happen, but prove it to gcc */
+
+	if (copy_from_user(contents, buf, count))
 		return -EFAULT;
 
 	spin_lock_irq(&rtc_lock);
@@ -275,7 +281,7 @@ static ssize_t nvram_write(struct file *file, const char __user *buf,
 	if (!__nvram_check_checksum())
 		goto checksum_err;
 
-	for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp)
+	for (tmp = contents; count--; ++i, ++tmp)
 		__nvram_write_byte(*tmp, i);
 
 	__nvram_set_checksum();
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 5fdd6daa40ea..df5b68433f34 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -13,6 +13,8 @@ module_param(report_gart_errors, int, 0644);
 static int ecc_enable_override;
 module_param(ecc_enable_override, int, 0644);
 
+static struct msr *msrs;
+
 /* Lookup table for all possible MC control instances */
 struct amd64_pvt;
 static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
@@ -2495,8 +2497,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
 static bool amd64_nb_mce_bank_enabled_on_node(int nid)
 {
 	cpumask_var_t mask;
-	struct msr *msrs;
-	int cpu, nbe, idx = 0;
+	int cpu, nbe;
 	bool ret = false;
 
 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
@@ -2507,32 +2508,22 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
 
 	get_cpus_on_this_dct_cpumask(mask, nid);
 
-	msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
-	if (!msrs) {
-		amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
-			     __func__);
-		free_cpumask_var(mask);
-		return false;
-	}
-
 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
 
 	for_each_cpu(cpu, mask) {
-		nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
+		struct msr *reg = per_cpu_ptr(msrs, cpu);
+		nbe = reg->l & K8_MSR_MCGCTL_NBE;
 
 		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
-			cpu, msrs[idx].q,
+			cpu, reg->q,
 			(nbe ? "enabled" : "disabled"));
 
 		if (!nbe)
 			goto out;
-
-		idx++;
 	}
 	ret = true;
 
 out:
-	kfree(msrs);
 	free_cpumask_var(mask);
 	return ret;
 }
@@ -2540,8 +2531,7 @@ out:
 static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
 {
 	cpumask_var_t cmask;
-	struct msr *msrs = NULL;
-	int cpu, idx = 0;
+	int cpu;
 
 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
 		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
@@ -2551,34 +2541,27 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
 
 	get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
 
-	msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
-	if (!msrs) {
-		amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
-			     __func__);
-		return -ENOMEM;
-	}
-
 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
 
 	for_each_cpu(cpu, cmask) {
 
+		struct msr *reg = per_cpu_ptr(msrs, cpu);
+
 		if (on) {
-			if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
+			if (reg->l & K8_MSR_MCGCTL_NBE)
 				pvt->flags.ecc_report = 1;
 
-			msrs[idx].l |= K8_MSR_MCGCTL_NBE;
+			reg->l |= K8_MSR_MCGCTL_NBE;
 		} else {
 			/*
 			 * Turn off ECC reporting only when it was off before
 			 */
 			if (!pvt->flags.ecc_report)
-				msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
+				reg->l &= ~K8_MSR_MCGCTL_NBE;
 		}
-		idx++;
 	}
 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
 
-	kfree(msrs);
 	free_cpumask_var(cmask);
 
 	return 0;
@@ -3036,6 +3019,8 @@ static int __init amd64_edac_init(void)
 	if (cache_k8_northbridges() < 0)
 		return err;
 
+	msrs = msrs_alloc();
+
 	err = pci_register_driver(&amd64_pci_driver);
 	if (err)
 		return err;
@@ -3071,6 +3056,9 @@ static void __exit amd64_edac_exit(void)
 		edac_pci_release_generic_ctl(amd64_ctl_pci);
 
 	pci_unregister_driver(&amd64_pci_driver);
+
+	msrs_free(msrs);
+	msrs = NULL;
 }
 
 module_init(amd64_edac_init);
diff --git a/mm/migrate.c b/mm/migrate.c
index 7dbcb22316d2..0bc640fd68fa 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1044,7 +1044,7 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
 	int err;
 
 	for (i = 0; i < nr_pages; i += chunk_nr) {
-		if (chunk_nr + i > nr_pages)
+		if (chunk_nr > nr_pages - i)
 			chunk_nr = nr_pages - i;
 
 		err = copy_from_user(chunk_pages, &pages[i],
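
The rewritten condition is the overflow-safe form of the same bound check: inside the loop i < nr_pages always holds, so nr_pages - i cannot underflow, whereas chunk_nr + i can wrap around when nr_pages (a userspace-supplied count) is near ULONG_MAX. A standalone illustration of the difference (illustrative values, not the kernel code):

	#include <stdio.h>
	#include <limits.h>

	int main(void)
	{
		unsigned long nr_pages = ULONG_MAX;	/* userspace-supplied count */
		unsigned long i = ULONG_MAX - 5;	/* only 5 entries remain    */
		unsigned long chunk_nr = 16;

		/* Old test: chunk_nr + i wraps to 10, so the clamp never fires. */
		printf("old clamps: %d\n", chunk_nr + i > nr_pages);	/* 0 */

		/* New test: nr_pages - i is 5, so the clamp fires correctly. */
		printf("new clamps: %d\n", chunk_nr > nr_pages - i);	/* 1 */
		return 0;
	}
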