Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	| 54
-rw-r--r--	arch/x86/kernel/cpu/perf_event_amd.c	| 80
-rw-r--r--	arch/x86/kernel/dumpstack.h		|  5
-rw-r--r--	arch/x86/kernel/head32.c		|  4
-rw-r--r--	arch/x86/kernel/head64.c		|  3
-rw-r--r--	arch/x86/kernel/kgdb.c			|  2
-rw-r--r--	arch/x86/kernel/setup.c			| 10
-rw-r--r--	arch/x86/kernel/smpboot.c		|  4
-rw-r--r--	arch/x86/kernel/vmlinux.lds.S		|  2
-rw-r--r--	arch/x86/mm/init.c			| 32
10 files changed, 137 insertions(+), 59 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0316ffe851bd..db5bdc8addf8 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -29,6 +29,7 @@
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
+#include <asm/compat.h>
 
 static u64 perf_event_mask __read_mostly;
 
@@ -159,7 +160,7 @@ struct x86_pmu {
 			     struct perf_event *event);
 	struct event_constraint *event_constraints;
 
-	void		(*cpu_prepare)(int cpu);
+	int		(*cpu_prepare)(int cpu);
 	void		(*cpu_starting)(int cpu);
 	void		(*cpu_dying)(int cpu);
 	void		(*cpu_dead)(int cpu);
@@ -1334,11 +1335,12 @@ static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
+	int ret = NOTIFY_OK;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
 		if (x86_pmu.cpu_prepare)
-			x86_pmu.cpu_prepare(cpu);
+			ret = x86_pmu.cpu_prepare(cpu);
 		break;
 
 	case CPU_STARTING:
@@ -1351,6 +1353,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 			x86_pmu.cpu_dying(cpu);
 		break;
 
+	case CPU_UP_CANCELED:
 	case CPU_DEAD:
 		if (x86_pmu.cpu_dead)
 			x86_pmu.cpu_dead(cpu);
@@ -1360,7 +1363,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		break;
 	}
 
-	return NOTIFY_OK;
+	return ret;
 }
 
 static void __init pmu_check_apic(void)
@@ -1629,14 +1632,42 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	return len;
 }
 
-static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+#ifdef CONFIG_COMPAT
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
-	unsigned long bytes;
+	/* 32-bit process in 64-bit kernel. */
+	struct stack_frame_ia32 frame;
+	const void __user *fp;
 
-	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
+	if (!test_thread_flag(TIF_IA32))
+		return 0;
+
+	fp = compat_ptr(regs->bp);
+	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
+		frame.next_frame     = 0;
+		frame.return_address = 0;
+
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
+			break;
+
+		if (fp < compat_ptr(regs->sp))
+			break;
 
-	return bytes == sizeof(*frame);
+		callchain_store(entry, frame.return_address);
+		fp = compat_ptr(frame.next_frame);
+	}
+	return 1;
+}
+#else
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+	return 0;
 }
+#endif
 
 static void
 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
@@ -1652,11 +1683,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
 	callchain_store(entry, PERF_CONTEXT_USER);
 	callchain_store(entry, regs->ip);
 
+	if (perf_callchain_user32(regs, entry))
+		return;
+
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
 		frame.next_frame	     = NULL;
 		frame.return_address = 0;
 
-		if (!copy_stack_frame(fp, &frame))
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
 			break;
 
 		if ((unsigned long)fp < regs->sp)
@@ -1703,7 +1739,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return entry;
 }
 
-#ifdef CONFIG_EVENT_TRACING
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
 	regs->ip = ip;
@@ -1715,4 +1750,3 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
 	regs->cs = __KERNEL_CS;
 	local_save_flags(regs->flags);
}
-#endif
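The new perf_callchain_user32() walks the frame-pointer chain of a 32-bit task on a 64-bit kernel: each frame is two u32s (saved frame pointer, then return address), and the walk stops at the depth cap, on a failed copy, or when the frame pointer falls below the stack pointer. Below is a stand-alone user-space sketch of the same walk; it assumes x86 frame pointers are kept (build with -O0 -fno-omit-frame-pointer), and all names in it are illustrative, not kernel API.

#include <stdint.h>
#include <stdio.h>

struct frame {
	struct frame *next_frame;     /* saved caller frame pointer */
	uintptr_t     return_address; /* saved return address */
};

#define MAX_DEPTH 16

static void __attribute__((noinline)) walk_frames(void)
{
	struct frame *fp = __builtin_frame_address(0);
	int depth = 0;

	/* Same loop shape as the kernel code: bounded depth, bail on a bogus fp. */
	while (fp && depth < MAX_DEPTH) {
		printf("frame %2d: return address %p\n",
		       depth, (void *)fp->return_address);
		if (fp->next_frame <= fp)	/* stack grows down: caller frame must sit higher */
			break;
		fp = fp->next_frame;
		depth++;
	}
}

static void __attribute__((noinline)) leaf(void)   { walk_frames(); }
static void __attribute__((noinline)) middle(void) { leaf(); }

int main(void)
{
	middle();
	return 0;
}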
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index b87e0b6970cb..db6f7d4056e1 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -137,6 +137,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
 	return (hwc->config & 0xe0) == 0xe0;
 }
 
+static inline int amd_has_nb(struct cpu_hw_events *cpuc)
+{
+	struct amd_nb *nb = cpuc->amd_nb;
+
+	return nb && nb->nb_id != -1;
+}
+
 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 				      struct perf_event *event)
 {
@@ -147,7 +154,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 	/*
 	 * only care about NB events
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return;
 
 	/*
@@ -214,7 +221,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	/*
 	 * if not NB event or no NB, then no constraints
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return &unconstrained;
 
 	/*
@@ -293,51 +300,55 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	return nb;
 }
 
-static void amd_pmu_cpu_online(int cpu)
+static int amd_pmu_cpu_prepare(int cpu)
+{
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+	WARN_ON_ONCE(cpuc->amd_nb);
+
+	if (boot_cpu_data.x86_max_cores < 2)
+		return NOTIFY_OK;
+
+	cpuc->amd_nb = amd_alloc_nb(cpu, -1);
+	if (!cpuc->amd_nb)
+		return NOTIFY_BAD;
+
+	return NOTIFY_OK;
+}
+
+static void amd_pmu_cpu_starting(int cpu)
 {
-	struct cpu_hw_events *cpu1, *cpu2;
-	struct amd_nb *nb = NULL;
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	struct amd_nb *nb;
 	int i, nb_id;
 
 	if (boot_cpu_data.x86_max_cores < 2)
 		return;
 
-	/*
-	 * function may be called too early in the
-	 * boot process, in which case nb_id is bogus
-	 */
 	nb_id = amd_get_nb_id(cpu);
-	if (nb_id == BAD_APICID)
-		return;
-
-	cpu1 = &per_cpu(cpu_hw_events, cpu);
-	cpu1->amd_nb = NULL;
+	WARN_ON_ONCE(nb_id == BAD_APICID);
 
 	raw_spin_lock(&amd_nb_lock);
 
 	for_each_online_cpu(i) {
-		cpu2 = &per_cpu(cpu_hw_events, i);
-		nb = cpu2->amd_nb;
-		if (!nb)
+		nb = per_cpu(cpu_hw_events, i).amd_nb;
+		if (WARN_ON_ONCE(!nb))
 			continue;
-		if (nb->nb_id == nb_id)
-			goto found;
-	}
 
-	nb = amd_alloc_nb(cpu, nb_id);
-	if (!nb) {
-		pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
-		raw_spin_unlock(&amd_nb_lock);
-		return;
+		if (nb->nb_id == nb_id) {
+			kfree(cpuc->amd_nb);
+			cpuc->amd_nb = nb;
+			break;
+		}
 	}
-found:
-	nb->refcnt++;
-	cpu1->amd_nb = nb;
+
+	cpuc->amd_nb->nb_id = nb_id;
+	cpuc->amd_nb->refcnt++;
 
 	raw_spin_unlock(&amd_nb_lock);
 }
 
-static void amd_pmu_cpu_offline(int cpu)
+static void amd_pmu_cpu_dead(int cpu)
 {
 	struct cpu_hw_events *cpuhw;
 
@@ -349,8 +360,10 @@ static void amd_pmu_cpu_offline(int cpu)
 	raw_spin_lock(&amd_nb_lock);
 
 	if (cpuhw->amd_nb) {
-		if (--cpuhw->amd_nb->refcnt == 0)
-			kfree(cpuhw->amd_nb);
+		struct amd_nb *nb = cpuhw->amd_nb;
+
+		if (nb->nb_id == -1 || --nb->refcnt == 0)
+			kfree(nb);
 
 		cpuhw->amd_nb = NULL;
 	}
@@ -379,8 +392,9 @@ static __initconst struct x86_pmu amd_pmu = {
 	.get_event_constraints	= amd_get_event_constraints,
 	.put_event_constraints	= amd_put_event_constraints,
 
-	.cpu_prepare		= amd_pmu_cpu_online,
-	.cpu_dead		= amd_pmu_cpu_offline,
+	.cpu_prepare		= amd_pmu_cpu_prepare,
+	.cpu_starting		= amd_pmu_cpu_starting,
+	.cpu_dead		= amd_pmu_cpu_dead,
 };
 
 static __init int amd_pmu_init(void)
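This rework splits the old amd_pmu_cpu_online() into a cpu_prepare step that may fail (it allocates memory and returns NOTIFY_BAD on failure) and an atomic cpu_starting step that resolves northbridge sharing: each CPU allocates its own amd_nb with id -1 up front, then at starting time either adopts an existing structure for its node (freeing its own) or claims the one it allocated. A stand-alone sketch of that allocate-early/claim-late refcount pattern; all names and the two-cores-per-node topology are illustrative assumptions, not kernel API.

#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

struct node_info {
	int nb_id;   /* -1 until claimed in the "starting" step */
	int refcnt;
};

static struct node_info *per_cpu_nb[NCPUS];

static int cpu_prepare(int cpu)
{
	/* May sleep / may fail: do the allocation here, not in atomic context. */
	per_cpu_nb[cpu] = calloc(1, sizeof(*per_cpu_nb[cpu]));
	if (!per_cpu_nb[cpu])
		return -1;			/* NOTIFY_BAD analog */
	per_cpu_nb[cpu]->nb_id = -1;
	return 0;
}

static void cpu_starting(int cpu, int nb_id)
{
	/* Adopt an already-claimed structure for this node, if any. */
	for (int i = 0; i < NCPUS; i++) {
		struct node_info *nb = per_cpu_nb[i];
		if (i != cpu && nb && nb->nb_id == nb_id) {
			free(per_cpu_nb[cpu]);	/* our own copy is redundant */
			per_cpu_nb[cpu] = nb;
			break;
		}
	}
	per_cpu_nb[cpu]->nb_id = nb_id;
	per_cpu_nb[cpu]->refcnt++;
}

int main(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++)
		cpu_prepare(cpu);
	for (int cpu = 0; cpu < NCPUS; cpu++)
		cpu_starting(cpu, cpu / 2);	/* pretend: two cores per node */
	printf("node of cpu0 shared by cpu1: %s\n",
	       per_cpu_nb[0] == per_cpu_nb[1] ? "yes" : "no");
	return 0;
}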
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index 29e5f7c845b2..e39e77168a37 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -30,6 +30,11 @@ struct stack_frame {
 	unsigned long return_address;
 };
 
+struct stack_frame_ia32 {
+	u32 next_frame;
+	u32 return_address;
+};
+
 static inline unsigned long rewind_frame_pointer(int n)
 {
 	struct stack_frame *frame;
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index adedeef1dedc..b2e246037392 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -7,6 +7,7 @@
 
 #include <linux/init.h>
 #include <linux/start_kernel.h>
+#include <linux/mm.h>
 
 #include <asm/setup.h>
 #include <asm/sections.h>
@@ -44,9 +45,10 @@ void __init i386_start_kernel(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+		/* Assume only end is not page aligned */
 		u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 		u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
-		u64 ramdisk_end   = ramdisk_image + ramdisk_size;
+		u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 #endif
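PAGE_ALIGN() rounds an address up to the next page boundary, so the early reservation now covers the initrd's partial tail page as well (hence the new <linux/mm.h> include above, which provides the macro). A minimal user-space illustration of the arithmetic; the local macro definitions below are only for the demo:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long ramdisk_image = 0x1000;	/* page-aligned start */
	unsigned long ramdisk_size  = 0x2345;	/* ends mid-page */

	printf("unaligned end: %#lx\n", ramdisk_image + ramdisk_size);	/* 0x3345 */
	printf("reserved end:  %#lx\n",
	       PAGE_ALIGN(ramdisk_image + ramdisk_size));		/* 0x4000 */
	return 0;
}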
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index b5a9896ca1e7..7147143fd614 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -103,9 +103,10 @@ void __init x86_64_start_reservations(char *real_mode_data)
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+		/* Assume only end is not page aligned */
 		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
 		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
-		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
+		unsigned long ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 #endif
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index bfba6019d762..b2258ca91003 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -618,8 +618,8 @@ int kgdb_arch_init(void)
 	 * portion of kgdb because this operation requires mutexs to
 	 * complete.
 	 */
+	hw_breakpoint_init(&attr);
 	attr.bp_addr = (unsigned long)kgdb_arch_init;
-	attr.type = PERF_TYPE_BREAKPOINT;
 	attr.bp_len = HW_BREAKPOINT_LEN_1;
 	attr.bp_type = HW_BREAKPOINT_W;
 	attr.disabled = 1;
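hw_breakpoint_init() zeroes the whole perf_event_attr and fills in the breakpoint defaults (including .type = PERF_TYPE_BREAKPOINT and the attr size), which is why the explicit attr.type assignment can go away and no stale fields are left behind. A sketch of the general init-helper pattern, with illustrative types and constants rather than the real perf ABI:

#include <string.h>

struct attr {
	int type;
	int size;
	unsigned long bp_addr;
	int bp_len;
	int bp_type;
	int disabled;
};

#define TYPE_BREAKPOINT 5	/* illustrative constant */

static void breakpoint_attr_init(struct attr *a)
{
	memset(a, 0, sizeof(*a));	/* no uninitialized fields survive */
	a->type = TYPE_BREAKPOINT;
	a->size = sizeof(*a);
}

int main(void)
{
	struct attr attr;

	breakpoint_attr_init(&attr);	/* instead of setting .type by hand */
	attr.bp_addr  = 0x400000;
	attr.bp_len   = 1;
	attr.bp_type  = 2;
	attr.disabled = 1;
	return 0;
}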
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c08d1e3261a8..9570541caf7c 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -313,16 +313,17 @@ static void __init reserve_brk(void)
 #define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
 static void __init relocate_initrd(void)
 {
-
+	/* Assume only end is not page aligned */
 	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
+	u64 area_size     = PAGE_ALIGN(ramdisk_size);
 	u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
 	u64 ramdisk_here;
 	unsigned long slop, clen, mapaddr;
 	char *p, *q;
 
 	/* We need to move the initrd down into lowmem */
-	ramdisk_here = find_e820_area(0, end_of_lowmem, ramdisk_size,
+	ramdisk_here = find_e820_area(0, end_of_lowmem, area_size,
 					 PAGE_SIZE);
 
 	if (ramdisk_here == -1ULL)
@@ -331,7 +332,7 @@ static void __init relocate_initrd(void)
 
 	/* Note: this includes all the lowmem currently occupied by
 	   the initrd, we rely on that fact to keep the data intact. */
-	reserve_early(ramdisk_here, ramdisk_here + ramdisk_size,
+	reserve_early(ramdisk_here, ramdisk_here + area_size,
 			 "NEW RAMDISK");
 	initrd_start = ramdisk_here + PAGE_OFFSET;
 	initrd_end   = initrd_start + ramdisk_size;
@@ -375,9 +376,10 @@ static void __init relocate_initrd(void)
 
 static void __init reserve_initrd(void)
 {
+	/* Assume only end is not page aligned */
 	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
-	u64 ramdisk_end   = ramdisk_image + ramdisk_size;
+	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 	u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
 
 	if (!boot_params.hdr.type_of_loader ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index be40f82b09af..763d815e27a0 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -243,8 +243,6 @@ static void __cpuinit smp_callin(void)
 	end_local_APIC_setup();
 	map_cpu_to_logical_apicid();
 
-	notify_cpu_starting(cpuid);
-
 	/*
 	 * Need to setup vector mappings before we enable interrupts.
 	 */
@@ -265,6 +263,8 @@ static void __cpuinit smp_callin(void)
 	 */
 	smp_store_cpu_info(cpuid);
 
+	notify_cpu_starting(cpuid);
+
 	/*
 	 * Allow the master to continue.
 	 */
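Moving notify_cpu_starting() after smp_store_cpu_info() means CPU_STARTING notifiers only run once the new CPU's cpu_data has been populated; that ordering appears to be what lets the amd_pmu_cpu_starting() callback added above read a valid northbridge id via amd_get_nb_id() instead of bailing out on BAD_APICID.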
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 44879df55696..2cc249718c46 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -291,8 +291,8 @@ SECTIONS
 	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
 		__smp_locks = .;
 		*(.smp_locks)
-		__smp_locks_end = .;
 		. = ALIGN(PAGE_SIZE);
+		__smp_locks_end = .;
 	}
 
 #ifdef CONFIG_X86_64
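With __smp_locks_end moved after the ALIGN(PAGE_SIZE), the [__smp_locks, __smp_locks_end) range is page aligned at both ends, which keeps the later freeing of that region compatible with the stricter free_init_pages() below, which now warns on unaligned boundaries.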
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a4a7d7dc8aa4..b278535b14aa 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -332,11 +332,23 @@ int devmem_is_allowed(unsigned long pagenr)
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-	unsigned long addr = begin;
+	unsigned long addr;
+	unsigned long begin_aligned, end_aligned;
 
-	if (addr >= end)
+	/* Make sure boundaries are page aligned */
+	begin_aligned = PAGE_ALIGN(begin);
+	end_aligned   = end & PAGE_MASK;
+
+	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
+		begin = begin_aligned;
+		end   = end_aligned;
+	}
+
+	if (begin >= end)
 		return;
 
+	addr = begin;
+
 	/*
 	 * If debugging page accesses then do not free this memory but
 	 * mark them not present - any buggy init-section access will
@@ -344,7 +356,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	 */
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-		begin, PAGE_ALIGN(end));
+		begin, end);
 	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
 	/*
@@ -359,8 +371,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	for (; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
-		memset((void *)(addr & ~(PAGE_SIZE-1)),
-		       POISON_FREE_INITMEM, PAGE_SIZE);
+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
@@ -377,6 +388,15 @@ void free_initmem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	free_init_pages("initrd memory", start, end);
+	/*
+	 * end might not be page aligned, and we cannot align it here:
+	 * the decompressor could be confused by an aligned initrd_end.
+	 * The end partial page was already reserved earlier in
+	 *   - i386_start_kernel()
+	 *   - x86_64_start_kernel()
+	 *   - relocate_initrd()
+	 * so we can PAGE_ALIGN() safely here and free the partial page.
+	 */
+	free_init_pages("initrd memory", start, PAGE_ALIGN(end));
 }
 #endif
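A user-space rendering of the boundary check that the free_init_pages() hunk introduces: round begin up and end down to page boundaries, then warn and clamp if the caller passed unaligned values. PAGE_SIZE and PAGE_MASK are redefined locally for the demo; in the kernel they come from <asm/page.h>.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

static void check_range(unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned = PAGE_ALIGN(begin);
	unsigned long end_aligned   = end & PAGE_MASK;

	if (begin_aligned != begin || end_aligned != end) {
		fprintf(stderr, "warning: unaligned range %#lx..%#lx\n",
			begin, end);
		begin = begin_aligned;	/* clamp, as the kernel code does */
		end   = end_aligned;
	}
	if (begin >= end) {
		printf("nothing to free\n");
		return;
	}
	printf("would free %#lx..%#lx (%lu pages)\n",
	       begin, end, (end - begin) / PAGE_SIZE);
}

int main(void)
{
	check_range(0x10000, 0x13000);	/* aligned: 3 pages */
	check_range(0x10000, 0x12345);	/* unaligned end: warned, clamped to 2 pages */
	return 0;
}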