Diffstat (limited to 'arch/i386')
 -rw-r--r--  arch/i386/Kconfig                         |  4
 -rw-r--r--  arch/i386/kernel/apm.c                    | 26
 -rw-r--r--  arch/i386/kernel/cpu/mtrr/generic.c       |  4
 -rw-r--r--  arch/i386/kernel/efi_stub.S               |  1
 -rw-r--r--  arch/i386/kernel/reboot.c                 | 12
 -rw-r--r--  arch/i386/kernel/setup.c                  | 23
 -rw-r--r--  arch/i386/kernel/smp.c                    | 66
 -rw-r--r--  arch/i386/kernel/srat.c                   |  5
 -rw-r--r--  arch/i386/kernel/time.c                   | 50
 -rw-r--r--  arch/i386/kernel/time_hpet.c              | 37
 -rw-r--r--  arch/i386/kernel/traps.c                  | 11
 -rw-r--r--  arch/i386/kernel/vmlinux.lds.S            | 12
 -rw-r--r--  arch/i386/mach-voyager/voyager_thread.c   |  1
 -rw-r--r--  arch/i386/mm/discontig.c                  | 33
 -rw-r--r--  arch/i386/mm/init.c                       | 44
 -rw-r--r--  arch/i386/mm/pgtable.c                    | 30
 -rw-r--r--  arch/i386/power/swsusp.S                  |  2
17 files changed, 288 insertions, 73 deletions
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 9b6cddc8c51b..758044f5e718 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -493,7 +493,7 @@ config HIGHMEM64G | |||
493 | endchoice | 493 | endchoice |
494 | 494 | ||
495 | choice | 495 | choice |
496 | depends on EXPERIMENTAL && !X86_PAE | 496 | depends on EXPERIMENTAL |
497 | prompt "Memory split" if EMBEDDED | 497 | prompt "Memory split" if EMBEDDED |
498 | default VMSPLIT_3G | 498 | default VMSPLIT_3G |
499 | help | 499 | help |
@@ -515,6 +515,7 @@ choice | |||
515 | config VMSPLIT_3G | 515 | config VMSPLIT_3G |
516 | bool "3G/1G user/kernel split" | 516 | bool "3G/1G user/kernel split" |
517 | config VMSPLIT_3G_OPT | 517 | config VMSPLIT_3G_OPT |
518 | depends on !HIGHMEM | ||
518 | bool "3G/1G user/kernel split (for full 1G low memory)" | 519 | bool "3G/1G user/kernel split (for full 1G low memory)" |
519 | config VMSPLIT_2G | 520 | config VMSPLIT_2G |
520 | bool "2G/2G user/kernel split" | 521 | bool "2G/2G user/kernel split" |
@@ -799,6 +800,7 @@ config HOTPLUG_CPU | |||
799 | config COMPAT_VDSO | 800 | config COMPAT_VDSO |
800 | bool "Compat VDSO support" | 801 | bool "Compat VDSO support" |
801 | default y | 802 | default y |
803 | depends on !PARAVIRT | ||
802 | help | 804 | help |
803 | Map the VDSO to the predictable old-style address too. | 805 | Map the VDSO to the predictable old-style address too. |
804 | ---help--- | 806 | ---help--- |
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 8591f2fa920c..ff9ce4b5eaa8 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -1154,9 +1154,11 @@ out: | |||
1154 | 1154 | ||
1155 | static void set_time(void) | 1155 | static void set_time(void) |
1156 | { | 1156 | { |
1157 | struct timespec ts; | ||
1157 | if (got_clock_diff) { /* Must know time zone in order to set clock */ | 1158 | if (got_clock_diff) { /* Must know time zone in order to set clock */ |
1158 | xtime.tv_sec = get_cmos_time() + clock_cmos_diff; | 1159 | ts.tv_sec = get_cmos_time() + clock_cmos_diff; |
1159 | xtime.tv_nsec = 0; | 1160 | ts.tv_nsec = 0; |
1161 | do_settimeofday(&ts); | ||
1160 | } | 1162 | } |
1161 | } | 1163 | } |
1162 | 1164 | ||
@@ -1232,13 +1234,8 @@ static int suspend(int vetoable) | |||
1232 | restore_processor_state(); | 1234 | restore_processor_state(); |
1233 | 1235 | ||
1234 | local_irq_disable(); | 1236 | local_irq_disable(); |
1235 | write_seqlock(&xtime_lock); | ||
1236 | spin_lock(&i8253_lock); | ||
1237 | reinit_timer(); | ||
1238 | set_time(); | 1237 | set_time(); |
1239 | 1238 | reinit_timer(); | |
1240 | spin_unlock(&i8253_lock); | ||
1241 | write_sequnlock(&xtime_lock); | ||
1242 | 1239 | ||
1243 | if (err == APM_NO_ERROR) | 1240 | if (err == APM_NO_ERROR) |
1244 | err = APM_SUCCESS; | 1241 | err = APM_SUCCESS; |
@@ -1365,9 +1362,7 @@ static void check_events(void) | |||
1365 | ignore_bounce = 1; | 1362 | ignore_bounce = 1; |
1366 | if ((event != APM_NORMAL_RESUME) | 1363 | if ((event != APM_NORMAL_RESUME) |
1367 | || (ignore_normal_resume == 0)) { | 1364 | || (ignore_normal_resume == 0)) { |
1368 | write_seqlock_irq(&xtime_lock); | ||
1369 | set_time(); | 1365 | set_time(); |
1370 | write_sequnlock_irq(&xtime_lock); | ||
1371 | device_resume(); | 1366 | device_resume(); |
1372 | pm_send_all(PM_RESUME, (void *)0); | 1367 | pm_send_all(PM_RESUME, (void *)0); |
1373 | queue_event(event, NULL); | 1368 | queue_event(event, NULL); |
@@ -1383,9 +1378,7 @@ static void check_events(void) | |||
1383 | break; | 1378 | break; |
1384 | 1379 | ||
1385 | case APM_UPDATE_TIME: | 1380 | case APM_UPDATE_TIME: |
1386 | write_seqlock_irq(&xtime_lock); | ||
1387 | set_time(); | 1381 | set_time(); |
1388 | write_sequnlock_irq(&xtime_lock); | ||
1389 | break; | 1382 | break; |
1390 | 1383 | ||
1391 | case APM_CRITICAL_SUSPEND: | 1384 | case APM_CRITICAL_SUSPEND: |
@@ -2339,6 +2332,7 @@ static int __init apm_init(void) | |||
2339 | ret = kernel_thread(apm, NULL, CLONE_KERNEL | SIGCHLD); | 2332 | ret = kernel_thread(apm, NULL, CLONE_KERNEL | SIGCHLD); |
2340 | if (ret < 0) { | 2333 | if (ret < 0) { |
2341 | printk(KERN_ERR "apm: disabled - Unable to start kernel thread.\n"); | 2334 | printk(KERN_ERR "apm: disabled - Unable to start kernel thread.\n"); |
2335 | remove_proc_entry("apm", NULL); | ||
2342 | return -ENOMEM; | 2336 | return -ENOMEM; |
2343 | } | 2337 | } |
2344 | 2338 | ||
@@ -2348,7 +2342,13 @@ static int __init apm_init(void) | |||
2348 | return 0; | 2342 | return 0; |
2349 | } | 2343 | } |
2350 | 2344 | ||
2351 | misc_register(&apm_device); | 2345 | /* |
2346 | * Note we don't actually care if the misc_device cannot be registered. | ||
2347 | * this driver can do its job without it, even if userspace can't | ||
2348 | * control it. just log the error | ||
2349 | */ | ||
2350 | if (misc_register(&apm_device)) | ||
2351 | printk(KERN_WARNING "apm: Could not register misc device.\n"); | ||
2352 | 2352 | ||
2353 | if (HZ != 100) | 2353 | if (HZ != 100) |
2354 | idle_period = (idle_period * HZ) / 100; | 2354 | idle_period = (idle_period * HZ) / 100; |
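The apm.c hunks above stop writing xtime under xtime_lock directly and instead hand a local struct timespec to do_settimeofday(), which does its own locking and timekeeping bookkeeping. A minimal sketch of the resulting pattern, assuming only the 2.6.18-era do_settimeofday() prototype; the helper name is illustrative and not part of the patch:

#include <linux/time.h>		/* struct timespec, do_settimeofday() */

/* Stand-in for get_cmos_time() + clock_cmos_diff in the real driver. */
static unsigned long example_wall_seconds(void)
{
	return 0;	/* a real driver would read the RTC here */
}

static void example_set_time(void)
{
	struct timespec ts;

	ts.tv_sec = example_wall_seconds();	/* whole seconds only */
	ts.tv_nsec = 0;
	do_settimeofday(&ts);	/* caller no longer touches xtime_lock */
}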
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index 169ac8e0db68..0b61eed8bbd8 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -243,7 +243,7 @@ static DEFINE_SPINLOCK(set_atomicity_lock); | |||
243 | * has been called. | 243 | * has been called. |
244 | */ | 244 | */ |
245 | 245 | ||
246 | static void prepare_set(void) | 246 | static void prepare_set(void) __acquires(set_atomicity_lock) |
247 | { | 247 | { |
248 | unsigned long cr0; | 248 | unsigned long cr0; |
249 | 249 | ||
@@ -274,7 +274,7 @@ static void prepare_set(void) | |||
274 | mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi); | 274 | mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi); |
275 | } | 275 | } |
276 | 276 | ||
277 | static void post_set(void) | 277 | static void post_set(void) __releases(set_atomicity_lock) |
278 | { | 278 | { |
279 | /* Flush TLBs (no need to flush caches - they are disabled) */ | 279 | /* Flush TLBs (no need to flush caches - they are disabled) */ |
280 | __flush_tlb(); | 280 | __flush_tlb(); |
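The only change to mtrr/generic.c is the addition of __acquires()/__releases() annotations, which tell sparse that prepare_set() and post_set() deliberately leave set_atomicity_lock held between the two calls. A self-contained sketch of the same annotation idiom; the lock and function names here are ours:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Under gcc the annotations expand to nothing; sparse uses them to check
 * that every __acquires() is balanced by a __releases() somewhere. */
static void example_enter(void) __acquires(example_lock)
{
	spin_lock(&example_lock);
}

static void example_exit(void) __releases(example_lock)
{
	spin_unlock(&example_lock);
}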
diff --git a/arch/i386/kernel/efi_stub.S b/arch/i386/kernel/efi_stub.S
index d3ee73a3eee3..ef00bb77d7e4 100644
--- a/arch/i386/kernel/efi_stub.S
+++ b/arch/i386/kernel/efi_stub.S
@@ -7,7 +7,6 @@ | |||
7 | 7 | ||
8 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
9 | #include <asm/page.h> | 9 | #include <asm/page.h> |
10 | #include <asm/pgtable.h> | ||
11 | 10 | ||
12 | /* | 11 | /* |
13 | * efi_call_phys(void *, ...) is a function with variable parameters. | 12 | * efi_call_phys(void *, ...) is a function with variable parameters. |
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 54cfeabbc5e4..84278e0093a2 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -145,14 +145,10 @@ real_mode_gdt_entries [3] = | |||
145 | 0x000092000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ | 145 | 0x000092000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ |
146 | }; | 146 | }; |
147 | 147 | ||
148 | static struct | 148 | static struct Xgt_desc_struct |
149 | { | 149 | real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries }, |
150 | unsigned short size __attribute__ ((packed)); | 150 | real_mode_idt = { 0x3ff, 0 }, |
151 | unsigned long long * base __attribute__ ((packed)); | 151 | no_idt = { 0, 0 }; |
152 | } | ||
153 | real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries }, | ||
154 | real_mode_idt = { 0x3ff, NULL }, | ||
155 | no_idt = { 0, NULL }; | ||
156 | 152 | ||
157 | 153 | ||
158 | /* This is 16-bit protected mode code to disable paging and the cache, | 154 | /* This is 16-bit protected mode code to disable paging and the cache, |
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 7a99b1369fa2..76a524b4c90f 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -53,6 +53,7 @@ | |||
53 | #include <asm/apic.h> | 53 | #include <asm/apic.h> |
54 | #include <asm/e820.h> | 54 | #include <asm/e820.h> |
55 | #include <asm/mpspec.h> | 55 | #include <asm/mpspec.h> |
56 | #include <asm/mmzone.h> | ||
56 | #include <asm/setup.h> | 57 | #include <asm/setup.h> |
57 | #include <asm/arch_hooks.h> | 58 | #include <asm/arch_hooks.h> |
58 | #include <asm/sections.h> | 59 | #include <asm/sections.h> |
@@ -815,6 +816,24 @@ static int __init parse_vmalloc(char *arg) | |||
815 | early_param("vmalloc", parse_vmalloc); | 816 | early_param("vmalloc", parse_vmalloc); |
816 | 817 | ||
817 | /* | 818 | /* |
819 | * reservetop=size reserves a hole at the top of the kernel address space which | ||
820 | * a hypervisor can load into later. Needed for dynamically loaded hypervisors, | ||
821 | * so relocating the fixmap can be done before paging initialization. | ||
822 | */ | ||
823 | static int __init parse_reservetop(char *arg) | ||
824 | { | ||
825 | unsigned long address; | ||
826 | |||
827 | if (!arg) | ||
828 | return -EINVAL; | ||
829 | |||
830 | address = memparse(arg, &arg); | ||
831 | reserve_top_address(address); | ||
832 | return 0; | ||
833 | } | ||
834 | early_param("reservetop", parse_reservetop); | ||
835 | |||
836 | /* | ||
818 | * Callback for efi_memory_walk. | 837 | * Callback for efi_memory_walk. |
819 | */ | 838 | */ |
820 | static int __init | 839 | static int __init |
@@ -1070,7 +1089,7 @@ static unsigned long __init setup_memory(void) | |||
1070 | 1089 | ||
1071 | void __init zone_sizes_init(void) | 1090 | void __init zone_sizes_init(void) |
1072 | { | 1091 | { |
1073 | unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; | 1092 | unsigned long zones_size[MAX_NR_ZONES] = { 0, }; |
1074 | unsigned int max_dma, low; | 1093 | unsigned int max_dma, low; |
1075 | 1094 | ||
1076 | max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; | 1095 | max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; |
@@ -1147,7 +1166,7 @@ void __init setup_bootmem_allocator(void) | |||
1147 | */ | 1166 | */ |
1148 | find_smp_config(); | 1167 | find_smp_config(); |
1149 | #endif | 1168 | #endif |
1150 | 1169 | numa_kva_reserve(); | |
1151 | #ifdef CONFIG_BLK_DEV_INITRD | 1170 | #ifdef CONFIG_BLK_DEV_INITRD |
1152 | if (LOADER_TYPE && INITRD_START) { | 1171 | if (LOADER_TYPE && INITRD_START) { |
1153 | if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { | 1172 | if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { |
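parse_reservetop() above relies on memparse(), which accepts a plain number or one with a K/M/G suffix. A rough userspace-style model of that suffix handling, just to make the accepted syntax concrete; the function is ours, not the kernel's:

#include <stdlib.h>

/* Approximation of kernel memparse(): value with optional K/M/G suffix. */
static unsigned long example_memparse(const char *s, char **retptr)
{
	unsigned long val = strtoul(s, retptr, 0);	/* base 0: 0x.., 0.., decimal */

	switch (**retptr) {
	case 'G': case 'g':
		val <<= 10;
		/* fall through */
	case 'M': case 'm':
		val <<= 10;
		/* fall through */
	case 'K': case 'k':
		val <<= 10;
		(*retptr)++;
	}
	return val;
}

/* So booting with "reservetop=64M" ends up calling reserve_top_address(64 << 20). */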
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index c10789d7a9d3..465188e2d701 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -634,3 +634,69 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs) | |||
634 | } | 634 | } |
635 | } | 635 | } |
636 | 636 | ||
637 | /* | ||
638 | * this function sends a 'generic call function' IPI to one other CPU | ||
639 | * in the system. | ||
640 | * | ||
641 | * cpu is a standard Linux logical CPU number. | ||
642 | */ | ||
643 | static void | ||
644 | __smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
645 | int nonatomic, int wait) | ||
646 | { | ||
647 | struct call_data_struct data; | ||
648 | int cpus = 1; | ||
649 | |||
650 | data.func = func; | ||
651 | data.info = info; | ||
652 | atomic_set(&data.started, 0); | ||
653 | data.wait = wait; | ||
654 | if (wait) | ||
655 | atomic_set(&data.finished, 0); | ||
656 | |||
657 | call_data = &data; | ||
658 | wmb(); | ||
659 | /* Send a message to all other CPUs and wait for them to respond */ | ||
660 | send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR); | ||
661 | |||
662 | /* Wait for response */ | ||
663 | while (atomic_read(&data.started) != cpus) | ||
664 | cpu_relax(); | ||
665 | |||
666 | if (!wait) | ||
667 | return; | ||
668 | |||
669 | while (atomic_read(&data.finished) != cpus) | ||
670 | cpu_relax(); | ||
671 | } | ||
672 | |||
673 | /* | ||
674 | * smp_call_function_single - Run a function on another CPU | ||
675 | * @func: The function to run. This must be fast and non-blocking. | ||
676 | * @info: An arbitrary pointer to pass to the function. | ||
677 | * @nonatomic: Currently unused. | ||
678 | * @wait: If true, wait until function has completed on other CPUs. | ||
679 | * | ||
680 | * Returns 0 on success, else a negative status code. | ||
681 | * | ||
682 | * Does not return until the remote CPU is nearly ready to execute <func> | ||
683 | * or is or has executed. | ||
684 | */ | ||
685 | |||
686 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
687 | int nonatomic, int wait) | ||
688 | { | ||
689 | /* prevent preemption and reschedule on another processor */ | ||
690 | int me = get_cpu(); | ||
691 | if (cpu == me) { | ||
692 | WARN_ON(1); | ||
693 | put_cpu(); | ||
694 | return -EBUSY; | ||
695 | } | ||
696 | spin_lock_bh(&call_lock); | ||
697 | __smp_call_function_single(cpu, func, info, nonatomic, wait); | ||
698 | spin_unlock_bh(&call_lock); | ||
699 | put_cpu(); | ||
700 | return 0; | ||
701 | } | ||
702 | EXPORT_SYMBOL(smp_call_function_single); | ||
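The smp_call_function_single() added above mirrors the existing x86_64 helper: func runs on the chosen CPU in interrupt context, and wait=1 blocks until it has finished there. A small usage sketch; the caller and callback names are illustrative:

#include <linux/kernel.h>
#include <linux/smp.h>

/* Runs on the target CPU in interrupt context: keep it short, never sleep. */
static void example_whoami(void *info)
{
	int *where = info;

	*where = smp_processor_id();
}

static int example_poke_cpu(int cpu)
{
	int where = -1;
	int ret;

	/* nonatomic is currently unused; wait=1 waits for completion */
	ret = smp_call_function_single(cpu, example_whoami, &where, 0, 1);
	if (ret)
		return ret;	/* e.g. -EBUSY when cpu is the calling CPU */

	printk(KERN_INFO "callback ran on cpu%d\n", where);
	return 0;
}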
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c
index b1809c9a0899..83db411b3aa7 100644
--- a/arch/i386/kernel/srat.c
+++ b/arch/i386/kernel/srat.c
@@ -42,7 +42,7 @@ | |||
42 | #define PXM_BITMAP_LEN (MAX_PXM_DOMAINS / 8) | 42 | #define PXM_BITMAP_LEN (MAX_PXM_DOMAINS / 8) |
43 | static u8 pxm_bitmap[PXM_BITMAP_LEN]; /* bitmap of proximity domains */ | 43 | static u8 pxm_bitmap[PXM_BITMAP_LEN]; /* bitmap of proximity domains */ |
44 | 44 | ||
45 | #define MAX_CHUNKS_PER_NODE 4 | 45 | #define MAX_CHUNKS_PER_NODE 3 |
46 | #define MAXCHUNKS (MAX_CHUNKS_PER_NODE * MAX_NUMNODES) | 46 | #define MAXCHUNKS (MAX_CHUNKS_PER_NODE * MAX_NUMNODES) |
47 | struct node_memory_chunk_s { | 47 | struct node_memory_chunk_s { |
48 | unsigned long start_pfn; | 48 | unsigned long start_pfn; |
@@ -135,9 +135,6 @@ static void __init parse_memory_affinity_structure (char *sratp) | |||
135 | "enabled and removable" : "enabled" ) ); | 135 | "enabled and removable" : "enabled" ) ); |
136 | } | 136 | } |
137 | 137 | ||
138 | #if MAX_NR_ZONES != 4 | ||
139 | #error "MAX_NR_ZONES != 4, chunk_to_zone requires review" | ||
140 | #endif | ||
141 | /* Take a chunk of pages from page frame cstart to cend and count the number | 138 | /* Take a chunk of pages from page frame cstart to cend and count the number |
142 | * of pages in each zone, returned via zones[]. | 139 | * of pages in each zone, returned via zones[]. |
143 | */ | 140 | */ |
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 5af802ef00b2..86944acfb647 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -285,16 +285,19 @@ void notify_arch_cmos_timer(void) | |||
285 | mod_timer(&sync_cmos_timer, jiffies + 1); | 285 | mod_timer(&sync_cmos_timer, jiffies + 1); |
286 | } | 286 | } |
287 | 287 | ||
288 | static long clock_cmos_diff, sleep_start; | 288 | static long clock_cmos_diff; |
289 | static unsigned long sleep_start; | ||
289 | 290 | ||
290 | static int timer_suspend(struct sys_device *dev, pm_message_t state) | 291 | static int timer_suspend(struct sys_device *dev, pm_message_t state) |
291 | { | 292 | { |
292 | /* | 293 | /* |
293 | * Estimate time zone so that set_time can update the clock | 294 | * Estimate time zone so that set_time can update the clock |
294 | */ | 295 | */ |
295 | clock_cmos_diff = -get_cmos_time(); | 296 | unsigned long ctime = get_cmos_time(); |
297 | |||
298 | clock_cmos_diff = -ctime; | ||
296 | clock_cmos_diff += get_seconds(); | 299 | clock_cmos_diff += get_seconds(); |
297 | sleep_start = get_cmos_time(); | 300 | sleep_start = ctime; |
298 | return 0; | 301 | return 0; |
299 | } | 302 | } |
300 | 303 | ||
@@ -302,18 +305,29 @@ static int timer_resume(struct sys_device *dev) | |||
302 | { | 305 | { |
303 | unsigned long flags; | 306 | unsigned long flags; |
304 | unsigned long sec; | 307 | unsigned long sec; |
305 | unsigned long sleep_length; | 308 | unsigned long ctime = get_cmos_time(); |
306 | 309 | long sleep_length = (ctime - sleep_start) * HZ; | |
310 | struct timespec ts; | ||
311 | |||
312 | if (sleep_length < 0) { | ||
313 | printk(KERN_WARNING "CMOS clock skew detected in timer resume!\n"); | ||
314 | /* The time after the resume must not be earlier than the time | ||
315 | * before the suspend or some nasty things will happen | ||
316 | */ | ||
317 | sleep_length = 0; | ||
318 | ctime = sleep_start; | ||
319 | } | ||
307 | #ifdef CONFIG_HPET_TIMER | 320 | #ifdef CONFIG_HPET_TIMER |
308 | if (is_hpet_enabled()) | 321 | if (is_hpet_enabled()) |
309 | hpet_reenable(); | 322 | hpet_reenable(); |
310 | #endif | 323 | #endif |
311 | setup_pit_timer(); | 324 | setup_pit_timer(); |
312 | sec = get_cmos_time() + clock_cmos_diff; | 325 | |
313 | sleep_length = (get_cmos_time() - sleep_start) * HZ; | 326 | sec = ctime + clock_cmos_diff; |
327 | ts.tv_sec = sec; | ||
328 | ts.tv_nsec = 0; | ||
329 | do_settimeofday(&ts); | ||
314 | write_seqlock_irqsave(&xtime_lock, flags); | 330 | write_seqlock_irqsave(&xtime_lock, flags); |
315 | xtime.tv_sec = sec; | ||
316 | xtime.tv_nsec = 0; | ||
317 | jiffies_64 += sleep_length; | 331 | jiffies_64 += sleep_length; |
318 | wall_jiffies += sleep_length; | 332 | wall_jiffies += sleep_length; |
319 | write_sequnlock_irqrestore(&xtime_lock, flags); | 333 | write_sequnlock_irqrestore(&xtime_lock, flags); |
@@ -349,10 +363,11 @@ extern void (*late_time_init)(void); | |||
349 | /* Duplicate of time_init() below, with hpet_enable part added */ | 363 | /* Duplicate of time_init() below, with hpet_enable part added */ |
350 | static void __init hpet_time_init(void) | 364 | static void __init hpet_time_init(void) |
351 | { | 365 | { |
352 | xtime.tv_sec = get_cmos_time(); | 366 | struct timespec ts; |
353 | xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); | 367 | ts.tv_sec = get_cmos_time(); |
354 | set_normalized_timespec(&wall_to_monotonic, | 368 | ts.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); |
355 | -xtime.tv_sec, -xtime.tv_nsec); | 369 | |
370 | do_settimeofday(&ts); | ||
356 | 371 | ||
357 | if ((hpet_enable() >= 0) && hpet_use_timer) { | 372 | if ((hpet_enable() >= 0) && hpet_use_timer) { |
358 | printk("Using HPET for base-timer\n"); | 373 | printk("Using HPET for base-timer\n"); |
@@ -364,6 +379,7 @@ static void __init hpet_time_init(void) | |||
364 | 379 | ||
365 | void __init time_init(void) | 380 | void __init time_init(void) |
366 | { | 381 | { |
382 | struct timespec ts; | ||
367 | #ifdef CONFIG_HPET_TIMER | 383 | #ifdef CONFIG_HPET_TIMER |
368 | if (is_hpet_capable()) { | 384 | if (is_hpet_capable()) { |
369 | /* | 385 | /* |
@@ -374,10 +390,10 @@ void __init time_init(void) | |||
374 | return; | 390 | return; |
375 | } | 391 | } |
376 | #endif | 392 | #endif |
377 | xtime.tv_sec = get_cmos_time(); | 393 | ts.tv_sec = get_cmos_time(); |
378 | xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); | 394 | ts.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); |
379 | set_normalized_timespec(&wall_to_monotonic, | 395 | |
380 | -xtime.tv_sec, -xtime.tv_nsec); | 396 | do_settimeofday(&ts); |
381 | 397 | ||
382 | time_init_hook(); | 398 | time_init_hook(); |
383 | } | 399 | } |
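The timer_suspend()/timer_resume() hunks above boil down to two CMOS reads plus a signed sleep-length check so that a skewed CMOS clock can never move time backwards. The same arithmetic, pulled out of the driver context as a plain-C sketch with names of our own choosing:

/* Model of the suspend/resume wall-clock bookkeeping in the time.c hunks. */
struct example_clock_state {
	long cmos_diff;			/* system seconds minus CMOS seconds */
	unsigned long sleep_start;	/* CMOS seconds at suspend time */
};

static void example_suspend(struct example_clock_state *s,
			    unsigned long cmos_now, unsigned long sys_now)
{
	s->cmos_diff = (long)(sys_now - cmos_now);
	s->sleep_start = cmos_now;
}

/* Returns the wall-clock seconds to restore; *slept gets the jiffies slept. */
static unsigned long example_resume(const struct example_clock_state *s,
				    unsigned long cmos_now, long hz, long *slept)
{
	long sleep_length = (long)(cmos_now - s->sleep_start) * hz;

	if (sleep_length < 0) {
		/* CMOS apparently went backwards: clamp rather than jump back */
		sleep_length = 0;
		cmos_now = s->sleep_start;
	}
	*slept = sleep_length;
	return cmos_now + s->cmos_diff;
}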
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c
index 14a1376fedd1..6bf14a4e995e 100644
--- a/arch/i386/kernel/time_hpet.c
+++ b/arch/i386/kernel/time_hpet.c
@@ -301,23 +301,25 @@ int hpet_rtc_timer_init(void) | |||
301 | hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; | 301 | hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; |
302 | 302 | ||
303 | local_irq_save(flags); | 303 | local_irq_save(flags); |
304 | |||
304 | cnt = hpet_readl(HPET_COUNTER); | 305 | cnt = hpet_readl(HPET_COUNTER); |
305 | cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq); | 306 | cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq); |
306 | hpet_writel(cnt, HPET_T1_CMP); | 307 | hpet_writel(cnt, HPET_T1_CMP); |
307 | hpet_t1_cmp = cnt; | 308 | hpet_t1_cmp = cnt; |
308 | local_irq_restore(flags); | ||
309 | 309 | ||
310 | cfg = hpet_readl(HPET_T1_CFG); | 310 | cfg = hpet_readl(HPET_T1_CFG); |
311 | cfg &= ~HPET_TN_PERIODIC; | 311 | cfg &= ~HPET_TN_PERIODIC; |
312 | cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; | 312 | cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; |
313 | hpet_writel(cfg, HPET_T1_CFG); | 313 | hpet_writel(cfg, HPET_T1_CFG); |
314 | 314 | ||
315 | local_irq_restore(flags); | ||
316 | |||
315 | return 1; | 317 | return 1; |
316 | } | 318 | } |
317 | 319 | ||
318 | static void hpet_rtc_timer_reinit(void) | 320 | static void hpet_rtc_timer_reinit(void) |
319 | { | 321 | { |
320 | unsigned int cfg, cnt; | 322 | unsigned int cfg, cnt, ticks_per_int, lost_ints; |
321 | 323 | ||
322 | if (unlikely(!(PIE_on | AIE_on | UIE_on))) { | 324 | if (unlikely(!(PIE_on | AIE_on | UIE_on))) { |
323 | cfg = hpet_readl(HPET_T1_CFG); | 325 | cfg = hpet_readl(HPET_T1_CFG); |
@@ -332,10 +334,33 @@ static void hpet_rtc_timer_reinit(void) | |||
332 | hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; | 334 | hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; |
333 | 335 | ||
334 | /* It is more accurate to use the comparator value than current count.*/ | 336 | /* It is more accurate to use the comparator value than current count.*/ |
335 | cnt = hpet_t1_cmp; | 337 | ticks_per_int = hpet_tick * HZ / hpet_rtc_int_freq; |
336 | cnt += hpet_tick*HZ/hpet_rtc_int_freq; | 338 | hpet_t1_cmp += ticks_per_int; |
337 | hpet_writel(cnt, HPET_T1_CMP); | 339 | hpet_writel(hpet_t1_cmp, HPET_T1_CMP); |
338 | hpet_t1_cmp = cnt; | 340 | |
341 | /* | ||
342 | * If the interrupt handler was delayed too long, the write above tries | ||
343 | * to schedule the next interrupt in the past and the hardware would | ||
344 | * not interrupt until the counter had wrapped around. | ||
345 | * So we have to check that the comparator wasn't set to a past time. | ||
346 | */ | ||
347 | cnt = hpet_readl(HPET_COUNTER); | ||
348 | if (unlikely((int)(cnt - hpet_t1_cmp) > 0)) { | ||
349 | lost_ints = (cnt - hpet_t1_cmp) / ticks_per_int + 1; | ||
350 | /* Make sure that, even with the time needed to execute | ||
351 | * this code, the next scheduled interrupt has been moved | ||
352 | * back to the future: */ | ||
353 | lost_ints++; | ||
354 | |||
355 | hpet_t1_cmp += lost_ints * ticks_per_int; | ||
356 | hpet_writel(hpet_t1_cmp, HPET_T1_CMP); | ||
357 | |||
358 | if (PIE_on) | ||
359 | PIE_count += lost_ints; | ||
360 | |||
361 | printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", | ||
362 | hpet_rtc_int_freq); | ||
363 | } | ||
339 | } | 364 | } |
340 | 365 | ||
341 | /* | 366 | /* |
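The hpet_rtc_timer_reinit() change handles the case where the interrupt handler ran so late that the freshly written comparator already lies in the past, which would otherwise silence the timer until the 32-bit counter wrapped. A standalone sketch of that wraparound-safe catch-up arithmetic; function and variable names are ours:

/* 32-bit counter arithmetic, so overflow behaves like the HPET's counter. */
static unsigned int example_advance_comparator(unsigned int now,
					       unsigned int *cmp,
					       unsigned int ticks_per_int)
{
	unsigned int lost_ints = 0;

	*cmp += ticks_per_int;			/* schedule the next interrupt */

	/* Signed difference: positive only when *cmp is already in the past. */
	if ((int)(now - *cmp) > 0) {
		lost_ints = (now - *cmp) / ticks_per_int + 1;
		lost_ints++;			/* margin for the time spent here */
		*cmp += lost_ints * ticks_per_int;
	}
	return lost_ints;	/* the caller adds this to PIE_count and warns */
}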
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index c7adb076e811..21aa1cd57773 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -374,6 +374,8 @@ void show_registers(struct pt_regs *regs) | |||
374 | */ | 374 | */ |
375 | if (in_kernel) { | 375 | if (in_kernel) { |
376 | u8 __user *eip; | 376 | u8 __user *eip; |
377 | int code_bytes = 64; | ||
378 | unsigned char c; | ||
377 | 379 | ||
378 | printk("\n" KERN_EMERG "Stack: "); | 380 | printk("\n" KERN_EMERG "Stack: "); |
379 | show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG); | 381 | show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG); |
@@ -381,9 +383,12 @@ void show_registers(struct pt_regs *regs) | |||
381 | printk(KERN_EMERG "Code: "); | 383 | printk(KERN_EMERG "Code: "); |
382 | 384 | ||
383 | eip = (u8 __user *)regs->eip - 43; | 385 | eip = (u8 __user *)regs->eip - 43; |
384 | for (i = 0; i < 64; i++, eip++) { | 386 | if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) { |
385 | unsigned char c; | 387 | /* try starting at EIP */ |
386 | 388 | eip = (u8 __user *)regs->eip; | |
389 | code_bytes = 32; | ||
390 | } | ||
391 | for (i = 0; i < code_bytes; i++, eip++) { | ||
387 | if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) { | 392 | if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) { |
388 | printk(" Bad EIP value."); | 393 | printk(" Bad EIP value."); |
389 | break; | 394 | break; |
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 2d4f1386e2b1..1e7ac1c44ddc 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -13,6 +13,12 @@ OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") | |||
13 | OUTPUT_ARCH(i386) | 13 | OUTPUT_ARCH(i386) |
14 | ENTRY(phys_startup_32) | 14 | ENTRY(phys_startup_32) |
15 | jiffies = jiffies_64; | 15 | jiffies = jiffies_64; |
16 | |||
17 | PHDRS { | ||
18 | text PT_LOAD FLAGS(5); /* R_E */ | ||
19 | data PT_LOAD FLAGS(7); /* RWE */ | ||
20 | note PT_NOTE FLAGS(4); /* R__ */ | ||
21 | } | ||
16 | SECTIONS | 22 | SECTIONS |
17 | { | 23 | { |
18 | . = __KERNEL_START; | 24 | . = __KERNEL_START; |
@@ -26,7 +32,7 @@ SECTIONS | |||
26 | KPROBES_TEXT | 32 | KPROBES_TEXT |
27 | *(.fixup) | 33 | *(.fixup) |
28 | *(.gnu.warning) | 34 | *(.gnu.warning) |
29 | } = 0x9090 | 35 | } :text = 0x9090 |
30 | 36 | ||
31 | _etext = .; /* End of text section */ | 37 | _etext = .; /* End of text section */ |
32 | 38 | ||
@@ -48,7 +54,7 @@ SECTIONS | |||
48 | .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ | 54 | .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ |
49 | *(.data) | 55 | *(.data) |
50 | CONSTRUCTORS | 56 | CONSTRUCTORS |
51 | } | 57 | } :data |
52 | 58 | ||
53 | . = ALIGN(4096); | 59 | . = ALIGN(4096); |
54 | __nosave_begin = .; | 60 | __nosave_begin = .; |
@@ -184,4 +190,6 @@ SECTIONS | |||
184 | STABS_DEBUG | 190 | STABS_DEBUG |
185 | 191 | ||
186 | DWARF_DEBUG | 192 | DWARF_DEBUG |
193 | |||
194 | NOTES | ||
187 | } | 195 | } |
diff --git a/arch/i386/mach-voyager/voyager_thread.c b/arch/i386/mach-voyager/voyager_thread.c
index 50f6de6ff64d..f39887359e8e 100644
--- a/arch/i386/mach-voyager/voyager_thread.c
+++ b/arch/i386/mach-voyager/voyager_thread.c
@@ -130,7 +130,6 @@ thread(void *unused) | |||
130 | init_timer(&wakeup_timer); | 130 | init_timer(&wakeup_timer); |
131 | 131 | ||
132 | sigfillset(¤t->blocked); | 132 | sigfillset(¤t->blocked); |
133 | current->signal->tty = NULL; | ||
134 | 133 | ||
135 | printk(KERN_NOTICE "Voyager starting monitor thread\n"); | 134 | printk(KERN_NOTICE "Voyager starting monitor thread\n"); |
136 | 135 | ||
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index f0c10b3cd158..941d1a5ebabb 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -117,7 +117,8 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); | |||
117 | 117 | ||
118 | void *node_remap_end_vaddr[MAX_NUMNODES]; | 118 | void *node_remap_end_vaddr[MAX_NUMNODES]; |
119 | void *node_remap_alloc_vaddr[MAX_NUMNODES]; | 119 | void *node_remap_alloc_vaddr[MAX_NUMNODES]; |
120 | 120 | static unsigned long kva_start_pfn; | |
121 | static unsigned long kva_pages; | ||
121 | /* | 122 | /* |
122 | * FLAT - support for basic PC memory model with discontig enabled, essentially | 123 | * FLAT - support for basic PC memory model with discontig enabled, essentially |
123 | * a single node with all available processors in it with a flat | 124 | * a single node with all available processors in it with a flat |
@@ -286,7 +287,6 @@ unsigned long __init setup_memory(void) | |||
286 | { | 287 | { |
287 | int nid; | 288 | int nid; |
288 | unsigned long system_start_pfn, system_max_low_pfn; | 289 | unsigned long system_start_pfn, system_max_low_pfn; |
289 | unsigned long reserve_pages; | ||
290 | 290 | ||
291 | /* | 291 | /* |
292 | * When mapping a NUMA machine we allocate the node_mem_map arrays | 292 | * When mapping a NUMA machine we allocate the node_mem_map arrays |
@@ -298,14 +298,23 @@ unsigned long __init setup_memory(void) | |||
298 | find_max_pfn(); | 298 | find_max_pfn(); |
299 | get_memcfg_numa(); | 299 | get_memcfg_numa(); |
300 | 300 | ||
301 | reserve_pages = calculate_numa_remap_pages(); | 301 | kva_pages = calculate_numa_remap_pages(); |
302 | 302 | ||
303 | /* partially used pages are not usable - thus round upwards */ | 303 | /* partially used pages are not usable - thus round upwards */ |
304 | system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end); | 304 | system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end); |
305 | 305 | ||
306 | system_max_low_pfn = max_low_pfn = find_max_low_pfn() - reserve_pages; | 306 | kva_start_pfn = find_max_low_pfn() - kva_pages; |
307 | printk("reserve_pages = %ld find_max_low_pfn() ~ %ld\n", | 307 | |
308 | reserve_pages, max_low_pfn + reserve_pages); | 308 | #ifdef CONFIG_BLK_DEV_INITRD |
309 | /* Numa kva area is below the initrd */ | ||
310 | if (LOADER_TYPE && INITRD_START) | ||
311 | kva_start_pfn = PFN_DOWN(INITRD_START) - kva_pages; | ||
312 | #endif | ||
313 | kva_start_pfn -= kva_start_pfn & (PTRS_PER_PTE-1); | ||
314 | |||
315 | system_max_low_pfn = max_low_pfn = find_max_low_pfn(); | ||
316 | printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n", | ||
317 | kva_start_pfn, max_low_pfn); | ||
309 | printk("max_pfn = %ld\n", max_pfn); | 318 | printk("max_pfn = %ld\n", max_pfn); |
310 | #ifdef CONFIG_HIGHMEM | 319 | #ifdef CONFIG_HIGHMEM |
311 | highstart_pfn = highend_pfn = max_pfn; | 320 | highstart_pfn = highend_pfn = max_pfn; |
@@ -328,7 +337,7 @@ unsigned long __init setup_memory(void) | |||
328 | (ulong) pfn_to_kaddr(max_low_pfn)); | 337 | (ulong) pfn_to_kaddr(max_low_pfn)); |
329 | for_each_online_node(nid) { | 338 | for_each_online_node(nid) { |
330 | node_remap_start_vaddr[nid] = pfn_to_kaddr( | 339 | node_remap_start_vaddr[nid] = pfn_to_kaddr( |
331 | highstart_pfn + node_remap_offset[nid]); | 340 | kva_start_pfn + node_remap_offset[nid]); |
332 | /* Init the node remap allocator */ | 341 | /* Init the node remap allocator */ |
333 | node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] + | 342 | node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] + |
334 | (node_remap_size[nid] * PAGE_SIZE); | 343 | (node_remap_size[nid] * PAGE_SIZE); |
@@ -343,7 +352,6 @@ unsigned long __init setup_memory(void) | |||
343 | } | 352 | } |
344 | printk("High memory starts at vaddr %08lx\n", | 353 | printk("High memory starts at vaddr %08lx\n", |
345 | (ulong) pfn_to_kaddr(highstart_pfn)); | 354 | (ulong) pfn_to_kaddr(highstart_pfn)); |
346 | vmalloc_earlyreserve = reserve_pages * PAGE_SIZE; | ||
347 | for_each_online_node(nid) | 355 | for_each_online_node(nid) |
348 | find_max_pfn_node(nid); | 356 | find_max_pfn_node(nid); |
349 | 357 | ||
@@ -353,13 +361,18 @@ unsigned long __init setup_memory(void) | |||
353 | return max_low_pfn; | 361 | return max_low_pfn; |
354 | } | 362 | } |
355 | 363 | ||
364 | void __init numa_kva_reserve(void) | ||
365 | { | ||
366 | reserve_bootmem(PFN_PHYS(kva_start_pfn),PFN_PHYS(kva_pages)); | ||
367 | } | ||
368 | |||
356 | void __init zone_sizes_init(void) | 369 | void __init zone_sizes_init(void) |
357 | { | 370 | { |
358 | int nid; | 371 | int nid; |
359 | 372 | ||
360 | 373 | ||
361 | for_each_online_node(nid) { | 374 | for_each_online_node(nid) { |
362 | unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; | 375 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; |
363 | unsigned long *zholes_size; | 376 | unsigned long *zholes_size; |
364 | unsigned int max_dma; | 377 | unsigned int max_dma; |
365 | 378 | ||
@@ -414,7 +427,7 @@ void __init set_highmem_pages_init(int bad_ppro) | |||
414 | zone_end_pfn = zone_start_pfn + zone->spanned_pages; | 427 | zone_end_pfn = zone_start_pfn + zone->spanned_pages; |
415 | 428 | ||
416 | printk("Initializing %s for node %d (%08lx:%08lx)\n", | 429 | printk("Initializing %s for node %d (%08lx:%08lx)\n", |
417 | zone->name, zone->zone_pgdat->node_id, | 430 | zone->name, zone_to_nid(zone), |
418 | zone_start_pfn, zone_end_pfn); | 431 | zone_start_pfn, zone_end_pfn); |
419 | 432 | ||
420 | for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) { | 433 | for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) { |
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 951386606d09..4a5a914b3432 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -615,6 +615,48 @@ void __init mem_init(void) | |||
615 | (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)) | 615 | (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)) |
616 | ); | 616 | ); |
617 | 617 | ||
618 | #if 1 /* double-sanity-check paranoia */ | ||
619 | printk("virtual kernel memory layout:\n" | ||
620 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
621 | #ifdef CONFIG_HIGHMEM | ||
622 | " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
623 | #endif | ||
624 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
625 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
626 | " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
627 | " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
628 | " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", | ||
629 | FIXADDR_START, FIXADDR_TOP, | ||
630 | (FIXADDR_TOP - FIXADDR_START) >> 10, | ||
631 | |||
632 | #ifdef CONFIG_HIGHMEM | ||
633 | PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, | ||
634 | (LAST_PKMAP*PAGE_SIZE) >> 10, | ||
635 | #endif | ||
636 | |||
637 | VMALLOC_START, VMALLOC_END, | ||
638 | (VMALLOC_END - VMALLOC_START) >> 20, | ||
639 | |||
640 | (unsigned long)__va(0), (unsigned long)high_memory, | ||
641 | ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20, | ||
642 | |||
643 | (unsigned long)&__init_begin, (unsigned long)&__init_end, | ||
644 | ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10, | ||
645 | |||
646 | (unsigned long)&_etext, (unsigned long)&_edata, | ||
647 | ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, | ||
648 | |||
649 | (unsigned long)&_text, (unsigned long)&_etext, | ||
650 | ((unsigned long)&_etext - (unsigned long)&_text) >> 10); | ||
651 | |||
652 | #ifdef CONFIG_HIGHMEM | ||
653 | BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START); | ||
654 | BUG_ON(VMALLOC_END > PKMAP_BASE); | ||
655 | #endif | ||
656 | BUG_ON(VMALLOC_START > VMALLOC_END); | ||
657 | BUG_ON((unsigned long)high_memory > VMALLOC_START); | ||
658 | #endif /* double-sanity-check paranoia */ | ||
659 | |||
618 | #ifdef CONFIG_X86_PAE | 660 | #ifdef CONFIG_X86_PAE |
619 | if (!cpu_has_pae) | 661 | if (!cpu_has_pae) |
620 | panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!"); | 662 | panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!"); |
@@ -643,7 +685,7 @@ void __init mem_init(void) | |||
643 | int arch_add_memory(int nid, u64 start, u64 size) | 685 | int arch_add_memory(int nid, u64 start, u64 size) |
644 | { | 686 | { |
645 | struct pglist_data *pgdata = &contig_page_data; | 687 | struct pglist_data *pgdata = &contig_page_data; |
646 | struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1; | 688 | struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM; |
647 | unsigned long start_pfn = start >> PAGE_SHIFT; | 689 | unsigned long start_pfn = start >> PAGE_SHIFT; |
648 | unsigned long nr_pages = size >> PAGE_SHIFT; | 690 | unsigned long nr_pages = size >> PAGE_SHIFT; |
649 | 691 | ||
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index bd98768d8764..10126e3f8174 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/pagemap.h> | 13 | #include <linux/pagemap.h> |
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/module.h> | ||
15 | 16 | ||
16 | #include <asm/system.h> | 17 | #include <asm/system.h> |
17 | #include <asm/pgtable.h> | 18 | #include <asm/pgtable.h> |
@@ -60,7 +61,9 @@ void show_mem(void) | |||
60 | printk(KERN_INFO "%lu pages writeback\n", | 61 | printk(KERN_INFO "%lu pages writeback\n", |
61 | global_page_state(NR_WRITEBACK)); | 62 | global_page_state(NR_WRITEBACK)); |
62 | printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED)); | 63 | printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED)); |
63 | printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB)); | 64 | printk(KERN_INFO "%lu pages slab\n", |
65 | global_page_state(NR_SLAB_RECLAIMABLE) + | ||
66 | global_page_state(NR_SLAB_UNRECLAIMABLE)); | ||
64 | printk(KERN_INFO "%lu pages pagetables\n", | 67 | printk(KERN_INFO "%lu pages pagetables\n", |
65 | global_page_state(NR_PAGETABLE)); | 68 | global_page_state(NR_PAGETABLE)); |
66 | } | 69 | } |
@@ -137,6 +140,12 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) | |||
137 | __flush_tlb_one(vaddr); | 140 | __flush_tlb_one(vaddr); |
138 | } | 141 | } |
139 | 142 | ||
143 | static int fixmaps; | ||
144 | #ifndef CONFIG_COMPAT_VDSO | ||
145 | unsigned long __FIXADDR_TOP = 0xfffff000; | ||
146 | EXPORT_SYMBOL(__FIXADDR_TOP); | ||
147 | #endif | ||
148 | |||
140 | void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags) | 149 | void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags) |
141 | { | 150 | { |
142 | unsigned long address = __fix_to_virt(idx); | 151 | unsigned long address = __fix_to_virt(idx); |
@@ -146,6 +155,25 @@ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags) | |||
146 | return; | 155 | return; |
147 | } | 156 | } |
148 | set_pte_pfn(address, phys >> PAGE_SHIFT, flags); | 157 | set_pte_pfn(address, phys >> PAGE_SHIFT, flags); |
158 | fixmaps++; | ||
159 | } | ||
160 | |||
161 | /** | ||
162 | * reserve_top_address - reserves a hole in the top of kernel address space | ||
163 | * @reserve - size of hole to reserve | ||
164 | * | ||
165 | * Can be used to relocate the fixmap area and poke a hole in the top | ||
166 | * of kernel address space to make room for a hypervisor. | ||
167 | */ | ||
168 | void reserve_top_address(unsigned long reserve) | ||
169 | { | ||
170 | BUG_ON(fixmaps > 0); | ||
171 | #ifdef CONFIG_COMPAT_VDSO | ||
172 | BUG_ON(reserve != 0); | ||
173 | #else | ||
174 | __FIXADDR_TOP = -reserve - PAGE_SIZE; | ||
175 | __VMALLOC_RESERVE += reserve; | ||
176 | #endif | ||
149 | } | 177 | } |
150 | 178 | ||
151 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | 179 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
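reserve_top_address() above has to run before the first fixmap entry is installed (the BUG_ON(fixmaps > 0) enforces that) and is rejected on CONFIG_COMPAT_VDSO kernels, where __FIXADDR_TOP cannot move. A hedged sketch of how an early hypervisor hook might use it; the hole size and function names are illustrative, and the real prototype lives in an asm-i386 header not shown in this diff:

#include <linux/init.h>

extern void reserve_top_address(unsigned long reserve);	/* see pgtable.c above */

#define EXAMPLE_HYPERVISOR_HOLE	(16UL << 20)	/* illustrative: 16 MB at the top */

/* Must run before paging init / the first __set_fixmap() call. */
static void __init example_make_room_for_hypervisor(void)
{
	reserve_top_address(EXAMPLE_HYPERVISOR_HOLE);
}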
diff --git a/arch/i386/power/swsusp.S b/arch/i386/power/swsusp.S
index c893b897217f..8a2b50a0aaad 100644
--- a/arch/i386/power/swsusp.S
+++ b/arch/i386/power/swsusp.S
@@ -32,7 +32,7 @@ ENTRY(swsusp_arch_resume) | |||
32 | movl $swsusp_pg_dir-__PAGE_OFFSET, %ecx | 32 | movl $swsusp_pg_dir-__PAGE_OFFSET, %ecx |
33 | movl %ecx, %cr3 | 33 | movl %ecx, %cr3 |
34 | 34 | ||
35 | movl pagedir_nosave, %edx | 35 | movl restore_pblist, %edx |
36 | .p2align 4,,7 | 36 | .p2align 4,,7 |
37 | 37 | ||
38 | copy_loop: | 38 | copy_loop: |