path: root/arch/i386/mm
author	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-26 16:07:55 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-26 16:07:55 -0400
commit	b278240839e20fa9384ea430df463b367b90e04e (patch)
tree	f99f0c8cdd4cc7f177cd75440e6bd181cded7fb3 /arch/i386/mm
parent	dd77a4ee0f3981693d4229aa1d57cea9e526ff47 (diff)
parent	3f75f42d7733e73aca5c78326489efd4189e0111 (diff)
Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6
* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6: (225 commits)
  [PATCH] Don't set calgary iommu as default y
  [PATCH] i386/x86-64: New Intel feature flags
  [PATCH] x86: Add a cumulative thermal throttle event counter.
  [PATCH] i386: Make the jiffies compares use the 64bit safe macros.
  [PATCH] x86: Refactor thermal throttle processing
  [PATCH] Add 64bit jiffies compares (for use with get_jiffies_64)
  [PATCH] Fix unwinder warning in traps.c
  [PATCH] x86: Allow disabling early pci scans with pci=noearly or disallowing conf1
  [PATCH] x86: Move direct PCI scanning functions out of line
  [PATCH] i386/x86-64: Make all early PCI scans dependent on CONFIG_PCI
  [PATCH] Don't leak NT bit into next task
  [PATCH] i386/x86-64: Work around gcc bug with noreturn functions in unwinder
  [PATCH] Fix some broken white space in ia32_signal.c
  [PATCH] Initialize argument registers for 32bit signal handlers.
  [PATCH] Remove all traces of signal number conversion
  [PATCH] Don't synchronize time reading on single core AMD systems
  [PATCH] Remove outdated comment in x86-64 mmconfig code
  [PATCH] Use string instructions for Core2 copy/clear
  [PATCH] x86: - restore i8259A eoi status on resume
  [PATCH] i386: Split multi-line printk in oops output.
  ...
Diffstat (limited to 'arch/i386/mm')
-rw-r--r--	arch/i386/mm/discontig.c	5
-rw-r--r--	arch/i386/mm/extable.c	2
-rw-r--r--	arch/i386/mm/fault.c	25
-rw-r--r--	arch/i386/mm/highmem.c	2
-rw-r--r--	arch/i386/mm/init.c	38
5 files changed, 27 insertions, 45 deletions
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index fb5d8b747de4..941d1a5ebabb 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -322,6 +322,11 @@ unsigned long __init setup_memory(void)
 		highstart_pfn = system_max_low_pfn;
 	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
 	       pages_to_mb(highend_pfn - highstart_pfn));
+	num_physpages = highend_pfn;
+	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
+#else
+	num_physpages = system_max_low_pfn;
+	high_memory = (void *) __va(system_max_low_pfn * PAGE_SIZE - 1) + 1;
 #endif
 	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
 			pages_to_mb(system_max_low_pfn));
diff --git a/arch/i386/mm/extable.c b/arch/i386/mm/extable.c
index de03c5430abc..0ce4f22a2635 100644
--- a/arch/i386/mm/extable.c
+++ b/arch/i386/mm/extable.c
@@ -11,7 +11,7 @@ int fixup_exception(struct pt_regs *regs)
 	const struct exception_table_entry *fixup;
 
 #ifdef CONFIG_PNPBIOS
-	if (unlikely((regs->xcs & ~15) == (GDT_ENTRY_PNPBIOS_BASE << 3)))
+	if (unlikely(SEGMENT_IS_PNP_CODE(regs->xcs)))
 	{
 		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
 		extern u32 pnp_bios_is_utter_crap;
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index f7279468323a..5e17a3f43b41 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -27,21 +27,24 @@
 #include <asm/uaccess.h>
 #include <asm/desc.h>
 #include <asm/kdebug.h>
+#include <asm/segment.h>
 
 extern void die(const char *,struct pt_regs *,long);
 
-#ifdef CONFIG_KPROBES
-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
 int register_page_fault_notifier(struct notifier_block *nb)
 {
 	vmalloc_sync_all();
 	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
 }
+EXPORT_SYMBOL_GPL(register_page_fault_notifier);
 
 int unregister_page_fault_notifier(struct notifier_block *nb)
 {
 	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
 }
+EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
 
 static inline int notify_page_fault(enum die_val val, const char *str,
 			struct pt_regs *regs, long err, int trap, int sig)
@@ -55,14 +58,6 @@ static inline int notify_page_fault(enum die_val val, const char *str,
 	};
 	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
 }
-#else
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
-{
-	return NOTIFY_DONE;
-}
-#endif
-
 
 /*
  * Unlock any spinlocks which will prevent us from getting the
@@ -119,10 +114,10 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
 	}
 
 	/* The standard kernel/user address space limit. */
-	*eip_limit = (seg & 3) ? USER_DS.seg : KERNEL_DS.seg;
+	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
 
 	/* By far the most common cases. */
-	if (likely(seg == __USER_CS || seg == __KERNEL_CS))
+	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
 		return eip;
 
 	/* Check the segment exists, is within the current LDT/GDT size,
@@ -436,11 +431,7 @@ good_area:
 	write = 0;
 	switch (error_code & 3) {
 		default:	/* 3: write, present */
-#ifdef TEST_VERIFY_AREA
-			if (regs->cs == KERNEL_CS)
-				printk("WP fault at %08lx\n", regs->eip);
-#endif
-			/* fall through */
+			/* fall through */
 		case 2:		/* write, not present */
 			if (!(vma->vm_flags & VM_WRITE))
 				goto bad_area;
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index b6eb4dcb8777..ba44000b9069 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -54,7 +54,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	if (vaddr < FIXADDR_START) { // FIXME
+	if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
 		dec_preempt_count();
 		preempt_check_resched();
 		return;
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index efd0bcdac65d..4a5a914b3432 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -435,16 +435,22 @@ u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
  * on      Enable
  * off     Disable
  */
-void __init noexec_setup(const char *str)
+static int __init noexec_setup(char *str)
 {
-	if (!strncmp(str, "on",2) && cpu_has_nx) {
-		__supported_pte_mask |= _PAGE_NX;
-		disable_nx = 0;
-	} else if (!strncmp(str,"off",3)) {
+	if (!str || !strcmp(str, "on")) {
+		if (cpu_has_nx) {
+			__supported_pte_mask |= _PAGE_NX;
+			disable_nx = 0;
+		}
+	} else if (!strcmp(str,"off")) {
 		disable_nx = 1;
 		__supported_pte_mask &= ~_PAGE_NX;
-	}
+	} else
+		return -EINVAL;
+
+	return 0;
 }
+early_param("noexec", noexec_setup);
 
 int nx_enabled = 0;
 #ifdef CONFIG_X86_PAE
@@ -552,18 +558,6 @@ static void __init test_wp_bit(void)
 	}
 }
 
-static void __init set_max_mapnr_init(void)
-{
-#ifdef CONFIG_HIGHMEM
-	num_physpages = highend_pfn;
-#else
-	num_physpages = max_low_pfn;
-#endif
-#ifdef CONFIG_FLATMEM
-	max_mapnr = num_physpages;
-#endif
-}
-
 static struct kcore_list kcore_mem, kcore_vmalloc;
 
 void __init mem_init(void)
@@ -590,14 +584,6 @@ void __init mem_init(void)
 	}
 #endif
 
-	set_max_mapnr_init();
-
-#ifdef CONFIG_HIGHMEM
-	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
-#else
-	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
-#endif
-
 	/* this will put all low memory onto the freelists */
 	totalram_pages += free_all_bootmem();
 