Diffstat (limited to 'arch/i386/mm')
-rw-r--r--  arch/i386/mm/discontig.c   |  8
-rw-r--r--  arch/i386/mm/fault.c       | 41
-rw-r--r--  arch/i386/mm/hugetlbpage.c | 27
-rw-r--r--  arch/i386/mm/init.c        |  9
-rw-r--r--  arch/i386/mm/pageattr.c    |  7
-rw-r--r--  arch/i386/mm/pgtable.c     | 10
6 files changed, 52 insertions(+), 50 deletions(-)
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 6711ce3f6916..244d8ec66be2 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -37,7 +37,7 @@
 #include <asm/mmzone.h>
 #include <bios_ebda.h>
 
-struct pglist_data *node_data[MAX_NUMNODES];
+struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 bootmem_data_t node0_bdata;
 
@@ -49,8 +49,8 @@ bootmem_data_t node0_bdata;
  * 2) node_start_pfn - the starting page frame number for a node
  * 3) node_end_pfn - the ending page fram number for a node
  */
-unsigned long node_start_pfn[MAX_NUMNODES];
-unsigned long node_end_pfn[MAX_NUMNODES];
+unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly;
+unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly;
 
 
 #ifdef CONFIG_DISCONTIGMEM
@@ -66,7 +66,7 @@ unsigned long node_end_pfn[MAX_NUMNODES];
  * physnode_map[4-7] = 1;
  * physnode_map[8- ] = -1;
  */
-s8 physnode_map[MAX_ELEMENTS] = { [0 ... (MAX_ELEMENTS - 1)] = -1};
+s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
 EXPORT_SYMBOL(physnode_map);
 
 void memory_present(int nid, unsigned long start, unsigned long end)
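
The __read_mostly annotations added in this file move rarely-written NUMA globals into a dedicated data section so they do not share cache lines with frequently written data. A minimal sketch of the idea, assuming the usual section-attribute definition (the real macro lives in the arch cache header, not in this diff):

/* Sketch only: approximate definition of the annotation used above. */
#define __read_mostly	__attribute__((__section__(".data.read_mostly")))

/* The linker script then collects these objects into one region, e.g.
 *   .data.read_mostly : { *(.data.read_mostly) }
 * so a store to an unrelated hot variable cannot bounce the cache line
 * holding node_data[] across CPUs. */
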
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 8e90339d6eaa..9edd4485b91e 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -21,6 +21,7 @@
 #include <linux/vt_kern.h>		/* For unblank_screen() */
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -199,6 +200,18 @@ static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 	return 0;
 }
 
+static noinline void force_sig_info_fault(int si_signo, int si_code,
+					  unsigned long address, struct task_struct *tsk)
+{
+	siginfo_t info;
+
+	info.si_signo = si_signo;
+	info.si_errno = 0;
+	info.si_code = si_code;
+	info.si_addr = (void __user *)address;
+	force_sig_info(si_signo, &info, tsk);
+}
+
 fastcall void do_invalid_op(struct pt_regs *, unsigned long);
 
 /*
@@ -211,18 +224,18 @@ fastcall void do_invalid_op(struct pt_regs *, unsigned long);
  *	bit 1 == 0 means read, 1 means write
  *	bit 2 == 0 means kernel, 1 means user-mode
  */
-fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+fastcall void __kprobes do_page_fault(struct pt_regs *regs,
+				      unsigned long error_code)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	unsigned long address;
 	unsigned long page;
-	int write;
-	siginfo_t info;
+	int write, si_code;
 
 	/* get the address */
-	__asm__("movl %%cr2,%0":"=r" (address));
+	address = read_cr2();
 
 	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
 					SIGSEGV) == NOTIFY_STOP)
@@ -233,7 +246,7 @@ fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 	tsk = current;
 
-	info.si_code = SEGV_MAPERR;
+	si_code = SEGV_MAPERR;
 
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
@@ -313,7 +326,7 @@ fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code)
  * we can handle it..
  */
 good_area:
-	info.si_code = SEGV_ACCERR;
+	si_code = SEGV_ACCERR;
 	write = 0;
 	switch (error_code & 3) {
 		default:	/* 3: write, present */
@@ -387,11 +400,7 @@ bad_area_nosemaphore:
 		/* Kernel addresses are always protection faults */
 		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
 		tsk->thread.trap_no = 14;
-		info.si_signo = SIGSEGV;
-		info.si_errno = 0;
-		/* info.si_code has been set above */
-		info.si_addr = (void __user *)address;
-		force_sig_info(SIGSEGV, &info, tsk);
+		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
 		return;
 	}
 
@@ -446,7 +455,7 @@ no_context:
 	printk(" at virtual address %08lx\n",address);
 	printk(KERN_ALERT " printing eip:\n");
 	printk("%08lx\n", regs->eip);
-	asm("movl %%cr3,%0":"=r" (page));
+	page = read_cr3();
 	page = ((unsigned long *) __va(page))[address >> 22];
 	printk(KERN_ALERT "*pde = %08lx\n", page);
 	/*
@@ -500,11 +509,7 @@ do_sigbus:
 	tsk->thread.cr2 = address;
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_no = 14;
-	info.si_signo = SIGBUS;
-	info.si_errno = 0;
-	info.si_code = BUS_ADRERR;
-	info.si_addr = (void __user *)address;
-	force_sig_info(SIGBUS, &info, tsk);
+	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
 	return;
 
 vmalloc_fault:
@@ -523,7 +528,7 @@ vmalloc_fault:
 		pmd_t *pmd, *pmd_k;
 		pte_t *pte_k;
 
-		asm("movl %%cr3,%0":"=r" (pgd_paddr));
+		pgd_paddr = read_cr3();
 		pgd = index + (pgd_t *)__va(pgd_paddr);
 		pgd_k = init_mm.pgd + index;
 
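
The fault.c hunks above replace open-coded control-register asm with read_cr2()/read_cr3() accessors. As a rough sketch of what such helpers look like on i386 (the real definitions live in the arch headers of this tree; treat this as an assumption, not a verbatim copy):

static inline unsigned long read_cr2(void)
{
	unsigned long val;

	/* %cr2 holds the faulting linear address after a page fault */
	__asm__ __volatile__("movl %%cr2,%0" : "=r" (val));
	return val;
}

static inline unsigned long read_cr3(void)
{
	unsigned long val;

	/* %cr3 holds the physical address of the current page directory */
	__asm__ __volatile__("movl %%cr3,%0" : "=r" (val));
	return val;
}

Centralizing the asm in one helper keeps the constraint strings in a single place and lets every caller read as plain C.
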
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index 3b099f32b948..d524127c9afc 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -22,12 +22,15 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
-	pmd_t *pmd = NULL;
+	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
-	pmd = pmd_alloc(mm, pud, addr);
-	return (pte_t *) pmd;
+	if (pud)
+		pte = (pte_t *) pmd_alloc(mm, pud, addr);
+	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
+
+	return pte;
 }
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
@@ -37,8 +40,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	pmd_t *pmd = NULL;
 
 	pgd = pgd_offset(mm, addr);
-	pud = pud_offset(pgd, addr);
-	pmd = pmd_offset(pud, addr);
+	if (pgd_present(*pgd)) {
+		pud = pud_offset(pgd, addr);
+		if (pud_present(*pud))
+			pmd = pmd_offset(pud, addr);
+	}
 	return (pte_t *) pmd;
 }
 
@@ -118,17 +124,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 }
 #endif
 
-void hugetlb_clean_stale_pgtable(pte_t *pte)
-{
-	pmd_t *pmd = (pmd_t *) pte;
-	struct page *page;
-
-	page = pmd_page(*pmd);
-	pmd_clear(pmd);
-	dec_page_state(nr_page_table_pages);
-	page_cache_release(page);
-}
-
 /* x86_64 also uses this file */
 
 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 12216b52e28b..2ebaf75f732e 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -198,9 +198,10 @@ int page_is_ram(unsigned long pagenr)
 
 	if (efi_enabled) {
 		efi_memory_desc_t *md;
+		void *p;
 
-		for (i = 0; i < memmap.nr_map; i++) {
-			md = &memmap.map[i];
+		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+			md = p;
 			if (!is_available_memory(md))
 				continue;
 			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
@@ -348,7 +349,7 @@ static void __init pagetable_init (void)
 	 * All user-space mappings are explicitly cleared after
 	 * SMP startup.
 	 */
-	pgd_base[0] = pgd_base[USER_PTRS_PER_PGD];
+	set_pgd(&pgd_base[0], pgd_base[USER_PTRS_PER_PGD]);
 #endif
 }
 
@@ -392,7 +393,7 @@ void zap_low_mappings (void)
 }
 
 static int disable_nx __initdata = 0;
-u64 __supported_pte_mask = ~_PAGE_NX;
+u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
 
 /*
  * noexec = on|off
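
The page_is_ram() hunk above stops indexing the EFI memory map as a C array and instead steps through it by the firmware-reported descriptor size, since EFI may hand back descriptors larger than the efi_memory_desc_t the kernel was compiled against. A sketch of the walk, using only the field names visible in the diff:

void *p;
efi_memory_desc_t *md;

/* desc_size comes from the firmware and may exceed sizeof(*md), so
 * advance the cursor by desc_size rather than by array index.
 * (Arithmetic on a void pointer is the GCC extension the kernel uses.) */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
	md = p;
	/* ... examine md->type, md->phys_addr, md->num_pages ... */
}
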
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index cb3da6baa704..f600fc244f02 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -12,6 +12,7 @@
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
 
 static DEFINE_SPINLOCK(cpa_lock);
 static struct list_head df_list = LIST_HEAD_INIT(df_list);
@@ -52,8 +53,8 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot)
 	addr = address & LARGE_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
-		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
-				   addr == address ? prot : PAGE_KERNEL);
+		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
+					   addr == address ? prot : PAGE_KERNEL));
 	}
 	return base;
 }
@@ -62,7 +63,7 @@ static void flush_kernel_map(void *dummy)
 {
 	/* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
 	if (boot_cpu_data.x86_model >= 4)
-		asm volatile("wbinvd":::"memory");
+		wbinvd();
 	/* Flush all to work around Errata in early athlons regarding
 	 * large page flushing.
 	 */
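
The split_large_page() hunk above routes the PTE store through set_pte() instead of assigning pbase[i] directly. With 2-level paging a PTE write really is a plain store, but with PAE the 64-bit entry has to be written in a safe order. A sketch of the two shapes; the _2level/_3level suffixes are illustrative only, the real kernel simply defines set_pte() differently per pagetable flavour:

/* Non-PAE (2-level) flavour: a single 32-bit store. */
#define set_pte_2level(pteptr, pteval)	(*(pteptr) = (pteval))

/* PAE (3-level) flavour: write the high word first so the entry is
 * never transiently present with a stale low word. */
static inline void set_pte_3level(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
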
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index bd2f7afc7a2a..dcdce2c6c532 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -207,19 +207,19 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
 {
 	unsigned long flags;
 
-	if (PTRS_PER_PMD == 1)
+	if (PTRS_PER_PMD == 1) {
+		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
 		spin_lock_irqsave(&pgd_lock, flags);
+	}
 
-	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
+	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
 			swapper_pg_dir + USER_PTRS_PER_PGD,
-			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-
+			KERNEL_PGD_PTRS);
 	if (PTRS_PER_PMD > 1)
 		return;
 
 	pgd_list_add(pgd);
 	spin_unlock_irqrestore(&pgd_lock, flags);
-	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
 }
 
 /* never called when PTRS_PER_PMD > 1 */
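
The pgd_ctor() hunk above replaces an open-coded memcpy of the kernel half of the page directory with clone_pgd_range() and the KERNEL_PGD_PTRS count. A minimal sketch of what those helpers amount to, assuming the usual i386 pgtable definitions rather than quoting this tree verbatim:

/* Number of pgd slots that map kernel space (at and above PAGE_OFFSET). */
#define USER_PGD_PTRS	(PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS	(PTRS_PER_PGD - USER_PGD_PTRS)

/* Copy a run of pgd entries: a typed wrapper around memcpy so the
 * count is expressed in entries, not bytes. */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

Expressing the copy in pgd entries removes the easy-to-get-wrong "(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)" arithmetic from the caller.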