author     Linus Torvalds <torvalds@linux-foundation.org>   2009-09-14 10:59:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-09-14 10:59:32 -0400
commit     7dfd54a905be0242bd604557d543c3a2d7b0a540 (patch)
tree       75906d8bf8d835ae570667ea4d9311d9b443a77a /arch
parent     6512c0d62589c9f4da00328518e492ddd1c01c7f (diff)
parent     565b0c1f100408ccbcb04ba458a14da454cb271d (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, highmem_32.c: Clean up comment
  x86, pgtable.h: Clean up types
  x86: Clean up dump_pagetable()
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/pgtable.h  16
-rw-r--r--  arch/x86/mm/fault.c             51
-rw-r--r--  arch/x86/mm/highmem_32.c         2
3 files changed, 30 insertions(+), 39 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 16748077559a..4c5b51fdc788 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -135,6 +135,11 @@ static inline unsigned long pte_pfn(pte_t pte)
 	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
 
 static inline int pmd_large(pmd_t pte)
@@ -359,7 +364,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
  * this macro returns the index of the entry in the pmd page which would
  * control the given virtual address
  */
-static inline unsigned pmd_index(unsigned long address)
+static inline unsigned long pmd_index(unsigned long address)
 {
 	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
 }
@@ -379,7 +384,7 @@ static inline unsigned pmd_index(unsigned long address)
  * this function returns the index of the entry in the pte page which would
  * control the given virtual address
  */
-static inline unsigned pte_index(unsigned long address)
+static inline unsigned long pte_index(unsigned long address)
 {
 	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 }
@@ -430,11 +435,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
 }
 
-static inline unsigned long pmd_pfn(pmd_t pmd)
-{
-	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
-}
-
 static inline int pud_large(pud_t pud)
 {
 	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -470,7 +470,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
 #define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
 
 /* to find an entry in a page-table-directory. */
-static inline unsigned pud_index(unsigned long address)
+static inline unsigned long pud_index(unsigned long address)
 {
 	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
 }
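Aside (not part of the commit): the type cleanups above only widen the return type of the index helpers from unsigned to unsigned long; the shift-and-mask arithmetic is unchanged. A minimal standalone sketch of that arithmetic, with made-up shift and size constants standing in for the kernel's pgtable headers:

#include <stdio.h>

/* Illustrative values only; the real ones come from the kernel headers. */
#define PAGE_SHIFT	12
#define PMD_SHIFT	21		/* assumes PAE-style 2 MB pmd coverage */
#define PTRS_PER_PMD	512
#define PTRS_PER_PTE	512

static unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

static unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
	unsigned long addr = 0xc0123456UL;	/* arbitrary example address */

	printf("pmd index %lu, pte index %lu\n",
	       pmd_index(addr), pte_index(addr));
	return 0;
}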
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index bfae139182ff..775a020990a5 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -285,26 +285,25 @@ check_v8086_mode(struct pt_regs *regs, unsigned long address,
 	tsk->thread.screen_bitmap |= 1 << bit;
 }
 
-static void dump_pagetable(unsigned long address)
+static bool low_pfn(unsigned long pfn)
 {
-	__typeof__(pte_val(__pte(0))) page;
+	return pfn < max_low_pfn;
+}
 
-	page = read_cr3();
-	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
+static void dump_pagetable(unsigned long address)
+{
+	pgd_t *base = __va(read_cr3());
+	pgd_t *pgd = &base[pgd_index(address)];
+	pmd_t *pmd;
+	pte_t *pte;
 
 #ifdef CONFIG_X86_PAE
-	printk("*pdpt = %016Lx ", page);
-	if ((page >> PAGE_SHIFT) < max_low_pfn
-	    && page & _PAGE_PRESENT) {
-		page &= PAGE_MASK;
-		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
-							 & (PTRS_PER_PMD - 1)];
-		printk(KERN_CONT "*pde = %016Lx ", page);
-		page &= ~_PAGE_NX;
-	}
-#else
-	printk("*pde = %08lx ", page);
+	printk("*pdpt = %016Lx ", pgd_val(*pgd));
+	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
+		goto out;
 #endif
+	pmd = pmd_offset(pud_offset(pgd, address), address);
+	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
 
 	/*
 	 * We must not directly access the pte in the highpte
@@ -312,16 +311,12 @@ static void dump_pagetable(unsigned long address)
 	 * And let's rather not kmap-atomic the pte, just in case
 	 * it's allocated already:
 	 */
-	if ((page >> PAGE_SHIFT) < max_low_pfn
-	    && (page & _PAGE_PRESENT)
-	    && !(page & _PAGE_PSE)) {
-
-		page &= PAGE_MASK;
-		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
-							 & (PTRS_PER_PTE - 1)];
-		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
-	}
+	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
+		goto out;
 
+	pte = pte_offset_kernel(pmd, address);
+	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
+out:
 	printk("\n");
 }
 
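Aside (not part of the commit): the rewritten 32-bit dump_pagetable() replaces nested conditionals over an untyped integer with typed accessors and guard clauses that jump to a single out: label. A standalone sketch of that guard-clause pattern, with a made-up lowmem boundary and a simplified dump_entry() in place of the real page-table walk:

#include <stdbool.h>
#include <stdio.h>

#define MAX_LOW_PFN	0x38000UL	/* made-up 896 MB lowmem boundary */

/* Same shape as the new low_pfn() helper above. */
static bool low_pfn(unsigned long pfn)
{
	return pfn < MAX_LOW_PFN;
}

/* Bail out early on any reason the pte cannot be touched (table in
 * highmem, entry not present, or a large page with no pte level),
 * keeping the interesting access unindented at the end. */
static void dump_entry(unsigned long pmd_pfn, bool present, bool large)
{
	if (!low_pfn(pmd_pfn) || !present || large)
		goto out;

	printf("*pte = ... ");		/* safe: the pte table is in lowmem */
out:
	printf("\n");
}

int main(void)
{
	dump_entry(0x1234UL, true, false);	/* reaches the pte line */
	dump_entry(0x40000UL, true, false);	/* highmem table: skipped */
	return 0;
}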
@@ -450,16 +445,12 @@ static int bad_address(void *p)
 
 static void dump_pagetable(unsigned long address)
 {
-	pgd_t *pgd;
+	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
+	pgd_t *pgd = base + pgd_index(address);
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 
-	pgd = (pgd_t *)read_cr3();
-
-	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
-
-	pgd += pgd_index(address);
 	if (bad_address(pgd))
 		goto bad;
 
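Aside (not part of the commit): the 64-bit hunk folds three statements into one initializer; CR3 carries flag bits alongside the page-directory base, so it is masked before the physical base is handed to __va(). A toy illustration with invented values, where FLAG_MASK stands in for the bits PHYSICAL_PAGE_MASK clears:

#include <stdio.h>

#define FLAG_MASK	0xfffUL		/* stand-in, not the kernel constant */

int main(void)
{
	unsigned long cr3  = 0x1000018UL;	/* invented: base 0x1000000 | flags 0x18 */
	unsigned long base = cr3 & ~FLAG_MASK;

	printf("cr3 %#lx -> page-table base %#lx\n", cr3, base);
	return 0;
}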
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 2112ed55e7ea..1617958a3805 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -24,7 +24,7 @@ void kunmap(struct page *page)
  * no global lock is needed and because the kmap code must perform a global TLB
  * invalidation when the kmap pool wraps.
  *
- * However when holding an atomic kmap is is not legal to sleep, so atomic
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
 void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
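Aside (not part of the commit): the comment fixed above states the constraint that code holding an atomic kmap must not sleep. A sketch of the 2009-era calling pattern this implies; kmap_atomic() and the km_type slots were reworked in later kernels, and copy_from_high_page() is a hypothetical caller, not kernel code:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: map a highmem page, copy out of it, unmap.
 * Nothing between kmap_atomic() and kunmap_atomic() may sleep. */
static void copy_from_high_page(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memcpy(dst, vaddr, len);	/* short, non-sleeping critical section */
	kunmap_atomic(vaddr, KM_USER0);
}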