Diffstat (limited to 'arch/x86/include/asm/pgtable.h')
-rw-r--r--   arch/x86/include/asm/pgtable.h   226
1 file changed, 213 insertions(+), 13 deletions(-)
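
The core of this change is a new pte_set_flags()/pte_clear_flags() helper pair that the existing pte_mk*/pte_clr* accessors are rewritten to call, plus a block of page-table walk helpers added to this header. A minimal standalone sketch of the flag-helper pattern, using stand-in typedefs and illustrative bit positions rather than the real kernel definitions, looks like this:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's pteval_t/pte_t; bit positions are illustrative. */
typedef uint64_t pteval_t;
typedef struct { pteval_t pte; } pte_t;

#define _PAGE_RW    ((pteval_t)1 << 1)
#define _PAGE_DIRTY ((pteval_t)1 << 6)

static pte_t make_pte(pteval_t v) { return (pte_t){ v }; }
static pteval_t pte_val(pte_t p)  { return p.pte; }

/* The pattern the patch introduces: one helper ORs flags in, one masks them out. */
static pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        return make_pte(pte_val(pte) | set);
}

static pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        return make_pte(pte_val(pte) & ~clear);
}

/* Accessors then collapse to one-liners, mirroring pte_mkdirty()/pte_wrprotect() in the diff. */
static pte_t pte_mkdirty(pte_t pte)   { return pte_set_flags(pte, _PAGE_DIRTY); }
static pte_t pte_wrprotect(pte_t pte) { return pte_clear_flags(pte, _PAGE_RW); }

int main(void)
{
        pte_t pte = make_pte(_PAGE_RW);

        pte = pte_mkdirty(pte);     /* sets _PAGE_DIRTY */
        pte = pte_wrprotect(pte);   /* clears _PAGE_RW  */
        printf("flags: %#llx\n", (unsigned long long)pte_val(pte));
        return 0;
}

Note that the real helpers in the diff operate on native_pte_val()/native_make_pte() rather than the __pte()/pte_val() wrappers the old accessors used, so pure flag manipulation no longer has to round-trip through the paravirt conversion functions.
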
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 4f5af8447d54..8fef0f6bfbb6 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_PGTABLE_H
 #define _ASM_X86_PGTABLE_H
 
+#include <asm/page.h>
+
 #define FIRST_USER_ADDRESS      0
 
 #define _PAGE_BIT_PRESENT       0       /* is present */
@@ -236,68 +238,82 @@ static inline unsigned long pte_pfn(pte_t pte)
 
 static inline int pmd_large(pmd_t pte)
 {
-        return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+        return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                 (_PAGE_PSE | _PAGE_PRESENT);
 }
 
+static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
+{
+        pteval_t v = native_pte_val(pte);
+
+        return native_make_pte(v | set);
+}
+
+static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
+{
+        pteval_t v = native_pte_val(pte);
+
+        return native_make_pte(v & ~clear);
+}
+
 static inline pte_t pte_mkclean(pte_t pte)
 {
-        return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+        return pte_clear_flags(pte, _PAGE_DIRTY);
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-        return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+        return pte_clear_flags(pte, _PAGE_ACCESSED);
 }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-        return __pte(pte_val(pte) & ~_PAGE_RW);
+        return pte_clear_flags(pte, _PAGE_RW);
 }
 
 static inline pte_t pte_mkexec(pte_t pte)
 {
-        return __pte(pte_val(pte) & ~_PAGE_NX);
+        return pte_clear_flags(pte, _PAGE_NX);
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-        return __pte(pte_val(pte) | _PAGE_DIRTY);
+        return pte_set_flags(pte, _PAGE_DIRTY);
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
-        return __pte(pte_val(pte) | _PAGE_ACCESSED);
+        return pte_set_flags(pte, _PAGE_ACCESSED);
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
 {
-        return __pte(pte_val(pte) | _PAGE_RW);
+        return pte_set_flags(pte, _PAGE_RW);
 }
 
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-        return __pte(pte_val(pte) | _PAGE_PSE);
+        return pte_set_flags(pte, _PAGE_PSE);
 }
 
 static inline pte_t pte_clrhuge(pte_t pte)
 {
-        return __pte(pte_val(pte) & ~_PAGE_PSE);
+        return pte_clear_flags(pte, _PAGE_PSE);
 }
 
 static inline pte_t pte_mkglobal(pte_t pte)
 {
-        return __pte(pte_val(pte) | _PAGE_GLOBAL);
+        return pte_set_flags(pte, _PAGE_GLOBAL);
 }
 
 static inline pte_t pte_clrglobal(pte_t pte)
 {
-        return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
+        return pte_clear_flags(pte, _PAGE_GLOBAL);
 }
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
-        return __pte(pte_val(pte) | _PAGE_SPECIAL);
+        return pte_set_flags(pte, _PAGE_SPECIAL);
 }
 
 extern pteval_t __supported_pte_mask;
@@ -451,6 +467,190 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
 # include "pgtable_64.h"
 #endif
 
+#ifndef __ASSEMBLY__
+#include <linux/mm_types.h>
+
+static inline int pte_none(pte_t pte)
+{
+        return !pte.pte;
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t a, pte_t b)
+{
+        return a.pte == b.pte;
+}
+
+static inline int pte_present(pte_t a)
+{
+        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+        return pmd_flags(pmd) & _PAGE_PRESENT;
+}
+
+static inline int pmd_none(pmd_t pmd)
+{
+        /* Only check low word on 32-bit platforms, since it might be
+           out of sync with upper half. */
+        return (unsigned long)native_pmd_val(pmd) == 0;
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+        return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pmd_page(pmd)   pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+
+/*
+ * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this macro returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+static inline unsigned pmd_index(unsigned long address)
+{
+        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * (Currently stuck as a macro because of indirect forward reference
+ * to linux/mm.h:page_to_nid())
+ */
+#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
+
+/*
+ * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this function returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+static inline unsigned pte_index(unsigned long address)
+{
+        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+}
+
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
+{
+        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
+
+static inline unsigned long pages_to_mb(unsigned long npg)
+{
+        return npg >> (20 - PAGE_SHIFT);
+}
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)        \
+        remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#if PAGETABLE_LEVELS == 2
+static inline int pud_large(pud_t pud)
+{
+        return 0;
+}
+#endif
+
+#if PAGETABLE_LEVELS > 2
+static inline int pud_none(pud_t pud)
+{
+        return native_pud_val(pud) == 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+        return pud_flags(pud) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+        return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pud_page(pud)   pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
+
+/* Find an entry in the second-level page table.. */
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+        return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+static inline int pud_large(pud_t pud)
+{
+        return (pud_flags(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+                (_PAGE_PSE | _PAGE_PRESENT);
+}
+
+static inline int pud_bad(pud_t pud)
+{
+        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
+}
+#endif  /* PAGETABLE_LEVELS > 2 */
+
+#if PAGETABLE_LEVELS > 3
+static inline int pgd_present(pgd_t pgd)
+{
+        return pgd_flags(pgd) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pgd_page(pgd)   pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+
+/* to find an entry in a page-table-directory. */
+static inline unsigned pud_index(unsigned long address)
+{
+        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+        return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+        return !native_pgd_val(pgd);
+}
+#endif  /* PAGETABLE_LEVELS > 3 */
+
+#endif  /* __ASSEMBLY__ */
+
 /*
  * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
  *