author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-02-09 12:24:35 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-02-09 12:24:40 -0500
commit		146e4b3c8b92071b18f0b2e6f47165bad4f9e825
tree		7e9db61cacca0f55ce34db089f27fc22a56ebbdd /include/asm-s390/pgtable.h
parent		0c1f1dcd8c7792aeff6ef62e9508b0041928ab87
[S390] 1K/2K page table pages.

This patch implements 1K/2K page table pages for s390.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'include/asm-s390/pgtable.h')
-rw-r--r--	include/asm-s390/pgtable.h	105
1 file changed, 36 insertions(+), 69 deletions(-)
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 65d333849150..4fc937711482 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -57,11 +57,11 @@ extern char empty_zero_page[PAGE_SIZE];
  * PGDIR_SHIFT determines what a third-level page table entry can map
  */
 #ifndef __s390x__
-# define PMD_SHIFT	22
-# define PUD_SHIFT	22
-# define PGDIR_SHIFT	22
+# define PMD_SHIFT	20
+# define PUD_SHIFT	20
+# define PGDIR_SHIFT	20
 #else /* __s390x__ */
-# define PMD_SHIFT	21
+# define PMD_SHIFT	20
 # define PUD_SHIFT	31
 # define PGDIR_SHIFT	31
 #endif /* __s390x__ */
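
An aside on the new shift values (a sketch of the arithmetic, not part of the commit): with 4K pages a shift of 20 means one pte table maps 2^20 bytes, exactly one 1MB hardware segment. The old values 22 and 21 meant 4MB (four segment-table entries per pmd on 31 bit) and 2MB (the pmd_val/pmd_val1 pair on 64 bit) of emulated segment size. A stand-alone check of the numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long seg = 1UL << 20;  /* 1MB hardware segment */

            printf("ptes per 1MB segment: %lu\n", seg >> 12);                /* 256 */
            printf("segments per old 31 bit pmd: %lu\n", (1UL << 22) / seg); /* 4 */
            printf("segments per old 64 bit pmd: %lu\n", (1UL << 21) / seg); /* 2 */
            return 0;
    }
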
@@ -79,17 +79,14 @@ extern char empty_zero_page[PAGE_SIZE];
  * for S390 segment-table entries are combined to one PGD
  * that leads to 1024 pte per pgd
  */
+#define PTRS_PER_PTE	256
 #ifndef __s390x__
-# define PTRS_PER_PTE	1024
-# define PTRS_PER_PMD	1
-# define PTRS_PER_PUD	1
-# define PTRS_PER_PGD	512
+#define PTRS_PER_PMD	1
 #else /* __s390x__ */
-# define PTRS_PER_PTE	512
-# define PTRS_PER_PMD	1024
-# define PTRS_PER_PUD	1
-# define PTRS_PER_PGD	2048
+#define PTRS_PER_PMD	2048
 #endif /* __s390x__ */
+#define PTRS_PER_PUD	1
+#define PTRS_PER_PGD	2048
 
 #define FIRST_USER_ADDRESS  0
 
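
The new geometry in numbers (worked out from the defines above; the 4-byte/8-byte pte entry sizes are architectural facts, not shown in this diff): 256 entries of 4 bytes give a 1K pte table on 31 bit, 256 entries of 8 bytes give 2K on 64 bit — the "1K/2K page table pages" of the title. Address-space coverage is unchanged:

    #include <stdio.h>

    #define PTRS_PER_PTE    256
    #define PTRS_PER_PGD    2048

    int main(void)
    {
            printf("31 bit pte table: %d bytes\n", PTRS_PER_PTE * 4);  /* 1K */
            printf("64 bit pte table: %d bytes\n", PTRS_PER_PTE * 8);  /* 2K */
            /* 2048 pgd entries, each mapping 2^PGDIR_SHIFT bytes */
            printf("31 bit reach: %llu bytes\n",
                   (unsigned long long) PTRS_PER_PGD << 20);  /* 2^31 = 2GB */
            printf("64 bit reach: %llu bytes\n",
                   (unsigned long long) PTRS_PER_PGD << 31);  /* 2^42 = 4TB */
            return 0;
    }
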
@@ -376,24 +373,6 @@ extern char empty_zero_page[PAGE_SIZE];
 # define PxD_SHADOW_SHIFT	2
 #endif /* __s390x__ */
 
-static inline struct page *get_shadow_page(struct page *page)
-{
-	if (s390_noexec && page->index)
-		return virt_to_page((void *)(addr_t) page->index);
-	return NULL;
-}
-
-static inline void *get_shadow_pte(void *table)
-{
-	unsigned long addr, offset;
-	struct page *page;
-
-	addr = (unsigned long) table;
-	offset = addr & (PAGE_SIZE - 1);
-	page = virt_to_page((void *)(addr ^ offset));
-	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
-}
-
 static inline void *get_shadow_table(void *table)
 {
 	unsigned long addr, offset;
@@ -411,17 +390,16 @@ static inline void *get_shadow_table(void *table)
  * hook is made available.
  */
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *pteptr, pte_t pteval)
+			      pte_t *ptep, pte_t entry)
 {
-	pte_t *shadow_pte = get_shadow_pte(pteptr);
-
-	*pteptr = pteval;
-	if (shadow_pte) {
-		if (!(pte_val(pteval) & _PAGE_INVALID) &&
-		    (pte_val(pteval) & _PAGE_SWX))
-			pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
+	*ptep = entry;
+	if (mm->context.noexec) {
+		if (!(pte_val(entry) & _PAGE_INVALID) &&
+		    (pte_val(entry) & _PAGE_SWX))
+			pte_val(entry) |= _PAGE_RO;
 		else
-			pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
+			pte_val(entry) = _PAGE_TYPE_EMPTY;
+		ptep[PTRS_PER_PTE] = entry;
 	}
 }
 
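
The ptep[PTRS_PER_PTE] indexing assumes the page table allocator (changed elsewhere in this series) places the shadow table for noexec mappings directly behind the primary one, so the old page->index lookup in get_shadow_pte() collapses to a fixed offset. A minimal stand-alone model of that layout:

    #include <stdio.h>

    #define PTRS_PER_PTE    256

    typedef unsigned long pte_t;

    /* hypothetical model: primary and shadow table share one allocation */
    static void model_set_pte_at(int noexec, pte_t *ptep, pte_t entry)
    {
            ptep[0] = entry;                        /* primary entry */
            if (noexec)
                    ptep[PTRS_PER_PTE] = entry;     /* shadow entry, fixed offset */
    }

    int main(void)
    {
            pte_t table[2 * PTRS_PER_PTE] = { 0 };  /* primary + shadow pair */

            model_set_pte_at(1, &table[5], 0x1000);
            printf("shadow of slot 5: %#lx\n", table[5 + PTRS_PER_PTE]);
            return 0;
    }
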
@@ -536,14 +514,6 @@ static inline int pte_young(pte_t pte)
 #define pgd_clear(pgd)		do { } while (0)
 #define pud_clear(pud)		do { } while (0)
 
-static inline void pmd_clear_kernel(pmd_t * pmdp)
-{
-	pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
-	pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
-	pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
-	pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
-}
-
 #else /* __s390x__ */
 
 #define pgd_clear(pgd)		do { } while (0)
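
An aside on the removal above (inference from the shift change, not stated in the commit): with PMD_SHIFT at 20 a 31 bit pmd entry covers exactly one 1MB hardware segment, so the four-entry clear that emulated 4MB segments is no longer needed; the single-entry pmd_clear_kernel() that the next hunk makes common to both builds takes over.
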
@@ -562,30 +532,27 @@ static inline void pud_clear(pud_t * pud)
 		pud_clear_kernel(shadow);
 }
 
+#endif /* __s390x__ */
+
 static inline void pmd_clear_kernel(pmd_t * pmdp)
 {
 	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
-	pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
 }
 
-#endif /* __s390x__ */
-
-static inline void pmd_clear(pmd_t * pmdp)
+static inline void pmd_clear(pmd_t *pmd)
 {
-	pmd_t *shadow_pmd = get_shadow_table(pmdp);
+	pmd_t *shadow = get_shadow_table(pmd);
 
-	pmd_clear_kernel(pmdp);
-	if (shadow_pmd)
-		pmd_clear_kernel(shadow_pmd);
+	pmd_clear_kernel(pmd);
+	if (shadow)
+		pmd_clear_kernel(shadow);
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	pte_t *shadow_pte = get_shadow_pte(ptep);
-
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
-	if (shadow_pte)
-		pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
+	if (mm->context.noexec)
+		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
 }
 
 /*
@@ -666,7 +633,7 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
 	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 #ifndef __s390x__
-		/* S390 has 1mb segments, we are emulating 4MB segments */
+		/* pto must point to the start of the segment table */
 		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
 #else
 		/* ipte in zarch mode can do the math */
@@ -680,12 +647,12 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
 }
 
-static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
+static inline void ptep_invalidate(struct mm_struct *mm,
+				   unsigned long address, pte_t *ptep)
 {
 	__ptep_ipte(address, ptep);
-	ptep = get_shadow_pte(ptep);
-	if (ptep)
-		__ptep_ipte(address, ptep);
+	if (mm->context.noexec)
+		__ptep_ipte(address, ptep + PTRS_PER_PTE);
 }
 
 /*
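
The 0x7ffffc00 mask in __ptep_ipte() follows from the new geometry (an illustrative check, not from the patch): a 31 bit pte table is now 256 * 4 = 1K, so clearing the low 10 bits of a pte pointer yields the table origin the ipte instruction expects, while keeping bit 31 clear for 31 bit addressing:

    #include <assert.h>

    int main(void)
    {
            /* hypothetical pte address inside a 1K-aligned table */
            unsigned long ptep = 0x12345678;
            unsigned long pto  = ptep & 0x7ffffc00;

            assert((pto & (1024 - 1)) == 0);  /* 1K aligned table origin */
            assert(pto == 0x12345400);
            return 0;
    }
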
@@ -707,7 +674,7 @@ static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
 	pte_t __pte = *(__ptep);					\
 	if (atomic_read(&(__mm)->mm_users) > 1 ||			\
 	    (__mm) != current->active_mm)				\
-		ptep_invalidate(__address, __ptep);			\
+		ptep_invalidate(__mm, __address, __ptep);		\
 	else								\
 		pte_clear((__mm), (__address), (__ptep));		\
 	__pte;								\
@@ -718,7 +685,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 				     unsigned long address, pte_t *ptep)
 {
 	pte_t pte = *ptep;
-	ptep_invalidate(address, ptep);
+	ptep_invalidate(vma->vm_mm, address, ptep);
 	return pte;
 }
 
@@ -739,7 +706,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	if (full)
 		pte_clear(mm, addr, ptep);
 	else
-		ptep_invalidate(addr, ptep);
+		ptep_invalidate(mm, addr, ptep);
 	return pte;
 }
 
@@ -750,7 +717,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	if (pte_write(__pte)) {						\
 		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
 		    (__mm) != current->active_mm)			\
-			ptep_invalidate(__addr, __ptep);		\
+			ptep_invalidate(__mm, __addr, __ptep);		\
 		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
 	}								\
 })
@@ -760,7 +727,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 ({									\
 	int __changed = !pte_same(*(__ptep), __entry);			\
 	if (__changed) {						\
-		ptep_invalidate(__addr, __ptep);			\
+		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
 		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
 	}								\
 	__changed;							\
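
The remaining hunks all thread the mm through to ptep_invalidate() so the shadow flush can key off mm->context.noexec. The shape of a caller after this change (a hypothetical helper, for illustration only, not from the patch):

    static inline void zap_one(struct mm_struct *mm, unsigned long addr,
                               pte_t *ptep)
    {
            /* one call now invalidates the primary pte and, when
             * mm->context.noexec is set, its shadow at ptep + PTRS_PER_PTE */
            ptep_invalidate(mm, addr, ptep);
    }
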