author     Martin Schwidefsky <schwidefsky@de.ibm.com>    2008-02-09 12:24:35 -0500
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>    2008-02-09 12:24:40 -0500
commit     146e4b3c8b92071b18f0b2e6f47165bad4f9e825 (patch)
tree       7e9db61cacca0f55ce34db089f27fc22a56ebbdd /arch/s390/mm/pgtable.c
parent     0c1f1dcd8c7792aeff6ef62e9508b0041928ab87 (diff)
[S390] 1K/2K page table pages.

This patch implements 1K/2K page table pages for s390.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
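The idea, in brief: an s390 page table has 256 entries, so it fills only 2K on 64-bit (8-byte entries) and 1K on 31-bit (4-byte entries), while pages are 4K. The patch therefore carves each 4K page into TABLES_PER_PAGE fragments, tracks which fragments are in use via the low FRAG_MASK bits of page->flags, and keeps pages with free fragments on a per-mm pgtable_list; for noexec mms a table and its shadow occupy adjacent fragments (bits = 3UL). The following standalone userspace sketch models the fragment walk from page_table_alloc() for the 64-bit case; the file and helper names are illustrative, not from the patch:

	/* fragment_sketch.c - illustrative only, not kernel code.
	 * Models two 2K tables (256 entries of 8 bytes) per 4K page,
	 * one state bit per fragment, as in the patch's
	 * "while (page->flags & bits)" loop.
	 */
	#include <stdio.h>

	#define TABLES_PER_PAGE	2	/* 64-bit; 4 on 31-bit */
	#define FRAG_MASK	((1UL << TABLES_PER_PAGE) - 1)

	/* Pick the first free fragment of a page and mark it used. */
	static unsigned long *alloc_fragment(unsigned long *flags,
					     unsigned long *page)
	{
		unsigned long bits = 1UL;	/* 3UL for a noexec mm */

		if ((*flags & FRAG_MASK) == FRAG_MASK)
			return NULL;		/* page is full */
		while (*flags & bits) {
			page += 256;	/* skip a 256-entry fragment */
			bits <<= 1;
		}
		*flags |= bits;
		return page;
	}

	int main(void)
	{
		static unsigned long page[512];	/* one 4K page, 64-bit */
		unsigned long flags = 0;

		/* Prints +0, then +256 (the second 2K half), then nil. */
		printf("first:  +%td\n", alloc_fragment(&flags, page) - page);
		printf("second: +%td\n", alloc_fragment(&flags, page) - page);
		printf("third:  %p\n", (void *) alloc_fragment(&flags, page));
		return 0;
	}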
Diffstat (limited to 'arch/s390/mm/pgtable.c')
-rw-r--r--  arch/s390/mm/pgtable.c | 102 +++++++++++++++++++++++++-------
 1 file changed, 81 insertions(+), 21 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 019f518cd5a..809e7789303 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -26,8 +26,14 @@
 
 #ifndef CONFIG_64BIT
 #define ALLOC_ORDER	1
+#define TABLES_PER_PAGE	4
+#define FRAG_MASK	15UL
+#define SECOND_HALVES	10UL
 #else
 #define ALLOC_ORDER	2
+#define TABLES_PER_PAGE	2
+#define FRAG_MASK	3UL
+#define SECOND_HALVES	2UL
 #endif
 
 unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
@@ -45,13 +51,20 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
 		}
 		page->index = page_to_phys(shadow);
 	}
+	spin_lock(&mm->page_table_lock);
+	list_add(&page->lru, &mm->context.crst_list);
+	spin_unlock(&mm->page_table_lock);
 	return (unsigned long *) page_to_phys(page);
 }
 
-void crst_table_free(unsigned long *table)
+void crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
 	unsigned long *shadow = get_shadow_table(table);
+	struct page *page = virt_to_page(table);
 
+	spin_lock(&mm->page_table_lock);
+	list_del(&page->lru);
+	spin_unlock(&mm->page_table_lock);
 	if (shadow)
 		free_pages((unsigned long) shadow, ALLOC_ORDER);
 	free_pages((unsigned long) table, ALLOC_ORDER);
@@ -60,37 +73,84 @@ void crst_table_free(unsigned long *table)
 /*
  * page table entry allocation/free routines.
  */
-unsigned long *page_table_alloc(int noexec)
+unsigned long *page_table_alloc(struct mm_struct *mm)
 {
-	struct page *page = alloc_page(GFP_KERNEL);
+	struct page *page;
 	unsigned long *table;
+	unsigned long bits;
 
-	if (!page)
-		return NULL;
-	page->index = 0;
-	if (noexec) {
-		struct page *shadow = alloc_page(GFP_KERNEL);
-		if (!shadow) {
-			__free_page(page);
+	bits = mm->context.noexec ? 3UL : 1UL;
+	spin_lock(&mm->page_table_lock);
+	page = NULL;
+	if (!list_empty(&mm->context.pgtable_list)) {
+		page = list_first_entry(&mm->context.pgtable_list,
+					struct page, lru);
+		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
+			page = NULL;
+	}
+	if (!page) {
+		spin_unlock(&mm->page_table_lock);
+		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+		if (!page)
 			return NULL;
-		}
-		table = (unsigned long *) page_to_phys(shadow);
+		pgtable_page_ctor(page);
+		page->flags &= ~FRAG_MASK;
+		table = (unsigned long *) page_to_phys(page);
 		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
-		page->index = (addr_t) table;
+		spin_lock(&mm->page_table_lock);
+		list_add(&page->lru, &mm->context.pgtable_list);
 	}
-	pgtable_page_ctor(page);
 	table = (unsigned long *) page_to_phys(page);
-	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+	while (page->flags & bits) {
+		table += 256;
+		bits <<= 1;
+	}
+	page->flags |= bits;
+	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
+		list_move_tail(&page->lru, &mm->context.pgtable_list);
+	spin_unlock(&mm->page_table_lock);
 	return table;
 }
 
-void page_table_free(unsigned long *table)
+void page_table_free(struct mm_struct *mm, unsigned long *table)
 {
-	unsigned long *shadow = get_shadow_pte(table);
+	struct page *page;
+	unsigned long bits;
 
-	pgtable_page_dtor(virt_to_page(table));
-	if (shadow)
-		free_page((unsigned long) shadow);
-	free_page((unsigned long) table);
+	bits = mm->context.noexec ? 3UL : 1UL;
+	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	spin_lock(&mm->page_table_lock);
+	page->flags ^= bits;
+	if (page->flags & FRAG_MASK) {
+		/* Page now has some free pgtable fragments. */
+		list_move(&page->lru, &mm->context.pgtable_list);
+		page = NULL;
+	} else
+		/* All fragments of the 4K page have been freed. */
+		list_del(&page->lru);
+	spin_unlock(&mm->page_table_lock);
+	if (page) {
+		pgtable_page_dtor(page);
+		__free_page(page);
+	}
+}
 
+void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
+{
+	struct page *page;
+
+	spin_lock(&mm->page_table_lock);
+	/* Free shadow region and segment tables. */
+	list_for_each_entry(page, &mm->context.crst_list, lru)
+		if (page->index) {
+			free_pages((unsigned long) page->index, ALLOC_ORDER);
+			page->index = 0;
+		}
+	/* "Free" second halves of page tables. */
+	list_for_each_entry(page, &mm->context.pgtable_list, lru)
+		page->flags &= ~SECOND_HALVES;
+	spin_unlock(&mm->page_table_lock);
+	mm->context.noexec = 0;
+	update_mm(mm, tsk);
 }
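Note that the new signatures ripple out to callers: crst_table_free() and page_table_free() now take the mm_struct so they can hold mm->page_table_lock while maintaining the per-mm crst_list and pgtable_list (the matching header updates are outside this file's diff). A minimal caller sketch, with the surrounding kernel context assumed:

	/* Illustrative pairing of the mm-aware interfaces;
	 * not part of this diff. */
	unsigned long *table = page_table_alloc(mm);
	if (table)
		page_table_free(mm, table);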