author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2008-02-09 12:24:35 -0500
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2008-02-09 12:24:40 -0500
commit     146e4b3c8b92071b18f0b2e6f47165bad4f9e825 (patch)
tree       7e9db61cacca0f55ce34db089f27fc22a56ebbdd /arch
parent     0c1f1dcd8c7792aeff6ef62e9508b0041928ab87 (diff)
[S390] 1K/2K page table pages.
This patch implements 1K/2K page table pages for s390.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch')
-rw-r--r--   arch/s390/mm/init.c    |   2
-rw-r--r--   arch/s390/mm/pgtable.c | 102
-rw-r--r--   arch/s390/mm/vmem.c    |  14
3 files changed, 93 insertions(+), 25 deletions(-)
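A note before the per-file diffs: the patch carves several page tables out of one 4K page instead of spending a full page per table. On 31-bit a page table is 1K (four per page, FRAG_MASK 15UL); on 64-bit it is 2K (two per page, FRAG_MASK 3UL). Which fragments of a page are handed out is tracked in the low bits of page->flags, and pages with free fragments are kept on mm->context.pgtable_list. The following stand-alone C sketch mimics only that bit bookkeeping under the 31-bit parameters; struct frag_page, frag_alloc() and frag_free() are illustrative names and are not part of the patch.

/*
 * Stand-alone sketch (not the kernel code) of the fragment bookkeeping:
 * one 4K page holds several page-table fragments, and one bit per
 * fragment in a flags word records which of them are handed out.
 * The names frag_page, frag_alloc and frag_free are illustrative.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define FRAG_SIZE	1024UL				/* 1K tables, 31-bit case */
#define TABLES_PER_PAGE	(PAGE_SIZE / FRAG_SIZE)		/* 4 */
#define FRAG_MASK	((1UL << TABLES_PER_PAGE) - 1)	/* 15UL */

struct frag_page {
	unsigned long flags;		/* low bits = fragment-in-use map */
	unsigned char mem[PAGE_SIZE];	/* stands in for the real 4K page */
};

/* Hand out the first free fragment, or NULL if all bits are set. */
static void *frag_alloc(struct frag_page *p)
{
	unsigned long bit = 1UL;
	size_t offset = 0;

	if ((p->flags & FRAG_MASK) == FRAG_MASK)
		return NULL;			/* page is full */
	while (p->flags & bit) {		/* skip fragments already in use */
		bit <<= 1;
		offset += FRAG_SIZE;
	}
	p->flags |= bit;
	return p->mem + offset;
}

/* Derive the bit from the fragment's offset inside the page and clear it. */
static void frag_free(struct frag_page *p, void *frag)
{
	size_t offset = (unsigned char *)frag - p->mem;

	p->flags ^= 1UL << (offset / FRAG_SIZE);
}

int main(void)
{
	struct frag_page page = { 0 };
	void *a = frag_alloc(&page);
	void *b = frag_alloc(&page);

	printf("after two allocs:  flags = %#lx\n", page.flags);	/* 0x3 */
	frag_free(&page, a);
	printf("after freeing one: flags = %#lx\n", page.flags);	/* 0x2 */
	frag_free(&page, b);
	printf("after freeing all: flags = %#lx\n", page.flags);	/* 0x0 */
	return 0;
}

page_table_alloc() and page_table_free() in the pgtable.c hunk below do the same walk and bit flip, with the extra twist that a noexec address space claims two fragments at once (bits = 3UL), the second one holding the shadow table, hence the SECOND_HALVES mask cleared in disable_noexec().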
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 983ec6ec0e7c..01dfe20f846d 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -184,7 +184,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		pmd = pmd_offset(pud, address);
 		pte = pte_offset_kernel(pmd, address);
 		if (!enable) {
-			ptep_invalidate(address, pte);
+			ptep_invalidate(&init_mm, address, pte);
 			continue;
 		}
 		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 019f518cd5a0..809e77893039 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -26,8 +26,14 @@
 
 #ifndef CONFIG_64BIT
 #define ALLOC_ORDER	1
+#define TABLES_PER_PAGE	4
+#define FRAG_MASK	15UL
+#define SECOND_HALVES	10UL
 #else
 #define ALLOC_ORDER	2
+#define TABLES_PER_PAGE	2
+#define FRAG_MASK	3UL
+#define SECOND_HALVES	2UL
 #endif
 
 unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
@@ -45,13 +51,20 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
 		}
 		page->index = page_to_phys(shadow);
 	}
+	spin_lock(&mm->page_table_lock);
+	list_add(&page->lru, &mm->context.crst_list);
+	spin_unlock(&mm->page_table_lock);
 	return (unsigned long *) page_to_phys(page);
 }
 
-void crst_table_free(unsigned long *table)
+void crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
 	unsigned long *shadow = get_shadow_table(table);
+	struct page *page = virt_to_page(table);
 
+	spin_lock(&mm->page_table_lock);
+	list_del(&page->lru);
+	spin_unlock(&mm->page_table_lock);
 	if (shadow)
 		free_pages((unsigned long) shadow, ALLOC_ORDER);
 	free_pages((unsigned long) table, ALLOC_ORDER);
@@ -60,37 +73,84 @@ void crst_table_free(unsigned long *table)
 /*
  * page table entry allocation/free routines.
  */
-unsigned long *page_table_alloc(int noexec)
+unsigned long *page_table_alloc(struct mm_struct *mm)
 {
-	struct page *page = alloc_page(GFP_KERNEL);
+	struct page *page;
 	unsigned long *table;
+	unsigned long bits;
 
-	if (!page)
-		return NULL;
-	page->index = 0;
-	if (noexec) {
-		struct page *shadow = alloc_page(GFP_KERNEL);
-		if (!shadow) {
-			__free_page(page);
+	bits = mm->context.noexec ? 3UL : 1UL;
+	spin_lock(&mm->page_table_lock);
+	page = NULL;
+	if (!list_empty(&mm->context.pgtable_list)) {
+		page = list_first_entry(&mm->context.pgtable_list,
+					struct page, lru);
+		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
+			page = NULL;
+	}
+	if (!page) {
+		spin_unlock(&mm->page_table_lock);
+		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+		if (!page)
 			return NULL;
-		}
-		table = (unsigned long *) page_to_phys(shadow);
+		pgtable_page_ctor(page);
+		page->flags &= ~FRAG_MASK;
+		table = (unsigned long *) page_to_phys(page);
 		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
-		page->index = (addr_t) table;
+		spin_lock(&mm->page_table_lock);
+		list_add(&page->lru, &mm->context.pgtable_list);
 	}
-	pgtable_page_ctor(page);
 	table = (unsigned long *) page_to_phys(page);
-	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+	while (page->flags & bits) {
+		table += 256;
+		bits <<= 1;
+	}
+	page->flags |= bits;
+	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
+		list_move_tail(&page->lru, &mm->context.pgtable_list);
+	spin_unlock(&mm->page_table_lock);
 	return table;
 }
 
-void page_table_free(unsigned long *table)
+void page_table_free(struct mm_struct *mm, unsigned long *table)
 {
-	unsigned long *shadow = get_shadow_pte(table);
+	struct page *page;
+	unsigned long bits;
 
-	pgtable_page_dtor(virt_to_page(table));
-	if (shadow)
-		free_page((unsigned long) shadow);
-	free_page((unsigned long) table);
+	bits = mm->context.noexec ? 3UL : 1UL;
+	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	spin_lock(&mm->page_table_lock);
+	page->flags ^= bits;
+	if (page->flags & FRAG_MASK) {
+		/* Page now has some free pgtable fragments. */
+		list_move(&page->lru, &mm->context.pgtable_list);
+		page = NULL;
+	} else
+		/* All fragments of the 4K page have been freed. */
+		list_del(&page->lru);
+	spin_unlock(&mm->page_table_lock);
+	if (page) {
+		pgtable_page_dtor(page);
+		__free_page(page);
+	}
+}
 
+void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
+{
+	struct page *page;
+
+	spin_lock(&mm->page_table_lock);
+	/* Free shadow region and segment tables. */
+	list_for_each_entry(page, &mm->context.crst_list, lru)
+		if (page->index) {
+			free_pages((unsigned long) page->index, ALLOC_ORDER);
+			page->index = 0;
+		}
+	/* "Free" second halves of page tables. */
+	list_for_each_entry(page, &mm->context.pgtable_list, lru)
+		page->flags &= ~SECOND_HALVES;
+	spin_unlock(&mm->page_table_lock);
+	mm->context.noexec = 0;
+	update_mm(mm, tsk);
 }
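To make the index arithmetic in page_table_free() above concrete, here is a tiny stand-alone sketch of how the bit to clear is derived from a table address, using the 64-bit parameters (2K tables, so advancing table by 256 longs steps by one fragment). pa() and the sample addresses are illustrative stand-ins for the kernel's __pa() and real page-table pointers; the sketch assumes an LP64 host so that sizeof(unsigned long) == 8, as on 64-bit s390.

/*
 * Sketch only: how page_table_free() turns a table address into the
 * fragment bit to clear in page->flags.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long pa(unsigned long addr)
{
	return addr;	/* pretend the address is already physical */
}

int main(void)
{
	/* 64-bit case: two 2K tables in the 4K page at 0x10000, i.e.
	 * 256 entries of sizeof(unsigned long) bytes each. */
	unsigned long tables[] = { 0x10000UL, 0x10800UL };

	for (int i = 0; i < 2; i++) {
		unsigned long bits = 1UL;	/* 3UL if mm->context.noexec */

		/* same expression as in the patch: offset within the page,
		 * divided by the byte size of one 256-entry table */
		bits <<= (pa(tables[i]) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
		printf("table at %#lx -> flags bit %#lx\n", tables[i], bits);
	}
	return 0;
}

Freeing XORs that bit out of page->flags; if any FRAG_MASK bit remains set the page goes back to the front of pgtable_list, otherwise the whole 4K page is released with __free_page().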
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 7c1287ccf788..434491f8f47c 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -84,13 +84,18 @@ static inline pmd_t *vmem_pmd_alloc(void)
 	return pmd;
 }
 
-static inline pte_t *vmem_pte_alloc(void)
+static pte_t __init_refok *vmem_pte_alloc(void)
 {
-	pte_t *pte = vmem_alloc_pages(0);
+	pte_t *pte;
 
+	if (slab_is_available())
+		pte = (pte_t *) page_table_alloc(&init_mm);
+	else
+		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
 	if (!pte)
 		return NULL;
-	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+		    PTRS_PER_PTE * sizeof(pte_t));
 	return pte;
 }
 
@@ -360,6 +365,9 @@ void __init vmem_map_init(void)
 {
 	int i;
 
+	INIT_LIST_HEAD(&init_mm.context.crst_list);
+	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
+	init_mm.context.noexec = 0;
 	NODE_DATA(0)->node_mem_map = VMEM_MAP;
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
 		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);