Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/init.c	49
-rw-r--r--	arch/s390/mm/pgtable.c	44
-rw-r--r--	arch/s390/mm/vmem.c	20
3 files changed, 64 insertions(+), 49 deletions(-)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 29f3a63806b9..05598649b326 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -44,37 +44,34 @@ char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 
 void show_mem(void)
 {
-	int i, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
+	unsigned long i, total = 0, reserved = 0;
+	unsigned long shared = 0, cached = 0;
+	unsigned long flags;
 	struct page *page;
+	pg_data_t *pgdat;
 
 	printk("Mem-info:\n");
 	show_free_areas();
-	i = max_mapnr;
-	while (i-- > 0) {
-		if (!pfn_valid(i))
-			continue;
-		page = pfn_to_page(i);
-		total++;
-		if (PageReserved(page))
-			reserved++;
-		else if (PageSwapCache(page))
-			cached++;
-		else if (page_count(page))
-			shared += page_count(page) - 1;
+	for_each_online_pgdat(pgdat) {
+		pgdat_resize_lock(pgdat, &flags);
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			if (!pfn_valid(pgdat->node_start_pfn + i))
+				continue;
+			page = pfn_to_page(pgdat->node_start_pfn + i);
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (page_count(page))
+				shared += page_count(page) - 1;
+		}
+		pgdat_resize_unlock(pgdat, &flags);
 	}
-	printk("%d pages of RAM\n", total);
-	printk("%d reserved pages\n", reserved);
-	printk("%d pages shared\n", shared);
-	printk("%d pages swap cached\n", cached);
-
-	printk("%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
-	printk("%lu pages writeback\n", global_page_state(NR_WRITEBACK));
-	printk("%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
-	printk("%lu pages slab\n",
-	       global_page_state(NR_SLAB_RECLAIMABLE) +
-	       global_page_state(NR_SLAB_UNRECLAIMABLE));
-	printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
+	printk("%ld pages of RAM\n", total);
+	printk("%ld reserved pages\n", reserved);
+	printk("%ld pages shared\n", shared);
+	printk("%ld pages swap cached\n", cached);
 }
 
 /*
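A note on the show_mem() rewrite above: instead of scanning pfns from 0 to max_mapnr, the new code walks each online node's spanned pfn range while holding the pgdat resize lock, so a concurrent memory hotplug operation cannot change the node's span mid-walk. A minimal sketch of the same iteration pattern follows; count_reserved_pages() is a hypothetical helper for illustration, not part of the patch:

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory_hotplug.h>

/* Count PG_reserved pages node by node, using the pattern above. */
static unsigned long count_reserved_pages(void)
{
	unsigned long i, flags, reserved = 0;
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat) {
		/* Hold the resize lock so hotplug cannot change the span. */
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			unsigned long pfn = pgdat->node_start_pfn + i;

			/* A spanned range may contain holes. */
			if (!pfn_valid(pfn))
				continue;
			if (PageReserved(pfn_to_page(pfn)))
				reserved++;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	return reserved;
}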
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5c1aea97cd12..3d98ba82ea67 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -254,36 +254,46 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
 int s390_enable_sie(void)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm;
-	int rc;
+	struct mm_struct *mm, *old_mm;
 
-	task_lock(tsk);
-
-	rc = 0;
+	/* Do we have pgstes? if yes, we are done */
 	if (tsk->mm->context.pgstes)
-		goto unlock;
+		return 0;
 
-	rc = -EINVAL;
+	/* lets check if we are allowed to replace the mm */
+	task_lock(tsk);
 	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
-		goto unlock;
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+	task_unlock(tsk);
 
-	tsk->mm->context.pgstes = 1;	/* dirty little tricks .. */
+	/* we copy the mm with pgstes enabled */
+	tsk->mm->context.pgstes = 1;
 	mm = dup_mm(tsk);
 	tsk->mm->context.pgstes = 0;
-
-	rc = -ENOMEM;
 	if (!mm)
-		goto unlock;
-	mmput(tsk->mm);
+		return -ENOMEM;
+
+	/* Now lets check again if somebody attached ptrace etc */
+	task_lock(tsk);
+	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		mmput(mm);
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+
+	/* ok, we are alone. No ptrace, no threads, etc. */
+	old_mm = tsk->mm;
 	tsk->mm = tsk->active_mm = mm;
 	preempt_disable();
 	update_mm(mm, tsk);
 	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
 	preempt_enable();
-	rc = 0;
-unlock:
 	task_unlock(tsk);
-	return rc;
+	mmput(old_mm);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
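The s390_enable_sie() rework above moves dup_mm(), which allocates memory and may sleep, out from under task_lock(), a spinlock. The resulting shape is the classic check / allocate / re-check pattern: test the precondition cheaply, drop the lock for the expensive allocation, then retake the lock and validate the precondition again before committing, since a thread or ptracer could have attached in the unlocked window (the diff's own comment says as much). The final mmput() on the old mm is likewise deferred until after the lock is released. A generic sketch of the pattern, with all helper names hypothetical:

#include <linux/errno.h>
#include <linux/sched.h>

/* Hypothetical helpers, named only for illustration. */
extern bool may_replace(struct task_struct *tsk);
extern void *alloc_replacement(struct task_struct *tsk);	/* may sleep */
extern void *install_replacement(struct task_struct *tsk, void *res);
extern void free_replacement(void *res);

int replace_task_resource(struct task_struct *tsk)
{
	void *new, *old;

	/* Cheap precondition check under the lock. */
	task_lock(tsk);
	if (!may_replace(tsk)) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* The allocation may sleep, so no spinlock may be held here. */
	new = alloc_replacement(tsk);
	if (!new)
		return -ENOMEM;

	/* The window above was unlocked: re-validate before committing. */
	task_lock(tsk);
	if (!may_replace(tsk)) {
		free_replacement(new);
		task_unlock(tsk);
		return -EINVAL;
	}
	old = install_replacement(tsk, new);	/* commit under the lock */
	task_unlock(tsk);

	/* Release the old resource only after dropping the lock. */
	free_replacement(old);
	return 0;
}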
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ea2804808f39..e4868bfc672f 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,12 +27,19 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-static pud_t *vmem_pud_alloc(void)
+static void __ref *vmem_alloc_pages(unsigned int order)
+{
+	if (slab_is_available())
+		return (void *)__get_free_pages(GFP_KERNEL, order);
+	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
 
 #ifdef CONFIG_64BIT
-	pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pud = vmem_alloc_pages(2);
 	if (!pud)
 		return NULL;
 	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -40,12 +47,12 @@ static pud_t *vmem_pud_alloc(void)
 	return pud;
 }
 
-static pmd_t *vmem_pmd_alloc(void)
+static inline pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
 
 #ifdef CONFIG_64BIT
-	pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
 	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
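The new vmem_alloc_pages() covers the window before the buddy allocator is up: early callers fall back to bootmem, later callers get normal pages, and the __ref annotation tells modpost that the reference from this non-__init function into __init bootmem code is intentional. An order-2 request returns four contiguous pages, matching the old vmemmap_alloc_block(PAGE_SIZE * 4, 0) calls it replaces. A sketch of the same split for a zeroed allocation; early_zalloc_pages() is a hypothetical name, not in the patch:

#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Boot-time vs. run-time page allocation, zeroed on both paths. */
static void __ref *early_zalloc_pages(unsigned int order)
{
	if (slab_is_available())
		/* Buddy allocator is up; __GFP_ZERO clears the pages. */
		return (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	/* Bootmem memory is handed out pre-zeroed. */
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}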
@@ -207,13 +214,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		if (pte_none(*pt_dir)) {
 			unsigned long new_page;
 
-			new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
+			new_page =__pa(vmem_alloc_pages(0));
 			if (!new_page)
 				goto out;
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
 			*pt_dir = pte;
 		}
 	}
+	memset(start, 0, nr * sizeof(struct page));
 	ret = 0;
 out:
 	flush_tlb_kernel_range(start_addr, end_addr);
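The added memset() looks like a consequence of the allocator switch (this reading is inferred from the diff, not stated in it): bootmem returns zeroed memory, but __get_free_pages() without __GFP_ZERO does not, so once the pages backing the struct page array can come from the buddy allocator the populated range must be cleared explicitly, and doing it once over the whole range is cheaper than zeroing per allocation. A per-object restatement of what that single memset covers; zero_new_vmemmap() is a hypothetical name:

#include <linux/mm.h>
#include <linux/string.h>

/* Equivalent view of the added memset: clear each new struct page. */
static void zero_new_vmemmap(struct page *start, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++)
		memset(&start[i], 0, sizeof(struct page));
}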
@@ -228,7 +236,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
 	struct memory_segment *tmp;
 
-	if (seg->start + seg->size >= VMEM_MAX_PHYS ||
+	if (seg->start + seg->size > VMEM_MAX_PHYS ||
 	    seg->start + seg->size < seg->start)
 		return -ERANGE;
 
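The last hunk is a one-character off-by-one fix: seg->start + seg->size is the segment's exclusive end, so a segment ending exactly at VMEM_MAX_PHYS still lies entirely inside the mappable range and must be accepted, which the old >= comparison wrongly rejected. A standalone restatement of the check for illustration; check_segment_range() is a hypothetical helper:

#include <linux/errno.h>

/* Range check: the end may equal the limit; also catch wraparound. */
static int check_segment_range(unsigned long start, unsigned long size,
			       unsigned long limit)
{
	if (start + size > limit ||	/* extends past mappable memory */
	    start + size < start)	/* unsigned overflow of start + size */
		return -ERANGE;
	return 0;
}

With limit set to VMEM_MAX_PHYS, a segment whose end equals the limit now passes; the second condition still rejects sizes large enough to wrap the unsigned sum.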