path: root/arch/s390/mm
author     David Woodhouse <David.Woodhouse@intel.com>  2008-07-11 09:36:25 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>  2008-07-11 09:36:25 -0400
commit     a8931ef380c92d121ae74ecfb03b2d63f72eea6f (patch)
tree       980fb6b019e11e6cb1ece55b7faff184721a8053 /arch/s390/mm
parent     90574d0a4d4b73308ae54a2a57a4f3f1fa98e984 (diff)
parent     e5a5816f7875207cb0a0a7032e39a4686c5e10a4 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/Makefile       |  1
-rw-r--r--  arch/s390/mm/init.c         | 52
-rw-r--r--  arch/s390/mm/page-states.c  | 79
-rw-r--r--  arch/s390/mm/pgtable.c      | 44
-rw-r--r--  arch/s390/mm/vmem.c         |  5
5 files changed, 136 insertions, 45 deletions
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index fb988a48a754..2a7458134544 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -5,3 +5,4 @@
 obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o
 obj-$(CONFIG_CMM) += cmm.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_PAGE_STATES) += page-states.o
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index fa31de6ae97a..05598649b326 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -44,37 +44,34 @@ char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 
 void show_mem(void)
 {
-	int i, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
+	unsigned long i, total = 0, reserved = 0;
+	unsigned long shared = 0, cached = 0;
+	unsigned long flags;
 	struct page *page;
+	pg_data_t *pgdat;
 
 	printk("Mem-info:\n");
 	show_free_areas();
-	i = max_mapnr;
-	while (i-- > 0) {
-		if (!pfn_valid(i))
-			continue;
-		page = pfn_to_page(i);
-		total++;
-		if (PageReserved(page))
-			reserved++;
-		else if (PageSwapCache(page))
-			cached++;
-		else if (page_count(page))
-			shared += page_count(page) - 1;
+	for_each_online_pgdat(pgdat) {
+		pgdat_resize_lock(pgdat, &flags);
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			if (!pfn_valid(pgdat->node_start_pfn + i))
+				continue;
+			page = pfn_to_page(pgdat->node_start_pfn + i);
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (page_count(page))
+				shared += page_count(page) - 1;
+		}
+		pgdat_resize_unlock(pgdat, &flags);
 	}
-	printk("%d pages of RAM\n", total);
-	printk("%d reserved pages\n", reserved);
-	printk("%d pages shared\n", shared);
-	printk("%d pages swap cached\n", cached);
-
-	printk("%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
-	printk("%lu pages writeback\n", global_page_state(NR_WRITEBACK));
-	printk("%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
-	printk("%lu pages slab\n",
-	       global_page_state(NR_SLAB_RECLAIMABLE) +
-	       global_page_state(NR_SLAB_UNRECLAIMABLE));
-	printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
+	printk("%ld pages of RAM\n", total);
+	printk("%ld reserved pages\n", reserved);
+	printk("%ld pages shared\n", shared);
+	printk("%ld pages swap cached\n", cached);
 }
 
 /*
@@ -126,6 +123,9 @@ void __init mem_init(void)
 	/* clear the zero-page */
 	memset(empty_zero_page, 0, PAGE_SIZE);
 
+	/* Setup guest page hinting */
+	cmma_init();
+
 	/* this will put all low memory onto the freelists */
 	totalram_pages += free_all_bootmem();
 
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
new file mode 100644
index 000000000000..fc0ad73ffd90
--- /dev/null
+++ b/arch/s390/mm/page-states.c
@@ -0,0 +1,79 @@
+/*
+ * arch/s390/mm/page-states.c
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Guest page hinting for unused pages.
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#define ESSA_SET_STABLE		1
+#define ESSA_SET_UNUSED		2
+
+static int cmma_flag;
+
+static int __init cmma(char *str)
+{
+	char *parm;
+	parm = strstrip(str);
+	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
+		cmma_flag = 1;
+		return 1;
+	}
+	cmma_flag = 0;
+	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
+		return 1;
+	return 0;
+}
+
+__setup("cmma=", cmma);
+
+void __init cmma_init(void)
+{
+	register unsigned long tmp asm("0") = 0;
+	register int rc asm("1") = -EOPNOTSUPP;
+
+	if (!cmma_flag)
+		return;
+	asm volatile(
+		"	.insn rrf,0xb9ab0000,%1,%1,0,0\n"
+		"0:	la	%0,0\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+&d" (rc), "+&d" (tmp));
+	if (rc)
+		cmma_flag = 0;
+}
+
+void arch_free_page(struct page *page, int order)
+{
+	int i, rc;
+
+	if (!cmma_flag)
+		return;
+	for (i = 0; i < (1 << order); i++)
+		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
+			     : "=&d" (rc)
+			     : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
+			       "i" (ESSA_SET_UNUSED));
+}
+
+void arch_alloc_page(struct page *page, int order)
+{
+	int i, rc;
+
+	if (!cmma_flag)
+		return;
+	for (i = 0; i < (1 << order); i++)
+		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
+			     : "=&d" (rc)
+			     : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
+			       "i" (ESSA_SET_STABLE));
+}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5c1aea97cd12..3d98ba82ea67 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -254,36 +254,46 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
 int s390_enable_sie(void)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm;
-	int rc;
+	struct mm_struct *mm, *old_mm;
 
-	task_lock(tsk);
-
-	rc = 0;
+	/* Do we have pgstes? if yes, we are done */
 	if (tsk->mm->context.pgstes)
-		goto unlock;
+		return 0;
 
-	rc = -EINVAL;
+	/* lets check if we are allowed to replace the mm */
+	task_lock(tsk);
 	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
-		goto unlock;
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+	task_unlock(tsk);
 
-	tsk->mm->context.pgstes = 1;	/* dirty little tricks .. */
+	/* we copy the mm with pgstes enabled */
+	tsk->mm->context.pgstes = 1;
 	mm = dup_mm(tsk);
 	tsk->mm->context.pgstes = 0;
-
-	rc = -ENOMEM;
 	if (!mm)
-		goto unlock;
-	mmput(tsk->mm);
+		return -ENOMEM;
+
+	/* Now lets check again if somebody attached ptrace etc */
+	task_lock(tsk);
+	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		mmput(mm);
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+
+	/* ok, we are alone. No ptrace, no threads, etc. */
+	old_mm = tsk->mm;
 	tsk->mm = tsk->active_mm = mm;
 	preempt_disable();
 	update_mm(mm, tsk);
 	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
 	preempt_enable();
-	rc = 0;
-unlock:
 	task_unlock(tsk);
-	return rc;
+	mmput(old_mm);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index beccacf907f3..e4868bfc672f 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -60,7 +60,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
 	return pmd;
 }
 
-static pte_t __init_refok *vmem_pte_alloc(void)
+static pte_t __ref *vmem_pte_alloc(void)
 {
 	pte_t *pte;
 
@@ -221,6 +221,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 			*pt_dir = pte;
 		}
 	}
+	memset(start, 0, nr * sizeof(struct page));
 	ret = 0;
 out:
 	flush_tlb_kernel_range(start_addr, end_addr);
@@ -235,7 +236,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
 	struct memory_segment *tmp;
 
-	if (seg->start + seg->size >= VMEM_MAX_PHYS ||
+	if (seg->start + seg->size > VMEM_MAX_PHYS ||
 	    seg->start + seg->size < seg->start)
 		return -ERANGE;
 