aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/mm
diff options
context:
space:
mode:
authorHeiko Carstens <heiko.carstens@de.ibm.com>2016-05-20 02:08:14 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2016-06-13 09:58:16 -0400
commit37cd944c8d8f406eee8e0c580f823ff66738c0af (patch)
treebc6d06963da9d43f445b92d0e2b15b05b65c6f3c /arch/s390/mm
parentbab247ff5f669216e3ed2f9a4034c540187e874c (diff)
s390/pgtable: add mapping statistics
Add statistics that show how memory is mapped within the kernel identity mapping. This is more or less the same as git commit ce0c0e50f94e ("x86, generic: CPA add statistics about state of direct mapping v4") for x86. I also intentionally copied the lower case "k" within DirectMap4k vs the upper case "M" and "G" within the two other lines. Let's have consistent inconsistencies across architectures. The output of /proc/meminfo now contains these additional lines: DirectMap4k: 2048 kB DirectMap1M: 3991552 kB DirectMap2G: 4194304 kB The implementation on s390 is lockless unlike the x86 version, since I assume changes to the kernel mapping are a very rare event. Therefore it really doesn't matter if these statistics could potentially be inconsistent if read while kernel pages tables are being changed. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--arch/s390/mm/pageattr.c18
-rw-r--r--arch/s390/mm/vmem.c16
2 files changed, 34 insertions, 0 deletions
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 91e5e29c1f5c..ba124d9c96ba 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -40,6 +40,20 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
40} 40}
41#endif 41#endif
42 42
43#ifdef CONFIG_PROC_FS
44atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
45
46void arch_report_meminfo(struct seq_file *m)
47{
48 seq_printf(m, "DirectMap4k: %8lu kB\n",
49 atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
50 seq_printf(m, "DirectMap1M: %8lu kB\n",
51 atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
52 seq_printf(m, "DirectMap2G: %8lu kB\n",
53 atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
54}
55#endif /* CONFIG_PROC_FS */
56
43static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr, 57static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
44 unsigned long dtt) 58 unsigned long dtt)
45{ 59{
@@ -114,6 +128,8 @@ static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
114 } 128 }
115 pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY; 129 pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
116 pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT); 130 pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
131 update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
132 update_page_count(PG_DIRECT_MAP_1M, -1);
117 return 0; 133 return 0;
118} 134}
119 135
@@ -181,6 +197,8 @@ static int split_pud_page(pud_t *pudp, unsigned long addr)
181 } 197 }
182 pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY; 198 pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
183 pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3); 199 pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
200 update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
201 update_page_count(PG_DIRECT_MAP_2G, -1);
184 return 0; 202 return 0;
185} 203}
186 204
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b200f976c36b..a1e7c0b207e6 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -80,6 +80,7 @@ pte_t __ref *vmem_pte_alloc(void)
80 */ 80 */
81static int vmem_add_mem(unsigned long start, unsigned long size) 81static int vmem_add_mem(unsigned long start, unsigned long size)
82{ 82{
83 unsigned long pages4k, pages1m, pages2g;
83 unsigned long end = start + size; 84 unsigned long end = start + size;
84 unsigned long address = start; 85 unsigned long address = start;
85 pgd_t *pg_dir; 86 pgd_t *pg_dir;
@@ -88,6 +89,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
88 pte_t *pt_dir; 89 pte_t *pt_dir;
89 int ret = -ENOMEM; 90 int ret = -ENOMEM;
90 91
92 pages4k = pages1m = pages2g = 0;
91 while (address < end) { 93 while (address < end) {
92 pg_dir = pgd_offset_k(address); 94 pg_dir = pgd_offset_k(address);
93 if (pgd_none(*pg_dir)) { 95 if (pgd_none(*pg_dir)) {
@@ -102,6 +104,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
102 !debug_pagealloc_enabled()) { 104 !debug_pagealloc_enabled()) {
103 pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL); 105 pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
104 address += PUD_SIZE; 106 address += PUD_SIZE;
107 pages2g++;
105 continue; 108 continue;
106 } 109 }
107 if (pud_none(*pu_dir)) { 110 if (pud_none(*pu_dir)) {
@@ -116,6 +119,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
116 !debug_pagealloc_enabled()) { 119 !debug_pagealloc_enabled()) {
117 pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL); 120 pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
118 address += PMD_SIZE; 121 address += PMD_SIZE;
122 pages1m++;
119 continue; 123 continue;
120 } 124 }
121 if (pmd_none(*pm_dir)) { 125 if (pmd_none(*pm_dir)) {
@@ -128,9 +132,13 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
128 pt_dir = pte_offset_kernel(pm_dir, address); 132 pt_dir = pte_offset_kernel(pm_dir, address);
129 pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL); 133 pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
130 address += PAGE_SIZE; 134 address += PAGE_SIZE;
135 pages4k++;
131 } 136 }
132 ret = 0; 137 ret = 0;
133out: 138out:
139 update_page_count(PG_DIRECT_MAP_4K, pages4k);
140 update_page_count(PG_DIRECT_MAP_1M, pages1m);
141 update_page_count(PG_DIRECT_MAP_2G, pages2g);
134 return ret; 142 return ret;
135} 143}
136 144
@@ -140,6 +148,7 @@ out:
140 */ 148 */
141static void vmem_remove_range(unsigned long start, unsigned long size) 149static void vmem_remove_range(unsigned long start, unsigned long size)
142{ 150{
151 unsigned long pages4k, pages1m, pages2g;
143 unsigned long end = start + size; 152 unsigned long end = start + size;
144 unsigned long address = start; 153 unsigned long address = start;
145 pgd_t *pg_dir; 154 pgd_t *pg_dir;
@@ -147,6 +156,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
147 pmd_t *pm_dir; 156 pmd_t *pm_dir;
148 pte_t *pt_dir; 157 pte_t *pt_dir;
149 158
159 pages4k = pages1m = pages2g = 0;
150 while (address < end) { 160 while (address < end) {
151 pg_dir = pgd_offset_k(address); 161 pg_dir = pgd_offset_k(address);
152 if (pgd_none(*pg_dir)) { 162 if (pgd_none(*pg_dir)) {
@@ -161,6 +171,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
161 if (pud_large(*pu_dir)) { 171 if (pud_large(*pu_dir)) {
162 pud_clear(pu_dir); 172 pud_clear(pu_dir);
163 address += PUD_SIZE; 173 address += PUD_SIZE;
174 pages2g++;
164 continue; 175 continue;
165 } 176 }
166 pm_dir = pmd_offset(pu_dir, address); 177 pm_dir = pmd_offset(pu_dir, address);
@@ -171,13 +182,18 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
171 if (pmd_large(*pm_dir)) { 182 if (pmd_large(*pm_dir)) {
172 pmd_clear(pm_dir); 183 pmd_clear(pm_dir);
173 address += PMD_SIZE; 184 address += PMD_SIZE;
185 pages1m++;
174 continue; 186 continue;
175 } 187 }
176 pt_dir = pte_offset_kernel(pm_dir, address); 188 pt_dir = pte_offset_kernel(pm_dir, address);
177 pte_clear(&init_mm, address, pt_dir); 189 pte_clear(&init_mm, address, pt_dir);
178 address += PAGE_SIZE; 190 address += PAGE_SIZE;
191 pages4k++;
179 } 192 }
180 flush_tlb_kernel_range(start, end); 193 flush_tlb_kernel_range(start, end);
194 update_page_count(PG_DIRECT_MAP_4K, -pages4k);
195 update_page_count(PG_DIRECT_MAP_1M, -pages1m);
196 update_page_count(PG_DIRECT_MAP_2G, -pages2g);
181} 197}
182 198
183/* 199/*