author    Heiko Carstens <heiko.carstens@de.ibm.com>  2012-10-17 06:18:05 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2012-11-23 05:14:25 -0500
commit    f7817968d03df390d77d3af1b13298efa4f31047 (patch)
tree      11e355fc56ce3ff4fd433a3fa282881a6481d676 /arch/s390/mm
parent    18da236908793abccebc6f365fbe6e95a5b41db0 (diff)
s390/mm,vmemmap: use 1MB frames for vmemmap
Use 1MB frames for vmemmap if EDAT1 is available in order to reduce TLB pressure. Always use a 1MB frame even if it is only partially needed for struct pages; otherwise we would end up with a mix of large frame and page mappings, because vmemmap_populate gets called for each section (256MB -> 3.5MB memmap) separately. Worst case is that we would waste 512KB.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
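As a quick check of those numbers (a standalone sketch, not part of the commit; the 56-byte sizeof(struct page) is an assumption for this configuration), the per-section memmap and the worst-case rounding waste work out as follows:

/* Hypothetical userspace sketch: per-section memmap size and the
 * worst-case waste when rounding up to whole 1MB segments.
 * Assumes 4KB pages and a 56-byte struct page. */
#include <stdio.h>

int main(void)
{
	unsigned long section_size = 256UL << 20;	/* 256MB memory section */
	unsigned long page_size = 4096UL;		/* 4KB base pages */
	unsigned long struct_page_size = 56UL;		/* assumed sizeof(struct page) */
	unsigned long seg_size = 1UL << 20;		/* 1MB EDAT1 segment frame */
	unsigned long pages, memmap, mapped;

	pages = section_size / page_size;		/* 65536 pages */
	memmap = pages * struct_page_size;		/* 3.5MB of struct pages */
	mapped = (memmap + seg_size - 1) / seg_size * seg_size;	/* rounded up to 4MB */

	printf("memmap per section: %lu KB\n", memmap >> 10);		/* 3584 KB */
	printf("worst-case waste:   %lu KB\n", (mapped - memmap) >> 10);	/* 512 KB */
	return 0;
}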
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/vmem.c  26
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index bf37a094a46..6ed1426d27c 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -205,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 	start_addr = (unsigned long) start;
 	end_addr = (unsigned long) (start + nr);
 
-	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
+	for (address = start_addr; address < end_addr;) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
 			pu_dir = vmem_pud_alloc();
@@ -224,10 +224,33 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 
 		pm_dir = pmd_offset(pu_dir, address);
 		if (pmd_none(*pm_dir)) {
+#ifdef CONFIG_64BIT
+			/* Use 1MB frames for vmemmap if available. We always
+			 * use large frames even if they are only partially
+			 * used.
+			 * Otherwise we would have also page tables since
+			 * vmemmap_populate gets called for each section
+			 * separately. */
+			if (MACHINE_HAS_EDAT1) {
+				void *new_page;
+
+				new_page = vmemmap_alloc_block(PMD_SIZE, node);
+				if (!new_page)
+					goto out;
+				pte = mk_pte_phys(__pa(new_page), PAGE_RW);
+				pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+				pmd_val(*pm_dir) = pte_val(pte);
+				address = (address + PMD_SIZE) & PMD_MASK;
+				continue;
+			}
+#endif
 			pt_dir = vmem_pte_alloc(address);
 			if (!pt_dir)
 				goto out;
 			pmd_populate(&init_mm, pm_dir, pt_dir);
+		} else if (pmd_large(*pm_dir)) {
+			address = (address + PMD_SIZE) & PMD_MASK;
+			continue;
 		}
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
@@ -240,6 +263,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
 			*pt_dir = pte;
 		}
+		address += PAGE_SIZE;
 	}
 	memset(start, 0, nr * sizeof(struct page));
 	ret = 0;
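Note how the patch moves the increment out of the for statement: the loop now advances by PMD_SIZE after installing or skipping a 1MB mapping, and by PAGE_SIZE otherwise. A minimal standalone sketch of that stride logic (hypothetical constants and a stand-in predicate, not kernel code):

/* Hypothetical sketch of the patched loop's stride: step a whole
 * segment when a large mapping covers the address, else one page. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(1UL << 20)		/* 1MB segment */
#define PMD_MASK	(~(PMD_SIZE - 1))

/* stand-in for pmd_large(): pretend every other segment is large */
static int covered_by_large_frame(unsigned long address)
{
	return (address / PMD_SIZE) % 2 == 0;
}

int main(void)
{
	unsigned long address, steps = 0;
	unsigned long start = 0, end = 4 * PMD_SIZE;

	for (address = start; address < end;) {
		if (covered_by_large_frame(address)) {
			/* whole segment mapped: jump to the next segment */
			address = (address + PMD_SIZE) & PMD_MASK;
		} else {
			/* page-table mapping: step one page at a time */
			address += PAGE_SIZE;
		}
		steps++;
	}
	printf("covered %lu MB in %lu iterations\n",
	       (end - start) >> 20, steps);
	return 0;
}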