author    Martin Schwidefsky <schwidefsky@de.ibm.com>    2008-02-08 07:22:04 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-08 12:22:42 -0500
commit    2f569afd9ced9ebec9a6eb3dbf6f83429be0a7b4 (patch)
tree      23a31763887d9505e62e9d7cc8ec2fa4b86bd380 /mm/memory.c
parent    13214adf738abc92b0a00c0763fd3be79eebaa7c (diff)
CONFIG_HIGHPTE vs. sub-page page tables.
Background: I've implemented 1K/2K page tables for s390.  These sub-page page tables are required to properly support the s390 virtualization instruction with KVM.  The SIE instruction requires that the page tables have 256 page table entries (pte) followed by 256 page status table entries (pgste).  The pgstes are only required if the process is using the SIE instruction.  The pgstes are updated by the hardware and by the hypervisor for a number of reasons, one of them being dirty and reference bit tracking.  To avoid wasting memory, the standard pte table allocation should return 1K/2K (31/64 bit), and 2K/4K if the process is using SIE.

Problem: Page size on s390 is 4K, page table size is 1K or 2K.  That means the s390 version of pte_alloc_one cannot return a pointer to a struct page.  Trouble is that with the CONFIG_HIGHPTE feature on x86, pte_alloc_one cannot return a pointer to a pte either, since that would require more than 32 bits for the return value of pte_alloc_one (and the pte * would not be accessible anyway, since it's not kmapped).

Solution: The only solution I found to this dilemma is a new typedef: a pgtable_t.  For s390 pgtable_t will be a (pte *) - to be introduced with a later patch.  For everybody else it will be a (struct page *).  The additional problem with the initialization of the ptl lock and the NR_PAGETABLE accounting is solved with a constructor pgtable_page_ctor and a destructor pgtable_page_dtor.  The page table allocation and free functions need to call these two whenever a page table page is allocated or freed.  pmd_populate will get a pgtable_t instead of a struct page pointer.  To get the pgtable_t back from a pmd entry that has been installed with pmd_populate, a new function pmd_pgtable is added.  It replaces the pmd_page call in free_pte_range and apply_to_pte_range.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
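For readers unfamiliar with the new helpers, here is a minimal sketch of the typedef and the constructor/destructor pair described above, assuming an architecture where pgtable_t stays a struct page *.  The names (pgtable_t, pgtable_page_ctor, pgtable_page_dtor) are the ones the message introduces; the bodies are an illustrative reconstruction from the description, not necessarily the exact in-tree definitions:

	/* Most architectures keep a struct page as the page table handle. */
	typedef struct page *pgtable_t;

	/* Run whenever a page table page is allocated: initialize the
	 * split ptlock and account the page under NR_PAGETABLE. */
	#define pgtable_page_ctor(page)				\
	do {							\
		pte_lock_init(page);				\
		inc_zone_page_state(page, NR_PAGETABLE);	\
	} while (0)

	/* Run whenever a page table page is freed: undo both steps. */
	#define pgtable_page_dtor(page)				\
	do {							\
		pte_lock_deinit(page);				\
		dec_zone_page_state(page, NR_PAGETABLE);	\
	} while (0)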
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  32
1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 153a54b2013c..e5628a5fd678 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -134,11 +134,9 @@ void pmd_clear_bad(pmd_t *pmd)
  */
 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
 {
-	struct page *page = pmd_page(*pmd);
+	pgtable_t token = pmd_pgtable(*pmd);
 	pmd_clear(pmd);
-	pte_lock_deinit(page);
-	pte_free_tlb(tlb, page);
-	dec_zone_page_state(page, NR_PAGETABLE);
+	pte_free_tlb(tlb, token);
 	tlb->mm->nr_ptes--;
 }
 
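The pte_lock_deinit() and dec_zone_page_state() calls removed from free_pte_range() above do not disappear; per the commit message they move behind the architecture's free path as a pgtable_page_dtor() call.  A plausible sketch for a struct-page architecture, modelled on the x86 flavour (the exact macro varies per arch):

	#define __pte_free_tlb(tlb, pte)		\
	do {						\
		pgtable_page_dtor(pte);			\
		tlb_remove_page((tlb), (pte));		\
	} while (0)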
@@ -309,21 +307,19 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 
 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
-	struct page *new = pte_alloc_one(mm, address);
+	pgtable_t new = pte_alloc_one(mm, address);
 	if (!new)
 		return -ENOMEM;
 
-	pte_lock_init(new);
 	spin_lock(&mm->page_table_lock);
-	if (pmd_present(*pmd)) {	/* Another has populated it */
-		pte_lock_deinit(new);
-		pte_free(mm, new);
-	} else {
+	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
 		mm->nr_ptes++;
-		inc_zone_page_state(new, NR_PAGETABLE);
 		pmd_populate(mm, pmd, new);
+		new = NULL;
 	}
 	spin_unlock(&mm->page_table_lock);
+	if (new)
+		pte_free(mm, new);
 	return 0;
 }
 
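Two things happen in this hunk.  First, pte_lock_init() and inc_zone_page_state() are gone from __pte_alloc() because the allocator now owns them: pte_alloc_one() is expected to call pgtable_page_ctor() on every page it hands out.  A hedged sketch for a struct-page architecture (the GFP flags here are illustrative; e.g. x86 adds __GFP_HIGHMEM under CONFIG_HIGHPTE):

	pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
	{
		struct page *pte;

		/* One zeroed page per pte table in this configuration. */
		pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
		if (pte)
			pgtable_page_ctor(pte);	/* ptlock + NR_PAGETABLE */
		return pte;
	}

Second, the race handling is restructured: the winner of the pmd_present() check installs the table and clears new, so a losing thread now frees its page after page_table_lock has been dropped rather than under it.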
@@ -334,11 +330,13 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 		return -ENOMEM;
 
 	spin_lock(&init_mm.page_table_lock);
-	if (pmd_present(*pmd))		/* Another has populated it */
-		pte_free_kernel(&init_mm, new);
-	else
+	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
 		pmd_populate_kernel(&init_mm, pmd, new);
+		new = NULL;
+	}
 	spin_unlock(&init_mm.page_table_lock);
+	if (new)
+		pte_free_kernel(&init_mm, new);
 	return 0;
 }
 
@@ -1390,7 +1388,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
 	pte_t *pte;
 	int err;
-	struct page *pmd_page;
+	pgtable_t token;
 	spinlock_t *uninitialized_var(ptl);
 
 	pte = (mm == &init_mm) ?
@@ -1401,10 +1399,10 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 
 	BUG_ON(pmd_huge(*pmd));
 
-	pmd_page = pmd_page(*pmd);
+	token = pmd_pgtable(*pmd);
 
 	do {
-		err = fn(pte, pmd_page, addr, data);
+		err = fn(pte, token, addr, data);
 		if (err)
 			break;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
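For the common case where pgtable_t remains a struct page *, the pmd_pgtable() accessor used in the hunks above can simply fall back to the old lookup; a representative per-arch definition, as a sketch:

	/* Recover the pgtable_t that pmd_populate() installed. */
	#define pmd_pgtable(pmd) pmd_page(pmd)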