aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2007-05-02 13:27:10 -0400
committerAndi Kleen <andi@basil.nowhere.org>2007-05-02 13:27:10 -0400
commit2bff73830c3df5f575d3bc21bf19df1a10bf7091 (patch)
treeaac7c05edb493a31d3b709462a2091ef16c0cbb3
parent05f36927eddd83e2840a981ef4d9af754dcb86e9 (diff)
[PATCH] x86-64: use lru instead of page->index and page->private for pgd lists management.
x86_64 currently simulates a list using the index and private fields of the page struct. Seems that the code was inherited from i386. But x86_64 does not use the slab to allocate pgds and pmds etc. So the lru field is not used by the slab and therefore available. This patch uses standard list operations on page->lru to realize pgd tracking. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andi Kleen <ak@suse.de> Cc: Andi Kleen <ak@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--arch/x86_64/mm/fault.c5
-rw-r--r--include/asm-x86_64/pgalloc.h14
-rw-r--r--include/asm-x86_64/pgtable.h2
3 files changed, 6 insertions, 15 deletions
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 6ada7231f3ab..de99dba2c515 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -585,7 +585,7 @@ do_sigbus:
 }
 
 DEFINE_SPINLOCK(pgd_lock);
-struct page *pgd_list;
+LIST_HEAD(pgd_list);
 
 void vmalloc_sync_all(void)
 {
@@ -605,8 +605,7 @@ void vmalloc_sync_all(void)
 		if (pgd_none(*pgd_ref))
 			continue;
 		spin_lock(&pgd_lock);
-		for (page = pgd_list; page;
-		     page = (struct page *)page->index) {
+		list_for_each_entry(page, &pgd_list, lru) {
 			pgd_t *pgd;
 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
 			if (pgd_none(*pgd))
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index 31d497171969..8bb564687860 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -44,24 +44,16 @@ static inline void pgd_list_add(pgd_t *pgd)
 	struct page *page = virt_to_page(pgd);
 
 	spin_lock(&pgd_lock);
-	page->index = (pgoff_t)pgd_list;
-	if (pgd_list)
-		pgd_list->private = (unsigned long)&page->index;
-	pgd_list = page;
-	page->private = (unsigned long)&pgd_list;
+	list_add(&page->lru, &pgd_list);
 	spin_unlock(&pgd_lock);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
-	struct page *next, **pprev, *page = virt_to_page(pgd);
+	struct page *page = virt_to_page(pgd);
 
 	spin_lock(&pgd_lock);
-	next = (struct page *)page->index;
-	pprev = (struct page **)page->private;
-	*pprev = next;
-	if (next)
-		next->private = (unsigned long)pprev;
+	list_del(&page->lru);
 	spin_unlock(&pgd_lock);
 }
 
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index c1865e38c7b7..599993f6ba84 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -410,7 +410,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
 extern spinlock_t pgd_lock;
-extern struct page *pgd_list;
+extern struct list_head pgd_list;
 void vmalloc_sync_all(void);
 
 extern int kern_addr_valid(unsigned long addr);