author    Heiko Carstens <heiko.carstens@de.ibm.com>    2006-09-29 04:58:41 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-09-29 12:18:03 -0400
commit    2dcea57ae19275451a756a2d5bf96b329487b0e0 (patch)
tree      37b0def2369c106fdf3653ca85e0c1a34ce4d905 /include/asm-s390/pgtable.h
parent    d1807793e1e7e502e3dc047115e9dbc3b50e4534 (diff)
[PATCH] convert s390 page handling macros to functions
Convert s390 page handling macros to functions.  In particular this fixes a
problem with s390's SetPageUptodate macro, which uses its input parameter
twice and can therefore cause subtle bugs.

[akpm@osdl.org: build fix]
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
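To illustrate the class of bug being fixed, below is a minimal, self-contained
sketch of the double-evaluation hazard; the struct, field, and macro names are
invented for illustration and do not appear in this patch.  A statement-expression
macro that expands its argument twice repeats any side effects in that argument,
while the static inline replacement evaluates it exactly once:

#include <stdio.h>

struct demo_page { int uptodate; };

/* Hypothetical macro in the style of the old s390 page macros: the
 * argument 'p' appears twice in the expansion, so any side effects
 * in the argument run twice. */
#define SET_UPTODATE_MACRO(p) ({ (p)->uptodate = 1; (p)->uptodate; })

/* The converted form: the argument is evaluated exactly once. */
static inline int set_uptodate_func(struct demo_page *p)
{
	p->uptodate = 1;
	return p->uptodate;
}

int main(void)
{
	struct demo_page pages[2] = { { 0 }, { 0 } };
	struct demo_page *p = pages;
	int r;

	/* Expands to two evaluations of p++: the store hits pages[0],
	 * the read returns 0 from pages[1], and p advances by two. */
	r = SET_UPTODATE_MACRO(p++);
	printf("macro:    returned %d, p - pages = %td\n", r, p - pages);

	p = pages;
	r = set_uptodate_func(p++);	/* p advances by one */
	printf("function: returned %d, p - pages = %td\n", r, p - pages);
	return 0;
}

Built with gcc (statement expressions are a GNU extension), the macro call
sets the flag on the wrong page, returns the wrong value, and leaves the
pointer two elements ahead, while the function behaves as expected.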
Diffstat (limited to 'include/asm-s390/pgtable.h')
-rw-r--r--    include/asm-s390/pgtable.h    84
1 file changed, 40 insertions(+), 44 deletions(-)
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 83425cdefc91..ecdff13b2505 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -31,9 +31,9 @@
  * the S390 page table tree.
  */
 #ifndef __ASSEMBLY__
+#include <linux/mm_types.h>
 #include <asm/bug.h>
 #include <asm/processor.h>
-#include <linux/threads.h>
 
 struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
 struct mm_struct;
@@ -597,31 +597,31 @@ ptep_establish(struct vm_area_struct *vma,
  * should therefore only be called if it is not mapped in any
  * address space.
  */
-#define page_test_and_clear_dirty(_page)				\
-({									\
-	struct page *__page = (_page);					\
-	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
-	int __skey = page_get_storage_key(__physpage);			\
-	if (__skey & _PAGE_CHANGED)					\
-		page_set_storage_key(__physpage, __skey & ~_PAGE_CHANGED);\
-	(__skey & _PAGE_CHANGED);					\
-})
+static inline int page_test_and_clear_dirty(struct page *page)
+{
+	unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
+	int skey = page_get_storage_key(physpage);
+
+	if (skey & _PAGE_CHANGED)
+		page_set_storage_key(physpage, skey & ~_PAGE_CHANGED);
+	return skey & _PAGE_CHANGED;
+}
 
 /*
  * Test and clear referenced bit in storage key.
  */
-#define page_test_and_clear_young(page)					\
-({									\
-	struct page *__page = (page);					\
-	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);\
-	int __ccode;							\
-	asm volatile(							\
-		" rrbe 0,%1\n"						\
-		" ipm %0\n"						\
-		" srl %0,28\n"						\
-		: "=d" (__ccode) : "a" (__physpage) : "cc");		\
-	(__ccode & 2);							\
-})
+static inline int page_test_and_clear_young(struct page *page)
+{
+	unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
+	int ccode;
+
+	asm volatile (
+		"rrbe 0,%1\n"
+		"ipm %0\n"
+		"srl %0,28\n"
+		: "=d" (ccode) : "a" (physpage) : "cc" );
+	return ccode & 2;
+}
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
@@ -634,32 +634,28 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 	return __pte;
 }
 
-#define mk_pte(pg, pgprot)						\
-({									\
-	struct page *__page = (pg);					\
-	pgprot_t __pgprot = (pgprot);					\
-	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
-	pte_t __pte = mk_pte_phys(__physpage, __pgprot);		\
-	__pte;								\
-})
+static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
+{
+	unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
 
-#define pfn_pte(pfn, pgprot)						\
-({									\
-	pgprot_t __pgprot = (pgprot);					\
-	unsigned long __physpage = __pa((pfn) << PAGE_SHIFT);		\
-	pte_t __pte = mk_pte_phys(__physpage, __pgprot);		\
-	__pte;								\
-})
+	return mk_pte_phys(physpage, pgprot);
+}
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+	unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
+
+	return mk_pte_phys(physpage, pgprot);
+}
 
 #ifdef __s390x__
 
-#define pfn_pmd(pfn, pgprot)						\
-({									\
-	pgprot_t __pgprot = (pgprot);					\
-	unsigned long __physpage = __pa((pfn) << PAGE_SHIFT);		\
-	pmd_t __pmd = __pmd(__physpage + pgprot_val(__pgprot));	\
-	__pmd;								\
-})
+static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
+{
+	unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
+
+	return __pmd(physpage + pgprot_val(pgprot));
+}
 
 #endif /* __s390x__ */
 