Diffstat (limited to 'include')
 include/asm-powerpc/pgtable-ppc32.h | 22 ----------------------
 include/asm-powerpc/pgtable-ppc64.h | 29 -----------------------------
 include/asm-powerpc/pgtable.h       | 28 ++++++++++++++++++++++++++++
 3 files changed, 28 insertions(+), 51 deletions(-)
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 63c535d02535..5b14536d4af8 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -6,11 +6,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/threads.h>
-#include <asm/processor.h>	/* For TASK_SIZE */
-#include <asm/mmu.h>
-#include <asm/page.h>
 #include <asm/io.h>		/* For sub-arch specific PPC_PIN_SIZE */
-struct mm_struct;
 
 extern unsigned long va_to_phys(unsigned long address);
 extern pte_t *va_to_pte(unsigned long address);
@@ -488,14 +484,6 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 #define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
 				pgprot_val(prot))
 #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 #endif /* __ASSEMBLY__ */
 
 #define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
@@ -730,10 +718,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
-extern void paging_init(void);
-
 /*
  * Encode and decode a swap entry.
  * Note that the bits we use in a PTE for representing a swap entry
@@ -751,12 +735,6 @@ extern void paging_init(void);
 #define pte_to_pgoff(pte) (pte_val(pte) >> 3)
 #define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
 
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr) (1)
-
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * No page table caches to initialise
  */
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 9b0f51ccad05..d61178dea670 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -7,11 +7,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/stddef.h>
-#include <asm/processor.h>	/* For TASK_SIZE */
-#include <asm/mmu.h>
-#include <asm/page.h>
 #include <asm/tlbflush.h>
-struct mm_struct;
 #endif /* __ASSEMBLY__ */
 
 #ifdef CONFIG_PPC_64K_PAGES
@@ -143,16 +139,6 @@ struct mm_struct;
 #define __S110 PAGE_SHARED_X
 #define __S111 PAGE_SHARED_X
 
-#ifndef __ASSEMBLY__
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-#endif /* __ASSEMBLY__ */
-
 #ifdef CONFIG_HUGETLB_PAGE
 
 #define HAVE_ARCH_UNMAPPED_AREA
@@ -447,10 +433,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
-extern pgd_t swapper_pg_dir[];
-
-extern void paging_init(void);
-
 /* Encode and de-code a swap entry */
 #define __swp_type(entry) (((entry).val >> 1) & 0x3f)
 #define __swp_offset(entry) ((entry).val >> 8)
@@ -461,17 +443,6 @@ extern void paging_init(void);
 #define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
 #define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT)
 
-/*
- * kern_addr_valid is intended to indicate whether an address is a valid
- * kernel address. Most 32-bit archs define it as always true (like this)
- * but most 64-bit archs actually perform a test. What should we do here?
- * The only use is in fs/ncpfs/dir.c
- */
-#define kern_addr_valid(addr) (1)
-
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 void pgtable_cache_init(void);
 
 /*
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index 78bf4ae712a6..d18ffe7bc7c4 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -2,6 +2,13 @@
 #define _ASM_POWERPC_PGTABLE_H
 #ifdef __KERNEL__
 
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>	/* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+struct mm_struct;
+#endif /* !__ASSEMBLY__ */
+
 #if defined(CONFIG_PPC64)
 # include <asm/pgtable-ppc64.h>
 #else
@@ -9,6 +16,27 @@
 #endif
 
 #ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern pgd_t swapper_pg_dir[];
+
+extern void paging_init(void);
+
+/*
+ * kern_addr_valid is intended to indicate whether an address is a valid
+ * kernel address. Most 32-bit archs define it as always true (like this)
+ * but most 64-bit archs actually perform a test. What should we do here?
+ */
+#define kern_addr_valid(addr) (1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+	remap_pfn_range(vma, vaddr, pfn, size, prot)
+
 #include <asm-generic/pgtable.h>
 #endif /* __ASSEMBLY__ */
 