Diffstat (limited to 'include/asm-ppc64/pgtable.h')
 include/asm-ppc64/pgtable.h | 117 ++++++++-----------------
 1 file changed, 27 insertions(+), 90 deletions(-)
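
What follows is a cleanup of the ppc64 page table header. PMD_SHIFT and PGDIR_SHIFT are rewritten in terms of the page table index sizes instead of open-coded PAGE_SHIFT arithmetic; the hard-coded PGTABLE_EA_BITS/PGTABLE_EA_MASK pair becomes EADDR_SIZE/EADDR_MASK, computed from those same sizes; the two-bit flag shift (PMD_TO_PTEPAGE_SHIFT) disappears from PMD entries; pfn_pte() becomes a static inline; a misplaced parenthesis in set_pte_at() is fixed so the HPTE flags are masked out of the raw PTE value; and a batch of declarations that never belonged in a page table header (imalloc, HPTE init/insert, io-space macros) leaves the file, presumably for other headers, since this diffstat is limited to the one file.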
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index b984e2747e0c..264c4f7993be 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -17,16 +17,6 @@
 
 #include <asm-generic/pgtable-nopud.h>
 
-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3) + (PAGE_SHIFT - 2))
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
 /*
  * Entries per page directory level.  The PTE level must use a 64b record
  * for each page table entry.  The PMD and PGD level use a 32b record for
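
Note that the two blocks deleted above are not simply dropped: the next hunk re-adds PMD_SHIFT and PGDIR_SHIFT further down the file, after the *_INDEX_SIZE definitions they are now derived from.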
@@ -40,40 +30,30 @@
40#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) 30#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
41#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) 31#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
42 32
43#define USER_PTRS_PER_PGD (1024) 33/* PMD_SHIFT determines what a second-level page table entry can map */
44#define FIRST_USER_ADDRESS 0 34#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
35#define PMD_SIZE (1UL << PMD_SHIFT)
36#define PMD_MASK (~(PMD_SIZE-1))
45 37
46#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ 38/* PGDIR_SHIFT determines what a third-level page table entry can map */
47 PGD_INDEX_SIZE + PAGE_SHIFT) 39#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
40#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
41#define PGDIR_MASK (~(PGDIR_SIZE-1))
42
43#define FIRST_USER_ADDRESS 0
48 44
49/* 45/*
50 * Size of EA range mapped by our pagetables. 46 * Size of EA range mapped by our pagetables.
51 */ 47 */
52#define PGTABLE_EA_BITS 41 48#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
53#define PGTABLE_EA_MASK ((1UL<<PGTABLE_EA_BITS)-1) 49 PGD_INDEX_SIZE + PAGE_SHIFT)
50#define EADDR_MASK ((1UL << EADDR_SIZE) - 1)
54 51
55/* 52/*
56 * Define the address range of the vmalloc VM area. 53 * Define the address range of the vmalloc VM area.
57 */ 54 */
58#define VMALLOC_START (0xD000000000000000ul) 55#define VMALLOC_START (0xD000000000000000ul)
59#define VMALLOC_END (VMALLOC_START + PGTABLE_EA_MASK) 56#define VMALLOC_END (VMALLOC_START + EADDR_MASK)
60
61/*
62 * Define the address range of the imalloc VM area.
63 * (used for ioremap)
64 */
65#define IMALLOC_START (ioremap_bot)
66#define IMALLOC_VMADDR(x) ((unsigned long)(x))
67#define PHBS_IO_BASE (0xE000000000000000ul) /* Reserve 2 gigs for PHBs */
68#define IMALLOC_BASE (0xE000000080000000ul)
69#define IMALLOC_END (IMALLOC_BASE + PGTABLE_EA_MASK)
70
71/*
72 * Define the user address range
73 */
74#define USER_START (0UL)
75#define USER_END (USER_START + PGTABLE_EA_MASK)
76
77 57
78/* 58/*
79 * Bits in a linux-style PTE. These match the bits in the 59 * Bits in a linux-style PTE. These match the bits in the
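
Re-adding the shifts here, after the *_INDEX_SIZE definitions, lets each level's shift be derived from the level below it instead of from PAGE_SHIFT arithmetic that was only valid for one page size, and EADDR_SIZE/EADDR_MASK replace the hard-coded PGTABLE_EA_BITS of 41. Assuming the 4K-page values this header uses elsewhere (PAGE_SHIFT 12, PTE_INDEX_SIZE 9, PMD_INDEX_SIZE and PGD_INDEX_SIZE 10; these values are an assumption, not part of this diff), the new expressions reproduce the old constants exactly. A stand-alone compile-time sketch of that equivalence:

    /* Sketch only: index-size values assumed from the header's 4K-page
     * configuration, not taken from this diff. */
    #define PAGE_SHIFT      12
    #define PTE_INDEX_SIZE  9
    #define PMD_INDEX_SIZE  10
    #define PGD_INDEX_SIZE  10

    #define PMD_SHIFT       (PAGE_SHIFT + PTE_INDEX_SIZE)        /* 21 */
    #define PGDIR_SHIFT     (PMD_SHIFT + PMD_INDEX_SIZE)         /* 31 */
    #define EADDR_SIZE      (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
                             PGD_INDEX_SIZE + PAGE_SHIFT)        /* 41 */

    /* The formulas this patch removes, for comparison: */
    #define OLD_PMD_SHIFT   (PAGE_SHIFT + PAGE_SHIFT - 3)
    #define OLD_PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3) + (PAGE_SHIFT - 2))

    /* A negative array size is a compile error, so these check the claim. */
    typedef char pmd_shift_unchanged[PMD_SHIFT == OLD_PMD_SHIFT ? 1 : -1];
    typedef char pgdir_shift_unchanged[PGDIR_SHIFT == OLD_PGDIR_SHIFT ? 1 : -1];
    typedef char eaddr_is_41_bits[EADDR_SIZE == 41 ? 1 : -1];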
@@ -168,10 +148,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 /* shift to put page number into pte */
 #define PTE_SHIFT (17)
 
-/* We allow 2^41 bytes of real memory, so we need 29 bits in the PMD
- * to give the PTE page number.  The bottom two bits are for flags. */
-#define PMD_TO_PTEPAGE_SHIFT (2)
-
 #ifdef CONFIG_HUGETLB_PAGE
 
 #ifndef __ASSEMBLY__
@@ -200,13 +176,14 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
  */
 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
 
-#define pfn_pte(pfn,pgprot) \
-({ \
-        pte_t pte; \
-        pte_val(pte) = ((unsigned long)(pfn) << PTE_SHIFT) | \
-                        pgprot_val(pgprot); \
-        pte; \
-})
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+        pte_t pte;
+
+
+        pte_val(pte) = (pfn << PTE_SHIFT) | pgprot_val(pgprot);
+        return pte;
+}
 
 #define pte_modify(_pte, newprot) \
   (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
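
Converting pfn_pte() from a GCC statement-expression macro to a static inline gives it a real prototype: the old (unsigned long) cast silently accepted a pointer or any other scalar as the pfn, whereas the inline makes the compiler diagnose such calls, and its arguments are now evaluated exactly once. A stand-alone sketch of the new shape, with illustrative stand-ins for the header's types:

    /* Illustrative stand-ins, not the real header definitions. */
    typedef struct { unsigned long pte; } pte_t;
    typedef struct { unsigned long pgprot; } pgprot_t;
    #define pte_val(x)      ((x).pte)
    #define pgprot_val(x)   ((x).pgprot)
    #define PTE_SHIFT       17

    static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
    {
            pte_t pte;

            pte_val(pte) = (pfn << PTE_SHIFT) | pgprot_val(pgprot);
            return pte;
    }

    /* With the old macro, pfn_pte(page_ptr, prot) compiled silently
     * because of the (unsigned long) cast; the prototype above makes
     * the compiler warn about the pointer-to-integer conversion. */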
@@ -220,13 +197,12 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #define pte_page(x) pfn_to_page(pte_pfn(x))
 
 #define pmd_set(pmdp, ptep) \
-        (pmd_val(*(pmdp)) = (__ba_to_bpn(ptep) << PMD_TO_PTEPAGE_SHIFT))
+        (pmd_val(*(pmdp)) = __ba_to_bpn(ptep))
 #define pmd_none(pmd) (!pmd_val(pmd))
 #define pmd_bad(pmd) (pmd_val(pmd) == 0)
 #define pmd_present(pmd) (pmd_val(pmd) != 0)
 #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
-#define pmd_page_kernel(pmd) \
-        (__bpn_to_ba(pmd_val(pmd) >> PMD_TO_PTEPAGE_SHIFT))
+#define pmd_page_kernel(pmd) (__bpn_to_ba(pmd_val(pmd)))
 #define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd))
 
 #define pud_set(pudp, pmdp) (pud_val(*(pudp)) = (__ba_to_bpn(pmdp)))
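
With PMD_TO_PTEPAGE_SHIFT gone (this hunk, plus the definition removed in an earlier hunk), a PMD entry is now simply the page number of the PTE page it points to; the old encoding shifted that number up by two to reserve flag bits that apparently never got used. A minimal sketch of the simplified round trip, using stand-in converters (the real __ba_to_bpn()/__bpn_to_ba() live elsewhere and differ in detail):

    /* Stand-ins modelled on the header, not the real definitions. */
    typedef struct { unsigned int pmd; } pmd_t;
    #define pmd_val(x)      ((x).pmd)
    #define PAGE_SHIFT      12
    #define KERNELBASE      0xC000000000000000UL
    #define __ba_to_bpn(x)  ((((unsigned long)(x)) - KERNELBASE) >> PAGE_SHIFT)
    #define __bpn_to_ba(x)  ((((unsigned long)(x)) << PAGE_SHIFT) + KERNELBASE)

    /* pmd_set(): the PMD entry is now just the PTE page's page number */
    static void pmd_set_sketch(pmd_t *pmdp, void *ptep)
    {
            pmd_val(*pmdp) = __ba_to_bpn(ptep);
    }

    /* pmd_page_kernel(): recover the PTE page's kernel address */
    static void *pmd_page_kernel_sketch(pmd_t pmd)
    {
            return (void *)__bpn_to_ba(pmd_val(pmd));
    }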
@@ -266,8 +242,6 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 /* to find an entry in the ioremap page-table-directory */
 #define pgd_offset_i(address) (ioremap_pgd + pgd_index(address))
 
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -442,7 +416,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                 pte_clear(mm, addr, ptep);
                 flush_tlb_pending();
         }
-        *ptep = __pte(pte_val(pte)) & ~_PAGE_HPTEFLAGS;
+        *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 }
 
 /* Set the dirty and/or accessed bits atomically in a linux PTE, this
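
This hunk is a genuine fix rather than a cleanup: the HPTE flag mask was being applied outside __pte(), that is, to the pte_t object instead of to the raw value. With a bare-integer pte_t the two forms happen to compute the same word, but with the struct-typed pte_t used under STRICT_MM_TYPECHECKS the old form does not even compile. A minimal sketch, with stand-in definitions and an illustrative mask value:

    typedef struct { unsigned long pte; } pte_t;
    #define pte_val(x)        ((x).pte)
    #define __pte(x)          ((pte_t) { (x) })
    #define _PAGE_HPTEFLAGS   0xff0UL   /* stand-in, not the real mask */

    static pte_t strip_hpte_flags(pte_t pte)
    {
            /* old:  __pte(pte_val(pte)) & ~_PAGE_HPTEFLAGS;
             * applies & to a struct pte_t, which is a compile error
             * here.  The fixed form masks the raw value first: */
            return __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
    }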
@@ -487,18 +461,13 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
 
 extern unsigned long ioremap_bot, ioremap_base;
 
-#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
-#define pte_ERROR(e) \
-        printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
         printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
 #define pgd_ERROR(e) \
         printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))
 
-extern pgd_t swapper_pg_dir[1024];
-extern pgd_t ioremap_dir[1024];
+extern pgd_t swapper_pg_dir[];
+extern pgd_t ioremap_dir[];
 
 extern void paging_init(void);
 
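
Dropping the hard-coded [1024] bound from the extern declarations leaves the array size to the definition, which can then be written as PTRS_PER_PGD instead of a literal that silently duplicated it. A small stand-alone sketch of the pattern (pgd_t and the size value are stand-ins):

    typedef struct { unsigned int pgd; } pgd_t;   /* stand-in type */
    #define PGD_INDEX_SIZE  10
    #define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)

    extern pgd_t swapper_pg_dir[];        /* header: no bound needed */

    pgd_t swapper_pg_dir[PTRS_PER_PGD];   /* definition owns the size */

    static unsigned int first_pgd_entry(void)
    {
            /* indexing through the incomplete-array declaration works */
            return swapper_pg_dir[0].pgd;
    }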
@@ -540,43 +509,11 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
  */
 #define kern_addr_valid(addr) (1)
 
-#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
-        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
-
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
         remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 void pgtable_cache_init(void);
 
-extern void hpte_init_native(void);
-extern void hpte_init_lpar(void);
-extern void hpte_init_iSeries(void);
-
-/* imalloc region types */
-#define IM_REGION_UNUSED 0x1
-#define IM_REGION_SUBSET 0x2
-#define IM_REGION_EXISTS 0x4
-#define IM_REGION_OVERLAP 0x8
-#define IM_REGION_SUPERSET 0x10
-
-extern struct vm_struct * im_get_free_area(unsigned long size);
-extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
-                        int region_type);
-unsigned long im_free(void *addr);
-
-extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
-                                     unsigned long va, unsigned long prpn,
-                                     int secondary, unsigned long hpteflags,
-                                     int bolted, int large);
-
-extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
-                               unsigned long prpn, int secondary,
-                               unsigned long hpteflags, int bolted, int large);
-
 /*
  * find_linux_pte returns the address of a linux pte for a given
  * effective address and directory.  If not found, it returns zero.
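
The deleted io_remap_page_range(), the MK_IOSPACE_PFN()/GET_IOSPACE()/GET_PFN() trio, the hpte_init_*() and *_hpte_insert() prototypes, and the imalloc region machinery are hash page table and ioremap implementation details rather than page table layout, which is why they leave this header; with the diffstat limited to this one file, their new home is not visible in this excerpt.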