Diffstat (limited to 'include/asm-ppc64/pgtable.h')
-rw-r--r--  include/asm-ppc64/pgtable.h | 156
1 file changed, 49 insertions(+), 107 deletions(-)
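
The patch below derives PMD_SHIFT, PGDIR_SHIFT and EADDR_SIZE from the page-table index sizes instead of spelling them out in terms of PAGE_SHIFT. A minimal standalone sketch of that arithmetic follows, assuming the 4K-page index sizes of this era (PAGE_SHIFT = 12, PTE_INDEX_SIZE = 9, PMD_INDEX_SIZE = 10, PGD_INDEX_SIZE = 10); these values come from asm-ppc64/page.h, are not shown in this diff, and are stated here only as assumptions. Under them both the old and the new forms give 21, 31 and 41 bits, the 41 matching the PGTABLE_EA_BITS value the patch removes.

/* Sketch only, not part of the patch: check the shift arithmetic used by
 * the new defines.  The index sizes are assumed 4K-page values from
 * asm-ppc64/page.h of this era; they do not appear in the diff itself. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT      12   /* assumed: 4K pages */
#define PTE_INDEX_SIZE   9   /* assumed */
#define PMD_INDEX_SIZE  10   /* assumed */
#define PGD_INDEX_SIZE  10   /* assumed */

/* New definitions as introduced by the patch */
#define PMD_SHIFT   (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
#define EADDR_SIZE  (PTE_INDEX_SIZE + PMD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)

int main(void)
{
	/* Old definitions expressed the same shifts directly via PAGE_SHIFT. */
	assert(PMD_SHIFT   == PAGE_SHIFT + PAGE_SHIFT - 3);                      /* 21 */
	assert(PGDIR_SHIFT == PAGE_SHIFT + (PAGE_SHIFT - 3) + (PAGE_SHIFT - 2)); /* 31 */
	assert(EADDR_SIZE  == 41);  /* matches the removed PGTABLE_EA_BITS */
	printf("PMD_SHIFT=%d PGDIR_SHIFT=%d EADDR_SIZE=%d\n",
	       PMD_SHIFT, PGDIR_SHIFT, EADDR_SIZE);
	return 0;
}
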
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index a26120517c54..264c4f7993be 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -1,8 +1,6 @@
 #ifndef _PPC64_PGTABLE_H
 #define _PPC64_PGTABLE_H
 
-#include <asm-generic/4level-fixup.h>
-
 /*
  * This file contains the functions and defines necessary to modify and use
  * the ppc64 hashed page table.
@@ -17,15 +15,7 @@
 #include <asm/tlbflush.h>
 #endif /* __ASSEMBLY__ */
 
-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3) + (PAGE_SHIFT - 2))
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#include <asm-generic/pgtable-nopud.h>
 
 /*
  * Entries per page directory level. The PTE level must use a 64b record
@@ -40,40 +30,30 @@
 #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
 #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
 
-#define USER_PTRS_PER_PGD (1024)
-#define FIRST_USER_ADDRESS 0
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
 
-#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
-		    PGD_INDEX_SIZE + PAGE_SHIFT)
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define FIRST_USER_ADDRESS 0
 
 /*
  * Size of EA range mapped by our pagetables.
  */
-#define PGTABLE_EA_BITS 41
-#define PGTABLE_EA_MASK ((1UL<<PGTABLE_EA_BITS)-1)
+#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+		    PGD_INDEX_SIZE + PAGE_SHIFT)
+#define EADDR_MASK ((1UL << EADDR_SIZE) - 1)
 
 /*
  * Define the address range of the vmalloc VM area.
  */
 #define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_END (VMALLOC_START + PGTABLE_EA_MASK)
-
-/*
- * Define the address range of the imalloc VM area.
- * (used for ioremap)
- */
-#define IMALLOC_START (ioremap_bot)
-#define IMALLOC_VMADDR(x) ((unsigned long)(x))
-#define PHBS_IO_BASE (0xE000000000000000ul) /* Reserve 2 gigs for PHBs */
-#define IMALLOC_BASE (0xE000000080000000ul)
-#define IMALLOC_END (IMALLOC_BASE + PGTABLE_EA_MASK)
-
-/*
- * Define the user address range
- */
-#define USER_START (0UL)
-#define USER_END (USER_START + PGTABLE_EA_MASK)
-
+#define VMALLOC_END (VMALLOC_START + EADDR_MASK)
 
 /*
  * Bits in a linux-style PTE. These match the bits in the
@@ -168,10 +148,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 /* shift to put page number into pte */
 #define PTE_SHIFT (17)
 
-/* We allow 2^41 bytes of real memory, so we need 29 bits in the PMD
- * to give the PTE page number. The bottom two bits are for flags. */
-#define PMD_TO_PTEPAGE_SHIFT (2)
-
 #ifdef CONFIG_HUGETLB_PAGE
 
 #ifndef __ASSEMBLY__
@@ -200,13 +176,14 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
  */
 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
 
-#define pfn_pte(pfn,pgprot) \
-({ \
-	pte_t pte; \
-	pte_val(pte) = ((unsigned long)(pfn) << PTE_SHIFT) | \
-			pgprot_val(pgprot); \
-	pte; \
-})
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+	pte_t pte;
+
+
+	pte_val(pte) = (pfn << PTE_SHIFT) | pgprot_val(pgprot);
+	return pte;
+}
 
 #define pte_modify(_pte, newprot) \
 	(__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
@@ -220,20 +197,20 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #define pte_page(x) pfn_to_page(pte_pfn(x))
 
 #define pmd_set(pmdp, ptep) \
-	(pmd_val(*(pmdp)) = (__ba_to_bpn(ptep) << PMD_TO_PTEPAGE_SHIFT))
+	(pmd_val(*(pmdp)) = __ba_to_bpn(ptep))
 #define pmd_none(pmd) (!pmd_val(pmd))
 #define pmd_bad(pmd) (pmd_val(pmd) == 0)
 #define pmd_present(pmd) (pmd_val(pmd) != 0)
 #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
-#define pmd_page_kernel(pmd) \
-	(__bpn_to_ba(pmd_val(pmd) >> PMD_TO_PTEPAGE_SHIFT))
+#define pmd_page_kernel(pmd) (__bpn_to_ba(pmd_val(pmd)))
 #define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd))
-#define pgd_set(pgdp, pmdp) (pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp)))
-#define pgd_none(pgd) (!pgd_val(pgd))
-#define pgd_bad(pgd) ((pgd_val(pgd)) == 0)
-#define pgd_present(pgd) (pgd_val(pgd) != 0UL)
-#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
-#define pgd_page(pgd) (__bpn_to_ba(pgd_val(pgd)))
+
+#define pud_set(pudp, pmdp) (pud_val(*(pudp)) = (__ba_to_bpn(pmdp)))
+#define pud_none(pud) (!pud_val(pud))
+#define pud_bad(pud) ((pud_val(pud)) == 0UL)
+#define pud_present(pud) (pud_val(pud) != 0UL)
+#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
+#define pud_page(pud) (__bpn_to_ba(pud_val(pud)))
 
 /*
  * Find an entry in a page-table-directory. We combine the address region
@@ -245,12 +222,13 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
 
 /* Find an entry in the second-level page table.. */
-#define pmd_offset(dir,addr) \
-	((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+#define pmd_offset(pudp,addr) \
+	((pmd_t *) pud_page(*(pudp)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
 
 /* Find an entry in the third-level page table.. */
 #define pte_offset_kernel(dir,addr) \
-	((pte_t *) pmd_page_kernel(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+	((pte_t *) pmd_page_kernel(*(dir)) \
+	 + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 
 #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
 #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
@@ -264,8 +242,6 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 /* to find an entry in the ioremap page-table-directory */
 #define pgd_offset_i(address) (ioremap_pgd + pgd_index(address))
 
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -440,7 +416,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 		pte_clear(mm, addr, ptep);
 		flush_tlb_pending();
 	}
-	*ptep = __pte(pte_val(pte)) & ~_PAGE_HPTEFLAGS;
+	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 }
 
 /* Set the dirty and/or accessed bits atomically in a linux PTE, this
@@ -485,18 +461,13 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
 
 extern unsigned long ioremap_bot, ioremap_base;
 
-#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
-#define pte_ERROR(e) \
-	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
 	printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))
 
-extern pgd_t swapper_pg_dir[1024];
-extern pgd_t ioremap_dir[1024];
+extern pgd_t swapper_pg_dir[];
+extern pgd_t ioremap_dir[];
 
 extern void paging_init(void);
 
@@ -538,43 +509,11 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
  */
 #define kern_addr_valid(addr) (1)
 
-#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
-	remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
-
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 void pgtable_cache_init(void);
 
-extern void hpte_init_native(void);
-extern void hpte_init_lpar(void);
-extern void hpte_init_iSeries(void);
-
-/* imalloc region types */
-#define IM_REGION_UNUSED 0x1
-#define IM_REGION_SUBSET 0x2
-#define IM_REGION_EXISTS 0x4
-#define IM_REGION_OVERLAP 0x8
-#define IM_REGION_SUPERSET 0x10
-
-extern struct vm_struct * im_get_free_area(unsigned long size);
-extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
-			int region_type);
-unsigned long im_free(void *addr);
-
-extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
-				     unsigned long va, unsigned long prpn,
-				     int secondary, unsigned long hpteflags,
-				     int bolted, int large);
-
-extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
-			       unsigned long prpn, int secondary,
-			       unsigned long hpteflags, int bolted, int large);
-
 /*
  * find_linux_pte returns the address of a linux pte for a given
  * effective address and directory. If not found, it returns zero.
@@ -582,19 +521,22 @@ extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
 {
 	pgd_t *pg;
+	pud_t *pu;
 	pmd_t *pm;
 	pte_t *pt = NULL;
 	pte_t pte;
 
 	pg = pgdir + pgd_index(ea);
 	if (!pgd_none(*pg)) {
-
-		pm = pmd_offset(pg, ea);
-		if (pmd_present(*pm)) {
-			pt = pte_offset_kernel(pm, ea);
-			pte = *pt;
-			if (!pte_present(pte))
-				pt = NULL;
+		pu = pud_offset(pg, ea);
+		if (!pud_none(*pu)) {
+			pm = pmd_offset(pu, ea);
+			if (pmd_present(*pm)) {
+				pt = pte_offset_kernel(pm, ea);
+				pte = *pt;
+				if (!pte_present(pte))
+					pt = NULL;
+			}
 		}
 	}
 