author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-03-10 13:53:29 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-03-20 00:56:57 -0400
commit	c605782b1c3f1c18a55dc1a75b19ed0288f61ac3 (patch)
tree	2e6673146afcb692dd9c137241e29abe94631679 /arch/powerpc/include/asm/pte-hash64-64k.h
parent	28794d34ecb6815a3fa0a4256027c9b081a17c5f (diff)
powerpc/mm: Split the various pgtable-* headers based on MMU type
This patch moves the definition of the PTE format for each MMU type to separate files instead of all in one file. This improves overall maintainability and will make it easier to add new types.

On 64-bit, I have additionally separated the headers describing the format of the page table tree (3 vs. 4 levels for 64K vs. 4K pages) from the headers specific to the PTE format for hash-based processors; this will make it easier to add support for Book3 "E" 64-bit implementations.

There are still some type-related ifdef's in the generic headers; we might remove them in the long run, but this patch shouldn't result in any code change, -hopefully- just definitions being moved around.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
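As a quick orientation (an assumption drawn from the new file's header comment, not stated in the message itself), the split headers are expected to layer roughly like this, with the page-size-specific PTE format selected from the hash PTE header:

	/* sketch of the assumed include layering in the hash-64 PTE header */
	#ifdef CONFIG_PPC_64K_PAGES
	#include <asm/pte-hash64-64k.h>	/* hash PTE format, 64K base pages */
	#else
	#include <asm/pte-hash64-4k.h>	/* hash PTE format, 4K base pages */
	#endif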
Diffstat (limited to 'arch/powerpc/include/asm/pte-hash64-64k.h')
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h | 115
1 file changed, 115 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
new file mode 100644
index 000000000000..e05d26fa372f
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -0,0 +1,115 @@
/* To be included by pgtable-hash64.h only */

/* Additional PTE bits (don't change without checking asm in hash_low.S) */
#define _PAGE_SPECIAL	0x00000400 /* software: special page */
#define _PAGE_HPTE_SUB	0x0ffff000 /* combo only: sub pages HPTE bits */
#define _PAGE_HPTE_SUB0	0x08000000 /* combo only: first sub page */
#define _PAGE_COMBO	0x10000000 /* this is a combo 4k page */
#define _PAGE_4K_PFN	0x20000000 /* PFN is for a single 4k page */

/* For 64K pages, we don't have a separate _PAGE_HASHPTE bit. Instead,
 * we set that to be the whole sub-bits mask. The C code will only
 * test this, so a multi-bit mask will work. For combo pages, this
 * is equivalent since, effectively, the old _PAGE_HASHPTE was an OR
 * of all the sub bits. For real 64k pages, we now have the assembly
 * set _PAGE_HPTE_SUB0 in addition to setting the HIDX bits which
 * overlap that mask. This is fine as long as the HIDX bits are never
 * set on a PTE that isn't hashed, which is the case today.
 *
 * One wrinkle is the huge page C code: since it does the hashing in
 * C, we need to tell it which bit to use.
 */
#define _PAGE_HASHPTE	_PAGE_HPTE_SUB
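
/* Illustrative only (not part of this file): generic code tests
 * _PAGE_HASHPTE as a simple boolean, for example:
 *
 *	if (pte_val(pte) & _PAGE_HASHPTE)
 *		flush_the_hpte(ptep);	(hypothetical caller)
 *
 * so defining it as the multi-bit _PAGE_HPTE_SUB mask keeps such
 * tests working unchanged for both combo and real 64k pages.
 */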

/* Note the full page bits must be in the same location as for normal
 * 4k pages, as the same assembly will be used to insert 64K pages
 * whether the kernel has CONFIG_PPC_64K_PAGES or not
 */
#define _PAGE_F_SECOND	0x00008000 /* full page: hidx bits */
#define _PAGE_F_GIX	0x00007000 /* full page: hidx bits */

/* PTE flags to preserve for HPTE identification */
#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_COMBO)

/* Shift to put page number into pte.
 *
 * That gives us a max RPN of 34 bits (64 - 30), which means a max of
 * 50 bits of addressable physical space (34 plus the 16-bit 64K page
 * offset), or 46 bits (34 + 12) for the special 4k PFNs.
 */
#define PTE_RPN_SHIFT	(30)

#ifndef __ASSEMBLY__

/*
 * With 64K pages on a hash table, we have a special PTE format that
 * uses a second "half" of the page table to encode sub-page information
 * in order to deal with 64K made of 4K HW pages. Thus we override the
 * generic accessors and iterators here.
 */
#define __real_pte(e,p)	((real_pte_t) { \
	(e), pte_val(*((p) + PTRS_PER_PTE)) })
#define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
#define __rpte_to_pte(r)	((r).pte)
#define __rpte_sub_valid(rpte, index) \
	(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
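
/* Illustrative use of the accessors (hypothetical caller, modeled on
 * the hash flush path): fetch both halves of the PTE, then pull out
 * the 4-bit HPTE slot index for one sub-page:
 *
 *	real_pte_t rpte = __real_pte(*ptep, ptep);
 *	if (__rpte_sub_valid(rpte, index))
 *		hidx = __rpte_to_hidx(rpte, index);
 */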

/* Trick: we set __end to va + 64k, which happens to work for
 * a 16M page as well, since we want only one iteration
 */
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		unsigned long __end = va + PAGE_SIZE;			\
		unsigned __split = (psize == MMU_PAGE_4K ||		\
				    psize == MMU_PAGE_64K_AP);		\
		shift = mmu_psize_defs[psize].shift;			\
		for (index = 0; va < __end; index++, va += (1L << shift)) { \
			if (!__split || __rpte_sub_valid(rpte, index)) do { \

#define pte_iterate_hashed_end() } while(0); } } while(0)
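
/* Sketch of the intended iteration pattern (hypothetical caller; the
 * real users live in the hash flush code). The body runs once per
 * valid sub-page for combo pages, or exactly once for a full page:
 *
 *	pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
 *		hidx = __rpte_to_hidx(rpte, index);
 *		... invalidate the HPTE for this sub-page ...
 *	} pte_iterate_hashed_end();
 */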

#define pte_pagesize_index(mm, addr, pte)	\
	(((pte) & _PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K)

#define remap_4k_pfn(vma, addr, pfn, prot)				\
	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,		\
			__pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
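
/* Illustrative use (hypothetical driver mmap handler): map a single
 * 4k hardware page into a 64K-page kernel; _PAGE_4K_PFN is OR'd into
 * the pgprot so the hash code treats the PFN as a 4k one:
 *
 *	return remap_4k_pfn(vma, vma->vm_start, pfn, vma->vm_page_prot);
 */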

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these. Basically we have a 3-level tree, with the top level being
 * the protptrs array. To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k). For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[2];
	unsigned int *low_prot[4];
};

#undef PGD_TABLE_SIZE
#define PGD_TABLE_SIZE ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
			sizeof(struct subpage_prot_table))

#define SBP_L1_BITS	(PAGE_SHIFT - 2)
#define SBP_L2_BITS	(PAGE_SHIFT - 3)
#define SBP_L1_COUNT	(1 << SBP_L1_BITS)
#define SBP_L2_COUNT	(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT	(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT	(SBP_L2_SHIFT + SBP_L2_BITS)
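
/* Worked numbers for the constants above, with 64K pages
 * (PAGE_SHIFT = 16): SBP_L1_BITS = 14, so one leaf page holds
 * 2^14 four-byte protection words covering 2^14 * 64K = 1GB;
 * SBP_L2_BITS = 13, so one page of pointers covers 2^13 * 1GB = 8TB,
 * matching the comment above.
 */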

extern void subpage_prot_free(pgd_t *pgd);

static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
{
	return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
}
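
/* Illustrative lookup (hypothetical caller, e.g. a sub-page
 * protection syscall): the table lives right past the PGD proper,
 * which is why PGD_TABLE_SIZE is enlarged above:
 *
 *	struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
 *	if (addr >= spt->maxaddr)
 *		return;		(address not protected)
 */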
#endif /* CONFIG_PPC_SUBPAGE_PROT */
#endif /* __ASSEMBLY__ */