author | David S. Miller <davem@davemloft.net> | 2013-09-21 00:50:41 -0400
committer | David S. Miller <davem@davemloft.net> | 2013-11-12 18:22:34 -0500
commit | b2d438348024b75a1ee8b66b85d77f569a5dfed8 (patch)
tree | 057c725d9d058d326533d0947aedd226adb57540 /arch/sparc/include/asm/page_64.h
parent | f998c9c0d663b013e3aa3ba78908396c8c497218 (diff)
sparc64: Make PAGE_OFFSET variable.
Choose PAGE_OFFSET dynamically based upon cpu type.
Original UltraSPARC-I (spitfire) chips only supported a 44-bit
virtual address space.
Newer chips (T4 and later) support 52-bit virtual addresses
and up to 47-bits of physical memory space.
Therefore we have to adjust PAGE_OFFSET dynamically based upon
the capabilities of the chip.
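To make that concrete, here is a minimal user-space sketch (not the kernel's actual boot code) of what a boot-time choice of PAGE_OFFSET looks like. It reuses the PAGE_OFFSET_BY_BITS() expression kept by this patch; the per-chip bit counts and the helper name are illustrative assumptions only.

```c
/* Minimal sketch only -- not the kernel's setup code.  It mirrors the
 * PAGE_OFFSET_BY_BITS() macro from page_64.h; the bit counts returned
 * by the hypothetical helper below are assumptions for illustration.
 */
#include <stdio.h>

#define PAGE_OFFSET_BY_BITS(X) (-(1UL << (X)))

/* Hypothetical helper: how many address bits the linear map must cover. */
static unsigned int linear_map_bits(int newer_chip)
{
	return newer_chip ? 47 : 43;	/* T4 and later vs. the old fixed limit */
}

int main(void)
{
	unsigned long page_offset = PAGE_OFFSET_BY_BITS(linear_map_bits(1));

	printf("PAGE_OFFSET = 0x%016lx\n", page_offset);
	return 0;
}
```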
Note that this change alone does not allow us to support > 43-bit
physical memory; to do that we need to re-arrange our page table
support. The current encodings of the pmd_t and pgd_t pointers
restrict us to "32 + 11" == 43 bits.
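A quick worked example of that ceiling (only the "32 + 11" arithmetic comes from the text above; the encoding value below is hypothetical): a 32-bit table-pointer encoding shifted left by 11 bits can only name physical addresses below 2^43.

```c
/* Illustration of the "32 + 11" == 43 bit limit described above.
 * The encoded value is hypothetical; only the shift arithmetic matters.
 */
#include <stdio.h>

int main(void)
{
	unsigned int  encoded = 0xffffffffu;	/* widest possible 32-bit encoding */
	unsigned long highest = ((unsigned long)encoded << 11) + ((1UL << 11) - 1);

	printf("highest addressable byte: 0x%lx (just under 1 << %d)\n",
	       highest, 32 + 11);
	return 0;
}
```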
This change can waste quite a bit of memory for the various tables.
In particular, a future change should work to size and allocate
kern_linear_bitmap[] and sparc64_valid_addr_bitmap[] dynamically.
This isn't easy, as we really cannot take a TLB miss when accessing
kern_linear_bitmap[]; we'd have to lock it into the TLB or similar.
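For a rough feel of the memory cost, the sketch below sizes a statically allocated bitmap with one bit per granule. The 4MB granule is an assumption for illustration, not something stated by this patch; the real kernel bitmaps use their own granularity and encodings.

```c
/* Back-of-the-envelope sizing for a statically allocated bitmap.
 * Assumes one bit per 4MB granule (shift 22) purely for illustration.
 */
#include <stdio.h>

static unsigned long bitmap_bytes(unsigned int pa_bits, unsigned int granule_shift)
{
	unsigned long granules = 1UL << (pa_bits - granule_shift);

	return granules / 8;	/* one bit per granule */
}

int main(void)
{
	printf("41-bit PA space: %6lu KiB of bitmap\n", bitmap_bytes(41, 22) >> 10);
	printf("47-bit PA space: %6lu KiB of bitmap\n", bitmap_bytes(47, 22) >> 10);
	return 0;
}
```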
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Bob Picco <bob.picco@oracle.com>
Diffstat (limited to 'arch/sparc/include/asm/page_64.h')
-rw-r--r-- | arch/sparc/include/asm/page_64.h | 20
1 file changed, 6 insertions(+), 14 deletions(-)
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 978ea6d022e9..89e07fd0ac88 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -112,24 +112,16 @@ typedef pte_t *pgtable_t;
 
 #include <asm-generic/memory_model.h>
 
-#endif /* !(__ASSEMBLY__) */
-
-/* We used to stick this into a hard-coded global register (%g4)
- * but that does not make sense anymore.
- */
-#define MAX_SUPPORTED_PA_BITS 43
 #define PAGE_OFFSET_BY_BITS(X) (-(_AC(1,UL) << (X)))
-#define PAGE_OFFSET PAGE_OFFSET_BY_BITS(MAX_SUPPORTED_PA_BITS)
+extern unsigned long PAGE_OFFSET;
 
-/* The "virtual" portion of PAGE_OFFSET, used to clip off the non-physical
- * bits of a linear kernel address.
- */
-#define PAGE_OFFSET_VA_BITS (64 - MAX_SUPPORTED_PA_BITS)
+#endif /* !(__ASSEMBLY__) */
 
-/* The actual number of physical memory address bits we support, this is
- * used to size various tables used to manage kernel TLB misses.
+/* The maximum number of physical memory address bits we support, this
+ * is used to size various tables used to manage kernel TLB misses and
+ * also the sparsemem code.
  */
-#define MAX_PHYS_ADDRESS_BITS 41
+#define MAX_PHYS_ADDRESS_BITS 47
 
 /* These two shift counts are used when indexing sparc64_valid_addr_bitmap
  * and kpte_linear_bitmap.