Diffstat (limited to 'include/asm-powerpc/mmu.h')
-rw-r--r--  include/asm-powerpc/mmu.h | 13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 885397420104..3a5ebe229af5 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -165,6 +165,16 @@ struct mmu_psize_def
 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 extern int mmu_linear_psize;
 extern int mmu_virtual_psize;
+extern int mmu_vmalloc_psize;
+extern int mmu_io_psize;
+
+/*
+ * If the processor supports 64k normal pages but not 64k cache
+ * inhibited pages, we have to be prepared to switch processes
+ * to use 4k pages when they create cache-inhibited mappings.
+ * If this is the case, mmu_ci_restrictions will be set to 1.
+ */
+extern int mmu_ci_restrictions;
 
 #ifdef CONFIG_HUGETLB_PAGE
 /*
@@ -256,6 +266,7 @@ extern long iSeries_hpte_insert(unsigned long hpte_group,
 
 extern void stabs_alloc(void);
 extern void slb_initialize(void);
+extern void slb_flush_and_rebolt(void);
 extern void stab_initialize(unsigned long stab);
 
 #endif /* __ASSEMBLY__ */
@@ -359,6 +370,8 @@ typedef unsigned long mm_context_id_t;
 
 typedef struct {
 	mm_context_id_t id;
+	u16 user_psize;		/* page size index */
+	u16 sllp;		/* SLB entry page size encoding */
 #ifdef CONFIG_HUGETLB_PAGE
 	u16 low_htlb_areas, high_htlb_areas;
 #endif
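
As an aside on the mmu_ci_restrictions comment introduced above: the following is a minimal, hypothetical stand-alone sketch of the rule that comment describes (fall back to 4k pages for cache-inhibited mappings when the restriction applies). It is purely illustrative and is not the kernel's actual page-size demotion code; the variable value and helper name are assumptions.

/* Hypothetical model of the rule in the mmu_ci_restrictions comment:
 * a CPU that supports 64k normal pages but not 64k cache-inhibited
 * pages must use 4k pages for cache-inhibited mappings. */
#include <stdio.h>

enum psize { MMU_PAGE_4K, MMU_PAGE_64K };

static int mmu_ci_restrictions = 1;	/* assumed: restriction applies */

/* Pick the page size for a new mapping; demote to 4k when the mapping
 * is cache-inhibited and the restriction is set. */
static enum psize pick_psize(int cache_inhibited)
{
	if (cache_inhibited && mmu_ci_restrictions)
		return MMU_PAGE_4K;
	return MMU_PAGE_64K;
}

int main(void)
{
	printf("cache-inhibited mapping uses %s pages\n",
	       pick_psize(1) == MMU_PAGE_4K ? "4k" : "64k");
	return 0;
}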