Diffstat (limited to 'include')
-rw-r--r--  include/asm-powerpc/mmu-hash64.h  | 11
-rw-r--r--  include/asm-powerpc/paca.h        |  2
-rw-r--r--  include/asm-powerpc/page_64.h     | 86
-rw-r--r--  include/asm-powerpc/pgalloc-64.h  | 31
-rw-r--r--  include/asm-powerpc/pgtable-4k.h  |  6
-rw-r--r--  include/asm-powerpc/pgtable-64k.h |  7
-rw-r--r--  include/asm-powerpc/spu_csa.h     | 10
-rw-r--r--  include/linux/suspend.h           | 11
8 files changed, 93 insertions, 71 deletions
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 6739457d8bc0..e2ca55bcfe0b 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -350,10 +350,13 @@ typedef unsigned long mm_context_id_t;
 
 typedef struct {
 	mm_context_id_t id;
 	u16 user_psize;		/* page size index */
-	u16 sllp;		/* SLB entry page size encoding */
-#ifdef CONFIG_HUGETLB_PAGE
-	u16 low_htlb_areas, high_htlb_areas;
+
+#ifdef CONFIG_PPC_MM_SLICES
+	u64 low_slices_psize;	/* SLB page size encodings */
+	u64 high_slices_psize;	/* 4 bits per slice for now */
+#else
+	u16 sllp;		/* SLB page size encoding */
 #endif
 	unsigned long vdso_base;
 } mm_context_t;
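
The new low_slices_psize/high_slices_psize fields hold one page-size index per slice, packed four bits per slice as the comment notes. Below is a standalone, user-space sketch of how such a packed field could be read and updated; the helper names are invented for illustration, and the real accessors live in the slice-management code outside include/, which this diffstat does not cover.

#include <stdint.h>
#include <stdio.h>

static unsigned int slice_psize_get(uint64_t psizes, unsigned int index)
{
        return (psizes >> (index * 4)) & 0xf;   /* one 4-bit nibble per slice */
}

static uint64_t slice_psize_set(uint64_t psizes, unsigned int index,
                                unsigned int psize)
{
        psizes &= ~(0xfull << (index * 4));     /* clear the old nibble */
        return psizes | ((uint64_t)psize << (index * 4));
}

int main(void)
{
        uint64_t high_slices_psize = 0;

        /* record page-size index 4 for slice 3, then read it back */
        high_slices_psize = slice_psize_set(high_slices_psize, 3, 4);
        printf("slice 3 psize index = %u\n",
               slice_psize_get(high_slices_psize, 3));
        return 0;
}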
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index cf95274f735e..c6a5b1735666 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -83,8 +83,8 @@ struct paca_struct {
 
 	mm_context_t context;
 	u16 vmalloc_sllp;
-	u16 slb_cache[SLB_CACHE_ENTRIES];
 	u16 slb_cache_ptr;
+	u16 slb_cache[SLB_CACHE_ENTRIES];
 
 	/*
 	 * then miscellaneous read-write fields
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index eab779c21995..3448a3d4bc64 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -88,57 +88,55 @@ extern unsigned int HPAGE_SHIFT;
 
 #endif /* __ASSEMBLY__ */
 
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_PPC_MM_SLICES
 
-#define HTLB_AREA_SHIFT		40
-#define HTLB_AREA_SIZE		(1UL << HTLB_AREA_SHIFT)
-#define GET_HTLB_AREA(x)	((x) >> HTLB_AREA_SHIFT)
+#define SLICE_LOW_SHIFT		28
+#define SLICE_HIGH_SHIFT	40
 
-#define LOW_ESID_MASK(addr, len)	\
-	(((1U << (GET_ESID(min((addr)+(len)-1, 0x100000000UL))+1)) \
-	  - (1U << GET_ESID(min((addr), 0x100000000UL)))) & 0xffff)
-#define HTLB_AREA_MASK(addr, len)	(((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
-				      - (1U << GET_HTLB_AREA(addr))) & 0xffff)
+#define SLICE_LOW_TOP		(0x100000000ul)
+#define SLICE_NUM_LOW		(SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
+#define SLICE_NUM_HIGH		(PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
 
-#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
-#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
+#define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
+#define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)
 
-#define touches_hugepage_low_range(mm, addr, len) \
-	(((addr) < 0x100000000UL) \
-	 && (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas))
-#define touches_hugepage_high_range(mm, addr, len) \
-	((((addr) + (len)) > 0x100000000UL) \
-	 && (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas))
+#ifndef __ASSEMBLY__
+
+struct slice_mask {
+	u16 low_slices;
+	u16 high_slices;
+};
 
-#define __within_hugepage_low_range(addr, len, segmask) \
-	( (((addr)+(len)) <= 0x100000000UL) \
-	  && ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)))
-#define within_hugepage_low_range(addr, len) \
-	__within_hugepage_low_range((addr), (len), \
-				    current->mm->context.low_htlb_areas)
-#define __within_hugepage_high_range(addr, len, zonemask) \
-	( ((addr) >= 0x100000000UL) \
-	  && ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)))
-#define within_hugepage_high_range(addr, len) \
-	__within_hugepage_high_range((addr), (len), \
-				     current->mm->context.high_htlb_areas)
-
-#define is_hugepage_only_range(mm, addr, len) \
-	(touches_hugepage_high_range((mm), (addr), (len)) || \
-	 touches_hugepage_low_range((mm), (addr), (len)))
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+struct mm_struct;
 
-#define in_hugepage_area(context, addr) \
-	(cpu_has_feature(CPU_FTR_16M_PAGE) && \
-	 ( ( (addr) >= 0x100000000UL) \
-	   ? ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) \
-	   : ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) )
+extern unsigned long slice_get_unmapped_area(unsigned long addr,
+					     unsigned long len,
+					     unsigned long flags,
+					     unsigned int psize,
+					     int topdown,
+					     int use_cache);
 
-#else /* !CONFIG_HUGETLB_PAGE */
+extern unsigned int get_slice_psize(struct mm_struct *mm,
+				    unsigned long addr);
 
-#define in_hugepage_area(mm, addr) 0
+extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
+extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
+
+#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+extern int is_hugepage_only_range(struct mm_struct *m,
+				  unsigned long addr,
+				  unsigned long len);
+
+#endif /* __ASSEMBLY__ */
+#else
+#define slice_init()
+#endif /* CONFIG_PPC_MM_SLICES */
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 
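
The slice macros above carve the user address space into sixteen 256MB slices below 4GB (SLICE_LOW_SHIFT = 28) and 1TB slices above that (SLICE_HIGH_SHIFT = 40), with struct slice_mask holding one bit per slice. A standalone sketch of how an address range maps onto such a mask follows; the helper name range_to_slice_mask is invented for this example, and the in-kernel equivalent is part of the new slice code outside include/, not shown in this diff.

#include <stdint.h>
#include <stdio.h>

#define SLICE_LOW_SHIFT         28
#define SLICE_HIGH_SHIFT        40
#define SLICE_LOW_TOP           (0x100000000ul)

#define GET_LOW_SLICE_INDEX(addr)       ((addr) >> SLICE_LOW_SHIFT)
#define GET_HIGH_SLICE_INDEX(addr)      ((addr) >> SLICE_HIGH_SHIFT)

struct slice_mask {
        uint16_t low_slices;    /* one bit per 256MB slice below 4GB */
        uint16_t high_slices;   /* one bit per 1TB slice above that */
};

/* invented helper: mark every slice touched by [start, start + len) */
static struct slice_mask range_to_slice_mask(unsigned long start,
                                             unsigned long len)
{
        struct slice_mask mask = { 0, 0 };
        unsigned long end = start + len - 1;
        unsigned long i;

        if (start < SLICE_LOW_TOP) {
                unsigned long low_end =
                        end < SLICE_LOW_TOP ? end : SLICE_LOW_TOP - 1;

                for (i = GET_LOW_SLICE_INDEX(start);
                     i <= GET_LOW_SLICE_INDEX(low_end); i++)
                        mask.low_slices |= 1u << i;
        }

        if (end >= SLICE_LOW_TOP) {
                unsigned long high_start =
                        start >= SLICE_LOW_TOP ? start : SLICE_LOW_TOP;

                for (i = GET_HIGH_SLICE_INDEX(high_start);
                     i <= GET_HIGH_SLICE_INDEX(end); i++)
                        mask.high_slices |= 1u << i;
        }

        return mask;
}

int main(void)
{
        /* a 512MB mapping starting at 768MB touches low slices 3 and 4 */
        struct slice_mask m = range_to_slice_mask(0x30000000ul, 0x20000000ul);

        printf("low_slices 0x%04x high_slices 0x%04x\n",
               (unsigned)m.low_slices, (unsigned)m.high_slices);
        return 0;
}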
diff --git a/include/asm-powerpc/pgalloc-64.h b/include/asm-powerpc/pgalloc-64.h
index 30b50cf56e2c..d9a3a8ca58a1 100644
--- a/include/asm-powerpc/pgalloc-64.h
+++ b/include/asm-powerpc/pgalloc-64.h
@@ -14,18 +14,11 @@
 
 extern struct kmem_cache *pgtable_cache[];
 
-#ifdef CONFIG_PPC_64K_PAGES
-#define PTE_CACHE_NUM	0
-#define PMD_CACHE_NUM	1
-#define PGD_CACHE_NUM	2
-#define HUGEPTE_CACHE_NUM 3
-#else
-#define PTE_CACHE_NUM	0
-#define PMD_CACHE_NUM	1
-#define PUD_CACHE_NUM	1
-#define PGD_CACHE_NUM	0
-#define HUGEPTE_CACHE_NUM 2
-#endif
+#define PGD_CACHE_NUM		0
+#define PUD_CACHE_NUM		1
+#define PMD_CACHE_NUM		1
+#define HUGEPTE_CACHE_NUM	2
+#define PTE_NONCACHE_NUM	3	/* from GFP rather than kmem_cache */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
@@ -91,8 +84,7 @@ static inline void pmd_free(pmd_t *pmd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
-				GFP_KERNEL|__GFP_REPEAT);
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
@@ -103,12 +95,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-	kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
+	free_page((unsigned long)pte);
 }
 
 static inline void pte_free(struct page *ptepage)
 {
-	pte_free_kernel(page_address(ptepage));
+	__free_page(ptepage);
 }
 
 #define PGF_CACHENUM_MASK	0x3
@@ -130,14 +122,17 @@ static inline void pgtable_free(pgtable_free_t pgf)
 	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
 	int cachenum = pgf.val & PGF_CACHENUM_MASK;
 
-	kmem_cache_free(pgtable_cache[cachenum], p);
+	if (cachenum == PTE_NONCACHE_NUM)
+		free_page((unsigned long)p);
+	else
+		kmem_cache_free(pgtable_cache[cachenum], p);
 }
 
 extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
 
 #define __pte_free_tlb(tlb, ptepage)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
 		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1))
 #define __pmd_free_tlb(tlb, pmd)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
 		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
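
pgtable_free() above relies on page-table pointers being size-aligned, so the cache number fits in the low bits covered by PGF_CACHENUM_MASK, and PTE_NONCACHE_NUM now tags PTE pages that came straight from the page allocator instead of a kmem_cache. Here is a user-space sketch of that tagged-pointer dispatch; malloc-family calls stand in for the kernel allocators, and the kernel equivalents are noted in comments.

#include <stdio.h>
#include <stdlib.h>

#define PGF_CACHENUM_MASK       0x3
#define PTE_NONCACHE_NUM        3       /* page came from the page allocator */

typedef struct { unsigned long val; } pgtable_free_t;

/* pack an aligned table pointer and its cache number into one word */
static pgtable_free_t pgtable_free_cache(void *p, int cachenum,
                                         unsigned long mask)
{
        pgtable_free_t pgf = { ((unsigned long)p & ~mask) | cachenum };

        return pgf;
}

static void pgtable_free(pgtable_free_t pgf)
{
        void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
        int cachenum = pgf.val & PGF_CACHENUM_MASK;

        if (cachenum == PTE_NONCACHE_NUM) {
                printf("page-allocator free of %p\n", p);   /* kernel: free_page() */
                free(p);
        } else {
                printf("cache %d free of %p\n", cachenum, p); /* kernel: kmem_cache_free() */
                free(p);
        }
}

int main(void)
{
        /* 4096 stands in for PTE_TABLE_SIZE; the table must be size-aligned
         * so its low bits are free to carry the cache number */
        void *pte = aligned_alloc(4096, 4096);

        pgtable_free(pgtable_free_cache(pte, PTE_NONCACHE_NUM, 4096 - 1));
        return 0;
}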
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index 1744d6ac12a2..add5481fd7c7 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -80,7 +80,11 @@
 
 #define pte_iterate_hashed_end() } while(0)
 
-#define pte_pagesize_index(pte)	MMU_PAGE_4K
+#ifdef CONFIG_PPC_HAS_HASH_64K
+#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
+#else
+#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
+#endif
 
 /*
  * 4-level page tables related bits
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 16ef4978520d..31cbd3d7fce8 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -35,6 +35,11 @@
 #define _PAGE_HPTE_SUB0	0x08000000 /* combo only: first sub page */
 #define _PAGE_COMBO	0x10000000 /* this is a combo 4k page */
 #define _PAGE_4K_PFN	0x20000000 /* PFN is for a single 4k page */
+
+/* Note the full page bits must be in the same location as for normal
+ * 4k pages as the same assembly will be used to insert 64K pages
+ * whether the kernel has CONFIG_PPC_64K_PAGES or not
+ */
 #define _PAGE_F_SECOND	0x00008000 /* full page: hidx bits */
 #define _PAGE_F_GIX	0x00007000 /* full page: hidx bits */
 
@@ -88,7 +93,7 @@
 
 #define pte_iterate_hashed_end() } while(0); } } while(0)
 
-#define pte_pagesize_index(pte)	\
+#define pte_pagesize_index(mm, addr, pte)	\
 	(((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
 
 #define remap_4k_pfn(vma, addr, pfn, prot)	\
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index 02e56a6685a2..c48ae185c874 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -235,6 +235,12 @@ struct spu_priv2_collapsed {
  */
 struct spu_state {
 	struct spu_lscsa *lscsa;
+#ifdef CONFIG_SPU_FS_64K_LS
+	int use_big_pages;
+	/* One struct page per 64k page */
+#define SPU_LSCSA_NUM_BIG_PAGES	(sizeof(struct spu_lscsa) / 0x10000)
+	struct page *lscsa_pages[SPU_LSCSA_NUM_BIG_PAGES];
+#endif
 	struct spu_problem_collapsed prob;
 	struct spu_priv1_collapsed priv1;
 	struct spu_priv2_collapsed priv2;
@@ -247,12 +253,14 @@ struct spu_state {
 	spinlock_t register_lock;
 };
 
-extern void spu_init_csa(struct spu_state *csa);
+extern int spu_init_csa(struct spu_state *csa);
 extern void spu_fini_csa(struct spu_state *csa);
 extern int spu_save(struct spu_state *prev, struct spu *spu);
 extern int spu_restore(struct spu_state *new, struct spu *spu);
 extern int spu_switch(struct spu_state *prev, struct spu_state *new,
 		      struct spu *spu);
+extern int spu_alloc_lscsa(struct spu_state *csa);
+extern void spu_free_lscsa(struct spu_state *csa);
 
 #endif /* !__SPU__ */
 #endif /* __KERNEL__ */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d74da9122b60..9c7cb6430666 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -52,7 +52,15 @@ struct hibernation_ops {
 
 #if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
 /* kernel/power/snapshot.c */
-extern void __init register_nosave_region(unsigned long, unsigned long);
+extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
+static inline void register_nosave_region(unsigned long b, unsigned long e)
+{
+	__register_nosave_region(b, e, 0);
+}
+static inline void register_nosave_region_late(unsigned long b, unsigned long e)
+{
+	__register_nosave_region(b, e, 1);
+}
 extern int swsusp_page_is_forbidden(struct page *);
 extern void swsusp_set_page_free(struct page *);
 extern void swsusp_unset_page_free(struct page *);
@@ -62,6 +70,7 @@ extern void hibernation_set_ops(struct hibernation_ops *ops);
 extern int hibernate(void);
 #else
 static inline void register_nosave_region(unsigned long b, unsigned long e) {}
+static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
 static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
 static inline void swsusp_set_page_free(struct page *p) {}
 static inline void swsusp_unset_page_free(struct page *p) {}
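
The suspend.h change keeps existing register_nosave_region() callers working by turning the old entry point into a thin wrapper around a new worker that takes an extra flag, with register_nosave_region_late() passing a different flag value. A minimal standalone illustration of that wrapper pattern follows; the worker body is a stub (the real one is in kernel/power/snapshot.c), and the exact meaning of km is an assumption noted in the comment.

#include <stdio.h>

/* stub worker: the real __register_nosave_region() records a range the
 * hibernation core must not save; what km selects is assumed here, the
 * diff only shows that the early wrapper passes 0 and the late one 1 */
static void __register_nosave_region(unsigned long b, unsigned long e, int km)
{
        printf("nosave region %#lx-%#lx (km=%d)\n", b, e, km);
}

static inline void register_nosave_region(unsigned long b, unsigned long e)
{
        __register_nosave_region(b, e, 0);
}

static inline void register_nosave_region_late(unsigned long b, unsigned long e)
{
        __register_nosave_region(b, e, 1);
}

int main(void)
{
        register_nosave_region(0x1000, 0x2000);
        register_nosave_region_late(0x3000, 0x4000);
        return 0;
}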
