diff options
Diffstat (limited to 'arch/powerpc/mm')
| -rw-r--r-- | arch/powerpc/mm/44x_mmu.c | 6 | ||||
| -rw-r--r-- | arch/powerpc/mm/Makefile | 2 | ||||
| -rw-r--r-- | arch/powerpc/mm/fault.c | 17 | ||||
| -rw-r--r-- | arch/powerpc/mm/hugetlbpage-book3e.c | 21 | ||||
| -rw-r--r-- | arch/powerpc/mm/hugetlbpage.c | 116 | ||||
| -rw-r--r-- | arch/powerpc/mm/icswx.c | 273 | ||||
| -rw-r--r-- | arch/powerpc/mm/icswx.h | 62 | ||||
| -rw-r--r-- | arch/powerpc/mm/icswx_pid.c | 87 | ||||
| -rw-r--r-- | arch/powerpc/mm/init_32.c | 7 | ||||
| -rw-r--r-- | arch/powerpc/mm/mem.c | 23 | ||||
| -rw-r--r-- | arch/powerpc/mm/mmap_64.c | 14 | ||||
| -rw-r--r-- | arch/powerpc/mm/mmu_context_hash64.c | 195 | ||||
| -rw-r--r-- | arch/powerpc/mm/numa.c | 4 | ||||
| -rw-r--r-- | arch/powerpc/mm/tlb_low_64e.S | 36 | ||||
| -rw-r--r-- | arch/powerpc/mm/tlb_nohash.c | 2 |
15 files changed, 577 insertions, 288 deletions
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c index f60e006d90c3..388b95e1a009 100644 --- a/arch/powerpc/mm/44x_mmu.c +++ b/arch/powerpc/mm/44x_mmu.c | |||
| @@ -78,11 +78,7 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) | |||
| 78 | "tlbwe %1,%3,%5\n" | 78 | "tlbwe %1,%3,%5\n" |
| 79 | "tlbwe %0,%3,%6\n" | 79 | "tlbwe %0,%3,%6\n" |
| 80 | : | 80 | : |
| 81 | #ifdef CONFIG_PPC47x | ||
| 82 | : "r" (PPC47x_TLB2_S_RWX), | ||
| 83 | #else | ||
| 84 | : "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G), | 81 | : "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G), |
| 85 | #endif | ||
| 86 | "r" (phys), | 82 | "r" (phys), |
| 87 | "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M), | 83 | "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M), |
| 88 | "r" (entry), | 84 | "r" (entry), |
| @@ -221,7 +217,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, | |||
| 221 | { | 217 | { |
| 222 | u64 size; | 218 | u64 size; |
| 223 | 219 | ||
| 224 | #ifndef CONFIG_RELOCATABLE | 220 | #ifndef CONFIG_NONSTATIC_KERNEL |
| 225 | /* We don't currently support the first MEMBLOCK not mapping 0 | 221 | /* We don't currently support the first MEMBLOCK not mapping 0 |
| 226 | * physical on those processors | 222 | * physical on those processors |
| 227 | */ | 223 | */ |
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 991ee813d2a8..3787b61f7d20 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile | |||
| @@ -21,6 +21,8 @@ obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o | |||
| 21 | obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \ | 21 | obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \ |
| 22 | tlb_hash$(CONFIG_WORD_SIZE).o \ | 22 | tlb_hash$(CONFIG_WORD_SIZE).o \ |
| 23 | mmu_context_hash$(CONFIG_WORD_SIZE).o | 23 | mmu_context_hash$(CONFIG_WORD_SIZE).o |
| 24 | obj-$(CONFIG_PPC_ICSWX) += icswx.o | ||
| 25 | obj-$(CONFIG_PPC_ICSWX_PID) += icswx_pid.o | ||
| 24 | obj-$(CONFIG_40x) += 40x_mmu.o | 26 | obj-$(CONFIG_40x) += 40x_mmu.o |
| 25 | obj-$(CONFIG_44x) += 44x_mmu.o | 27 | obj-$(CONFIG_44x) += 44x_mmu.o |
| 26 | obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke_mmu.o | 28 | obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke_mmu.o |
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 5efe8c96d37f..2f0d1b032a89 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
| @@ -44,6 +44,8 @@ | |||
| 44 | #include <asm/siginfo.h> | 44 | #include <asm/siginfo.h> |
| 45 | #include <mm/mmu_decl.h> | 45 | #include <mm/mmu_decl.h> |
| 46 | 46 | ||
| 47 | #include "icswx.h" | ||
| 48 | |||
| 47 | #ifdef CONFIG_KPROBES | 49 | #ifdef CONFIG_KPROBES |
| 48 | static inline int notify_page_fault(struct pt_regs *regs) | 50 | static inline int notify_page_fault(struct pt_regs *regs) |
| 49 | { | 51 | { |
| @@ -143,6 +145,21 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, | |||
| 143 | is_write = error_code & ESR_DST; | 145 | is_write = error_code & ESR_DST; |
| 144 | #endif /* CONFIG_4xx || CONFIG_BOOKE */ | 146 | #endif /* CONFIG_4xx || CONFIG_BOOKE */ |
| 145 | 147 | ||
| 148 | #ifdef CONFIG_PPC_ICSWX | ||
| 149 | /* | ||
| 150 | * we need to do this early because this "data storage | ||
| 151 | * interrupt" does not update the DAR/DEAR so we don't want to | ||
| 152 | * look at it | ||
| 153 | */ | ||
| 154 | if (error_code & ICSWX_DSI_UCT) { | ||
| 155 | int ret; | ||
| 156 | |||
| 157 | ret = acop_handle_fault(regs, address, error_code); | ||
| 158 | if (ret) | ||
| 159 | return ret; | ||
| 160 | } | ||
| 161 | #endif | ||
| 162 | |||
| 146 | if (notify_page_fault(regs)) | 163 | if (notify_page_fault(regs)) |
| 147 | return 0; | 164 | return 0; |
| 148 | 165 | ||
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c index 343ad0b87261..3bc700655fc8 100644 --- a/arch/powerpc/mm/hugetlbpage-book3e.c +++ b/arch/powerpc/mm/hugetlbpage-book3e.c | |||
| @@ -37,31 +37,32 @@ static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid) | |||
| 37 | return found; | 37 | return found; |
| 38 | } | 38 | } |
| 39 | 39 | ||
| 40 | void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte) | 40 | void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, |
| 41 | pte_t pte) | ||
| 41 | { | 42 | { |
| 42 | unsigned long mas1, mas2; | 43 | unsigned long mas1, mas2; |
| 43 | u64 mas7_3; | 44 | u64 mas7_3; |
| 44 | unsigned long psize, tsize, shift; | 45 | unsigned long psize, tsize, shift; |
| 45 | unsigned long flags; | 46 | unsigned long flags; |
| 47 | struct mm_struct *mm; | ||
| 46 | 48 | ||
| 47 | #ifdef CONFIG_PPC_FSL_BOOK3E | 49 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 48 | int index, lz, ncams; | 50 | int index, ncams; |
| 49 | struct vm_area_struct *vma; | ||
| 50 | #endif | 51 | #endif |
| 51 | 52 | ||
| 52 | if (unlikely(is_kernel_addr(ea))) | 53 | if (unlikely(is_kernel_addr(ea))) |
| 53 | return; | 54 | return; |
| 54 | 55 | ||
| 56 | mm = vma->vm_mm; | ||
| 57 | |||
| 55 | #ifdef CONFIG_PPC_MM_SLICES | 58 | #ifdef CONFIG_PPC_MM_SLICES |
| 56 | psize = mmu_get_tsize(get_slice_psize(mm, ea)); | 59 | psize = get_slice_psize(mm, ea); |
| 57 | tsize = mmu_get_psize(psize); | 60 | tsize = mmu_get_tsize(psize); |
| 58 | shift = mmu_psize_defs[psize].shift; | 61 | shift = mmu_psize_defs[psize].shift; |
| 59 | #else | 62 | #else |
| 60 | vma = find_vma(mm, ea); | 63 | psize = vma_mmu_pagesize(vma); |
| 61 | psize = vma_mmu_pagesize(vma); /* returns actual size in bytes */ | 64 | shift = __ilog2(psize); |
| 62 | asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (psize)); | 65 | tsize = shift - 10; |
| 63 | shift = 31 - lz; | ||
| 64 | tsize = 21 - lz; | ||
| 65 | #endif | 66 | #endif |
| 66 | 67 | ||
| 67 | /* | 68 | /* |
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 8558b572e55d..a8b3cc7d90fe 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
| @@ -29,22 +29,22 @@ unsigned int HPAGE_SHIFT; | |||
| 29 | 29 | ||
| 30 | /* | 30 | /* |
| 31 | * Tracks gpages after the device tree is scanned and before the | 31 | * Tracks gpages after the device tree is scanned and before the |
| 32 | * huge_boot_pages list is ready. On 64-bit implementations, this is | 32 | * huge_boot_pages list is ready. On non-Freescale implementations, this is |
| 33 | * just used to track 16G pages and so is a single array. 32-bit | 33 | * just used to track 16G pages and so is a single array. FSL-based |
| 34 | * implementations may have more than one gpage size due to limitations | 34 | * implementations may have more than one gpage size, so we need multiple |
| 35 | * of the memory allocators, so we need multiple arrays | 35 | * arrays |
| 36 | */ | 36 | */ |
| 37 | #ifdef CONFIG_PPC64 | 37 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 38 | #define MAX_NUMBER_GPAGES 1024 | ||
| 39 | static u64 gpage_freearray[MAX_NUMBER_GPAGES]; | ||
| 40 | static unsigned nr_gpages; | ||
| 41 | #else | ||
| 42 | #define MAX_NUMBER_GPAGES 128 | 38 | #define MAX_NUMBER_GPAGES 128 |
| 43 | struct psize_gpages { | 39 | struct psize_gpages { |
| 44 | u64 gpage_list[MAX_NUMBER_GPAGES]; | 40 | u64 gpage_list[MAX_NUMBER_GPAGES]; |
| 45 | unsigned int nr_gpages; | 41 | unsigned int nr_gpages; |
| 46 | }; | 42 | }; |
| 47 | static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT]; | 43 | static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT]; |
| 44 | #else | ||
| 45 | #define MAX_NUMBER_GPAGES 1024 | ||
| 46 | static u64 gpage_freearray[MAX_NUMBER_GPAGES]; | ||
| 47 | static unsigned nr_gpages; | ||
| 48 | #endif | 48 | #endif |
| 49 | 49 | ||
| 50 | static inline int shift_to_mmu_psize(unsigned int shift) | 50 | static inline int shift_to_mmu_psize(unsigned int shift) |
| @@ -115,12 +115,12 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, | |||
| 115 | struct kmem_cache *cachep; | 115 | struct kmem_cache *cachep; |
| 116 | pte_t *new; | 116 | pte_t *new; |
| 117 | 117 | ||
| 118 | #ifdef CONFIG_PPC64 | 118 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 119 | cachep = PGT_CACHE(pdshift - pshift); | ||
| 120 | #else | ||
| 121 | int i; | 119 | int i; |
| 122 | int num_hugepd = 1 << (pshift - pdshift); | 120 | int num_hugepd = 1 << (pshift - pdshift); |
| 123 | cachep = hugepte_cache; | 121 | cachep = hugepte_cache; |
| 122 | #else | ||
| 123 | cachep = PGT_CACHE(pdshift - pshift); | ||
| 124 | #endif | 124 | #endif |
| 125 | 125 | ||
| 126 | new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT); | 126 | new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT); |
| @@ -132,12 +132,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, | |||
| 132 | return -ENOMEM; | 132 | return -ENOMEM; |
| 133 | 133 | ||
| 134 | spin_lock(&mm->page_table_lock); | 134 | spin_lock(&mm->page_table_lock); |
| 135 | #ifdef CONFIG_PPC64 | 135 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 136 | if (!hugepd_none(*hpdp)) | ||
| 137 | kmem_cache_free(cachep, new); | ||
| 138 | else | ||
| 139 | hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; | ||
| 140 | #else | ||
| 141 | /* | 136 | /* |
| 142 | * We have multiple higher-level entries that point to the same | 137 | * We have multiple higher-level entries that point to the same |
| 143 | * actual pte location. Fill in each as we go and backtrack on error. | 138 | * actual pte location. Fill in each as we go and backtrack on error. |
| @@ -156,11 +151,28 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, | |||
| 156 | hpdp->pd = 0; | 151 | hpdp->pd = 0; |
| 157 | kmem_cache_free(cachep, new); | 152 | kmem_cache_free(cachep, new); |
| 158 | } | 153 | } |
| 154 | #else | ||
| 155 | if (!hugepd_none(*hpdp)) | ||
| 156 | kmem_cache_free(cachep, new); | ||
| 157 | else | ||
| 158 | hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; | ||
| 159 | #endif | 159 | #endif |
| 160 | spin_unlock(&mm->page_table_lock); | 160 | spin_unlock(&mm->page_table_lock); |
| 161 | return 0; | 161 | return 0; |
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | /* | ||
| 165 | * These macros define how to determine which level of the page table holds | ||
| 166 | * the hpdp. | ||
| 167 | */ | ||
| 168 | #ifdef CONFIG_PPC_FSL_BOOK3E | ||
| 169 | #define HUGEPD_PGD_SHIFT PGDIR_SHIFT | ||
| 170 | #define HUGEPD_PUD_SHIFT PUD_SHIFT | ||
| 171 | #else | ||
| 172 | #define HUGEPD_PGD_SHIFT PUD_SHIFT | ||
| 173 | #define HUGEPD_PUD_SHIFT PMD_SHIFT | ||
| 174 | #endif | ||
| 175 | |||
| 164 | pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) | 176 | pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) |
| 165 | { | 177 | { |
| 166 | pgd_t *pg; | 178 | pgd_t *pg; |
| @@ -173,12 +185,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz | |||
| 173 | addr &= ~(sz-1); | 185 | addr &= ~(sz-1); |
| 174 | 186 | ||
| 175 | pg = pgd_offset(mm, addr); | 187 | pg = pgd_offset(mm, addr); |
| 176 | if (pshift >= PUD_SHIFT) { | 188 | |
| 189 | if (pshift >= HUGEPD_PGD_SHIFT) { | ||
| 177 | hpdp = (hugepd_t *)pg; | 190 | hpdp = (hugepd_t *)pg; |
| 178 | } else { | 191 | } else { |
| 179 | pdshift = PUD_SHIFT; | 192 | pdshift = PUD_SHIFT; |
| 180 | pu = pud_alloc(mm, pg, addr); | 193 | pu = pud_alloc(mm, pg, addr); |
| 181 | if (pshift >= PMD_SHIFT) { | 194 | if (pshift >= HUGEPD_PUD_SHIFT) { |
| 182 | hpdp = (hugepd_t *)pu; | 195 | hpdp = (hugepd_t *)pu; |
| 183 | } else { | 196 | } else { |
| 184 | pdshift = PMD_SHIFT; | 197 | pdshift = PMD_SHIFT; |
| @@ -198,7 +211,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz | |||
| 198 | return hugepte_offset(hpdp, addr, pdshift); | 211 | return hugepte_offset(hpdp, addr, pdshift); |
| 199 | } | 212 | } |
| 200 | 213 | ||
| 201 | #ifdef CONFIG_PPC32 | 214 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 202 | /* Build list of addresses of gigantic pages. This function is used in early | 215 | /* Build list of addresses of gigantic pages. This function is used in early |
| 203 | * boot before the buddy or bootmem allocator is setup. | 216 | * boot before the buddy or bootmem allocator is setup. |
| 204 | */ | 217 | */ |
| @@ -318,7 +331,7 @@ void __init reserve_hugetlb_gpages(void) | |||
| 318 | } | 331 | } |
| 319 | } | 332 | } |
| 320 | 333 | ||
| 321 | #else /* PPC64 */ | 334 | #else /* !PPC_FSL_BOOK3E */ |
| 322 | 335 | ||
| 323 | /* Build list of addresses of gigantic pages. This function is used in early | 336 | /* Build list of addresses of gigantic pages. This function is used in early |
| 324 | * boot before the buddy or bootmem allocator is setup. | 337 | * boot before the buddy or bootmem allocator is setup. |
| @@ -356,7 +369,7 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) | |||
| 356 | return 0; | 369 | return 0; |
| 357 | } | 370 | } |
| 358 | 371 | ||
| 359 | #ifdef CONFIG_PPC32 | 372 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 360 | #define HUGEPD_FREELIST_SIZE \ | 373 | #define HUGEPD_FREELIST_SIZE \ |
| 361 | ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) | 374 | ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) |
| 362 | 375 | ||
| @@ -416,11 +429,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif | |||
| 416 | unsigned long pdmask = ~((1UL << pdshift) - 1); | 429 | unsigned long pdmask = ~((1UL << pdshift) - 1); |
| 417 | unsigned int num_hugepd = 1; | 430 | unsigned int num_hugepd = 1; |
| 418 | 431 | ||
| 419 | #ifdef CONFIG_PPC64 | 432 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 420 | unsigned int shift = hugepd_shift(*hpdp); | 433 | /* Note: On fsl the hpdp may be the first of several */ |
| 421 | #else | ||
| 422 | /* Note: On 32-bit the hpdp may be the first of several */ | ||
| 423 | num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift)); | 434 | num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift)); |
| 435 | #else | ||
| 436 | unsigned int shift = hugepd_shift(*hpdp); | ||
| 424 | #endif | 437 | #endif |
| 425 | 438 | ||
| 426 | start &= pdmask; | 439 | start &= pdmask; |
| @@ -438,10 +451,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif | |||
| 438 | hpdp->pd = 0; | 451 | hpdp->pd = 0; |
| 439 | 452 | ||
| 440 | tlb->need_flush = 1; | 453 | tlb->need_flush = 1; |
| 441 | #ifdef CONFIG_PPC64 | 454 | |
| 442 | pgtable_free_tlb(tlb, hugepte, pdshift - shift); | 455 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 443 | #else | ||
| 444 | hugepd_free(tlb, hugepte); | 456 | hugepd_free(tlb, hugepte); |
| 457 | #else | ||
| 458 | pgtable_free_tlb(tlb, hugepte, pdshift - shift); | ||
| 445 | #endif | 459 | #endif |
| 446 | } | 460 | } |
| 447 | 461 | ||
| @@ -454,14 +468,23 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, | |||
| 454 | unsigned long start; | 468 | unsigned long start; |
| 455 | 469 | ||
| 456 | start = addr; | 470 | start = addr; |
| 457 | pmd = pmd_offset(pud, addr); | ||
| 458 | do { | 471 | do { |
| 472 | pmd = pmd_offset(pud, addr); | ||
| 459 | next = pmd_addr_end(addr, end); | 473 | next = pmd_addr_end(addr, end); |
| 460 | if (pmd_none(*pmd)) | 474 | if (pmd_none(*pmd)) |
| 461 | continue; | 475 | continue; |
| 476 | #ifdef CONFIG_PPC_FSL_BOOK3E | ||
| 477 | /* | ||
| 478 | * Increment next by the size of the huge mapping since | ||
| 479 | * there may be more than one entry at this level for a | ||
| 480 | * single hugepage, but all of them point to | ||
| 481 | * the same kmem cache that holds the hugepte. | ||
| 482 | */ | ||
| 483 | next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd)); | ||
| 484 | #endif | ||
| 462 | free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, | 485 | free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, |
| 463 | addr, next, floor, ceiling); | 486 | addr, next, floor, ceiling); |
| 464 | } while (pmd++, addr = next, addr != end); | 487 | } while (addr = next, addr != end); |
| 465 | 488 | ||
| 466 | start &= PUD_MASK; | 489 | start &= PUD_MASK; |
| 467 | if (start < floor) | 490 | if (start < floor) |
| @@ -488,8 +511,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, | |||
| 488 | unsigned long start; | 511 | unsigned long start; |
| 489 | 512 | ||
| 490 | start = addr; | 513 | start = addr; |
| 491 | pud = pud_offset(pgd, addr); | ||
| 492 | do { | 514 | do { |
| 515 | pud = pud_offset(pgd, addr); | ||
| 493 | next = pud_addr_end(addr, end); | 516 | next = pud_addr_end(addr, end); |
| 494 | if (!is_hugepd(pud)) { | 517 | if (!is_hugepd(pud)) { |
| 495 | if (pud_none_or_clear_bad(pud)) | 518 | if (pud_none_or_clear_bad(pud)) |
| @@ -497,10 +520,19 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, | |||
| 497 | hugetlb_free_pmd_range(tlb, pud, addr, next, floor, | 520 | hugetlb_free_pmd_range(tlb, pud, addr, next, floor, |
| 498 | ceiling); | 521 | ceiling); |
| 499 | } else { | 522 | } else { |
| 523 | #ifdef CONFIG_PPC_FSL_BOOK3E | ||
| 524 | /* | ||
| 525 | * Increment next by the size of the huge mapping since | ||
| 526 | * there may be more than one entry at this level for a | ||
| 527 | * single hugepage, but all of them point to | ||
| 528 | * the same kmem cache that holds the hugepte. | ||
| 529 | */ | ||
| 530 | next = addr + (1 << hugepd_shift(*(hugepd_t *)pud)); | ||
| 531 | #endif | ||
| 500 | free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, | 532 | free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, |
| 501 | addr, next, floor, ceiling); | 533 | addr, next, floor, ceiling); |
| 502 | } | 534 | } |
| 503 | } while (pud++, addr = next, addr != end); | 535 | } while (addr = next, addr != end); |
| 504 | 536 | ||
| 505 | start &= PGDIR_MASK; | 537 | start &= PGDIR_MASK; |
| 506 | if (start < floor) | 538 | if (start < floor) |
| @@ -555,12 +587,12 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, | |||
| 555 | continue; | 587 | continue; |
| 556 | hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); | 588 | hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); |
| 557 | } else { | 589 | } else { |
| 558 | #ifdef CONFIG_PPC32 | 590 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 559 | /* | 591 | /* |
| 560 | * Increment next by the size of the huge mapping since | 592 | * Increment next by the size of the huge mapping since |
| 561 | * on 32-bit there may be more than one entry at the pgd | 593 | * there may be more than one entry at the pgd level |
| 562 | * level for a single hugepage, but all of them point to | 594 | * for a single hugepage, but all of them point to the |
| 563 | * the same kmem cache that holds the hugepte. | 595 | * same kmem cache that holds the hugepte. |
| 564 | */ | 596 | */ |
| 565 | next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd)); | 597 | next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd)); |
| 566 | #endif | 598 | #endif |
| @@ -698,19 +730,17 @@ int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, | |||
| 698 | return 1; | 730 | return 1; |
| 699 | } | 731 | } |
| 700 | 732 | ||
| 733 | #ifdef CONFIG_PPC_MM_SLICES | ||
| 701 | unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | 734 | unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, |
| 702 | unsigned long len, unsigned long pgoff, | 735 | unsigned long len, unsigned long pgoff, |
| 703 | unsigned long flags) | 736 | unsigned long flags) |
| 704 | { | 737 | { |
| 705 | #ifdef CONFIG_PPC_MM_SLICES | ||
| 706 | struct hstate *hstate = hstate_file(file); | 738 | struct hstate *hstate = hstate_file(file); |
| 707 | int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate)); | 739 | int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate)); |
| 708 | 740 | ||
| 709 | return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0); | 741 | return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0); |
| 710 | #else | ||
| 711 | return get_unmapped_area(file, addr, len, pgoff, flags); | ||
| 712 | #endif | ||
| 713 | } | 742 | } |
| 743 | #endif | ||
| 714 | 744 | ||
| 715 | unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) | 745 | unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) |
| 716 | { | 746 | { |
| @@ -784,7 +814,7 @@ static int __init hugepage_setup_sz(char *str) | |||
| 784 | } | 814 | } |
| 785 | __setup("hugepagesz=", hugepage_setup_sz); | 815 | __setup("hugepagesz=", hugepage_setup_sz); |
| 786 | 816 | ||
| 787 | #ifdef CONFIG_FSL_BOOKE | 817 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 788 | struct kmem_cache *hugepte_cache; | 818 | struct kmem_cache *hugepte_cache; |
| 789 | static int __init hugetlbpage_init(void) | 819 | static int __init hugetlbpage_init(void) |
| 790 | { | 820 | { |
diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c new file mode 100644 index 000000000000..5d9a59eaad93 --- /dev/null +++ b/arch/powerpc/mm/icswx.c | |||
| @@ -0,0 +1,273 @@ | |||
| 1 | /* | ||
| 2 | * ICSWX and ACOP Management | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or | ||
| 7 | * modify it under the terms of the GNU General Public License | ||
| 8 | * as published by the Free Software Foundation; either version | ||
| 9 | * 2 of the License, or (at your option) any later version. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/sched.h> | ||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/errno.h> | ||
| 16 | #include <linux/types.h> | ||
| 17 | #include <linux/mm.h> | ||
| 18 | #include <linux/spinlock.h> | ||
| 19 | #include <linux/module.h> | ||
| 20 | #include <linux/uaccess.h> | ||
| 21 | |||
| 22 | #include "icswx.h" | ||
| 23 | |||
| 24 | /* | ||
| 25 | * The processor and its L2 cache cause the icswx instruction to | ||
| 26 | * generate a COP_REQ transaction on PowerBus. The transaction has no | ||
| 27 | * address, and the processor does not perform an MMU access to | ||
| 28 | * authenticate the transaction. The command portion of the PowerBus | ||
| 29 | * COP_REQ transaction includes the LPAR_ID (LPID) and the coprocessor | ||
| 30 | * Process ID (PID), which the coprocessor compares to the authorized | ||
| 31 | * LPID and PID held in the coprocessor, to determine if the process | ||
| 32 | * is authorized to generate the transaction. The data of the COP_REQ | ||
| 33 | * transaction is 128-byte or less in size and is placed in cacheable | ||
| 34 | * memory on a 128-byte cache line boundary. | ||
| 35 | * | ||
| 36 | * The task to use a coprocessor should use use_cop() to mark the use | ||
| 37 | * of the Coprocessor Type (CT) and context switching. On a server | ||
| 38 | * class processor, the PID register is used only for coprocessor | ||
| 39 | * management and so a coprocessor PID is allocated before | ||
| 40 | * executing the icswx instruction. Drop_cop() is used to free the | ||
| 41 | * coprocessor PID. | ||
| 42 | * | ||
| 43 | * Example: | ||
| 44 | * Host Fabric Interface (HFI) is a PowerPC network coprocessor. | ||
| 45 | * Each HFI has multiple windows. Each HFI window serves as a | ||
| 46 | * network device sending to and receiving from HFI network. | ||
| 47 | * HFI immediate send function uses icswx instruction. The immediate | ||
| 48 | * send function allows small (single cache-line) packets be sent | ||
| 49 | * without using the regular HFI send FIFO and doorbell, which are | ||
| 50 | * much slower than immediate send. | ||
| 51 | * | ||
| 52 | * For each task intending to use HFI immediate send, the HFI driver | ||
| 53 | * calls use_cop() to obtain a coprocessor PID for the task. | ||
| 54 | * The HFI driver then allocates a free HFI window and saves the | ||
| 55 | * coprocessor PID to the HFI window to allow the task to use the | ||
| 56 | * HFI window. | ||
| 57 | * | ||
| 58 | * The HFI driver repeatedly creates immediate send packets and | ||
| 59 | * issues icswx instruction to send data through the HFI window. | ||
| 60 | * The HFI compares the coprocessor PID in the CPU PID register | ||
| 61 | * to the PID held in the HFI window to determine if the transaction | ||
| 62 | * is allowed. | ||
| 63 | * | ||
| 64 | * When the task wants to release the HFI window, the HFI driver calls | ||
| 65 | * drop_cop() to release the coprocessor PID. | ||
| 66 | */ | ||
| 67 | |||
| 68 | void switch_cop(struct mm_struct *next) | ||
| 69 | { | ||
| 70 | #ifdef CONFIG_ICSWX_PID | ||
| 71 | mtspr(SPRN_PID, next->context.cop_pid); | ||
| 72 | #endif | ||
| 73 | mtspr(SPRN_ACOP, next->context.acop); | ||
| 74 | } | ||
| 75 | |||
| 76 | /** | ||
| 77 | * Start using a coprocessor. | ||
| 78 | * @acop: mask of coprocessor to be used. | ||
| 79 | * @mm: The mm the coprocessor to associate with. Most likely current mm. | ||
| 80 | * | ||
| 81 | * Return a positive PID if successful. Negative errno otherwise. | ||
| 82 | * The returned PID will be fed to the coprocessor to determine if an | ||
| 83 | * icswx transaction is authenticated. | ||
| 84 | */ | ||
| 85 | int use_cop(unsigned long acop, struct mm_struct *mm) | ||
| 86 | { | ||
| 87 | int ret; | ||
| 88 | |||
| 89 | if (!cpu_has_feature(CPU_FTR_ICSWX)) | ||
| 90 | return -ENODEV; | ||
| 91 | |||
| 92 | if (!mm || !acop) | ||
| 93 | return -EINVAL; | ||
| 94 | |||
| 95 | /* The page_table_lock ensures mm_users won't change under us */ | ||
| 96 | spin_lock(&mm->page_table_lock); | ||
| 97 | spin_lock(mm->context.cop_lockp); | ||
| 98 | |||
| 99 | ret = get_cop_pid(mm); | ||
| 100 | if (ret < 0) | ||
| 101 | goto out; | ||
| 102 | |||
| 103 | /* update acop */ | ||
| 104 | mm->context.acop |= acop; | ||
| 105 | |||
| 106 | sync_cop(mm); | ||
| 107 | |||
| 108 | /* | ||
| 109 | * If this is a threaded process then there might be other threads | ||
| 110 | * running. We need to send an IPI to force them to pick up any | ||
| 111 | * change in PID and ACOP. | ||
| 112 | */ | ||
| 113 | if (atomic_read(&mm->mm_users) > 1) | ||
| 114 | smp_call_function(sync_cop, mm, 1); | ||
| 115 | |||
| 116 | out: | ||
| 117 | spin_unlock(mm->context.cop_lockp); | ||
| 118 | spin_unlock(&mm->page_table_lock); | ||
| 119 | |||
| 120 | return ret; | ||
| 121 | } | ||
| 122 | EXPORT_SYMBOL_GPL(use_cop); | ||
| 123 | |||
| 124 | /** | ||
| 125 | * Stop using a coprocessor. | ||
| 126 | * @acop: mask of coprocessor to be stopped. | ||
| 127 | * @mm: The mm the coprocessor associated with. | ||
| 128 | */ | ||
| 129 | void drop_cop(unsigned long acop, struct mm_struct *mm) | ||
| 130 | { | ||
| 131 | int free_pid; | ||
| 132 | |||
| 133 | if (!cpu_has_feature(CPU_FTR_ICSWX)) | ||
| 134 | return; | ||
| 135 | |||
| 136 | if (WARN_ON_ONCE(!mm)) | ||
| 137 | return; | ||
| 138 | |||
| 139 | /* The page_table_lock ensures mm_users won't change under us */ | ||
| 140 | spin_lock(&mm->page_table_lock); | ||
| 141 | spin_lock(mm->context.cop_lockp); | ||
| 142 | |||
| 143 | mm->context.acop &= ~acop; | ||
| 144 | |||
| 145 | free_pid = disable_cop_pid(mm); | ||
| 146 | sync_cop(mm); | ||
| 147 | |||
| 148 | /* | ||
| 149 | * If this is a threaded process then there might be other threads | ||
| 150 | * running. We need to send an IPI to force them to pick up any | ||
| 151 | * change in PID and ACOP. | ||
| 152 | */ | ||
| 153 | if (atomic_read(&mm->mm_users) > 1) | ||
| 154 | smp_call_function(sync_cop, mm, 1); | ||
| 155 | |||
| 156 | if (free_pid != COP_PID_NONE) | ||
| 157 | free_cop_pid(free_pid); | ||
| 158 | |||
| 159 | spin_unlock(mm->context.cop_lockp); | ||
| 160 | spin_unlock(&mm->page_table_lock); | ||
| 161 | } | ||
| 162 | EXPORT_SYMBOL_GPL(drop_cop); | ||
| 163 | |||
| 164 | static int acop_use_cop(int ct) | ||
| 165 | { | ||
| 166 | /* todo */ | ||
| 167 | return -1; | ||
| 168 | } | ||
| 169 | |||
| 170 | /* | ||
| 171 | * Get the instruction word at the NIP | ||
| 172 | */ | ||
| 173 | static u32 acop_get_inst(struct pt_regs *regs) | ||
| 174 | { | ||
| 175 | u32 inst; | ||
| 176 | u32 __user *p; | ||
| 177 | |||
| 178 | p = (u32 __user *)regs->nip; | ||
| 179 | if (!access_ok(VERIFY_READ, p, sizeof(*p))) | ||
| 180 | return 0; | ||
| 181 | |||
| 182 | if (__get_user(inst, p)) | ||
| 183 | return 0; | ||
| 184 | |||
| 185 | return inst; | ||
| 186 | } | ||
| 187 | |||
| 188 | /** | ||
| 189 | * @regs: registers at time of interrupt | ||
| 190 | * @address: storage address | ||
| 191 | * @error_code: Fault code, usually the DSISR or ESR depending on | ||
| 192 | * processor type | ||
| 193 | * | ||
| 194 | * Return 0 if we are able to resolve the data storage fault that | ||
| 195 | * results from a CT miss in the ACOP register. | ||
| 196 | */ | ||
| 197 | int acop_handle_fault(struct pt_regs *regs, unsigned long address, | ||
| 198 | unsigned long error_code) | ||
| 199 | { | ||
| 200 | int ct; | ||
| 201 | u32 inst = 0; | ||
| 202 | |||
| 203 | if (!cpu_has_feature(CPU_FTR_ICSWX)) { | ||
| 204 | pr_info("No coprocessors available"); | ||
| 205 | _exception(SIGILL, regs, ILL_ILLOPN, address); | ||
| 206 | } | ||
| 207 | |||
| 208 | if (!user_mode(regs)) { | ||
| 209 | /* this could happen if the HV denies the | ||
| 210 | * kernel access, for now we just die */ | ||
| 211 | die("ICSWX from kernel failed", regs, SIGSEGV); | ||
| 212 | } | ||
| 213 | |||
| 214 | /* Some implementations leave us a hint for the CT */ | ||
| 215 | ct = ICSWX_GET_CT_HINT(error_code); | ||
| 216 | if (ct < 0) { | ||
| 217 | /* we have to peek at the instruction word to figure out CT */ | ||
| 218 | u32 ccw; | ||
| 219 | u32 rs; | ||
| 220 | |||
| 221 | inst = acop_get_inst(regs); | ||
| 222 | if (inst == 0) | ||
| 223 | return -1; | ||
| 224 | |||
| 225 | rs = (inst >> (31 - 10)) & 0x1f; | ||
| 226 | ccw = regs->gpr[rs]; | ||
| 227 | ct = (ccw >> 16) & 0x3f; | ||
| 228 | } | ||
| 229 | |||
| 230 | if (!acop_use_cop(ct)) | ||
| 231 | return 0; | ||
| 232 | |||
| 233 | /* at this point the CT is unknown to the system */ | ||
| 234 | pr_warn("%s[%d]: Coprocessor %d is unavailable", | ||
| 235 | current->comm, current->pid, ct); | ||
| 236 | |||
| 237 | /* get inst if we don't already have it */ | ||
| 238 | if (inst == 0) { | ||
| 239 | inst = acop_get_inst(regs); | ||
| 240 | if (inst == 0) | ||
| 241 | return -1; | ||
| 242 | } | ||
| 243 | |||
| 244 | /* Check if the instruction is the "record form" */ | ||
| 245 | if (inst & 1) { | ||
| 246 | /* | ||
| 247 | * the instruction is "record" form so we can reject | ||
| 248 | * using CR0 | ||
| 249 | */ | ||
| 250 | regs->ccr &= ~(0xful << 28); | ||
| 251 | regs->ccr |= ICSWX_RC_NOT_FOUND << 28; | ||
| 252 | |||
| 253 | /* Move on to the next instruction */ | ||
| 254 | regs->nip += 4; | ||
| 255 | } else { | ||
| 256 | /* | ||
| 257 | * There is no architected mechanism to report a bad | ||
| 258 | * CT so we could either SIGILL or report nothing. | ||
| 259 | * Since the non-record version should only be used | ||
| 260 | * for "hints" or "don't care" we should probably do | ||
| 261 | * nothing. However, I could see how some people | ||
| 262 | * might want a SIGILL, so it is here if you want it. | ||
| 263 | */ | ||
| 264 | #ifdef CONFIG_PPC_ICSWX_USE_SIGILL | ||
| 265 | _exception(SIGILL, regs, ILL_ILLOPN, address); | ||
| 266 | #else | ||
| 267 | regs->nip += 4; | ||
| 268 | #endif | ||
| 269 | } | ||
| 270 | |||
| 271 | return 0; | ||
| 272 | } | ||
| 273 | EXPORT_SYMBOL_GPL(acop_handle_fault); | ||
diff --git a/arch/powerpc/mm/icswx.h b/arch/powerpc/mm/icswx.h new file mode 100644 index 000000000000..42176bd0884c --- /dev/null +++ b/arch/powerpc/mm/icswx.h | |||
| @@ -0,0 +1,62 @@ | |||
| 1 | #ifndef _ARCH_POWERPC_MM_ICSWX_H_ | ||
| 2 | #define _ARCH_POWERPC_MM_ICSWX_H_ | ||
| 3 | |||
| 4 | /* | ||
| 5 | * ICSWX and ACOP Management | ||
| 6 | * | ||
| 7 | * Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org> | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or | ||
| 10 | * modify it under the terms of the GNU General Public License | ||
| 11 | * as published by the Free Software Foundation; either version | ||
| 12 | * 2 of the License, or (at your option) any later version. | ||
| 13 | * | ||
| 14 | */ | ||
| 15 | |||
| 16 | #include <asm/mmu_context.h> | ||
| 17 | |||
| 18 | /* also used to denote that PIDs are not used */ | ||
| 19 | #define COP_PID_NONE 0 | ||
| 20 | |||
| 21 | static inline void sync_cop(void *arg) | ||
| 22 | { | ||
| 23 | struct mm_struct *mm = arg; | ||
| 24 | |||
| 25 | if (mm == current->active_mm) | ||
| 26 | switch_cop(current->active_mm); | ||
| 27 | } | ||
| 28 | |||
| 29 | #ifdef CONFIG_PPC_ICSWX_PID | ||
| 30 | extern int get_cop_pid(struct mm_struct *mm); | ||
| 31 | extern int disable_cop_pid(struct mm_struct *mm); | ||
| 32 | extern void free_cop_pid(int free_pid); | ||
| 33 | #else | ||
| 34 | #define get_cop_pid(m) (COP_PID_NONE) | ||
| 35 | #define disable_cop_pid(m) (COP_PID_NONE) | ||
| 36 | #define free_cop_pid(p) | ||
| 37 | #endif | ||
| 38 | |||
| 39 | /* | ||
| 40 | * These are implementation bits for architected registers. If this | ||
| 41 | * ever becomes architected, they should be moved to reg.h et al. | ||
| 42 | */ | ||
| 43 | /* UCT is the same bit for Server and Embedded */ | ||
| 44 | #define ICSWX_DSI_UCT 0x00004000 /* Unavailable Coprocessor Type */ | ||
| 45 | |||
| 46 | #ifdef CONFIG_PPC_BOOK3E | ||
| 47 | /* Embedded implementation gives us no hints as to what the CT is */ | ||
| 48 | #define ICSWX_GET_CT_HINT(x) (-1) | ||
| 49 | #else | ||
| 50 | /* Server implementation contains the CT value in the DSISR */ | ||
| 51 | #define ICSWX_DSISR_CTMASK 0x00003f00 | ||
| 52 | #define ICSWX_GET_CT_HINT(x) (((x) & ICSWX_DSISR_CTMASK) >> 8) | ||
| 53 | #endif | ||
| 54 | |||
| 55 | #define ICSWX_RC_STARTED 0x8 /* The request has been started */ | ||
| 56 | #define ICSWX_RC_NOT_IDLE 0x4 /* No coprocessor found idle */ | ||
| 57 | #define ICSWX_RC_NOT_FOUND 0x2 /* No coprocessor found */ | ||
| 58 | #define ICSWX_RC_UNDEFINED 0x1 /* Reserved */ | ||
| 59 | |||
| 60 | extern int acop_handle_fault(struct pt_regs *regs, unsigned long address, | ||
| 61 | unsigned long error_code); | ||
| 62 | #endif /* !_ARCH_POWERPC_MM_ICSWX_H_ */ | ||
diff --git a/arch/powerpc/mm/icswx_pid.c b/arch/powerpc/mm/icswx_pid.c new file mode 100644 index 000000000000..91e30eb7d054 --- /dev/null +++ b/arch/powerpc/mm/icswx_pid.c | |||
| @@ -0,0 +1,87 @@ | |||
| 1 | /* | ||
| 2 | * ICSWX and ACOP/PID Management | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or | ||
| 7 | * modify it under the terms of the GNU General Public License | ||
| 8 | * as published by the Free Software Foundation; either version | ||
| 9 | * 2 of the License, or (at your option) any later version. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/sched.h> | ||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/errno.h> | ||
| 16 | #include <linux/types.h> | ||
| 17 | #include <linux/mm.h> | ||
| 18 | #include <linux/spinlock.h> | ||
| 19 | #include <linux/idr.h> | ||
| 20 | #include <linux/module.h> | ||
| 21 | #include "icswx.h" | ||
| 22 | |||
| 23 | #define COP_PID_MIN (COP_PID_NONE + 1) | ||
| 24 | #define COP_PID_MAX (0xFFFF) | ||
| 25 | |||
| 26 | static DEFINE_SPINLOCK(mmu_context_acop_lock); | ||
| 27 | static DEFINE_IDA(cop_ida); | ||
| 28 | |||
| 29 | static int new_cop_pid(struct ida *ida, int min_id, int max_id, | ||
| 30 | spinlock_t *lock) | ||
| 31 | { | ||
| 32 | int index; | ||
| 33 | int err; | ||
| 34 | |||
| 35 | again: | ||
| 36 | if (!ida_pre_get(ida, GFP_KERNEL)) | ||
| 37 | return -ENOMEM; | ||
| 38 | |||
| 39 | spin_lock(lock); | ||
| 40 | err = ida_get_new_above(ida, min_id, &index); | ||
| 41 | spin_unlock(lock); | ||
| 42 | |||
| 43 | if (err == -EAGAIN) | ||
| 44 | goto again; | ||
| 45 | else if (err) | ||
| 46 | return err; | ||
| 47 | |||
| 48 | if (index > max_id) { | ||
| 49 | spin_lock(lock); | ||
| 50 | ida_remove(ida, index); | ||
| 51 | spin_unlock(lock); | ||
| 52 | return -ENOMEM; | ||
| 53 | } | ||
| 54 | |||
| 55 | return index; | ||
| 56 | } | ||
| 57 | |||
| 58 | int get_cop_pid(struct mm_struct *mm) | ||
| 59 | { | ||
| 60 | int pid; | ||
| 61 | |||
| 62 | if (mm->context.cop_pid == COP_PID_NONE) { | ||
| 63 | pid = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX, | ||
| 64 | &mmu_context_acop_lock); | ||
| 65 | if (pid >= 0) | ||
| 66 | mm->context.cop_pid = pid; | ||
| 67 | } | ||
| 68 | return mm->context.cop_pid; | ||
| 69 | } | ||
| 70 | |||
| 71 | int disable_cop_pid(struct mm_struct *mm) | ||
| 72 | { | ||
| 73 | int free_pid = COP_PID_NONE; | ||
| 74 | |||
| 75 | if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { | ||
| 76 | free_pid = mm->context.cop_pid; | ||
| 77 | mm->context.cop_pid = COP_PID_NONE; | ||
| 78 | } | ||
| 79 | return free_pid; | ||
| 80 | } | ||
| 81 | |||
| 82 | void free_cop_pid(int free_pid) | ||
| 83 | { | ||
| 84 | spin_lock(&mmu_context_acop_lock); | ||
| 85 | ida_remove(&cop_ida, free_pid); | ||
| 86 | spin_unlock(&mmu_context_acop_lock); | ||
| 87 | } | ||
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 58861fa1220e..6157be2a7049 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c | |||
| @@ -65,6 +65,13 @@ phys_addr_t memstart_addr = (phys_addr_t)~0ull; | |||
| 65 | EXPORT_SYMBOL(memstart_addr); | 65 | EXPORT_SYMBOL(memstart_addr); |
| 66 | phys_addr_t kernstart_addr; | 66 | phys_addr_t kernstart_addr; |
| 67 | EXPORT_SYMBOL(kernstart_addr); | 67 | EXPORT_SYMBOL(kernstart_addr); |
| 68 | |||
| 69 | #ifdef CONFIG_RELOCATABLE_PPC32 | ||
| 70 | /* Used in __va()/__pa() */ | ||
| 71 | long long virt_phys_offset; | ||
| 72 | EXPORT_SYMBOL(virt_phys_offset); | ||
| 73 | #endif | ||
| 74 | |||
| 68 | phys_addr_t lowmem_end_addr; | 75 | phys_addr_t lowmem_end_addr; |
| 69 | 76 | ||
| 70 | int boot_mapsize; | 77 | int boot_mapsize; |
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 8e2eb6611b0b..d974b79a3068 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c | |||
| @@ -51,6 +51,7 @@ | |||
| 51 | #include <asm/vdso.h> | 51 | #include <asm/vdso.h> |
| 52 | #include <asm/fixmap.h> | 52 | #include <asm/fixmap.h> |
| 53 | #include <asm/swiotlb.h> | 53 | #include <asm/swiotlb.h> |
| 54 | #include <asm/rtas.h> | ||
| 54 | 55 | ||
| 55 | #include "mmu_decl.h" | 56 | #include "mmu_decl.h" |
| 56 | 57 | ||
| @@ -553,7 +554,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, | |||
| 553 | #if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \ | 554 | #if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \ |
| 554 | && defined(CONFIG_HUGETLB_PAGE) | 555 | && defined(CONFIG_HUGETLB_PAGE) |
| 555 | if (is_vm_hugetlb_page(vma)) | 556 | if (is_vm_hugetlb_page(vma)) |
| 556 | book3e_hugetlb_preload(vma->vm_mm, address, *ptep); | 557 | book3e_hugetlb_preload(vma, address, *ptep); |
| 557 | #endif | 558 | #endif |
| 558 | } | 559 | } |
| 559 | 560 | ||
| @@ -585,3 +586,23 @@ static int add_system_ram_resources(void) | |||
| 585 | return 0; | 586 | return 0; |
| 586 | } | 587 | } |
| 587 | subsys_initcall(add_system_ram_resources); | 588 | subsys_initcall(add_system_ram_resources); |
| 589 | |||
| 590 | #ifdef CONFIG_STRICT_DEVMEM | ||
| 591 | /* | ||
| 592 | * devmem_is_allowed(): check to see if /dev/mem access to a certain address | ||
| 593 | * is valid. The argument is a physical page number. | ||
| 594 | * | ||
| 595 | * Access has to be given to non-kernel-ram areas as well, these contain the | ||
| 596 | * PCI mmio resources as well as potential bios/acpi data regions. | ||
| 597 | */ | ||
| 598 | int devmem_is_allowed(unsigned long pfn) | ||
| 599 | { | ||
| 600 | if (iomem_is_exclusive(pfn << PAGE_SHIFT)) | ||
| 601 | return 0; | ||
| 602 | if (!page_is_ram(pfn)) | ||
| 603 | return 1; | ||
| 604 | if (page_is_rtas_user_buf(pfn)) | ||
| 605 | return 1; | ||
| 606 | return 0; | ||
| 607 | } | ||
| 608 | #endif /* CONFIG_STRICT_DEVMEM */ | ||
diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c index 5a783d8e8e8e..67a42ed0d2fc 100644 --- a/arch/powerpc/mm/mmap_64.c +++ b/arch/powerpc/mm/mmap_64.c | |||
| @@ -53,14 +53,6 @@ static inline int mmap_is_legacy(void) | |||
| 53 | return sysctl_legacy_va_layout; | 53 | return sysctl_legacy_va_layout; |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | /* | ||
| 57 | * Since get_random_int() returns the same value within a 1 jiffy window, | ||
| 58 | * we will almost always get the same randomisation for the stack and mmap | ||
| 59 | * region. This will mean the relative distance between stack and mmap will | ||
| 60 | * be the same. | ||
| 61 | * | ||
| 62 | * To avoid this we can shift the randomness by 1 bit. | ||
| 63 | */ | ||
| 64 | static unsigned long mmap_rnd(void) | 56 | static unsigned long mmap_rnd(void) |
| 65 | { | 57 | { |
| 66 | unsigned long rnd = 0; | 58 | unsigned long rnd = 0; |
| @@ -68,11 +60,11 @@ static unsigned long mmap_rnd(void) | |||
| 68 | if (current->flags & PF_RANDOMIZE) { | 60 | if (current->flags & PF_RANDOMIZE) { |
| 69 | /* 8MB for 32bit, 1GB for 64bit */ | 61 | /* 8MB for 32bit, 1GB for 64bit */ |
| 70 | if (is_32bit_task()) | 62 | if (is_32bit_task()) |
| 71 | rnd = (long)(get_random_int() % (1<<(22-PAGE_SHIFT))); | 63 | rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); |
| 72 | else | 64 | else |
| 73 | rnd = (long)(get_random_int() % (1<<(29-PAGE_SHIFT))); | 65 | rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); |
| 74 | } | 66 | } |
| 75 | return (rnd << PAGE_SHIFT) * 2; | 67 | return rnd << PAGE_SHIFT; |
| 76 | } | 68 | } |
| 77 | 69 | ||
| 78 | static inline unsigned long mmap_base(void) | 70 | static inline unsigned long mmap_base(void) |
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c index ca988a3d5fb2..40677aa0190e 100644 --- a/arch/powerpc/mm/mmu_context_hash64.c +++ b/arch/powerpc/mm/mmu_context_hash64.c | |||
| @@ -24,200 +24,7 @@ | |||
| 24 | 24 | ||
| 25 | #include <asm/mmu_context.h> | 25 | #include <asm/mmu_context.h> |
| 26 | 26 | ||
| 27 | #ifdef CONFIG_PPC_ICSWX | 27 | #include "icswx.h" |
| 28 | /* | ||
| 29 | * The processor and its L2 cache cause the icswx instruction to | ||
| 30 | * generate a COP_REQ transaction on PowerBus. The transaction has | ||
| 31 | * no address, and the processor does not perform an MMU access | ||
| 32 | * to authenticate the transaction. The command portion of the | ||
| 33 | * PowerBus COP_REQ transaction includes the LPAR_ID (LPID) and | ||
| 34 | * the coprocessor Process ID (PID), which the coprocessor compares | ||
| 35 | * to the authorized LPID and PID held in the coprocessor, to determine | ||
| 36 | * if the process is authorized to generate the transaction. | ||
| 37 | * The data of the COP_REQ transaction is 128-byte or less and is | ||
| 38 | * placed in cacheable memory on a 128-byte cache line boundary. | ||
| 39 | * | ||
| 40 | * The task to use a coprocessor should use use_cop() to allocate | ||
| 41 | * a coprocessor PID before executing icswx instruction. use_cop() | ||
| 42 | * also enables the coprocessor context switching. Drop_cop() is | ||
| 43 | * used to free the coprocessor PID. | ||
| 44 | * | ||
| 45 | * Example: | ||
| 46 | * Host Fabric Interface (HFI) is a PowerPC network coprocessor. | ||
| 47 | * Each HFI have multiple windows. Each HFI window serves as a | ||
| 48 | * network device sending to and receiving from HFI network. | ||
| 49 | * HFI immediate send function uses icswx instruction. The immediate | ||
| 50 | * send function allows small (single cache-line) packets be sent | ||
| 51 | * without using the regular HFI send FIFO and doorbell, which are | ||
| 52 | * much slower than immediate send. | ||
| 53 | * | ||
| 54 | * For each task intending to use HFI immediate send, the HFI driver | ||
| 55 | * calls use_cop() to obtain a coprocessor PID for the task. | ||
| 56 | * The HFI driver then allocates a free HFI window and saves the | ||
| 57 | * coprocessor PID to the HFI window to allow the task to use the | ||
| 58 | * HFI window. | ||
| 59 | * | ||
| 60 | * The HFI driver repeatedly creates immediate send packets and | ||
| 61 | * issues icswx instruction to send data through the HFI window. | ||
| 62 | * The HFI compares the coprocessor PID in the CPU PID register | ||
| 63 | * to the PID held in the HFI window to determine if the transaction | ||
| 64 | * is allowed. | ||
| 65 | * | ||
| 66 | * When the task wants to release the HFI window, the HFI driver calls | ||
| 67 | * drop_cop() to release the coprocessor PID. | ||
| 68 | */ | ||
| 69 | |||
| 70 | #define COP_PID_NONE 0 | ||
| 71 | #define COP_PID_MIN (COP_PID_NONE + 1) | ||
| 72 | #define COP_PID_MAX (0xFFFF) | ||
| 73 | |||
| 74 | static DEFINE_SPINLOCK(mmu_context_acop_lock); | ||
| 75 | static DEFINE_IDA(cop_ida); | ||
| 76 | |||
| 77 | void switch_cop(struct mm_struct *next) | ||
| 78 | { | ||
| 79 | mtspr(SPRN_PID, next->context.cop_pid); | ||
| 80 | mtspr(SPRN_ACOP, next->context.acop); | ||
| 81 | } | ||
| 82 | |||
| 83 | static int new_cop_pid(struct ida *ida, int min_id, int max_id, | ||
| 84 | spinlock_t *lock) | ||
| 85 | { | ||
| 86 | int index; | ||
| 87 | int err; | ||
| 88 | |||
| 89 | again: | ||
| 90 | if (!ida_pre_get(ida, GFP_KERNEL)) | ||
| 91 | return -ENOMEM; | ||
| 92 | |||
| 93 | spin_lock(lock); | ||
| 94 | err = ida_get_new_above(ida, min_id, &index); | ||
| 95 | spin_unlock(lock); | ||
| 96 | |||
| 97 | if (err == -EAGAIN) | ||
| 98 | goto again; | ||
| 99 | else if (err) | ||
| 100 | return err; | ||
| 101 | |||
| 102 | if (index > max_id) { | ||
| 103 | spin_lock(lock); | ||
| 104 | ida_remove(ida, index); | ||
| 105 | spin_unlock(lock); | ||
| 106 | return -ENOMEM; | ||
| 107 | } | ||
| 108 | |||
| 109 | return index; | ||
| 110 | } | ||
| 111 | |||
| 112 | static void sync_cop(void *arg) | ||
| 113 | { | ||
| 114 | struct mm_struct *mm = arg; | ||
| 115 | |||
| 116 | if (mm == current->active_mm) | ||
| 117 | switch_cop(current->active_mm); | ||
| 118 | } | ||
| 119 | |||
| 120 | /** | ||
| 121 | * Start using a coprocessor. | ||
| 122 | * @acop: mask of coprocessor to be used. | ||
| 123 | * @mm: The mm the coprocessor to associate with. Most likely current mm. | ||
| 124 | * | ||
| 125 | * Return a positive PID if successful. Negative errno otherwise. | ||
| 126 | * The returned PID will be fed to the coprocessor to determine if an | ||
| 127 | * icswx transaction is authenticated. | ||
| 128 | */ | ||
| 129 | int use_cop(unsigned long acop, struct mm_struct *mm) | ||
| 130 | { | ||
| 131 | int ret; | ||
| 132 | |||
| 133 | if (!cpu_has_feature(CPU_FTR_ICSWX)) | ||
| 134 | return -ENODEV; | ||
| 135 | |||
| 136 | if (!mm || !acop) | ||
| 137 | return -EINVAL; | ||
| 138 | |||
| 139 | /* The page_table_lock ensures mm_users won't change under us */ | ||
| 140 | spin_lock(&mm->page_table_lock); | ||
| 141 | spin_lock(mm->context.cop_lockp); | ||
| 142 | |||
| 143 | if (mm->context.cop_pid == COP_PID_NONE) { | ||
| 144 | ret = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX, | ||
| 145 | &mmu_context_acop_lock); | ||
| 146 | if (ret < 0) | ||
| 147 | goto out; | ||
| 148 | |||
| 149 | mm->context.cop_pid = ret; | ||
| 150 | } | ||
| 151 | mm->context.acop |= acop; | ||
| 152 | |||
| 153 | sync_cop(mm); | ||
| 154 | |||
| 155 | /* | ||
| 156 | * If this is a threaded process then there might be other threads | ||
| 157 | * running. We need to send an IPI to force them to pick up any | ||
| 158 | * change in PID and ACOP. | ||
| 159 | */ | ||
| 160 | if (atomic_read(&mm->mm_users) > 1) | ||
| 161 | smp_call_function(sync_cop, mm, 1); | ||
| 162 | |||
| 163 | ret = mm->context.cop_pid; | ||
| 164 | |||
| 165 | out: | ||
| 166 | spin_unlock(mm->context.cop_lockp); | ||
| 167 | spin_unlock(&mm->page_table_lock); | ||
| 168 | |||
| 169 | return ret; | ||
| 170 | } | ||
| 171 | EXPORT_SYMBOL_GPL(use_cop); | ||
| 172 | |||
| 173 | /** | ||
| 174 | * Stop using a coprocessor. | ||
| 175 | * @acop: mask of coprocessor to be stopped. | ||
| 176 | * @mm: The mm the coprocessor associated with. | ||
| 177 | */ | ||
| 178 | void drop_cop(unsigned long acop, struct mm_struct *mm) | ||
| 179 | { | ||
| 180 | int free_pid = COP_PID_NONE; | ||
| 181 | |||
| 182 | if (!cpu_has_feature(CPU_FTR_ICSWX)) | ||
| 183 | return; | ||
| 184 | |||
| 185 | if (WARN_ON_ONCE(!mm)) | ||
| 186 | return; | ||
| 187 | |||
| 188 | /* The page_table_lock ensures mm_users won't change under us */ | ||
| 189 | spin_lock(&mm->page_table_lock); | ||
| 190 | spin_lock(mm->context.cop_lockp); | ||
| 191 | |||
| 192 | mm->context.acop &= ~acop; | ||
| 193 | |||
| 194 | if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { | ||
| 195 | free_pid = mm->context.cop_pid; | ||
| 196 | mm->context.cop_pid = COP_PID_NONE; | ||
| 197 | } | ||
| 198 | |||
| 199 | sync_cop(mm); | ||
| 200 | |||
| 201 | /* | ||
| 202 | * If this is a threaded process then there might be other threads | ||
| 203 | * running. We need to send an IPI to force them to pick up any | ||
| 204 | * change in PID and ACOP. | ||
| 205 | */ | ||
| 206 | if (atomic_read(&mm->mm_users) > 1) | ||
| 207 | smp_call_function(sync_cop, mm, 1); | ||
| 208 | |||
| 209 | if (free_pid != COP_PID_NONE) { | ||
| 210 | spin_lock(&mmu_context_acop_lock); | ||
| 211 | ida_remove(&cop_ida, free_pid); | ||
| 212 | spin_unlock(&mmu_context_acop_lock); | ||
| 213 | } | ||
| 214 | |||
| 215 | spin_unlock(mm->context.cop_lockp); | ||
| 216 | spin_unlock(&mm->page_table_lock); | ||
| 217 | } | ||
| 218 | EXPORT_SYMBOL_GPL(drop_cop); | ||
| 219 | |||
| 220 | #endif /* CONFIG_PPC_ICSWX */ | ||
| 221 | 28 | ||
| 222 | static DEFINE_SPINLOCK(mmu_context_lock); | 29 | static DEFINE_SPINLOCK(mmu_context_lock); |
| 223 | static DEFINE_IDA(mmu_context_ida); | 30 | static DEFINE_IDA(mmu_context_ida); |
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index e6eea0ac80c8..c0189c169bbb 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
| @@ -386,7 +386,7 @@ static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells) | |||
| 386 | of_node_put(memory); | 386 | of_node_put(memory); |
| 387 | } | 387 | } |
| 388 | 388 | ||
| 389 | static unsigned long __devinit read_n_cells(int n, const unsigned int **buf) | 389 | static unsigned long read_n_cells(int n, const unsigned int **buf) |
| 390 | { | 390 | { |
| 391 | unsigned long result = 0; | 391 | unsigned long result = 0; |
| 392 | 392 | ||
| @@ -947,7 +947,7 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = { | |||
| 947 | .priority = 1 /* Must run before sched domains notifier. */ | 947 | .priority = 1 /* Must run before sched domains notifier. */ |
| 948 | }; | 948 | }; |
| 949 | 949 | ||
| 950 | static void mark_reserved_regions_for_nid(int nid) | 950 | static void __init mark_reserved_regions_for_nid(int nid) |
| 951 | { | 951 | { |
| 952 | struct pglist_data *node = NODE_DATA(nid); | 952 | struct pglist_data *node = NODE_DATA(nid); |
| 953 | struct memblock_region *reg; | 953 | struct memblock_region *reg; |
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index dc4a5f385e41..ff672bd8fea9 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S | |||
| @@ -94,11 +94,11 @@ | |||
| 94 | 94 | ||
| 95 | srdi r15,r16,60 /* get region */ | 95 | srdi r15,r16,60 /* get region */ |
| 96 | rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 | 96 | rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 |
| 97 | bne- dtlb_miss_fault_bolted | 97 | bne- dtlb_miss_fault_bolted /* Bail if fault addr is invalid */ |
| 98 | 98 | ||
| 99 | rlwinm r10,r11,32-19,27,27 | 99 | rlwinm r10,r11,32-19,27,27 |
| 100 | rlwimi r10,r11,32-16,19,19 | 100 | rlwimi r10,r11,32-16,19,19 |
| 101 | cmpwi r15,0 | 101 | cmpwi r15,0 /* user vs kernel check */ |
| 102 | ori r10,r10,_PAGE_PRESENT | 102 | ori r10,r10,_PAGE_PRESENT |
| 103 | oris r11,r10,_PAGE_ACCESSED@h | 103 | oris r11,r10,_PAGE_ACCESSED@h |
| 104 | 104 | ||
| @@ -120,44 +120,38 @@ tlb_miss_common_bolted: | |||
| 120 | rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3 | 120 | rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3 |
| 121 | cmpldi cr0,r14,0 | 121 | cmpldi cr0,r14,0 |
| 122 | clrrdi r15,r15,3 | 122 | clrrdi r15,r15,3 |
| 123 | beq tlb_miss_fault_bolted | 123 | beq tlb_miss_fault_bolted /* No PGDIR, bail */ |
| 124 | 124 | ||
| 125 | BEGIN_MMU_FTR_SECTION | 125 | BEGIN_MMU_FTR_SECTION |
| 126 | /* Set the TLB reservation and search for existing entry. Then load | 126 | /* Set the TLB reservation and search for existing entry. Then load |
| 127 | * the entry. | 127 | * the entry. |
| 128 | */ | 128 | */ |
| 129 | PPC_TLBSRX_DOT(0,r16) | 129 | PPC_TLBSRX_DOT(0,r16) |
| 130 | ldx r14,r14,r15 | 130 | ldx r14,r14,r15 /* grab pgd entry */ |
| 131 | beq normal_tlb_miss_done | 131 | beq normal_tlb_miss_done /* tlb exists already, bail */ |
| 132 | MMU_FTR_SECTION_ELSE | 132 | MMU_FTR_SECTION_ELSE |
| 133 | ldx r14,r14,r15 | 133 | ldx r14,r14,r15 /* grab pgd entry */ |
| 134 | ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV) | 134 | ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV) |
| 135 | 135 | ||
| 136 | #ifndef CONFIG_PPC_64K_PAGES | 136 | #ifndef CONFIG_PPC_64K_PAGES |
| 137 | rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3 | 137 | rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3 |
| 138 | clrrdi r15,r15,3 | 138 | clrrdi r15,r15,3 |
| 139 | 139 | cmpdi cr0,r14,0 | |
| 140 | cmpldi cr0,r14,0 | 140 | bge tlb_miss_fault_bolted /* Bad pgd entry or hugepage; bail */ |
| 141 | beq tlb_miss_fault_bolted | 141 | ldx r14,r14,r15 /* grab pud entry */ |
| 142 | |||
| 143 | ldx r14,r14,r15 | ||
| 144 | #endif /* CONFIG_PPC_64K_PAGES */ | 142 | #endif /* CONFIG_PPC_64K_PAGES */ |
| 145 | 143 | ||
| 146 | rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3 | 144 | rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3 |
| 147 | clrrdi r15,r15,3 | 145 | clrrdi r15,r15,3 |
| 148 | 146 | cmpdi cr0,r14,0 | |
| 149 | cmpldi cr0,r14,0 | 147 | bge tlb_miss_fault_bolted |
| 150 | beq tlb_miss_fault_bolted | 148 | ldx r14,r14,r15 /* Grab pmd entry */ |
| 151 | |||
| 152 | ldx r14,r14,r15 | ||
| 153 | 149 | ||
| 154 | rldicl r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3 | 150 | rldicl r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3 |
| 155 | clrrdi r15,r15,3 | 151 | clrrdi r15,r15,3 |
| 156 | 152 | cmpdi cr0,r14,0 | |
| 157 | cmpldi cr0,r14,0 | 153 | bge tlb_miss_fault_bolted |
| 158 | beq tlb_miss_fault_bolted | 154 | ldx r14,r14,r15 /* Grab PTE, normal (!huge) page */ |
| 159 | |||
| 160 | ldx r14,r14,r15 | ||
| 161 | 155 | ||
| 162 | /* Check if required permissions are met */ | 156 | /* Check if required permissions are met */ |
| 163 | andc. r15,r11,r14 | 157 | andc. r15,r11,r14 |
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 573ba3b69d1f..df32a838dcfa 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c | |||
| @@ -52,7 +52,7 @@ | |||
| 52 | * indirect page table entries. | 52 | * indirect page table entries. |
| 53 | */ | 53 | */ |
| 54 | #ifdef CONFIG_PPC_BOOK3E_MMU | 54 | #ifdef CONFIG_PPC_BOOK3E_MMU |
| 55 | #ifdef CONFIG_FSL_BOOKE | 55 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 56 | struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { | 56 | struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { |
| 57 | [MMU_PAGE_4K] = { | 57 | [MMU_PAGE_4K] = { |
| 58 | .shift = 12, | 58 | .shift = 12, |
