 Documentation/sparc/console.txt      |   9
 arch/sparc/include/asm/page_64.h     |   4
 arch/sparc/include/asm/pgtable_64.h  |  23
 arch/sparc/include/asm/setup.h       |   5
 arch/sparc/include/asm/tlbflush_64.h |   5
 arch/sparc/include/asm/topology_64.h |   4
 arch/sparc/include/asm/uprobes.h     |   4
 arch/sparc/kernel/smp_64.c           |   9
 arch/sparc/kernel/tsb.S              |  21
 arch/sparc/mm/hugetlbpage.c          | 201
 arch/sparc/mm/init_64.c              | 260
 arch/sparc/mm/srmmu.c                |   6
 arch/sparc/mm/tlb.c                  |  17
 arch/sparc/mm/tsb.c                  |  61
 drivers/block/sunvdc.c               |  18
 drivers/tty/serial/sunhv.c           |  12
 kernel/panic.c                       |   3
 17 files changed, 488 insertions(+), 174 deletions(-)
diff --git a/Documentation/sparc/console.txt b/Documentation/sparc/console.txt
new file mode 100644
index 000000000000..5aa735a44e02
--- /dev/null
+++ b/Documentation/sparc/console.txt
@@ -0,0 +1,9 @@
+Steps for sending 'break' on sunhv console:
+===========================================
+
+On Baremetal:
+	1. press Esc + 'B'
+
+On LDOM:
+	1. press Ctrl + ']'
+	2. telnet> send break
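The LDOM steps above rely on telnet's escape mechanism: Ctrl + ']' drops from the guest console to the telnet prompt, where "send break" transmits the break to the virtual console. An illustrative session (the host name and port are invented for the example):

	$ telnet ldom-console 5000
	^]
	telnet> send break

As the kernel/panic.c hunk at the end of this series spells out, the break has to be sent twice in a row to drop a panicked machine into the boot prom.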
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index c1263fc390db..f294dd42fc7d 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -17,7 +17,8 @@
 
 #define HPAGE_SHIFT		23
 #define REAL_HPAGE_SHIFT	22
-
+#define HPAGE_256MB_SHIFT	28
+#define HPAGE_64K_SHIFT		16
 #define REAL_HPAGE_SIZE		(_AC(1,UL) << REAL_HPAGE_SHIFT)
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -26,6 +27,7 @@
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #define REAL_HPAGE_PER_HPAGE	(_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
+#define HUGE_MAX_HSTATE		3
 #endif
 
 #ifndef __ASSEMBLY__
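For orientation, the shifts above decode (as 1 << shift bytes) to: HPAGE_64K_SHIFT 16 -> 64KB, REAL_HPAGE_SHIFT 22 -> 4MB (the largest hardware TTE actually programmed), HPAGE_SHIFT 23 -> 8MB (the default huge page, fabricated from two 4MB real pages, hence REAL_HPAGE_PER_HPAGE = 1 << (23 - 22) = 2), and HPAGE_256MB_SHIFT 28 -> 256MB. HUGE_MAX_HSTATE is 3 because that is how many hugetlb sizes the series enables: 64KB, 8MB and 256MB.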
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 314b66851348..7932a4a37817 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -375,7 +375,10 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
 #define pgprot_noncached pgprot_noncached
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline unsigned long __pte_huge_mask(void)
+extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+				struct page *page, int writable);
+#define arch_make_huge_pte arch_make_huge_pte
+static inline unsigned long __pte_default_huge_mask(void)
 {
 	unsigned long mask;
 
@@ -395,12 +398,14 @@ static inline unsigned long __pte_huge_mask(void)
 
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_PMD_HUGE | __pte_huge_mask());
+	return __pte(pte_val(pte) | __pte_default_huge_mask());
 }
 
-static inline bool is_hugetlb_pte(pte_t pte)
+static inline bool is_default_hugetlb_pte(pte_t pte)
 {
-	return !!(pte_val(pte) & __pte_huge_mask());
+	unsigned long mask = __pte_default_huge_mask();
+
+	return (pte_val(pte) & mask) == mask;
 }
 
 static inline bool is_hugetlb_pmd(pmd_t pmd)
@@ -875,10 +880,12 @@ static inline unsigned long pud_pfn(pud_t pud)
 
 /* Actual page table PTE updates.  */
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-		   pte_t *ptep, pte_t orig, int fullmm);
+		   pte_t *ptep, pte_t orig, int fullmm,
+		   unsigned int hugepage_shift);
 
 static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-				pte_t *ptep, pte_t orig, int fullmm)
+				pte_t *ptep, pte_t orig, int fullmm,
+				unsigned int hugepage_shift)
 {
 	/* It is more efficient to let flush_tlb_kernel_range()
 	 * handle init_mm tlb flushes.
@@ -887,7 +894,7 @@ static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 	 * and SUN4V pte layout, so this inline test is fine.
 	 */
 	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
-		tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
+		tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
 }
 
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
@@ -906,7 +913,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	pte_t orig = *ptep;
 
 	*ptep = pte;
-	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
+	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
 }
 
 #define set_pte_at(mm,addr,ptep,pte)	\
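One subtlety in the is_default_hugetlb_pte() hunk above: the test tightens from "any mask bit set" to "all mask bits set". With several TTE size encodings now in play, two sizes can share bits of the size field, so a partial overlap must no longer count as a match. A minimal standalone sketch of the difference (the bit values are invented for illustration and are not the real sun4u/sun4v encodings):

	#include <stdbool.h>

	#define SZ_DEFAULT	0x6UL	/* hypothetical default huge size */
	#define SZ_64K		0x2UL	/* hypothetical 64K size, shares bit 1 */

	static bool any_bit(unsigned long tte)
	{
		return !!(tte & SZ_DEFAULT);		/* old-style test */
	}

	static bool all_bits(unsigned long tte)
	{
		return (tte & SZ_DEFAULT) == SZ_DEFAULT;	/* new test */
	}

	/* For a tte carrying SZ_64K, any_bit() wrongly reports the default
	 * huge size, while all_bits() correctly reports that it does not
	 * match.
	 */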
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 29d64b1758ed..478bf6bb4598 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -59,8 +59,11 @@ extern atomic_t dcpage_flushes;
 extern atomic_t dcpage_flushes_xcall;
 
 extern int sysctl_tsb_ratio;
-#endif
 
+#ifdef CONFIG_SERIAL_SUNHV
+void sunhv_migrate_hvcons_irq(int cpu);
+#endif
+#endif
 void sun_do_break(void);
 extern int stop_a_enabled;
 extern int scons_pwroff;
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index a8e192e90700..54be88a6774c 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -8,7 +8,7 @@
 #define TLB_BATCH_NR	192
 
 struct tlb_batch {
-	bool huge;
+	unsigned int hugepage_shift;
 	struct mm_struct *mm;
 	unsigned long tlb_nr;
 	unsigned long active;
@@ -17,7 +17,8 @@ struct tlb_batch {
 
 void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 void flush_tsb_user(struct tlb_batch *tb);
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
+			 unsigned int hugepage_shift);
 
 /* TLB flush operations. */
 
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index 225543000122..ad5293f89680 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -4,7 +4,6 @@
 #ifdef CONFIG_NUMA
 
 #include <asm/mmzone.h>
-#include <asm/cpudata.h>
 
 static inline int cpu_to_node(int cpu)
 {
@@ -42,6 +41,9 @@ int __node_distance(int, int);
 #endif /* !(CONFIG_NUMA) */
 
 #ifdef CONFIG_SMP
+
+#include <asm/cpudata.h>
+
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).core_id)
 #define topology_core_cpumask(cpu)		(&cpu_core_sib_map[cpu])
diff --git a/arch/sparc/include/asm/uprobes.h b/arch/sparc/include/asm/uprobes.h
index f87aae5a908e..36196c17aff8 100644
--- a/arch/sparc/include/asm/uprobes.h
+++ b/arch/sparc/include/asm/uprobes.h
@@ -42,8 +42,8 @@ struct arch_uprobe {
 };
 
 struct arch_uprobe_task {
-	u32 saved_tpc;
-	u32 saved_tnpc;
+	u64 saved_tpc;
+	u64 saved_tnpc;
 };
 
 struct task_struct;
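The uprobes change is a plain correctness fix: %tpc and %tnpc are 64-bit registers on sparc64, so storing them in u32 fields truncates the saved program counters. Any saved PC with bits above bit 31 set - say 0x200401234 - would round-trip through a u32 as 0x00401234, and the post-single-step return would jump to the wrong address.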
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 0ce347f8e4cc..90a02cb64e20 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1443,6 +1443,7 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 
 static void stop_this_cpu(void *dummy)
 {
+	set_cpu_online(smp_processor_id(), false);
 	prom_stopself();
 }
 
@@ -1451,9 +1452,15 @@ void smp_send_stop(void)
 	int cpu;
 
 	if (tlb_type == hypervisor) {
+		int this_cpu = smp_processor_id();
+#ifdef CONFIG_SERIAL_SUNHV
+		sunhv_migrate_hvcons_irq(this_cpu);
+#endif
 		for_each_online_cpu(cpu) {
-			if (cpu == smp_processor_id())
+			if (cpu == this_cpu)
 				continue;
+
+			set_cpu_online(cpu, false);
 #ifdef CONFIG_SUN_LDOMS
 			if (ldom_domaining_enabled) {
 				unsigned long hv_err;
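Two ordering details in this hunk matter. The hypervisor console interrupt is migrated to the CPU running smp_send_stop() before the other CPUs are stopped, so console output keeps flowing on the panicking CPU afterwards; and each victim CPU is marked offline so later cross-calls no longer target dead CPUs. As a rough sketch of the intended sequence (stop_cpu() is a hypothetical stand-in for the LDOM cpu-stop hypervisor call or the xcall path in the real code):

	this_cpu = smp_processor_id();
	sunhv_migrate_hvcons_irq(this_cpu);	/* keep the console alive here */
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		set_cpu_online(cpu, false);	/* no further xcalls to it */
		stop_cpu(cpu);			/* hypothetical helper */
	}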
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index d568c8207af7..10689cfd0ad4 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -117,26 +117,11 @@ tsb_miss_page_table_walk_sun4v_fastpath:
 	/* Valid PTE is now in %g5.  */
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-661:	sethi		%uhi(_PAGE_SZALL_4U), %g7
+	sethi		%uhi(_PAGE_PMD_HUGE), %g7
 	sllx		%g7, 32, %g7
-	.section	.sun4v_2insn_patch, "ax"
-	.word		661b
-	mov		_PAGE_SZALL_4V, %g7
-	nop
-	.previous
-
-	and		%g5, %g7, %g2
-
-661:	sethi		%uhi(_PAGE_SZHUGE_4U), %g7
-	sllx		%g7, 32, %g7
-	.section	.sun4v_2insn_patch, "ax"
-	.word		661b
-	mov		_PAGE_SZHUGE_4V, %g7
-	nop
-	.previous
 
-	cmp		%g2, %g7
-	bne,pt		%xcc, 60f
+	andcc		%g5, %g7, %g0
+	be,pt		%xcc, 60f
 	nop
 
 	/* It is a huge page, use huge page TSB entry address we
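The rewritten TSB-miss fast path drops fifteen instructions and the sun4v patch-site machinery with them: instead of extracting the whole page-size field and comparing it against the single huge-page encoding, it tests the software _PAGE_PMD_HUGE bit, which this series sets for every size that lives in the huge TSB (8MB and 256MB; 64K mappings are serviced from the base TSB). In C terms, roughly (the _4U/_4V suffixes on the old constants are elided here):

	/* before: exact size-field compare, needs per-MMU constants */
	huge = (pte & _PAGE_SZALL) == _PAGE_SZHUGE;

	/* after: one software bit with the same layout on sun4u and sun4v */
	huge = (pte & _PAGE_PMD_HUGE) != 0;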
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 988acc8b1b80..e98a3f2e8f0f 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -28,6 +28,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 					unsigned long pgoff,
 					unsigned long flags)
 {
+	struct hstate *h = hstate_file(filp);
 	unsigned long task_size = TASK_SIZE;
 	struct vm_unmapped_area_info info;
 
@@ -38,7 +39,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 	info.length = len;
 	info.low_limit = TASK_UNMAPPED_BASE;
 	info.high_limit = min(task_size, VA_EXCLUDE_START);
-	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
 	addr = vm_unmapped_area(&info);
 
@@ -58,6 +59,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 				  const unsigned long pgoff,
 				  const unsigned long flags)
 {
+	struct hstate *h = hstate_file(filp);
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
@@ -69,7 +71,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = mm->mmap_base;
-	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
 	addr = vm_unmapped_area(&info);
 
@@ -94,6 +96,7 @@ unsigned long
 hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
+	struct hstate *h = hstate_file(file);
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long task_size = TASK_SIZE;
@@ -101,7 +104,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	if (test_thread_flag(TIF_32BIT))
 		task_size = STACK_TOP32;
 
-	if (len & ~HPAGE_MASK)
+	if (len & ~huge_page_mask(h))
 		return -EINVAL;
 	if (len > task_size)
 		return -ENOMEM;
@@ -113,7 +116,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	}
 
 	if (addr) {
-		addr = ALIGN(addr, HPAGE_SIZE);
+		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
 		    (!vma || addr + len <= vma->vm_start))
@@ -127,17 +130,141 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 			pgoff, flags);
 }
 
+static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+	return entry;
+}
+
+static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+	unsigned long hugepage_size = _PAGE_SZ4MB_4V;
+
+	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
+
+	switch (shift) {
+	case HPAGE_256MB_SHIFT:
+		hugepage_size = _PAGE_SZ256MB_4V;
+		pte_val(entry) |= _PAGE_PMD_HUGE;
+		break;
+	case HPAGE_SHIFT:
+		pte_val(entry) |= _PAGE_PMD_HUGE;
+		break;
+	case HPAGE_64K_SHIFT:
+		hugepage_size = _PAGE_SZ64K_4V;
+		break;
+	default:
+		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
+	}
+
+	pte_val(entry) = pte_val(entry) | hugepage_size;
+	return entry;
+}
+
+static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+	if (tlb_type == hypervisor)
+		return sun4v_hugepage_shift_to_tte(entry, shift);
+	else
+		return sun4u_hugepage_shift_to_tte(entry, shift);
+}
+
+pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+			 struct page *page, int writeable)
+{
+	unsigned int shift = huge_page_shift(hstate_vma(vma));
+
+	return hugepage_shift_to_tte(entry, shift);
+}
+
+static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
+{
+	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
+	unsigned int shift;
+
+	switch (tte_szbits) {
+	case _PAGE_SZ256MB_4V:
+		shift = HPAGE_256MB_SHIFT;
+		break;
+	case _PAGE_SZ4MB_4V:
+		shift = REAL_HPAGE_SHIFT;
+		break;
+	case _PAGE_SZ64K_4V:
+		shift = HPAGE_64K_SHIFT;
+		break;
+	default:
+		shift = PAGE_SHIFT;
+		break;
+	}
+	return shift;
+}
+
+static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
+{
+	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
+	unsigned int shift;
+
+	switch (tte_szbits) {
+	case _PAGE_SZ256MB_4U:
+		shift = HPAGE_256MB_SHIFT;
+		break;
+	case _PAGE_SZ4MB_4U:
+		shift = REAL_HPAGE_SHIFT;
+		break;
+	case _PAGE_SZ64K_4U:
+		shift = HPAGE_64K_SHIFT;
+		break;
+	default:
+		shift = PAGE_SHIFT;
+		break;
+	}
+	return shift;
+}
+
+static unsigned int huge_tte_to_shift(pte_t entry)
+{
+	unsigned long shift;
+
+	if (tlb_type == hypervisor)
+		shift = sun4v_huge_tte_to_shift(entry);
+	else
+		shift = sun4u_huge_tte_to_shift(entry);
+
+	if (shift == PAGE_SHIFT)
+		WARN_ONCE(1, "tto_to_shift: invalid hugepage tte=0x%lx\n",
+			  pte_val(entry));
+
+	return shift;
+}
+
+static unsigned long huge_tte_to_size(pte_t pte)
+{
+	unsigned long size = 1UL << huge_tte_to_shift(pte);
+
+	if (size == REAL_HPAGE_SIZE)
+		size = HPAGE_SIZE;
+	return size;
+}
+
 pte_t *huge_pte_alloc(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	pud_t *pud;
+	pmd_t *pmd;
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
-	if (pud)
-		pte = (pte_t *)pmd_alloc(mm, pud, addr);
+	if (pud) {
+		pmd = pmd_alloc(mm, pud, addr);
+		if (!pmd)
+			return NULL;
+
+		if (sz == PMD_SHIFT)
+			pte = (pte_t *)pmd;
+		else
+			pte = pte_alloc_map(mm, pmd, addr);
+	}
 
 	return pte;
 }
@@ -146,49 +273,83 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
+	pmd_t *pmd;
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
 	if (!pgd_none(*pgd)) {
 		pud = pud_offset(pgd, addr);
-		if (!pud_none(*pud))
-			pte = (pte_t *)pmd_offset(pud, addr);
+		if (!pud_none(*pud)) {
+			pmd = pmd_offset(pud, addr);
+			if (!pmd_none(*pmd)) {
+				if (is_hugetlb_pmd(*pmd))
+					pte = (pte_t *)pmd;
+				else
+					pte = pte_offset_map(pmd, addr);
+			}
+		}
 	}
+
 	return pte;
 }
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
+	unsigned int i, nptes, orig_shift, shift;
+	unsigned long size;
 	pte_t orig;
 
+	size = huge_tte_to_size(entry);
+	shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT;
+	nptes = size >> shift;
+
 	if (!pte_present(*ptep) && pte_present(entry))
-		mm->context.hugetlb_pte_count++;
+		mm->context.hugetlb_pte_count += nptes;
 
-	addr &= HPAGE_MASK;
+	addr &= ~(size - 1);
 	orig = *ptep;
-	*ptep = entry;
+	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);
+
+	for (i = 0; i < nptes; i++)
+		ptep[i] = __pte(pte_val(entry) + (i << shift));
 
-	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-	maybe_tlb_batch_add(mm, addr, ptep, orig, 0);
-	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0);
+	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
+	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
+	if (size == HPAGE_SIZE)
+		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
+				    orig_shift);
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep)
 {
+	unsigned int i, nptes, hugepage_shift;
+	unsigned long size;
 	pte_t entry;
 
 	entry = *ptep;
+	size = huge_tte_to_size(entry);
+	if (size >= HPAGE_SIZE)
+		nptes = size >> PMD_SHIFT;
+	else
+		nptes = size >> PAGE_SHIFT;
+
+	hugepage_shift = pte_none(entry) ? PAGE_SHIFT :
+		huge_tte_to_shift(entry);
+
 	if (pte_present(entry))
-		mm->context.hugetlb_pte_count--;
+		mm->context.hugetlb_pte_count -= nptes;
 
-	addr &= HPAGE_MASK;
-	*ptep = __pte(0UL);
+	addr &= ~(size - 1);
+	for (i = 0; i < nptes; i++)
+		ptep[i] = __pte(0UL);
 
-	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
-	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0);
+	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift);
+	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
+	if (size == HPAGE_SIZE)
+		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
+				    hugepage_shift);
 
 	return entry;
 }
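The arithmetic in set_huge_pte_at()/huge_ptep_get_and_clear() is easiest to check with concrete numbers (sparc64 uses 8KB base pages, so PAGE_SHIFT = 13 and PMD_SHIFT = 23, i.e. one PMD maps 8MB):

 - 256MB page: size = 1 << 28, shift = PMD_SHIFT, nptes = 2^28 >> 23 = 32, so thirty-two consecutive PMD entries are written, the i-th TTE advanced by i << 23 (8MB steps).
 - 8MB default page: nptes = 2^23 >> 23 = 1 PMD entry; the second maybe_tlb_batch_add() at addr + REAL_HPAGE_SIZE covers the second 4MB hardware TTE backing the 8MB page.
 - 64K page: shift = PAGE_SHIFT, nptes = 2^16 >> 13 = 8 consecutive PTEs in an ordinary PTE page, which is why huge_pte_alloc() now descends to pte_alloc_map() for the sub-PMD size.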
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 5d2f91511c60..ccd455328989 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -324,6 +324,50 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	tsb_insert(tsb, tag, tte);
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+static int __init setup_hugepagesz(char *string)
+{
+	unsigned long long hugepage_size;
+	unsigned int hugepage_shift;
+	unsigned short hv_pgsz_idx;
+	unsigned int hv_pgsz_mask;
+	int rc = 0;
+
+	hugepage_size = memparse(string, &string);
+	hugepage_shift = ilog2(hugepage_size);
+
+	switch (hugepage_shift) {
+	case HPAGE_256MB_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
+		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
+		break;
+	case HPAGE_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_4MB;
+		hv_pgsz_idx = HV_PGSZ_IDX_4MB;
+		break;
+	case HPAGE_64K_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_64K;
+		hv_pgsz_idx = HV_PGSZ_IDX_64K;
+		break;
+	default:
+		hv_pgsz_mask = 0;
+	}
+
+	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
+		pr_warn("hugepagesz=%llu not supported by MMU.\n",
+			hugepage_size);
+		goto out;
+	}
+
+	hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT);
+	rc = 1;
+
+out:
+	return rc;
+}
+__setup("hugepagesz=", setup_hugepagesz);
+#endif	/* CONFIG_HUGETLB_PAGE */
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	struct mm_struct *mm;
@@ -347,7 +391,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
-	    is_hugetlb_pte(pte)) {
+	    is_hugetlb_pmd(__pmd(pte_val(pte)))) {
 		/* We are fabricating 8MB pages using 4MB real hw pages.  */
 		pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
 		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
@@ -785,13 +829,23 @@ static void __init find_ramdisk(unsigned long phys_base)
 
 struct node_mem_mask {
 	unsigned long mask;
-	unsigned long val;
+	unsigned long match;
 };
 static struct node_mem_mask node_masks[MAX_NUMNODES];
 static int num_node_masks;
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
 
+struct mdesc_mlgroup {
+	u64	node;
+	u64	latency;
+	u64	match;
+	u64	mask;
+};
+
+static struct mdesc_mlgroup *mlgroups;
+static int num_mlgroups;
+
 int numa_cpu_lookup_table[NR_CPUS];
 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
 
@@ -802,78 +856,129 @@ struct mdesc_mblock {
 };
 static struct mdesc_mblock *mblocks;
 static int num_mblocks;
-static int find_numa_node_for_addr(unsigned long pa,
-				   struct node_mem_mask *pnode_mask);
 
-static unsigned long __init ra_to_pa(unsigned long addr)
+static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
 {
+	struct mdesc_mblock *m = NULL;
 	int i;
 
 	for (i = 0; i < num_mblocks; i++) {
-		struct mdesc_mblock *m = &mblocks[i];
+		m = &mblocks[i];
 
 		if (addr >= m->base &&
 		    addr < (m->base + m->size)) {
-			addr += m->offset;
 			break;
 		}
 	}
-	return addr;
+
+	return m;
 }
 
-static int __init find_node(unsigned long addr)
+static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
 {
-	static bool search_mdesc = true;
-	static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
-	static int last_index;
-	int i;
+	int prev_nid, new_nid;
 
-	addr = ra_to_pa(addr);
-	for (i = 0; i < num_node_masks; i++) {
-		struct node_mem_mask *p = &node_masks[i];
+	prev_nid = -1;
+	for ( ; start < end; start += PAGE_SIZE) {
+		for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
+			struct node_mem_mask *p = &node_masks[new_nid];
 
-		if ((addr & p->mask) == p->val)
-			return i;
-	}
-	/* The following condition has been observed on LDOM guests because
-	 * node_masks only contains the best latency mask and value.
-	 * LDOM guest's mdesc can contain a single latency group to
-	 * cover multiple address range. Print warning message only if the
-	 * address cannot be found in node_masks nor mdesc.
-	 */
-	if ((search_mdesc) &&
-	    ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
-		/* find the available node in the mdesc */
-		last_index = find_numa_node_for_addr(addr, &last_mem_mask);
-		numadbg("find_node: latency group for address 0x%lx is %d\n",
-			addr, last_index);
-		if ((last_index < 0) || (last_index >= num_node_masks)) {
-			/* WARN_ONCE() and use default group 0 */
-			WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
-			search_mdesc = false;
-			last_index = 0;
+			if ((start & p->mask) == p->match) {
+				if (prev_nid == -1)
+					prev_nid = new_nid;
+				break;
+			}
 		}
+
+		if (new_nid == num_node_masks) {
+			prev_nid = 0;
+			WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
+				  start);
+			break;
+		}
+
+		if (prev_nid != new_nid)
+			break;
 	}
+	*nid = prev_nid;
 
-	return last_index;
+	return start > end ? end : start;
}
 
 static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
 {
-	*nid = find_node(start);
-	start += PAGE_SIZE;
-	while (start < end) {
-		int n = find_node(start);
+	u64 ret_end, pa_start, m_mask, m_match, m_end;
+	struct mdesc_mblock *mblock;
+	int _nid, i;
+
+	if (tlb_type != hypervisor)
+		return memblock_nid_range_sun4u(start, end, nid);
+
+	mblock = addr_to_mblock(start);
+	if (!mblock) {
+		WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
+			  start);
+
+		_nid = 0;
+		ret_end = end;
+		goto done;
+	}
+
+	pa_start = start + mblock->offset;
+	m_match = 0;
+	m_mask = 0;
+
+	for (_nid = 0; _nid < num_node_masks; _nid++) {
+		struct node_mem_mask *const m = &node_masks[_nid];
 
-		if (n != *nid)
+		if ((pa_start & m->mask) == m->match) {
+			m_match = m->match;
+			m_mask = m->mask;
 			break;
-		start += PAGE_SIZE;
+		}
 	}
 
-	if (start > end)
-		start = end;
+	if (num_node_masks == _nid) {
+		/* We could not find NUMA group, so default to 0, but lets
+		 * search for latency group, so we could calculate the correct
+		 * end address that we return
+		 */
+		_nid = 0;
+
+		for (i = 0; i < num_mlgroups; i++) {
+			struct mdesc_mlgroup *const m = &mlgroups[i];
+
+			if ((pa_start & m->mask) == m->match) {
+				m_match = m->match;
+				m_mask = m->mask;
+				break;
+			}
+		}
+
+		if (i == num_mlgroups) {
+			WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
+				  start);
+
+			ret_end = end;
+			goto done;
+		}
+	}
 
-	return start;
+	/*
+	 * Each latency group has match and mask, and each memory block has an
+	 * offset. An address belongs to a latency group if its address matches
+	 * the following formula: ((addr + offset) & mask) == match
+	 * It is, however, slow to check every single page if it matches a
+	 * particular latency group. As optimization we calculate end value by
+	 * using bit arithmetics.
+	 */
+	m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
+	m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
+	ret_end = m_end > end ? end : m_end;
+
+done:
+	*nid = _nid;
+	return ret_end;
 }
 #endif
 
@@ -914,7 +1019,8 @@ static void init_node_masks_nonnuma(void)
 
 	numadbg("Initializing tables for non-numa.\n");
 
-	node_masks[0].mask = node_masks[0].val = 0;
+	node_masks[0].mask = 0;
+	node_masks[0].match = 0;
 	num_node_masks = 1;
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -932,15 +1038,6 @@ EXPORT_SYMBOL(numa_cpu_lookup_table);
 EXPORT_SYMBOL(numa_cpumask_lookup_table);
 EXPORT_SYMBOL(node_data);
 
-struct mdesc_mlgroup {
-	u64	node;
-	u64	latency;
-	u64	match;
-	u64	mask;
-};
-static struct mdesc_mlgroup *mlgroups;
-static int num_mlgroups;
-
 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
 				   u32 cfg_handle)
 {
@@ -1029,6 +1126,10 @@ int of_node_to_nid(struct device_node *dp)
 static void __init add_node_ranges(void)
 {
 	struct memblock_region *reg;
+	unsigned long prev_max;
+
+memblock_resized:
+	prev_max = memblock.memory.max;
 
 	for_each_memblock(memory, reg) {
 		unsigned long size = reg->size;
@@ -1048,6 +1149,8 @@ static void __init add_node_ranges(void)
 
 		memblock_set_node(start, this_end - start,
 				  &memblock.memory, nid);
+		if (memblock.memory.max != prev_max)
+			goto memblock_resized;
 		start = this_end;
 	}
 }
@@ -1182,41 +1285,6 @@ int __node_distance(int from, int to)
 	return numa_latency[from][to];
 }
 
-static int find_numa_node_for_addr(unsigned long pa,
-				   struct node_mem_mask *pnode_mask)
-{
-	struct mdesc_handle *md = mdesc_grab();
-	u64 node, arc;
-	int i = 0;
-
-	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
-	if (node == MDESC_NODE_NULL)
-		goto out;
-
-	mdesc_for_each_node_by_name(md, node, "group") {
-		mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
-			u64 target = mdesc_arc_target(md, arc);
-			struct mdesc_mlgroup *m = find_mlgroup(target);
-
-			if (!m)
-				continue;
-			if ((pa & m->mask) == m->match) {
-				if (pnode_mask) {
-					pnode_mask->mask = m->mask;
-					pnode_mask->val = m->match;
-				}
-				mdesc_release(md);
-				return i;
-			}
-		}
-		i++;
-	}
-
-out:
-	mdesc_release(md);
-	return -1;
-}
-
 static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 {
 	int i;
@@ -1224,7 +1292,7 @@ static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		struct node_mem_mask *n = &node_masks[i];
 
-		if ((grp->mask == n->mask) && (grp->match == n->val))
+		if ((grp->mask == n->mask) && (grp->match == n->match))
 			break;
 	}
 	return i;
@@ -1279,10 +1347,10 @@ static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
 	n = &node_masks[num_node_masks++];
 
 	n->mask = candidate->mask;
-	n->val = candidate->match;
+	n->match = candidate->match;
 
-	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
-		index, n->mask, n->val, candidate->latency);
+	numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
+		index, n->mask, n->match, candidate->latency);
 
 	return 0;
 }
@@ -1379,7 +1447,7 @@ static int __init numa_parse_jbus(void)
 		numa_cpu_lookup_table[cpu] = index;
 		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
 		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
-		node_masks[index].val = cpu << 36UL;
+		node_masks[index].match = cpu << 36UL;
 
 		index++;
 	}
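Two notes on the init_64.c changes. setup_hugepagesz() wires the new sizes into the generic hugetlb boot interface, so on a machine whose MMU advertises them in cpu_pgsz_mask a command line such as

	hugepagesz=256M hugepages=16 hugepagesz=64K hugepages=128

pre-allocates pools for the extra sizes (the counts here are only an example). The bit arithmetic at the end of memblock_nid_range() is also easier to see with numbers. Take an invented rule with mask = 0x7c0000000 (bits 30-34), match = 0x140000000, an mblock offset of 0, and pa_start = 0x150000000, which satisfies ((pa_start + offset) & mask) == match. Then __ffs(mask) = 30, so m_end = 0x140000000 + (1 << 30) = 0x180000000; fls64(mask) = 35 and pa_start has no bits at or above bit 35, so nothing more is added. Every page in [0x150000000, 0x180000000) provably matches the same rule, and the whole range is assigned to the node in one step instead of being probed page by page as the old find_node() loop did.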
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index c7f2a5295b3a..def82f6d626f 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1444,7 +1444,7 @@ static void poke_viking(void)
 	srmmu_set_mmureg(mreg);
 }
 
-static struct sparc32_cachetlb_ops viking_ops = {
+static struct sparc32_cachetlb_ops viking_ops __ro_after_init = {
 	.cache_all	= viking_flush_cache_all,
 	.cache_mm	= viking_flush_cache_mm,
 	.cache_page	= viking_flush_cache_page,
@@ -1475,7 +1475,7 @@ static struct sparc32_cachetlb_ops viking_ops = {
  * flushes going at once will require SMP locking anyways so there's
  * no real value in trying any harder than this.
  */
-static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
+static struct sparc32_cachetlb_ops viking_sun4d_smp_ops __ro_after_init = {
 	.cache_all	= viking_flush_cache_all,
 	.cache_mm	= viking_flush_cache_mm,
 	.cache_page	= viking_flush_cache_page,
@@ -1759,7 +1759,7 @@ static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
 	local_ops->sig_insns(mm, insn_addr);
 }
 
-static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
+static struct sparc32_cachetlb_ops smp_cachetlb_ops __ro_after_init = {
 	.cache_all	= smp_flush_cache_all,
 	.cache_mm	= smp_flush_cache_mm,
 	.cache_page	= smp_flush_cache_page,
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index c56a195c9071..afda3bbf7854 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void)
 }
 
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
-			      bool exec, bool huge)
+			      bool exec, unsigned int hugepage_shift)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
@@ -84,19 +84,19 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 	}
 
 	if (!tb->active) {
-		flush_tsb_user_page(mm, vaddr, huge);
+		flush_tsb_user_page(mm, vaddr, hugepage_shift);
 		global_flush_tlb_page(mm, vaddr);
 		goto out;
 	}
 
 	if (nr == 0) {
 		tb->mm = mm;
-		tb->huge = huge;
+		tb->hugepage_shift = hugepage_shift;
 	}
 
-	if (tb->huge != huge) {
+	if (tb->hugepage_shift != hugepage_shift) {
 		flush_tlb_pending();
-		tb->huge = huge;
+		tb->hugepage_shift = hugepage_shift;
 		nr = 0;
 	}
 
@@ -110,10 +110,9 @@ out:
 }
 
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-		   pte_t *ptep, pte_t orig, int fullmm)
+		   pte_t *ptep, pte_t orig, int fullmm,
+		   unsigned int hugepage_shift)
 {
-	bool huge = is_hugetlb_pte(orig);
-
 	if (tlb_type != hypervisor &&
 	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);
@@ -139,7 +138,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 
 no_cache_flush:
 	if (!fullmm)
-		tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
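With the page size carried as a shift rather than a bool, the batching rule in tlb_batch_add_one() stays the same but becomes finer-grained: a TLB batch only ever holds addresses of one page size, and a request for a different size first drains the pending batch via flush_tlb_pending(). The shift is now supplied by the caller because a bool cannot say which of several huge sizes is being flushed, and because for a pte_none() original there is no TTE to derive it from - the hugetlb code above falls back to PAGE_SHIFT in exactly that case.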
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index e20fbbafb0b0..23479c3d39f0 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -86,6 +86,33 @@ static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
 		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
 }
 
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static void __flush_huge_tsb_one_entry(unsigned long tsb, unsigned long v,
+				       unsigned long hash_shift,
+				       unsigned long nentries,
+				       unsigned int hugepage_shift)
+{
+	unsigned int hpage_entries;
+	unsigned int i;
+
+	hpage_entries = 1 << (hugepage_shift - hash_shift);
+	for (i = 0; i < hpage_entries; i++)
+		__flush_tsb_one_entry(tsb, v + (i << hash_shift), hash_shift,
+				      nentries);
+}
+
+static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+				 unsigned long tsb, unsigned long nentries,
+				 unsigned int hugepage_shift)
+{
+	unsigned long i;
+
+	for (i = 0; i < tb->tlb_nr; i++)
+		__flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift,
+					   nentries, hugepage_shift);
+}
+#endif
+
 void flush_tsb_user(struct tlb_batch *tb)
 {
 	struct mm_struct *mm = tb->mm;
@@ -93,45 +120,61 @@ void flush_tsb_user(struct tlb_batch *tb)
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	if (!tb->huge) {
+	if (tb->hugepage_shift < HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+		if (tb->hugepage_shift == PAGE_SHIFT)
+			__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+		else
+			__flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
+					     tb->hugepage_shift);
+#endif
 	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
+		__flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries,
+				     tb->hugepage_shift);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge)
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
+			 unsigned int hugepage_shift)
 {
 	unsigned long nentries, base, flags;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	if (!huge) {
+	if (hugepage_shift < HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+		if (hugepage_shift == PAGE_SHIFT)
+			__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+					      nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+		else
+			__flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+						   nentries, hugepage_shift);
+#endif
 	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
+		__flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT,
+					   nentries, hugepage_shift);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
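The new helper pair fans one huge-page flush out to every TSB slot the mapping can occupy. Worked cases: flushing a 64K mapping from the base TSB uses hash_shift = PAGE_SHIFT = 13 and hugepage_shift = 16, so hpage_entries = 1 << (16 - 13) = 8 and the loop invalidates the eight entries at v, v + 8K, ..., v + 56K. In the huge TSB, hash_shift = REAL_HPAGE_SHIFT = 22, so an 8MB page (shift 23) touches 1 << 1 = 2 entries and a 256MB page (shift 28) touches 1 << 6 = 64.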
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index cab157331c4e..3f3a3ab3d50a 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -34,6 +34,7 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
 #define VDC_TX_RING_SIZE	512
+#define VDC_DEFAULT_BLK_SIZE	512
 
 #define WAITING_FOR_LINK_UP	0x01
 #define WAITING_FOR_TX_SPACE	0x02
@@ -73,6 +74,7 @@ struct vdc_port {
 	u32			vdisk_size;
 	u8			vdisk_type;
 	u8			vdisk_mtype;
+	u32			vdisk_phys_blksz;
 
 	char			disk_name[32];
 };
@@ -88,6 +90,7 @@ static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
 
 /* Ordered from largest major to lowest */
 static struct vio_version vdc_versions[] = {
+	{ .major = 1, .minor = 2 },
 	{ .major = 1, .minor = 1 },
 	{ .major = 1, .minor = 0 },
 };
@@ -271,6 +274,11 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
 		if (pkt->max_xfer_size < port->max_xfer_size)
 			port->max_xfer_size = pkt->max_xfer_size;
 		port->vdisk_block_size = pkt->vdisk_block_size;
+
+		port->vdisk_phys_blksz = VDC_DEFAULT_BLK_SIZE;
+		if (vdc_version_supported(port, 1, 2))
+			port->vdisk_phys_blksz = pkt->phys_block_size;
+
 		return 0;
 	} else {
 		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);
@@ -754,6 +762,12 @@ static int probe_disk(struct vdc_port *port)
 	if (err)
 		return err;
 
+	/* Using version 1.2 means vdisk_phys_blksz should be set unless the
+	 * disk is reserved by another system.
+	 */
+	if (vdc_version_supported(port, 1, 2) && !port->vdisk_phys_blksz)
+		return -ENODEV;
+
 	if (vdc_version_supported(port, 1, 1)) {
 		/* vdisk_size should be set during the handshake, if it wasn't
 		 * then the underlying disk is reserved by another system
@@ -829,6 +843,8 @@ static int probe_disk(struct vdc_port *port)
 		}
 	}
 
+	blk_queue_physical_block_size(q, port->vdisk_phys_blksz);
+
 	pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
 		g->disk_name,
 		port->vdisk_size, (port->vdisk_size >> (20 - 9)),
@@ -910,7 +926,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	if (err)
 		goto err_out_free_port;
 
-	port->vdisk_block_size = 512;
+	port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE;
 	port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
 	port->ring_cookies = ((port->max_xfer_size *
 			       port->vdisk_block_size) / PAGE_SIZE) + 2;
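The sunvdc change is the one piece of non-mm plumbing in the series: vio protocol 1.2 adds a physical block size attribute, and the driver now forwards it to the block layer through blk_queue_physical_block_size() so that partitioning and mkfs tools can align to the backend device. The negotiation is defensive in both directions - ports speaking only 1.0/1.1 keep the 512-byte default, and a 1.2 port that never delivered the attribute (for instance because the backing disk is reserved by another system, as the comment notes) fails probe with -ENODEV rather than exporting a disk with unknown geometry.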
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 73abd89c0108..46e46894e918 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -116,7 +116,7 @@ static int receive_chars_getchar(struct uart_port *port)
 
 static int receive_chars_read(struct uart_port *port)
 {
-	int saw_console_brk = 0;
+	static int saw_console_brk;
 	int limit = 10000;
 
 	while (limit-- > 0) {
@@ -128,6 +128,9 @@ static int receive_chars_read(struct uart_port *port)
 			bytes_read = 0;
 
 			if (stat == CON_BREAK) {
+				if (saw_console_brk)
+					sun_do_break();
+
 				if (uart_handle_break(port))
 					continue;
 				saw_console_brk = 1;
@@ -151,6 +154,7 @@ static int receive_chars_read(struct uart_port *port)
 		if (port->sysrq != 0 && *con_read_page) {
 			for (i = 0; i < bytes_read; i++)
 				uart_handle_sysrq_char(port, con_read_page[i]);
+			saw_console_brk = 0;
 		}
 
 		if (port->state == NULL)
@@ -398,6 +402,12 @@ static struct uart_driver sunhv_reg = {
 
 static struct uart_port *sunhv_port;
 
+void sunhv_migrate_hvcons_irq(int cpu)
+{
+	/* Migrate hvcons irq to param cpu */
+	irq_force_affinity(sunhv_port->irq, cpumask_of(cpu));
+}
+
 /* Copy 's' into the con_write_page, decoding "\n" into
  * "\r\n" along the way.  We have to return two lengths
  * because the caller needs to know how much to advance
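Making saw_console_brk static is what implements the "send break twice" behaviour advertised in Documentation/sparc/console.txt and kernel/panic.c: the flag now survives across calls to receive_chars_read(), so a second break arriving with no intervening input triggers sun_do_break() and drops to the boot prom, while real characters in between clear the flag again. sunhv_migrate_hvcons_irq() is the other half of the smp_64.c hunk - it pins the console interrupt to whichever CPU the panic path nominates, using the generic irq_force_affinity() call.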
diff --git a/kernel/panic.c b/kernel/panic.c
index b95959733ce0..3ec16e603e88 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -273,7 +273,8 @@ void panic(const char *fmt, ...)
 		extern int stop_a_enabled;
 		/* Make sure the user can actually press Stop-A (L1-A) */
 		stop_a_enabled = 1;
-		pr_emerg("Press Stop-A (L1-A) to return to the boot prom\n");
+		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
+			 "twice on console to return to the boot prom\n");
 	}
 #endif
 #if defined(CONFIG_S390)
