commit     216fe8f021e33c36e3b27c49c9f1951f6b037d7f
author     David S. Miller <davem@davemloft.net>  2017-06-06 22:20:08 -0400
committer  David S. Miller <davem@davemloft.net>  2017-06-06 22:20:08 -0400
tree       a43daec41b4d3955e7a4f8d0ed0654a7c80527ec /arch/sparc
parent     9747e2313838ee8f5d8073fd6aa7289255c3c51b
parent     b29794ec95c6856b316c2295904208bf11ffddd9
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Just some simple overlapping changes in the Marvell PHY driver
and the DSA core code.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig                       | 12
-rw-r--r--  arch/sparc/include/asm/mmu_64.h          |  2
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h  | 32
-rw-r--r--  arch/sparc/include/asm/pil.h             |  1
-rw-r--r--  arch/sparc/include/asm/vio.h             |  1
-rw-r--r--  arch/sparc/kernel/ds.c                   |  2
-rw-r--r--  arch/sparc/kernel/irq_64.c               | 17
-rw-r--r--  arch/sparc/kernel/kernel.h               |  1
-rw-r--r--  arch/sparc/kernel/smp_64.c               | 31
-rw-r--r--  arch/sparc/kernel/tsb.S                  | 11
-rw-r--r--  arch/sparc/kernel/ttable_64.S            |  2
-rw-r--r--  arch/sparc/kernel/vio.c                  | 68
-rw-r--r--  arch/sparc/lib/Makefile                  |  1
-rw-r--r--  arch/sparc/lib/multi3.S                  | 35
-rw-r--r--  arch/sparc/mm/init_64.c                  | 89
-rw-r--r--  arch/sparc/mm/tsb.c                      |  7
-rw-r--r--  arch/sparc/mm/ultra.S                    |  5
17 files changed, 201 insertions, 116 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 58243b0d21c0..b558c9e29de3 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -192,9 +192,9 @@ config NR_CPUS
 	int "Maximum number of CPUs"
 	depends on SMP
 	range 2 32 if SPARC32
-	range 2 1024 if SPARC64
+	range 2 4096 if SPARC64
 	default 32 if SPARC32
-	default 64 if SPARC64
+	default 4096 if SPARC64
 
 source kernel/Kconfig.hz
 
@@ -295,9 +295,13 @@ config NUMA
 	depends on SPARC64 && SMP
 
 config NODES_SHIFT
-	int
-	default "4"
+	int "Maximum NUMA Nodes (as a power of 2)"
+	range 4 5 if SPARC64
+	default "5"
 	depends on NEED_MULTIPLE_NODES
+	help
+	  Specify the maximum number of NUMA Nodes available on the target
+	  system. Increases memory reserved to accommodate various tables.
 
 # Some NUMA nodes have memory ranges that span
 # other nodes. Even though a pfn is valid and
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f7de0dbc38af..83b36a5371ff 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -52,7 +52,7 @@
 #define CTX_NR_MASK TAG_CONTEXT_BITS
 #define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
 
-#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT)
 #define CTX_VALID(__ctx) \
 	(!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
 #define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 22fede6eba11..2cddcda4f85f 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid, flags;
-	int cpu;
+	int cpu = smp_processor_id();
 
+	per_cpu(per_cpu_secondary_mm, cpu) = mm;
 	if (unlikely(mm == &init_mm))
 		return;
 
@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 	 * for the first time, we must flush that context out of the
 	 * local TLB.
 	 */
-	cpu = smp_processor_id();
 	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		cpumask_set_cpu(cpu, mm_cpumask(mm));
 		__flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 }
 
 #define deactivate_mm(tsk,mm) do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-	unsigned long flags;
-	int cpu;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-	if (!CTX_VALID(mm->context))
-		get_new_mmu_context(mm);
-	cpu = smp_processor_id();
-	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
-		cpumask_set_cpu(cpu, mm_cpumask(mm));
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-	tsb_context_switch(mm);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index 266937030546..522b43db2ed3 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -20,7 +20,6 @@
 #define PIL_SMP_CALL_FUNC 1
 #define PIL_SMP_RECEIVE_SIGNAL 2
 #define PIL_SMP_CAPTURE 3
-#define PIL_SMP_CTX_NEW_VERSION 4
 #define PIL_DEVICE_IRQ 5
 #define PIL_SMP_CALL_FUNC_SNGL 6
 #define PIL_DEFERRED_PCR_WORK 7
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8174f6cdbbbb..9dca7a892978 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -327,6 +327,7 @@ struct vio_dev {
 	int compat_len;
 
 	u64 dev_no;
+	u64 id;
 
 	unsigned long channel_id;
 
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index b542cc7c8d94..f87265afb175 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -909,7 +909,7 @@ static int register_services(struct ds_info *dp)
 		pbuf.req.handle = cp->handle;
 		pbuf.req.major = 1;
 		pbuf.req.minor = 0;
-		strcpy(pbuf.req.svc_id, cp->service_id);
+		strcpy(pbuf.id_buf, cp->service_id);
 
 		err = __ds_send(lp, &pbuf, msg_len);
 		if (err > 0)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 4d0248aa0928..99dd133a029f 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
 	unsigned long page;
+	void *mondo, *p;
 
-	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+	/* Make sure mondo block is 64byte aligned */
+	p = kzalloc(127, GFP_KERNEL);
+	if (!p) {
+		prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+		prom_halt();
+	}
+	mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+	tb->cpu_mondo_block_pa = __pa(mondo);
 
 	page = get_zeroed_page(GFP_KERNEL);
 	if (!page) {
-		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+		prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
 		prom_halt();
 	}
 
-	tb->cpu_mondo_block_pa = __pa(page);
-	tb->cpu_list_pa = __pa(page + 64);
+	tb->cpu_list_pa = __pa(page);
 #endif
 }
 
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index c9804551262c..6ae1e77be0bf 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 /* smp_64.c */
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
 
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b3bc0ac757cc..fdf31040a7dc 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	preempt_enable();
 }
 
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
-	struct mm_struct *mm;
-	unsigned long flags;
-
-	clear_softint(1 << irq);
-
-	/* See if we need to allocate a new TLB context because
-	 * the version of the one we are using is now out of date.
-	 */
-	mm = current->active_mm;
-	if (unlikely(!mm || (mm == &init_mm)))
-		return;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-	if (unlikely(!CTX_VALID(mm->context)))
-		get_new_mmu_context(mm);
-
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context),
-		       SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
-	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
 #ifdef CONFIG_KGDB
 void kgdb_roundup_cpus(unsigned long flags)
 {
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 10689cfd0ad4..07c0df924960 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -455,13 +455,16 @@ __tsb_context_switch:
 	.type copy_tsb,#function
 copy_tsb:	/* %o0=old_tsb_base, %o1=old_tsb_size
 		 * %o2=new_tsb_base, %o3=new_tsb_size
+		 * %o4=page_size_shift
 		 */
 	sethi %uhi(TSB_PASS_BITS), %g7
 	srlx %o3, 4, %o3
-	add %o0, %o1, %g1	/* end of old tsb */
+	add %o0, %o1, %o1	/* end of old tsb */
 	sllx %g7, 32, %g7
 	sub %o3, 1, %o3		/* %o3 == new tsb hash mask */
 
+	mov %o4, %g1		/* page_size_shift */
+
661:	prefetcha [%o0] ASI_N, #one_read
 	.section .tsb_phys_patch, "ax"
 	.word 661b
@@ -486,9 +489,9 @@ copy_tsb:	/* %o0=old_tsb_base, %o1=old_tsb_size
 	/* This can definitely be computed faster... */
 	srlx %o0, 4, %o5	/* Build index */
 	and %o5, 511, %o5	/* Mask index */
-	sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+	sllx %o5, %g1, %o5	/* Put into vaddr position */
 	or %o4, %o5, %o4	/* Full VADDR. */
-	srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+	srlx %o4, %g1, %o4	/* Shift down to create index */
 	and %o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
 	sllx %o4, 4, %o4	/* Shift back up into tsb ent offset */
 	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
@@ -496,7 +499,7 @@ copy_tsb:	/* %o0=old_tsb_base, %o1=old_tsb_size
 	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */
 
80:	add %o0, 16, %o0
-	cmp %o0, %g1
+	cmp %o0, %o1
 	bne,pt %xcc, 90b
 	 nop
 
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index 7bd8f6556352..efe93ab4a9c0 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
 tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
 tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
 tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4: BTRAP(0x44)
 #else
 tl0_irq1: BTRAP(0x41)
 tl0_irq2: BTRAP(0x42)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index f6bb857254fc..075d38980dee 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
 	if (!id) {
 		dev_set_name(&vdev->dev, "%s", bus_id_name);
 		vdev->dev_no = ~(u64)0;
+		vdev->id = ~(u64)0;
 	} else if (!cfg_handle) {
 		dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
 		vdev->dev_no = *id;
+		vdev->id = ~(u64)0;
 	} else {
 		dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
 			     *cfg_handle, *id);
 		vdev->dev_no = *cfg_handle;
+		vdev->id = *id;
 	}
 
 	vdev->dev.parent = parent;
@@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
 	(void) vio_create_one(hp, node, &root_vdev->dev);
 }
 
+struct vio_md_node_query {
+	const char *type;
+	u64 dev_no;
+	u64 id;
+};
+
 static int vio_md_node_match(struct device *dev, void *arg)
 {
+	struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
 	struct vio_dev *vdev = to_vio_dev(dev);
 
-	if (vdev->mp == (u64) arg)
-		return 1;
+	if (vdev->dev_no != query->dev_no)
+		return 0;
+	if (vdev->id != query->id)
+		return 0;
+	if (strcmp(vdev->type, query->type))
+		return 0;
 
-	return 0;
+	return 1;
 }
 
 static void vio_remove(struct mdesc_handle *hp, u64 node)
 {
+	const char *type;
+	const u64 *id, *cfg_handle;
+	u64 a;
+	struct vio_md_node_query query;
 	struct device *dev;
 
-	dev = device_find_child(&root_vdev->dev, (void *) node,
+	type = mdesc_get_property(hp, node, "device-type", NULL);
+	if (!type) {
+		type = mdesc_get_property(hp, node, "name", NULL);
+		if (!type)
+			type = mdesc_node_name(hp, node);
+	}
+
+	query.type = type;
+
+	id = mdesc_get_property(hp, node, "id", NULL);
+	cfg_handle = NULL;
+	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+		u64 target;
+
+		target = mdesc_arc_target(hp, a);
+		cfg_handle = mdesc_get_property(hp, target,
+						"cfg-handle", NULL);
+		if (cfg_handle)
+			break;
+	}
+
+	if (!id) {
+		query.dev_no = ~(u64)0;
+		query.id = ~(u64)0;
+	} else if (!cfg_handle) {
+		query.dev_no = *id;
+		query.id = ~(u64)0;
+	} else {
+		query.dev_no = *cfg_handle;
+		query.id = *id;
+	}
+
+	dev = device_find_child(&root_vdev->dev, &query,
 				vio_md_node_match);
 	if (dev) {
 		printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
 
 		device_unregister(dev);
 		put_device(dev);
+	} else {
+		if (!id)
+			printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+			       type);
+		else if (!cfg_handle)
+			printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+			       type, *id);
+		else
+			printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+			       type, *cfg_handle, *id);
 	}
 }
 
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 69912d2f8b54..07c03e72d812 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
 lib-$(CONFIG_SPARC64) += atomic_64.o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
+lib-$(CONFIG_SPARC64) += multi3.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
new file mode 100644
index 000000000000..d6b6c97fe3c7
--- /dev/null
+++ b/arch/sparc/lib/multi3.S
@@ -0,0 +1,35 @@
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+	.text
+	.align 4
+ENTRY(__multi3) /* %o0 = u, %o1 = v */
+	mov %o1, %g1
+	srl %o3, 0, %g4
+	mulx %g4, %g1, %o1
+	srlx %g1, 0x20, %g3
+	mulx %g3, %g4, %g5
+	sllx %g5, 0x20, %o5
+	srl %g1, 0, %g4
+	sub %o1, %o5, %o5
+	srlx %o5, 0x20, %o5
+	addcc %g5, %o5, %g5
+	srlx %o3, 0x20, %o5
+	mulx %g4, %o5, %g4
+	mulx %g3, %o5, %o5
+	sethi %hi(0x80000000), %g3
+	addcc %g5, %g4, %g5
+	srlx %g5, 0x20, %g5
+	add %g3, %g3, %g3
+	movcc %xcc, %g0, %g3
+	addcc %o5, %g5, %o5
+	sllx %g4, 0x20, %g4
+	add %o1, %g4, %o1
+	add %o5, %g3, %g2
+	mulx %g1, %o2, %g1
+	add %g1, %g2, %g1
+	mulx %o0, %o3, %o0
+	retl
+	 add %g1, %o0, %o0
+ENDPROC(__multi3)
+EXPORT_SYMBOL(__multi3)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 0cda653ae007..3c40ebd50f92 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
 	}
 
 	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
-		pr_warn("hugepagesz=%llu not supported by MMU.\n",
+		hugetlb_bad_size();
+		pr_err("hugepagesz=%llu not supported by MMU.\n",
 			hugepage_size);
 		goto out;
 	}
@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
 
 /* get_new_mmu_context() uses "cache + 1". */
 DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 #define MAX_CTX_NR (1UL << CTX_NR_BITS)
 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+	unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+	unsigned long new_ver, new_ctx, old_ctx;
+	struct mm_struct *mm;
+	int cpu;
+
+	bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+	/* Reserve kernel context */
+	set_bit(0, mmu_context_bmap);
+
+	new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+	if (unlikely(new_ver == 0))
+		new_ver = CTX_FIRST_VERSION;
+	tlb_context_cache = new_ver;
+
+	/*
+	 * Make sure that any new mm that are added into per_cpu_secondary_mm,
+	 * are going to go through get_new_mmu_context() path.
+	 */
+	mb();
+
+	/*
+	 * Updated versions to current on those CPUs that had valid secondary
+	 * contexts
+	 */
+	for_each_online_cpu(cpu) {
+		/*
+		 * If a new mm is stored after we took this mm from the array,
+		 * it will go into get_new_mmu_context() path, because we
+		 * already bumped the version in tlb_context_cache.
+		 */
+		mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+		if (unlikely(!mm || mm == &init_mm))
+			continue;
+
+		old_ctx = mm->context.sparc64_ctx_val;
+		if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+			new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+			set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+			mm->context.sparc64_ctx_val = new_ctx;
+		}
+	}
+}
 
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
-	int new_version;
 
 	spin_lock(&ctx_alloc_lock);
+retry:
+	/* wrap might have happened, test again if our context became valid */
+	if (unlikely(CTX_VALID(mm->context)))
+		goto out;
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-	new_version = 0;
 	if (new_ctx >= (1 << CTX_NR_BITS)) {
 		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 		if (new_ctx >= ctx) {
-			int i;
-			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-				CTX_FIRST_VERSION;
-			if (new_ctx == 1)
-				new_ctx = CTX_FIRST_VERSION;
-
-			/* Don't call memset, for 16 entries that's just
-			 * plain silly...
-			 */
-			mmu_context_bmap[0] = 3;
-			mmu_context_bmap[1] = 0;
-			mmu_context_bmap[2] = 0;
-			mmu_context_bmap[3] = 0;
-			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-				mmu_context_bmap[i + 0] = 0;
-				mmu_context_bmap[i + 1] = 0;
-				mmu_context_bmap[i + 2] = 0;
-				mmu_context_bmap[i + 3] = 0;
-			}
-			new_version = 1;
-			goto out;
+			mmu_context_wrap();
+			goto retry;
 		}
 	}
+	if (mm->context.sparc64_ctx_val)
+		cpumask_clear(mm_cpumask(mm));
 	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
 	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
 	spin_unlock(&ctx_alloc_lock);
-
-	if (unlikely(new_version))
-		smp_new_mmu_context_version();
 }
 
 static int numa_enabled = 1;
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index bedf08b22a47..0d4b998c7d7b 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -496,7 +496,8 @@ retry_tsb_alloc:
 		extern void copy_tsb(unsigned long old_tsb_base,
 				     unsigned long old_tsb_size,
 				     unsigned long new_tsb_base,
-				     unsigned long new_tsb_size);
+				     unsigned long new_tsb_size,
+				     unsigned long page_size_shift);
 		unsigned long old_tsb_base = (unsigned long) old_tsb;
 		unsigned long new_tsb_base = (unsigned long) new_tsb;
 
@@ -504,7 +505,9 @@ retry_tsb_alloc:
 			old_tsb_base = __pa(old_tsb_base);
 			new_tsb_base = __pa(new_tsb_base);
 		}
-		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+			 tsb_index == MM_TSB_BASE ?
+			 PAGE_SHIFT : REAL_HPAGE_SHIFT);
 	}
 
 	mm->context.tsb_block[tsb_index].tsb = new_tsb;
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5d2fd6cd3189..fcf4d27a38fb 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -971,11 +971,6 @@ xcall_capture:
 	wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
 	retry
 
-	.globl xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
-	wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
-	retry
-
 #ifdef CONFIG_KGDB
 	.globl xcall_kgdb_capture
 xcall_kgdb_capture: