author    Atish Patra <atish.patra@wdc.com>      2018-10-02 15:15:05 -0400
committer Palmer Dabbelt <palmer@sifive.com>     2018-10-22 20:03:37 -0400
commit    f99fb607fb2bc0d4ce6b9adb764c65e37f40a92b
tree      082100e81815f6c2fa5d152821d24351aa16a871 /arch/riscv/kernel
parent    6825c7a80f1863b975a00042abe140ea24813af2
RISC-V: Use Linux logical CPU number instead of hartid
Set up the cpu_logical_map during boot. Moreover, every SBI call and
PLIC context is based on the physical hartid. Use the logical CPU to
hartid mapping to pass the correct hartid to the respective functions.
Signed-off-by: Atish Patra <atish.patra@wdc.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Palmer Dabbelt <palmer@sifive.com>
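The patch relies on the logical-CPU-to-hartid map introduced by the parent commit (6825c7a80f18), accessed through the cpuid_to_hartid_map() macro. As a rough sketch of the idea (the authoritative definition lives in asm/smp.h and is not part of this diff), the accessor is just an lvalue wrapper around an array indexed by logical CPU number:

```c
/*
 * Sketch of the accessor this patch assumes (see asm/smp.h from the
 * parent commit for the real definition): logical CPU id in, physical
 * hartid out.  Unpopulated slots hold INVALID_HARTID.
 */
extern unsigned long __cpuid_to_hartid_map[NR_CPUS];
#define INVALID_HARTID			ULONG_MAX
#define cpuid_to_hartid_map(cpu)	__cpuid_to_hartid_map[(cpu)]
```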
Diffstat (limited to 'arch/riscv/kernel')
-rw-r--r--  arch/riscv/kernel/cpu.c      |  8
-rw-r--r--  arch/riscv/kernel/head.S     |  4
-rw-r--r--  arch/riscv/kernel/setup.c    |  6
-rw-r--r--  arch/riscv/kernel/smp.c      | 24
-rw-r--r--  arch/riscv/kernel/smpboot.c  | 25

5 files changed, 45 insertions(+), 22 deletions(-)
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 4723e235dcaa..cccc6f61c538 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/seq_file.h>
 #include <linux/of.h>
+#include <asm/smp.h>
 
 /*
  * Returns the hart ID of the given device tree node, or -1 if the device tree
@@ -138,11 +139,12 @@ static void c_stop(struct seq_file *m, void *v)
 
 static int c_show(struct seq_file *m, void *v)
 {
-	unsigned long hart_id = (unsigned long)v - 1;
-	struct device_node *node = of_get_cpu_node(hart_id, NULL);
+	unsigned long cpu_id = (unsigned long)v - 1;
+	struct device_node *node = of_get_cpu_node(cpuid_to_hartid_map(cpu_id),
+						   NULL);
 	const char *compat, *isa, *mmu;
 
-	seq_printf(m, "hart\t: %lu\n", hart_id);
+	seq_printf(m, "hart\t: %lu\n", cpu_id);
 	if (!of_property_read_string(node, "riscv,isa", &isa))
 		print_isa(m, isa);
 	if (!of_property_read_string(node, "mmu-type", &mmu))
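After this change the index that seq_file hands to c_show() is a logical CPU id, so the device-tree lookup has to translate it back into a hartid (the cpu nodes are keyed by hartid), while the "hart" line in /proc/cpuinfo now reports the logical number. A hypothetical debug helper along these lines (not part of the patch) could be used to cross-check the two numberings:

```c
/*
 * Hypothetical debug helper (not in this patch): print each logical CPU
 * id next to the physical hartid it maps to, so /proc/cpuinfo output can
 * be compared against the device tree.
 */
static void __init dump_cpuid_to_hartid_map(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		pr_info("cpu%d -> hartid %lu\n", cpu, cpuid_to_hartid_map(cpu));
}
```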
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index c4d2c63f9a29..711190d473d4 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -47,6 +47,8 @@ ENTRY(_start)
 	/* Save hart ID and DTB physical address */
 	mv s0, a0
 	mv s1, a1
+	la a2, boot_cpu_hartid
+	REG_S a0, (a2)
 
 	/* Initialize page tables and relocate to virtual addresses */
 	la sp, init_thread_union + THREAD_SIZE
@@ -55,7 +57,7 @@ ENTRY(_start)
 
 	/* Restore C environment */
 	la tp, init_task
-	sw s0, TASK_TI_CPU(tp)
+	sw zero, TASK_TI_CPU(tp)
 
 	la sp, init_thread_union
 	li a0, ASM_THREAD_SIZE
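The first hunk records the boot hart's physical id in boot_cpu_hartid before the MMU is up; the second pins the boot hart's thread_info.cpu to 0 instead of its raw hartid, so smp_processor_id() now returns a logical number. The store uses REG_S from asm/asm.h so the write is native register width on both RV32 and RV64; conceptually it is selected roughly as sketched below (simplified, not part of this diff):

```c
/*
 * Simplified sketch of REG_S from asm/asm.h (the real header derives it
 * through a selector macro): store a native-width register, sd on RV64,
 * sw on RV32.
 */
#ifdef CONFIG_64BIT
#define REG_S	sd
#else
#define REG_S	sw
#endif
```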
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index d5d8611066d5..5e9e6f934cc0 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -81,11 +81,17 @@ EXPORT_SYMBOL(empty_zero_page);
 
 /* The lucky hart to first increment this variable will boot the other cores */
 atomic_t hart_lottery;
+unsigned long boot_cpu_hartid;
 
 unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
 	[0 ... NR_CPUS-1] = INVALID_HARTID
 };
 
+void __init smp_setup_processor_id(void)
+{
+	cpuid_to_hartid_map(0) = boot_cpu_hartid;
+}
+
 #ifdef CONFIG_BLK_DEV_INITRD
 static void __init setup_initrd(void)
 {
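smp_setup_processor_id() overrides a weak hook that start_kernel() invokes very early, before setup_arch(), so slot 0 of the map already holds the boot hartid saved by head.S by the time anything consults it. For context, the generic fallback in init/main.c is simply an empty weak function:

```c
/*
 * Generic placeholder in init/main.c (shown for context only); the
 * RISC-V definition in setup.c above overrides this weak symbol.
 */
void __init __weak smp_setup_processor_id(void)
{
}
```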
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 0bd48935f886..4eac0094f47e 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -97,14 +97,18 @@ void riscv_software_interrupt(void)
 static void
 send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
 {
-	int i;
+	int cpuid, hartid;
+	struct cpumask hartid_mask;
 
+	cpumask_clear(&hartid_mask);
 	mb();
-	for_each_cpu(i, to_whom)
-		set_bit(operation, &ipi_data[i].bits);
-
+	for_each_cpu(cpuid, to_whom) {
+		set_bit(operation, &ipi_data[cpuid].bits);
+		hartid = cpuid_to_hartid_map(cpuid);
+		cpumask_set_cpu(hartid, &hartid_mask);
+	}
 	mb();
-	sbi_send_ipi(cpumask_bits(to_whom));
+	sbi_send_ipi(cpumask_bits(&hartid_mask));
 }
 
 void arch_send_call_function_ipi_mask(struct cpumask *mask)
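The temporary hartid_mask is needed because sbi_send_ipi() hands its argument straight to the firmware, which identifies harts only by physical id. For reference, the legacy SBI v0.1 wrapper looks roughly like this (sketched from asm/sbi.h of that era; not part of this diff):

```c
/*
 * Rough sketch of the legacy SBI wrapper assumed here: the pointer is a
 * bitmask indexed by *hartid*, so the caller must translate its logical
 * CPU mask before invoking it.
 */
static inline void sbi_send_ipi(const unsigned long *hart_mask)
{
	SBI_CALL_1(SBI_SEND_IPI, hart_mask);
}
```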
@@ -146,7 +150,7 @@ void smp_send_reschedule(int cpu)
 void flush_icache_mm(struct mm_struct *mm, bool local)
 {
 	unsigned int cpu;
-	cpumask_t others, *mask;
+	cpumask_t others, hmask, *mask;
 
 	preempt_disable();
 
@@ -164,9 +168,11 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 	 */
 	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
 	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local)
-		sbi_remote_fence_i(others.bits);
-	else {
+	if (mm != current->active_mm || !local) {
+		cpumask_clear(&hmask);
+		riscv_cpuid_to_hartid_mask(&others, &hmask);
+		sbi_remote_fence_i(hmask.bits);
+	} else {
 		/*
 		 * It's assumed that at least one strongly ordered operation is
 		 * performed on this hart between setting a hart's cpumask bit
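riscv_cpuid_to_hartid_mask(), used here in flush_icache_mm(), comes from the parent commit rather than this one; conceptually it just walks the logical-CPU mask and sets the corresponding hartid bits, along the lines of this sketch:

```c
/*
 * Sketch of the helper used above (the authoritative version was added
 * by the parent commit): convert a mask of logical CPU ids into a mask
 * of physical hartids.
 */
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
	int cpu;

	for_each_cpu(cpu, in)
		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}
```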
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 1e478615017c..18cda0e8cf94 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -53,17 +53,23 @@ void __init setup_smp(void)
 	struct device_node *dn = NULL;
 	int hart;
 	bool found_boot_cpu = false;
+	int cpuid = 1;
 
 	while ((dn = of_find_node_by_type(dn, "cpu"))) {
 		hart = riscv_of_processor_hartid(dn);
-		if (hart >= 0) {
-			set_cpu_possible(hart, true);
-			set_cpu_present(hart, true);
-			if (hart == smp_processor_id()) {
-				BUG_ON(found_boot_cpu);
-				found_boot_cpu = true;
-			}
+		if (hart < 0)
+			continue;
+
+		if (hart == cpuid_to_hartid_map(0)) {
+			BUG_ON(found_boot_cpu);
+			found_boot_cpu = 1;
+			continue;
 		}
+
+		cpuid_to_hartid_map(cpuid) = hart;
+		set_cpu_possible(cpuid, true);
+		set_cpu_present(cpuid, true);
+		cpuid++;
 	}
 
 	BUG_ON(!found_boot_cpu);
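The rewritten loop reserves logical CPU 0 for the boot hart (seeded earlier by smp_setup_processor_id()) and hands out the remaining logical ids in device-tree discovery order. The stand-alone snippet below (ordinary user-space C, purely illustrative) models that numbering for a hypothetical four-hart system that booted on hartid 2:

```c
/* Illustration only: the numbering scheme implemented by setup_smp(),
 * modelled outside the kernel.  Hypothetical 4-hart system, boot hart 2,
 * DT nodes visited in hartid order 0,1,2,3. */
#include <stdio.h>

int main(void)
{
	const int harts[] = { 0, 1, 2, 3 };	/* discovery order (assumed) */
	const int boot_hartid = 2;
	int map[4], cpuid = 1;

	map[0] = boot_hartid;			/* logical CPU 0 = boot hart */
	for (int i = 0; i < 4; i++) {
		if (harts[i] == boot_hartid)
			continue;
		map[cpuid++] = harts[i];
	}
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu%d -> hartid %d\n", cpu, map[cpu]);
	return 0;
}
```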
@@ -71,6 +77,7 @@ void __init setup_smp(void)
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
+	int hartid = cpuid_to_hartid_map(cpu);
 	tidle->thread_info.cpu = cpu;
 
 	/*
@@ -81,9 +88,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	 * the spinning harts that they can continue the boot process.
 	 */
 	smp_mb();
-	WRITE_ONCE(__cpu_up_stack_pointer[cpu],
+	WRITE_ONCE(__cpu_up_stack_pointer[hartid],
 		   task_stack_page(tidle) + THREAD_SIZE);
-	WRITE_ONCE(__cpu_up_task_pointer[cpu], tidle);
+	WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
 
 	while (!cpu_online(cpu))
 		cpu_relax();
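__cpu_up_stack_pointer[] and __cpu_up_task_pointer[] are polled by the waiting secondary harts themselves in head.S, and a waiting hart knows only its own physical hartid (passed in a0), which is why the producer side above now indexes by hartid instead of by logical CPU. A C-level rendering of the consumer side (the real loop is assembly; shown only to make the indexing explicit):

```c
/*
 * Conceptual C rendering of the wait loop a secondary hart runs in
 * head.S until __cpu_up() releases it; illustrative only.  The slot is
 * indexed by the hart's *physical* id, matching the WRITE_ONCE() calls
 * above.
 */
static void wait_for_boot_hart(unsigned long hartid)
{
	while (!READ_ONCE(__cpu_up_stack_pointer[hartid]))
		cpu_relax();
}
```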