Diffstat (limited to 'arch/powerpc/mm/mmu_context_hash64.c')
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c  |  195
1 file changed, 1 insertion(+), 194 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index ca988a3d5fb2..40677aa0190e 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -24,200 +24,7 @@
 
 #include <asm/mmu_context.h>
 
-#ifdef CONFIG_PPC_ICSWX
-/*
- * The processor and its L2 cache cause the icswx instruction to
- * generate a COP_REQ transaction on PowerBus. The transaction has
- * no address, and the processor does not perform an MMU access
- * to authenticate the transaction. The command portion of the
- * PowerBus COP_REQ transaction includes the LPAR_ID (LPID) and
- * the coprocessor Process ID (PID), which the coprocessor compares
- * to the authorized LPID and PID held in the coprocessor, to determine
- * if the process is authorized to generate the transaction.
- * The data of the COP_REQ transaction is 128 bytes or less and is
- * placed in cacheable memory on a 128-byte cache line boundary.
- *
- * A task that wants to use a coprocessor should call use_cop() to
- * allocate a coprocessor PID before executing the icswx instruction.
- * use_cop() also enables coprocessor context switching. drop_cop()
- * is used to free the coprocessor PID.
- *
- * Example:
- * Host Fabric Interface (HFI) is a PowerPC network coprocessor.
- * Each HFI has multiple windows. Each HFI window serves as a
- * network device sending to and receiving from the HFI network.
- * The HFI immediate send function uses the icswx instruction. The
- * immediate send function allows small (single cache-line) packets
- * to be sent without using the regular HFI send FIFO and doorbell,
- * which are much slower than immediate send.
- *
- * For each task intending to use HFI immediate send, the HFI driver
- * calls use_cop() to obtain a coprocessor PID for the task.
- * The HFI driver then allocates a free HFI window and saves the
- * coprocessor PID to the HFI window to allow the task to use the
- * HFI window.
- *
- * The HFI driver repeatedly creates immediate send packets and
- * issues the icswx instruction to send data through the HFI window.
- * The HFI compares the coprocessor PID in the CPU PID register
- * to the PID held in the HFI window to determine if the transaction
- * is allowed.
- *
- * When the task wants to release the HFI window, the HFI driver
- * calls drop_cop() to release the coprocessor PID.
- */
-
-#define COP_PID_NONE 0
-#define COP_PID_MIN (COP_PID_NONE + 1)
-#define COP_PID_MAX (0xFFFF)
-
-static DEFINE_SPINLOCK(mmu_context_acop_lock);
-static DEFINE_IDA(cop_ida);
-
-void switch_cop(struct mm_struct *next)
-{
-	mtspr(SPRN_PID, next->context.cop_pid);
-	mtspr(SPRN_ACOP, next->context.acop);
-}
-
-static int new_cop_pid(struct ida *ida, int min_id, int max_id,
-		       spinlock_t *lock)
-{
-	int index;
-	int err;
-
-again:
-	if (!ida_pre_get(ida, GFP_KERNEL))
-		return -ENOMEM;
-
-	spin_lock(lock);
-	err = ida_get_new_above(ida, min_id, &index);
-	spin_unlock(lock);
-
-	if (err == -EAGAIN)
-		goto again;
-	else if (err)
-		return err;
-
-	if (index > max_id) {
-		spin_lock(lock);
-		ida_remove(ida, index);
-		spin_unlock(lock);
-		return -ENOMEM;
-	}
-
-	return index;
-}
-
-static void sync_cop(void *arg)
-{
-	struct mm_struct *mm = arg;
-
-	if (mm == current->active_mm)
-		switch_cop(current->active_mm);
-}
-
-/**
- * Start using a coprocessor.
- * @acop: mask of coprocessors to be used.
- * @mm: The mm to associate the coprocessor with. Most likely current mm.
- *
- * Returns a positive PID on success, or a negative errno otherwise.
- * The returned PID will be fed to the coprocessor to determine if an
- * icswx transaction is authenticated.
- */
-int use_cop(unsigned long acop, struct mm_struct *mm)
-{
-	int ret;
-
-	if (!cpu_has_feature(CPU_FTR_ICSWX))
-		return -ENODEV;
-
-	if (!mm || !acop)
-		return -EINVAL;
-
-	/* The page_table_lock ensures mm_users won't change under us */
-	spin_lock(&mm->page_table_lock);
-	spin_lock(mm->context.cop_lockp);
-
-	if (mm->context.cop_pid == COP_PID_NONE) {
-		ret = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX,
-				  &mmu_context_acop_lock);
-		if (ret < 0)
-			goto out;
-
-		mm->context.cop_pid = ret;
-	}
-	mm->context.acop |= acop;
-
-	sync_cop(mm);
-
-	/*
-	 * If this is a threaded process then there might be other threads
-	 * running. We need to send an IPI to force them to pick up any
-	 * change in PID and ACOP.
-	 */
-	if (atomic_read(&mm->mm_users) > 1)
-		smp_call_function(sync_cop, mm, 1);
-
-	ret = mm->context.cop_pid;
-
-out:
-	spin_unlock(mm->context.cop_lockp);
-	spin_unlock(&mm->page_table_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(use_cop);
-
-/**
- * Stop using a coprocessor.
- * @acop: mask of coprocessors to be stopped.
- * @mm: The mm the coprocessor is associated with.
- */
-void drop_cop(unsigned long acop, struct mm_struct *mm)
-{
-	int free_pid = COP_PID_NONE;
-
-	if (!cpu_has_feature(CPU_FTR_ICSWX))
-		return;
-
-	if (WARN_ON_ONCE(!mm))
-		return;
-
-	/* The page_table_lock ensures mm_users won't change under us */
-	spin_lock(&mm->page_table_lock);
-	spin_lock(mm->context.cop_lockp);
-
-	mm->context.acop &= ~acop;
-
-	if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) {
-		free_pid = mm->context.cop_pid;
-		mm->context.cop_pid = COP_PID_NONE;
-	}
-
-	sync_cop(mm);
-
-	/*
-	 * If this is a threaded process then there might be other threads
-	 * running. We need to send an IPI to force them to pick up any
-	 * change in PID and ACOP.
-	 */
-	if (atomic_read(&mm->mm_users) > 1)
-		smp_call_function(sync_cop, mm, 1);
-
-	if (free_pid != COP_PID_NONE) {
-		spin_lock(&mmu_context_acop_lock);
-		ida_remove(&cop_ida, free_pid);
-		spin_unlock(&mmu_context_acop_lock);
-	}
-
-	spin_unlock(mm->context.cop_lockp);
-	spin_unlock(&mm->page_table_lock);
-}
-EXPORT_SYMBOL_GPL(drop_cop);
-
-#endif /* CONFIG_PPC_ICSWX */
+#include "icswx.h"
 
 static DEFINE_SPINLOCK(mmu_context_lock);
 static DEFINE_IDA(mmu_context_ida);
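
The comment block removed above documents the calling convention for use_cop() and drop_cop(): a driver obtains a coprocessor PID for a task's mm before the task issues icswx, programs that PID into the coprocessor's window, and drops it again when the window is released. A minimal sketch of that flow is shown below; the hfi_window structure, the ACOP_HFI bit and the hfi_* function names are hypothetical and invented only for illustration, while use_cop() and drop_cop() are the interfaces from the removed code.

/*
 * Hypothetical driver-side use of the coprocessor PID API described in
 * the removed comment block. Only use_cop()/drop_cop() come from that
 * code; everything else here is an illustrative placeholder.
 */
#include <linux/mm_types.h>
#include <asm/mmu_context.h>

#define ACOP_HFI	(1UL << 0)	/* illustrative coprocessor-type bit */

struct hfi_window {
	int cop_pid;	/* PID the coprocessor compares icswx requests against */
};

static int hfi_open_window(struct hfi_window *win, struct mm_struct *mm)
{
	int pid;

	/*
	 * Allocate (or reuse) the coprocessor PID for this mm and enable
	 * ACOP context switching; must happen before any icswx is issued.
	 */
	pid = use_cop(ACOP_HFI, mm);
	if (pid < 0)
		return pid;

	/* Program the window so the coprocessor can authenticate requests. */
	win->cop_pid = pid;
	return 0;
}

static void hfi_close_window(struct hfi_window *win, struct mm_struct *mm)
{
	/* Drop our coprocessor type; the PID is freed once no types remain. */
	drop_cop(ACOP_HFI, mm);
	win->cop_pid = 0;
}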