Diffstat (limited to 'drivers/misc/cxl/fault.c')
-rw-r--r--  drivers/misc/cxl/fault.c  | 129
1 file changed, 95 insertions(+), 34 deletions(-)
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 25a5418c55cb..81c3f75b7330 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -166,13 +166,92 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
 	cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
 }
 
+/*
+ * Returns the mm_struct corresponding to the context ctx via ctx->pid
+ * In case the task has exited we use the task group leader accessible
+ * via ctx->glpid to find the next task in the thread group that has a
+ * valid mm_struct associated with it. If a task with valid mm_struct
+ * is found the ctx->pid is updated to use the task struct for subsequent
+ * translations. In case no valid mm_struct is found in the task group to
+ * service the fault a NULL is returned.
+ */
+static struct mm_struct *get_mem_context(struct cxl_context *ctx)
+{
+	struct task_struct *task = NULL;
+	struct mm_struct *mm = NULL;
+	struct pid *old_pid = ctx->pid;
+
+	if (old_pid == NULL) {
+		pr_warn("%s: Invalid context for pe=%d\n",
+			__func__, ctx->pe);
+		return NULL;
+	}
+
+	task = get_pid_task(old_pid, PIDTYPE_PID);
+
+	/*
+	 * pid_alive may look racy but this saves us from costly
+	 * get_task_mm when the task is a zombie. In worst case
+	 * we may think a task is alive, which is about to die
+	 * but get_task_mm will return NULL.
+	 */
+	if (task != NULL && pid_alive(task))
+		mm = get_task_mm(task);
+
+	/* release the task struct that was taken earlier */
+	if (task)
+		put_task_struct(task);
+	else
+		pr_devel("%s: Context owning pid=%i for pe=%i dead\n",
+			 __func__, pid_nr(old_pid), ctx->pe);
+
+	/*
+	 * If we couldn't find the mm context then use the group
+	 * leader to iterate over the task group and find a task
+	 * that gives us mm_struct.
+	 */
+	if (unlikely(mm == NULL && ctx->glpid != NULL)) {
+
+		rcu_read_lock();
+		task = pid_task(ctx->glpid, PIDTYPE_PID);
+		if (task)
+			do {
+				mm = get_task_mm(task);
+				if (mm) {
+					ctx->pid = get_task_pid(task,
+								PIDTYPE_PID);
+					break;
+				}
+				task = next_thread(task);
+			} while (task && !thread_group_leader(task));
+		rcu_read_unlock();
+
+		/* check if we switched pid */
+		if (ctx->pid != old_pid) {
+			if (mm)
+				pr_devel("%s:pe=%i switch pid %i->%i\n",
+					 __func__, ctx->pe, pid_nr(old_pid),
+					 pid_nr(ctx->pid));
+			else
+				pr_devel("%s:Cannot find mm for pid=%i\n",
+					 __func__, pid_nr(old_pid));
+
+			/* drop the reference to older pid */
+			put_pid(old_pid);
+		}
+	}
+
+	return mm;
+}
+
+
+
 void cxl_handle_fault(struct work_struct *fault_work)
 {
 	struct cxl_context *ctx =
 		container_of(fault_work, struct cxl_context, fault_work);
 	u64 dsisr = ctx->dsisr;
 	u64 dar = ctx->dar;
-	struct task_struct *task = NULL;
 	struct mm_struct *mm = NULL;
 
 	if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
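The helper added above chains three reference-counted lookups: struct pid -> struct task_struct -> struct mm_struct. As a minimal sketch of that pairing discipline (illustrative only, not part of the patch; mm_from_pid is a hypothetical name), the first half of get_mem_context() reduces to:

	/*
	 * Hypothetical helper: resolve a struct pid to the task's mm.
	 * Every "get" is balanced by the matching "put" on all paths.
	 */
	static struct mm_struct *mm_from_pid(struct pid *pid)
	{
		struct task_struct *task;
		struct mm_struct *mm = NULL;

		task = get_pid_task(pid, PIDTYPE_PID); /* takes a task ref; NULL if gone */
		if (task) {
			/*
			 * pid_alive() is only an optimisation: a task that is
			 * about to die may still pass it, in which case
			 * get_task_mm() simply returns NULL.
			 */
			if (pid_alive(task))
				mm = get_task_mm(task); /* takes an mm ref; NULL for zombies */
			put_task_struct(task);          /* balance get_pid_task() */
		}

		return mm; /* caller must mmput() a non-NULL result */
	}

The RCU fallback in the same hunk applies the identical pairing to each thread it visits; pid_task() is used there instead of get_pid_task() because no extra task reference is needed while rcu_read_lock() is held.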
@@ -195,17 +274,17 @@ void cxl_handle_fault(struct work_struct *fault_work)
 		"DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
 
 	if (!ctx->kernel) {
-		if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
-			pr_devel("cxl_handle_fault unable to get task %i\n",
-				 pid_nr(ctx->pid));
+
+		mm = get_mem_context(ctx);
+		/* indicates all the thread in task group have exited */
+		if (mm == NULL) {
+			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
+				 __func__, ctx->pe, pid_nr(ctx->pid));
 			cxl_ack_ae(ctx);
 			return;
-		}
-		if (!(mm = get_task_mm(task))) {
-			pr_devel("cxl_handle_fault unable to get mm %i\n",
-				 pid_nr(ctx->pid));
-			cxl_ack_ae(ctx);
-			goto out;
+		} else {
+			pr_devel("Handling page fault for pe=%d pid=%i\n",
+				 ctx->pe, pid_nr(ctx->pid));
 		}
 	}
 
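This caller and the two below follow one contract: a non-NULL mm returned by get_mem_context() carries the reference taken by get_task_mm() inside the helper, and must be released with mmput() on every path. A hedged sketch of the expected shape (cxl_do_mm_work is a hypothetical caller, not from the patch):

	/* Hypothetical caller, shown only to illustrate the mm lifetime rule. */
	static void cxl_do_mm_work(struct cxl_context *ctx)
	{
		struct mm_struct *mm = get_mem_context(ctx);

		if (mm == NULL) /* every thread in the group has exited */
			return;

		down_read(&mm->mmap_sem);  /* mmap_sem, as used elsewhere in this file */
		/* ... walk VMAs / handle the fault against mm ... */
		up_read(&mm->mmap_sem);

		mmput(mm); /* balance the get_task_mm() inside get_mem_context() */
	}

This is exactly the structure cxl_prefault_vma() takes after the change in the later hunks.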
@@ -218,33 +297,22 @@ void cxl_handle_fault(struct work_struct *fault_work)
 
 	if (mm)
 		mmput(mm);
-out:
-	if (task)
-		put_task_struct(task);
 }
 
 static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
 {
-	int rc;
-	struct task_struct *task;
 	struct mm_struct *mm;
 
-	if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
-		pr_devel("cxl_prefault_one unable to get task %i\n",
-			 pid_nr(ctx->pid));
-		return;
-	}
-	if (!(mm = get_task_mm(task))) {
+	mm = get_mem_context(ctx);
+	if (mm == NULL) {
 		pr_devel("cxl_prefault_one unable to get mm %i\n",
 			 pid_nr(ctx->pid));
-		put_task_struct(task);
 		return;
 	}
 
-	rc = cxl_fault_segment(ctx, mm, ea);
+	cxl_fault_segment(ctx, mm, ea);
 
 	mmput(mm);
-	put_task_struct(task);
 }
 
 static u64 next_segment(u64 ea, u64 vsid)
@@ -263,18 +331,13 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
 	struct copro_slb slb;
 	struct vm_area_struct *vma;
 	int rc;
-	struct task_struct *task;
 	struct mm_struct *mm;
 
-	if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
-		pr_devel("cxl_prefault_vma unable to get task %i\n",
-			 pid_nr(ctx->pid));
-		return;
-	}
-	if (!(mm = get_task_mm(task))) {
+	mm = get_mem_context(ctx);
+	if (mm == NULL) {
 		pr_devel("cxl_prefault_vm unable to get mm %i\n",
 			 pid_nr(ctx->pid));
-		goto out1;
+		return;
 	}
 
 	down_read(&mm->mmap_sem);
@@ -295,8 +358,6 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
 	up_read(&mm->mmap_sem);
 
 	mmput(mm);
-out1:
-	put_task_struct(task);
 }
 
 void cxl_prefault(struct cxl_context *ctx, u64 wed)
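One subtlety is easy to miss in the first hunk: when the RCU walk adopts another thread, get_mem_context() also swaps ctx->pid, and the pid reference counts stay balanced because the replacement is taken with get_task_pid() before the original is dropped. Reduced to its essence (a sketch reusing identifiers from the first hunk, not additional patch content):

	struct pid *old_pid = ctx->pid;  /* ref already held by the context */

	/* inside the RCU walk, on finding a thread with a live mm: */
	ctx->pid = get_task_pid(task, PIDTYPE_PID); /* +1 on the new thread's pid */

	/* after the walk, only if the pid actually changed: */
	put_pid(old_pid); /* -1 on the exited task's pid */

Without the put_pid(), every context whose original task exited would leak a struct pid reference once a sibling thread's mm was adopted.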