author	Will Deacon <will.deacon@arm.com>	2013-05-13 10:21:49 -0400
committer	Will Deacon <will.deacon@arm.com>	2013-08-20 06:54:53 -0400
commit	28256d612726a28a8b9d3c49f2b74198c4423d6a (patch)
tree	a64cd498ec1e9f4d34f77bcc827a541f19099bce /arch/arm/kernel
parent	377747c40657eb35ad98a56439606d96a928425a (diff)
ARM: cacheflush: split user cache-flushing into interruptible chunks
Flushing a large, non-faulting VMA from userspace can potentially result in a long time spent flushing the cache line-by-line without preemption occurring (in the case of CONFIG_PREEMPT=n). Whilst this doesn't affect the stability of the system, it can certainly affect the responsiveness and CPU availability for other tasks.

This patch splits up the user cacheflush code so that it flushes in chunks of a page. After each chunk has been flushed, we may reschedule if appropriate and, before processing the next chunk, we allow any pending signals to be handled before resuming from where we left off.

Signed-off-by: Will Deacon <will.deacon@arm.com>
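For context (not part of the patch): the range flushed here arrives from userspace through ARM's private cacheflush syscall, typically issued by a JIT after emitting code into an executable buffer. Below is a minimal sketch of such a caller, assuming a Linux/ARM target where <asm/unistd.h> defines __ARM_NR_cacheflush; the helper name user_cacheflush is invented for illustration, and the third syscall argument is a flags word, passed as zero.

/*
 * Illustrative sketch only -- not from the patch. Assumes a Linux/ARM
 * target where <asm/unistd.h> provides __ARM_NR_cacheflush.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/unistd.h>         /* __ARM_NR_cacheflush */

/* Hypothetical helper: sync caches for [start, end) after writing code. */
static int user_cacheflush(void *start, void *end)
{
        return syscall(__ARM_NR_cacheflush, start, end, 0 /* flags */);
}

int main(void)
{
        size_t len = 16 * 4096; /* several pages, a stand-in JIT buffer */
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;

        memset(buf, 0, len);    /* pretend we emitted instructions here */

        /* With this patch, the kernel walks the range in PAGE_SIZE
         * chunks, calling cond_resched() between chunks and restarting
         * after pending signals via -ERESTART_RESTARTBLOCK. */
        if (user_cacheflush(buf, (char *)buf + len) != 0)
                perror("cacheflush");

        munmap(buf, len);
        return 0;
}

Note that do_cache_op() clamps the range to the enclosing VMA and returns -EINVAL when no VMA covers it, so for an unmapped range the sketch above would see errno == EINVAL.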
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/traps.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 57 insertions(+), 8 deletions(-)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cab094c234ee..4d268d912b0e 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -499,6 +499,54 @@ static int bad_syscall(int n, struct pt_regs *regs)
 	return regs->ARM_r0;
 }
 
+static long do_cache_op_restart(struct restart_block *);
+
+static inline int
+__do_cache_op(unsigned long start, unsigned long end)
+{
+	int ret;
+	unsigned long chunk = PAGE_SIZE;
+
+	do {
+		if (signal_pending(current)) {
+			struct thread_info *ti = current_thread_info();
+
+			ti->restart_block = (struct restart_block) {
+				.fn	= do_cache_op_restart,
+			};
+
+			ti->arm_restart_block = (struct arm_restart_block) {
+				{
+					.cache = {
+						.start	= start,
+						.end	= end,
+					},
+				},
+			};
+
+			return -ERESTART_RESTARTBLOCK;
+		}
+
+		ret = flush_cache_user_range(start, start + chunk);
+		if (ret)
+			return ret;
+
+		cond_resched();
+		start += chunk;
+	} while (start < end);
+
+	return 0;
+}
+
+static long do_cache_op_restart(struct restart_block *unused)
+{
+	struct arm_restart_block *restart_block;
+
+	restart_block = &current_thread_info()->arm_restart_block;
+	return __do_cache_op(restart_block->cache.start,
+			     restart_block->cache.end);
+}
+
 static inline int
 do_cache_op(unsigned long start, unsigned long end, int flags)
 {
@@ -510,17 +558,18 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, start);
-	if (vma && vma->vm_start < end) {
-		if (start < vma->vm_start)
-			start = vma->vm_start;
-		if (end > vma->vm_end)
-			end = vma->vm_end;
-
+	if (!vma || vma->vm_start >= end) {
 		up_read(&mm->mmap_sem);
-		return flush_cache_user_range(start, end);
+		return -EINVAL;
 	}
+
+	if (start < vma->vm_start)
+		start = vma->vm_start;
+	if (end > vma->vm_end)
+		end = vma->vm_end;
 	up_read(&mm->mmap_sem);
-	return -EINVAL;
+
+	return __do_cache_op(start, end);
 }
 
 /*