author		Daniel Vetter <daniel.vetter@ffwll.ch>	2012-03-25 13:47:41 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-03-27 07:36:30 -0400
commit		f56f821feb7b36223f309e0ec05986bb137ce418 (patch)
tree		5d6ae677300d4ea171bad39ba8f7aa65cf24c6d9
parent		d174bd6472d79fb5603dc8bd35e5184d83194ea8 (diff)
mm: extend prefault helpers to fault in more than PAGE_SIZE
drm/i915 wants to read/write more than one page in its fastpath
and hence needs to prefault more than PAGE_SIZE bytes.
Add new functions in filemap.h to make that possible.
Also kill a copy&pasted spurious space in both functions while at it.
v2: As suggested by Andrew Morton, add a multipage parameter to both
functions to avoid the additional branch for the filemap.c hotpath.
My gcc 4.6 here seems to do the right thing and indeed eliminates these
branches where they are not needed.
v3: Because I couldn't find a way around adding a uaddr += PAGE_SIZE to
the filemap.c hotpaths (that the compiler couldn't remove again),
let's go with separate new functions for the multipage use-case.
v4: Adjust comment to CodingStyle and fix spelling.
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
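
The pattern the driver relies on is worth spelling out: touch every page of the user buffer before taking any locks, so that the later copy, done with page faults disabled, rarely has to fall back to the slow path. Below is a minimal kernel-style sketch of such a caller; the function and parameter names are made up for illustration, and only fault_in_multipages_readable() itself comes from this patch.

#include <linux/pagemap.h>
#include <linux/uaccess.h>

/*
 * Hypothetical caller, loosely modelled on the i915 pwrite path in this
 * patch: prefault the whole user buffer up front, then do the real copy
 * with page faults disabled while holding the device lock.
 */
static int example_prefaulted_write(const char __user *user_data, int len)
{
	/* Touch one byte per page of [user_data, user_data + len). */
	if (fault_in_multipages_readable(user_data, len))
		return -EFAULT;

	/* ... take the lock, pagefault_disable(), copy, pagefault_enable() ... */
	return 0;
}

In the actual driver the prefault result can even be ignored on purpose, as the comment in the pread hunk below explains.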
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	6
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	2
-rw-r--r--	include/linux/pagemap.h	64
3 files changed, 66 insertions, 6 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e9cac478cced..6dc832902f53 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -416,7 +416,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		mutex_unlock(&dev->struct_mutex);
 
 		if (!prefaulted) {
-			ret = fault_in_pages_writeable(user_data, remain);
+			ret = fault_in_multipages_writeable(user_data, remain);
 			/* Userspace is tricking us, but we've already clobbered
 			 * its pages with the prefault and promised to write the
 			 * data up to the first fault. Hence ignore any errors
@@ -809,8 +809,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		       args->size))
 		return -EFAULT;
 
-	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
-				      args->size);
+	ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
+					   args->size);
 	if (ret)
 		return -EFAULT;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index eb85860001ec..8e0b686d3afb 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -997,7 +997,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 		if (!access_ok(VERIFY_WRITE, ptr, length))
 			return -EFAULT;
 
-		if (fault_in_pages_readable(ptr, length))
+		if (fault_in_multipages_readable(ptr, length))
 			return -EFAULT;
 	}
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index cfaaa6949b8b..c93a9a9bcd35 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -426,7 +426,7 @@ static inline int fault_in_pages_writeable(char __user *uaddr, int size)
 		 */
 		if (((unsigned long)uaddr & PAGE_MASK) !=
 				((unsigned long)end & PAGE_MASK))
-		 	ret = __put_user(0, end);
+			ret = __put_user(0, end);
 	}
 	return ret;
 }
@@ -445,13 +445,73 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 
 		if (((unsigned long)uaddr & PAGE_MASK) !=
 				((unsigned long)end & PAGE_MASK)) {
-		 	ret = __get_user(c, end);
+			ret = __get_user(c, end);
 			(void)c;
 		}
 	}
 	return ret;
 }
 
+/*
+ * Multipage variants of the above prefault helpers, useful if more than
+ * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
+ * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
+ * filemap.c hotpaths.
+ */
+static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
+{
+	int ret;
+	const char __user *end = uaddr + size - 1;
+
+	if (unlikely(size == 0))
+		return 0;
+
+	/*
+	 * Writing zeroes into userspace here is OK, because we know that if
+	 * the zero gets there, we'll be overwriting it.
+	 */
+	while (uaddr <= end) {
+		ret = __put_user(0, uaddr);
+		if (ret != 0)
+			return ret;
+		uaddr += PAGE_SIZE;
+	}
+
+	/* Check whether the range spilled into the next page. */
+	if (((unsigned long)uaddr & PAGE_MASK) ==
+	    ((unsigned long)end & PAGE_MASK))
+		ret = __put_user(0, end);
+
+	return ret;
+}
+
+static inline int fault_in_multipages_readable(const char __user *uaddr,
+					       int size)
+{
+	volatile char c;
+	int ret;
+	const char __user *end = uaddr + size - 1;
+
+	if (unlikely(size == 0))
+		return 0;
+
+	while (uaddr <= end) {
+		ret = __get_user(c, uaddr);
+		if (ret != 0)
+			return ret;
+		uaddr += PAGE_SIZE;
+	}
+
+	/* Check whether the range spilled into the next page. */
+	if (((unsigned long)uaddr & PAGE_MASK) ==
+	    ((unsigned long)end & PAGE_MASK)) {
+		ret = __get_user(c, end);
+		(void)c;
+	}
+
+	return ret;
+}
+
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 				pgoff_t index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
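
To see why the new helpers stride by PAGE_SIZE and still need the trailing end-byte check, the standalone user-space sketch below mimics just the address arithmetic of fault_in_multipages_readable(). No memory is actually faulted in, and PAGE_SIZE is assumed to be 4096 purely for illustration.

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed value, for illustration only */
#define PAGE_MASK (~(PAGE_SIZE - 1))

/*
 * Mimic the address arithmetic of fault_in_multipages_readable(): print
 * every address the helper would touch for a buffer at 'uaddr' spanning
 * 'size' bytes.
 */
static void show_prefault_touches(unsigned long uaddr, unsigned long size)
{
	unsigned long end = uaddr + size - 1;

	if (size == 0)
		return;

	/* One touch per PAGE_SIZE stride, starting at the first byte. */
	while (uaddr <= end) {
		printf("touch 0x%lx (page 0x%lx)\n", uaddr, uaddr & PAGE_MASK);
		uaddr += PAGE_SIZE;
	}

	/*
	 * The stride can step past 'end' without ever landing in the page
	 * that holds it; the helper then touches the last byte explicitly.
	 */
	if ((uaddr & PAGE_MASK) == (end & PAGE_MASK))
		printf("touch 0x%lx (tail, page 0x%lx)\n", end, end & PAGE_MASK);
}

int main(void)
{
	/*
	 * A 32-byte buffer that straddles a page boundary: the stride loop
	 * only lands in the first page, the tail check covers the second.
	 */
	show_prefault_touches(0x10ff0, 0x20);
	return 0;
}

Running it prints one touch in the first page from the stride loop and one tail touch in the second page, which is exactly the case the final PAGE_MASK comparison in both helpers exists to cover.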