path: root/include/linux/pagemap.h
author    Daniel Vetter <daniel.vetter@ffwll.ch>    2012-03-25 13:47:41 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>    2012-03-27 07:36:30 -0400
commit    f56f821feb7b36223f309e0ec05986bb137ce418 (patch)
tree      5d6ae677300d4ea171bad39ba8f7aa65cf24c6d9 /include/linux/pagemap.h
parent    d174bd6472d79fb5603dc8bd35e5184d83194ea8 (diff)
mm: extend prefault helpers to fault in more than PAGE_SIZE
drm/i915 wants to read/write more than one page in its fastpath and hence needs to prefault more than PAGE_SIZE bytes.

Add new functions in pagemap.h to make that possible.

Also kill a copy&pasted spurious space in both functions while at it.

v2: As suggested by Andrew Morton, add a multipage parameter to both functions to avoid the additional branch for the filemap.c hotpath. My gcc 4.6 here seems to do the right thing and indeed reaps these branches where they are not needed.

v3: Because I couldn't find a way around adding a uaddr += PAGE_SIZE to the filemap.c hotpaths (that the compiler couldn't remove again), let's go with separate new functions for the multipage use-case.

v4: Adjust the comment to CodingStyle and fix spelling.

Acked-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
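For context, here is a minimal caller-side sketch of the intended usage pattern. It is not part of this patch and the function name copy_user_range_fast is made up for illustration: prefault the whole user range with the new helper, then perform the copy with page faults disabled so the fastpath cannot sleep. pagefault_disable()/pagefault_enable() and __copy_from_user_inatomic() are the existing kernel primitives such a fastpath would typically pair with the prefault.

#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>

/* Hypothetical caller sketch, not part of this patch. */
static int copy_user_range_fast(void *dst, const char __user *src, int len)
{
	unsigned long unwritten;

	/* Touch every page of the source range before disabling page faults. */
	if (fault_in_multipages_readable(src, len))
		return -EFAULT;

	pagefault_disable();
	unwritten = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	/*
	 * Prefaulting makes a fault here unlikely but not impossible (the
	 * pages may be reclaimed in between); a real fastpath would fall
	 * back to a faulting slow path instead of just returning an error.
	 */
	return unwritten ? -EFAULT : 0;
}

Prefaulting before the atomic copy matters when the copy is done while holding locks that the page-fault path might also need, which is the kind of deadlock the drm/i915 fastpath needs to avoid.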
Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--    include/linux/pagemap.h    64
1 file changed, 62 insertions, 2 deletions
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index cfaaa6949b8b..c93a9a9bcd35 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -426,7 +426,7 @@ static inline int fault_in_pages_writeable(char __user *uaddr, int size)
 		 */
 		if (((unsigned long)uaddr & PAGE_MASK) !=
 				((unsigned long)end & PAGE_MASK))
-		 	ret = __put_user(0, end);
+			ret = __put_user(0, end);
 	}
 	return ret;
 }
@@ -445,13 +445,73 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 
 		if (((unsigned long)uaddr & PAGE_MASK) !=
 				((unsigned long)end & PAGE_MASK)) {
-		 	ret = __get_user(c, end);
+			ret = __get_user(c, end);
 			(void)c;
 		}
 	}
 	return ret;
 }
 
+/*
+ * Multipage variants of the above prefault helpers, useful if more than
+ * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
+ * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
+ * filemap.c hotpaths.
+ */
+static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
+{
+	int ret;
+	const char __user *end = uaddr + size - 1;
+
+	if (unlikely(size == 0))
+		return 0;
+
+	/*
+	 * Writing zeroes into userspace here is OK, because we know that if
+	 * the zero gets there, we'll be overwriting it.
+	 */
+	while (uaddr <= end) {
+		ret = __put_user(0, uaddr);
+		if (ret != 0)
+			return ret;
+		uaddr += PAGE_SIZE;
+	}
+
+	/* Check whether the range spilled into the next page. */
+	if (((unsigned long)uaddr & PAGE_MASK) ==
+			((unsigned long)end & PAGE_MASK))
+		ret = __put_user(0, end);
+
+	return ret;
+}
+
+static inline int fault_in_multipages_readable(const char __user *uaddr,
+					       int size)
+{
+	volatile char c;
+	int ret;
+	const char __user *end = uaddr + size - 1;
+
+	if (unlikely(size == 0))
+		return 0;
+
+	while (uaddr <= end) {
+		ret = __get_user(c, uaddr);
+		if (ret != 0)
+			return ret;
+		uaddr += PAGE_SIZE;
+	}
+
+	/* Check whether the range spilled into the next page. */
+	if (((unsigned long)uaddr & PAGE_MASK) ==
+			((unsigned long)end & PAGE_MASK)) {
+		ret = __get_user(c, end);
+		(void)c;
+	}
+
+	return ret;
+}
+
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 				pgoff_t index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
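As a side note on the new helpers' boundary handling, the sketch below is a small, self-contained userspace simulation (hypothetical addresses, 4 KiB pages assumed) of the probe pattern: the loop strides through the range in PAGE_SIZE steps from the start address, so the page holding the last byte can be missed when the final stride overshoots end, and the trailing "spilled into the next page" check probes end to cover exactly that case.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Mirror of the probe logic in the new helpers, operating on plain numbers. */
static void simulate_prefault(unsigned long uaddr, unsigned long size)
{
	unsigned long end = uaddr + size - 1;

	if (size == 0)
		return;

	while (uaddr <= end) {
		printf("probe page 0x%lx\n", uaddr & PAGE_MASK);
		uaddr += PAGE_SIZE;
	}

	/* Same check as in the patch: the last stride may have skipped end's page. */
	if ((uaddr & PAGE_MASK) == (end & PAGE_MASK))
		printf("probe page 0x%lx (trailing spill)\n", end & PAGE_MASK);
}

int main(void)
{
	/*
	 * 0x30 bytes starting at 0x1ff0 cross into page 0x2000: the loop only
	 * probes page 0x1000, and the trailing check picks up page 0x2000.
	 */
	simulate_prefault(0x1ff0, 0x30);

	/* A page-aligned two-page range needs no trailing probe. */
	simulate_prefault(0x1000, 0x2000);
	return 0;
}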