about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--	mm/filemap.c	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index a3b4021c448f..ec6566ffbd90 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2420,20 +2420,20 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
  * Synchronous readahead happens when we don't even find
  * a page in the page cache at all.
  */
-static void do_sync_mmap_readahead(struct vm_area_struct *vma,
-				   struct file_ra_state *ra,
-				   struct file *file,
-				   pgoff_t offset)
+static void do_sync_mmap_readahead(struct vm_fault *vmf)
 {
+	struct file *file = vmf->vma->vm_file;
+	struct file_ra_state *ra = &file->f_ra;
 	struct address_space *mapping = file->f_mapping;
+	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma->vm_flags & VM_RAND_READ)
 		return;
 	if (!ra->ra_pages)
 		return;
 
-	if (vma->vm_flags & VM_SEQ_READ) {
+	if (vmf->vma->vm_flags & VM_SEQ_READ) {
 		page_cache_sync_readahead(mapping, ra, file, offset,
 					  ra->ra_pages);
 		return;
@@ -2463,16 +2463,16 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
  * Asynchronous readahead happens when we find the page and PG_readahead,
  * so we want to possibly extend the readahead further..
  */
-static void do_async_mmap_readahead(struct vm_area_struct *vma,
-				    struct file_ra_state *ra,
-				    struct file *file,
-				    struct page *page,
-				    pgoff_t offset)
+static void do_async_mmap_readahead(struct vm_fault *vmf,
+				    struct page *page)
 {
+	struct file *file = vmf->vma->vm_file;
+	struct file_ra_state *ra = &file->f_ra;
 	struct address_space *mapping = file->f_mapping;
+	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma->vm_flags & VM_RAND_READ)
 		return;
 	if (ra->mmap_miss > 0)
 		ra->mmap_miss--;
@@ -2531,10 +2531,10 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 		 * We found the page, so try async readahead before
 		 * waiting for the lock.
 		 */
-		do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
+		do_async_mmap_readahead(vmf, page);
 	} else if (!page) {
 		/* No page in the page cache at all */
-		do_sync_mmap_readahead(vmf->vma, ra, file, offset);
+		do_sync_mmap_readahead(vmf);
 		count_vm_event(PGMAJFAULT);
 		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
 		ret = VM_FAULT_MAJOR;