Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c | 31
1 file changed, 10 insertions, 21 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 0aa3faa48219..8cd7e97eae1f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -393,24 +393,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
         int error;
-        struct mem_cgroup *memcg = NULL;
 
         VM_BUG_ON(!PageLocked(old));
         VM_BUG_ON(!PageLocked(new));
         VM_BUG_ON(new->mapping);
 
-        /*
-         * This is not page migration, but prepare_migration and
-         * end_migration does enough work for charge replacement.
-         *
-         * In the longer term we probably want a specialized function
-         * for moving the charge from old to new in a more efficient
-         * manner.
-         */
-        error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
-        if (error)
-                return error;
-
         error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
         if (!error) {
                 struct address_space *mapping = old->mapping;
@@ -432,13 +419,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                 if (PageSwapBacked(new))
                         __inc_zone_page_state(new, NR_SHMEM);
                 spin_unlock_irq(&mapping->tree_lock);
+                /* mem_cgroup codes must not be called under tree_lock */
+                mem_cgroup_replace_page_cache(old, new);
                 radix_tree_preload_end();
                 if (freepage)
                         freepage(old);
                 page_cache_release(old);
-                mem_cgroup_end_migration(memcg, old, new, true);
-        } else {
-                mem_cgroup_end_migration(memcg, old, new, false);
         }
 
         return error;
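
Note on the two hunks above: the memory-cgroup charge no longer takes the migration detour (mem_cgroup_prepare_migration() up front, mem_cgroup_end_migration() on both the success and failure paths); it is now moved in a single mem_cgroup_replace_page_cache() call, deliberately placed after mapping->tree_lock has been released. A rough sketch of the resulting success path, assembled only from lines visible in the hunks (the radix-tree replacement in between is untouched by this diff and is summarized in a comment):

        error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
        if (!error) {
                struct address_space *mapping = old->mapping;

                /* ... swap old for new in the radix tree and adjust the
                 * zone counters under mapping->tree_lock (unchanged) ... */

                spin_unlock_irq(&mapping->tree_lock);
                /* mem_cgroup codes must not be called under tree_lock */
                mem_cgroup_replace_page_cache(old, new);
                radix_tree_preload_end();
                if (freepage)
                        freepage(old);
                page_cache_release(old);
        }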
@@ -1414,15 +1400,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
         unsigned long seg = 0;
         size_t count;
         loff_t *ppos = &iocb->ki_pos;
-        struct blk_plug plug;
 
         count = 0;
         retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
         if (retval)
                 return retval;
 
-        blk_start_plug(&plug);
-
         /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
         if (filp->f_flags & O_DIRECT) {
                 loff_t size;
@@ -1438,8 +1421,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                 retval = filemap_write_and_wait_range(mapping, pos,
                                         pos + iov_length(iov, nr_segs) - 1);
                 if (!retval) {
+                        struct blk_plug plug;
+
+                        blk_start_plug(&plug);
                         retval = mapping->a_ops->direct_IO(READ, iocb,
                                                 iov, pos, nr_segs);
+                        blk_finish_plug(&plug);
                 }
                 if (retval > 0) {
                         *ppos = pos + retval;
@@ -1495,7 +1482,6 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                         break;
         }
 out:
-        blk_finish_plug(&plug);
         return retval;
 }
 EXPORT_SYMBOL(generic_file_aio_read);
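
Note on the generic_file_aio_read() hunks: the block plug no longer brackets the whole function; the local struct blk_plug and the blk_start_plug()/blk_finish_plug() pair now cover only the ->direct_IO() submission, so the buffered read path further down runs unplugged (presumably so that I/O issued on that path is not held back behind an outer plug). The resulting O_DIRECT branch, as a sketch built only from lines shown in the hunks above:

                retval = filemap_write_and_wait_range(mapping, pos,
                                        pos + iov_length(iov, nr_segs) - 1);
                if (!retval) {
                        struct blk_plug plug;

                        blk_start_plug(&plug);      /* batch block requests */
                        retval = mapping->a_ops->direct_IO(READ, iocb,
                                                iov, pos, nr_segs);
                        blk_finish_plug(&plug);     /* flush the batched requests */
                }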
@@ -2351,8 +2337,11 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
                                         pgoff_t index, unsigned flags)
 {
         int status;
+        gfp_t gfp_mask;
         struct page *page;
         gfp_t gfp_notmask = 0;
+
+        gfp_mask = mapping_gfp_mask(mapping) | __GFP_WRITE;
         if (flags & AOP_FLAG_NOFS)
                 gfp_notmask = __GFP_FS;
 repeat:
@@ -2360,7 +2349,7 @@ repeat:
         if (page)
                 goto found;
 
-        page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
+        page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
         if (!page)
                 return NULL;
         status = add_to_page_cache_lru(page, mapping, index,
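
Note on the final two hunks: grab_cache_page_write_begin() now computes a local gfp_mask up front, which is the mapping's gfp mask plus __GFP_WRITE (a hint that the caller intends to dirty the page), and passes it to __page_cache_alloc(). Callers are unaffected. For illustration only, a hedged sketch of a typical ->write_begin() implementation that reaches this allocation path; example_write_begin() is hypothetical and not part of this diff:

static int example_write_begin(struct file *file, struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
{
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        struct page *page;

        /* The page-cache allocation made in here now carries __GFP_WRITE. */
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;

        *pagep = page;
        return 0;
}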