author     Linus Torvalds <torvalds@linux-foundation.org>  2012-02-04 13:51:54 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-02-04 13:51:54 -0500
commit     82bdc843c2be0ce199e8e247dfb2a17248cbd6c4 (patch)
tree       f04e4c0935dd9e3f05741bf812229b36b18683cd /mm
parent     71b1b20b8aea6ba4a1a15736409f1261d8dfe1da (diff)
parent     0bf380bc70ecba68cb4d74dc656cc2fa8c4d801a (diff)
Merge branch 'akpm'
* akpm:
mm: compaction: check pfn_valid when entering a new MAX_ORDER_NR_PAGES block during isolation for migration
readahead: fix pipeline break caused by block plug
kprobes: fix a memory leak in function pre_handler_kretprobe()
drivers/tty/vt/vt_ioctl.c: fix KDFONTOP 32bit compatibility layer
lkdtm: avoid calling lkdtm_do_action() with spinlock held
mm/filemap_xip.c: fix race condition in xip_file_fault()
mm/memcontrol.c: fix warning with CONFIG_NUMA=n
avr32: select generic atomic64_t support
mm: postpone migrated page mapping reset
xtensa: fix memscan()
MAINTAINERS: update lguest F: patterns
MAINTAINERS: remove staging sections
MAINTAINERS: remove iMX5 section
MAINTAINERS: update partitions block F: patterns
Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction.c  | 13
-rw-r--r--  mm/filemap.c     |  8
-rw-r--r--  mm/filemap_xip.c |  7
-rw-r--r--  mm/memcontrol.c  |  3
-rw-r--r--  mm/migrate.c     |  2
5 files changed, 26 insertions, 7 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 71a58f67f481..bd939a574b84 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -313,6 +313,19 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		} else if (!locked)
 			spin_lock_irq(&zone->lru_lock);
 
+		/*
+		 * migrate_pfn does not necessarily start aligned to a
+		 * pageblock. Ensure that pfn_valid is called when moving
+		 * into a new MAX_ORDER_NR_PAGES range in case of large
+		 * memory holes within the zone
+		 */
+		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+			if (!pfn_valid(low_pfn)) {
+				low_pfn += MAX_ORDER_NR_PAGES - 1;
+				continue;
+			}
+		}
+
 		if (!pfn_valid_within(low_pfn))
 			continue;
 		nr_scanned++;
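The new check relies on MAX_ORDER_NR_PAGES being a power of two, so low_pfn & (MAX_ORDER_NR_PAGES - 1) is zero exactly when the scanner crosses into a new aligned block, and only then does pfn_valid() need to be re-checked for a hole in the zone. Below is a minimal userspace sketch of that boundary arithmetic only; the block size, hole layout, and helper names are invented for illustration and this is not the kernel code.

#include <stdbool.h>
#include <stdio.h>

#define FAKE_MAX_ORDER_NR_PAGES 8UL	/* stand-in for MAX_ORDER_NR_PAGES; power of two */

/* Hypothetical memmap hole: pretend pfns 16..23 have no backing struct page. */
static bool fake_pfn_valid(unsigned long pfn)
{
	return !(pfn >= 16 && pfn < 24);
}

int main(void)
{
	/* Like the migrate scanner, start mid-block rather than aligned. */
	unsigned long low_pfn = 3, end_pfn = 32;

	for (; low_pfn < end_pfn; low_pfn++) {
		/* On entry to each aligned block, check that it has a memmap at all. */
		if ((low_pfn & (FAKE_MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!fake_pfn_valid(low_pfn)) {
				/* +1 from the loop increment lands on the next block. */
				low_pfn += FAKE_MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}
		printf("scan pfn %lu\n", low_pfn);
	}
	return 0;
}

Running the sketch scans pfns 3..15 and 24..31 while skipping the 16..23 hole, which is the effect the kernel hunk wants when the zone contains unmapped ranges.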
diff --git a/mm/filemap.c b/mm/filemap.c
index 97f49ed35bd2..b66275757c28 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1400,15 +1400,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 	unsigned long seg = 0;
 	size_t count;
 	loff_t *ppos = &iocb->ki_pos;
-	struct blk_plug plug;
 
 	count = 0;
 	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
 	if (retval)
 		return retval;
 
-	blk_start_plug(&plug);
-
 	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
 	if (filp->f_flags & O_DIRECT) {
 		loff_t size;
@@ -1424,8 +1421,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 			retval = filemap_write_and_wait_range(mapping, pos,
 					pos + iov_length(iov, nr_segs) - 1);
 			if (!retval) {
+				struct blk_plug plug;
+
+				blk_start_plug(&plug);
 				retval = mapping->a_ops->direct_IO(READ, iocb,
 							iov, pos, nr_segs);
+				blk_finish_plug(&plug);
 			}
 			if (retval > 0) {
 				*ppos = pos + retval;
@@ -1481,7 +1482,6 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 			break;
 	}
 out:
-	blk_finish_plug(&plug);
 	return retval;
 }
 EXPORT_SYMBOL(generic_file_aio_read);
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index f91b2f687343..a4eb31132229 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -263,7 +263,12 @@ found:
 							xip_pfn);
 		if (err == -ENOMEM)
 			return VM_FAULT_OOM;
-		BUG_ON(err);
+		/*
+		 * err == -EBUSY is fine, we've raced against another thread
+		 * that faulted-in the same page
+		 */
+		if (err != -EBUSY)
+			BUG_ON(err);
 		return VM_FAULT_NOPAGE;
 	} else {
 		int err, ret = VM_FAULT_OOM;
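The hunk above encodes a common idiom: when two threads fault the same page concurrently, the loser of the race sees -EBUSY from vm_insert_mixed() because the winner already installed the mapping, and it can simply return VM_FAULT_NOPAGE. Below is a small userspace sketch of the same "losing the race is success" pattern; the names and the use of EBUSY as the "already installed" code are illustrative only, not the kernel implementation.

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the installed mapping: 0 means empty. */
static _Atomic unsigned long slot;

/* Try to install a value; return 0 on success, -EBUSY if another thread won. */
static int install(unsigned long val)
{
	unsigned long expected = 0;

	if (atomic_compare_exchange_strong(&slot, &expected, val))
		return 0;
	return -EBUSY;
}

static void *fault_path(void *arg)
{
	int err = install((unsigned long)arg);

	/* Like the xip fault handler: losing the race is not a bug. */
	if (err && err != -EBUSY)
		fprintf(stderr, "unexpected error %d\n", err);
	else
		printf("thread %lu: mapping present (err=%d)\n",
		       (unsigned long)arg, err);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, fault_path, (void *)1UL);
	pthread_create(&b, NULL, fault_path, (void *)2UL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}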
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 556859fec4ef..6728a7ae6f2d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -776,7 +776,8 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 	/* threshold event is triggered in finer grain than soft limit */
 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 						MEM_CGROUP_TARGET_THRESH))) {
-		bool do_softlimit, do_numainfo;
+		bool do_softlimit;
+		bool do_numainfo __maybe_unused;
 
 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 						MEM_CGROUP_TARGET_SOFTLIMIT);
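In the kernel, __maybe_unused wraps the compiler's unused attribute (__attribute__((unused)) under gcc), telling it that the variable is deliberately allowed to go unreferenced; that is exactly the CONFIG_NUMA=n case this hunk fixes, where the only consumer of do_numainfo is compiled out. A standalone sketch of the same pattern follows; the CONFIG_FAKE_NUMA macro and the surrounding function are invented for illustration.

/* Build with: gcc -Wall -c maybe_unused.c
 * With CONFIG_FAKE_NUMA undefined, do_numainfo is never referenced; the
 * attribute (what the kernel's __maybe_unused expands to) keeps the
 * unused-variable warning quiet without #ifdef-ing the declaration.
 */
#include <stdio.h>

#define __maybe_unused __attribute__((unused))

void check_events(int event)
{
	int do_softlimit;
	int do_numainfo __maybe_unused;

	do_softlimit = (event % 16 == 0);
#ifdef CONFIG_FAKE_NUMA
	do_numainfo = (event % 64 == 0);
	if (do_numainfo)
		printf("update numa info\n");
#endif
	if (do_softlimit)
		printf("check soft limit\n");
}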
diff --git a/mm/migrate.c b/mm/migrate.c
index 9871a56d82c3..df141f60289e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -445,7 +445,6 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 	ClearPageSwapCache(page);
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
-	page->mapping = NULL;
 
 	/*
 	 * If any waiters have accumulated on the new page then
@@ -667,6 +666,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 	} else {
 		if (remap_swapcache)
 			remove_migration_ptes(page, newpage);
+		page->mapping = NULL;
 	}
 
 	unlock_page(newpage);