diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-08 18:57:47 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-08 18:57:47 -0400 |
commit | 8065be8d032f38da25b54bf077a05a30d9ce9f2a (patch) | |
tree | 32a7baf4b40e0240ab4b9dd6f2bbe6129929bb66 /mm/vmscan.c | |
parent | 27d438c56009e5ae632de36fe70985d1aab5e344 (diff) | |
parent | ecc265fe9e09e32a3573b2ba26e79b2099eb8bbb (diff) |
Merge branch 'akpm' (second patchbomb from Andrew Morton)
Merge more incoming from Andrew Morton:
"Two new syscalls:
memfd_create in "shm: add memfd_create() syscall"
kexec_file_load in "kexec: implementation of new syscall kexec_file_load"
And:
- Most (all?) of the rest of MM
- Lots of the usual misc bits
- fs/autofs4
- drivers/rtc
- fs/nilfs
- procfs
- fork.c, exec.c
- more in lib/
- rapidio
- Janitorial work in filesystems: fs/ufs, fs/reiserfs, fs/adfs,
fs/cramfs, fs/romfs, fs/qnx6.
- initrd/initramfs work
- "file sealing" and the memfd_create() syscall, in tmpfs
- add pci_zalloc_consistent, use it in lots of places
- MAINTAINERS maintenance
- kexec feature work"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (193 commits)
MAINTAINERS: update nomadik patterns
MAINTAINERS: update usb/gadget patterns
MAINTAINERS: update DMA BUFFER SHARING patterns
kexec: verify the signature of signed PE bzImage
kexec: support kexec/kdump on EFI systems
kexec: support for kexec on panic using new system call
kexec-bzImage64: support for loading bzImage using 64bit entry
kexec: load and relocate purgatory at kernel load time
purgatory: core purgatory functionality
purgatory/sha256: provide implementation of sha256 in purgatory context
kexec: implementation of new syscall kexec_file_load
kexec: new syscall kexec_file_load() declaration
kexec: make kexec_segment user buffer pointer a union
resource: provide new functions to walk through resources
kexec: use common function for kimage_normal_alloc() and kimage_crash_alloc()
kexec: move segment verification code in a separate function
kexec: rename unusebale_pages to unusable_pages
kernel: build bin2c based on config option CONFIG_BUILD_BIN2C
bin2c: move bin2c in scripts/basic
shm: wait for pins to be released when sealing
...
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 12 |
1 file changed, 8 insertions, 4 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c index d2f65c856350..2836b5373b2e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -577,9 +577,10 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, | |||
577 | 577 | ||
578 | if (PageSwapCache(page)) { | 578 | if (PageSwapCache(page)) { |
579 | swp_entry_t swap = { .val = page_private(page) }; | 579 | swp_entry_t swap = { .val = page_private(page) }; |
580 | mem_cgroup_swapout(page, swap); | ||
580 | __delete_from_swap_cache(page); | 581 | __delete_from_swap_cache(page); |
581 | spin_unlock_irq(&mapping->tree_lock); | 582 | spin_unlock_irq(&mapping->tree_lock); |
582 | swapcache_free(swap, page); | 583 | swapcache_free(swap); |
583 | } else { | 584 | } else { |
584 | void (*freepage)(struct page *); | 585 | void (*freepage)(struct page *); |
585 | void *shadow = NULL; | 586 | void *shadow = NULL; |
@@ -600,7 +601,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, | |||
600 | shadow = workingset_eviction(mapping, page); | 601 | shadow = workingset_eviction(mapping, page); |
601 | __delete_from_page_cache(page, shadow); | 602 | __delete_from_page_cache(page, shadow); |
602 | spin_unlock_irq(&mapping->tree_lock); | 603 | spin_unlock_irq(&mapping->tree_lock); |
603 | mem_cgroup_uncharge_cache_page(page); | ||
604 | 604 | ||
605 | if (freepage != NULL) | 605 | if (freepage != NULL) |
606 | freepage(page); | 606 | freepage(page); |
@@ -822,7 +822,6 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
822 | 822 | ||
823 | cond_resched(); | 823 | cond_resched(); |
824 | 824 | ||
825 | mem_cgroup_uncharge_start(); | ||
826 | while (!list_empty(page_list)) { | 825 | while (!list_empty(page_list)) { |
827 | struct address_space *mapping; | 826 | struct address_space *mapping; |
828 | struct page *page; | 827 | struct page *page; |
@@ -1133,11 +1132,12 @@ keep: | |||
1133 | VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); | 1132 | VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); |
1134 | } | 1133 | } |
1135 | 1134 | ||
1135 | mem_cgroup_uncharge_list(&free_pages); | ||
1136 | free_hot_cold_page_list(&free_pages, true); | 1136 | free_hot_cold_page_list(&free_pages, true); |
1137 | 1137 | ||
1138 | list_splice(&ret_pages, page_list); | 1138 | list_splice(&ret_pages, page_list); |
1139 | count_vm_events(PGACTIVATE, pgactivate); | 1139 | count_vm_events(PGACTIVATE, pgactivate); |
1140 | mem_cgroup_uncharge_end(); | 1140 | |
1141 | *ret_nr_dirty += nr_dirty; | 1141 | *ret_nr_dirty += nr_dirty; |
1142 | *ret_nr_congested += nr_congested; | 1142 | *ret_nr_congested += nr_congested; |
1143 | *ret_nr_unqueued_dirty += nr_unqueued_dirty; | 1143 | *ret_nr_unqueued_dirty += nr_unqueued_dirty; |
@@ -1437,6 +1437,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list) | |||
1437 | 1437 | ||
1438 | if (unlikely(PageCompound(page))) { | 1438 | if (unlikely(PageCompound(page))) { |
1439 | spin_unlock_irq(&zone->lru_lock); | 1439 | spin_unlock_irq(&zone->lru_lock); |
1440 | mem_cgroup_uncharge(page); | ||
1440 | (*get_compound_page_dtor(page))(page); | 1441 | (*get_compound_page_dtor(page))(page); |
1441 | spin_lock_irq(&zone->lru_lock); | 1442 | spin_lock_irq(&zone->lru_lock); |
1442 | } else | 1443 | } else |
@@ -1544,6 +1545,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, | |||
1544 | 1545 | ||
1545 | spin_unlock_irq(&zone->lru_lock); | 1546 | spin_unlock_irq(&zone->lru_lock); |
1546 | 1547 | ||
1548 | mem_cgroup_uncharge_list(&page_list); | ||
1547 | free_hot_cold_page_list(&page_list, true); | 1549 | free_hot_cold_page_list(&page_list, true); |
1548 | 1550 | ||
1549 | /* | 1551 | /* |
@@ -1658,6 +1660,7 @@ static void move_active_pages_to_lru(struct lruvec *lruvec, | |||
1658 | 1660 | ||
1659 | if (unlikely(PageCompound(page))) { | 1661 | if (unlikely(PageCompound(page))) { |
1660 | spin_unlock_irq(&zone->lru_lock); | 1662 | spin_unlock_irq(&zone->lru_lock); |
1663 | mem_cgroup_uncharge(page); | ||
1661 | (*get_compound_page_dtor(page))(page); | 1664 | (*get_compound_page_dtor(page))(page); |
1662 | spin_lock_irq(&zone->lru_lock); | 1665 | spin_lock_irq(&zone->lru_lock); |
1663 | } else | 1666 | } else |
@@ -1765,6 +1768,7 @@ static void shrink_active_list(unsigned long nr_to_scan, | |||
1765 | __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); | 1768 | __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); |
1766 | spin_unlock_irq(&zone->lru_lock); | 1769 | spin_unlock_irq(&zone->lru_lock); |
1767 | 1770 | ||
1771 | mem_cgroup_uncharge_list(&l_hold); | ||
1768 | free_hot_cold_page_list(&l_hold, true); | 1772 | free_hot_cold_page_list(&l_hold, true); |
1769 | } | 1773 | } |
1770 | 1774 | ||