 include/linux/mm.h   |   2 +-
 include/linux/swap.h |   8 ++--
 mm/vmscan.c          | 104 ++++++++++++++++++++++++++-----------------
 3 files changed, 64 insertions(+), 50 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e67980654c49..1850cf8bad64 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1046,7 +1046,7 @@ int in_gate_area_no_task(unsigned long addr);
 
 int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
 					void __user *, size_t *, loff_t *);
-int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			unsigned long lru_pages);
 void drop_pagecache(void);
 void drop_slab(void);
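
The point of this conversion is overflow headroom: a page count held in a signed 32-bit int tops out well short of what large machines can address, and intermediate arithmetic in the reclaim counters can wrap long before that. A minimal userspace sketch of the failure mode (illustrative only, assuming an LP64 machine; none of this is kernel code):

#include <stdio.h>

int main(void)
{
	/* 16 TiB of 4 KiB pages is 2^32 pages, which does not fit
	 * in a 32-bit int: the store silently truncates. */
	unsigned long bytes = 16UL << 40;
	int pages_int = bytes >> 12;			/* truncates */
	unsigned long pages_ulong = bytes >> 12;	/* correct */

	printf("int: %d, unsigned long: %lu\n", pages_int, pages_ulong);
	return 0;
}
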
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d572b19afb7d..3dc6c89c49b8 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -172,8 +172,8 @@ extern int rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern int try_to_free_pages(struct zone **, gfp_t);
-extern int shrink_all_memory(int);
+extern unsigned long try_to_free_pages(struct zone **, gfp_t);
+extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
 
 #ifdef CONFIG_NUMA
@@ -190,11 +190,11 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 
 #ifdef CONFIG_MIGRATION
 extern int isolate_lru_page(struct page *p);
-extern int putback_lru_pages(struct list_head *l);
+extern unsigned long putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct page *, struct page *);
 extern void migrate_page_copy(struct page *, struct page *);
 extern int migrate_page_remove_references(struct page *, struct page *, int);
-extern int migrate_pages(struct list_head *l, struct list_head *t,
+extern unsigned long migrate_pages(struct list_head *l, struct list_head *t,
 		struct list_head *moved, struct list_head *failed);
 extern int fail_migrate_page(struct page *, struct page *);
 #else
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5feef4d4650e..62cd7cd257e3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -177,10 +177,11 @@ EXPORT_SYMBOL(remove_shrinker);
  *
  * Returns the number of slab objects which we shrunk.
  */
-int shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages)
+unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+			unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
-	int ret = 0;
+	unsigned long ret = 0;
 
 	if (scanned == 0)
 		scanned = SWAP_CLUSTER_MAX;
@@ -410,12 +411,13 @@ cannot_free:
 /*
  * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
  */
-static int shrink_list(struct list_head *page_list, struct scan_control *sc)
+static unsigned long shrink_list(struct list_head *page_list,
+				struct scan_control *sc)
 {
 	LIST_HEAD(ret_pages);
 	struct pagevec freed_pvec;
 	int pgactivate = 0;
-	int reclaimed = 0;
+	unsigned long reclaimed = 0;
 
 	cond_resched();
 
@@ -599,11 +601,11 @@ static inline void move_to_lru(struct page *page)
  *
  * returns the number of pages put back.
  */
-int putback_lru_pages(struct list_head *l)
+unsigned long putback_lru_pages(struct list_head *l)
 {
 	struct page *page;
 	struct page *page2;
-	int count = 0;
+	unsigned long count = 0;
 
 	list_for_each_entry_safe(page, page2, l, lru) {
 		move_to_lru(page);
@@ -848,11 +850,11 @@ EXPORT_SYMBOL(migrate_page);
  *
  * Return: Number of pages not migrated when "to" ran empty.
  */
-int migrate_pages(struct list_head *from, struct list_head *to,
+unsigned long migrate_pages(struct list_head *from, struct list_head *to,
 		struct list_head *moved, struct list_head *failed)
 {
-	int retry;
-	int nr_failed = 0;
+	unsigned long retry;
+	unsigned long nr_failed = 0;
 	int pass = 0;
 	struct page *page;
 	struct page *page2;
@@ -1069,12 +1071,13 @@ int isolate_lru_page(struct page *page)
  *
  * returns how many pages were moved onto *@dst.
  */
-static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
-		struct list_head *dst, int *scanned)
+static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
+		struct list_head *src, struct list_head *dst,
+		unsigned long *scanned)
 {
-	int nr_taken = 0;
+	unsigned long nr_taken = 0;
 	struct page *page;
-	int scan = 0;
+	unsigned long scan = 0;
 
 	while (scan++ < nr_to_scan && !list_empty(src)) {
 		struct list_head *target;
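
Widening the *scanned out-parameter along with the locals is required for correctness, not just consistency: C will not silently store through a mismatched pointer, so any caller still passing an int * stops compiling, which is exactly the safety net you want when a counter changes width. In miniature (hypothetical names, not kernel code):

#include <stdio.h>

/* After such a change, the out-parameter must match the new width. */
static void count_items(unsigned long limit, unsigned long *scanned)
{
	unsigned long n = 0;

	while (n < limit)
		n++;
	*scanned = n;
}

int main(void)
{
	unsigned long scanned;
	/* Declaring "int scanned;" and passing &scanned here would
	 * now be rejected as an incompatible pointer type. */
	count_items(100, &scanned);
	printf("%lu\n", scanned);
	return 0;
}
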
@@ -1106,20 +1109,22 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 /*
  * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
  */
-static void shrink_cache(int max_scan, struct zone *zone, struct scan_control *sc)
+static void shrink_cache(unsigned long max_scan, struct zone *zone,
+			struct scan_control *sc)
 {
 	LIST_HEAD(page_list);
 	struct pagevec pvec;
+	unsigned long nr_scanned = 0;
 
 	pagevec_init(&pvec, 1);
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
-	while (max_scan > 0) {
+	do {
 		struct page *page;
-		int nr_taken;
-		int nr_scan;
-		int nr_freed;
+		unsigned long nr_taken;
+		unsigned long nr_scan;
+		unsigned long nr_freed;
 
 		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
 					     &zone->inactive_list,
@@ -1131,7 +1136,7 @@ static void shrink_cache(int max_scan, struct zone *zone, struct scan_control *s
 		if (nr_taken == 0)
 			goto done;
 
-		max_scan -= nr_scan;
+		nr_scanned += nr_scan;
 		nr_freed = shrink_list(&page_list, sc);
 
 		local_irq_disable();
@@ -1161,7 +1166,7 @@ static void shrink_cache(int max_scan, struct zone *zone, struct scan_control *s
 				spin_lock_irq(&zone->lru_lock);
 			}
 		}
-	}
+	} while (nr_scanned < max_scan);
 	spin_unlock_irq(&zone->lru_lock);
 done:
 	pagevec_release(&pvec);
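
Note the control-flow change that rides along with the type change in shrink_cache(): once max_scan is unsigned, the old `max_scan -= nr_scan` countdown becomes dangerous, because a batch that scans more pages than the remaining budget wraps the counter to a huge value instead of going negative. Counting nr_scanned upward and comparing sidesteps the wrap, and the do/while preserves the guarantee of at least one pass. A standalone sketch of the two patterns (scan_batch() is a made-up stand-in for the isolate-and-shrink step, not a kernel function):

#include <stdio.h>

/* Made-up stand-in for one isolate-and-shrink batch. */
static unsigned long scan_batch(void)
{
	return 32;
}

int main(void)
{
	unsigned long max_scan = 100;
	unsigned long remaining = max_scan;
	unsigned long nr_scanned = 0;

	/* Old shape: unsigned countdown wraps once a batch overshoots
	 * the remaining budget (100 -> 68 -> 36 -> 4 -> huge). */
	while (remaining > 0) {
		remaining -= scan_batch();
		if (remaining > max_scan) {	/* wrapped past zero */
			printf("wrapped to %lu\n", remaining);
			break;
		}
	}

	/* New shape: count upward and compare; no wrap, and the
	 * do/while still guarantees at least one pass. */
	do {
		nr_scanned += scan_batch();
	} while (nr_scanned < max_scan);
	printf("scanned %lu of a %lu budget\n", nr_scanned, max_scan);
	return 0;
}
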
@@ -1185,11 +1190,12 @@ done:
  * But we had to alter page->flags anyway.
  */
 static void
-refill_inactive_zone(int nr_pages, struct zone *zone, struct scan_control *sc)
+refill_inactive_zone(unsigned long nr_pages, struct zone *zone,
+			struct scan_control *sc)
 {
-	int pgmoved;
+	unsigned long pgmoved;
 	int pgdeactivate = 0;
-	int pgscanned;
+	unsigned long pgscanned;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
 	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
@@ -1323,8 +1329,8 @@ refill_inactive_zone(int nr_pages, struct zone *zone, struct scan_control *sc)
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
-static void
-shrink_zone(int priority, struct zone *zone, struct scan_control *sc)
+static void shrink_zone(int priority, struct zone *zone,
+			struct scan_control *sc)
 {
 	unsigned long nr_active;
 	unsigned long nr_inactive;
@@ -1387,8 +1393,8 @@ shrink_zone(int priority, struct zone *zone, struct scan_control *sc)
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static void
-shrink_caches(int priority, struct zone **zones, struct scan_control *sc)
+static void shrink_caches(int priority, struct zone **zones,
+			struct scan_control *sc)
 {
 	int i;
 
@@ -1425,11 +1431,12 @@ shrink_caches(int priority, struct zone **zones, struct scan_control *sc)
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 */
-int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 {
 	int priority;
 	int ret = 0;
-	int total_scanned = 0, total_reclaimed = 0;
+	unsigned long total_scanned = 0;
+	unsigned long total_reclaimed = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long lru_pages = 0;
 	int i;
@@ -1525,13 +1532,15 @@ out:
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */
-static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order)
+static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
+				int order)
 {
-	int to_free = nr_pages;
+	unsigned long to_free = nr_pages;
 	int all_zones_ok;
 	int priority;
 	int i;
-	int total_scanned, total_reclaimed;
+	unsigned long total_scanned;
+	unsigned long total_reclaimed;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
@@ -1776,22 +1785,23 @@ void wakeup_kswapd(struct zone *zone, int order)
 * Try to free `nr_pages' of memory, system-wide.  Returns the number of freed
 * pages.
 */
-int shrink_all_memory(int nr_pages)
+unsigned long shrink_all_memory(unsigned long nr_pages)
 {
 	pg_data_t *pgdat;
-	int nr_to_free = nr_pages;
-	int ret = 0;
+	unsigned long nr_to_free = nr_pages;
+	unsigned long ret = 0;
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
 
 	current->reclaim_state = &reclaim_state;
 	for_each_pgdat(pgdat) {
-		int freed;
+		unsigned long freed;
+
 		freed = balance_pgdat(pgdat, nr_to_free, 0);
 		ret += freed;
 		nr_to_free -= freed;
-		if (nr_to_free <= 0)
+		if ((long)nr_to_free <= 0)
 			break;
 	}
 	current->reclaim_state = NULL;
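
The `(long)nr_to_free <= 0` cast is the subtle part of this hunk: nr_to_free is now unsigned, so when balance_pgdat() frees more pages than were asked for, the subtraction wraps to an enormous positive value, and a plain `nr_to_free <= 0` test (which for an unsigned value can only be true at exactly zero) would never terminate the loop. Reinterpreting the bits as signed recovers the negative remainder. A userspace illustration (assumes an LP64, two's complement machine; strictly speaking the conversion back to signed is implementation-defined in C, which the kernel can rely on but portable code should not):

#include <stdio.h>

int main(void)
{
	unsigned long nr_to_free = 10;
	unsigned long freed = 12;	/* one node freed more than asked */

	nr_to_free -= freed;		/* wraps to 2^64 - 2 */

	/* For an unsigned value, "<= 0" reduces to "== 0"... */
	printf("unsigned test fires: %d\n", nr_to_free == 0);
	/* ...but viewed as signed, the wrapped value is -2 again. */
	printf("signed test fires:   %d\n", (long)nr_to_free <= 0);
	return 0;
}
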
@@ -1805,8 +1815,7 @@ int shrink_all_memory(int nr_pages)
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
 static int __devinit cpu_callback(struct notifier_block *nfb,
-				  unsigned long action,
-				  void *hcpu)
+				  unsigned long action, void *hcpu)
 {
 	pg_data_t *pgdat;
 	cpumask_t mask;
@@ -1826,10 +1835,15 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 static int __init kswapd_init(void)
 {
 	pg_data_t *pgdat;
+
 	swap_setup();
-	for_each_pgdat(pgdat)
-		pgdat->kswapd
-		= find_task_by_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL));
+	for_each_pgdat(pgdat) {
+		pid_t pid;
+
+		pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);
+		BUG_ON(pid < 0);
+		pgdat->kswapd = find_task_by_pid(pid);
+	}
 	total_memory = nr_free_pagecache_pages();
 	hotcpu_notifier(cpu_callback, 0);
 	return 0;
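
The kswapd_init() rewrite is more than formatting: kernel_thread() returns a negative errno on failure, and the old one-liner fed that value straight into find_task_by_pid(), silently looking up a nonsense pid. Splitting the call out makes the failure checkable, and BUG_ON() is a defensible response since a node without kswapd cannot reclaim memory. The same check-before-use shape in a userspace sketch (fork() standing in for kernel_thread(), abort() for BUG_ON()):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();	/* like kernel_thread(): < 0 on failure */

	if (pid < 0) {		/* check before using the pid for lookup */
		perror("fork");
		abort();	/* userspace stand-in for BUG_ON() */
	}
	if (pid == 0)
		_exit(0);	/* child: pretend to be the daemon */
	printf("spawned worker %d\n", (int)pid);
	return 0;
}
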
@@ -1873,7 +1887,7 @@ int zone_reclaim_interval __read_mostly = 30*HZ;
 */
 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
-	const int nr_pages = 1 << order;
+	const unsigned long nr_pages = 1 << order;
 	struct task_struct *p = current;
 	struct reclaim_state reclaim_state;
 	int priority;
@@ -1881,7 +1895,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
 		.nr_mapped = read_page_state(nr_mapped),
-		.swap_cluster_max = max(nr_pages, SWAP_CLUSTER_MAX),
+		.swap_cluster_max = max_t(unsigned long, nr_pages,
+					SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,
 	};
 
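
The max() to max_t() switch here is forced, not stylistic: the kernel's max() macro refuses at compile time to compare operands of different types, and nr_pages is now unsigned long while SWAP_CLUSTER_MAX expands to an int constant. max_t() casts both operands to the named type first. A simplified userspace rendering of the idea (the real macros live in include/linux/kernel.h and include the compile-time type check that this version omits):

#include <stdio.h>

/* Simplified: the kernel's max() also contains a compile-time check
 * that both operands have the same type, which is what rejects
 * max(nr_pages, SWAP_CLUSTER_MAX) after this patch. */
#define max(x, y)	((x) > (y) ? (x) : (y))
#define max_t(type, x, y) \
	((type)(x) > (type)(y) ? (type)(x) : (type)(y))

#define SWAP_CLUSTER_MAX 32	/* an int constant, as in the kernel */

int main(void)
{
	unsigned long nr_pages = 1UL << 4;	/* 1 << order, say order = 4 */

	/* Casting both operands to one named type sidesteps the
	 * signed/unsigned mismatch. */
	printf("%lu\n", max_t(unsigned long, nr_pages, SWAP_CLUSTER_MAX));
	return 0;
}
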
@@ -1966,4 +1981,3 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	return __zone_reclaim(zone, gfp_mask, order);
 }
 #endif
-