author    Sergey Senozhatsky <sergey.senozhatsky@gmail.com>    2015-09-08 18:04:41 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>       2015-09-08 18:35:28 -0400
commit    ab9d306d9c3bf64b1dbad127aa13252cc550f839 (patch)
tree      525ffb59977b7985b8eecd37109dc3b214d2d77f /mm
parent    860c707dca155a56dfa115ddd6c00959296144a6 (diff)
zsmalloc: use shrinker to trigger auto-compaction
Perform automatic pool compaction from a shrinker when the system is getting tight on memory.

User space has very little knowledge of zsmalloc fragmentation and basically no mechanism to tell whether compaction will result in any memory gain. Another issue is that user space is not always aware that the system is getting tight on memory, which leads to uncomfortable scenarios where user space may start issuing compaction 'randomly' or from crontab, for example. Fragmentation is not necessarily bad: allocated but unused objects may, after all, be filled with data later, without the need to allocate a new zspage. On the other hand, we obviously don't want to waste memory when the system needs it.

Compaction now has a relatively quick pool scan, so we can easily estimate the number of pages that will be freed, which makes it possible to call this estimate from a shrinker->count_objects() callback. We also abort compaction as soon as we detect that we can't free any more pages, preventing wasteful object migrations.

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Suggested-by: Minchan Kim <minchan@kernel.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
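For context, here is a minimal sketch of the shrinker contract this patch builds on. The foo_* names and helpers are hypothetical and stand in for subsystem-specific logic; struct shrinker, DEFAULT_SEEKS, SHRINK_STOP and register_shrinker() are the kernel API of this period. ->count_objects() returns a cheap estimate of how much is reclaimable, and ->scan_objects() does the actual work, returning the number of objects freed or SHRINK_STOP when no further progress is possible -- the same split zs_shrinker_count()/zs_shrinker_scan() implement below.

#include <linux/shrinker.h>

/* hypothetical helpers, standing in for subsystem-specific logic */
static unsigned long foo_estimate_reclaimable(void) { return 0; }
static unsigned long foo_reclaim(void) { return 0; }

static unsigned long foo_count_objects(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	/* cheap estimate; returning 0 means "nothing to do, skip scan" */
	return foo_estimate_reclaimable();
}

static unsigned long foo_scan_objects(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long freed = foo_reclaim();

	/* report real progress, or tell the VM to stop calling us */
	return freed ? freed : SHRINK_STOP;
}

static struct shrinker foo_shrinker = {
	.count_objects	= foo_count_objects,
	.scan_objects	= foo_scan_objects,
	.seeks		= DEFAULT_SEEKS,
};

/* registered once during init: register_shrinker(&foo_shrinker); */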
Diffstat (limited to 'mm')
-rw-r--r--  mm/zsmalloc.c  78
1 file changed, 78 insertions(+), 0 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b7b4a5612ec7..27b9661c8fa6 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -246,6 +246,14 @@ struct zs_pool {
 	atomic_long_t pages_allocated;
 
 	struct zs_pool_stats stats;
+
+	/* Compact classes */
+	struct shrinker shrinker;
+	/*
+	 * To signify that register_shrinker() was successful
+	 * and unregister_shrinker() will not Oops.
+	 */
+	bool shrinker_enabled;
 #ifdef CONFIG_ZSMALLOC_STAT
 	struct dentry *stat_dentry;
 #endif
@@ -1778,6 +1786,69 @@ void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
 }
 EXPORT_SYMBOL_GPL(zs_pool_stats);
 
+static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
+		struct shrink_control *sc)
+{
+	unsigned long pages_freed;
+	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
+			shrinker);
+
+	pages_freed = pool->stats.pages_compacted;
+	/*
+	 * Compact classes and calculate compaction delta.
+	 * Can run concurrently with a manually triggered
+	 * (by user) compaction.
+	 */
+	pages_freed = zs_compact(pool) - pages_freed;
+
+	return pages_freed ? pages_freed : SHRINK_STOP;
+}
+
+static unsigned long zs_shrinker_count(struct shrinker *shrinker,
+		struct shrink_control *sc)
+{
+	int i;
+	struct size_class *class;
+	unsigned long pages_to_free = 0;
+	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
+			shrinker);
+
+	if (!pool->shrinker_enabled)
+		return 0;
+
+	for (i = zs_size_classes - 1; i >= 0; i--) {
+		class = pool->size_class[i];
+		if (!class)
+			continue;
+		if (class->index != i)
+			continue;
+
+		spin_lock(&class->lock);
+		pages_to_free += zs_can_compact(class);
+		spin_unlock(&class->lock);
+	}
+
+	return pages_to_free;
+}
+
+static void zs_unregister_shrinker(struct zs_pool *pool)
+{
+	if (pool->shrinker_enabled) {
+		unregister_shrinker(&pool->shrinker);
+		pool->shrinker_enabled = false;
+	}
+}
+
+static int zs_register_shrinker(struct zs_pool *pool)
+{
+	pool->shrinker.scan_objects = zs_shrinker_scan;
+	pool->shrinker.count_objects = zs_shrinker_count;
+	pool->shrinker.batch = 0;
+	pool->shrinker.seeks = DEFAULT_SEEKS;
+
+	return register_shrinker(&pool->shrinker);
+}
+
 /**
  * zs_create_pool - Creates an allocation pool to work from.
  * @flags: allocation flags used to allocate pool metadata
@@ -1863,6 +1934,12 @@ struct zs_pool *zs_create_pool(char *name, gfp_t flags)
 	if (zs_pool_stat_create(name, pool))
 		goto err;
 
+	/*
+	 * Not critical, we still can use the pool
+	 * and user can trigger compaction manually.
+	 */
+	if (zs_register_shrinker(pool) == 0)
+		pool->shrinker_enabled = true;
 	return pool;
 
 err:
@@ -1875,6 +1952,7 @@ void zs_destroy_pool(struct zs_pool *pool)
 {
 	int i;
 
+	zs_unregister_shrinker(pool);
 	zs_pool_stat_destroy(pool);
 
 	for (i = 0; i < zs_size_classes; i++) {