author		Dan Streetman <ddstreet@ieee.org>	2015-09-09 18:35:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-10 16:29:01 -0400
commit		f1c54846ee4504d89b59b04d870831db4046b478 (patch)
tree		003d4063cbb127fc2f2950a3a69198c0860dda56 /mm
parent		3f0e131221eb951c45c93d1cce9db73889be2a5e (diff)
zswap: dynamic pool creation
Add dynamic creation of pools. Move the static crypto compression
per-cpu transforms into each pool. Add a pointer to zswap_entry to the
pool it's in.

This is required by the following patch which enables changing the
zswap zpool and compressor params at runtime.

[akpm@linux-foundation.org: fix merge snafus]
Signed-off-by: Dan Streetman <ddstreet@ieee.org>
Acked-by: Seth Jennings <sjennings@variantweb.net>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
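The heart of the change is that pools become reference-counted objects on an RCU-protected list, so a pool can be retired while readers still hold it. Readers take references with kref_get_unless_zero(), which succeeds only while the count is nonzero. A minimal userspace sketch of that get-unless-zero idiom, with C11 atomics standing in for struct kref (illustrative only, not kernel code):

/*
 * Sketch: taking a reference succeeds only while the object is live,
 * so a reader can never resurrect an object that is being destroyed.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
        atomic_int refcount;            /* stands in for struct kref */
};

/* Succeeds only while the count is nonzero, i.e. the object is live. */
static bool obj_get_unless_zero(struct obj *o)
{
        int old = atomic_load(&o->refcount);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
                        return true;
                /* the failed CAS reloaded 'old'; retry */
        }
        return false;                   /* already zero: object is dying */
}

static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
                printf("last reference dropped, destroy object\n");
}

int main(void)
{
        struct obj o = { .refcount = 1 };       /* creation reference */

        if (obj_get_unless_zero(&o))            /* a reader takes a ref */
                obj_put(&o);                    /* ...and drops it */
        obj_put(&o);                            /* creation ref: destroys */
        return 0;
}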
Diffstat (limited to 'mm')
-rw-r--r--	mm/zswap.c	548
1 file changed, 405 insertions(+), 143 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 48a1d081e2a5..f9ba9bb1c1b8 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -99,66 +99,19 @@ module_param_named(zpool, zswap_zpool_type, charp, 0444);
 static struct zpool *zswap_pool;
 
 /*********************************
-* compression functions
+* data structures
 **********************************/
-/* per-cpu compression transforms */
-static struct crypto_comp * __percpu *zswap_comp_pcpu_tfms;
 
-enum comp_op {
-        ZSWAP_COMPOP_COMPRESS,
-        ZSWAP_COMPOP_DECOMPRESS
+struct zswap_pool {
+        struct zpool *zpool;
+        struct crypto_comp * __percpu *tfm;
+        struct kref kref;
+        struct list_head list;
+        struct rcu_head rcu_head;
+        struct notifier_block notifier;
+        char tfm_name[CRYPTO_MAX_ALG_NAME];
 };
 
-static int zswap_comp_op(enum comp_op op, const u8 *src, unsigned int slen,
-                                u8 *dst, unsigned int *dlen)
-{
-        struct crypto_comp *tfm;
-        int ret;
-
-        tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, get_cpu());
-        switch (op) {
-        case ZSWAP_COMPOP_COMPRESS:
-                ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
-                break;
-        case ZSWAP_COMPOP_DECOMPRESS:
-                ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
-                break;
-        default:
-                ret = -EINVAL;
-        }
-
-        put_cpu();
-        return ret;
-}
-
-static int __init zswap_comp_init(void)
-{
-        if (!crypto_has_comp(zswap_compressor, 0, 0)) {
-                pr_info("%s compressor not available\n", zswap_compressor);
-                /* fall back to default compressor */
-                zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
-                if (!crypto_has_comp(zswap_compressor, 0, 0))
-                        /* can't even load the default compressor */
-                        return -ENODEV;
-        }
-        pr_info("using %s compressor\n", zswap_compressor);
-
-        /* alloc percpu transforms */
-        zswap_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
-        if (!zswap_comp_pcpu_tfms)
-                return -ENOMEM;
-        return 0;
-}
-
-static void __init zswap_comp_exit(void)
-{
-        /* free percpu transforms */
-        free_percpu(zswap_comp_pcpu_tfms);
-}
-
-/*********************************
-* data structures
-**********************************/
 /*
  * struct zswap_entry
  *
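The new struct zswap_pool embeds its rcu_head and notifier_block, and the callbacks added later in this patch recover the enclosing pool from those embedded members via container_of(). A self-contained userspace sketch of that idiom; the macro below matches the kernel's, minus its extra type checking:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head {
        void (*func)(struct rcu_head *head);
};

struct pool {
        int id;
        struct rcu_head rcu_head;       /* embedded, as in zswap_pool */
};

static void pool_release(struct rcu_head *head)
{
        /* recover the enclosing pool from the embedded member */
        struct pool *pool = container_of(head, struct pool, rcu_head);

        printf("releasing pool %d\n", pool->id);
}

int main(void)
{
        struct pool p = { .id = 42 };

        p.rcu_head.func = pool_release;
        p.rcu_head.func(&p.rcu_head);   /* simulate the deferred callback */
        return 0;
}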
@@ -166,22 +119,24 @@ static void __init zswap_comp_exit(void)
  * page within zswap.
  *
  * rbnode - links the entry into red-black tree for the appropriate swap type
+ * offset - the swap offset for the entry. Index into the red-black tree.
  * refcount - the number of outstanding reference to the entry. This is needed
  *            to protect against premature freeing of the entry by code
  *            concurrent calls to load, invalidate, and writeback. The lock
  *            for the zswap_tree structure that contains the entry must
  *            be held while changing the refcount. Since the lock must
  *            be held, there is no reason to also make refcount atomic.
- * offset - the swap offset for the entry. Index into the red-black tree.
- * handle - zpool allocation handle that stores the compressed page data
  * length - the length in bytes of the compressed page data. Needed during
  *          decompression
+ * pool - the zswap_pool the entry's data is in
+ * handle - zpool allocation handle that stores the compressed page data
  */
 struct zswap_entry {
         struct rb_node rbnode;
         pgoff_t offset;
         int refcount;
         unsigned int length;
+        struct zswap_pool *pool;
         unsigned long handle;
 };
 
@@ -201,6 +156,48 @@ struct zswap_tree {
 
 static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 
+/* RCU-protected iteration */
+static LIST_HEAD(zswap_pools);
+/* protects zswap_pools list modification */
+static DEFINE_SPINLOCK(zswap_pools_lock);
+
+/*********************************
+* helpers and fwd declarations
+**********************************/
+
+#define zswap_pool_debug(msg, p)                                \
+        pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
+                 zpool_get_type((p)->zpool))
+
+static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
+static int zswap_pool_get(struct zswap_pool *pool);
+static void zswap_pool_put(struct zswap_pool *pool);
+
+static const struct zpool_ops zswap_zpool_ops = {
+        .evict = zswap_writeback_entry
+};
+
+static bool zswap_is_full(void)
+{
+        return totalram_pages * zswap_max_pool_percent / 100 <
+                        DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
+}
+
+static void zswap_update_total_size(void)
+{
+        struct zswap_pool *pool;
+        u64 total = 0;
+
+        rcu_read_lock();
+
+        list_for_each_entry_rcu(pool, &zswap_pools, list)
+                total += zpool_get_total_size(pool->zpool);
+
+        rcu_read_unlock();
+
+        zswap_pool_total_size = total;
+}
+
 /*********************************
 * zswap entry functions
 **********************************/
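zswap_is_full() above compares pages, not bytes: the pools' combined byte size is rounded up to whole pages and checked against zswap_max_pool_percent of total RAM. A standalone sketch of that arithmetic with made-up numbers (1 GiB of 4 KiB pages and the 20% cap):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static bool is_full(unsigned long totalram_pages,
                    unsigned long max_pool_percent,
                    unsigned long long pool_total_size)
{
        return totalram_pages * max_pool_percent / 100 <
                        DIV_ROUND_UP(pool_total_size, PAGE_SIZE);
}

int main(void)
{
        /* 1 GiB of RAM is 262144 pages; a 20% cap allows 52428 pages. */
        unsigned long pages = 262144, percent = 20;

        printf("%d\n", is_full(pages, percent, 200ULL << 20)); /* 0: 51200 pages */
        printf("%d\n", is_full(pages, percent, 300ULL << 20)); /* 1: 76800 pages */
        return 0;
}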
@@ -294,10 +291,11 @@ static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
  */
 static void zswap_free_entry(struct zswap_entry *entry)
 {
-        zpool_free(zswap_pool, entry->handle);
+        zpool_free(entry->pool->zpool, entry->handle);
+        zswap_pool_put(entry->pool);
         zswap_entry_cache_free(entry);
         atomic_dec(&zswap_stored_pages);
-        zswap_pool_total_size = zpool_get_total_size(zswap_pool);
+        zswap_update_total_size();
 }
 
 /* caller must hold the tree lock */
@@ -339,35 +337,21 @@ static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
 **********************************/
 static DEFINE_PER_CPU(u8 *, zswap_dstmem);
 
-static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu)
+static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu)
 {
-        struct crypto_comp *tfm;
         u8 *dst;
 
         switch (action) {
         case CPU_UP_PREPARE:
-                tfm = crypto_alloc_comp(zswap_compressor, 0, 0);
-                if (IS_ERR(tfm)) {
-                        pr_err("can't allocate compressor transform\n");
-                        return NOTIFY_BAD;
-                }
-                *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = tfm;
                 dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
                 if (!dst) {
                         pr_err("can't allocate compressor buffer\n");
-                        crypto_free_comp(tfm);
-                        *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
                         return NOTIFY_BAD;
                 }
                 per_cpu(zswap_dstmem, cpu) = dst;
                 break;
         case CPU_DEAD:
         case CPU_UP_CANCELED:
-                tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu);
-                if (tfm) {
-                        crypto_free_comp(tfm);
-                        *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
-                }
                 dst = per_cpu(zswap_dstmem, cpu);
                 kfree(dst);
                 per_cpu(zswap_dstmem, cpu) = NULL;
@@ -378,43 +362,303 @@ static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu)
         return NOTIFY_OK;
 }
 
-static int zswap_cpu_notifier(struct notifier_block *nb,
-                                unsigned long action, void *pcpu)
+static int zswap_cpu_dstmem_notifier(struct notifier_block *nb,
+                                     unsigned long action, void *pcpu)
 {
-        unsigned long cpu = (unsigned long)pcpu;
-        return __zswap_cpu_notifier(action, cpu);
+        return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu);
 }
 
-static struct notifier_block zswap_cpu_notifier_block = {
-        .notifier_call = zswap_cpu_notifier
+static struct notifier_block zswap_dstmem_notifier = {
+        .notifier_call = zswap_cpu_dstmem_notifier,
 };
 
-static int __init zswap_cpu_init(void)
+static int __init zswap_cpu_dstmem_init(void)
+{
+        unsigned long cpu;
+
+        cpu_notifier_register_begin();
+        for_each_online_cpu(cpu)
+                if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) ==
+                    NOTIFY_BAD)
+                        goto cleanup;
+        __register_cpu_notifier(&zswap_dstmem_notifier);
+        cpu_notifier_register_done();
+        return 0;
+
+cleanup:
+        for_each_online_cpu(cpu)
+                __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
+        cpu_notifier_register_done();
+        return -ENOMEM;
+}
+
+static void zswap_cpu_dstmem_destroy(void)
+{
+        unsigned long cpu;
+
+        cpu_notifier_register_begin();
+        for_each_online_cpu(cpu)
+                __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
+        __unregister_cpu_notifier(&zswap_dstmem_notifier);
+        cpu_notifier_register_done();
+}
+
+static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,
+                                     unsigned long action, unsigned long cpu)
+{
+        struct crypto_comp *tfm;
+
+        switch (action) {
+        case CPU_UP_PREPARE:
+                if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
+                        break;
+                tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
+                if (IS_ERR_OR_NULL(tfm)) {
+                        pr_err("could not alloc crypto comp %s : %ld\n",
+                               pool->tfm_name, PTR_ERR(tfm));
+                        return NOTIFY_BAD;
+                }
+                *per_cpu_ptr(pool->tfm, cpu) = tfm;
+                break;
+        case CPU_DEAD:
+        case CPU_UP_CANCELED:
+                tfm = *per_cpu_ptr(pool->tfm, cpu);
+                if (!IS_ERR_OR_NULL(tfm))
+                        crypto_free_comp(tfm);
+                *per_cpu_ptr(pool->tfm, cpu) = NULL;
+                break;
+        default:
+                break;
+        }
+        return NOTIFY_OK;
+}
+
+static int zswap_cpu_comp_notifier(struct notifier_block *nb,
+                                   unsigned long action, void *pcpu)
+{
+        unsigned long cpu = (unsigned long)pcpu;
+        struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier);
+
+        return __zswap_cpu_comp_notifier(pool, action, cpu);
+}
+
+static int zswap_cpu_comp_init(struct zswap_pool *pool)
 {
         unsigned long cpu;
 
+        memset(&pool->notifier, 0, sizeof(pool->notifier));
+        pool->notifier.notifier_call = zswap_cpu_comp_notifier;
+
         cpu_notifier_register_begin();
         for_each_online_cpu(cpu)
-                if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
+                if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) ==
+                                NOTIFY_BAD)
                         goto cleanup;
-        __register_cpu_notifier(&zswap_cpu_notifier_block);
+        __register_cpu_notifier(&pool->notifier);
         cpu_notifier_register_done();
         return 0;
 
cleanup:
         for_each_online_cpu(cpu)
-                __zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
+                __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
         cpu_notifier_register_done();
         return -ENOMEM;
 }
 
+static void zswap_cpu_comp_destroy(struct zswap_pool *pool)
+{
+        unsigned long cpu;
+
+        cpu_notifier_register_begin();
+        for_each_online_cpu(cpu)
+                __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
+        __unregister_cpu_notifier(&pool->notifier);
+        cpu_notifier_register_done();
+}
+
 /*********************************
-* helpers
+* pool functions
 **********************************/
-static bool zswap_is_full(void)
+
+static struct zswap_pool *__zswap_pool_current(void)
 {
-        return totalram_pages * zswap_max_pool_percent / 100 <
-                        DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
+        struct zswap_pool *pool;
+
+        pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
+        WARN_ON(!pool);
+
+        return pool;
+}
+
+static struct zswap_pool *zswap_pool_current(void)
+{
+        assert_spin_locked(&zswap_pools_lock);
+
+        return __zswap_pool_current();
+}
+
+static struct zswap_pool *zswap_pool_current_get(void)
+{
+        struct zswap_pool *pool;
+
+        rcu_read_lock();
+
+        pool = __zswap_pool_current();
+        if (!pool || !zswap_pool_get(pool))
+                pool = NULL;
+
+        rcu_read_unlock();
+
+        return pool;
+}
+
+static struct zswap_pool *zswap_pool_last_get(void)
+{
+        struct zswap_pool *pool, *last = NULL;
+
+        rcu_read_lock();
+
+        list_for_each_entry_rcu(pool, &zswap_pools, list)
+                last = pool;
+        if (!WARN_ON(!last) && !zswap_pool_get(last))
+                last = NULL;
+
+        rcu_read_unlock();
+
+        return last;
+}
+
+static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
+{
+        struct zswap_pool *pool;
+
+        assert_spin_locked(&zswap_pools_lock);
+
+        list_for_each_entry_rcu(pool, &zswap_pools, list) {
+                if (strncmp(pool->tfm_name, compressor, sizeof(pool->tfm_name)))
+                        continue;
+                if (strncmp(zpool_get_type(pool->zpool), type,
+                            sizeof(zswap_zpool_type)))
+                        continue;
+                /* if we can't get it, it's about to be destroyed */
+                if (!zswap_pool_get(pool))
+                        continue;
+                return pool;
+        }
+
+        return NULL;
+}
+
+static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
+{
+        struct zswap_pool *pool;
+        gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;
+
+        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+        if (!pool) {
+                pr_err("pool alloc failed\n");
+                return NULL;
+        }
+
+        pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
+        if (!pool->zpool) {
+                pr_err("%s zpool not available\n", type);
+                goto error;
+        }
+        pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
+
+        strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
+        pool->tfm = alloc_percpu(struct crypto_comp *);
+        if (!pool->tfm) {
+                pr_err("percpu alloc failed\n");
+                goto error;
+        }
+
+        if (zswap_cpu_comp_init(pool))
+                goto error;
+        pr_debug("using %s compressor\n", pool->tfm_name);
+
+        /* being the current pool takes 1 ref; this func expects the
+         * caller to always add the new pool as the current pool
+         */
+        kref_init(&pool->kref);
+        INIT_LIST_HEAD(&pool->list);
+
+        zswap_pool_debug("created", pool);
+
+        return pool;
+
+error:
+        free_percpu(pool->tfm);
+        if (pool->zpool)
+                zpool_destroy_pool(pool->zpool);
+        kfree(pool);
+        return NULL;
+}
+
+static struct zswap_pool *__zswap_pool_create_fallback(void)
+{
+        if (!crypto_has_comp(zswap_compressor, 0, 0)) {
+                pr_err("compressor %s not available, using default %s\n",
+                       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
+                strncpy(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT,
+                        sizeof(zswap_compressor));
+        }
+        if (!zpool_has_pool(zswap_zpool_type)) {
+                pr_err("zpool %s not available, using default %s\n",
+                       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
+                strncpy(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT,
+                        sizeof(zswap_zpool_type));
+        }
+
+        return zswap_pool_create(zswap_zpool_type, zswap_compressor);
+}
+
+static void zswap_pool_destroy(struct zswap_pool *pool)
+{
+        zswap_pool_debug("destroying", pool);
+
+        zswap_cpu_comp_destroy(pool);
+        free_percpu(pool->tfm);
+        zpool_destroy_pool(pool->zpool);
+        kfree(pool);
+}
+
+static int __must_check zswap_pool_get(struct zswap_pool *pool)
+{
+        return kref_get_unless_zero(&pool->kref);
+}
+
+static void __zswap_pool_release(struct rcu_head *head)
+{
+        struct zswap_pool *pool = container_of(head, typeof(*pool), rcu_head);
+
+        /* nobody should have been able to get a kref... */
+        WARN_ON(kref_get_unless_zero(&pool->kref));
+
+        /* pool is now off zswap_pools list and has no references. */
+        zswap_pool_destroy(pool);
+}
+
+static void __zswap_pool_empty(struct kref *kref)
+{
+        struct zswap_pool *pool;
+
+        pool = container_of(kref, typeof(*pool), kref);
+
+        spin_lock(&zswap_pools_lock);
+
+        WARN_ON(pool == zswap_pool_current());
+
+        list_del_rcu(&pool->list);
+        call_rcu(&pool->rcu_head, __zswap_pool_release);
+
+        spin_unlock(&zswap_pools_lock);
+}
+
+static void zswap_pool_put(struct zswap_pool *pool)
+{
+        kref_put(&pool->kref, __zswap_pool_empty);
 }
 
 /*********************************
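The lifecycle these functions set up: zswap_pool_create() returns a pool holding one reference for being the current pool, new pools go on the head of zswap_pools (so the head is current and the tail is the oldest), and when the last kref drops, __zswap_pool_empty() unlinks the pool under zswap_pools_lock and defers destruction past an RCU grace period via call_rcu(). A single-threaded userspace sketch of that ordering; the list and refcount are stand-ins, and the RCU deferral is simulated by running the callback immediately:

#include <stdio.h>

struct pool {
        int refcount;                   /* stands in for struct kref */
        struct pool *next;              /* stands in for struct list_head */
        int id;
};

static struct pool *pools;              /* head = current, tail = oldest */

static void pool_release(struct pool *pool)     /* the call_rcu() callback */
{
        printf("pool %d destroyed\n", pool->id);
}

static void pool_put(struct pool *pool)
{
        struct pool **pp;

        if (--pool->refcount)
                return;
        /* last reference: unlink, then destroy (deferred in the kernel) */
        for (pp = &pools; *pp; pp = &(*pp)->next)
                if (*pp == pool) {
                        *pp = pool->next;
                        break;
                }
        pool_release(pool);
}

int main(void)
{
        struct pool old = { .refcount = 1, .id = 1 };
        struct pool cur = { .refcount = 1, .id = 2, .next = &old };

        pools = &cur;           /* newest pool becomes the current pool */
        pool_put(&old);         /* retire the oldest pool */
        pool_put(&cur);         /* then the current one */
        return 0;
}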
@@ -477,6 +721,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
         pgoff_t offset;
         struct zswap_entry *entry;
         struct page *page;
+        struct crypto_comp *tfm;
         u8 *src, *dst;
         unsigned int dlen;
         int ret;
@@ -517,13 +762,15 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
         case ZSWAP_SWAPCACHE_NEW: /* page is locked */
                 /* decompress */
                 dlen = PAGE_SIZE;
-                src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,
+                src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
                                 ZPOOL_MM_RO) + sizeof(struct zswap_header);
                 dst = kmap_atomic(page);
-                ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
-                                entry->length, dst, &dlen);
+                tfm = *get_cpu_ptr(entry->pool->tfm);
+                ret = crypto_comp_decompress(tfm, src, entry->length,
+                                             dst, &dlen);
+                put_cpu_ptr(entry->pool->tfm);
                 kunmap_atomic(dst);
-                zpool_unmap_handle(zswap_pool, entry->handle);
+                zpool_unmap_handle(entry->pool->zpool, entry->handle);
                 BUG_ON(ret);
                 BUG_ON(dlen != PAGE_SIZE);
 
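The two BUG_ON()s encode the invariant that a stored page must decompress successfully and to exactly PAGE_SIZE bytes. A userspace sketch of the same round-trip with zlib standing in for the kernel crypto_comp API (build with -lz; the kernel code uses the pool's per-cpu transform instead):

#include <assert.h>
#include <string.h>
#include <zlib.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned char page[PAGE_SIZE], out[PAGE_SIZE];
        unsigned char buf[PAGE_SIZE * 2];       /* worst case, like zswap_dstmem */
        uLongf clen = sizeof(buf), dlen = sizeof(out);

        memset(page, 'z', sizeof(page));        /* a compressible payload */

        assert(compress2(buf, &clen, page, PAGE_SIZE, 1) == Z_OK);
        assert(uncompress(out, &dlen, buf, clen) == Z_OK);
        assert(dlen == PAGE_SIZE);      /* the BUG_ON(dlen != PAGE_SIZE) */
        assert(memcmp(page, out, PAGE_SIZE) == 0);
        return 0;
}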
@@ -572,6 +819,22 @@ end:
         return ret;
 }
 
+static int zswap_shrink(void)
+{
+        struct zswap_pool *pool;
+        int ret;
+
+        pool = zswap_pool_last_get();
+        if (!pool)
+                return -ENOENT;
+
+        ret = zpool_shrink(pool->zpool, 1, NULL);
+
+        zswap_pool_put(pool);
+
+        return ret;
+}
+
 /*********************************
 * frontswap hooks
 **********************************/
@@ -581,6 +844,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 {
         struct zswap_tree *tree = zswap_trees[type];
         struct zswap_entry *entry, *dupentry;
+        struct crypto_comp *tfm;
         int ret;
         unsigned int dlen = PAGE_SIZE, len;
         unsigned long handle;
@@ -596,7 +860,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
         /* reclaim space if needed */
         if (zswap_is_full()) {
                 zswap_pool_limit_hit++;
-                if (zpool_shrink(zswap_pool, 1, NULL)) {
+                if (zswap_shrink()) {
                         zswap_reject_reclaim_fail++;
                         ret = -ENOMEM;
                         goto reject;
@@ -611,33 +875,42 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
                 goto reject;
         }
 
+        /* if entry is successfully added, it keeps the reference */
+        entry->pool = zswap_pool_current_get();
+        if (!entry->pool) {
+                ret = -EINVAL;
+                goto freepage;
+        }
+
         /* compress */
         dst = get_cpu_var(zswap_dstmem);
+        tfm = *get_cpu_ptr(entry->pool->tfm);
         src = kmap_atomic(page);
-        ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
+        ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
         kunmap_atomic(src);
+        put_cpu_ptr(entry->pool->tfm);
         if (ret) {
                 ret = -EINVAL;
-                goto freepage;
+                goto put_dstmem;
         }
 
         /* store */
         len = dlen + sizeof(struct zswap_header);
-        ret = zpool_malloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN,
-                        &handle);
+        ret = zpool_malloc(entry->pool->zpool, len,
+                           __GFP_NORETRY | __GFP_NOWARN, &handle);
         if (ret == -ENOSPC) {
                 zswap_reject_compress_poor++;
-                goto freepage;
+                goto put_dstmem;
         }
         if (ret) {
                 zswap_reject_alloc_fail++;
-                goto freepage;
+                goto put_dstmem;
         }
-        zhdr = zpool_map_handle(zswap_pool, handle, ZPOOL_MM_RW);
+        zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
         zhdr->swpentry = swp_entry(type, offset);
         buf = (u8 *)(zhdr + 1);
         memcpy(buf, dst, dlen);
-        zpool_unmap_handle(zswap_pool, handle);
+        zpool_unmap_handle(entry->pool->zpool, handle);
         put_cpu_var(zswap_dstmem);
 
         /* populate entry */
@@ -660,12 +933,14 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 
         /* update stats */
         atomic_inc(&zswap_stored_pages);
-        zswap_pool_total_size = zpool_get_total_size(zswap_pool);
+        zswap_update_total_size();
 
         return 0;
 
-freepage:
+put_dstmem:
         put_cpu_var(zswap_dstmem);
+        zswap_pool_put(entry->pool);
+freepage:
         zswap_entry_cache_free(entry);
 reject:
         return ret;
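The relabeled error path unwinds in reverse order of acquisition: the per-cpu buffer (put_dstmem) is released before the pool reference, while the success path keeps the pool reference for the entry. A runnable miniature of that goto-unwind shape, with stand-in resources rather than the kernel ones:

#include <stdio.h>
#include <stdlib.h>

static int get_pool(void)    { return 1; }              /* zswap_pool_current_get() */
static void put_pool(void)   { puts("put pool ref"); }
static void *get_buf(void)   { return malloc(64); }     /* get_cpu_var() */
static void put_buf(void *b) { free(b); puts("put buffer"); }

static int store(int fail_compress)
{
        void *buf;
        int ret;

        if (!get_pool())
                return -1;              /* nothing acquired yet */
        buf = get_buf();
        if (!buf || fail_compress) {
                ret = -1;
                goto put_buffer;        /* the new put_dstmem label */
        }
        puts("stored");
        put_buf(buf);                   /* success: entry keeps the pool ref */
        return 0;

put_buffer:                             /* unwind in reverse order */
        put_buf(buf);
        put_pool();
        return ret;
}

int main(void)
{
        store(0);       /* success path */
        store(1);       /* failure after both acquisitions */
        return 0;
}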
@@ -680,6 +955,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 {
         struct zswap_tree *tree = zswap_trees[type];
         struct zswap_entry *entry;
+        struct crypto_comp *tfm;
         u8 *src, *dst;
         unsigned int dlen;
         int ret;
@@ -696,13 +972,14 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 
         /* decompress */
         dlen = PAGE_SIZE;
-        src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,
+        src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
                         ZPOOL_MM_RO) + sizeof(struct zswap_header);
         dst = kmap_atomic(page);
-        ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length,
-                        dst, &dlen);
+        tfm = *get_cpu_ptr(entry->pool->tfm);
+        ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
+        put_cpu_ptr(entry->pool->tfm);
         kunmap_atomic(dst);
-        zpool_unmap_handle(zswap_pool, entry->handle);
+        zpool_unmap_handle(entry->pool->zpool, entry->handle);
         BUG_ON(ret);
 
         spin_lock(&tree->lock);
@@ -755,10 +1032,6 @@ static void zswap_frontswap_invalidate_area(unsigned type)
         zswap_trees[type] = NULL;
 }
 
-static const struct zpool_ops zswap_zpool_ops = {
-        .evict = zswap_writeback_entry
-};
-
 static void zswap_frontswap_init(unsigned type)
 {
         struct zswap_tree *tree;
@@ -839,49 +1112,38 @@ static void __exit zswap_debugfs_exit(void) { }
 **********************************/
 static int __init init_zswap(void)
 {
-        gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;
-
-        pr_info("loading zswap\n");
-
-        zswap_pool = zpool_create_pool(zswap_zpool_type, "zswap", gfp,
-                                        &zswap_zpool_ops);
-        if (!zswap_pool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
-                pr_info("%s zpool not available\n", zswap_zpool_type);
-                zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
-                zswap_pool = zpool_create_pool(zswap_zpool_type, "zswap", gfp,
-                                        &zswap_zpool_ops);
-        }
-        if (!zswap_pool) {
-                pr_err("%s zpool not available\n", zswap_zpool_type);
-                pr_err("zpool creation failed\n");
-                goto error;
-        }
-        pr_info("using %s pool\n", zswap_zpool_type);
+        struct zswap_pool *pool;
 
         if (zswap_entry_cache_create()) {
                 pr_err("entry cache creation failed\n");
-                goto cachefail;
+                goto cache_fail;
         }
-        if (zswap_comp_init()) {
-                pr_err("compressor initialization failed\n");
-                goto compfail;
+
+        if (zswap_cpu_dstmem_init()) {
+                pr_err("dstmem alloc failed\n");
+                goto dstmem_fail;
         }
-        if (zswap_cpu_init()) {
-                pr_err("per-cpu initialization failed\n");
-                goto pcpufail;
+
+        pool = __zswap_pool_create_fallback();
+        if (!pool) {
+                pr_err("pool creation failed\n");
+                goto pool_fail;
         }
+        pr_info("loaded using pool %s/%s\n", pool->tfm_name,
+                zpool_get_type(pool->zpool));
+
+        list_add(&pool->list, &zswap_pools);
 
         frontswap_register_ops(&zswap_frontswap_ops);
         if (zswap_debugfs_init())
                 pr_warn("debugfs initialization failed\n");
         return 0;
-pcpufail:
-        zswap_comp_exit();
-compfail:
+
+pool_fail:
+        zswap_cpu_dstmem_destroy();
+dstmem_fail:
         zswap_entry_cache_destroy();
-cachefail:
-        zpool_destroy_pool(zswap_pool);
-error:
+cache_fail:
         return -ENOMEM;
 }
 /* must be late so crypto has time to come up */
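__zswap_pool_create_fallback(), called from init above, applies a simple policy: if the configured compressor or zpool type is unavailable, overwrite the parameter with the compiled-in default before creating the single initial pool. A standalone sketch of that selection logic; the availability checks are stubbed, "lzo"/"zbud" as defaults are assumptions based on zswap of this era, and the buffer sizes are illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
#define ZSWAP_ZPOOL_DEFAULT      "zbud"

static char zswap_compressor[16] = "lz4";       /* as if set on the cmdline */
static char zswap_zpool_type[16] = "zsmalloc";

/* stand-ins for crypto_has_comp() and zpool_has_pool() */
static bool has_comp(const char *name) { return !strcmp(name, "lzo"); }
static bool has_pool(const char *name) { return !strcmp(name, "zbud"); }

static void create_fallback(void)
{
        if (!has_comp(zswap_compressor)) {
                printf("compressor %s not available, using default %s\n",
                       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
                strncpy(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT,
                        sizeof(zswap_compressor) - 1);
        }
        if (!has_pool(zswap_zpool_type)) {
                printf("zpool %s not available, using default %s\n",
                       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
                strncpy(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT,
                        sizeof(zswap_zpool_type) - 1);
        }
        printf("creating pool %s/%s\n", zswap_zpool_type, zswap_compressor);
}

int main(void)
{
        create_fallback();
        return 0;
}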