Diffstat (limited to 'mm/zsmalloc.c')
 mm/zsmalloc.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 85 insertions(+), 1 deletion(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index fe78189624cf..4e2fc83cb394 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -92,6 +92,7 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/zsmalloc.h>
+#include <linux/zpool.h>
 
 /*
  * This must be power of 2 and greater than of equal to sizeof(link_free).
@@ -240,6 +241,81 @@ struct mapping_area {
         enum zs_mapmode vm_mm; /* mapping mode */
 };
 
+/* zpool driver */
+
+#ifdef CONFIG_ZPOOL
+
+static void *zs_zpool_create(gfp_t gfp, struct zpool_ops *zpool_ops)
+{
+        return zs_create_pool(gfp);
+}
+
+static void zs_zpool_destroy(void *pool)
+{
+        zs_destroy_pool(pool);
+}
+
+static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
+                        unsigned long *handle)
+{
+        *handle = zs_malloc(pool, size);
+        return *handle ? 0 : -1;
+}
+static void zs_zpool_free(void *pool, unsigned long handle)
+{
+        zs_free(pool, handle);
+}
+
+static int zs_zpool_shrink(void *pool, unsigned int pages,
+                        unsigned int *reclaimed)
+{
+        return -EINVAL;
+}
+
+static void *zs_zpool_map(void *pool, unsigned long handle,
+                        enum zpool_mapmode mm)
+{
+        enum zs_mapmode zs_mm;
+
+        switch (mm) {
+        case ZPOOL_MM_RO:
+                zs_mm = ZS_MM_RO;
+                break;
+        case ZPOOL_MM_WO:
+                zs_mm = ZS_MM_WO;
+                break;
+        case ZPOOL_MM_RW: /* fallthru */
+        default:
+                zs_mm = ZS_MM_RW;
+                break;
+        }
+
+        return zs_map_object(pool, handle, zs_mm);
+}
+static void zs_zpool_unmap(void *pool, unsigned long handle)
+{
+        zs_unmap_object(pool, handle);
+}
+
+static u64 zs_zpool_total_size(void *pool)
+{
+        return zs_get_total_size_bytes(pool);
+}
+
+static struct zpool_driver zs_zpool_driver = {
+        .type =       "zsmalloc",
+        .owner =      THIS_MODULE,
+        .create =     zs_zpool_create,
+        .destroy =    zs_zpool_destroy,
+        .malloc =     zs_zpool_malloc,
+        .free =       zs_zpool_free,
+        .shrink =     zs_zpool_shrink,
+        .map =        zs_zpool_map,
+        .unmap =      zs_zpool_unmap,
+        .total_size = zs_zpool_total_size,
+};
+
+#endif /* CONFIG_ZPOOL */
 
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
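
The hunk above is the whole of the glue layer: each callback is a thin shim from zpool's void-pointer, handle-based interface onto the existing zsmalloc API. Two details follow directly from the code: zs_zpool_create() ignores its zpool_ops argument and zs_zpool_shrink() unconditionally returns -EINVAL, because zsmalloc has no eviction support, and zs_zpool_malloc() reports failure as a bare -1 rather than a conventional errno value. As a hedged sketch (not part of this diff), a zpool consumer such as zswap would reach these callbacks through the generic wrappers; the wrapper signatures below are assumptions modeled on the ops they dispatch to:

#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zpool.h>

/*
 * Sketch only: store a buffer into a pool created with type
 * "zsmalloc".  zpool_malloc()/zpool_map_handle() are assumed to
 * dispatch to zs_zpool_malloc()/zs_zpool_map() above.
 */
static unsigned long store_buf(struct zpool *pool, const void *src, size_t len)
{
        unsigned long handle;
        void *dst;

        if (zpool_malloc(pool, len, GFP_NOIO, &handle))
                return 0;       /* 0 is never a valid zsmalloc handle */

        dst = zpool_map_handle(pool, handle, ZPOOL_MM_WO); /* -> zs_zpool_map() */
        memcpy(dst, src, len);
        zpool_unmap_handle(pool, handle);                  /* -> zs_zpool_unmap() */
        return handle;
}
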
@@ -690,7 +766,7 @@ static inline void __zs_cpu_down(struct mapping_area *area)
 static inline void *__zs_map_object(struct mapping_area *area,
                         struct page *pages[2], int off, int size)
 {
-        BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
+        BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
         area->vm_addr = area->vm->addr;
         return area->vm_addr + off;
 }
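
The one-character change in this hunk drops a level of indirection: `pages` is declared as an array parameter, so inside the function it already has type `struct page **`. Passing `&pages` therefore produced a `struct page ***`, which matched the older map_vm_area() prototype (it consumed and advanced the pointer), but this hunk appears to track the map_vm_area() cleanup that changed the third parameter to a plain `struct page **`. A minimal standalone C sketch of the pointer levels, with int standing in for struct page (both prototypes here are stand-ins, not the real kernel ones):

static void old_api(int ***pages) { (void)pages; } /* pre-cleanup shape  */
static void new_api(int **pages)  { (void)pages; } /* post-cleanup shape */

static void caller(int *pages[2])       /* decays to int ** */
{
        old_api(&pages);        /* old code: address of the decayed pointer */
        new_api(pages);         /* new code: pass the page array directly   */
}
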
@@ -814,6 +890,10 @@ static void zs_exit(void)
 {
         int cpu;
 
+#ifdef CONFIG_ZPOOL
+        zpool_unregister_driver(&zs_zpool_driver);
+#endif
+
         cpu_notifier_register_begin();
 
         for_each_online_cpu(cpu)
@@ -840,6 +920,10 @@ static int zs_init(void)
 
         cpu_notifier_register_done();
 
+#ifdef CONFIG_ZPOOL
+        zpool_register_driver(&zs_zpool_driver);
+#endif
+
         return 0;
 fail:
         zs_exit();
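
Note the ordering in these two hunks: zs_init() registers the driver only after the per-cpu mapping areas are up, and zs_exit() unregisters it before tearing them down, so the "zsmalloc" type is never visible to zpool while the allocator is half-initialized. With the driver registered, pool creation becomes a lookup by type string. A hedged sketch of the lifecycle from the consumer side, assuming the zpool_create_pool()/zpool_destroy_pool() signatures of this era (neither appears in this diff):

#include <linux/gfp.h>
#include <linux/zpool.h>

static struct zpool *make_pool(void)
{
        /* Resolves "zsmalloc" to zs_zpool_driver and invokes .create,
         * i.e. zs_zpool_create(); ops may be NULL since zsmalloc
         * ignores it anyway. */
        return zpool_create_pool("zsmalloc", GFP_KERNEL, NULL);
}

static void drop_pool(struct zpool *pool)
{
        zpool_destroy_pool(pool);       /* .destroy -> zs_destroy_pool() */
}
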