author		Minchan Kim <minchan@kernel.org>	2015-04-15 19:15:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-15 19:35:20 -0400
commit		c78062612fb525430b775a0bef4d3cc07e512da0 (patch)
tree		721419c87789027ae0988b4c5bda0572bb712db2 /mm
parent		2e40e163a25af3bd35d128d3e2e005916de5cce6 (diff)
zsmalloc: factor out obj_[malloc|free]
In a later patch, migration will need parts of zs_malloc() and zs_free(), so factor those parts out.

Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Juneho Choi <juno.choi@lge.com>
Cc: Gunho Lee <gunho.lee@lge.com>
Cc: Luigi Semenzato <semenzato@google.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Seth Jennings <sjennings@variantweb.net>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
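The factored-out helpers encapsulate zsmalloc's intrusive freelist: obj_malloc() pops the head chunk off first_page->freelist and records the handle in the chunk's header word, while obj_free() pushes the chunk back; the class locking and fullness regrouping stay with the callers. As a rough userspace analogy of that freelist pattern, here is a minimal sketch — the names pool_mem, pool_init and the fixed chunk geometry are invented for illustration, and the real code operates on kmap_atomic()-mapped zspage memory under class->lock:

/*
 * Toy userspace analogy of the obj_malloc()/obj_free() freelist pattern.
 * Hypothetical illustration only, not zsmalloc code: a flat buffer stands
 * in for the zspage. Each free chunk's first word links to the next free
 * chunk; allocation overwrites that word with the object's handle.
 */
#include <stdio.h>

#define CHUNK_SIZE	32	/* stands in for class->size */
#define NR_CHUNKS	4

struct link_free {
	void *next;		/* next free chunk, or the handle when in use */
};

static _Alignas(void *) char pool_mem[CHUNK_SIZE * NR_CHUNKS];
static void *freelist;		/* stands in for first_page->freelist */
static int inuse;		/* stands in for first_page->inuse */

static void pool_init(void)
{
	/* Thread every chunk onto the freelist. */
	for (int i = 0; i < NR_CHUNKS; i++) {
		struct link_free *link = (void *)&pool_mem[i * CHUNK_SIZE];

		link->next = (i + 1 < NR_CHUNKS) ?
			(void *)&pool_mem[(i + 1) * CHUNK_SIZE] : NULL;
	}
	freelist = pool_mem;
}

static void *obj_malloc(unsigned long handle)
{
	struct link_free *link = freelist;

	if (!link)
		return NULL;
	freelist = link->next;		/* pop the head chunk */
	link->next = (void *)handle;	/* record handle in chunk header */
	inuse++;
	return link;
}

static void obj_free(void *obj)
{
	struct link_free *link = obj;

	link->next = freelist;		/* push the chunk back */
	freelist = obj;
	inuse--;
}

int main(void)
{
	pool_init();
	void *a = obj_malloc(0x1001);
	void *b = obj_malloc(0x1002);
	printf("allocated %p and %p, inuse=%d\n", a, b, inuse);
	obj_free(a);
	obj_free(b);
	printf("freed both, inuse=%d\n", inuse);
	return 0;
}

The same header word alternately holds the freelist link while a chunk is free and the handle while it is allocated, so both paths reduce to a single header update plus the inuse and OBJ_USED accounting — which is what makes them easy to call from another path such as migration.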
Diffstat (limited to 'mm')
-rw-r--r--	mm/zsmalloc.c	98
1 file changed, 60 insertions(+), 38 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 6f3cfbf5e237..55b171016f4f 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -525,11 +525,10 @@ static void remove_zspage(struct page *page, struct size_class *class,
  * page from the freelist of the old fullness group to that of the new
  * fullness group.
  */
-static enum fullness_group fix_fullness_group(struct zs_pool *pool,
+static enum fullness_group fix_fullness_group(struct size_class *class,
                                                 struct page *page)
 {
         int class_idx;
-        struct size_class *class;
         enum fullness_group currfg, newfg;
 
         BUG_ON(!is_first_page(page));
@@ -539,7 +538,6 @@ static enum fullness_group fix_fullness_group(struct zs_pool *pool,
         if (newfg == currfg)
                 goto out;
 
-        class = pool->size_class[class_idx];
         remove_zspage(page, class, currfg);
         insert_zspage(page, class, newfg);
         set_zspage_mapping(page, class_idx, newfg);
@@ -1281,6 +1279,33 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 }
 EXPORT_SYMBOL_GPL(zs_unmap_object);
 
+static unsigned long obj_malloc(struct page *first_page,
+                struct size_class *class, unsigned long handle)
+{
+        unsigned long obj;
+        struct link_free *link;
+
+        struct page *m_page;
+        unsigned long m_objidx, m_offset;
+        void *vaddr;
+
+        obj = (unsigned long)first_page->freelist;
+        obj_to_location(obj, &m_page, &m_objidx);
+        m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
+
+        vaddr = kmap_atomic(m_page);
+        link = (struct link_free *)vaddr + m_offset / sizeof(*link);
+        first_page->freelist = link->next;
+        /* record handle in the header of allocated chunk */
+        link->handle = handle;
+        kunmap_atomic(vaddr);
+        first_page->inuse++;
+        zs_stat_inc(class, OBJ_USED, 1);
+
+        return obj;
+}
+
+
 /**
  * zs_malloc - Allocate block of given size from pool.
  * @pool: pool to allocate from
@@ -1293,12 +1318,8 @@ EXPORT_SYMBOL_GPL(zs_unmap_object);
 unsigned long zs_malloc(struct zs_pool *pool, size_t size)
 {
         unsigned long handle, obj;
-        struct link_free *link;
         struct size_class *class;
-        void *vaddr;
-
-        struct page *first_page, *m_page;
-        unsigned long m_objidx, m_offset;
+        struct page *first_page;
 
         if (unlikely(!size || (size + ZS_HANDLE_SIZE) > ZS_MAX_ALLOC_SIZE))
                 return 0;
@@ -1331,22 +1352,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
                                 class->size, class->pages_per_zspage));
         }
 
-        obj = (unsigned long)first_page->freelist;
-        obj_to_location(obj, &m_page, &m_objidx);
-        m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
-
-        vaddr = kmap_atomic(m_page);
-        link = (struct link_free *)vaddr + m_offset / sizeof(*link);
-        first_page->freelist = link->next;
-
-        /* record handle in the header of allocated chunk */
-        link->handle = handle;
-        kunmap_atomic(vaddr);
-
-        first_page->inuse++;
-        zs_stat_inc(class, OBJ_USED, 1);
+        obj = obj_malloc(first_page, class, handle);
         /* Now move the zspage to another fullness group, if required */
-        fix_fullness_group(pool, first_page);
+        fix_fullness_group(class, first_page);
         record_obj(handle, obj);
         spin_unlock(&class->lock);
 
@@ -1354,46 +1362,60 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
 }
 EXPORT_SYMBOL_GPL(zs_malloc);
 
-void zs_free(struct zs_pool *pool, unsigned long handle)
+static void obj_free(struct zs_pool *pool, struct size_class *class,
+                        unsigned long obj)
 {
         struct link_free *link;
         struct page *first_page, *f_page;
-        unsigned long obj, f_objidx, f_offset;
+        unsigned long f_objidx, f_offset;
         void *vaddr;
-
         int class_idx;
-        struct size_class *class;
         enum fullness_group fullness;
 
-        if (unlikely(!handle))
-                return;
+        BUG_ON(!obj);
 
-        obj = handle_to_obj(handle);
-        free_handle(pool, handle);
         obj_to_location(obj, &f_page, &f_objidx);
         first_page = get_first_page(f_page);
 
         get_zspage_mapping(first_page, &class_idx, &fullness);
-        class = pool->size_class[class_idx];
         f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
 
-        spin_lock(&class->lock);
+        vaddr = kmap_atomic(f_page);
 
         /* Insert this object in containing zspage's freelist */
-        vaddr = kmap_atomic(f_page);
         link = (struct link_free *)(vaddr + f_offset);
         link->next = first_page->freelist;
         kunmap_atomic(vaddr);
         first_page->freelist = (void *)obj;
-
         first_page->inuse--;
-        fullness = fix_fullness_group(pool, first_page);
-
         zs_stat_dec(class, OBJ_USED, 1);
+}
+
+void zs_free(struct zs_pool *pool, unsigned long handle)
+{
+        struct page *first_page, *f_page;
+        unsigned long obj, f_objidx;
+        int class_idx;
+        struct size_class *class;
+        enum fullness_group fullness;
+
+        if (unlikely(!handle))
+                return;
+
+        obj = handle_to_obj(handle);
+        free_handle(pool, handle);
+        obj_to_location(obj, &f_page, &f_objidx);
+        first_page = get_first_page(f_page);
+
+        get_zspage_mapping(first_page, &class_idx, &fullness);
+        class = pool->size_class[class_idx];
+
+        spin_lock(&class->lock);
+        obj_free(pool, class, obj);
+        fullness = fix_fullness_group(class, first_page);
         if (fullness == ZS_EMPTY)
                 zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
                         class->size, class->pages_per_zspage));
-
         spin_unlock(&class->lock);
 
         if (fullness == ZS_EMPTY) {