author	Minchan Kim <minchan@kernel.org>	2016-07-26 18:23:37 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-26 19:19:19 -0400
commit	3b1d9ca65a80ced8ae737ffb11ae939334a882ca
tree	db2019a112c6c134f69523c7d78f4078415e78c6 /mm
parent	9bc482d3460501ac809457af26b46b72cd7dc212
zsmalloc: use OBJ_TAG_BIT for bit shifter
A static checker warns about using a tag as a bit shifter. It doesn't
break the current behavior, but it hurts readability. Let's use
OBJ_TAG_BITS as the bit shifter instead of OBJ_ALLOCATED_TAG.
Link: http://lkml.kernel.org/r/20160607045146.GF26230@bbox
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
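
For context, the distinction the rename clarifies: OBJ_TAG_BITS is the
number of low bits reserved for tags in a link/handle word, while
OBJ_ALLOCATED_TAG is the flag value stored in those bits. In the zsmalloc
source of this era both happen to equal 1, which is why shifting by the
tag value produced correct results; the patch only makes the intent
explicit. A minimal user-space sketch of that convention (the macro
values mirror mm/zsmalloc.c; the demo itself is hypothetical, not kernel
code):

	#include <stdio.h>

	/* Numerically equal here, semantically different: a bit *count*
	 * vs. a flag *value*, mirroring the definitions in mm/zsmalloc.c. */
	#define OBJ_TAG_BITS      1
	#define OBJ_ALLOCATED_TAG 1UL

	int main(void)
	{
		unsigned long freeobj = 42;

		/* Encode a free-list index: shift by the tag width... */
		unsigned long next = freeobj << OBJ_TAG_BITS;
		/* ...and decode it with the same shift. */
		printf("decoded index: %lu\n", next >> OBJ_TAG_BITS);

		/* The tag value is OR'ed into the low bits,
		 * never used as a shift amount. */
		unsigned long handle =
			(freeobj << OBJ_TAG_BITS) | OBJ_ALLOCATED_TAG;
		printf("allocated: %s\n",
		       (handle & OBJ_ALLOCATED_TAG) ? "yes" : "no");
		return 0;
	}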
Diffstat (limited to 'mm')
-rw-r--r--	mm/zsmalloc.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 04a4f063b4fd..6b6986a02aa0 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1052,7 +1052,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 		link = (struct link_free *)vaddr + off / sizeof(*link);
 
 		while ((off += class->size) < PAGE_SIZE) {
-			link->next = freeobj++ << OBJ_ALLOCATED_TAG;
+			link->next = freeobj++ << OBJ_TAG_BITS;
 			link += class->size / sizeof(*link);
 		}
 
@@ -1063,13 +1063,13 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 		 */
 		next_page = get_next_page(page);
 		if (next_page) {
-			link->next = freeobj++ << OBJ_ALLOCATED_TAG;
+			link->next = freeobj++ << OBJ_TAG_BITS;
 		} else {
 			/*
-			 * Reset OBJ_ALLOCATED_TAG bit to last link to tell
+			 * Reset OBJ_TAG_BITS bit to last link to tell
 			 * whether it's allocated object or not.
 			 */
-			link->next = -1 << OBJ_ALLOCATED_TAG;
+			link->next = -1 << OBJ_TAG_BITS;
 		}
 		kunmap_atomic(vaddr);
 		page = next_page;
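
A note on the terminal link in the hunk above: -1 << OBJ_TAG_BITS
produces a word with every bit set except the low tag bit(s), so the
last free object in a zspage reads as "not allocated" when the tag is
tested. A hypothetical user-space check (same assumed macro values as
the sketch above; ~0UL is used instead of -1 to sidestep the
signed-shift caveat in plain C):

	#include <stdio.h>

	#define OBJ_TAG_BITS      1
	#define OBJ_ALLOCATED_TAG 1UL

	int main(void)
	{
		/* Equivalent to the kernel's -1 << OBJ_TAG_BITS:
		 * all index bits set, tag bit(s) clear. */
		unsigned long last = ~0UL << OBJ_TAG_BITS;

		printf("tag clear on last link: %s\n",
		       (last & OBJ_ALLOCATED_TAG) ? "no" : "yes");
		return 0;
	}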
@@ -1514,7 +1514,7 @@ static unsigned long obj_malloc(struct size_class *class,
 
 	vaddr = kmap_atomic(m_page);
 	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
-	set_freeobj(zspage, link->next >> OBJ_ALLOCATED_TAG);
+	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
 	if (likely(!PageHugeObject(m_page)))
 		/* record handle in the header of allocated chunk */
 		link->handle = handle;
@@ -1616,7 +1616,7 @@ static void obj_free(struct size_class *class, unsigned long obj)
 
 	/* Insert this object in containing zspage's freelist */
 	link = (struct link_free *)(vaddr + f_offset);
-	link->next = get_freeobj(zspage) << OBJ_ALLOCATED_TAG;
+	link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
 	kunmap_atomic(vaddr);
 	set_freeobj(zspage, f_objidx);
 	mod_zspage_inuse(zspage, -1);
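
Taken together, the last two hunks are the pop and push halves of the
per-zspage free list: obj_malloc() recovers the next free index with
link->next >> OBJ_TAG_BITS, and obj_free() re-encodes the old head with
get_freeobj(zspage) << OBJ_TAG_BITS. A compressed, hypothetical
user-space model of that pairing (array indices stand in for the
link_free chain; the names are illustrative, not the kernel's):

	#include <stdio.h>

	#define OBJ_TAG_BITS 1

	/* next[] models the link_free chain; head models the zspage's
	 * cached free-object index (get_freeobj()/set_freeobj()). */
	static unsigned long next[8];
	static unsigned long head;

	/* Pop the head index, as obj_malloc() does. */
	static unsigned long model_malloc(void)
	{
		unsigned long obj = head;
		head = next[obj] >> OBJ_TAG_BITS;
		return obj;
	}

	/* Push an index back, as obj_free() does. */
	static void model_free(unsigned long obj)
	{
		next[obj] = head << OBJ_TAG_BITS;
		head = obj;
	}

	int main(void)
	{
		/* Chain 0 -> 1 -> 2 -> ..., as init_zspage() builds it. */
		for (unsigned long i = 0; i < 8; i++)
			next[i] = (i + 1) << OBJ_TAG_BITS;
		head = 0;

		unsigned long a = model_malloc();
		unsigned long b = model_malloc();
		model_free(a);
		printf("allocated %lu and %lu; free head is now %lu\n",
		       a, b, head);
		return 0;
	}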