author     Chris Wilson <chris@chris-wilson.co.uk>        2010-07-28 17:59:02 -0400
committer  Catalin Marinas <catalin.marinas@arm.com>      2010-07-28 17:59:02 -0400
commit     b94de9bb7519f597a3aed521d5eaeb5b02a7cbc0 (patch)
tree       1c2771372fc23c55dd678080aa6529c98f8de371 /lib
parent     a2b6bf63cb7a3e34bd2e753a6f2c2776b5c8496f (diff)
lib/scatterlist: Hook sg_kmalloc into kmemleak (v2)
kmemleak ignores page_alloc() and so believes the final sub-page
allocation using the plain kmalloc is decoupled and lost. This leads to
lots of false positives with code that uses scatterlists.

The options seem to be either to tell kmemleak that the kmalloc is not
leaked, or to notify kmemleak of the page allocations. The danger of the
first approach is that we may hide a real leak, so choose the latter
approach (whose downsides I am not sure of).

v2: Added comments on the suggestion of Catalin.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <jaxboe@fusionio.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
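(For reference, the first, rejected option would have suppressed the report on
the final kmalloc'ed chunk instead of teaching kmemleak about the chained
pages. A minimal sketch of that alternative inside sg_kmalloc(), not part of
this patch, using the existing kmemleak_not_leak() annotation:)

	/* Alternative NOT taken by this patch: hide the final sub-page
	 * kmalloc from kmemleak rather than tracking the chained pages.
	 * The downside is that a genuine leak of this allocation would
	 * be hidden as well. */
	struct scatterlist *sg;

	sg = kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
	kmemleak_not_leak(sg);	/* never report this object as leaked */
	return sg;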
Diffstat (limited to 'lib')
-rw-r--r--  lib/scatterlist.c  23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 9afa25b52a83..a5ec42868f99 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
 #include <linux/highmem.h>
+#include <linux/kmemleak.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -115,17 +116,29 @@ EXPORT_SYMBOL(sg_init_one);
  */
 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
 {
-	if (nents == SG_MAX_SINGLE_ALLOC)
-		return (struct scatterlist *) __get_free_page(gfp_mask);
-	else
+	if (nents == SG_MAX_SINGLE_ALLOC) {
+		/*
+		 * Kmemleak doesn't track page allocations as they are not
+		 * commonly used (in a raw form) for kernel data structures.
+		 * As we chain together a list of pages and then a normal
+		 * kmalloc (tracked by kmemleak), in order to for that last
+		 * allocation not to become decoupled (and thus a
+		 * false-positive) we need to inform kmemleak of all the
+		 * intermediate allocations.
+		 */
+		void *ptr = (void *) __get_free_page(gfp_mask);
+		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
+		return ptr;
+	} else
 		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
 }
 
 static void sg_kfree(struct scatterlist *sg, unsigned int nents)
 {
-	if (nents == SG_MAX_SINGLE_ALLOC)
+	if (nents == SG_MAX_SINGLE_ALLOC) {
+		kmemleak_free(sg);
 		free_page((unsigned long) sg);
-	else
+	} else
 		kfree(sg);
 }
 
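
(As context for where sg_kmalloc()/sg_kfree() are exercised: __sg_alloc_table()
only falls back to whole-page chunks when a table needs more than
SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) entries. A hedged
sketch of a caller that hits the newly instrumented path, assuming the
architecture supports scatterlist chaining; the function name is hypothetical
and error handling is trimmed:)

	#include <linux/scatterlist.h>

	static int example_big_sg_table(void)
	{
		struct sg_table table;
		int ret;

		/* More entries than fit in one page: __sg_alloc_table()
		 * chains full-page blocks through sg_kmalloc(), which
		 * kmemleak now tracks thanks to this patch. */
		ret = sg_alloc_table(&table, 2 * SG_MAX_SINGLE_ALLOC, GFP_KERNEL);
		if (ret)
			return ret;

		/* ... fill and use the scatterlist ... */

		sg_free_table(&table);	/* chained pages released via sg_kfree() */
		return 0;
	}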