author		Paul Mundt <lethal@linux-sh.org>	2008-06-06 01:46:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-06-06 14:29:09 -0400
commit		6cfd53fc03670c7a544a56d441eb1a6cc800d72b (patch)
tree		806badd9dc55ff81822694037ff10425b99eacaa
parent		6f09bdfc717a0e1a89a029001484d5a195faab64 (diff)
nommu: fix kobjsize() for SLOB and SLUB
kobjsize() has been abusing page->index as a method for sorting out compound order, which blows up both for page cache pages and for SLOB's reuse of the index in struct slob_page. Presently we are not able to accurately size arbitrary pointers that don't come from kmalloc(), so the best we can do is sort out the compound order from the head page if it's a compound page, or default to 0-order if it's impossible to ksize() the object.

Obviously this leaves quite a bit to be desired in terms of object sizing accuracy, but the behaviour is unchanged over the existing implementation, while fixing the page->index oopses originally reported here:

	http://marc.info/?l=linux-mm&m=121127773325245&w=2

Accuracy could also be improved by having SLUB and SLOB both set PG_slab on ksizeable pages, rather than just handling the __GFP_COMP cases regardless of the PG_slab setting, as made possible by Pekka's patches:

	http://marc.info/?l=linux-kernel&m=121139439900534&w=2
	http://marc.info/?l=linux-kernel&m=121139440000537&w=2
	http://marc.info/?l=linux-kernel&m=121139440000540&w=2

This is primarily a bugfix for nommu systems for 2.6.26, with the aim being to gradually kill off kobjsize() and its particular brand of object abuse entirely.

Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Acked-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
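To make the failure mode concrete: on a compound page the allocation order is recorded in the compound-page metadata on the head page, whereas page->index holds the file offset for page cache pages and is reused outright by SLOB's struct slob_page. A minimal kernel-style sketch of the distinction follows; size_of_compound() is a hypothetical helper for illustration only, not part of this patch:

#include <linux/mm.h>

/* Hypothetical helper illustrating the fix; not part of the patch. */
static unsigned long size_of_compound(struct page *page)
{
	/*
	 * Broken: page->index is the file offset on page cache pages,
	 * and SLOB overlays its own bookkeeping on the same word, so
	 * shifting by it yields garbage sizes (or trips the old
	 * BUG_ON() range checks):
	 *
	 *	return PAGE_SIZE << page->index;
	 */

	/*
	 * Correct: read the order from the compound metadata on the
	 * head page, and fall back to a single page otherwise.
	 */
	if (PageCompound(page))
		return PAGE_SIZE << compound_order(page);

	return PAGE_SIZE;
}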
-rw-r--r--	mm/nommu.c	30
1 file changed, 26 insertions(+), 4 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index dca93fcb8b7a..3abd0845bda4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -104,21 +104,43 @@ EXPORT_SYMBOL(vmtruncate);
 unsigned int kobjsize(const void *objp)
 {
 	struct page *page;
+	int order = 0;
 
 	/*
 	 * If the object we have should not have ksize performed on it,
 	 * return size of 0
 	 */
-	if (!objp || (unsigned long)objp >= memory_end || !((page = virt_to_page(objp))))
+	if (!objp)
 		return 0;
 
+	if ((unsigned long)objp >= memory_end)
+		return 0;
+
+	page = virt_to_head_page(objp);
+	if (!page)
+		return 0;
+
+	/*
+	 * If the allocator sets PageSlab, we know the pointer came from
+	 * kmalloc().
+	 */
 	if (PageSlab(page))
 		return ksize(objp);
 
-	BUG_ON(page->index < 0);
-	BUG_ON(page->index >= MAX_ORDER);
+	/*
+	 * The ksize() function is only guaranteed to work for pointers
+	 * returned by kmalloc(). So handle arbitrary pointers, that we expect
+	 * always to be compound pages, here.
+	 */
+	if (PageCompound(page))
+		order = compound_order(page);
 
-	return (PAGE_SIZE << page->index);
+	/*
+	 * Finally, handle arbitrary pointers that don't set PageSlab.
+	 * Default to 0-order in the case when we're unable to ksize()
+	 * the object.
+	 */
+	return PAGE_SIZE << order;
 }
 
 /*
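For context, the main consumers of kobjsize() on nommu are the /proc memory-accounting paths, which only need best-effort per-object sizes. Below is a simplified sketch of such a caller, loosely modeled on the accounting in fs/proc/task_nommu.c; estimate_region_mem() is a hypothetical name and the real code's bookkeeping differs:

#include <linux/mm.h>

/* Illustrative only: sums rough object sizes the way the nommu
 * /proc accounting does, relying on kobjsize()'s best-effort answer. */
static unsigned long estimate_region_mem(struct vm_area_struct *vma)
{
	unsigned long bytes = 0;

	/* The VMA bookkeeping struct is slab-allocated: PageSlab is set,
	 * so kobjsize() returns an exact ksize(). */
	bytes += kobjsize(vma);

	/* The backing memory on nommu is a raw multi-page allocation:
	 * no PageSlab, so kobjsize() falls back to the compound order
	 * of the head page (or 0-order when it cannot tell). */
	bytes += kobjsize((void *)vma->vm_start);

	return bytes;
}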