Diffstat (limited to 'mm')
-rw-r--r--  mm/nommu.c | 30
1 file changed, 26 insertions(+), 4 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index dca93fcb8b7a..3abd0845bda4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -104,21 +104,43 @@ EXPORT_SYMBOL(vmtruncate);
 unsigned int kobjsize(const void *objp)
 {
 	struct page *page;
+	int order = 0;
 
 	/*
 	 * If the object we have should not have ksize performed on it,
 	 * return size of 0
 	 */
-	if (!objp || (unsigned long)objp >= memory_end || !((page = virt_to_page(objp))))
+	if (!objp)
 		return 0;
 
+	if ((unsigned long)objp >= memory_end)
+		return 0;
+
+	page = virt_to_head_page(objp);
+	if (!page)
+		return 0;
+
+	/*
+	 * If the allocator sets PageSlab, we know the pointer came from
+	 * kmalloc().
+	 */
 	if (PageSlab(page))
 		return ksize(objp);
 
-	BUG_ON(page->index < 0);
-	BUG_ON(page->index >= MAX_ORDER);
+	/*
+	 * The ksize() function is only guaranteed to work for pointers
+	 * returned by kmalloc(). So handle arbitrary pointers, that we expect
+	 * always to be compound pages, here.
+	 */
+	if (PageCompound(page))
+		order = compound_order(page);
 
-	return (PAGE_SIZE << page->index);
+	/*
+	 * Finally, handle arbitrary pointers that don't set PageSlab.
+	 * Default to 0-order in the case when we're unable to ksize()
+	 * the object.
+	 */
+	return PAGE_SIZE << order;
 }
 
 /*
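
As a rough illustration of the two paths the reworked kobjsize() distinguishes, here is a minimal, hypothetical sketch (not part of the patch; the demo function, buffer sizes, and allocation order are assumptions) that queries a kmalloc() object via the PageSlab/ksize() path and a __GFP_COMP page allocation via the compound_order() path:

/* Hypothetical demo, not part of this patch: exercise kobjsize() on a
 * nommu kernel for the two kinds of pointers the rework distinguishes. */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/gfp.h>

static void kobjsize_demo(void)
{
	/* PageSlab path: kobjsize() defers to ksize() for slab objects. */
	void *slab_obj = kmalloc(64, GFP_KERNEL);

	/* Compound-page path: __GFP_COMP makes the head page carry the order,
	 * so compound_order() can recover the allocation size. */
	struct page *pages = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);

	if (slab_obj)
		printk(KERN_INFO "kmalloc obj: %u bytes\n", kobjsize(slab_obj));
	if (pages)
		printk(KERN_INFO "page-backed obj: %u bytes\n",
		       kobjsize(page_address(pages)));

	kfree(slab_obj);
	if (pages)
		__free_pages(pages, 2);
}

An allocation made without __GFP_COMP is not a compound page, so it would fall through to the 0-order default that the final comment in the hunk describes.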