 mm/nommu.c | 21 +++------------------
 1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 3abd0845bda4..4462b6a3fcb9 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -104,21 +104,15 @@ EXPORT_SYMBOL(vmtruncate);
 unsigned int kobjsize(const void *objp)
 {
 	struct page *page;
-	int order = 0;
 
 	/*
 	 * If the object we have should not have ksize performed on it,
 	 * return size of 0
 	 */
-	if (!objp)
-		return 0;
-
-	if ((unsigned long)objp >= memory_end)
+	if (!objp || !virt_addr_valid(objp))
 		return 0;
 
 	page = virt_to_head_page(objp);
-	if (!page)
-		return 0;
 
 	/*
 	 * If the allocator sets PageSlab, we know the pointer came from
@@ -129,18 +123,9 @@ unsigned int kobjsize(const void *objp)
 
 	/*
 	 * The ksize() function is only guaranteed to work for pointers
-	 * returned by kmalloc(). So handle arbitrary pointers, that we expect
-	 * always to be compound pages, here.
-	 */
-	if (PageCompound(page))
-		order = compound_order(page);
-
-	/*
-	 * Finally, handle arbitrary pointers that don't set PageSlab.
-	 * Default to 0-order in the case when we're unable to ksize()
-	 * the object.
+	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
-	return PAGE_SIZE << order;
+	return PAGE_SIZE << compound_order(page);
 }
 
 /*
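
For reference, kobjsize() as it reads after this patch can be pieced together from the two hunks above. The PageSlab branch that sits between the hunks is not part of the diff and is unchanged by it, so it is indicated only by a placeholder comment; this is a readability sketch, not the full file contents:

unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * ... (PageSlab/ksize handling omitted here; it lies between the
	 * two hunks and is not touched by this patch)
	 */

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}

Two properties make the shorter version behave the same: compound_order() evaluates to 0 for a non-compound page, so PAGE_SIZE << compound_order(page) still yields PAGE_SIZE where the old code defaulted order to 0 and dropped the PageCompound() test; and the open-coded memory_end comparison is replaced by the generic virt_addr_valid() helper, after which the removed !page test is effectively dead, since virt_to_head_page() does not return NULL for an address that has already passed that validity check.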