diff options
author | Dave Hansen <dave.hansen@linux.intel.com> | 2014-01-28 17:24:50 -0500 |
---|---|---|
committer | Pekka Enberg <penberg@kernel.org> | 2014-01-31 06:40:34 -0500 |
commit | 433a91ff5fa19e3eb70b12f7056f234aebd09ac2 (patch) | |
tree | 9d0af35284088374a2a203cde24c4a7360d7abec | |
parent | 26e4f2057516f1c457e0e95346a00303f983ad53 (diff) |
mm: sl[uo]b: fix misleading comments
On x86, SLUB creates and handles <=8192-byte allocations internally.
It passes larger ones up to the allocator. Saying "up to order 2" is,
at best, ambiguous. Is that order-1? Or (order-2 bytes)? Make
it more clear.
SLOB commits a similar sin. It *handles* page-size requests, but the
comment says that it passes up "all page size and larger requests".
SLOB also swaps around the order of the very-similarly-named
KMALLOC_SHIFT_HIGH and KMALLOC_SHIFT_MAX #defines. Make it
consistent with the order of the other two allocators.
Cc: Matt Mackall <mpm@selenic.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
-rw-r--r-- | include/linux/slab.h | 8 |
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h index 1e2f4fe12773..f76e956b4011 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -205,8 +205,8 @@ struct kmem_cache { | |||
205 | 205 | ||
206 | #ifdef CONFIG_SLUB | 206 | #ifdef CONFIG_SLUB |
207 | /* | 207 | /* |
208 | * SLUB allocates up to order 2 pages directly and otherwise | 208 | * SLUB directly allocates requests fitting in to an order-1 page |
209 | * passes the request to the page allocator. | 209 | * (PAGE_SIZE*2). Larger requests are passed to the page allocator. |
210 | */ | 210 | */ |
211 | #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) | 211 | #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) |
212 | #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT) | 212 | #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT) |
@@ -217,12 +217,12 @@ struct kmem_cache { | |||
217 | 217 | ||
218 | #ifdef CONFIG_SLOB | 218 | #ifdef CONFIG_SLOB |
219 | /* | 219 | /* |
220 | * SLOB passes all page size and larger requests to the page allocator. | 220 | * SLOB passes all requests larger than one page to the page allocator. |
221 | * No kmalloc array is necessary since objects of different sizes can | 221 | * No kmalloc array is necessary since objects of different sizes can |
222 | * be allocated from the same page. | 222 | * be allocated from the same page. |
223 | */ | 223 | */ |
224 | #define KMALLOC_SHIFT_MAX 30 | ||
225 | #define KMALLOC_SHIFT_HIGH PAGE_SHIFT | 224 | #define KMALLOC_SHIFT_HIGH PAGE_SHIFT |
225 | #define KMALLOC_SHIFT_MAX 30 | ||
226 | #ifndef KMALLOC_SHIFT_LOW | 226 | #ifndef KMALLOC_SHIFT_LOW |
227 | #define KMALLOC_SHIFT_LOW 3 | 227 | #define KMALLOC_SHIFT_LOW 3 |
228 | #endif | 228 | #endif |