-rw-r--r--  include/linux/memcontrol.h  2
-rw-r--r--  include/linux/slab.h        2
-rw-r--r--  mm/slab.h                   2
3 files changed, 3 insertions, 3 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 792c8981e633..30b02e79610e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -765,7 +765,7 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
 void __memcg_kmem_uncharge(struct page *page, int order);
 
 /*
- * helper for acessing a memcg's index. It will be used as an index in the
+ * helper for accessing a memcg's index. It will be used as an index in the
  * child cache array in kmem_cache, and also to derive its name. This function
  * will return -1 when this is not a kmem-limited memcg.
  */
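
Note: the comment patched above describes an accessor that maps a memcg to its kmem cache index. A minimal sketch of such an accessor, assuming the id is cached in a kmemcg_id field (the function and field names here are illustrative, not confirmed by this diff):

/*
 * Sketch only: return the memcg's kmem index, or -1 when kmem
 * accounting is not enabled for this memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}
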
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9d9a5bdb9b00..5d49f0c60dcb 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -314,7 +314,7 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment
 void kmem_cache_free(struct kmem_cache *, void *);
 
 /*
- * Bulk allocation and freeing operations. These are accellerated in an
+ * Bulk allocation and freeing operations. These are accelerated in an
  * allocator specific way to avoid taking locks repeatedly or building
  * metadata structures unnecessarily.
  *
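
Note: the comment patched above documents the bulk alloc/free API. A hedged caller-side sketch, assuming the kmem_cache_alloc_bulk()/kmem_cache_free_bulk() entry points declared below this comment in slab.h; the helper and cache names are illustrative only:

#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical caller: allocate a batch in one call, free it in one call. */
static void demo_bulk(struct kmem_cache *cache)
{
	void *objs[16];

	/* Returns 0 if the batch could not be fully allocated. */
	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
		return;

	/* ... use the 16 objects ... */

	/* Hand the whole batch back to the cache in one call. */
	kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
}
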
diff --git a/mm/slab.h b/mm/slab.h
index 6c7f16a44386..e880bbe91973 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -172,7 +172,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 /*
  * Generic implementation of bulk operations
  * These are useful for situations in which the allocator cannot
- * perform optimizations. In that case segments of the objecct listed
+ * perform optimizations. In that case segments of the object listed
  * may be allocated or freed using these operations.
  */
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
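
Note: the "generic implementation" the comment refers to is the fallback used when an allocator has no optimized bulk path. A minimal sketch of such a fallback, simply looping over the single-object entry points; the __kmem_cache_alloc_bulk() signature shown is assumed, and the real code may differ:

/* Sketch only: free each object in the array individually. */
void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++)
		kmem_cache_free(s, p[i]);
}

/* Sketch only: allocate nr objects one by one, unwinding on failure. */
int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
			    void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = kmem_cache_alloc(s, flags);

		if (!x) {
			/* Partial failure: release what was already allocated. */
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
		p[i] = x;
	}
	return i;
}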