aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--mm/slub.c4
1 files changed, 2 insertions, 2 deletions
diff --git a/mm/slub.c b/mm/slub.c
index e2e6ba7a5172..d821ce6fff39 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1818,7 +1818,7 @@ static int slub_nomerge;
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * be problematic to put into order 0 slabs because there may be too much
-* unused space left. We go to a higher order if more than 1/8th of the slab
+* unused space left. We go to a higher order if more than 1/16th of the slab
 * would be wasted.
 *
 * In order to reach satisfactory performance we must ensure that a minimum
@@ -1883,7 +1883,7 @@ static inline int calculate_order(int size)
 	if (!min_objects)
 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
 	while (min_objects > 1) {
-		fraction = 8;
+		fraction = 16;
 		while (fraction >= 4) {
 			order = slab_order(size, min_objects,
 					slub_max_order, fraction);