author	Joonsoo Kim <iamjoonsoo.kim@lge.com>	2015-02-10 17:09:32 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-10 17:30:30 -0500
commit	9aabf810a67cd97e2d1a48f0bab338b7680f1929 (patch)
tree	f0d7dbb9011d8bcf689f1f5f099de1297eeae5d3	/mm/slub.c
parent	913e027ca17ee06fa9436a21e54464795b0fa0e8 (diff)
mm/slub: optimize alloc/free fastpath by removing preemption on/off
We had to insert a preempt enable/disable in the fastpath a while ago in
order to guarantee that tid and kmem_cache_cpu are retrieved on the same
cpu.  This is a problem only for CONFIG_PREEMPT, where the scheduler can
move the process to another cpu while the data is being retrieved.

Now, I have reached a solution that removes the preempt enable/disable
from the fastpath.  If the tid matches kmem_cache_cpu's tid after both
are retrieved by separate this_cpu operations, it means they were
retrieved on the same cpu.  If they don't match, we just retry.  With
this guarantee, preemption enable/disable isn't needed at all, even with
CONFIG_PREEMPT, so this patch removes it.

I saw roughly a 5% win in a fast-path loop over kmem_cache_alloc/free
in CONFIG_PREEMPT (14.821 ns -> 14.049 ns).

Below is the result of Christoph's slab_test reported by Jesper
Dangaard Brouer.

* Before

 Single thread testing
 =====================
 1. Kmalloc: Repeatedly allocate then free test
 10000 times kmalloc(8) -> 49 cycles kfree -> 62 cycles
 10000 times kmalloc(16) -> 48 cycles kfree -> 64 cycles
 10000 times kmalloc(32) -> 53 cycles kfree -> 70 cycles
 10000 times kmalloc(64) -> 64 cycles kfree -> 77 cycles
 10000 times kmalloc(128) -> 74 cycles kfree -> 84 cycles
 10000 times kmalloc(256) -> 84 cycles kfree -> 114 cycles
 10000 times kmalloc(512) -> 83 cycles kfree -> 116 cycles
 10000 times kmalloc(1024) -> 81 cycles kfree -> 120 cycles
 10000 times kmalloc(2048) -> 104 cycles kfree -> 136 cycles
 10000 times kmalloc(4096) -> 142 cycles kfree -> 165 cycles
 10000 times kmalloc(8192) -> 238 cycles kfree -> 226 cycles
 10000 times kmalloc(16384) -> 403 cycles kfree -> 264 cycles

 2. Kmalloc: alloc/free test
 10000 times kmalloc(8)/kfree -> 68 cycles
 10000 times kmalloc(16)/kfree -> 68 cycles
 10000 times kmalloc(32)/kfree -> 69 cycles
 10000 times kmalloc(64)/kfree -> 68 cycles
 10000 times kmalloc(128)/kfree -> 68 cycles
 10000 times kmalloc(256)/kfree -> 68 cycles
 10000 times kmalloc(512)/kfree -> 74 cycles
 10000 times kmalloc(1024)/kfree -> 75 cycles
 10000 times kmalloc(2048)/kfree -> 74 cycles
 10000 times kmalloc(4096)/kfree -> 74 cycles
 10000 times kmalloc(8192)/kfree -> 75 cycles
 10000 times kmalloc(16384)/kfree -> 510 cycles

* After

 Single thread testing
 =====================
 1. Kmalloc: Repeatedly allocate then free test
 10000 times kmalloc(8) -> 46 cycles kfree -> 61 cycles
 10000 times kmalloc(16) -> 46 cycles kfree -> 63 cycles
 10000 times kmalloc(32) -> 49 cycles kfree -> 69 cycles
 10000 times kmalloc(64) -> 57 cycles kfree -> 76 cycles
 10000 times kmalloc(128) -> 66 cycles kfree -> 83 cycles
 10000 times kmalloc(256) -> 84 cycles kfree -> 110 cycles
 10000 times kmalloc(512) -> 77 cycles kfree -> 114 cycles
 10000 times kmalloc(1024) -> 80 cycles kfree -> 116 cycles
 10000 times kmalloc(2048) -> 102 cycles kfree -> 131 cycles
 10000 times kmalloc(4096) -> 135 cycles kfree -> 163 cycles
 10000 times kmalloc(8192) -> 238 cycles kfree -> 218 cycles
 10000 times kmalloc(16384) -> 399 cycles kfree -> 262 cycles

 2. Kmalloc: alloc/free test
 10000 times kmalloc(8)/kfree -> 65 cycles
 10000 times kmalloc(16)/kfree -> 66 cycles
 10000 times kmalloc(32)/kfree -> 65 cycles
 10000 times kmalloc(64)/kfree -> 66 cycles
 10000 times kmalloc(128)/kfree -> 66 cycles
 10000 times kmalloc(256)/kfree -> 71 cycles
 10000 times kmalloc(512)/kfree -> 72 cycles
 10000 times kmalloc(1024)/kfree -> 71 cycles
 10000 times kmalloc(2048)/kfree -> 71 cycles
 10000 times kmalloc(4096)/kfree -> 71 cycles
 10000 times kmalloc(8192)/kfree -> 65 cycles
 10000 times kmalloc(16384)/kfree -> 511 cycles

Most of the results are better than before.

Note that this change slightly worsens performance in !CONFIG_PREEMPT,
by roughly 0.3%.  Implementing each case separately would help
performance, but, since it is so marginal, I didn't do that.  Keeping
the same code for all cases also helps maintenance.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
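For illustration only, here is a minimal userspace sketch of the retry
idiom this patch introduces.  It is not kernel code: struct pcpu,
slots[] and sim_cpu() are hypothetical stand-ins for kmem_cache_cpu,
s->cpu_slab and smp_processor_id(), and the random cpu flip between the
two reads models preemption migrating the task.

#include <stdio.h>
#include <stdlib.h>

struct pcpu { unsigned long tid; };

/* Two fake "cpus"; tids are globally unique per cpu, as in SLUB. */
static struct pcpu slots[2] = { { .tid = 100 }, { .tid = 201 } };

/* Stand-in for smp_processor_id(); flips randomly to model preemption. */
static int sim_cpu(void)
{
	return rand() % 2;
}

int main(void)
{
	struct pcpu *c;
	unsigned long tid;
	int tries = 0;

	/*
	 * The idiom added by the patch: sample the tid and the per-cpu
	 * pointer with two independent reads, then retry until both
	 * reads demonstrably hit the same "cpu" (the tids match).  No
	 * preempt_disable()/preempt_enable() pair is required.
	 */
	do {
		tid = slots[sim_cpu()].tid;  /* this_cpu_read(s->cpu_slab->tid) */
		c = &slots[sim_cpu()];       /* raw_cpu_ptr(s->cpu_slab) */
		tries++;
	} while (tid != c->tid);

	printf("consistent pair after %d attempt(s): tid=%lu\n", tries, tid);
	return 0;
}

In the kernel the extra comparison is nearly free: both reads are
per-cpu loads, and the IS_ENABLED(CONFIG_PREEMPT) guard in the real
loop condition lets the compiler drop the check entirely when the task
cannot migrate.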
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	35
1 file changed, 23 insertions(+), 12 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index fe376fe1f4fe..e7ed6f8304f4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2398,13 +2398,24 @@ redo:
 	 * reading from one cpu area. That does not matter as long
 	 * as we end up on the original cpu again when doing the cmpxchg.
 	 *
-	 * Preemption is disabled for the retrieval of the tid because that
-	 * must occur from the current processor. We cannot allow rescheduling
-	 * on a different processor between the determination of the pointer
-	 * and the retrieval of the tid.
+	 * We should guarantee that tid and kmem_cache are retrieved on
+	 * the same cpu. It could be different if CONFIG_PREEMPT so we need
+	 * to check if it is matched or not.
 	 */
-	preempt_disable();
-	c = this_cpu_ptr(s->cpu_slab);
+	do {
+		tid = this_cpu_read(s->cpu_slab->tid);
+		c = raw_cpu_ptr(s->cpu_slab);
+	} while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid));
+
+	/*
+	 * Irqless object alloc/free algorithm used here depends on sequence
+	 * of fetching cpu_slab's data. tid should be fetched before anything
+	 * on c to guarantee that object and page associated with previous tid
+	 * won't be used with current tid. If we fetch tid first, object and
+	 * page could be one associated with next tid and our alloc/free
+	 * request will be failed. In this case, we will retry. So, no problem.
+	 */
+	barrier();
 
 	/*
 	 * The transaction ids are globally unique per cpu and per operation on
@@ -2412,8 +2423,6 @@ redo:
 	 * occurs on the right processor and that there was no operation on the
 	 * linked list in between.
 	 */
-	tid = c->tid;
-	preempt_enable();
 
 	object = c->freelist;
 	page = c->page;
@@ -2659,11 +2668,13 @@ redo:
 	 * data is retrieved via this pointer. If we are on the same cpu
 	 * during the cmpxchg then the free will succedd.
 	 */
-	preempt_disable();
-	c = this_cpu_ptr(s->cpu_slab);
+	do {
+		tid = this_cpu_read(s->cpu_slab->tid);
+		c = raw_cpu_ptr(s->cpu_slab);
+	} while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid));
 
-	tid = c->tid;
-	preempt_enable();
+	/* Same with comment on barrier() in slab_alloc_node() */
+	barrier();
 
 	if (likely(page == c->page)) {
 		set_freepointer(s, object, c->freelist);
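A stale pair that slips past this check is still harmless: the fastpath
commits its update with this_cpu_cmpxchg_double(), which swaps freelist
and tid as a single unit, so any operation that ran in between has
already advanced the tid, the commit fails, and control goes back to
the redo: label.  Below is a rough userspace analogue of that failure
mode, using a C11 atomic struct in place of the per-cpu double-word
cmpxchg; all names are illustrative, and some toolchains need -latomic
or -mcx16 for a 16-byte compare-exchange.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the (freelist, tid) pair that the kernel updates
 * atomically with this_cpu_cmpxchg_double(). */
struct pair {
	void *freelist;
	uintptr_t tid;
};

static _Atomic struct pair cpu_pair;

int main(void)
{
	/* Speculative read, as in the patched fastpath. */
	struct pair seen = atomic_load(&cpu_pair);

	/* Pretend another context ran in between and advanced the tid
	 * (the kernel bumps it on every operation to keep tids unique). */
	struct pair racing = { seen.freelist, seen.tid + 1 };
	atomic_store(&cpu_pair, racing);

	/* The commit fails because the tid moved, forcing a retry (redo:). */
	struct pair next = { NULL, seen.tid + 1 };
	bool ok = atomic_compare_exchange_strong(&cpu_pair, &seen, next);
	printf("commit %s\n", ok ? "succeeded" : "failed -> retry");
	return 0;
}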