diff options
author | Manfred Spraul <manfred@colorfullife.com> | 2013-07-08 19:01:20 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2013-10-18 10:45:46 -0400 |
commit | 49f7f31ab27de9bcaf2d12e1d0196400dadf6add (patch) | |
tree | b95d01d51e49db3e79e8d49c8b856b54938612eb /ipc/util.c | |
parent | 5cd37e921753eeff777a522aa78b0cc5a6ff7596 (diff) |
ipc/util.c, ipc_rcu_alloc: cacheline align allocation
commit 196aa0132fc7261f34b10ae1bfb44abc1bc69b3c upstream.
Enforce that ipc_rcu_alloc returns a cacheline aligned pointer on SMP.
Rationale:
The SysV sem code tries to move the main spinlock into a separate
cacheline (____cacheline_aligned_in_smp). This works only if
ipc_rcu_alloc returns cacheline aligned pointers. vmalloc and kmalloc
return cacheline aligned pointers; the implementation of ipc_rcu_alloc
breaks that.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'ipc/util.c')
-rw-r--r-- | ipc/util.c | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/ipc/util.c b/ipc/util.c index a0c139f3d1f3..4704223bfad4 100644 --- a/ipc/util.c +++ b/ipc/util.c | |||
@@ -468,9 +468,7 @@ void ipc_free(void* ptr, int size) | |||
468 | struct ipc_rcu { | 468 | struct ipc_rcu { |
469 | struct rcu_head rcu; | 469 | struct rcu_head rcu; |
470 | atomic_t refcount; | 470 | atomic_t refcount; |
471 | /* "void *" makes sure alignment of following data is sane. */ | 471 | } ____cacheline_aligned_in_smp; |
472 | void *data[0]; | ||
473 | }; | ||
474 | 472 | ||
475 | /** | 473 | /** |
476 | * ipc_rcu_alloc - allocate ipc and rcu space | 474 | * ipc_rcu_alloc - allocate ipc and rcu space |
@@ -488,12 +486,14 @@ void *ipc_rcu_alloc(int size) | |||
488 | if (unlikely(!out)) | 486 | if (unlikely(!out)) |
489 | return NULL; | 487 | return NULL; |
490 | atomic_set(&out->refcount, 1); | 488 | atomic_set(&out->refcount, 1); |
491 | return out->data; | 489 | return out + 1; |
492 | } | 490 | } |
493 | 491 | ||
494 | int ipc_rcu_getref(void *ptr) | 492 | int ipc_rcu_getref(void *ptr) |
495 | { | 493 | { |
496 | return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu, data)->refcount); | 494 | struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; |
495 | |||
496 | return atomic_inc_not_zero(&p->refcount); | ||
497 | } | 497 | } |
498 | 498 | ||
499 | /** | 499 | /** |
@@ -507,7 +507,7 @@ static void ipc_schedule_free(struct rcu_head *head) | |||
507 | 507 | ||
508 | void ipc_rcu_putref(void *ptr) | 508 | void ipc_rcu_putref(void *ptr) |
509 | { | 509 | { |
510 | struct ipc_rcu *p = container_of(ptr, struct ipc_rcu, data); | 510 | struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; |
511 | 511 | ||
512 | if (!atomic_dec_and_test(&p->refcount)) | 512 | if (!atomic_dec_and_test(&p->refcount)) |
513 | return; | 513 | return; |