author     Al Viro <viro@zeniv.linux.org.uk>  2013-04-29 12:42:09 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-02 11:03:33 -0400
commit     600fe9751aeb6f6b72de84076a05c5b8c04152c0 (patch)
tree       3c02f2627ab4dad3cee03d3c03955376830b5065 /ipc
parent     4ada8db38a44654446fe35ceb20a1972220e0f69 (diff)
ipc_schedule_free() can do vfree() directly now
Commit 32fcfd40715e ("make vfree() safe to call from interrupt contexts")
made it safe to do vfree directly from the RCU callback, which allows us
to simplify ipc/util.c a lot by getting rid of the differences between
vmalloc/kmalloc memory.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
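The enabling change is worth spelling out. RCU callbacks run in softirq
context, where vfree() used to be unsafe; commit 32fcfd40715e lifted that
restriction, so the workqueue deferral machinery removed below became dead
weight. A simplified kernel-style sketch of the before/after shapes, using
hypothetical names (obj_hdr and friends are illustrations, not code from
this patch):

#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

/* Hypothetical header, for illustration only. */
struct obj_hdr {
	struct rcu_head rcu;
	struct work_struct work;
};

/* Old shape: vfree() was unsafe in softirq context, so the actual
 * vfree() had to be deferred to process context via a workqueue.
 */
static void obj_do_vfree(struct work_struct *work)
{
	vfree(container_of(work, struct obj_hdr, work));
}

static void obj_free_rcu_old(struct rcu_head *head)
{
	struct obj_hdr *h = container_of(head, struct obj_hdr, rcu);

	INIT_WORK(&h->work, obj_do_vfree);	/* couldn't vfree() here */
	schedule_work(&h->work);
}

/* New shape: with 32fcfd40715e in place, vfree() is safe from interrupt
 * context, so the RCU callback frees directly and the work_struct (and a
 * whole flavour of header) can go away.
 */
static void obj_free_rcu_new(struct rcu_head *head)
{
	vfree(container_of(head, struct obj_hdr, rcu));
}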
Diffstat (limited to 'ipc')
-rw-r--r--  ipc/util.c | 103
1 file changed, 16 insertions(+), 87 deletions(-)
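For orientation before reading the diff: the deleted code kept three
mutually exclusive headers in front of the payload, padded the allocation
to the largest of them, and relied on container_of() against the data
pointer to recover whichever header was live at the time. Condensed from
the removed lines (illustrative, not the complete original):

/* Condensed from the pre-patch ipc/util.c, for illustration only. */
struct ipc_rcu_hdr {		/* live while the object is in use */
	atomic_t refcount;
	int is_vmalloc;
	void *data[0];
};

struct ipc_rcu_grace {		/* live during the RCU grace period */
	struct rcu_head rcu;
	void *data[0];
};

struct ipc_rcu_sched {		/* live only while a vfree() is queued */
	struct work_struct work;
	void *data[0];
};

/*
 * The headers never coexist, so they share the same memory: the
 * allocation is padded to the largest header and the payload always
 * starts right after the padding, which is why
 * container_of(ptr, <header>, data) works for whichever header is
 * currently live.
 */
#define HDRLEN_KMALLOC	(sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
			 sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
#define HDRLEN_VMALLOC	(sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
			 sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)

The patch collapses all three into the single struct ipc_rcu visible in
the first hunk below.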
diff --git a/ipc/util.c b/ipc/util.c
index abfc13e8677f..809ec5ec8122 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -466,51 +466,13 @@ void ipc_free(void* ptr, int size)
 		kfree(ptr);
 }
 
-/*
- * rcu allocations:
- * There are three headers that are prepended to the actual allocation:
- * - during use: ipc_rcu_hdr.
- * - during the rcu grace period: ipc_rcu_grace.
- * - [only if vmalloc]: ipc_rcu_sched.
- * Their lifetime doesn't overlap, thus the headers share the same memory.
- * Unlike a normal union, they are right-aligned, thus some container_of
- * forward/backward casting is necessary:
- */
-struct ipc_rcu_hdr
-{
-	atomic_t refcount;
-	int is_vmalloc;
-	void *data[0];
-};
-
-
-struct ipc_rcu_grace
-{
+struct ipc_rcu {
 	struct rcu_head rcu;
+	atomic_t refcount;
 	/* "void *" makes sure alignment of following data is sane. */
 	void *data[0];
 };
 
-struct ipc_rcu_sched
-{
-	struct work_struct work;
-	/* "void *" makes sure alignment of following data is sane. */
-	void *data[0];
-};
-
-#define HDRLEN_KMALLOC		(sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
-					sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
-#define HDRLEN_VMALLOC		(sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
-					sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)
-
-static inline int rcu_use_vmalloc(int size)
-{
-	/* Too big for a single page? */
-	if (HDRLEN_KMALLOC + size > PAGE_SIZE)
-		return 1;
-	return 0;
-}
-
 /**
  * ipc_rcu_alloc - allocate ipc and rcu space
  * @size: size desired
@@ -520,74 +482,41 @@ static inline int rcu_use_vmalloc(int size)
  */
 void *ipc_rcu_alloc(int size)
 {
-	void *out;
-
 	/*
-	 * We prepend the allocation with the rcu struct, and
-	 * workqueue if necessary (for vmalloc).
+	 * We prepend the allocation with the rcu struct
 	 */
-	if (rcu_use_vmalloc(size)) {
-		out = vmalloc(HDRLEN_VMALLOC + size);
-		if (!out)
-			goto done;
-
-		out += HDRLEN_VMALLOC;
-		container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
-	} else {
-		out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
-		if (!out)
-			goto done;
-
-		out += HDRLEN_KMALLOC;
-		container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
-	}
-
-	/* set reference counter no matter what kind of allocation was done */
-	atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
-done:
-	return out;
+	struct ipc_rcu *out = ipc_alloc(sizeof(struct ipc_rcu) + size);
+	if (unlikely(!out))
+		return NULL;
+	atomic_set(&out->refcount, 1);
+	return out->data;
 }
 
 int ipc_rcu_getref(void *ptr)
 {
-	return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
-}
-
-static void ipc_do_vfree(struct work_struct *work)
-{
-	vfree(container_of(work, struct ipc_rcu_sched, work));
+	return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu, data)->refcount);
 }
 
 /**
  * ipc_schedule_free - free ipc + rcu space
  * @head: RCU callback structure for queued work
- *
- * Since RCU callback function is called in bh,
- * we need to defer the vfree to schedule_work().
  */
 static void ipc_schedule_free(struct rcu_head *head)
 {
-	struct ipc_rcu_grace *grace;
-	struct ipc_rcu_sched *sched;
-
-	grace = container_of(head, struct ipc_rcu_grace, rcu);
-	sched = container_of(&(grace->data[0]), struct ipc_rcu_sched,
-				data[0]);
-
-	INIT_WORK(&sched->work, ipc_do_vfree);
-	schedule_work(&sched->work);
+	vfree(container_of(head, struct ipc_rcu, rcu));
 }
 
 void ipc_rcu_putref(void *ptr)
 {
-	if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
+	struct ipc_rcu *p = container_of(ptr, struct ipc_rcu, data);
+
+	if (!atomic_dec_and_test(&p->refcount))
 		return;
 
-	if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
-		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
-				ipc_schedule_free);
+	if (is_vmalloc_addr(ptr)) {
+		call_rcu(&p->rcu, ipc_schedule_free);
 	} else {
-		kfree_rcu(container_of(ptr, struct ipc_rcu_grace, data), rcu);
+		kfree_rcu(p, rcu);
 	}
 }
 
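After the patch the whole lifecycle hangs off that one header: the
allocator returns out->data, every other helper walks back with
container_of(), and the kmalloc/vmalloc question is answered by
is_vmalloc_addr() on the pointer instead of a stored is_vmalloc flag.
A minimal sketch of the round trip, assuming the post-patch layout
(rcu_hdr_* are hypothetical names; the size cutoff mimics what
ipc_alloc() does):

#include <linux/atomic.h>
#include <linux/mm.h>		/* is_vmalloc_addr() */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Mirrors the post-patch struct ipc_rcu, for illustration. */
struct rcu_hdr {
	struct rcu_head rcu;
	atomic_t refcount;
	void *data[0];
};

static void rcu_hdr_vfree(struct rcu_head *head)
{
	vfree(container_of(head, struct rcu_hdr, rcu));
}

/* Callers only ever see the payload pointer... */
static void *rcu_hdr_alloc(int size)
{
	struct rcu_hdr *p;
	size_t total = sizeof(*p) + size;

	/* like ipc_alloc(): small allocations via kmalloc, big via vmalloc */
	if (total > PAGE_SIZE)
		p = vmalloc(total);
	else
		p = kmalloc(total, GFP_KERNEL);
	if (!p)
		return NULL;
	atomic_set(&p->refcount, 1);
	return p->data;			/* hand out the payload, not the header */
}

/* ...and every helper recovers the header with container_of(). */
static void rcu_hdr_putref(void *ptr)
{
	struct rcu_hdr *p = container_of(ptr, struct rcu_hdr, data);

	if (!atomic_dec_and_test(&p->refcount))
		return;

	/* No is_vmalloc flag: the address itself says how it was made. */
	if (is_vmalloc_addr(ptr))
		call_rcu(&p->rcu, rcu_hdr_vfree);
	else
		kfree_rcu(p, rcu);
}

Deriving the allocation type from the address is what lets the patch drop
both the flag and the extra headers: is_vmalloc_addr() only inspects the
pointer value, so nothing about the allocation needs to be remembered in
the object itself.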