diff options
| author | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2009-04-09 13:19:55 -0400 |
|---|---|---|
| committer | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2009-04-09 13:19:55 -0400 |
| commit | ed4dcc5939e497199404647d7d9d8569432485dc (patch) | |
| tree | 00fb3643864aa06e9e85f3bc062f6ccd5b648c27 | |
| parent | 115e784516a9d0994b4a9d5730a4b1a4be6fcc58 (diff) | |
rt_domain: do not allocate/free memory in release queue handling
This fixes a bunch of bugs:
- a memory leak (some heap nodes were never reclaimed)
- a locking rules violation (memory allocation can wake up kswapd,
which causes a circular locking dependency between the release queue lock
and the run queue locks)
Also, allocating memory in a hot path was never particularly clever.
This patch pre-allocates memory at task-creation time, but does not change
how the release queue works overall.
| -rw-r--r-- | include/litmus/rt_domain.h | 4 | ||||
| -rw-r--r-- | include/litmus/rt_param.h | 2 | ||||
| -rw-r--r-- | litmus/litmus.c | 17 | ||||
| -rw-r--r-- | litmus/rt_domain.c | 34 |
4 files changed, 33 insertions, 24 deletions
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h index 7356ec7ca4..9323892a87 100644 --- a/include/litmus/rt_domain.h +++ b/include/litmus/rt_domain.h | |||
| @@ -21,6 +21,10 @@ struct release_heap { | |||
| 21 | struct list_head list; | 21 | struct list_head list; |
| 22 | lt_t release_time; | 22 | lt_t release_time; |
| 23 | struct heap heap; | 23 | struct heap heap; |
| 24 | /* the node used to enqueue this heap in a | ||
| 25 | * release queue | ||
| 26 | */ | ||
| 27 | struct heap_node* hn; | ||
| 24 | }; | 28 | }; |
| 25 | 29 | ||
| 26 | struct release_queue { | 30 | struct release_queue { |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 155543ea93..a0d5de2aa5 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
| @@ -40,6 +40,7 @@ struct rt_task { | |||
| 40 | 40 | ||
| 41 | struct _rt_domain; | 41 | struct _rt_domain; |
| 42 | struct heap_node; | 42 | struct heap_node; |
| 43 | struct release_heap; | ||
| 43 | 44 | ||
| 44 | struct rt_job { | 45 | struct rt_job { |
| 45 | /* Time instant the the job was or will be released. */ | 46 | /* Time instant the the job was or will be released. */ |
| @@ -160,6 +161,7 @@ struct rt_param { | |||
| 160 | * implementation). | 161 | * implementation). |
| 161 | */ | 162 | */ |
| 162 | struct heap_node* heap_node; | 163 | struct heap_node* heap_node; |
| 164 | struct release_heap* rel_heap; | ||
| 163 | 165 | ||
| 164 | /* Used by rt_domain to queue task in release list. | 166 | /* Used by rt_domain to queue task in release list. |
| 165 | */ | 167 | */ |
diff --git a/litmus/litmus.c b/litmus/litmus.c index 8591e7c022..145da8e629 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
| @@ -568,11 +568,22 @@ long litmus_admit_task(struct task_struct* tsk) | |||
| 568 | 568 | ||
| 569 | /* allocate heap node for this task */ | 569 | /* allocate heap node for this task */ |
| 570 | tsk_rt(tsk)->heap_node = heap_node_alloc(GFP_ATOMIC); | 570 | tsk_rt(tsk)->heap_node = heap_node_alloc(GFP_ATOMIC); |
| 571 | if (!tsk_rt(tsk)->heap_node) { | 571 | tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC); |
| 572 | tsk_rt(tsk)->rel_heap->hn = heap_node_alloc(GFP_ATOMIC); | ||
| 573 | if (!tsk_rt(tsk)->heap_node || | ||
| 574 | !tsk_rt(tsk)->rel_heap || | ||
| 575 | !tsk_rt(tsk)->rel_heap->hn) { | ||
| 572 | printk(KERN_WARNING "litmus: no more heap node memory!?\n"); | 576 | printk(KERN_WARNING "litmus: no more heap node memory!?\n"); |
| 573 | retval = -ENOMEM; | 577 | retval = -ENOMEM; |
| 574 | } else | 578 | heap_node_free(tsk_rt(tsk)->heap_node); |
| 579 | if (tsk_rt(tsk)->rel_heap) { | ||
| 580 | heap_node_free(tsk_rt(tsk)->rel_heap->hn); | ||
| 581 | release_heap_free(tsk_rt(tsk)->rel_heap); | ||
| 582 | } | ||
| 583 | } else { | ||
| 575 | heap_node_init(&tsk_rt(tsk)->heap_node, tsk); | 584 | heap_node_init(&tsk_rt(tsk)->heap_node, tsk); |
| 585 | heap_node_init(&tsk_rt(tsk)->rel_heap->hn, tsk_rt(tsk)->rel_heap); | ||
| 586 | } | ||
| 576 | 587 | ||
| 577 | if (!retval) | 588 | if (!retval) |
| 578 | retval = litmus->admit_task(tsk); | 589 | retval = litmus->admit_task(tsk); |
| @@ -595,6 +606,8 @@ void litmus_exit_task(struct task_struct* tsk) | |||
| 595 | litmus->task_exit(tsk); | 606 | litmus->task_exit(tsk); |
| 596 | BUG_ON(heap_node_in_heap(tsk_rt(tsk)->heap_node)); | 607 | BUG_ON(heap_node_in_heap(tsk_rt(tsk)->heap_node)); |
| 597 | heap_node_free(tsk_rt(tsk)->heap_node); | 608 | heap_node_free(tsk_rt(tsk)->heap_node); |
| 609 | heap_node_free(tsk_rt(tsk)->rel_heap->hn); | ||
| 610 | release_heap_free(tsk_rt(tsk)->rel_heap); | ||
| 598 | atomic_dec(&rt_task_count); | 611 | atomic_dec(&rt_task_count); |
| 599 | reinit_litmus_state(tsk, 1); | 612 | reinit_litmus_state(tsk, 1); |
| 600 | } | 613 | } |
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c index a05ed9ca94..69e0019870 100644 --- a/litmus/rt_domain.c +++ b/litmus/rt_domain.c | |||
| @@ -52,13 +52,13 @@ int heap_earlier_release(struct heap_node *_a, struct heap_node *_b) | |||
| 52 | * Will return heap for given time. If no such heap exists prior to the invocation | 52 | * Will return heap for given time. If no such heap exists prior to the invocation |
| 53 | * it will be created. | 53 | * it will be created. |
| 54 | */ | 54 | */ |
| 55 | static struct release_heap* get_release_heap(rt_domain_t *rt, lt_t release_time) | 55 | static struct release_heap* get_release_heap(rt_domain_t *rt, struct task_struct* t) |
| 56 | { | 56 | { |
| 57 | struct list_head* pos; | 57 | struct list_head* pos; |
| 58 | struct release_heap* heap = NULL; | 58 | struct release_heap* heap = NULL; |
| 59 | struct release_heap* rh; | 59 | struct release_heap* rh; |
| 60 | lt_t release_time = get_release(t); | ||
| 60 | unsigned int slot = time2slot(release_time); | 61 | unsigned int slot = time2slot(release_time); |
| 61 | int inserted; | ||
| 62 | 62 | ||
| 63 | /* initialize pos for the case that the list is empty */ | 63 | /* initialize pos for the case that the list is empty */ |
| 64 | pos = rt->release_queue.slot[slot].next; | 64 | pos = rt->release_queue.slot[slot].next; |
| @@ -78,23 +78,13 @@ static struct release_heap* get_release_heap(rt_domain_t *rt, lt_t release_time) | |||
| 78 | } | 78 | } |
| 79 | } | 79 | } |
| 80 | if (!heap) { | 80 | if (!heap) { |
| 81 | /* must create new node */ | 81 | /* pre-allocated release heap */ |
| 82 | /* FIXME: use a kmemcache_t */ | 82 | rh = tsk_rt(t)->rel_heap; |
| 83 | rh = kmalloc(sizeof(*rh), GFP_ATOMIC); | ||
| 84 | if (unlikely(!rh)) | ||
| 85 | /* Should be handled somehow. | ||
| 86 | * For now, let's just hope there is | ||
| 87 | * sufficient memory. | ||
| 88 | */ | ||
| 89 | panic("rt_domain: no more memory?"); | ||
| 90 | rh->release_time = release_time; | 83 | rh->release_time = release_time; |
| 91 | heap_init(&rh->heap); | 84 | heap_init(&rh->heap); |
| 92 | list_add(&rh->list, pos->prev); | 85 | list_add(&rh->list, pos->prev); |
| 93 | inserted = heap_add(heap_earlier_release, | 86 | heap_insert(heap_earlier_release, |
| 94 | &rt->release_queue.rel_heap, rh, | 87 | &rt->release_queue.rel_heap, rh->hn); |
| 95 | GFP_ATOMIC); | ||
| 96 | if (unlikely(!inserted)) | ||
| 97 | panic("rt_domain: no more heap memory?"); | ||
| 98 | heap = rh; | 88 | heap = rh; |
| 99 | } | 89 | } |
| 100 | return heap; | 90 | return heap; |
| @@ -125,19 +115,19 @@ static enum hrtimer_restart on_release_timer(struct hrtimer *timer) | |||
| 125 | rh = list_entry(pos, struct release_heap, list); | 115 | rh = list_entry(pos, struct release_heap, list); |
| 126 | heap_union(rt->order, &tasks, &rh->heap); | 116 | heap_union(rt->order, &tasks, &rh->heap); |
| 127 | list_del(pos); | 117 | list_del(pos); |
| 128 | kfree(rh); | ||
| 129 | } | 118 | } |
| 130 | 119 | ||
| 131 | /* call release callback */ | 120 | /* call release callback */ |
| 132 | rt->release_jobs(rt, &tasks); | 121 | if (!heap_empty(&tasks)) |
| 122 | rt->release_jobs(rt, &tasks); | ||
| 133 | 123 | ||
| 134 | 124 | ||
| 135 | spin_lock_irqsave(&rt->release_lock, flags); | 125 | spin_lock_irqsave(&rt->release_lock, flags); |
| 136 | while ((pending = next_release(rt, &release))) { | 126 | while ((pending = next_release(rt, &release))) { |
| 137 | if (lt_before(release, litmus_clock())) { | 127 | if (lt_before(release, litmus_clock())) { |
| 138 | /* pick for release */ | 128 | /* pick for release */ |
| 139 | rh = heap_take_del(heap_earlier_release, | 129 | rh = heap_take(heap_earlier_release, |
| 140 | &rt->release_queue.rel_heap); | 130 | &rt->release_queue.rel_heap)->value; |
| 141 | list_move(&rh->list, &list); | 131 | list_move(&rh->list, &list); |
| 142 | } else | 132 | } else |
| 143 | break; | 133 | break; |
| @@ -187,8 +177,8 @@ static void arm_release_timer(unsigned long _rt) | |||
| 187 | t = list_entry(pos, struct task_struct, rt_param.list); | 177 | t = list_entry(pos, struct task_struct, rt_param.list); |
| 188 | sched_trace_task_release(t); | 178 | sched_trace_task_release(t); |
| 189 | list_del(pos); | 179 | list_del(pos); |
| 190 | rh = get_release_heap(rt, get_release(t)); | 180 | rh = get_release_heap(rt, t); |
| 191 | heap_add(rt->order, &rh->heap, t, GFP_ATOMIC); | 181 | heap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node); |
| 192 | } | 182 | } |
| 193 | 183 | ||
| 194 | next_release(rt, &release); | 184 | next_release(rt, &release); |
