diff options
author | David Howells <dhowells@redhat.com> | 2006-11-22 09:54:01 -0500 |
---|---|---|
committer | David Howells <dhowells@redhat.com> | 2006-11-22 09:54:01 -0500 |
commit | 52bad64d95bd89e08c49ec5a071fa6dcbe5a1a9c (patch) | |
tree | 5849b4e3c17daa70a7e81cfdeaddac9ac8a0e953 /kernel/workqueue.c | |
parent | 0f9005a6f7a82f4aacbd72f7b92322a8ca1c3f97 (diff) |
WorkStruct: Separate delayable and non-delayable events.
Separate delayable work items from non-delayable work items by splitting them
into a separate structure (delayed_work), which incorporates a work_struct and
the timer_list removed from work_struct.
The work_struct struct is huge, and this limits its usefulness. On a 64-bit
architecture it's nearly 100 bytes in size. This reduces that by half for the
non-delayable type of event.
Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 51 |
1 files changed, 28 insertions, 23 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 17c2f03d2c27..44fc54b7decf 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -122,29 +122,33 @@ EXPORT_SYMBOL_GPL(queue_work); | |||
122 | 122 | ||
123 | static void delayed_work_timer_fn(unsigned long __data) | 123 | static void delayed_work_timer_fn(unsigned long __data) |
124 | { | 124 | { |
125 | struct work_struct *work = (struct work_struct *)__data; | 125 | struct delayed_work *dwork = (struct delayed_work *)__data; |
126 | struct workqueue_struct *wq = work->wq_data; | 126 | struct workqueue_struct *wq = dwork->work.wq_data; |
127 | int cpu = smp_processor_id(); | 127 | int cpu = smp_processor_id(); |
128 | 128 | ||
129 | if (unlikely(is_single_threaded(wq))) | 129 | if (unlikely(is_single_threaded(wq))) |
130 | cpu = singlethread_cpu; | 130 | cpu = singlethread_cpu; |
131 | 131 | ||
132 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 132 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work); |
133 | } | 133 | } |
134 | 134 | ||
135 | /** | 135 | /** |
136 | * queue_delayed_work - queue work on a workqueue after delay | 136 | * queue_delayed_work - queue work on a workqueue after delay |
137 | * @wq: workqueue to use | 137 | * @wq: workqueue to use |
138 | * @work: work to queue | 138 | * @work: delayable work to queue |
139 | * @delay: number of jiffies to wait before queueing | 139 | * @delay: number of jiffies to wait before queueing |
140 | * | 140 | * |
141 | * Returns 0 if @work was already on a queue, non-zero otherwise. | 141 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
142 | */ | 142 | */ |
143 | int fastcall queue_delayed_work(struct workqueue_struct *wq, | 143 | int fastcall queue_delayed_work(struct workqueue_struct *wq, |
144 | struct work_struct *work, unsigned long delay) | 144 | struct delayed_work *dwork, unsigned long delay) |
145 | { | 145 | { |
146 | int ret = 0; | 146 | int ret = 0; |
147 | struct timer_list *timer = &work->timer; | 147 | struct timer_list *timer = &dwork->timer; |
148 | struct work_struct *work = &dwork->work; | ||
149 | |||
150 | if (delay == 0) | ||
151 | return queue_work(wq, work); | ||
148 | 152 | ||
149 | if (!test_and_set_bit(0, &work->pending)) { | 153 | if (!test_and_set_bit(0, &work->pending)) { |
150 | BUG_ON(timer_pending(timer)); | 154 | BUG_ON(timer_pending(timer)); |
@@ -153,7 +157,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq, | |||
153 | /* This stores wq for the moment, for the timer_fn */ | 157 | /* This stores wq for the moment, for the timer_fn */ |
154 | work->wq_data = wq; | 158 | work->wq_data = wq; |
155 | timer->expires = jiffies + delay; | 159 | timer->expires = jiffies + delay; |
156 | timer->data = (unsigned long)work; | 160 | timer->data = (unsigned long)dwork; |
157 | timer->function = delayed_work_timer_fn; | 161 | timer->function = delayed_work_timer_fn; |
158 | add_timer(timer); | 162 | add_timer(timer); |
159 | ret = 1; | 163 | ret = 1; |
@@ -172,10 +176,11 @@ EXPORT_SYMBOL_GPL(queue_delayed_work); | |||
172 | * Returns 0 if @work was already on a queue, non-zero otherwise. | 176 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
173 | */ | 177 | */ |
174 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | 178 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
175 | struct work_struct *work, unsigned long delay) | 179 | struct delayed_work *dwork, unsigned long delay) |
176 | { | 180 | { |
177 | int ret = 0; | 181 | int ret = 0; |
178 | struct timer_list *timer = &work->timer; | 182 | struct timer_list *timer = &dwork->timer; |
183 | struct work_struct *work = &dwork->work; | ||
179 | 184 | ||
180 | if (!test_and_set_bit(0, &work->pending)) { | 185 | if (!test_and_set_bit(0, &work->pending)) { |
181 | BUG_ON(timer_pending(timer)); | 186 | BUG_ON(timer_pending(timer)); |
@@ -184,7 +189,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
184 | /* This stores wq for the moment, for the timer_fn */ | 189 | /* This stores wq for the moment, for the timer_fn */ |
185 | work->wq_data = wq; | 190 | work->wq_data = wq; |
186 | timer->expires = jiffies + delay; | 191 | timer->expires = jiffies + delay; |
187 | timer->data = (unsigned long)work; | 192 | timer->data = (unsigned long)dwork; |
188 | timer->function = delayed_work_timer_fn; | 193 | timer->function = delayed_work_timer_fn; |
189 | add_timer_on(timer, cpu); | 194 | add_timer_on(timer, cpu); |
190 | ret = 1; | 195 | ret = 1; |
@@ -468,31 +473,31 @@ EXPORT_SYMBOL(schedule_work); | |||
468 | 473 | ||
469 | /** | 474 | /** |
470 | * schedule_delayed_work - put work task in global workqueue after delay | 475 | * schedule_delayed_work - put work task in global workqueue after delay |
471 | * @work: job to be done | 476 | * @dwork: job to be done |
472 | * @delay: number of jiffies to wait | 477 | * @delay: number of jiffies to wait or 0 for immediate execution |
473 | * | 478 | * |
474 | * After waiting for a given time this puts a job in the kernel-global | 479 | * After waiting for a given time this puts a job in the kernel-global |
475 | * workqueue. | 480 | * workqueue. |
476 | */ | 481 | */ |
477 | int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) | 482 | int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay) |
478 | { | 483 | { |
479 | return queue_delayed_work(keventd_wq, work, delay); | 484 | return queue_delayed_work(keventd_wq, dwork, delay); |
480 | } | 485 | } |
481 | EXPORT_SYMBOL(schedule_delayed_work); | 486 | EXPORT_SYMBOL(schedule_delayed_work); |
482 | 487 | ||
483 | /** | 488 | /** |
484 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay | 489 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay |
485 | * @cpu: cpu to use | 490 | * @cpu: cpu to use |
486 | * @work: job to be done | 491 | * @dwork: job to be done |
487 | * @delay: number of jiffies to wait | 492 | * @delay: number of jiffies to wait |
488 | * | 493 | * |
489 | * After waiting for a given time this puts a job in the kernel-global | 494 | * After waiting for a given time this puts a job in the kernel-global |
490 | * workqueue on the specified CPU. | 495 | * workqueue on the specified CPU. |
491 | */ | 496 | */ |
492 | int schedule_delayed_work_on(int cpu, | 497 | int schedule_delayed_work_on(int cpu, |
493 | struct work_struct *work, unsigned long delay) | 498 | struct delayed_work *dwork, unsigned long delay) |
494 | { | 499 | { |
495 | return queue_delayed_work_on(cpu, keventd_wq, work, delay); | 500 | return queue_delayed_work_on(cpu, keventd_wq, dwork, delay); |
496 | } | 501 | } |
497 | EXPORT_SYMBOL(schedule_delayed_work_on); | 502 | EXPORT_SYMBOL(schedule_delayed_work_on); |
498 | 503 | ||
@@ -539,12 +544,12 @@ EXPORT_SYMBOL(flush_scheduled_work); | |||
539 | * cancel_rearming_delayed_workqueue - reliably kill off a delayed | 544 | * cancel_rearming_delayed_workqueue - reliably kill off a delayed |
540 | * work whose handler rearms the delayed work. | 545 | * work whose handler rearms the delayed work. |
541 | * @wq: the controlling workqueue structure | 546 | * @wq: the controlling workqueue structure |
542 | * @work: the delayed work struct | 547 | * @dwork: the delayed work struct |
543 | */ | 548 | */ |
544 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, | 549 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, |
545 | struct work_struct *work) | 550 | struct delayed_work *dwork) |
546 | { | 551 | { |
547 | while (!cancel_delayed_work(work)) | 552 | while (!cancel_delayed_work(dwork)) |
548 | flush_workqueue(wq); | 553 | flush_workqueue(wq); |
549 | } | 554 | } |
550 | EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); | 555 | EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); |
@@ -552,11 +557,11 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); | |||
552 | /** | 557 | /** |
553 | * cancel_rearming_delayed_work - reliably kill off a delayed keventd | 558 | * cancel_rearming_delayed_work - reliably kill off a delayed keventd |
554 | * work whose handler rearms the delayed work. | 559 | * work whose handler rearms the delayed work. |
555 | * @work: the delayed work struct | 560 | * @dwork: the delayed work struct |
556 | */ | 561 | */ |
557 | void cancel_rearming_delayed_work(struct work_struct *work) | 562 | void cancel_rearming_delayed_work(struct delayed_work *dwork) |
558 | { | 563 | { |
559 | cancel_rearming_delayed_workqueue(keventd_wq, work); | 564 | cancel_rearming_delayed_workqueue(keventd_wq, dwork); |
560 | } | 565 | } |
561 | EXPORT_SYMBOL(cancel_rearming_delayed_work); | 566 | EXPORT_SYMBOL(cancel_rearming_delayed_work); |
562 | 567 | ||