author    Tejun Heo <tj@kernel.org>    2010-06-29 04:07:12 -0400
committer Tejun Heo <tj@kernel.org>    2010-06-29 04:07:12 -0400
commit    1e19ffc63dbbaea7a7d1c63d99c38d3e5a4c7edf (patch)
tree      357690d6017682a4a21824f7d3f34a83406a136d /kernel
parent    affee4b294a0fc97d67c8a77dc080c4dd262a79e (diff)
workqueue: implement per-cwq active work limit
Add cwq->nr_active, cwq->max_active and cwq->delayed_works.  nr_active counts the number of active works per cwq.  A work is active if it's flushable (colored) and is on cwq's worklist.  If nr_active reaches max_active, new works are queued on cwq->delayed_works and activated later, as works on the cwq complete and decrement nr_active.

cwq->max_active can be specified via the new @max_active parameter to __create_workqueue() and is set to 1 for all workqueues for now.  As each cwq has only a single worker now, this double queueing doesn't cause any behavior difference visible to its users.

This will be used to reimplement freeze/thaw and to implement the shared worker pool.

Signed-off-by: Tejun Heo <tj@kernel.org>
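To make the queueing discipline concrete, here is a minimal userspace C sketch of the same idea.  This is an illustration, not kernel code: queue_work() and work_done() below are simplified stand-ins for __queue_work() and cwq_dec_nr_in_flight(), the hand-rolled FIFO replaces the kernel's list_head machinery, and locking and work colors are omitted entirely.

#include <stdio.h>

struct work {
	int id;
	struct work *next;
};

struct fifo {
	struct work *head, **tail;
};

static void fifo_init(struct fifo *q)
{
	q->head = NULL;
	q->tail = &q->head;
}

static void fifo_push(struct fifo *q, struct work *w)
{
	w->next = NULL;
	*q->tail = w;
	q->tail = &w->next;
}

static struct work *fifo_pop(struct fifo *q)
{
	struct work *w = q->head;

	if (w && !(q->head = w->next))
		q->tail = &q->head;	/* queue emptied, reset tail */
	return w;
}

/* stand-in for struct cpu_workqueue_struct */
struct cwq {
	int nr_active;			/* works currently on ->worklist */
	int max_active;			/* limit before overflow is delayed */
	struct fifo worklist;		/* what the worker actually sees */
	struct fifo delayed_works;	/* waiting for an active slot */
};

/* mirrors the __queue_work() hunk: take an active slot or get delayed */
static void queue_work(struct cwq *cwq, struct work *w)
{
	if (cwq->nr_active < cwq->max_active) {
		cwq->nr_active++;
		fifo_push(&cwq->worklist, w);
	} else {
		fifo_push(&cwq->delayed_works, w);
	}
}

/* mirrors the cwq_dec_nr_in_flight() hunk: promote one delayed work */
static void work_done(struct cwq *cwq)
{
	struct work *w;

	cwq->nr_active--;
	if (cwq->nr_active < cwq->max_active &&
	    (w = fifo_pop(&cwq->delayed_works))) {
		cwq->nr_active++;
		fifo_push(&cwq->worklist, w);
	}
}

int main(void)
{
	struct cwq cwq = { .max_active = 1 };
	struct work w[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	struct work *cur;
	int i;

	fifo_init(&cwq.worklist);
	fifo_init(&cwq.delayed_works);

	for (i = 0; i < 3; i++)
		queue_work(&cwq, &w[i]);

	/* with max_active == 1, only w[0] is active; the rest follow
	 * one at a time as each preceding work completes */
	while ((cur = fifo_pop(&cwq.worklist))) {
		printf("running work %d (nr_active=%d)\n",
		       cur->id, cwq.nr_active);
		work_done(&cwq);
	}
	return 0;
}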
Diffstat (limited to 'kernel')
 kernel/workqueue.c | 39 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 37 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9953d3c7bd10..e541b5db67dd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -77,6 +77,9 @@ struct cpu_workqueue_struct {
 	int			flush_color;	/* L: flushing color */
 	int			nr_in_flight[WORK_NR_COLORS];
 						/* L: nr of in_flight works */
+	int			nr_active;	/* L: nr of active works */
+	int			max_active;	/* I: max active works */
+	struct list_head	delayed_works;	/* L: delayed works */
 };
 
 /*
@@ -321,14 +324,24 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
 	struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq);
+	struct list_head *worklist;
 	unsigned long flags;
 
 	debug_work_activate(work);
+
 	spin_lock_irqsave(&cwq->lock, flags);
 	BUG_ON(!list_empty(&work->entry));
+
 	cwq->nr_in_flight[cwq->work_color]++;
-	insert_work(cwq, work, &cwq->worklist,
-		    work_color_to_flags(cwq->work_color));
+
+	if (likely(cwq->nr_active < cwq->max_active)) {
+		cwq->nr_active++;
+		worklist = &cwq->worklist;
+	} else
+		worklist = &cwq->delayed_works;
+
+	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+
 	spin_unlock_irqrestore(&cwq->lock, flags);
 }
 
@@ -584,6 +597,15 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
 	*nextp = n;
 }
 
+static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+{
+	struct work_struct *work = list_first_entry(&cwq->delayed_works,
+						    struct work_struct, entry);
+
+	move_linked_works(work, &cwq->worklist, NULL);
+	cwq->nr_active++;
+}
+
 /**
  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
  * @cwq: cwq of interest
@@ -602,6 +624,12 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
 		return;
 
 	cwq->nr_in_flight[color]--;
+	cwq->nr_active--;
+
+	/* one down, submit a delayed one */
+	if (!list_empty(&cwq->delayed_works) &&
+	    cwq->nr_active < cwq->max_active)
+		cwq_activate_first_delayed(cwq);
 
 	/* is flush in progress and are we at the flushing tip? */
 	if (likely(cwq->flush_color != color))
@@ -1505,6 +1533,7 @@ static void free_cwqs(struct cpu_workqueue_struct *cwqs)
 
 struct workqueue_struct *__create_workqueue_key(const char *name,
 						unsigned int flags,
+						int max_active,
 						struct lock_class_key *key,
 						const char *lock_name)
 {
@@ -1513,6 +1542,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 	bool failed = false;
 	unsigned int cpu;
 
+	max_active = clamp_val(max_active, 1, INT_MAX);
+
 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
 	if (!wq)
 		goto err;
@@ -1544,8 +1575,10 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		cwq->cpu = cpu;
 		cwq->wq = wq;
 		cwq->flush_color = -1;
+		cwq->max_active = max_active;
 		spin_lock_init(&cwq->lock);
 		INIT_LIST_HEAD(&cwq->worklist);
+		INIT_LIST_HEAD(&cwq->delayed_works);
 		init_waitqueue_head(&cwq->more_work);
 
 		if (failed)
@@ -1607,6 +1640,8 @@ void destroy_workqueue(struct workqueue_struct *wq)
 
 		for (i = 0; i < WORK_NR_COLORS; i++)
 			BUG_ON(cwq->nr_in_flight[i]);
+		BUG_ON(cwq->nr_active);
+		BUG_ON(!list_empty(&cwq->delayed_works));
 	}
 
 	free_cwqs(wq->cpu_wq);
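As a usage note: the caller-visible side of @max_active lives in the __create_workqueue() wrapper, whose header hunk is not shown above because the diffstat is limited to 'kernel'.  A hedged sketch of what a caller could look like after this patch, where the queue name and the limit of 4 are purely illustrative:

/* Sketch only: assumes __create_workqueue(name, flags, max_active)
 * forwards max_active to __create_workqueue_key(), as the (unshown)
 * include/linux/workqueue.h side of this patch would do.  The value is
 * clamped to [1, INT_MAX]; each cwq then keeps at most 4 works on its
 * worklist and parks any overflow on cwq->delayed_works. */
struct workqueue_struct *wq = __create_workqueue("example", 0, 4);

if (!wq)
	return -ENOMEM;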