diff options
-rw-r--r-- | include/linux/workqueue.h | 4 | ||||
-rw-r--r-- | kernel/workqueue.c | 53 |
2 files changed, 57 insertions, 0 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 5f4aeaa9f3e..20000305a8a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -390,6 +390,10 @@ extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
390 | struct delayed_work *work, unsigned long delay); | 390 | struct delayed_work *work, unsigned long delay); |
391 | extern bool queue_delayed_work(struct workqueue_struct *wq, | 391 | extern bool queue_delayed_work(struct workqueue_struct *wq, |
392 | struct delayed_work *work, unsigned long delay); | 392 | struct delayed_work *work, unsigned long delay); |
393 | extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, | ||
394 | struct delayed_work *dwork, unsigned long delay); | ||
395 | extern bool mod_delayed_work(struct workqueue_struct *wq, | ||
396 | struct delayed_work *dwork, unsigned long delay); | ||
393 | 397 | ||
394 | extern void flush_workqueue(struct workqueue_struct *wq); | 398 | extern void flush_workqueue(struct workqueue_struct *wq); |
395 | extern void drain_workqueue(struct workqueue_struct *wq); | 399 | extern void drain_workqueue(struct workqueue_struct *wq); |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b4a4e05c89e..41ae2c0979f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -1414,6 +1414,59 @@ bool queue_delayed_work(struct workqueue_struct *wq, | |||
1414 | EXPORT_SYMBOL_GPL(queue_delayed_work); | 1414 | EXPORT_SYMBOL_GPL(queue_delayed_work); |
1415 | 1415 | ||
/**
 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
 * modify @dwork's timer so that it expires after @delay.  If @delay is
 * zero, @work is guaranteed to be scheduled immediately regardless of its
 * current state.
 *
 * Returns %false if @dwork was idle and queued, %true if @dwork was
 * pending and its timer was modified.
 *
 * This function is safe to call from any context other than IRQ handler.
 * See try_to_grab_pending() for details.
 */
bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			 struct delayed_work *dwork, unsigned long delay)
{
	unsigned long flags;
	int ret;

	/*
	 * Retry while the work item is transiently busy; -EAGAIN is the
	 * only return value try_to_grab_pending() asks us to retry on.
	 */
	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (likely(ret >= 0)) {
		/*
		 * Grabbed the pending state (ret == 1) or it was idle
		 * (ret == 0) — either way, (re)queue with the new delay.
		 * try_to_grab_pending() left irqs disabled via @flags on
		 * this path; restore them after queueing.
		 */
		__queue_delayed_work(cpu, wq, dwork, delay);
		local_irq_restore(flags);
	}

	/* -ENOENT from try_to_grab_pending() becomes %true */
	return ret;
}
EXPORT_SYMBOL_GPL(mod_delayed_work_on);
1453 | |||
1454 | /** | ||
1455 | * mod_delayed_work - modify delay of or queue a delayed work | ||
1456 | * @wq: workqueue to use | ||
1457 | * @dwork: work to queue | ||
1458 | * @delay: number of jiffies to wait before queueing | ||
1459 | * | ||
1460 | * mod_delayed_work_on() on local CPU. | ||
1461 | */ | ||
1462 | bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, | ||
1463 | unsigned long delay) | ||
1464 | { | ||
1465 | return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); | ||
1466 | } | ||
1467 | EXPORT_SYMBOL_GPL(mod_delayed_work); | ||
1468 | |||
1469 | /** | ||
1417 | * worker_enter_idle - enter idle state | 1470 | * worker_enter_idle - enter idle state |
1418 | * @worker: worker which is entering idle state | 1471 | * @worker: worker which is entering idle state |
1419 | * | 1472 | * |