diff options
| author | Mauro Carvalho Chehab <mchehab@redhat.com> | 2012-12-11 08:18:45 -0500 |
|---|---|---|
| committer | Mauro Carvalho Chehab <mchehab@redhat.com> | 2012-12-11 08:18:45 -0500 |
| commit | 9374020a78fce13a1cf2edf3d26f6dd7231b5c3d (patch) | |
| tree | 50c8629e6c6222c5b9681506b52afbde818c5e56 /kernel/workqueue.c | |
| parent | d2a0db1ee01aea154ccc460e45a16857e32c4427 (diff) | |
| parent | 29594404d7fe73cd80eaa4ee8c43dcc53970c60e (diff) | |
Merge tag 'v3.7' into v4l_for_linus
Linux 3.7
* tag 'v3.7': (1545 commits)
Linux 3.7
Input: matrix-keymap - provide proper module license
Revert "revert "Revert "mm: remove __GFP_NO_KSWAPD""" and associated damage
ipv4: ip_check_defrag must not modify skb before unsharing
Revert "mm: avoid waking kswapd for THP allocations when compaction is deferred or contended"
inet_diag: validate port comparison byte code to prevent unsafe reads
inet_diag: avoid unsafe and nonsensical prefix matches in inet_diag_bc_run()
inet_diag: validate byte code to prevent oops in inet_diag_bc_run()
inet_diag: fix oops for IPv4 AF_INET6 TCP SYN-RECV state
mm: vmscan: fix inappropriate zone congestion clearing
vfs: fix O_DIRECT read past end of block device
net: gro: fix possible panic in skb_gro_receive()
tcp: bug fix Fast Open client retransmission
tmpfs: fix shared mempolicy leak
mm: vmscan: do not keep kswapd looping forever due to individual uncompactable zones
mm: compaction: validate pfn range passed to isolate_freepages_block
mmc: sh-mmcif: avoid oops on spurious interrupts (second try)
Revert misapplied "mmc: sh-mmcif: avoid oops on spurious interrupts"
mmc: sdhci-s3c: fix missing clock for gpio card-detect
lib/Makefile: Fix oid_registry build dependency
...
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 24 |
1 file changed, 17 insertions, 7 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index d951daa0ca9a..1dae900df798 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -1361,8 +1361,19 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, | |||
| 1361 | 1361 | ||
| 1362 | WARN_ON_ONCE(timer->function != delayed_work_timer_fn || | 1362 | WARN_ON_ONCE(timer->function != delayed_work_timer_fn || |
| 1363 | timer->data != (unsigned long)dwork); | 1363 | timer->data != (unsigned long)dwork); |
| 1364 | BUG_ON(timer_pending(timer)); | 1364 | WARN_ON_ONCE(timer_pending(timer)); |
| 1365 | BUG_ON(!list_empty(&work->entry)); | 1365 | WARN_ON_ONCE(!list_empty(&work->entry)); |
| 1366 | |||
| 1367 | /* | ||
| 1368 | * If @delay is 0, queue @dwork->work immediately. This is for | ||
| 1369 | * both optimization and correctness. The earliest @timer can | ||
| 1370 | * expire is on the closest next tick and delayed_work users depend | ||
| 1371 | * on that there's no such delay when @delay is 0. | ||
| 1372 | */ | ||
| 1373 | if (!delay) { | ||
| 1374 | __queue_work(cpu, wq, &dwork->work); | ||
| 1375 | return; | ||
| 1376 | } | ||
| 1366 | 1377 | ||
| 1367 | timer_stats_timer_set_start_info(&dwork->timer); | 1378 | timer_stats_timer_set_start_info(&dwork->timer); |
| 1368 | 1379 | ||
| @@ -1417,9 +1428,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
| 1417 | bool ret = false; | 1428 | bool ret = false; |
| 1418 | unsigned long flags; | 1429 | unsigned long flags; |
| 1419 | 1430 | ||
| 1420 | if (!delay) | ||
| 1421 | return queue_work_on(cpu, wq, &dwork->work); | ||
| 1422 | |||
| 1423 | /* read the comment in __queue_work() */ | 1431 | /* read the comment in __queue_work() */ |
| 1424 | local_irq_save(flags); | 1432 | local_irq_save(flags); |
| 1425 | 1433 | ||
| @@ -2407,8 +2415,10 @@ static int rescuer_thread(void *__wq) | |||
| 2407 | repeat: | 2415 | repeat: |
| 2408 | set_current_state(TASK_INTERRUPTIBLE); | 2416 | set_current_state(TASK_INTERRUPTIBLE); |
| 2409 | 2417 | ||
| 2410 | if (kthread_should_stop()) | 2418 | if (kthread_should_stop()) { |
| 2419 | __set_current_state(TASK_RUNNING); | ||
| 2411 | return 0; | 2420 | return 0; |
| 2421 | } | ||
| 2412 | 2422 | ||
| 2413 | /* | 2423 | /* |
| 2414 | * See whether any cpu is asking for help. Unbounded | 2424 | * See whether any cpu is asking for help. Unbounded |
| @@ -2982,7 +2992,7 @@ bool cancel_delayed_work(struct delayed_work *dwork) | |||
| 2982 | 2992 | ||
| 2983 | set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work)); | 2993 | set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work)); |
| 2984 | local_irq_restore(flags); | 2994 | local_irq_restore(flags); |
| 2985 | return true; | 2995 | return ret; |
| 2986 | } | 2996 | } |
| 2987 | EXPORT_SYMBOL(cancel_delayed_work); | 2997 | EXPORT_SYMBOL(cancel_delayed_work); |
| 2988 | 2998 | ||
