| author | Mauro Carvalho Chehab <mchehab@redhat.com> | 2013-03-18 19:58:00 -0400 |
|---|---|---|
| committer | Mauro Carvalho Chehab <mchehab@redhat.com> | 2013-03-18 19:58:00 -0400 |
| commit | d608d71cd6d19792487d08333d63c7ff20294694 (patch) | |
| tree | c9cad98ad9cbba487d32812d59c456ed774d6ffb /include/linux/workqueue.h | |
| parent | ed72d37a33fdf43dc47787fe220532cdec9da528 (diff) | |
| parent | a937536b868b8369b98967929045f1df54234323 (diff) | |
Merge tag 'v3.9-rc3' into v4l_for_linus
Linux 3.9-rc3
* tag 'v3.9-rc3': (11231 commits)
Linux 3.9-rc3
perf,x86: fix link failure for non-Intel configs
perf,x86: fix wrmsr_on_cpu() warning on suspend/resume
Btrfs: fix warning of free_extent_map
perf,x86: fix kernel crash with PEBS/BTS after suspend/resume
ALSA: hda - Fix missing EAPD/GPIO setup for Cirrus codecs
sound: sequencer: cap array index in seq_chn_common_event()
mfd: twl4030-madc: Remove __exit_p annotation
ALSA: hda/ca0132 - Remove extra setting of dsp_state.
ALSA: hda/ca0132 - Check download state of DSP.
ALSA: hda/ca0132 - Check if dspload_image succeeded.
mm/fremap.c: fix possible oops on error path
list: Fix double fetch of pointer in hlist_entry_safe()
Btrfs: fix warning when creating snapshots
Btrfs: return as soon as possible when edquot happens
Btrfs: return EIO if we have extent tree corruption
btrfs: use rcu_barrier() to wait for bdev puts at unmount
Btrfs: remove btrfs_try_spin_lock
Btrfs: get better concurrency for snapshot-aware defrag work
hwmon: (pmbus/ltc2978) Fix temperature reporting
...
Diffstat (limited to 'include/linux/workqueue.h')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | include/linux/workqueue.h | 35 |

1 file changed, 22 insertions(+), 13 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 2b58905d3504..8afab27cdbc2 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -27,7 +27,7 @@ void delayed_work_timer_fn(unsigned long __data);
 enum {
 	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
 	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
-	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
+	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
 	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
@@ -40,7 +40,7 @@ enum {
 
 	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
 	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
-	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
+	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
 	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
@@ -57,29 +57,36 @@ enum {
 
 	/* special cpu IDs */
 	WORK_CPU_UNBOUND	= NR_CPUS,
-	WORK_CPU_NONE		= NR_CPUS + 1,
-	WORK_CPU_LAST		= WORK_CPU_NONE,
+	WORK_CPU_END		= NR_CPUS + 1,
 
 	/*
-	 * Reserve 7 bits off of cwq pointer w/ debugobjects turned
-	 * off.  This makes cwqs aligned to 256 bytes and allows 15
-	 * workqueue flush colors.
+	 * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
+	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
+	 * flush colors.
 	 */
 	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
 				  WORK_STRUCT_COLOR_BITS,
 
-	/* data contains off-queue information when !WORK_STRUCT_CWQ */
+	/* data contains off-queue information when !WORK_STRUCT_PWQ */
 	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_FLAG_BITS,
 
 	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),
 
+	/*
+	 * When a work item is off queue, its high bits point to the last
+	 * pool it was on.  Cap at 31 bits and use the highest number to
+	 * indicate that no pool is associated.
+	 */
 	WORK_OFFQ_FLAG_BITS	= 1,
-	WORK_OFFQ_CPU_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
+	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
+	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
+	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
+	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,
 
 	/* convenience constants */
 	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
 	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
-	WORK_STRUCT_NO_CPU	= (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT,
+	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
 
 	/* bit mask for work_busy() return values */
 	WORK_BUSY_PENDING	= 1 << 0,
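The hunk above replaces the CPU-based off-queue encoding with a pool-based one: when a work item is not on a queue, the high bits of its `data` word record the ID of the worker pool it last ran on, and `WORK_OFFQ_POOL_NONE` (all pool bits set) means no pool is associated. As a rough, standalone illustration of that packing, here is a userspace C sketch; the `EX_*` constants, the 64-bit `long` assumption, and the `pack_offq_pool()`/`unpack_offq_pool()` helpers are invented for the example and are not part of the kernel API.

```c
#include <stdio.h>

/*
 * Constants mirroring the enum above, assuming WORK_OFFQ_FLAG_BASE == 8
 * and a 64-bit long; the real values depend on the kernel configuration.
 */
#define EX_OFFQ_FLAG_BASE	8UL
#define EX_OFFQ_FLAG_BITS	1UL
#define EX_OFFQ_POOL_SHIFT	(EX_OFFQ_FLAG_BASE + EX_OFFQ_FLAG_BITS)
#define EX_OFFQ_LEFT		(64UL - EX_OFFQ_POOL_SHIFT)
#define EX_OFFQ_POOL_BITS	(EX_OFFQ_LEFT <= 31 ? EX_OFFQ_LEFT : 31)
#define EX_OFFQ_POOL_NONE	((1UL << EX_OFFQ_POOL_BITS) - 1)

/* Hypothetical helpers: record/read the last pool of an off-queue item. */
static unsigned long pack_offq_pool(unsigned long flags, unsigned long pool_id)
{
	return flags | (pool_id << EX_OFFQ_POOL_SHIFT);
}

static unsigned long unpack_offq_pool(unsigned long data)
{
	return data >> EX_OFFQ_POOL_SHIFT;
}

int main(void)
{
	unsigned long data = pack_offq_pool(0, 5);	/* last ran on pool 5 */

	printf("pool id: %lu\n", unpack_offq_pool(data));	/* prints 5 */
	printf("no-pool marker: %#lx\n", EX_OFFQ_POOL_NONE);
	return 0;
}
```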
@@ -95,13 +102,16 @@ struct work_struct {
 #endif
 };
 
-#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
+#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
 #define WORK_DATA_STATIC_INIT()	\
-	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)
+	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
 
 struct delayed_work {
 	struct work_struct work;
 	struct timer_list timer;
+
+	/* target workqueue and CPU ->timer uses to queue ->work */
+	struct workqueue_struct *wq;
 	int cpu;
 };
 
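The hunk above switches the initializers to `WORK_STRUCT_NO_POOL` and adds a `wq` pointer to `struct delayed_work`, so a delayed item itself records the target workqueue (and CPU) its timer should queue the work on. For context, a minimal sketch of how a delayed work item is declared and queued with the existing API; the workqueue name `"ex_wq"`, the handler `ex_handler()`, and the two-second delay are placeholders.

```c
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static void ex_handler(struct work_struct *work)
{
	/* runs in worker context roughly two seconds after queueing */
}

static DECLARE_DELAYED_WORK(ex_dwork, ex_handler);

static int ex_setup(void)
{
	/* Placeholder workqueue; any existing workqueue would also do. */
	struct workqueue_struct *ex_wq = alloc_workqueue("ex_wq", 0, 0);

	if (!ex_wq)
		return -ENOMEM;

	/* ->timer is armed here; when it fires, ->work is queued on ex_wq */
	queue_delayed_work(ex_wq, &ex_dwork, msecs_to_jiffies(2000));
	return 0;
}
```

Keeping the workqueue in the item is what lets `delayed_work_timer_fn()` queue `->work` on the right workqueue once the timer fires.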
@@ -426,7 +436,6 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
 				     int max_active);
 extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
-extern unsigned int work_cpu(struct work_struct *work);
 extern unsigned int work_busy(struct work_struct *work);
 
 /*
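The final hunk drops `work_cpu()` from the exported interface, in line with the move from CPU-based to pool-based off-queue tracking; `work_busy()` remains for querying an item's state against the `WORK_BUSY_*` mask. A short illustrative sketch follows; the reporting helper and its messages are made up for the example.

```c
#include <linux/workqueue.h>
#include <linux/printk.h>

/* Report the busy state of an existing work item (placeholder helper). */
static void ex_report_state(struct work_struct *work)
{
	unsigned int state = work_busy(work);

	if (state & WORK_BUSY_PENDING)
		pr_info("work item is queued but not yet running\n");
	if (state & WORK_BUSY_RUNNING)
		pr_info("work item is currently executing\n");
}
```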
