author		David Howells <dhowells@redhat.com>	2006-11-22 09:54:49 -0500
committer	David Howells <dhowells@redhat.com>	2006-11-22 09:54:49 -0500
commit		365970a1ea76d81cb1ad2f652acb605f06dae256 (patch)
tree		d2a34e397a4c2d9d0c27ceb0854752afe143c100 /kernel
parent		6bb49e5965c1fc399b4d3cd2b5cf2da535b330c0 (diff)
WorkStruct: Merge the pending bit into the wq_data pointer
Reclaim a word from the size of the work_struct by folding the pending bit and the wq_data pointer together.  This shouldn't cause misalignment problems as all pointers should be at least 4-byte aligned.

Signed-Off-By: David Howells <dhowells@redhat.com>
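The packing works because a pointer to any object aligned to at least 4 bytes always has its low two bits clear, so one of those bits can double as the pending flag without the structure growing by a word. A minimal userspace sketch of that scheme (illustrative only; packed_word, FLAG_MASK, set_ptr and get_ptr are hypothetical names, not the kernel's):

#include <assert.h>
#include <stdio.h>

#define PENDING_BIT  0
#define FLAG_MASK    ((1UL << 2) - 1)   /* low two bits reserved for flags */
#define PTR_MASK     (~FLAG_MASK)       /* the rest of the word holds the pointer */

struct packed_word {
	unsigned long management;       /* flag bits and pointer share one word */
};

/* Store a pointer without disturbing whatever flag bits are already set. */
static void set_ptr(struct packed_word *w, void *p)
{
	assert(((unsigned long)p & FLAG_MASK) == 0);    /* needs >= 4-byte alignment */
	w->management = ((unsigned long)p & PTR_MASK) | (w->management & FLAG_MASK);
}

/* Recover the pointer by masking the flag bits back off. */
static void *get_ptr(const struct packed_word *w)
{
	return (void *)(w->management & PTR_MASK);
}

int main(void)
{
	static int target;                              /* suitably aligned object */
	struct packed_word w = { .management = 1UL << PENDING_BIT };

	set_ptr(&w, &target);
	printf("pending=%lu ptr_ok=%d\n",
	       (w.management >> PENDING_BIT) & 1UL,
	       get_ptr(&w) == (void *)&target);
	return 0;
}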
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	41
1 file changed, 32 insertions(+), 9 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1e9d61ecf762..967479756511 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -80,6 +80,29 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
 	return list_empty(&wq->list);
 }
 
+static inline void set_wq_data(struct work_struct *work, void *wq)
+{
+	unsigned long new, old, res;
+
+	/* assume the pending flag is already set and that the task has already
+	 * been queued on this workqueue */
+	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+	res = work->management;
+	if (res != new) {
+		do {
+			old = res;
+			new = (unsigned long) wq;
+			new |= (old & WORK_STRUCT_FLAG_MASK);
+			res = cmpxchg(&work->management, old, new);
+		} while (res != old);
+	}
+}
+
+static inline void *get_wq_data(struct work_struct *work)
+{
+	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+}
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
@@ -87,7 +110,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 	unsigned long flags;
 
 	spin_lock_irqsave(&cwq->lock, flags);
-	work->wq_data = cwq;
+	set_wq_data(work, cwq);
 	list_add_tail(&work->entry, &cwq->worklist);
 	cwq->insert_sequence++;
 	wake_up(&cwq->more_work);
@@ -108,7 +131,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret = 0, cpu = get_cpu();
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		if (unlikely(is_single_threaded(wq)))
 			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
@@ -123,7 +146,7 @@ EXPORT_SYMBOL_GPL(queue_work);
 static void delayed_work_timer_fn(unsigned long __data)
 {
 	struct delayed_work *dwork = (struct delayed_work *)__data;
-	struct workqueue_struct *wq = dwork->work.wq_data;
+	struct workqueue_struct *wq = get_wq_data(&dwork->work);
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
@@ -150,12 +173,12 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 	if (delay == 0)
 		return queue_work(wq, work);
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		/* This stores wq for the moment, for the timer_fn */
-		work->wq_data = wq;
+		set_wq_data(work, wq);
 		timer->expires = jiffies + delay;
 		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
@@ -182,12 +205,12 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		/* This stores wq for the moment, for the timer_fn */
-		work->wq_data = wq;
+		set_wq_data(work, wq);
 		timer->expires = jiffies + delay;
 		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
@@ -223,8 +246,8 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		list_del_init(cwq->worklist.next);
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
-		BUG_ON(work->wq_data != cwq);
-		clear_bit(0, &work->pending);
+		BUG_ON(get_wq_data(work) != cwq);
+		clear_bit(WORK_STRUCT_PENDING, &work->management);
 		f(data);
 
 		spin_lock_irqsave(&cwq->lock, flags);
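For reference, the claim-before-queue pattern that queue_work() and run_workqueue() implement above with test_and_set_bit()/clear_bit() on WORK_STRUCT_PENDING can be sketched in userspace with C11 atomics. This is a standalone illustration, not the kernel API; try_claim and the bare management word are hypothetical names.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PENDING_BIT 0

static _Atomic unsigned long management;        /* stands in for work->management */

/* Returns true only for the caller that flipped the bit from 0 to 1,
 * i.e. the one entitled to queue the work item. */
static bool try_claim(void)
{
	unsigned long old = atomic_fetch_or(&management, 1UL << PENDING_BIT);
	return !(old & (1UL << PENDING_BIT));
}

int main(void)
{
	printf("first claim:  %d\n", try_claim());  /* 1: bit was clear, we may queue */
	printf("second claim: %d\n", try_claim());  /* 0: already pending, do nothing */
	atomic_fetch_and(&management, ~(1UL << PENDING_BIT));   /* worker clears it */
	printf("after clear:  %d\n", try_claim());  /* 1: can be queued again */
	return 0;
}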