 -rw-r--r--  include/linux/workqueue.h |  32
 -rw-r--r--  kernel/workqueue.c        |  16
 -rw-r--r--  mm/mincore.c              | 190
 3 files changed, 116 insertions(+), 122 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 5b13dcf02714..2a7b38d87018 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -8,16 +8,21 @@
 #include <linux/timer.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <asm/atomic.h>
 
 struct workqueue_struct;
 
 struct work_struct;
 typedef void (*work_func_t)(struct work_struct *work);
 
+/*
+ * The first word is the work queue pointer and the flags rolled into
+ * one
+ */
+#define work_data_bits(work) ((unsigned long *)(&(work)->data))
+
 struct work_struct {
-	/* the first word is the work queue pointer and the flags rolled into
-	 * one */
-	unsigned long management;
+	atomic_long_t data;
 #define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
 #define WORK_STRUCT_NOAUTOREL 1		/* F if work item automatically released on exec */
 #define WORK_STRUCT_FLAG_MASK (3UL)
@@ -26,6 +31,9 @@ struct work_struct {
 	work_func_t func;
 };
 
+#define WORK_DATA_INIT(autorelease) \
+	ATOMIC_LONG_INIT((autorelease) << WORK_STRUCT_NOAUTOREL)
+
 struct delayed_work {
 	struct work_struct work;
 	struct timer_list timer;
@@ -36,13 +44,13 @@ struct execute_work {
 };
 
 #define __WORK_INITIALIZER(n, f) {				\
-	.management = 0,					\
+	.data = WORK_DATA_INIT(0),				\
 	.entry = { &(n).entry, &(n).entry },			\
 	.func = (f),						\
 	}
 
 #define __WORK_INITIALIZER_NAR(n, f) {				\
-	.management = (1 << WORK_STRUCT_NOAUTOREL),		\
+	.data = WORK_DATA_INIT(1),				\
 	.entry = { &(n).entry, &(n).entry },			\
 	.func = (f),						\
 	}
@@ -82,17 +90,21 @@ struct execute_work {
 
 /*
  * initialize all of a work item in one go
+ *
+ * NOTE! No point in using "atomic_long_set()": using a direct
+ * assignment of the work data initializer allows the compiler
+ * to generate better code.
  */
 #define INIT_WORK(_work, _func)					\
 	do {							\
-		(_work)->management = 0;			\
+		(_work)->data = (atomic_long_t) WORK_DATA_INIT(0);	\
 		INIT_LIST_HEAD(&(_work)->entry);		\
 		PREPARE_WORK((_work), (_func));			\
 	} while (0)
 
 #define INIT_WORK_NAR(_work, _func)				\
 	do {							\
-		(_work)->management = (1 << WORK_STRUCT_NOAUTOREL);	\
+		(_work)->data = (atomic_long_t) WORK_DATA_INIT(1);	\
 		INIT_LIST_HEAD(&(_work)->entry);		\
 		PREPARE_WORK((_work), (_func));			\
 	} while (0)
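
As a usage illustration (not part of the patch): a minimal, hypothetical driver-side sketch of the two-argument INIT_WORK() form touched above. The names my_dev, my_reset_handler and my_dev_* are made up; container_of(), INIT_WORK() and schedule_work() are assumed to behave as in this kernel version.

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    /* Hypothetical device embedding an auto-releasing work item. */
    struct my_dev {
    	struct work_struct reset_work;
    };

    static void my_reset_handler(struct work_struct *work)
    {
    	struct my_dev *dev = container_of(work, struct my_dev, reset_work);

    	printk(KERN_INFO "deferred reset for device %p\n", dev);
    }

    static void my_dev_init(struct my_dev *dev)
    {
    	/* .data becomes WORK_DATA_INIT(0): not pending, auto-releasing */
    	INIT_WORK(&dev->reset_work, my_reset_handler);
    }

    static void my_dev_schedule_reset(struct my_dev *dev)
    {
    	/* queueing atomically sets WORK_STRUCT_PENDING in .data */
    	schedule_work(&dev->reset_work);
    }
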
@@ -114,7 +126,7 @@ struct execute_work {
  * @work: The work item in question
  */
 #define work_pending(work)			\
-	test_bit(WORK_STRUCT_PENDING, &(work)->management)
+	test_bit(WORK_STRUCT_PENDING, work_data_bits(work))
 
 /**
  * delayed_work_pending - Find out whether a delayable work item is currently
@@ -143,7 +155,7 @@ struct execute_work {
  * This should also be used to release a delayed work item.
  */
 #define work_release(work)			\
-	clear_bit(WORK_STRUCT_PENDING, &(work)->management)
+	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
 
 
 extern struct workqueue_struct *__create_workqueue(const char *name,
@@ -188,7 +200,7 @@ static inline int cancel_delayed_work(struct delayed_work *work)
 
 	ret = del_timer_sync(&work->timer);
 	if (ret)
-		clear_bit(WORK_STRUCT_PENDING, &work->work.management);
+		work_release(&work->work);
 	return ret;
 }
 
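
To make the new layout concrete: the two flag bits and the owning workqueue pointer now share the single atomic_long_t data word, which is why work_data_bits() above casts &data to unsigned long * for the bit operations, and why kernel/workqueue.c below masks with WORK_STRUCT_FLAG_MASK and WORK_STRUCT_WQ_DATA_MASK. A standalone userspace sketch of that packing (plain unsigned long instead of atomic_long_t; defining WORK_STRUCT_WQ_DATA_MASK as the complement of the flag mask is an assumption mirroring its use in set_wq_data()/get_wq_data()):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WORK_STRUCT_PENDING	0	/* bit 0, as in the patch */
    #define WORK_STRUCT_NOAUTOREL	1	/* bit 1, as in the patch */
    #define WORK_STRUCT_FLAG_MASK	(3UL)
    #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)	/* assumed definition */

    struct fake_wq { int dummy; } my_queue;	/* stands in for struct workqueue_struct */

    int main(void)
    {
    	unsigned long data;

    	/* The queue object is at least 4-byte aligned, so its low two bits are free... */
    	assert(((uintptr_t)&my_queue & WORK_STRUCT_FLAG_MASK) == 0);

    	/* ...which lets flags and pointer share one word, as set_wq_data() does. */
    	data = (uintptr_t)&my_queue | (1UL << WORK_STRUCT_PENDING);

    	struct fake_wq *wq = (struct fake_wq *)(data & WORK_STRUCT_WQ_DATA_MASK);
    	printf("pending=%lu, pointer recovered=%d\n",
    	       (data >> WORK_STRUCT_PENDING) & 1UL, wq == &my_queue);
    	return 0;
    }
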
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index db49886bfae1..742cbbe49bdc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -96,13 +96,13 @@ static inline void set_wq_data(struct work_struct *work, void *wq)
 	BUG_ON(!work_pending(work));
 
 	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
-	new |= work->management & WORK_STRUCT_FLAG_MASK;
-	work->management = new;
+	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
+	atomic_long_set(&work->data, new);
 }
 
 static inline void *get_wq_data(struct work_struct *work)
 {
-	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
 static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
@@ -133,7 +133,7 @@ static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work
 		list_del_init(&work->entry);
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
-		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
 			work_release(work);
 		f(work);
 
@@ -206,7 +206,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret = 0, cpu = get_cpu();
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		if (unlikely(is_single_threaded(wq)))
 			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
@@ -248,7 +248,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 	if (delay == 0)
 		return queue_work(wq, work);
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
@@ -280,7 +280,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
@@ -321,7 +321,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
 		BUG_ON(get_wq_data(work) != cwq);
-		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
 			work_release(work);
 		f(work);
 
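
All of the queue_work()/queue_delayed_work() hunks above follow one protocol: the caller that wins the atomic test-and-set of WORK_STRUCT_PENDING owns the right to put the item on a queue, and work_release() (a clear_bit) hands that right back. A minimal userspace sketch of the same claim/release idea using C11 atomics rather than the kernel's bitops (item, item_claim and item_release are made-up names):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PENDING_BIT 0

    struct item {
    	atomic_ulong data;	/* low bits: flags; the rest could carry a pointer, as in work_struct */
    };

    /* Claim the item: true only for the one caller that flips the bit 0 -> 1. */
    static bool item_claim(struct item *it)
    {
    	unsigned long old = atomic_fetch_or(&it->data, 1UL << PENDING_BIT);
    	return !(old & (1UL << PENDING_BIT));
    }

    /* Release the item so it can be claimed (queued) again. */
    static void item_release(struct item *it)
    {
    	atomic_fetch_and(&it->data, ~(1UL << PENDING_BIT));
    }

    int main(void)
    {
    	struct item it = { .data = 0 };

    	printf("first claim:   %d\n", item_claim(&it));	/* 1: we set the bit */
    	printf("second claim:  %d\n", item_claim(&it));	/* 0: already pending */
    	item_release(&it);
    	printf("after release: %d\n", item_claim(&it));	/* 1 again */
    	return 0;
    }
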
diff --git a/mm/mincore.c b/mm/mincore.c
index 72890780c1c9..b44d7f875cb6 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -1,7 +1,7 @@
 /*
  * linux/mm/mincore.c
  *
- * Copyright (C) 1994-1999  Linus Torvalds
+ * Copyright (C) 1994-2006  Linus Torvalds
  */
 
 /*
@@ -38,46 +38,60 @@ static unsigned char mincore_page(struct vm_area_struct * vma,
 	return present;
 }
 
-static long mincore_vma(struct vm_area_struct * vma,
-	unsigned long start, unsigned long end, unsigned char __user * vec)
+/*
+ * Do a chunk of "sys_mincore()". We've already checked
+ * all the arguments, we hold the mmap semaphore: we should
+ * just return the amount of info we're asked for.
+ */
+static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pages)
 {
-	long error, i, remaining;
-	unsigned char * tmp;
+	unsigned long i, nr, pgoff;
+	struct vm_area_struct *vma = find_vma(current->mm, addr);
 
-	error = -ENOMEM;
-	if (!vma->vm_file)
-		return error;
-
-	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-	if (end > vma->vm_end)
-		end = vma->vm_end;
-	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+	/*
+	 * find_vma() didn't find anything: the address
+	 * is above everything we have mapped.
+	 */
+	if (!vma) {
+		memset(vec, 0, pages);
+		return pages;
+	}
 
-	error = -EAGAIN;
-	tmp = (unsigned char *) __get_free_page(GFP_KERNEL);
-	if (!tmp)
-		return error;
+	/*
+	 * find_vma() found something, but we might be
+	 * below it: check for that.
+	 */
+	if (addr < vma->vm_start) {
+		unsigned long gap = (vma->vm_start - addr) >> PAGE_SHIFT;
+		if (gap > pages)
+			gap = pages;
+		memset(vec, 0, gap);
+		return gap;
+	}
 
-	/* (end - start) is # of pages, and also # of bytes in "vec */
-	remaining = (end - start),
+	/*
+	 * Ok, got it. But check whether it's a segment we support
+	 * mincore() on. Right now, we don't do any anonymous mappings.
+	 */
+	if (!vma->vm_file)
+		return -ENOMEM;
 
-	error = 0;
-	for (i = 0; remaining > 0; remaining -= PAGE_SIZE, i++) {
-		int j = 0;
-		long thispiece = (remaining < PAGE_SIZE) ?
-			remaining : PAGE_SIZE;
+	/*
+	 * Calculate how many pages there are left in the vma, and
+	 * what the pgoff is for our address.
+	 */
+	nr = (vma->vm_end - addr) >> PAGE_SHIFT;
+	if (nr > pages)
+		nr = pages;
 
-		while (j < thispiece)
-			tmp[j++] = mincore_page(vma, start++);
+	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
+	pgoff += vma->vm_pgoff;
 
-		if (copy_to_user(vec + PAGE_SIZE * i, tmp, thispiece)) {
-			error = -EFAULT;
-			break;
-		}
-	}
+	/* And then we just fill the sucker in.. */
+	for (i = 0 ; i < nr; i++, pgoff++)
+		vec[i] = mincore_page(vma, pgoff);
 
-	free_page((unsigned long) tmp);
-	return error;
+	return nr;
 }
 
 /*
@@ -107,82 +121,50 @@ static long mincore_vma(struct vm_area_struct * vma,
 asmlinkage long sys_mincore(unsigned long start, size_t len,
 	unsigned char __user * vec)
 {
-	int index = 0;
-	unsigned long end, limit;
-	struct vm_area_struct * vma;
-	size_t max;
-	int unmapped_error = 0;
-	long error;
-
-	/* check the arguments */
-	if (start & ~PAGE_CACHE_MASK)
-		goto einval;
-
-	limit = TASK_SIZE;
-	if (start >= limit)
-		goto enomem;
-
-	if (!len)
-		return 0;
+	long retval;
+	unsigned long pages;
+	unsigned char *tmp;
 
-	max = limit - start;
-	len = PAGE_CACHE_ALIGN(len);
-	if (len > max || !len)
-		goto enomem;
-
-	end = start + len;
+	/* Check the start address: needs to be page-aligned.. */
+	if (start & ~PAGE_CACHE_MASK)
+		return -EINVAL;
 
-	/* check the output buffer whilst holding the lock */
-	error = -EFAULT;
-	down_read(&current->mm->mmap_sem);
+	/* ..and we need to be passed a valid user-space range */
+	if (!access_ok(VERIFY_READ, (void __user *) start, len))
+		return -ENOMEM;
 
-	if (!access_ok(VERIFY_WRITE, vec, len >> PAGE_SHIFT))
-		goto out;
+	/* This also avoids any overflows on PAGE_CACHE_ALIGN */
+	pages = len >> PAGE_SHIFT;
+	pages += (len & ~PAGE_MASK) != 0;
 
-	/*
-	 * If the interval [start,end) covers some unmapped address
-	 * ranges, just ignore them, but return -ENOMEM at the end.
-	 */
-	error = 0;
-
-	vma = find_vma(current->mm, start);
-	while (vma) {
-		/* Here start < vma->vm_end. */
-		if (start < vma->vm_start) {
-			unmapped_error = -ENOMEM;
-			start = vma->vm_start;
-		}
+	if (!access_ok(VERIFY_WRITE, vec, pages))
+		return -EFAULT;
 
-		/* Here vma->vm_start <= start < vma->vm_end. */
-		if (end <= vma->vm_end) {
-			if (start < end) {
-				error = mincore_vma(vma, start, end,
-							&vec[index]);
-				if (error)
-					goto out;
-			}
-			error = unmapped_error;
-			goto out;
+	tmp = (void *) __get_free_page(GFP_USER);
+	if (!tmp)
+		return -ENOMEM;
+
+	retval = 0;
+	while (pages) {
+		/*
+		 * Do at most PAGE_SIZE entries per iteration, due to
+		 * the temporary buffer size.
+		 */
+		down_read(&current->mm->mmap_sem);
+		retval = do_mincore(start, tmp, max(pages, PAGE_SIZE));
+		up_read(&current->mm->mmap_sem);
+
+		if (retval <= 0)
+			break;
+		if (copy_to_user(vec, tmp, retval)) {
+			retval = -EFAULT;
+			break;
 		}
-
-		/* Here vma->vm_start <= start < vma->vm_end < end. */
-		error = mincore_vma(vma, start, vma->vm_end, &vec[index]);
-		if (error)
-			goto out;
-		index += (vma->vm_end - start) >> PAGE_CACHE_SHIFT;
-		start = vma->vm_end;
-		vma = vma->vm_next;
+		pages -= retval;
+		vec += retval;
+		start += retval << PAGE_SHIFT;
+		retval = 0;
 	}
-
-	/* we found a hole in the area queried if we arrive here */
-	error = -ENOMEM;
-
-out:
-	up_read(&current->mm->mmap_sem);
-	return error;
-
-einval:
-	return -EINVAL;
-enomem:
-	return -ENOMEM;
+	free_page((unsigned long) tmp);
+	return retval;
 }
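
Worth noting about the chunking loop above: its comment promises "at most PAGE_SIZE entries per iteration, due to the temporary buffer size", yet the call passes max(pages, PAGE_SIZE). Taken literally that lets do_mincore() write more than one page's worth of bytes into the single-page tmp buffer whenever more than PAGE_SIZE pages remain, so min(pages, PAGE_SIZE) appears to be the intent.

For reference, a small self-contained userspace sketch of the syscall this code implements; the file name "somefile" and the minimal error handling are illustrative only:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
    	struct stat st;
    	int fd = open("somefile", O_RDONLY);

    	if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
    		return 1;

    	long page = sysconf(_SC_PAGESIZE);
    	size_t pages = (st.st_size + page - 1) / page;
    	void *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
    	unsigned char *vec = malloc(pages);	/* one byte per page, as the kernel fills it */

    	if (map == MAP_FAILED || !vec)
    		return 1;

    	if (mincore(map, st.st_size, vec) == 0) {
    		size_t resident = 0;
    		for (size_t i = 0; i < pages; i++)
    			resident += vec[i] & 1;	/* bit 0: page is resident */
    		printf("%zu of %zu pages resident\n", resident, pages);
    	}

    	munmap(map, st.st_size);
    	free(vec);
    	close(fd);
    	return 0;
    }

The vec array gets one byte per page of the mapping, which is exactly what do_mincore() fills in on the kernel side, and bit 0 of each byte reports whether that page is resident.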