Diffstat (limited to 'lib')

 -rw-r--r--  lib/Makefile          |   3
 -rw-r--r--  lib/argv_split.c      |   4
 -rw-r--r--  lib/bust_spinlocks.c  |   6
 -rw-r--r--  lib/idr.c             |   3
 -rw-r--r--  lib/iomap.c           |   2
 -rw-r--r--  lib/ioremap.c         |   1
 -rw-r--r--  lib/percpu_counter.c  |  48
 -rw-r--r--  lib/proportions.c     | 384
 -rw-r--r--  lib/radix-tree.c      |  16
 -rw-r--r--  lib/sort.c            |   2

10 files changed, 445 insertions, 24 deletions
diff --git a/lib/Makefile b/lib/Makefile
index 6c4ea33bb2cb..c5f215d509d3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -5,7 +5,8 @@
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
	 rbtree.o radix-tree.o dump_stack.o \
	 idr.o int_sqrt.o bitmap.o extable.o prio_tree.o \
-	 sha1.o irq_regs.o reciprocal_div.o argv_split.o
+	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
+	 proportions.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 4096ed42f490..fad6ce4f7b57 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -75,7 +75,9 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp)
 	if (argv == NULL)
 		goto out;
 
-	*argcp = argc;
+	if (argcp)
+		*argcp = argc;
+
 	argvp = argv;
 
 	while (*str) {
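
With argcp now checked for NULL, callers that only want the NULL-terminated
argv array no longer have to supply a count variable. A minimal, hedged
sketch of such a caller (the helper name and error handling are
illustrative, not part of this patch):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical caller: split a command line, ignoring the argument count. */
static int example_run(const char *cmdline)
{
	char **argv = argv_split(GFP_KERNEL, cmdline, NULL);	/* NULL argcp is now allowed */

	if (!argv)
		return -ENOMEM;
	/* ... use argv[0], argv[1], ... up to the NULL terminator ... */
	argv_free(argv);
	return 0;
}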
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index accb35658169..486da62b2b07 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -17,13 +17,13 @@
 void __attribute__((weak)) bust_spinlocks(int yes)
 {
 	if (yes) {
-		oops_in_progress = 1;
+		++oops_in_progress;
 	} else {
 #ifdef CONFIG_VT
 		unblank_screen();
 #endif
-		oops_in_progress = 0;
-		wake_up_klogd();
+		if (--oops_in_progress == 0)
+			wake_up_klogd();
 	}
 }
 
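
The oops path can now nest: each bust_spinlocks(1) increments
oops_in_progress and must be paired with a bust_spinlocks(0), and klogd is
only woken once the outermost level drops the count back to zero. A hedged
sketch of the intended calling pattern (the surrounding function is
illustrative):

/* Illustrative die()-style path; nested calls are now safe. */
static void example_oops_path(void)
{
	bust_spinlocks(1);	/* ++oops_in_progress */
	/* ... dump registers and a backtrace; may oops again and re-enter ... */
	bust_spinlocks(0);	/* --oops_in_progress; wakes klogd at zero */
}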
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -580,8 +580,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 }
 EXPORT_SYMBOL(idr_replace);
 
-static void idr_cache_ctor(void * idr_layer, struct kmem_cache *idr_layer_cache,
-		unsigned long flags)
+static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
 {
 	memset(idr_layer, 0, sizeof(struct idr_layer));
 }
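
This follows the slab API of this kernel generation, where a cache
constructor receives the cache and the object and the flags argument is
gone. A hedged sketch of how such a constructor is registered (the cache
name and the SLAB_PANIC flag are assumptions for illustration):

#include <linux/slab.h>

static struct kmem_cache *idr_layer_cache;

static int __init example_idr_cache_init(void)
{
	/* ctor now uses the (struct kmem_cache *, void *) ordering seen above */
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
			sizeof(struct idr_layer), 0, SLAB_PANIC, idr_cache_ctor);
	return 0;
}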
diff --git a/lib/iomap.c b/lib/iomap.c
index 864f2ec1966e..72c42687ba10 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -40,7 +40,7 @@ static void bad_io_access(unsigned long port, const char *access)
 	static int count = 10;
 	if (count) {
 		count--;
-		printk(KERN_ERR "Bad IO access at port %lx (%s)\n", port, access);
+		printk(KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
 		WARN_ON(1);
 	}
 }
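
The added '#' flag makes printk emit the 0x radix prefix, so the port number
can no longer be misread as decimal. A small illustration (the port value is
made up):

printk(KERN_ERR "Bad IO access at port %#lx (%s)\n", 0x3f8UL, "inb");
/* before the patch: "Bad IO access at port 3f8 (inb)"
 * after the patch:  "Bad IO access at port 0x3f8 (inb)"
 */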
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 760521417b69..14c6078f17a2 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -8,6 +8,7 @@
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/io.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index cf22c617baa4..9659eabffc31 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -14,15 +14,29 @@ static LIST_HEAD(percpu_counters);
 static DEFINE_MUTEX(percpu_counters_lock);
 #endif
 
-void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
+void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
-	long count;
+	int cpu;
+
+	spin_lock(&fbc->lock);
+	for_each_possible_cpu(cpu) {
+		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+		*pcount = 0;
+	}
+	fbc->count = amount;
+	spin_unlock(&fbc->lock);
+}
+EXPORT_SYMBOL(percpu_counter_set);
+
+void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+{
+	s64 count;
 	s32 *pcount;
 	int cpu = get_cpu();
 
 	pcount = per_cpu_ptr(fbc->counters, cpu);
 	count = *pcount + amount;
-	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
+	if (count >= batch || count <= -batch) {
 		spin_lock(&fbc->lock);
 		fbc->count += count;
 		*pcount = 0;
@@ -32,13 +46,13 @@ void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
 	}
 	put_cpu();
 }
-EXPORT_SYMBOL(percpu_counter_mod);
+EXPORT_SYMBOL(__percpu_counter_add);
 
 /*
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 percpu_counter_sum(struct percpu_counter *fbc)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
 	s64 ret;
 	int cpu;
@@ -50,25 +64,43 @@ s64 percpu_counter_sum(struct percpu_counter *fbc)
 		ret += *pcount;
 	}
 	spin_unlock(&fbc->lock);
-	return ret < 0 ? 0 : ret;
+	return ret;
 }
-EXPORT_SYMBOL(percpu_counter_sum);
+EXPORT_SYMBOL(__percpu_counter_sum);
+
+static struct lock_class_key percpu_counter_irqsafe;
 
-void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
 {
 	spin_lock_init(&fbc->lock);
 	fbc->count = amount;
 	fbc->counters = alloc_percpu(s32);
+	if (!fbc->counters)
+		return -ENOMEM;
 #ifdef CONFIG_HOTPLUG_CPU
 	mutex_lock(&percpu_counters_lock);
 	list_add(&fbc->list, &percpu_counters);
 	mutex_unlock(&percpu_counters_lock);
 #endif
+	return 0;
 }
 EXPORT_SYMBOL(percpu_counter_init);
 
+int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount)
+{
+	int err;
+
+	err = percpu_counter_init(fbc, amount);
+	if (!err)
+		lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe);
+	return err;
+}
+
 void percpu_counter_destroy(struct percpu_counter *fbc)
 {
+	if (!fbc->counters)
+		return;
+
 	free_percpu(fbc->counters);
 #ifdef CONFIG_HOTPLUG_CPU
 	mutex_lock(&percpu_counters_lock);
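
Since percpu_counter_init() can now fail with -ENOMEM, callers are expected
to check its result and pair it with percpu_counter_destroy(), which now
tolerates a counter whose allocation never succeeded. A hedged usage sketch
against the new API (the struct and function names are illustrative):

#include <linux/percpu_counter.h>

struct example_stats {
	struct percpu_counter nr_events;
};

static int example_stats_init(struct example_stats *s)
{
	int err = percpu_counter_init(&s->nr_events, 0);

	if (err)
		return err;			/* -ENOMEM */

	/* batch the per-cpu deltas explicitly; 32 is an arbitrary example */
	__percpu_counter_add(&s->nr_events, 1, 32);
	return 0;
}

static void example_stats_exit(struct example_stats *s)
{
	percpu_counter_destroy(&s->nr_events);	/* safe even after a failed init */
}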
diff --git a/lib/proportions.c b/lib/proportions.c
new file mode 100644
index 000000000000..332d8c58184d
--- /dev/null
+++ b/lib/proportions.c
@@ -0,0 +1,384 @@
+/*
+ * Floating proportions
+ *
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * Description:
+ *
+ * The floating proportion is a time derivative with an exponentially decaying
+ * history:
+ *
+ *   p_{j} = \Sum_{i=0} (dx_{j}/dt_{-i}) / 2^(1+i)
+ *
+ * Where j is an element from {prop_local}, x_{j} is j's number of events,
+ * and i the time period over which the differential is taken. So d/dt_{-i} is
+ * the differential over the i-th last period.
+ *
+ * The decaying history gives smooth transitions. The time differential carries
+ * the notion of speed.
+ *
+ * The denominator is 2^(1+i) because we want the series to be normalised, ie.
+ *
+ *   \Sum_{i=0} 1/2^(1+i) = 1
+ *
+ * Furthermore, if we measure time (t) in the same events as x, so that:
+ *
+ *   t = \Sum_{j} x_{j}
+ *
+ * we get that:
+ *
+ *   \Sum_{j} p_{j} = 1
+ *
+ * Writing this in an iterative fashion we get (dropping the 'd's):
+ *
+ *   if (++x_{j}, ++t > period)
+ *     t /= 2;
+ *     for_each (j)
+ *       x_{j} /= 2;
+ *
+ * so that:
+ *
+ *   p_{j} = x_{j} / t;
+ *
+ * We optimize away the '/= 2' for the global time delta by noting that:
+ *
+ *   if (++t > period) t /= 2;
+ *
+ * can be approximated by:
+ *
+ *   period/2 + (++t % period/2)
+ *
+ * [ Furthermore, when we choose period to be 2^n it can be written in terms of
+ *   binary operations and wraparound artefacts disappear. ]
+ *
+ * Also note that this yields a natural counter of the elapsed periods:
+ *
+ *   c = t / (period/2)
+ *
+ * [ Its monotonic increasing property can be applied to mitigate the wrap-
+ *   around issue. ]
+ *
+ * This allows us to do away with the loop over all prop_locals on each period
+ * expiration. By remembering the period count under which it was last accessed
+ * as c_{j}, we can obtain the number of 'missed' cycles from:
+ *
+ *   c - c_{j}
+ *
+ * We can then lazily catch up to the global period count every time we are
+ * going to use x_{j}, by doing:
+ *
+ *   x_{j} /= 2^(c - c_{j}), c_{j} = c
+ */
+
+#include <linux/proportions.h>
+#include <linux/rcupdate.h>
+
+/*
+ * Limit the time part in order to ensure there are some bits left for the
+ * cycle counter.
+ */
+#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
+
+int prop_descriptor_init(struct prop_descriptor *pd, int shift)
+{
+	int err;
+
+	if (shift > PROP_MAX_SHIFT)
+		shift = PROP_MAX_SHIFT;
+
+	pd->index = 0;
+	pd->pg[0].shift = shift;
+	mutex_init(&pd->mutex);
+	err = percpu_counter_init_irq(&pd->pg[0].events, 0);
+	if (err)
+		goto out;
+
+	err = percpu_counter_init_irq(&pd->pg[1].events, 0);
+	if (err)
+		percpu_counter_destroy(&pd->pg[0].events);
+
+out:
+	return err;
+}
+
+/*
+ * We have two copies, and flip between them to make it seem like an atomic
+ * update. The update is not really atomic wrt the events counter, but
+ * it is internally consistent with the bit layout depending on shift.
+ *
+ * We copy the events count, move the bits around and flip the index.
+ */
+void prop_change_shift(struct prop_descriptor *pd, int shift)
+{
+	int index;
+	int offset;
+	u64 events;
+	unsigned long flags;
+
+	if (shift > PROP_MAX_SHIFT)
+		shift = PROP_MAX_SHIFT;
+
+	mutex_lock(&pd->mutex);
+
+	index = pd->index ^ 1;
+	offset = pd->pg[pd->index].shift - shift;
+	if (!offset)
+		goto out;
+
+	pd->pg[index].shift = shift;
+
+	local_irq_save(flags);
+	events = percpu_counter_sum(&pd->pg[pd->index].events);
+	if (offset < 0)
+		events <<= -offset;
+	else
+		events >>= offset;
+	percpu_counter_set(&pd->pg[index].events, events);
+
+	/*
+	 * ensure the new pg is fully written before the switch
+	 */
+	smp_wmb();
+	pd->index = index;
+	local_irq_restore(flags);
+
+	synchronize_rcu();
+
+out:
+	mutex_unlock(&pd->mutex);
+}
+
+/*
+ * wrap the access to the data in an rcu_read_lock() section;
+ * this is used to track the active references.
+ */
+static struct prop_global *prop_get_global(struct prop_descriptor *pd)
+{
+	int index;
+
+	rcu_read_lock();
+	index = pd->index;
+	/*
+	 * match the wmb from prop_change_shift()
+	 */
+	smp_rmb();
+	return &pd->pg[index];
+}
+
+static void prop_put_global(struct prop_descriptor *pd, struct prop_global *pg)
+{
+	rcu_read_unlock();
+}
+
+static void
+prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
+{
+	int offset = *pl_shift - new_shift;
+
+	if (!offset)
+		return;
+
+	if (offset < 0)
+		*pl_period <<= -offset;
+	else
+		*pl_period >>= offset;
+
+	*pl_shift = new_shift;
+}
+
+/*
+ * PERCPU
+ */
+
+int prop_local_init_percpu(struct prop_local_percpu *pl)
+{
+	spin_lock_init(&pl->lock);
+	pl->shift = 0;
+	pl->period = 0;
+	return percpu_counter_init_irq(&pl->events, 0);
+}
+
+void prop_local_destroy_percpu(struct prop_local_percpu *pl)
+{
+	percpu_counter_destroy(&pl->events);
+}
+
+/*
+ * Catch up with missed period expirations.
+ *
+ *   until (c_{j} == c)
+ *     x_{j} -= x_{j}/2;
+ *     c_{j}++;
+ */
+static
+void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
+{
+	unsigned long period = 1UL << (pg->shift - 1);
+	unsigned long period_mask = ~(period - 1);
+	unsigned long global_period;
+	unsigned long flags;
+
+	global_period = percpu_counter_read(&pg->events);
+	global_period &= period_mask;
+
+	/*
+	 * Fast path - check if the local and global period count still match
+	 * outside of the lock.
+	 */
+	if (pl->period == global_period)
+		return;
+
+	spin_lock_irqsave(&pl->lock, flags);
+	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
+	/*
+	 * For each missed period, we halve the local counter.
+	 * basically:
+	 *   pl->events >> (global_period - pl->period);
+	 *
+	 * but since the distributed nature of percpu counters makes division
+	 * rather hard, use a regular subtraction loop. This is safe, because
+	 * the events will only ever be incremented, hence the subtraction
+	 * can never result in a negative number.
+	 */
+	while (pl->period != global_period) {
+		unsigned long val = percpu_counter_read(&pl->events);
+		unsigned long half = (val + 1) >> 1;
+
+		/*
+		 * Half of zero won't be much less, break out.
+		 * This limits the loop to shift iterations, even
+		 * if we missed a million.
+		 */
+		if (!val)
+			break;
+
+		percpu_counter_add(&pl->events, -half);
+		pl->period += period;
+	}
+	pl->period = global_period;
+	spin_unlock_irqrestore(&pl->lock, flags);
+}
+
+/*
+ * ++x_{j}, ++t
+ */
+void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
+{
+	struct prop_global *pg = prop_get_global(pd);
+
+	prop_norm_percpu(pg, pl);
+	percpu_counter_add(&pl->events, 1);
+	percpu_counter_add(&pg->events, 1);
+	prop_put_global(pd, pg);
+}
+
+/*
+ * Obtain a fraction of this proportion
+ *
+ *   p_{j} = x_{j} / (period/2 + t % period/2)
+ */
+void prop_fraction_percpu(struct prop_descriptor *pd,
+		struct prop_local_percpu *pl,
+		long *numerator, long *denominator)
+{
+	struct prop_global *pg = prop_get_global(pd);
+	unsigned long period_2 = 1UL << (pg->shift - 1);
+	unsigned long counter_mask = period_2 - 1;
+	unsigned long global_count;
+
+	prop_norm_percpu(pg, pl);
+	*numerator = percpu_counter_read_positive(&pl->events);
+
+	global_count = percpu_counter_read(&pg->events);
+	*denominator = period_2 + (global_count & counter_mask);
+
+	prop_put_global(pd, pg);
+}
+
+/*
+ * SINGLE
+ */
+
+int prop_local_init_single(struct prop_local_single *pl)
+{
+	spin_lock_init(&pl->lock);
+	pl->shift = 0;
+	pl->period = 0;
+	pl->events = 0;
+	return 0;
+}
+
+void prop_local_destroy_single(struct prop_local_single *pl)
+{
+}
+
+/*
+ * Catch up with missed period expirations.
+ */
+static
+void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
+{
+	unsigned long period = 1UL << (pg->shift - 1);
+	unsigned long period_mask = ~(period - 1);
+	unsigned long global_period;
+	unsigned long flags;
+
+	global_period = percpu_counter_read(&pg->events);
+	global_period &= period_mask;
+
+	/*
+	 * Fast path - check if the local and global period count still match
+	 * outside of the lock.
+	 */
+	if (pl->period == global_period)
+		return;
+
+	spin_lock_irqsave(&pl->lock, flags);
+	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
+	/*
+	 * For each missed period, we halve the local counter.
+	 */
+	period = (global_period - pl->period) >> (pg->shift - 1);
+	if (likely(period < BITS_PER_LONG))
+		pl->events >>= period;
+	else
+		pl->events = 0;
+	pl->period = global_period;
+	spin_unlock_irqrestore(&pl->lock, flags);
+}
+
+/*
+ * ++x_{j}, ++t
+ */
+void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
+{
+	struct prop_global *pg = prop_get_global(pd);
+
+	prop_norm_single(pg, pl);
+	pl->events++;
+	percpu_counter_add(&pg->events, 1);
+	prop_put_global(pd, pg);
+}
+
+/*
+ * Obtain a fraction of this proportion
+ *
+ *   p_{j} = x_{j} / (period/2 + t % period/2)
+ */
+void prop_fraction_single(struct prop_descriptor *pd,
+		struct prop_local_single *pl,
+		long *numerator, long *denominator)
+{
+	struct prop_global *pg = prop_get_global(pd);
+	unsigned long period_2 = 1UL << (pg->shift - 1);
+	unsigned long counter_mask = period_2 - 1;
+	unsigned long global_count;
+
+	prop_norm_single(pg, pl);
+	*numerator = pl->events;
+
+	global_count = percpu_counter_read(&pg->events);
+	*denominator = period_2 + (global_count & counter_mask);
+
+	prop_put_global(pd, pg);
+}
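
Putting the new API together: a user initialises one global prop_descriptor
and one prop_local per competing entity, bumps the local counter on every
event, and asks for a numerator/denominator pair when the proportion is
needed. A hedged sketch (the names and the shift of 10 are illustrative, and
error unwinding is elided):

#include <linux/proportions.h>

static struct prop_descriptor example_events;	/* global event clock */
static struct prop_local_percpu example_local;	/* one per contender */

static int __init example_prop_setup(void)
{
	int err = prop_descriptor_init(&example_events, 10); /* period ~ 2^10 events */

	if (err)
		return err;
	return prop_local_init_percpu(&example_local);
}

static void example_event(void)
{
	__prop_inc_percpu(&example_events, &example_local);
}

static unsigned long example_share(unsigned long total)
{
	long num, den;

	prop_fraction_percpu(&example_events, &example_local, &num, &den);
	return total * num / den;	/* this contender's share of 'total' */
}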
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6b26f9d39800..48c250fe2233 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1042,19 +1042,21 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
 EXPORT_SYMBOL(radix_tree_tagged);
 
 static void
-radix_tree_node_ctor(void *node, struct kmem_cache *cachep, unsigned long flags)
+radix_tree_node_ctor(struct kmem_cache *cachep, void *node)
 {
 	memset(node, 0, sizeof(struct radix_tree_node));
 }
 
 static __init unsigned long __maxindex(unsigned int height)
 {
-	unsigned int tmp = height * RADIX_TREE_MAP_SHIFT;
-	unsigned long index = (~0UL >> (RADIX_TREE_INDEX_BITS - tmp - 1)) >> 1;
+	unsigned int width = height * RADIX_TREE_MAP_SHIFT;
+	int shift = RADIX_TREE_INDEX_BITS - width;
 
-	if (tmp >= RADIX_TREE_INDEX_BITS)
-		index = ~0UL;
-	return index;
+	if (shift < 0)
+		return ~0UL;
+	if (shift >= BITS_PER_LONG)
+		return 0UL;
+	return ~0UL >> shift;
 }
 
 static __init void radix_tree_init_maxindex(void)
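
The rewritten __maxindex() makes both boundary cases explicit instead of
computing a possibly out-of-range shift first, which is undefined behaviour
in C. A worked illustration, assuming the common 64-bit values
RADIX_TREE_MAP_SHIFT == 6 and RADIX_TREE_INDEX_BITS == 64:

/* height  0: shift = 64           -> >= BITS_PER_LONG -> 0UL (no indices)   */
/* height  1: shift = 64 - 6  = 58 -> ~0UL >> 58 = 0x3f      (max index 63)  */
/* height  2: shift = 64 - 12 = 52 -> ~0UL >> 52 = 0xfff     (max index 4095)*/
/* height 11: shift = 64 - 66 < 0  -> ~0UL                   (full range)    */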
diff --git a/lib/sort.c b/lib/sort.c
index 961567894d16..6abbaf3d5858 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -67,7 +67,7 @@ void sort(void *base, size_t num, size_t size,
 	}
 
 	/* sort */
-	for (i = n - size; i >= 0; i -= size) {
+	for (i = n - size; i > 0; i -= size) {
 		swap(base, base + i, size);
 		for (r = 0; r * 2 + size < i; r = c) {
 			c = r * 2 + size;
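
The pop loop of this heapsort now stops before i reaches 0, skipping a final
iteration that swapped the first element with itself and re-sifted a
one-element heap. The calling convention is unchanged; a minimal sketch of a
caller (the int comparator is illustrative):

#include <linux/kernel.h>
#include <linux/sort.h>

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

static void example_sort(void)
{
	int v[] = { 3, 1, 2 };

	/* NULL swap function selects the library's built-in byte swap */
	sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_int, NULL);
}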