path: root/lib
author    Ingo Molnar <mingo@elte.hu>    2008-07-06 08:23:39 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-07-06 08:23:39 -0400
commit    68083e05d72d94f347293d8cc0067050ba904bfa (patch)
tree      842e71365bd90866be7add181661a4039d891564 /lib
parent    7baac8b91f9871ba8cb09af84de4ae1d86d07812 (diff)
parent    b7279469d66b55119784b8b9529c99c1955fe747 (diff)
Merge commit 'v2.6.26-rc9' into cpus4096
Diffstat (limited to 'lib')
-rw-r--r--  lib/bitrev.c          3
-rw-r--r--  lib/bug.c             2
-rw-r--r--  lib/debugobjects.c   15
-rw-r--r--  lib/div64.c          10
-rw-r--r--  lib/radix-tree.c    122
-rw-r--r--  lib/ts_bm.c           2
6 files changed, 84 insertions, 70 deletions
diff --git a/lib/bitrev.c b/lib/bitrev.c
index 989aff73f881..3956203456d4 100644
--- a/lib/bitrev.c
+++ b/lib/bitrev.c
@@ -42,10 +42,11 @@ const u8 byte_rev_table[256] = {
 };
 EXPORT_SYMBOL_GPL(byte_rev_table);
 
-static __always_inline u16 bitrev16(u16 x)
+u16 bitrev16(u16 x)
 {
 	return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
 }
+EXPORT_SYMBOL(bitrev16);
 
 /**
  * bitrev32 - reverse the order of bits in a u32 value
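Note: the hunk above drops the __always_inline hint and exports bitrev16() so it can be called out of line from modules. As a rough illustration of the composition it relies on (reverse each byte, then swap the bytes), here is a small userspace sketch; rev8() and rev16() are invented names, and the swap-based rev8() merely stands in for the kernel's byte_rev_table[] lookup:

/* Illustration only -- not part of the patch. */
#include <stdint.h>

static uint8_t rev8(uint8_t x)
{
	x = (uint8_t)((x >> 4) | (x << 4));			/* swap nibbles */
	x = (uint8_t)(((x & 0xcc) >> 2) | ((x & 0x33) << 2));	/* swap bit pairs */
	x = (uint8_t)(((x & 0xaa) >> 1) | ((x & 0x55) << 1));	/* swap adjacent bits */
	return x;
}

static uint16_t rev16(uint16_t x)
{
	/* reverse each byte, then swap the two bytes */
	return (uint16_t)((rev8(x & 0xff) << 8) | rev8(x >> 8));
}

For example, rev16(0x0001) yields 0x8000: bit 0 moves to bit 15.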
diff --git a/lib/bug.c b/lib/bug.c
index 530f38f55787..bfeafd60ee9f 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -37,6 +37,7 @@
  */
 #include <linux/list.h>
 #include <linux/module.h>
+#include <linux/kernel.h>
 #include <linux/bug.h>
 #include <linux/sched.h>
 
@@ -149,6 +150,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 			(void *)bugaddr);
 
 		show_regs(regs);
+		add_taint(TAINT_WARN);
 		return BUG_TRAP_TYPE_WARN;
 	}
 
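Note: the hunk above marks the kernel as tainted whenever report_bug() handles a WARN, and the new linux/kernel.h include presumably supplies the add_taint() declaration. For illustration only, taint tracking boils down to a sticky bitmask; the EX_* names below are invented for the sketch and are not the kernel's:

/* Illustration only -- not part of the patch. */
#include <stdio.h>

enum { EX_TAINT_WARN = 1U << 0, EX_TAINT_OOPS = 1U << 1 };

static unsigned int ex_tainted_mask;

static void ex_add_taint(unsigned int flag)
{
	ex_tainted_mask |= flag;	/* sticky: set once, never cleared at runtime */
}

int main(void)
{
	ex_add_taint(EX_TAINT_WARN);
	printf("tainted mask: %#x\n", ex_tainted_mask);
	return 0;
}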
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a76a5e122ae1..85b18d79be89 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -68,6 +68,7 @@ static int fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
 	struct debug_obj *new;
+	unsigned long flags;
 
 	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
 		return obj_pool_free;
@@ -81,10 +82,10 @@ static int fill_pool(void)
 		if (!new)
 			return obj_pool_free;
 
-		spin_lock(&pool_lock);
+		spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
 		obj_pool_free++;
-		spin_unlock(&pool_lock);
+		spin_unlock_irqrestore(&pool_lock, flags);
 	}
 	return obj_pool_free;
 }
@@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 }
 
 /*
- * Allocate a new object. If the pool is empty and no refill possible,
- * switch off the debugger.
+ * Allocate a new object. If the pool is empty, switch off the debugger.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 {
 	struct debug_obj *obj = NULL;
-	int retry = 0;
 
-repeat:
 	spin_lock(&pool_lock);
 	if (obj_pool.first) {
 		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +139,6 @@ repeat:
 	}
 	spin_unlock(&pool_lock);
 
-	if (fill_pool() && !obj && !retry++)
-		goto repeat;
-
 	return obj;
 }
 
@@ -261,6 +256,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 	struct debug_obj *obj;
 	unsigned long flags;
 
+	fill_pool();
+
 	db = get_bucket((unsigned long) addr);
 
 	spin_lock_irqsave(&db->lock, flags);
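Note: two things change above. fill_pool() now takes pool_lock with the IRQ-saving lock variants, and the refill loop is removed from alloc_object() in favour of an unconditional fill_pool() call at the top of __debug_object_init(). As a minimal sketch of the save/restore locking pattern (kernel build context assumed; example_push() is an invented helper, not part of the patch):

/* Sketch only -- not part of the patch. */
#include <linux/spinlock.h>
#include <linux/list.h>

static void example_push(struct hlist_head *pool, struct hlist_node *entry,
			 spinlock_t *lock, int *free_count)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* disable local IRQs, record prior state */
	hlist_add_head(entry, pool);
	(*free_count)++;
	spin_unlock_irqrestore(lock, flags);	/* restore the recorded IRQ state */
}

Because the interrupt state is saved and restored, the same lock can be taken from both process and interrupt context without risking a deadlock against an interrupt handler on the same CPU.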
diff --git a/lib/div64.c b/lib/div64.c
index bb5bd0c0f030..a111eb8de9cf 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -98,3 +98,13 @@ EXPORT_SYMBOL(div64_u64);
 #endif
 
 #endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);
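Note: the new export above wraps __iter_div_u64_rem(), an iterative divide meant for cases where the quotient is small. One plausible userspace sketch of that approach is repeated subtraction; the function name below is invented, and this is not the kernel's implementation:

/* Illustration only -- not part of the patch. */
#include <stdint.h>

static uint32_t iter_div_sketch(uint64_t dividend, uint32_t divisor,
				uint64_t *remainder)
{
	uint32_t quotient = 0;

	while (dividend >= divisor) {
		dividend -= divisor;	/* one subtraction per quotient step */
		quotient++;
	}
	*remainder = dividend;
	return quotient;
}

For example, iter_div_sketch(7, 3, &rem) returns 2 with rem set to 1. The cost grows with the quotient, which is why the comment restricts the helper to dividends that are not much larger than the divisor.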
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bd521716ab1a..56ec21a7f73d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2001 Momchil Velikov
  * Portions Copyright (C) 2001 Christoph Hellwig
- * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
+ * Copyright (C) 2005 SGI, Christoph Lameter
  * Copyright (C) 2006 Nick Piggin
  *
  * This program is free software; you can redistribute it and/or
@@ -88,6 +88,57 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
 	return root->gfp_mask & __GFP_BITS_MASK;
 }
 
+static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
+		int offset)
+{
+	__set_bit(offset, node->tags[tag]);
+}
+
+static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
+		int offset)
+{
+	__clear_bit(offset, node->tags[tag]);
+}
+
+static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
+		int offset)
+{
+	return test_bit(offset, node->tags[tag]);
+}
+
+static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
+{
+	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+{
+	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear_all(struct radix_tree_root *root)
+{
+	root->gfp_mask &= __GFP_BITS_MASK;
+}
+
+static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
+{
+	return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
+{
+	int idx;
+	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+		if (node->tags[tag][idx])
+			return 1;
+	}
+	return 0;
+}
 /*
  * This assumes that the caller has performed appropriate preallocation, and
  * that the caller has pinned this thread of control to the current CPU.
@@ -124,6 +175,17 @@ static void radix_tree_node_rcu_free(struct rcu_head *head)
 {
 	struct radix_tree_node *node =
 			container_of(head, struct radix_tree_node, rcu_head);
+
+	/*
+	 * must only free zeroed nodes into the slab. radix_tree_shrink
+	 * can leave us with a non-NULL entry in the first slot, so clear
+	 * that here to make sure.
+	 */
+	tag_clear(node, 0, 0);
+	tag_clear(node, 1, 0);
+	node->slots[0] = NULL;
+	node->count = 0;
+
 	kmem_cache_free(radix_tree_node_cachep, node);
 }
 
@@ -165,59 +227,6 @@ out:
 }
 EXPORT_SYMBOL(radix_tree_preload);
 
-static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
-		int offset)
-{
-	__set_bit(offset, node->tags[tag]);
-}
-
-static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
-		int offset)
-{
-	__clear_bit(offset, node->tags[tag]);
-}
-
-static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
-		int offset)
-{
-	return test_bit(offset, node->tags[tag]);
-}
-
-static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
-{
-	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-
-static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
-{
-	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-static inline void root_tag_clear_all(struct radix_tree_root *root)
-{
-	root->gfp_mask &= __GFP_BITS_MASK;
-}
-
-static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
-{
-	return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
-}
-
-/*
- * Returns 1 if any slot in the node has this tag set.
- * Otherwise returns 0.
- */
-static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
-{
-	int idx;
-	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-		if (node->tags[tag][idx])
-			return 1;
-	}
-	return 0;
-}
-
 /*
  * Return the maximum key which can be store into a
  * radix tree with height HEIGHT.
@@ -930,11 +939,6 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
 			newptr = radix_tree_ptr_to_indirect(newptr);
 		root->rnode = newptr;
 		root->height--;
-		/* must only free zeroed nodes into the slab */
-		tag_clear(to_free, 0, 0);
-		tag_clear(to_free, 1, 0);
-		to_free->slots[0] = NULL;
-		to_free->count = 0;
-		radix_tree_node_free(to_free);
+		radix_tree_node_free(to_free);
 	}
 }
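Note: the radix-tree changes above move the tag helpers ahead of their first user and shift the "only free zeroed nodes into the slab" cleanup from radix_tree_shrink() into the RCU free callback, where it covers every node going back to the slab. One detail worth calling out is how root_tag_set()/root_tag_get() stash per-root tag bits in the otherwise unused high bits of gfp_mask. A standalone sketch of that packing, with invented EX_* names and an arbitrary shift value:

/* Illustration only -- not part of the patch. */
#include <stdio.h>

#define EX_GFP_BITS_SHIFT 22
#define EX_GFP_BITS_MASK  ((1u << EX_GFP_BITS_SHIFT) - 1)

struct ex_root {
	unsigned int gfp_mask;	/* low bits: allocation flags, high bits: tags */
};

static void ex_root_tag_set(struct ex_root *root, unsigned int tag)
{
	root->gfp_mask |= 1u << (tag + EX_GFP_BITS_SHIFT);
}

static int ex_root_tag_get(const struct ex_root *root, unsigned int tag)
{
	return !!(root->gfp_mask & (1u << (tag + EX_GFP_BITS_SHIFT)));
}

int main(void)
{
	struct ex_root root = { .gfp_mask = 0 };

	ex_root_tag_set(&root, 1);
	printf("tag 0: %d, tag 1: %d\n",
	       ex_root_tag_get(&root, 0), ex_root_tag_get(&root, 1));
	return 0;
}

The allocation flags stay intact because the tag bits live entirely above EX_GFP_BITS_MASK, which is the same trick the real root_tag_clear_all() relies on when it masks gfp_mask back down.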
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index d90822c378a4..4a7fce72898e 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -63,7 +63,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
 	struct ts_bm *bm = ts_config_priv(conf);
 	unsigned int i, text_len, consumed = state->offset;
 	const u8 *text;
-	int shift = bm->patlen, bs;
+	int shift = bm->patlen - 1, bs;
 
 	for (;;) {
 		text_len = conf->get_next_block(consumed, &text, conf, state);
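Note: the ts_bm.c fix starts the Boyer-Moore search with shift = patlen - 1 rather than patlen, since shift indexes the last character of the current window and the first window ends at text[patlen - 1]. A naive userspace sketch of that alignment (it drops the bad-character shift table and only illustrates where the window starts; the names are invented and this is not the kernel's code):

/* Illustration only -- not part of the patch. */
#include <stdio.h>
#include <string.h>

static int naive_bm_align(const char *text, const char *pat)
{
	size_t patlen = strlen(pat);
	size_t shift = patlen - 1;	/* index of the first window's last char */

	for (; shift < strlen(text); shift++) {
		size_t i;

		/* compare backwards from the window's last character */
		for (i = 0; i < patlen; i++)
			if (text[shift - i] != pat[patlen - 1 - i])
				break;
		if (i == patlen)
			return (int)(shift - patlen + 1);	/* match start */
	}
	return -1;
}

int main(void)
{
	printf("%d\n", naive_bm_align("abcde", "cde"));	/* prints 2 */
	return 0;
}

Starting at patlen instead would skip the window that ends on the very first patlen bytes, which is exactly the off-by-one the one-character change above corrects.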