Diffstat (limited to 'lib')
-rw-r--r--  lib/bug.c            | 19
-rw-r--r--  lib/dynamic_printk.c |  6
-rw-r--r--  lib/idr.c            | 22
-rw-r--r--  lib/percpu_counter.c |  7
4 files changed, 41 insertions, 13 deletions
diff --git a/lib/bug.c b/lib/bug.c
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -5,6 +5,8 @@
 
  CONFIG_BUG - emit BUG traps. Nothing happens without this.
  CONFIG_GENERIC_BUG - enable this code.
+ CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
+	the containing struct bug_entry for bug_addr and file.
  CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
 
  CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
@@ -43,6 +45,15 @@
 
 extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
 
+static inline unsigned long bug_addr(const struct bug_entry *bug)
+{
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+	return bug->bug_addr;
+#else
+	return (unsigned long)bug + bug->bug_addr_disp;
+#endif
+}
+
 #ifdef CONFIG_MODULES
 static LIST_HEAD(module_bug_list);
 
@@ -55,7 +66,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
 		unsigned i;
 
 		for (i = 0; i < mod->num_bugs; ++i, ++bug)
-			if (bugaddr == bug->bug_addr)
+			if (bugaddr == bug_addr(bug))
 				return bug;
 	}
 	return NULL;
@@ -108,7 +119,7 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
 	const struct bug_entry *bug;
 
 	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
-		if (bugaddr == bug->bug_addr)
+		if (bugaddr == bug_addr(bug))
 			return bug;
 
 	return module_find_bug(bugaddr);
@@ -133,7 +144,11 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 
 	if (bug) {
 #ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
 		file = bug->file;
+#else
+		file = (const char *)bug + bug->file_disp;
+#endif
 		line = bug->line;
 #endif
 		warning = (bug->flags & BUGFLAG_WARNING) != 0;
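
The lib/bug.c hunks above route every lookup of a bug table entry's trap address through the new bug_addr() helper, so that a configuration with CONFIG_GENERIC_BUG_RELATIVE_POINTERS can store a 32-bit displacement from the entry itself (bug_addr_disp, and file_disp for the file name) instead of a full pointer; the usual payoff of self-relative offsets is a smaller table on 64-bit targets with no load-time relocations. Below is a minimal user-space sketch of that decoding, under stated assumptions: the struct is a simplified stand-in for the real bug_entry, and the displacement is computed at run time here, whereas the kernel emits it from the BUG() inline asm at build time.

#include <stdio.h>

/* Simplified stand-in for the relative-pointer form of struct bug_entry;
 * the real layout lives in include/asm-generic/bug.h and the arch headers. */
struct bug_entry_rel {
	signed int bug_addr_disp;	/* offset from this entry to the BUG site */
};

/* Same recovery step as the kernel's new bug_addr() helper: add the stored
 * displacement to the address of the entry itself. */
static unsigned long bug_addr(const struct bug_entry_rel *bug)
{
	return (unsigned long)bug + bug->bug_addr_disp;
}

int main(void)
{
	static char fake_bug_site;		/* stands in for the trapping instruction */
	static struct bug_entry_rel entry;	/* static, so it sits near the fake site */

	/* Computed at run time only for this demonstration. */
	entry.bug_addr_disp = (signed int)((unsigned long)&fake_bug_site -
					   (unsigned long)&entry);

	printf("site   %p\n", (void *)&fake_bug_site);
	printf("lookup %p\n", (void *)bug_addr(&entry));
	return 0;
}
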
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c
index d83660fd6fdd..8e30295e8566 100644
--- a/lib/dynamic_printk.c
+++ b/lib/dynamic_printk.c
@@ -135,7 +135,7 @@ int unregister_dynamic_debug_module(char *mod_name)
 	nr_entries--;
 out:
 	up(&debug_list_mutex);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module);
 
@@ -289,7 +289,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
 				dynamic_enabled = DYNAMIC_ENABLED_SOME;
 				err = 0;
 				printk(KERN_DEBUG
-					"debugging enabled for module %s",
+					"debugging enabled for module %s\n",
 					elem->name);
 			} else if (!value && (elem->enable == 1)) {
 				elem->enable = 0;
@@ -309,7 +309,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
 				err = 0;
 				printk(KERN_DEBUG
 					"debugging disabled for module "
-					"%s", elem->name);
+					"%s\n", elem->name);
 			}
 		}
 	}
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -185,6 +185,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			new = get_from_free_list(idp);
 			if (!new)
 				return -1;
+			new->layer = l-1;
 			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
 		}
@@ -210,6 +211,7 @@ build_up:
 	if (unlikely(!p)) {
 		if (!(p = get_from_free_list(idp)))
 			return -1;
+		p->layer = 0;
 		layers = 1;
 	}
 	/*
@@ -218,8 +220,14 @@
 
 	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
 		layers++;
-		if (!p->count)
+		if (!p->count) {
+			/* special case: if the tree is currently empty,
+			 * then we grow the tree by moving the top node
+			 * upwards.
+			 */
+			p->layer++;
 			continue;
+		}
 		if (!(new = get_from_free_list(idp))) {
 			/*
 			 * The allocation failed. If we built part of
@@ -237,6 +245,7 @@ build_up:
 		}
 		new->ary[0] = p;
 		new->count = 1;
+		new->layer = layers-1;
 		if (p->bitmap == IDR_FULL)
 			__set_bit(0, &new->bitmap);
 		p = new;
@@ -493,17 +502,21 @@ void *idr_find(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	n = idp->layers * IDR_BITS;
 	p = rcu_dereference(idp->top);
+	if (!p)
+		return NULL;
+	n = (p->layer+1) * IDR_BITS;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 
 	if (id >= (1 << n))
 		return NULL;
+	BUG_ON(n == 0);
 
 	while (n > 0 && p) {
 		n -= IDR_BITS;
+		BUG_ON(n != p->layer*IDR_BITS);
 		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
@@ -582,8 +595,11 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	n = idp->layers * IDR_BITS;
 	p = idp->top;
+	if (!p)
+		return ERR_PTR(-EINVAL);
+
+	n = (p->layer+1) * IDR_BITS;
 
 	id &= MAX_ID_MASK;
 
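
The common thread in the lib/idr.c hunks above is that every idr_layer now records its own depth in the new layer field, and idr_find()/idr_replace() derive the tree height from the top node they actually dereference rather than from the separately updated idp->layers counter, presumably so that a lock-free reader can never pair a stale height with a different top pointer. Here is a toy version of that lookup loop, under stated assumptions: a simplified node with a 2-bit fanout stands in for idr_layer, there is no RCU, and all names are invented for the sketch.

#include <stdio.h>

#define BITS 2				/* toy fanout; the kernel uses IDR_BITS */
#define MASK ((1 << BITS) - 1)

struct node {
	int layer;			/* depth of this node, 0 == leaf level */
	void *ary[1 << BITS];		/* children, or user pointers at layer 0 */
};

/* Shape of the reworked idr_find(): the number of index bits to walk is
 * (top->layer + 1) * BITS, taken from the node itself. */
static void *find(struct node *top, int id)
{
	struct node *p = top;
	int n;

	if (!p)
		return NULL;
	n = (p->layer + 1) * BITS;
	if (id >= (1 << n))
		return NULL;		/* id is taller than this tree */

	while (n > 0 && p) {
		n -= BITS;
		p = p->ary[(id >> n) & MASK];
	}
	return p;
}

int main(void)
{
	static char obj;
	struct node leaf = { .layer = 0 };
	struct node root = { .layer = 1 };

	leaf.ary[2] = &obj;		/* id 6 == (1 << BITS) | 2 */
	root.ary[1] = &leaf;

	printf("found %p, expected %p\n", find(&root, 6), (void *)&obj);
	return 0;
}

Because the height travels with the node, a reader that loads an older top also computes the matching older height, which is what the added BUG_ON(n != p->layer*IDR_BITS) in idr_find() asserts at every step of the walk.
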
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a8663890a88c..b255b939bc1b 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -62,10 +62,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		*pcount = 0;
 	}
-	fbc->count = ret;
-
 	spin_unlock(&fbc->lock);
 	return ret;
 }
@@ -104,13 +101,13 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
 	if (!fbc->counters)
 		return;
 
-	free_percpu(fbc->counters);
-	fbc->counters = NULL;
 #ifdef CONFIG_HOTPLUG_CPU
 	mutex_lock(&percpu_counters_lock);
 	list_del(&fbc->list);
 	mutex_unlock(&percpu_counters_lock);
 #endif
+	free_percpu(fbc->counters);
+	fbc->counters = NULL;
 }
 EXPORT_SYMBOL(percpu_counter_destroy);
 
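
Finally, the lib/percpu_counter.c hunks make __percpu_counter_sum() read-only: the accurate value is the global count plus every per-CPU delta, summed under the lock but without zeroing the deltas or writing the result back, so taking a precise sum no longer disturbs the state that concurrent updaters and readers depend on. A small user-space sketch of that summation follows, under stated assumptions: a fixed array and a pthread mutex stand in for the per-CPU allocation and the counter's spinlock, and all names are illustrative.

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4			/* toy stand-in for the online CPU set */

struct pcpu_counter {
	pthread_mutex_t lock;		/* stands in for fbc->lock */
	long long count;		/* global part of the counter */
	int counters[NR_CPUS];		/* per-"CPU" deltas */
};

/* Shape of the fixed __percpu_counter_sum(): read every delta under the
 * lock, but leave both the deltas and the global count untouched. */
static long long pcpu_counter_sum(struct pcpu_counter *fbc)
{
	long long ret;
	int cpu;

	pthread_mutex_lock(&fbc->lock);
	ret = fbc->count;
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		ret += fbc->counters[cpu];	/* note: no "*pcount = 0" */
	pthread_mutex_unlock(&fbc->lock);
	return ret;
}

int main(void)
{
	struct pcpu_counter c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.count = 100,
		.counters = { 3, -1, 7, 0 },
	};

	/* Summing twice gives the same answer now that the first call no
	 * longer destroys the per-CPU deltas. */
	printf("%lld %lld\n", pcpu_counter_sum(&c), pcpu_counter_sum(&c));
	return 0;
}

The reordering in percpu_counter_destroy() looks like the same kind of caution: the counter is unhooked from the hotplug list before free_percpu() runs, so a CPU-hotplug callback walking that list cannot reach storage that has already been freed.
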