author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-01-25 16:27:36 -0500
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-01-25 16:27:36 -0500
commit    9f9cba810f36d16f4e64477e879a69f6c47b389d (patch)
tree      d787abcbead1439d3f82f0719efe520fd9689f79 /lib
parent    dbf5bef8da169b38db804996a661f8d634df8295 (diff)
parent    949db153b6466c6f7cad5a427ecea94985927311 (diff)
Merge 3.8-rc5 into tty-next
This resolves a number of tty driver merge issues found in linux-next.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'lib')
 lib/bug.c      |  1 +
 lib/cpu_rmap.c | 54 ++++++++++++++++++++++++++++++++++++++++++-----
 lib/rbtree.c   | 20 +++++++++++++---
 3 files changed, 67 insertions(+), 8 deletions(-)
diff --git a/lib/bug.c b/lib/bug.c
index a28c1415357c..d0cdf14c651a 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -55,6 +55,7 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
 }
 
 #ifdef CONFIG_MODULES
+/* Updates are protected by module mutex */
 static LIST_HEAD(module_bug_list);
 
 static const struct bug_entry *module_find_bug(unsigned long bugaddr)
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index 145dec5267c9..5fbed5caba6e 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -45,6 +45,7 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 	if (!rmap)
 		return NULL;
 
+	kref_init(&rmap->refcount);
 	rmap->obj = (void **)((char *)rmap + obj_offset);
 
 	/* Initially assign CPUs to objects on a rota, since we have
@@ -63,6 +64,35 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 }
 EXPORT_SYMBOL(alloc_cpu_rmap);
 
+/**
+ * cpu_rmap_release - internal reclaiming helper called from kref_put
+ * @ref: kref to struct cpu_rmap
+ */
+static void cpu_rmap_release(struct kref *ref)
+{
+	struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);
+	kfree(rmap);
+}
+
+/**
+ * cpu_rmap_get - internal helper to get new ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+static inline void cpu_rmap_get(struct cpu_rmap *rmap)
+{
+	kref_get(&rmap->refcount);
+}
+
+/**
+ * cpu_rmap_put - release ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+int cpu_rmap_put(struct cpu_rmap *rmap)
+{
+	return kref_put(&rmap->refcount, cpu_rmap_release);
+}
+EXPORT_SYMBOL(cpu_rmap_put);
+
 /* Reevaluate nearest object for given CPU, comparing with the given
  * neighbours at the given distance.
  */
@@ -197,8 +227,7 @@ struct irq_glue {
  * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
  * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL
  *
- * Must be called in process context, before freeing the IRQs, and
- * without holding any locks required by global workqueue items.
+ * Must be called in process context, before freeing the IRQs.
  */
 void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 {
@@ -212,12 +241,18 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 		glue = rmap->obj[index];
 		irq_set_affinity_notifier(glue->notify.irq, NULL);
 	}
-	irq_run_affinity_notifiers();
 
-	kfree(rmap);
+	cpu_rmap_put(rmap);
 }
 EXPORT_SYMBOL(free_irq_cpu_rmap);
 
+/**
+ * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
+ * @notify: struct irq_affinity_notify passed by irq/manage.c
+ * @mask: cpu mask for new SMP affinity
+ *
+ * This is executed in workqueue context.
+ */
 static void
 irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 {
@@ -230,10 +265,16 @@ irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 		pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
 }
 
+/**
+ * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
+ * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
+ */
 static void irq_cpu_rmap_release(struct kref *ref)
 {
 	struct irq_glue *glue =
 		container_of(ref, struct irq_glue, notify.kref);
+
+	cpu_rmap_put(glue->rmap);
 	kfree(glue);
 }
 
@@ -258,10 +299,13 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
 	glue->notify.notify = irq_cpu_rmap_notify;
 	glue->notify.release = irq_cpu_rmap_release;
 	glue->rmap = rmap;
+	cpu_rmap_get(rmap);
 	glue->index = cpu_rmap_add(rmap, glue);
 	rc = irq_set_affinity_notifier(irq, &glue->notify);
-	if (rc)
+	if (rc) {
+		cpu_rmap_put(glue->rmap);
 		kfree(glue);
+	}
 	return rc;
 }
 EXPORT_SYMBOL(irq_cpu_rmap_add);
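
Note: the hunks above change the cpu_rmap lifetime rules. The map now starts
with one reference from alloc_cpu_rmap(), each successful irq_cpu_rmap_add()
takes another, and free_irq_cpu_rmap() drops only the caller's reference, so
the structure stays alive until the IRQ core has released every affinity
notifier. A minimal driver-side sketch of the resulting usage; the function
and variable names below are illustrative, not part of this commit:

#include <linux/cpu_rmap.h>

static struct cpu_rmap *setup_rx_rmap(const int *irqs, unsigned int n_vecs)
{
	struct cpu_rmap *rmap;
	unsigned int i;
	int rc;

	rmap = alloc_irq_cpu_rmap(n_vecs);	/* refcount starts at 1 */
	if (!rmap)
		return NULL;

	for (i = 0; i < n_vecs; i++) {
		/* each successful add takes its own reference on rmap */
		rc = irq_cpu_rmap_add(rmap, irqs[i]);
		if (rc) {
			/* unregisters the notifiers added so far and drops
			 * only our reference; the per-IRQ references are
			 * dropped later from irq_cpu_rmap_release()
			 */
			free_irq_cpu_rmap(rmap);
			return NULL;
		}
	}
	return rmap;
}
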
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4f56a11d67fa..c0e31fe2fabf 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -194,8 +194,12 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
 	}
 }
 
-__always_inline void
-__rb_erase_color(struct rb_node *parent, struct rb_root *root,
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
 	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
 	struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
@@ -355,6 +359,13 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root,
 		}
 	}
 }
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+	____rb_erase_color(parent, root, augment_rotate);
+}
 EXPORT_SYMBOL(__rb_erase_color);
 
 /*
@@ -380,7 +391,10 @@ EXPORT_SYMBOL(rb_insert_color);
 
 void rb_erase(struct rb_node *node, struct rb_root *root)
 {
-	rb_erase_augmented(node, root, &dummy_callbacks);
+	struct rb_node *rebalance;
+	rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+	if (rebalance)
+		____rb_erase_color(rebalance, root, dummy_rotate);
 }
 EXPORT_SYMBOL(rb_erase);
 
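
Note: after this change, rb_erase() and rb_erase_augmented() share one
rebalancing implementation. rb_erase() inlines ____rb_erase_color() so the
dummy_rotate callback is compiled away on the common path, while augmented
trees still reach the exported __rb_erase_color() wrapper through
rb_erase_augmented() in rbtree_augmented.h. The caller-visible API is
unchanged, as in this small sketch; the struct and helper are illustrative
only:

#include <linux/rbtree.h>

struct item {
	struct rb_node node;
	int key;
};

/* plain erase: the rebalance pass runs with callbacks compiled out */
static void remove_item(struct rb_root *root, struct item *it)
{
	rb_erase(&it->node, root);
}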