about | summary | refs | log | tree | commit | diff | stats
path: root/kernel
diff options
context:
space:
mode:
author: Julia Lawall <julia@diku.dk> 2010-03-29 11:37:02 -0400
committer: Steven Rostedt <rostedt@goodmis.org> 2010-03-29 15:23:24 -0400
commit292f60c0c4ab44aa2d589ba03c12e64a3b3c5e38 (patch)
tree0c33c9b23202a27c768dee45776503c9ec6b8306 /kernel
parente36673ec5126f15a8cddf6049aede7bdcf484c26 (diff)
ring-buffer: Add missing unlock
In some error handling cases the lock is not unlocked. The return is
converted to a goto, to share the unlock at the end of the function.

A simplified version of the semantic patch that finds this problem is
as follows: (http://coccinelle.lip6.fr/)

// <smpl>
@r exists@
expression E1;
identifier f;
@@

f (...) { <+...
* spin_lock_irq (E1,...);
... when != E1
* return ...;
...+> }
// </smpl>

Signed-off-by: Julia Lawall <julia@diku.dk>
LKML-Reference: <Pine.LNX.4.64.1003291736440.21896@ask.diku.dk>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/trace/ring_buffer.c8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d1187ef20caf..9a0f9bf6a37b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1209,18 +1209,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1209 1209
1210 for (i = 0; i < nr_pages; i++) { 1210 for (i = 0; i < nr_pages; i++) {
1211 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages))) 1211 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1212 return; 1212 goto out;
1213 p = cpu_buffer->pages->next; 1213 p = cpu_buffer->pages->next;
1214 bpage = list_entry(p, struct buffer_page, list); 1214 bpage = list_entry(p, struct buffer_page, list);
1215 list_del_init(&bpage->list); 1215 list_del_init(&bpage->list);
1216 free_buffer_page(bpage); 1216 free_buffer_page(bpage);
1217 } 1217 }
1218 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages))) 1218 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1219 return; 1219 goto out;
1220 1220
1221 rb_reset_cpu(cpu_buffer); 1221 rb_reset_cpu(cpu_buffer);
1222 rb_check_pages(cpu_buffer); 1222 rb_check_pages(cpu_buffer);
1223 1223
1224out:
1224 spin_unlock_irq(&cpu_buffer->reader_lock); 1225 spin_unlock_irq(&cpu_buffer->reader_lock);
1225} 1226}
1226 1227
@@ -1237,7 +1238,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1237 1238
1238 for (i = 0; i < nr_pages; i++) { 1239 for (i = 0; i < nr_pages; i++) {
1239 if (RB_WARN_ON(cpu_buffer, list_empty(pages))) 1240 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
1240 return; 1241 goto out;
1241 p = pages->next; 1242 p = pages->next;
1242 bpage = list_entry(p, struct buffer_page, list); 1243 bpage = list_entry(p, struct buffer_page, list);
1243 list_del_init(&bpage->list); 1244 list_del_init(&bpage->list);
@@ -1246,6 +1247,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1246 rb_reset_cpu(cpu_buffer); 1247 rb_reset_cpu(cpu_buffer);
1247 rb_check_pages(cpu_buffer); 1248 rb_check_pages(cpu_buffer);
1248 1249
1250out:
1249 spin_unlock_irq(&cpu_buffer->reader_lock); 1251 spin_unlock_irq(&cpu_buffer->reader_lock);
1250} 1252}
1251 1253