path: root/kernel/trace/ring_buffer.c
author	Steven Rostedt <srostedt@redhat.com>	2009-12-10 22:54:27 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2009-12-10 22:54:27 -0500
commit	184210154b9aa570099183f6c062ac4eb11190b7 (patch)
tree	8951abcbd66eb5e198e340013b87138c2ff8d7fa /kernel/trace/ring_buffer.c
parent	d954fbf0ff6b5fdfb32350e85a2f15d3db976506 (diff)
ring-buffer: Use sync sched protection on ring buffer resizing
There was a comment in the ring buffer code saying that the calling layers should prevent tracing or reading of the ring buffer while resizing. I have discovered that the tracers do not honor this arrangement.

This patch moves the disabling and synchronizing of the ring buffer to a higher layer during resizing. This guarantees that no writes occur while the resize takes place.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
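The ordering the patch establishes in ring_buffer_resize() is: bump record_disabled so new writes are refused, call synchronize_sched() so any writer already running in a preempt-disabled section finishes, perform the resize, then drop record_disabled. Below is a minimal userspace sketch of that ordering, for illustration only; it is an analogue rather than kernel code, and the names demo_buffer, writers_active, demo_write() and demo_resize() are hypothetical stand-ins. The drain loop stands in for synchronize_sched().

/* Userspace analogue of the resize protection; names are illustrative. */
#include <stdatomic.h>
#include <sched.h>

struct demo_buffer {
	atomic_int record_disabled;	/* > 0 means writers must not touch the buffer */
	atomic_int writers_active;	/* number of writers currently copying an event */
	/* ... pages, head/tail, reader lock ... */
};

/* Writer side: announce ourselves first, then back out if recording is off. */
static int demo_write(struct demo_buffer *b)
{
	atomic_fetch_add(&b->writers_active, 1);
	if (atomic_load(&b->record_disabled)) {
		atomic_fetch_sub(&b->writers_active, 1);
		return -1;		/* event dropped while a resize is in progress */
	}
	/* ... reserve space and copy the event into the buffer ... */
	atomic_fetch_sub(&b->writers_active, 1);
	return 0;
}

/* Resize side: the shape of what ring_buffer_resize() now does internally. */
static void demo_resize(struct demo_buffer *b, unsigned long size)
{
	atomic_fetch_add(&b->record_disabled, 1);

	/* Stand-in for synchronize_sched(): wait for in-flight writers to drain. */
	while (atomic_load(&b->writers_active))
		sched_yield();

	/* ... add or remove buffer pages; no writer can be in flight here ... */
	(void)size;

	atomic_fetch_sub(&b->record_disabled, 1);
}

In the kernel, the drain step is synchronize_sched() itself: ring buffer writers run with preemption disabled, so once every CPU has passed through a quiescent state no writer that saw the old record_disabled value can still be inside the buffer.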
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	25
1 file changed, 9 insertions, 16 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a1ca4956ab5e..0d64c51ab4df 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1193,9 +1193,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1214,9 +1211,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 
 	rb_check_pages(cpu_buffer);
-
-	atomic_dec(&cpu_buffer->record_disabled);
-
 }
 
 static void
@@ -1227,9 +1221,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1245,8 +1236,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 
 	rb_check_pages(cpu_buffer);
-
-	atomic_dec(&cpu_buffer->record_disabled);
 }
 
 /**
@@ -1254,11 +1243,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- * RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
@@ -1290,6 +1274,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size == buffer_size)
 		return size;
 
+	atomic_inc(&buffer->record_disabled);
+
+	/* Make sure all writers are done with this buffer. */
+	synchronize_sched();
+
 	mutex_lock(&buffer->mutex);
 	get_online_cpus();
 
@@ -1352,6 +1341,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 
+	atomic_dec(&buffer->record_disabled);
+
 	return size;
 
  free_pages:
@@ -1361,6 +1352,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	}
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -ENOMEM;
 
 	/*
@@ -1370,6 +1362,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 out_fail:
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);