Diffstat (limited to 'kernel/relay.c')
 kernel/relay.c | 37 ++++++++++++++++++-------------------
 1 file changed, 18 insertions(+), 19 deletions(-)
diff --git a/kernel/relay.c b/kernel/relay.c
index 577f251c7e28..4311101b0ca7 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -310,16 +310,13 @@ static struct rchan_callbacks default_channel_callbacks = {
 
 /**
  * wakeup_readers - wake up readers waiting on a channel
- * @work: work struct that contains the channel buffer
+ * @data: contains the channel buffer
  *
- * This is the work function used to defer reader waking.  The
- * reason waking is deferred is that calling directly from write
- * causes problems if you're writing from, say, the scheduler.
+ * This is the timer function used to defer reader waking.
  */
-static void wakeup_readers(struct work_struct *work)
+static void wakeup_readers(unsigned long data)
 {
-        struct rchan_buf *buf =
-                container_of(work, struct rchan_buf, wake_readers.work);
+        struct rchan_buf *buf = (struct rchan_buf *)data;
         wake_up_interruptible(&buf->read_wait);
 }
 
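
For readers unfamiliar with the (pre-4.15) kernel timer API adopted here: the
callback takes an opaque unsigned long and casts it back to the object,
instead of recovering it via container_of() from an embedded work_struct. A
minimal sketch of that pattern, with hypothetical names (my_buf, my_wakeup)
that do not appear in the patch:

        #include <linux/timer.h>
        #include <linux/wait.h>

        struct my_buf {
                wait_queue_head_t read_wait;
                struct timer_list timer;
        };

        /* Timer callback: runs in softirq context, so it must not sleep. */
        static void my_wakeup(unsigned long data)
        {
                struct my_buf *buf = (struct my_buf *)data;

                wake_up_interruptible(&buf->read_wait);
        }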
@@ -337,11 +334,9 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
         if (init) {
                 init_waitqueue_head(&buf->read_wait);
                 kref_init(&buf->kref);
-                INIT_DELAYED_WORK(&buf->wake_readers, NULL);
-        } else {
-                cancel_delayed_work(&buf->wake_readers);
-                flush_scheduled_work();
-        }
+                setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
+        } else
+                del_timer_sync(&buf->timer);
 
         buf->subbufs_produced = 0;
         buf->subbufs_consumed = 0;
@@ -447,8 +442,7 @@ end:
 static void relay_close_buf(struct rchan_buf *buf)
 {
         buf->finalized = 1;
-        cancel_delayed_work(&buf->wake_readers);
-        flush_scheduled_work();
+        del_timer_sync(&buf->timer);
         kref_put(&buf->kref, relay_remove_buf);
 }
 
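
On the lifecycle side, setup_timer() binds the callback and its argument to an
initially inactive timer, and a single del_timer_sync() call both deactivates
the timer and waits for a handler already running on another CPU to finish,
which is why it can stand in for the cancel_delayed_work() plus
flush_scheduled_work() pair. A sketch continuing the hypothetical my_buf
above:

        static void my_buf_init(struct my_buf *buf)
        {
                init_waitqueue_head(&buf->read_wait);
                /* Bind callback and argument; the timer starts out inactive. */
                setup_timer(&buf->timer, my_wakeup, (unsigned long)buf);
        }

        static void my_buf_close(struct my_buf *buf)
        {
                /* After this returns, my_wakeup() is not running anywhere. */
                del_timer_sync(&buf->timer);
        }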
454 448
@@ -490,6 +484,7 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
 
         switch(action) {
         case CPU_UP_PREPARE:
+        case CPU_UP_PREPARE_FROZEN:
                 mutex_lock(&relay_channels_mutex);
                 list_for_each_entry(chan, &relay_channels, list) {
                         if (chan->buf[hotcpu])
@@ -506,6 +501,7 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
                 mutex_unlock(&relay_channels_mutex);
                 break;
         case CPU_DEAD:
+        case CPU_DEAD_FROZEN:
                 /* No need to flush the cpu : will be flushed upon
                  * final relay_flush() call. */
                 break;
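
The *_FROZEN variants are the notifications a hotplug callback receives when
CPUs are taken down and brought back up during suspend/resume; matching only
the bare values would silently skip those transitions. The usual idiom (a
sketch with a hypothetical my_hotcpu_callback, not code from this patch) lets
both values fall through to the same handling:

        #include <linux/cpu.h>
        #include <linux/notifier.h>

        static int my_hotcpu_callback(struct notifier_block *nb,
                                      unsigned long action, void *hcpu)
        {
                switch (action) {
                case CPU_UP_PREPARE:
                case CPU_UP_PREPARE_FROZEN:
                        /* allocate per-cpu state here */
                        break;
                case CPU_DEAD:
                case CPU_DEAD_FROZEN:
                        /* tear down, or (as in relay) do nothing */
                        break;
                }
                return NOTIFY_OK;
        }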
@@ -608,11 +604,14 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
                 buf->dentry->d_inode->i_size += buf->chan->subbuf_size -
                         buf->padding[old_subbuf];
                 smp_mb();
-                if (waitqueue_active(&buf->read_wait)) {
-                        PREPARE_DELAYED_WORK(&buf->wake_readers,
-                                             wakeup_readers);
-                        schedule_delayed_work(&buf->wake_readers, 1);
-                }
+                if (waitqueue_active(&buf->read_wait))
+                        /*
+                         * Calling wake_up_interruptible() from here
+                         * will deadlock if we happen to be logging
+                         * from the scheduler (trying to re-grab
+                         * rq->lock), so defer it.
+                         */
+                        __mod_timer(&buf->timer, jiffies + 1);
         }
 
         old = buf->data;
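
The deferral itself, in isolation: a writer that may already hold rq->lock
(for example when logging from inside the scheduler) cannot call
wake_up_interruptible() directly without risking deadlock, so it merely arms a
one-jiffy timer and the wakeup runs later from the timer softirq. A sketch
built on the hypothetical my_buf above, using mod_timer(), the exported
counterpart of the patch's __mod_timer() call:

        static void my_buf_notify_readers(struct my_buf *buf)
        {
                smp_mb();       /* order buffer updates before checking for waiters */
                if (waitqueue_active(&buf->read_wait))
                        /* Defer: the wakeup then runs outside any lock
                         * the writer may currently hold. */
                        mod_timer(&buf->timer, jiffies + 1);
        }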