Diffstat (limited to 'kernel/relay.c')
 -rw-r--r--  kernel/relay.c  226
 1 file changed, 85 insertions, 141 deletions
diff --git a/kernel/relay.c b/kernel/relay.c
index d797502140b9..da79a109dbeb 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -214,7 +214,7 @@ static void relay_destroy_buf(struct rchan_buf *buf)
 			__free_page(buf->page_array[i]);
 		relay_free_page_array(buf->page_array);
 	}
-	chan->buf[buf->cpu] = NULL;
+	*per_cpu_ptr(chan->buf, buf->cpu) = NULL;
 	kfree(buf->padding);
 	kfree(buf);
 	kref_put(&chan->kref, relay_destroy_channel);
@@ -328,13 +328,15 @@ static struct rchan_callbacks default_channel_callbacks = {
 
 /**
  * wakeup_readers - wake up readers waiting on a channel
- * @data: contains the channel buffer
+ * @work: contains the channel buffer
  *
- * This is the timer function used to defer reader waking.
+ * This is the function used to defer reader waking
  */
-static void wakeup_readers(unsigned long data)
+static void wakeup_readers(struct irq_work *work)
 {
-	struct rchan_buf *buf = (struct rchan_buf *)data;
+	struct rchan_buf *buf;
+
+	buf = container_of(work, struct rchan_buf, wakeup_work);
 	wake_up_interruptible(&buf->read_wait);
 }
 
@@ -352,9 +354,10 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
 	if (init) {
 		init_waitqueue_head(&buf->read_wait);
 		kref_init(&buf->kref);
-		setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
-	} else
-		del_timer_sync(&buf->timer);
+		init_irq_work(&buf->wakeup_work, wakeup_readers);
+	} else {
+		irq_work_sync(&buf->wakeup_work);
+	}
 
 	buf->subbufs_produced = 0;
 	buf->subbufs_consumed = 0;
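Note: the timer-based reader wakeup is replaced by irq_work throughout this file (wakeup_readers(), __relay_reset(), relay_close_buf(), relay_switch_subbuf()). For readers unfamiliar with that API, here is a minimal sketch of the pattern this patch relies on; the names my_buf, my_buf_wakeup etc. are invented for illustration and are not part of the patch.

#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/wait.h>

struct my_buf {
	wait_queue_head_t read_wait;
	struct irq_work wakeup_work;
};

/* Runs later from IRQ context, outside the locks held by the producer. */
static void my_buf_wakeup(struct irq_work *work)
{
	struct my_buf *buf = container_of(work, struct my_buf, wakeup_work);

	wake_up_interruptible(&buf->read_wait);
}

static void my_buf_init(struct my_buf *buf)
{
	init_waitqueue_head(&buf->read_wait);
	init_irq_work(&buf->wakeup_work, my_buf_wakeup);
}

/* Producer path: defer the wakeup instead of calling wake_up directly. */
static void my_buf_data_ready(struct my_buf *buf)
{
	irq_work_queue(&buf->wakeup_work);
}

/* Teardown: make sure a pending wakeup has finished before freeing. */
static void my_buf_destroy(struct my_buf *buf)
{
	irq_work_sync(&buf->wakeup_work);
}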
@@ -382,20 +385,21 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
  */
 void relay_reset(struct rchan *chan)
 {
+	struct rchan_buf *buf;
 	unsigned int i;
 
 	if (!chan)
 		return;
 
-	if (chan->is_global && chan->buf[0]) {
-		__relay_reset(chan->buf[0], 0);
+	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
+		__relay_reset(buf, 0);
 		return;
 	}
 
 	mutex_lock(&relay_channels_mutex);
 	for_each_possible_cpu(i)
-		if (chan->buf[i])
-			__relay_reset(chan->buf[i], 0);
+		if ((buf = *per_cpu_ptr(chan->buf, i)))
+			__relay_reset(buf, 0);
 	mutex_unlock(&relay_channels_mutex);
 }
 EXPORT_SYMBOL_GPL(relay_reset);
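Note: the fixed chan->buf[NR_CPUS] array becomes a per-CPU pointer allocated with alloc_percpu(), so every chan->buf[i] access in the hunks below turns into *per_cpu_ptr(chan->buf, i). A minimal sketch of that idiom, with invented names (my_chan, my_buf); relay's real structures carry more state.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct my_buf {
	int cpu;
};

struct my_chan {
	struct my_buf * __percpu *buf;	/* one 'struct my_buf *' slot per CPU */
};

static int my_chan_init(struct my_chan *chan)
{
	chan->buf = alloc_percpu(struct my_buf *);
	return chan->buf ? 0 : -ENOMEM;
}

/* Equivalent of the old "chan->buf[cpu] = buf" assignment. */
static void my_chan_set_buf(struct my_chan *chan, unsigned int cpu,
			    struct my_buf *buf)
{
	*per_cpu_ptr(chan->buf, cpu) = buf;
}

static void my_chan_destroy(struct my_chan *chan)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		kfree(*per_cpu_ptr(chan->buf, cpu));
	free_percpu(chan->buf);
}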
@@ -440,7 +444,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
 	struct dentry *dentry;
 
 	if (chan->is_global)
-		return chan->buf[0];
+		return *per_cpu_ptr(chan->buf, 0);
 
 	buf = relay_create_buf(chan);
 	if (!buf)
@@ -464,7 +468,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
 	__relay_reset(buf, 1);
 
 	if(chan->is_global) {
-		chan->buf[0] = buf;
+		*per_cpu_ptr(chan->buf, 0) = buf;
 		buf->cpu = 0;
 	}
 
@@ -486,7 +490,7 @@ free_buf:
 static void relay_close_buf(struct rchan_buf *buf)
 {
 	buf->finalized = 1;
-	del_timer_sync(&buf->timer);
+	irq_work_sync(&buf->wakeup_work);
 	buf->chan->cb->remove_buf_file(buf->dentry);
 	kref_put(&buf->kref, relay_remove_buf);
 }
@@ -512,46 +516,25 @@ static void setup_callbacks(struct rchan *chan,
 	chan->cb = cb;
 }
 
-/**
- * relay_hotcpu_callback - CPU hotplug callback
- * @nb: notifier block
- * @action: hotplug action to take
- * @hcpu: CPU number
- *
- * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- */
-static int relay_hotcpu_callback(struct notifier_block *nb,
-				unsigned long action,
-				void *hcpu)
+int relay_prepare_cpu(unsigned int cpu)
 {
-	unsigned int hotcpu = (unsigned long)hcpu;
 	struct rchan *chan;
+	struct rchan_buf *buf;
 
-	switch(action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		mutex_lock(&relay_channels_mutex);
-		list_for_each_entry(chan, &relay_channels, list) {
-			if (chan->buf[hotcpu])
-				continue;
-			chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
-			if(!chan->buf[hotcpu]) {
-				printk(KERN_ERR
-					"relay_hotcpu_callback: cpu %d buffer "
-					"creation failed\n", hotcpu);
-				mutex_unlock(&relay_channels_mutex);
-				return notifier_from_errno(-ENOMEM);
-			}
+	mutex_lock(&relay_channels_mutex);
+	list_for_each_entry(chan, &relay_channels, list) {
+		if ((buf = *per_cpu_ptr(chan->buf, cpu)))
+			continue;
+		buf = relay_open_buf(chan, cpu);
+		if (!buf) {
+			pr_err("relay: cpu %d buffer creation failed\n", cpu);
+			mutex_unlock(&relay_channels_mutex);
+			return -ENOMEM;
 		}
-		mutex_unlock(&relay_channels_mutex);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		/* No need to flush the cpu : will be flushed upon
-		 * final relay_flush() call. */
-		break;
+		*per_cpu_ptr(chan->buf, cpu) = buf;
 	}
-	return NOTIFY_OK;
+	mutex_unlock(&relay_channels_mutex);
+	return 0;
 }
 
 /**
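Note: relay_hotcpu_callback() and its notifier boilerplate are replaced by relay_prepare_cpu(), a plain int (*)(unsigned int cpu) callback for the CPU hotplug state machine; the registration itself now lives in the hotplug core rather than in this file (see the matching removal of relay_init()/hotcpu_notifier() at the end of the diff). As a rough, generic sketch of how such a prepare-stage callback is wired up, with an illustrative dynamic state and name that are not relay's actual registration:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

/* Called on the control CPU before 'cpu' is brought up. */
static int example_prepare_cpu(unsigned int cpu)
{
	/* Allocate per-CPU resources; a negative return aborts the bring-up. */
	return 0;
}

static int __init example_hotplug_init(void)
{
	int ret;

	/*
	 * _nocalls: register the callback without invoking it for CPUs
	 * that are already online.  A dynamic state id (>= 0) is returned.
	 */
	ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, "example:prepare",
					example_prepare_cpu, NULL);
	return ret < 0 ? ret : 0;
}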
@@ -583,6 +566,7 @@ struct rchan *relay_open(const char *base_filename,
 {
 	unsigned int i;
 	struct rchan *chan;
+	struct rchan_buf *buf;
 
 	if (!(subbuf_size && n_subbufs))
 		return NULL;
@@ -593,6 +577,7 @@ struct rchan *relay_open(const char *base_filename,
 	if (!chan)
 		return NULL;
 
+	chan->buf = alloc_percpu(struct rchan_buf *);
 	chan->version = RELAYFS_CHANNEL_VERSION;
 	chan->n_subbufs = n_subbufs;
 	chan->subbuf_size = subbuf_size;
@@ -608,9 +593,10 @@ struct rchan *relay_open(const char *base_filename,
 
 	mutex_lock(&relay_channels_mutex);
 	for_each_online_cpu(i) {
-		chan->buf[i] = relay_open_buf(chan, i);
-		if (!chan->buf[i])
+		buf = relay_open_buf(chan, i);
+		if (!buf)
 			goto free_bufs;
+		*per_cpu_ptr(chan->buf, i) = buf;
 	}
 	list_add(&chan->list, &relay_channels);
 	mutex_unlock(&relay_channels_mutex);
@@ -619,8 +605,8 @@ struct rchan *relay_open(const char *base_filename,
 
 free_bufs:
 	for_each_possible_cpu(i) {
-		if (chan->buf[i])
-			relay_close_buf(chan->buf[i]);
+		if ((buf = *per_cpu_ptr(chan->buf, i)))
+			relay_close_buf(buf);
 	}
 
 	kref_put(&chan->kref, relay_destroy_channel);
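Note: nothing in the relay client API changes here; a client still calls relay_open() with create_buf_file/remove_buf_file callbacks and logs with relay_write(). For context, a rough sketch of a minimal debugfs-backed client in the spirit of Documentation/filesystems/relay.txt; the my_app_* names, the "example" directory and the buffer geometry are placeholders.

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/relay.h>

static struct dentry *my_app_create_buf_file(const char *filename,
					     struct dentry *parent,
					     umode_t mode,
					     struct rchan_buf *buf,
					     int *is_global)
{
	/* Expose each per-CPU buffer as a file under debugfs. */
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int my_app_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct rchan_callbacks my_app_relay_callbacks = {
	.create_buf_file	= my_app_create_buf_file,
	.remove_buf_file	= my_app_remove_buf_file,
};

static struct rchan *my_app_chan;

static int __init my_app_init(void)
{
	struct dentry *dir = debugfs_create_dir("example", NULL);

	/* 8 sub-buffers of 64 KiB each, one buffer file per CPU. */
	my_app_chan = relay_open("cpu", dir, 65536, 8,
				 &my_app_relay_callbacks, NULL);
	if (!my_app_chan)
		return -ENOMEM;

	relay_write(my_app_chan, "hello\n", 6);
	return 0;
}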
@@ -666,6 +652,7 @@ int relay_late_setup_files(struct rchan *chan,
 	unsigned int i, curr_cpu;
 	unsigned long flags;
 	struct dentry *dentry;
+	struct rchan_buf *buf;
 	struct rchan_percpu_buf_dispatcher disp;
 
 	if (!chan || !base_filename)
@@ -684,10 +671,11 @@ int relay_late_setup_files(struct rchan *chan,
 
 	if (chan->is_global) {
 		err = -EINVAL;
-		if (!WARN_ON_ONCE(!chan->buf[0])) {
-			dentry = relay_create_buf_file(chan, chan->buf[0], 0);
+		buf = *per_cpu_ptr(chan->buf, 0);
+		if (!WARN_ON_ONCE(!buf)) {
+			dentry = relay_create_buf_file(chan, buf, 0);
 			if (dentry && !WARN_ON_ONCE(!chan->is_global)) {
-				relay_set_buf_dentry(chan->buf[0], dentry);
+				relay_set_buf_dentry(buf, dentry);
 				err = 0;
 			}
 		}
@@ -702,13 +690,14 @@ int relay_late_setup_files(struct rchan *chan,
 	 * on all currently online CPUs.
 	 */
 	for_each_online_cpu(i) {
-		if (unlikely(!chan->buf[i])) {
+		buf = *per_cpu_ptr(chan->buf, i);
+		if (unlikely(!buf)) {
 			WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
 			err = -EINVAL;
 			break;
 		}
 
-		dentry = relay_create_buf_file(chan, chan->buf[i], i);
+		dentry = relay_create_buf_file(chan, buf, i);
 		if (unlikely(!dentry)) {
 			err = -EINVAL;
 			break;
@@ -716,10 +705,10 @@ int relay_late_setup_files(struct rchan *chan,
 
 		if (curr_cpu == i) {
 			local_irq_save(flags);
-			relay_set_buf_dentry(chan->buf[i], dentry);
+			relay_set_buf_dentry(buf, dentry);
 			local_irq_restore(flags);
 		} else {
-			disp.buf = chan->buf[i];
+			disp.buf = buf;
 			disp.dentry = dentry;
 			smp_mb();
 			/* relay_channels_mutex must be held, so wait. */
@@ -768,14 +757,15 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
 		buf->early_bytes += buf->chan->subbuf_size -
 				    buf->padding[old_subbuf];
 		smp_mb();
-		if (waitqueue_active(&buf->read_wait))
+		if (waitqueue_active(&buf->read_wait)) {
 			/*
 			 * Calling wake_up_interruptible() from here
 			 * will deadlock if we happen to be logging
 			 * from the scheduler (trying to re-grab
 			 * rq->lock), so defer it.
 			 */
-			mod_timer(&buf->timer, jiffies + 1);
+			irq_work_queue(&buf->wakeup_work);
+		}
 	}
 
 	old = buf->data;
@@ -822,11 +812,10 @@ void relay_subbufs_consumed(struct rchan *chan,
 	if (!chan)
 		return;
 
-	if (cpu >= NR_CPUS || !chan->buf[cpu] ||
-					subbufs_consumed > chan->n_subbufs)
+	buf = *per_cpu_ptr(chan->buf, cpu);
+	if (cpu >= NR_CPUS || !buf || subbufs_consumed > chan->n_subbufs)
 		return;
 
-	buf = chan->buf[cpu];
 	if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
 		buf->subbufs_consumed = buf->subbufs_produced;
 	else
@@ -842,18 +831,19 @@ EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
  */
 void relay_close(struct rchan *chan)
 {
+	struct rchan_buf *buf;
 	unsigned int i;
 
 	if (!chan)
 		return;
 
 	mutex_lock(&relay_channels_mutex);
-	if (chan->is_global && chan->buf[0])
-		relay_close_buf(chan->buf[0]);
+	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0)))
+		relay_close_buf(buf);
 	else
 		for_each_possible_cpu(i)
-			if (chan->buf[i])
-				relay_close_buf(chan->buf[i]);
+			if ((buf = *per_cpu_ptr(chan->buf, i)))
+				relay_close_buf(buf);
 
 	if (chan->last_toobig)
 		printk(KERN_WARNING "relay: one or more items not logged "
@@ -874,20 +864,21 @@ EXPORT_SYMBOL_GPL(relay_close);
  */
 void relay_flush(struct rchan *chan)
 {
+	struct rchan_buf *buf;
 	unsigned int i;
 
 	if (!chan)
 		return;
 
-	if (chan->is_global && chan->buf[0]) {
-		relay_switch_subbuf(chan->buf[0], 0);
+	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
+		relay_switch_subbuf(buf, 0);
 		return;
 	}
 
 	mutex_lock(&relay_channels_mutex);
 	for_each_possible_cpu(i)
-		if (chan->buf[i])
-			relay_switch_subbuf(chan->buf[i], 0);
+		if ((buf = *per_cpu_ptr(chan->buf, i)))
+			relay_switch_subbuf(buf, 0);
 	mutex_unlock(&relay_channels_mutex);
 }
 EXPORT_SYMBOL_GPL(relay_flush);
@@ -1121,51 +1112,23 @@ static size_t relay_file_read_end_pos(struct rchan_buf *buf,
 	return end_pos;
 }
 
-/*
- * subbuf_read_actor - read up to one subbuf's worth of data
- */
-static int subbuf_read_actor(size_t read_start,
-			     struct rchan_buf *buf,
-			     size_t avail,
-			     read_descriptor_t *desc)
-{
-	void *from;
-	int ret = 0;
-
-	from = buf->start + read_start;
-	ret = avail;
-	if (copy_to_user(desc->arg.buf, from, avail)) {
-		desc->error = -EFAULT;
-		ret = 0;
-	}
-	desc->arg.data += ret;
-	desc->written += ret;
-	desc->count -= ret;
-
-	return ret;
-}
-
-typedef int (*subbuf_actor_t) (size_t read_start,
-			       struct rchan_buf *buf,
-			       size_t avail,
-			       read_descriptor_t *desc);
-
-/*
- * relay_file_read_subbufs - read count bytes, bridging subbuf boundaries
- */
-static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
-					subbuf_actor_t subbuf_actor,
-					read_descriptor_t *desc)
+static ssize_t relay_file_read(struct file *filp,
+			       char __user *buffer,
+			       size_t count,
+			       loff_t *ppos)
 {
 	struct rchan_buf *buf = filp->private_data;
 	size_t read_start, avail;
+	size_t written = 0;
 	int ret;
 
-	if (!desc->count)
+	if (!count)
 		return 0;
 
 	inode_lock(file_inode(filp));
 	do {
+		void *from;
+
 		if (!relay_file_read_avail(buf, *ppos))
 			break;
 
@@ -1174,32 +1137,22 @@ static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
 		if (!avail)
 			break;
 
-		avail = min(desc->count, avail);
-		ret = subbuf_actor(read_start, buf, avail, desc);
-		if (desc->error < 0)
+		avail = min(count, avail);
+		from = buf->start + read_start;
+		ret = avail;
+		if (copy_to_user(buffer, from, avail))
 			break;
 
-		if (ret) {
-			relay_file_read_consume(buf, read_start, ret);
-			*ppos = relay_file_read_end_pos(buf, read_start, ret);
-		}
-	} while (desc->count && ret);
-	inode_unlock(file_inode(filp));
+		buffer += ret;
+		written += ret;
+		count -= ret;
 
-	return desc->written;
-}
+		relay_file_read_consume(buf, read_start, ret);
+		*ppos = relay_file_read_end_pos(buf, read_start, ret);
+	} while (count);
+	inode_unlock(file_inode(filp));
 
-static ssize_t relay_file_read(struct file *filp,
-			       char __user *buffer,
-			       size_t count,
-			       loff_t *ppos)
-{
-	read_descriptor_t desc;
-	desc.written = 0;
-	desc.count = count;
-	desc.arg.buf = buffer;
-	desc.error = 0;
-	return relay_file_read_subbufs(filp, ppos, subbuf_read_actor, &desc);
+	return written;
 }
 
 static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed)
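Note: from user space, read(2) on a relay buffer file behaves as before this refactor; a read drains produced-but-unconsumed data up to the requested count, bridging sub-buffer boundaries, and consumes what it returns. A trivial reader sketch; the path is hypothetical and depends on where the relay client created its files (often under debugfs).

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/example/cpu0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Each read() returns whatever is currently available, or 0 if none. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}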
@@ -1377,12 +1330,3 @@ const struct file_operations relay_file_operations = {
 	.splice_read	= relay_file_splice_read,
 };
 EXPORT_SYMBOL_GPL(relay_file_operations);
-
-static __init int relay_init(void)
-{
-
-	hotcpu_notifier(relay_hotcpu_callback, 0);
-	return 0;
-}
-
-early_initcall(relay_init);