author		Akash Goel <akash.goel@intel.com>	2016-09-02 15:47:38 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2016-09-06 12:30:19 -0400
commit		017c59c042d01fc84cae7a8ea475861e702c77ab (patch)
tree		ee665d166dff1e47e5815fd74130e67c23ba29bf
parent		ee1e714b94521b0bb27b04dfd1728ec51b19d4f0 (diff)
relay: Use per CPU constructs for the relay channel buffer pointers
relay essentially needs to maintain a per-CPU array of channel buffer
pointers, but it manually creates that array. Instead, it is better to use
the per-CPU constructs provided by the kernel to allocate and access the
array of pointers to channel buffers.

Signed-off-by: Akash Goel <akash.goel@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://lkml.kernel.org/r/1470909140-25919-1-git-send-email-akash.goel@intel.com
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	include/linux/relay.h	17
-rw-r--r--	kernel/relay.c		74
2 files changed, 52 insertions(+), 39 deletions(-)
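As background for the conversion below, here is a minimal sketch (not part of
the patch; the example_* names are illustrative only) of the percpu pattern
the code moves to. alloc_percpu(), per_cpu_ptr(), this_cpu_ptr(),
get_cpu_ptr() and put_cpu_ptr() are the kernel primitives the patch actually
uses:

	#include <linux/errno.h>
	#include <linux/percpu.h>

	struct example_buf {
		size_t offset;
	};

	struct example_chan {
		struct example_buf ** __percpu buf;	/* one pointer slot per CPU */
	};

	static int example_chan_init(struct example_chan *chan)
	{
		/* percpu allocations are zero-filled, so every slot starts out NULL */
		chan->buf = alloc_percpu(struct example_buf *);
		if (!chan->buf)
			return -ENOMEM;
		return 0;
	}

	/* store a buffer for a given CPU, as relay_open()/relay_open_buf() now do */
	static void example_chan_set(struct example_chan *chan, int cpu,
				     struct example_buf *buf)
	{
		*per_cpu_ptr(chan->buf, cpu) = buf;
	}

	/* touch the current CPU's buffer; get_cpu_ptr() disables preemption,
	 * taking the place of the old get_cpu()/smp_processor_id() indexing
	 */
	static void example_chan_touch(struct example_chan *chan)
	{
		struct example_buf *buf = *get_cpu_ptr(chan->buf);

		if (buf)
			buf->offset++;

		put_cpu_ptr(chan->buf);
	}

This replaces a fixed NR_CPUS-sized pointer array with memory sized to the
CPUs actually possible on the running system, and it is the lookup pattern
used throughout the hunks that follow.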
diff --git a/include/linux/relay.h b/include/linux/relay.h
index d7c8359693c6..eb295e373b90 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -19,6 +19,7 @@
 #include <linux/fs.h>
 #include <linux/poll.h>
 #include <linux/kref.h>
+#include <linux/percpu.h>
 
 /*
  * Tracks changes to rchan/rchan_buf structs
@@ -63,7 +64,7 @@ struct rchan
 	struct kref kref;		/* channel refcount */
 	void *private_data;		/* for user-defined data */
 	size_t last_toobig;		/* tried to log event > subbuf size */
-	struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
+	struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */
 	int is_global;			/* One global buffer ? */
 	struct list_head list;		/* for channel list */
 	struct dentry *parent;		/* parent dentry passed to open */
@@ -204,7 +205,7 @@ static inline void relay_write(struct rchan *chan,
 	struct rchan_buf *buf;
 
 	local_irq_save(flags);
-	buf = chan->buf[smp_processor_id()];
+	buf = *this_cpu_ptr(chan->buf);
 	if (unlikely(buf->offset + length > chan->subbuf_size))
 		length = relay_switch_subbuf(buf, length);
 	memcpy(buf->data + buf->offset, data, length);
@@ -230,12 +231,12 @@ static inline void __relay_write(struct rchan *chan,
 {
 	struct rchan_buf *buf;
 
-	buf = chan->buf[get_cpu()];
+	buf = *get_cpu_ptr(chan->buf);
 	if (unlikely(buf->offset + length > buf->chan->subbuf_size))
 		length = relay_switch_subbuf(buf, length);
 	memcpy(buf->data + buf->offset, data, length);
 	buf->offset += length;
-	put_cpu();
+	put_cpu_ptr(chan->buf);
 }
 
 /**
@@ -251,17 +252,19 @@ static inline void __relay_write(struct rchan *chan,
  */
 static inline void *relay_reserve(struct rchan *chan, size_t length)
 {
-	void *reserved;
-	struct rchan_buf *buf = chan->buf[smp_processor_id()];
+	void *reserved = NULL;
+	struct rchan_buf *buf = *get_cpu_ptr(chan->buf);
 
 	if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
 		length = relay_switch_subbuf(buf, length);
 		if (!length)
-			return NULL;
+			goto end;
 	}
 	reserved = buf->data + buf->offset;
 	buf->offset += length;
 
+end:
+	put_cpu_ptr(chan->buf);
 	return reserved;
 }
 
diff --git a/kernel/relay.c b/kernel/relay.c
index d797502140b9..ed157378f6cb 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -214,7 +214,7 @@ static void relay_destroy_buf(struct rchan_buf *buf)
 			__free_page(buf->page_array[i]);
 		relay_free_page_array(buf->page_array);
 	}
-	chan->buf[buf->cpu] = NULL;
+	*per_cpu_ptr(chan->buf, buf->cpu) = NULL;
 	kfree(buf->padding);
 	kfree(buf);
 	kref_put(&chan->kref, relay_destroy_channel);
@@ -382,20 +382,21 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
  */
 void relay_reset(struct rchan *chan)
 {
+	struct rchan_buf *buf;
 	unsigned int i;
 
 	if (!chan)
 		return;
 
-	if (chan->is_global && chan->buf[0]) {
-		__relay_reset(chan->buf[0], 0);
+	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
+		__relay_reset(buf, 0);
 		return;
 	}
 
 	mutex_lock(&relay_channels_mutex);
 	for_each_possible_cpu(i)
-		if (chan->buf[i])
-			__relay_reset(chan->buf[i], 0);
+		if ((buf = *per_cpu_ptr(chan->buf, i)))
+			__relay_reset(buf, 0);
 	mutex_unlock(&relay_channels_mutex);
 }
 EXPORT_SYMBOL_GPL(relay_reset);
@@ -440,7 +441,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
 	struct dentry *dentry;
 
 	if (chan->is_global)
-		return chan->buf[0];
+		return *per_cpu_ptr(chan->buf, 0);
 
 	buf = relay_create_buf(chan);
 	if (!buf)
@@ -464,7 +465,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
 	__relay_reset(buf, 1);
 
 	if(chan->is_global) {
-		chan->buf[0] = buf;
+		*per_cpu_ptr(chan->buf, 0) = buf;
 		buf->cpu = 0;
 	}
 
@@ -526,22 +527,24 @@ static int relay_hotcpu_callback(struct notifier_block *nb,
 {
 	unsigned int hotcpu = (unsigned long)hcpu;
 	struct rchan *chan;
+	struct rchan_buf *buf;
 
 	switch(action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
 		mutex_lock(&relay_channels_mutex);
 		list_for_each_entry(chan, &relay_channels, list) {
-			if (chan->buf[hotcpu])
+			if ((buf = *per_cpu_ptr(chan->buf, hotcpu)))
 				continue;
-			chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
-			if(!chan->buf[hotcpu]) {
+			buf = relay_open_buf(chan, hotcpu);
+			if (!buf) {
 				printk(KERN_ERR
 					"relay_hotcpu_callback: cpu %d buffer "
 					"creation failed\n", hotcpu);
 				mutex_unlock(&relay_channels_mutex);
 				return notifier_from_errno(-ENOMEM);
 			}
+			*per_cpu_ptr(chan->buf, hotcpu) = buf;
 		}
 		mutex_unlock(&relay_channels_mutex);
 		break;
@@ -583,6 +586,7 @@ struct rchan *relay_open(const char *base_filename,
 {
 	unsigned int i;
 	struct rchan *chan;
+	struct rchan_buf *buf;
 
 	if (!(subbuf_size && n_subbufs))
 		return NULL;
@@ -593,6 +597,7 @@ struct rchan *relay_open(const char *base_filename,
 	if (!chan)
 		return NULL;
 
+	chan->buf = alloc_percpu(struct rchan_buf *);
 	chan->version = RELAYFS_CHANNEL_VERSION;
 	chan->n_subbufs = n_subbufs;
 	chan->subbuf_size = subbuf_size;
@@ -608,9 +613,10 @@ struct rchan *relay_open(const char *base_filename,
 
 	mutex_lock(&relay_channels_mutex);
 	for_each_online_cpu(i) {
-		chan->buf[i] = relay_open_buf(chan, i);
-		if (!chan->buf[i])
+		buf = relay_open_buf(chan, i);
+		if (!buf)
 			goto free_bufs;
+		*per_cpu_ptr(chan->buf, i) = buf;
 	}
 	list_add(&chan->list, &relay_channels);
 	mutex_unlock(&relay_channels_mutex);
@@ -619,8 +625,8 @@ struct rchan *relay_open(const char *base_filename,
 
 free_bufs:
 	for_each_possible_cpu(i) {
-		if (chan->buf[i])
-			relay_close_buf(chan->buf[i]);
+		if ((buf = *per_cpu_ptr(chan->buf, i)))
+			relay_close_buf(buf);
 	}
 
 	kref_put(&chan->kref, relay_destroy_channel);
@@ -666,6 +672,7 @@ int relay_late_setup_files(struct rchan *chan,
 	unsigned int i, curr_cpu;
 	unsigned long flags;
 	struct dentry *dentry;
+	struct rchan_buf *buf;
 	struct rchan_percpu_buf_dispatcher disp;
 
 	if (!chan || !base_filename)
@@ -684,10 +691,11 @@ int relay_late_setup_files(struct rchan *chan,
 
 	if (chan->is_global) {
 		err = -EINVAL;
-		if (!WARN_ON_ONCE(!chan->buf[0])) {
-			dentry = relay_create_buf_file(chan, chan->buf[0], 0);
+		buf = *per_cpu_ptr(chan->buf, 0);
+		if (!WARN_ON_ONCE(!buf)) {
+			dentry = relay_create_buf_file(chan, buf, 0);
 			if (dentry && !WARN_ON_ONCE(!chan->is_global)) {
-				relay_set_buf_dentry(chan->buf[0], dentry);
+				relay_set_buf_dentry(buf, dentry);
 				err = 0;
 			}
 		}
@@ -702,13 +710,14 @@ int relay_late_setup_files(struct rchan *chan,
 	 * on all currently online CPUs.
 	 */
 	for_each_online_cpu(i) {
-		if (unlikely(!chan->buf[i])) {
+		buf = *per_cpu_ptr(chan->buf, i);
+		if (unlikely(!buf)) {
 			WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
 			err = -EINVAL;
 			break;
 		}
 
-		dentry = relay_create_buf_file(chan, chan->buf[i], i);
+		dentry = relay_create_buf_file(chan, buf, i);
 		if (unlikely(!dentry)) {
 			err = -EINVAL;
 			break;
@@ -716,10 +725,10 @@ int relay_late_setup_files(struct rchan *chan,
 
 		if (curr_cpu == i) {
 			local_irq_save(flags);
-			relay_set_buf_dentry(chan->buf[i], dentry);
+			relay_set_buf_dentry(buf, dentry);
 			local_irq_restore(flags);
 		} else {
-			disp.buf = chan->buf[i];
+			disp.buf = buf;
 			disp.dentry = dentry;
 			smp_mb();
 			/* relay_channels_mutex must be held, so wait. */
@@ -822,11 +831,10 @@ void relay_subbufs_consumed(struct rchan *chan,
 	if (!chan)
 		return;
 
-	if (cpu >= NR_CPUS || !chan->buf[cpu] ||
-	    subbufs_consumed > chan->n_subbufs)
+	buf = *per_cpu_ptr(chan->buf, cpu);
+	if (cpu >= NR_CPUS || !buf || subbufs_consumed > chan->n_subbufs)
 		return;
 
-	buf = chan->buf[cpu];
 	if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
 		buf->subbufs_consumed = buf->subbufs_produced;
 	else
@@ -842,18 +850,19 @@ EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
  */
 void relay_close(struct rchan *chan)
 {
+	struct rchan_buf *buf;
 	unsigned int i;
 
 	if (!chan)
 		return;
 
 	mutex_lock(&relay_channels_mutex);
-	if (chan->is_global && chan->buf[0])
-		relay_close_buf(chan->buf[0]);
+	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0)))
+		relay_close_buf(buf);
 	else
 		for_each_possible_cpu(i)
-			if (chan->buf[i])
-				relay_close_buf(chan->buf[i]);
+			if ((buf = *per_cpu_ptr(chan->buf, i)))
+				relay_close_buf(buf);
 
 	if (chan->last_toobig)
 		printk(KERN_WARNING "relay: one or more items not logged "
@@ -874,20 +883,21 @@ EXPORT_SYMBOL_GPL(relay_close);
  */
 void relay_flush(struct rchan *chan)
 {
+	struct rchan_buf *buf;
 	unsigned int i;
 
 	if (!chan)
 		return;
 
-	if (chan->is_global && chan->buf[0]) {
-		relay_switch_subbuf(chan->buf[0], 0);
+	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
+		relay_switch_subbuf(buf, 0);
 		return;
 	}
 
 	mutex_lock(&relay_channels_mutex);
 	for_each_possible_cpu(i)
-		if (chan->buf[i])
-			relay_switch_subbuf(chan->buf[i], 0);
+		if ((buf = *per_cpu_ptr(chan->buf, i)))
+			relay_switch_subbuf(buf, 0);
 	mutex_unlock(&relay_channels_mutex);
 }
 EXPORT_SYMBOL_GPL(relay_flush);