summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c342
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h55
2 files changed, 308 insertions, 89 deletions
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 54d5c0664277..fba659dd2ee0 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -6,10 +6,18 @@
6 6
7#include <linux/coresight.h> 7#include <linux/coresight.h>
8#include <linux/dma-mapping.h> 8#include <linux/dma-mapping.h>
9#include <linux/iommu.h>
9#include <linux/slab.h> 10#include <linux/slab.h>
10#include "coresight-priv.h" 11#include "coresight-priv.h"
11#include "coresight-tmc.h" 12#include "coresight-tmc.h"
12 13
14struct etr_flat_buf {
15 struct device *dev;
16 dma_addr_t daddr;
17 void *vaddr;
18 size_t size;
19};
20
13/* 21/*
14 * The TMC ETR SG has a page size of 4K. The SG table contains pointers 22 * The TMC ETR SG has a page size of 4K. The SG table contains pointers
15 * to 4KB buffers. However, the OS may use a PAGE_SIZE different from 23 * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
@@ -540,16 +548,207 @@ tmc_init_etr_sg_table(struct device *dev, int node,
540 return etr_table; 548 return etr_table;
541} 549}
542 550
551/*
552 * tmc_etr_alloc_flat_buf: Allocate a contiguous DMA buffer.
553 */
554static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
555 struct etr_buf *etr_buf, int node,
556 void **pages)
557{
558 struct etr_flat_buf *flat_buf;
559
560 /* We cannot reuse existing pages for flat buf */
561 if (pages)
562 return -EINVAL;
563
564 flat_buf = kzalloc(sizeof(*flat_buf), GFP_KERNEL);
565 if (!flat_buf)
566 return -ENOMEM;
567
568 flat_buf->vaddr = dma_alloc_coherent(drvdata->dev, etr_buf->size,
569 &flat_buf->daddr, GFP_KERNEL);
570 if (!flat_buf->vaddr) {
571 kfree(flat_buf);
572 return -ENOMEM;
573 }
574
575 flat_buf->size = etr_buf->size;
576 flat_buf->dev = drvdata->dev;
577 etr_buf->hwaddr = flat_buf->daddr;
578 etr_buf->mode = ETR_MODE_FLAT;
579 etr_buf->private = flat_buf;
580 return 0;
581}
582
583static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
584{
585 struct etr_flat_buf *flat_buf = etr_buf->private;
586
587 if (flat_buf && flat_buf->daddr)
588 dma_free_coherent(flat_buf->dev, flat_buf->size,
589 flat_buf->vaddr, flat_buf->daddr);
590 kfree(flat_buf);
591}
592
593static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
594{
595 /*
596 * Adjust the buffer to point to the beginning of the trace data
597 * and update the available trace data.
598 */
599 etr_buf->offset = rrp - etr_buf->hwaddr;
600 if (etr_buf->full)
601 etr_buf->len = etr_buf->size;
602 else
603 etr_buf->len = rwp - rrp;
604}
605
606static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
607 u64 offset, size_t len, char **bufpp)
608{
609 struct etr_flat_buf *flat_buf = etr_buf->private;
610
611 *bufpp = (char *)flat_buf->vaddr + offset;
612 /*
613 * tmc_etr_buf_get_data already adjusts the length to handle
614 * buffer wrapping around.
615 */
616 return len;
617}
618
619static const struct etr_buf_operations etr_flat_buf_ops = {
620 .alloc = tmc_etr_alloc_flat_buf,
621 .free = tmc_etr_free_flat_buf,
622 .sync = tmc_etr_sync_flat_buf,
623 .get_data = tmc_etr_get_data_flat_buf,
624};
625
626static const struct etr_buf_operations *etr_buf_ops[] = {
627 [ETR_MODE_FLAT] = &etr_flat_buf_ops,
628};
629
630static inline int tmc_etr_mode_alloc_buf(int mode,
631 struct tmc_drvdata *drvdata,
632 struct etr_buf *etr_buf, int node,
633 void **pages)
634{
635 int rc;
636
637 switch (mode) {
638 case ETR_MODE_FLAT:
639 rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf, node, pages);
640 if (!rc)
641 etr_buf->ops = etr_buf_ops[mode];
642 return rc;
643 default:
644 return -EINVAL;
645 }
646}
647
648/*
 649 * tmc_alloc_etr_buf: Allocate a buffer used by ETR.
650 * @drvdata : ETR device details.
651 * @size : size of the requested buffer.
652 * @flags : Required properties for the buffer.
653 * @node : Node for memory allocations.
654 * @pages : An optional list of pages.
655 */
656static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
657 ssize_t size, int flags,
658 int node, void **pages)
659{
660 int rc = 0;
661 struct etr_buf *etr_buf;
662
663 etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
664 if (!etr_buf)
665 return ERR_PTR(-ENOMEM);
666
667 etr_buf->size = size;
668
669 rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
670 etr_buf, node, pages);
671 if (rc) {
672 kfree(etr_buf);
673 return ERR_PTR(rc);
674 }
675
676 return etr_buf;
677}
678
679static void tmc_free_etr_buf(struct etr_buf *etr_buf)
680{
681 WARN_ON(!etr_buf->ops || !etr_buf->ops->free);
682 etr_buf->ops->free(etr_buf);
683 kfree(etr_buf);
684}
685
686/*
 687 * tmc_etr_buf_get_data: Get the pointer to the trace data at @offset
688 * with a maximum of @len bytes.
 689 * Returns: The size of the linear data available at @pos, with *bufpp
690 * updated to point to the buffer.
691 */
692static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
693 u64 offset, size_t len, char **bufpp)
694{
695 /* Adjust the length to limit this transaction to end of buffer */
696 len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset;
697
698 return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
699}
700
701static inline s64
702tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
703{
704 ssize_t len;
705 char *bufp;
706
707 len = tmc_etr_buf_get_data(etr_buf, offset,
708 CORESIGHT_BARRIER_PKT_SIZE, &bufp);
 709	if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
710 return -EINVAL;
711 coresight_insert_barrier_packet(bufp);
712 return offset + CORESIGHT_BARRIER_PKT_SIZE;
713}
714
715/*
716 * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
717 * Makes sure the trace data is synced to the memory for consumption.
718 * @etr_buf->offset will hold the offset to the beginning of the trace data
719 * within the buffer, with @etr_buf->len bytes to consume.
720 */
721static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
722{
723 struct etr_buf *etr_buf = drvdata->etr_buf;
724 u64 rrp, rwp;
725 u32 status;
726
727 rrp = tmc_read_rrp(drvdata);
728 rwp = tmc_read_rwp(drvdata);
729 status = readl_relaxed(drvdata->base + TMC_STS);
730 etr_buf->full = status & TMC_STS_FULL;
731
732 WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
733
734 etr_buf->ops->sync(etr_buf, rrp, rwp);
735
736 /* Insert barrier packets at the beginning, if there was an overflow */
737 if (etr_buf->full)
738 tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
739}
740
543static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata) 741static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
544{ 742{
545 u32 axictl, sts; 743 u32 axictl, sts;
744 struct etr_buf *etr_buf = drvdata->etr_buf;
546 745
547 CS_UNLOCK(drvdata->base); 746 CS_UNLOCK(drvdata->base);
548 747
549 /* Wait for TMCSReady bit to be set */ 748 /* Wait for TMCSReady bit to be set */
550 tmc_wait_for_tmcready(drvdata); 749 tmc_wait_for_tmcready(drvdata);
551 750
552 writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ); 751 writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
553 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); 752 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
554 753
555 axictl = readl_relaxed(drvdata->base + TMC_AXICTL); 754 axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
@@ -563,15 +762,15 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
563 } 762 }
564 763
565 writel_relaxed(axictl, drvdata->base + TMC_AXICTL); 764 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
566 tmc_write_dba(drvdata, drvdata->paddr); 765 tmc_write_dba(drvdata, etr_buf->hwaddr);
567 /* 766 /*
568 * If the TMC pointers must be programmed before the session, 767 * If the TMC pointers must be programmed before the session,
569 * we have to set it properly (i.e, RRP/RWP to base address and 768 * we have to set it properly (i.e, RRP/RWP to base address and
570 * STS to "not full"). 769 * STS to "not full").
571 */ 770 */
572 if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) { 771 if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
573 tmc_write_rrp(drvdata, drvdata->paddr); 772 tmc_write_rrp(drvdata, etr_buf->hwaddr);
574 tmc_write_rwp(drvdata, drvdata->paddr); 773 tmc_write_rwp(drvdata, etr_buf->hwaddr);
575 sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL; 774 sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
576 writel_relaxed(sts, drvdata->base + TMC_STS); 775 writel_relaxed(sts, drvdata->base + TMC_STS);
577 } 776 }
@@ -587,59 +786,48 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
587} 786}
588 787
589/* 788/*
590 * Return the available trace data in the buffer @pos, with a maximum 789 * Return the available trace data in the buffer (starts at etr_buf->offset,
591 * limit of @len, also updating the @bufpp on where to find it. 790 * limited by etr_buf->len) from @pos, with a maximum limit of @len,
791 * also updating the @bufpp on where to find it. Since the trace data
792 * starts at anywhere in the buffer, depending on the RRP, we adjust the
793 * @len returned to handle buffer wrapping around.
592 */ 794 */
593ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata, 795ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
594 loff_t pos, size_t len, char **bufpp) 796 loff_t pos, size_t len, char **bufpp)
595{ 797{
798 s64 offset;
596 ssize_t actual = len; 799 ssize_t actual = len;
597 char *bufp = drvdata->buf + pos; 800 struct etr_buf *etr_buf = drvdata->etr_buf;
598 char *bufend = (char *)(drvdata->vaddr + drvdata->size);
599
600 /* Adjust the len to available size @pos */
601 if (pos + actual > drvdata->len)
602 actual = drvdata->len - pos;
603 801
802 if (pos + actual > etr_buf->len)
803 actual = etr_buf->len - pos;
604 if (actual <= 0) 804 if (actual <= 0)
605 return actual; 805 return actual;
606 806
607 /* 807 /* Compute the offset from which we read the data */
608 * Since we use a circular buffer, with trace data starting 808 offset = etr_buf->offset + pos;
609 * @drvdata->buf, possibly anywhere in the buffer @drvdata->vaddr, 809 if (offset >= etr_buf->size)
610 * wrap the current @pos to within the buffer. 810 offset -= etr_buf->size;
611 */ 811 return tmc_etr_buf_get_data(etr_buf, offset, actual, bufpp);
612 if (bufp >= bufend)
613 bufp -= drvdata->size;
614 /*
615 * For simplicity, avoid copying over a wrapped around buffer.
616 */
617 if ((bufp + actual) > bufend)
618 actual = bufend - bufp;
619 *bufpp = bufp;
620 return actual;
621} 812}
622 813
623static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata) 814static struct etr_buf *
815tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
624{ 816{
625 u32 val; 817 return tmc_alloc_etr_buf(drvdata, drvdata->size,
626 u64 rwp; 818 0, cpu_to_node(0), NULL);
819}
627 820
628 rwp = tmc_read_rwp(drvdata); 821static void
629 val = readl_relaxed(drvdata->base + TMC_STS); 822tmc_etr_free_sysfs_buf(struct etr_buf *buf)
823{
824 if (buf)
825 tmc_free_etr_buf(buf);
826}
630 827
631 /* 828static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
632 * Adjust the buffer to point to the beginning of the trace data 829{
633 * and update the available trace data. 830 tmc_sync_etr_buf(drvdata);
634 */
635 if (val & TMC_STS_FULL) {
636 drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
637 drvdata->len = drvdata->size;
638 coresight_insert_barrier_packet(drvdata->buf);
639 } else {
640 drvdata->buf = drvdata->vaddr;
641 drvdata->len = rwp - drvdata->paddr;
642 }
643} 831}
644 832
645static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) 833static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
@@ -652,7 +840,8 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
652 * read before the TMC is disabled. 840 * read before the TMC is disabled.
653 */ 841 */
654 if (drvdata->mode == CS_MODE_SYSFS) 842 if (drvdata->mode == CS_MODE_SYSFS)
655 tmc_etr_dump_hw(drvdata); 843 tmc_etr_sync_sysfs_buf(drvdata);
844
656 tmc_disable_hw(drvdata); 845 tmc_disable_hw(drvdata);
657 846
658 CS_LOCK(drvdata->base); 847 CS_LOCK(drvdata->base);
@@ -661,35 +850,32 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
661static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) 850static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
662{ 851{
663 int ret = 0; 852 int ret = 0;
664 bool used = false;
665 unsigned long flags; 853 unsigned long flags;
666 void __iomem *vaddr = NULL;
667 dma_addr_t paddr = 0;
668 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 854 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
855 struct etr_buf *new_buf = NULL, *free_buf = NULL;
669 856
670 /* 857 /*
671 * If we don't have a buffer release the lock and allocate memory. 858 * If we are enabling the ETR from disabled state, we need to make
672 * Otherwise keep the lock and move along. 859 * sure we have a buffer with the right size. The etr_buf is not reset
860 * immediately after we stop the tracing in SYSFS mode as we wait for
861 * the user to collect the data. We may be able to reuse the existing
862 * buffer, provided the size matches. Any allocation has to be done
863 * with the lock released.
673 */ 864 */
674 spin_lock_irqsave(&drvdata->spinlock, flags); 865 spin_lock_irqsave(&drvdata->spinlock, flags);
675 if (!drvdata->vaddr) { 866 if (!drvdata->etr_buf || (drvdata->etr_buf->size != drvdata->size)) {
676 spin_unlock_irqrestore(&drvdata->spinlock, flags); 867 spin_unlock_irqrestore(&drvdata->spinlock, flags);
677 868
678 /* 869 /* Allocate memory with the locks released */
679 * Contiguous memory can't be allocated while a spinlock is 870 free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
680 * held. As such allocate memory here and free it if a buffer 871 if (IS_ERR(new_buf))
681 * has already been allocated (from a previous session). 872 return PTR_ERR(new_buf);
682 */
683 vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
684 &paddr, GFP_KERNEL);
685 if (!vaddr)
686 return -ENOMEM;
687 873
688 /* Let's try again */ 874 /* Let's try again */
689 spin_lock_irqsave(&drvdata->spinlock, flags); 875 spin_lock_irqsave(&drvdata->spinlock, flags);
690 } 876 }
691 877
692 if (drvdata->reading) { 878 if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
693 ret = -EBUSY; 879 ret = -EBUSY;
694 goto out; 880 goto out;
695 } 881 }
@@ -697,21 +883,19 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
697 /* 883 /*
698 * In sysFS mode we can have multiple writers per sink. Since this 884 * In sysFS mode we can have multiple writers per sink. Since this
699 * sink is already enabled no memory is needed and the HW need not be 885 * sink is already enabled no memory is needed and the HW need not be
700 * touched. 886 * touched, even if the buffer size has changed.
701 */ 887 */
702 if (drvdata->mode == CS_MODE_SYSFS) 888 if (drvdata->mode == CS_MODE_SYSFS)
703 goto out; 889 goto out;
704 890
705 /* 891 /*
706 * If drvdata::vaddr == NULL, use the memory allocated above. 892 * If we don't have a buffer or it doesn't match the requested size,
707 * Otherwise a buffer still exists from a previous session, so 893 * use the buffer allocated above. Otherwise reuse the existing buffer.
708 * simply use that.
709 */ 894 */
710 if (drvdata->vaddr == NULL) { 895 if (!drvdata->etr_buf ||
711 used = true; 896 (new_buf && drvdata->etr_buf->size != new_buf->size)) {
712 drvdata->vaddr = vaddr; 897 free_buf = drvdata->etr_buf;
713 drvdata->paddr = paddr; 898 drvdata->etr_buf = new_buf;
714 drvdata->buf = drvdata->vaddr;
715 } 899 }
716 900
717 drvdata->mode = CS_MODE_SYSFS; 901 drvdata->mode = CS_MODE_SYSFS;
@@ -720,8 +904,8 @@ out:
720 spin_unlock_irqrestore(&drvdata->spinlock, flags); 904 spin_unlock_irqrestore(&drvdata->spinlock, flags);
721 905
722 /* Free memory outside the spinlock if need be */ 906 /* Free memory outside the spinlock if need be */
723 if (!used && vaddr) 907 if (free_buf)
724 dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr); 908 tmc_etr_free_sysfs_buf(free_buf);
725 909
726 if (!ret) 910 if (!ret)
727 dev_info(drvdata->dev, "TMC-ETR enabled\n"); 911 dev_info(drvdata->dev, "TMC-ETR enabled\n");
@@ -800,8 +984,8 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
800 goto out; 984 goto out;
801 } 985 }
802 986
803 /* If drvdata::buf is NULL the trace data has been read already */ 987 /* If drvdata::etr_buf is NULL the trace data has been read already */
804 if (drvdata->buf == NULL) { 988 if (drvdata->etr_buf == NULL) {
805 ret = -EINVAL; 989 ret = -EINVAL;
806 goto out; 990 goto out;
807 } 991 }
@@ -820,8 +1004,7 @@ out:
820int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) 1004int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
821{ 1005{
822 unsigned long flags; 1006 unsigned long flags;
823 dma_addr_t paddr; 1007 struct etr_buf *etr_buf = NULL;
824 void __iomem *vaddr = NULL;
825 1008
 826	/* config types are set at boot time and never change */	1009	/* config types are set at boot time and never change */
827 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) 1010 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
@@ -842,17 +1025,16 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
842 * The ETR is not tracing and the buffer was just read. 1025 * The ETR is not tracing and the buffer was just read.
843 * As such prepare to free the trace buffer. 1026 * As such prepare to free the trace buffer.
844 */ 1027 */
845 vaddr = drvdata->vaddr; 1028 etr_buf = drvdata->etr_buf;
846 paddr = drvdata->paddr; 1029 drvdata->etr_buf = NULL;
847 drvdata->buf = drvdata->vaddr = NULL;
848 } 1030 }
849 1031
850 drvdata->reading = false; 1032 drvdata->reading = false;
851 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1033 spin_unlock_irqrestore(&drvdata->spinlock, flags);
852 1034
 853	/* Free allocated memory outside of the spinlock */	1035	/* Free allocated memory outside of the spinlock */
854 if (vaddr) 1036 if (etr_buf)
855 dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr); 1037 tmc_free_etr_buf(etr_buf);
856 1038
857 return 0; 1039 return 0;
858} 1040}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index cdb668b4441d..bbd65e2107be 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -123,6 +123,34 @@ enum tmc_mem_intf_width {
123#define CORESIGHT_SOC_600_ETR_CAPS \ 123#define CORESIGHT_SOC_600_ETR_CAPS \
124 (TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE) 124 (TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE)
125 125
126enum etr_mode {
127 ETR_MODE_FLAT, /* Uses contiguous flat buffer */
128};
129
130struct etr_buf_operations;
131
132/**
133 * struct etr_buf - Details of the buffer used by ETR
134 * @mode : Mode of the ETR buffer, contiguous, Scatter Gather etc.
135 * @full : Trace data overflow
136 * @size : Size of the buffer.
137 * @hwaddr : Address to be programmed in the TMC:DBA{LO,HI}
138 * @offset : Offset of the trace data in the buffer for consumption.
 139 * @len : Available trace data @buf (may wrap around to the beginning).
140 * @ops : ETR buffer operations for the mode.
141 * @private : Backend specific information for the buf
142 */
143struct etr_buf {
144 enum etr_mode mode;
145 bool full;
146 ssize_t size;
147 dma_addr_t hwaddr;
148 unsigned long offset;
149 s64 len;
150 const struct etr_buf_operations *ops;
151 void *private;
152};
153
126/** 154/**
127 * struct tmc_drvdata - specifics associated to an TMC component 155 * struct tmc_drvdata - specifics associated to an TMC component
128 * @base: memory mapped base address for this component. 156 * @base: memory mapped base address for this component.
@@ -130,11 +158,10 @@ enum tmc_mem_intf_width {
130 * @csdev: component vitals needed by the framework. 158 * @csdev: component vitals needed by the framework.
131 * @miscdev: specifics to handle "/dev/xyz.tmc" entry. 159 * @miscdev: specifics to handle "/dev/xyz.tmc" entry.
132 * @spinlock: only one at a time pls. 160 * @spinlock: only one at a time pls.
133 * @buf: area of memory where trace data get sent. 161 * @buf: Snapshot of the trace data for ETF/ETB.
134 * @paddr: DMA start location in RAM. 162 * @etr_buf: details of buffer used in TMC-ETR
135 * @vaddr: virtual representation of @paddr. 163 * @len: size of the available trace for ETF/ETB.
136 * @size: trace buffer size. 164 * @size: trace buffer size for this TMC (common for all modes).
137 * @len: size of the available trace.
138 * @mode: how this TMC is being used. 165 * @mode: how this TMC is being used.
139 * @config_type: TMC variant, must be of type @tmc_config_type. 166 * @config_type: TMC variant, must be of type @tmc_config_type.
140 * @memwidth: width of the memory interface databus, in bytes. 167 * @memwidth: width of the memory interface databus, in bytes.
@@ -149,11 +176,12 @@ struct tmc_drvdata {
149 struct miscdevice miscdev; 176 struct miscdevice miscdev;
150 spinlock_t spinlock; 177 spinlock_t spinlock;
151 bool reading; 178 bool reading;
152 char *buf; 179 union {
153 dma_addr_t paddr; 180 char *buf; /* TMC ETB */
154 void __iomem *vaddr; 181 struct etr_buf *etr_buf; /* TMC ETR */
155 u32 size; 182 };
156 u32 len; 183 u32 len;
184 u32 size;
157 u32 mode; 185 u32 mode;
158 enum tmc_config_type config_type; 186 enum tmc_config_type config_type;
159 enum tmc_mem_intf_width memwidth; 187 enum tmc_mem_intf_width memwidth;
@@ -161,6 +189,15 @@ struct tmc_drvdata {
161 u32 etr_caps; 189 u32 etr_caps;
162}; 190};
163 191
192struct etr_buf_operations {
193 int (*alloc)(struct tmc_drvdata *drvdata, struct etr_buf *etr_buf,
194 int node, void **pages);
195 void (*sync)(struct etr_buf *etr_buf, u64 rrp, u64 rwp);
196 ssize_t (*get_data)(struct etr_buf *etr_buf, u64 offset, size_t len,
197 char **bufpp);
198 void (*free)(struct etr_buf *etr_buf);
199};
200
164/** 201/**
165 * struct tmc_pages - Collection of pages used for SG. 202 * struct tmc_pages - Collection of pages used for SG.
166 * @nr_pages: Number of pages in the list. 203 * @nr_pages: Number of pages in the list.