author     Suzuki K Poulose <suzuki.poulose@arm.com>        2018-07-11 15:40:21 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-07-15 07:52:57 -0400
commit     260ec24b31a596361b617e6993ee9a31c43f278c (patch)
tree       92ea358571b20980ea6185d3e053a34546964d97 /drivers
parent     99443ea19e8bb0fee5fb6f4ed9cec11fd825061f (diff)
coresight: Add support for TMC ETR SG unit
This patch adds support for setting up an SG table used by the TMC ETR
built-in SG unit. The TMC ETR uses 4K-sized table pages holding pointers
to the 4K data pages, with the last entry in a table pointing to the
next table page, forming a chain. The two LSBs of each entry determine
its type:

  Normal - Points to a 4KB data page.
  Last   - Points to a 4KB data page, but is the last entry in the
           page table.
  Link   - Points to another 4KB table page holding further pointers.

The code takes care of the system page size, which may differ from 4K,
so multiple ETR SG tables may end up in a single system page, and
likewise for the data pages.

Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
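As an illustration of the entry encoding described above, here is a
standalone, user-space sketch that mirrors the ETR_SG_ENTRY() macro the
patch adds below; the helper name, fixed-width types, and the example
address are invented for illustration and are not part of the patch:

/*
 * Standalone sketch of the ETR SG table entry encoding; mirrors
 * ETR_SG_ENTRY() from the patch, compiled in user space. Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t sgte_t;

#define ETR_SG_PAGE_SHIFT  12
#define ETR_SG_ADDR_SHIFT  4
#define ETR_SG_ET_MASK     0x3
#define ETR_SG_ET_LAST     0x1
#define ETR_SG_ET_NORMAL   0x2
#define ETR_SG_ET_LINK     0x3

static sgte_t etr_sg_entry(uint64_t addr, uint32_t type)
{
        /* Pack bits [39:12] of the page address into [31:4], type into [1:0] */
        return (sgte_t)(((addr >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) |
                        (type & ETR_SG_ET_MASK));
}

int main(void)
{
        uint64_t page = 0x812345000ULL; /* hypothetical 4K-aligned page */

        printf("normal: 0x%08x\n", etr_sg_entry(page, ETR_SG_ET_NORMAL)); /* 0x08123452 */
        printf("link:   0x%08x\n", etr_sg_entry(page, ETR_SG_ET_LINK));   /* 0x08123453 */
        return 0;
}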
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c | 263
1 file changed, 263 insertions(+), 0 deletions(-)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 402b06143e10..54d5c0664277 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -11,6 +11,87 @@
 #include "coresight-tmc.h"
 
 /*
+ * The TMC ETR SG has a page size of 4K. The SG table contains pointers
+ * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
+ * 4K (i.e., 16KB or 64KB). This implies that a single OS page could
+ * contain more than one SG buffer and table.
+ *
+ * A table entry has the following format:
+ *
+ * ---Bit31------------Bit4-------Bit1-----Bit0--
+ * |     Address[39:12]    | SBZ |  Entry Type  |
+ * ----------------------------------------------
+ *
+ * Address: Bits [39:12] of a physical page address. Bits [11:0] are
+ *          always zero.
+ *
+ * Entry type:
+ *      b00 - Reserved.
+ *      b01 - Last entry in the tables, points to a 4K page buffer.
+ *      b10 - Normal entry, points to a 4K page buffer.
+ *      b11 - Link. The address points to the base of the next table.
+ */
+
+typedef u32 sgte_t;
+
+#define ETR_SG_PAGE_SHIFT               12
+#define ETR_SG_PAGE_SIZE                (1UL << ETR_SG_PAGE_SHIFT)
+#define ETR_SG_PAGES_PER_SYSPAGE        (PAGE_SIZE / ETR_SG_PAGE_SIZE)
+#define ETR_SG_PTRS_PER_PAGE            (ETR_SG_PAGE_SIZE / sizeof(sgte_t))
+#define ETR_SG_PTRS_PER_SYSPAGE         (PAGE_SIZE / sizeof(sgte_t))
+
+#define ETR_SG_ET_MASK                  0x3
+#define ETR_SG_ET_LAST                  0x1
+#define ETR_SG_ET_NORMAL                0x2
+#define ETR_SG_ET_LINK                  0x3
+
+#define ETR_SG_ADDR_SHIFT               4
+
+#define ETR_SG_ENTRY(addr, type) \
+        (sgte_t)((((addr) >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) | \
+                 ((type) & ETR_SG_ET_MASK))
+
+#define ETR_SG_ADDR(entry) \
+        (((dma_addr_t)(entry) >> ETR_SG_ADDR_SHIFT) << ETR_SG_PAGE_SHIFT)
+#define ETR_SG_ET(entry)                ((entry) & ETR_SG_ET_MASK)
+
+/*
+ * struct etr_sg_table : ETR SG Table
+ * @sg_table:   Generic SG Table holding the data/table pages.
+ * @hwaddr:     hardware address used by the TMC, which is the base
+ *              address of the table.
+ */
+struct etr_sg_table {
+        struct tmc_sg_table     *sg_table;
+        dma_addr_t              hwaddr;
+};
+
+/*
+ * tmc_etr_sg_table_entries: Total number of table entries required to map
+ * @nr_pages system pages.
+ *
+ * We need to map @nr_pages * ETR_SG_PAGES_PER_SYSPAGE data pages.
+ * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
+ * with the last entry pointing to another page of table entries.
+ * If we spill over to a new page for mapping 1 entry, we could as
+ * well replace the link entry of the previous page with the last entry.
+ */
+static inline unsigned long __attribute_const__
+tmc_etr_sg_table_entries(int nr_pages)
+{
+        unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
+        unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1);
+        /*
+         * If we spill over to a new page for 1 entry, we could as well
+         * make it the LAST entry in the previous page, skipping the Link
+         * address.
+         */
+        if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2))
+                nr_sglinks--;
+        return nr_sgpages + nr_sglinks;
+}
+
+/*
  * tmc_pages_get_offset: Go through all the pages in the tmc_pages
  * and map the device address @addr to an offset within the virtual
  * contiguous buffer.
@@ -277,6 +358,188 @@ ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
         return len;
 }
 
+#ifdef ETR_SG_DEBUG
+/* Map a dma address to a virtual address */
+static unsigned long
+tmc_sg_daddr_to_vaddr(struct tmc_sg_table *sg_table,
+                      dma_addr_t addr, bool table)
+{
+        long offset;
+        unsigned long base;
+        struct tmc_pages *tmc_pages;
+
+        if (table) {
+                tmc_pages = &sg_table->table_pages;
+                base = (unsigned long)sg_table->table_vaddr;
+        } else {
+                tmc_pages = &sg_table->data_pages;
+                base = (unsigned long)sg_table->data_vaddr;
+        }
+
+        offset = tmc_pages_get_offset(tmc_pages, addr);
+        if (offset < 0)
+                return 0;
+        return base + offset;
+}
+
+/* Dump the given sg_table */
+static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
+{
+        sgte_t *ptr;
+        int i = 0;
+        dma_addr_t addr;
+        struct tmc_sg_table *sg_table = etr_table->sg_table;
+
+        ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
+                                              etr_table->hwaddr, true);
+        while (ptr) {
+                addr = ETR_SG_ADDR(*ptr);
+                switch (ETR_SG_ET(*ptr)) {
+                case ETR_SG_ET_NORMAL:
+                        dev_dbg(sg_table->dev,
+                                "%05d: %p\t:[N] 0x%llx\n", i, ptr, addr);
+                        ptr++;
+                        break;
+                case ETR_SG_ET_LINK:
+                        dev_dbg(sg_table->dev,
+                                "%05d: *** %p\t:{L} 0x%llx ***\n",
+                                i, ptr, addr);
+                        ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
+                                                              addr, true);
+                        break;
+                case ETR_SG_ET_LAST:
+                        dev_dbg(sg_table->dev,
+                                "%05d: ### %p\t:[L] 0x%llx ###\n",
+                                i, ptr, addr);
+                        return;
+                default:
+                        dev_dbg(sg_table->dev,
+                                "%05d: xxx %p\t:[INVALID] 0x%llx xxx\n",
+                                i, ptr, addr);
+                        return;
+                }
+                i++;
+        }
+        dev_dbg(sg_table->dev, "******* End of Table *****\n");
+}
+#else
+static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
+#endif
+
+/*
+ * Populate the SG Table page table entries from the table/data pages
+ * allocated. Each system data page holds ETR_SG_PAGES_PER_SYSPAGE SG
+ * pages, and so does each table page. We therefore keep track of the
+ * indices within each system page and move the pointers accordingly.
+ */
+#define INC_IDX_ROUND(idx, size) ((idx) = ((idx) + 1) % (size))
+static void tmc_etr_sg_table_populate(struct etr_sg_table *etr_table)
+{
+        dma_addr_t paddr;
+        int i, type, nr_entries;
+        int tpidx = 0; /* index to the current system table_page */
+        int sgtidx = 0; /* index to the sg_table within the current syspage */
+        int sgtentry = 0; /* the entry within the sg_table */
+        int dpidx = 0; /* index to the current system data_page */
+        int spidx = 0; /* index to the SG page within the current data page */
+        sgte_t *ptr; /* pointer to the table entry to fill */
+        struct tmc_sg_table *sg_table = etr_table->sg_table;
+        dma_addr_t *table_daddrs = sg_table->table_pages.daddrs;
+        dma_addr_t *data_daddrs = sg_table->data_pages.daddrs;
+
+        nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages);
+        /*
+         * Use the contiguous virtual address of the table to update entries.
+         */
+        ptr = sg_table->table_vaddr;
+        /*
+         * Fill all the entries, except the last entry, to avoid special
+         * checks within the loop.
+         */
+        for (i = 0; i < nr_entries - 1; i++) {
+                if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) {
+                        /*
+                         * Last entry in a sg_table page is a link address to
+                         * the next table page. If this sg_table is the last
+                         * one in the system page, it links to the first
+                         * sg_table in the next system page. Otherwise, it
+                         * links to the next sg_table page within the system
+                         * page.
+                         */
+                        if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) {
+                                paddr = table_daddrs[tpidx + 1];
+                        } else {
+                                paddr = table_daddrs[tpidx] +
+                                        (ETR_SG_PAGE_SIZE * (sgtidx + 1));
+                        }
+                        type = ETR_SG_ET_LINK;
+                } else {
+                        /*
+                         * Update the indices to the data_pages to point to
+                         * the next sg_page in the data buffer.
+                         */
+                        type = ETR_SG_ET_NORMAL;
+                        paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
+                        if (!INC_IDX_ROUND(spidx, ETR_SG_PAGES_PER_SYSPAGE))
+                                dpidx++;
+                }
+                *ptr++ = ETR_SG_ENTRY(paddr, type);
+                /*
+                 * Move to the next table pointer, moving the table page
+                 * index if necessary.
+                 */
+                if (!INC_IDX_ROUND(sgtentry, ETR_SG_PTRS_PER_PAGE)) {
+                        if (!INC_IDX_ROUND(sgtidx, ETR_SG_PAGES_PER_SYSPAGE))
+                                tpidx++;
+                }
+        }
+
+        /* Set up the last entry, which is always a data pointer */
+        paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
+        *ptr++ = ETR_SG_ENTRY(paddr, ETR_SG_ET_LAST);
+}
+
+/*
+ * tmc_init_etr_sg_table: Allocate a TMC ETR SG table, a data buffer of
+ * @size, and populate the table.
+ *
+ * @dev   - Device pointer for the TMC
+ * @node  - NUMA node where the memory should be allocated
+ * @size  - Total size of the data buffer
+ * @pages - Optional list of page virtual addresses
+ */
+static struct etr_sg_table __maybe_unused *
+tmc_init_etr_sg_table(struct device *dev, int node,
+                      unsigned long size, void **pages)
+{
+        int nr_entries, nr_tpages;
+        int nr_dpages = size >> PAGE_SHIFT;
+        struct tmc_sg_table *sg_table;
+        struct etr_sg_table *etr_table;
+
+        etr_table = kzalloc(sizeof(*etr_table), GFP_KERNEL);
+        if (!etr_table)
+                return ERR_PTR(-ENOMEM);
+        nr_entries = tmc_etr_sg_table_entries(nr_dpages);
+        nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE);
+
+        sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
+        if (IS_ERR(sg_table)) {
+                kfree(etr_table);
+                return ERR_PTR(PTR_ERR(sg_table));
+        }
+
+        etr_table->sg_table = sg_table;
+        /* TMC should use the table base address for DBA */
+        etr_table->hwaddr = sg_table->table_daddr;
+        tmc_etr_sg_table_populate(etr_table);
+        /* Sync the table pages for the HW */
+        tmc_sg_table_sync_table(sg_table);
+        tmc_etr_sg_table_dump(etr_table);
+
+        return etr_table;
+}
+
 static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
 {
         u32 axictl, sts;
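
As a quick standalone check of the entry accounting in
tmc_etr_sg_table_entries() above, the sketch below assumes a 4K system
page (so ETR_SG_PAGES_PER_SYSPAGE == 1 and ETR_SG_PTRS_PER_PAGE == 1024);
the helper name and constants are local to the sketch, not kernel code:

#include <stdio.h>

#define PTRS_PER_PAGE 1024UL    /* ETR_SG_PAGE_SIZE / sizeof(sgte_t) = 4096 / 4 */

static unsigned long sg_table_entries(unsigned long nr_pages)
{
        /* With 4K system pages there is one SG page per system page */
        unsigned long nr_sgpages = nr_pages;
        unsigned long nr_sglinks = nr_sgpages / (PTRS_PER_PAGE - 1);

        /*
         * A spill-over of a single entry becomes the Last entry of the
         * previous table page instead of requiring a Link there.
         */
        if (nr_sglinks && (nr_sgpages % (PTRS_PER_PAGE - 1) < 2))
                nr_sglinks--;
        return nr_sgpages + nr_sglinks;
}

int main(void)
{
        /* 1024 data pages: 1023 Normal + 1 Last fit in one table page */
        printf("%lu\n", sg_table_entries(1024));    /* prints 1024 */
        /* 2048 data pages: 1023 + Link + 1023 + Link + 2 = 2050 entries */
        printf("%lu\n", sg_table_entries(2048));    /* prints 2050 */
        return 0;
}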
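
tmc_init_etr_sg_table() is marked __maybe_unused because this patch adds
no caller yet. A hypothetical caller might look like the following
sketch; the function name, the 4MB buffer size, and the NULL pages
argument are invented for illustration:

/*
 * Hypothetical caller (not part of this patch): allocate and populate
 * an ETR SG table for a 4MB trace buffer on the device's NUMA node,
 * letting tmc_alloc_sg_table() allocate the data pages (pages == NULL).
 */
static int tmc_example_setup(struct tmc_drvdata *drvdata)
{
        struct etr_sg_table *etr_table;

        etr_table = tmc_init_etr_sg_table(drvdata->dev,
                                          dev_to_node(drvdata->dev),
                                          SZ_4M, NULL);
        if (IS_ERR(etr_table))
                return PTR_ERR(etr_table);

        /* etr_table->hwaddr would then be programmed into the TMC DBA */
        return 0;
}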