author		Joerg Roedel <jroedel@suse.de>	2016-05-09 10:58:37 -0400
committer	Joerg Roedel <jroedel@suse.de>	2016-05-09 10:58:37 -0400
commit		e85e8f69cedb5fbc7cd16f56dd97220e61ed616e (patch)
tree		b84639279878725f694f5d88b9ff6241ace93dfc /drivers/iommu/amd_iommu.c
parent		fd6c50ee3af3935d8dfa38b0a81b2386fd915cfe (diff)
iommu/amd: Remove statistics code
The statistics are not really used for anything and should be replaced
by generic and per-device statistic counters. Remove the code for now.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
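[Editor's note: for context on what is being deleted, the
DECLARE_STATS_COUNTER()/INC_STATS_COUNTER() helpers used throughout the
hunks below were defined in amd_iommu_types.h under
CONFIG_AMD_IOMMU_STATS; this page is filtered to amd_iommu.c, so the
companion hunks are not shown. The sketch below is a rough
reconstruction, not part of this diff; consult the full commit for the
exact definitions.]

/*
 * Rough reconstruction of the counter helpers from amd_iommu_types.h;
 * not part of this diff. Depends on <linux/debugfs.h> for struct dentry
 * and <linux/types.h> for u64.
 */
struct __iommu_counter {
	char *name;
	struct dentry *dent;	/* debugfs file, created at init time */
	u64 value;		/* bumped from the fast paths, no locking */
};

#define DECLARE_STATS_COUNTER(nm)		\
	static struct __iommu_counter nm = {	\
		.name = #nm,			\
	}

#define INC_STATS_COUNTER(name)		((name).value += 1)
#define ADD_STATS_COUNTER(name, x)	((name).value += (x))
#define SUB_STATS_COUNTER(name, x)	((name).value -= (x))

[The counters were plain, unsynchronized u64 arithmetic on file-scope
globals, compiled in only with CONFIG_AMD_IOMMU_STATS=y, which fits the
commit's point that generic, per-device counters would serve better.]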
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
 drivers/iommu/amd_iommu.c | 95 ---------------------------------------------
 1 file changed, 0 insertions(+), 95 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 119148552aa0..de9f028be79e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -489,70 +489,6 @@ static void iommu_uninit_device(struct device *dev)
  */
 }
 
-#ifdef CONFIG_AMD_IOMMU_STATS
-
-/*
- * Initialization code for statistics collection
- */
-
-DECLARE_STATS_COUNTER(compl_wait);
-DECLARE_STATS_COUNTER(cnt_map_single);
-DECLARE_STATS_COUNTER(cnt_unmap_single);
-DECLARE_STATS_COUNTER(cnt_map_sg);
-DECLARE_STATS_COUNTER(cnt_unmap_sg);
-DECLARE_STATS_COUNTER(cnt_alloc_coherent);
-DECLARE_STATS_COUNTER(cnt_free_coherent);
-DECLARE_STATS_COUNTER(cross_page);
-DECLARE_STATS_COUNTER(domain_flush_single);
-DECLARE_STATS_COUNTER(domain_flush_all);
-DECLARE_STATS_COUNTER(alloced_io_mem);
-DECLARE_STATS_COUNTER(total_map_requests);
-DECLARE_STATS_COUNTER(complete_ppr);
-DECLARE_STATS_COUNTER(invalidate_iotlb);
-DECLARE_STATS_COUNTER(invalidate_iotlb_all);
-DECLARE_STATS_COUNTER(pri_requests);
-
-static struct dentry *stats_dir;
-static struct dentry *de_fflush;
-
-static void amd_iommu_stats_add(struct __iommu_counter *cnt)
-{
-	if (stats_dir == NULL)
-		return;
-
-	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
-				       &cnt->value);
-}
-
-static void amd_iommu_stats_init(void)
-{
-	stats_dir = debugfs_create_dir("amd-iommu", NULL);
-	if (stats_dir == NULL)
-		return;
-
-	de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
-					&amd_iommu_unmap_flush);
-
-	amd_iommu_stats_add(&compl_wait);
-	amd_iommu_stats_add(&cnt_map_single);
-	amd_iommu_stats_add(&cnt_unmap_single);
-	amd_iommu_stats_add(&cnt_map_sg);
-	amd_iommu_stats_add(&cnt_unmap_sg);
-	amd_iommu_stats_add(&cnt_alloc_coherent);
-	amd_iommu_stats_add(&cnt_free_coherent);
-	amd_iommu_stats_add(&cross_page);
-	amd_iommu_stats_add(&domain_flush_single);
-	amd_iommu_stats_add(&domain_flush_all);
-	amd_iommu_stats_add(&alloced_io_mem);
-	amd_iommu_stats_add(&total_map_requests);
-	amd_iommu_stats_add(&complete_ppr);
-	amd_iommu_stats_add(&invalidate_iotlb);
-	amd_iommu_stats_add(&invalidate_iotlb_all);
-	amd_iommu_stats_add(&pri_requests);
-}
-
-#endif
-
 /****************************************************************************
  *
  * Interrupt handling functions
@@ -675,8 +611,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 {
 	struct amd_iommu_fault fault;
 
-	INC_STATS_COUNTER(pri_requests);
-
 	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 		return;
@@ -2641,11 +2575,6 @@ static dma_addr_t __map_single(struct device *dev,
 	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
 	paddr &= PAGE_MASK;
 
-	INC_STATS_COUNTER(total_map_requests);
-
-	if (pages > 1)
-		INC_STATS_COUNTER(cross_page);
-
 	if (align)
 		align_mask = (1UL << get_order(size)) - 1;
 
@@ -2666,8 +2595,6 @@ static dma_addr_t __map_single(struct device *dev,
 	}
 	address += offset;
 
-	ADD_STATS_COUNTER(alloced_io_mem, size);
-
 	if (unlikely(amd_iommu_np_cache)) {
 		domain_flush_pages(&dma_dom->domain, address, size);
 		domain_flush_complete(&dma_dom->domain);
@@ -2715,8 +2642,6 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 		start += PAGE_SIZE;
 	}
 
-	SUB_STATS_COUNTER(alloced_io_mem, size);
-
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 }
 
@@ -2732,8 +2657,6 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 	struct protection_domain *domain;
 	u64 dma_mask;
 
-	INC_STATS_COUNTER(cnt_map_single);
-
 	domain = get_domain(dev);
 	if (PTR_ERR(domain) == -EINVAL)
 		return (dma_addr_t)paddr;
@@ -2754,8 +2677,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 {
 	struct protection_domain *domain;
 
-	INC_STATS_COUNTER(cnt_unmap_single);
-
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return;
@@ -2778,8 +2699,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 	int mapped_elems = 0;
 	u64 dma_mask;
 
-	INC_STATS_COUNTER(cnt_map_sg);
-
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return 0;
@@ -2825,8 +2744,6 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *s;
 	int i;
 
-	INC_STATS_COUNTER(cnt_unmap_sg);
-
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return;
@@ -2849,8 +2766,6 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	struct protection_domain *domain;
 	struct page *page;
 
-	INC_STATS_COUNTER(cnt_alloc_coherent);
-
 	domain = get_domain(dev);
 	if (PTR_ERR(domain) == -EINVAL) {
 		page = alloc_pages(flag, get_order(size));
@@ -2904,8 +2819,6 @@ static void free_coherent(struct device *dev, size_t size,
 	struct protection_domain *domain;
 	struct page *page;
 
-	INC_STATS_COUNTER(cnt_free_coherent);
-
 	page = virt_to_page(virt_addr);
 	size = PAGE_ALIGN(size);
 
@@ -2997,8 +2910,6 @@ int __init amd_iommu_init_dma_ops(void)
 	if (!swiotlb)
 		dma_ops = &nommu_dma_ops;
 
-	amd_iommu_stats_init();
-
 	if (amd_iommu_unmap_flush)
 		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
 	else
@@ -3489,8 +3400,6 @@ out:
 static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
 				  u64 address)
 {
-	INC_STATS_COUNTER(invalidate_iotlb);
-
 	return __flush_pasid(domain, pasid, address, false);
 }
 
@@ -3511,8 +3420,6 @@ EXPORT_SYMBOL(amd_iommu_flush_page);
 
 static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
 {
-	INC_STATS_COUNTER(invalidate_iotlb_all);
-
 	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
 			     true);
 }
@@ -3632,8 +3539,6 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
 	struct amd_iommu *iommu;
 	struct iommu_cmd cmd;
 
-	INC_STATS_COUNTER(complete_ppr);
-
 	dev_data = get_dev_data(&pdev->dev);
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
 