Diffstat (limited to 'lib')
-rw-r--r--  lib/dma-debug.c | 124
1 files changed, 103 insertions, 21 deletions
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d0618aa13b49..f49ab22643b7 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -117,6 +117,11 @@ static const char *type2name[4] = { "single", "page",
 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
 			"DMA_FROM_DEVICE", "DMA_NONE" };
 
+/* little merge helper - remove it after the merge window */
+#ifndef BUS_NOTIFY_UNBOUND_DRIVER
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
+#endif
+
 /*
  * The access to some variables in this macro is racy. We can't use atomic_t
  * here because all these variables are exported to debugfs. Some of them even
@@ -605,9 +610,60 @@ out_err:
 	return -ENOMEM;
 }
 
+static int device_dma_allocations(struct device *dev)
+{
+	struct dma_debug_entry *entry;
+	unsigned long flags;
+	int count = 0, i;
+
+	for (i = 0; i < HASH_SIZE; ++i) {
+		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
+		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
+			if (entry->dev == dev)
+				count += 1;
+		}
+		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
+	}
+
+	return count;
+}
+
+static int dma_debug_device_change(struct notifier_block *nb,
+				   unsigned long action, void *data)
+{
+	struct device *dev = data;
+	int count;
+
+
+	switch (action) {
+	case BUS_NOTIFY_UNBOUND_DRIVER:
+		count = device_dma_allocations(dev);
+		if (count == 0)
+			break;
+		err_printk(dev, NULL, "DMA-API: device driver has pending "
+				"DMA allocations while released from device "
+				"[count=%d]\n", count);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
 void dma_debug_add_bus(struct bus_type *bus)
 {
-	/* FIXME: register notifier */
+	struct notifier_block *nb;
+
+	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+	if (nb == NULL) {
+		printk(KERN_ERR "dma_debug_add_bus: out of memory\n");
+		return;
+	}
+
+	nb->notifier_call = dma_debug_device_change;
+
+	bus_register_notifier(bus, nb);
 }
 
 /*
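The hunk above is the heart of the patch: device_dma_allocations() walks every hash bucket of outstanding DMA mappings and counts the entries owned by a given device, and dma_debug_device_change() reports a non-zero count as a leak when the bus signals BUS_NOTIFY_UNBOUND_DRIVER, i.e. when a driver has just been detached from the device. The counting pass in isolation looks like the following minimal userspace sketch (plain C, not kernel code; the dbg_entry and bucket types and the HASH_SIZE value here are illustrative, not taken from the patch):

#include <stdio.h>

#define HASH_SIZE 4			/* illustrative; the real table is larger */

struct dbg_entry {
	const void *dev;		/* which device owns this mapping */
	struct dbg_entry *next;
};

struct bucket {
	struct dbg_entry *head;
};

static struct bucket hash_table[HASH_SIZE];

/* Count outstanding mappings owned by dev, like device_dma_allocations(). */
static int count_allocations(const void *dev)
{
	int i, count = 0;

	for (i = 0; i < HASH_SIZE; i++) {
		struct dbg_entry *e;

		/* the kernel walks each bucket under its spinlock here */
		for (e = hash_table[i].head; e; e = e->next)
			if (e->dev == dev)
				count++;
	}

	return count;
}

int main(void)
{
	static struct dbg_entry a = { .dev = (void *)0x1 };
	static struct dbg_entry b = { .dev = (void *)0x2 };
	static struct dbg_entry c = { .dev = (void *)0x1, .next = &b };

	hash_table[0].head = &a;
	hash_table[2].head = &c;

	/* Two mappings still belong to device 0x1 -> the unbind check fires. */
	printf("pending for dev 0x1: %d\n", count_allocations((void *)0x1));
	return 0;
}

In the kernel version each bucket is walked under its spinlock, and the warning is emitted through err_printk() so it shows up alongside the other DMA-API debug diagnostics.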
@@ -930,15 +986,15 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		entry->type = dma_debug_sg;
 		entry->dev = dev;
 		entry->paddr = sg_phys(s);
-		entry->size = s->length;
-		entry->dev_addr = s->dma_address;
+		entry->size = sg_dma_len(s);
+		entry->dev_addr = sg_dma_address(s);
 		entry->direction = direction;
 		entry->sg_call_ents = nents;
 		entry->sg_mapped_ents = mapped_ents;
 
 		if (!PageHighMem(sg_page(s))) {
 			check_for_stack(dev, sg_virt(s));
-			check_for_illegal_area(dev, sg_virt(s), s->length);
+			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
 		}
 
 		add_dma_entry(entry);
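This hunk switches the bookkeeping from the CPU-side scatterlist fields (s->length, s->dma_address) to the DMA accessors sg_dma_len() and sg_dma_address(). The distinction matters once an IOMMU is involved: dma_map_sg() may coalesce several elements, so the address and length the device actually uses can differ from the CPU view, and the debug entry should record the device view it will later be checked against. A rough, hedged illustration (the struct below is a stand-in, not the real struct scatterlist, and the my_-prefixed macros merely mimic what the accessors conceptually report):

#include <stdio.h>

/* Illustrative stand-in for struct scatterlist; the field names mirror the
 * kernel's, but this is not the real definition. */
struct sl {
	unsigned int length;		/* CPU-side length of this chunk     */
	unsigned long dma_address;	/* bus address after dma_map_sg()    */
	unsigned int dma_length;	/* DMA-side length (possibly merged) */
};

/* What sg_dma_address()/sg_dma_len() conceptually report: the DMA view. */
#define my_sg_dma_address(sg) ((sg)->dma_address)
#define my_sg_dma_len(sg)     ((sg)->dma_length)

int main(void)
{
	/* Pretend an IOMMU merged two 4 KiB chunks into one 8 KiB segment:
	 * the CPU length stays 4096, the DMA length becomes 8192. */
	struct sl s = {
		.length      = 4096,
		.dma_address = 0xfeed0000UL,
		.dma_length  = 8192,
	};

	printf("cpu len %u, dma len %u at %#lx\n",
	       s.length, my_sg_dma_len(&s), my_sg_dma_address(&s));
	return 0;
}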
@@ -946,13 +1002,32 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(debug_dma_map_sg);
 
+static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
+{
+	struct dma_debug_entry *entry;
+	struct hash_bucket *bucket;
+	unsigned long flags;
+	int mapped_ents = 0;
+	struct dma_debug_entry ref;
+
+	ref.dev = dev;
+	ref.dev_addr = sg_dma_address(s);
+	ref.size = sg_dma_len(s);
+
+	bucket = get_hash_bucket(&ref, &flags);
+	entry = hash_bucket_find(bucket, &ref);
+	if (entry)
+		mapped_ents = entry->sg_mapped_ents;
+	put_hash_bucket(bucket, &flags);
+
+	return mapped_ents;
+}
+
 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			int nelems, int dir)
 {
-	struct dma_debug_entry *entry;
 	struct scatterlist *s;
 	int mapped_ents = 0, i;
-	unsigned long flags;
 
 	if (unlikely(global_disable))
 		return;
@@ -963,8 +1038,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			.type = dma_debug_sg,
 			.dev = dev,
 			.paddr = sg_phys(s),
-			.dev_addr = s->dma_address,
-			.size = s->length,
+			.dev_addr = sg_dma_address(s),
+			.size = sg_dma_len(s),
 			.direction = dir,
 			.sg_call_ents = 0,
 		};
@@ -972,14 +1047,9 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		if (mapped_ents && i >= mapped_ents)
 			break;
 
-		if (mapped_ents == 0) {
-			struct hash_bucket *bucket;
+		if (!i) {
 			ref.sg_call_ents = nelems;
-			bucket = get_hash_bucket(&ref, &flags);
-			entry = hash_bucket_find(bucket, &ref);
-			if (entry)
-				mapped_ents = entry->sg_mapped_ents;
-			put_hash_bucket(bucket, &flags);
+			mapped_ents = get_nr_mapped_entries(dev, s);
 		}
 
 		check_unmap(&ref);
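The three hunks above rework debug_dma_unmap_sg(): the open-coded hash lookup becomes the shared helper get_nr_mapped_entries(), it is performed only for the first scatterlist element (the "if (!i)" test), and the loop still stops once i reaches the number of entries that were actually mapped. The control flow on its own, with the lookup stubbed out, can be sketched in self-contained userspace C (hypothetical names, illustrative values):

#include <stdio.h>

/* Stand-in for get_nr_mapped_entries(): the real helper hashes the first
 * element's DMA address and reads sg_mapped_ents from the stored entry. */
static int nr_mapped_entries_stub(void)
{
	return 2;	/* pretend dma_map_sg() coalesced 4 elements into 2 */
}

int main(void)
{
	int nelems = 4, mapped_ents = 0, i;

	for (i = 0; i < nelems; i++) {
		/* Past the entries that were actually mapped: stop checking. */
		if (mapped_ents && i >= mapped_ents)
			break;

		/* Do the lookup only once, on the first element. */
		if (!i)
			mapped_ents = nr_mapped_entries_stub();

		printf("checking element %d (of %d mapped)\n", i, mapped_ents);
	}
	return 0;
}

With the mapping coalesced from four elements down to two, only the first two elements are checked in the debug bookkeeping; the remaining ones never had valid DMA addresses.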
@@ -1081,14 +1151,20 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 			       int nelems, int direction)
 {
 	struct scatterlist *s;
-	int i;
+	int mapped_ents = 0, i;
 
 	if (unlikely(global_disable))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
-		check_sync(dev, s->dma_address, s->dma_length, 0,
-			   direction, true);
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, s);
+
+		if (i >= mapped_ents)
+			break;
+
+		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
+			   direction, true);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -1097,14 +1173,20 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 				  int nelems, int direction)
 {
 	struct scatterlist *s;
-	int i;
+	int mapped_ents = 0, i;
 
 	if (unlikely(global_disable))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
-		check_sync(dev, s->dma_address, s->dma_length, 0,
-			   direction, false);
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, s);
+
+		if (i >= mapped_ents)
+			break;
+
+		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
+			   direction, false);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
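Both sync hunks add the same clamp that debug_dma_unmap_sg() already has, and for the same reason: DMA-API.txt tells drivers to pass the original nents to dma_sync_sg_for_*() and dma_unmap_sg(), even though dma_map_sg() may have returned a smaller count after IOMMU merging. A hedged driver-side sketch of that calling convention (do_transfer() is hypothetical and not part of this patch; it assumes an already-built scatterlist):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: map, sync and unmap a scatterlist the way
 * DMA-API.txt describes it. */
static int do_transfer(struct device *dev, struct scatterlist *sgl, int nents)
{
	int count;

	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (count == 0)
		return -ENOMEM;
	/* count may be smaller than nents if the IOMMU merged entries */

	/* ... program the device with the 'count' mapped segments ... */

	/* sync and unmap take the ORIGINAL nents, not 'count' */
	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
	/* ... CPU inspects the received data ... */
	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);

	return 0;
}

Because nelems can legitimately exceed the number of mapped entries, the debug code reads sg_mapped_ents from the stored entry of the first element and stops checking after that many elements instead of running past the end of the mapped region.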