Diffstat (limited to 'lib')
-rw-r--r--  lib/dma-debug.c  134
1 file changed, 91 insertions(+), 43 deletions(-)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index c71e2dd2750f..3b93129a968c 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -874,72 +874,68 @@ static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
874 "[addr=%p] [size=%llu]\n", addr, size); 874 "[addr=%p] [size=%llu]\n", addr, size);
875} 875}
876 876
877static void check_sync(struct device *dev, dma_addr_t addr, 877static void check_sync(struct device *dev,
878 u64 size, u64 offset, int direction, bool to_cpu) 878 struct dma_debug_entry *ref,
879 bool to_cpu)
879{ 880{
880 struct dma_debug_entry ref = {
881 .dev = dev,
882 .dev_addr = addr,
883 .size = size,
884 .direction = direction,
885 };
886 struct dma_debug_entry *entry; 881 struct dma_debug_entry *entry;
887 struct hash_bucket *bucket; 882 struct hash_bucket *bucket;
888 unsigned long flags; 883 unsigned long flags;
889 884
890 bucket = get_hash_bucket(&ref, &flags); 885 bucket = get_hash_bucket(ref, &flags);
891 886
892 entry = hash_bucket_find(bucket, &ref); 887 entry = hash_bucket_find(bucket, ref);
893 888
894 if (!entry) { 889 if (!entry) {
895 err_printk(dev, NULL, "DMA-API: device driver tries " 890 err_printk(dev, NULL, "DMA-API: device driver tries "
896 "to sync DMA memory it has not allocated " 891 "to sync DMA memory it has not allocated "
897 "[device address=0x%016llx] [size=%llu bytes]\n", 892 "[device address=0x%016llx] [size=%llu bytes]\n",
898 (unsigned long long)addr, size); 893 (unsigned long long)ref->dev_addr, ref->size);
899 goto out; 894 goto out;
900 } 895 }
901 896
902 if ((offset + size) > entry->size) { 897 if (ref->size > entry->size) {
903 err_printk(dev, entry, "DMA-API: device driver syncs" 898 err_printk(dev, entry, "DMA-API: device driver syncs"
904 " DMA memory outside allocated range " 899 " DMA memory outside allocated range "
905 "[device address=0x%016llx] " 900 "[device address=0x%016llx] "
906 "[allocation size=%llu bytes] [sync offset=%llu] " 901 "[allocation size=%llu bytes] "
907 "[sync size=%llu]\n", entry->dev_addr, entry->size, 902 "[sync offset+size=%llu]\n",
908 offset, size); 903 entry->dev_addr, entry->size,
904 ref->size);
909 } 905 }
910 906
911 if (direction != entry->direction) { 907 if (ref->direction != entry->direction) {
912 err_printk(dev, entry, "DMA-API: device driver syncs " 908 err_printk(dev, entry, "DMA-API: device driver syncs "
913 "DMA memory with different direction " 909 "DMA memory with different direction "
914 "[device address=0x%016llx] [size=%llu bytes] " 910 "[device address=0x%016llx] [size=%llu bytes] "
915 "[mapped with %s] [synced with %s]\n", 911 "[mapped with %s] [synced with %s]\n",
916 (unsigned long long)addr, entry->size, 912 (unsigned long long)ref->dev_addr, entry->size,
917 dir2name[entry->direction], 913 dir2name[entry->direction],
918 dir2name[direction]); 914 dir2name[ref->direction]);
919 } 915 }
920 916
921 if (entry->direction == DMA_BIDIRECTIONAL) 917 if (entry->direction == DMA_BIDIRECTIONAL)
922 goto out; 918 goto out;
923 919
924 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && 920 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
925 !(direction == DMA_TO_DEVICE)) 921 !(ref->direction == DMA_TO_DEVICE))
926 err_printk(dev, entry, "DMA-API: device driver syncs " 922 err_printk(dev, entry, "DMA-API: device driver syncs "
927 "device read-only DMA memory for cpu " 923 "device read-only DMA memory for cpu "
928 "[device address=0x%016llx] [size=%llu bytes] " 924 "[device address=0x%016llx] [size=%llu bytes] "
929 "[mapped with %s] [synced with %s]\n", 925 "[mapped with %s] [synced with %s]\n",
930 (unsigned long long)addr, entry->size, 926 (unsigned long long)ref->dev_addr, entry->size,
931 dir2name[entry->direction], 927 dir2name[entry->direction],
932 dir2name[direction]); 928 dir2name[ref->direction]);
933 929
934 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && 930 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
935 !(direction == DMA_FROM_DEVICE)) 931 !(ref->direction == DMA_FROM_DEVICE))
936 err_printk(dev, entry, "DMA-API: device driver syncs " 932 err_printk(dev, entry, "DMA-API: device driver syncs "
937 "device write-only DMA memory to device " 933 "device write-only DMA memory to device "
938 "[device address=0x%016llx] [size=%llu bytes] " 934 "[device address=0x%016llx] [size=%llu bytes] "
939 "[mapped with %s] [synced with %s]\n", 935 "[mapped with %s] [synced with %s]\n",
940 (unsigned long long)addr, entry->size, 936 (unsigned long long)ref->dev_addr, entry->size,
941 dir2name[entry->direction], 937 dir2name[entry->direction],
942 dir2name[direction]); 938 dir2name[ref->direction]);
943 939
944out: 940out:
945 put_hash_bucket(bucket, &flags); 941 put_hash_bucket(bucket, &flags);
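For context, check_sync() leans on the same hash-bucket helpers used throughout lib/dma-debug.c: get_hash_bucket() hashes ref->dev_addr to pick a bucket and takes that bucket's lock, and put_hash_bucket() drops it. A minimal sketch of that pattern, assuming the helpers look roughly like this (hash_fn() and the exact field layout are assumptions, not part of this patch):

/* Sketch only: the bucket helpers check_sync() calls above.
 * hash_fn() and the struct layout are assumptions. */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);	/* derived from entry->dev_addr */
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}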
@@ -1037,19 +1033,16 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(debug_dma_map_sg);
 
-static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
+static int get_nr_mapped_entries(struct device *dev,
+				 struct dma_debug_entry *ref)
 {
-	struct dma_debug_entry *entry, ref;
+	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;
 	unsigned long flags;
 	int mapped_ents;
 
-	ref.dev      = dev;
-	ref.dev_addr = sg_dma_address(s);
-	ref.size     = sg_dma_len(s),
-
-	bucket       = get_hash_bucket(&ref, &flags);
-	entry        = hash_bucket_find(bucket, &ref);
+	bucket      = get_hash_bucket(ref, &flags);
+	entry       = hash_bucket_find(bucket, ref);
 	mapped_ents = 0;
 
 	if (entry)
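Both get_nr_mapped_entries() and check_sync() resolve the ref through hash_bucket_find(), which walks the locked bucket looking for an entry with the same device and bus address. A minimal sketch, assuming matching is done on (dev, dev_addr) (the list field names are assumptions):

/* Sketch only: lookup assumed to match on device + bus address. */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
						struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;

	list_for_each_entry(entry, &bucket->list, list) {
		if (entry->dev_addr == ref->dev_addr &&
		    entry->dev == ref->dev)
			return entry;
	}

	return NULL;
}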
@@ -1084,7 +1077,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			break;
 
 		if (!i)
-			mapped_ents = get_nr_mapped_entries(dev, s);
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		check_unmap(&ref);
 	}
@@ -1139,10 +1132,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				   size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
 
@@ -1150,10 +1152,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
 					  dma_addr_t dma_handle, size_t size,
 					  int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
 
@@ -1162,10 +1173,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
 					 unsigned long offset, size_t size,
 					 int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
 
@@ -1174,10 +1194,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
 					    unsigned long offset,
 					    size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
 
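Note how the range variants fold the offset into ref.size (ref.size = offset + size), so the ref->size > entry->size test in check_sync() now catches a sync that runs past the end of the mapping. A hypothetical driver-side example (dev, buf and the sizes are placeholders):

/* Hypothetical misuse caught by the range check above:
 * a 4096-byte mapping synced up to offset 3840 + size 768 = 4608 bytes. */
dma_addr_t handle = dma_map_single(dev, buf, 4096, DMA_FROM_DEVICE);

dma_sync_single_range_for_cpu(dev, handle, 3840, 768, DMA_FROM_DEVICE);
/* -> "DMA-API: device driver syncs DMA memory outside allocated range" */

dma_unmap_single(dev, handle, 4096, DMA_FROM_DEVICE);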
@@ -1191,14 +1220,24 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
+
+		struct dma_debug_entry ref = {
+			.type         = dma_debug_sg,
+			.dev          = dev,
+			.paddr        = sg_phys(s),
+			.dev_addr     = sg_dma_address(s),
+			.size         = sg_dma_len(s),
+			.direction    = direction,
+			.sg_call_ents = nelems,
+		};
+
 		if (!i)
-			mapped_ents = get_nr_mapped_entries(dev, s);
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		if (i >= mapped_ents)
 			break;
 
-		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-			   direction, true);
+		check_sync(dev, &ref, true);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -1213,14 +1252,23 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
+
+		struct dma_debug_entry ref = {
+			.type         = dma_debug_sg,
+			.dev          = dev,
+			.paddr        = sg_phys(s),
+			.dev_addr     = sg_dma_address(s),
+			.size         = sg_dma_len(s),
+			.direction    = direction,
+			.sg_call_ents = nelems,
+		};
 		if (!i)
-			mapped_ents = get_nr_mapped_entries(dev, s);
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		if (i >= mapped_ents)
 			break;
 
-		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-			   direction, false);
+		check_sync(dev, &ref, false);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
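With every sync path funnelled through one dma_debug_entry-based lookup, direction mismatches are reported uniformly. A hypothetical driver snippet that the checks above would flag (dev, buf and len are placeholders; the DMA-API calls are the standard kernel ones):

/* Hypothetical misuse: buf is mapped to-device (device-read-only),
 * then synced for the CPU with the opposite direction. */
dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
/* -> "syncs DMA memory with different direction" and
 *    "syncs device read-only DMA memory for cpu" */

dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);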