about summary refs log tree commit diff stats
path: root/lib
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2009-06-21 16:13:53 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-06-21 16:13:53 -0400
commit8b12e2505ad8c5010922e45f896d908fd1436709 (patch)
tree4dfb959942469ec36156bc222526f82f2d42fe14 /lib
parent413318444fd5351f9858b9deb4e8ecaf8898ee05 (diff)
parent00540e5d54be972a94a3b2ce6da8621bebe731a2 (diff)
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  lockdep: Select frame pointers on x86
  dma-debug: be more careful when building reference entries
  dma-debug: check for sg_call_ents in best-fit algorithm too
Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/dma-debug.c149
2 files changed, 99 insertions(+), 52 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6b0c2d8a212..23067ab1a73 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -472,7 +472,7 @@ config LOCKDEP
472 bool 472 bool
473 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 473 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
474 select STACKTRACE 474 select STACKTRACE
475 select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND && !S390 475 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
476 select KALLSYMS 476 select KALLSYMS
477 select KALLSYMS_ALL 477 select KALLSYMS_ALL
478 478
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ad65fc0317d..3b93129a968 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -262,11 +262,12 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
262 */ 262 */
263 matches += 1; 263 matches += 1;
264 match_lvl = 0; 264 match_lvl = 0;
265 entry->size == ref->size ? ++match_lvl : match_lvl; 265 entry->size == ref->size ? ++match_lvl : 0;
266 entry->type == ref->type ? ++match_lvl : match_lvl; 266 entry->type == ref->type ? ++match_lvl : 0;
267 entry->direction == ref->direction ? ++match_lvl : match_lvl; 267 entry->direction == ref->direction ? ++match_lvl : 0;
268 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
268 269
269 if (match_lvl == 3) { 270 if (match_lvl == 4) {
270 /* perfect-fit - return the result */ 271 /* perfect-fit - return the result */
271 return entry; 272 return entry;
272 } else if (match_lvl > last_lvl) { 273 } else if (match_lvl > last_lvl) {
@@ -873,72 +874,68 @@ static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
873 "[addr=%p] [size=%llu]\n", addr, size); 874 "[addr=%p] [size=%llu]\n", addr, size);
874} 875}
875 876
876static void check_sync(struct device *dev, dma_addr_t addr, 877static void check_sync(struct device *dev,
877 u64 size, u64 offset, int direction, bool to_cpu) 878 struct dma_debug_entry *ref,
879 bool to_cpu)
878{ 880{
879 struct dma_debug_entry ref = {
880 .dev = dev,
881 .dev_addr = addr,
882 .size = size,
883 .direction = direction,
884 };
885 struct dma_debug_entry *entry; 881 struct dma_debug_entry *entry;
886 struct hash_bucket *bucket; 882 struct hash_bucket *bucket;
887 unsigned long flags; 883 unsigned long flags;
888 884
889 bucket = get_hash_bucket(&ref, &flags); 885 bucket = get_hash_bucket(ref, &flags);
890 886
891 entry = hash_bucket_find(bucket, &ref); 887 entry = hash_bucket_find(bucket, ref);
892 888
893 if (!entry) { 889 if (!entry) {
894 err_printk(dev, NULL, "DMA-API: device driver tries " 890 err_printk(dev, NULL, "DMA-API: device driver tries "
895 "to sync DMA memory it has not allocated " 891 "to sync DMA memory it has not allocated "
896 "[device address=0x%016llx] [size=%llu bytes]\n", 892 "[device address=0x%016llx] [size=%llu bytes]\n",
897 (unsigned long long)addr, size); 893 (unsigned long long)ref->dev_addr, ref->size);
898 goto out; 894 goto out;
899 } 895 }
900 896
901 if ((offset + size) > entry->size) { 897 if (ref->size > entry->size) {
902 err_printk(dev, entry, "DMA-API: device driver syncs" 898 err_printk(dev, entry, "DMA-API: device driver syncs"
903 " DMA memory outside allocated range " 899 " DMA memory outside allocated range "
904 "[device address=0x%016llx] " 900 "[device address=0x%016llx] "
905 "[allocation size=%llu bytes] [sync offset=%llu] " 901 "[allocation size=%llu bytes] "
906 "[sync size=%llu]\n", entry->dev_addr, entry->size, 902 "[sync offset+size=%llu]\n",
907 offset, size); 903 entry->dev_addr, entry->size,
904 ref->size);
908 } 905 }
909 906
910 if (direction != entry->direction) { 907 if (ref->direction != entry->direction) {
911 err_printk(dev, entry, "DMA-API: device driver syncs " 908 err_printk(dev, entry, "DMA-API: device driver syncs "
912 "DMA memory with different direction " 909 "DMA memory with different direction "
913 "[device address=0x%016llx] [size=%llu bytes] " 910 "[device address=0x%016llx] [size=%llu bytes] "
914 "[mapped with %s] [synced with %s]\n", 911 "[mapped with %s] [synced with %s]\n",
915 (unsigned long long)addr, entry->size, 912 (unsigned long long)ref->dev_addr, entry->size,
916 dir2name[entry->direction], 913 dir2name[entry->direction],
917 dir2name[direction]); 914 dir2name[ref->direction]);
918 } 915 }
919 916
920 if (entry->direction == DMA_BIDIRECTIONAL) 917 if (entry->direction == DMA_BIDIRECTIONAL)
921 goto out; 918 goto out;
922 919
923 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && 920 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
924 !(direction == DMA_TO_DEVICE)) 921 !(ref->direction == DMA_TO_DEVICE))
925 err_printk(dev, entry, "DMA-API: device driver syncs " 922 err_printk(dev, entry, "DMA-API: device driver syncs "
926 "device read-only DMA memory for cpu " 923 "device read-only DMA memory for cpu "
927 "[device address=0x%016llx] [size=%llu bytes] " 924 "[device address=0x%016llx] [size=%llu bytes] "
928 "[mapped with %s] [synced with %s]\n", 925 "[mapped with %s] [synced with %s]\n",
929 (unsigned long long)addr, entry->size, 926 (unsigned long long)ref->dev_addr, entry->size,
930 dir2name[entry->direction], 927 dir2name[entry->direction],
931 dir2name[direction]); 928 dir2name[ref->direction]);
932 929
933 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && 930 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
934 !(direction == DMA_FROM_DEVICE)) 931 !(ref->direction == DMA_FROM_DEVICE))
935 err_printk(dev, entry, "DMA-API: device driver syncs " 932 err_printk(dev, entry, "DMA-API: device driver syncs "
936 "device write-only DMA memory to device " 933 "device write-only DMA memory to device "
937 "[device address=0x%016llx] [size=%llu bytes] " 934 "[device address=0x%016llx] [size=%llu bytes] "
938 "[mapped with %s] [synced with %s]\n", 935 "[mapped with %s] [synced with %s]\n",
939 (unsigned long long)addr, entry->size, 936 (unsigned long long)ref->dev_addr, entry->size,
940 dir2name[entry->direction], 937 dir2name[entry->direction],
941 dir2name[direction]); 938 dir2name[ref->direction]);
942 939
943out: 940out:
944 put_hash_bucket(bucket, &flags); 941 put_hash_bucket(bucket, &flags);
@@ -1036,19 +1033,16 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1036} 1033}
1037EXPORT_SYMBOL(debug_dma_map_sg); 1034EXPORT_SYMBOL(debug_dma_map_sg);
1038 1035
1039static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s) 1036static int get_nr_mapped_entries(struct device *dev,
1037 struct dma_debug_entry *ref)
1040{ 1038{
1041 struct dma_debug_entry *entry, ref; 1039 struct dma_debug_entry *entry;
1042 struct hash_bucket *bucket; 1040 struct hash_bucket *bucket;
1043 unsigned long flags; 1041 unsigned long flags;
1044 int mapped_ents; 1042 int mapped_ents;
1045 1043
1046 ref.dev = dev; 1044 bucket = get_hash_bucket(ref, &flags);
1047 ref.dev_addr = sg_dma_address(s); 1045 entry = hash_bucket_find(bucket, ref);
1048 ref.size = sg_dma_len(s),
1049
1050 bucket = get_hash_bucket(&ref, &flags);
1051 entry = hash_bucket_find(bucket, &ref);
1052 mapped_ents = 0; 1046 mapped_ents = 0;
1053 1047
1054 if (entry) 1048 if (entry)
@@ -1076,16 +1070,14 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1076 .dev_addr = sg_dma_address(s), 1070 .dev_addr = sg_dma_address(s),
1077 .size = sg_dma_len(s), 1071 .size = sg_dma_len(s),
1078 .direction = dir, 1072 .direction = dir,
1079 .sg_call_ents = 0, 1073 .sg_call_ents = nelems,
1080 }; 1074 };
1081 1075
1082 if (mapped_ents && i >= mapped_ents) 1076 if (mapped_ents && i >= mapped_ents)
1083 break; 1077 break;
1084 1078
1085 if (!i) { 1079 if (!i)
1086 ref.sg_call_ents = nelems; 1080 mapped_ents = get_nr_mapped_entries(dev, &ref);
1087 mapped_ents = get_nr_mapped_entries(dev, s);
1088 }
1089 1081
1090 check_unmap(&ref); 1082 check_unmap(&ref);
1091 } 1083 }
@@ -1140,10 +1132,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
1140void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 1132void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1141 size_t size, int direction) 1133 size_t size, int direction)
1142{ 1134{
1135 struct dma_debug_entry ref;
1136
1143 if (unlikely(global_disable)) 1137 if (unlikely(global_disable))
1144 return; 1138 return;
1145 1139
1146 check_sync(dev, dma_handle, size, 0, direction, true); 1140 ref.type = dma_debug_single;
1141 ref.dev = dev;
1142 ref.dev_addr = dma_handle;
1143 ref.size = size;
1144 ref.direction = direction;
1145 ref.sg_call_ents = 0;
1146
1147 check_sync(dev, &ref, true);
1147} 1148}
1148EXPORT_SYMBOL(debug_dma_sync_single_for_cpu); 1149EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
1149 1150
@@ -1151,10 +1152,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
1151 dma_addr_t dma_handle, size_t size, 1152 dma_addr_t dma_handle, size_t size,
1152 int direction) 1153 int direction)
1153{ 1154{
1155 struct dma_debug_entry ref;
1156
1154 if (unlikely(global_disable)) 1157 if (unlikely(global_disable))
1155 return; 1158 return;
1156 1159
1157 check_sync(dev, dma_handle, size, 0, direction, false); 1160 ref.type = dma_debug_single;
1161 ref.dev = dev;
1162 ref.dev_addr = dma_handle;
1163 ref.size = size;
1164 ref.direction = direction;
1165 ref.sg_call_ents = 0;
1166
1167 check_sync(dev, &ref, false);
1158} 1168}
1159EXPORT_SYMBOL(debug_dma_sync_single_for_device); 1169EXPORT_SYMBOL(debug_dma_sync_single_for_device);
1160 1170
@@ -1163,10 +1173,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
1163 unsigned long offset, size_t size, 1173 unsigned long offset, size_t size,
1164 int direction) 1174 int direction)
1165{ 1175{
1176 struct dma_debug_entry ref;
1177
1166 if (unlikely(global_disable)) 1178 if (unlikely(global_disable))
1167 return; 1179 return;
1168 1180
1169 check_sync(dev, dma_handle, size, offset, direction, true); 1181 ref.type = dma_debug_single;
1182 ref.dev = dev;
1183 ref.dev_addr = dma_handle;
1184 ref.size = offset + size;
1185 ref.direction = direction;
1186 ref.sg_call_ents = 0;
1187
1188 check_sync(dev, &ref, true);
1170} 1189}
1171EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); 1190EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
1172 1191
@@ -1175,10 +1194,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
1175 unsigned long offset, 1194 unsigned long offset,
1176 size_t size, int direction) 1195 size_t size, int direction)
1177{ 1196{
1197 struct dma_debug_entry ref;
1198
1178 if (unlikely(global_disable)) 1199 if (unlikely(global_disable))
1179 return; 1200 return;
1180 1201
1181 check_sync(dev, dma_handle, size, offset, direction, false); 1202 ref.type = dma_debug_single;
1203 ref.dev = dev;
1204 ref.dev_addr = dma_handle;
1205 ref.size = offset + size;
1206 ref.direction = direction;
1207 ref.sg_call_ents = 0;
1208
1209 check_sync(dev, &ref, false);
1182} 1210}
1183EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); 1211EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
1184 1212
@@ -1192,14 +1220,24 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1192 return; 1220 return;
1193 1221
1194 for_each_sg(sg, s, nelems, i) { 1222 for_each_sg(sg, s, nelems, i) {
1223
1224 struct dma_debug_entry ref = {
1225 .type = dma_debug_sg,
1226 .dev = dev,
1227 .paddr = sg_phys(s),
1228 .dev_addr = sg_dma_address(s),
1229 .size = sg_dma_len(s),
1230 .direction = direction,
1231 .sg_call_ents = nelems,
1232 };
1233
1195 if (!i) 1234 if (!i)
1196 mapped_ents = get_nr_mapped_entries(dev, s); 1235 mapped_ents = get_nr_mapped_entries(dev, &ref);
1197 1236
1198 if (i >= mapped_ents) 1237 if (i >= mapped_ents)
1199 break; 1238 break;
1200 1239
1201 check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0, 1240 check_sync(dev, &ref, true);
1202 direction, true);
1203 } 1241 }
1204} 1242}
1205EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); 1243EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -1214,14 +1252,23 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1214 return; 1252 return;
1215 1253
1216 for_each_sg(sg, s, nelems, i) { 1254 for_each_sg(sg, s, nelems, i) {
1255
1256 struct dma_debug_entry ref = {
1257 .type = dma_debug_sg,
1258 .dev = dev,
1259 .paddr = sg_phys(s),
1260 .dev_addr = sg_dma_address(s),
1261 .size = sg_dma_len(s),
1262 .direction = direction,
1263 .sg_call_ents = nelems,
1264 };
1217 if (!i) 1265 if (!i)
1218 mapped_ents = get_nr_mapped_entries(dev, s); 1266 mapped_ents = get_nr_mapped_entries(dev, &ref);
1219 1267
1220 if (i >= mapped_ents) 1268 if (i >= mapped_ents)
1221 break; 1269 break;
1222 1270
1223 check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0, 1271 check_sync(dev, &ref, false);
1224 direction, false);
1225 } 1272 }
1226} 1273}
1227EXPORT_SYMBOL(debug_dma_sync_sg_for_device); 1274EXPORT_SYMBOL(debug_dma_sync_sg_for_device);