Diffstat (limited to 'lib')

 lib/Kconfig.debug |  19
 lib/checksum.c    |  10
 lib/dma-debug.c   | 175

 3 files changed, 136 insertions(+), 68 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6b0c2d8a2129..12327b2bb785 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -340,8 +340,6 @@ config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \
 		!MEMORY_HOTPLUG
-	select DEBUG_SLAB if SLAB
-	select SLUB_DEBUG if SLUB
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
 	select KALLSYMS
@@ -355,9 +353,24 @@ config DEBUG_KMEMLEAK
 	  allocations. See Documentation/kmemleak.txt for more
 	  details.
 
+	  Enabling DEBUG_SLAB or SLUB_DEBUG may increase the chances
+	  of finding leaks due to the slab objects poisoning.
+
 	  In order to access the kmemleak file, debugfs needs to be
 	  mounted (usually at /sys/kernel/debug).
 
+config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
+	int "Maximum kmemleak early log entries"
+	depends on DEBUG_KMEMLEAK
+	range 200 2000
+	default 400
+	help
+	  Kmemleak must track all the memory allocations to avoid
+	  reporting false positives. Since memory may be allocated or
+	  freed before kmemleak is initialised, an early log buffer is
+	  used to store these actions. If kmemleak reports "early log
+	  buffer exceeded", please increase this value.
+
 config DEBUG_KMEMLEAK_TEST
 	tristate "Simple test for the kernel memory leak detector"
 	depends on DEBUG_KMEMLEAK
@@ -472,7 +485,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
 	select KALLSYMS
 	select KALLSYMS_ALL
 
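[Note] The new DEBUG_KMEMLEAK_EARLY_LOG_SIZE option sizes a buffer that kmemleak uses to remember allocations and frees made before it is initialised, so they can be replayed once tracking is up. The sketch below is a simplified, hypothetical illustration of that idea, not the kmemleak source; the names early_log, crt_early_log and log_early are ours:

#include <linux/kernel.h>	/* printk, ARRAY_SIZE */

/* Hypothetical sketch of an "early log": record alloc/free events that
 * happen before the real tracker is initialised, replay them at init.
 * The array size corresponds to CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE. */
enum early_op { EARLY_ALLOC, EARLY_FREE };

struct early_log {
	enum early_op op;
	const void *ptr;
	size_t size;
};

static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int crt_early_log;

static void log_early(enum early_op op, const void *ptr, size_t size)
{
	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		/* the condition the help text above refers to */
		printk(KERN_WARNING "kmemleak: early log buffer exceeded\n");
		return;
	}
	early_log[crt_early_log].op   = op;
	early_log[crt_early_log].ptr  = ptr;
	early_log[crt_early_log].size = size;
	crt_early_log++;
}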
diff --git a/lib/checksum.c b/lib/checksum.c
index 12e5a1c91cda..b2e2fd468461 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -55,7 +55,11 @@ static unsigned int do_csum(const unsigned char *buff, int len)
 		goto out;
 	odd = 1 & (unsigned long) buff;
 	if (odd) {
+#ifdef __LITTLE_ENDIAN
 		result = *buff;
+#else
+		result += (*buff << 8);
+#endif
 		len--;
 		buff++;
 	}
@@ -71,7 +75,7 @@ static unsigned int do_csum(const unsigned char *buff, int len)
 		if (count) {
 			unsigned long carry = 0;
 			do {
-				unsigned long w = *(unsigned long *) buff;
+				unsigned long w = *(unsigned int *) buff;
 				count--;
 				buff += 4;
 				result += carry;
@@ -87,7 +91,11 @@ static unsigned int do_csum(const unsigned char *buff, int len)
 		}
 	}
 	if (len & 1)
+#ifdef __LITTLE_ENDIAN
+		result += *buff;
+#else
 		result += (*buff << 8);
+#endif
 	result = from32to16(result);
 	if (odd)
 		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
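[Note] The reasoning behind the new #ifdefs: do_csum() accumulates 16-bit host-order words into a 32-bit sum, so a lone byte at an odd start or odd end must enter the sum in the byte position it would have occupied in a word load: the low byte on little-endian, the high byte on big-endian. The final swap for odd alignment then restores network byte order. A standalone user-space sketch of the same logic (an illustration, not the kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fold a byte stream into a 16-bit ones'-complement sum the way
 * do_csum() does, including the endian-dependent odd-byte handling. */
static uint16_t csum16(const uint8_t *buf, int len)
{
	uint32_t sum = 0;
	uint16_t w;

	while (len >= 2) {
		memcpy(&w, buf, 2);	/* 16-bit host-order word load */
		sum += w;
		buf += 2;
		len -= 2;
	}
	if (len & 1) {
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		sum += (uint32_t)*buf << 8;	/* odd byte is the high half */
#else
		sum += *buf;			/* odd byte is the low half */
#endif
	}
	while (sum >> 16)		/* like from32to16(): fold the carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	const uint8_t data[] = { 0x45, 0x00, 0x00, 0x1c, 0xab };

	printf("sum = 0x%04x\n", csum16(data, sizeof(data)));
	return 0;
}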
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ad65fc0317d9..65b0d99b6d0a 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -262,11 +262,12 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
 		 */
 		matches += 1;
 		match_lvl = 0;
-		entry->size == ref->size ? ++match_lvl : match_lvl;
-		entry->type == ref->type ? ++match_lvl : match_lvl;
-		entry->direction == ref->direction ? ++match_lvl : match_lvl;
+		entry->size == ref->size ? ++match_lvl : 0;
+		entry->type == ref->type ? ++match_lvl : 0;
+		entry->direction == ref->direction ? ++match_lvl : 0;
+		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
 
-		if (match_lvl == 3) {
+		if (match_lvl == 4) {
 			/* perfect-fit - return the result */
 			return entry;
 		} else if (match_lvl > last_lvl) {
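[Note] With sg_call_ents added as a fourth criterion, a perfect hit now needs a 4/4 score; lower scores are remembered as the best near-miss. The ternary expressions are used purely for their increment side effect. An equivalent, more conventional restatement of the scoring (our paraphrase, not part of the patch):

/* Equivalent restatement of the match scoring used above. */
static int match_score(const struct dma_debug_entry *entry,
		       const struct dma_debug_entry *ref)
{
	int lvl = 0;

	lvl += (entry->size         == ref->size);
	lvl += (entry->type         == ref->type);
	lvl += (entry->direction    == ref->direction);
	lvl += (entry->sg_call_ents == ref->sg_call_ents);

	return lvl;	/* 4 means a perfect fit */
}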
@@ -715,7 +716,7 @@ void dma_debug_init(u32 num_entries)
 
 	for (i = 0; i < HASH_SIZE; ++i) {
 		INIT_LIST_HEAD(&dma_entry_hash[i].list);
-		dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&dma_entry_hash[i].lock);
 	}
 
 	if (dma_debug_fs_init() != 0) {
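[Note] SPIN_LOCK_UNLOCKED is a static initialiser; copying it into run-time storage gives every bucket the same lockdep class and had been deprecated for a while at this point. spin_lock_init() is the supported way to initialise a lock that lives in dynamically set-up data. A small sketch mirroring the hash-bucket setup above (bucket_init is a hypothetical helper):

#include <linux/list.h>
#include <linux/spinlock.h>

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
};

static void bucket_init(struct hash_bucket *bucket)
{
	INIT_LIST_HEAD(&bucket->list);
	spin_lock_init(&bucket->lock);	/* gives the lock its own lockdep key */
}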
@@ -855,90 +856,85 @@ static void check_for_stack(struct device *dev, void *addr)
 				"stack [addr=%p]\n", addr);
 }
 
-static inline bool overlap(void *addr, u64 size, void *start, void *end)
+static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
 {
-	void *addr2 = (char *)addr + size;
+	unsigned long a1 = (unsigned long)addr;
+	unsigned long b1 = a1 + len;
+	unsigned long a2 = (unsigned long)start;
+	unsigned long b2 = (unsigned long)end;
 
-	return ((addr >= start && addr < end) ||
-		(addr2 >= start && addr2 < end) ||
-		((addr < start) && (addr2 >= end)));
+	return !(b1 <= a2 || a1 >= b2);
 }
 
-static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
+static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
 {
-	if (overlap(addr, size, _text, _etext) ||
-	    overlap(addr, size, __start_rodata, __end_rodata))
-		err_printk(dev, NULL, "DMA-API: device driver maps "
-				"memory from kernel text or rodata "
-				"[addr=%p] [size=%llu]\n", addr, size);
+	if (overlap(addr, len, _text, _etext) ||
+	    overlap(addr, len, __start_rodata, __end_rodata))
+		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
 }
 
-static void check_sync(struct device *dev, dma_addr_t addr,
-		       u64 size, u64 offset, int direction, bool to_cpu)
+static void check_sync(struct device *dev,
+		       struct dma_debug_entry *ref,
+		       bool to_cpu)
 {
-	struct dma_debug_entry ref = {
-		.dev = dev,
-		.dev_addr = addr,
-		.size = size,
-		.direction = direction,
-	};
 	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;
 	unsigned long flags;
 
-	bucket = get_hash_bucket(&ref, &flags);
+	bucket = get_hash_bucket(ref, &flags);
 
-	entry = hash_bucket_find(bucket, &ref);
+	entry = hash_bucket_find(bucket, ref);
 
 	if (!entry) {
 		err_printk(dev, NULL, "DMA-API: device driver tries "
 				"to sync DMA memory it has not allocated "
 				"[device address=0x%016llx] [size=%llu bytes]\n",
-				(unsigned long long)addr, size);
+				(unsigned long long)ref->dev_addr, ref->size);
 		goto out;
 	}
 
-	if ((offset + size) > entry->size) {
+	if (ref->size > entry->size) {
 		err_printk(dev, entry, "DMA-API: device driver syncs"
 				" DMA memory outside allocated range "
 				"[device address=0x%016llx] "
-				"[allocation size=%llu bytes] [sync offset=%llu] "
-				"[sync size=%llu]\n", entry->dev_addr, entry->size,
-				offset, size);
+				"[allocation size=%llu bytes] "
+				"[sync offset+size=%llu]\n",
+				entry->dev_addr, entry->size,
+				ref->size);
 	}
 
-	if (direction != entry->direction) {
+	if (ref->direction != entry->direction) {
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 				"DMA memory with different direction "
 				"[device address=0x%016llx] [size=%llu bytes] "
 				"[mapped with %s] [synced with %s]\n",
-				(unsigned long long)addr, entry->size,
+				(unsigned long long)ref->dev_addr, entry->size,
 				dir2name[entry->direction],
-				dir2name[direction]);
+				dir2name[ref->direction]);
 	}
 
 	if (entry->direction == DMA_BIDIRECTIONAL)
 		goto out;
 
 	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
-		      !(direction == DMA_TO_DEVICE))
+		      !(ref->direction == DMA_TO_DEVICE))
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 				"device read-only DMA memory for cpu "
 				"[device address=0x%016llx] [size=%llu bytes] "
 				"[mapped with %s] [synced with %s]\n",
-				(unsigned long long)addr, entry->size,
+				(unsigned long long)ref->dev_addr, entry->size,
 				dir2name[entry->direction],
-				dir2name[direction]);
+				dir2name[ref->direction]);
 
 	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
-		       !(direction == DMA_FROM_DEVICE))
+		       !(ref->direction == DMA_FROM_DEVICE))
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 				"device write-only DMA memory to device "
 				"[device address=0x%016llx] [size=%llu bytes] "
 				"[mapped with %s] [synced with %s]\n",
-				(unsigned long long)addr, entry->size,
+				(unsigned long long)ref->dev_addr, entry->size,
 				dir2name[entry->direction],
-				dir2name[direction]);
+				dir2name[ref->direction]);
 
 out:
 	put_hash_bucket(bucket, &flags);
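[Note] Two things change above. check_sync() now takes a caller-built dma_debug_entry, so hash_bucket_find() can match on all four fields including sg_call_ents, and overlap() becomes the standard half-open interval test: [a1, b1) and [a2, b2) intersect unless one ends at or before the point where the other begins; doing the comparison on unsigned long also avoids pointer arithmetic past the end of an object. A minimal stand-alone check of that predicate (hypothetical test code, not part of the patch):

#include <assert.h>

/* The interval test used by the new overlap(): half-open ranges
 * [a1, b1) and [a2, b2) intersect unless one ends before the other
 * begins. */
static int ranges_overlap(unsigned long a1, unsigned long b1,
			  unsigned long a2, unsigned long b2)
{
	return !(b1 <= a2 || a1 >= b2);
}

int main(void)
{
	assert( ranges_overlap(10, 20, 15, 25));	/* partial overlap */
	assert( ranges_overlap(10, 20, 12, 18));	/* containment */
	assert(!ranges_overlap(10, 20, 20, 30));	/* merely adjacent */
	assert(!ranges_overlap(10, 20, 30, 40));	/* disjoint */
	return 0;
}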
@@ -972,7 +968,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 	entry->type = dma_debug_single;
 
 	if (!PageHighMem(page)) {
-		void *addr = ((char *)page_address(page)) + offset;
+		void *addr = page_address(page) + offset;
+
 		check_for_stack(dev, addr);
 		check_for_illegal_area(dev, addr, size);
 	}
@@ -1036,19 +1033,16 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(debug_dma_map_sg);
 
-static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
+static int get_nr_mapped_entries(struct device *dev,
+				 struct dma_debug_entry *ref)
 {
-	struct dma_debug_entry *entry, ref;
+	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;
 	unsigned long flags;
 	int mapped_ents;
 
-	ref.dev = dev;
-	ref.dev_addr = sg_dma_address(s);
-	ref.size = sg_dma_len(s),
-
-	bucket = get_hash_bucket(&ref, &flags);
-	entry = hash_bucket_find(bucket, &ref);
+	bucket = get_hash_bucket(ref, &flags);
+	entry = hash_bucket_find(bucket, ref);
 	mapped_ents = 0;
 
 	if (entry)
@@ -1076,16 +1070,14 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			.dev_addr = sg_dma_address(s),
 			.size = sg_dma_len(s),
 			.direction = dir,
-			.sg_call_ents = 0,
+			.sg_call_ents = nelems,
 		};
 
 		if (mapped_ents && i >= mapped_ents)
 			break;
 
-		if (!i) {
-			ref.sg_call_ents = nelems;
-			mapped_ents = get_nr_mapped_entries(dev, s);
-		}
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		check_unmap(&ref);
 	}
@@ -1140,10 +1132,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				   size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, true);
+	ref.type = dma_debug_single;
+	ref.dev = dev;
+	ref.dev_addr = dma_handle;
+	ref.size = size;
+	ref.direction = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
 
@@ -1151,10 +1152,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
 				      dma_addr_t dma_handle, size_t size,
 				      int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, false);
+	ref.type = dma_debug_single;
+	ref.dev = dev;
+	ref.dev_addr = dma_handle;
+	ref.size = size;
+	ref.direction = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
 
@@ -1163,10 +1173,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
 					 unsigned long offset, size_t size,
 					 int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, true);
+	ref.type = dma_debug_single;
+	ref.dev = dev;
+	ref.dev_addr = dma_handle;
+	ref.size = offset + size;
+	ref.direction = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
 
@@ -1175,10 +1194,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
 					    unsigned long offset,
 					    size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, false);
+	ref.type = dma_debug_single;
+	ref.dev = dev;
+	ref.dev_addr = dma_handle;
+	ref.size = offset + size;
+	ref.direction = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
 
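[Note] The range variants fold the offset into ref.size: check_sync() compares ref->size against the mapping's size, so a sync of [offset, offset + size) that runs past the allocation still triggers the out-of-range report. On the driver side these hooks sit behind the regular dma_sync_* API; a hedged usage sketch (hypothetical driver fragment, error handling elided):

#include <linux/dma-mapping.h>

/* Hypothetical driver fragment: let the CPU peek at the first hdr_len
 * bytes of a streaming DMA buffer. With CONFIG_DMA_API_DEBUG this goes
 * through debug_dma_sync_single_range_for_cpu()/..._for_device() above,
 * which check offset + size against the original mapping. */
static void rx_peek_header(struct device *dev, dma_addr_t buf_dma,
			   size_t hdr_len)
{
	dma_sync_single_range_for_cpu(dev, buf_dma, 0, hdr_len,
				      DMA_FROM_DEVICE);
	/* ... CPU may safely read the header bytes here ... */
	dma_sync_single_range_for_device(dev, buf_dma, 0, hdr_len,
					 DMA_FROM_DEVICE);
}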
@@ -1192,14 +1220,24 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
+
+		struct dma_debug_entry ref = {
+			.type = dma_debug_sg,
+			.dev = dev,
+			.paddr = sg_phys(s),
+			.dev_addr = sg_dma_address(s),
+			.size = sg_dma_len(s),
+			.direction = direction,
+			.sg_call_ents = nelems,
+		};
+
 		if (!i)
-			mapped_ents = get_nr_mapped_entries(dev, s);
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		if (i >= mapped_ents)
 			break;
 
-		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-			   direction, true);
+		check_sync(dev, &ref, true);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -1214,14 +1252,23 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
+
+		struct dma_debug_entry ref = {
+			.type = dma_debug_sg,
+			.dev = dev,
+			.paddr = sg_phys(s),
+			.dev_addr = sg_dma_address(s),
+			.size = sg_dma_len(s),
+			.direction = direction,
+			.sg_call_ents = nelems,
+		};
 		if (!i)
-			mapped_ents = get_nr_mapped_entries(dev, s);
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		if (i >= mapped_ents)
 			break;
 
-		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-			   direction, false);
+		check_sync(dev, &ref, false);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
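[Note] For scatter-gather syncs the same per-entry ref is now built inside the loop, with sg_call_ents recording the nelems originally passed to dma_map_sg() so that partially mapped lists are handled via get_nr_mapped_entries(). Driver-side usage, for reference (hypothetical fragment):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical driver fragment: hand a mapped scatterlist to the CPU
 * and back to the device. nents must be the count originally passed to
 * dma_map_sg(); that is exactly what ref.sg_call_ents records above. */
static void process_rx_sgl(struct device *dev, struct scatterlist *sgl,
			   int nents)
{
	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
	/* ... CPU reads the received data ... */
	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
}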