author		Florian Fainelli <f.fainelli@gmail.com>	2014-12-10 18:41:23 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-10 20:41:02 -0500
commit		01ce18b31153427bcdccc9c9fd170fd28c03e6e8
tree		b97bc43dac401cd4b3c746d6c08d883b969732ca /lib/dma-debug.c
parent		662e9b2b98a0b8e172c392f3d3437d354a6c4067
dma-debug: introduce dma_debug_disabled
Add a helper function which returns whether the DMA debugging API is disabled. Right now we only check for global_disable, but in order to accommodate early callers of the DMA-API, we will check for more initialization flags in the next patch.

Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Horia Geanta <horia.geanta@freescale.com>
Cc: Brian Norris <computersforpeace@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
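For illustration only: a minimal sketch of what folding an initialization check into the helper could look like, as the commit message anticipates for early DMA-API callers. The flag name dma_debug_initialized below is a placeholder invented for this sketch, not the actual flag introduced by the follow-up patch.

/*
 * Hypothetical extension of dma_debug_disabled(); the
 * "dma_debug_initialized" flag is a placeholder name and does
 * not come from the actual follow-up patch.
 */
static u32 global_disable __read_mostly;
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	/* Report "disabled" both on explicit disable and before init. */
	return global_disable || !dma_debug_initialized;
}

Callers then keep a single predicate to test, so adding further state checks later touches only this helper rather than every call site.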
Diffstat (limited to 'lib/dma-debug.c')
-rw-r--r--	lib/dma-debug.c	37
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index add80cc02dbe..1ac35dbaf8e0 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -102,6 +102,11 @@ static DEFINE_SPINLOCK(free_entries_lock);
 /* Global disable flag - will be set in case of an error */
 static u32 global_disable __read_mostly;
 
+static inline bool dma_debug_disabled(void)
+{
+	return global_disable;
+}
+
 /* Global error count */
 static u32 error_count;
 
@@ -945,7 +950,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
 	struct dma_debug_entry *uninitialized_var(entry);
 	int count;
 
-	if (global_disable)
+	if (dma_debug_disabled())
 		return 0;
 
 	switch (action) {
@@ -973,7 +978,7 @@ void dma_debug_add_bus(struct bus_type *bus)
 {
 	struct notifier_block *nb;
 
-	if (global_disable)
+	if (dma_debug_disabled())
 		return;
 
 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
@@ -994,7 +999,7 @@ void dma_debug_init(u32 num_entries)
 {
 	int i;
 
-	if (global_disable)
+	if (dma_debug_disabled())
 		return;
 
 	for (i = 0; i < HASH_SIZE; ++i) {
@@ -1243,7 +1248,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 {
 	struct dma_debug_entry *entry;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	if (dma_mapping_error(dev, dma_addr))
@@ -1283,7 +1288,7 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	struct hash_bucket *bucket;
 	unsigned long flags;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.dev = dev;
@@ -1325,7 +1330,7 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 		.direction = direction,
 	};
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	if (map_single)
@@ -1342,7 +1347,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *s;
 	int i;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	for_each_sg(sg, s, mapped_ents, i) {
@@ -1395,7 +1400,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *s;
 	int mapped_ents = 0, i;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	for_each_sg(sglist, s, nelems, i) {
@@ -1427,7 +1432,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 {
 	struct dma_debug_entry *entry;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	if (unlikely(virt == NULL))
@@ -1462,7 +1467,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 		.direction = DMA_BIDIRECTIONAL,
 	};
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	check_unmap(&ref);
@@ -1474,7 +1479,7 @@ void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 {
 	struct dma_debug_entry ref;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.type = dma_debug_single;
@@ -1494,7 +1499,7 @@ void debug_dma_sync_single_for_device(struct device *dev,
 {
 	struct dma_debug_entry ref;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.type = dma_debug_single;
@@ -1515,7 +1520,7 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
 {
 	struct dma_debug_entry ref;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.type = dma_debug_single;
@@ -1536,7 +1541,7 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
 {
 	struct dma_debug_entry ref;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.type = dma_debug_single;
@@ -1556,7 +1561,7 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *s;
 	int mapped_ents = 0, i;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
@@ -1589,7 +1594,7 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *s;
 	int mapped_ents = 0, i;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {