author    Linus Torvalds <torvalds@linux-foundation.org>    2013-07-03 20:12:13 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-07-03 20:12:13 -0400
commit    7f0ef0267e20d62d45d527911a993b1e998f4968 (patch)
tree      de51abc7da5903f59d83e23937f22420164c9477 /drivers/dma
parent    862f0012549110d6f2586bf54b52ed4540cbff3a (diff)
parent    9307c29524502c21f0e8a6d96d850b2f5bc0bd9a (diff)
Merge branch 'akpm' (updates from Andrew Morton)
Merge first patch-bomb from Andrew Morton:
- various misc bits
- I've been patchmonkeying ocfs2 for a while, as Joel and Mark have been
distracted. There has been quite a bit of activity.
- About half the MM queue
- Some backlight bits
- Various lib/ updates
- checkpatch updates
- zillions more little rtc patches
- ptrace
- signals
- exec
- procfs
- rapidio
- nbd
- aoe
- pps
- memstick
- tools/testing/selftests updates
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (445 commits)
tools/testing/selftests: don't assume the x bit is set on scripts
selftests: add .gitignore for kcmp
selftests: fix clean target in kcmp Makefile
selftests: add .gitignore for vm
selftests: add hugetlbfstest
self-test: fix make clean
selftests: exit 1 on failure
kernel/resource.c: remove the unneeded assignment in function __find_resource
aio: fix wrong comment in aio_complete()
drivers/w1/slaves/w1_ds2408.c: add magic sequence to disable P0 test mode
drivers/memstick/host/r592.c: convert to module_pci_driver
drivers/memstick/host/jmb38x_ms: convert to module_pci_driver
pps-gpio: add device-tree binding and support
drivers/pps/clients/pps-gpio.c: convert to module_platform_driver
drivers/pps/clients/pps-gpio.c: convert to devm_* helpers
drivers/parport/share.c: use kzalloc
Documentation/accounting/getdelays.c: avoid strncpy in accounting tool
aoe: update internal version number to v83
aoe: update copyright date
aoe: perform I/O completions in parallel
...
Diffstat (limited to 'drivers/dma')
 drivers/dma/dmaengine.c   |   7
 drivers/dma/ioat/dma.c    |   3
 drivers/dma/ioat/dma_v2.h |   1
 drivers/dma/ioat/dma_v3.c | 114
 drivers/dma/ioat/hw.h     |  27
 drivers/dma/iop-adma.c    |  66
 drivers/dma/mv_xor.c      |  85
 drivers/dma/mv_xor.h      |   1
 drivers/dma/pl330.c       |   4
 drivers/dma/ppc4xx/adma.c |  47
 10 files changed, 10 insertions(+), 345 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 93f7992bee5c..9e56745f87bf 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -663,11 +663,6 @@ static bool device_has_all_tx_types(struct dma_device *device)
 		return false;
 	#endif
 
-	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
-	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
-		return false;
-	#endif
-
 	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
 	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 		return false;
@@ -729,8 +724,6 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_pq);
 	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
 		!device->device_prep_dma_pq_val);
-	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
-		!device->device_prep_dma_memset);
 	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 		!device->device_prep_dma_interrupt);
 	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 17a2393b3e25..5ff6fc1819dc 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1105,12 +1105,11 @@ static ssize_t cap_show(struct dma_chan *c, char *page)
 {
 	struct dma_device *dma = c->device;
 
-	return sprintf(page, "copy%s%s%s%s%s%s\n",
+	return sprintf(page, "copy%s%s%s%s%s\n",
 		dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
 		dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
 		dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
 		dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
-		dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "",
 		dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
 
 }
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 29bf9448035d..212d584fe427 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -123,7 +123,6 @@ static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len
 struct ioat_ring_ent {
 	union {
 		struct ioat_dma_descriptor *hw;
-		struct ioat_fill_descriptor *fill;
 		struct ioat_xor_descriptor *xor;
 		struct ioat_xor_ext_descriptor *xor_ex;
 		struct ioat_pq_descriptor *pq;
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index ca6ea9b3551b..b642e035579b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -311,14 +311,6 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
 		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
 			ioat_dma_unmap(chan, flags, len, desc->hw);
 		break;
-	case IOAT_OP_FILL: {
-		struct ioat_fill_descriptor *hw = desc->fill;
-
-		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
-			ioat_unmap(pdev, hw->dst_addr - offset, len,
-				   PCI_DMA_FROMDEVICE, flags, 1);
-		break;
-	}
 	case IOAT_OP_XOR_VAL:
 	case IOAT_OP_XOR: {
 		struct ioat_xor_descriptor *xor = desc->xor;
@@ -824,51 +816,6 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 }
 
 static struct dma_async_tx_descriptor *
-ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
-		       size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_ring_ent *desc;
-	size_t total_len = len;
-	struct ioat_fill_descriptor *fill;
-	u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
-	int num_descs, idx, i;
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-	i = 0;
-	do {
-		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		fill = desc->fill;
-
-		fill->size = xfer_size;
-		fill->src_data = src_data;
-		fill->dst_addr = dest;
-		fill->ctl = 0;
-		fill->ctl_f.op = IOAT_OP_FILL;
-
-		len -= xfer_size;
-		dest += xfer_size;
-		dump_desc_dbg(ioat, desc);
-	} while (++i < num_descs);
-
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-	fill->ctl_f.compl_write = 1;
-	dump_desc_dbg(ioat, desc);
-
-	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
-}
-
-static struct dma_async_tx_descriptor *
 __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
 		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
 		      size_t len, unsigned long flags)
@@ -1431,7 +1378,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
 	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
 	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
-	dma_addr_t dma_addr, dest_dma;
+	dma_addr_t dest_dma;
 	struct dma_async_tx_descriptor *tx;
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
@@ -1598,56 +1545,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 		goto free_resources;
 	}
 
-	/* skip memset if the capability is not present */
-	if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
-		goto free_resources;
-
-	/* test memset */
-	op = IOAT_OP_FILL;
-
-	dma_addr = dma_map_page(dev, dest, 0,
-			PAGE_SIZE, DMA_FROM_DEVICE);
-	tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
-					 DMA_PREP_INTERRUPT |
-					 DMA_COMPL_SKIP_SRC_UNMAP |
-					 DMA_COMPL_SKIP_DEST_UNMAP);
-	if (!tx) {
-		dev_err(dev, "Self-test memset prep failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat3_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test memset setup failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
-		dev_err(dev, "Self-test memset timed out\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
-
-	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
-		u32 *ptr = page_address(dest);
-		if (ptr[i]) {
-			dev_err(dev, "Self-test memset failed compare\n");
-			err = -ENODEV;
-			goto free_resources;
-		}
-	}
-
 	/* test for non-zero parity sum */
 	op = IOAT_OP_XOR_VAL;
 
@@ -1706,8 +1603,7 @@ dma_unmap:
 		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
 			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
 				       DMA_TO_DEVICE);
-	} else if (op == IOAT_OP_FILL)
-		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+	}
 free_resources:
 	dma->device_free_chan_resources(dma_chan);
 out:
@@ -1944,12 +1840,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 		}
 	}
 
-	if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) {
-		dma_cap_set(DMA_MEMSET, dma->cap_mask);
-		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
-	}
-
-
 	dma->device_tx_status = ioat3_tx_status;
 	device->cleanup_fn = ioat3_cleanup_event;
 	device->timer_fn = ioat3_timer_event;
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 5ee57d402a6e..62f83e983d8d 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -100,33 +100,6 @@ struct ioat_dma_descriptor {
 	uint64_t	user2;
 };
 
-struct ioat_fill_descriptor {
-	uint32_t	size;
-	union {
-		uint32_t ctl;
-		struct {
-			unsigned int int_en:1;
-			unsigned int rsvd:1;
-			unsigned int dest_snoop_dis:1;
-			unsigned int compl_write:1;
-			unsigned int fence:1;
-			unsigned int rsvd2:2;
-			unsigned int dest_brk:1;
-			unsigned int bundle:1;
-			unsigned int rsvd4:15;
-			#define IOAT_OP_FILL 0x01
-			unsigned int op:8;
-		} ctl_f;
-	};
-	uint64_t	src_data;
-	uint64_t	dst_addr;
-	uint64_t	next;
-	uint64_t	rsv1;
-	uint64_t	next_dst_addr;
-	uint64_t	user1;
-	uint64_t	user2;
-};
-
 struct ioat_xor_descriptor {
 	uint32_t	size;
 	union {
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 7dafb9f3785f..c9cc08c2dbba 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -633,39 +633,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 }
 
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
-			 int value, size_t len, unsigned long flags)
-{
-	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
-	struct iop_adma_desc_slot *sw_desc, *grp_start;
-	int slot_cnt, slots_per_op;
-
-	if (unlikely(!len))
-		return NULL;
-	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
-
-	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
-		__func__, len);
-
-	spin_lock_bh(&iop_chan->lock);
-	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
-	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
-	if (sw_desc) {
-		grp_start = sw_desc->group_head;
-		iop_desc_init_memset(grp_start, flags);
-		iop_desc_set_byte_count(grp_start, iop_chan, len);
-		iop_desc_set_block_fill_val(grp_start, value);
-		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
-		sw_desc->unmap_src_cnt = 1;
-		sw_desc->unmap_len = len;
-		sw_desc->async_tx.flags = flags;
-	}
-	spin_unlock_bh(&iop_chan->lock);
-
-	return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
 		      unsigned long flags)
@@ -1176,33 +1143,6 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 		goto free_resources;
 	}
 
-	/* test memset */
-	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
-			PAGE_SIZE, DMA_FROM_DEVICE);
-	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
-				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
-	cookie = iop_adma_tx_submit(tx);
-	iop_adma_issue_pending(dma_chan);
-	msleep(8);
-
-	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
-		dev_err(dma_chan->device->dev,
-			"Self-test memset timed out, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
-		u32 *ptr = page_address(dest);
-		if (ptr[i]) {
-			dev_err(dma_chan->device->dev,
-				"Self-test memset failed compare, disabling\n");
-			err = -ENODEV;
-			goto free_resources;
-		}
-	}
-
 	/* test for non-zero parity sum */
 	zero_sum_result = 0;
 	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
@@ -1487,8 +1427,6 @@ static int iop_adma_probe(struct platform_device *pdev)
 	/* set prep routines based on capability */
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
-	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
-		dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->max_xor = iop_adma_get_max_xor();
 		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
@@ -1556,8 +1494,7 @@ static int iop_adma_probe(struct platform_device *pdev)
 		goto err_free_iop_chan;
 	}
 
-	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
-	    dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
+	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		ret = iop_adma_xor_val_self_test(adev);
 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
 		if (ret)
@@ -1584,7 +1521,6 @@ static int iop_adma_probe(struct platform_device *pdev)
 		dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
 		dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
 		dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
-		dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
 		dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
 		dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d64ae14f2706..200f1a3c9a44 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -89,11 +89,6 @@ static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
 	hw_desc->phy_next_desc = 0;
 }
 
-static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
-{
-	desc->value = val;
-}
-
 static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
 				  dma_addr_t addr)
 {
@@ -128,22 +123,6 @@ static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
 	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
 }
 
-static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
-{
-	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
-}
-
-static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
-{
-	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
-}
-
-static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
-{
-	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
-	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
-}
-
 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
 {
 	u32 val = __raw_readl(XOR_INTR_MASK(chan));
@@ -186,8 +165,6 @@ static int mv_can_chain(struct mv_xor_desc_slot *desc)
 
 	if (chain_old_tail->type != desc->type)
 		return 0;
-	if (desc->type == DMA_MEMSET)
-		return 0;
 
 	return 1;
 }
@@ -205,9 +182,6 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 	case DMA_MEMCPY:
 		op_mode = XOR_OPERATION_MODE_MEMCPY;
 		break;
-	case DMA_MEMSET:
-		op_mode = XOR_OPERATION_MODE_MEMSET;
-		break;
 	default:
 		dev_err(mv_chan_to_devp(chan),
 			"error: unsupported operation %d\n",
@@ -274,18 +248,9 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
 	if (sw_desc->type != mv_chan->current_type)
 		mv_set_mode(mv_chan, sw_desc->type);
 
-	if (sw_desc->type == DMA_MEMSET) {
-		/* for memset requests we need to program the engine, no
-		 * descriptors used.
-		 */
-		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
-		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
-		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
-		mv_chan_set_value(mv_chan, sw_desc->value);
-	} else {
-		/* set the hardware chain */
-		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
-	}
+	/* set the hardware chain */
+	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
+
 	mv_chan->pending += sw_desc->slot_cnt;
 	mv_xor_issue_pending(&mv_chan->dmachan);
 }
@@ -688,43 +653,6 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 }
 
 static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
-		       size_t len, unsigned long flags)
-{
-	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
-	struct mv_xor_desc_slot *sw_desc, *grp_start;
-	int slot_cnt;
-
-	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s dest: %x len: %u flags: %ld\n",
-		__func__, dest, len, flags);
-	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
-		return NULL;
-
-	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
-
-	spin_lock_bh(&mv_chan->lock);
-	slot_cnt = mv_chan_memset_slot_count(len);
-	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
-	if (sw_desc) {
-		sw_desc->type = DMA_MEMSET;
-		sw_desc->async_tx.flags = flags;
-		grp_start = sw_desc->group_head;
-		mv_desc_init(grp_start, flags);
-		mv_desc_set_byte_count(grp_start, len);
-		mv_desc_set_dest_addr(sw_desc->group_head, dest);
-		mv_desc_set_block_fill_val(grp_start, value);
-		sw_desc->unmap_src_cnt = 1;
-		sw_desc->unmap_len = len;
-	}
-	spin_unlock_bh(&mv_chan->lock);
-	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s sw_desc %p async_tx %p \n",
-		__func__, sw_desc, &sw_desc->async_tx);
-	return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-static struct dma_async_tx_descriptor *
 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		    unsigned int src_cnt, size_t len, unsigned long flags)
 {
@@ -1137,8 +1065,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	/* set prep routines based on capability */
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
-	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
-		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->max_xor = 8;
 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1187,9 +1113,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 		goto err_free_irq;
 	}
 
-	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
+	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
-		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
@@ -1298,8 +1223,6 @@ static int mv_xor_probe(struct platform_device *pdev)
 			dma_cap_set(DMA_MEMCPY, cap_mask);
 		if (of_property_read_bool(np, "dmacap,xor"))
 			dma_cap_set(DMA_XOR, cap_mask);
-		if (of_property_read_bool(np, "dmacap,memset"))
-			dma_cap_set(DMA_MEMSET, cap_mask);
 		if (of_property_read_bool(np, "dmacap,interrupt"))
 			dma_cap_set(DMA_INTERRUPT, cap_mask);
 
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index c632a4761fcf..c619359cb7fe 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -31,7 +31,6 @@
 
 #define XOR_OPERATION_MODE_XOR		0
 #define XOR_OPERATION_MODE_MEMCPY	2
-#define XOR_OPERATION_MODE_MEMSET	4
 
 #define XOR_CURR_DESC(chan)	(chan->mmr_base + 0x210 + (chan->idx * 4))
 #define XOR_NEXT_DESC(chan)	(chan->mmr_base + 0x200 + (chan->idx * 4))
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a17553f7c028..7ec82f0667eb 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2485,10 +2485,10 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
 	struct dma_pl330_chan *pch = to_pchan(chan);
 	unsigned long flags;
 
-	spin_lock_irqsave(&pch->lock, flags);
-
 	tasklet_kill(&pch->task);
 
+	spin_lock_irqsave(&pch->lock, flags);
+
 	pl330_release_channel(pch->pl330_chid);
 	pch->pl330_chid = NULL;
 
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 5d3d95569a1e..1e220f8dfd8c 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -2323,47 +2323,6 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
 }
 
 /**
- * ppc440spe_adma_prep_dma_memset - prepare CDB for a MEMSET operation
- */
-static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
-		struct dma_chan *chan, dma_addr_t dma_dest, int value,
-		size_t len, unsigned long flags)
-{
-	struct ppc440spe_adma_chan *ppc440spe_chan;
-	struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
-	int slot_cnt, slots_per_op;
-
-	ppc440spe_chan = to_ppc440spe_adma_chan(chan);
-
-	if (unlikely(!len))
-		return NULL;
-
-	BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
-
-	spin_lock_bh(&ppc440spe_chan->lock);
-
-	dev_dbg(ppc440spe_chan->device->common.dev,
-		"ppc440spe adma%d: %s cal: %u len: %u int_en %d\n",
-		ppc440spe_chan->device->id, __func__, value, len,
-		flags & DMA_PREP_INTERRUPT ? 1 : 0);
-
-	slot_cnt = slots_per_op = 1;
-	sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
-			slots_per_op);
-	if (sw_desc) {
-		group_start = sw_desc->group_head;
-		ppc440spe_desc_init_memset(group_start, value, flags);
-		ppc440spe_adma_set_dest(group_start, dma_dest, 0);
-		ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
-		sw_desc->unmap_len = len;
-		sw_desc->async_tx.flags = flags;
-	}
-	spin_unlock_bh(&ppc440spe_chan->lock);
-
-	return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-/**
  * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
  */
 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
@@ -4125,7 +4084,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
 	case PPC440SPE_DMA1_ID:
 		dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
 		dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
-		dma_cap_set(DMA_MEMSET, adev->common.cap_mask);
 		dma_cap_set(DMA_PQ, adev->common.cap_mask);
 		dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
 		dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
@@ -4151,10 +4109,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
 		adev->common.device_prep_dma_memcpy =
 			ppc440spe_adma_prep_dma_memcpy;
 	}
-	if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) {
-		adev->common.device_prep_dma_memset =
-			ppc440spe_adma_prep_dma_memset;
-	}
 	if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
 		adev->common.max_xor = XOR_MAX_OPS;
 		adev->common.device_prep_dma_xor =
@@ -4217,7 +4171,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
 	  dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
 	  dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
 	  dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
-	  dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "",
 	  dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
 }
 