Diffstat (limited to 'drivers/dma')
-rw-r--r--   drivers/dma/Kconfig        |  5
-rw-r--r--   drivers/dma/coh901318.c    |  9
-rw-r--r--   drivers/dma/dmaengine.c    | 14
-rw-r--r--   drivers/dma/ioat/dma.c     | 16
-rw-r--r--   drivers/dma/ioat/dma.h     |  6
-rw-r--r--   drivers/dma/ioat/dma_v2.c  | 12
-rw-r--r--   drivers/dma/ioat/dma_v2.h  |  4
-rw-r--r--   drivers/dma/ioat/dma_v3.c  | 49
-rw-r--r--   drivers/dma/iop-adma.c     |  4
-rw-r--r--   drivers/dma/sa11x0-dma.c   |  2
10 files changed, 84 insertions(+), 37 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index cf9da362d64f..ef378b5b17e4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -91,11 +91,10 @@ config DW_DMAC
 
 config AT_HDMAC
         tristate "Atmel AHB DMA support"
-        depends on ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
+        depends on ARCH_AT91
         select DMA_ENGINE
         help
-          Support the Atmel AHB DMA controller.  This can be integrated in
-          chips such as the Atmel AT91SAM9RL.
+          Support the Atmel AHB DMA controller.
 
 config FSL_DMA
         tristate "Freescale Elo and Elo Plus DMA support"
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index c0b650c70bbd..e67b4e06a918 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -104,13 +104,6 @@ static void coh901318_list_print(struct coh901318_chan *cohc,
 static struct coh901318_base *debugfs_dma_base;
 static struct dentry *dma_dentry;
 
-static int coh901318_debugfs_open(struct inode *inode, struct file *file)
-{
-
-        file->private_data = inode->i_private;
-        return 0;
-}
-
 static int coh901318_debugfs_read(struct file *file, char __user *buf,
                                   size_t count, loff_t *f_pos)
 {
@@ -158,7 +151,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
 
 static const struct file_operations coh901318_debugfs_status_operations = {
         .owner = THIS_MODULE,
-        .open = coh901318_debugfs_open,
+        .open = simple_open,
         .read = coh901318_debugfs_read,
         .llseek = default_llseek,
 };
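The driver-private open that is removed above only stashed inode->i_private into file->private_data, which is exactly what the generic libfs helper does. A sketch of simple_open(), paraphrased from fs/libfs.c rather than taken from this patch:

#include <linux/fs.h>

int simple_open(struct inode *inode, struct file *file)
{
        /* Generic libfs/debugfs open: hand the creator's private pointer
         * to the file, making per-driver copies like the one deleted
         * above redundant. */
        if (inode->i_private)
                file->private_data = inode->i_private;
        return 0;
}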
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 767bcc31b365..2397f6f451b1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -332,6 +332,20 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/*
+ * net_dma_find_channel - find a channel for net_dma
+ * net_dma has alignment requirements
+ */
+struct dma_chan *net_dma_find_channel(void)
+{
+        struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+        if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
+                return NULL;
+
+        return chan;
+}
+EXPORT_SYMBOL(net_dma_find_channel);
+
 /**
  * dma_issue_pending_all - flush all pending operations across all channels
  */
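A minimal sketch of how a net_dma consumer would use the new helper; the caller below is illustrative, not part of this patch, and assumes only memcpy() and the exported net_dma_find_channel():

#include <linux/dmaengine.h>
#include <linux/string.h>

static void rx_copy(void *dst, const void *src, size_t len)
{
        /* Returns a channel only when its device accepts byte-aligned
         * copies (the is_dma_copy_aligned(dev, 1, 1, 1) probe above). */
        struct dma_chan *chan = net_dma_find_channel();

        if (!chan) {
                memcpy(dst, src, len);  /* no suitable offload engine */
                return;
        }

        /* otherwise queue an async DMA_MEMCPY on 'chan' as usual */
}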
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 31493d80e0e9..73b2b65cb1de 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -546,9 +546,9 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
                         PCI_DMA_TODEVICE, flags, 0);
 }
 
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
 {
-        unsigned long phys_complete;
+        dma_addr_t phys_complete;
         u64 completion;
 
         completion = *chan->completion;
@@ -569,7 +569,7 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
 }
 
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-                           unsigned long *phys_complete)
+                           dma_addr_t *phys_complete)
 {
         *phys_complete = ioat_get_current_completion(chan);
         if (*phys_complete == chan->last_completion)
@@ -580,14 +580,14 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
         return true;
 }
 
-static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
 {
         struct ioat_chan_common *chan = &ioat->base;
         struct list_head *_desc, *n;
         struct dma_async_tx_descriptor *tx;
 
-        dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
-                 __func__, phys_complete);
+        dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
+                 __func__, (unsigned long long) phys_complete);
         list_for_each_safe(_desc, n, &ioat->used_desc) {
                 struct ioat_desc_sw *desc;
 
@@ -652,7 +652,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
 static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 {
         struct ioat_chan_common *chan = &ioat->base;
-        unsigned long phys_complete;
+        dma_addr_t phys_complete;
 
         prefetch(chan->completion);
 
@@ -698,7 +698,7 @@ static void ioat1_timer_event(unsigned long data)
                 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
                 spin_unlock_bh(&ioat->desc_lock);
         } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-                unsigned long phys_complete;
+                dma_addr_t phys_complete;
 
                 spin_lock_bh(&ioat->desc_lock);
                 /* if we haven't made progress and we have already
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c7888bccd974..5e8fe01ba69d 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -88,7 +88,7 @@ struct ioatdma_device {
 struct ioat_chan_common {
         struct dma_chan common;
         void __iomem *reg_base;
-        unsigned long last_completion;
+        dma_addr_t last_completion;
         spinlock_t cleanup_lock;
         unsigned long state;
         #define IOAT_COMPLETION_PENDING 0
@@ -310,7 +310,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device);
 void __devexit ioat_dma_remove(struct ioatdma_device *device);
 struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
                                               void __iomem *iobase);
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
                        struct ioat_chan_common *chan, int idx);
 enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
@@ -318,7 +318,7 @@ enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
                     size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-                           unsigned long *phys_complete);
+                           dma_addr_t *phys_complete);
 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
 void ioat_kobject_del(struct ioatdma_device *device);
 extern const struct sysfs_ops ioat_sysfs_ops;
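The unsigned long -> dma_addr_t conversions in dma.c and dma.h matter on 32-bit kernels with 64-bit DMA addressing (CONFIG_ARCH_DMA_ADDR_T_64BIT, e.g. PAE systems), where dma_addr_t is 64 bits wide but unsigned long is only 32. A small sketch of the truncation the old type allowed; the helper name is hypothetical:

#include <linux/types.h>

static inline bool completion_truncated(dma_addr_t phys_complete)
{
        /* True when the completion address carries bits that an
         * unsigned long cannot hold, i.e. a descriptor located above
         * 4 GiB on a 32-bit kernel would have been mis-compared
         * against last_completion before this change. */
        return (dma_addr_t)(unsigned long)phys_complete != phys_complete;
}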
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index e8e110ff3d96..86895760b598 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -128,7 +128,7 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
         spin_unlock_bh(&ioat->prep_lock);
 }
 
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
         struct ioat_chan_common *chan = &ioat->base;
         struct dma_async_tx_descriptor *tx;
@@ -179,7 +179,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 {
         struct ioat_chan_common *chan = &ioat->base;
-        unsigned long phys_complete;
+        dma_addr_t phys_complete;
 
         spin_lock_bh(&chan->cleanup_lock);
         if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -260,7 +260,7 @@ int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
 static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 {
         struct ioat_chan_common *chan = &ioat->base;
-        unsigned long phys_complete;
+        dma_addr_t phys_complete;
 
         ioat2_quiesce(chan, 0);
         if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -275,7 +275,7 @@ void ioat2_timer_event(unsigned long data)
         struct ioat_chan_common *chan = &ioat->base;
 
         if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-                unsigned long phys_complete;
+                dma_addr_t phys_complete;
                 u64 status;
 
                 status = ioat_chansts(chan);
@@ -572,9 +572,9 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
          */
         struct ioat_chan_common *chan = &ioat->base;
         struct dma_chan *c = &chan->common;
-        const u16 curr_size = ioat2_ring_size(ioat);
+        const u32 curr_size = ioat2_ring_size(ioat);
         const u16 active = ioat2_ring_active(ioat);
-        const u16 new_size = 1 << order;
+        const u32 new_size = 1 << order;
         struct ioat_ring_ent **ring;
         u16 i;
 
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index a2c413b2b8d8..be2a55b95c23 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -74,7 +74,7 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
         return container_of(chan, struct ioat2_dma_chan, base);
 }
 
-static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat)
 {
         return 1 << ioat->alloc_order;
 }
@@ -91,7 +91,7 @@ static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
         return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
 }
 
-static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat)
 {
         return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
 }
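The u16 -> u32 widening protects the ring-size arithmetic when the ring is grown to its maximum order. Assuming the ioat2 maximum allocation order is 16 (an assumption, not stated in this hunk), 1 << 16 = 65536 no longer fits in a u16. A quick sketch of the overflow:

#include <linux/types.h>

static inline void ring_size_overflow_demo(void)
{
        int order = 16;                 /* assumed maximum alloc_order */
        u16 narrow = 1 << order;        /* wraps to 0 */
        u32 wide = 1 << order;          /* 65536, as intended */

        /* With the old u16 return type, ioat2_ring_space() and the
         * reshape_ring() comparisons built on it would have worked
         * with 0 instead of 65536 and gone wrong. */
        (void)narrow;
        (void)wide;
}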
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 2c4476c0e405..f7f1dc62c15c 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -257,7 +257,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
  * The difference from the dma_v2.c __cleanup() is that this routine
  * handles extended descriptors and dma-unmapping raid operations.
  */
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
         struct ioat_chan_common *chan = &ioat->base;
         struct ioat_ring_ent *desc;
@@ -314,7 +314,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
 {
         struct ioat_chan_common *chan = &ioat->base;
-        unsigned long phys_complete;
+        dma_addr_t phys_complete;
 
         spin_lock_bh(&chan->cleanup_lock);
         if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -333,7 +333,7 @@ static void ioat3_cleanup_event(unsigned long data)
 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 {
         struct ioat_chan_common *chan = &ioat->base;
-        unsigned long phys_complete;
+        dma_addr_t phys_complete;
 
         ioat2_quiesce(chan, 0);
         if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -348,7 +348,7 @@ static void ioat3_timer_event(unsigned long data)
         struct ioat_chan_common *chan = &ioat->base;
 
         if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-                unsigned long phys_complete;
+                dma_addr_t phys_complete;
                 u64 status;
 
                 status = ioat_chansts(chan);
@@ -1149,6 +1149,44 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
         return ioat2_reset_sync(chan, msecs_to_jiffies(200));
 }
 
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+        switch (pdev->device) {
+        case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+        case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+        case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+        case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+        case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+        case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+        case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+        case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+        case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+        case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+                return true;
+        default:
+                return false;
+        }
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+        switch (pdev->device) {
+        case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+        case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+        case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+        case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+        case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+        case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+        case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+        case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+        case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+        case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+                return true;
+        default:
+                return false;
+        }
+}
+
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
         struct pci_dev *pdev = device->pdev;
@@ -1169,6 +1207,9 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
         dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
         dma->device_free_chan_resources = ioat2_free_chan_resources;
 
+        if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+                dma->copy_align = 6;
+
         dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
         dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
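Setting copy_align = 6 advertises a 64-byte (1 << 6) minimum alignment for memcpy offload on the JF/SNB parts, which is what makes the net_dma_find_channel() probe added in dmaengine.c reject them for byte-aligned network copies. A sketch of the test dmaengine applies, paraphrasing is_dma_copy_aligned() under an illustrative name:

#include <linux/types.h>

static bool copy_is_aligned(u8 copy_align, size_t src_off,
                            size_t dst_off, size_t len)
{
        /* Offsets and length must have their low 'copy_align' bits
         * clear; with copy_align == 6 everything must be a multiple
         * of 64 bytes, so a (1, 1, 1) probe fails. */
        size_t mask = ((size_t)1 << copy_align) - 1;

        return ((src_off | dst_off | len) & mask) == 0;
}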
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index da6c4c2c066a..79e3eba29702 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1252,8 +1252,8 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
         struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
         /* address conversion buffers (dma_map / page_address) */
         void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
-        dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
-        dma_addr_t pq_dest[2];
+        dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
+        dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
 
         int i;
         struct dma_async_tx_descriptor *tx;
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 16a6b48883cf..ec78ccef9132 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -585,7 +585,7 @@ static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 
 static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
         struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
-        enum dma_transfer_direction dir, unsigned long flags)
+        enum dma_transfer_direction dir, unsigned long flags, void *context)
 {
         struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
         struct sa11x0_dma_desc *txd;
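The added void *context argument tracks the dmaengine core's updated slave-prep callback, which grew a per-transfer context pointer around this time. The declaration below is a sketch of the struct dma_device member the driver now has to match; treat the exact prototype as an assumption about the contemporaneous include/linux/dmaengine.h:

/* dmaengine slave-prep callback shape this driver now implements (sketch) */
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context);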