author    Linus Torvalds <torvalds@linux-foundation.org>    2012-04-10 18:30:16 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-04-10 18:30:16 -0400
commit    94fb175c0414902ad9dbd956addf3a5feafbc85b (patch)
tree      5d3c37abe78f072e92072f2079a98303c92cf16e
parent    a9e1e53bcfb29b3b503a5e75ce498d9a64f32c1e (diff)
parent    a2bd1140a264b561e38d99e656cd843c2d840e86 (diff)
Merge tag 'dmaengine-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine fixes from Dan Williams:

1/ regression fix for Xen as it now trips over a broken assumption
   about the dma address size on 32-bit builds

2/ new quirk for netdma to ignore dma channels that cannot meet
   netdma alignment requirements

3/ fixes for two long standing issues in ioatdma (ring size overflow)
   and iop-adma (potential stack corruption)

* tag 'dmaengine-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  netdma: adding alignment check for NETDMA ops
  ioatdma: DMA copy alignment needed to address IOAT DMA silicon errata
  ioat: ring size variables need to be 32bit to avoid overflow
  iop-adma: Corrected array overflow in RAID6 Xscale(R) test.
  ioat: fix size of 'completion' for Xen
-rw-r--r--  drivers/dma/dmaengine.c     14
-rw-r--r--  drivers/dma/ioat/dma.c      16
-rw-r--r--  drivers/dma/ioat/dma.h       6
-rw-r--r--  drivers/dma/ioat/dma_v2.c   12
-rw-r--r--  drivers/dma/ioat/dma_v2.h    4
-rw-r--r--  drivers/dma/ioat/dma_v3.c   49
-rw-r--r--  drivers/dma/iop-adma.c       4
-rw-r--r--  include/linux/dmaengine.h    1
-rw-r--r--  net/ipv4/tcp.c               4
-rw-r--r--  net/ipv4/tcp_input.c         2
-rw-r--r--  net/ipv4/tcp_ipv4.c          2
-rw-r--r--  net/ipv6/tcp_ipv6.c          2
12 files changed, 86 insertions(+), 30 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 767bcc31b365..2397f6f451b1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -332,6 +332,20 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/*
+ * net_dma_find_channel - find a channel for net_dma
+ * net_dma has alignment requirements
+ */
+struct dma_chan *net_dma_find_channel(void)
+{
+	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
+		return NULL;
+
+	return chan;
+}
+EXPORT_SYMBOL(net_dma_find_channel);
+
 /**
  * dma_issue_pending_all - flush all pending operations across all channels
  */
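The new helper probes the channel with source offset, destination offset and
length all equal to one byte: NETDMA copies into arbitrary user iovecs, so a
channel that cannot copy at byte granularity is useless to it. The underlying
is_dma_copy_aligned() test reduces to a power-of-two mask check against the
device's copy_align (the log2 of its required alignment). A minimal
self-contained sketch of that check, modeled on the kernel helper rather than
copied from it:

#include <stdbool.h>
#include <stddef.h>

/*
 * 'align' is log2 of the required alignment: 0 means unrestricted,
 * 6 means offsets and length must all be 64-byte multiples.
 */
static bool check_copy_aligned(unsigned int align, size_t off1,
			       size_t off2, size_t len)
{
	size_t mask = ((size_t)1 << align) - 1;

	return !(mask & (off1 | off2 | len));
}

int main(void)
{
	/* The (1, 1, 1) probe used by net_dma_find_channel(): */
	return check_copy_aligned(0, 1, 1, 1) &&	/* true: usable */
	       !check_copy_aligned(6, 1, 1, 1)		/* false: skipped */
	       ? 0 : 1;
}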
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 31493d80e0e9..73b2b65cb1de 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -546,9 +546,9 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 			PCI_DMA_TODEVICE, flags, 0);
 }
 
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
 {
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 	u64 completion;
 
 	completion = *chan->completion;
@@ -569,7 +569,7 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
 }
 
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   unsigned long *phys_complete)
+			   dma_addr_t *phys_complete)
 {
 	*phys_complete = ioat_get_current_completion(chan);
 	if (*phys_complete == chan->last_completion)
@@ -580,14 +580,14 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
 	return true;
 }
 
-static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct list_head *_desc, *n;
 	struct dma_async_tx_descriptor *tx;
 
-	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
-		__func__, phys_complete);
+	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
+		__func__, (unsigned long long) phys_complete);
 	list_for_each_safe(_desc, n, &ioat->used_desc) {
 		struct ioat_desc_sw *desc;
 
@@ -652,7 +652,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
 static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	prefetch(chan->completion);
 
@@ -698,7 +698,7 @@ static void ioat1_timer_event(unsigned long data)
 		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 		spin_unlock_bh(&ioat->desc_lock);
 	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 
 		spin_lock_bh(&ioat->desc_lock);
 		/* if we haven't made progress and we have already
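The dma_addr_t conversions above fix a plain truncation bug: on a 32-bit
build with a 64-bit DMA address space (PAE, or a 32-bit domain under Xen),
dma_addr_t is 64 bits wide while unsigned long is only 32, so a completion
address above 4 GiB silently lost its high bits and never matched
chan->last_completion. A standalone illustration of the failure mode (the
typedef is a stand-in for the kernel's dma_addr_t, not taken from it):

#include <stdint.h>
#include <stdio.h>

/* dma_addr_t as defined with CONFIG_ARCH_DMA_ADDR_T_64BIT */
typedef uint64_t wide_dma_addr_t;

int main(void)
{
	wide_dma_addr_t completion = 0x1083041c00ULL;	/* above 4 GiB */
	uint32_t stored = (uint32_t)completion;		/* 32-bit unsigned long */

	/* The driver compared the truncated value, so cleanup stalled
	 * whenever the completion buffer landed above the 4 GiB line. */
	printf("full: %llx  truncated: %x\n",
	       (unsigned long long)completion, stored);
	return 0;
}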
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c7888bccd974..5e8fe01ba69d 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -88,7 +88,7 @@ struct ioatdma_device {
 struct ioat_chan_common {
 	struct dma_chan common;
 	void __iomem *reg_base;
-	unsigned long last_completion;
+	dma_addr_t last_completion;
 	spinlock_t cleanup_lock;
 	unsigned long state;
 	#define IOAT_COMPLETION_PENDING 0
@@ -310,7 +310,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device);
 void __devexit ioat_dma_remove(struct ioatdma_device *device);
 struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
 					      void __iomem *iobase);
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
 		       struct ioat_chan_common *chan, int idx);
 enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
@@ -318,7 +318,7 @@ enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 		    size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   unsigned long *phys_complete);
+			   dma_addr_t *phys_complete);
 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
 void ioat_kobject_del(struct ioatdma_device *device);
 extern const struct sysfs_ops ioat_sysfs_ops;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index e8e110ff3d96..86895760b598 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -128,7 +128,7 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&ioat->prep_lock);
 }
 
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct dma_async_tx_descriptor *tx;
@@ -179,7 +179,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -260,7 +260,7 @@ int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
 static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -275,7 +275,7 @@ void ioat2_timer_event(unsigned long data)
 	struct ioat_chan_common *chan = &ioat->base;
 
 	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 		u64 status;
 
 		status = ioat_chansts(chan);
@@ -572,9 +572,9 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
 	 */
 	struct ioat_chan_common *chan = &ioat->base;
 	struct dma_chan *c = &chan->common;
-	const u16 curr_size = ioat2_ring_size(ioat);
+	const u32 curr_size = ioat2_ring_size(ioat);
 	const u16 active = ioat2_ring_active(ioat);
-	const u16 new_size = 1 << order;
+	const u32 new_size = 1 << order;
 	struct ioat_ring_ent **ring;
 	u16 i;
 
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index a2c413b2b8d8..be2a55b95c23 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -74,7 +74,7 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
 	return container_of(chan, struct ioat2_dma_chan, base);
 }
 
-static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat)
 {
 	return 1 << ioat->alloc_order;
 }
@@ -91,7 +91,7 @@ static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
 	return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
 }
 
-static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat)
 {
 	return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
 }
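The u16 to u32 promotions close an overflow that needs no hardware to
reproduce: the ring may grow to 1 << 16 entries, and 65536 truncated into a
u16 is 0, at which point the size and space helpers above report nonsense for
a maximally grown ring. A standalone demonstration (the order of 16 mirrors
the driver's maximum allocation order; this is an illustration, not driver
code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int alloc_order = 16;		/* ring grown to its maximum */
	uint16_t size16 = (uint16_t)(1 << alloc_order);
	uint32_t size32 = (uint32_t)(1 << alloc_order);

	/* Prints "u16: 0  u32: 65536" -- with the truncated size,
	 * ioat2_ring_space() (size - active) wraps around and the
	 * driver's free-descriptor bookkeeping falls apart. */
	printf("u16: %u  u32: %u\n", size16, size32);
	return 0;
}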
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 2c4476c0e405..f7f1dc62c15c 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -257,7 +257,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
  * The difference from the dma_v2.c __cleanup() is that this routine
  * handles extended descriptors and dma-unmapping raid operations.
  */
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct ioat_ring_ent *desc;
@@ -314,7 +314,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -333,7 +333,7 @@ static void ioat3_cleanup_event(unsigned long data)
 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -348,7 +348,7 @@ static void ioat3_timer_event(unsigned long data)
 	struct ioat_chan_common *chan = &ioat->base;
 
 	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 		u64 status;
 
 		status = ioat_chansts(chan);
@@ -1149,6 +1149,44 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
 	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
 }
 
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+		return true;
+	default:
+		return false;
+	}
+}
+
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
@@ -1169,6 +1207,9 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 
+	if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+		dma->copy_align = 6;
+
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
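Note the convention at work in the probe hunk: copy_align holds the log2 of
the required alignment, so copy_align = 6 declares the 64-byte restriction
the silicon errata calls for on the listed Jasper Forest and Sandy Bridge
parts. Expressing the quirk through the generic capability field means every
is_dma_copy_aligned() caller inherits it automatically, including the new
net_dma_find_channel(), whose one-byte probe (see the sketch after the
dmaengine.c hunk above) now steers NETDMA away from these channels instead of
letting byte-granular copies hit the errata.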
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index da6c4c2c066a..79e3eba29702 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1252,8 +1252,8 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
 	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
 	/* address conversion buffers (dma_map / page_address) */
 	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
-	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
-	dma_addr_t pq_dest[2];
+	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
+	dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
 
 	int i;
 	struct dma_async_tx_descriptor *tx;
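The RAID6 self-test walks the test sources and the P/Q destinations in one
pass, storing all IOP_ADMA_NUM_SRC_TEST+2 dma addresses through pq_src[],
two slots past the end of the old array: a stack-local buffer overflow. The
fix sizes pq_src[] for everything and aliases pq_dest into its last two
slots, so both names index one correctly sized buffer. A reduced sketch of
the repaired layout (names echo the driver, sizes and values are
illustrative):

#include <stdio.h>

#define NUM_SRC_TEST 4		/* stand-in for IOP_ADMA_NUM_SRC_TEST */

int main(void)
{
	/* One buffer sized for the sources plus P and Q ... */
	unsigned long pq_src[NUM_SRC_TEST + 2];
	/* ... with the destinations aliased into its tail. */
	unsigned long *pq_dest = &pq_src[NUM_SRC_TEST];
	int i;

	/* The mapping loop may now store NUM_SRC_TEST + 2 addresses
	 * through pq_src[] without running off the end of the array,
	 * while P and Q stay addressable as pq_dest[0] and pq_dest[1]. */
	for (i = 0; i < NUM_SRC_TEST + 2; i++)
		pq_src[i] = 0x1000ul * (i + 1);

	printf("P: %lx  Q: %lx\n", pq_dest[0], pq_dest[1]);
	return 0;
}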
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 676f967390ae..f9a2e5e67a54 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -974,6 +974,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 
 /* --- Helper iov-locking functions --- */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5d54ed30e821..0cd36e33273b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1452,7 +1452,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
-		    dma_find_channel(DMA_MEMCPY)) {
+		    net_dma_find_channel()) {
 			preempt_enable_no_resched();
 			tp->ucopy.pinned_list =
 				dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1667,7 +1667,7 @@ do_prequeue:
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+				tp->ucopy.dma_chan = net_dma_find_channel();
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e886e2f7fa8d..05b2dd569691 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5225,7 +5225,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+		tp->ucopy.dma_chan = net_dma_find_channel();
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3a25cf743f8b..0cb86ceb652f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1730,7 +1730,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+			tp->ucopy.dma_chan = net_dma_find_channel();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 12c6ece67f39..86cfe6005f40 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1645,7 +1645,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+			tp->ucopy.dma_chan = net_dma_find_channel();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else