author		Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>	2013-10-18 13:35:32 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2013-11-14 14:04:38 -0500
commit		54f8d501e842879143e867e70996574a54d1e130 (patch)
tree		1fcd65a5152d330167f5eefba5cc5d514ec91da1 /drivers/dma/ioat
parent		6f57fd0578dff23a4bd16118f0cb4201bcec91f1 (diff)
dmaengine: remove DMA unmap from drivers
Remove support for DMA unmapping from drivers, as it is no longer
needed: the DMA core now handles unmapping when a descriptor completes.
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
[djbw: fix up chan2parent() unused warning in drivers/dma/dw/core.c]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
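
For context: the driver-side unmap code removed below is superseded by the
generic unmap support added to the dmaengine core earlier in this series;
each descriptor now carries an optional, refcounted dmaengine_unmap_data
that the core releases on completion. A minimal sketch of the core helper
that the cleanup paths below call, modeled on include/linux/dmaengine.h as
of this series (the exact body may differ):

/*
 * Sketch of the core-side helper that replaces the per-driver unmap
 * paths; the descriptor's unmap data is dropped once, on completion.
 */
static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (tx->unmap) {
		dmaengine_unmap_put(tx->unmap);	/* unmaps when the last ref drops */
		tx->unmap = NULL;
	}
}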
Diffstat (limited to 'drivers/dma/ioat')
 drivers/dma/ioat/dma.c    |  16 ----
 drivers/dma/ioat/dma.h    |  12 ---
 drivers/dma/ioat/dma_v2.c |   1 -
 drivers/dma/ioat/dma_v3.c | 166 ----------
 4 files changed, 0 insertions(+), 195 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 26f8cfd6bc3f..c123e32dbbb0 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -531,21 +531,6 @@ static void ioat1_cleanup_event(unsigned long data)
 	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
-void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
-		    size_t len, struct ioat_dma_descriptor *hw)
-{
-	struct pci_dev *pdev = chan->device->pdev;
-	size_t offset = len - hw->size;
-
-	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
-		ioat_unmap(pdev, hw->dst_addr - offset, len,
-			   PCI_DMA_FROMDEVICE, flags, 1);
-
-	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
-		ioat_unmap(pdev, hw->src_addr - offset, len,
-			   PCI_DMA_TODEVICE, flags, 0);
-}
-
 dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
 {
 	dma_addr_t phys_complete;
@@ -603,7 +588,6 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
 		if (tx->cookie) {
 			dma_cookie_complete(tx);
 			dma_descriptor_unmap(tx);
-			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
 			ioat->active -= desc->hw->tx_cnt;
 			if (tx->callback) {
 				tx->callback(tx->callback_param);
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 54fb7b9ff9aa..4300d5af188f 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -342,16 +342,6 @@ static inline bool is_ioat_bug(unsigned long err)
 	return !!err;
 }
 
-static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
-			      int direction, enum dma_ctrl_flags flags, bool dst)
-{
-	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
-	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
-		pci_unmap_single(pdev, addr, len, direction);
-	else
-		pci_unmap_page(pdev, addr, len, direction);
-}
-
 int ioat_probe(struct ioatdma_device *device);
 int ioat_register(struct ioatdma_device *device);
 int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
@@ -363,8 +353,6 @@ void ioat_init_channel(struct ioatdma_device *device,
 		       struct ioat_chan_common *chan, int idx);
 enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 				   struct dma_tx_state *txstate);
-void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
-		    size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
 			   dma_addr_t *phys_complete);
 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
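
Dropping ioat_unmap() also retires the DMA_COMPL_{SRC,DEST}_UNMAP_SINGLE
flag distinction between pci_unmap_single() and pci_unmap_page(). In the
generic scheme the core records every mapping in the dmaengine_unmap_data
arrays and tears them all down as page mappings. A rough sketch of that
teardown, simplified from the drivers/dma/dmaengine.c of this series (the
real version also skips unused bidirectional slots):

/* Simplified sketch of the core teardown that supersedes ioat_unmap(). */
static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap =
		container_of(kref, struct dmaengine_unmap_data, kref);
	struct device *dev = unmap->dev;
	int i, cnt;

	/* sources first, then destinations, then bidirectional entries */
	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len, DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len, DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len, DMA_BIDIRECTIONAL);
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}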
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index fc7b50a813cc..5d3affe7e976 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -149,7 +149,6 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 		dump_desc_dbg(ioat, desc);
 		if (tx->cookie) {
 			dma_descriptor_unmap(tx);
-			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
 			dma_cookie_complete(tx);
 			if (tx->callback) {
 				tx->callback(tx->callback_param);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 57a2901b917a..43386c171bba 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -96,13 +96,6 @@ static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
 
 static void ioat3_eh(struct ioat2_dma_chan *ioat);
 
-static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
-{
-	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
-
-	return raw->field[xor_idx_to_field[idx]];
-}
-
 static void xor_set_src(struct ioat_raw_descriptor *descs[2],
 			dma_addr_t addr, u32 offset, int idx)
 {
@@ -296,164 +289,6 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
 	kmem_cache_free(device->sed_pool, sed);
 }
 
-static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
-			    struct ioat_ring_ent *desc, int idx)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct pci_dev *pdev = chan->device->pdev;
-	size_t len = desc->len;
-	size_t offset = len - desc->hw->size;
-	struct dma_async_tx_descriptor *tx = &desc->txd;
-	enum dma_ctrl_flags flags = tx->flags;
-
-	switch (desc->hw->ctl_f.op) {
-	case IOAT_OP_COPY:
-		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
-			ioat_dma_unmap(chan, flags, len, desc->hw);
-		break;
-	case IOAT_OP_XOR_VAL:
-	case IOAT_OP_XOR: {
-		struct ioat_xor_descriptor *xor = desc->xor;
-		struct ioat_ring_ent *ext;
-		struct ioat_xor_ext_descriptor *xor_ex = NULL;
-		int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
-		struct ioat_raw_descriptor *descs[2];
-		int i;
-
-		if (src_cnt > 5) {
-			ext = ioat2_get_ring_ent(ioat, idx + 1);
-			xor_ex = ext->xor_ex;
-		}
-
-		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			descs[0] = (struct ioat_raw_descriptor *) xor;
-			descs[1] = (struct ioat_raw_descriptor *) xor_ex;
-			for (i = 0; i < src_cnt; i++) {
-				dma_addr_t src = xor_get_src(descs, i);
-
-				ioat_unmap(pdev, src - offset, len,
-					   PCI_DMA_TODEVICE, flags, 0);
-			}
-
-			/* dest is a source in xor validate operations */
-			if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
-				ioat_unmap(pdev, xor->dst_addr - offset, len,
-					   PCI_DMA_TODEVICE, flags, 1);
-				break;
-			}
-		}
-
-		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
-			ioat_unmap(pdev, xor->dst_addr - offset, len,
-				   PCI_DMA_FROMDEVICE, flags, 1);
-		break;
-	}
-	case IOAT_OP_PQ_VAL:
-	case IOAT_OP_PQ: {
-		struct ioat_pq_descriptor *pq = desc->pq;
-		struct ioat_ring_ent *ext;
-		struct ioat_pq_ext_descriptor *pq_ex = NULL;
-		int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
-		struct ioat_raw_descriptor *descs[2];
-		int i;
-
-		if (src_cnt > 3) {
-			ext = ioat2_get_ring_ent(ioat, idx + 1);
-			pq_ex = ext->pq_ex;
-		}
-
-		/* in the 'continue' case don't unmap the dests as sources */
-		if (dmaf_p_disabled_continue(flags))
-			src_cnt--;
-		else if (dmaf_continue(flags))
-			src_cnt -= 3;
-
-		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			descs[0] = (struct ioat_raw_descriptor *) pq;
-			descs[1] = (struct ioat_raw_descriptor *) pq_ex;
-			for (i = 0; i < src_cnt; i++) {
-				dma_addr_t src = pq_get_src(descs, i);
-
-				ioat_unmap(pdev, src - offset, len,
-					   PCI_DMA_TODEVICE, flags, 0);
-			}
-
-			/* the dests are sources in pq validate operations */
-			if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
-				if (!(flags & DMA_PREP_PQ_DISABLE_P))
-					ioat_unmap(pdev, pq->p_addr - offset,
-						   len, PCI_DMA_TODEVICE, flags, 0);
-				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
-					ioat_unmap(pdev, pq->q_addr - offset,
-						   len, PCI_DMA_TODEVICE, flags, 0);
-				break;
-			}
-		}
-
-		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-			if (!(flags & DMA_PREP_PQ_DISABLE_P))
-				ioat_unmap(pdev, pq->p_addr - offset, len,
-					   PCI_DMA_BIDIRECTIONAL, flags, 1);
-			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
-				ioat_unmap(pdev, pq->q_addr - offset, len,
-					   PCI_DMA_BIDIRECTIONAL, flags, 1);
-		}
-		break;
-	}
-	case IOAT_OP_PQ_16S:
-	case IOAT_OP_PQ_VAL_16S: {
-		struct ioat_pq_descriptor *pq = desc->pq;
-		int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
-		struct ioat_raw_descriptor *descs[4];
-		int i;
-
-		/* in the 'continue' case don't unmap the dests as sources */
-		if (dmaf_p_disabled_continue(flags))
-			src_cnt--;
-		else if (dmaf_continue(flags))
-			src_cnt -= 3;
-
-		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			descs[0] = (struct ioat_raw_descriptor *)pq;
-			descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
-			descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
-			for (i = 0; i < src_cnt; i++) {
-				dma_addr_t src = pq16_get_src(descs, i);
-
-				ioat_unmap(pdev, src - offset, len,
-					   PCI_DMA_TODEVICE, flags, 0);
-			}
-
-			/* the dests are sources in pq validate operations */
-			if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
-				if (!(flags & DMA_PREP_PQ_DISABLE_P))
-					ioat_unmap(pdev, pq->p_addr - offset,
-						   len, PCI_DMA_TODEVICE,
-						   flags, 0);
-				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
-					ioat_unmap(pdev, pq->q_addr - offset,
-						   len, PCI_DMA_TODEVICE,
-						   flags, 0);
-				break;
-			}
-		}
-
-		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-			if (!(flags & DMA_PREP_PQ_DISABLE_P))
-				ioat_unmap(pdev, pq->p_addr - offset, len,
-					   PCI_DMA_BIDIRECTIONAL, flags, 1);
-			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
-				ioat_unmap(pdev, pq->q_addr - offset, len,
-					   PCI_DMA_BIDIRECTIONAL, flags, 1);
-		}
-		break;
-	}
-	default:
-		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
-			__func__, desc->hw->ctl_f.op);
-	}
-}
-
 static bool desc_has_ext(struct ioat_ring_ent *desc)
 {
 	struct ioat_dma_descriptor *hw = desc->hw;
@@ -578,7 +413,6 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 		if (tx->cookie) {
 			dma_cookie_complete(tx);
 			dma_descriptor_unmap(tx);
-			ioat3_dma_unmap(ioat, desc, idx + i);
 			if (tx->callback) {
 				tx->callback(tx->callback_param);
 				tx->callback = NULL;
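
With the driver paths gone, the party that maps the buffers also owns their
unmapping: the submitter fills a dmaengine_unmap_data, attaches it to the
descriptor, and the core drops it when the transaction completes. A hedged
sketch of that usage for a plain memcpy, with API names taken from this
series; chan, src_pg/src_off, dst_pg/dst_off, and len are placeholder
context, and error handling is elided:

	struct dma_device *device = chan->device;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* slot 0: TO_DEVICE source; slot 1: FROM_DEVICE destination */
	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	unmap->to_cnt = 1;
	unmap->from_cnt = 1;
	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, src_pg, src_off, len,
				      DMA_TO_DEVICE);
	unmap->addr[1] = dma_map_page(device->dev, dst_pg, dst_off, len,
				      DMA_FROM_DEVICE);

	tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					    unmap->addr[0], len, DMA_CTRL_ACK);
	dma_set_unmap(tx, unmap);	/* descriptor takes its own reference */
	cookie = dmaengine_submit(tx);
	dmaengine_unmap_put(unmap);	/* drop the submitter's reference; the
					 * core unmaps on completion */

This is why the per-operation unmap logic above (copy, xor, pq, pq16) could
be deleted wholesale: the descriptor no longer needs to know how its buffers
were mapped, only that an unmap_data may be attached.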