author     Dave Jiang <dave.jiang@intel.com>   2015-08-11 11:48:55 -0400
committer  Vinod Koul <vinod.koul@intel.com>   2015-08-17 04:07:30 -0400
commit     3372de5813e4da8305002ff6ffbfc0c7012cb319 (patch)
tree       87e4b66fbfeb47b6ab22a4a993f86b19be27ddb3
parent     599d49de7f69cb5a23e913db24e168ba2f09bd05 (diff)
dmaengine: ioatdma: removal of dma_v3.c and relevant ioat3 references
Move the relevant functions to their respective .c files and remove the
dma_v3.c file. Also remove the various ioat3 references where appropriate.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--  drivers/dma/ioat/Makefile |   2
-rw-r--r--  drivers/dma/ioat/dca.c    |  22
-rw-r--r--  drivers/dma/ioat/dma.c    | 525
-rw-r--r--  drivers/dma/ioat/dma.h    |  11
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 525
-rw-r--r--  drivers/dma/ioat/init.c   |  19
6 files changed, 487 insertions(+), 617 deletions(-)
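
Before the per-file hunks, a condensed illustration of the renaming pattern this patch applies throughout (this snippet is not part of the patch itself): with dma_v3.c gone, only one implementation remains, so the ioat3_ prefixes collapse to plain ioat_ while the function bodies are carried over unchanged. The DCA ops table from dca.c shows the pattern:

```c
/* Illustration of the rename only; the callbacks themselves are unchanged. */
static struct dca_ops ioat_dca_ops = {
	.add_requester    = ioat_dca_add_requester,	/* was ioat3_dca_add_requester */
	.remove_requester = ioat_dca_remove_requester,	/* was ioat3_dca_remove_requester */
	.get_tag          = ioat_dca_get_tag,		/* was ioat3_dca_get_tag */
	.dev_managed      = ioat_dca_dev_managed,	/* already version-neutral */
};
```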
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile
index 3a7e66464d0c..cf5fedbe2b75 100644
--- a/drivers/dma/ioat/Makefile
+++ b/drivers/dma/ioat/Makefile
@@ -1,2 +1,2 @@ | |||
1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o |
2 | ioatdma-y := init.o dma.o dma_v3.o prep.o dca.o sysfs.o | 2 | ioatdma-y := init.o dma.o prep.o dca.o sysfs.o |
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index f2b9a421985a..2cb7c308d5c7 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -132,7 +132,7 @@ static int ioat_dca_dev_managed(struct dca_provider *dca, | |||
132 | return 0; | 132 | return 0; |
133 | } | 133 | } |
134 | 134 | ||
135 | static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) | 135 | static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev) |
136 | { | 136 | { |
137 | struct ioat_dca_priv *ioatdca = dca_priv(dca); | 137 | struct ioat_dca_priv *ioatdca = dca_priv(dca); |
138 | struct pci_dev *pdev; | 138 | struct pci_dev *pdev; |
@@ -166,7 +166,7 @@ static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) | |||
166 | return -EFAULT; | 166 | return -EFAULT; |
167 | } | 167 | } |
168 | 168 | ||
169 | static int ioat3_dca_remove_requester(struct dca_provider *dca, | 169 | static int ioat_dca_remove_requester(struct dca_provider *dca, |
170 | struct device *dev) | 170 | struct device *dev) |
171 | { | 171 | { |
172 | struct ioat_dca_priv *ioatdca = dca_priv(dca); | 172 | struct ioat_dca_priv *ioatdca = dca_priv(dca); |
@@ -193,7 +193,7 @@ static int ioat3_dca_remove_requester(struct dca_provider *dca, | |||
193 | return -ENODEV; | 193 | return -ENODEV; |
194 | } | 194 | } |
195 | 195 | ||
196 | static u8 ioat3_dca_get_tag(struct dca_provider *dca, | 196 | static u8 ioat_dca_get_tag(struct dca_provider *dca, |
197 | struct device *dev, | 197 | struct device *dev, |
198 | int cpu) | 198 | int cpu) |
199 | { | 199 | { |
@@ -224,14 +224,14 @@ static u8 ioat3_dca_get_tag(struct dca_provider *dca, | |||
224 | return tag; | 224 | return tag; |
225 | } | 225 | } |
226 | 226 | ||
227 | static struct dca_ops ioat3_dca_ops = { | 227 | static struct dca_ops ioat_dca_ops = { |
228 | .add_requester = ioat3_dca_add_requester, | 228 | .add_requester = ioat_dca_add_requester, |
229 | .remove_requester = ioat3_dca_remove_requester, | 229 | .remove_requester = ioat_dca_remove_requester, |
230 | .get_tag = ioat3_dca_get_tag, | 230 | .get_tag = ioat_dca_get_tag, |
231 | .dev_managed = ioat_dca_dev_managed, | 231 | .dev_managed = ioat_dca_dev_managed, |
232 | }; | 232 | }; |
233 | 233 | ||
234 | static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) | 234 | static int ioat_dca_count_dca_slots(void *iobase, u16 dca_offset) |
235 | { | 235 | { |
236 | int slots = 0; | 236 | int slots = 0; |
237 | u32 req; | 237 | u32 req; |
@@ -266,7 +266,7 @@ static inline int dca3_tag_map_invalid(u8 *tag_map) | |||
266 | (tag_map[4] == DCA_TAG_MAP_VALID)); | 266 | (tag_map[4] == DCA_TAG_MAP_VALID)); |
267 | } | 267 | } |
268 | 268 | ||
269 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) | 269 | struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) |
270 | { | 270 | { |
271 | struct dca_provider *dca; | 271 | struct dca_provider *dca; |
272 | struct ioat_dca_priv *ioatdca; | 272 | struct ioat_dca_priv *ioatdca; |
@@ -293,11 +293,11 @@ struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) | |||
293 | if (dca_offset == 0) | 293 | if (dca_offset == 0) |
294 | return NULL; | 294 | return NULL; |
295 | 295 | ||
296 | slots = ioat3_dca_count_dca_slots(iobase, dca_offset); | 296 | slots = ioat_dca_count_dca_slots(iobase, dca_offset); |
297 | if (slots == 0) | 297 | if (slots == 0) |
298 | return NULL; | 298 | return NULL; |
299 | 299 | ||
300 | dca = alloc_dca_provider(&ioat3_dca_ops, | 300 | dca = alloc_dca_provider(&ioat_dca_ops, |
301 | sizeof(*ioatdca) | 301 | sizeof(*ioatdca) |
302 | + (sizeof(struct ioat_dca_slot) * slots)); | 302 | + (sizeof(struct ioat_dca_slot) * slots)); |
303 | if (!dca) | 303 | if (!dca) |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index e67eda055ea5..2031bb4ad536 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -37,6 +37,8 @@ | |||
37 | 37 | ||
38 | #include "../dmaengine.h" | 38 | #include "../dmaengine.h" |
39 | 39 | ||
40 | static void ioat_eh(struct ioatdma_chan *ioat_chan); | ||
41 | |||
40 | /** | 42 | /** |
41 | * ioat_dma_do_interrupt - handler used for single vector interrupt mode | 43 | * ioat_dma_do_interrupt - handler used for single vector interrupt mode |
42 | * @irq: interrupt id | 44 | * @irq: interrupt id |
@@ -122,59 +124,7 @@ void ioat_stop(struct ioatdma_chan *ioat_chan) | |||
122 | ioat_dma->cleanup_fn((unsigned long)&ioat_chan->dma_chan); | 124 | ioat_dma->cleanup_fn((unsigned long)&ioat_chan->dma_chan); |
123 | } | 125 | } |
124 | 126 | ||
125 | dma_addr_t ioat_get_current_completion(struct ioatdma_chan *ioat_chan) | 127 | static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan) |
126 | { | ||
127 | dma_addr_t phys_complete; | ||
128 | u64 completion; | ||
129 | |||
130 | completion = *ioat_chan->completion; | ||
131 | phys_complete = ioat_chansts_to_addr(completion); | ||
132 | |||
133 | dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__, | ||
134 | (unsigned long long) phys_complete); | ||
135 | |||
136 | if (is_ioat_halted(completion)) { | ||
137 | u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
138 | |||
139 | dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n", | ||
140 | chanerr); | ||
141 | |||
142 | /* TODO do something to salvage the situation */ | ||
143 | } | ||
144 | |||
145 | return phys_complete; | ||
146 | } | ||
147 | |||
148 | bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan, | ||
149 | dma_addr_t *phys_complete) | ||
150 | { | ||
151 | *phys_complete = ioat_get_current_completion(ioat_chan); | ||
152 | if (*phys_complete == ioat_chan->last_completion) | ||
153 | return false; | ||
154 | clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); | ||
155 | mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
156 | |||
157 | return true; | ||
158 | } | ||
159 | |||
160 | enum dma_status | ||
161 | ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | ||
162 | struct dma_tx_state *txstate) | ||
163 | { | ||
164 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
165 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | ||
166 | enum dma_status ret; | ||
167 | |||
168 | ret = dma_cookie_status(c, cookie, txstate); | ||
169 | if (ret == DMA_COMPLETE) | ||
170 | return ret; | ||
171 | |||
172 | ioat_dma->cleanup_fn((unsigned long) c); | ||
173 | |||
174 | return dma_cookie_status(c, cookie, txstate); | ||
175 | } | ||
176 | |||
177 | void __ioat_issue_pending(struct ioatdma_chan *ioat_chan) | ||
178 | { | 128 | { |
179 | ioat_chan->dmacount += ioat_ring_pending(ioat_chan); | 129 | ioat_chan->dmacount += ioat_ring_pending(ioat_chan); |
180 | ioat_chan->issued = ioat_chan->head; | 130 | ioat_chan->issued = ioat_chan->head; |
@@ -251,7 +201,7 @@ void ioat_start_null_desc(struct ioatdma_chan *ioat_chan) | |||
251 | spin_unlock_bh(&ioat_chan->prep_lock); | 201 | spin_unlock_bh(&ioat_chan->prep_lock); |
252 | } | 202 | } |
253 | 203 | ||
254 | void __ioat_restart_chan(struct ioatdma_chan *ioat_chan) | 204 | static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan) |
255 | { | 205 | { |
256 | /* set the tail to be re-issued */ | 206 | /* set the tail to be re-issued */ |
257 | ioat_chan->issued = ioat_chan->tail; | 207 | ioat_chan->issued = ioat_chan->tail; |
@@ -274,7 +224,7 @@ void __ioat_restart_chan(struct ioatdma_chan *ioat_chan) | |||
274 | __ioat_start_null_desc(ioat_chan); | 224 | __ioat_start_null_desc(ioat_chan); |
275 | } | 225 | } |
276 | 226 | ||
277 | int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo) | 227 | static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo) |
278 | { | 228 | { |
279 | unsigned long end = jiffies + tmo; | 229 | unsigned long end = jiffies + tmo; |
280 | int err = 0; | 230 | int err = 0; |
@@ -295,7 +245,7 @@ int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo) | |||
295 | return err; | 245 | return err; |
296 | } | 246 | } |
297 | 247 | ||
298 | int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo) | 248 | static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo) |
299 | { | 249 | { |
300 | unsigned long end = jiffies + tmo; | 250 | unsigned long end = jiffies + tmo; |
301 | int err = 0; | 251 | int err = 0; |
@@ -411,7 +361,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) | |||
411 | return ring; | 361 | return ring; |
412 | } | 362 | } |
413 | 363 | ||
414 | bool reshape_ring(struct ioatdma_chan *ioat_chan, int order) | 364 | static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order) |
415 | { | 365 | { |
416 | /* reshape differs from normal ring allocation in that we want | 366 | /* reshape differs from normal ring allocation in that we want |
417 | * to allocate a new software ring while only | 367 | * to allocate a new software ring while only |
@@ -578,3 +528,464 @@ int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs) | |||
578 | 528 | ||
579 | return -ENOMEM; | 529 | return -ENOMEM; |
580 | } | 530 | } |
531 | |||
532 | static bool desc_has_ext(struct ioat_ring_ent *desc) | ||
533 | { | ||
534 | struct ioat_dma_descriptor *hw = desc->hw; | ||
535 | |||
536 | if (hw->ctl_f.op == IOAT_OP_XOR || | ||
537 | hw->ctl_f.op == IOAT_OP_XOR_VAL) { | ||
538 | struct ioat_xor_descriptor *xor = desc->xor; | ||
539 | |||
540 | if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5) | ||
541 | return true; | ||
542 | } else if (hw->ctl_f.op == IOAT_OP_PQ || | ||
543 | hw->ctl_f.op == IOAT_OP_PQ_VAL) { | ||
544 | struct ioat_pq_descriptor *pq = desc->pq; | ||
545 | |||
546 | if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3) | ||
547 | return true; | ||
548 | } | ||
549 | |||
550 | return false; | ||
551 | } | ||
552 | |||
553 | static void | ||
554 | ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed) | ||
555 | { | ||
556 | if (!sed) | ||
557 | return; | ||
558 | |||
559 | dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); | ||
560 | kmem_cache_free(ioat_sed_cache, sed); | ||
561 | } | ||
562 | |||
563 | static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan) | ||
564 | { | ||
565 | u64 phys_complete; | ||
566 | u64 completion; | ||
567 | |||
568 | completion = *ioat_chan->completion; | ||
569 | phys_complete = ioat_chansts_to_addr(completion); | ||
570 | |||
571 | dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__, | ||
572 | (unsigned long long) phys_complete); | ||
573 | |||
574 | return phys_complete; | ||
575 | } | ||
576 | |||
577 | static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan, | ||
578 | u64 *phys_complete) | ||
579 | { | ||
580 | *phys_complete = ioat_get_current_completion(ioat_chan); | ||
581 | if (*phys_complete == ioat_chan->last_completion) | ||
582 | return false; | ||
583 | |||
584 | clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); | ||
585 | mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
586 | |||
587 | return true; | ||
588 | } | ||
589 | |||
590 | static void | ||
591 | desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc) | ||
592 | { | ||
593 | struct ioat_dma_descriptor *hw = desc->hw; | ||
594 | |||
595 | switch (hw->ctl_f.op) { | ||
596 | case IOAT_OP_PQ_VAL: | ||
597 | case IOAT_OP_PQ_VAL_16S: | ||
598 | { | ||
599 | struct ioat_pq_descriptor *pq = desc->pq; | ||
600 | |||
601 | /* check if there's error written */ | ||
602 | if (!pq->dwbes_f.wbes) | ||
603 | return; | ||
604 | |||
605 | /* need to set a chanerr var for checking to clear later */ | ||
606 | |||
607 | if (pq->dwbes_f.p_val_err) | ||
608 | *desc->result |= SUM_CHECK_P_RESULT; | ||
609 | |||
610 | if (pq->dwbes_f.q_val_err) | ||
611 | *desc->result |= SUM_CHECK_Q_RESULT; | ||
612 | |||
613 | return; | ||
614 | } | ||
615 | default: | ||
616 | return; | ||
617 | } | ||
618 | } | ||
619 | |||
620 | /** | ||
621 | * __cleanup - reclaim used descriptors | ||
622 | * @ioat: channel (ring) to clean | ||
623 | */ | ||
624 | static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) | ||
625 | { | ||
626 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | ||
627 | struct ioat_ring_ent *desc; | ||
628 | bool seen_current = false; | ||
629 | int idx = ioat_chan->tail, i; | ||
630 | u16 active; | ||
631 | |||
632 | dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n", | ||
633 | __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued); | ||
634 | |||
635 | /* | ||
636 | * At restart of the channel, the completion address and the | ||
637 | * channel status will be 0 due to starting a new chain. Since | ||
638 | * it's new chain and the first descriptor "fails", there is | ||
639 | * nothing to clean up. We do not want to reap the entire submitted | ||
640 | * chain due to this 0 address value and then BUG. | ||
641 | */ | ||
642 | if (!phys_complete) | ||
643 | return; | ||
644 | |||
645 | active = ioat_ring_active(ioat_chan); | ||
646 | for (i = 0; i < active && !seen_current; i++) { | ||
647 | struct dma_async_tx_descriptor *tx; | ||
648 | |||
649 | smp_read_barrier_depends(); | ||
650 | prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1)); | ||
651 | desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
652 | dump_desc_dbg(ioat_chan, desc); | ||
653 | |||
654 | /* set err stat if we are using dwbes */ | ||
655 | if (ioat_dma->cap & IOAT_CAP_DWBES) | ||
656 | desc_get_errstat(ioat_chan, desc); | ||
657 | |||
658 | tx = &desc->txd; | ||
659 | if (tx->cookie) { | ||
660 | dma_cookie_complete(tx); | ||
661 | dma_descriptor_unmap(tx); | ||
662 | if (tx->callback) { | ||
663 | tx->callback(tx->callback_param); | ||
664 | tx->callback = NULL; | ||
665 | } | ||
666 | } | ||
667 | |||
668 | if (tx->phys == phys_complete) | ||
669 | seen_current = true; | ||
670 | |||
671 | /* skip extended descriptors */ | ||
672 | if (desc_has_ext(desc)) { | ||
673 | BUG_ON(i + 1 >= active); | ||
674 | i++; | ||
675 | } | ||
676 | |||
677 | /* cleanup super extended descriptors */ | ||
678 | if (desc->sed) { | ||
679 | ioat_free_sed(ioat_dma, desc->sed); | ||
680 | desc->sed = NULL; | ||
681 | } | ||
682 | } | ||
683 | |||
684 | /* finish all descriptor reads before incrementing tail */ | ||
685 | smp_mb(); | ||
686 | ioat_chan->tail = idx + i; | ||
687 | /* no active descs have written a completion? */ | ||
688 | BUG_ON(active && !seen_current); | ||
689 | ioat_chan->last_completion = phys_complete; | ||
690 | |||
691 | if (active - i == 0) { | ||
692 | dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n", | ||
693 | __func__); | ||
694 | clear_bit(IOAT_COMPLETION_PENDING, &ioat_chan->state); | ||
695 | mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); | ||
696 | } | ||
697 | |||
698 | /* 5 microsecond delay per pending descriptor */ | ||
699 | writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), | ||
700 | ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET); | ||
701 | } | ||
702 | |||
703 | static void ioat_cleanup(struct ioatdma_chan *ioat_chan) | ||
704 | { | ||
705 | u64 phys_complete; | ||
706 | |||
707 | spin_lock_bh(&ioat_chan->cleanup_lock); | ||
708 | |||
709 | if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) | ||
710 | __cleanup(ioat_chan, phys_complete); | ||
711 | |||
712 | if (is_ioat_halted(*ioat_chan->completion)) { | ||
713 | u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
714 | |||
715 | if (chanerr & IOAT_CHANERR_HANDLE_MASK) { | ||
716 | mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); | ||
717 | ioat_eh(ioat_chan); | ||
718 | } | ||
719 | } | ||
720 | |||
721 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
722 | } | ||
723 | |||
724 | void ioat_cleanup_event(unsigned long data) | ||
725 | { | ||
726 | struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); | ||
727 | |||
728 | ioat_cleanup(ioat_chan); | ||
729 | if (!test_bit(IOAT_RUN, &ioat_chan->state)) | ||
730 | return; | ||
731 | writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); | ||
732 | } | ||
733 | |||
734 | static void ioat_restart_channel(struct ioatdma_chan *ioat_chan) | ||
735 | { | ||
736 | u64 phys_complete; | ||
737 | |||
738 | ioat_quiesce(ioat_chan, 0); | ||
739 | if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) | ||
740 | __cleanup(ioat_chan, phys_complete); | ||
741 | |||
742 | __ioat_restart_chan(ioat_chan); | ||
743 | } | ||
744 | |||
745 | static void ioat_eh(struct ioatdma_chan *ioat_chan) | ||
746 | { | ||
747 | struct pci_dev *pdev = to_pdev(ioat_chan); | ||
748 | struct ioat_dma_descriptor *hw; | ||
749 | struct dma_async_tx_descriptor *tx; | ||
750 | u64 phys_complete; | ||
751 | struct ioat_ring_ent *desc; | ||
752 | u32 err_handled = 0; | ||
753 | u32 chanerr_int; | ||
754 | u32 chanerr; | ||
755 | |||
756 | /* cleanup so tail points to descriptor that caused the error */ | ||
757 | if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) | ||
758 | __cleanup(ioat_chan, phys_complete); | ||
759 | |||
760 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
761 | pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); | ||
762 | |||
763 | dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n", | ||
764 | __func__, chanerr, chanerr_int); | ||
765 | |||
766 | desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail); | ||
767 | hw = desc->hw; | ||
768 | dump_desc_dbg(ioat_chan, desc); | ||
769 | |||
770 | switch (hw->ctl_f.op) { | ||
771 | case IOAT_OP_XOR_VAL: | ||
772 | if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { | ||
773 | *desc->result |= SUM_CHECK_P_RESULT; | ||
774 | err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; | ||
775 | } | ||
776 | break; | ||
777 | case IOAT_OP_PQ_VAL: | ||
778 | case IOAT_OP_PQ_VAL_16S: | ||
779 | if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { | ||
780 | *desc->result |= SUM_CHECK_P_RESULT; | ||
781 | err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; | ||
782 | } | ||
783 | if (chanerr & IOAT_CHANERR_XOR_Q_ERR) { | ||
784 | *desc->result |= SUM_CHECK_Q_RESULT; | ||
785 | err_handled |= IOAT_CHANERR_XOR_Q_ERR; | ||
786 | } | ||
787 | break; | ||
788 | } | ||
789 | |||
790 | /* fault on unhandled error or spurious halt */ | ||
791 | if (chanerr ^ err_handled || chanerr == 0) { | ||
792 | dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n", | ||
793 | __func__, chanerr, err_handled); | ||
794 | BUG(); | ||
795 | } else { /* cleanup the faulty descriptor */ | ||
796 | tx = &desc->txd; | ||
797 | if (tx->cookie) { | ||
798 | dma_cookie_complete(tx); | ||
799 | dma_descriptor_unmap(tx); | ||
800 | if (tx->callback) { | ||
801 | tx->callback(tx->callback_param); | ||
802 | tx->callback = NULL; | ||
803 | } | ||
804 | } | ||
805 | } | ||
806 | |||
807 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
808 | pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); | ||
809 | |||
810 | /* mark faulting descriptor as complete */ | ||
811 | *ioat_chan->completion = desc->txd.phys; | ||
812 | |||
813 | spin_lock_bh(&ioat_chan->prep_lock); | ||
814 | ioat_restart_channel(ioat_chan); | ||
815 | spin_unlock_bh(&ioat_chan->prep_lock); | ||
816 | } | ||
817 | |||
818 | static void check_active(struct ioatdma_chan *ioat_chan) | ||
819 | { | ||
820 | if (ioat_ring_active(ioat_chan)) { | ||
821 | mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
822 | return; | ||
823 | } | ||
824 | |||
825 | if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state)) | ||
826 | mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); | ||
827 | else if (ioat_chan->alloc_order > ioat_get_alloc_order()) { | ||
828 | /* if the ring is idle, empty, and oversized try to step | ||
829 | * down the size | ||
830 | */ | ||
831 | reshape_ring(ioat_chan, ioat_chan->alloc_order - 1); | ||
832 | |||
833 | /* keep shrinking until we get back to our minimum | ||
834 | * default size | ||
835 | */ | ||
836 | if (ioat_chan->alloc_order > ioat_get_alloc_order()) | ||
837 | mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); | ||
838 | } | ||
839 | |||
840 | } | ||
841 | |||
842 | void ioat_timer_event(unsigned long data) | ||
843 | { | ||
844 | struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); | ||
845 | dma_addr_t phys_complete; | ||
846 | u64 status; | ||
847 | |||
848 | status = ioat_chansts(ioat_chan); | ||
849 | |||
850 | /* when halted due to errors check for channel | ||
851 | * programming errors before advancing the completion state | ||
852 | */ | ||
853 | if (is_ioat_halted(status)) { | ||
854 | u32 chanerr; | ||
855 | |||
856 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
857 | dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n", | ||
858 | __func__, chanerr); | ||
859 | if (test_bit(IOAT_RUN, &ioat_chan->state)) | ||
860 | BUG_ON(is_ioat_bug(chanerr)); | ||
861 | else /* we never got off the ground */ | ||
862 | return; | ||
863 | } | ||
864 | |||
865 | /* if we haven't made progress and we have already | ||
866 | * acknowledged a pending completion once, then be more | ||
867 | * forceful with a restart | ||
868 | */ | ||
869 | spin_lock_bh(&ioat_chan->cleanup_lock); | ||
870 | if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) | ||
871 | __cleanup(ioat_chan, phys_complete); | ||
872 | else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { | ||
873 | spin_lock_bh(&ioat_chan->prep_lock); | ||
874 | ioat_restart_channel(ioat_chan); | ||
875 | spin_unlock_bh(&ioat_chan->prep_lock); | ||
876 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
877 | return; | ||
878 | } else { | ||
879 | set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); | ||
880 | mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
881 | } | ||
882 | |||
883 | |||
884 | if (ioat_ring_active(ioat_chan)) | ||
885 | mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
886 | else { | ||
887 | spin_lock_bh(&ioat_chan->prep_lock); | ||
888 | check_active(ioat_chan); | ||
889 | spin_unlock_bh(&ioat_chan->prep_lock); | ||
890 | } | ||
891 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
892 | } | ||
893 | |||
894 | enum dma_status | ||
895 | ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, | ||
896 | struct dma_tx_state *txstate) | ||
897 | { | ||
898 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
899 | enum dma_status ret; | ||
900 | |||
901 | ret = dma_cookie_status(c, cookie, txstate); | ||
902 | if (ret == DMA_COMPLETE) | ||
903 | return ret; | ||
904 | |||
905 | ioat_cleanup(ioat_chan); | ||
906 | |||
907 | return dma_cookie_status(c, cookie, txstate); | ||
908 | } | ||
909 | |||
910 | static int ioat_irq_reinit(struct ioatdma_device *ioat_dma) | ||
911 | { | ||
912 | struct pci_dev *pdev = ioat_dma->pdev; | ||
913 | int irq = pdev->irq, i; | ||
914 | |||
915 | if (!is_bwd_ioat(pdev)) | ||
916 | return 0; | ||
917 | |||
918 | switch (ioat_dma->irq_mode) { | ||
919 | case IOAT_MSIX: | ||
920 | for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) { | ||
921 | struct msix_entry *msix = &ioat_dma->msix_entries[i]; | ||
922 | struct ioatdma_chan *ioat_chan; | ||
923 | |||
924 | ioat_chan = ioat_chan_by_index(ioat_dma, i); | ||
925 | devm_free_irq(&pdev->dev, msix->vector, ioat_chan); | ||
926 | } | ||
927 | |||
928 | pci_disable_msix(pdev); | ||
929 | break; | ||
930 | case IOAT_MSI: | ||
931 | pci_disable_msi(pdev); | ||
932 | /* fall through */ | ||
933 | case IOAT_INTX: | ||
934 | devm_free_irq(&pdev->dev, irq, ioat_dma); | ||
935 | break; | ||
936 | default: | ||
937 | return 0; | ||
938 | } | ||
939 | ioat_dma->irq_mode = IOAT_NOIRQ; | ||
940 | |||
941 | return ioat_dma_setup_interrupts(ioat_dma); | ||
942 | } | ||
943 | |||
944 | int ioat_reset_hw(struct ioatdma_chan *ioat_chan) | ||
945 | { | ||
946 | /* throw away whatever the channel was doing and get it | ||
947 | * initialized, with ioat3 specific workarounds | ||
948 | */ | ||
949 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | ||
950 | struct pci_dev *pdev = ioat_dma->pdev; | ||
951 | u32 chanerr; | ||
952 | u16 dev_id; | ||
953 | int err; | ||
954 | |||
955 | ioat_quiesce(ioat_chan, msecs_to_jiffies(100)); | ||
956 | |||
957 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
958 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
959 | |||
960 | if (ioat_dma->version < IOAT_VER_3_3) { | ||
961 | /* clear any pending errors */ | ||
962 | err = pci_read_config_dword(pdev, | ||
963 | IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); | ||
964 | if (err) { | ||
965 | dev_err(&pdev->dev, | ||
966 | "channel error register unreachable\n"); | ||
967 | return err; | ||
968 | } | ||
969 | pci_write_config_dword(pdev, | ||
970 | IOAT_PCI_CHANERR_INT_OFFSET, chanerr); | ||
971 | |||
972 | /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | ||
973 | * (workaround for spurious config parity error after restart) | ||
974 | */ | ||
975 | pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); | ||
976 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { | ||
977 | pci_write_config_dword(pdev, | ||
978 | IOAT_PCI_DMAUNCERRSTS_OFFSET, | ||
979 | 0x10); | ||
980 | } | ||
981 | } | ||
982 | |||
983 | err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200)); | ||
984 | if (!err) | ||
985 | err = ioat_irq_reinit(ioat_dma); | ||
986 | |||
987 | if (err) | ||
988 | dev_err(&pdev->dev, "Failed to reset: %d\n", err); | ||
989 | |||
990 | return err; | ||
991 | } | ||
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index a319befad1a3..2e1f05464703 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -438,24 +438,15 @@ ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
438 | struct dma_tx_state *txstate); | 438 | struct dma_tx_state *txstate); |
439 | void ioat_cleanup_event(unsigned long data); | 439 | void ioat_cleanup_event(unsigned long data); |
440 | void ioat_timer_event(unsigned long data); | 440 | void ioat_timer_event(unsigned long data); |
441 | enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | ||
442 | struct dma_tx_state *txstate); | ||
443 | bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan, | ||
444 | dma_addr_t *phys_complete); | ||
445 | int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs); | 441 | int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs); |
446 | void ioat_issue_pending(struct dma_chan *chan); | 442 | void ioat_issue_pending(struct dma_chan *chan); |
447 | bool reshape_ring(struct ioatdma_chan *ioat, int order); | ||
448 | void __ioat_issue_pending(struct ioatdma_chan *ioat_chan); | ||
449 | void ioat_timer_event(unsigned long data); | 443 | void ioat_timer_event(unsigned long data); |
450 | int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo); | ||
451 | int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo); | ||
452 | void __ioat_restart_chan(struct ioatdma_chan *ioat_chan); | ||
453 | 444 | ||
454 | /* IOAT Init functions */ | 445 | /* IOAT Init functions */ |
455 | bool is_bwd_ioat(struct pci_dev *pdev); | 446 | bool is_bwd_ioat(struct pci_dev *pdev); |
447 | struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
456 | void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type); | 448 | void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type); |
457 | void ioat_kobject_del(struct ioatdma_device *ioat_dma); | 449 | void ioat_kobject_del(struct ioatdma_device *ioat_dma); |
458 | int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma); | 450 | int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma); |
459 | void ioat_stop(struct ioatdma_chan *ioat_chan); | 451 | void ioat_stop(struct ioatdma_chan *ioat_chan); |
460 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
461 | #endif /* IOATDMA_H */ | 452 | #endif /* IOATDMA_H */ |
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
deleted file mode 100644
index d0ae8f7c97a6..000000000000
--- a/drivers/dma/ioat/dma_v3.c
+++ /dev/null
@@ -1,525 +0,0 @@ | |||
1 | /* | ||
2 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | * redistributing this file, you may do so under either license. | ||
4 | * | ||
5 | * GPL LICENSE SUMMARY | ||
6 | * | ||
7 | * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms and conditions of the GNU General Public License, | ||
11 | * version 2, as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in | ||
19 | * the file called "COPYING". | ||
20 | * | ||
21 | * BSD LICENSE | ||
22 | * | ||
23 | * Copyright(c) 2004-2009 Intel Corporation. All rights reserved. | ||
24 | * | ||
25 | * Redistribution and use in source and binary forms, with or without | ||
26 | * modification, are permitted provided that the following conditions are met: | ||
27 | * | ||
28 | * * Redistributions of source code must retain the above copyright | ||
29 | * notice, this list of conditions and the following disclaimer. | ||
30 | * * Redistributions in binary form must reproduce the above copyright | ||
31 | * notice, this list of conditions and the following disclaimer in | ||
32 | * the documentation and/or other materials provided with the | ||
33 | * distribution. | ||
34 | * * Neither the name of Intel Corporation nor the names of its | ||
35 | * contributors may be used to endorse or promote products derived | ||
36 | * from this software without specific prior written permission. | ||
37 | * | ||
38 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
39 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
40 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
41 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
42 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
43 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
44 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
45 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
46 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
47 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
48 | * POSSIBILITY OF SUCH DAMAGE. | ||
49 | */ | ||
50 | |||
51 | /* | ||
52 | * Support routines for v3+ hardware | ||
53 | */ | ||
54 | #include <linux/module.h> | ||
55 | #include <linux/pci.h> | ||
56 | #include <linux/gfp.h> | ||
57 | #include <linux/dmaengine.h> | ||
58 | #include <linux/dma-mapping.h> | ||
59 | #include <linux/prefetch.h> | ||
60 | #include "../dmaengine.h" | ||
61 | #include "registers.h" | ||
62 | #include "hw.h" | ||
63 | #include "dma.h" | ||
64 | |||
65 | static void ioat3_eh(struct ioatdma_chan *ioat_chan); | ||
66 | |||
67 | static bool desc_has_ext(struct ioat_ring_ent *desc) | ||
68 | { | ||
69 | struct ioat_dma_descriptor *hw = desc->hw; | ||
70 | |||
71 | if (hw->ctl_f.op == IOAT_OP_XOR || | ||
72 | hw->ctl_f.op == IOAT_OP_XOR_VAL) { | ||
73 | struct ioat_xor_descriptor *xor = desc->xor; | ||
74 | |||
75 | if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5) | ||
76 | return true; | ||
77 | } else if (hw->ctl_f.op == IOAT_OP_PQ || | ||
78 | hw->ctl_f.op == IOAT_OP_PQ_VAL) { | ||
79 | struct ioat_pq_descriptor *pq = desc->pq; | ||
80 | |||
81 | if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3) | ||
82 | return true; | ||
83 | } | ||
84 | |||
85 | return false; | ||
86 | } | ||
87 | |||
88 | static void | ||
89 | ioat3_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed) | ||
90 | { | ||
91 | if (!sed) | ||
92 | return; | ||
93 | |||
94 | dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); | ||
95 | kmem_cache_free(ioat_sed_cache, sed); | ||
96 | } | ||
97 | |||
98 | static u64 ioat3_get_current_completion(struct ioatdma_chan *ioat_chan) | ||
99 | { | ||
100 | u64 phys_complete; | ||
101 | u64 completion; | ||
102 | |||
103 | completion = *ioat_chan->completion; | ||
104 | phys_complete = ioat_chansts_to_addr(completion); | ||
105 | |||
106 | dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__, | ||
107 | (unsigned long long) phys_complete); | ||
108 | |||
109 | return phys_complete; | ||
110 | } | ||
111 | |||
112 | static bool ioat3_cleanup_preamble(struct ioatdma_chan *ioat_chan, | ||
113 | u64 *phys_complete) | ||
114 | { | ||
115 | *phys_complete = ioat3_get_current_completion(ioat_chan); | ||
116 | if (*phys_complete == ioat_chan->last_completion) | ||
117 | return false; | ||
118 | |||
119 | clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); | ||
120 | mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
121 | |||
122 | return true; | ||
123 | } | ||
124 | |||
125 | static void | ||
126 | desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc) | ||
127 | { | ||
128 | struct ioat_dma_descriptor *hw = desc->hw; | ||
129 | |||
130 | switch (hw->ctl_f.op) { | ||
131 | case IOAT_OP_PQ_VAL: | ||
132 | case IOAT_OP_PQ_VAL_16S: | ||
133 | { | ||
134 | struct ioat_pq_descriptor *pq = desc->pq; | ||
135 | |||
136 | /* check if there's error written */ | ||
137 | if (!pq->dwbes_f.wbes) | ||
138 | return; | ||
139 | |||
140 | /* need to set a chanerr var for checking to clear later */ | ||
141 | |||
142 | if (pq->dwbes_f.p_val_err) | ||
143 | *desc->result |= SUM_CHECK_P_RESULT; | ||
144 | |||
145 | if (pq->dwbes_f.q_val_err) | ||
146 | *desc->result |= SUM_CHECK_Q_RESULT; | ||
147 | |||
148 | return; | ||
149 | } | ||
150 | default: | ||
151 | return; | ||
152 | } | ||
153 | } | ||
154 | |||
155 | /** | ||
156 | * __cleanup - reclaim used descriptors | ||
157 | * @ioat: channel (ring) to clean | ||
158 | * | ||
159 | * The difference from the dma_v2.c __cleanup() is that this routine | ||
160 | * handles extended descriptors and dma-unmapping raid operations. | ||
161 | */ | ||
162 | static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) | ||
163 | { | ||
164 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | ||
165 | struct ioat_ring_ent *desc; | ||
166 | bool seen_current = false; | ||
167 | int idx = ioat_chan->tail, i; | ||
168 | u16 active; | ||
169 | |||
170 | dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n", | ||
171 | __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued); | ||
172 | |||
173 | /* | ||
174 | * At restart of the channel, the completion address and the | ||
175 | * channel status will be 0 due to starting a new chain. Since | ||
176 | * it's new chain and the first descriptor "fails", there is | ||
177 | * nothing to clean up. We do not want to reap the entire submitted | ||
178 | * chain due to this 0 address value and then BUG. | ||
179 | */ | ||
180 | if (!phys_complete) | ||
181 | return; | ||
182 | |||
183 | active = ioat_ring_active(ioat_chan); | ||
184 | for (i = 0; i < active && !seen_current; i++) { | ||
185 | struct dma_async_tx_descriptor *tx; | ||
186 | |||
187 | smp_read_barrier_depends(); | ||
188 | prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1)); | ||
189 | desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
190 | dump_desc_dbg(ioat_chan, desc); | ||
191 | |||
192 | /* set err stat if we are using dwbes */ | ||
193 | if (ioat_dma->cap & IOAT_CAP_DWBES) | ||
194 | desc_get_errstat(ioat_chan, desc); | ||
195 | |||
196 | tx = &desc->txd; | ||
197 | if (tx->cookie) { | ||
198 | dma_cookie_complete(tx); | ||
199 | dma_descriptor_unmap(tx); | ||
200 | if (tx->callback) { | ||
201 | tx->callback(tx->callback_param); | ||
202 | tx->callback = NULL; | ||
203 | } | ||
204 | } | ||
205 | |||
206 | if (tx->phys == phys_complete) | ||
207 | seen_current = true; | ||
208 | |||
209 | /* skip extended descriptors */ | ||
210 | if (desc_has_ext(desc)) { | ||
211 | BUG_ON(i + 1 >= active); | ||
212 | i++; | ||
213 | } | ||
214 | |||
215 | /* cleanup super extended descriptors */ | ||
216 | if (desc->sed) { | ||
217 | ioat3_free_sed(ioat_dma, desc->sed); | ||
218 | desc->sed = NULL; | ||
219 | } | ||
220 | } | ||
221 | smp_mb(); /* finish all descriptor reads before incrementing tail */ | ||
222 | ioat_chan->tail = idx + i; | ||
223 | BUG_ON(active && !seen_current); /* no active descs have written a completion? */ | ||
224 | ioat_chan->last_completion = phys_complete; | ||
225 | |||
226 | if (active - i == 0) { | ||
227 | dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n", | ||
228 | __func__); | ||
229 | clear_bit(IOAT_COMPLETION_PENDING, &ioat_chan->state); | ||
230 | mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); | ||
231 | } | ||
232 | /* 5 microsecond delay per pending descriptor */ | ||
233 | writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), | ||
234 | ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET); | ||
235 | } | ||
236 | |||
237 | static void ioat3_cleanup(struct ioatdma_chan *ioat_chan) | ||
238 | { | ||
239 | u64 phys_complete; | ||
240 | |||
241 | spin_lock_bh(&ioat_chan->cleanup_lock); | ||
242 | |||
243 | if (ioat3_cleanup_preamble(ioat_chan, &phys_complete)) | ||
244 | __cleanup(ioat_chan, phys_complete); | ||
245 | |||
246 | if (is_ioat_halted(*ioat_chan->completion)) { | ||
247 | u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
248 | |||
249 | if (chanerr & IOAT_CHANERR_HANDLE_MASK) { | ||
250 | mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); | ||
251 | ioat3_eh(ioat_chan); | ||
252 | } | ||
253 | } | ||
254 | |||
255 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
256 | } | ||
257 | |||
258 | void ioat_cleanup_event(unsigned long data) | ||
259 | { | ||
260 | struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); | ||
261 | |||
262 | ioat3_cleanup(ioat_chan); | ||
263 | if (!test_bit(IOAT_RUN, &ioat_chan->state)) | ||
264 | return; | ||
265 | writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); | ||
266 | } | ||
267 | |||
268 | static void ioat3_restart_channel(struct ioatdma_chan *ioat_chan) | ||
269 | { | ||
270 | u64 phys_complete; | ||
271 | |||
272 | ioat_quiesce(ioat_chan, 0); | ||
273 | if (ioat3_cleanup_preamble(ioat_chan, &phys_complete)) | ||
274 | __cleanup(ioat_chan, phys_complete); | ||
275 | |||
276 | __ioat_restart_chan(ioat_chan); | ||
277 | } | ||
278 | |||
279 | static void ioat3_eh(struct ioatdma_chan *ioat_chan) | ||
280 | { | ||
281 | struct pci_dev *pdev = to_pdev(ioat_chan); | ||
282 | struct ioat_dma_descriptor *hw; | ||
283 | struct dma_async_tx_descriptor *tx; | ||
284 | u64 phys_complete; | ||
285 | struct ioat_ring_ent *desc; | ||
286 | u32 err_handled = 0; | ||
287 | u32 chanerr_int; | ||
288 | u32 chanerr; | ||
289 | |||
290 | /* cleanup so tail points to descriptor that caused the error */ | ||
291 | if (ioat3_cleanup_preamble(ioat_chan, &phys_complete)) | ||
292 | __cleanup(ioat_chan, phys_complete); | ||
293 | |||
294 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
295 | pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); | ||
296 | |||
297 | dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n", | ||
298 | __func__, chanerr, chanerr_int); | ||
299 | |||
300 | desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail); | ||
301 | hw = desc->hw; | ||
302 | dump_desc_dbg(ioat_chan, desc); | ||
303 | |||
304 | switch (hw->ctl_f.op) { | ||
305 | case IOAT_OP_XOR_VAL: | ||
306 | if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { | ||
307 | *desc->result |= SUM_CHECK_P_RESULT; | ||
308 | err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; | ||
309 | } | ||
310 | break; | ||
311 | case IOAT_OP_PQ_VAL: | ||
312 | case IOAT_OP_PQ_VAL_16S: | ||
313 | if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { | ||
314 | *desc->result |= SUM_CHECK_P_RESULT; | ||
315 | err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; | ||
316 | } | ||
317 | if (chanerr & IOAT_CHANERR_XOR_Q_ERR) { | ||
318 | *desc->result |= SUM_CHECK_Q_RESULT; | ||
319 | err_handled |= IOAT_CHANERR_XOR_Q_ERR; | ||
320 | } | ||
321 | break; | ||
322 | } | ||
323 | |||
324 | /* fault on unhandled error or spurious halt */ | ||
325 | if (chanerr ^ err_handled || chanerr == 0) { | ||
326 | dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n", | ||
327 | __func__, chanerr, err_handled); | ||
328 | BUG(); | ||
329 | } else { /* cleanup the faulty descriptor */ | ||
330 | tx = &desc->txd; | ||
331 | if (tx->cookie) { | ||
332 | dma_cookie_complete(tx); | ||
333 | dma_descriptor_unmap(tx); | ||
334 | if (tx->callback) { | ||
335 | tx->callback(tx->callback_param); | ||
336 | tx->callback = NULL; | ||
337 | } | ||
338 | } | ||
339 | } | ||
340 | |||
341 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
342 | pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); | ||
343 | |||
344 | /* mark faulting descriptor as complete */ | ||
345 | *ioat_chan->completion = desc->txd.phys; | ||
346 | |||
347 | spin_lock_bh(&ioat_chan->prep_lock); | ||
348 | ioat3_restart_channel(ioat_chan); | ||
349 | spin_unlock_bh(&ioat_chan->prep_lock); | ||
350 | } | ||
351 | |||
352 | static void check_active(struct ioatdma_chan *ioat_chan) | ||
353 | { | ||
354 | if (ioat_ring_active(ioat_chan)) { | ||
355 | mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
356 | return; | ||
357 | } | ||
358 | |||
359 | if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state)) | ||
360 | mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); | ||
361 | else if (ioat_chan->alloc_order > ioat_get_alloc_order()) { | ||
362 | /* if the ring is idle, empty, and oversized try to step | ||
363 | * down the size | ||
364 | */ | ||
365 | reshape_ring(ioat_chan, ioat_chan->alloc_order - 1); | ||
366 | |||
367 | /* keep shrinking until we get back to our minimum | ||
368 | * default size | ||
369 | */ | ||
370 | if (ioat_chan->alloc_order > ioat_get_alloc_order()) | ||
371 | mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); | ||
372 | } | ||
373 | |||
374 | } | ||
375 | |||
376 | void ioat_timer_event(unsigned long data) | ||
377 | { | ||
378 | struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); | ||
379 | dma_addr_t phys_complete; | ||
380 | u64 status; | ||
381 | |||
382 | status = ioat_chansts(ioat_chan); | ||
383 | |||
384 | /* when halted due to errors check for channel | ||
385 | * programming errors before advancing the completion state | ||
386 | */ | ||
387 | if (is_ioat_halted(status)) { | ||
388 | u32 chanerr; | ||
389 | |||
390 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
391 | dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n", | ||
392 | __func__, chanerr); | ||
393 | if (test_bit(IOAT_RUN, &ioat_chan->state)) | ||
394 | BUG_ON(is_ioat_bug(chanerr)); | ||
395 | else /* we never got off the ground */ | ||
396 | return; | ||
397 | } | ||
398 | |||
399 | /* if we haven't made progress and we have already | ||
400 | * acknowledged a pending completion once, then be more | ||
401 | * forceful with a restart | ||
402 | */ | ||
403 | spin_lock_bh(&ioat_chan->cleanup_lock); | ||
404 | if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) | ||
405 | __cleanup(ioat_chan, phys_complete); | ||
406 | else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { | ||
407 | spin_lock_bh(&ioat_chan->prep_lock); | ||
408 | ioat3_restart_channel(ioat_chan); | ||
409 | spin_unlock_bh(&ioat_chan->prep_lock); | ||
410 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
411 | return; | ||
412 | } else { | ||
413 | set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); | ||
414 | mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
415 | } | ||
416 | |||
417 | |||
418 | if (ioat_ring_active(ioat_chan)) | ||
419 | mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
420 | else { | ||
421 | spin_lock_bh(&ioat_chan->prep_lock); | ||
422 | check_active(ioat_chan); | ||
423 | spin_unlock_bh(&ioat_chan->prep_lock); | ||
424 | } | ||
425 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
426 | } | ||
427 | |||
428 | enum dma_status | ||
429 | ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, | ||
430 | struct dma_tx_state *txstate) | ||
431 | { | ||
432 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
433 | enum dma_status ret; | ||
434 | |||
435 | ret = dma_cookie_status(c, cookie, txstate); | ||
436 | if (ret == DMA_COMPLETE) | ||
437 | return ret; | ||
438 | |||
439 | ioat3_cleanup(ioat_chan); | ||
440 | |||
441 | return dma_cookie_status(c, cookie, txstate); | ||
442 | } | ||
443 | |||
444 | static int ioat3_irq_reinit(struct ioatdma_device *ioat_dma) | ||
445 | { | ||
446 | struct pci_dev *pdev = ioat_dma->pdev; | ||
447 | int irq = pdev->irq, i; | ||
448 | |||
449 | if (!is_bwd_ioat(pdev)) | ||
450 | return 0; | ||
451 | |||
452 | switch (ioat_dma->irq_mode) { | ||
453 | case IOAT_MSIX: | ||
454 | for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) { | ||
455 | struct msix_entry *msix = &ioat_dma->msix_entries[i]; | ||
456 | struct ioatdma_chan *ioat_chan; | ||
457 | |||
458 | ioat_chan = ioat_chan_by_index(ioat_dma, i); | ||
459 | devm_free_irq(&pdev->dev, msix->vector, ioat_chan); | ||
460 | } | ||
461 | |||
462 | pci_disable_msix(pdev); | ||
463 | break; | ||
464 | case IOAT_MSI: | ||
465 | pci_disable_msi(pdev); | ||
466 | /* fall through */ | ||
467 | case IOAT_INTX: | ||
468 | devm_free_irq(&pdev->dev, irq, ioat_dma); | ||
469 | break; | ||
470 | default: | ||
471 | return 0; | ||
472 | } | ||
473 | ioat_dma->irq_mode = IOAT_NOIRQ; | ||
474 | |||
475 | return ioat_dma_setup_interrupts(ioat_dma); | ||
476 | } | ||
477 | |||
478 | int ioat_reset_hw(struct ioatdma_chan *ioat_chan) | ||
479 | { | ||
480 | /* throw away whatever the channel was doing and get it | ||
481 | * initialized, with ioat3 specific workarounds | ||
482 | */ | ||
483 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | ||
484 | struct pci_dev *pdev = ioat_dma->pdev; | ||
485 | u32 chanerr; | ||
486 | u16 dev_id; | ||
487 | int err; | ||
488 | |||
489 | ioat_quiesce(ioat_chan, msecs_to_jiffies(100)); | ||
490 | |||
491 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
492 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
493 | |||
494 | if (ioat_dma->version < IOAT_VER_3_3) { | ||
495 | /* clear any pending errors */ | ||
496 | err = pci_read_config_dword(pdev, | ||
497 | IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); | ||
498 | if (err) { | ||
499 | dev_err(&pdev->dev, | ||
500 | "channel error register unreachable\n"); | ||
501 | return err; | ||
502 | } | ||
503 | pci_write_config_dword(pdev, | ||
504 | IOAT_PCI_CHANERR_INT_OFFSET, chanerr); | ||
505 | |||
506 | /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | ||
507 | * (workaround for spurious config parity error after restart) | ||
508 | */ | ||
509 | pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); | ||
510 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { | ||
511 | pci_write_config_dword(pdev, | ||
512 | IOAT_PCI_DMAUNCERRSTS_OFFSET, | ||
513 | 0x10); | ||
514 | } | ||
515 | } | ||
516 | |||
517 | err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200)); | ||
518 | if (!err) | ||
519 | err = ioat3_irq_reinit(ioat_dma); | ||
520 | |||
521 | if (err) | ||
522 | dev_err(&pdev->dev, "Failed to reset: %d\n", err); | ||
523 | |||
524 | return err; | ||
525 | } | ||
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 6b8fd49cf718..e6969809d723 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -735,13 +735,6 @@ ioat_init_channel(struct ioatdma_device *ioat_dma, | |||
735 | tasklet_init(&ioat_chan->cleanup_task, ioat_dma->cleanup_fn, data); | 735 | tasklet_init(&ioat_chan->cleanup_task, ioat_dma->cleanup_fn, data); |
736 | } | 736 | } |
737 | 737 | ||
738 | static void ioat3_dma_test_callback(void *dma_async_param) | ||
739 | { | ||
740 | struct completion *cmp = dma_async_param; | ||
741 | |||
742 | complete(cmp); | ||
743 | } | ||
744 | |||
745 | #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ | 738 | #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ |
746 | static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) | 739 | static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) |
747 | { | 740 | { |
@@ -835,7 +828,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) | |||
835 | 828 | ||
836 | async_tx_ack(tx); | 829 | async_tx_ack(tx); |
837 | init_completion(&cmp); | 830 | init_completion(&cmp); |
838 | tx->callback = ioat3_dma_test_callback; | 831 | tx->callback = ioat_dma_test_callback; |
839 | tx->callback_param = &cmp; | 832 | tx->callback_param = &cmp; |
840 | cookie = tx->tx_submit(tx); | 833 | cookie = tx->tx_submit(tx); |
841 | if (cookie < 0) { | 834 | if (cookie < 0) { |
@@ -903,7 +896,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) | |||
903 | 896 | ||
904 | async_tx_ack(tx); | 897 | async_tx_ack(tx); |
905 | init_completion(&cmp); | 898 | init_completion(&cmp); |
906 | tx->callback = ioat3_dma_test_callback; | 899 | tx->callback = ioat_dma_test_callback; |
907 | tx->callback_param = &cmp; | 900 | tx->callback_param = &cmp; |
908 | cookie = tx->tx_submit(tx); | 901 | cookie = tx->tx_submit(tx); |
909 | if (cookie < 0) { | 902 | if (cookie < 0) { |
@@ -956,7 +949,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) | |||
956 | 949 | ||
957 | async_tx_ack(tx); | 950 | async_tx_ack(tx); |
958 | init_completion(&cmp); | 951 | init_completion(&cmp); |
959 | tx->callback = ioat3_dma_test_callback; | 952 | tx->callback = ioat_dma_test_callback; |
960 | tx->callback_param = &cmp; | 953 | tx->callback_param = &cmp; |
961 | cookie = tx->tx_submit(tx); | 954 | cookie = tx->tx_submit(tx); |
962 | if (cookie < 0) { | 955 | if (cookie < 0) { |
@@ -1024,7 +1017,7 @@ static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma) | |||
1024 | return 0; | 1017 | return 0; |
1025 | } | 1018 | } |
1026 | 1019 | ||
1027 | static void ioat3_intr_quirk(struct ioatdma_device *ioat_dma) | 1020 | static void ioat_intr_quirk(struct ioatdma_device *ioat_dma) |
1028 | { | 1021 | { |
1029 | struct dma_device *dma; | 1022 | struct dma_device *dma; |
1030 | struct dma_chan *c; | 1023 | struct dma_chan *c; |
@@ -1063,7 +1056,7 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | |||
1063 | ioat_dma->enumerate_channels = ioat_enumerate_channels; | 1056 | ioat_dma->enumerate_channels = ioat_enumerate_channels; |
1064 | ioat_dma->reset_hw = ioat_reset_hw; | 1057 | ioat_dma->reset_hw = ioat_reset_hw; |
1065 | ioat_dma->self_test = ioat3_dma_self_test; | 1058 | ioat_dma->self_test = ioat3_dma_self_test; |
1066 | ioat_dma->intr_quirk = ioat3_intr_quirk; | 1059 | ioat_dma->intr_quirk = ioat_intr_quirk; |
1067 | dma = &ioat_dma->dma_dev; | 1060 | dma = &ioat_dma->dma_dev; |
1068 | dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock; | 1061 | dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock; |
1069 | dma->device_issue_pending = ioat_issue_pending; | 1062 | dma->device_issue_pending = ioat_issue_pending; |
@@ -1162,7 +1155,7 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | |||
1162 | ioat_kobject_add(ioat_dma, &ioat_ktype); | 1155 | ioat_kobject_add(ioat_dma, &ioat_ktype); |
1163 | 1156 | ||
1164 | if (dca) | 1157 | if (dca) |
1165 | ioat_dma->dca = ioat3_dca_init(pdev, ioat_dma->reg_base); | 1158 | ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base); |
1166 | 1159 | ||
1167 | return 0; | 1160 | return 0; |
1168 | } | 1161 | } |
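
For context on how the consolidated entry points are exercised at runtime, here is a minimal, hypothetical dmaengine client sketch (not part of this patch, error handling trimmed): it assumes a memcpy-capable public channel and already-mapped DMA addresses, and it reaches ioat_dma_prep_memcpy_lock(), ioat_issue_pending() and ioat_tx_status() through the generic device ops that ioat3_dma_probe() wires up above.

```c
#include <linux/dmaengine.h>

/* Hypothetical client-side sketch; dst/src are assumed to be DMA-mapped already. */
static int example_ioat_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	dmaengine_get();			/* take a client reference on public channels */
	chan = dma_find_channel(DMA_MEMCPY);	/* any memcpy-capable channel, e.g. ioatdma */
	if (!chan)
		goto out;

	/* dispatches to ioat_dma_prep_memcpy_lock() via dma->device_prep_dma_memcpy */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		goto out;

	cookie = dmaengine_submit(tx);		/* assign a cookie, queue on the ring */
	dma_async_issue_pending(chan);		/* dispatches to ioat_issue_pending() */

	/* polls ioat_tx_status(), which now lives in dma.c after this patch */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
		cpu_relax();
out:
	dmaengine_put();
	return 0;
}
```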