author     Dave Jiang <dave.jiang@intel.com>	2015-08-11 11:48:49 -0400
committer  Vinod Koul <vinod.koul@intel.com>	2015-08-17 04:07:30 -0400
commit     599d49de7f69cb5a23e913db24e168ba2f09bd05 (patch)
tree       cce55d57f777b00e12e34f7c96f2e764a15e6547 /drivers/dma/ioat
parent     c0f28ce66ecfd9fa0ae662a2c7f3e68e537e77f4 (diff)
dmaengine: ioatdma: move dma prep functions to single location
Move all DMA descriptor prepping functions to prep.c file. Fixup all
broken bits caused by the move.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
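The routines consolidated into prep.c here (memcpy, xor, pq/pq16 and interrupt prep) are the ones the ioat driver wires up as its dmaengine device_prep_* callbacks, which is why their prototypes are regrouped under the new "IOAT Prep functions" comment in dma.h below. In-kernel users normally reach them through the generic dmaengine API rather than by calling ioat_dma_prep_memcpy_lock() and friends directly. As a rough illustration only (a hypothetical client, not code from this patch; assumes <linux/dmaengine.h> and already-mapped DMA addresses), a memcpy request flows through the framework roughly like this:

#include <linux/dmaengine.h>

/* Hypothetical consumer sketch -- illustration only, not part of this patch. */
static int example_memcpy_via_dmaengine(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	/* grab any channel advertising memcpy; on ioat hardware the prep
	 * call below ends up in ioat_dma_prep_memcpy_lock() */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EBUSY;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* ... wait for completion (callback or dma_async_is_tx_complete()) ... */
	dma_release_channel(chan);

	return dma_submit_error(cookie) ? -EIO : 0;
}

The descriptor returned by the prep call is the &desc->txd that ioat_dma_prep_memcpy_lock() hands back in the new prep.c below.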
Diffstat (limited to 'drivers/dma/ioat')

 -rw-r--r--  drivers/dma/ioat/Makefile |   2
 -rw-r--r--  drivers/dma/ioat/dma.c    |  47
 -rw-r--r--  drivers/dma/ioat/dma.h    |  81
 -rw-r--r--  drivers/dma/ioat/dma_v3.c | 659
 -rw-r--r--  drivers/dma/ioat/init.c   |  22
 -rw-r--r--  drivers/dma/ioat/prep.c   | 707
 6 files changed, 769 insertions(+), 749 deletions(-)
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile
index f785f8f42f7d..3a7e66464d0c 100644
--- a/drivers/dma/ioat/Makefile
+++ b/drivers/dma/ioat/Makefile
@@ -1,2 +1,2 @@
1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o |
2 | ioatdma-y := init.o dma.o dma_v3.o dca.o sysfs.o | 2 | ioatdma-y := init.o dma.o dma_v3.o prep.o dca.o sysfs.o |
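The Makefile hunk above is the entire build-system side of the move: obj-$(CONFIG_...) names the composite module object and the matching <module>-y list names the objects linked into it, so the new source file only needs to be appended to that list. The same Kbuild pattern, shown generically (hypothetical 'foo' module, not from this tree):

obj-$(CONFIG_FOO)	+= foo.o		# build foo.o (module or built-in) when CONFIG_FOO is set
foo-y			:= init.o core.o prep.o	# objects linked together to form foo.o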
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 5d78cafdd3f2..e67eda055ea5 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -578,50 +578,3 @@ int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
578 | 578 | ||
579 | return -ENOMEM; | 579 | return -ENOMEM; |
580 | } | 580 | } |
581 | |||
582 | struct dma_async_tx_descriptor * | ||
583 | ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, | ||
584 | dma_addr_t dma_src, size_t len, unsigned long flags) | ||
585 | { | ||
586 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
587 | struct ioat_dma_descriptor *hw; | ||
588 | struct ioat_ring_ent *desc; | ||
589 | dma_addr_t dst = dma_dest; | ||
590 | dma_addr_t src = dma_src; | ||
591 | size_t total_len = len; | ||
592 | int num_descs, idx, i; | ||
593 | |||
594 | num_descs = ioat_xferlen_to_descs(ioat_chan, len); | ||
595 | if (likely(num_descs) && | ||
596 | ioat_check_space_lock(ioat_chan, num_descs) == 0) | ||
597 | idx = ioat_chan->head; | ||
598 | else | ||
599 | return NULL; | ||
600 | i = 0; | ||
601 | do { | ||
602 | size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log); | ||
603 | |||
604 | desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
605 | hw = desc->hw; | ||
606 | |||
607 | hw->size = copy; | ||
608 | hw->ctl = 0; | ||
609 | hw->src_addr = src; | ||
610 | hw->dst_addr = dst; | ||
611 | |||
612 | len -= copy; | ||
613 | dst += copy; | ||
614 | src += copy; | ||
615 | dump_desc_dbg(ioat_chan, desc); | ||
616 | } while (++i < num_descs); | ||
617 | |||
618 | desc->txd.flags = flags; | ||
619 | desc->len = total_len; | ||
620 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
621 | hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
622 | hw->ctl_f.compl_write = 1; | ||
623 | dump_desc_dbg(ioat_chan, desc); | ||
624 | /* we leave the channel locked to ensure in order submission */ | ||
625 | |||
626 | return &desc->txd; | ||
627 | } | ||
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index d2ffa5775d53..a319befad1a3 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -37,6 +37,14 @@
37 | 37 | ||
38 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80) | 38 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80) |
39 | 39 | ||
40 | /* ioat hardware assumes at least two sources for raid operations */ | ||
41 | #define src_cnt_to_sw(x) ((x) + 2) | ||
42 | #define src_cnt_to_hw(x) ((x) - 2) | ||
43 | #define ndest_to_sw(x) ((x) + 1) | ||
44 | #define ndest_to_hw(x) ((x) - 1) | ||
45 | #define src16_cnt_to_sw(x) ((x) + 9) | ||
46 | #define src16_cnt_to_hw(x) ((x) - 9) | ||
47 | |||
40 | /* | 48 | /* |
41 | * workaround for IOAT ver.3.0 null descriptor issue | 49 | * workaround for IOAT ver.3.0 null descriptor issue |
42 | * (channel returns error when size is 0) | 50 | * (channel returns error when size is 0) |
@@ -190,15 +198,22 @@ struct ioat_ring_ent {
190 | struct ioat_sed_ent *sed; | 198 | struct ioat_sed_ent *sed; |
191 | }; | 199 | }; |
192 | 200 | ||
201 | extern const struct sysfs_ops ioat_sysfs_ops; | ||
202 | extern struct ioat_sysfs_entry ioat_version_attr; | ||
203 | extern struct ioat_sysfs_entry ioat_cap_attr; | ||
204 | extern int ioat_pending_level; | ||
205 | extern int ioat_ring_alloc_order; | ||
206 | extern struct kobj_type ioat_ktype; | ||
207 | extern struct kmem_cache *ioat_cache; | ||
208 | extern int ioat_ring_max_alloc_order; | ||
209 | extern struct kmem_cache *ioat_sed_cache; | ||
210 | |||
193 | static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c) | 211 | static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c) |
194 | { | 212 | { |
195 | return container_of(c, struct ioatdma_chan, dma_chan); | 213 | return container_of(c, struct ioatdma_chan, dma_chan); |
196 | } | 214 | } |
197 | 215 | ||
198 | |||
199 | |||
200 | /* wrapper around hardware descriptor format + additional software fields */ | 216 | /* wrapper around hardware descriptor format + additional software fields */ |
201 | |||
202 | #ifdef DEBUG | 217 | #ifdef DEBUG |
203 | #define set_desc_id(desc, i) ((desc)->id = (i)) | 218 | #define set_desc_id(desc, i) ((desc)->id = (i)) |
204 | #define desc_id(desc) ((desc)->id) | 219 | #define desc_id(desc) ((desc)->id) |
@@ -381,13 +396,10 @@ ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
381 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | 396 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); |
382 | } | 397 | } |
383 | 398 | ||
384 | irqreturn_t ioat_dma_do_interrupt(int irq, void *data); | 399 | /* IOAT Prep functions */ |
385 | irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data); | 400 | struct dma_async_tx_descriptor * |
386 | struct ioat_ring_ent ** | 401 | ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, |
387 | ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags); | 402 | dma_addr_t dma_src, size_t len, unsigned long flags); |
388 | void ioat_start_null_desc(struct ioatdma_chan *ioat_chan); | ||
389 | void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan); | ||
390 | int ioat_reset_hw(struct ioatdma_chan *ioat_chan); | ||
391 | struct dma_async_tx_descriptor * | 403 | struct dma_async_tx_descriptor * |
392 | ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags); | 404 | ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags); |
393 | struct dma_async_tx_descriptor * | 405 | struct dma_async_tx_descriptor * |
@@ -412,53 +424,38 @@ struct dma_async_tx_descriptor *
412 | ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | 424 | ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, |
413 | unsigned int src_cnt, size_t len, | 425 | unsigned int src_cnt, size_t len, |
414 | enum sum_check_flags *result, unsigned long flags); | 426 | enum sum_check_flags *result, unsigned long flags); |
427 | |||
428 | /* IOAT Operation functions */ | ||
429 | irqreturn_t ioat_dma_do_interrupt(int irq, void *data); | ||
430 | irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data); | ||
431 | struct ioat_ring_ent ** | ||
432 | ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags); | ||
433 | void ioat_start_null_desc(struct ioatdma_chan *ioat_chan); | ||
434 | void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan); | ||
435 | int ioat_reset_hw(struct ioatdma_chan *ioat_chan); | ||
415 | enum dma_status | 436 | enum dma_status |
416 | ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, | 437 | ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, |
417 | struct dma_tx_state *txstate); | 438 | struct dma_tx_state *txstate); |
418 | void ioat_cleanup_event(unsigned long data); | 439 | void ioat_cleanup_event(unsigned long data); |
419 | void ioat_timer_event(unsigned long data); | 440 | void ioat_timer_event(unsigned long data); |
420 | bool is_bwd_ioat(struct pci_dev *pdev); | ||
421 | int ioat_probe(struct ioatdma_device *ioat_dma); | ||
422 | int ioat_register(struct ioatdma_device *ioat_dma); | ||
423 | int ioat_dma_self_test(struct ioatdma_device *ioat_dma); | ||
424 | void ioat_dma_remove(struct ioatdma_device *ioat_dma); | ||
425 | struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
426 | void ioat_init_channel(struct ioatdma_device *ioat_dma, | ||
427 | struct ioatdma_chan *ioat_chan, int idx); | ||
428 | enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | 441 | enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, |
429 | struct dma_tx_state *txstate); | 442 | struct dma_tx_state *txstate); |
430 | bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan, | 443 | bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan, |
431 | dma_addr_t *phys_complete); | 444 | dma_addr_t *phys_complete); |
432 | void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type); | ||
433 | void ioat_kobject_del(struct ioatdma_device *ioat_dma); | ||
434 | int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma); | ||
435 | void ioat_stop(struct ioatdma_chan *ioat_chan); | ||
436 | int ioat_dma_probe(struct ioatdma_device *ioat_dma, int dca); | ||
437 | int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca); | ||
438 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
439 | int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs); | 445 | int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs); |
440 | int ioat_enumerate_channels(struct ioatdma_device *ioat_dma); | ||
441 | struct dma_async_tx_descriptor * | ||
442 | ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, | ||
443 | dma_addr_t dma_src, size_t len, unsigned long flags); | ||
444 | void ioat_issue_pending(struct dma_chan *chan); | 446 | void ioat_issue_pending(struct dma_chan *chan); |
445 | int ioat_alloc_chan_resources(struct dma_chan *c); | ||
446 | void ioat_free_chan_resources(struct dma_chan *c); | ||
447 | void __ioat_restart_chan(struct ioatdma_chan *ioat_chan); | ||
448 | bool reshape_ring(struct ioatdma_chan *ioat, int order); | 447 | bool reshape_ring(struct ioatdma_chan *ioat, int order); |
449 | void __ioat_issue_pending(struct ioatdma_chan *ioat_chan); | 448 | void __ioat_issue_pending(struct ioatdma_chan *ioat_chan); |
450 | void ioat_timer_event(unsigned long data); | 449 | void ioat_timer_event(unsigned long data); |
451 | int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo); | 450 | int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo); |
452 | int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo); | 451 | int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo); |
452 | void __ioat_restart_chan(struct ioatdma_chan *ioat_chan); | ||
453 | 453 | ||
454 | extern const struct sysfs_ops ioat_sysfs_ops; | 454 | /* IOAT Init functions */ |
455 | extern struct ioat_sysfs_entry ioat_version_attr; | 455 | bool is_bwd_ioat(struct pci_dev *pdev); |
456 | extern struct ioat_sysfs_entry ioat_cap_attr; | 456 | void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type); |
457 | extern int ioat_pending_level; | 457 | void ioat_kobject_del(struct ioatdma_device *ioat_dma); |
458 | extern int ioat_ring_alloc_order; | 458 | int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma); |
459 | extern struct kobj_type ioat_ktype; | 459 | void ioat_stop(struct ioatdma_chan *ioat_chan); |
460 | extern struct kmem_cache *ioat_cache; | 460 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
461 | extern int ioat_ring_max_alloc_order; | ||
462 | extern struct kmem_cache *ioat_sed_cache; | ||
463 | |||
464 | #endif /* IOATDMA_H */ | 461 | #endif /* IOATDMA_H */ |
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index f6a194a3a463..d0ae8f7c97a6 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -62,107 +62,8 @@
62 | #include "hw.h" | 62 | #include "hw.h" |
63 | #include "dma.h" | 63 | #include "dma.h" |
64 | 64 | ||
65 | /* ioat hardware assumes at least two sources for raid operations */ | ||
66 | #define src_cnt_to_sw(x) ((x) + 2) | ||
67 | #define src_cnt_to_hw(x) ((x) - 2) | ||
68 | #define ndest_to_sw(x) ((x) + 1) | ||
69 | #define ndest_to_hw(x) ((x) - 1) | ||
70 | #define src16_cnt_to_sw(x) ((x) + 9) | ||
71 | #define src16_cnt_to_hw(x) ((x) - 9) | ||
72 | |||
73 | /* provide a lookup table for setting the source address in the base or | ||
74 | * extended descriptor of an xor or pq descriptor | ||
75 | */ | ||
76 | static const u8 xor_idx_to_desc = 0xe0; | ||
77 | static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; | ||
78 | static const u8 pq_idx_to_desc = 0xf8; | ||
79 | static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, | ||
80 | 2, 2, 2, 2, 2, 2, 2 }; | ||
81 | static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; | ||
82 | static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, | ||
83 | 0, 1, 2, 3, 4, 5, 6 }; | ||
84 | |||
85 | static void ioat3_eh(struct ioatdma_chan *ioat_chan); | 65 | static void ioat3_eh(struct ioatdma_chan *ioat_chan); |
86 | 66 | ||
87 | static void xor_set_src(struct ioat_raw_descriptor *descs[2], | ||
88 | dma_addr_t addr, u32 offset, int idx) | ||
89 | { | ||
90 | struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; | ||
91 | |||
92 | raw->field[xor_idx_to_field[idx]] = addr + offset; | ||
93 | } | ||
94 | |||
95 | static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx) | ||
96 | { | ||
97 | struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; | ||
98 | |||
99 | return raw->field[pq_idx_to_field[idx]]; | ||
100 | } | ||
101 | |||
102 | static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx) | ||
103 | { | ||
104 | struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; | ||
105 | |||
106 | return raw->field[pq16_idx_to_field[idx]]; | ||
107 | } | ||
108 | |||
109 | static void pq_set_src(struct ioat_raw_descriptor *descs[2], | ||
110 | dma_addr_t addr, u32 offset, u8 coef, int idx) | ||
111 | { | ||
112 | struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0]; | ||
113 | struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; | ||
114 | |||
115 | raw->field[pq_idx_to_field[idx]] = addr + offset; | ||
116 | pq->coef[idx] = coef; | ||
117 | } | ||
118 | |||
119 | static void pq16_set_src(struct ioat_raw_descriptor *desc[3], | ||
120 | dma_addr_t addr, u32 offset, u8 coef, unsigned idx) | ||
121 | { | ||
122 | struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0]; | ||
123 | struct ioat_pq16a_descriptor *pq16 = | ||
124 | (struct ioat_pq16a_descriptor *)desc[1]; | ||
125 | struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; | ||
126 | |||
127 | raw->field[pq16_idx_to_field[idx]] = addr + offset; | ||
128 | |||
129 | if (idx < 8) | ||
130 | pq->coef[idx] = coef; | ||
131 | else | ||
132 | pq16->coef[idx - 8] = coef; | ||
133 | } | ||
134 | |||
135 | static struct ioat_sed_ent * | ||
136 | ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool) | ||
137 | { | ||
138 | struct ioat_sed_ent *sed; | ||
139 | gfp_t flags = __GFP_ZERO | GFP_ATOMIC; | ||
140 | |||
141 | sed = kmem_cache_alloc(ioat_sed_cache, flags); | ||
142 | if (!sed) | ||
143 | return NULL; | ||
144 | |||
145 | sed->hw_pool = hw_pool; | ||
146 | sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool], | ||
147 | flags, &sed->dma); | ||
148 | if (!sed->hw) { | ||
149 | kmem_cache_free(ioat_sed_cache, sed); | ||
150 | return NULL; | ||
151 | } | ||
152 | |||
153 | return sed; | ||
154 | } | ||
155 | |||
156 | static void | ||
157 | ioat3_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed) | ||
158 | { | ||
159 | if (!sed) | ||
160 | return; | ||
161 | |||
162 | dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); | ||
163 | kmem_cache_free(ioat_sed_cache, sed); | ||
164 | } | ||
165 | |||
166 | static bool desc_has_ext(struct ioat_ring_ent *desc) | 67 | static bool desc_has_ext(struct ioat_ring_ent *desc) |
167 | { | 68 | { |
168 | struct ioat_dma_descriptor *hw = desc->hw; | 69 | struct ioat_dma_descriptor *hw = desc->hw; |
@@ -184,6 +85,16 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
184 | return false; | 85 | return false; |
185 | } | 86 | } |
186 | 87 | ||
88 | static void | ||
89 | ioat3_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed) | ||
90 | { | ||
91 | if (!sed) | ||
92 | return; | ||
93 | |||
94 | dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); | ||
95 | kmem_cache_free(ioat_sed_cache, sed); | ||
96 | } | ||
97 | |||
187 | static u64 ioat3_get_current_completion(struct ioatdma_chan *ioat_chan) | 98 | static u64 ioat3_get_current_completion(struct ioatdma_chan *ioat_chan) |
188 | { | 99 | { |
189 | u64 phys_complete; | 100 | u64 phys_complete; |
@@ -530,556 +441,6 @@ ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
530 | return dma_cookie_status(c, cookie, txstate); | 441 | return dma_cookie_status(c, cookie, txstate); |
531 | } | 442 | } |
532 | 443 | ||
533 | static struct dma_async_tx_descriptor * | ||
534 | __ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, | ||
535 | dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, | ||
536 | size_t len, unsigned long flags) | ||
537 | { | ||
538 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
539 | struct ioat_ring_ent *compl_desc; | ||
540 | struct ioat_ring_ent *desc; | ||
541 | struct ioat_ring_ent *ext; | ||
542 | size_t total_len = len; | ||
543 | struct ioat_xor_descriptor *xor; | ||
544 | struct ioat_xor_ext_descriptor *xor_ex = NULL; | ||
545 | struct ioat_dma_descriptor *hw; | ||
546 | int num_descs, with_ext, idx, i; | ||
547 | u32 offset = 0; | ||
548 | u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; | ||
549 | |||
550 | BUG_ON(src_cnt < 2); | ||
551 | |||
552 | num_descs = ioat_xferlen_to_descs(ioat_chan, len); | ||
553 | /* we need 2x the number of descriptors to cover greater than 5 | ||
554 | * sources | ||
555 | */ | ||
556 | if (src_cnt > 5) { | ||
557 | with_ext = 1; | ||
558 | num_descs *= 2; | ||
559 | } else | ||
560 | with_ext = 0; | ||
561 | |||
562 | /* completion writes from the raid engine may pass completion | ||
563 | * writes from the legacy engine, so we need one extra null | ||
564 | * (legacy) descriptor to ensure all completion writes arrive in | ||
565 | * order. | ||
566 | */ | ||
567 | if (likely(num_descs) && | ||
568 | ioat_check_space_lock(ioat_chan, num_descs+1) == 0) | ||
569 | idx = ioat_chan->head; | ||
570 | else | ||
571 | return NULL; | ||
572 | i = 0; | ||
573 | do { | ||
574 | struct ioat_raw_descriptor *descs[2]; | ||
575 | size_t xfer_size = min_t(size_t, | ||
576 | len, 1 << ioat_chan->xfercap_log); | ||
577 | int s; | ||
578 | |||
579 | desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
580 | xor = desc->xor; | ||
581 | |||
582 | /* save a branch by unconditionally retrieving the | ||
583 | * extended descriptor xor_set_src() knows to not write | ||
584 | * to it in the single descriptor case | ||
585 | */ | ||
586 | ext = ioat_get_ring_ent(ioat_chan, idx + i + 1); | ||
587 | xor_ex = ext->xor_ex; | ||
588 | |||
589 | descs[0] = (struct ioat_raw_descriptor *) xor; | ||
590 | descs[1] = (struct ioat_raw_descriptor *) xor_ex; | ||
591 | for (s = 0; s < src_cnt; s++) | ||
592 | xor_set_src(descs, src[s], offset, s); | ||
593 | xor->size = xfer_size; | ||
594 | xor->dst_addr = dest + offset; | ||
595 | xor->ctl = 0; | ||
596 | xor->ctl_f.op = op; | ||
597 | xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt); | ||
598 | |||
599 | len -= xfer_size; | ||
600 | offset += xfer_size; | ||
601 | dump_desc_dbg(ioat_chan, desc); | ||
602 | } while ((i += 1 + with_ext) < num_descs); | ||
603 | |||
604 | /* last xor descriptor carries the unmap parameters and fence bit */ | ||
605 | desc->txd.flags = flags; | ||
606 | desc->len = total_len; | ||
607 | if (result) | ||
608 | desc->result = result; | ||
609 | xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
610 | |||
611 | /* completion descriptor carries interrupt bit */ | ||
612 | compl_desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
613 | compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; | ||
614 | hw = compl_desc->hw; | ||
615 | hw->ctl = 0; | ||
616 | hw->ctl_f.null = 1; | ||
617 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
618 | hw->ctl_f.compl_write = 1; | ||
619 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
620 | dump_desc_dbg(ioat_chan, compl_desc); | ||
621 | |||
622 | /* we leave the channel locked to ensure in order submission */ | ||
623 | return &compl_desc->txd; | ||
624 | } | ||
625 | |||
626 | struct dma_async_tx_descriptor * | ||
627 | ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | ||
628 | unsigned int src_cnt, size_t len, unsigned long flags) | ||
629 | { | ||
630 | return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); | ||
631 | } | ||
632 | |||
633 | struct dma_async_tx_descriptor * | ||
634 | ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, | ||
635 | unsigned int src_cnt, size_t len, | ||
636 | enum sum_check_flags *result, unsigned long flags) | ||
637 | { | ||
638 | /* the cleanup routine only sets bits on validate failure, it | ||
639 | * does not clear bits on validate success... so clear it here | ||
640 | */ | ||
641 | *result = 0; | ||
642 | |||
643 | return __ioat_prep_xor_lock(chan, result, src[0], &src[1], | ||
644 | src_cnt - 1, len, flags); | ||
645 | } | ||
646 | |||
647 | static void | ||
648 | dump_pq_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc, | ||
649 | struct ioat_ring_ent *ext) | ||
650 | { | ||
651 | struct device *dev = to_dev(ioat_chan); | ||
652 | struct ioat_pq_descriptor *pq = desc->pq; | ||
653 | struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL; | ||
654 | struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex }; | ||
655 | int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); | ||
656 | int i; | ||
657 | |||
658 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" | ||
659 | " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" | ||
660 | " src_cnt: %d)\n", | ||
661 | desc_id(desc), (unsigned long long) desc->txd.phys, | ||
662 | (unsigned long long) (pq_ex ? pq_ex->next : pq->next), | ||
663 | desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en, | ||
664 | pq->ctl_f.compl_write, | ||
665 | pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", | ||
666 | pq->ctl_f.src_cnt); | ||
667 | for (i = 0; i < src_cnt; i++) | ||
668 | dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, | ||
669 | (unsigned long long) pq_get_src(descs, i), pq->coef[i]); | ||
670 | dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); | ||
671 | dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); | ||
672 | dev_dbg(dev, "\tNEXT: %#llx\n", pq->next); | ||
673 | } | ||
674 | |||
675 | static void dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan, | ||
676 | struct ioat_ring_ent *desc) | ||
677 | { | ||
678 | struct device *dev = to_dev(ioat_chan); | ||
679 | struct ioat_pq_descriptor *pq = desc->pq; | ||
680 | struct ioat_raw_descriptor *descs[] = { (void *)pq, | ||
681 | (void *)pq, | ||
682 | (void *)pq }; | ||
683 | int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); | ||
684 | int i; | ||
685 | |||
686 | if (desc->sed) { | ||
687 | descs[1] = (void *)desc->sed->hw; | ||
688 | descs[2] = (void *)desc->sed->hw + 64; | ||
689 | } | ||
690 | |||
691 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" | ||
692 | " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" | ||
693 | " src_cnt: %d)\n", | ||
694 | desc_id(desc), (unsigned long long) desc->txd.phys, | ||
695 | (unsigned long long) pq->next, | ||
696 | desc->txd.flags, pq->size, pq->ctl, | ||
697 | pq->ctl_f.op, pq->ctl_f.int_en, | ||
698 | pq->ctl_f.compl_write, | ||
699 | pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", | ||
700 | pq->ctl_f.src_cnt); | ||
701 | for (i = 0; i < src_cnt; i++) { | ||
702 | dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, | ||
703 | (unsigned long long) pq16_get_src(descs, i), | ||
704 | pq->coef[i]); | ||
705 | } | ||
706 | dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); | ||
707 | dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); | ||
708 | } | ||
709 | |||
710 | static struct dma_async_tx_descriptor * | ||
711 | __ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | ||
712 | const dma_addr_t *dst, const dma_addr_t *src, | ||
713 | unsigned int src_cnt, const unsigned char *scf, | ||
714 | size_t len, unsigned long flags) | ||
715 | { | ||
716 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
717 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | ||
718 | struct ioat_ring_ent *compl_desc; | ||
719 | struct ioat_ring_ent *desc; | ||
720 | struct ioat_ring_ent *ext; | ||
721 | size_t total_len = len; | ||
722 | struct ioat_pq_descriptor *pq; | ||
723 | struct ioat_pq_ext_descriptor *pq_ex = NULL; | ||
724 | struct ioat_dma_descriptor *hw; | ||
725 | u32 offset = 0; | ||
726 | u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; | ||
727 | int i, s, idx, with_ext, num_descs; | ||
728 | int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0; | ||
729 | |||
730 | dev_dbg(to_dev(ioat_chan), "%s\n", __func__); | ||
731 | /* the engine requires at least two sources (we provide | ||
732 | * at least 1 implied source in the DMA_PREP_CONTINUE case) | ||
733 | */ | ||
734 | BUG_ON(src_cnt + dmaf_continue(flags) < 2); | ||
735 | |||
736 | num_descs = ioat_xferlen_to_descs(ioat_chan, len); | ||
737 | /* we need 2x the number of descriptors to cover greater than 3 | ||
738 | * sources (we need 1 extra source in the q-only continuation | ||
739 | * case and 3 extra sources in the p+q continuation case. | ||
740 | */ | ||
741 | if (src_cnt + dmaf_p_disabled_continue(flags) > 3 || | ||
742 | (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) { | ||
743 | with_ext = 1; | ||
744 | num_descs *= 2; | ||
745 | } else | ||
746 | with_ext = 0; | ||
747 | |||
748 | /* completion writes from the raid engine may pass completion | ||
749 | * writes from the legacy engine, so we need one extra null | ||
750 | * (legacy) descriptor to ensure all completion writes arrive in | ||
751 | * order. | ||
752 | */ | ||
753 | if (likely(num_descs) && | ||
754 | ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0) | ||
755 | idx = ioat_chan->head; | ||
756 | else | ||
757 | return NULL; | ||
758 | i = 0; | ||
759 | do { | ||
760 | struct ioat_raw_descriptor *descs[2]; | ||
761 | size_t xfer_size = min_t(size_t, len, | ||
762 | 1 << ioat_chan->xfercap_log); | ||
763 | |||
764 | desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
765 | pq = desc->pq; | ||
766 | |||
767 | /* save a branch by unconditionally retrieving the | ||
768 | * extended descriptor pq_set_src() knows to not write | ||
769 | * to it in the single descriptor case | ||
770 | */ | ||
771 | ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext); | ||
772 | pq_ex = ext->pq_ex; | ||
773 | |||
774 | descs[0] = (struct ioat_raw_descriptor *) pq; | ||
775 | descs[1] = (struct ioat_raw_descriptor *) pq_ex; | ||
776 | |||
777 | for (s = 0; s < src_cnt; s++) | ||
778 | pq_set_src(descs, src[s], offset, scf[s], s); | ||
779 | |||
780 | /* see the comment for dma_maxpq in include/linux/dmaengine.h */ | ||
781 | if (dmaf_p_disabled_continue(flags)) | ||
782 | pq_set_src(descs, dst[1], offset, 1, s++); | ||
783 | else if (dmaf_continue(flags)) { | ||
784 | pq_set_src(descs, dst[0], offset, 0, s++); | ||
785 | pq_set_src(descs, dst[1], offset, 1, s++); | ||
786 | pq_set_src(descs, dst[1], offset, 0, s++); | ||
787 | } | ||
788 | pq->size = xfer_size; | ||
789 | pq->p_addr = dst[0] + offset; | ||
790 | pq->q_addr = dst[1] + offset; | ||
791 | pq->ctl = 0; | ||
792 | pq->ctl_f.op = op; | ||
793 | /* we turn on descriptor write back error status */ | ||
794 | if (ioat_dma->cap & IOAT_CAP_DWBES) | ||
795 | pq->ctl_f.wb_en = result ? 1 : 0; | ||
796 | pq->ctl_f.src_cnt = src_cnt_to_hw(s); | ||
797 | pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); | ||
798 | pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); | ||
799 | |||
800 | len -= xfer_size; | ||
801 | offset += xfer_size; | ||
802 | } while ((i += 1 + with_ext) < num_descs); | ||
803 | |||
804 | /* last pq descriptor carries the unmap parameters and fence bit */ | ||
805 | desc->txd.flags = flags; | ||
806 | desc->len = total_len; | ||
807 | if (result) | ||
808 | desc->result = result; | ||
809 | pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
810 | dump_pq_desc_dbg(ioat_chan, desc, ext); | ||
811 | |||
812 | if (!cb32) { | ||
813 | pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
814 | pq->ctl_f.compl_write = 1; | ||
815 | compl_desc = desc; | ||
816 | } else { | ||
817 | /* completion descriptor carries interrupt bit */ | ||
818 | compl_desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
819 | compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; | ||
820 | hw = compl_desc->hw; | ||
821 | hw->ctl = 0; | ||
822 | hw->ctl_f.null = 1; | ||
823 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
824 | hw->ctl_f.compl_write = 1; | ||
825 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
826 | dump_desc_dbg(ioat_chan, compl_desc); | ||
827 | } | ||
828 | |||
829 | |||
830 | /* we leave the channel locked to ensure in order submission */ | ||
831 | return &compl_desc->txd; | ||
832 | } | ||
833 | |||
834 | static struct dma_async_tx_descriptor * | ||
835 | __ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, | ||
836 | const dma_addr_t *dst, const dma_addr_t *src, | ||
837 | unsigned int src_cnt, const unsigned char *scf, | ||
838 | size_t len, unsigned long flags) | ||
839 | { | ||
840 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
841 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | ||
842 | struct ioat_ring_ent *desc; | ||
843 | size_t total_len = len; | ||
844 | struct ioat_pq_descriptor *pq; | ||
845 | u32 offset = 0; | ||
846 | u8 op; | ||
847 | int i, s, idx, num_descs; | ||
848 | |||
849 | /* this function is only called with 9-16 sources */ | ||
850 | op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; | ||
851 | |||
852 | dev_dbg(to_dev(ioat_chan), "%s\n", __func__); | ||
853 | |||
854 | num_descs = ioat_xferlen_to_descs(ioat_chan, len); | ||
855 | |||
856 | /* | ||
857 | * 16 source pq is only available on cb3.3 and has no completion | ||
858 | * write hw bug. | ||
859 | */ | ||
860 | if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0) | ||
861 | idx = ioat_chan->head; | ||
862 | else | ||
863 | return NULL; | ||
864 | |||
865 | i = 0; | ||
866 | |||
867 | do { | ||
868 | struct ioat_raw_descriptor *descs[4]; | ||
869 | size_t xfer_size = min_t(size_t, len, | ||
870 | 1 << ioat_chan->xfercap_log); | ||
871 | |||
872 | desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
873 | pq = desc->pq; | ||
874 | |||
875 | descs[0] = (struct ioat_raw_descriptor *) pq; | ||
876 | |||
877 | desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3); | ||
878 | if (!desc->sed) { | ||
879 | dev_err(to_dev(ioat_chan), | ||
880 | "%s: no free sed entries\n", __func__); | ||
881 | return NULL; | ||
882 | } | ||
883 | |||
884 | pq->sed_addr = desc->sed->dma; | ||
885 | desc->sed->parent = desc; | ||
886 | |||
887 | descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw; | ||
888 | descs[2] = (void *)descs[1] + 64; | ||
889 | |||
890 | for (s = 0; s < src_cnt; s++) | ||
891 | pq16_set_src(descs, src[s], offset, scf[s], s); | ||
892 | |||
893 | /* see the comment for dma_maxpq in include/linux/dmaengine.h */ | ||
894 | if (dmaf_p_disabled_continue(flags)) | ||
895 | pq16_set_src(descs, dst[1], offset, 1, s++); | ||
896 | else if (dmaf_continue(flags)) { | ||
897 | pq16_set_src(descs, dst[0], offset, 0, s++); | ||
898 | pq16_set_src(descs, dst[1], offset, 1, s++); | ||
899 | pq16_set_src(descs, dst[1], offset, 0, s++); | ||
900 | } | ||
901 | |||
902 | pq->size = xfer_size; | ||
903 | pq->p_addr = dst[0] + offset; | ||
904 | pq->q_addr = dst[1] + offset; | ||
905 | pq->ctl = 0; | ||
906 | pq->ctl_f.op = op; | ||
907 | pq->ctl_f.src_cnt = src16_cnt_to_hw(s); | ||
908 | /* we turn on descriptor write back error status */ | ||
909 | if (ioat_dma->cap & IOAT_CAP_DWBES) | ||
910 | pq->ctl_f.wb_en = result ? 1 : 0; | ||
911 | pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); | ||
912 | pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); | ||
913 | |||
914 | len -= xfer_size; | ||
915 | offset += xfer_size; | ||
916 | } while (++i < num_descs); | ||
917 | |||
918 | /* last pq descriptor carries the unmap parameters and fence bit */ | ||
919 | desc->txd.flags = flags; | ||
920 | desc->len = total_len; | ||
921 | if (result) | ||
922 | desc->result = result; | ||
923 | pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
924 | |||
925 | /* with cb3.3 we should be able to do completion w/o a null desc */ | ||
926 | pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
927 | pq->ctl_f.compl_write = 1; | ||
928 | |||
929 | dump_pq16_desc_dbg(ioat_chan, desc); | ||
930 | |||
931 | /* we leave the channel locked to ensure in order submission */ | ||
932 | return &desc->txd; | ||
933 | } | ||
934 | |||
935 | static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) | ||
936 | { | ||
937 | if (dmaf_p_disabled_continue(flags)) | ||
938 | return src_cnt + 1; | ||
939 | else if (dmaf_continue(flags)) | ||
940 | return src_cnt + 3; | ||
941 | else | ||
942 | return src_cnt; | ||
943 | } | ||
944 | |||
945 | struct dma_async_tx_descriptor * | ||
946 | ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | ||
947 | unsigned int src_cnt, const unsigned char *scf, size_t len, | ||
948 | unsigned long flags) | ||
949 | { | ||
950 | /* specify valid address for disabled result */ | ||
951 | if (flags & DMA_PREP_PQ_DISABLE_P) | ||
952 | dst[0] = dst[1]; | ||
953 | if (flags & DMA_PREP_PQ_DISABLE_Q) | ||
954 | dst[1] = dst[0]; | ||
955 | |||
956 | /* handle the single source multiply case from the raid6 | ||
957 | * recovery path | ||
958 | */ | ||
959 | if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) { | ||
960 | dma_addr_t single_source[2]; | ||
961 | unsigned char single_source_coef[2]; | ||
962 | |||
963 | BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q); | ||
964 | single_source[0] = src[0]; | ||
965 | single_source[1] = src[0]; | ||
966 | single_source_coef[0] = scf[0]; | ||
967 | single_source_coef[1] = 0; | ||
968 | |||
969 | return src_cnt_flags(src_cnt, flags) > 8 ? | ||
970 | __ioat_prep_pq16_lock(chan, NULL, dst, single_source, | ||
971 | 2, single_source_coef, len, | ||
972 | flags) : | ||
973 | __ioat_prep_pq_lock(chan, NULL, dst, single_source, 2, | ||
974 | single_source_coef, len, flags); | ||
975 | |||
976 | } else { | ||
977 | return src_cnt_flags(src_cnt, flags) > 8 ? | ||
978 | __ioat_prep_pq16_lock(chan, NULL, dst, src, src_cnt, | ||
979 | scf, len, flags) : | ||
980 | __ioat_prep_pq_lock(chan, NULL, dst, src, src_cnt, | ||
981 | scf, len, flags); | ||
982 | } | ||
983 | } | ||
984 | |||
985 | struct dma_async_tx_descriptor * | ||
986 | ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | ||
987 | unsigned int src_cnt, const unsigned char *scf, size_t len, | ||
988 | enum sum_check_flags *pqres, unsigned long flags) | ||
989 | { | ||
990 | /* specify valid address for disabled result */ | ||
991 | if (flags & DMA_PREP_PQ_DISABLE_P) | ||
992 | pq[0] = pq[1]; | ||
993 | if (flags & DMA_PREP_PQ_DISABLE_Q) | ||
994 | pq[1] = pq[0]; | ||
995 | |||
996 | /* the cleanup routine only sets bits on validate failure, it | ||
997 | * does not clear bits on validate success... so clear it here | ||
998 | */ | ||
999 | *pqres = 0; | ||
1000 | |||
1001 | return src_cnt_flags(src_cnt, flags) > 8 ? | ||
1002 | __ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, | ||
1003 | flags) : | ||
1004 | __ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, | ||
1005 | flags); | ||
1006 | } | ||
1007 | |||
1008 | struct dma_async_tx_descriptor * | ||
1009 | ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | ||
1010 | unsigned int src_cnt, size_t len, unsigned long flags) | ||
1011 | { | ||
1012 | unsigned char scf[src_cnt]; | ||
1013 | dma_addr_t pq[2]; | ||
1014 | |||
1015 | memset(scf, 0, src_cnt); | ||
1016 | pq[0] = dst; | ||
1017 | flags |= DMA_PREP_PQ_DISABLE_Q; | ||
1018 | pq[1] = dst; /* specify valid address for disabled result */ | ||
1019 | |||
1020 | return src_cnt_flags(src_cnt, flags) > 8 ? | ||
1021 | __ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, | ||
1022 | flags) : | ||
1023 | __ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, | ||
1024 | flags); | ||
1025 | } | ||
1026 | |||
1027 | struct dma_async_tx_descriptor * | ||
1028 | ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | ||
1029 | unsigned int src_cnt, size_t len, | ||
1030 | enum sum_check_flags *result, unsigned long flags) | ||
1031 | { | ||
1032 | unsigned char scf[src_cnt]; | ||
1033 | dma_addr_t pq[2]; | ||
1034 | |||
1035 | /* the cleanup routine only sets bits on validate failure, it | ||
1036 | * does not clear bits on validate success... so clear it here | ||
1037 | */ | ||
1038 | *result = 0; | ||
1039 | |||
1040 | memset(scf, 0, src_cnt); | ||
1041 | pq[0] = src[0]; | ||
1042 | flags |= DMA_PREP_PQ_DISABLE_Q; | ||
1043 | pq[1] = pq[0]; /* specify valid address for disabled result */ | ||
1044 | |||
1045 | return src_cnt_flags(src_cnt, flags) > 8 ? | ||
1046 | __ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, | ||
1047 | scf, len, flags) : | ||
1048 | __ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, | ||
1049 | scf, len, flags); | ||
1050 | } | ||
1051 | |||
1052 | struct dma_async_tx_descriptor * | ||
1053 | ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) | ||
1054 | { | ||
1055 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
1056 | struct ioat_ring_ent *desc; | ||
1057 | struct ioat_dma_descriptor *hw; | ||
1058 | |||
1059 | if (ioat_check_space_lock(ioat_chan, 1) == 0) | ||
1060 | desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head); | ||
1061 | else | ||
1062 | return NULL; | ||
1063 | |||
1064 | hw = desc->hw; | ||
1065 | hw->ctl = 0; | ||
1066 | hw->ctl_f.null = 1; | ||
1067 | hw->ctl_f.int_en = 1; | ||
1068 | hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
1069 | hw->ctl_f.compl_write = 1; | ||
1070 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
1071 | hw->src_addr = 0; | ||
1072 | hw->dst_addr = 0; | ||
1073 | |||
1074 | desc->txd.flags = flags; | ||
1075 | desc->len = 1; | ||
1076 | |||
1077 | dump_desc_dbg(ioat_chan, desc); | ||
1078 | |||
1079 | /* we leave the channel locked to ensure in order submission */ | ||
1080 | return &desc->txd; | ||
1081 | } | ||
1082 | |||
1083 | static int ioat3_irq_reinit(struct ioatdma_device *ioat_dma) | 444 | static int ioat3_irq_reinit(struct ioatdma_device *ioat_dma) |
1084 | { | 445 | { |
1085 | struct pci_dev *pdev = ioat_dma->pdev; | 446 | struct pci_dev *pdev = ioat_dma->pdev; |
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index de8141c7cd01..6b8fd49cf718 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -110,6 +110,9 @@ MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
110 | 110 | ||
111 | static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); | 111 | static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); |
112 | static void ioat_remove(struct pci_dev *pdev); | 112 | static void ioat_remove(struct pci_dev *pdev); |
113 | static void | ||
114 | ioat_init_channel(struct ioatdma_device *ioat_dma, | ||
115 | struct ioatdma_chan *ioat_chan, int idx); | ||
113 | 116 | ||
114 | static int ioat_dca_enabled = 1; | 117 | static int ioat_dca_enabled = 1; |
115 | module_param(ioat_dca_enabled, int, 0644); | 118 | module_param(ioat_dca_enabled, int, 0644); |
@@ -269,7 +272,7 @@ static void ioat_dma_test_callback(void *dma_async_param)
269 | * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. | 272 | * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. |
270 | * @ioat_dma: dma device to be tested | 273 | * @ioat_dma: dma device to be tested |
271 | */ | 274 | */ |
272 | int ioat_dma_self_test(struct ioatdma_device *ioat_dma) | 275 | static int ioat_dma_self_test(struct ioatdma_device *ioat_dma) |
273 | { | 276 | { |
274 | int i; | 277 | int i; |
275 | u8 *src; | 278 | u8 *src; |
@@ -453,7 +456,6 @@ err_no_irq:
453 | dev_err(dev, "no usable interrupts\n"); | 456 | dev_err(dev, "no usable interrupts\n"); |
454 | return err; | 457 | return err; |
455 | } | 458 | } |
456 | EXPORT_SYMBOL(ioat_dma_setup_interrupts); | ||
457 | 459 | ||
458 | static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma) | 460 | static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma) |
459 | { | 461 | { |
@@ -461,7 +463,7 @@ static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
461 | writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); | 463 | writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); |
462 | } | 464 | } |
463 | 465 | ||
464 | int ioat_probe(struct ioatdma_device *ioat_dma) | 466 | static int ioat_probe(struct ioatdma_device *ioat_dma) |
465 | { | 467 | { |
466 | int err = -ENODEV; | 468 | int err = -ENODEV; |
467 | struct dma_device *dma = &ioat_dma->dma_dev; | 469 | struct dma_device *dma = &ioat_dma->dma_dev; |
@@ -517,7 +519,7 @@ err_dma_pool:
517 | return err; | 519 | return err; |
518 | } | 520 | } |
519 | 521 | ||
520 | int ioat_register(struct ioatdma_device *ioat_dma) | 522 | static int ioat_register(struct ioatdma_device *ioat_dma) |
521 | { | 523 | { |
522 | int err = dma_async_device_register(&ioat_dma->dma_dev); | 524 | int err = dma_async_device_register(&ioat_dma->dma_dev); |
523 | 525 | ||
@@ -530,7 +532,7 @@ int ioat_register(struct ioatdma_device *ioat_dma)
530 | return err; | 532 | return err; |
531 | } | 533 | } |
532 | 534 | ||
533 | void ioat_dma_remove(struct ioatdma_device *ioat_dma) | 535 | static void ioat_dma_remove(struct ioatdma_device *ioat_dma) |
534 | { | 536 | { |
535 | struct dma_device *dma = &ioat_dma->dma_dev; | 537 | struct dma_device *dma = &ioat_dma->dma_dev; |
536 | 538 | ||
@@ -550,7 +552,7 @@ void ioat_dma_remove(struct ioatdma_device *ioat_dma)
550 | * ioat_enumerate_channels - find and initialize the device's channels | 552 | * ioat_enumerate_channels - find and initialize the device's channels |
551 | * @ioat_dma: the ioat dma device to be enumerated | 553 | * @ioat_dma: the ioat dma device to be enumerated |
552 | */ | 554 | */ |
553 | int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) | 555 | static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) |
554 | { | 556 | { |
555 | struct ioatdma_chan *ioat_chan; | 557 | struct ioatdma_chan *ioat_chan; |
556 | struct device *dev = &ioat_dma->pdev->dev; | 558 | struct device *dev = &ioat_dma->pdev->dev; |
@@ -593,7 +595,7 @@ int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
593 | * ioat_free_chan_resources - release all the descriptors | 595 | * ioat_free_chan_resources - release all the descriptors |
594 | * @chan: the channel to be cleaned | 596 | * @chan: the channel to be cleaned |
595 | */ | 597 | */ |
596 | void ioat_free_chan_resources(struct dma_chan *c) | 598 | static void ioat_free_chan_resources(struct dma_chan *c) |
597 | { | 599 | { |
598 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | 600 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); |
599 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | 601 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; |
@@ -646,7 +648,7 @@ void ioat_free_chan_resources(struct dma_chan *c)
646 | /* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring | 648 | /* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring |
647 | * @chan: channel to be initialized | 649 | * @chan: channel to be initialized |
648 | */ | 650 | */ |
649 | int ioat_alloc_chan_resources(struct dma_chan *c) | 651 | static int ioat_alloc_chan_resources(struct dma_chan *c) |
650 | { | 652 | { |
651 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | 653 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); |
652 | struct ioat_ring_ent **ring; | 654 | struct ioat_ring_ent **ring; |
@@ -712,7 +714,7 @@ int ioat_alloc_chan_resources(struct dma_chan *c)
712 | } | 714 | } |
713 | 715 | ||
714 | /* common channel initialization */ | 716 | /* common channel initialization */ |
715 | void | 717 | static void |
716 | ioat_init_channel(struct ioatdma_device *ioat_dma, | 718 | ioat_init_channel(struct ioatdma_device *ioat_dma, |
717 | struct ioatdma_chan *ioat_chan, int idx) | 719 | struct ioatdma_chan *ioat_chan, int idx) |
718 | { | 720 | { |
@@ -1048,7 +1050,7 @@ static void ioat3_intr_quirk(struct ioatdma_device *ioat_dma)
1048 | } | 1050 | } |
1049 | } | 1051 | } |
1050 | 1052 | ||
1051 | int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | 1053 | static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) |
1052 | { | 1054 | { |
1053 | struct pci_dev *pdev = ioat_dma->pdev; | 1055 | struct pci_dev *pdev = ioat_dma->pdev; |
1054 | int dca_en = system_has_dca_enabled(pdev); | 1056 | int dca_en = system_has_dca_enabled(pdev); |
diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c
new file mode 100644
index 000000000000..e323a4036908
--- /dev/null
+++ b/drivers/dma/ioat/prep.c
@@ -0,0 +1,707 @@
1 | /* | ||
2 | * Intel I/OAT DMA Linux driver | ||
3 | * Copyright(c) 2004 - 2015 Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * The full GNU General Public License is included in this distribution in | ||
15 | * the file called "COPYING". | ||
16 | * | ||
17 | */ | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/pci.h> | ||
20 | #include <linux/gfp.h> | ||
21 | #include <linux/dmaengine.h> | ||
22 | #include <linux/dma-mapping.h> | ||
23 | #include <linux/prefetch.h> | ||
24 | #include "../dmaengine.h" | ||
25 | #include "registers.h" | ||
26 | #include "hw.h" | ||
27 | #include "dma.h" | ||
28 | |||
29 | /* provide a lookup table for setting the source address in the base or | ||
30 | * extended descriptor of an xor or pq descriptor | ||
31 | */ | ||
32 | static const u8 xor_idx_to_desc = 0xe0; | ||
33 | static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; | ||
34 | static const u8 pq_idx_to_desc = 0xf8; | ||
35 | static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, | ||
36 | 2, 2, 2, 2, 2, 2, 2 }; | ||
37 | static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; | ||
38 | static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, | ||
39 | 0, 1, 2, 3, 4, 5, 6 }; | ||
40 | |||
41 | static void xor_set_src(struct ioat_raw_descriptor *descs[2], | ||
42 | dma_addr_t addr, u32 offset, int idx) | ||
43 | { | ||
44 | struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; | ||
45 | |||
46 | raw->field[xor_idx_to_field[idx]] = addr + offset; | ||
47 | } | ||
48 | |||
49 | static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx) | ||
50 | { | ||
51 | struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; | ||
52 | |||
53 | return raw->field[pq_idx_to_field[idx]]; | ||
54 | } | ||
55 | |||
56 | static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx) | ||
57 | { | ||
58 | struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; | ||
59 | |||
60 | return raw->field[pq16_idx_to_field[idx]]; | ||
61 | } | ||
62 | |||
63 | static void pq_set_src(struct ioat_raw_descriptor *descs[2], | ||
64 | dma_addr_t addr, u32 offset, u8 coef, int idx) | ||
65 | { | ||
66 | struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0]; | ||
67 | struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; | ||
68 | |||
69 | raw->field[pq_idx_to_field[idx]] = addr + offset; | ||
70 | pq->coef[idx] = coef; | ||
71 | } | ||
72 | |||
73 | static void pq16_set_src(struct ioat_raw_descriptor *desc[3], | ||
74 | dma_addr_t addr, u32 offset, u8 coef, unsigned idx) | ||
75 | { | ||
76 | struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0]; | ||
77 | struct ioat_pq16a_descriptor *pq16 = | ||
78 | (struct ioat_pq16a_descriptor *)desc[1]; | ||
79 | struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; | ||
80 | |||
81 | raw->field[pq16_idx_to_field[idx]] = addr + offset; | ||
82 | |||
83 | if (idx < 8) | ||
84 | pq->coef[idx] = coef; | ||
85 | else | ||
86 | pq16->coef[idx - 8] = coef; | ||
87 | } | ||
88 | |||
89 | static struct ioat_sed_ent * | ||
90 | ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool) | ||
91 | { | ||
92 | struct ioat_sed_ent *sed; | ||
93 | gfp_t flags = __GFP_ZERO | GFP_ATOMIC; | ||
94 | |||
95 | sed = kmem_cache_alloc(ioat_sed_cache, flags); | ||
96 | if (!sed) | ||
97 | return NULL; | ||
98 | |||
99 | sed->hw_pool = hw_pool; | ||
100 | sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool], | ||
101 | flags, &sed->dma); | ||
102 | if (!sed->hw) { | ||
103 | kmem_cache_free(ioat_sed_cache, sed); | ||
104 | return NULL; | ||
105 | } | ||
106 | |||
107 | return sed; | ||
108 | } | ||
109 | |||
110 | struct dma_async_tx_descriptor * | ||
111 | ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, | ||
112 | dma_addr_t dma_src, size_t len, unsigned long flags) | ||
113 | { | ||
114 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
115 | struct ioat_dma_descriptor *hw; | ||
116 | struct ioat_ring_ent *desc; | ||
117 | dma_addr_t dst = dma_dest; | ||
118 | dma_addr_t src = dma_src; | ||
119 | size_t total_len = len; | ||
120 | int num_descs, idx, i; | ||
121 | |||
122 | num_descs = ioat_xferlen_to_descs(ioat_chan, len); | ||
123 | if (likely(num_descs) && | ||
124 | ioat_check_space_lock(ioat_chan, num_descs) == 0) | ||
125 | idx = ioat_chan->head; | ||
126 | else | ||
127 | return NULL; | ||
128 | i = 0; | ||
129 | do { | ||
130 | size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log); | ||
131 | |||
132 | desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
133 | hw = desc->hw; | ||
134 | |||
135 | hw->size = copy; | ||
136 | hw->ctl = 0; | ||
137 | hw->src_addr = src; | ||
138 | hw->dst_addr = dst; | ||
139 | |||
140 | len -= copy; | ||
141 | dst += copy; | ||
142 | src += copy; | ||
143 | dump_desc_dbg(ioat_chan, desc); | ||
144 | } while (++i < num_descs); | ||
145 | |||
146 | desc->txd.flags = flags; | ||
147 | desc->len = total_len; | ||
148 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
149 | hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
150 | hw->ctl_f.compl_write = 1; | ||
151 | dump_desc_dbg(ioat_chan, desc); | ||
152 | /* we leave the channel locked to ensure in order submission */ | ||
153 | |||
154 | return &desc->txd; | ||
155 | } | ||
156 | |||
157 | |||
158 | static struct dma_async_tx_descriptor * | ||
159 | __ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, | ||
160 | dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, | ||
161 | size_t len, unsigned long flags) | ||
162 | { | ||
163 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
164 | struct ioat_ring_ent *compl_desc; | ||
165 | struct ioat_ring_ent *desc; | ||
166 | struct ioat_ring_ent *ext; | ||
167 | size_t total_len = len; | ||
168 | struct ioat_xor_descriptor *xor; | ||
169 | struct ioat_xor_ext_descriptor *xor_ex = NULL; | ||
170 | struct ioat_dma_descriptor *hw; | ||
171 | int num_descs, with_ext, idx, i; | ||
172 | u32 offset = 0; | ||
173 | u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; | ||
174 | |||
175 | BUG_ON(src_cnt < 2); | ||
176 | |||
177 | num_descs = ioat_xferlen_to_descs(ioat_chan, len); | ||
178 | /* we need 2x the number of descriptors to cover greater than 5 | ||
179 | * sources | ||
180 | */ | ||
181 | if (src_cnt > 5) { | ||
182 | with_ext = 1; | ||
183 | num_descs *= 2; | ||
184 | } else | ||
185 | with_ext = 0; | ||
186 | |||
187 | /* completion writes from the raid engine may pass completion | ||
188 | * writes from the legacy engine, so we need one extra null | ||
189 | * (legacy) descriptor to ensure all completion writes arrive in | ||
190 | * order. | ||
191 | */ | ||
192 | if (likely(num_descs) && | ||
193 | ioat_check_space_lock(ioat_chan, num_descs+1) == 0) | ||
194 | idx = ioat_chan->head; | ||
195 | else | ||
196 | return NULL; | ||
197 | i = 0; | ||
198 | do { | ||
199 | struct ioat_raw_descriptor *descs[2]; | ||
200 | size_t xfer_size = min_t(size_t, | ||
201 | len, 1 << ioat_chan->xfercap_log); | ||
202 | int s; | ||
203 | |||
204 | desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
205 | xor = desc->xor; | ||
206 | |||
207 | /* save a branch by unconditionally retrieving the | ||
208 | * extended descriptor xor_set_src() knows to not write | ||
209 | * to it in the single descriptor case | ||
210 | */ | ||
211 | ext = ioat_get_ring_ent(ioat_chan, idx + i + 1); | ||
212 | xor_ex = ext->xor_ex; | ||
213 | |||
214 | descs[0] = (struct ioat_raw_descriptor *) xor; | ||
215 | descs[1] = (struct ioat_raw_descriptor *) xor_ex; | ||
216 | for (s = 0; s < src_cnt; s++) | ||
217 | xor_set_src(descs, src[s], offset, s); | ||
218 | xor->size = xfer_size; | ||
219 | xor->dst_addr = dest + offset; | ||
220 | xor->ctl = 0; | ||
221 | xor->ctl_f.op = op; | ||
222 | xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt); | ||
223 | |||
224 | len -= xfer_size; | ||
225 | offset += xfer_size; | ||
226 | dump_desc_dbg(ioat_chan, desc); | ||
227 | } while ((i += 1 + with_ext) < num_descs); | ||
228 | |||
229 | /* last xor descriptor carries the unmap parameters and fence bit */ | ||
230 | desc->txd.flags = flags; | ||
231 | desc->len = total_len; | ||
232 | if (result) | ||
233 | desc->result = result; | ||
234 | xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
235 | |||
236 | /* completion descriptor carries interrupt bit */ | ||
237 | compl_desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
238 | compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; | ||
239 | hw = compl_desc->hw; | ||
240 | hw->ctl = 0; | ||
241 | hw->ctl_f.null = 1; | ||
242 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
243 | hw->ctl_f.compl_write = 1; | ||
244 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
245 | dump_desc_dbg(ioat_chan, compl_desc); | ||
246 | |||
247 | /* we leave the channel locked to ensure in order submission */ | ||
248 | return &compl_desc->txd; | ||
249 | } | ||
250 | |||
251 | struct dma_async_tx_descriptor * | ||
252 | ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | ||
253 | unsigned int src_cnt, size_t len, unsigned long flags) | ||
254 | { | ||
255 | return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); | ||
256 | } | ||
257 | |||
258 | struct dma_async_tx_descriptor * | ||
259 | ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, | ||
260 | unsigned int src_cnt, size_t len, | ||
261 | enum sum_check_flags *result, unsigned long flags) | ||
262 | { | ||
263 | /* the cleanup routine only sets bits on validate failure, it | ||
264 | * does not clear bits on validate success... so clear it here | ||
265 | */ | ||
266 | *result = 0; | ||
267 | |||
268 | return __ioat_prep_xor_lock(chan, result, src[0], &src[1], | ||
269 | src_cnt - 1, len, flags); | ||
270 | } | ||
271 | |||
272 | static void | ||
273 | dump_pq_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc, | ||
274 | struct ioat_ring_ent *ext) | ||
275 | { | ||
276 | struct device *dev = to_dev(ioat_chan); | ||
277 | struct ioat_pq_descriptor *pq = desc->pq; | ||
278 | struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL; | ||
279 | struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex }; | ||
280 | int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); | ||
281 | int i; | ||
282 | |||
283 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" | ||
284 | " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" | ||
285 | " src_cnt: %d)\n", | ||
286 | desc_id(desc), (unsigned long long) desc->txd.phys, | ||
287 | (unsigned long long) (pq_ex ? pq_ex->next : pq->next), | ||
288 | desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, | ||
289 | pq->ctl_f.int_en, pq->ctl_f.compl_write, | ||
290 | pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", | ||
291 | pq->ctl_f.src_cnt); | ||
292 | for (i = 0; i < src_cnt; i++) | ||
293 | dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, | ||
294 | (unsigned long long) pq_get_src(descs, i), pq->coef[i]); | ||
295 | dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); | ||
296 | dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); | ||
297 | dev_dbg(dev, "\tNEXT: %#llx\n", pq->next); | ||
298 | } | ||
299 | |||
300 | static void dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan, | ||
301 | struct ioat_ring_ent *desc) | ||
302 | { | ||
303 | struct device *dev = to_dev(ioat_chan); | ||
304 | struct ioat_pq_descriptor *pq = desc->pq; | ||
305 | struct ioat_raw_descriptor *descs[] = { (void *)pq, | ||
306 | (void *)pq, | ||
307 | (void *)pq }; | ||
308 | int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); | ||
309 | int i; | ||
310 | |||
311 | if (desc->sed) { | ||
312 | descs[1] = (void *)desc->sed->hw; | ||
313 | descs[2] = (void *)desc->sed->hw + 64; | ||
314 | } | ||
315 | |||
316 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" | ||
317 | " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" | ||
318 | " src_cnt: %d)\n", | ||
319 | desc_id(desc), (unsigned long long) desc->txd.phys, | ||
320 | (unsigned long long) pq->next, | ||
321 | desc->txd.flags, pq->size, pq->ctl, | ||
322 | pq->ctl_f.op, pq->ctl_f.int_en, | ||
323 | pq->ctl_f.compl_write, | ||
324 | pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", | ||
325 | pq->ctl_f.src_cnt); | ||
326 | for (i = 0; i < src_cnt; i++) { | ||
327 | dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, | ||
328 | (unsigned long long) pq16_get_src(descs, i), | ||
329 | pq->coef[i]); | ||
330 | } | ||
331 | dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); | ||
332 | dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); | ||
333 | } | ||
334 | |||
335 | static struct dma_async_tx_descriptor * | ||
336 | __ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | ||
337 | const dma_addr_t *dst, const dma_addr_t *src, | ||
338 | unsigned int src_cnt, const unsigned char *scf, | ||
339 | size_t len, unsigned long flags) | ||
340 | { | ||
341 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
342 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | ||
343 | struct ioat_ring_ent *compl_desc; | ||
344 | struct ioat_ring_ent *desc; | ||
345 | struct ioat_ring_ent *ext; | ||
346 | size_t total_len = len; | ||
347 | struct ioat_pq_descriptor *pq; | ||
348 | struct ioat_pq_ext_descriptor *pq_ex = NULL; | ||
349 | struct ioat_dma_descriptor *hw; | ||
350 | u32 offset = 0; | ||
351 | u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; | ||
352 | int i, s, idx, with_ext, num_descs; | ||
353 | int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0; | ||
354 | |||
355 | dev_dbg(to_dev(ioat_chan), "%s\n", __func__); | ||
356 | /* the engine requires at least two sources (we provide | ||
357 | * at least 1 implied source in the DMA_PREP_CONTINUE case) | ||
358 | */ | ||
359 | BUG_ON(src_cnt + dmaf_continue(flags) < 2); | ||
360 | |||
361 | num_descs = ioat_xferlen_to_descs(ioat_chan, len); | ||
362 | /* we need 2x the number of descriptors to cover greater than 3 | ||
363 | * sources (we need 1 extra source in the q-only continuation | ||
364 | * case and 3 extra sources in the p+q continuation case). | ||
365 | */ | ||
366 | if (src_cnt + dmaf_p_disabled_continue(flags) > 3 || | ||
367 | (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) { | ||
368 | with_ext = 1; | ||
369 | num_descs *= 2; | ||
370 | } else | ||
371 | with_ext = 0; | ||
372 | |||
373 | /* completion writes from the raid engine may pass completion | ||
374 | * writes from the legacy engine, so we need one extra null | ||
375 | * (legacy) descriptor to ensure all completion writes arrive in | ||
376 | * order. | ||
377 | */ | ||
378 | if (likely(num_descs) && | ||
379 | ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0) | ||
380 | idx = ioat_chan->head; | ||
381 | else | ||
382 | return NULL; | ||
383 | i = 0; | ||
384 | do { | ||
385 | struct ioat_raw_descriptor *descs[2]; | ||
386 | size_t xfer_size = min_t(size_t, len, | ||
387 | 1 << ioat_chan->xfercap_log); | ||
388 | |||
389 | desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
390 | pq = desc->pq; | ||
391 | |||
392 | /* save a branch by unconditionally retrieving the | ||
393 | * extended descriptor; pq_set_src() knows not to write | ||
394 | * to it in the single descriptor case | ||
395 | */ | ||
396 | ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext); | ||
397 | pq_ex = ext->pq_ex; | ||
398 | |||
399 | descs[0] = (struct ioat_raw_descriptor *) pq; | ||
400 | descs[1] = (struct ioat_raw_descriptor *) pq_ex; | ||
401 | |||
402 | for (s = 0; s < src_cnt; s++) | ||
403 | pq_set_src(descs, src[s], offset, scf[s], s); | ||
404 | |||
405 | /* see the comment for dma_maxpq in include/linux/dmaengine.h */ | ||
406 | if (dmaf_p_disabled_continue(flags)) | ||
407 | pq_set_src(descs, dst[1], offset, 1, s++); | ||
408 | else if (dmaf_continue(flags)) { | ||
409 | pq_set_src(descs, dst[0], offset, 0, s++); | ||
410 | pq_set_src(descs, dst[1], offset, 1, s++); | ||
411 | pq_set_src(descs, dst[1], offset, 0, s++); | ||
412 | } | ||
413 | pq->size = xfer_size; | ||
414 | pq->p_addr = dst[0] + offset; | ||
415 | pq->q_addr = dst[1] + offset; | ||
416 | pq->ctl = 0; | ||
417 | pq->ctl_f.op = op; | ||
418 | /* we turn on descriptor write back error status */ | ||
419 | if (ioat_dma->cap & IOAT_CAP_DWBES) | ||
420 | pq->ctl_f.wb_en = result ? 1 : 0; | ||
421 | pq->ctl_f.src_cnt = src_cnt_to_hw(s); | ||
422 | pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); | ||
423 | pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); | ||
424 | |||
425 | len -= xfer_size; | ||
426 | offset += xfer_size; | ||
427 | } while ((i += 1 + with_ext) < num_descs); | ||
428 | |||
429 | /* last pq descriptor carries the unmap parameters and fence bit */ | ||
430 | desc->txd.flags = flags; | ||
431 | desc->len = total_len; | ||
432 | if (result) | ||
433 | desc->result = result; | ||
434 | pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
435 | dump_pq_desc_dbg(ioat_chan, desc, ext); | ||
436 | |||
437 | if (!cb32) { | ||
438 | pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
439 | pq->ctl_f.compl_write = 1; | ||
440 | compl_desc = desc; | ||
441 | } else { | ||
442 | /* completion descriptor carries interrupt bit */ | ||
443 | compl_desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
444 | compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; | ||
445 | hw = compl_desc->hw; | ||
446 | hw->ctl = 0; | ||
447 | hw->ctl_f.null = 1; | ||
448 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
449 | hw->ctl_f.compl_write = 1; | ||
450 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
451 | dump_desc_dbg(ioat_chan, compl_desc); | ||
452 | } | ||
453 | |||
454 | |||
455 | /* we leave the channel locked to ensure in order submission */ | ||
456 | return &compl_desc->txd; | ||
457 | } | ||
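
Aside, not part of the patch: the space reservation in the pq prep above combines two independent adjustments — more than three effective sources, or a p+q continuation, force an interleaved extended descriptor and double the slot count, and pre-3.3 hardware (the cb32 case) needs one extra null descriptor to carry the completion write. A minimal standalone sketch of that arithmetic; the helper name and boolean parameters are illustrative stand-ins for the flag tests in the code, assuming ioat_xferlen_to_descs() rounds the length up to whole xfercap-sized chunks.

#include <stdbool.h>
#include <stddef.h>

/*
 * Sketch only: ring slots reserved by the 8-source pq prep above.
 * q_continue stands in for dmaf_p_disabled_continue() (continuation with
 * P disabled), pq_continue for dmaf_continue(), and cb32 mirrors the
 * "hardware older than 3.3" test that forces a null completion descriptor.
 */
size_t pq_ring_slots(size_t len, unsigned int xfercap_log,
		     unsigned int src_cnt, bool q_continue, bool pq_continue,
		     bool cb32)
{
	size_t chunk = (size_t)1 << xfercap_log;
	size_t num_descs = (len + chunk - 1) / chunk;	/* round up to chunks */
	bool with_ext = (src_cnt + (q_continue ? 1 : 0) > 3) ||
			(pq_continue && !q_continue);

	if (with_ext)				/* extended descriptor per chunk */
		num_descs *= 2;

	return num_descs + (cb32 ? 1 : 0);	/* null completion descriptor */
}

For example, an eight-source operation on pre-3.3 hardware over a single chunk reserves 2 * 1 + 1 = 3 slots.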
458 | |||
459 | static struct dma_async_tx_descriptor * | ||
460 | __ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, | ||
461 | const dma_addr_t *dst, const dma_addr_t *src, | ||
462 | unsigned int src_cnt, const unsigned char *scf, | ||
463 | size_t len, unsigned long flags) | ||
464 | { | ||
465 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
466 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | ||
467 | struct ioat_ring_ent *desc; | ||
468 | size_t total_len = len; | ||
469 | struct ioat_pq_descriptor *pq; | ||
470 | u32 offset = 0; | ||
471 | u8 op; | ||
472 | int i, s, idx, num_descs; | ||
473 | |||
474 | /* this function is only called with 9-16 sources */ | ||
475 | op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; | ||
476 | |||
477 | dev_dbg(to_dev(ioat_chan), "%s\n", __func__); | ||
478 | |||
479 | num_descs = ioat_xferlen_to_descs(ioat_chan, len); | ||
480 | |||
481 | /* | ||
482 | * 16 source pq is only available on cb3.3 and has no completion | ||
483 | * write hw bug. | ||
484 | */ | ||
485 | if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0) | ||
486 | idx = ioat_chan->head; | ||
487 | else | ||
488 | return NULL; | ||
489 | |||
490 | i = 0; | ||
491 | |||
492 | do { | ||
493 | struct ioat_raw_descriptor *descs[4]; | ||
494 | size_t xfer_size = min_t(size_t, len, | ||
495 | 1 << ioat_chan->xfercap_log); | ||
496 | |||
497 | desc = ioat_get_ring_ent(ioat_chan, idx + i); | ||
498 | pq = desc->pq; | ||
499 | |||
500 | descs[0] = (struct ioat_raw_descriptor *) pq; | ||
501 | |||
502 | desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3); | ||
503 | if (!desc->sed) { | ||
504 | dev_err(to_dev(ioat_chan), | ||
505 | "%s: no free sed entries\n", __func__); | ||
506 | return NULL; | ||
507 | } | ||
508 | |||
509 | pq->sed_addr = desc->sed->dma; | ||
510 | desc->sed->parent = desc; | ||
511 | |||
512 | descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw; | ||
513 | descs[2] = (void *)descs[1] + 64; | ||
514 | |||
515 | for (s = 0; s < src_cnt; s++) | ||
516 | pq16_set_src(descs, src[s], offset, scf[s], s); | ||
517 | |||
518 | /* see the comment for dma_maxpq in include/linux/dmaengine.h */ | ||
519 | if (dmaf_p_disabled_continue(flags)) | ||
520 | pq16_set_src(descs, dst[1], offset, 1, s++); | ||
521 | else if (dmaf_continue(flags)) { | ||
522 | pq16_set_src(descs, dst[0], offset, 0, s++); | ||
523 | pq16_set_src(descs, dst[1], offset, 1, s++); | ||
524 | pq16_set_src(descs, dst[1], offset, 0, s++); | ||
525 | } | ||
526 | |||
527 | pq->size = xfer_size; | ||
528 | pq->p_addr = dst[0] + offset; | ||
529 | pq->q_addr = dst[1] + offset; | ||
530 | pq->ctl = 0; | ||
531 | pq->ctl_f.op = op; | ||
532 | pq->ctl_f.src_cnt = src16_cnt_to_hw(s); | ||
533 | /* we turn on descriptor write back error status */ | ||
534 | if (ioat_dma->cap & IOAT_CAP_DWBES) | ||
535 | pq->ctl_f.wb_en = result ? 1 : 0; | ||
536 | pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); | ||
537 | pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); | ||
538 | |||
539 | len -= xfer_size; | ||
540 | offset += xfer_size; | ||
541 | } while (++i < num_descs); | ||
542 | |||
543 | /* last pq descriptor carries the unmap parameters and fence bit */ | ||
544 | desc->txd.flags = flags; | ||
545 | desc->len = total_len; | ||
546 | if (result) | ||
547 | desc->result = result; | ||
548 | pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
549 | |||
550 | /* with cb3.3 we should be able to do completion w/o a null desc */ | ||
551 | pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
552 | pq->ctl_f.compl_write = 1; | ||
553 | |||
554 | dump_pq16_desc_dbg(ioat_chan, desc); | ||
555 | |||
556 | /* we leave the channel locked to ensure in order submission */ | ||
557 | return &desc->txd; | ||
558 | } | ||
559 | |||
560 | static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) | ||
561 | { | ||
562 | if (dmaf_p_disabled_continue(flags)) | ||
563 | return src_cnt + 1; | ||
564 | else if (dmaf_continue(flags)) | ||
565 | return src_cnt + 3; | ||
566 | else | ||
567 | return src_cnt; | ||
568 | } | ||
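
Aside, not part of the patch: src_cnt_flags() is the effective source count the wrappers below compare against 8 to pick between the legacy 8-source descriptor format and the 16-source cb3.3 format — a q-only continuation adds one implied source and a p+q continuation adds three (the prior P and Q results are fed back in as extra sources, as the continuation branches above show). A minimal standalone sketch of that dispatch; the names are illustrative.

#include <stdbool.h>

enum pq_path { PQ_PATH_8_SOURCE, PQ_PATH_16_SOURCE };

/*
 * Sketch only: mirror of "src_cnt_flags(src_cnt, flags) > 8" as used by
 * ioat_prep_pq() and friends.  q_continue stands in for
 * dmaf_p_disabled_continue(), pq_continue for dmaf_continue().
 */
enum pq_path pick_pq_path(unsigned int src_cnt, bool q_continue, bool pq_continue)
{
	unsigned int effective = src_cnt;

	if (q_continue)
		effective += 1;		/* one implied source */
	else if (pq_continue)
		effective += 3;		/* three implied sources (prior P and Q fed back) */

	return effective > 8 ? PQ_PATH_16_SOURCE : PQ_PATH_8_SOURCE;
}

So pick_pq_path(8, false, true) already lands on the 16-source path even though only eight explicit sources were supplied.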
569 | |||
570 | struct dma_async_tx_descriptor * | ||
571 | ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | ||
572 | unsigned int src_cnt, const unsigned char *scf, size_t len, | ||
573 | unsigned long flags) | ||
574 | { | ||
575 | /* specify valid address for disabled result */ | ||
576 | if (flags & DMA_PREP_PQ_DISABLE_P) | ||
577 | dst[0] = dst[1]; | ||
578 | if (flags & DMA_PREP_PQ_DISABLE_Q) | ||
579 | dst[1] = dst[0]; | ||
580 | |||
581 | /* handle the single source multiply case from the raid6 | ||
582 | * recovery path | ||
583 | */ | ||
584 | if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) { | ||
585 | dma_addr_t single_source[2]; | ||
586 | unsigned char single_source_coef[2]; | ||
587 | |||
588 | BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q); | ||
589 | single_source[0] = src[0]; | ||
590 | single_source[1] = src[0]; | ||
591 | single_source_coef[0] = scf[0]; | ||
592 | single_source_coef[1] = 0; | ||
593 | |||
594 | return src_cnt_flags(src_cnt, flags) > 8 ? | ||
595 | __ioat_prep_pq16_lock(chan, NULL, dst, single_source, | ||
596 | 2, single_source_coef, len, | ||
597 | flags) : | ||
598 | __ioat_prep_pq_lock(chan, NULL, dst, single_source, 2, | ||
599 | single_source_coef, len, flags); | ||
600 | |||
601 | } else { | ||
602 | return src_cnt_flags(src_cnt, flags) > 8 ? | ||
603 | __ioat_prep_pq16_lock(chan, NULL, dst, src, src_cnt, | ||
604 | scf, len, flags) : | ||
605 | __ioat_prep_pq_lock(chan, NULL, dst, src, src_cnt, | ||
606 | scf, len, flags); | ||
607 | } | ||
608 | } | ||
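
Aside, not part of the patch: the single-source branch above satisfies the engine's two-source minimum by passing src[0] twice, the second copy with coefficient 0. In the RAID-6 Q computation each source contributes its data multiplied by its coefficient in GF(2^8) and the contributions are XORed together, so the zero-coefficient copy adds nothing and the operation is still the plain single multiply the raid6 recovery path asked for. A standalone sketch of that identity, assuming the usual RAID-6 field polynomial 0x11d (the driver's field arithmetic itself is done by the hardware):

#include <stdint.h>
#include <assert.h>

/* GF(2^8) multiply, reducing by x^8 + x^4 + x^3 + x^2 + 1 (0x11d). */
static uint8_t gfmul(uint8_t a, uint8_t b)
{
	uint8_t p = 0;

	while (b) {
		if (b & 1)
			p ^= a;
		b >>= 1;
		a = (uint8_t)(a << 1) ^ ((a & 0x80) ? 0x1d : 0);
	}
	return p;
}

int main(void)
{
	uint8_t data = 0xd7, coef = 0x35;

	/* Q over two sources: coef*data ^ 0*data == coef*data, so the
	 * padded second source with coefficient 0 is harmless. */
	assert((gfmul(data, coef) ^ gfmul(data, 0)) == gfmul(data, coef));
	return 0;
}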
609 | |||
610 | struct dma_async_tx_descriptor * | ||
611 | ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | ||
612 | unsigned int src_cnt, const unsigned char *scf, size_t len, | ||
613 | enum sum_check_flags *pqres, unsigned long flags) | ||
614 | { | ||
615 | /* specify valid address for disabled result */ | ||
616 | if (flags & DMA_PREP_PQ_DISABLE_P) | ||
617 | pq[0] = pq[1]; | ||
618 | if (flags & DMA_PREP_PQ_DISABLE_Q) | ||
619 | pq[1] = pq[0]; | ||
620 | |||
621 | /* the cleanup routine only sets bits on validate failure, it | ||
622 | * does not clear bits on validate success... so clear it here | ||
623 | */ | ||
624 | *pqres = 0; | ||
625 | |||
626 | return src_cnt_flags(src_cnt, flags) > 8 ? | ||
627 | __ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, | ||
628 | flags) : | ||
629 | __ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, | ||
630 | flags); | ||
631 | } | ||
632 | |||
633 | struct dma_async_tx_descriptor * | ||
634 | ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | ||
635 | unsigned int src_cnt, size_t len, unsigned long flags) | ||
636 | { | ||
637 | unsigned char scf[src_cnt]; | ||
638 | dma_addr_t pq[2]; | ||
639 | |||
640 | memset(scf, 0, src_cnt); | ||
641 | pq[0] = dst; | ||
642 | flags |= DMA_PREP_PQ_DISABLE_Q; | ||
643 | pq[1] = dst; /* specify valid address for disabled result */ | ||
644 | |||
645 | return src_cnt_flags(src_cnt, flags) > 8 ? | ||
646 | __ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, | ||
647 | flags) : | ||
648 | __ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, | ||
649 | flags); | ||
650 | } | ||
651 | |||
652 | struct dma_async_tx_descriptor * | ||
653 | ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | ||
654 | unsigned int src_cnt, size_t len, | ||
655 | enum sum_check_flags *result, unsigned long flags) | ||
656 | { | ||
657 | unsigned char scf[src_cnt]; | ||
658 | dma_addr_t pq[2]; | ||
659 | |||
660 | /* the cleanup routine only sets bits on validate failure, it | ||
661 | * does not clear bits on validate success... so clear it here | ||
662 | */ | ||
663 | *result = 0; | ||
664 | |||
665 | memset(scf, 0, src_cnt); | ||
666 | pq[0] = src[0]; | ||
667 | flags |= DMA_PREP_PQ_DISABLE_Q; | ||
668 | pq[1] = pq[0]; /* specify valid address for disabled result */ | ||
669 | |||
670 | return src_cnt_flags(src_cnt, flags) > 8 ? | ||
671 | __ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, | ||
672 | scf, len, flags) : | ||
673 | __ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, | ||
674 | scf, len, flags); | ||
675 | } | ||
676 | |||
677 | struct dma_async_tx_descriptor * | ||
678 | ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) | ||
679 | { | ||
680 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
681 | struct ioat_ring_ent *desc; | ||
682 | struct ioat_dma_descriptor *hw; | ||
683 | |||
684 | if (ioat_check_space_lock(ioat_chan, 1) == 0) | ||
685 | desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head); | ||
686 | else | ||
687 | return NULL; | ||
688 | |||
689 | hw = desc->hw; | ||
690 | hw->ctl = 0; | ||
691 | hw->ctl_f.null = 1; | ||
692 | hw->ctl_f.int_en = 1; | ||
693 | hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
694 | hw->ctl_f.compl_write = 1; | ||
695 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
696 | hw->src_addr = 0; | ||
697 | hw->dst_addr = 0; | ||
698 | |||
699 | desc->txd.flags = flags; | ||
700 | desc->len = 1; | ||
701 | |||
702 | dump_desc_dbg(ioat_chan, desc); | ||
703 | |||
704 | /* we leave the channel locked to ensure in order submission */ | ||
705 | return &desc->txd; | ||
706 | } | ||
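
Aside, not part of the patch: every prep routine in this file ends with the same translation of prep flags into descriptor control bits — in the data-moving preps DMA_PREP_INTERRUPT becomes int_en (the interrupt prep above forces it on), DMA_PREP_FENCE becomes fence, and whichever descriptor finishes the operation gets compl_write so the channel's completion record is updated. A minimal standalone sketch of that mapping; the flag values and struct are illustrative placeholders, not the hardware layout.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the dmaengine prep flags tested above. */
#define EXAMPLE_PREP_INTERRUPT	(1u << 0)
#define EXAMPLE_PREP_FENCE	(1u << 1)

/* Sketch only: control bits set on the descriptor that completes an op. */
struct completion_ctl {
	bool int_en;		/* raise an interrupt when the descriptor completes */
	bool fence;		/* later operations must see this result first */
	bool compl_write;	/* update the channel completion record */
};

static struct completion_ctl ctl_from_flags(uint32_t flags)
{
	struct completion_ctl ctl = {
		.int_en      = (flags & EXAMPLE_PREP_INTERRUPT) != 0,
		.fence       = (flags & EXAMPLE_PREP_FENCE) != 0,
		.compl_write = true,	/* always set on the completing descriptor */
	};

	return ctl;
}

int main(void)
{
	struct completion_ctl ctl = ctl_from_flags(EXAMPLE_PREP_INTERRUPT);

	return ctl.int_en && ctl.compl_write && !ctl.fence ? 0 : 1;
}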
707 | |||