Diffstat (limited to 'drivers/dma/ioat_dma.c')
-rw-r--r--  drivers/dma/ioat_dma.c  578
1 file changed, 476 insertions(+), 102 deletions(-)
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 7e4a785c2dff..c1c2dcc6fc2e 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -36,18 +36,24 @@
 #include "ioatdma_registers.h"
 #include "ioatdma_hw.h"
 
-#define INITIAL_IOAT_DESC_COUNT 128
-
 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
 
+static int ioat_pending_level = 4;
+module_param(ioat_pending_level, int, 0644);
+MODULE_PARM_DESC(ioat_pending_level,
+		 "high-water mark for pushing ioat descriptors (default: 4)");
+
 /* internal functions */
 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
+
+static struct ioat_desc_sw *
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
 static struct ioat_desc_sw *
-ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
 
 static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
 						struct ioatdma_device *device,
@@ -130,6 +136,12 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 		ioat_chan->device = device;
 		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
 		ioat_chan->xfercap = xfercap;
+		ioat_chan->desccount = 0;
+		if (ioat_chan->device->version != IOAT_VER_1_2) {
+			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
+					| IOAT_DMA_DCA_ANY_CPU,
+				ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+		}
 		spin_lock_init(&ioat_chan->cleanup_lock);
 		spin_lock_init(&ioat_chan->desc_lock);
 		INIT_LIST_HEAD(&ioat_chan->free_desc);
@@ -161,13 +173,17 @@ static void ioat_set_dest(dma_addr_t addr,
 	tx_to_ioat_desc(tx)->dst = addr;
 }
 
-static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
+static inline void __ioat1_dma_memcpy_issue_pending(
+						struct ioat_dma_chan *ioat_chan);
+static inline void __ioat2_dma_memcpy_issue_pending(
+						struct ioat_dma_chan *ioat_chan);
+
+static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
 	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
 	struct ioat_desc_sw *prev, *new;
 	struct ioat_dma_descriptor *hw;
-	int append = 0;
 	dma_cookie_t cookie;
 	LIST_HEAD(new_chain);
 	u32 copy;
@@ -209,7 +225,7 @@ static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
 		list_add_tail(&new->node, &new_chain);
 		desc_count++;
 		prev = new;
-	} while (len && (new = ioat_dma_get_next_descriptor(ioat_chan)));
+	} while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
 
 	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
 	if (new->async_tx.callback) {
@@ -246,20 +262,98 @@ static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
 							first->async_tx.phys;
 	__list_splice(&new_chain, ioat_chan->used_desc.prev);
 
+	ioat_chan->dmacount += desc_count;
 	ioat_chan->pending += desc_count;
-	if (ioat_chan->pending >= 4) {
-		append = 1;
-		ioat_chan->pending = 0;
-	}
+	if (ioat_chan->pending >= ioat_pending_level)
+		__ioat1_dma_memcpy_issue_pending(ioat_chan);
 	spin_unlock_bh(&ioat_chan->desc_lock);
 
-	if (append)
-		writeb(IOAT_CHANCMD_APPEND,
-		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+	return cookie;
+}
+
+static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
+	struct ioat_desc_sw *new;
+	struct ioat_dma_descriptor *hw;
+	dma_cookie_t cookie;
+	u32 copy;
+	size_t len;
+	dma_addr_t src, dst;
+	int orig_ack;
+	unsigned int desc_count = 0;
+
+	/* src and dest and len are stored in the initial descriptor */
+	len = first->len;
+	src = first->src;
+	dst = first->dst;
+	orig_ack = first->async_tx.ack;
+	new = first;
+
+	/* ioat_chan->desc_lock is still in force in version 2 path */
+
+	do {
+		copy = min((u32) len, ioat_chan->xfercap);
+
+		new->async_tx.ack = 1;
+
+		hw = new->hw;
+		hw->size = copy;
+		hw->ctl = 0;
+		hw->src_addr = src;
+		hw->dst_addr = dst;
+
+		len -= copy;
+		dst += copy;
+		src += copy;
+		desc_count++;
+	} while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
+
+	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
+	if (new->async_tx.callback) {
+		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
+		if (first != new) {
+			/* move callback into to last desc */
+			new->async_tx.callback = first->async_tx.callback;
+			new->async_tx.callback_param
+					= first->async_tx.callback_param;
+			first->async_tx.callback = NULL;
+			first->async_tx.callback_param = NULL;
+		}
+	}
+
+	new->tx_cnt = desc_count;
+	new->async_tx.ack = orig_ack; /* client is in control of this ack */
+
+	/* store the original values for use in later cleanup */
+	if (new != first) {
+		new->src = first->src;
+		new->dst = first->dst;
+		new->len = first->len;
+	}
+
+	/* cookie incr and addition to used_list must be atomic */
+	cookie = ioat_chan->common.cookie;
+	cookie++;
+	if (cookie < 0)
+		cookie = 1;
+	ioat_chan->common.cookie = new->async_tx.cookie = cookie;
+
+	ioat_chan->dmacount += desc_count;
+	ioat_chan->pending += desc_count;
+	if (ioat_chan->pending >= ioat_pending_level)
+		__ioat2_dma_memcpy_issue_pending(ioat_chan);
+	spin_unlock_bh(&ioat_chan->desc_lock);
 
 	return cookie;
 }
 
+/**
+ * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
+ * @ioat_chan: the channel supplying the memory pool for the descriptors
+ * @flags: allocation flags
+ */
 static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 					struct ioat_dma_chan *ioat_chan,
 					gfp_t flags)
@@ -284,15 +378,57 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
 	desc_sw->async_tx.tx_set_src = ioat_set_src;
 	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
-	desc_sw->async_tx.tx_submit = ioat_tx_submit;
+	switch (ioat_chan->device->version) {
+	case IOAT_VER_1_2:
+		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
+		break;
+	case IOAT_VER_2_0:
+		desc_sw->async_tx.tx_submit = ioat2_tx_submit;
+		break;
+	}
 	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
+
 	desc_sw->hw = desc;
 	desc_sw->async_tx.phys = phys;
 
 	return desc_sw;
 }
 
-/* returns the actual number of allocated descriptors */
+static int ioat_initial_desc_count = 256;
+module_param(ioat_initial_desc_count, int, 0644);
+MODULE_PARM_DESC(ioat_initial_desc_count,
+		 "initial descriptors per channel (default: 256)");
+
+/**
+ * ioat2_dma_massage_chan_desc - link the descriptors into a circle
+ * @ioat_chan: the channel to be massaged
+ */
+static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
+{
+	struct ioat_desc_sw *desc, *_desc;
+
+	/* setup used_desc */
+	ioat_chan->used_desc.next = ioat_chan->free_desc.next;
+	ioat_chan->used_desc.prev = NULL;
+
+	/* pull free_desc out of the circle so that every node is a hw
+	 * descriptor, but leave it pointing to the list
+	 */
+	ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
+	ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
+
+	/* circle link the hw descriptors */
+	desc = to_ioat_desc(ioat_chan->free_desc.next);
+	desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+	list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
+		desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+	}
+}
+
+/**
+ * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
+ * @chan: the channel to be filled out
+ */
 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
@@ -304,7 +440,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 
 	/* have we already been set up? */
 	if (!list_empty(&ioat_chan->free_desc))
-		return INITIAL_IOAT_DESC_COUNT;
+		return ioat_chan->desccount;
 
 	/* Setup register to interrupt and write completion status on error */
 	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
@@ -320,7 +456,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 	}
 
 	/* Allocate descriptors */
-	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
+	for (i = 0; i < ioat_initial_desc_count; i++) {
 		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
 		if (!desc) {
 			dev_err(&ioat_chan->device->pdev->dev,
@@ -330,7 +466,10 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 		list_add_tail(&desc->node, &tmp_list);
 	}
 	spin_lock_bh(&ioat_chan->desc_lock);
+	ioat_chan->desccount = i;
 	list_splice(&tmp_list, &ioat_chan->free_desc);
+	if (ioat_chan->device->version != IOAT_VER_1_2)
+		ioat2_dma_massage_chan_desc(ioat_chan);
 	spin_unlock_bh(&ioat_chan->desc_lock);
 
 	/* allocate a completion writeback area */
@@ -347,10 +486,14 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
 	tasklet_enable(&ioat_chan->cleanup_task);
-	ioat_dma_start_null_desc(ioat_chan);
-	return i;
+	ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */
+	return ioat_chan->desccount;
 }
 
+/**
+ * ioat_dma_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+ */
 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
@@ -364,22 +507,45 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 	/* Delay 100ms after reset to allow internal DMA logic to quiesce
 	 * before removing DMA descriptor resources.
 	 */
-	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+	writeb(IOAT_CHANCMD_RESET,
+	       ioat_chan->reg_base
+	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
 	mdelay(100);
 
 	spin_lock_bh(&ioat_chan->desc_lock);
-	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
-		in_use_descs++;
-		list_del(&desc->node);
-		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-			      desc->async_tx.phys);
-		kfree(desc);
-	}
-	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
-		list_del(&desc->node);
+	switch (ioat_chan->device->version) {
+	case IOAT_VER_1_2:
+		list_for_each_entry_safe(desc, _desc,
+					 &ioat_chan->used_desc, node) {
+			in_use_descs++;
+			list_del(&desc->node);
+			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+				      desc->async_tx.phys);
+			kfree(desc);
+		}
+		list_for_each_entry_safe(desc, _desc,
+					 &ioat_chan->free_desc, node) {
+			list_del(&desc->node);
+			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+				      desc->async_tx.phys);
+			kfree(desc);
+		}
+		break;
+	case IOAT_VER_2_0:
+		list_for_each_entry_safe(desc, _desc,
+					 ioat_chan->free_desc.next, node) {
+			list_del(&desc->node);
+			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+				      desc->async_tx.phys);
+			kfree(desc);
+		}
+		desc = to_ioat_desc(ioat_chan->free_desc.next);
 		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
 			      desc->async_tx.phys);
 		kfree(desc);
+		INIT_LIST_HEAD(&ioat_chan->free_desc);
+		INIT_LIST_HEAD(&ioat_chan->used_desc);
+		break;
 	}
 	spin_unlock_bh(&ioat_chan->desc_lock);
 
@@ -395,6 +561,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 
 	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
 	ioat_chan->pending = 0;
+	ioat_chan->dmacount = 0;
 }
 
 /**
@@ -406,7 +573,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
  * has run out.
  */
 static struct ioat_desc_sw *
-ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 {
 	struct ioat_desc_sw *new = NULL;
 
@@ -425,7 +592,82 @@ ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 	return new;
 }
 
-static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
+static struct ioat_desc_sw *
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+{
+	struct ioat_desc_sw *new = NULL;
+
+	/*
+	 * used.prev points to where to start processing
+	 * used.next points to next free descriptor
+	 * if used.prev == NULL, there are none waiting to be processed
+	 * if used.next == used.prev.prev, there is only one free descriptor,
+	 * and we need to use it to as a noop descriptor before
+	 * linking in a new set of descriptors, since the device
+	 * has probably already read the pointer to it
+	 */
+	if (ioat_chan->used_desc.prev &&
+	    ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
+
+		struct ioat_desc_sw *desc = NULL;
+		struct ioat_desc_sw *noop_desc = NULL;
+		int i;
+
+		/* set up the noop descriptor */
+		noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
+		noop_desc->hw->size = 0;
+		noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
+		noop_desc->hw->src_addr = 0;
+		noop_desc->hw->dst_addr = 0;
+
+		ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
+		ioat_chan->pending++;
+		ioat_chan->dmacount++;
+
+		/* get a few more descriptors */
+		for (i = 16; i; i--) {
+			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
+			BUG_ON(!desc);
+			list_add_tail(&desc->node, ioat_chan->used_desc.next);
+
+			desc->hw->next
+				= to_ioat_desc(desc->node.next)->async_tx.phys;
+			to_ioat_desc(desc->node.prev)->hw->next
+				= desc->async_tx.phys;
+			ioat_chan->desccount++;
+		}
+
+		ioat_chan->used_desc.next = noop_desc->node.next;
+	}
+	new = to_ioat_desc(ioat_chan->used_desc.next);
+	prefetch(new);
+	ioat_chan->used_desc.next = new->node.next;
+
+	if (ioat_chan->used_desc.prev == NULL)
+		ioat_chan->used_desc.prev = &new->node;
+
+	prefetch(new->hw);
+	return new;
+}
+
+static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
+						struct ioat_dma_chan *ioat_chan)
+{
+	if (!ioat_chan)
+		return NULL;
+
+	switch (ioat_chan->device->version) {
+	case IOAT_VER_1_2:
+		return ioat1_dma_get_next_descriptor(ioat_chan);
+		break;
+	case IOAT_VER_2_0:
+		return ioat2_dma_get_next_descriptor(ioat_chan);
+		break;
+	}
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 						struct dma_chan *chan,
 						size_t len,
 						int int_en)
@@ -441,19 +683,62 @@ static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
 	return new ? &new->async_tx : NULL;
 }
 
+static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
+						struct dma_chan *chan,
+						size_t len,
+						int int_en)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+	struct ioat_desc_sw *new;
+
+	spin_lock_bh(&ioat_chan->desc_lock);
+	new = ioat2_dma_get_next_descriptor(ioat_chan);
+	new->len = len;
+
+	/* leave ioat_chan->desc_lock set in version 2 path */
+	return new ? &new->async_tx : NULL;
+}
+
+
 /**
  * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
  *                                 descriptors to hw
  * @chan: DMA channel handle
  */
-static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
+static inline void __ioat1_dma_memcpy_issue_pending(
+						struct ioat_dma_chan *ioat_chan)
+{
+	ioat_chan->pending = 0;
+	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
+}
+
+static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 
 	if (ioat_chan->pending != 0) {
-		ioat_chan->pending = 0;
-		writeb(IOAT_CHANCMD_APPEND,
-		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+		spin_lock_bh(&ioat_chan->desc_lock);
+		__ioat1_dma_memcpy_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->desc_lock);
+	}
+}
+
+static inline void __ioat2_dma_memcpy_issue_pending(
+						struct ioat_dma_chan *ioat_chan)
+{
+	ioat_chan->pending = 0;
+	writew(ioat_chan->dmacount,
+	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+}
+
+static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (ioat_chan->pending != 0) {
+		spin_lock_bh(&ioat_chan->desc_lock);
+		__ioat2_dma_memcpy_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->desc_lock);
 	}
 }
 
@@ -465,11 +750,17 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
 	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
+/**
+ * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
+ * @chan: ioat channel to be cleaned up
+ */
 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 {
 	unsigned long phys_complete;
 	struct ioat_desc_sw *desc, *_desc;
 	dma_cookie_t cookie = 0;
+	unsigned long desc_phys;
+	struct ioat_desc_sw *latest_desc;
 
 	prefetch(ioat_chan->completion_virt);
 
@@ -507,56 +798,115 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 
 	cookie = 0;
 	spin_lock_bh(&ioat_chan->desc_lock);
-	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
-
-		/*
-		 * Incoming DMA requests may use multiple descriptors, due to
-		 * exceeding xfercap, perhaps. If so, only the last one will
-		 * have a cookie, and require unmapping.
-		 */
-		if (desc->async_tx.cookie) {
-			cookie = desc->async_tx.cookie;
-
-			/*
-			 * yes we are unmapping both _page and _single alloc'd
-			 * regions with unmap_page. Is this *really* that bad?
-			 */
-			pci_unmap_page(ioat_chan->device->pdev,
-					pci_unmap_addr(desc, dst),
-					pci_unmap_len(desc, len),
-					PCI_DMA_FROMDEVICE);
-			pci_unmap_page(ioat_chan->device->pdev,
-					pci_unmap_addr(desc, src),
-					pci_unmap_len(desc, len),
-					PCI_DMA_TODEVICE);
-			if (desc->async_tx.callback) {
-				desc->async_tx.callback(
-						desc->async_tx.callback_param);
-				desc->async_tx.callback = NULL;
-			}
-		}
-
-		if (desc->async_tx.phys != phys_complete) {
-			/*
-			 * a completed entry, but not the last, so cleanup
-			 * if the client is done with the descriptor
-			 */
-			if (desc->async_tx.ack) {
-				list_del(&desc->node);
-				list_add_tail(&desc->node,
-					      &ioat_chan->free_desc);
-			} else
+	switch (ioat_chan->device->version) {
+	case IOAT_VER_1_2:
+		list_for_each_entry_safe(desc, _desc,
+					 &ioat_chan->used_desc, node) {
+
+			/*
+			 * Incoming DMA requests may use multiple descriptors,
+			 * due to exceeding xfercap, perhaps. If so, only the
+			 * last one will have a cookie, and require unmapping.
+			 */
+			if (desc->async_tx.cookie) {
+				cookie = desc->async_tx.cookie;
+
+				/*
+				 * yes we are unmapping both _page and _single
+				 * alloc'd regions with unmap_page. Is this
+				 * *really* that bad?
+				 */
+				pci_unmap_page(ioat_chan->device->pdev,
+						pci_unmap_addr(desc, dst),
+						pci_unmap_len(desc, len),
+						PCI_DMA_FROMDEVICE);
+				pci_unmap_page(ioat_chan->device->pdev,
+						pci_unmap_addr(desc, src),
+						pci_unmap_len(desc, len),
+						PCI_DMA_TODEVICE);
+
+				if (desc->async_tx.callback) {
+					desc->async_tx.callback(desc->async_tx.callback_param);
+					desc->async_tx.callback = NULL;
+				}
+			}
+
+			if (desc->async_tx.phys != phys_complete) {
+				/*
+				 * a completed entry, but not the last, so clean
+				 * up if the client is done with the descriptor
+				 */
+				if (desc->async_tx.ack) {
+					list_del(&desc->node);
+					list_add_tail(&desc->node,
+						      &ioat_chan->free_desc);
+				} else
+					desc->async_tx.cookie = 0;
+			} else {
+				/*
+				 * last used desc. Do not remove, so we can
+				 * append from it, but don't look at it next
+				 * time, either
+				 */
 				desc->async_tx.cookie = 0;
-		} else {
-			/*
-			 * last used desc. Do not remove, so we can append from
-			 * it, but don't look at it next time, either
-			 */
-			desc->async_tx.cookie = 0;
 
-			/* TODO check status bits? */
+				/* TODO check status bits? */
+				break;
+			}
+		}
+		break;
+	case IOAT_VER_2_0:
+		/* has some other thread has already cleaned up? */
+		if (ioat_chan->used_desc.prev == NULL)
 			break;
+
+		/* work backwards to find latest finished desc */
+		desc = to_ioat_desc(ioat_chan->used_desc.next);
+		latest_desc = NULL;
+		do {
+			desc = to_ioat_desc(desc->node.prev);
+			desc_phys = (unsigned long)desc->async_tx.phys
+				    & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+			if (desc_phys == phys_complete) {
+				latest_desc = desc;
+				break;
+			}
+		} while (&desc->node != ioat_chan->used_desc.prev);
+
+		if (latest_desc != NULL) {
+
+			/* work forwards to clear finished descriptors */
+			for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
+			     &desc->node != latest_desc->node.next &&
+			     &desc->node != ioat_chan->used_desc.next;
+			     desc = to_ioat_desc(desc->node.next)) {
+				if (desc->async_tx.cookie) {
+					cookie = desc->async_tx.cookie;
+					desc->async_tx.cookie = 0;
+
+					pci_unmap_page(ioat_chan->device->pdev,
+						       pci_unmap_addr(desc, dst),
+						       pci_unmap_len(desc, len),
+						       PCI_DMA_FROMDEVICE);
+					pci_unmap_page(ioat_chan->device->pdev,
+						       pci_unmap_addr(desc, src),
+						       pci_unmap_len(desc, len),
+						       PCI_DMA_TODEVICE);
+
+					if (desc->async_tx.callback) {
+						desc->async_tx.callback(desc->async_tx.callback_param);
+						desc->async_tx.callback = NULL;
+					}
+				}
+			}
+
+			/* move used.prev up beyond those that are finished */
+			if (&desc->node == ioat_chan->used_desc.next)
+				ioat_chan->used_desc.prev = NULL;
+			else
+				ioat_chan->used_desc.prev = &desc->node;
 		}
+		break;
 	}
 
 	spin_unlock_bh(&ioat_chan->desc_lock);
@@ -621,8 +971,6 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
 	return dma_async_is_complete(cookie, last_complete, last_used);
 }
 
-/* PCI API */
-
 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 {
 	struct ioat_desc_sw *desc;
@@ -633,21 +981,34 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
 				| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
 				| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-	desc->hw->next = 0;
 	desc->hw->size = 0;
 	desc->hw->src_addr = 0;
 	desc->hw->dst_addr = 0;
 	desc->async_tx.ack = 1;
-
-	list_add_tail(&desc->node, &ioat_chan->used_desc);
+	switch (ioat_chan->device->version) {
+	case IOAT_VER_1_2:
+		desc->hw->next = 0;
+		list_add_tail(&desc->node, &ioat_chan->used_desc);
+
+		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+		writel(((u64) desc->async_tx.phys) >> 32,
+		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+
+		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
+			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+		break;
+	case IOAT_VER_2_0:
+		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+		writel(((u64) desc->async_tx.phys) >> 32,
+		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+
+		ioat_chan->dmacount++;
+		__ioat2_dma_memcpy_issue_pending(ioat_chan);
+		break;
+	}
 	spin_unlock_bh(&ioat_chan->desc_lock);
-
-	writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
-	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
-	writel(((u64) desc->async_tx.phys) >> 32,
-	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);
-
-	writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
 }
 
 /*
@@ -693,14 +1054,14 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
+	if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
 		dev_err(&device->pdev->dev,
 			"selftest cannot allocate chan resource\n");
 		err = -ENODEV;
 		goto out;
 	}
 
-	tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+	tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
 	if (!tx) {
 		dev_err(&device->pdev->dev,
 			"Self-test prep failed, disabling\n");
@@ -710,24 +1071,25 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 
 	async_tx_ack(tx);
 	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
 			      DMA_TO_DEVICE);
-	ioat_set_src(addr, tx, 0);
+	tx->tx_set_src(addr, tx, 0);
 	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
 			      DMA_FROM_DEVICE);
-	ioat_set_dest(addr, tx, 0);
+	tx->tx_set_dest(addr, tx, 0);
 	tx->callback = ioat_dma_test_callback;
 	tx->callback_param = (void *)0x8086;
-	cookie = ioat_tx_submit(tx);
+	cookie = tx->tx_submit(tx);
 	if (cookie < 0) {
 		dev_err(&device->pdev->dev,
 			"Self-test setup failed, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
-	ioat_dma_memcpy_issue_pending(dma_chan);
+	device->common.device_issue_pending(dma_chan);
 	msleep(1);
 
-	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+	if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+					!= DMA_SUCCESS) {
 		dev_err(&device->pdev->dev,
 			"Self-test copy timed out, disabling\n");
 		err = -ENODEV;
@@ -741,7 +1103,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	}
 
 free_resources:
-	ioat_dma_free_chan_resources(dma_chan);
+	device->common.device_free_chan_resources(dma_chan);
 out:
 	kfree(src);
 	kfree(dest);
@@ -941,16 +1303,28 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 	INIT_LIST_HEAD(&device->common.channels);
 	ioat_dma_enumerate_channels(device);
 
-	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
 	device->common.device_alloc_chan_resources =
 					ioat_dma_alloc_chan_resources;
 	device->common.device_free_chan_resources =
 					ioat_dma_free_chan_resources;
-	device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
+	device->common.dev = &pdev->dev;
+
+	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
 	device->common.device_is_tx_complete = ioat_dma_is_complete;
-	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
 	device->common.device_dependency_added = ioat_dma_dependency_added;
-	device->common.dev = &pdev->dev;
+	switch (device->version) {
+	case IOAT_VER_1_2:
+		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
+		device->common.device_issue_pending =
+					ioat1_dma_memcpy_issue_pending;
+		break;
+	case IOAT_VER_2_0:
+		device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
+		device->common.device_issue_pending =
+					ioat2_dma_memcpy_issue_pending;
+		break;
+	}
+
 	dev_err(&device->pdev->dev,
 		"Intel(R) I/OAT DMA Engine found,"
 		" %d channels, device version 0x%02x, driver version %s\n",