aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma/ioat/dma.c
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2012-03-23 16:36:42 -0400
committerDan Williams <dan.j.williams@intel.com>2012-03-23 16:36:42 -0400
commit275029353953c2117941ade84f02a2303912fad1 (patch)
tree35e49f7b6d288f5ff74c5c95533e4353c587cd7d /drivers/dma/ioat/dma.c
parentc16fa4f2ad19908a47c63d8fa436a1178438c7e7 (diff)
ioat: fix size of 'completion' for Xen
Starting with v3.2 Jonathan reports that Xen crashes loading the ioatdma driver. A debug run shows: ioatdma 0000:00:16.4: desc[0]: (0x300cc7000->0x300cc7040) cookie: 0 flags: 0x2 ctl: 0x29 (op: 0 int_en: 1 compl: 1) ... ioatdma 0000:00:16.4: ioat_get_current_completion: phys_complete: 0xcc7000 ...which shows that in this environment GFP_KERNEL memory may be backed by a 64-bit dma address. This breaks the driver's assumption that an unsigned long should be able to contain the physical address for descriptor memory. Switch to dma_addr_t which beyond being the right size, is the true type for the data i.e. an io-virtual address indicating the engine's last processed descriptor. [stable: 3.2+] Cc: <stable@vger.kernel.org> Reported-by: Jonathan Nieder <jrnieder@gmail.com> Reported-by: William Dauchy <wdauchy@gmail.com> Tested-by: William Dauchy <wdauchy@gmail.com> Tested-by: Dave Jiang <dave.jiang@intel.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma/ioat/dma.c')
-rw-r--r--drivers/dma/ioat/dma.c16
1 file changed, 8 insertions, 8 deletions
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index a4d6cb0c0343..659518015972 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -548,9 +548,9 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
548 PCI_DMA_TODEVICE, flags, 0); 548 PCI_DMA_TODEVICE, flags, 0);
549} 549}
550 550
551unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) 551dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
552{ 552{
553 unsigned long phys_complete; 553 dma_addr_t phys_complete;
554 u64 completion; 554 u64 completion;
555 555
556 completion = *chan->completion; 556 completion = *chan->completion;
@@ -571,7 +571,7 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
571} 571}
572 572
573bool ioat_cleanup_preamble(struct ioat_chan_common *chan, 573bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
574 unsigned long *phys_complete) 574 dma_addr_t *phys_complete)
575{ 575{
576 *phys_complete = ioat_get_current_completion(chan); 576 *phys_complete = ioat_get_current_completion(chan);
577 if (*phys_complete == chan->last_completion) 577 if (*phys_complete == chan->last_completion)
@@ -582,14 +582,14 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
582 return true; 582 return true;
583} 583}
584 584
585static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete) 585static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
586{ 586{
587 struct ioat_chan_common *chan = &ioat->base; 587 struct ioat_chan_common *chan = &ioat->base;
588 struct list_head *_desc, *n; 588 struct list_head *_desc, *n;
589 struct dma_async_tx_descriptor *tx; 589 struct dma_async_tx_descriptor *tx;
590 590
591 dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n", 591 dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
592 __func__, phys_complete); 592 __func__, (unsigned long long) phys_complete);
593 list_for_each_safe(_desc, n, &ioat->used_desc) { 593 list_for_each_safe(_desc, n, &ioat->used_desc) {
594 struct ioat_desc_sw *desc; 594 struct ioat_desc_sw *desc;
595 595
@@ -655,7 +655,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
655static void ioat1_cleanup(struct ioat_dma_chan *ioat) 655static void ioat1_cleanup(struct ioat_dma_chan *ioat)
656{ 656{
657 struct ioat_chan_common *chan = &ioat->base; 657 struct ioat_chan_common *chan = &ioat->base;
658 unsigned long phys_complete; 658 dma_addr_t phys_complete;
659 659
660 prefetch(chan->completion); 660 prefetch(chan->completion);
661 661
@@ -701,7 +701,7 @@ static void ioat1_timer_event(unsigned long data)
701 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 701 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
702 spin_unlock_bh(&ioat->desc_lock); 702 spin_unlock_bh(&ioat->desc_lock);
703 } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { 703 } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
704 unsigned long phys_complete; 704 dma_addr_t phys_complete;
705 705
706 spin_lock_bh(&ioat->desc_lock); 706 spin_lock_bh(&ioat->desc_lock);
707 /* if we haven't made progress and we have already 707 /* if we haven't made progress and we have already