author     Dan Williams <dan.j.williams@intel.com>   2012-03-23 16:36:42 -0400
committer  Dan Williams <dan.j.williams@intel.com>   2012-03-23 16:36:42 -0400
commit     275029353953c2117941ade84f02a2303912fad1 (patch)
tree       35e49f7b6d288f5ff74c5c95533e4353c587cd7d
parent     c16fa4f2ad19908a47c63d8fa436a1178438c7e7 (diff)
ioat: fix size of 'completion' for Xen
Starting with v3.2, Jonathan reports that Xen crashes when loading the
ioatdma driver. A debug run shows:
ioatdma 0000:00:16.4: desc[0]: (0x300cc7000->0x300cc7040) cookie: 0 flags: 0x2 ctl: 0x29 (op: 0 int_en: 1 compl: 1)
...
ioatdma 0000:00:16.4: ioat_get_current_completion: phys_complete: 0xcc7000
...which shows that in this environment GFP_KERNEL memory may be backed
by a 64-bit dma address. This breaks the driver's assumption that an
unsigned long can hold the physical address of descriptor memory.
Switch to dma_addr_t, which, beyond being the right size, is the true
type for the data, i.e. an io-virtual address indicating the engine's
last processed descriptor.
[stable: 3.2+]
Cc: <stable@vger.kernel.org>
Reported-by: Jonathan Nieder <jrnieder@gmail.com>
Reported-by: William Dauchy <wdauchy@gmail.com>
Tested-by: William Dauchy <wdauchy@gmail.com>
Tested-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
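
[Editor's illustration, not part of the patch: a minimal sketch of the truncation the commit message describes. On a 32-bit kernel with 64-bit DMA addressing, dma_addr_t is 64 bits wide while unsigned long is only 32, so storing the completion address in an unsigned long silently drops the upper bits, e.g. 0x300cc7000 reads back as 0xcc7000 as in the debug output above. The helper name below is hypothetical.]

/* Illustration only: why the completion address must be kept in
 * dma_addr_t rather than unsigned long.  Hypothetical helper, not
 * part of the ioatdma driver.
 */
#include <linux/types.h>
#include <linux/printk.h>

static void show_completion_truncation(dma_addr_t completion)
{
	unsigned long truncated = completion;	/* old code: high bits lost on 32-bit */
	dma_addr_t preserved = completion;	/* fixed code: full address retained */

	/* With completion == 0x300cc7000 on a 32-bit kernel using 64-bit
	 * DMA addressing, truncated reads back as 0xcc7000 while
	 * preserved still holds 0x300cc7000.
	 */
	pr_debug("truncated: %#lx preserved: %#llx\n",
		 truncated, (unsigned long long)preserved);
}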
-rw-r--r--  drivers/dma/ioat/dma.c     16
-rw-r--r--  drivers/dma/ioat/dma.h      6
-rw-r--r--  drivers/dma/ioat/dma_v2.c   8
-rw-r--r--  drivers/dma/ioat/dma_v3.c   8

4 files changed, 19 insertions, 19 deletions
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index a4d6cb0c0343..659518015972 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -548,9 +548,9 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 			   PCI_DMA_TODEVICE, flags, 0);
 }
 
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
 {
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 	u64 completion;
 
 	completion = *chan->completion;
@@ -571,7 +571,7 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
 }
 
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   unsigned long *phys_complete)
+			   dma_addr_t *phys_complete)
 {
 	*phys_complete = ioat_get_current_completion(chan);
 	if (*phys_complete == chan->last_completion)
@@ -582,14 +582,14 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
 	return true;
 }
 
-static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct list_head *_desc, *n;
 	struct dma_async_tx_descriptor *tx;
 
-	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
-		__func__, phys_complete);
+	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
+		__func__, (unsigned long long) phys_complete);
 	list_for_each_safe(_desc, n, &ioat->used_desc) {
 		struct ioat_desc_sw *desc;
 
@@ -655,7 +655,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
 static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	prefetch(chan->completion);
 
@@ -701,7 +701,7 @@ static void ioat1_timer_event(unsigned long data)
 		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 		spin_unlock_bh(&ioat->desc_lock);
 	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 
 		spin_lock_bh(&ioat->desc_lock);
 		/* if we haven't made progress and we have already
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5216c8a92a21..8bebddd189c7 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -88,7 +88,7 @@ struct ioatdma_device {
 struct ioat_chan_common {
 	struct dma_chan common;
 	void __iomem *reg_base;
-	unsigned long last_completion;
+	dma_addr_t last_completion;
 	spinlock_t cleanup_lock;
 	dma_cookie_t completed_cookie;
 	unsigned long state;
@@ -333,7 +333,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device);
 void __devexit ioat_dma_remove(struct ioatdma_device *device);
 struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
 					      void __iomem *iobase);
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
 		       struct ioat_chan_common *chan, int idx);
 enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
@@ -341,7 +341,7 @@ enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 		    size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   unsigned long *phys_complete);
+			   dma_addr_t *phys_complete);
 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
 void ioat_kobject_del(struct ioatdma_device *device);
 extern const struct sysfs_ops ioat_sysfs_ops;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5d65f8377971..cb8864d45601 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -126,7 +126,7 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&ioat->prep_lock);
 }
 
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct dma_async_tx_descriptor *tx;
@@ -178,7 +178,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -259,7 +259,7 @@ int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
 static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -274,7 +274,7 @@ void ioat2_timer_event(unsigned long data)
 	struct ioat_chan_common *chan = &ioat->base;
 
 	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 		u64 status;
 
 		status = ioat_chansts(chan);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index f519c93a61e7..2dbf32b02735 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -256,7 +256,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
  * The difference from the dma_v2.c __cleanup() is that this routine
  * handles extended descriptors and dma-unmapping raid operations.
  */
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct ioat_ring_ent *desc;
@@ -314,7 +314,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -333,7 +333,7 @@ static void ioat3_cleanup_event(unsigned long data)
 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -348,7 +348,7 @@ static void ioat3_timer_event(unsigned long data)
 	struct ioat_chan_common *chan = &ioat->base;
 
 	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 		u64 status;
 
 		status = ioat_chansts(chan);