| author | Thomas Hellstrom <thellstrom@vmware.com> | 2013-11-12 02:49:26 -0500 |
|---|---|---|
| committer | Thomas Hellstrom <thellstrom@vmware.com> | 2013-11-13 02:47:32 -0500 |
| commit | e14cd9536bd11b174a849da91c7a26bbc980330d | |
| tree | 63110073766ec571e2d17dbdf5d8e88c5f063615 | |
| parent | ea029c28deadc33d2af4baf26810dd5fc44d4926 | |
drm/vmwgfx: Fix a couple of compile / sparse warnings and errors
Fixes
*) an implicit function declaration on mips,
*) a defined but not used label on !CONFIG_INTEL_IOMMU
*) Hopefully a couple of sparse warnings where we implicitly typecast
integer to __le32 and vice versa.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
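
Regarding the last item in the list above: sparse treats `__le32` as a restricted type, so assigning a plain integer to it (or the other way round) without a conversion helper produces a warning. The sketch below is illustrative only; the struct and function names are hypothetical, not the driver's. It shows the `cpu_to_le32()`/`le32_to_cpu()` pattern that the vmwgfx_gmr.c hunks further down apply to the descriptor's `ppn` field.

```c
#include <linux/types.h>	/* __le32, u32 */
#include <asm/byteorder.h>	/* cpu_to_le32(), le32_to_cpu() */

/* Hypothetical descriptor with a little-endian field as the device sees it. */
struct demo_desc {
	__le32 ppn;
};

static void demo_set_ppn(struct demo_desc *desc, u32 pfn)
{
	/* Explicit conversion: no sparse warning, correct on big-endian CPUs. */
	desc->ppn = cpu_to_le32(pfn);
}

static u32 demo_get_ppn(const struct demo_desc *desc)
{
	return le32_to_cpu(desc->ppn);
}
```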
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 9 |
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 7 |

2 files changed, 13 insertions, 3 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0b5c7818ebfb..a278581ca276 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -453,12 +453,13 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
  */
 static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 {
-	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 	static const char *names[vmw_dma_map_max] = {
 		[vmw_dma_phys] = "Using physical TTM page addresses.",
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 		[vmw_dma_map_populate] = "Keeping DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+#ifdef CONFIG_X86
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_enabled) {
@@ -482,7 +483,9 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		dev_priv->map_mode = vmw_dma_map_populate;
 #endif
 
+#ifdef CONFIG_INTEL_IOMMU
 out_fixup:
+#endif
 	if (dev_priv->map_mode == vmw_dma_map_populate &&
 	    vmw_restrict_iommu)
 		dev_priv->map_mode = vmw_dma_map_bind;
@@ -498,6 +501,10 @@ out_fixup:
 		return -EINVAL;
 #endif
 
+#else /* CONFIG_X86 */
+	dev_priv->map_mode = vmw_dma_map_populate;
+#endif /* CONFIG_X86 */
+
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 
 	return 0;
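
The vmwgfx_drv.c hunks above cover the first two items of the commit message: `get_dma_ops()` and `intel_iommu_enabled` are now referenced only when building for x86, and the `out_fixup:` label only exists when something can actually jump to it. The following minimal sketch shows the same #ifdef shape; the function name and the 0/1 mode values are made up for illustration, while `CONFIG_X86`, `CONFIG_INTEL_IOMMU`, and `intel_iommu_enabled` are the kernel's real symbols.

```c
/* Minimal sketch of the #ifdef layout, not the driver's actual function. */
static int demo_select_mode(int *map_mode)
{
#ifdef CONFIG_X86
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		*map_mode = 1;
		goto out_fixup;	/* only compiled when the label exists */
	}
#endif
	*map_mode = 0;

#ifdef CONFIG_INTEL_IOMMU
out_fixup:	/* guarded to avoid a "defined but not used" label warning */
#endif
	if (*map_mode == 0)
		*map_mode = 1;
#else	/* !CONFIG_X86: e.g. mips, where the x86-only symbols are not declared */
	*map_mode = 0;
#endif
	return 0;
}
```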
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 6d0952366f91..6ef0b035becb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -145,7 +145,9 @@ static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
 	}
 
 	page_virtual = kmap_atomic(page);
-	desc_dma = page_virtual[desc_per_page].ppn << PAGE_SHIFT;
+	desc_dma = (dma_addr_t)
+		le32_to_cpu(page_virtual[desc_per_page].ppn) <<
+		PAGE_SHIFT;
 	kunmap_atomic(page_virtual);
 
 	__free_page(page);
@@ -217,7 +219,8 @@ static int vmw_gmr_build_descriptors(struct device *dev,
 	desc_dma = 0;
 	list_for_each_entry_reverse(page, desc_pages, lru) {
 		page_virtual = kmap_atomic(page);
-		page_virtual[desc_per_page].ppn = desc_dma >> PAGE_SHIFT;
+		page_virtual[desc_per_page].ppn = cpu_to_le32
+			(desc_dma >> PAGE_SHIFT);
 		kunmap_atomic(page_virtual);
 		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
 					DMA_TO_DEVICE);
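
A side effect of the cast in the free-path hunk is worth noting: `le32_to_cpu()` yields a 32-bit value, so widening to `dma_addr_t` before the `<< PAGE_SHIFT` keeps high bits that a 32-bit shift would drop on systems with 64-bit DMA addresses. A hypothetical helper (not from the driver) showing the same pattern, assuming the usual kernel headers:

```c
#include <linux/mm.h>		/* PAGE_SHIFT */
#include <linux/types.h>	/* dma_addr_t, __le32 */
#include <asm/byteorder.h>	/* le32_to_cpu() */

/*
 * Convert a stored little-endian page number back to CPU order, then widen
 * to dma_addr_t before shifting so the result is not truncated to 32 bits.
 */
static inline dma_addr_t demo_ppn_to_dma(__le32 ppn)
{
	return (dma_addr_t)le32_to_cpu(ppn) << PAGE_SHIFT;
}
```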
