author		Eric Anholt <eric@anholt.net>	2008-10-02 15:24:47 -0400
committer	Dave Airlie <airlied@linux.ie>	2008-10-17 17:10:52 -0400
commit		3043c60c485ad694392d3f71bd7ef9f5c5f7cfdd
tree		8cda25875decd54bfb96f712b25b371450ab5f3d /drivers/gpu
parent		bd88ee4c1b1c8fc8b78a0ba7b6235d230cea0d05
drm: Clean up many sparse warnings in i915.
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
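
Most of the warnings this patch addresses fall into two groups that sparse (run as `make C=1`) reports against i915: file-local functions that were missing `static`, and pointers that mix address spaces, i.e. MMIO mappings handled as plain kernel pointers instead of `__iomem` ones. The fragment below is an illustrative sketch only, not part of this patch; `HYPOTHETICAL_STATUS_REG` and `read_hws_status()` are made-up names used just to show the annotation and accessor pattern the patch adopts for `dev_priv->regs`.

/* Illustrative sketch only -- not from this commit.
 * HYPOTHETICAL_STATUS_REG and read_hws_status() are invented names;
 * ioremap()/readl()/iounmap() are the real kernel accessors the patch
 * switches the driver to.
 */
#include <linux/io.h>
#include <linux/types.h>

#define HYPOTHETICAL_STATUS_REG 0x00	/* made-up register offset */

/* "static" keeps sparse/gcc from warning about a missing prototype. */
static u32 read_hws_status(void __iomem *regs)
{
	/* readl() takes an __iomem pointer; dereferencing regs directly
	 * would trigger sparse's "different address spaces" warning. */
	return readl(regs + HYPOTHETICAL_STATUS_REG);
}
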
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c	16
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	14
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	31
3 files changed, 32 insertions, 29 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 048da791ca66..13a6116b59fe 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -76,7 +76,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
  */
-int i915_init_phys_hws(struct drm_device *dev)
+static int i915_init_phys_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	/* Program Hardware Status Page */
@@ -101,7 +101,7 @@ int i915_init_phys_hws(struct drm_device *dev)
  * Frees the hardware status page, whether it's a physical address or a virtual
  * address set up by the X Server.
  */
-void i915_free_hws(struct drm_device *dev)
+static void i915_free_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	if (dev_priv->status_page_dmah) {
@@ -145,8 +145,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
 
 	if (dev_priv->ring.virtual_start) {
 		drm_core_ioremapfree(&dev_priv->ring.map, dev);
-		dev_priv->ring.virtual_start = 0;
-		dev_priv->ring.map.handle = 0;
+		dev_priv->ring.virtual_start = NULL;
+		dev_priv->ring.map.handle = NULL;
 		dev_priv->ring.map.size = 0;
 	}
 
@@ -827,9 +827,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	base = drm_get_resource_start(dev, mmio_bar);
 	size = drm_get_resource_len(dev, mmio_bar);
 
-	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
-			 _DRM_KERNEL | _DRM_DRIVER,
-			 &dev_priv->mmio_map);
+	dev_priv->regs = ioremap(base, size);
 
 	i915_gem_load(dev);
 
@@ -867,8 +865,8 @@ int i915_driver_unload(struct drm_device *dev)
 
 	i915_free_hws(dev);
 
-	if (dev_priv->mmio_map)
-		drm_rmmap(dev, dev_priv->mmio_map);
+	if (dev_priv->regs != NULL)
+		iounmap(dev_priv->regs);
 
 	intel_opregion_free(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8547f0aeafc6..74011235f2cc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -110,8 +110,8 @@ struct intel_opregion {
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
+	void __iomem *regs;
 	drm_local_map_t *sarea;
-	drm_local_map_t *mmio_map;
 
 	drm_i915_sarea_t *sarea_priv;
 	drm_i915_ring_buffer_t ring;
@@ -553,12 +553,12 @@ extern void opregion_enable_asle(struct drm_device *dev);
 	LOCK_TEST_WITH_RETURN(dev, file_priv);			\
 } while (0)
 
-#define I915_READ(reg)		DRM_READ32(dev_priv->mmio_map, (reg))
-#define I915_WRITE(reg,val)	DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
-#define I915_READ16(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
-#define I915_WRITE16(reg,val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
-#define I915_READ8(reg)		DRM_READ8(dev_priv->mmio_map, (reg))
-#define I915_WRITE8(reg,val)	DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
+#define I915_READ(reg)		readl(dev_priv->regs + (reg))
+#define I915_WRITE(reg, val)	writel(val, dev_priv->regs + (reg))
+#define I915_READ16(reg)	readw(dev_priv->regs + (reg))
+#define I915_WRITE16(reg, val)	writel(val, dev_priv->regs + (reg))
+#define I915_READ8(reg)		readb(dev_priv->regs + (reg))
+#define I915_WRITE8(reg, val)	writeb(val, dev_priv->regs + (reg))
 
 #define I915_VERBOSE 0
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6ecfd108effa..6a89449f31d1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -176,7 +176,8 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 	ssize_t remain;
 	loff_t offset;
 	char __user *user_data;
-	char *vaddr;
+	char __iomem *vaddr;
+	char *vaddr_atomic;
 	int i, o, l;
 	int ret = 0;
 	unsigned long pfn;
@@ -219,16 +220,20 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		pfn = (dev->agp->base >> PAGE_SHIFT) + i;
 
 #ifdef CONFIG_HIGHMEM
-		/* kmap_atomic can't map IO pages on non-HIGHMEM kernels
+		/* This is a workaround for the low performance of iounmap
+		 * (approximate 10% cpu cost on normal 3D workloads).
+		 * kmap_atomic on HIGHMEM kernels happens to let us map card
+		 * memory without taking IPIs.  When the vmap rework lands
+		 * we should be able to dump this hack.
 		 */
-		vaddr = kmap_atomic_pfn(pfn, KM_USER0);
+		vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
 #if WATCH_PWRITE
 		DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
-			 i, o, l, pfn, vaddr);
+			 i, o, l, pfn, vaddr_atomic);
 #endif
-		unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
+		unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o,
							      user_data, l);
-		kunmap_atomic(vaddr, KM_USER0);
+		kunmap_atomic(vaddr_atomic, KM_USER0);
 
 		if (unwritten)
 #endif /* CONFIG_HIGHMEM */
@@ -271,7 +276,7 @@ fail:
 	return ret;
 }
 
-int
+static int
 i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		      struct drm_i915_gem_pwrite *args,
 		      struct drm_file *file_priv)
@@ -587,7 +592,7 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
  * Ensures that all commands in the ring are finished
  * before signalling the CPU
  */
-uint32_t
+static uint32_t
 i915_retire_commands(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -734,7 +739,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
  * Waits for a sequence number to be signaled, and cleans up the
  * request and object lists appropriately for that event.
  */
-int
+static int
 i915_wait_request(struct drm_device *dev, uint32_t seqno)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1483,7 +1488,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int i, ret;
 	uint32_t last_reloc_offset = -1;
-	void *reloc_page = NULL;
+	void __iomem *reloc_page = NULL;
 
 	/* Choose the GTT offset for our buffer and put it there. */
 	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
@@ -1500,8 +1505,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 	for (i = 0; i < entry->relocation_count; i++) {
 		struct drm_gem_object *target_obj;
 		struct drm_i915_gem_object *target_obj_priv;
-		uint32_t reloc_val, reloc_offset, *reloc_entry;
-		int ret;
+		uint32_t reloc_val, reloc_offset;
+		uint32_t __iomem *reloc_entry;
 
 		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
 		if (ret != 0) {
@@ -1624,7 +1629,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			}
 		}
 
-		reloc_entry = (uint32_t *)((char *)reloc_page +
+		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
 		reloc_val = target_obj_priv->gtt_offset + reloc.delta;
 