path: root/drivers/gpu/drm/i915/i915_gem.c
author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-10-26 17:21:51 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-10-26 19:52:08 -0400
commit    3e4d3af501cccdc8a8cca41bdbe57d54ad7e7e73
tree      2ce507f7ec7275563653e52f18606aba4f99b7f1 /drivers/gpu/drm/i915/i915_gem.c
parent    61ecdb801ef2cd28e32442383106d7837d76deac
mm: stack based kmap_atomic()
Keep the current interface but ignore the KM_type and use a stack based
approach.

The advantage is that we get rid of crappy code like:

	#define __KM_PTE			\
		(in_nmi() ? KM_NMI_PTE :	\
		 in_irq() ? KM_IRQ_PTE :	\
		 KM_PTE0)

and in general can stop worrying about what context we're in and what kmap
slots might be appropriate for that.

The downside is that FRV kmap_atomic() gets more expensive.

For now we use a CPP trick suggested by Andrew:

	#define kmap_atomic(page, args...) __kmap_atomic(page)

to avoid having to touch all kmap_atomic() users in a single patch.

[ not compiled on:
  - mn10300: the arch doesn't actually build with highmem to begin with ]

[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix up drivers/gpu/drm/i915/intel_overlay.c]
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
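The core of the stack-based scheme: instead of the caller naming a fixed
KM_type slot, each nested kmap_atomic() takes the next free slot from a
small per-CPU stack, and kunmap_atomic() pops it again. A minimal sketch
of that idea in kernel C (the helper names, the depth constant and the
exact bookkeeping are illustrative, not this patch's actual code):

	#include <linux/percpu.h>
	#include <linux/bug.h>

	#define KMAP_SLOTS	16	/* illustrative max nesting depth */
	static DEFINE_PER_CPU(int, kmap_slot_idx);

	/* Called with preemption already disabled by kmap_atomic(). */
	static inline int kmap_slot_push(void)
	{
		int idx = __get_cpu_var(kmap_slot_idx)++;

		BUG_ON(idx >= KMAP_SLOTS);	/* nesting too deep */
		return idx;	/* which fixmap slot to map this page into */
	}

	static inline int kmap_slot_pop(void)
	{
		int idx = --__get_cpu_var(kmap_slot_idx);

		BUG_ON(idx < 0);	/* unbalanced kunmap_atomic() */
		return idx;	/* which fixmap slot to tear down */
	}

This only works because atomic kmaps are strictly nested: the most
recently mapped page must be unmapped first, which is why the new
kunmap_atomic() can take just the address and no slot argument.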
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 25
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 90b1d6753b9d..eb6c473c6d1b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -155,11 +155,11 @@ fast_shmem_read(struct page **pages,
 	char __iomem *vaddr;
 	int unwritten;
 
-	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
 	if (vaddr == NULL)
 		return -ENOMEM;
 	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
-	kunmap_atomic(vaddr, KM_USER0);
+	kunmap_atomic(vaddr);
 
 	if (unwritten)
 		return -EFAULT;
@@ -509,10 +509,10 @@ fast_user_write(struct io_mapping *mapping,
 	char *vaddr_atomic;
 	unsigned long unwritten;
 
-	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
+	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
 	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
 						      user_data, length);
-	io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
+	io_mapping_unmap_atomic(vaddr_atomic);
 	if (unwritten)
 		return -EFAULT;
 	return 0;
@@ -551,11 +551,11 @@ fast_shmem_write(struct page **pages,
 	char __iomem *vaddr;
 	unsigned long unwritten;
 
-	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
 	if (vaddr == NULL)
 		return -ENOMEM;
 	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
-	kunmap_atomic(vaddr, KM_USER0);
+	kunmap_atomic(vaddr);
 
 	if (unwritten)
 		return -EFAULT;
@@ -3346,8 +3346,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 		reloc_offset = obj_priv->gtt_offset + reloc->offset;
 		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 						      (reloc_offset &
-						       ~(PAGE_SIZE - 1)),
-						      KM_USER0);
+						       ~(PAGE_SIZE - 1)));
 		reloc_entry = (uint32_t __iomem *)(reloc_page +
 						   (reloc_offset & (PAGE_SIZE - 1)));
 		reloc_val = target_obj_priv->gtt_offset + reloc->delta;
@@ -3358,7 +3357,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			  readl(reloc_entry), reloc_val);
 #endif
 		writel(reloc_val, reloc_entry);
-		io_mapping_unmap_atomic(reloc_page, KM_USER0);
+		io_mapping_unmap_atomic(reloc_page);
 
 		/* The updated presumed offset for this entry will be
 		 * copied back out to the user.
@@ -4772,11 +4771,11 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 	page_count = obj->size / PAGE_SIZE;
 
 	for (i = 0; i < page_count; i++) {
-		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
+		char *dst = kmap_atomic(obj_priv->pages[i]);
 		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
 		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(dst, KM_USER0);
+		kunmap_atomic(dst);
 	}
 	drm_clflush_pages(obj_priv->pages, page_count);
 	drm_agp_chipset_flush(dev);
@@ -4833,11 +4832,11 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 	page_count = obj->size / PAGE_SIZE;
 
 	for (i = 0; i < page_count; i++) {
-		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
+		char *src = kmap_atomic(obj_priv->pages[i]);
 		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
 		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(src);
 	}
 
 	i915_gem_object_put_pages(obj);