author	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-03-25 13:47:43 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-03-27 07:41:41 -0400
commit	e7e58eb5c0d1d7d1a42fcb2b5a247d28ec08b47e (patch)
tree	50757627f2cb424548fa4af48552facade94cab3
parent	23c18c71da801fb7ce11acc3041e4f10a1bb5cb0 (diff)
drm/i915: mark pwrite/pread slowpaths with unlikely
Besides helping the compiler untangle this maze, these annotations
double up as documentation for which parts of the code aren't
performance-critical but are only around to keep old (but already
dead-slow) userspace from breaking.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
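For context, the kernel's unlikely() annotation (from include/linux/compiler.h) expands to GCC's __builtin_expect(), which tells the compiler to treat the annotated condition as the cold path when laying out branches, so the common case runs as straight-line code. Below is a minimal, self-contained user-space sketch of the same pattern; the demo_pread_fast() function and its return values are illustrative stand-ins, not code from this patch:

#include <stdio.h>

/* Same definitions the kernel uses in include/linux/compiler.h:
 * __builtin_expect() biases branch layout so the expected outcome
 * becomes the fall-through fast path. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical stand-in mirroring the shmem_pread_fast() pattern:
 * the bit17-swizzled case is a rare compatibility path, so it is
 * marked unlikely and the caller falls back to a slow handler. */
static int demo_pread_fast(int page_do_bit17_swizzling)
{
	if (unlikely(page_do_bit17_swizzling))
		return -1;	/* bail out; a slowpath handles this case */

	return 0;		/* common case: straight-line fastpath */
}

int main(void)
{
	printf("fastpath: %d, slowpath: %d\n",
	       demo_pread_fast(0), demo_pread_fast(1));
	return 0;
}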
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c964dfbdb577..b8c6248b25c6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -298,7 +298,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
 	char *vaddr;
 	int ret;
 
-	if (page_do_bit17_swizzling)
+	if (unlikely(page_do_bit17_swizzling))
 		return -EINVAL;
 
 	vaddr = kmap_atomic(page);
@@ -317,7 +317,7 @@ static void
 shmem_clflush_swizzled_range(char *addr, unsigned long length,
 			     bool swizzled)
 {
-	if (swizzled) {
+	if (unlikely(swizzled)) {
 		unsigned long start = (unsigned long) addr;
 		unsigned long end = (unsigned long) addr + length;
 
@@ -629,7 +629,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
 	char *vaddr;
 	int ret;
 
-	if (page_do_bit17_swizzling)
+	if (unlikely(page_do_bit17_swizzling))
 		return -EINVAL;
 
 	vaddr = kmap_atomic(page);
@@ -660,7 +660,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
 	int ret;
 
 	vaddr = kmap(page);
-	if (needs_clflush_before || page_do_bit17_swizzling)
+	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
 					     page_length,
 					     page_do_bit17_swizzling);