author     Chris Wilson <chris@chris-wilson.co.uk>    2010-05-27 09:15:34 -0400
committer  Eric Anholt <eric@anholt.net>              2010-05-28 14:02:36 -0400
commit     99a03df57c82ec20848d2634f652c07ac3504b98
tree       2ba5c9f606c4e1e8b903ed507229bd32195c3a7c
parent     9b8c4a0b215e603497daebe8ecbc9b1f0f035808
drm/i915: Use non-atomic kmap for slow copy paths
As we do not have a requirement to be atomic and avoid sleeping whilst
performing the slow copy for shmem-based pread and pwrite, we can use
kmap instead, thus simplifying the code.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Eric Anholt <eric@anholt.net>
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 82
1 file changed, 30 insertions(+), 52 deletions(-)
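The shape of the change is easiest to see side by side. With kmap_atomic() the helper may not sleep between map and unmap and, in the pre-2.6.37 API used here, must dedicate a KM_* slot to each mapping; this code also checked the returned pointers and threaded -ENOMEM back to every caller. Plain kmap() may sleep, which is acceptable on these slow paths, and always returns a mapping, so both the error plumbing and the slot bookkeeping disappear. Below is a minimal sketch of the before/after pattern, not the driver code itself: the copy_page_* helpers are invented for illustration (the NULL checks mirror those carried by the old driver code), while kmap/kunmap and kmap_atomic/kunmap_atomic are the real kernel API of this era.

#include <linux/highmem.h>
#include <linux/string.h>

/* Before: atomic mappings, explicit KM_* slots, error plumbing. */
static int copy_page_atomic(struct page *dst, struct page *src)
{
        char *d, *s;

        d = kmap_atomic(dst, KM_USER0);
        if (d == NULL)
                return -ENOMEM;

        s = kmap_atomic(src, KM_USER1);
        if (s == NULL) {
                kunmap_atomic(d, KM_USER0);
                return -ENOMEM;
        }

        memcpy(d, s, PAGE_SIZE);

        /* kunmap_atomic() takes the mapped address and its slot. */
        kunmap_atomic(s, KM_USER1);
        kunmap_atomic(d, KM_USER0);

        return 0;
}

/* After: sleeping mappings, no slots, nothing to report.
 * Note that kunmap() takes the struct page, not the vaddr. */
static void copy_page_sleeping(struct page *dst, struct page *src)
{
        char *d = kmap(dst);
        char *s = kmap(src);

        memcpy(d, s, PAGE_SIZE);

        kunmap(src);
        kunmap(dst);
}

Making the helpers void is what lets the pread and pwrite loops in the hunks below drop their "if (ret) goto fail_put_pages;" checks.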
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4590c78f4283..b8e351274493 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -167,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
                 obj_priv->tiling_mode != I915_TILING_NONE;
 }
 
-static inline int
+static inline void
 slow_shmem_copy(struct page *dst_page,
                 int dst_offset,
                 struct page *src_page,
@@ -176,25 +176,16 @@ slow_shmem_copy(struct page *dst_page,
 {
         char *dst_vaddr, *src_vaddr;
 
-        dst_vaddr = kmap_atomic(dst_page, KM_USER0);
-        if (dst_vaddr == NULL)
-                return -ENOMEM;
-
-        src_vaddr = kmap_atomic(src_page, KM_USER1);
-        if (src_vaddr == NULL) {
-                kunmap_atomic(dst_vaddr, KM_USER0);
-                return -ENOMEM;
-        }
+        dst_vaddr = kmap(dst_page);
+        src_vaddr = kmap(src_page);
 
         memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
 
-        kunmap_atomic(src_vaddr, KM_USER1);
-        kunmap_atomic(dst_vaddr, KM_USER0);
-
-        return 0;
+        kunmap(src_page);
+        kunmap(dst_page);
 }
 
-static inline int
+static inline void
 slow_shmem_bit17_copy(struct page *gpu_page,
                       int gpu_offset,
                       struct page *cpu_page,
@@ -214,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
                                        cpu_page, cpu_offset, length);
         }
 
-        gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
-        if (gpu_vaddr == NULL)
-                return -ENOMEM;
-
-        cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
-        if (cpu_vaddr == NULL) {
-                kunmap_atomic(gpu_vaddr, KM_USER0);
-                return -ENOMEM;
-        }
+        gpu_vaddr = kmap(gpu_page);
+        cpu_vaddr = kmap(cpu_page);
 
         /* Copy the data, XORing A6 with A17 (1). The user already knows he's
          * XORing with the other bits (A9 for Y, A9 and A10 for X)
@@ -246,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
                 length -= this_length;
         }
 
-        kunmap_atomic(cpu_vaddr, KM_USER1);
-        kunmap_atomic(gpu_vaddr, KM_USER0);
-
-        return 0;
+        kunmap(cpu_page);
+        kunmap(gpu_page);
 }
 
 /**
@@ -425,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                 page_length = PAGE_SIZE - data_page_offset;
 
                 if (do_bit17_swizzling) {
-                        ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-                                                    shmem_page_offset,
-                                                    user_pages[data_page_index],
-                                                    data_page_offset,
-                                                    page_length,
-                                                    1);
-                } else {
-                        ret = slow_shmem_copy(user_pages[data_page_index],
-                                              data_page_offset,
-                                              obj_priv->pages[shmem_page_index],
-                                              shmem_page_offset,
-                                              page_length);
+                        slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                                              shmem_page_offset,
+                                              user_pages[data_page_index],
+                                              data_page_offset,
+                                              page_length,
+                                              1);
+                } else {
+                        slow_shmem_copy(user_pages[data_page_index],
+                                        data_page_offset,
+                                        obj_priv->pages[shmem_page_index],
+                                        shmem_page_offset,
+                                        page_length);
                 }
-                if (ret)
-                        goto fail_put_pages;
 
                 remain -= page_length;
                 data_ptr += page_length;
@@ -900,21 +880,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                 page_length = PAGE_SIZE - data_page_offset;
 
                 if (do_bit17_swizzling) {
-                        ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-                                                    shmem_page_offset,
-                                                    user_pages[data_page_index],
-                                                    data_page_offset,
-                                                    page_length,
-                                                    0);
-                } else {
-                        ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
-                                              shmem_page_offset,
-                                              user_pages[data_page_index],
-                                              data_page_offset,
-                                              page_length);
+                        slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                                              shmem_page_offset,
+                                              user_pages[data_page_index],
+                                              data_page_offset,
+                                              page_length,
+                                              0);
+                } else {
+                        slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                                        shmem_page_offset,
+                                        user_pages[data_page_index],
+                                        data_page_offset,
+                                        page_length);
                 }
-                if (ret)
-                        goto fail_put_pages;
 
                 remain -= page_length;
                 data_ptr += page_length;