author     Dave Airlie <airlied@redhat.com>    2009-08-26 19:53:47 -0400
committer  Dave Airlie <airlied@redhat.com>    2009-08-26 19:53:47 -0400
commit     c9c97b8c75019814d8c007059bc827bb475be917 (patch)
tree       2a990d2beee85bb059a38fdea6a4d3325d3791e3 /drivers
parent     50f153036c9d9e4ae1768d5ca9c2ad4184f7a0b7 (diff)
drm/ttm: consolidate cache flushing code in one place.
This merges the TTM and drm cache flushing into one file in the drm core.

Signed-off-by: Dave Airlie <airlied@redhat.com>
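To illustrate the consolidation, here is a minimal sketch of what a TTM-style caller looks like after this patch. Only drm_clflush_pages() and the "drm_cache.h" include come from the diff below; the wrapper function and its name are hypothetical.

#include <linux/mm.h>
#include "drm_cache.h"	/* declares drm_clflush_pages(), per the include added to ttm_tt.c */

/* Hypothetical helper: flush the CPU caches for the pages backing a buffer
 * object before its caching state changes. Instead of carrying a private
 * copy of the clflush/wbinvd/dcache loop, the driver now calls the shared
 * helper in drm_cache.c. */
static void example_flush_backing_pages(struct page *pages[],
					unsigned long num_pages)
{
	drm_clflush_pages(pages, num_pages);
}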
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/gpu/drm/drm_cache.c    51
-rw-r--r--    drivers/gpu/drm/ttm/ttm_tt.c   67
2 files changed, 44 insertions, 74 deletions
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 0e994a0e46d4..3a5575e638db 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -45,25 +45,58 @@ drm_clflush_page(struct page *page)
 		clflush(page_virtual + i);
 	kunmap_atomic(page_virtual, KM_USER0);
 }
-#endif
 
+static void drm_cache_flush_clflush(struct page *pages[],
+				    unsigned long num_pages)
+{
+	unsigned long i;
+
+	mb();
+	for (i = 0; i < num_pages; i++)
+		drm_clflush_page(*pages++);
+	mb();
+}
+
+static void
+drm_clflush_ipi_handler(void *null)
+{
+	wbinvd();
+}
+#elif !defined(__powerpc__)
+static void drm_cache_ipi_handler(void *dummy)
+{
+}
+#endif
 void
 drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 {
 
 #if defined(CONFIG_X86)
 	if (cpu_has_clflush) {
-		unsigned long i;
-
-		mb();
-		for (i = 0; i < num_pages; ++i)
-			drm_clflush_page(*pages++);
-		mb();
-
+		drm_cache_flush_clflush(pages, num_pages);
 		return;
 	}
 
-	wbinvd();
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+
+#elif defined(__powerpc__)
+	unsigned long i;
+	for (i = 0; i < num_pages; i++) {
+		struct page *page = pages[i];
+		void *page_virtual;
+
+		if (unlikely(page == NULL))
+			continue;
+
+		page_virtual = kmap_atomic(page, KM_USER0);
+		flush_dcache_range((unsigned long)page_virtual,
+				   (unsigned long)page_virtual + PAGE_SIZE);
+		kunmap_atomic(page_virtual, KM_USER0);
+	}
+#else
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for drm cache flush\n");
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_pages);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 42cca5519761..a55ee1a56c16 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -34,76 +34,13 @@
 #include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/swap.h>
+#include "drm_cache.h"
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
 
 static int ttm_tt_swapin(struct ttm_tt *ttm);
 
-#if defined(CONFIG_X86)
-static void ttm_tt_clflush_page(struct page *page)
-{
-	uint8_t *page_virtual;
-	unsigned int i;
-
-	if (unlikely(page == NULL))
-		return;
-
-	page_virtual = kmap_atomic(page, KM_USER0);
-
-	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-		clflush(page_virtual + i);
-
-	kunmap_atomic(page_virtual, KM_USER0);
-}
-
-static void ttm_tt_cache_flush_clflush(struct page *pages[],
-				       unsigned long num_pages)
-{
-	unsigned long i;
-
-	mb();
-	for (i = 0; i < num_pages; ++i)
-		ttm_tt_clflush_page(*pages++);
-	mb();
-}
-#elif !defined(__powerpc__)
-static void ttm_tt_ipi_handler(void *null)
-{
-	;
-}
-#endif
-
-void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
-{
-
-#if defined(CONFIG_X86)
-	if (cpu_has_clflush) {
-		ttm_tt_cache_flush_clflush(pages, num_pages);
-		return;
-	}
-#elif defined(__powerpc__)
-	unsigned long i;
-
-	for (i = 0; i < num_pages; ++i) {
-		struct page *page = pages[i];
-		void *page_virtual;
-
-		if (unlikely(page == NULL))
-			continue;
-
-		page_virtual = kmap_atomic(page, KM_USER0);
-		flush_dcache_range((unsigned long) page_virtual,
-				   (unsigned long) page_virtual + PAGE_SIZE);
-		kunmap_atomic(page_virtual, KM_USER0);
-	}
-#else
-	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
-		printk(KERN_ERR TTM_PFX
-		       "Timed out waiting for drm cache flush.\n");
-#endif
-}
-
 /**
  * Allocates storage for pointers to the pages that back the ttm.
  *
@@ -302,7 +239,7 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	}
 
 	if (ttm->caching_state == tt_cached)
-		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
+		drm_clflush_pages(ttm->pages, ttm->num_pages);
 
 	for (i = 0; i < ttm->num_pages; ++i) {
 		cur_page = ttm->pages[i];