author     Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /drivers/gpu/drm/radeon/radeon_gart.c
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master    (archived-private-master)
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_gart.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c  |  74
1 file changed, 53 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a68d7566178c..1770d3c07fd0 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
         int r;
 
         if (rdev->gart.table.vram.robj == NULL) {
-                r = radeon_object_create(rdev, NULL,
-                                         rdev->gart.table_size,
-                                         true,
-                                         RADEON_GEM_DOMAIN_VRAM,
-                                         false, &rdev->gart.table.vram.robj);
+                r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+                                     true, RADEON_GEM_DOMAIN_VRAM,
+                                     &rdev->gart.table.vram.robj);
                 if (r) {
                         return r;
                 }
@@ -95,32 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
         uint64_t gpu_addr;
         int r;
 
-        r = radeon_object_pin(rdev->gart.table.vram.robj,
-                              RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
-        if (r) {
-                radeon_object_unref(&rdev->gart.table.vram.robj);
+        r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+        if (unlikely(r != 0))
                 return r;
-        }
-        r = radeon_object_kmap(rdev->gart.table.vram.robj,
-                               (void **)&rdev->gart.table.vram.ptr);
+        r = radeon_bo_pin(rdev->gart.table.vram.robj,
+                          RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
         if (r) {
-                radeon_object_unpin(rdev->gart.table.vram.robj);
-                radeon_object_unref(&rdev->gart.table.vram.robj);
-                DRM_ERROR("radeon: failed to map gart vram table.\n");
+                radeon_bo_unreserve(rdev->gart.table.vram.robj);
                 return r;
         }
+        r = radeon_bo_kmap(rdev->gart.table.vram.robj,
+                           (void **)&rdev->gart.table.vram.ptr);
+        if (r)
+                radeon_bo_unpin(rdev->gart.table.vram.robj);
+        radeon_bo_unreserve(rdev->gart.table.vram.robj);
         rdev->gart.table_addr = gpu_addr;
-        return 0;
+        return r;
 }
 
 void radeon_gart_table_vram_free(struct radeon_device *rdev)
 {
+        int r;
+
         if (rdev->gart.table.vram.robj == NULL) {
                 return;
         }
-        radeon_object_kunmap(rdev->gart.table.vram.robj);
-        radeon_object_unpin(rdev->gart.table.vram.robj);
-        radeon_object_unref(&rdev->gart.table.vram.robj);
+        r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+        if (likely(r == 0)) {
+                radeon_bo_kunmap(rdev->gart.table.vram.robj);
+                radeon_bo_unpin(rdev->gart.table.vram.robj);
+                radeon_bo_unreserve(rdev->gart.table.vram.robj);
+        }
+        radeon_bo_unref(&rdev->gart.table.vram.robj);
 }
 
 
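The two converted functions above follow the locking convention of the radeon_bo helpers: a buffer object must be reserved before it is pinned, mapped, or unpinned, and unreserved again afterwards. A minimal sketch of that calling pattern is shown below; it is illustrative only and not part of this commit, and example_pin_and_map, bo, gpu_addr and cpu_ptr are placeholder names.

static int example_pin_and_map(struct radeon_bo *bo, u64 *gpu_addr,
                               void **cpu_ptr)
{
        int r;

        /* take the buffer object lock before pin/kmap */
        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, gpu_addr);
        if (r) {
                radeon_bo_unreserve(bo);
                return r;
        }
        /* map the pinned buffer for CPU access */
        r = radeon_bo_kmap(bo, cpu_ptr);
        if (r)
                radeon_bo_unpin(bo);
        radeon_bo_unreserve(bo);
        return r;
}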
@@ -135,6 +139,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
         unsigned t;
         unsigned p;
         int i, j;
+        u64 page_base;
 
         if (!rdev->gart.ready) {
                 WARN(1, "trying to unbind memory to unitialized GART !\n");
@@ -147,9 +152,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
                         pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
                                        PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                         rdev->gart.pages[p] = NULL;
-                        rdev->gart.pages_addr[p] = 0;
+                        rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
+                        page_base = rdev->gart.pages_addr[p];
                         for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-                                radeon_gart_set_page(rdev, t, 0);
+                                radeon_gart_set_page(rdev, t, page_base);
+                                page_base += RADEON_GPU_PAGE_SIZE;
                         }
                 }
         }
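Since the host PAGE_SIZE may be larger than RADEON_GPU_PAGE_SIZE, each CPU page spans PAGE_SIZE / RADEON_GPU_PAGE_SIZE consecutive GART entries, and the hunk above now points all of them at the dummy page when the backing page is unbound. The helper below sketches that fan-out for illustration; example_set_cpu_page and its parameters are placeholders, not part of this file.

static void example_set_cpu_page(struct radeon_device *rdev,
                                 unsigned first_entry, u64 dma_addr)
{
        unsigned j;

        /* one GART entry per RADEON_GPU_PAGE_SIZE chunk of the CPU page */
        for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++) {
                radeon_gart_set_page(rdev, first_entry + j, dma_addr);
                dma_addr += RADEON_GPU_PAGE_SIZE;
        }
}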
@@ -195,8 +202,26 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
         return 0;
 }
 
+void radeon_gart_restore(struct radeon_device *rdev)
+{
+        int i, j, t;
+        u64 page_base;
+
+        for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
+                page_base = rdev->gart.pages_addr[i];
+                for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+                        radeon_gart_set_page(rdev, t, page_base);
+                        page_base += RADEON_GPU_PAGE_SIZE;
+                }
+        }
+        mb();
+        radeon_gart_tlb_flush(rdev);
+}
+
 int radeon_gart_init(struct radeon_device *rdev)
 {
+        int r, i;
+
         if (rdev->gart.pages) {
                 return 0;
         }
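radeon_gart_restore(), added above, rewrites every page-table entry from the cached pages_addr[] array and flushes the GART TLB, so callers can rebuild the table, for instance when the GART is re-enabled after suspend/resume, without touching the page bookkeeping. A hedged usage sketch, where example_gart_reenable and the ASIC-specific enable step are placeholders:

static int example_gart_reenable(struct radeon_device *rdev)
{
        /* repopulate every entry from rdev->gart.pages_addr[] and flush */
        radeon_gart_restore(rdev);
        /*
         * The ASIC-specific code would then program the table address
         * and switch the GART back on (placeholder, not shown here).
         */
        return 0;
}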
@@ -205,6 +230,9 @@ int radeon_gart_init(struct radeon_device *rdev)
                 DRM_ERROR("Page size is smaller than GPU page size!\n");
                 return -EINVAL;
         }
+        r = radeon_dummy_page_init(rdev);
+        if (r)
+                return r;
         /* Compute table size */
         rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
         rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
@@ -223,6 +251,10 @@ int radeon_gart_init(struct radeon_device *rdev)
                 radeon_gart_fini(rdev);
                 return -ENOMEM;
         }
+        /* set GART entry to point to the dummy page by default */
+        for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
+                rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
+        }
         return 0;
 }
 
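Taken together, the hunks above make unbound GART entries reference the driver's dummy page rather than address 0. The comment block below is an illustrative summary of the resulting lifecycle and is not part of the commit:

/*
 * Sketch of the dummy-page scheme (assumed summary, not from this file):
 *
 *   radeon_gart_init()    - every pages_addr[] slot starts out as
 *                           rdev->dummy_page.addr
 *   radeon_gart_bind()    - installs the real DMA address of each bound page
 *   radeon_gart_unbind()  - points the affected entries back at the dummy page
 *   radeon_gart_restore() - rewrites the whole table from pages_addr[]
 *
 * so the GPU never walks a page-table entry that points at address 0.
 */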