author     Dave Airlie <airlied@starflyer.(none)>  2005-09-25 00:28:13 -0400
committer  Dave Airlie <airlied@linux.ie>          2005-09-25 00:28:13 -0400
commit     b5e89ed53ed8d24f83ba1941c07382af00ed238e
tree       747bae7a565f88a2e1d5974776eeb054a932c505  /drivers/char/drm/drm_vm.c
parent     99a2657a29e2d623c3568cd86b27cac13fb63140
drm: lindent the drm directory.
I've been threatening this for a while, so no point hanging around. This
lindents the DRM code, which was always really bad in the tabbing department.
I've also fixed some misnamed files in comments and removed some trailing
whitespace.

Signed-off-by: Dave Airlie <airlied@linux.ie>
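The pass itself is mechanical: it comes from the kernel's Lindent script (scripts/Lindent in the tree), which as far as I can tell is a thin wrapper around GNU indent run with the kernel's K&R settings. The dominant rewrite, visible in almost every hunk below, is sketched here with fragments lifted from this very diff:

        /* Before: single-line conditionals, no space after the keyword */
        if(!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error;
        if (!map) continue;

        /* After: Lindent moves the body of a conditional onto its own
         * indented line and puts a space between keyword and parenthesis */
        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_nopage_error;
        if (!map)
                continue;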
Diffstat (limited to 'drivers/char/drm/drm_vm.c')
-rw-r--r--  drivers/char/drm/drm_vm.c  369
1 file changed, 194 insertions(+), 175 deletions(-)
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index a13d07f44202..e84a7876a1b3 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -1,7 +1,7 @@
 /**
- * \file drm_vm.h
+ * \file drm_vm.c
  * Memory mapping for DRM
  *
  * \author Rickard E. (Rik) Faith <faith@valinux.com>
  * \author Gareth Hughes <gareth@valinux.com>
  */
@@ -47,32 +47,34 @@ static void drm_vm_close(struct vm_area_struct *vma);
  * \param vma virtual memory area.
  * \param address access address.
  * \return pointer to the page structure.
  *
  * Find the right map and if it's AGP memory find the real physical page to
  * map, get the page, increment the use count and return it.
  */
 #if __OS_HAS_AGP
 static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                 unsigned long address)
 {
         drm_file_t *priv = vma->vm_file->private_data;
         drm_device_t *dev = priv->head->dev;
         drm_map_t *map = NULL;
         drm_map_list_t *r_list;
         struct list_head *list;

         /*
          * Find the right map
          */
         if (!drm_core_has_AGP(dev))
                 goto vm_nopage_error;

-        if(!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error;
+        if (!dev->agp || !dev->agp->cant_use_aperture)
+                goto vm_nopage_error;

         list_for_each(list, &dev->maplist->head) {
                 r_list = list_entry(list, drm_map_list_t, head);
                 map = r_list->map;
-                if (!map) continue;
+                if (!map)
+                        continue;
                 if (r_list->user_token == VM_OFFSET(vma))
                         break;
         }
@@ -85,45 +87,47 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,

 #ifdef __alpha__
         /*
          * Adjust to a bus-relative address
          */
         baddr -= dev->hose->mem_space->start;
 #endif

         /*
          * It's AGP memory - find the real physical page to map
          */
-        for(agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
+        for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                 if (agpmem->bound <= baddr &&
                     agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                         break;
         }

-        if (!agpmem) goto vm_nopage_error;
+        if (!agpmem)
+                goto vm_nopage_error;

         /*
          * Get the page, inc the use count, and return it
          */
         offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
         page = virt_to_page(__va(agpmem->memory->memory[offset]));
         get_page(page);

-        DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
-                  baddr, __va(agpmem->memory->memory[offset]), offset,
-                  page_count(page));
+        DRM_DEBUG
+            ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
+             baddr, __va(agpmem->memory->memory[offset]), offset,
+             page_count(page));

                 return page;
         }
-vm_nopage_error:
+      vm_nopage_error:
         return NOPAGE_SIGBUS;        /* Disallow mremap */
 }
 #else                                /* __OS_HAS_AGP */
 static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                 unsigned long address)
 {
         return NOPAGE_SIGBUS;
 }
 #endif                               /* __OS_HAS_AGP */

 /**
  * \c nopage method for shared virtual memory.
@@ -131,25 +135,27 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
  * \param vma virtual memory area.
  * \param address access address.
  * \return pointer to the page structure.
  *
  * Get the the mapping, find the real physical page to map, get the page, and
  * return it.
  */
 static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
                                                     unsigned long address)
 {
-        drm_map_t *map = (drm_map_t *)vma->vm_private_data;
+        drm_map_t *map = (drm_map_t *) vma->vm_private_data;
         unsigned long offset;
         unsigned long i;
         struct page *page;

-        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
-        if (!map) return NOPAGE_OOM; /* Nothing allocated */
+        if (address > vma->vm_end)
+                return NOPAGE_SIGBUS;        /* Disallow mremap */
+        if (!map)
+                return NOPAGE_OOM;           /* Nothing allocated */

         offset = address - vma->vm_start;
         i = (unsigned long)map->handle + offset;
         page = (map->type == _DRM_CONSISTENT) ?
             virt_to_page((void *)i) : vmalloc_to_page((void *)i);
         if (!page)
                 return NOPAGE_OOM;
         get_page(page);
@@ -158,19 +164,18 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
         return page;
 }

-
 /**
  * \c close method for shared virtual memory.
  *
  * \param vma virtual memory area.
  *
  * Deletes map information if we are the last
  * person to close a mapping and it's not in the global maplist.
  */
 static void drm_vm_shm_close(struct vm_area_struct *vma)
 {
         drm_file_t *priv = vma->vm_file->private_data;
         drm_device_t *dev = priv->head->dev;
         drm_vma_entry_t *pt, *prev, *next;
         drm_map_t *map;
         drm_map_list_t *r_list;
@@ -186,7 +191,8 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
         down(&dev->struct_sem);
         for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                 next = pt->next;
-                if (pt->vma->vm_private_data == map) found_maps++;
+                if (pt->vma->vm_private_data == map)
+                        found_maps++;
                 if (pt->vma == vma) {
                         if (prev) {
                                 prev->next = pt->next;
@@ -199,8 +205,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
                 }
         }
         /* We were the only map that was found */
-        if(found_maps == 1 &&
-           map->flags & _DRM_REMOVABLE) {
+        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                 /* Check to see if we are in the maplist, if we are not, then
                  * we delete this mappings information.
                  */
@@ -208,10 +213,11 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
                 list = &dev->maplist->head;
                 list_for_each(list, &dev->maplist->head) {
                         r_list = list_entry(list, drm_map_list_t, head);
-                        if (r_list->map == map) found_maps++;
+                        if (r_list->map == map)
+                                found_maps++;
                 }

-                if(!found_maps) {
+                if (!found_maps) {
                         drm_dma_handle_t dmah;

                         switch (map->type) {
@@ -251,27 +257,29 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
  * \param vma virtual memory area.
  * \param address access address.
  * \return pointer to the page structure.
  *
  * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
  */
 static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
                                                     unsigned long address)
 {
         drm_file_t *priv = vma->vm_file->private_data;
         drm_device_t *dev = priv->head->dev;
         drm_device_dma_t *dma = dev->dma;
         unsigned long offset;
         unsigned long page_nr;
         struct page *page;

-        if (!dma) return NOPAGE_SIGBUS; /* Error */
-        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
-        if (!dma->pagelist) return NOPAGE_OOM ; /* Nothing allocated */
-
-        offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
-        page_nr = offset >> PAGE_SHIFT;
-        page = virt_to_page((dma->pagelist[page_nr] +
-                             (offset & (~PAGE_MASK))));
+        if (!dma)
+                return NOPAGE_SIGBUS;        /* Error */
+        if (address > vma->vm_end)
+                return NOPAGE_SIGBUS;        /* Disallow mremap */
+        if (!dma->pagelist)
+                return NOPAGE_OOM;           /* Nothing allocated */
+
+        offset = address - vma->vm_start;    /* vm_[pg]off[set] should be 0 */
+        page_nr = offset >> PAGE_SHIFT;
+        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

         get_page(page);

@@ -285,13 +293,13 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
  * \param vma virtual memory area.
  * \param address access address.
  * \return pointer to the page structure.
  *
  * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
  */
 static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
 {
-        drm_map_t *map = (drm_map_t *)vma->vm_private_data;
+        drm_map_t *map = (drm_map_t *) vma->vm_private_data;
         drm_file_t *priv = vma->vm_file->private_data;
         drm_device_t *dev = priv->head->dev;
         drm_sg_mem_t *entry = dev->sg;
@@ -300,10 +308,12 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
         unsigned long page_offset;
         struct page *page;

-        if (!entry) return NOPAGE_SIGBUS; /* Error */
-        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
-        if (!entry->pagelist) return NOPAGE_OOM ; /* Nothing allocated */
-
+        if (!entry)
+                return NOPAGE_SIGBUS;        /* Error */
+        if (address > vma->vm_end)
+                return NOPAGE_SIGBUS;        /* Disallow mremap */
+        if (!entry->pagelist)
+                return NOPAGE_OOM;           /* Nothing allocated */

         offset = address - vma->vm_start;
         map_offset = map->offset - (unsigned long)dev->sg->virtual;
@@ -314,76 +324,78 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
         return page;
 }

-
 static struct page *drm_vm_nopage(struct vm_area_struct *vma,
-                                  unsigned long address,
-                                  int *type) {
-        if (type) *type = VM_FAULT_MINOR;
+                                  unsigned long address, int *type)
+{
+        if (type)
+                *type = VM_FAULT_MINOR;
         return drm_do_vm_nopage(vma, address);
 }

 static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
-                                      unsigned long address,
-                                      int *type) {
-        if (type) *type = VM_FAULT_MINOR;
+                                      unsigned long address, int *type)
+{
+        if (type)
+                *type = VM_FAULT_MINOR;
         return drm_do_vm_shm_nopage(vma, address);
 }

 static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
-                                      unsigned long address,
-                                      int *type) {
-        if (type) *type = VM_FAULT_MINOR;
+                                      unsigned long address, int *type)
+{
+        if (type)
+                *type = VM_FAULT_MINOR;
         return drm_do_vm_dma_nopage(vma, address);
 }

 static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
-                                     unsigned long address,
-                                     int *type) {
-        if (type) *type = VM_FAULT_MINOR;
+                                     unsigned long address, int *type)
+{
+        if (type)
+                *type = VM_FAULT_MINOR;
         return drm_do_vm_sg_nopage(vma, address);
 }

 /** AGP virtual memory operations */
 static struct vm_operations_struct drm_vm_ops = {
         .nopage = drm_vm_nopage,
         .open = drm_vm_open,
         .close = drm_vm_close,
 };

 /** Shared virtual memory operations */
 static struct vm_operations_struct drm_vm_shm_ops = {
         .nopage = drm_vm_shm_nopage,
         .open = drm_vm_open,
         .close = drm_vm_shm_close,
 };

 /** DMA virtual memory operations */
 static struct vm_operations_struct drm_vm_dma_ops = {
         .nopage = drm_vm_dma_nopage,
         .open = drm_vm_open,
         .close = drm_vm_close,
 };

 /** Scatter-gather virtual memory operations */
 static struct vm_operations_struct drm_vm_sg_ops = {
         .nopage = drm_vm_sg_nopage,
         .open = drm_vm_open,
         .close = drm_vm_close,
 };

-
 /**
  * \c open method for shared virtual memory.
  *
  * \param vma virtual memory area.
  *
  * Create a new drm_vma_entry structure as the \p vma private data entry and
  * add it to drm_device::vmalist.
  */
 static void drm_vm_open(struct vm_area_struct *vma)
 {
         drm_file_t *priv = vma->vm_file->private_data;
         drm_device_t *dev = priv->head->dev;
         drm_vma_entry_t *vma_entry;

         DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -393,26 +405,26 @@ static void drm_vm_open(struct vm_area_struct *vma)
         vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
         if (vma_entry) {
                 down(&dev->struct_sem);
                 vma_entry->vma = vma;
                 vma_entry->next = dev->vmalist;
                 vma_entry->pid = current->pid;
                 dev->vmalist = vma_entry;
                 up(&dev->struct_sem);
         }
 }

 /**
  * \c close method for all virtual memory types.
  *
  * \param vma virtual memory area.
  *
  * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
  * free it.
  */
 static void drm_vm_close(struct vm_area_struct *vma)
 {
         drm_file_t *priv = vma->vm_file->private_data;
         drm_device_t *dev = priv->head->dev;
         drm_vma_entry_t *pt, *prev;

         DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -440,43 +452,44 @@ static void drm_vm_close(struct vm_area_struct *vma)
  * \param filp file pointer.
  * \param vma virtual memory area.
  * \return zero on success or a negative number on failure.
  *
  * Sets the virtual memory area operations structure to vm_dma_ops, the file
  * pointer, and calls vm_open().
  */
 static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 {
         drm_file_t *priv = filp->private_data;
         drm_device_t *dev;
         drm_device_dma_t *dma;
         unsigned long length = vma->vm_end - vma->vm_start;

         lock_kernel();
         dev = priv->head->dev;
         dma = dev->dma;
         DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                   vma->vm_start, vma->vm_end, VM_OFFSET(vma));

         /* Length must match exact page count */
         if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                 unlock_kernel();
                 return -EINVAL;
         }
         unlock_kernel();

         vma->vm_ops = &drm_vm_dma_ops;

         vma->vm_flags |= VM_RESERVED;        /* Don't swap */

         vma->vm_file = filp;                 /* Needed for drm_vm_open() */
         drm_vm_open(vma);
         return 0;
 }

-unsigned long drm_core_get_map_ofs(drm_map_t *map)
+unsigned long drm_core_get_map_ofs(drm_map_t * map)
 {
         return map->offset;
 }
+
 EXPORT_SYMBOL(drm_core_get_map_ofs);

 unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
@@ -487,6 +500,7 @@ unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
         return 0;
 #endif
 }
+
 EXPORT_SYMBOL(drm_core_get_reg_ofs);

 /**
@@ -495,7 +509,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
  * \param filp file pointer.
  * \param vma virtual memory area.
  * \return zero on success or a negative number on failure.
  *
  * If the virtual memory area has no offset associated with it then it's a DMA
  * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
  * checks that the restricted flag is not set, sets the virtual memory operations
@@ -504,17 +518,18 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
  */
 int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 {
         drm_file_t *priv = filp->private_data;
         drm_device_t *dev = priv->head->dev;
         drm_map_t *map = NULL;
         drm_map_list_t *r_list;
         unsigned long offset = 0;
         struct list_head *list;

         DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                   vma->vm_start, vma->vm_end, VM_OFFSET(vma));

-        if ( !priv->authenticated ) return -EACCES;
+        if (!priv->authenticated)
+                return -EACCES;

         /* We check for "dma". On Apple's UniNorth, it's valid to have
          * the AGP mapped at physical address 0
@@ -522,61 +537,66 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
          */
         if (!VM_OFFSET(vma)
 #if __OS_HAS_AGP
-            && (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
+            && (!dev->agp
+                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
 #endif
             )
                 return drm_mmap_dma(filp, vma);

         /* A sequential search of a linked list is
            fine here because: 1) there will only be
            about 5-10 entries in the list and, 2) a
            DRI client only has to do this mapping
            once, so it doesn't have to be optimized
            for performance, even if the list was a
            bit longer. */
         list_for_each(list, &dev->maplist->head) {

                 r_list = list_entry(list, drm_map_list_t, head);
                 map = r_list->map;
-                if (!map) continue;
+                if (!map)
+                        continue;
                 if (r_list->user_token == VM_OFFSET(vma))
                         break;
         }

-        if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
+        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                 return -EPERM;

         /* Check for valid size. */
-        if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;
+        if (map->size != vma->vm_end - vma->vm_start)
+                return -EINVAL;

         if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
 #if defined(__i386__) || defined(__x86_64__)
                 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
 #else
                 /* Ye gads this is ugly.  With more thought
                    we could move this up higher and use
                    `protection_map' instead.  */
-                vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
-                        __pte(pgprot_val(vma->vm_page_prot)))));
+                vma->vm_page_prot =
+                    __pgprot(pte_val
+                             (pte_wrprotect
+                              (__pte(pgprot_val(vma->vm_page_prot)))));
 #endif
         }

         switch (map->type) {
         case _DRM_AGP:
                 if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                         /*
                          * On some platforms we can't talk to bus dma address from the CPU, so for
                          * memory of type DRM_AGP, we'll deal with sorting out the real physical
                          * pages and mappings in nopage()
                          */
 #if defined(__powerpc__)
                         pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
 #endif
                         vma->vm_ops = &drm_vm_ops;
                         break;
                 }
                 /* fall through to _DRM_FRAME_BUFFER... */
         case _DRM_FRAME_BUFFER:
         case _DRM_REGISTERS:
 #if defined(__i386__) || defined(__x86_64__)
@@ -591,27 +611,25 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 #endif
                 vma->vm_flags |= VM_IO;        /* not in core dump */
 #if defined(__ia64__)
-                if (efi_range_is_wc(vma->vm_start, vma->vm_end -
-                                    vma->vm_start))
+                if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
                         vma->vm_page_prot =
                             pgprot_writecombine(vma->vm_page_prot);
                 else
-                        vma->vm_page_prot =
-                            pgprot_noncached(vma->vm_page_prot);
+                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 #endif
                 offset = dev->driver->get_reg_ofs(dev);
 #ifdef __sparc__
                 if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
                                        (map->offset + offset) >> PAGE_SHIFT,
                                        vma->vm_end - vma->vm_start,
                                        vma->vm_page_prot))
 #else
                 if (io_remap_pfn_range(vma, vma->vm_start,
                                        (map->offset + offset) >> PAGE_SHIFT,
                                        vma->vm_end - vma->vm_start,
                                        vma->vm_page_prot))
 #endif
                         return -EAGAIN;
                 DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
                           " offset = 0x%lx\n",
                           map->type,
@@ -624,22 +642,23 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
                  * allocate in a different way */
                 vma->vm_ops = &drm_vm_shm_ops;
                 vma->vm_private_data = (void *)map;
                 /* Don't let this area swap.  Change when
                    DRM_KERNEL advisory is supported. */
                 vma->vm_flags |= VM_RESERVED;
                 break;
         case _DRM_SCATTER_GATHER:
                 vma->vm_ops = &drm_vm_sg_ops;
                 vma->vm_private_data = (void *)map;
                 vma->vm_flags |= VM_RESERVED;
                 break;
         default:
                 return -EINVAL;        /* This should never happen. */
         }
         vma->vm_flags |= VM_RESERVED;        /* Don't swap */

         vma->vm_file = filp;                 /* Needed for drm_vm_open() */
         drm_vm_open(vma);
         return 0;
 }
+
 EXPORT_SYMBOL(drm_mmap);
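
For orientation when reading the reformatted result: the file's structure is unchanged by this commit. Each mapping type pairs a drm_do_vm_*_nopage() helper with a vm_operations_struct, and drm_mmap() selects one of the tables based on map->type. A minimal sketch of that pattern, using only names that appear in the diff above (this is the 2.6-era nopage API, shown for illustration; it does not build against modern kernels, where nopage was replaced by fault handlers):

        /* Generic wrapper: report a minor fault (the page was found
         * without blocking I/O) and defer the real page lookup to the
         * type-specific helper, here the AGP one. */
        static struct page *drm_vm_nopage(struct vm_area_struct *vma,
                                          unsigned long address, int *type)
        {
                if (type)
                        *type = VM_FAULT_MINOR;
                return drm_do_vm_nopage(vma, address);
        }

        /* The wrapper is then plugged into the per-type operations table;
         * drm_vm_open()/drm_vm_close() keep dev->vmalist up to date as
         * mappings are duplicated and torn down. */
        static struct vm_operations_struct drm_vm_ops = {
                .nopage = drm_vm_nopage,
                .open = drm_vm_open,
                .close = drm_vm_close,
        };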