Diffstat (limited to 'drivers/char/drm/drm_vm.c')

 drivers/char/drm/drm_vm.c | 368 ++++++++++++++++++++++++----------------------
 1 file changed, 194 insertions(+), 174 deletions(-)
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index ced4215e2275..3f73aa774c80 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -1,7 +1,7 @@
 /**
- * \file drm_vm.h
+ * \file drm_vm.c
  * Memory mapping for DRM
  *
  * \author Rickard E. (Rik) Faith <faith@valinux.com>
  * \author Gareth Hughes <gareth@valinux.com>
  */
@@ -47,32 +47,34 @@ static void drm_vm_close(struct vm_area_struct *vma);
  * \param vma virtual memory area.
  * \param address access address.
  * \return pointer to the page structure.
  *
  * Find the right map and if it's AGP memory find the real physical page to
  * map, get the page, increment the use count and return it.
  */
 #if __OS_HAS_AGP
 static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 						unsigned long address)
 {
 	drm_file_t *priv = vma->vm_file->private_data;
 	drm_device_t *dev = priv->head->dev;
 	drm_map_t *map = NULL;
 	drm_map_list_t *r_list;
 	struct list_head *list;
 
 	/*
 	 * Find the right map
 	 */
 	if (!drm_core_has_AGP(dev))
 		goto vm_nopage_error;
 
-	if(!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error;
+	if (!dev->agp || !dev->agp->cant_use_aperture)
+		goto vm_nopage_error;
 
 	list_for_each(list, &dev->maplist->head) {
 		r_list = list_entry(list, drm_map_list_t, head);
 		map = r_list->map;
-		if (!map) continue;
+		if (!map)
+			continue;
 		if (r_list->user_token == VM_OFFSET(vma))
 			break;
 	}
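
Note: VM_OFFSET(vma) here is the byte offset the client passed to mmap(2) (in
this era it expands to vma->vm_pgoff << PAGE_SHIFT), and DRM treats it as an
opaque token selecting a map rather than as a real file offset. A minimal
sketch of the userspace side, with hypothetical names:

	/* user_token is the map handle obtained from the DRM map ioctls;
	 * the loop above matches it against r_list->user_token. */
	void *base = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, drm_fd, user_token);
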
@@ -85,45 +87,47 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 
 #ifdef __alpha__
 	/*
 	 * Adjust to a bus-relative address
 	 */
 	baddr -= dev->hose->mem_space->start;
 #endif
 
 	/*
 	 * It's AGP memory - find the real physical page to map
 	 */
-	for(agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
+	for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
 		if (agpmem->bound <= baddr &&
 		    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
 			break;
 	}
 
-	if (!agpmem) goto vm_nopage_error;
+	if (!agpmem)
+		goto vm_nopage_error;
 
 	/*
 	 * Get the page, inc the use count, and return it
 	 */
 	offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
 	page = virt_to_page(__va(agpmem->memory->memory[offset]));
 	get_page(page);
 
-	DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
-		  baddr, __va(agpmem->memory->memory[offset]), offset,
-		  page_count(page));
+	DRM_DEBUG
+	    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
+	     baddr, __va(agpmem->memory->memory[offset]), offset,
+	     page_count(page));
 
 	return page;
 	}
 vm_nopage_error:
 	return NOPAGE_SIGBUS;	/* Disallow mremap */
 }
 #else				/* __OS_HAS_AGP */
 static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 						unsigned long address)
 {
 	return NOPAGE_SIGBUS;
 }
 #endif				/* __OS_HAS_AGP */
 
 /**
  * \c nopage method for shared virtual memory.
@@ -131,24 +135,27 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
  * \param vma virtual memory area.
  * \param address access address.
  * \return pointer to the page structure.
  *
  * Get the the mapping, find the real physical page to map, get the page, and
  * return it.
  */
 static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
 						     unsigned long address)
 {
-	drm_map_t *map = (drm_map_t *)vma->vm_private_data;
+	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
 	unsigned long offset;
 	unsigned long i;
 	struct page *page;
 
-	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
-	if (!map) return NOPAGE_OOM;  /* Nothing allocated */
+	if (address > vma->vm_end)
+		return NOPAGE_SIGBUS;	/* Disallow mremap */
+	if (!map)
+		return NOPAGE_OOM;	/* Nothing allocated */
 
 	offset = address - vma->vm_start;
 	i = (unsigned long)map->handle + offset;
-	page = vmalloc_to_page((void *)i);
+	page = (map->type == _DRM_CONSISTENT) ?
+	    virt_to_page((void *)i) : vmalloc_to_page((void *)i);
 	if (!page)
 		return NOPAGE_OOM;
 	get_page(page);
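
Note: the vmalloc_to_page()/virt_to_page() split above is the one functional
change in this hunk. _DRM_SHM maps are allocated with vmalloc(), so their
pages must be looked up through the page tables, while _DRM_CONSISTENT maps
come from the consistent-DMA allocator and live in the kernel direct map. As
a sketch of the distinction (hypothetical helper, not part of the patch):

	static struct page *map_kaddr_to_page(drm_map_t *map, void *kaddr)
	{
		if (map->type == _DRM_CONSISTENT)
			return virt_to_page(kaddr);	/* direct-mapped */
		return vmalloc_to_page(kaddr);		/* walks page tables */
	}
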
@@ -157,19 +164,18 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
 	return page;
 }
 
-
 /**
  * \c close method for shared virtual memory.
  *
  * \param vma virtual memory area.
  *
  * Deletes map information if we are the last
  * person to close a mapping and it's not in the global maplist.
  */
 static void drm_vm_shm_close(struct vm_area_struct *vma)
 {
 	drm_file_t *priv = vma->vm_file->private_data;
 	drm_device_t *dev = priv->head->dev;
 	drm_vma_entry_t *pt, *prev, *next;
 	drm_map_t *map;
 	drm_map_list_t *r_list;
@@ -185,7 +191,8 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 	down(&dev->struct_sem);
 	for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
 		next = pt->next;
-		if (pt->vma->vm_private_data == map) found_maps++;
+		if (pt->vma->vm_private_data == map)
+			found_maps++;
 		if (pt->vma == vma) {
 			if (prev) {
 				prev->next = pt->next;
@@ -198,8 +205,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 		}
 	}
 	/* We were the only map that was found */
-	if(found_maps == 1 &&
-	   map->flags & _DRM_REMOVABLE) {
+	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
 		/* Check to see if we are in the maplist, if we are not, then
 		 * we delete this mappings information.
 		 */
@@ -207,10 +213,11 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 		list = &dev->maplist->head;
 		list_for_each(list, &dev->maplist->head) {
 			r_list = list_entry(list, drm_map_list_t, head);
-			if (r_list->map == map) found_maps++;
+			if (r_list->map == map)
+				found_maps++;
 		}
 
-		if(!found_maps) {
+		if (!found_maps) {
 			drm_dma_handle_t dmah;
 
 			switch (map->type) {
@@ -250,27 +257,29 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
  * \param vma virtual memory area.
  * \param address access address.
  * \return pointer to the page structure.
  *
  * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
  */
 static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
 						     unsigned long address)
 {
 	drm_file_t *priv = vma->vm_file->private_data;
 	drm_device_t *dev = priv->head->dev;
 	drm_device_dma_t *dma = dev->dma;
 	unsigned long offset;
 	unsigned long page_nr;
 	struct page *page;
 
-	if (!dma) return NOPAGE_SIGBUS; /* Error */
-	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
-	if (!dma->pagelist) return NOPAGE_OOM ; /* Nothing allocated */
-
-	offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
-	page_nr = offset >> PAGE_SHIFT;
-	page = virt_to_page((dma->pagelist[page_nr] +
-			     (offset & (~PAGE_MASK))));
+	if (!dma)
+		return NOPAGE_SIGBUS;	/* Error */
+	if (address > vma->vm_end)
+		return NOPAGE_SIGBUS;	/* Disallow mremap */
+	if (!dma->pagelist)
+		return NOPAGE_OOM;	/* Nothing allocated */
+
+	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
+	page_nr = offset >> PAGE_SHIFT;
+	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
 
 	get_page(page);
 
@@ -284,13 +293,13 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
  * \param vma virtual memory area.
  * \param address access address.
  * \return pointer to the page structure.
  *
  * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
  */
 static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
 						   unsigned long address)
 {
-	drm_map_t *map = (drm_map_t *)vma->vm_private_data;
+	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
 	drm_file_t *priv = vma->vm_file->private_data;
 	drm_device_t *dev = priv->head->dev;
 	drm_sg_mem_t *entry = dev->sg;
@@ -299,10 +308,12 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
 	unsigned long page_offset;
 	struct page *page;
 
-	if (!entry) return NOPAGE_SIGBUS; /* Error */
-	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
-	if (!entry->pagelist) return NOPAGE_OOM ; /* Nothing allocated */
-
+	if (!entry)
+		return NOPAGE_SIGBUS;	/* Error */
+	if (address > vma->vm_end)
+		return NOPAGE_SIGBUS;	/* Disallow mremap */
+	if (!entry->pagelist)
+		return NOPAGE_OOM;	/* Nothing allocated */
 
 	offset = address - vma->vm_start;
 	map_offset = map->offset - (unsigned long)dev->sg->virtual;
@@ -313,76 +324,78 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
 	return page;
 }
 
-
 static struct page *drm_vm_nopage(struct vm_area_struct *vma,
-				  unsigned long address,
-				  int *type) {
-	if (type) *type = VM_FAULT_MINOR;
+				  unsigned long address, int *type)
+{
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return drm_do_vm_nopage(vma, address);
 }
 
 static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
-				      unsigned long address,
-				      int *type) {
-	if (type) *type = VM_FAULT_MINOR;
+				      unsigned long address, int *type)
+{
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return drm_do_vm_shm_nopage(vma, address);
 }
 
 static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
-				      unsigned long address,
-				      int *type) {
-	if (type) *type = VM_FAULT_MINOR;
+				      unsigned long address, int *type)
+{
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return drm_do_vm_dma_nopage(vma, address);
 }
 
 static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
-				     unsigned long address,
-				     int *type) {
-	if (type) *type = VM_FAULT_MINOR;
+				     unsigned long address, int *type)
+{
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return drm_do_vm_sg_nopage(vma, address);
 }
 
 /** AGP virtual memory operations */
 static struct vm_operations_struct drm_vm_ops = {
 	.nopage = drm_vm_nopage,
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
 
 /** Shared virtual memory operations */
 static struct vm_operations_struct drm_vm_shm_ops = {
 	.nopage = drm_vm_shm_nopage,
 	.open = drm_vm_open,
 	.close = drm_vm_shm_close,
 };
 
 /** DMA virtual memory operations */
 static struct vm_operations_struct drm_vm_dma_ops = {
 	.nopage = drm_vm_dma_nopage,
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
 
 /** Scatter-gather virtual memory operations */
 static struct vm_operations_struct drm_vm_sg_ops = {
 	.nopage = drm_vm_sg_nopage,
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
 
-
 /**
  * \c open method for shared virtual memory.
  *
  * \param vma virtual memory area.
  *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
  * add it to drm_device::vmalist.
  */
 static void drm_vm_open(struct vm_area_struct *vma)
 {
 	drm_file_t *priv = vma->vm_file->private_data;
 	drm_device_t *dev = priv->head->dev;
 	drm_vma_entry_t *vma_entry;
 
 	DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -392,26 +405,26 @@ static void drm_vm_open(struct vm_area_struct *vma)
 	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
 	if (vma_entry) {
 		down(&dev->struct_sem);
 		vma_entry->vma = vma;
 		vma_entry->next = dev->vmalist;
 		vma_entry->pid = current->pid;
 		dev->vmalist = vma_entry;
 		up(&dev->struct_sem);
 	}
 }
 
 /**
  * \c close method for all virtual memory types.
  *
  * \param vma virtual memory area.
  *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
  * free it.
  */
 static void drm_vm_close(struct vm_area_struct *vma)
 {
 	drm_file_t *priv = vma->vm_file->private_data;
 	drm_device_t *dev = priv->head->dev;
 	drm_vma_entry_t *pt, *prev;
 
 	DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -439,43 +452,44 @@ static void drm_vm_close(struct vm_area_struct *vma)
  * \param filp file pointer.
  * \param vma virtual memory area.
  * \return zero on success or a negative number on failure.
  *
  * Sets the virtual memory area operations structure to vm_dma_ops, the file
  * pointer, and calls vm_open().
  */
 static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 {
 	drm_file_t *priv = filp->private_data;
 	drm_device_t *dev;
 	drm_device_dma_t *dma;
 	unsigned long length = vma->vm_end - vma->vm_start;
 
 	lock_kernel();
 	dev = priv->head->dev;
 	dma = dev->dma;
 	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
 		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));
 
 	/* Length must match exact page count */
 	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
 		unlock_kernel();
 		return -EINVAL;
 	}
 	unlock_kernel();
 
 	vma->vm_ops = &drm_vm_dma_ops;
 
 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
 
 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
 	drm_vm_open(vma);
 	return 0;
 }
 
-unsigned long drm_core_get_map_ofs(drm_map_t *map)
+unsigned long drm_core_get_map_ofs(drm_map_t * map)
 {
 	return map->offset;
 }
+
 EXPORT_SYMBOL(drm_core_get_map_ofs);
 
 unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
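
Note: drm_mmap_dma() above is reached from drm_mmap() when the mmap offset is
zero, i.e. the request targets the DMA buffer pool rather than a tokenized
map, and the exact-length check forces clients to map the whole pool in one
call. Userspace side, sketched with hypothetical names:

	/* pool_size must equal dma->page_count << PAGE_SHIFT exactly,
	 * otherwise the mmap fails with EINVAL. */
	void *pool = mmap(NULL, pool_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, drm_fd, 0);
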
@@ -486,6 +500,7 @@ unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
 	return 0;
 #endif
 }
+
 EXPORT_SYMBOL(drm_core_get_reg_ofs);
 
 /**
@@ -494,7 +509,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
  * \param filp file pointer.
  * \param vma virtual memory area.
  * \return zero on success or a negative number on failure.
- * 
+ *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
@@ -503,17 +518,18 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
  */
 int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	drm_file_t *priv = filp->private_data;
 	drm_device_t *dev = priv->head->dev;
 	drm_map_t *map = NULL;
 	drm_map_list_t *r_list;
 	unsigned long offset = 0;
 	struct list_head *list;
 
 	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
 		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));
 
-	if ( !priv->authenticated ) return -EACCES;
+	if (!priv->authenticated)
+		return -EACCES;
 
 	/* We check for "dma". On Apple's UniNorth, it's valid to have
 	 * the AGP mapped at physical address 0
@@ -521,61 +537,66 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 	 */
 	if (!VM_OFFSET(vma)
 #if __OS_HAS_AGP
-	    && (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
+	    && (!dev->agp
+		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
 #endif
 	    )
 		return drm_mmap_dma(filp, vma);
 
 	/* A sequential search of a linked list is
 	   fine here because: 1) there will only be
 	   about 5-10 entries in the list and, 2) a
 	   DRI client only has to do this mapping
 	   once, so it doesn't have to be optimized
 	   for performance, even if the list was a
 	   bit longer. */
 	list_for_each(list, &dev->maplist->head) {
 
 		r_list = list_entry(list, drm_map_list_t, head);
 		map = r_list->map;
-		if (!map) continue;
+		if (!map)
+			continue;
 		if (r_list->user_token == VM_OFFSET(vma))
 			break;
 	}
 
-	if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
+	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
 		return -EPERM;
 
 	/* Check for valid size. */
-	if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;
+	if (map->size != vma->vm_end - vma->vm_start)
+		return -EINVAL;
 
 	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
 		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
 #if defined(__i386__) || defined(__x86_64__)
 		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
 #else
 		/* Ye gads this is ugly.  With more thought
 		   we could move this up higher and use
 		   `protection_map' instead.  */
-		vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
-			__pte(pgprot_val(vma->vm_page_prot)))));
+		vma->vm_page_prot =
+		    __pgprot(pte_val
+			     (pte_wrprotect
+			      (__pte(pgprot_val(vma->vm_page_prot)))));
 #endif
 	}
 
 	switch (map->type) {
 	case _DRM_AGP:
 		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
 			/*
 			 * On some platforms we can't talk to bus dma address from the CPU, so for
 			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
 			 * pages and mappings in nopage()
 			 */
 #if defined(__powerpc__)
 			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
 #endif
 			vma->vm_ops = &drm_vm_ops;
 			break;
 		}
 		/* fall through to _DRM_FRAME_BUFFER... */
 	case _DRM_FRAME_BUFFER:
 	case _DRM_REGISTERS:
 #if defined(__i386__) || defined(__x86_64__)
@@ -590,27 +611,25 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 #endif
 		vma->vm_flags |= VM_IO;	/* not in core dump */
 #if defined(__ia64__)
-		if (efi_range_is_wc(vma->vm_start, vma->vm_end -
-				    vma->vm_start))
+		if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
 			vma->vm_page_prot =
 				pgprot_writecombine(vma->vm_page_prot);
 		else
-			vma->vm_page_prot =
-				pgprot_noncached(vma->vm_page_prot);
+			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 #endif
 		offset = dev->driver->get_reg_ofs(dev);
 #ifdef __sparc__
 		if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
 				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
 				       vma->vm_page_prot))
 #else
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
 				       vma->vm_page_prot))
 #endif
 			return -EAGAIN;
 		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
 			  " offset = 0x%lx\n",
 			  map->type,
623 | * allocate in a different way */ | 642 | * allocate in a different way */ |
624 | vma->vm_ops = &drm_vm_shm_ops; | 643 | vma->vm_ops = &drm_vm_shm_ops; |
625 | vma->vm_private_data = (void *)map; | 644 | vma->vm_private_data = (void *)map; |
626 | /* Don't let this area swap. Change when | 645 | /* Don't let this area swap. Change when |
627 | DRM_KERNEL advisory is supported. */ | 646 | DRM_KERNEL advisory is supported. */ |
628 | vma->vm_flags |= VM_RESERVED; | 647 | vma->vm_flags |= VM_RESERVED; |
629 | break; | 648 | break; |
630 | case _DRM_SCATTER_GATHER: | 649 | case _DRM_SCATTER_GATHER: |
631 | vma->vm_ops = &drm_vm_sg_ops; | 650 | vma->vm_ops = &drm_vm_sg_ops; |
632 | vma->vm_private_data = (void *)map; | 651 | vma->vm_private_data = (void *)map; |
633 | vma->vm_flags |= VM_RESERVED; | 652 | vma->vm_flags |= VM_RESERVED; |
634 | break; | 653 | break; |
635 | default: | 654 | default: |
636 | return -EINVAL; /* This should never happen. */ | 655 | return -EINVAL; /* This should never happen. */ |
637 | } | 656 | } |
638 | vma->vm_flags |= VM_RESERVED; /* Don't swap */ | 657 | vma->vm_flags |= VM_RESERVED; /* Don't swap */ |
639 | 658 | ||
640 | vma->vm_file = filp; /* Needed for drm_vm_open() */ | 659 | vma->vm_file = filp; /* Needed for drm_vm_open() */ |
641 | drm_vm_open(vma); | 660 | drm_vm_open(vma); |
642 | return 0; | 661 | return 0; |
643 | } | 662 | } |
663 | |||
644 | EXPORT_SYMBOL(drm_mmap); | 664 | EXPORT_SYMBOL(drm_mmap); |