author     Nick Piggin <npiggin@suse.de>      2008-02-07 01:20:50 -0500
committer  Dave Airlie <airlied@redhat.com>   2008-02-19 18:37:12 -0500
commit     ca0b07d9a969c6561e5d6f69c861fbedf8d09e5d
tree       233d099610d7503e2faea7399724e5ae5e8372a4
parent     b39d50e53b1bb27f6c29f88a697a4af78427dffd
drm: convert drm from nopage to fault.
Remove redundant vma range checks.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Airlie <airlied@linux.ie>
 drivers/char/drm/drm_vm.c | 125
 1 file changed, 55 insertions(+), 70 deletions(-)
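The patch applies one mechanical pattern to all four handlers, so a before/after sketch of the two callback styles may help before reading the diff. This is an illustrative comparison against the fault API of the era (struct vm_fault with a virtual_address field; later kernels reshaped it), not drm code: the example_* functions and the page-lookup helper are hypothetical stand-ins.

/*
 * Illustrative sketch only: example_* names are hypothetical, and
 * example_lookup() stands in for whatever page lookup a driver does.
 * The types and constants (struct vm_fault, NOPAGE_SIGBUS, VM_FAULT_*)
 * are the 2.6.24-era <linux/mm.h> interfaces this patch is written
 * against.
 */
#include <linux/mm.h>

/* Hypothetical helper: map an offset within the vma to its page. */
static struct page *example_lookup(struct vm_area_struct *vma,
                                   unsigned long offset);

/* Old style: return the page itself, signal errors with the special
 * NOPAGE_SIGBUS value, and report the fault type through *type. */
static struct page *example_nopage(struct vm_area_struct *vma,
                                   unsigned long address, int *type)
{
        struct page *page;

        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* each handler policed the range */
        page = example_lookup(vma, address - vma->vm_start);
        if (!page)
                return NOPAGE_SIGBUS;
        get_page(page);
        if (type)
                *type = VM_FAULT_MINOR;
        return page;
}

/* New style: return a VM_FAULT_* status code and hand the page back
 * through vmf->page.  The fault core only calls the handler for
 * addresses inside the vma and does the minor/major fault accounting
 * itself, which is what lets this patch drop the range checks and the
 * *type bookkeeping as redundant. */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        unsigned long offset = (unsigned long)vmf->virtual_address -
                                                vma->vm_start;
        struct page *page = example_lookup(vma, offset);

        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;
        return 0;       /* success: page is in vmf->page */
}

With the page reference and the status code separated this way, each drm_do_vm_*_fault() in the diff keeps only its real work, and the drm_vm_*_fault() wrappers reduce to one-line calls.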
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index cea4105374b2..3d65c4dcd0c6 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -66,7 +66,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
 }
 
 /**
- * \c nopage method for AGP virtual memory.
+ * \c fault method for AGP virtual memory.
  *
  * \param vma virtual memory area.
  * \param address access address.
@@ -76,8 +76,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
  * map, get the page, increment the use count and return it.
  */
 #if __OS_HAS_AGP
-static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
-                                                unsigned long address)
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
         struct drm_file *priv = vma->vm_file->private_data;
         struct drm_device *dev = priv->head->dev;
@@ -89,19 +88,24 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
          * Find the right map
          */
         if (!drm_core_has_AGP(dev))
-                goto vm_nopage_error;
+                goto vm_fault_error;
 
         if (!dev->agp || !dev->agp->cant_use_aperture)
-                goto vm_nopage_error;
+                goto vm_fault_error;
 
         if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
-                goto vm_nopage_error;
+                goto vm_fault_error;
 
         r_list = drm_hash_entry(hash, struct drm_map_list, hash);
         map = r_list->map;
 
         if (map && map->type == _DRM_AGP) {
-                unsigned long offset = address - vma->vm_start;
+                /*
+                 * Using vm_pgoff as a selector forces us to use this unusual
+                 * addressing scheme.
+                 */
+                unsigned long offset = (unsigned long)vmf->virtual_address -
+                                                        vma->vm_start;
                 unsigned long baddr = map->offset + offset;
                 struct drm_agp_mem *agpmem;
                 struct page *page;
@@ -123,7 +127,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                 }
 
                 if (!agpmem)
-                        goto vm_nopage_error;
+                        goto vm_fault_error;
 
                 /*
                  * Get the page, inc the use count, and return it
@@ -131,22 +135,21 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                 offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                 page = virt_to_page(__va(agpmem->memory->memory[offset]));
                 get_page(page);
+                vmf->page = page;
 
                 DRM_DEBUG
                     ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                      baddr, __va(agpmem->memory->memory[offset]), offset,
                      page_count(page));
-
-                return page;
+                return 0;
         }
-      vm_nopage_error:
-        return NOPAGE_SIGBUS;   /* Disallow mremap */
+vm_fault_error:
+        return VM_FAULT_SIGBUS; /* Disallow mremap */
 }
 #else                           /* __OS_HAS_AGP */
-static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
-                                                unsigned long address)
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-        return NOPAGE_SIGBUS;
+        return VM_FAULT_SIGBUS;
 }
 #endif                          /* __OS_HAS_AGP */
 
@@ -160,28 +163,26 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
  * Get the mapping, find the real physical page to map, get the page, and
  * return it.
  */
-static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
-                                                    unsigned long address)
+static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
         struct drm_map *map = (struct drm_map *) vma->vm_private_data;
         unsigned long offset;
         unsigned long i;
         struct page *page;
 
-        if (address > vma->vm_end)
-                return NOPAGE_SIGBUS;   /* Disallow mremap */
         if (!map)
-                return NOPAGE_SIGBUS;   /* Nothing allocated */
+                return VM_FAULT_SIGBUS; /* Nothing allocated */
 
-        offset = address - vma->vm_start;
+        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
         i = (unsigned long)map->handle + offset;
         page = vmalloc_to_page((void *)i);
         if (!page)
-                return NOPAGE_SIGBUS;
+                return VM_FAULT_SIGBUS;
         get_page(page);
+        vmf->page = page;
 
-        DRM_DEBUG("0x%lx\n", address);
-        return page;
+        DRM_DEBUG("shm_fault 0x%lx\n", offset);
+        return 0;
 }
 
 /**
@@ -263,7 +264,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 }
 
 /**
- * \c nopage method for DMA virtual memory.
+ * \c fault method for DMA virtual memory.
  *
  * \param vma virtual memory area.
  * \param address access address.
@@ -271,8 +272,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
  *
  * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
  */
-static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
-                                                    unsigned long address)
+static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
         struct drm_file *priv = vma->vm_file->private_data;
         struct drm_device *dev = priv->head->dev;
@@ -282,24 +282,23 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
         struct page *page;
 
         if (!dma)
-                return NOPAGE_SIGBUS;   /* Error */
-        if (address > vma->vm_end)
-                return NOPAGE_SIGBUS;   /* Disallow mremap */
+                return VM_FAULT_SIGBUS; /* Error */
         if (!dma->pagelist)
-                return NOPAGE_SIGBUS;   /* Nothing allocated */
+                return VM_FAULT_SIGBUS; /* Nothing allocated */
 
-        offset = address - vma->vm_start;       /* vm_[pg]off[set] should be 0 */
-        page_nr = offset >> PAGE_SHIFT;
+        offset = (unsigned long)vmf->virtual_address - vma->vm_start;  /* vm_[pg]off[set] should be 0 */
+        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
         page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
 
         get_page(page);
+        vmf->page = page;
 
-        DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);
-        return page;
+        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
+        return 0;
 }
 
 /**
- * \c nopage method for scatter-gather virtual memory.
+ * \c fault method for scatter-gather virtual memory.
  *
  * \param vma virtual memory area.
  * \param address access address.
@@ -307,8 +306,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
  *
  * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
  */
-static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
-                                                   unsigned long address)
+static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
         struct drm_map *map = (struct drm_map *) vma->vm_private_data;
         struct drm_file *priv = vma->vm_file->private_data;
@@ -320,77 +318,64 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
         struct page *page;
 
         if (!entry)
-                return NOPAGE_SIGBUS;   /* Error */
-        if (address > vma->vm_end)
-                return NOPAGE_SIGBUS;   /* Disallow mremap */
+                return VM_FAULT_SIGBUS; /* Error */
         if (!entry->pagelist)
-                return NOPAGE_SIGBUS;   /* Nothing allocated */
+                return VM_FAULT_SIGBUS; /* Nothing allocated */
 
-        offset = address - vma->vm_start;
+        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
         map_offset = map->offset - (unsigned long)dev->sg->virtual;
         page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
         page = entry->pagelist[page_offset];
         get_page(page);
+        vmf->page = page;
 
-        return page;
+        return 0;
 }
 
-static struct page *drm_vm_nopage(struct vm_area_struct *vma,
-                                  unsigned long address, int *type)
+static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-        if (type)
-                *type = VM_FAULT_MINOR;
-        return drm_do_vm_nopage(vma, address);
+        return drm_do_vm_fault(vma, vmf);
 }
 
-static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
-                                      unsigned long address, int *type)
+static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-        if (type)
-                *type = VM_FAULT_MINOR;
-        return drm_do_vm_shm_nopage(vma, address);
+        return drm_do_vm_shm_fault(vma, vmf);
 }
 
-static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
-                                      unsigned long address, int *type)
+static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-        if (type)
-                *type = VM_FAULT_MINOR;
-        return drm_do_vm_dma_nopage(vma, address);
+        return drm_do_vm_dma_fault(vma, vmf);
 }
 
-static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
-                                     unsigned long address, int *type)
+static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-        if (type)
-                *type = VM_FAULT_MINOR;
-        return drm_do_vm_sg_nopage(vma, address);
+        return drm_do_vm_sg_fault(vma, vmf);
 }
 
 /** AGP virtual memory operations */
 static struct vm_operations_struct drm_vm_ops = {
-        .nopage = drm_vm_nopage,
+        .fault = drm_vm_fault,
         .open = drm_vm_open,
         .close = drm_vm_close,
 };
 
 /** Shared virtual memory operations */
 static struct vm_operations_struct drm_vm_shm_ops = {
-        .nopage = drm_vm_shm_nopage,
+        .fault = drm_vm_shm_fault,
         .open = drm_vm_open,
         .close = drm_vm_shm_close,
 };
 
 /** DMA virtual memory operations */
 static struct vm_operations_struct drm_vm_dma_ops = {
-        .nopage = drm_vm_dma_nopage,
+        .fault = drm_vm_dma_fault,
         .open = drm_vm_open,
         .close = drm_vm_close,
 };
 
 /** Scatter-gather virtual memory operations */
 static struct vm_operations_struct drm_vm_sg_ops = {
-        .nopage = drm_vm_sg_nopage,
+        .fault = drm_vm_sg_fault,
         .open = drm_vm_open,
         .close = drm_vm_close,
 };
@@ -604,7 +589,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
         /*
          * On some platforms we can't talk to bus dma address from the CPU, so for
          * memory of type DRM_AGP, we'll deal with sorting out the real physical
-         * pages and mappings in nopage()
+         * pages and mappings in fault()
          */
 #if defined(__powerpc__)
         pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
@@ -634,7 +619,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
                 break;
         case _DRM_CONSISTENT:
                 /* Consistent memory is really like shared memory. But
-                 * it's allocated in a different way, so avoid fault */
+                 * it's allocated in a different way, so avoid fault */
                 if (remap_pfn_range(vma, vma->vm_start,
                                     page_to_pfn(virt_to_page(map->handle)),
                                     vma->vm_end - vma->vm_start, vma->vm_page_prot))