Diffstat (limited to 'drivers/gpu/drm/drm_bufs.c')
-rw-r--r--   drivers/gpu/drm/drm_bufs.c | 140
1 file changed, 56 insertions(+), 84 deletions(-)
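
The patch below is a mechanical conversion away from the DRM memory-debugging wrappers: every drm_alloc()/drm_free()/drm_realloc() call in drm_bufs.c becomes a plain kmalloc()/kfree()/krealloc(), and the DRM_MEM_* accounting area plus the explicit sizes passed to the old free/realloc helpers simply drop out. A minimal sketch of the mapping, written as hypothetical compatibility shims (not part of the patch, which open-codes the right-hand sides at each call site):

#include <linux/slab.h>

/* drm_alloc(size, area): the DRM_MEM_* area argument is dropped. */
static inline void *drm_alloc_shim(size_t size, int area)
{
        return kmalloc(size, GFP_KERNEL);
}

/* drm_free(pt, size, area): kfree() needs neither the size nor the area. */
static inline void drm_free_shim(void *pt, size_t size, int area)
{
        kfree(pt);
}

/* drm_realloc(oldpt, oldsize, size, area): krealloc() tracks the old size itself. */
static inline void *drm_realloc_shim(void *oldpt, size_t oldsize, size_t size, int area)
{
        return krealloc(oldpt, size, GFP_KERNEL);
}
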
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 80a257554b30..6246e3f3dad7 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -151,7 +151,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
         unsigned long user_token;
         int ret;
 
-        map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
+        map = kmalloc(sizeof(*map), GFP_KERNEL);
         if (!map)
                 return -ENOMEM;
 
@@ -165,7 +165,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
          * when processes fork.
          */
         if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
-                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                kfree(map);
                 return -EINVAL;
         }
         DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
@@ -179,7 +179,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                 map->size = PAGE_ALIGN(map->size);
 
         if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
-                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                kfree(map);
                 return -EINVAL;
         }
         map->mtrr = -1;
@@ -191,7 +191,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
                 if (map->offset + (map->size-1) < map->offset ||
                     map->offset < virt_to_phys(high_memory)) {
-                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                        kfree(map);
                         return -EINVAL;
                 }
 #endif
@@ -212,7 +212,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                                 list->map->size = map->size;
                         }
 
-                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                        kfree(map);
                         *maplist = list;
                         return 0;
                 }
@@ -227,7 +227,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                 if (map->type == _DRM_REGISTERS) {
                         map->handle = ioremap(map->offset, map->size);
                         if (!map->handle) {
-                                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                                kfree(map);
                                 return -ENOMEM;
                         }
                 }
@@ -243,7 +243,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                                 list->map->size = map->size;
                         }
 
-                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                        kfree(map);
                         *maplist = list;
                         return 0;
                 }
@@ -251,7 +251,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                 DRM_DEBUG("%lu %d %p\n",
                           map->size, drm_order(map->size), map->handle);
                 if (!map->handle) {
-                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                        kfree(map);
                         return -ENOMEM;
                 }
                 map->offset = (unsigned long)map->handle;
@@ -259,7 +259,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                         /* Prevent a 2nd X Server from creating a 2nd lock */
                         if (dev->primary->master->lock.hw_lock != NULL) {
                                 vfree(map->handle);
-                                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                                kfree(map);
                                 return -EBUSY;
                         }
                         dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;   /* Pointer to lock */
@@ -270,7 +270,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                 int valid = 0;
 
                 if (!drm_core_has_AGP(dev)) {
-                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                        kfree(map);
                         return -EINVAL;
                 }
 #ifdef __alpha__
@@ -303,7 +303,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                         }
                 }
                 if (!list_empty(&dev->agp->memory) && !valid) {
-                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                        kfree(map);
                         return -EPERM;
                 }
                 DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
@@ -316,7 +316,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
         }
         case _DRM_SCATTER_GATHER:
                 if (!dev->sg) {
-                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                        kfree(map);
                         return -EINVAL;
                 }
                 map->offset += (unsigned long)dev->sg->virtual;
@@ -328,7 +328,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                  * need to point to a 64bit variable first. */
                 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
                 if (!dmah) {
-                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                        kfree(map);
                         return -ENOMEM;
                 }
                 map->handle = dmah->vaddr;
@@ -336,15 +336,15 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                 kfree(dmah);
                 break;
         default:
-                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                kfree(map);
                 return -EINVAL;
         }
 
-        list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
+        list = kmalloc(sizeof(*list), GFP_KERNEL);
         if (!list) {
                 if (map->type == _DRM_REGISTERS)
                         iounmap(map->handle);
-                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                kfree(map);
                 return -EINVAL;
         }
         memset(list, 0, sizeof(*list));
@@ -362,8 +362,8 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
         if (ret) {
                 if (map->type == _DRM_REGISTERS)
                         iounmap(map->handle);
-                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
-                drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+                kfree(map);
+                kfree(list);
                 mutex_unlock(&dev->struct_mutex);
                 return ret;
         }
@@ -448,7 +448,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
                         list_del(&r_list->head);
                         drm_ht_remove_key(&dev->map_hash,
                                           r_list->user_token >> PAGE_SHIFT);
-                        drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
+                        kfree(r_list);
                         found = 1;
                         break;
                 }
@@ -491,7 +491,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
                 DRM_ERROR("tried to rmmap GEM object\n");
                 break;
         }
-        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+        kfree(map);
 
         return 0;
 }
@@ -582,24 +582,16 @@ static void drm_cleanup_buf_error(struct drm_device * dev,
                                 drm_pci_free(dev, entry->seglist[i]);
                         }
                 }
-                drm_free(entry->seglist,
-                         entry->seg_count *
-                         sizeof(*entry->seglist), DRM_MEM_SEGS);
+                kfree(entry->seglist);
 
                 entry->seg_count = 0;
         }
 
         if (entry->buf_count) {
                 for (i = 0; i < entry->buf_count; i++) {
-                        if (entry->buflist[i].dev_private) {
-                                drm_free(entry->buflist[i].dev_private,
-                                         entry->buflist[i].dev_priv_size,
-                                         DRM_MEM_BUFS);
-                        }
+                        kfree(entry->buflist[i].dev_private);
                 }
-                drm_free(entry->buflist,
-                         entry->buf_count *
-                         sizeof(*entry->buflist), DRM_MEM_BUFS);
+                kfree(entry->buflist);
 
                 entry->buf_count = 0;
         }
@@ -698,8 +690,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
                 return -EINVAL;
         }
 
-        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
-                                   DRM_MEM_BUFS);
+        entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
         if (!entry->buflist) {
                 mutex_unlock(&dev->struct_mutex);
                 atomic_dec(&dev->buf_alloc);
@@ -729,7 +720,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
                 buf->file_priv = NULL;
 
                 buf->dev_priv_size = dev->driver->dev_priv_size;
-                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+                buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
                 if (!buf->dev_private) {
                         /* Set count correctly so we free the proper amount. */
                         entry->buf_count = count;
@@ -749,10 +740,9 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 
         DRM_DEBUG("byte_count: %d\n", byte_count);
 
-        temp_buflist = drm_realloc(dma->buflist,
-                                   dma->buf_count * sizeof(*dma->buflist),
-                                   (dma->buf_count + entry->buf_count)
-                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+        temp_buflist = krealloc(dma->buflist,
+                                (dma->buf_count + entry->buf_count) *
+                                sizeof(*dma->buflist), GFP_KERNEL);
         if (!temp_buflist) {
                 /* Free the entry because it isn't valid */
                 drm_cleanup_buf_error(dev, entry);
@@ -854,8 +844,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
                 return -EINVAL;
         }
 
-        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
-                                   DRM_MEM_BUFS);
+        entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
         if (!entry->buflist) {
                 mutex_unlock(&dev->struct_mutex);
                 atomic_dec(&dev->buf_alloc);
@@ -863,11 +852,9 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
         }
         memset(entry->buflist, 0, count * sizeof(*entry->buflist));
 
-        entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
-                                   DRM_MEM_SEGS);
+        entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
         if (!entry->seglist) {
-                drm_free(entry->buflist,
-                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
+                kfree(entry->buflist);
                 mutex_unlock(&dev->struct_mutex);
                 atomic_dec(&dev->buf_alloc);
                 return -ENOMEM;
@@ -877,13 +864,11 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
         /* Keep the original pagelist until we know all the allocations
          * have succeeded
          */
-        temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
-                                  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+        temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
+                                sizeof(*dma->pagelist), GFP_KERNEL);
         if (!temp_pagelist) {
-                drm_free(entry->buflist,
-                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
-                drm_free(entry->seglist,
-                         count * sizeof(*entry->seglist), DRM_MEM_SEGS);
+                kfree(entry->buflist);
+                kfree(entry->seglist);
                 mutex_unlock(&dev->struct_mutex);
                 atomic_dec(&dev->buf_alloc);
                 return -ENOMEM;
@@ -907,9 +892,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
                         entry->buf_count = count;
                         entry->seg_count = count;
                         drm_cleanup_buf_error(dev, entry);
-                        drm_free(temp_pagelist,
-                                 (dma->page_count + (count << page_order))
-                                 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+                        kfree(temp_pagelist);
                         mutex_unlock(&dev->struct_mutex);
                         atomic_dec(&dev->buf_alloc);
                         return -ENOMEM;
@@ -940,18 +923,14 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
                         buf->file_priv = NULL;
 
                         buf->dev_priv_size = dev->driver->dev_priv_size;
-                        buf->dev_private = drm_alloc(buf->dev_priv_size,
-                                                     DRM_MEM_BUFS);
+                        buf->dev_private = kmalloc(buf->dev_priv_size,
+                                                   GFP_KERNEL);
                         if (!buf->dev_private) {
                                 /* Set count correctly so we free the proper amount. */
                                 entry->buf_count = count;
                                 entry->seg_count = count;
                                 drm_cleanup_buf_error(dev, entry);
-                                drm_free(temp_pagelist,
-                                         (dma->page_count +
-                                          (count << page_order))
-                                         * sizeof(*dma->pagelist),
-                                         DRM_MEM_PAGES);
+                                kfree(temp_pagelist);
                                 mutex_unlock(&dev->struct_mutex);
                                 atomic_dec(&dev->buf_alloc);
                                 return -ENOMEM;
@@ -964,16 +943,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
                 byte_count += PAGE_SIZE << page_order;
         }
 
-        temp_buflist = drm_realloc(dma->buflist,
-                                   dma->buf_count * sizeof(*dma->buflist),
-                                   (dma->buf_count + entry->buf_count)
-                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+        temp_buflist = krealloc(dma->buflist,
+                                (dma->buf_count + entry->buf_count) *
+                                sizeof(*dma->buflist), GFP_KERNEL);
         if (!temp_buflist) {
                 /* Free the entry because it isn't valid */
                 drm_cleanup_buf_error(dev, entry);
-                drm_free(temp_pagelist,
-                         (dma->page_count + (count << page_order))
-                         * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+                kfree(temp_pagelist);
                 mutex_unlock(&dev->struct_mutex);
                 atomic_dec(&dev->buf_alloc);
                 return -ENOMEM;
@@ -988,9 +964,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
          * with the new one.
          */
         if (dma->page_count) {
-                drm_free(dma->pagelist,
-                         dma->page_count * sizeof(*dma->pagelist),
-                         DRM_MEM_PAGES);
+                kfree(dma->pagelist);
         }
         dma->pagelist = temp_pagelist;
 
@@ -1086,8 +1060,8 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
                 return -EINVAL;
         }
 
-        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
-                                   DRM_MEM_BUFS);
+        entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+                                 GFP_KERNEL);
         if (!entry->buflist) {
                 mutex_unlock(&dev->struct_mutex);
                 atomic_dec(&dev->buf_alloc);
@@ -1118,7 +1092,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
                 buf->file_priv = NULL;
 
                 buf->dev_priv_size = dev->driver->dev_priv_size;
-                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+                buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
                 if (!buf->dev_private) {
                         /* Set count correctly so we free the proper amount. */
                         entry->buf_count = count;
@@ -1139,10 +1113,9 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
 
         DRM_DEBUG("byte_count: %d\n", byte_count);
 
-        temp_buflist = drm_realloc(dma->buflist,
-                                   dma->buf_count * sizeof(*dma->buflist),
-                                   (dma->buf_count + entry->buf_count)
-                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+        temp_buflist = krealloc(dma->buflist,
+                                (dma->buf_count + entry->buf_count) *
+                                sizeof(*dma->buflist), GFP_KERNEL);
         if (!temp_buflist) {
                 /* Free the entry because it isn't valid */
                 drm_cleanup_buf_error(dev, entry);
@@ -1248,8 +1221,8 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
                 return -EINVAL;
         }
 
-        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
-                                   DRM_MEM_BUFS);
+        entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+                                 GFP_KERNEL);
         if (!entry->buflist) {
                 mutex_unlock(&dev->struct_mutex);
                 atomic_dec(&dev->buf_alloc);
@@ -1279,7 +1252,7 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
                 buf->file_priv = NULL;
 
                 buf->dev_priv_size = dev->driver->dev_priv_size;
-                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+                buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
                 if (!buf->dev_private) {
                         /* Set count correctly so we free the proper amount. */
                         entry->buf_count = count;
@@ -1299,10 +1272,9 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
 
         DRM_DEBUG("byte_count: %d\n", byte_count);
 
-        temp_buflist = drm_realloc(dma->buflist,
-                                   dma->buf_count * sizeof(*dma->buflist),
-                                   (dma->buf_count + entry->buf_count)
-                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+        temp_buflist = krealloc(dma->buflist,
+                                (dma->buf_count + entry->buf_count) *
+                                sizeof(*dma->buflist), GFP_KERNEL);
         if (!temp_buflist) {
                 /* Free the entry because it isn't valid */
                 drm_cleanup_buf_error(dev, entry);

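One behavioral detail in the drm_realloc() to krealloc() conversions above: the old helper was passed both the current and the new allocation size, while krealloc() takes only the new total size, and it leaves the original buffer untouched (and un-freed) when it fails. The temp_buflist pattern in the hunks above relies on that. A minimal sketch of the idiom under those assumptions (the helper name and parameters here are illustrative, not taken from the patch):

#include <linux/slab.h>

struct drm_buf; /* opaque here; only pointer arithmetic is needed */

/* Grow an array of drm_buf pointers by 'extra' entries, the way dma->buflist
 * is grown above: krealloc() preserves the existing entries, and on failure
 * the caller's array is still valid, so the result is checked before it
 * replaces the old pointer. */
static int grow_buflist(struct drm_buf ***buflist, int old_count, int extra)
{
        struct drm_buf **tmp;

        tmp = krealloc(*buflist, (old_count + extra) * sizeof(*tmp),
                       GFP_KERNEL);
        if (!tmp)
                return -ENOMEM; /* *buflist unchanged and still owned */

        *buflist = tmp;
        return 0;
}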