author     Tejun Heo <tj@kernel.org>    2010-04-04 22:37:28 -0400
committer  Tejun Heo <tj@kernel.org>    2010-04-04 22:37:28 -0400
commit     336f5899d287f06d8329e208fc14ce50f7ec9698
tree       9b762d450d5eb248a6ff8317badb7e223d93ed58 /drivers/gpu/drm/ttm
parent     a4ab2773205e8b94c18625455f85e3b6bb9d7ad6
parent     db217dece3003df0841bacf9556b5c06aa097dae
Merge branch 'master' into export-slabh
Diffstat (limited to 'drivers/gpu/drm/ttm')

-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo.c     |  4
-rw-r--r--   drivers/gpu/drm/ttm/ttm_memory.c | 18
-rw-r--r--   drivers/gpu/drm/ttm/ttm_tt.c     | 23

3 files changed, 13 insertions, 32 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 89c38c49066f..dd47b2a9a791 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1425,8 +1425,8 @@ int ttm_bo_global_init(struct ttm_global_reference *ref)
 
 	atomic_set(&glob->bo_count, 0);
 
-	kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
-	ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
+	ret = kobject_init_and_add(
+		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
 	if (unlikely(ret != 0))
 		kobject_put(&glob->kobj);
 	return ret;
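For context, the conversion in this hunk (and repeated for each zone in ttm_memory.c below) folds the two-step kobject_init() + kobject_add() sequence into a single kobject_init_and_add() call. The following is a minimal sketch of that pattern, not TTM code: the names example_kobj_type and example_register are hypothetical, and only the kobject API calls from <linux/kobject.h> are real.

#include <linux/kernel.h>
#include <linux/kobject.h>

static struct kobj_type example_kobj_type;	/* hypothetical ktype */

static int example_register(struct kobject *kobj, struct kobject *parent)
{
	int ret;

	/*
	 * Before:  kobject_init(kobj, &example_kobj_type);
	 *          ret = kobject_add(kobj, parent, "buffer_objects");
	 *
	 * After: one call both initializes the kobject and registers it
	 * under 'parent' in sysfs.
	 */
	ret = kobject_init_and_add(kobj, &example_kobj_type, parent,
				   "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(kobj);	/* already initialized, so put, not kfree */
	return ret;
}

Even when kobject_init_and_add() fails, the kobject has been initialized, so the error path still needs kobject_put() rather than a plain free, which is exactly the cleanup the TTM hunks above keep in place.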
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index e055a3af926d..801b702566e6 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -261,8 +261,8 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
 	zone->used_mem = 0;
 	zone->glob = glob;
 	glob->zone_kernel = zone;
-	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-	ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+	ret = kobject_init_and_add(
+		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
 	if (unlikely(ret != 0)) {
 		kobject_put(&zone->kobj);
 		return ret;
@@ -297,8 +297,8 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
 	zone->used_mem = 0;
 	zone->glob = glob;
 	glob->zone_highmem = zone;
-	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-	ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+	ret = kobject_init_and_add(
+		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
 	if (unlikely(ret != 0)) {
 		kobject_put(&zone->kobj);
 		return ret;
@@ -344,8 +344,8 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
 	zone->used_mem = 0;
 	zone->glob = glob;
 	glob->zone_dma32 = zone;
-	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-	ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+	ret = kobject_init_and_add(
+		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
 	if (unlikely(ret != 0)) {
 		kobject_put(&zone->kobj);
 		return ret;
@@ -366,10 +366,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
 	INIT_WORK(&glob->work, ttm_shrink_work);
 	init_waitqueue_head(&glob->queue);
-	kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type);
-	ret = kobject_add(&glob->kobj,
-			  ttm_get_kobj(),
-			  "memory_accounting");
+	ret = kobject_init_and_add(
+		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
 	if (unlikely(ret != 0)) {
 		kobject_put(&glob->kobj);
 		return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 0ef7f73ea56c..d5fd5b8faeb3 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,7 +28,6 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
 
-#include <linux/vmalloc.h>
 #include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
@@ -36,6 +35,7 @@
 #include <linux/swap.h>
 #include <linux/slab.h>
 #include "drm_cache.h"
+#include "drm_mem_util.h"
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
@@ -44,32 +44,15 @@ static int ttm_tt_swapin(struct ttm_tt *ttm);
 
 /**
  * Allocates storage for pointers to the pages that back the ttm.
- *
- * Uses kmalloc if possible. Otherwise falls back to vmalloc.
  */
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
-	ttm->pages = NULL;
-
-	if (size <= PAGE_SIZE)
-		ttm->pages = kzalloc(size, GFP_KERNEL);
-
-	if (!ttm->pages) {
-		ttm->pages = vmalloc_user(size);
-		if (ttm->pages)
-			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
-	}
+	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
 }
 
 static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 {
-	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
-		vfree(ttm->pages);
-		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
-	} else {
-		kfree(ttm->pages);
-	}
+	drm_free_large(ttm->pages);
 	ttm->pages = NULL;
 }
 
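The ttm_tt.c change drops the open-coded kmalloc-or-vmalloc page-directory allocation, and with it the TTM_PAGE_FLAG_VMALLOC bookkeeping, in favour of the drm_calloc_large()/drm_free_large() helpers from drm_mem_util.h. Below is a rough sketch of helpers equivalent to the removed logic; the names example_calloc_large() and example_free_large() are hypothetical stand-ins, and the exact drm_mem_util.h implementation may differ.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Zeroed allocation of nmemb elements of 'size' bytes each. */
static inline void *example_calloc_large(size_t nmemb, size_t size)
{
	/* Small directories fit in a single page: use the slab allocator. */
	if (size && nmemb <= PAGE_SIZE / size)
		return kcalloc(nmemb, size, GFP_KERNEL);

	/* Larger directories fall back to zeroed vmalloc space. */
	return vzalloc(nmemb * size);
}

static inline void example_free_large(void *ptr)
{
	/*
	 * is_vmalloc_addr() recovers which allocator was used, so the
	 * caller no longer has to carry a flag such as TTM_PAGE_FLAG_VMALLOC.
	 */
	if (is_vmalloc_addr(ptr))
		vfree(ptr);
	else
		kfree(ptr);
}

With that decision localized in the helper, ttm_tt_free_page_directory() no longer needs to branch on a page flag, which is why the diff can delete the TTM_PAGE_FLAG_VMALLOC handling entirely.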