diff options
| author | Laurent Pinchart <laurent.pinchart@ideasonboard.com> | 2015-12-14 15:39:33 -0500 |
|---|---|---|
| committer | Tomi Valkeinen <tomi.valkeinen@ti.com> | 2015-12-31 04:25:43 -0500 |
| commit | 7ef93b0aa85dd16632be29ae7b4bffd550c17d5a (patch) | |
| tree | 2ee432e8c4607de2bdf66afb328081016de50f08 | |
| parent | b902f8f4d69094690accf4c483e9f176be525000 (diff) | |
drm: omapdrm: gem: Group functions by purpose
Divide the GEM implementation into groups of functions to improve
readability.
No code change is performed by this commit.
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
| -rw-r--r-- | drivers/gpu/drm/omapdrm/omap_gem.c | 140 |
1 file changed, 87 insertions, 53 deletions
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index a953a967b7db..b6dffdbbc0c1 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c | |||
| @@ -29,14 +29,11 @@ | |||
| 29 | * GEM buffer object implementation. | 29 | * GEM buffer object implementation. |
| 30 | */ | 30 | */ |
| 31 | 31 | ||
| 32 | #define to_omap_bo(x) container_of(x, struct omap_gem_object, base) | ||
| 33 | |||
| 34 | /* note: we use upper 8 bits of flags for driver-internal flags: */ | 32 | /* note: we use upper 8 bits of flags for driver-internal flags: */ |
| 35 | #define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */ | 33 | #define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */ |
| 36 | #define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */ | 34 | #define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */ |
| 37 | #define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */ | 35 | #define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */ |
| 38 | 36 | ||
| 39 | |||
| 40 | struct omap_gem_object { | 37 | struct omap_gem_object { |
| 41 | struct drm_gem_object base; | 38 | struct drm_gem_object base; |
| 42 | 39 | ||
| @@ -113,6 +110,7 @@ struct omap_gem_object { | |||
| 113 | } *sync; | 110 | } *sync; |
| 114 | }; | 111 | }; |
| 115 | 112 | ||
| 113 | #define to_omap_bo(x) container_of(x, struct omap_gem_object, base) | ||
| 116 | 114 | ||
| 117 | /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are | 115 | /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are |
| 118 | * not necessarily pinned in TILER all the time, and (b) when they are | 116 | * not necessarily pinned in TILER all the time, and (b) when they are |
| @@ -166,6 +164,22 @@ static uint64_t mmap_offset(struct drm_gem_object *obj) | |||
| 166 | return drm_vma_node_offset_addr(&obj->vma_node); | 164 | return drm_vma_node_offset_addr(&obj->vma_node); |
| 167 | } | 165 | } |
| 168 | 166 | ||
| 167 | /* GEM objects can either be allocated from contiguous memory (in which | ||
| 168 | * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non | ||
| 169 | * contiguous buffers can be remapped in TILER/DMM if they need to be | ||
| 170 | * contiguous... but we don't do this all the time to reduce pressure | ||
| 171 | * on TILER/DMM space when we know at allocation time that the buffer | ||
| 172 | * will need to be scanned out. | ||
| 173 | */ | ||
| 174 | static inline bool is_shmem(struct drm_gem_object *obj) | ||
| 175 | { | ||
| 176 | return obj->filp != NULL; | ||
| 177 | } | ||
| 178 | |||
| 179 | /* ----------------------------------------------------------------------------- | ||
| 180 | * Eviction | ||
| 181 | */ | ||
| 182 | |||
| 169 | static void evict_entry(struct drm_gem_object *obj, | 183 | static void evict_entry(struct drm_gem_object *obj, |
| 170 | enum tiler_fmt fmt, struct usergart_entry *entry) | 184 | enum tiler_fmt fmt, struct usergart_entry *entry) |
| 171 | { | 185 | { |
| @@ -212,30 +226,9 @@ static void evict(struct drm_gem_object *obj) | |||
| 212 | } | 226 | } |
| 213 | } | 227 | } |
| 214 | 228 | ||
| 215 | /* GEM objects can either be allocated from contiguous memory (in which | 229 | /* ----------------------------------------------------------------------------- |
| 216 | * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non | 230 | * Page Management |
| 217 | * contiguous buffers can be remapped in TILER/DMM if they need to be | ||
| 218 | * contiguous... but we don't do this all the time to reduce pressure | ||
| 219 | * on TILER/DMM space when we know at allocation time that the buffer | ||
| 220 | * will need to be scanned out. | ||
| 221 | */ | ||
| 222 | static inline bool is_shmem(struct drm_gem_object *obj) | ||
| 223 | { | ||
| 224 | return obj->filp != NULL; | ||
| 225 | } | ||
| 226 | |||
| 227 | /** | ||
| 228 | * shmem buffers that are mapped cached can simulate coherency via using | ||
| 229 | * page faulting to keep track of dirty pages | ||
| 230 | */ | 231 | */ |
| 231 | static inline bool is_cached_coherent(struct drm_gem_object *obj) | ||
| 232 | { | ||
| 233 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
| 234 | return is_shmem(obj) && | ||
| 235 | ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED); | ||
| 236 | } | ||
| 237 | |||
| 238 | static DEFINE_SPINLOCK(sync_lock); | ||
| 239 | 232 | ||
| 240 | /** ensure backing pages are allocated */ | 233 | /** ensure backing pages are allocated */ |
| 241 | static int omap_gem_attach_pages(struct drm_gem_object *obj) | 234 | static int omap_gem_attach_pages(struct drm_gem_object *obj) |
| @@ -380,6 +373,10 @@ int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h) | |||
| 380 | return -EINVAL; | 373 | return -EINVAL; |
| 381 | } | 374 | } |
| 382 | 375 | ||
| 376 | /* ----------------------------------------------------------------------------- | ||
| 377 | * Fault Handling | ||
| 378 | */ | ||
| 379 | |||
| 383 | /* Normal handling for the case of faulting in non-tiled buffers */ | 380 | /* Normal handling for the case of faulting in non-tiled buffers */ |
| 384 | static int fault_1d(struct drm_gem_object *obj, | 381 | static int fault_1d(struct drm_gem_object *obj, |
| 385 | struct vm_area_struct *vma, struct vm_fault *vmf) | 382 | struct vm_area_struct *vma, struct vm_fault *vmf) |
| @@ -614,6 +611,9 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj, | |||
| 614 | return 0; | 611 | return 0; |
| 615 | } | 612 | } |
| 616 | 613 | ||
| 614 | /* ----------------------------------------------------------------------------- | ||
| 615 | * Dumb Buffers | ||
| 616 | */ | ||
| 617 | 617 | ||
| 618 | /** | 618 | /** |
| 619 | * omap_gem_dumb_create - create a dumb buffer | 619 | * omap_gem_dumb_create - create a dumb buffer |
| @@ -710,6 +710,21 @@ fail: | |||
| 710 | } | 710 | } |
| 711 | #endif | 711 | #endif |
| 712 | 712 | ||
| 713 | /* ----------------------------------------------------------------------------- | ||
| 714 | * Memory Management & DMA Sync | ||
| 715 | */ | ||
| 716 | |||
| 717 | /** | ||
| 718 | * shmem buffers that are mapped cached can simulate coherency via using | ||
| 719 | * page faulting to keep track of dirty pages | ||
| 720 | */ | ||
| 721 | static inline bool is_cached_coherent(struct drm_gem_object *obj) | ||
| 722 | { | ||
| 723 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
| 724 | return is_shmem(obj) && | ||
| 725 | ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED); | ||
| 726 | } | ||
| 727 | |||
| 713 | /* Sync the buffer for CPU access.. note pages should already be | 728 | /* Sync the buffer for CPU access.. note pages should already be |
| 714 | * attached, ie. omap_gem_get_pages() | 729 | * attached, ie. omap_gem_get_pages() |
| 715 | */ | 730 | */ |
| @@ -943,6 +958,10 @@ void *omap_gem_vaddr(struct drm_gem_object *obj) | |||
| 943 | } | 958 | } |
| 944 | #endif | 959 | #endif |
| 945 | 960 | ||
| 961 | /* ----------------------------------------------------------------------------- | ||
| 962 | * Power Management | ||
| 963 | */ | ||
| 964 | |||
| 946 | #ifdef CONFIG_PM | 965 | #ifdef CONFIG_PM |
| 947 | /* re-pin objects in DMM in resume path: */ | 966 | /* re-pin objects in DMM in resume path: */ |
| 948 | int omap_gem_resume(struct device *dev) | 967 | int omap_gem_resume(struct device *dev) |
| @@ -971,6 +990,10 @@ int omap_gem_resume(struct device *dev) | |||
| 971 | } | 990 | } |
| 972 | #endif | 991 | #endif |
| 973 | 992 | ||
| 993 | /* ----------------------------------------------------------------------------- | ||
| 994 | * DebugFS | ||
| 995 | */ | ||
| 996 | |||
| 974 | #ifdef CONFIG_DEBUG_FS | 997 | #ifdef CONFIG_DEBUG_FS |
| 975 | void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) | 998 | void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) |
| 976 | { | 999 | { |
| @@ -1017,9 +1040,12 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m) | |||
| 1017 | } | 1040 | } |
| 1018 | #endif | 1041 | #endif |
| 1019 | 1042 | ||
| 1020 | /* Buffer Synchronization: | 1043 | /* ----------------------------------------------------------------------------- |
| 1044 | * Buffer Synchronization | ||
| 1021 | */ | 1045 | */ |
| 1022 | 1046 | ||
| 1047 | static DEFINE_SPINLOCK(sync_lock); | ||
| 1048 | |||
| 1023 | struct omap_gem_sync_waiter { | 1049 | struct omap_gem_sync_waiter { |
| 1024 | struct list_head list; | 1050 | struct list_head list; |
| 1025 | struct omap_gem_object *omap_obj; | 1051 | struct omap_gem_object *omap_obj; |
| @@ -1265,6 +1291,10 @@ unlock: | |||
| 1265 | return ret; | 1291 | return ret; |
| 1266 | } | 1292 | } |
| 1267 | 1293 | ||
| 1294 | /* ----------------------------------------------------------------------------- | ||
| 1295 | * Constructor & Destructor | ||
| 1296 | */ | ||
| 1297 | |||
| 1268 | /* don't call directly.. called from GEM core when it is time to actually | 1298 | /* don't call directly.. called from GEM core when it is time to actually |
| 1269 | * free the object.. | 1299 | * free the object.. |
| 1270 | */ | 1300 | */ |
| @@ -1311,30 +1341,6 @@ void omap_gem_free_object(struct drm_gem_object *obj) | |||
| 1311 | kfree(obj); | 1341 | kfree(obj); |
| 1312 | } | 1342 | } |
| 1313 | 1343 | ||
| 1314 | /* convenience method to construct a GEM buffer object, and userspace handle */ | ||
| 1315 | int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, | ||
| 1316 | union omap_gem_size gsize, uint32_t flags, uint32_t *handle) | ||
| 1317 | { | ||
| 1318 | struct drm_gem_object *obj; | ||
| 1319 | int ret; | ||
| 1320 | |||
| 1321 | obj = omap_gem_new(dev, gsize, flags); | ||
| 1322 | if (!obj) | ||
| 1323 | return -ENOMEM; | ||
| 1324 | |||
| 1325 | ret = drm_gem_handle_create(file, obj, handle); | ||
| 1326 | if (ret) { | ||
| 1327 | drm_gem_object_release(obj); | ||
| 1328 | kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */ | ||
| 1329 | return ret; | ||
| 1330 | } | ||
| 1331 | |||
| 1332 | /* drop reference from allocate - handle holds it now */ | ||
| 1333 | drm_gem_object_unreference_unlocked(obj); | ||
| 1334 | |||
| 1335 | return 0; | ||
| 1336 | } | ||
| 1337 | |||
| 1338 | /* GEM buffer object constructor */ | 1344 | /* GEM buffer object constructor */ |
| 1339 | struct drm_gem_object *omap_gem_new(struct drm_device *dev, | 1345 | struct drm_gem_object *omap_gem_new(struct drm_device *dev, |
| 1340 | union omap_gem_size gsize, uint32_t flags) | 1346 | union omap_gem_size gsize, uint32_t flags) |
| @@ -1426,7 +1432,35 @@ fail: | |||
| 1426 | return NULL; | 1432 | return NULL; |
| 1427 | } | 1433 | } |
| 1428 | 1434 | ||
| 1429 | /* init/cleanup.. if DMM is used, we need to set some stuff up.. */ | 1435 | /* convenience method to construct a GEM buffer object, and userspace handle */ |
| 1436 | int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, | ||
| 1437 | union omap_gem_size gsize, uint32_t flags, uint32_t *handle) | ||
| 1438 | { | ||
| 1439 | struct drm_gem_object *obj; | ||
| 1440 | int ret; | ||
| 1441 | |||
| 1442 | obj = omap_gem_new(dev, gsize, flags); | ||
| 1443 | if (!obj) | ||
| 1444 | return -ENOMEM; | ||
| 1445 | |||
| 1446 | ret = drm_gem_handle_create(file, obj, handle); | ||
| 1447 | if (ret) { | ||
| 1448 | drm_gem_object_release(obj); | ||
| 1449 | kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */ | ||
| 1450 | return ret; | ||
| 1451 | } | ||
| 1452 | |||
| 1453 | /* drop reference from allocate - handle holds it now */ | ||
| 1454 | drm_gem_object_unreference_unlocked(obj); | ||
| 1455 | |||
| 1456 | return 0; | ||
| 1457 | } | ||
| 1458 | |||
| 1459 | /* ----------------------------------------------------------------------------- | ||
| 1460 | * Init & Cleanup | ||
| 1461 | */ | ||
| 1462 | |||
| 1463 | /* If DMM is used, we need to set some stuff up.. */ | ||
| 1430 | void omap_gem_init(struct drm_device *dev) | 1464 | void omap_gem_init(struct drm_device *dev) |
| 1431 | { | 1465 | { |
| 1432 | struct omap_drm_private *priv = dev->dev_private; | 1466 | struct omap_drm_private *priv = dev->dev_private; |
