author     Dave Airlie <airlied@redhat.com>    2015-12-31 16:41:52 -0500
committer  Dave Airlie <airlied@redhat.com>    2015-12-31 16:41:52 -0500
commit     c11b8989635166c5a1e6aac1853a847bd664f8db (patch)
tree       4c0ad2b74b69c42557c681241e4f8246b9ad612e
parent     54255e818ef7a5e968c0230bc75649a68932d8ca (diff)
parent     9c8e566e4930a00b51bee7766a5c188f20fdb16b (diff)
Merge tag 'omapdrm-4.5-resolved' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba/linux into drm-next
omapdrm changes for v4.5
* enable DRIVER_ATOMIC
* improved TILER performance
* cleanups preparing for DMAbuf import
* fbdev emulation is now optional
* minor fixes
* tag 'omapdrm-4.5-resolved' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba/linux:
drm/omap: remove obsolete manager assignment
drm/omap: set DRIVER_ATOMIC for omapdrm
drm/omap: remove unused plugin defines
drm/omap: Use bitmaps for TILER placement
drm: omapdrm: gem: Remove check for impossible condition
drm: omapdrm: gem: Simplify error handling when creating GEM object
drm: omapdrm: gem: Don't free mmap offset twice
drm: omapdrm: gem: Fix GEM object destroy in error path
drm: omapdrm: gem: Free the correct memory object
drm: omapdrm: gem: Mask out private flags passed from userspace
drm: omapdrm: gem: Move global usergart variable to omap_drm_private
drm: omapdrm: gem: Group functions by purpose
drm: omapdrm: gem: Remove forward declarations
drm: omapdrm: gem: Remove unused function prototypes
drm: omapdrm: Make fbdev emulation optional
drm: omapdrm: Fix plane state free in plane reset handler
drm: omapdrm: move omap_plane_reset()
drm/omap: Use platform_register/unregister_drivers()
drm: omapdrm: tiler: Remove unneded module alias for tiler
-rw-r--r--  drivers/gpu/drm/omapdrm/Makefile            3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_debugfs.c      4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c   15
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c         44
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h         17
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_encoder.c      2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c        4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c        304
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c       53
-rw-r--r--  drivers/gpu/drm/omapdrm/tcm-sita.c        801
-rw-r--r--  drivers/gpu/drm/omapdrm/tcm.h              26
-rw-r--r--  include/uapi/drm/omap_drm.h                 6
12 files changed, 440 insertions, 839 deletions
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index 778372b062ad..368c1ec6805a 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -12,10 +12,11 @@ omapdrm-y := omap_drv.o \ | |||
12 | omap_encoder.o \ | 12 | omap_encoder.o \ |
13 | omap_connector.o \ | 13 | omap_connector.o \ |
14 | omap_fb.o \ | 14 | omap_fb.o \ |
15 | omap_fbdev.o \ | ||
16 | omap_gem.o \ | 15 | omap_gem.o \ |
17 | omap_gem_dmabuf.o \ | 16 | omap_gem_dmabuf.o \ |
18 | omap_dmm_tiler.o \ | 17 | omap_dmm_tiler.o \ |
19 | tcm-sita.o | 18 | tcm-sita.o |
20 | 19 | ||
20 | omapdrm-$(CONFIG_DRM_FBDEV_EMULATION) += omap_fbdev.o | ||
21 | |||
21 | obj-$(CONFIG_DRM_OMAP) += omapdrm.o | 22 | obj-$(CONFIG_DRM_OMAP) += omapdrm.o |
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index ee91a25127f9..6f5fc14fc015 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -51,6 +51,7 @@ static int mm_show(struct seq_file *m, void *arg) | |||
51 | return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm); | 51 | return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm); |
52 | } | 52 | } |
53 | 53 | ||
54 | #ifdef CONFIG_DRM_FBDEV_EMULATION | ||
54 | static int fb_show(struct seq_file *m, void *arg) | 55 | static int fb_show(struct seq_file *m, void *arg) |
55 | { | 56 | { |
56 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 57 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
@@ -73,12 +74,15 @@ static int fb_show(struct seq_file *m, void *arg) | |||
73 | 74 | ||
74 | return 0; | 75 | return 0; |
75 | } | 76 | } |
77 | #endif | ||
76 | 78 | ||
77 | /* list of debufs files that are applicable to all devices */ | 79 | /* list of debufs files that are applicable to all devices */ |
78 | static struct drm_info_list omap_debugfs_list[] = { | 80 | static struct drm_info_list omap_debugfs_list[] = { |
79 | {"gem", gem_show, 0}, | 81 | {"gem", gem_show, 0}, |
80 | {"mm", mm_show, 0}, | 82 | {"mm", mm_show, 0}, |
83 | #ifdef CONFIG_DRM_FBDEV_EMULATION | ||
81 | {"fb", fb_show, 0}, | 84 | {"fb", fb_show, 0}, |
85 | #endif | ||
82 | }; | 86 | }; |
83 | 87 | ||
84 | /* list of debugfs files that are specific to devices with dmm/tiler */ | 88 | /* list of debugfs files that are specific to devices with dmm/tiler */ |
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 7841970de48d..dfebdc4aa0f2 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -363,6 +363,7 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, | |||
363 | u32 min_align = 128; | 363 | u32 min_align = 128; |
364 | int ret; | 364 | int ret; |
365 | unsigned long flags; | 365 | unsigned long flags; |
366 | size_t slot_bytes; | ||
366 | 367 | ||
367 | BUG_ON(!validfmt(fmt)); | 368 | BUG_ON(!validfmt(fmt)); |
368 | 369 | ||
@@ -371,13 +372,15 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, | |||
371 | h = DIV_ROUND_UP(h, geom[fmt].slot_h); | 372 | h = DIV_ROUND_UP(h, geom[fmt].slot_h); |
372 | 373 | ||
373 | /* convert alignment to slots */ | 374 | /* convert alignment to slots */ |
374 | min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp)); | 375 | slot_bytes = geom[fmt].slot_w * geom[fmt].cpp; |
375 | align = ALIGN(align, min_align); | 376 | min_align = max(min_align, slot_bytes); |
376 | align /= geom[fmt].slot_w * geom[fmt].cpp; | 377 | align = (align > min_align) ? ALIGN(align, min_align) : min_align; |
378 | align /= slot_bytes; | ||
377 | 379 | ||
378 | block->fmt = fmt; | 380 | block->fmt = fmt; |
379 | 381 | ||
380 | ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area); | 382 | ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes, |
383 | &block->area); | ||
381 | if (ret) { | 384 | if (ret) { |
382 | kfree(block); | 385 | kfree(block); |
383 | return ERR_PTR(-ENOMEM); | 386 | return ERR_PTR(-ENOMEM); |
@@ -739,8 +742,7 @@ static int omap_dmm_probe(struct platform_device *dev) | |||
739 | programming during reill operations */ | 742 | programming during reill operations */ |
740 | for (i = 0; i < omap_dmm->num_lut; i++) { | 743 | for (i = 0; i < omap_dmm->num_lut; i++) { |
741 | omap_dmm->tcm[i] = sita_init(omap_dmm->container_width, | 744 | omap_dmm->tcm[i] = sita_init(omap_dmm->container_width, |
742 | omap_dmm->container_height, | 745 | omap_dmm->container_height); |
743 | NULL); | ||
744 | 746 | ||
745 | if (!omap_dmm->tcm[i]) { | 747 | if (!omap_dmm->tcm[i]) { |
746 | dev_err(&dev->dev, "failed to allocate container\n"); | 748 | dev_err(&dev->dev, "failed to allocate container\n"); |
@@ -1030,4 +1032,3 @@ struct platform_driver omap_dmm_driver = { | |||
1030 | MODULE_LICENSE("GPL v2"); | 1032 | MODULE_LICENSE("GPL v2"); |
1031 | MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>"); | 1033 | MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>"); |
1032 | MODULE_DESCRIPTION("OMAP DMM/Tiler Driver"); | 1034 | MODULE_DESCRIPTION("OMAP DMM/Tiler Driver"); |
1033 | MODULE_ALIAS("platform:" DMM_DRIVER_NAME); | ||
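The tiler_reserve_2d() hunk above converts the requested byte alignment into TILER slots before calling the allocator, and the new tcm_reserve_2d() signature passes the slot size through (the -1 argument appears to mean "no fixed offset"). A worked sketch of the conversion, using made-up geometry (slot_w = 64, cpp = 1) purely for illustration:

	/* Hypothetical numbers; the real geom[] values differ per format. */
	size_t slot_bytes = 64 * 1;		/* geom[fmt].slot_w * geom[fmt].cpp */
	size_t min_align = max_t(size_t, 128, slot_bytes);	/* 128 */
	u32 align = 4096;			/* caller asked for page alignment */

	/* Round up only when the caller wanted something stricter than the
	 * minimum, then express the result in slots:
	 * ALIGN(4096, 128) == 4096, and 4096 / 64 == 64 slots. */
	align = (align > min_align) ? ALIGN(align, min_align) : min_align;
	align /= slot_bytes;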
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 5c6609cbb6a2..dfafdb602ad2 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -547,14 +547,19 @@ static int ioctl_set_param(struct drm_device *dev, void *data, | |||
547 | return 0; | 547 | return 0; |
548 | } | 548 | } |
549 | 549 | ||
550 | #define OMAP_BO_USER_MASK 0x00ffffff /* flags settable by userspace */ | ||
551 | |||
550 | static int ioctl_gem_new(struct drm_device *dev, void *data, | 552 | static int ioctl_gem_new(struct drm_device *dev, void *data, |
551 | struct drm_file *file_priv) | 553 | struct drm_file *file_priv) |
552 | { | 554 | { |
553 | struct drm_omap_gem_new *args = data; | 555 | struct drm_omap_gem_new *args = data; |
556 | u32 flags = args->flags & OMAP_BO_USER_MASK; | ||
557 | |||
554 | VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv, | 558 | VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv, |
555 | args->size.bytes, args->flags); | 559 | args->size.bytes, flags); |
556 | return omap_gem_new_handle(dev, file_priv, args->size, | 560 | |
557 | args->flags, &args->handle); | 561 | return omap_gem_new_handle(dev, file_priv, args->size, flags, |
562 | &args->handle); | ||
558 | } | 563 | } |
559 | 564 | ||
560 | static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data, | 565 | static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data, |
@@ -692,10 +697,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags) | |||
692 | drm_crtc_vblank_off(priv->crtcs[i]); | 697 | drm_crtc_vblank_off(priv->crtcs[i]); |
693 | 698 | ||
694 | priv->fbdev = omap_fbdev_init(dev); | 699 | priv->fbdev = omap_fbdev_init(dev); |
695 | if (!priv->fbdev) { | ||
696 | dev_warn(dev->dev, "omap_fbdev_init failed\n"); | ||
697 | /* well, limp along without an fbdev.. maybe X11 will work? */ | ||
698 | } | ||
699 | 700 | ||
700 | /* store off drm_device for use in pm ops */ | 701 | /* store off drm_device for use in pm ops */ |
701 | dev_set_drvdata(dev->dev, dev); | 702 | dev_set_drvdata(dev->dev, dev); |
@@ -831,7 +832,8 @@ static const struct file_operations omapdriver_fops = { | |||
831 | }; | 832 | }; |
832 | 833 | ||
833 | static struct drm_driver omap_drm_driver = { | 834 | static struct drm_driver omap_drm_driver = { |
834 | .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, | 835 | .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | |
836 | DRIVER_ATOMIC, | ||
835 | .load = dev_load, | 837 | .load = dev_load, |
836 | .unload = dev_unload, | 838 | .unload = dev_unload, |
837 | .open = dev_open, | 839 | .open = dev_open, |
@@ -928,35 +930,23 @@ static struct platform_driver pdev = { | |||
928 | .remove = pdev_remove, | 930 | .remove = pdev_remove, |
929 | }; | 931 | }; |
930 | 932 | ||
933 | static struct platform_driver * const drivers[] = { | ||
934 | &omap_dmm_driver, | ||
935 | &pdev, | ||
936 | }; | ||
937 | |||
931 | static int __init omap_drm_init(void) | 938 | static int __init omap_drm_init(void) |
932 | { | 939 | { |
933 | int r; | ||
934 | |||
935 | DBG("init"); | 940 | DBG("init"); |
936 | 941 | ||
937 | r = platform_driver_register(&omap_dmm_driver); | 942 | return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); |
938 | if (r) { | ||
939 | pr_err("DMM driver registration failed\n"); | ||
940 | return r; | ||
941 | } | ||
942 | |||
943 | r = platform_driver_register(&pdev); | ||
944 | if (r) { | ||
945 | pr_err("omapdrm driver registration failed\n"); | ||
946 | platform_driver_unregister(&omap_dmm_driver); | ||
947 | return r; | ||
948 | } | ||
949 | |||
950 | return 0; | ||
951 | } | 943 | } |
952 | 944 | ||
953 | static void __exit omap_drm_fini(void) | 945 | static void __exit omap_drm_fini(void) |
954 | { | 946 | { |
955 | DBG("fini"); | 947 | DBG("fini"); |
956 | 948 | ||
957 | platform_driver_unregister(&pdev); | 949 | platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); |
958 | |||
959 | platform_driver_unregister(&omap_dmm_driver); | ||
960 | } | 950 | } |
961 | 951 | ||
962 | /* need late_initcall() so we load after dss_driver's are loaded */ | 952 | /* need late_initcall() so we load after dss_driver's are loaded */ |
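The ioctl_gem_new() change above filters the flags word before it reaches omap_gem_new_handle(). omap_gem.c reserves the upper 8 bits of the flags for driver-internal state (OMAP_BO_DMA and friends, see the omap_gem.c hunks below), so masking with OMAP_BO_USER_MASK guarantees userspace can never set them. A minimal sketch of the effect:

	#define OMAP_BO_USER_MASK 0x00ffffff	/* flags settable by userspace */
	#define OMAP_BO_DMA       0x01000000	/* driver-internal flag */

	/* A buggy or malicious caller passing 0x01000003 is stripped down
	 * to 0x00000003: the AND clears every driver-internal bit. */
	u32 flags = args->flags & OMAP_BO_USER_MASK;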
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 130fca70bfd7..9e0030731c37 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -36,11 +36,7 @@ | |||
36 | 36 | ||
37 | #define MODULE_NAME "omapdrm" | 37 | #define MODULE_NAME "omapdrm" |
38 | 38 | ||
39 | /* max # of mapper-id's that can be assigned.. todo, come up with a better | 39 | struct omap_drm_usergart; |
40 | * (but still inexpensive) way to store/access per-buffer mapper private | ||
41 | * data.. | ||
42 | */ | ||
43 | #define MAX_MAPPERS 2 | ||
44 | 40 | ||
45 | /* parameters which describe (unrotated) coordinates of scanout within a fb: */ | 41 | /* parameters which describe (unrotated) coordinates of scanout within a fb: */ |
46 | struct omap_drm_window { | 42 | struct omap_drm_window { |
@@ -97,6 +93,7 @@ struct omap_drm_private { | |||
97 | /* list of GEM objects: */ | 93 | /* list of GEM objects: */ |
98 | struct list_head obj_list; | 94 | struct list_head obj_list; |
99 | 95 | ||
96 | struct omap_drm_usergart *usergart; | ||
100 | bool has_dmm; | 97 | bool has_dmm; |
101 | 98 | ||
102 | /* properties: */ | 99 | /* properties: */ |
@@ -138,8 +135,18 @@ void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq); | |||
138 | void omap_drm_irq_uninstall(struct drm_device *dev); | 135 | void omap_drm_irq_uninstall(struct drm_device *dev); |
139 | int omap_drm_irq_install(struct drm_device *dev); | 136 | int omap_drm_irq_install(struct drm_device *dev); |
140 | 137 | ||
138 | #ifdef CONFIG_DRM_FBDEV_EMULATION | ||
141 | struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev); | 139 | struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev); |
142 | void omap_fbdev_free(struct drm_device *dev); | 140 | void omap_fbdev_free(struct drm_device *dev); |
141 | #else | ||
142 | static inline struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev) | ||
143 | { | ||
144 | return NULL; | ||
145 | } | ||
146 | static inline void omap_fbdev_free(struct drm_device *dev) | ||
147 | { | ||
148 | } | ||
149 | #endif | ||
143 | 150 | ||
144 | struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc); | 151 | struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc); |
145 | enum omap_channel omap_crtc_channel(struct drm_crtc *crtc); | 152 | enum omap_channel omap_crtc_channel(struct drm_crtc *crtc); |
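The #ifdef block above pairs with the Makefile change: when CONFIG_DRM_FBDEV_EMULATION is disabled, omap_fbdev.o is not built at all, and the static inline stubs keep every caller compiling, so call sites such as dev_load() need no conditionals of their own. A generic sketch of the same pattern (the names here are illustrative, not from the driver):

	struct device;
	struct feature;

	#ifdef CONFIG_SOME_FEATURE
	struct feature *feature_init(struct device *dev);
	void feature_free(struct device *dev);
	#else
	/* The stubs compile away entirely; callers treat NULL as
	 * "feature not built in" without any #ifdef of their own. */
	static inline struct feature *feature_init(struct device *dev)
	{
		return NULL;
	}
	static inline void feature_free(struct device *dev)
	{
	}
	#endif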
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 0c104ad7ef66..61714e9670ae 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -110,8 +110,6 @@ static int omap_encoder_update(struct drm_encoder *encoder, | |||
110 | struct omap_dss_driver *dssdrv = dssdev->driver; | 110 | struct omap_dss_driver *dssdrv = dssdev->driver; |
111 | int ret; | 111 | int ret; |
112 | 112 | ||
113 | dssdev->src->manager = omap_dss_get_overlay_manager(channel); | ||
114 | |||
115 | if (dssdrv->check_timings) { | 113 | if (dssdrv->check_timings) { |
116 | ret = dssdrv->check_timings(dssdev, timings); | 114 | ret = dssdrv->check_timings(dssdev, timings); |
117 | } else { | 115 | } else { |
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 24f92bea39c7..3cb16f0cf381 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -295,6 +295,10 @@ fini: | |||
295 | drm_fb_helper_fini(helper); | 295 | drm_fb_helper_fini(helper); |
296 | fail: | 296 | fail: |
297 | kfree(fbdev); | 297 | kfree(fbdev); |
298 | |||
299 | dev_warn(dev->dev, "omap_fbdev_init failed\n"); | ||
300 | /* well, limp along without an fbdev.. maybe X11 will work? */ | ||
301 | |||
298 | return NULL; | 302 | return NULL; |
299 | } | 303 | } |
300 | 304 | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 7ed08fdc4c42..984462622291 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -25,24 +25,15 @@ | |||
25 | #include "omap_drv.h" | 25 | #include "omap_drv.h" |
26 | #include "omap_dmm_tiler.h" | 26 | #include "omap_dmm_tiler.h" |
27 | 27 | ||
28 | /* remove these once drm core helpers are merged */ | ||
29 | struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); | ||
30 | void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, | ||
31 | bool dirty, bool accessed); | ||
32 | int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); | ||
33 | |||
34 | /* | 28 | /* |
35 | * GEM buffer object implementation. | 29 | * GEM buffer object implementation. |
36 | */ | 30 | */ |
37 | 31 | ||
38 | #define to_omap_bo(x) container_of(x, struct omap_gem_object, base) | ||
39 | |||
40 | /* note: we use upper 8 bits of flags for driver-internal flags: */ | 32 | /* note: we use upper 8 bits of flags for driver-internal flags: */ |
41 | #define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */ | 33 | #define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */ |
42 | #define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */ | 34 | #define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */ |
43 | #define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */ | 35 | #define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */ |
44 | 36 | ||
45 | |||
46 | struct omap_gem_object { | 37 | struct omap_gem_object { |
47 | struct drm_gem_object base; | 38 | struct drm_gem_object base; |
48 | 39 | ||
@@ -119,8 +110,7 @@ struct omap_gem_object { | |||
119 | } *sync; | 110 | } *sync; |
120 | }; | 111 | }; |
121 | 112 | ||
122 | static int get_pages(struct drm_gem_object *obj, struct page ***pages); | 113 | #define to_omap_bo(x) container_of(x, struct omap_gem_object, base) |
123 | static uint64_t mmap_offset(struct drm_gem_object *obj); | ||
124 | 114 | ||
125 | /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are | 115 | /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are |
126 | * not necessarily pinned in TILER all the time, and (b) when they are | 116 | * not necessarily pinned in TILER all the time, and (b) when they are |
@@ -134,27 +124,69 @@ static uint64_t mmap_offset(struct drm_gem_object *obj); | |||
134 | * for later.. | 124 | * for later.. |
135 | */ | 125 | */ |
136 | #define NUM_USERGART_ENTRIES 2 | 126 | #define NUM_USERGART_ENTRIES 2 |
137 | struct usergart_entry { | 127 | struct omap_drm_usergart_entry { |
138 | struct tiler_block *block; /* the reserved tiler block */ | 128 | struct tiler_block *block; /* the reserved tiler block */ |
139 | dma_addr_t paddr; | 129 | dma_addr_t paddr; |
140 | struct drm_gem_object *obj; /* the current pinned obj */ | 130 | struct drm_gem_object *obj; /* the current pinned obj */ |
141 | pgoff_t obj_pgoff; /* page offset of obj currently | 131 | pgoff_t obj_pgoff; /* page offset of obj currently |
142 | mapped in */ | 132 | mapped in */ |
143 | }; | 133 | }; |
144 | static struct { | 134 | |
145 | struct usergart_entry entry[NUM_USERGART_ENTRIES]; | 135 | struct omap_drm_usergart { |
136 | struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES]; | ||
146 | int height; /* height in rows */ | 137 | int height; /* height in rows */ |
147 | int height_shift; /* ilog2(height in rows) */ | 138 | int height_shift; /* ilog2(height in rows) */ |
148 | int slot_shift; /* ilog2(width per slot) */ | 139 | int slot_shift; /* ilog2(width per slot) */ |
149 | int stride_pfn; /* stride in pages */ | 140 | int stride_pfn; /* stride in pages */ |
150 | int last; /* index of last used entry */ | 141 | int last; /* index of last used entry */ |
151 | } *usergart; | 142 | }; |
143 | |||
144 | /* ----------------------------------------------------------------------------- | ||
145 | * Helpers | ||
146 | */ | ||
147 | |||
148 | /** get mmap offset */ | ||
149 | static uint64_t mmap_offset(struct drm_gem_object *obj) | ||
150 | { | ||
151 | struct drm_device *dev = obj->dev; | ||
152 | int ret; | ||
153 | size_t size; | ||
154 | |||
155 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
156 | |||
157 | /* Make it mmapable */ | ||
158 | size = omap_gem_mmap_size(obj); | ||
159 | ret = drm_gem_create_mmap_offset_size(obj, size); | ||
160 | if (ret) { | ||
161 | dev_err(dev->dev, "could not allocate mmap offset\n"); | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | return drm_vma_node_offset_addr(&obj->vma_node); | ||
166 | } | ||
167 | |||
168 | /* GEM objects can either be allocated from contiguous memory (in which | ||
169 | * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non | ||
170 | * contiguous buffers can be remapped in TILER/DMM if they need to be | ||
171 | * contiguous... but we don't do this all the time to reduce pressure | ||
172 | * on TILER/DMM space when we know at allocation time that the buffer | ||
173 | * will need to be scanned out. | ||
174 | */ | ||
175 | static inline bool is_shmem(struct drm_gem_object *obj) | ||
176 | { | ||
177 | return obj->filp != NULL; | ||
178 | } | ||
179 | |||
180 | /* ----------------------------------------------------------------------------- | ||
181 | * Eviction | ||
182 | */ | ||
152 | 183 | ||
153 | static void evict_entry(struct drm_gem_object *obj, | 184 | static void evict_entry(struct drm_gem_object *obj, |
154 | enum tiler_fmt fmt, struct usergart_entry *entry) | 185 | enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry) |
155 | { | 186 | { |
156 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | 187 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
157 | int n = usergart[fmt].height; | 188 | struct omap_drm_private *priv = obj->dev->dev_private; |
189 | int n = priv->usergart[fmt].height; | ||
158 | size_t size = PAGE_SIZE * n; | 190 | size_t size = PAGE_SIZE * n; |
159 | loff_t off = mmap_offset(obj) + | 191 | loff_t off = mmap_offset(obj) + |
160 | (entry->obj_pgoff << PAGE_SHIFT); | 192 | (entry->obj_pgoff << PAGE_SHIFT); |
@@ -180,46 +212,25 @@ static void evict_entry(struct drm_gem_object *obj, | |||
180 | static void evict(struct drm_gem_object *obj) | 212 | static void evict(struct drm_gem_object *obj) |
181 | { | 213 | { |
182 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | 214 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
215 | struct omap_drm_private *priv = obj->dev->dev_private; | ||
183 | 216 | ||
184 | if (omap_obj->flags & OMAP_BO_TILED) { | 217 | if (omap_obj->flags & OMAP_BO_TILED) { |
185 | enum tiler_fmt fmt = gem2fmt(omap_obj->flags); | 218 | enum tiler_fmt fmt = gem2fmt(omap_obj->flags); |
186 | int i; | 219 | int i; |
187 | 220 | ||
188 | if (!usergart) | ||
189 | return; | ||
190 | |||
191 | for (i = 0; i < NUM_USERGART_ENTRIES; i++) { | 221 | for (i = 0; i < NUM_USERGART_ENTRIES; i++) { |
192 | struct usergart_entry *entry = &usergart[fmt].entry[i]; | 222 | struct omap_drm_usergart_entry *entry = |
223 | &priv->usergart[fmt].entry[i]; | ||
224 | |||
193 | if (entry->obj == obj) | 225 | if (entry->obj == obj) |
194 | evict_entry(obj, fmt, entry); | 226 | evict_entry(obj, fmt, entry); |
195 | } | 227 | } |
196 | } | 228 | } |
197 | } | 229 | } |
198 | 230 | ||
199 | /* GEM objects can either be allocated from contiguous memory (in which | 231 | /* ----------------------------------------------------------------------------- |
200 | * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non | 232 | * Page Management |
201 | * contiguous buffers can be remapped in TILER/DMM if they need to be | ||
202 | * contiguous... but we don't do this all the time to reduce pressure | ||
203 | * on TILER/DMM space when we know at allocation time that the buffer | ||
204 | * will need to be scanned out. | ||
205 | */ | ||
206 | static inline bool is_shmem(struct drm_gem_object *obj) | ||
207 | { | ||
208 | return obj->filp != NULL; | ||
209 | } | ||
210 | |||
211 | /** | ||
212 | * shmem buffers that are mapped cached can simulate coherency via using | ||
213 | * page faulting to keep track of dirty pages | ||
214 | */ | 233 | */ |
215 | static inline bool is_cached_coherent(struct drm_gem_object *obj) | ||
216 | { | ||
217 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
218 | return is_shmem(obj) && | ||
219 | ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED); | ||
220 | } | ||
221 | |||
222 | static DEFINE_SPINLOCK(sync_lock); | ||
223 | 234 | ||
224 | /** ensure backing pages are allocated */ | 235 | /** ensure backing pages are allocated */ |
225 | static int omap_gem_attach_pages(struct drm_gem_object *obj) | 236 | static int omap_gem_attach_pages(struct drm_gem_object *obj) |
@@ -272,6 +283,28 @@ free_pages: | |||
272 | return ret; | 283 | return ret; |
273 | } | 284 | } |
274 | 285 | ||
286 | /* acquire pages when needed (for example, for DMA where physically | ||
287 | * contiguous buffer is not required | ||
288 | */ | ||
289 | static int get_pages(struct drm_gem_object *obj, struct page ***pages) | ||
290 | { | ||
291 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
292 | int ret = 0; | ||
293 | |||
294 | if (is_shmem(obj) && !omap_obj->pages) { | ||
295 | ret = omap_gem_attach_pages(obj); | ||
296 | if (ret) { | ||
297 | dev_err(obj->dev->dev, "could not attach pages\n"); | ||
298 | return ret; | ||
299 | } | ||
300 | } | ||
301 | |||
302 | /* TODO: even phys-contig.. we should have a list of pages? */ | ||
303 | *pages = omap_obj->pages; | ||
304 | |||
305 | return 0; | ||
306 | } | ||
307 | |||
275 | /** release backing pages */ | 308 | /** release backing pages */ |
276 | static void omap_gem_detach_pages(struct drm_gem_object *obj) | 309 | static void omap_gem_detach_pages(struct drm_gem_object *obj) |
277 | { | 310 | { |
@@ -301,26 +334,6 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj) | |||
301 | return to_omap_bo(obj)->flags; | 334 | return to_omap_bo(obj)->flags; |
302 | } | 335 | } |
303 | 336 | ||
304 | /** get mmap offset */ | ||
305 | static uint64_t mmap_offset(struct drm_gem_object *obj) | ||
306 | { | ||
307 | struct drm_device *dev = obj->dev; | ||
308 | int ret; | ||
309 | size_t size; | ||
310 | |||
311 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
312 | |||
313 | /* Make it mmapable */ | ||
314 | size = omap_gem_mmap_size(obj); | ||
315 | ret = drm_gem_create_mmap_offset_size(obj, size); | ||
316 | if (ret) { | ||
317 | dev_err(dev->dev, "could not allocate mmap offset\n"); | ||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | return drm_vma_node_offset_addr(&obj->vma_node); | ||
322 | } | ||
323 | |||
324 | uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj) | 337 | uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj) |
325 | { | 338 | { |
326 | uint64_t offset; | 339 | uint64_t offset; |
@@ -362,6 +375,10 @@ int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h) | |||
362 | return -EINVAL; | 375 | return -EINVAL; |
363 | } | 376 | } |
364 | 377 | ||
378 | /* ----------------------------------------------------------------------------- | ||
379 | * Fault Handling | ||
380 | */ | ||
381 | |||
365 | /* Normal handling for the case of faulting in non-tiled buffers */ | 382 | /* Normal handling for the case of faulting in non-tiled buffers */ |
366 | static int fault_1d(struct drm_gem_object *obj, | 383 | static int fault_1d(struct drm_gem_object *obj, |
367 | struct vm_area_struct *vma, struct vm_fault *vmf) | 384 | struct vm_area_struct *vma, struct vm_fault *vmf) |
@@ -393,7 +410,8 @@ static int fault_2d(struct drm_gem_object *obj, | |||
393 | struct vm_area_struct *vma, struct vm_fault *vmf) | 410 | struct vm_area_struct *vma, struct vm_fault *vmf) |
394 | { | 411 | { |
395 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | 412 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
396 | struct usergart_entry *entry; | 413 | struct omap_drm_private *priv = obj->dev->dev_private; |
414 | struct omap_drm_usergart_entry *entry; | ||
397 | enum tiler_fmt fmt = gem2fmt(omap_obj->flags); | 415 | enum tiler_fmt fmt = gem2fmt(omap_obj->flags); |
398 | struct page *pages[64]; /* XXX is this too much to have on stack? */ | 416 | struct page *pages[64]; /* XXX is this too much to have on stack? */ |
399 | unsigned long pfn; | 417 | unsigned long pfn; |
@@ -406,8 +424,8 @@ static int fault_2d(struct drm_gem_object *obj, | |||
406 | * that need to be mapped in to fill 4kb wide CPU page. If the slot | 424 | * that need to be mapped in to fill 4kb wide CPU page. If the slot |
407 | * height is 64, then 64 pages fill a 4kb wide by 64 row region. | 425 | * height is 64, then 64 pages fill a 4kb wide by 64 row region. |
408 | */ | 426 | */ |
409 | const int n = usergart[fmt].height; | 427 | const int n = priv->usergart[fmt].height; |
410 | const int n_shift = usergart[fmt].height_shift; | 428 | const int n_shift = priv->usergart[fmt].height_shift; |
411 | 429 | ||
412 | /* | 430 | /* |
413 | * If buffer width in bytes > PAGE_SIZE then the virtual stride is | 431 | * If buffer width in bytes > PAGE_SIZE then the virtual stride is |
@@ -428,11 +446,11 @@ static int fault_2d(struct drm_gem_object *obj, | |||
428 | base_pgoff = round_down(pgoff, m << n_shift); | 446 | base_pgoff = round_down(pgoff, m << n_shift); |
429 | 447 | ||
430 | /* figure out buffer width in slots */ | 448 | /* figure out buffer width in slots */ |
431 | slots = omap_obj->width >> usergart[fmt].slot_shift; | 449 | slots = omap_obj->width >> priv->usergart[fmt].slot_shift; |
432 | 450 | ||
433 | vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT); | 451 | vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT); |
434 | 452 | ||
435 | entry = &usergart[fmt].entry[usergart[fmt].last]; | 453 | entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last]; |
436 | 454 | ||
437 | /* evict previous buffer using this usergart entry, if any: */ | 455 | /* evict previous buffer using this usergart entry, if any: */ |
438 | if (entry->obj) | 456 | if (entry->obj) |
@@ -479,12 +497,13 @@ static int fault_2d(struct drm_gem_object *obj, | |||
479 | 497 | ||
480 | for (i = n; i > 0; i--) { | 498 | for (i = n; i > 0; i--) { |
481 | vm_insert_mixed(vma, (unsigned long)vaddr, pfn); | 499 | vm_insert_mixed(vma, (unsigned long)vaddr, pfn); |
482 | pfn += usergart[fmt].stride_pfn; | 500 | pfn += priv->usergart[fmt].stride_pfn; |
483 | vaddr += PAGE_SIZE * m; | 501 | vaddr += PAGE_SIZE * m; |
484 | } | 502 | } |
485 | 503 | ||
486 | /* simple round-robin: */ | 504 | /* simple round-robin: */ |
487 | usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES; | 505 | priv->usergart[fmt].last = (priv->usergart[fmt].last + 1) |
506 | % NUM_USERGART_ENTRIES; | ||
488 | 507 | ||
489 | return 0; | 508 | return 0; |
490 | } | 509 | } |
@@ -596,6 +615,9 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj, | |||
596 | return 0; | 615 | return 0; |
597 | } | 616 | } |
598 | 617 | ||
618 | /* ----------------------------------------------------------------------------- | ||
619 | * Dumb Buffers | ||
620 | */ | ||
599 | 621 | ||
600 | /** | 622 | /** |
601 | * omap_gem_dumb_create - create a dumb buffer | 623 | * omap_gem_dumb_create - create a dumb buffer |
@@ -653,6 +675,7 @@ fail: | |||
653 | return ret; | 675 | return ret; |
654 | } | 676 | } |
655 | 677 | ||
678 | #ifdef CONFIG_DRM_FBDEV_EMULATION | ||
656 | /* Set scrolling position. This allows us to implement fast scrolling | 679 | /* Set scrolling position. This allows us to implement fast scrolling |
657 | * for console. | 680 | * for console. |
658 | * | 681 | * |
@@ -689,6 +712,22 @@ fail: | |||
689 | 712 | ||
690 | return ret; | 713 | return ret; |
691 | } | 714 | } |
715 | #endif | ||
716 | |||
717 | /* ----------------------------------------------------------------------------- | ||
718 | * Memory Management & DMA Sync | ||
719 | */ | ||
720 | |||
721 | /** | ||
722 | * shmem buffers that are mapped cached can simulate coherency via using | ||
723 | * page faulting to keep track of dirty pages | ||
724 | */ | ||
725 | static inline bool is_cached_coherent(struct drm_gem_object *obj) | ||
726 | { | ||
727 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
728 | return is_shmem(obj) && | ||
729 | ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED); | ||
730 | } | ||
692 | 731 | ||
693 | /* Sync the buffer for CPU access.. note pages should already be | 732 | /* Sync the buffer for CPU access.. note pages should already be |
694 | * attached, ie. omap_gem_get_pages() | 733 | * attached, ie. omap_gem_get_pages() |
@@ -865,28 +904,6 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient) | |||
865 | return ret; | 904 | return ret; |
866 | } | 905 | } |
867 | 906 | ||
868 | /* acquire pages when needed (for example, for DMA where physically | ||
869 | * contiguous buffer is not required | ||
870 | */ | ||
871 | static int get_pages(struct drm_gem_object *obj, struct page ***pages) | ||
872 | { | ||
873 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
874 | int ret = 0; | ||
875 | |||
876 | if (is_shmem(obj) && !omap_obj->pages) { | ||
877 | ret = omap_gem_attach_pages(obj); | ||
878 | if (ret) { | ||
879 | dev_err(obj->dev->dev, "could not attach pages\n"); | ||
880 | return ret; | ||
881 | } | ||
882 | } | ||
883 | |||
884 | /* TODO: even phys-contig.. we should have a list of pages? */ | ||
885 | *pages = omap_obj->pages; | ||
886 | |||
887 | return 0; | ||
888 | } | ||
889 | |||
890 | /* if !remap, and we don't have pages backing, then fail, rather than | 907 | /* if !remap, and we don't have pages backing, then fail, rather than |
891 | * increasing the pin count (which we don't really do yet anyways, | 908 | * increasing the pin count (which we don't really do yet anyways, |
892 | * because we don't support swapping pages back out). And 'remap' | 909 | * because we don't support swapping pages back out). And 'remap' |
@@ -924,6 +941,7 @@ int omap_gem_put_pages(struct drm_gem_object *obj) | |||
924 | return 0; | 941 | return 0; |
925 | } | 942 | } |
926 | 943 | ||
944 | #ifdef CONFIG_DRM_FBDEV_EMULATION | ||
927 | /* Get kernel virtual address for CPU access.. this more or less only | 945 | /* Get kernel virtual address for CPU access.. this more or less only |
928 | * exists for omap_fbdev. This should be called with struct_mutex | 946 | * exists for omap_fbdev. This should be called with struct_mutex |
929 | * held. | 947 | * held. |
@@ -942,6 +960,11 @@ void *omap_gem_vaddr(struct drm_gem_object *obj) | |||
942 | } | 960 | } |
943 | return omap_obj->vaddr; | 961 | return omap_obj->vaddr; |
944 | } | 962 | } |
963 | #endif | ||
964 | |||
965 | /* ----------------------------------------------------------------------------- | ||
966 | * Power Management | ||
967 | */ | ||
945 | 968 | ||
946 | #ifdef CONFIG_PM | 969 | #ifdef CONFIG_PM |
947 | /* re-pin objects in DMM in resume path: */ | 970 | /* re-pin objects in DMM in resume path: */ |
@@ -971,6 +994,10 @@ int omap_gem_resume(struct device *dev) | |||
971 | } | 994 | } |
972 | #endif | 995 | #endif |
973 | 996 | ||
997 | /* ----------------------------------------------------------------------------- | ||
998 | * DebugFS | ||
999 | */ | ||
1000 | |||
974 | #ifdef CONFIG_DEBUG_FS | 1001 | #ifdef CONFIG_DEBUG_FS |
975 | void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) | 1002 | void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) |
976 | { | 1003 | { |
@@ -1017,9 +1044,12 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m) | |||
1017 | } | 1044 | } |
1018 | #endif | 1045 | #endif |
1019 | 1046 | ||
1020 | /* Buffer Synchronization: | 1047 | /* ----------------------------------------------------------------------------- |
1048 | * Buffer Synchronization | ||
1021 | */ | 1049 | */ |
1022 | 1050 | ||
1051 | static DEFINE_SPINLOCK(sync_lock); | ||
1052 | |||
1023 | struct omap_gem_sync_waiter { | 1053 | struct omap_gem_sync_waiter { |
1024 | struct list_head list; | 1054 | struct list_head list; |
1025 | struct omap_gem_object *omap_obj; | 1055 | struct omap_gem_object *omap_obj; |
@@ -1265,6 +1295,10 @@ unlock: | |||
1265 | return ret; | 1295 | return ret; |
1266 | } | 1296 | } |
1267 | 1297 | ||
1298 | /* ----------------------------------------------------------------------------- | ||
1299 | * Constructor & Destructor | ||
1300 | */ | ||
1301 | |||
1268 | /* don't call directly.. called from GEM core when it is time to actually | 1302 | /* don't call directly.. called from GEM core when it is time to actually |
1269 | * free the object.. | 1303 | * free the object.. |
1270 | */ | 1304 | */ |
@@ -1282,8 +1316,6 @@ void omap_gem_free_object(struct drm_gem_object *obj) | |||
1282 | list_del(&omap_obj->mm_list); | 1316 | list_del(&omap_obj->mm_list); |
1283 | spin_unlock(&priv->list_lock); | 1317 | spin_unlock(&priv->list_lock); |
1284 | 1318 | ||
1285 | drm_gem_free_mmap_offset(obj); | ||
1286 | |||
1287 | /* this means the object is still pinned.. which really should | 1319 | /* this means the object is still pinned.. which really should |
1288 | * not happen. I think.. | 1320 | * not happen. I think.. |
1289 | */ | 1321 | */ |
@@ -1308,31 +1340,7 @@ void omap_gem_free_object(struct drm_gem_object *obj) | |||
1308 | 1340 | ||
1309 | drm_gem_object_release(obj); | 1341 | drm_gem_object_release(obj); |
1310 | 1342 | ||
1311 | kfree(obj); | 1343 | kfree(omap_obj); |
1312 | } | ||
1313 | |||
1314 | /* convenience method to construct a GEM buffer object, and userspace handle */ | ||
1315 | int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, | ||
1316 | union omap_gem_size gsize, uint32_t flags, uint32_t *handle) | ||
1317 | { | ||
1318 | struct drm_gem_object *obj; | ||
1319 | int ret; | ||
1320 | |||
1321 | obj = omap_gem_new(dev, gsize, flags); | ||
1322 | if (!obj) | ||
1323 | return -ENOMEM; | ||
1324 | |||
1325 | ret = drm_gem_handle_create(file, obj, handle); | ||
1326 | if (ret) { | ||
1327 | drm_gem_object_release(obj); | ||
1328 | kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */ | ||
1329 | return ret; | ||
1330 | } | ||
1331 | |||
1332 | /* drop reference from allocate - handle holds it now */ | ||
1333 | drm_gem_object_unreference_unlocked(obj); | ||
1334 | |||
1335 | return 0; | ||
1336 | } | 1344 | } |
1337 | 1345 | ||
1338 | /* GEM buffer object constructor */ | 1346 | /* GEM buffer object constructor */ |
@@ -1341,15 +1349,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, | |||
1341 | { | 1349 | { |
1342 | struct omap_drm_private *priv = dev->dev_private; | 1350 | struct omap_drm_private *priv = dev->dev_private; |
1343 | struct omap_gem_object *omap_obj; | 1351 | struct omap_gem_object *omap_obj; |
1344 | struct drm_gem_object *obj = NULL; | 1352 | struct drm_gem_object *obj; |
1345 | struct address_space *mapping; | 1353 | struct address_space *mapping; |
1346 | size_t size; | 1354 | size_t size; |
1347 | int ret; | 1355 | int ret; |
1348 | 1356 | ||
1349 | if (flags & OMAP_BO_TILED) { | 1357 | if (flags & OMAP_BO_TILED) { |
1350 | if (!usergart) { | 1358 | if (!priv->usergart) { |
1351 | dev_err(dev->dev, "Tiled buffers require DMM\n"); | 1359 | dev_err(dev->dev, "Tiled buffers require DMM\n"); |
1352 | goto fail; | 1360 | return NULL; |
1353 | } | 1361 | } |
1354 | 1362 | ||
1355 | /* tiled buffers are always shmem paged backed.. when they are | 1363 | /* tiled buffers are always shmem paged backed.. when they are |
@@ -1420,16 +1428,42 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, | |||
1420 | return obj; | 1428 | return obj; |
1421 | 1429 | ||
1422 | fail: | 1430 | fail: |
1423 | if (obj) | 1431 | omap_gem_free_object(obj); |
1432 | return NULL; | ||
1433 | } | ||
1434 | |||
1435 | /* convenience method to construct a GEM buffer object, and userspace handle */ | ||
1436 | int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, | ||
1437 | union omap_gem_size gsize, uint32_t flags, uint32_t *handle) | ||
1438 | { | ||
1439 | struct drm_gem_object *obj; | ||
1440 | int ret; | ||
1441 | |||
1442 | obj = omap_gem_new(dev, gsize, flags); | ||
1443 | if (!obj) | ||
1444 | return -ENOMEM; | ||
1445 | |||
1446 | ret = drm_gem_handle_create(file, obj, handle); | ||
1447 | if (ret) { | ||
1424 | omap_gem_free_object(obj); | 1448 | omap_gem_free_object(obj); |
1449 | return ret; | ||
1450 | } | ||
1425 | 1451 | ||
1426 | return NULL; | 1452 | /* drop reference from allocate - handle holds it now */ |
1453 | drm_gem_object_unreference_unlocked(obj); | ||
1454 | |||
1455 | return 0; | ||
1427 | } | 1456 | } |
1428 | 1457 | ||
1429 | /* init/cleanup.. if DMM is used, we need to set some stuff up.. */ | 1458 | /* ----------------------------------------------------------------------------- |
1459 | * Init & Cleanup | ||
1460 | */ | ||
1461 | |||
1462 | /* If DMM is used, we need to set some stuff up.. */ | ||
1430 | void omap_gem_init(struct drm_device *dev) | 1463 | void omap_gem_init(struct drm_device *dev) |
1431 | { | 1464 | { |
1432 | struct omap_drm_private *priv = dev->dev_private; | 1465 | struct omap_drm_private *priv = dev->dev_private; |
1466 | struct omap_drm_usergart *usergart; | ||
1433 | const enum tiler_fmt fmts[] = { | 1467 | const enum tiler_fmt fmts[] = { |
1434 | TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT | 1468 | TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT |
1435 | }; | 1469 | }; |
@@ -1458,10 +1492,11 @@ void omap_gem_init(struct drm_device *dev) | |||
1458 | usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT; | 1492 | usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT; |
1459 | usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i); | 1493 | usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i); |
1460 | for (j = 0; j < NUM_USERGART_ENTRIES; j++) { | 1494 | for (j = 0; j < NUM_USERGART_ENTRIES; j++) { |
1461 | struct usergart_entry *entry = &usergart[i].entry[j]; | 1495 | struct omap_drm_usergart_entry *entry; |
1462 | struct tiler_block *block = | 1496 | struct tiler_block *block; |
1463 | tiler_reserve_2d(fmts[i], w, h, | 1497 | |
1464 | PAGE_SIZE); | 1498 | entry = &usergart[i].entry[j]; |
1499 | block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE); | ||
1465 | if (IS_ERR(block)) { | 1500 | if (IS_ERR(block)) { |
1466 | dev_err(dev->dev, | 1501 | dev_err(dev->dev, |
1467 | "reserve failed: %d, %d, %ld\n", | 1502 | "reserve failed: %d, %d, %ld\n", |
@@ -1477,13 +1512,16 @@ void omap_gem_init(struct drm_device *dev) | |||
1477 | } | 1512 | } |
1478 | } | 1513 | } |
1479 | 1514 | ||
1515 | priv->usergart = usergart; | ||
1480 | priv->has_dmm = true; | 1516 | priv->has_dmm = true; |
1481 | } | 1517 | } |
1482 | 1518 | ||
1483 | void omap_gem_deinit(struct drm_device *dev) | 1519 | void omap_gem_deinit(struct drm_device *dev) |
1484 | { | 1520 | { |
1521 | struct omap_drm_private *priv = dev->dev_private; | ||
1522 | |||
1485 | /* I believe we can rely on there being no more outstanding GEM | 1523 | /* I believe we can rely on there being no more outstanding GEM |
1486 | * objects which could depend on usergart/dmm at this point. | 1524 | * objects which could depend on usergart/dmm at this point. |
1487 | */ | 1525 | */ |
1488 | kfree(usergart); | 1526 | kfree(priv->usergart); |
1489 | } | 1527 | } |
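Most of the omap_gem.c diff is mechanical regrouping, but one change is structural: the usergart table moves from a file-scope static into struct omap_drm_private, so the state lives per DRM device rather than once per module. Every former usergart[fmt] access becomes a lookup through dev_private, as in this sketch distilled from the hunks above (the local "ug" is shorthand added here, not a name from the diff):

	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart *ug = &priv->usergart[fmt];
	struct omap_drm_usergart_entry *entry;

	/* e.g. the round-robin slot selection in fault_2d(): */
	entry = &ug->entry[ug->last];
	ug->last = (ug->last + 1) % NUM_USERGART_ENTRIES;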
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index d5ecabd6c14c..d75b197eff46 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -188,33 +188,6 @@ static const struct drm_plane_helper_funcs omap_plane_helper_funcs = { | |||
188 | .atomic_disable = omap_plane_atomic_disable, | 188 | .atomic_disable = omap_plane_atomic_disable, |
189 | }; | 189 | }; |
190 | 190 | ||
191 | static void omap_plane_reset(struct drm_plane *plane) | ||
192 | { | ||
193 | struct omap_plane *omap_plane = to_omap_plane(plane); | ||
194 | struct omap_plane_state *omap_state; | ||
195 | |||
196 | if (plane->state && plane->state->fb) | ||
197 | drm_framebuffer_unreference(plane->state->fb); | ||
198 | |||
199 | kfree(plane->state); | ||
200 | plane->state = NULL; | ||
201 | |||
202 | omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL); | ||
203 | if (omap_state == NULL) | ||
204 | return; | ||
205 | |||
206 | /* | ||
207 | * Set defaults depending on whether we are a primary or overlay | ||
208 | * plane. | ||
209 | */ | ||
210 | omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY | ||
211 | ? 0 : omap_plane->id; | ||
212 | omap_state->base.rotation = BIT(DRM_ROTATE_0); | ||
213 | |||
214 | plane->state = &omap_state->base; | ||
215 | plane->state->plane = plane; | ||
216 | } | ||
217 | |||
218 | static void omap_plane_destroy(struct drm_plane *plane) | 191 | static void omap_plane_destroy(struct drm_plane *plane) |
219 | { | 192 | { |
220 | struct omap_plane *omap_plane = to_omap_plane(plane); | 193 | struct omap_plane *omap_plane = to_omap_plane(plane); |
@@ -270,6 +243,32 @@ static void omap_plane_atomic_destroy_state(struct drm_plane *plane, | |||
270 | kfree(to_omap_plane_state(state)); | 243 | kfree(to_omap_plane_state(state)); |
271 | } | 244 | } |
272 | 245 | ||
246 | static void omap_plane_reset(struct drm_plane *plane) | ||
247 | { | ||
248 | struct omap_plane *omap_plane = to_omap_plane(plane); | ||
249 | struct omap_plane_state *omap_state; | ||
250 | |||
251 | if (plane->state) { | ||
252 | omap_plane_atomic_destroy_state(plane, plane->state); | ||
253 | plane->state = NULL; | ||
254 | } | ||
255 | |||
256 | omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL); | ||
257 | if (omap_state == NULL) | ||
258 | return; | ||
259 | |||
260 | /* | ||
261 | * Set defaults depending on whether we are a primary or overlay | ||
262 | * plane. | ||
263 | */ | ||
264 | omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY | ||
265 | ? 0 : omap_plane->id; | ||
266 | omap_state->base.rotation = BIT(DRM_ROTATE_0); | ||
267 | |||
268 | plane->state = &omap_state->base; | ||
269 | plane->state->plane = plane; | ||
270 | } | ||
271 | |||
273 | static int omap_plane_atomic_set_property(struct drm_plane *plane, | 272 | static int omap_plane_atomic_set_property(struct drm_plane *plane, |
274 | struct drm_plane_state *state, | 273 | struct drm_plane_state *state, |
275 | struct drm_property *property, | 274 | struct drm_property *property, |
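Besides moving omap_plane_reset() below the helper it now calls, the hunk fixes how the old state was torn down: kfree(plane->state) freed the right allocation only because the base drm_plane_state happens to be the first member of omap_plane_state, and it skipped the teardown that omap_plane_atomic_destroy_state() performs. A sketch of the safe form, assuming to_omap_plane_state() is the usual container_of() wrapper:

	/* Free the containing subclass object, not just the embedded base;
	 * the destroy helper also drops the fb reference before this. */
	struct omap_plane_state *omap_state =
			container_of(state, struct omap_plane_state, base);

	kfree(omap_state);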
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index 6df1f2a1bc52..c10fdfc0930f 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -5,8 +5,9 @@ | |||
5 | * | 5 | * |
6 | * Authors: Ravi Ramachandra <r.ramachandra@ti.com>, | 6 | * Authors: Ravi Ramachandra <r.ramachandra@ti.com>, |
7 | * Lajos Molnar <molnar@ti.com> | 7 | * Lajos Molnar <molnar@ti.com> |
8 | * Andy Gross <andy.gross@ti.com> | ||
8 | * | 9 | * |
9 | * Copyright (C) 2009-2010 Texas Instruments, Inc. | 10 | * Copyright (C) 2012 Texas Instruments, Inc. |
10 | * | 11 | * |
11 | * This package is free software; you can redistribute it and/or modify | 12 | * This package is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License version 2 as | 13 | * it under the terms of the GNU General Public License version 2 as |
@@ -17,684 +18,244 @@ | |||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | 18 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. |
18 | * | 19 | * |
19 | */ | 20 | */ |
21 | #include <linux/init.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/wait.h> | ||
26 | #include <linux/bitmap.h> | ||
20 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
21 | #include <linux/spinlock.h> | 28 | #include "tcm.h" |
22 | 29 | ||
23 | #include "tcm-sita.h" | 30 | static unsigned long mask[8]; |
24 | 31 | /* | |
25 | #define ALIGN_DOWN(value, align) ((value) & ~((align) - 1)) | 32 | * pos position in bitmap |
26 | 33 | * w width in slots | |
27 | /* Individual selection criteria for different scan areas */ | 34 | * h height in slots |
28 | static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL; | 35 | * map ptr to bitmap |
29 | static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE; | 36 | * stride slots in a row |
30 | |||
31 | /********************************************* | ||
32 | * TCM API - Sita Implementation | ||
33 | *********************************************/ | ||
34 | static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align, | ||
35 | struct tcm_area *area); | ||
36 | static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area); | ||
37 | static s32 sita_free(struct tcm *tcm, struct tcm_area *area); | ||
38 | static void sita_deinit(struct tcm *tcm); | ||
39 | |||
40 | /********************************************* | ||
41 | * Main Scanner functions | ||
42 | *********************************************/ | ||
43 | static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align, | ||
44 | struct tcm_area *area); | ||
45 | |||
46 | static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align, | ||
47 | struct tcm_area *field, struct tcm_area *area); | ||
48 | |||
49 | static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align, | ||
50 | struct tcm_area *field, struct tcm_area *area); | ||
51 | |||
52 | static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots, | ||
53 | struct tcm_area *field, struct tcm_area *area); | ||
54 | |||
55 | /********************************************* | ||
56 | * Support Infrastructure Methods | ||
57 | *********************************************/ | ||
58 | static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h); | ||
59 | |||
60 | static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h, | ||
61 | struct tcm_area *field, s32 criteria, | ||
62 | struct score *best); | ||
63 | |||
64 | static void get_nearness_factor(struct tcm_area *field, | ||
65 | struct tcm_area *candidate, | ||
66 | struct nearness_factor *nf); | ||
67 | |||
68 | static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area, | ||
69 | struct neighbor_stats *stat); | ||
70 | |||
71 | static void fill_area(struct tcm *tcm, | ||
72 | struct tcm_area *area, struct tcm_area *parent); | ||
73 | |||
74 | |||
75 | /*********************************************/ | ||
76 | |||
77 | /********************************************* | ||
78 | * Utility Methods | ||
79 | *********************************************/ | ||
80 | struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr) | ||
81 | { | ||
82 | struct tcm *tcm; | ||
83 | struct sita_pvt *pvt; | ||
84 | struct tcm_area area = {0}; | ||
85 | s32 i; | ||
86 | |||
87 | if (width == 0 || height == 0) | ||
88 | return NULL; | ||
89 | |||
90 | tcm = kzalloc(sizeof(*tcm), GFP_KERNEL); | ||
91 | pvt = kzalloc(sizeof(*pvt), GFP_KERNEL); | ||
92 | if (!tcm || !pvt) | ||
93 | goto error; | ||
94 | |||
95 | /* Updating the pointers to SiTA implementation APIs */ | ||
96 | tcm->height = height; | ||
97 | tcm->width = width; | ||
98 | tcm->reserve_2d = sita_reserve_2d; | ||
99 | tcm->reserve_1d = sita_reserve_1d; | ||
100 | tcm->free = sita_free; | ||
101 | tcm->deinit = sita_deinit; | ||
102 | tcm->pvt = (void *)pvt; | ||
103 | |||
104 | spin_lock_init(&(pvt->lock)); | ||
105 | |||
106 | /* Creating tam map */ | ||
107 | pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL); | ||
108 | if (!pvt->map) | ||
109 | goto error; | ||
110 | |||
111 | for (i = 0; i < tcm->width; i++) { | ||
112 | pvt->map[i] = | ||
113 | kmalloc(sizeof(**pvt->map) * tcm->height, | ||
114 | GFP_KERNEL); | ||
115 | if (pvt->map[i] == NULL) { | ||
116 | while (i--) | ||
117 | kfree(pvt->map[i]); | ||
118 | kfree(pvt->map); | ||
119 | goto error; | ||
120 | } | ||
121 | } | ||
122 | |||
123 | if (attr && attr->x <= tcm->width && attr->y <= tcm->height) { | ||
124 | pvt->div_pt.x = attr->x; | ||
125 | pvt->div_pt.y = attr->y; | ||
126 | |||
127 | } else { | ||
128 | /* Defaulting to 3:1 ratio on width for 2D area split */ | ||
129 | /* Defaulting to 3:1 ratio on height for 2D and 1D split */ | ||
130 | pvt->div_pt.x = (tcm->width * 3) / 4; | ||
131 | pvt->div_pt.y = (tcm->height * 3) / 4; | ||
132 | } | ||
133 | |||
134 | spin_lock(&(pvt->lock)); | ||
135 | assign(&area, 0, 0, width - 1, height - 1); | ||
136 | fill_area(tcm, &area, NULL); | ||
137 | spin_unlock(&(pvt->lock)); | ||
138 | return tcm; | ||
139 | |||
140 | error: | ||
141 | kfree(tcm); | ||
142 | kfree(pvt); | ||
143 | return NULL; | ||
144 | } | ||
145 | |||
146 | static void sita_deinit(struct tcm *tcm) | ||
147 | { | ||
148 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | ||
149 | struct tcm_area area = {0}; | ||
150 | s32 i; | ||
151 | |||
152 | area.p1.x = tcm->width - 1; | ||
153 | area.p1.y = tcm->height - 1; | ||
154 | |||
155 | spin_lock(&(pvt->lock)); | ||
156 | fill_area(tcm, &area, NULL); | ||
157 | spin_unlock(&(pvt->lock)); | ||
158 | |||
159 | for (i = 0; i < tcm->height; i++) | ||
160 | kfree(pvt->map[i]); | ||
161 | kfree(pvt->map); | ||
162 | kfree(pvt); | ||
163 | } | ||
164 | |||
165 | /** | ||
166 | * Reserve a 1D area in the container | ||
167 | * | ||
168 | * @param num_slots size of 1D area | ||
169 | * @param area pointer to the area that will be populated with the | ||
170 | * reserved area | ||
171 | * | ||
172 | * @return 0 on success, non-0 error value on failure. | ||
173 | */ | 37 | */ |
174 | static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots, | 38 | static void free_slots(unsigned long pos, uint16_t w, uint16_t h, |
175 | struct tcm_area *area) | 39 | unsigned long *map, uint16_t stride) |
176 | { | 40 | { |
177 | s32 ret; | 41 | int i; |
178 | struct tcm_area field = {0}; | ||
179 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | ||
180 | 42 | ||
181 | spin_lock(&(pvt->lock)); | 43 | for (i = 0; i < h; i++, pos += stride) |
182 | 44 | bitmap_clear(map, pos, w); | |
183 | /* Scanning entire container */ | ||
184 | assign(&field, tcm->width - 1, tcm->height - 1, 0, 0); | ||
185 | |||
186 | ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area); | ||
187 | if (!ret) | ||
188 | /* update map */ | ||
189 | fill_area(tcm, area, area); | ||
190 | |||
191 | spin_unlock(&(pvt->lock)); | ||
192 | return ret; | ||
193 | } | 45 | } |
194 | 46 | ||
195 | /** | 47 | /* |
196 | * Reserve a 2D area in the container | 48 | * w width in slots |
197 | * | 49 | * pos ptr to position |
198 | * @param w width | 50 | * map ptr to bitmap |
199 | * @param h height | 51 | * num_bits number of bits in bitmap |
200 | * @param area pointer to the area that will be populated with the reserved | ||
201 | * area | ||
202 | * | ||
203 | * @return 0 on success, non-0 error value on failure. | ||
204 | */ | 52 | */ |
205 | static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align, | 53 | static int r2l_b2t_1d(uint16_t w, unsigned long *pos, unsigned long *map, |
206 | struct tcm_area *area) | 54 | size_t num_bits) |
207 | { | 55 | { |
208 | s32 ret; | 56 | unsigned long search_count = 0; |
209 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | 57 | unsigned long bit; |
58 | bool area_found = false; | ||
210 | 59 | ||
211 | /* not supporting more than 64 as alignment */ | 60 | *pos = num_bits - w; |
212 | if (align > 64) | ||
213 | return -EINVAL; | ||
214 | 61 | ||
215 | /* we prefer 1, 32 and 64 as alignment */ | 62 | while (search_count < num_bits) { |
216 | align = align <= 1 ? 1 : align <= 32 ? 32 : 64; | 63 | bit = find_next_bit(map, num_bits, *pos); |
217 | 64 | ||
218 | spin_lock(&(pvt->lock)); | 65 | if (bit - *pos >= w) { |
219 | ret = scan_areas_and_find_fit(tcm, w, h, align, area); | 66 | /* found a long enough free area */ |
220 | if (!ret) | 67 | bitmap_set(map, *pos, w); |
221 | /* update map */ | 68 | area_found = true; |
222 | fill_area(tcm, area, area); | 69 | break; |
70 | } | ||
223 | 71 | ||
224 | spin_unlock(&(pvt->lock)); | 72 | search_count = num_bits - bit + w; |
225 | return ret; | 73 | *pos = bit - w; |
74 | } | ||
75 | |||
76 | return (area_found) ? 0 : -ENOMEM; | ||
226 | } | 77 | } |
227 | 78 | ||
228 | /** | 79 | /* |
229 | * Unreserve a previously allocated 2D or 1D area | 80 | * w = width in slots |
230 | * @param area area to be freed | 81 | * h = height in slots |
231 | * @return 0 - success | 82 | * a = align in slots (mask, 2^n-1, 0 is unaligned) |
83 | * offset = offset in bytes from 4KiB | ||
84 | * pos = position in bitmap for buffer | ||
85 | * map = bitmap ptr | ||
86 | * num_bits = size of bitmap | ||
87 | * stride = bits in one row of container | ||
232 | */ | 88 | */ |
233 | static s32 sita_free(struct tcm *tcm, struct tcm_area *area) | 89 | static int l2r_t2b(uint16_t w, uint16_t h, uint16_t a, int16_t offset, |
90 | unsigned long *pos, unsigned long slot_bytes, | ||
91 | unsigned long *map, size_t num_bits, size_t slot_stride) | ||
234 | { | 92 | { |
235 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | 93 | int i; |
94 | unsigned long index; | ||
95 | bool area_free; | ||
96 | unsigned long slots_per_band = PAGE_SIZE / slot_bytes; | ||
97 | unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0; | ||
98 | unsigned long curr_bit = bit_offset; | ||
99 | |||
100 | /* reset alignment to 1 if we are matching a specific offset */ | ||
101 | /* adjust alignment - 1 to get to the format expected in bitmaps */ | ||
102 | a = (offset > 0) ? 0 : a - 1; | ||
103 | |||
104 | /* FIXME Return error if slots_per_band > stride */ | ||
105 | |||
106 | while (curr_bit < num_bits) { | ||
107 | *pos = bitmap_find_next_zero_area(map, num_bits, curr_bit, w, | ||
108 | a); | ||
109 | |||
110 | /* skip forward if we are not at the right offset */ | ||
111 | if (bit_offset > 0 && (*pos % slots_per_band != bit_offset)) { | ||
112 | curr_bit = ALIGN(*pos, slots_per_band) + bit_offset; | ||
113 | continue; | ||
114 | } | ||
236 | 115 | ||
237 | spin_lock(&(pvt->lock)); | 116 | /* skip forward to next row if we overlap end of row */ |
117 | if ((*pos % slot_stride) + w > slot_stride) { | ||
118 | curr_bit = ALIGN(*pos, slot_stride) + bit_offset; | ||
119 | continue; | ||
120 | } | ||
238 | 121 | ||
239 | /* check that this is in fact an existing area */ | 122 | /* TODO: Handle overlapping 4K boundaries */ |
240 | WARN_ON(pvt->map[area->p0.x][area->p0.y] != area || | ||
241 | pvt->map[area->p1.x][area->p1.y] != area); | ||
242 | 123 | ||
243 | /* Clear the contents of the associated tiles in the map */ | 124 | /* break out of loop if we will go past end of container */ |
244 | fill_area(tcm, area, NULL); | 125 | if ((*pos + slot_stride * h) > num_bits) |
126 | break; | ||
245 | 127 | ||
246 | spin_unlock(&(pvt->lock)); | 128 | /* generate mask that represents our matching pattern */ |
129 | bitmap_clear(mask, 0, slot_stride); | ||
130 | bitmap_set(mask, (*pos % BITS_PER_LONG), w); | ||
247 | 131 | ||
248 | return 0; | 132 | /* assume the area is free until we find an overlap */ |
249 | } | 133 | area_free = true; |
250 | |||
251 | /** | ||
252 | * Note: In general the coordinates in the scan field are relevant to the scan | ||
253 | * sweep directions. The scan origin (e.g. top-left corner) will always be | ||
254 | * the p0 member of the field. Therefore, for a scan from top-left p0.x <= p1.x | ||
255 | * and p0.y <= p1.y; whereas, for a scan from bottom-right p1.x <= p0.x and p1.y | ||
256 | * <= p0.y | ||
257 | */ | ||
258 | 134 | ||
259 | /** | 135 | /* check subsequent rows to see if complete area is free */ |
260 | * Raster scan horizontally right to left from top to bottom to find a place for | 136 | for (i = 1; i < h; i++) { |
261 | * a 2D area of given size inside a scan field. | 137 | index = *pos / BITS_PER_LONG + i * 8; |
262 | * | 138 | if (bitmap_intersects(&map[index], mask, |
263 | * @param w width of desired area | 139 | (*pos % BITS_PER_LONG) + w)) { |
264 | * @param h height of desired area | 140 | area_free = false; |
265 | * @param align desired area alignment | ||
266 | * @param area pointer to the area that will be set to the best position | ||
267 | * @param field area to scan (inclusive) | ||
268 | * | ||
269 | * @return 0 on success, non-0 error value on failure. | ||
270 | */ | ||
271 | static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align, | ||
272 | struct tcm_area *field, struct tcm_area *area) | ||
273 | { | ||
274 | s32 x, y; | ||
275 | s16 start_x, end_x, start_y, end_y, found_x = -1; | ||
276 | struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map; | ||
277 | struct score best = {{0}, {0}, {0}, 0}; | ||
278 | |||
279 | start_x = field->p0.x; | ||
280 | end_x = field->p1.x; | ||
281 | start_y = field->p0.y; | ||
282 | end_y = field->p1.y; | ||
283 | |||
284 | /* check scan area co-ordinates */ | ||
285 | if (field->p0.x < field->p1.x || | ||
286 | field->p1.y < field->p0.y) | ||
287 | return -EINVAL; | ||
288 | |||
289 | /* check if allocation would fit in scan area */ | ||
290 | if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y)) | ||
291 | return -ENOSPC; | ||
292 | |||
293 | /* adjust start_x and end_y, as allocation would not fit beyond */ | ||
294 | start_x = ALIGN_DOWN(start_x - w + 1, align); /* - 1 to be inclusive */ | ||
295 | end_y = end_y - h + 1; | ||
296 | |||
297 | /* check if allocation would still fit in scan area */ | ||
298 | if (start_x < end_x) | ||
299 | return -ENOSPC; | ||
300 | |||
301 | /* scan field top-to-bottom, right-to-left */ | ||
302 | for (y = start_y; y <= end_y; y++) { | ||
303 | for (x = start_x; x >= end_x; x -= align) { | ||
304 | if (is_area_free(map, x, y, w, h)) { | ||
305 | found_x = x; | ||
306 | |||
307 | /* update best candidate */ | ||
308 | if (update_candidate(tcm, x, y, w, h, field, | ||
309 | CR_R2L_T2B, &best)) | ||
310 | goto done; | ||
311 | |||
312 | /* change upper x bound */ | ||
313 | end_x = x + 1; | ||
314 | break; | 141 | break; |
315 | } else if (map[x][y] && map[x][y]->is2d) { | ||
316 | /* step over 2D areas */ | ||
317 | x = ALIGN(map[x][y]->p0.x - w + 1, align); | ||
318 | } | 142 | } |
319 | } | 143 | } |
320 | 144 | ||
321 | /* break if you find a free area shouldering the scan field */ | 145 | if (area_free) |
322 | if (found_x == start_x) | ||
323 | break; | 146 | break; |
324 | } | ||
325 | |||
326 | if (!best.a.tcm) | ||
327 | return -ENOSPC; | ||
328 | done: | ||
329 | assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y); | ||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | /** | ||
334 | * Raster scan horizontally left to right from top to bottom to find a place for | ||
335 | * a 2D area of given size inside a scan field. | ||
336 | * | ||
337 | * @param w width of desired area | ||
338 | * @param h height of desired area | ||
339 | * @param align desired area alignment | ||
340 | * @param area pointer to the area that will be set to the best position | ||
341 | * @param field area to scan (inclusive) | ||
342 | * | ||
343 | * @return 0 on success, non-0 error value on failure. | ||
344 | */ | ||
345 | static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align, | ||
346 | struct tcm_area *field, struct tcm_area *area) | ||
347 | { | ||
348 | s32 x, y; | ||
349 | s16 start_x, end_x, start_y, end_y, found_x = -1; | ||
350 | struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map; | ||
351 | struct score best = {{0}, {0}, {0}, 0}; | ||
352 | |||
353 | start_x = field->p0.x; | ||
354 | end_x = field->p1.x; | ||
355 | start_y = field->p0.y; | ||
356 | end_y = field->p1.y; | ||
357 | |||
358 | /* check scan area co-ordinates */ | ||
359 | if (field->p1.x < field->p0.x || | ||
360 | field->p1.y < field->p0.y) | ||
361 | return -EINVAL; | ||
362 | |||
363 | /* check if allocation would fit in scan area */ | ||
364 | if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y)) | ||
365 | return -ENOSPC; | ||
366 | |||
367 | start_x = ALIGN(start_x, align); | ||
368 | |||
369 | /* check if allocation would still fit in scan area */ | ||
370 | if (w > LEN(end_x, start_x)) | ||
371 | return -ENOSPC; | ||
372 | |||
373 | /* adjust end_x and end_y, as allocation would not fit beyond */ | ||
374 | end_x = end_x - w + 1; /* + 1 to be inclusive */ | ||
375 | end_y = end_y - h + 1; | ||
376 | |||
377 | /* scan field top-to-bottom, left-to-right */ | ||
378 | for (y = start_y; y <= end_y; y++) { | ||
379 | for (x = start_x; x <= end_x; x += align) { | ||
380 | if (is_area_free(map, x, y, w, h)) { | ||
381 | found_x = x; | ||
382 | |||
383 | /* update best candidate */ | ||
384 | if (update_candidate(tcm, x, y, w, h, field, | ||
385 | CR_L2R_T2B, &best)) | ||
386 | goto done; | ||
387 | /* change upper x bound */ | ||
388 | end_x = x - 1; | ||
389 | 147 | ||
390 | break; | 148 | /* go forward past this match */ |
391 | } else if (map[x][y] && map[x][y]->is2d) { | 149 | if (bit_offset > 0) |
392 | /* step over 2D areas */ | 150 | curr_bit = ALIGN(*pos, slots_per_band) + bit_offset; |
393 | x = ALIGN_DOWN(map[x][y]->p1.x, align); | 151 | else |
394 | } | 152 | curr_bit = *pos + a + 1; |
395 | } | 153 | } |
396 | 154 | ||
397 | /* break if you find a free area shouldering the scan field */ | 155 | if (area_free) { |
398 | if (found_x == start_x) | 156 | /* set area as in-use. iterate over rows */ |
399 | break; | 157 | for (i = 0, index = *pos; i < h; i++, index += slot_stride) |
158 | bitmap_set(map, index, w); | ||
400 | } | 159 | } |
401 | 160 | ||
402 | if (!best.a.tcm) | 161 | return (area_free) ? 0 : -ENOMEM; |
403 | return -ENOSPC; | ||
404 | done: | ||
405 | assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y); | ||
406 | return 0; | ||
407 | } | 162 | } |
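
The subtlest part of l2r_t2b() is the offset handling: with slot_bytes-wide slots, each 4KiB page spans slots_per_band slots, and a requested byte offset becomes a slot offset (bit_offset) within every band, so any candidate whose position modulo slots_per_band misses that offset is skipped ahead to the matching offset in the next band. The worked sketch below shows only that arithmetic; PAGE_SIZE is taken as 4096, and slot_bytes = 256, offset = 512 and the candidate position 37 are invented inputs.

        #include <stdio.h>

        #define PAGE_SIZE 4096UL
        #define ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a))

        int main(void)
        {
                unsigned long slot_bytes = 256;                          /* bytes per slot */
                long offset = 512;                                       /* bytes from 4KiB */
                unsigned long slots_per_band = PAGE_SIZE / slot_bytes;   /* 16 */
                unsigned long bit_offset = offset / slot_bytes;          /* 2 */
                unsigned long pos = 37;                                  /* candidate slot */

                if (pos % slots_per_band != bit_offset) {
                        /* same skip l2r_t2b() does: jump to the offset in the next band */
                        unsigned long curr_bit = ALIGN(pos, slots_per_band) + bit_offset;

                        printf("skip %lu -> %lu\n", pos, curr_bit);      /* 37 -> 50 */
                }
                return 0;
        }
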
408 | 163 | ||
409 | /** | 164 | static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots, |
410 | * Raster scan horizontally right to left from bottom to top to find a place | 165 | struct tcm_area *area) |
411 | * for a 1D area of given size inside a scan field. | ||
412 | * | ||
413 | * @param num_slots size of desired area | ||
414 | * @param align desired area alignment | ||
415 | * @param area pointer to the area that will be set to the best | ||
416 | * position | ||
417 | * @param field area to scan (inclusive) | ||
418 | * | ||
419 | * @return 0 on success, non-0 error value on failure. | ||
420 | */ | ||
421 | static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots, | ||
422 | struct tcm_area *field, struct tcm_area *area) | ||
423 | { | 166 | { |
424 | s32 found = 0; | 167 | unsigned long pos; |
425 | s16 x, y; | 168 | int ret; |
426 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | 169 | |
427 | struct tcm_area *p; | 170 | spin_lock(&(tcm->lock)); |
428 | 171 | ret = r2l_b2t_1d(num_slots, &pos, tcm->bitmap, tcm->map_size); | |
429 | /* check scan area co-ordinates */ | 172 | if (!ret) { |
430 | if (field->p0.y < field->p1.y) | 173 | area->p0.x = pos % tcm->width; |
431 | return -EINVAL; | 174 | area->p0.y = pos / tcm->width; |
432 | 175 | area->p1.x = (pos + num_slots - 1) % tcm->width; | |
433 | /** | 176 | area->p1.y = (pos + num_slots - 1) / tcm->width; |
434 | * Currently we only support full width 1D scan field, which makes sense | ||
435 | * since 1D slot-ordering spans the full container width. | ||
436 | */ | ||
437 | if (tcm->width != field->p0.x - field->p1.x + 1) | ||
438 | return -EINVAL; | ||
439 | |||
440 | /* check if allocation would fit in scan area */ | ||
441 | if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y)) | ||
442 | return -ENOSPC; | ||
443 | |||
444 | x = field->p0.x; | ||
445 | y = field->p0.y; | ||
446 | |||
447 | /* find num_slots consecutive free slots to the left */ | ||
448 | while (found < num_slots) { | ||
449 | if (y < 0) | ||
450 | return -ENOSPC; | ||
451 | |||
452 | /* remember bottom-right corner */ | ||
453 | if (found == 0) { | ||
454 | area->p1.x = x; | ||
455 | area->p1.y = y; | ||
456 | } | ||
457 | |||
458 | /* skip busy regions */ | ||
459 | p = pvt->map[x][y]; | ||
460 | if (p) { | ||
461 | /* move to left of 2D areas, top left of 1D */ | ||
462 | x = p->p0.x; | ||
463 | if (!p->is2d) | ||
464 | y = p->p0.y; | ||
465 | |||
466 | /* start over */ | ||
467 | found = 0; | ||
468 | } else { | ||
469 | /* count consecutive free slots */ | ||
470 | found++; | ||
471 | if (found == num_slots) | ||
472 | break; | ||
473 | } | ||
474 | |||
475 | /* move to the left */ | ||
476 | if (x == 0) | ||
477 | y--; | ||
478 | x = (x ? : tcm->width) - 1; | ||
479 | |||
480 | } | 177 | } |
178 | spin_unlock(&(tcm->lock)); | ||
481 | 179 | ||
482 | /* set top-left corner */ | 180 | return ret; |
483 | area->p0.x = x; | ||
484 | area->p0.y = y; | ||
485 | return 0; | ||
486 | } | 181 | } |
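
Mapping the found bitmap position back to container coordinates is a single divide/modulo by the container width; because a 1D run may wrap across several rows, only the first and last slots define p0 and p1. The numbers in this sketch (a 256-slot-wide container, a 600-slot run at position 1000) are invented for illustration.

        #include <stdio.h>

        int main(void)
        {
                unsigned long width = 256;              /* container width in slots */
                unsigned long pos = 1000, num_slots = 600;
                unsigned long last = pos + num_slots - 1;

                /* same mapping sita_reserve_1d() applies to the found position */
                printf("p0 = (%lu, %lu)\n", pos % width, pos / width);    /* (232, 3) */
                printf("p1 = (%lu, %lu)\n", last % width, last / width);  /* (63, 6) */
                return 0;
        }
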
487 | 182 | ||
488 | /** | 183 | static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u16 align, |
489 | * Find a place for a 2D area of given size inside a scan field based on its | 184 | int16_t offset, uint16_t slot_bytes, |
490 | * alignment needs. | 185 | struct tcm_area *area) |
491 | * | ||
492 | * @param w width of desired area | ||
493 | * @param h height of desired area | ||
494 | * @param align desired area alignment | ||
495 | * @param area pointer to the area that will be set to the best position | ||
496 | * | ||
497 | * @return 0 on success, non-0 error value on failure. | ||
498 | */ | ||
499 | static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align, | ||
500 | struct tcm_area *area) | ||
501 | { | 186 | { |
502 | s32 ret = 0; | 187 | unsigned long pos; |
503 | struct tcm_area field = {0}; | 188 | int ret; |
504 | u16 boundary_x, boundary_y; | 189 | |
505 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | 190 | spin_lock(&(tcm->lock)); |
506 | 191 | ret = l2r_t2b(w, h, align, offset, &pos, slot_bytes, tcm->bitmap, | |
507 | if (align > 1) { | 192 | tcm->map_size, tcm->width); |
508 | /* prefer top-left corner */ | 193 | |
509 | boundary_x = pvt->div_pt.x - 1; | 194 | if (!ret) { |
510 | boundary_y = pvt->div_pt.y - 1; | 195 | area->p0.x = pos % tcm->width; |
511 | 196 | area->p0.y = pos / tcm->width; | |
512 | /* expand width and height if needed */ | 197 | area->p1.x = area->p0.x + w - 1; |
513 | if (w > pvt->div_pt.x) | 198 | area->p1.y = area->p0.y + h - 1; |
514 | boundary_x = tcm->width - 1; | ||
515 | if (h > pvt->div_pt.y) | ||
516 | boundary_y = tcm->height - 1; | ||
517 | |||
518 | assign(&field, 0, 0, boundary_x, boundary_y); | ||
519 | ret = scan_l2r_t2b(tcm, w, h, align, &field, area); | ||
520 | |||
521 | /* scan whole container if failed, but do not scan 2x */ | ||
522 | if (ret != 0 && (boundary_x != tcm->width - 1 || | ||
523 | boundary_y != tcm->height - 1)) { | ||
524 | /* scan the entire container if nothing found */ | ||
525 | assign(&field, 0, 0, tcm->width - 1, tcm->height - 1); | ||
526 | ret = scan_l2r_t2b(tcm, w, h, align, &field, area); | ||
527 | } | ||
528 | } else if (align == 1) { | ||
529 | /* prefer top-right corner */ | ||
530 | boundary_x = pvt->div_pt.x; | ||
531 | boundary_y = pvt->div_pt.y - 1; | ||
532 | |||
533 | /* expand width and height if needed */ | ||
534 | if (w > (tcm->width - pvt->div_pt.x)) | ||
535 | boundary_x = 0; | ||
536 | if (h > pvt->div_pt.y) | ||
537 | boundary_y = tcm->height - 1; | ||
538 | |||
539 | assign(&field, tcm->width - 1, 0, boundary_x, boundary_y); | ||
540 | ret = scan_r2l_t2b(tcm, w, h, align, &field, area); | ||
541 | |||
542 | /* scan whole container if failed, but do not scan 2x */ | ||
543 | if (ret != 0 && (boundary_x != 0 || | ||
544 | boundary_y != tcm->height - 1)) { | ||
545 | /* scan the entire container if nothing found */ | ||
546 | assign(&field, tcm->width - 1, 0, 0, tcm->height - 1); | ||
547 | ret = scan_r2l_t2b(tcm, w, h, align, &field, | ||
548 | area); | ||
549 | } | ||
550 | } | 199 | } |
200 | spin_unlock(&(tcm->lock)); | ||
551 | 201 | ||
552 | return ret; | 202 | return ret; |
553 | } | 203 | } |
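
For 2D reservations the mapping is even simpler: l2r_t2b() already rejected any candidate that would wrap a row, so the block is a plain rectangle and p1 is just p0 shifted by (w - 1, h - 1). Again the 256-wide container and the position 522 are invented demo values.

        #include <stdio.h>

        int main(void)
        {
                unsigned long width = 256;      /* container width in slots */
                unsigned long pos = 522;        /* slot found by l2r_t2b() */
                unsigned int w = 4, h = 4;      /* requested block size */
                unsigned long x0 = pos % width, y0 = pos / width;

                /* l2r_t2b() rejects candidates that wrap a row, so the block
                 * is a plain rectangle: p1 is p0 shifted by (w - 1, h - 1). */
                printf("p0 = (%lu, %lu), p1 = (%lu, %lu)\n",
                       x0, y0, x0 + w - 1, y0 + h - 1);   /* (10, 2), (13, 5) */
                return 0;
        }
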
554 | 204 | ||
555 | /* check if an entire area is free */ | 205 | static void sita_deinit(struct tcm *tcm) |
556 | static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h) | ||
557 | { | 206 | { |
558 | u16 x = 0, y = 0; | 207 | kfree(tcm); |
559 | for (y = y0; y < y0 + h; y++) { | ||
560 | for (x = x0; x < x0 + w; x++) { | ||
561 | if (map[x][y]) | ||
562 | return false; | ||
563 | } | ||
564 | } | ||
565 | return true; | ||
566 | } | 208 | } |
567 | 209 | ||
568 | /* fills an area with a parent tcm_area */ | 210 | static s32 sita_free(struct tcm *tcm, struct tcm_area *area) |
569 | static void fill_area(struct tcm *tcm, struct tcm_area *area, | ||
570 | struct tcm_area *parent) | ||
571 | { | 211 | { |
572 | s32 x, y; | 212 | unsigned long pos; |
573 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | 213 | uint16_t w, h; |
574 | struct tcm_area a, a_; | ||
575 | |||
576 | /* set area's tcm; otherwise, enumerator considers it invalid */ | ||
577 | area->tcm = tcm; | ||
578 | |||
579 | tcm_for_each_slice(a, *area, a_) { | ||
580 | for (x = a.p0.x; x <= a.p1.x; ++x) | ||
581 | for (y = a.p0.y; y <= a.p1.y; ++y) | ||
582 | pvt->map[x][y] = parent; | ||
583 | 214 | ||
215 | pos = area->p0.x + area->p0.y * tcm->width; | ||
216 | if (area->is2d) { | ||
217 | w = area->p1.x - area->p0.x + 1; | ||
218 | h = area->p1.y - area->p0.y + 1; | ||
219 | } else { | ||
220 | w = area->p1.x + area->p1.y * tcm->width - pos + 1; | ||
221 | h = 1; | ||
584 | } | 222 | } |
223 | |||
224 | spin_lock(&(tcm->lock)); | ||
225 | free_slots(pos, w, h, tcm->bitmap, tcm->width); | ||
226 | spin_unlock(&(tcm->lock)); | ||
227 | return 0; | ||
585 | } | 228 | } |
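
sita_free() reverses both mappings: a 2D area is freed as a w x h rectangle, while a 1D area collapses to a single "row" of (last - first + 1) slots with h forced to 1. This sketch checks that arithmetic against the invented 256-wide container used in the sketches above.

        #include <stdio.h>

        int main(void)
        {
                unsigned long width = 256;                       /* container width */

                /* 2D area (10, 2)..(13, 5): rectangle, freed row by row */
                unsigned long pos2d = 10 + 2 * width;            /* 522 */
                unsigned int w2d = 13 - 10 + 1, h2d = 5 - 2 + 1;

                /* 1D area (232, 3)..(63, 6): one linear run, h forced to 1 */
                unsigned long pos1d = 232 + 3 * width;           /* 1000 */
                unsigned long w1d = 63 + 6 * width - pos1d + 1;  /* 600 */

                printf("2d: pos=%lu w=%u h=%u\n", pos2d, w2d, h2d);
                printf("1d: pos=%lu w=%lu h=1\n", pos1d, w1d);
                return 0;
        }
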
586 | 229 | ||
587 | /** | 230 | struct tcm *sita_init(u16 width, u16 height) |
588 | * Compares a candidate area to the current best area, and if it is a better | ||
589 | * fit, it updates the best to this one. | ||
590 | * | ||
591 | * @param x0, y0, w, h top, left, width, height of candidate area | ||
592 | * @param field scan field | ||
593 | * @param criteria scan criteria | ||
594 | * @param best best candidate and its scores | ||
595 | * | ||
596 | * @return 1 (true) if the candidate area is known to be the final best, so no | ||
597 | * more searching should be performed | ||
598 | */ | ||
599 | static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h, | ||
600 | struct tcm_area *field, s32 criteria, | ||
601 | struct score *best) | ||
602 | { | 231 | { |
603 | struct score me; /* score for area */ | 232 | struct tcm *tcm; |
604 | 233 | size_t map_size = BITS_TO_LONGS(width*height) * sizeof(unsigned long); | |
605 | /* | ||
606 | * NOTE: For horizontal bias we always give the first found, because our | ||
607 | * scan is horizontal-raster-based and the first candidate will always | ||
608 | * have the horizontal bias. | ||
609 | */ | ||
610 | bool first = criteria & CR_BIAS_HORIZONTAL; | ||
611 | |||
612 | assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1); | ||
613 | |||
614 | /* calculate score for current candidate */ | ||
615 | if (!first) { | ||
616 | get_neighbor_stats(tcm, &me.a, &me.n); | ||
617 | me.neighs = me.n.edge + me.n.busy; | ||
618 | get_nearness_factor(field, &me.a, &me.f); | ||
619 | } | ||
620 | |||
621 | /* the 1st candidate is always the best */ | ||
622 | if (!best->a.tcm) | ||
623 | goto better; | ||
624 | 234 | ||
625 | BUG_ON(first); | 235 | if (width == 0 || height == 0) |
236 | return NULL; | ||
626 | 237 | ||
627 | /* diagonal balance check */ | 238 | tcm = kzalloc(sizeof(*tcm) + map_size, GFP_KERNEL); |
628 | if ((criteria & CR_DIAGONAL_BALANCE) && | 239 | if (!tcm) |
629 | best->neighs <= me.neighs && | 240 | goto error; |
630 | (best->neighs < me.neighs || | ||
631 | /* this implies that neighs and occupied match */ | ||
632 | best->n.busy < me.n.busy || | ||
633 | (best->n.busy == me.n.busy && | ||
634 | /* check the nearness factor */ | ||
635 | best->f.x + best->f.y > me.f.x + me.f.y))) | ||
636 | goto better; | ||
637 | 241 | ||
638 | /* not better, keep going */ | 242 | /* Update the function table with the SiTA implementation */ |
639 | return 0; | 243 | tcm->height = height; |
244 | tcm->width = width; | ||
245 | tcm->reserve_2d = sita_reserve_2d; | ||
246 | tcm->reserve_1d = sita_reserve_1d; | ||
247 | tcm->free = sita_free; | ||
248 | tcm->deinit = sita_deinit; | ||
640 | 249 | ||
641 | better: | 250 | spin_lock_init(&tcm->lock); |
642 | /* save current area as best */ | 251 | tcm->bitmap = (unsigned long *)(tcm + 1); |
643 | memcpy(best, &me, sizeof(me)); | 252 | bitmap_clear(tcm->bitmap, 0, width*height); |
644 | best->a.tcm = tcm; | ||
645 | return first; | ||
646 | } | ||
647 | 253 | ||
648 | /** | 254 | tcm->map_size = width*height; |
649 | * Calculate the nearness factor of an area in a search field. The nearness | ||
650 | * factor is smaller if the area is closer to the search origin. | ||
651 | */ | ||
652 | static void get_nearness_factor(struct tcm_area *field, struct tcm_area *area, | ||
653 | struct nearness_factor *nf) | ||
654 | { | ||
655 | /** | ||
656 | * Using signed math as field coordinates may be reversed if | ||
657 | * search direction is right-to-left or bottom-to-top. | ||
658 | */ | ||
659 | nf->x = (s32)(area->p0.x - field->p0.x) * 1000 / | ||
660 | (field->p1.x - field->p0.x); | ||
661 | nf->y = (s32)(area->p0.y - field->p0.y) * 1000 / | ||
662 | (field->p1.y - field->p0.y); | ||
663 | } | ||
664 | 255 | ||
665 | /* get neighbor statistics */ | 256 | return tcm; |
666 | static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area, | ||
667 | struct neighbor_stats *stat) | ||
668 | { | ||
669 | s16 x = 0, y = 0; | ||
670 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | ||
671 | |||
672 | /* Clearing any existing values */ | ||
673 | memset(stat, 0, sizeof(*stat)); | ||
674 | |||
675 | /* process top & bottom edges */ | ||
676 | for (x = area->p0.x; x <= area->p1.x; x++) { | ||
677 | if (area->p0.y == 0) | ||
678 | stat->edge++; | ||
679 | else if (pvt->map[x][area->p0.y - 1]) | ||
680 | stat->busy++; | ||
681 | |||
682 | if (area->p1.y == tcm->height - 1) | ||
683 | stat->edge++; | ||
684 | else if (pvt->map[x][area->p1.y + 1]) | ||
685 | stat->busy++; | ||
686 | } | ||
687 | 257 | ||
688 | /* process left & right edges */ | 258 | error: |
689 | for (y = area->p0.y; y <= area->p1.y; ++y) { | 259 | kfree(tcm); |
690 | if (area->p0.x == 0) | 260 | return NULL; |
691 | stat->edge++; | ||
692 | else if (pvt->map[area->p0.x - 1][y]) | ||
693 | stat->busy++; | ||
694 | |||
695 | if (area->p1.x == tcm->width - 1) | ||
696 | stat->edge++; | ||
697 | else if (pvt->map[area->p1.x + 1][y]) | ||
698 | stat->busy++; | ||
699 | } | ||
700 | } | 261 | } |
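
sita_init() sizes the bitmap with BITS_TO_LONGS() and folds it into the same allocation as struct tcm, pointing tcm->bitmap just past the end of the struct, so there is no separate allocation to track or free. A userspace sketch of that co-allocation follows, with calloc() standing in for kzalloc() and the struct pared down to the fields the sketch uses.

        #include <stdio.h>
        #include <stdlib.h>

        #define BITS_PER_LONG (8 * sizeof(unsigned long))
        #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

        struct tcm {
                unsigned short width, height;
                unsigned long *bitmap;          /* points into the same allocation */
                size_t map_size;                /* size in bits, not bytes */
        };

        static struct tcm *sita_init(unsigned short width, unsigned short height)
        {
                size_t map_size = BITS_TO_LONGS(width * height) * sizeof(unsigned long);
                struct tcm *tcm;

                if (width == 0 || height == 0)
                        return NULL;

                tcm = calloc(1, sizeof(*tcm) + map_size);   /* kzalloc() stand-in */
                if (!tcm)
                        return NULL;

                tcm->width = width;
                tcm->height = height;
                tcm->bitmap = (unsigned long *)(tcm + 1);   /* bitmap follows struct */
                tcm->map_size = width * height;
                return tcm;
        }

        int main(void)
        {
                struct tcm *tcm = sita_init(256, 128);

                if (tcm)
                        printf("container %ux%u, %zu slots\n",
                               (unsigned)tcm->width, (unsigned)tcm->height,
                               tcm->map_size);
                free(tcm);
                return 0;
        }
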
diff --git a/drivers/gpu/drm/omapdrm/tcm.h b/drivers/gpu/drm/omapdrm/tcm.h index a8d5ce47686f..ef7df7d6fc84 100644 --- a/drivers/gpu/drm/omapdrm/tcm.h +++ b/drivers/gpu/drm/omapdrm/tcm.h | |||
@@ -61,18 +61,17 @@ struct tcm { | |||
61 | 61 | ||
62 | unsigned int y_offset; /* offset to use for y coordinates */ | 62 | unsigned int y_offset; /* offset to use for y coordinates */ |
63 | 63 | ||
64 | /* 'pvt' structure shall contain any tcm details (attr) along with | 64 | spinlock_t lock; |
65 | linked list of allocated areas and mutex for mutually exclusive access | 65 | unsigned long *bitmap; |
66 | to the list. It may also contain copies of width and height to notice | 66 | size_t map_size; |
67 | any changes to the publicly available width and height fields. */ | ||
68 | void *pvt; | ||
69 | 67 | ||
70 | /* function table */ | 68 | /* function table */ |
71 | s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u8 align, | 69 | s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u16 align, |
70 | int16_t offset, uint16_t slot_bytes, | ||
72 | struct tcm_area *area); | 71 | struct tcm_area *area); |
73 | s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area); | 72 | s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area); |
74 | s32 (*free) (struct tcm *tcm, struct tcm_area *area); | 73 | s32 (*free)(struct tcm *tcm, struct tcm_area *area); |
75 | void (*deinit) (struct tcm *tcm); | 74 | void (*deinit)(struct tcm *tcm); |
76 | }; | 75 | }; |
77 | 76 | ||
78 | /*============================================================================= | 77 | /*============================================================================= |
@@ -91,7 +90,7 @@ struct tcm { | |||
91 | * | 90 | * |
92 | */ | 91 | */ |
93 | 92 | ||
94 | struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr); | 93 | struct tcm *sita_init(u16 width, u16 height); |
95 | 94 | ||
96 | 95 | ||
97 | /** | 96 | /** |
@@ -120,6 +119,9 @@ static inline void tcm_deinit(struct tcm *tcm) | |||
120 | * all values may be supported by the container manager, | 119 | * all values may be supported by the container manager, |
121 | * but it must support 0 (1), 32 and 64. | 120 | * but it must support 0 (1), 32 and 64. |
122 | * 0 value is equivalent to 1. | 121 | * 0 value is equivalent to 1. |
122 | * @param offset Offset requirement, in bytes. This is the offset | ||
123 | * from a 4KiB aligned virtual address. | ||
124 | * @param slot_bytes Width of slot in bytes | ||
123 | * @param area Pointer to where the reserved area should be stored. | 125 | * @param area Pointer to where the reserved area should be stored. |
124 | * | 126 | * |
125 | * @return 0 on success. Non-0 error code on failure. Also, | 127 | * @return 0 on success. Non-0 error code on failure. Also, |
@@ -129,7 +131,8 @@ static inline void tcm_deinit(struct tcm *tcm) | |||
129 | * allocation. | 131 | * allocation. |
130 | */ | 132 | */ |
131 | static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height, | 133 | static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height, |
132 | u16 align, struct tcm_area *area) | 134 | u16 align, int16_t offset, uint16_t slot_bytes, |
135 | struct tcm_area *area) | ||
133 | { | 136 | { |
134 | /* perform rudimentary error checking */ | 137 | /* perform rudimentary error checking */ |
135 | s32 res = tcm == NULL ? -ENODEV : | 138 | s32 res = tcm == NULL ? -ENODEV : |
@@ -140,7 +143,8 @@ static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height, | |||
140 | 143 | ||
141 | if (!res) { | 144 | if (!res) { |
142 | area->is2d = true; | 145 | area->is2d = true; |
143 | res = tcm->reserve_2d(tcm, height, width, align, area); | 146 | res = tcm->reserve_2d(tcm, height, width, align, offset, |
147 | slot_bytes, area); | ||
144 | area->tcm = res ? NULL : tcm; | 148 | area->tcm = res ? NULL : tcm; |
145 | } | 149 | } |
146 | 150 | ||
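
For callers, the visible change in tcm.h is the two extra reserve_2d arguments and the leaner function table. The pared-down userspace mirror below shows the inline wrapper's rudimentary checks and the dispatch through the table; the struct layouts and the trivial sita_reserve_2d() body are simplified stand-ins for illustration, not the driver's actual code.

        #include <errno.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Pared-down tcm function table, mirroring the header's layout. */
        struct tcm_area { int is2d; };
        struct tcm {
                uint16_t width, height;
                int (*reserve_2d)(struct tcm *tcm, uint16_t h, uint16_t w,
                                  uint16_t align, int16_t offset,
                                  uint16_t slot_bytes, struct tcm_area *area);
        };

        static int sita_reserve_2d(struct tcm *tcm, uint16_t h, uint16_t w,
                                   uint16_t align, int16_t offset,
                                   uint16_t slot_bytes, struct tcm_area *area)
        {
                printf("reserve %dx%d, align %d, offset %d, %d B/slot\n",
                       w, h, align, offset, slot_bytes);
                return 0;
        }

        /* Mirror of the inline wrapper: validate, then dispatch via the table. */
        static int tcm_reserve_2d(struct tcm *tcm, uint16_t width, uint16_t height,
                                  uint16_t align, int16_t offset,
                                  uint16_t slot_bytes, struct tcm_area *area)
        {
                int res = tcm == NULL ? -ENODEV :
                        (area == NULL || width == 0 || height == 0) ? -EINVAL : 0;

                if (!res) {
                        area->is2d = 1;
                        res = tcm->reserve_2d(tcm, height, width, align, offset,
                                              slot_bytes, area);
                }
                return res;
        }

        int main(void)
        {
                struct tcm tcm = { .width = 256, .height = 128,
                                   .reserve_2d = sita_reserve_2d };
                struct tcm_area area;

                return tcm_reserve_2d(&tcm, 16, 8, 63, 0, 256, &area);
        }
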
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h index 0750c01bb480..38a3bd847e15 100644 --- a/include/uapi/drm/omap_drm.h +++ b/include/uapi/drm/omap_drm.h | |||
@@ -101,9 +101,6 @@ struct drm_omap_gem_info { | |||
101 | 101 | ||
102 | #define DRM_OMAP_GET_PARAM 0x00 | 102 | #define DRM_OMAP_GET_PARAM 0x00 |
103 | #define DRM_OMAP_SET_PARAM 0x01 | 103 | #define DRM_OMAP_SET_PARAM 0x01 |
104 | /* placeholder for plugin-api | ||
105 | #define DRM_OMAP_GET_BASE 0x02 | ||
106 | */ | ||
107 | #define DRM_OMAP_GEM_NEW 0x03 | 104 | #define DRM_OMAP_GEM_NEW 0x03 |
108 | #define DRM_OMAP_GEM_CPU_PREP 0x04 | 105 | #define DRM_OMAP_GEM_CPU_PREP 0x04 |
109 | #define DRM_OMAP_GEM_CPU_FINI 0x05 | 106 | #define DRM_OMAP_GEM_CPU_FINI 0x05 |
@@ -112,9 +109,6 @@ struct drm_omap_gem_info { | |||
112 | 109 | ||
113 | #define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param) | 110 | #define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param) |
114 | #define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param) | 111 | #define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param) |
115 | /* placeholder for plugin-api | ||
116 | #define DRM_IOCTL_OMAP_GET_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base) | ||
117 | */ | ||
118 | #define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new) | 112 | #define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new) |
119 | #define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep) | 113 | #define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep) |
120 | #define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini) | 114 | #define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini) |