author		Linus Torvalds <torvalds@linux-foundation.org>	2013-02-25 19:46:44 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-25 19:46:44 -0500
commit		fffddfd6c8e0c10c42c6e2cc54ba880fcc36ebbb (patch)
tree		71bc5e597124dbaf7550f1e089d675718b3ed5c0 /drivers/gpu/drm/omapdrm
parent		69086a78bdc973ec0b722be790b146e84ba8a8c4 (diff)
parent		be88298b0a3f771a4802f20c5e66af74bfd1dff1 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm merge from Dave Airlie:
"Highlights:
- TI LCD controller KMS driver
- TI OMAP KMS driver merged from staging
- drop gma500 stub driver
- the fbcon locking fixes
- the vgacon dirty like zebra fix.
- open firmware videomode and hdmi common code helpers
- major locking rework for kms object handling - pageflip/cursor
won't block on polling anymore!
- fbcon helper and prime helper cleanups
- i915: all over the map, haswell power well enhancements, valleyview
macro horrors cleaned up, killing lots of legacy GTT code,
- radeon: CS ioctl unification, deprecated UMS support, gpu reset
rework, VM fixes
- nouveau: reworked thermal code, external dp/tmds encoder support
(anx9805), fences sleep instead of polling,
- exynos: all over the driver fixes."
Lovely conflict in radeon/evergreen_cs.c between commit de0babd60d8d
("drm/radeon: enforce use of radeon_get_ib_value when reading user cmd")
and the new changes that modified the evergreen_dma_cs_parse()
function.
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (508 commits)
drm/tilcdc: only build on arm
drm/i915: Revert hdmi HDP pin checks
drm/tegra: Add list of framebuffers to debugfs
drm/tegra: Fix color expansion
drm/tegra: Split DC_CMD_STATE_CONTROL register write
drm/tegra: Implement page-flipping support
drm/tegra: Implement VBLANK support
drm/tegra: Implement .mode_set_base()
drm/tegra: Add plane support
drm/tegra: Remove bogus tegra_framebuffer structure
drm: Add consistency check for page-flipping
drm/radeon: Use generic HDMI infoframe helpers
drm/tegra: Use generic HDMI infoframe helpers
drm: Add EDID helper documentation
drm: Add HDMI infoframe helpers
video: Add generic HDMI infoframe helpers
drm: Add some missing forward declarations
drm: Move mode tables to drm_edid.c
drm: Remove duplicate drm_mode_cea_vic()
gma500: Fix n, m1 and m2 clock limits for sdvo and lvds
...
Diffstat (limited to 'drivers/gpu/drm/omapdrm')
22 files changed, 8236 insertions, 0 deletions
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
new file mode 100644
index 000000000000..09f65dc3d2c8
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -0,0 +1,25 @@
+
+config DRM_OMAP
+	tristate "OMAP DRM"
+	depends on DRM && !CONFIG_FB_OMAP2
+	depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
+	depends on OMAP2_DSS
+	select DRM_KMS_HELPER
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_SYS_FOPS
+	default n
+	help
+	  DRM display driver for OMAP2/3/4 based boards.
+
+config DRM_OMAP_NUM_CRTCS
+	int "Number of CRTCs"
+	range 1 10
+	default 1 if ARCH_OMAP2 || ARCH_OMAP3
+	default 2 if ARCH_OMAP4
+	depends on DRM_OMAP
+	help
+	  Select the number of video overlays which can be used as framebuffers.
+	  The remaining overlays are reserved for video.
+
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
new file mode 100644
index 000000000000..d85e058f2845
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -0,0 +1,24 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI)
+#
+
+ccflags-y := -Iinclude/drm -Werror
+omapdrm-y := omap_drv.o \
+	omap_irq.o \
+	omap_debugfs.o \
+	omap_crtc.o \
+	omap_plane.o \
+	omap_encoder.o \
+	omap_connector.o \
+	omap_fb.o \
+	omap_fbdev.o \
+	omap_gem.o \
+	omap_gem_dmabuf.o \
+	omap_dmm_tiler.o \
+	tcm-sita.o
+
+# temporary:
+omapdrm-y += omap_gem_helpers.o
+
+obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/gpu/drm/omapdrm/TODO b/drivers/gpu/drm/omapdrm/TODO
new file mode 100644
index 000000000000..4d8c18aa5dd7
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/TODO
@@ -0,0 +1,23 @@
+TODO
+. Where should we do eviction (detach_pages())?  We aren't necessarily
+  accessing the pages via a GART, so maybe we need some other threshold
+  to put a cap on the # of pages that can be pinned.
+  . Use mm_shrinker to trigger unpinning pages.
+  . This is mainly theoretical, since most of these devices don't actually
+    have swap or a hard drive.
+. GEM/shmem backed pages can have existing mappings (kernel linear map,
+  etc.), which isn't really ideal.
+. Revisit GEM sync object infrastructure.  TTM has some framework for this
+  already.  Possibly this could be refactored out and made more common?
+  There should be some way to do this with less wheel-reinvention.
+  . This can be handled by the dma-buf fence/reservation stuff when it
+    lands.
+
+Userspace:
+. git://anongit.freedesktop.org/xorg/driver/xf86-video-omap
+
+Currently tested on
+. OMAP3530 beagleboard
+. OMAP4430 pandaboard
+. OMAP4460 pandaboard
+. OMAP5432 uEVM
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
new file mode 100644
index 000000000000..c451c41a7a7d
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -0,0 +1,296 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_connector.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * connector funcs
+ */
+
+#define to_omap_connector(x) container_of(x, struct omap_connector, base)
+
+struct omap_connector {
+	struct drm_connector base;
+	struct omap_dss_device *dssdev;
+	struct drm_encoder *encoder;
+};
+
+void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+		struct omap_video_timings *timings)
+{
+	mode->clock = timings->pixel_clock;
+
+	mode->hdisplay = timings->x_res;
+	mode->hsync_start = mode->hdisplay + timings->hfp;
+	mode->hsync_end = mode->hsync_start + timings->hsw;
+	mode->htotal = mode->hsync_end + timings->hbp;
+
+	mode->vdisplay = timings->y_res;
+	mode->vsync_start = mode->vdisplay + timings->vfp;
+	mode->vsync_end = mode->vsync_start + timings->vsw;
+	mode->vtotal = mode->vsync_end + timings->vbp;
+
+	mode->flags = 0;
+
+	if (timings->interlace)
+		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+	if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
+		mode->flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
+		mode->flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		mode->flags |= DRM_MODE_FLAG_NVSYNC;
+}
+
+void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+		struct drm_display_mode *mode)
+{
+	timings->pixel_clock = mode->clock;
+
+	timings->x_res = mode->hdisplay;
+	timings->hfp = mode->hsync_start - mode->hdisplay;
+	timings->hsw = mode->hsync_end - mode->hsync_start;
+	timings->hbp = mode->htotal - mode->hsync_end;
+
+	timings->y_res = mode->vdisplay;
+	timings->vfp = mode->vsync_start - mode->vdisplay;
+	timings->vsw = mode->vsync_end - mode->vsync_start;
+	timings->vbp = mode->vtotal - mode->vsync_end;
+
+	timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
+	else
+		timings->hsync_level = OMAPDSS_SIG_ACTIVE_LOW;
+
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		timings->vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
+	else
+		timings->vsync_level = OMAPDSS_SIG_ACTIVE_LOW;
+
+	timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+	timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+	timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
+}
+
+static enum drm_connector_status omap_connector_detect(
+		struct drm_connector *connector, bool force)
+{
+	struct omap_connector *omap_connector = to_omap_connector(connector);
+	struct omap_dss_device *dssdev = omap_connector->dssdev;
+	struct omap_dss_driver *dssdrv = dssdev->driver;
+	enum drm_connector_status ret;
+
+	if (dssdrv->detect) {
+		if (dssdrv->detect(dssdev))
+			ret = connector_status_connected;
+		else
+			ret = connector_status_disconnected;
+	} else {
+		ret = connector_status_unknown;
+	}
+
+	VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);
+
+	return ret;
+}
+
+static void omap_connector_destroy(struct drm_connector *connector)
+{
+	struct omap_connector *omap_connector = to_omap_connector(connector);
+	struct omap_dss_device *dssdev = omap_connector->dssdev;
+
+	DBG("%s", omap_connector->dssdev->name);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(omap_connector);
+
+	omap_dss_put_device(dssdev);
+}
+
+#define MAX_EDID 512
+
+static int omap_connector_get_modes(struct drm_connector *connector)
+{
+	struct omap_connector *omap_connector = to_omap_connector(connector);
+	struct omap_dss_device *dssdev = omap_connector->dssdev;
+	struct omap_dss_driver *dssdrv = dssdev->driver;
+	struct drm_device *dev = connector->dev;
+	int n = 0;
+
+	DBG("%s", omap_connector->dssdev->name);
+
+	/* if display exposes EDID, then we parse that in the normal way to
+	 * build table of supported modes.. otherwise (ie. fixed resolution
+	 * LCD panels) we just return a single mode corresponding to the
+	 * currently configured timings:
+	 */
+	if (dssdrv->read_edid) {
+		void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
+
+		if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
+				drm_edid_is_valid(edid)) {
+			drm_mode_connector_update_edid_property(
+					connector, edid);
+			n = drm_add_edid_modes(connector, edid);
+		} else {
+			drm_mode_connector_update_edid_property(
+					connector, NULL);
+		}
+		kfree(edid);
+	} else {
+		struct drm_display_mode *mode = drm_mode_create(dev);
+		struct omap_video_timings timings = {0};
+
+		dssdrv->get_timings(dssdev, &timings);
+
+		copy_timings_omap_to_drm(mode, &timings);
+
+		mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+		drm_mode_set_name(mode);
+		drm_mode_probed_add(connector, mode);
+
+		n = 1;
+	}
+
+	return n;
+}
+
+static int omap_connector_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode)
+{
+	struct omap_connector *omap_connector = to_omap_connector(connector);
+	struct omap_dss_device *dssdev = omap_connector->dssdev;
+	struct omap_dss_driver *dssdrv = dssdev->driver;
+	struct omap_video_timings timings = {0};
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *new_mode;
+	int ret = MODE_BAD;
+
+	copy_timings_drm_to_omap(&timings, mode);
+	mode->vrefresh = drm_mode_vrefresh(mode);
+
+	if (!dssdrv->check_timings(dssdev, &timings)) {
+		/* check if vrefresh is still valid */
+		new_mode = drm_mode_duplicate(dev, mode);
+		new_mode->clock = timings.pixel_clock;
+		new_mode->vrefresh = 0;
+		if (mode->vrefresh == drm_mode_vrefresh(new_mode))
+			ret = MODE_OK;
+		drm_mode_destroy(dev, new_mode);
+	}
+
+	DBG("connector: mode %s: "
+			"%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+			(ret == MODE_OK) ? "valid" : "invalid",
+			mode->base.id, mode->name, mode->vrefresh, mode->clock,
+			mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal,
+			mode->vdisplay, mode->vsync_start,
+			mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+	return ret;
+}
+
+struct drm_encoder *omap_connector_attached_encoder(
+		struct drm_connector *connector)
+{
+	struct omap_connector *omap_connector = to_omap_connector(connector);
+	return omap_connector->encoder;
+}
+
+static const struct drm_connector_funcs omap_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = omap_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = omap_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
+	.get_modes = omap_connector_get_modes,
+	.mode_valid = omap_connector_mode_valid,
+	.best_encoder = omap_connector_attached_encoder,
+};
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+void omap_connector_flush(struct drm_connector *connector,
+		int x, int y, int w, int h)
+{
+	struct omap_connector *omap_connector = to_omap_connector(connector);
+
+	/* TODO: enable when supported in dss */
+	VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
+}
+
+/* initialize connector */
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+		int connector_type, struct omap_dss_device *dssdev,
+		struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = NULL;
+	struct omap_connector *omap_connector;
+
+	DBG("%s", dssdev->name);
+
+	omap_dss_get_device(dssdev);
+
+	omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
+	if (!omap_connector)
+		goto fail;
+
+	omap_connector->dssdev = dssdev;
+	omap_connector->encoder = encoder;
+
+	connector = &omap_connector->base;
+
+	drm_connector_init(dev, connector, &omap_connector_funcs,
+				connector_type);
+	drm_connector_helper_add(connector, &omap_connector_helper_funcs);
+
+#if 0 /* enable when dss2 supports hotplug */
+	if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_HPD)
+		connector->polled = 0;
+	else
+#endif
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+				DRM_CONNECTOR_POLL_DISCONNECT;
+
+	connector->interlace_allowed = 1;
+	connector->doublescan_allowed = 0;
+
+	drm_sysfs_connector_add(connector);
+
+	return connector;
+
+fail:
+	if (connector)
+		omap_connector_destroy(connector);
+
+	return NULL;
+}
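
A note on the two copy_timings_*() helpers above: the conversion is pure
arithmetic. A drm_display_mode stores absolute positions (hsync_start,
hsync_end, htotal), while omap_video_timings stores interval widths (front
porch, sync width, back porch). A minimal standalone sketch of the same
round-trip arithmetic, using simplified stand-in structs rather than the real
drm/omapdss types (the real helpers also handle flags and vertical timings):

    #include <stdio.h>

    /* simplified stand-ins for drm_display_mode / omap_video_timings */
    struct mode { int hdisplay, hsync_start, hsync_end, htotal; };
    struct timings { int x_res, hfp, hsw, hbp; };

    static void drm_to_omap(struct timings *t, const struct mode *m)
    {
        t->x_res = m->hdisplay;
        t->hfp = m->hsync_start - m->hdisplay;  /* horizontal front porch */
        t->hsw = m->hsync_end - m->hsync_start; /* hsync width */
        t->hbp = m->htotal - m->hsync_end;      /* horizontal back porch */
    }

    int main(void)
    {
        /* horizontal timings of CEA mode 4 (1280x720@60) */
        struct mode m = { 1280, 1390, 1430, 1650 };
        struct timings t;

        drm_to_omap(&t, &m);
        printf("hfp=%d hsw=%d hbp=%d\n", t.hfp, t.hsw, t.hbp);
        /* prints: hfp=110 hsw=40 hbp=220 */
        return 0;
    }
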
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
new file mode 100644
index 000000000000..bec66a490b8f
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -0,0 +1,654 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_crtc.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include <drm/drm_mode.h>
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
+
+struct omap_crtc {
+	struct drm_crtc base;
+	struct drm_plane *plane;
+
+	const char *name;
+	int pipe;
+	enum omap_channel channel;
+	struct omap_overlay_manager_info info;
+
+	/*
+	 * Temporary: eventually this will go away, but it is needed
+	 * for now to keep the outputs happy.  (They only need
+	 * mgr->id.)  Eventually this will be replaced w/ something
+	 * more common-panel-framework-y
+	 */
+	struct omap_overlay_manager mgr;
+
+	struct omap_video_timings timings;
+	bool enabled;
+	bool full_update;
+
+	struct omap_drm_apply apply;
+
+	struct omap_drm_irq apply_irq;
+	struct omap_drm_irq error_irq;
+
+	/* list of in-progress applies: */
+	struct list_head pending_applies;
+
+	/* list of queued applies: */
+	struct list_head queued_applies;
+
+	/* for handling queued and in-progress applies: */
+	struct work_struct apply_work;
+
+	/* if there is a pending flip, these will be non-null: */
+	struct drm_pending_vblank_event *event;
+	struct drm_framebuffer *old_fb;
+
+	/* for handling page flips without caring about what context
+	 * the callback is called from.  Possibly we should just
+	 * make omap_gem always call the cb from the worker so
+	 * we don't have to care about this..
+	 *
+	 * XXX maybe fold into apply_work??
+	 */
+	struct work_struct page_flip_work;
+};
+
+/*
+ * Manager-ops, callbacks from output when they need to configure
+ * the upstream part of the video pipe.
+ *
+ * Most of these we can ignore until we add support for command-mode
+ * panels.. for video-mode the crtc-helpers already do an adequate
+ * job of sequencing the setup of the video pipe in the proper order
+ */
+
+/* we can probably ignore these until we support command-mode panels: */
+static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
+{
+}
+
+static int omap_crtc_enable(struct omap_overlay_manager *mgr)
+{
+	return 0;
+}
+
+static void omap_crtc_disable(struct omap_overlay_manager *mgr)
+{
+}
+
+static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
+		const struct omap_video_timings *timings)
+{
+	struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+	DBG("%s", omap_crtc->name);
+	omap_crtc->timings = *timings;
+	omap_crtc->full_update = true;
+}
+
+static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr,
+		const struct dss_lcd_mgr_config *config)
+{
+	struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+	DBG("%s", omap_crtc->name);
+	dispc_mgr_set_lcd_config(omap_crtc->channel, config);
+}
+
+static int omap_crtc_register_framedone_handler(
+		struct omap_overlay_manager *mgr,
+		void (*handler)(void *), void *data)
+{
+	return 0;
+}
+
+static void omap_crtc_unregister_framedone_handler(
+		struct omap_overlay_manager *mgr,
+		void (*handler)(void *), void *data)
+{
+}
+
+static const struct dss_mgr_ops mgr_ops = {
+	.start_update = omap_crtc_start_update,
+	.enable = omap_crtc_enable,
+	.disable = omap_crtc_disable,
+	.set_timings = omap_crtc_set_timings,
+	.set_lcd_config = omap_crtc_set_lcd_config,
+	.register_framedone_handler = omap_crtc_register_framedone_handler,
+	.unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
+};
+
+/*
+ * CRTC funcs:
+ */
+
+static void omap_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+	DBG("%s", omap_crtc->name);
+
+	WARN_ON(omap_crtc->apply_irq.registered);
+	omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
+	omap_crtc->plane->funcs->destroy(omap_crtc->plane);
+	drm_crtc_cleanup(crtc);
+
+	kfree(omap_crtc);
+}
+
+static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct omap_drm_private *priv = crtc->dev->dev_private;
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	bool enabled = (mode == DRM_MODE_DPMS_ON);
+	int i;
+
+	DBG("%s: %d", omap_crtc->name, mode);
+
+	if (enabled != omap_crtc->enabled) {
+		omap_crtc->enabled = enabled;
+		omap_crtc->full_update = true;
+		omap_crtc_apply(crtc, &omap_crtc->apply);
+
+		/* also enable our private plane: */
+		WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
+
+		/* and any attached overlay planes: */
+		for (i = 0; i < priv->num_planes; i++) {
+			struct drm_plane *plane = priv->planes[i];
+			if (plane->crtc == crtc)
+				WARN_ON(omap_plane_dpms(plane, mode));
+		}
+	}
+}
+
+static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static int omap_crtc_mode_set(struct drm_crtc *crtc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode,
+		int x, int y,
+		struct drm_framebuffer *old_fb)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+	mode = adjusted_mode;
+
+	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+			omap_crtc->name, mode->base.id, mode->name,
+			mode->vrefresh, mode->clock,
+			mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal,
+			mode->vdisplay, mode->vsync_start,
+			mode->vsync_end, mode->vtotal,
+			mode->type, mode->flags);
+
+	copy_timings_drm_to_omap(&omap_crtc->timings, mode);
+	omap_crtc->full_update = true;
+
+	return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+			0, 0, mode->hdisplay, mode->vdisplay,
+			x << 16, y << 16,
+			mode->hdisplay << 16, mode->vdisplay << 16,
+			NULL, NULL);
+}
+
+static void omap_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	DBG("%s", omap_crtc->name);
+	omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_crtc_commit(struct drm_crtc *crtc)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	DBG("%s", omap_crtc->name);
+	omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+		struct drm_framebuffer *old_fb)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	struct drm_plane *plane = omap_crtc->plane;
+	struct drm_display_mode *mode = &crtc->mode;
+
+	return omap_plane_mode_set(plane, crtc, crtc->fb,
+			0, 0, mode->hdisplay, mode->vdisplay,
+			x << 16, y << 16,
+			mode->hdisplay << 16, mode->vdisplay << 16,
+			NULL, NULL);
+}
+
+static void omap_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void vblank_cb(void *arg)
+{
+	struct drm_crtc *crtc = arg;
+	struct drm_device *dev = crtc->dev;
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	/* wakeup userspace */
+	if (omap_crtc->event)
+		drm_send_vblank_event(dev, omap_crtc->pipe, omap_crtc->event);
+
+	omap_crtc->event = NULL;
+	omap_crtc->old_fb = NULL;
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static void page_flip_worker(struct work_struct *work)
+{
+	struct omap_crtc *omap_crtc =
+			container_of(work, struct omap_crtc, page_flip_work);
+	struct drm_crtc *crtc = &omap_crtc->base;
+	struct drm_display_mode *mode = &crtc->mode;
+	struct drm_gem_object *bo;
+
+	mutex_lock(&crtc->mutex);
+	omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+			0, 0, mode->hdisplay, mode->vdisplay,
+			crtc->x << 16, crtc->y << 16,
+			mode->hdisplay << 16, mode->vdisplay << 16,
+			vblank_cb, crtc);
+	mutex_unlock(&crtc->mutex);
+
+	bo = omap_framebuffer_bo(crtc->fb, 0);
+	drm_gem_object_unreference_unlocked(bo);
+}
+
+static void page_flip_cb(void *arg)
+{
+	struct drm_crtc *crtc = arg;
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	struct omap_drm_private *priv = crtc->dev->dev_private;
+
+	/* avoid assumptions about what context we are called from: */
+	queue_work(priv->wq, &omap_crtc->page_flip_work);
+}
+
+static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
+		struct drm_framebuffer *fb,
+		struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	struct drm_gem_object *bo;
+
+	DBG("%d -> %d (event=%p)", crtc->fb ? crtc->fb->base.id : -1,
+			fb->base.id, event);
+
+	if (omap_crtc->old_fb) {
+		dev_err(dev->dev, "already a pending flip\n");
+		return -EINVAL;
+	}
+
+	omap_crtc->event = event;
+	crtc->fb = fb;
+
+	/*
+	 * Hold a reference temporarily until the crtc is updated
+	 * and takes the reference to the bo.  This avoids it
+	 * getting freed from under us:
+	 */
+	bo = omap_framebuffer_bo(fb, 0);
+	drm_gem_object_reference(bo);
+
+	omap_gem_op_async(bo, OMAP_GEM_READ, page_flip_cb, crtc);
+
+	return 0;
+}
+
+static int omap_crtc_set_property(struct drm_crtc *crtc,
+		struct drm_property *property, uint64_t val)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	struct omap_drm_private *priv = crtc->dev->dev_private;
+
+	if (property == priv->rotation_prop) {
+		crtc->invert_dimensions =
+				!!(val & ((1LL << DRM_ROTATE_90) | (1LL << DRM_ROTATE_270)));
+	}
+
+	return omap_plane_set_property(omap_crtc->plane, property, val);
+}
+
+static const struct drm_crtc_funcs omap_crtc_funcs = {
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = omap_crtc_destroy,
+	.page_flip = omap_crtc_page_flip_locked,
+	.set_property = omap_crtc_set_property,
+};
+
+static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
+	.dpms = omap_crtc_dpms,
+	.mode_fixup = omap_crtc_mode_fixup,
+	.mode_set = omap_crtc_mode_set,
+	.prepare = omap_crtc_prepare,
+	.commit = omap_crtc_commit,
+	.mode_set_base = omap_crtc_mode_set_base,
+	.load_lut = omap_crtc_load_lut,
+};
+
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	return &omap_crtc->timings;
+}
+
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	return omap_crtc->channel;
+}
+
+static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+	struct omap_crtc *omap_crtc =
+			container_of(irq, struct omap_crtc, error_irq);
+	struct drm_crtc *crtc = &omap_crtc->base;
+	DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
+	/* avoid getting in a flood, unregister the irq until next vblank */
+	omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+	struct omap_crtc *omap_crtc =
+			container_of(irq, struct omap_crtc, apply_irq);
+	struct drm_crtc *crtc = &omap_crtc->base;
+
+	if (!omap_crtc->error_irq.registered)
+		omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+
+	if (!dispc_mgr_go_busy(omap_crtc->channel)) {
+		struct omap_drm_private *priv =
+				crtc->dev->dev_private;
+		DBG("%s: apply done", omap_crtc->name);
+		omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+		queue_work(priv->wq, &omap_crtc->apply_work);
+	}
+}
+
+static void apply_worker(struct work_struct *work)
+{
+	struct omap_crtc *omap_crtc =
+			container_of(work, struct omap_crtc, apply_work);
+	struct drm_crtc *crtc = &omap_crtc->base;
+	struct drm_device *dev = crtc->dev;
+	struct omap_drm_apply *apply, *n;
+	bool need_apply;
+
+	/*
+	 * Synchronize everything on mode_config.mutex, to keep
+	 * the callbacks and list modification all serialized
+	 * with respect to modesetting ioctls from userspace.
+	 */
+	mutex_lock(&crtc->mutex);
+	dispc_runtime_get();
+
+	/*
+	 * If we are still pending a previous update, wait.. when the
+	 * pending update completes, we get kicked again.
+	 */
+	if (omap_crtc->apply_irq.registered)
+		goto out;
+
+	/* finish up previous applies: */
+	list_for_each_entry_safe(apply, n,
+			&omap_crtc->pending_applies, pending_node) {
+		apply->post_apply(apply);
+		list_del(&apply->pending_node);
+	}
+
+	need_apply = !list_empty(&omap_crtc->queued_applies);
+
+	/* then handle the next round of queued applies: */
+	list_for_each_entry_safe(apply, n,
+			&omap_crtc->queued_applies, queued_node) {
+		apply->pre_apply(apply);
+		list_del(&apply->queued_node);
+		apply->queued = false;
+		list_add_tail(&apply->pending_node,
+				&omap_crtc->pending_applies);
+	}
+
+	if (need_apply) {
+		enum omap_channel channel = omap_crtc->channel;
+
+		DBG("%s: GO", omap_crtc->name);
+
+		if (dispc_mgr_is_enabled(channel)) {
+			omap_irq_register(dev, &omap_crtc->apply_irq);
+			dispc_mgr_go(channel);
+		} else {
+			struct omap_drm_private *priv = dev->dev_private;
+			queue_work(priv->wq, &omap_crtc->apply_work);
+		}
+	}
+
+out:
+	dispc_runtime_put();
+	mutex_unlock(&crtc->mutex);
+}
+
+int omap_crtc_apply(struct drm_crtc *crtc,
+		struct omap_drm_apply *apply)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+	WARN_ON(!mutex_is_locked(&crtc->mutex));
+
+	/* no need to queue it again if it is already queued: */
+	if (apply->queued)
+		return 0;
+
+	apply->queued = true;
+	list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
+
+	/*
+	 * If there are no currently pending updates, then go ahead and
+	 * kick the worker immediately, otherwise it will run again when
+	 * the current update finishes.
+	 */
+	if (list_empty(&omap_crtc->pending_applies)) {
+		struct omap_drm_private *priv = crtc->dev->dev_private;
+		queue_work(priv->wq, &omap_crtc->apply_work);
+	}
+
+	return 0;
+}
+
+/* called only from apply */
+static void set_enabled(struct drm_crtc *crtc, bool enable)
+{
+	struct drm_device *dev = crtc->dev;
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	enum omap_channel channel = omap_crtc->channel;
+	struct omap_irq_wait *wait = NULL;
+
+	if (dispc_mgr_is_enabled(channel) == enable)
+		return;
+
+	/* ignore sync-lost irqs during enable/disable */
+	omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
+	if (dispc_mgr_get_framedone_irq(channel)) {
+		if (!enable) {
+			wait = omap_irq_wait_init(dev,
+					dispc_mgr_get_framedone_irq(channel), 1);
+		}
+	} else {
+		/*
+		 * When we disable digit output, we need to wait until fields
+		 * are done.  Otherwise the DSS is still working, and turning
+		 * off the clocks prevents DSS from going to OFF mode.  And when
+		 * enabling, we need to wait for the extra sync losts
+		 */
+		wait = omap_irq_wait_init(dev,
+				dispc_mgr_get_vsync_irq(channel), 2);
+	}
+
+	dispc_mgr_enable(channel, enable);
+
+	if (wait) {
+		int ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
+		if (ret) {
+			dev_err(dev->dev, "%s: timeout waiting for %s\n",
+					omap_crtc->name, enable ? "enable" : "disable");
+		}
+	}
+
+	omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
+{
+	struct omap_crtc *omap_crtc =
+			container_of(apply, struct omap_crtc, apply);
+	struct drm_crtc *crtc = &omap_crtc->base;
+	struct drm_encoder *encoder = NULL;
+
+	DBG("%s: enabled=%d, full=%d", omap_crtc->name,
+			omap_crtc->enabled, omap_crtc->full_update);
+
+	if (omap_crtc->full_update) {
+		struct omap_drm_private *priv = crtc->dev->dev_private;
+		int i;
+		for (i = 0; i < priv->num_encoders; i++) {
+			if (priv->encoders[i]->crtc == crtc) {
+				encoder = priv->encoders[i];
+				break;
+			}
+		}
+	}
+
+	if (!omap_crtc->enabled) {
+		set_enabled(&omap_crtc->base, false);
+		if (encoder)
+			omap_encoder_set_enabled(encoder, false);
+	} else {
+		if (encoder) {
+			omap_encoder_set_enabled(encoder, false);
+			omap_encoder_update(encoder, &omap_crtc->mgr,
+					&omap_crtc->timings);
+			omap_encoder_set_enabled(encoder, true);
+			omap_crtc->full_update = false;
+		}
+
+		dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
+		dispc_mgr_set_timings(omap_crtc->channel,
+				&omap_crtc->timings);
+		set_enabled(&omap_crtc->base, true);
+	}
+
+	omap_crtc->full_update = false;
+}
+
+static void omap_crtc_post_apply(struct omap_drm_apply *apply)
+{
+	/* nothing needed for post-apply */
+}
+
+static const char *channel_names[] = {
+	[OMAP_DSS_CHANNEL_LCD] = "lcd",
+	[OMAP_DSS_CHANNEL_DIGIT] = "tv",
+	[OMAP_DSS_CHANNEL_LCD2] = "lcd2",
+};
+
+/* initialize crtc */
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+		struct drm_plane *plane, enum omap_channel channel, int id)
+{
+	struct drm_crtc *crtc = NULL;
+	struct omap_crtc *omap_crtc;
+	struct omap_overlay_manager_info *info;
+
+	DBG("%s", channel_names[channel]);
+
+	omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
+	if (!omap_crtc)
+		goto fail;
+
+	crtc = &omap_crtc->base;
+
+	INIT_WORK(&omap_crtc->page_flip_work, page_flip_worker);
+	INIT_WORK(&omap_crtc->apply_work, apply_worker);
+
+	INIT_LIST_HEAD(&omap_crtc->pending_applies);
+	INIT_LIST_HEAD(&omap_crtc->queued_applies);
+
+	omap_crtc->apply.pre_apply = omap_crtc_pre_apply;
+	omap_crtc->apply.post_apply = omap_crtc_post_apply;
+
+	omap_crtc->apply_irq.irqmask = pipe2vbl(id);
+	omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
+
+	omap_crtc->error_irq.irqmask =
+			dispc_mgr_get_sync_lost_irq(channel);
+	omap_crtc->error_irq.irq = omap_crtc_error_irq;
+	omap_irq_register(dev, &omap_crtc->error_irq);
+
+	omap_crtc->channel = channel;
+	omap_crtc->plane = plane;
+	omap_crtc->plane->crtc = crtc;
+	omap_crtc->name = channel_names[channel];
+	omap_crtc->pipe = id;
+
+	/* temporary: */
+	omap_crtc->mgr.id = channel;
+
+	dss_install_mgr_ops(&mgr_ops);
+
+	/* TODO: fix hard-coded setup.. add properties! */
+	info = &omap_crtc->info;
+	info->default_color = 0x00000000;
+	info->trans_key = 0x00000000;
+	info->trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+	info->trans_enabled = false;
+
+	drm_crtc_init(dev, crtc, &omap_crtc_funcs);
+	drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
+
+	omap_plane_install_properties(omap_crtc->plane, &crtc->base);
+
+	return crtc;
+
+fail:
+	if (crtc)
+		omap_crtc_destroy(crtc);
+
+	return NULL;
+}
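
The apply machinery above is the heart of this file: callers queue an
omap_drm_apply on queued_applies; apply_worker() promotes queued applies to
pending_applies, calls pre_apply() on each, and sets the dispc GO bit; when
the apply IRQ sees GO clear, it re-queues the worker, which runs post_apply()
on the pending list and starts the next batch. A compressed userspace sketch
of just the queue/promote step, with hypothetical stand-ins for the kernel
list and workqueue APIs (and without the tail-insertion ordering of the real
code):

    #include <stdio.h>

    /* hypothetical, simplified stand-in for struct omap_drm_apply */
    struct apply {
        const char *name;
        struct apply *next;
        int queued;
    };

    static struct apply *queued_head;   /* ~ omap_crtc->queued_applies */
    static struct apply *pending_head;  /* ~ omap_crtc->pending_applies */

    /* mirrors omap_crtc_apply(): queueing twice is a no-op */
    static void crtc_apply(struct apply *a)
    {
        if (a->queued)
            return;
        a->queued = 1;
        a->next = queued_head;
        queued_head = a;
    }

    /* mirrors the promotion step in apply_worker() */
    static void worker(void)
    {
        while (queued_head) {
            struct apply *a = queued_head;
            queued_head = a->next;
            a->queued = 0;
            a->next = pending_head;
            pending_head = a; /* stays pending until GO completes */
            printf("pre_apply(%s)\n", a->name);
        }
        /* the real worker now does dispc_mgr_go() and waits for the
         * apply IRQ before running post_apply() on the pending list */
    }

    int main(void)
    {
        struct apply a = { "crtc", NULL, 0 }, b = { "plane", NULL, 0 };

        crtc_apply(&a);
        crtc_apply(&a); /* no-op: already queued */
        crtc_apply(&b);
        worker();
        return 0;
    }
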
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
new file mode 100644
index 000000000000..c27f59da7f29
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -0,0 +1,125 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_debugfs.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+#include "drm_fb_helper.h"
+
+
+#ifdef CONFIG_DEBUG_FS
+
+static int gem_show(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct omap_drm_private *priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "All Objects:\n");
+	omap_gem_describe_objects(&priv->obj_list, m);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int mm_show(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	return drm_mm_dump_table(m, dev->mm_private);
+}
+
+static int fb_show(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct omap_drm_private *priv = dev->dev_private;
+	struct drm_framebuffer *fb;
+
+	seq_printf(m, "fbcon ");
+	omap_framebuffer_describe(priv->fbdev->fb, m);
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+		if (fb == priv->fbdev->fb)
+			continue;
+
+		seq_printf(m, "user ");
+		omap_framebuffer_describe(fb, m);
+	}
+	mutex_unlock(&dev->mode_config.fb_lock);
+
+	return 0;
+}
+
+/* list of debugfs files that are applicable to all devices */
+static struct drm_info_list omap_debugfs_list[] = {
+	{"gem", gem_show, 0},
+	{"mm", mm_show, 0},
+	{"fb", fb_show, 0},
+};
+
+/* list of debugfs files that are specific to devices with dmm/tiler */
+static struct drm_info_list omap_dmm_debugfs_list[] = {
+	{"tiler_map", tiler_map_show, 0},
+};
+
+int omap_debugfs_init(struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	ret = drm_debugfs_create_files(omap_debugfs_list,
+			ARRAY_SIZE(omap_debugfs_list),
+			minor->debugfs_root, minor);
+
+	if (ret) {
+		dev_err(dev->dev, "could not install omap_debugfs_list\n");
+		return ret;
+	}
+
+	if (dmm_is_available())
+		ret = drm_debugfs_create_files(omap_dmm_debugfs_list,
+				ARRAY_SIZE(omap_dmm_debugfs_list),
+				minor->debugfs_root, minor);
+
+	if (ret) {
+		dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+void omap_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(omap_debugfs_list,
+			ARRAY_SIZE(omap_debugfs_list), minor);
+	if (dmm_is_available())
+		drm_debugfs_remove_files(omap_dmm_debugfs_list,
+				ARRAY_SIZE(omap_dmm_debugfs_list), minor);
+}
+
+#endif
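
Once registered, these entries show up per DRM minor under debugfs; assuming
debugfs is mounted in the usual place, the files land at paths like
/sys/kernel/debug/dri/0/gem, /sys/kernel/debug/dri/0/fb, and (when the DMM is
present) /sys/kernel/debug/dri/0/tiler_map, with the minor number varying by
system. Reading a file invokes the corresponding *_show() callback via the
seq_file machinery.
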
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
new file mode 100644
index 000000000000..58bcd6ae0255
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
@@ -0,0 +1,188 @@
+/*
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rob Clark <rob@ti.com>
+ *         Andy Gross <andy.gross@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef OMAP_DMM_PRIV_H
+#define OMAP_DMM_PRIV_H
+
+#define DMM_REVISION          0x000
+#define DMM_HWINFO            0x004
+#define DMM_LISA_HWINFO       0x008
+#define DMM_DMM_SYSCONFIG     0x010
+#define DMM_LISA_LOCK         0x01C
+#define DMM_LISA_MAP__0       0x040
+#define DMM_LISA_MAP__1       0x044
+#define DMM_TILER_HWINFO      0x208
+#define DMM_TILER_OR__0       0x220
+#define DMM_TILER_OR__1       0x224
+#define DMM_PAT_HWINFO        0x408
+#define DMM_PAT_GEOMETRY      0x40C
+#define DMM_PAT_CONFIG        0x410
+#define DMM_PAT_VIEW__0       0x420
+#define DMM_PAT_VIEW__1       0x424
+#define DMM_PAT_VIEW_MAP__0   0x440
+#define DMM_PAT_VIEW_MAP_BASE 0x460
+#define DMM_PAT_IRQ_EOI       0x478
+#define DMM_PAT_IRQSTATUS_RAW 0x480
+#define DMM_PAT_IRQSTATUS     0x490
+#define DMM_PAT_IRQENABLE_SET 0x4A0
+#define DMM_PAT_IRQENABLE_CLR 0x4B0
+#define DMM_PAT_STATUS__0     0x4C0
+#define DMM_PAT_STATUS__1     0x4C4
+#define DMM_PAT_STATUS__2     0x4C8
+#define DMM_PAT_STATUS__3     0x4CC
+#define DMM_PAT_DESCR__0      0x500
+#define DMM_PAT_DESCR__1      0x510
+#define DMM_PAT_DESCR__2      0x520
+#define DMM_PAT_DESCR__3      0x530
+#define DMM_PEG_HWINFO        0x608
+#define DMM_PEG_PRIO          0x620
+#define DMM_PEG_PRIO_PAT      0x640
+
+#define DMM_IRQSTAT_DST          (1<<0)
+#define DMM_IRQSTAT_LST          (1<<1)
+#define DMM_IRQSTAT_ERR_INV_DSC  (1<<2)
+#define DMM_IRQSTAT_ERR_INV_DATA (1<<3)
+#define DMM_IRQSTAT_ERR_UPD_AREA (1<<4)
+#define DMM_IRQSTAT_ERR_UPD_CTRL (1<<5)
+#define DMM_IRQSTAT_ERR_UPD_DATA (1<<6)
+#define DMM_IRQSTAT_ERR_LUT_MISS (1<<7)
+
+#define DMM_IRQSTAT_ERR_MASK	(DMM_IRQSTAT_ERR_INV_DSC | \
+				DMM_IRQSTAT_ERR_INV_DATA | \
+				DMM_IRQSTAT_ERR_UPD_AREA | \
+				DMM_IRQSTAT_ERR_UPD_CTRL | \
+				DMM_IRQSTAT_ERR_UPD_DATA | \
+				DMM_IRQSTAT_ERR_LUT_MISS)
+
+#define DMM_PATSTATUS_READY         (1<<0)
+#define DMM_PATSTATUS_VALID         (1<<1)
+#define DMM_PATSTATUS_RUN           (1<<2)
+#define DMM_PATSTATUS_DONE          (1<<3)
+#define DMM_PATSTATUS_LINKED        (1<<4)
+#define DMM_PATSTATUS_BYPASSED      (1<<7)
+#define DMM_PATSTATUS_ERR_INV_DESCR (1<<10)
+#define DMM_PATSTATUS_ERR_INV_DATA  (1<<11)
+#define DMM_PATSTATUS_ERR_UPD_AREA  (1<<12)
+#define DMM_PATSTATUS_ERR_UPD_CTRL  (1<<13)
+#define DMM_PATSTATUS_ERR_UPD_DATA  (1<<14)
+#define DMM_PATSTATUS_ERR_ACCESS    (1<<15)
+
+/* note: don't treat DMM_PATSTATUS_ERR_ACCESS as an error */
+#define DMM_PATSTATUS_ERR	(DMM_PATSTATUS_ERR_INV_DESCR | \
+				DMM_PATSTATUS_ERR_INV_DATA | \
+				DMM_PATSTATUS_ERR_UPD_AREA | \
+				DMM_PATSTATUS_ERR_UPD_CTRL | \
+				DMM_PATSTATUS_ERR_UPD_DATA)
+
+
+
+enum {
+	PAT_STATUS,
+	PAT_DESCR
+};
+
+struct pat_ctrl {
+	u32 start:4;
+	u32 dir:4;
+	u32 lut_id:8;
+	u32 sync:12;
+	u32 ini:4;
+};
+
+struct pat {
+	uint32_t next_pa;
+	struct pat_area area;
+	struct pat_ctrl ctrl;
+	uint32_t data_pa;
+};
+
+#define DMM_FIXED_RETRY_COUNT 1000
+
+/* create refill buffer big enough to refill all slots, plus 3 descriptors..
+ * 3 descriptors is probably the worst-case for # of 2d-slices in a 1d area,
+ * but I guess you don't hit that worst case at the same time as full area
+ * refill
+ */
+#define DESCR_SIZE 128
+#define REFILL_BUFFER_SIZE ((4 * 128 * 256) + (3 * DESCR_SIZE))
+
+/* For OMAP5, a fixed offset is added to all Y coordinates for 1D buffers.
+ * This is used in programming to address the upper portion of the LUT
+ */
+#define OMAP5_LUT_OFFSET 128
+
+struct dmm;
+
+struct dmm_txn {
+	void *engine_handle;
+	struct tcm *tcm;
+
+	uint8_t *current_va;
+	dma_addr_t current_pa;
+
+	struct pat *last_pat;
+};
+
+struct refill_engine {
+	int id;
+	struct dmm *dmm;
+	struct tcm *tcm;
+
+	uint8_t *refill_va;
+	dma_addr_t refill_pa;
+
+	/* only one trans per engine for now */
+	struct dmm_txn txn;
+
+	bool async;
+
+	wait_queue_head_t wait_for_refill;
+
+	struct list_head idle_node;
+};
+
+struct dmm {
+	struct device *dev;
+	void __iomem *base;
+	int irq;
+
+	struct page *dummy_page;
+	dma_addr_t dummy_pa;
+
+	void *refill_va;
+	dma_addr_t refill_pa;
+
+	/* refill engines */
+	wait_queue_head_t engine_queue;
+	struct list_head idle_head;
+	struct refill_engine *engines;
+	int num_engines;
+	atomic_t engine_counter;
+
+	/* container information */
+	int container_width;
+	int container_height;
+	int lut_width;
+	int lut_height;
+	int num_lut;
+
+	/* array of LUT - TCM containers */
+	struct tcm **tcm;
+
+	/* allocation list and lock */
+	struct list_head alloc_head;
+};
+
+#endif
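
These PAT status bits are what wait_status() in omap_dmm_tiler.c (below)
polls against: any bit in DMM_PATSTATUS_ERR aborts the wait, while all bits
in the caller's wait_mask (e.g. READY) must be set before the transaction is
considered complete; note the comment above deliberately leaving ERR_ACCESS
out of the error mask. A small standalone sketch of that per-poll decision,
reusing the bit values defined above:

    #include <stdio.h>
    #include <stdint.h>

    #define DMM_PATSTATUS_READY         (1 << 0)
    #define DMM_PATSTATUS_DONE          (1 << 3)
    #define DMM_PATSTATUS_ERR_INV_DESCR (1 << 10)
    #define DMM_PATSTATUS_ERR_INV_DATA  (1 << 11)
    #define DMM_PATSTATUS_ERR_UPD_AREA  (1 << 12)
    #define DMM_PATSTATUS_ERR_UPD_CTRL  (1 << 13)
    #define DMM_PATSTATUS_ERR_UPD_DATA  (1 << 14)

    #define DMM_PATSTATUS_ERR (DMM_PATSTATUS_ERR_INV_DESCR | \
                               DMM_PATSTATUS_ERR_INV_DATA | \
                               DMM_PATSTATUS_ERR_UPD_AREA | \
                               DMM_PATSTATUS_ERR_UPD_CTRL | \
                               DMM_PATSTATUS_ERR_UPD_DATA)

    /* the decision wait_status() makes on each poll iteration:
     * <0 on error, 1 when wait_mask is fully set, 0 to keep polling */
    static int pat_status_check(uint32_t r, uint32_t wait_mask)
    {
        if (r & DMM_PATSTATUS_ERR)
            return -1; /* the driver returns -EFAULT here */
        return (r & wait_mask) == wait_mask;
    }

    int main(void)
    {
        uint32_t mask = DMM_PATSTATUS_READY;

        printf("%d\n", pat_status_check(DMM_PATSTATUS_READY, mask));        /* 1 */
        printf("%d\n", pat_status_check(0, mask));                          /* 0 */
        printf("%d\n", pat_status_check(DMM_PATSTATUS_ERR_INV_DATA, mask)); /* -1 */
        return 0;
    }
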
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c new file mode 100644 index 000000000000..9b794c933c81 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | |||
@@ -0,0 +1,986 @@ | |||
1 | /* | ||
2 | * DMM IOMMU driver support functions for TI OMAP processors. | ||
3 | * | ||
4 | * Author: Rob Clark <rob@ti.com> | ||
5 | * Andy Gross <andy.gross@ti.com> | ||
6 | * | ||
7 | * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation version 2. | ||
12 | * | ||
13 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
14 | * kind, whether express or implied; without even the implied warranty | ||
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | */ | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/platform_device.h> /* platform_device() */ | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/wait.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/dma-mapping.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/vmalloc.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/mm.h> | ||
30 | #include <linux/time.h> | ||
31 | #include <linux/list.h> | ||
32 | |||
33 | #include "omap_dmm_tiler.h" | ||
34 | #include "omap_dmm_priv.h" | ||
35 | |||
36 | #define DMM_DRIVER_NAME "dmm" | ||
37 | |||
38 | /* mappings for associating views to luts */ | ||
39 | static struct tcm *containers[TILFMT_NFORMATS]; | ||
40 | static struct dmm *omap_dmm; | ||
41 | |||
42 | /* global spinlock for protecting lists */ | ||
43 | static DEFINE_SPINLOCK(list_lock); | ||
44 | |||
45 | /* Geometry table */ | ||
46 | #define GEOM(xshift, yshift, bytes_per_pixel) { \ | ||
47 | .x_shft = (xshift), \ | ||
48 | .y_shft = (yshift), \ | ||
49 | .cpp = (bytes_per_pixel), \ | ||
50 | .slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \ | ||
51 | .slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \ | ||
52 | } | ||
53 | |||
54 | static const struct { | ||
55 | uint32_t x_shft; /* unused X-bits (as part of bpp) */ | ||
56 | uint32_t y_shft; /* unused Y-bits (as part of bpp) */ | ||
57 | uint32_t cpp; /* bytes/chars per pixel */ | ||
58 | uint32_t slot_w; /* width of each slot (in pixels) */ | ||
59 | uint32_t slot_h; /* height of each slot (in pixels) */ | ||
60 | } geom[TILFMT_NFORMATS] = { | ||
61 | [TILFMT_8BIT] = GEOM(0, 0, 1), | ||
62 | [TILFMT_16BIT] = GEOM(0, 1, 2), | ||
63 | [TILFMT_32BIT] = GEOM(1, 1, 4), | ||
64 | [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1), | ||
65 | }; | ||
66 | |||
67 | |||
68 | /* lookup table for registers w/ per-engine instances */ | ||
69 | static const uint32_t reg[][4] = { | ||
70 | [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1, | ||
71 | DMM_PAT_STATUS__2, DMM_PAT_STATUS__3}, | ||
72 | [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1, | ||
73 | DMM_PAT_DESCR__2, DMM_PAT_DESCR__3}, | ||
74 | }; | ||
75 | |||
76 | /* simple allocator to grab next 16 byte aligned memory from txn */ | ||
77 | static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa) | ||
78 | { | ||
79 | void *ptr; | ||
80 | struct refill_engine *engine = txn->engine_handle; | ||
81 | |||
82 | /* dmm programming requires 16 byte aligned addresses */ | ||
83 | txn->current_pa = round_up(txn->current_pa, 16); | ||
84 | txn->current_va = (void *)round_up((long)txn->current_va, 16); | ||
85 | |||
86 | ptr = txn->current_va; | ||
87 | *pa = txn->current_pa; | ||
88 | |||
89 | txn->current_pa += sz; | ||
90 | txn->current_va += sz; | ||
91 | |||
92 | BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE); | ||
93 | |||
94 | return ptr; | ||
95 | } | ||
96 | |||
97 | /* check status and spin until wait_mask comes true */ | ||
98 | static int wait_status(struct refill_engine *engine, uint32_t wait_mask) | ||
99 | { | ||
100 | struct dmm *dmm = engine->dmm; | ||
101 | uint32_t r = 0, err, i; | ||
102 | |||
103 | i = DMM_FIXED_RETRY_COUNT; | ||
104 | while (true) { | ||
105 | r = readl(dmm->base + reg[PAT_STATUS][engine->id]); | ||
106 | err = r & DMM_PATSTATUS_ERR; | ||
107 | if (err) | ||
108 | return -EFAULT; | ||
109 | |||
110 | if ((r & wait_mask) == wait_mask) | ||
111 | break; | ||
112 | |||
113 | if (--i == 0) | ||
114 | return -ETIMEDOUT; | ||
115 | |||
116 | udelay(1); | ||
117 | } | ||
118 | |||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | static void release_engine(struct refill_engine *engine) | ||
123 | { | ||
124 | unsigned long flags; | ||
125 | |||
126 | spin_lock_irqsave(&list_lock, flags); | ||
127 | list_add(&engine->idle_node, &omap_dmm->idle_head); | ||
128 | spin_unlock_irqrestore(&list_lock, flags); | ||
129 | |||
130 | atomic_inc(&omap_dmm->engine_counter); | ||
131 | wake_up_interruptible(&omap_dmm->engine_queue); | ||
132 | } | ||
133 | |||
134 | static irqreturn_t omap_dmm_irq_handler(int irq, void *arg) | ||
135 | { | ||
136 | struct dmm *dmm = arg; | ||
137 | uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS); | ||
138 | int i; | ||
139 | |||
140 | /* ack IRQ */ | ||
141 | writel(status, dmm->base + DMM_PAT_IRQSTATUS); | ||
142 | |||
143 | for (i = 0; i < dmm->num_engines; i++) { | ||
144 | if (status & DMM_IRQSTAT_LST) { | ||
145 | wake_up_interruptible(&dmm->engines[i].wait_for_refill); | ||
146 | |||
147 | if (dmm->engines[i].async) | ||
148 | release_engine(&dmm->engines[i]); | ||
149 | } | ||
150 | |||
151 | status >>= 8; | ||
152 | } | ||
153 | |||
154 | return IRQ_HANDLED; | ||
155 | } | ||
156 | |||
157 | /** | ||
158 | * Get a handle for a DMM transaction | ||
159 | */ | ||
160 | static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm) | ||
161 | { | ||
162 | struct dmm_txn *txn = NULL; | ||
163 | struct refill_engine *engine = NULL; | ||
164 | int ret; | ||
165 | unsigned long flags; | ||
166 | |||
167 | |||
168 | /* wait until an engine is available */ | ||
169 | ret = wait_event_interruptible(omap_dmm->engine_queue, | ||
170 | atomic_add_unless(&omap_dmm->engine_counter, -1, 0)); | ||
171 | if (ret) | ||
172 | return ERR_PTR(ret); | ||
173 | |||
174 | /* grab an idle engine */ | ||
175 | spin_lock_irqsave(&list_lock, flags); | ||
176 | if (!list_empty(&dmm->idle_head)) { | ||
177 | engine = list_entry(dmm->idle_head.next, struct refill_engine, | ||
178 | idle_node); | ||
179 | list_del(&engine->idle_node); | ||
180 | } | ||
181 | spin_unlock_irqrestore(&list_lock, flags); | ||
182 | |||
183 | BUG_ON(!engine); | ||
184 | |||
185 | txn = &engine->txn; | ||
186 | engine->tcm = tcm; | ||
187 | txn->engine_handle = engine; | ||
188 | txn->last_pat = NULL; | ||
189 | txn->current_va = engine->refill_va; | ||
190 | txn->current_pa = engine->refill_pa; | ||
191 | |||
192 | return txn; | ||
193 | } | ||
194 | |||
195 | /** | ||
196 | * Add region to DMM transaction. If pages or pages[i] is NULL, then the | ||
197 | * corresponding slot is cleared (i.e. dummy_pa is programmed) | ||
198 | */ | ||
199 | static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area, | ||
200 | struct page **pages, uint32_t npages, uint32_t roll) | ||
201 | { | ||
202 | dma_addr_t pat_pa = 0; | ||
203 | uint32_t *data; | ||
204 | struct pat *pat; | ||
205 | struct refill_engine *engine = txn->engine_handle; | ||
206 | int columns = (1 + area->x1 - area->x0); | ||
207 | int rows = (1 + area->y1 - area->y0); | ||
208 | int i = columns*rows; | ||
209 | |||
210 | pat = alloc_dma(txn, sizeof(struct pat), &pat_pa); | ||
211 | |||
212 | if (txn->last_pat) | ||
213 | txn->last_pat->next_pa = (uint32_t)pat_pa; | ||
214 | |||
215 | pat->area = *area; | ||
216 | |||
217 | /* adjust Y coordinates based on container parameters */ | ||
218 | pat->area.y0 += engine->tcm->y_offset; | ||
219 | pat->area.y1 += engine->tcm->y_offset; | ||
220 | |||
221 | pat->ctrl = (struct pat_ctrl){ | ||
222 | .start = 1, | ||
223 | .lut_id = engine->tcm->lut_id, | ||
224 | }; | ||
225 | |||
226 | data = alloc_dma(txn, 4*i, &pat->data_pa); | ||
227 | |||
228 | while (i--) { | ||
229 | int n = i + roll; | ||
230 | if (n >= npages) | ||
231 | n -= npages; | ||
232 | data[i] = (pages && pages[n]) ? | ||
233 | page_to_phys(pages[n]) : engine->dmm->dummy_pa; | ||
234 | } | ||
235 | |||
236 | txn->last_pat = pat; | ||
237 | |||
238 | return; | ||
239 | } | ||
240 | |||
241 | /** | ||
242 | * Commit the DMM transaction. | ||
243 | */ | ||
244 | static int dmm_txn_commit(struct dmm_txn *txn, bool wait) | ||
245 | { | ||
246 | int ret = 0; | ||
247 | struct refill_engine *engine = txn->engine_handle; | ||
248 | struct dmm *dmm = engine->dmm; | ||
249 | |||
250 | if (!txn->last_pat) { | ||
251 | dev_err(engine->dmm->dev, "need at least one txn\n"); | ||
252 | ret = -EINVAL; | ||
253 | goto cleanup; | ||
254 | } | ||
255 | |||
256 | txn->last_pat->next_pa = 0; | ||
257 | |||
258 | /* write to PAT_DESCR to clear out any pending transaction */ | ||
259 | writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]); | ||
260 | |||
261 | /* wait for engine ready: */ | ||
262 | ret = wait_status(engine, DMM_PATSTATUS_READY); | ||
263 | if (ret) { | ||
264 | ret = -EFAULT; | ||
265 | goto cleanup; | ||
266 | } | ||
267 | |||
268 | /* mark whether it is async to denote list management in IRQ handler */ | ||
269 | engine->async = !wait; | ||
270 | |||
271 | /* kick reload */ | ||
272 | writel(engine->refill_pa, | ||
273 | dmm->base + reg[PAT_DESCR][engine->id]); | ||
274 | |||
275 | if (wait) { | ||
276 | if (wait_event_interruptible_timeout(engine->wait_for_refill, | ||
277 | wait_status(engine, DMM_PATSTATUS_READY) == 0, | ||
278 | msecs_to_jiffies(1)) <= 0) { | ||
279 | dev_err(dmm->dev, "timed out waiting for done\n"); | ||
280 | ret = -ETIMEDOUT; | ||
281 | } | ||
282 | } | ||
283 | |||
284 | cleanup: | ||
285 | /* only place engine back on list if we are done with it */ | ||
286 | if (ret || wait) | ||
287 | release_engine(engine); | ||
288 | |||
289 | return ret; | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * DMM programming | ||
294 | */ | ||
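/* fill() below shows the complete transaction lifecycle: grab an idle | ||
* engine with dmm_txn_init(), append one PAT descriptor per slice of the | ||
* area with dmm_txn_append(), then kick (and optionally wait for) the | ||
* refill with dmm_txn_commit(). | ||
*/ | ||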
295 | static int fill(struct tcm_area *area, struct page **pages, | ||
296 | uint32_t npages, uint32_t roll, bool wait) | ||
297 | { | ||
298 | int ret = 0; | ||
299 | struct tcm_area slice, area_s; | ||
300 | struct dmm_txn *txn; | ||
301 | |||
302 | txn = dmm_txn_init(omap_dmm, area->tcm); | ||
303 | if (IS_ERR(txn)) | ||
304 | return PTR_ERR(txn); | ||
305 | |||
306 | tcm_for_each_slice(slice, *area, area_s) { | ||
307 | struct pat_area p_area = { | ||
308 | .x0 = slice.p0.x, .y0 = slice.p0.y, | ||
309 | .x1 = slice.p1.x, .y1 = slice.p1.y, | ||
310 | }; | ||
311 | |||
312 | dmm_txn_append(txn, &p_area, pages, npages, roll); | ||
313 | |||
314 | roll += tcm_sizeof(slice); | ||
315 | } | ||
316 | |||
317 | ret = dmm_txn_commit(txn, wait); | ||
318 | |||
319 | return ret; | ||
320 | } | ||
321 | |||
322 | /* | ||
323 | * Pin/unpin | ||
324 | */ | ||
325 | |||
326 | /* note: slots for which pages[i] == NULL are filled w/ the dummy page */ | ||
328 | int tiler_pin(struct tiler_block *block, struct page **pages, | ||
329 | uint32_t npages, uint32_t roll, bool wait) | ||
330 | { | ||
331 | int ret; | ||
332 | |||
333 | ret = fill(&block->area, pages, npages, roll, wait); | ||
334 | |||
335 | if (ret) | ||
336 | tiler_unpin(block); | ||
337 | |||
338 | return ret; | ||
339 | } | ||
340 | |||
341 | int tiler_unpin(struct tiler_block *block) | ||
342 | { | ||
343 | return fill(&block->area, NULL, 0, 0, false); | ||
344 | } | ||
345 | |||
346 | /* | ||
347 | * Reserve/release | ||
348 | */ | ||
349 | struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, | ||
350 | uint16_t h, uint16_t align) | ||
351 | { | ||
352 | struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL); | ||
353 | u32 min_align = 128; | ||
354 | int ret; | ||
355 | unsigned long flags; | ||
356 | |||
if (!block) | ||
return ERR_PTR(-ENOMEM); | ||
|||
357 | BUG_ON(!validfmt(fmt)); | ||
358 | |||
359 | /* convert width/height to slots */ | ||
360 | w = DIV_ROUND_UP(w, geom[fmt].slot_w); | ||
361 | h = DIV_ROUND_UP(h, geom[fmt].slot_h); | ||
362 | |||
363 | /* convert alignment to slots */ | ||
364 | min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp)); | ||
365 | align = ALIGN(align, min_align); | ||
366 | align /= geom[fmt].slot_w * geom[fmt].cpp; | ||
367 | |||
368 | block->fmt = fmt; | ||
369 | |||
370 | ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area); | ||
371 | if (ret) { | ||
372 | kfree(block); | ||
373 | return ERR_PTR(-ENOMEM); | ||
374 | } | ||
375 | |||
376 | /* add to allocation list */ | ||
377 | spin_lock_irqsave(&list_lock, flags); | ||
378 | list_add(&block->alloc_node, &omap_dmm->alloc_head); | ||
379 | spin_unlock_irqrestore(&list_lock, flags); | ||
380 | |||
381 | return block; | ||
382 | } | ||
383 | |||
384 | struct tiler_block *tiler_reserve_1d(size_t size) | ||
385 | { | ||
386 | struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL); | ||
387 | int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
388 | unsigned long flags; | ||
389 | |||
390 | if (!block) | ||
391 | return ERR_PTR(-ENOMEM); | ||
392 | |||
393 | block->fmt = TILFMT_PAGE; | ||
394 | |||
395 | if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages, | ||
396 | &block->area)) { | ||
397 | kfree(block); | ||
398 | return ERR_PTR(-ENOMEM); | ||
399 | } | ||
400 | |||
401 | spin_lock_irqsave(&list_lock, flags); | ||
402 | list_add(&block->alloc_node, &omap_dmm->alloc_head); | ||
403 | spin_unlock_irqrestore(&list_lock, flags); | ||
404 | |||
405 | return block; | ||
406 | } | ||
407 | |||
408 | /* note: if you have pinned pages, you should have already unpinned them first! */ | ||
409 | int tiler_release(struct tiler_block *block) | ||
410 | { | ||
411 | int ret = tcm_free(&block->area); | ||
412 | unsigned long flags; | ||
413 | |||
414 | if (block->area.tcm) | ||
415 | dev_err(omap_dmm->dev, "failed to release block\n"); | ||
416 | |||
417 | spin_lock_irqsave(&list_lock, flags); | ||
418 | list_del(&block->alloc_node); | ||
419 | spin_unlock_irqrestore(&list_lock, flags); | ||
420 | |||
421 | kfree(block); | ||
422 | return ret; | ||
423 | } | ||
424 | |||
425 | /* | ||
426 | * Utils | ||
427 | */ | ||
428 | |||
429 | /* calculate the tiler space address of a pixel in a view orientation... | ||
430 | * below description copied from the display subsystem section of TRM: | ||
431 | * | ||
432 | * When the TILER is addressed, the bits: | ||
433 | * [28:27] = 0x0 for 8-bit tiled | ||
434 | * 0x1 for 16-bit tiled | ||
435 | * 0x2 for 32-bit tiled | ||
436 | * 0x3 for page mode | ||
437 | * [31:29] = 0x0 for 0-degree view | ||
438 | * 0x1 for 180-degree view + mirroring | ||
439 | * 0x2 for 0-degree view + mirroring | ||
440 | * 0x3 for 180-degree view | ||
441 | * 0x4 for 270-degree view + mirroring | ||
442 | * 0x5 for 270-degree view | ||
443 | * 0x6 for 90-degree view | ||
444 | * 0x7 for 90-degree view + mirroring | ||
445 | * Otherwise the bits indicated the corresponding bit address to access | ||
446 | * the SDRAM. | ||
447 | */ | ||
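/* Worked example of the mapping above, using the constants from | ||
* omap_dmm_tiler.h (CONT_WIDTH_BITS = 14, CONT_HEIGHT_BITS = 13): for an | ||
* 8-bit natural (0-degree) view, x_bits = 14, y_bits = 13 and | ||
* alignment = 0, so the function below reduces to (y << 14) + x, with | ||
* access mode 0x0 placed in bits [28:27] by TIL_ADDR(). | ||
*/ | ||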
448 | static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y) | ||
449 | { | ||
450 | u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment; | ||
451 | |||
452 | x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft; | ||
453 | y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft; | ||
454 | alignment = geom[fmt].x_shft + geom[fmt].y_shft; | ||
455 | |||
456 | /* validate coordinate */ | ||
457 | x_mask = MASK(x_bits); | ||
458 | y_mask = MASK(y_bits); | ||
459 | |||
460 | if (x > x_mask || y > y_mask) { | ||
461 | DBG("invalid coords: %u > %u || %u > %u", | ||
462 | x, x_mask, y, y_mask); | ||
463 | return 0; | ||
464 | } | ||
465 | |||
466 | /* account for mirroring */ | ||
467 | if (orient & MASK_X_INVERT) | ||
468 | x ^= x_mask; | ||
469 | if (orient & MASK_Y_INVERT) | ||
470 | y ^= y_mask; | ||
471 | |||
472 | /* get coordinate address */ | ||
473 | if (orient & MASK_XY_FLIP) | ||
474 | tmp = ((x << y_bits) + y); | ||
475 | else | ||
476 | tmp = ((y << x_bits) + x); | ||
477 | |||
478 | return TIL_ADDR((tmp << alignment), orient, fmt); | ||
479 | } | ||
480 | |||
481 | dma_addr_t tiler_ssptr(struct tiler_block *block) | ||
482 | { | ||
483 | BUG_ON(!validfmt(block->fmt)); | ||
484 | |||
485 | return TILVIEW_8BIT + tiler_get_address(block->fmt, 0, | ||
486 | block->area.p0.x * geom[block->fmt].slot_w, | ||
487 | block->area.p0.y * geom[block->fmt].slot_h); | ||
488 | } | ||
489 | |||
490 | dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient, | ||
491 | uint32_t x, uint32_t y) | ||
492 | { | ||
493 | struct tcm_pt *p = &block->area.p0; | ||
494 | BUG_ON(!validfmt(block->fmt)); | ||
495 | |||
496 | return tiler_get_address(block->fmt, orient, | ||
497 | (p->x * geom[block->fmt].slot_w) + x, | ||
498 | (p->y * geom[block->fmt].slot_h) + y); | ||
499 | } | ||
500 | |||
501 | void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h) | ||
502 | { | ||
503 | BUG_ON(!validfmt(fmt)); | ||
504 | *w = round_up(*w, geom[fmt].slot_w); | ||
505 | *h = round_up(*h, geom[fmt].slot_h); | ||
506 | } | ||
507 | |||
508 | uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient) | ||
509 | { | ||
510 | BUG_ON(!validfmt(fmt)); | ||
511 | |||
512 | if (orient & MASK_XY_FLIP) | ||
513 | return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft); | ||
514 | else | ||
515 | return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft); | ||
516 | } | ||
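/* For example, with CONT_WIDTH_BITS = 14 the non-flipped stride is | ||
* 1 << 14 = 16KiB for TILFMT_8BIT (y_shft = 0) and 1 << 15 = 32KiB for | ||
* TILFMT_16BIT and TILFMT_32BIT (y_shft = 1): consecutive lines of the | ||
* mapped view are that many bytes apart. | ||
*/ | ||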
517 | |||
518 | size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h) | ||
519 | { | ||
520 | tiler_align(fmt, &w, &h); | ||
521 | return geom[fmt].cpp * w * h; | ||
522 | } | ||
523 | |||
524 | size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h) | ||
525 | { | ||
526 | BUG_ON(!validfmt(fmt)); | ||
527 | return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h; | ||
528 | } | ||
529 | |||
530 | bool dmm_is_available(void) | ||
531 | { | ||
532 | return omap_dmm != NULL; | ||
533 | } | ||
534 | |||
535 | static int omap_dmm_remove(struct platform_device *dev) | ||
536 | { | ||
537 | struct tiler_block *block, *_block; | ||
538 | int i; | ||
539 | unsigned long flags; | ||
540 | |||
541 | if (omap_dmm) { | ||
542 | /* free all area regions */ | ||
543 | spin_lock_irqsave(&list_lock, flags); | ||
544 | list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head, | ||
545 | alloc_node) { | ||
546 | list_del(&block->alloc_node); | ||
547 | kfree(block); | ||
548 | } | ||
549 | spin_unlock_irqrestore(&list_lock, flags); | ||
550 | |||
551 | for (i = 0; i < omap_dmm->num_lut; i++) | ||
552 | if (omap_dmm->tcm && omap_dmm->tcm[i]) | ||
553 | omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]); | ||
554 | kfree(omap_dmm->tcm); | ||
555 | |||
556 | kfree(omap_dmm->engines); | ||
557 | if (omap_dmm->refill_va) | ||
558 | dma_free_writecombine(omap_dmm->dev, | ||
559 | REFILL_BUFFER_SIZE * omap_dmm->num_engines, | ||
560 | omap_dmm->refill_va, | ||
561 | omap_dmm->refill_pa); | ||
562 | if (omap_dmm->dummy_page) | ||
563 | __free_page(omap_dmm->dummy_page); | ||
564 | |||
565 | if (omap_dmm->irq > 0) | ||
566 | free_irq(omap_dmm->irq, omap_dmm); | ||
567 | |||
568 | iounmap(omap_dmm->base); | ||
569 | kfree(omap_dmm); | ||
570 | omap_dmm = NULL; | ||
571 | } | ||
572 | |||
573 | return 0; | ||
574 | } | ||
575 | |||
576 | static int omap_dmm_probe(struct platform_device *dev) | ||
577 | { | ||
578 | int ret = -EFAULT, i; | ||
579 | struct tcm_area area = {0}; | ||
580 | u32 hwinfo, pat_geom; | ||
581 | struct resource *mem; | ||
582 | |||
583 | omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL); | ||
584 | if (!omap_dmm) | ||
585 | goto fail; | ||
586 | |||
587 | /* initialize lists */ | ||
588 | INIT_LIST_HEAD(&omap_dmm->alloc_head); | ||
589 | INIT_LIST_HEAD(&omap_dmm->idle_head); | ||
590 | |||
591 | init_waitqueue_head(&omap_dmm->engine_queue); | ||
592 | |||
593 | /* lookup hwmod data - base address and irq */ | ||
594 | mem = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
595 | if (!mem) { | ||
596 | dev_err(&dev->dev, "failed to get base address resource\n"); | ||
597 | goto fail; | ||
598 | } | ||
599 | |||
600 | omap_dmm->base = ioremap(mem->start, SZ_2K); | ||
601 | |||
602 | if (!omap_dmm->base) { | ||
603 | dev_err(&dev->dev, "failed to get dmm base address\n"); | ||
604 | goto fail; | ||
605 | } | ||
606 | |||
607 | omap_dmm->irq = platform_get_irq(dev, 0); | ||
608 | if (omap_dmm->irq < 0) { | ||
609 | dev_err(&dev->dev, "failed to get IRQ resource\n"); | ||
610 | goto fail; | ||
611 | } | ||
612 | |||
613 | omap_dmm->dev = &dev->dev; | ||
614 | |||
615 | hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO); | ||
616 | omap_dmm->num_engines = (hwinfo >> 24) & 0x1F; | ||
617 | omap_dmm->num_lut = (hwinfo >> 16) & 0x1F; | ||
618 | omap_dmm->container_width = 256; | ||
619 | omap_dmm->container_height = 128; | ||
620 | |||
621 | atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines); | ||
622 | |||
623 | /* read out actual LUT width and height */ | ||
624 | pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY); | ||
625 | omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5; | ||
626 | omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5; | ||
627 | |||
628 | /* on OMAP5 the LUT has twice the height and is split into a separate | ||
629 | container, so account for the extra LUT */ | ||
630 | if (omap_dmm->lut_height != omap_dmm->container_height) | ||
631 | omap_dmm->num_lut++; | ||
632 | |||
633 | /* initialize DMM registers */ | ||
634 | writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0); | ||
635 | writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1); | ||
636 | writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0); | ||
637 | writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE); | ||
638 | writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0); | ||
639 | writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1); | ||
640 | |||
641 | ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED, | ||
642 | "omap_dmm_irq_handler", omap_dmm); | ||
643 | |||
644 | if (ret) { | ||
645 | dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n", | ||
646 | omap_dmm->irq, ret); | ||
647 | omap_dmm->irq = -1; | ||
648 | goto fail; | ||
649 | } | ||
650 | |||
651 | /* Enable all interrupts for each refill engine except | ||
652 | * ERR_LUT_MISS<n> (which is just advisory, and we don't care | ||
653 | * about because we want to be able to refill live scanout | ||
654 | * buffers for accelerated pan/scroll) and FILL_DSC<n> which | ||
655 | * we just generally don't care about. | ||
656 | */ | ||
657 | writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET); | ||
658 | |||
659 | omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32); | ||
660 | if (!omap_dmm->dummy_page) { | ||
661 | dev_err(&dev->dev, "could not allocate dummy page\n"); | ||
662 | ret = -ENOMEM; | ||
663 | goto fail; | ||
664 | } | ||
665 | |||
666 | /* set dma mask for device */ | ||
667 | /* NOTE: this is a workaround for the hwmod not initializing properly */ | ||
668 | dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
669 | |||
670 | omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page); | ||
671 | |||
672 | /* alloc refill memory */ | ||
673 | omap_dmm->refill_va = dma_alloc_writecombine(&dev->dev, | ||
674 | REFILL_BUFFER_SIZE * omap_dmm->num_engines, | ||
675 | &omap_dmm->refill_pa, GFP_KERNEL); | ||
676 | if (!omap_dmm->refill_va) { | ||
677 | dev_err(&dev->dev, "could not allocate refill memory\n"); | ||
678 | goto fail; | ||
679 | } | ||
680 | |||
681 | /* alloc engines */ | ||
682 | omap_dmm->engines = kcalloc(omap_dmm->num_engines, | ||
683 | sizeof(struct refill_engine), GFP_KERNEL); | ||
684 | if (!omap_dmm->engines) { | ||
685 | ret = -ENOMEM; | ||
686 | goto fail; | ||
687 | } | ||
688 | |||
689 | for (i = 0; i < omap_dmm->num_engines; i++) { | ||
690 | omap_dmm->engines[i].id = i; | ||
691 | omap_dmm->engines[i].dmm = omap_dmm; | ||
692 | omap_dmm->engines[i].refill_va = omap_dmm->refill_va + | ||
693 | (REFILL_BUFFER_SIZE * i); | ||
694 | omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa + | ||
695 | (REFILL_BUFFER_SIZE * i); | ||
696 | init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill); | ||
697 | |||
698 | list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head); | ||
699 | } | ||
700 | |||
701 | omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm), | ||
702 | GFP_KERNEL); | ||
703 | if (!omap_dmm->tcm) { | ||
704 | ret = -ENOMEM; | ||
705 | goto fail; | ||
706 | } | ||
707 | |||
708 | /* init containers */ | ||
709 | /* Each LUT is associated with a TCM (container manager). We use the | ||
710 | lut_id to identify the correct LUT for programming during refill | ||
711 | operations */ | ||
712 | for (i = 0; i < omap_dmm->num_lut; i++) { | ||
713 | omap_dmm->tcm[i] = sita_init(omap_dmm->container_width, | ||
714 | omap_dmm->container_height, | ||
715 | NULL); | ||
716 | |||
717 | if (!omap_dmm->tcm[i]) { | ||
718 | dev_err(&dev->dev, "failed to allocate container\n"); | ||
719 | ret = -ENOMEM; | ||
720 | goto fail; | ||
721 | } | ||
722 | |||
723 | omap_dmm->tcm[i]->lut_id = i; | ||
724 | } | ||
725 | |||
726 | /* assign access mode containers to applicable tcm container */ | ||
727 | /* OMAP 4 has 1 container for all 4 views */ | ||
728 | /* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */ | ||
729 | containers[TILFMT_8BIT] = omap_dmm->tcm[0]; | ||
730 | containers[TILFMT_16BIT] = omap_dmm->tcm[0]; | ||
731 | containers[TILFMT_32BIT] = omap_dmm->tcm[0]; | ||
732 | |||
733 | if (omap_dmm->container_height != omap_dmm->lut_height) { | ||
734 | /* second LUT is used for PAGE mode. Programming must use a | ||
735 | y offset that is added to all y coordinates. LUT id is still | ||
736 | 0, because it is the same LUT, just the upper 128 lines */ | ||
737 | containers[TILFMT_PAGE] = omap_dmm->tcm[1]; | ||
738 | omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET; | ||
739 | omap_dmm->tcm[1]->lut_id = 0; | ||
740 | } else { | ||
741 | containers[TILFMT_PAGE] = omap_dmm->tcm[0]; | ||
742 | } | ||
743 | |||
744 | area = (struct tcm_area) { | ||
745 | .tcm = NULL, | ||
746 | .p1.x = omap_dmm->container_width - 1, | ||
747 | .p1.y = omap_dmm->container_height - 1, | ||
748 | }; | ||
749 | |||
750 | /* initialize all LUTs to dummy page entries */ | ||
751 | for (i = 0; i < omap_dmm->num_lut; i++) { | ||
752 | area.tcm = omap_dmm->tcm[i]; | ||
753 | if (fill(&area, NULL, 0, 0, true)) | ||
754 | dev_err(omap_dmm->dev, "refill failed"); | ||
755 | } | ||
756 | |||
757 | dev_info(omap_dmm->dev, "initialized all PAT entries\n"); | ||
758 | |||
759 | return 0; | ||
760 | |||
761 | fail: | ||
762 | if (omap_dmm_remove(dev)) | ||
763 | dev_err(&dev->dev, "cleanup failed\n"); | ||
764 | return ret; | ||
765 | } | ||
766 | |||
767 | /* | ||
768 | * debugfs support | ||
769 | */ | ||
770 | |||
771 | #ifdef CONFIG_DEBUG_FS | ||
772 | |||
773 | static const char *alphabet = "abcdefghijklmnopqrstuvwxyz" | ||
774 | "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; | ||
775 | static const char *special = ".,:;'\"`~!^-+"; | ||
776 | |||
777 | static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a, | ||
778 | char c, bool ovw) | ||
779 | { | ||
780 | int x, y; | ||
781 | for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++) | ||
782 | for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++) | ||
783 | if (map[y][x] == ' ' || ovw) | ||
784 | map[y][x] = c; | ||
785 | } | ||
786 | |||
787 | static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p, | ||
788 | char c) | ||
789 | { | ||
790 | map[p->y / ydiv][p->x / xdiv] = c; | ||
791 | } | ||
792 | |||
793 | static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p) | ||
794 | { | ||
795 | return map[p->y / ydiv][p->x / xdiv]; | ||
796 | } | ||
797 | |||
798 | static int map_width(int xdiv, int x0, int x1) | ||
799 | { | ||
800 | return (x1 / xdiv) - (x0 / xdiv) + 1; | ||
801 | } | ||
802 | |||
803 | static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1) | ||
804 | { | ||
805 | char *p = map[yd] + (x0 / xdiv); | ||
806 | int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2; | ||
807 | if (w >= 0) { | ||
808 | p += w; | ||
809 | while (*nice) | ||
810 | *p++ = *nice++; | ||
811 | } | ||
812 | } | ||
813 | |||
814 | static void map_1d_info(char **map, int xdiv, int ydiv, char *nice, | ||
815 | struct tcm_area *a) | ||
816 | { | ||
817 | sprintf(nice, "%dK", tcm_sizeof(*a) * 4); | ||
818 | if (a->p0.y + 1 < a->p1.y) { | ||
819 | text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0, | ||
820 | 256 - 1); | ||
821 | } else if (a->p0.y < a->p1.y) { | ||
822 | if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1)) | ||
823 | text_map(map, xdiv, nice, a->p0.y / ydiv, | ||
824 | a->p0.x + xdiv, 256 - 1); | ||
825 | else if (strlen(nice) < map_width(xdiv, 0, a->p1.x)) | ||
826 | text_map(map, xdiv, nice, a->p1.y / ydiv, | ||
827 | 0, a->p1.x - xdiv); | ||
828 | } else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) { | ||
829 | text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x); | ||
830 | } | ||
831 | } | ||
832 | |||
833 | static void map_2d_info(char **map, int xdiv, int ydiv, char *nice, | ||
834 | struct tcm_area *a) | ||
835 | { | ||
836 | sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a)); | ||
837 | if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) | ||
838 | text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, | ||
839 | a->p0.x, a->p1.x); | ||
840 | } | ||
841 | |||
842 | int tiler_map_show(struct seq_file *s, void *arg) | ||
843 | { | ||
844 | int xdiv = 2, ydiv = 1; | ||
845 | char **map = NULL, *global_map; | ||
846 | struct tiler_block *block; | ||
847 | struct tcm_area a, p; | ||
848 | int i; | ||
849 | const char *m2d = alphabet; | ||
850 | const char *a2d = special; | ||
851 | const char *m2dp = m2d, *a2dp = a2d; | ||
852 | char nice[128]; | ||
853 | int h_adj; | ||
854 | int w_adj; | ||
855 | unsigned long flags; | ||
856 | int lut_idx; | ||
857 | |||
858 | |||
859 | if (!omap_dmm) { | ||
860 | /* early return if dmm/tiler device is not initialized */ | ||
861 | return 0; | ||
862 | } | ||
863 | |||
864 | h_adj = omap_dmm->container_height / ydiv; | ||
865 | w_adj = omap_dmm->container_width / xdiv; | ||
866 | |||
867 | map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL); | ||
868 | global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL); | ||
869 | |||
870 | if (!map || !global_map) | ||
871 | goto error; | ||
872 | |||
873 | for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) { | ||
874 | memset(map, 0, h_adj * sizeof(*map)); | ||
875 | memset(global_map, ' ', (w_adj + 1) * h_adj); | ||
876 | |||
877 | for (i = 0; i < omap_dmm->container_height; i++) { | ||
878 | map[i] = global_map + i * (w_adj + 1); | ||
879 | map[i][w_adj] = 0; | ||
880 | } | ||
881 | |||
882 | spin_lock_irqsave(&list_lock, flags); | ||
883 | |||
884 | list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) { | ||
885 | if (block->area.tcm == omap_dmm->tcm[lut_idx]) { | ||
886 | if (block->fmt != TILFMT_PAGE) { | ||
887 | fill_map(map, xdiv, ydiv, &block->area, | ||
888 | *m2dp, true); | ||
889 | if (!*++a2dp) | ||
890 | a2dp = a2d; | ||
891 | if (!*++m2dp) | ||
892 | m2dp = m2d; | ||
893 | map_2d_info(map, xdiv, ydiv, nice, | ||
894 | &block->area); | ||
895 | } else { | ||
896 | bool start = read_map_pt(map, xdiv, | ||
897 | ydiv, &block->area.p0) == ' '; | ||
898 | bool end = read_map_pt(map, xdiv, ydiv, | ||
899 | &block->area.p1) == ' '; | ||
900 | |||
901 | tcm_for_each_slice(a, block->area, p) | ||
902 | fill_map(map, xdiv, ydiv, &a, | ||
903 | '=', true); | ||
904 | fill_map_pt(map, xdiv, ydiv, | ||
905 | &block->area.p0, | ||
906 | start ? '<' : 'X'); | ||
907 | fill_map_pt(map, xdiv, ydiv, | ||
908 | &block->area.p1, | ||
909 | end ? '>' : 'X'); | ||
910 | map_1d_info(map, xdiv, ydiv, nice, | ||
911 | &block->area); | ||
912 | } | ||
913 | } | ||
914 | } | ||
915 | |||
916 | spin_unlock_irqrestore(&list_lock, flags); | ||
917 | |||
918 | if (s) { | ||
919 | seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx); | ||
920 | for (i = 0; i < 128; i++) | ||
921 | seq_printf(s, "%03d:%s\n", i, map[i]); | ||
922 | seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx); | ||
923 | } else { | ||
924 | dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n", | ||
925 | lut_idx); | ||
926 | for (i = 0; i < 128; i++) | ||
927 | dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]); | ||
928 | dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n", | ||
929 | lut_idx); | ||
930 | } | ||
931 | } | ||
932 | |||
933 | error: | ||
934 | kfree(map); | ||
935 | kfree(global_map); | ||
936 | |||
937 | return 0; | ||
938 | } | ||
939 | #endif | ||
940 | |||
941 | #ifdef CONFIG_PM | ||
942 | static int omap_dmm_resume(struct device *dev) | ||
943 | { | ||
944 | struct tcm_area area; | ||
945 | int i; | ||
946 | |||
947 | if (!omap_dmm) | ||
948 | return -ENODEV; | ||
949 | |||
950 | area = (struct tcm_area) { | ||
951 | .tcm = NULL, | ||
952 | .p1.x = omap_dmm->container_width - 1, | ||
953 | .p1.y = omap_dmm->container_height - 1, | ||
954 | }; | ||
955 | |||
956 | /* initialize all LUTs to dummy page entries */ | ||
957 | for (i = 0; i < omap_dmm->num_lut; i++) { | ||
958 | area.tcm = omap_dmm->tcm[i]; | ||
959 | if (fill(&area, NULL, 0, 0, true)) | ||
960 | dev_err(dev, "refill failed"); | ||
961 | } | ||
962 | |||
963 | return 0; | ||
964 | } | ||
965 | |||
966 | static const struct dev_pm_ops omap_dmm_pm_ops = { | ||
967 | .resume = omap_dmm_resume, | ||
968 | }; | ||
969 | #endif | ||
970 | |||
971 | struct platform_driver omap_dmm_driver = { | ||
972 | .probe = omap_dmm_probe, | ||
973 | .remove = omap_dmm_remove, | ||
974 | .driver = { | ||
975 | .owner = THIS_MODULE, | ||
976 | .name = DMM_DRIVER_NAME, | ||
977 | #ifdef CONFIG_PM | ||
978 | .pm = &omap_dmm_pm_ops, | ||
979 | #endif | ||
980 | }, | ||
981 | }; | ||
982 | |||
983 | MODULE_LICENSE("GPL v2"); | ||
984 | MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>"); | ||
985 | MODULE_DESCRIPTION("OMAP DMM/Tiler Driver"); | ||
986 | MODULE_ALIAS("platform:" DMM_DRIVER_NAME); | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h new file mode 100644 index 000000000000..4fdd61e54bd2 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ | ||
4 | * Author: Rob Clark <rob@ti.com> | ||
5 | * Andy Gross <andy.gross@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License as | ||
9 | * published by the Free Software Foundation version 2. | ||
10 | * | ||
11 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
12 | * kind, whether express or implied; without even the implied warranty | ||
13 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | ||
16 | #ifndef OMAP_DMM_TILER_H | ||
17 | #define OMAP_DMM_TILER_H | ||
18 | |||
19 | #include "omap_drv.h" | ||
20 | #include "tcm.h" | ||
21 | |||
22 | enum tiler_fmt { | ||
23 | TILFMT_8BIT = 0, | ||
24 | TILFMT_16BIT, | ||
25 | TILFMT_32BIT, | ||
26 | TILFMT_PAGE, | ||
27 | TILFMT_NFORMATS | ||
28 | }; | ||
29 | |||
30 | struct pat_area { | ||
31 | u32 x0:8; | ||
32 | u32 y0:8; | ||
33 | u32 x1:8; | ||
34 | u32 y1:8; | ||
35 | }; | ||
36 | |||
37 | struct tiler_block { | ||
38 | struct list_head alloc_node; /* node for global block list */ | ||
39 | struct tcm_area area; /* area */ | ||
40 | enum tiler_fmt fmt; /* format */ | ||
41 | }; | ||
42 | |||
43 | /* bits representing the same slot in DMM-TILER hw-block */ | ||
44 | #define SLOT_WIDTH_BITS 6 | ||
45 | #define SLOT_HEIGHT_BITS 6 | ||
46 | |||
47 | /* bits reserved to describe coordinates in DMM-TILER hw-block */ | ||
48 | #define CONT_WIDTH_BITS 14 | ||
49 | #define CONT_HEIGHT_BITS 13 | ||
50 | |||
51 | /* calculated constants */ | ||
52 | #define TILER_PAGE (1 << (SLOT_WIDTH_BITS + SLOT_HEIGHT_BITS)) | ||
53 | #define TILER_WIDTH (1 << (CONT_WIDTH_BITS - SLOT_WIDTH_BITS)) | ||
54 | #define TILER_HEIGHT (1 << (CONT_HEIGHT_BITS - SLOT_HEIGHT_BITS)) | ||
55 | |||
56 | /* | ||
57 | Table 15-11. Coding and Description of TILER Orientations | ||
58 | S Y X Description Alternate description | ||
59 | 0 0 0 0-degree view Natural view | ||
60 | 0 0 1 0-degree view with vertical mirror 180-degree view with horizontal mirror | ||
61 | 0 1 0 0-degree view with horizontal mirror 180-degree view with vertical mirror | ||
62 | 0 1 1 180-degree view | ||
63 | 1 0 0 90-degree view with vertical mirror 270-degree view with horizontal mirror | ||
64 | 1 0 1 270-degree view | ||
65 | 1 1 0 90-degree view | ||
66 | 1 1 1 90-degree view with horizontal mirror 270-degree view with vertical mirror | ||
67 | */ | ||
68 | #define MASK_XY_FLIP (1 << 31) | ||
69 | #define MASK_Y_INVERT (1 << 30) | ||
70 | #define MASK_X_INVERT (1 << 29) | ||
71 | #define SHIFT_ACC_MODE 27 | ||
72 | #define MASK_ACC_MODE 3 | ||
73 | |||
74 | #define MASK(bits) ((1 << (bits)) - 1) | ||
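/* e.g. MASK(14) == 0x3fff, the x coordinate mask for the full 14-bit | ||
* container width of an 8-bit view. | ||
*/ | ||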
75 | |||
76 | #define TILVIEW_8BIT 0x60000000u | ||
77 | #define TILVIEW_16BIT (TILVIEW_8BIT + VIEW_SIZE) | ||
78 | #define TILVIEW_32BIT (TILVIEW_16BIT + VIEW_SIZE) | ||
79 | #define TILVIEW_PAGE (TILVIEW_32BIT + VIEW_SIZE) | ||
80 | #define TILVIEW_END (TILVIEW_PAGE + VIEW_SIZE) | ||
81 | |||
82 | /* create tsptr by adding view orientation and access mode */ | ||
83 | #define TIL_ADDR(x, orient, a)\ | ||
84 | ((u32) (x) | (orient) | ((a) << SHIFT_ACC_MODE)) | ||
85 | |||
86 | #ifdef CONFIG_DEBUG_FS | ||
87 | int tiler_map_show(struct seq_file *s, void *arg); | ||
88 | #endif | ||
89 | |||
90 | /* pin/unpin */ | ||
91 | int tiler_pin(struct tiler_block *block, struct page **pages, | ||
92 | uint32_t npages, uint32_t roll, bool wait); | ||
93 | int tiler_unpin(struct tiler_block *block); | ||
94 | |||
95 | /* reserve/release */ | ||
96 | struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h, | ||
97 | uint16_t align); | ||
98 | struct tiler_block *tiler_reserve_1d(size_t size); | ||
99 | int tiler_release(struct tiler_block *block); | ||
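|||
/* A minimal usage sketch (illustrative only: 'pages'/'npages' would come | ||
* from the caller, e.g. a GEM object's page list, and error handling is | ||
* abbreviated): | ||
* | ||
* struct tiler_block *block; | ||
* | ||
* block = tiler_reserve_2d(TILFMT_16BIT, w, h, 0); | ||
* if (IS_ERR(block)) | ||
* return PTR_ERR(block); | ||
* tiler_pin(block, pages, npages, 0, true); | ||
* ... scan out from tiler_ssptr(block) ... | ||
* tiler_unpin(block); | ||
* tiler_release(block); | ||
*/ | ||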
100 | |||
101 | /* utilities */ | ||
102 | dma_addr_t tiler_ssptr(struct tiler_block *block); | ||
103 | dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient, | ||
104 | uint32_t x, uint32_t y); | ||
105 | uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient); | ||
106 | size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h); | ||
107 | size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h); | ||
108 | void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h); | ||
109 | bool dmm_is_available(void); | ||
110 | |||
111 | extern struct platform_driver omap_dmm_driver; | ||
112 | |||
113 | /* GEM bo flags -> tiler fmt */ | ||
114 | static inline enum tiler_fmt gem2fmt(uint32_t flags) | ||
115 | { | ||
116 | switch (flags & OMAP_BO_TILED) { | ||
117 | case OMAP_BO_TILED_8: | ||
118 | return TILFMT_8BIT; | ||
119 | case OMAP_BO_TILED_16: | ||
120 | return TILFMT_16BIT; | ||
121 | case OMAP_BO_TILED_32: | ||
122 | return TILFMT_32BIT; | ||
123 | default: | ||
124 | return TILFMT_PAGE; | ||
125 | } | ||
126 | } | ||
127 | |||
128 | static inline bool validfmt(enum tiler_fmt fmt) | ||
129 | { | ||
130 | switch (fmt) { | ||
131 | case TILFMT_8BIT: | ||
132 | case TILFMT_16BIT: | ||
133 | case TILFMT_32BIT: | ||
134 | case TILFMT_PAGE: | ||
135 | return true; | ||
136 | default: | ||
137 | return false; | ||
138 | } | ||
139 | } | ||
140 | |||
141 | #endif | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c new file mode 100644 index 000000000000..079c54c6f94c --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_drv.c | |||
@@ -0,0 +1,608 @@ | |||
1 | /* | ||
2 | * drivers/gpu/drm/omapdrm/omap_drv.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments | ||
5 | * Author: Rob Clark <rob@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include "omap_drv.h" | ||
21 | |||
22 | #include "drm_crtc_helper.h" | ||
23 | #include "drm_fb_helper.h" | ||
24 | #include "omap_dmm_tiler.h" | ||
25 | |||
26 | #define DRIVER_NAME MODULE_NAME | ||
27 | #define DRIVER_DESC "OMAP DRM" | ||
28 | #define DRIVER_DATE "20110917" | ||
29 | #define DRIVER_MAJOR 1 | ||
30 | #define DRIVER_MINOR 0 | ||
31 | #define DRIVER_PATCHLEVEL 0 | ||
32 | |||
33 | static int num_crtc = CONFIG_DRM_OMAP_NUM_CRTCS; | ||
34 | |||
35 | MODULE_PARM_DESC(num_crtc, "Number of overlays to use as CRTCs"); | ||
36 | module_param(num_crtc, int, 0600); | ||
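|||
/* For example, the default can be overridden with "omapdrm.num_crtc=2" on | ||
* the kernel command line, or "modprobe omapdrm num_crtc=2" when built as | ||
* a module. | ||
*/ | ||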
37 | |||
38 | /* | ||
39 | * mode config funcs | ||
40 | */ | ||
41 | |||
42 | /* Notes about mapping DSS and DRM entities: | ||
43 | * CRTC: overlay | ||
44 | * encoder: manager.. with some extension to allow one primary CRTC | ||
45 | * and zero or more video CRTC's to be mapped to one encoder? | ||
46 | * connector: dssdev.. manager can be attached/detached from different | ||
47 | * devices | ||
48 | */ | ||
49 | |||
50 | static void omap_fb_output_poll_changed(struct drm_device *dev) | ||
51 | { | ||
52 | struct omap_drm_private *priv = dev->dev_private; | ||
53 | DBG("dev=%p", dev); | ||
54 | if (priv->fbdev) | ||
55 | drm_fb_helper_hotplug_event(priv->fbdev); | ||
56 | } | ||
57 | |||
58 | static const struct drm_mode_config_funcs omap_mode_config_funcs = { | ||
59 | .fb_create = omap_framebuffer_create, | ||
60 | .output_poll_changed = omap_fb_output_poll_changed, | ||
61 | }; | ||
62 | |||
63 | static int get_connector_type(struct omap_dss_device *dssdev) | ||
64 | { | ||
65 | switch (dssdev->type) { | ||
66 | case OMAP_DISPLAY_TYPE_HDMI: | ||
67 | return DRM_MODE_CONNECTOR_HDMIA; | ||
68 | case OMAP_DISPLAY_TYPE_DPI: | ||
69 | if (!strcmp(dssdev->name, "dvi")) | ||
70 | return DRM_MODE_CONNECTOR_DVID; | ||
71 | /* fallthrough */ | ||
72 | default: | ||
73 | return DRM_MODE_CONNECTOR_Unknown; | ||
74 | } | ||
75 | } | ||
76 | |||
77 | static int omap_modeset_init(struct drm_device *dev) | ||
78 | { | ||
79 | struct omap_drm_private *priv = dev->dev_private; | ||
80 | struct omap_dss_device *dssdev = NULL; | ||
81 | int num_ovls = dss_feat_get_num_ovls(); | ||
82 | int id; | ||
83 | |||
84 | drm_mode_config_init(dev); | ||
85 | |||
86 | omap_drm_irq_install(dev); | ||
87 | |||
88 | /* | ||
89 | * Create private planes and CRTCs for the last NUM_CRTCs overlay | ||
90 | * plus manager: | ||
91 | */ | ||
92 | for (id = 0; id < min(num_crtc, num_ovls); id++) { | ||
93 | struct drm_plane *plane; | ||
94 | struct drm_crtc *crtc; | ||
95 | |||
96 | plane = omap_plane_init(dev, id, true); | ||
97 | crtc = omap_crtc_init(dev, plane, pipe2chan(id), id); | ||
98 | |||
99 | BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs)); | ||
100 | priv->crtcs[id] = crtc; | ||
101 | priv->num_crtcs++; | ||
102 | |||
103 | priv->planes[id] = plane; | ||
104 | priv->num_planes++; | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * Create normal planes for the remaining overlays: | ||
109 | */ | ||
110 | for (; id < num_ovls; id++) { | ||
111 | struct drm_plane *plane = omap_plane_init(dev, id, false); | ||
112 | |||
113 | BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes)); | ||
114 | priv->planes[priv->num_planes++] = plane; | ||
115 | } | ||
116 | |||
117 | for_each_dss_dev(dssdev) { | ||
118 | struct drm_connector *connector; | ||
119 | struct drm_encoder *encoder; | ||
120 | |||
121 | if (!dssdev->driver) { | ||
122 | dev_warn(dev->dev, "%s has no driver.. skipping it\n", | ||
123 | dssdev->name); | ||
124 | continue; | ||
125 | } | ||
126 | |||
127 | if (!(dssdev->driver->get_timings || | ||
128 | dssdev->driver->read_edid)) { | ||
129 | dev_warn(dev->dev, "%s driver does not support " | ||
130 | "get_timings or read_edid.. skipping it!\n", | ||
131 | dssdev->name); | ||
132 | continue; | ||
133 | } | ||
134 | |||
135 | encoder = omap_encoder_init(dev, dssdev); | ||
136 | |||
137 | if (!encoder) { | ||
138 | dev_err(dev->dev, "could not create encoder: %s\n", | ||
139 | dssdev->name); | ||
140 | return -ENOMEM; | ||
141 | } | ||
142 | |||
143 | connector = omap_connector_init(dev, | ||
144 | get_connector_type(dssdev), dssdev, encoder); | ||
145 | |||
146 | if (!connector) { | ||
147 | dev_err(dev->dev, "could not create connector: %s\n", | ||
148 | dssdev->name); | ||
149 | return -ENOMEM; | ||
150 | } | ||
151 | |||
152 | BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders)); | ||
153 | BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors)); | ||
154 | |||
155 | priv->encoders[priv->num_encoders++] = encoder; | ||
156 | priv->connectors[priv->num_connectors++] = connector; | ||
157 | |||
158 | drm_mode_connector_attach_encoder(connector, encoder); | ||
159 | |||
160 | /* figure out which crtc's we can connect the encoder to: */ | ||
161 | encoder->possible_crtcs = 0; | ||
162 | for (id = 0; id < priv->num_crtcs; id++) { | ||
163 | enum omap_dss_output_id supported_outputs = | ||
164 | dss_feat_get_supported_outputs(pipe2chan(id)); | ||
165 | if (supported_outputs & dssdev->output->id) | ||
166 | encoder->possible_crtcs |= (1 << id); | ||
167 | } | ||
168 | } | ||
169 | |||
170 | dev->mode_config.min_width = 32; | ||
171 | dev->mode_config.min_height = 32; | ||
172 | |||
173 | /* note: eventually will need some cpu_is_omapXYZ() type stuff here | ||
174 | * to fill in these limits properly on different OMAP generations.. | ||
175 | */ | ||
176 | dev->mode_config.max_width = 2048; | ||
177 | dev->mode_config.max_height = 2048; | ||
178 | |||
179 | dev->mode_config.funcs = &omap_mode_config_funcs; | ||
180 | |||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | static void omap_modeset_free(struct drm_device *dev) | ||
185 | { | ||
186 | drm_mode_config_cleanup(dev); | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * drm ioctl funcs | ||
191 | */ | ||
192 | |||
193 | |||
194 | static int ioctl_get_param(struct drm_device *dev, void *data, | ||
195 | struct drm_file *file_priv) | ||
196 | { | ||
197 | struct omap_drm_private *priv = dev->dev_private; | ||
198 | struct drm_omap_param *args = data; | ||
199 | |||
200 | DBG("%p: param=%llu", dev, args->param); | ||
201 | |||
202 | switch (args->param) { | ||
203 | case OMAP_PARAM_CHIPSET_ID: | ||
204 | args->value = priv->omaprev; | ||
205 | break; | ||
206 | default: | ||
207 | DBG("unknown parameter %lld", args->param); | ||
208 | return -EINVAL; | ||
209 | } | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static int ioctl_set_param(struct drm_device *dev, void *data, | ||
215 | struct drm_file *file_priv) | ||
216 | { | ||
217 | struct drm_omap_param *args = data; | ||
218 | |||
219 | switch (args->param) { | ||
220 | default: | ||
221 | DBG("unknown parameter %lld", args->param); | ||
222 | return -EINVAL; | ||
223 | } | ||
224 | |||
225 | return 0; | ||
226 | } | ||
227 | |||
228 | static int ioctl_gem_new(struct drm_device *dev, void *data, | ||
229 | struct drm_file *file_priv) | ||
230 | { | ||
231 | struct drm_omap_gem_new *args = data; | ||
232 | VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv, | ||
233 | args->size.bytes, args->flags); | ||
234 | return omap_gem_new_handle(dev, file_priv, args->size, | ||
235 | args->flags, &args->handle); | ||
236 | } | ||
237 | |||
238 | static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data, | ||
239 | struct drm_file *file_priv) | ||
240 | { | ||
241 | struct drm_omap_gem_cpu_prep *args = data; | ||
242 | struct drm_gem_object *obj; | ||
243 | int ret; | ||
244 | |||
245 | VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op); | ||
246 | |||
247 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
248 | if (!obj) | ||
249 | return -ENOENT; | ||
250 | |||
251 | ret = omap_gem_op_sync(obj, args->op); | ||
252 | |||
253 | if (!ret) | ||
254 | ret = omap_gem_op_start(obj, args->op); | ||
255 | |||
256 | drm_gem_object_unreference_unlocked(obj); | ||
257 | |||
258 | return ret; | ||
259 | } | ||
260 | |||
261 | static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data, | ||
262 | struct drm_file *file_priv) | ||
263 | { | ||
264 | struct drm_omap_gem_cpu_fini *args = data; | ||
265 | struct drm_gem_object *obj; | ||
266 | int ret; | ||
267 | |||
268 | VERB("%p:%p: handle=%d", dev, file_priv, args->handle); | ||
269 | |||
270 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
271 | if (!obj) | ||
272 | return -ENOENT; | ||
273 | |||
274 | /* XXX flushy, flushy */ | ||
275 | ret = omap_gem_op_finish(obj, args->op); | ||
279 | |||
280 | drm_gem_object_unreference_unlocked(obj); | ||
281 | |||
282 | return ret; | ||
283 | } | ||
284 | |||
285 | static int ioctl_gem_info(struct drm_device *dev, void *data, | ||
286 | struct drm_file *file_priv) | ||
287 | { | ||
288 | struct drm_omap_gem_info *args = data; | ||
289 | struct drm_gem_object *obj; | ||
290 | int ret = 0; | ||
291 | |||
292 | VERB("%p:%p: handle=%d", dev, file_priv, args->handle); | ||
293 | |||
294 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
295 | if (!obj) | ||
296 | return -ENOENT; | ||
297 | |||
298 | args->size = omap_gem_mmap_size(obj); | ||
299 | args->offset = omap_gem_mmap_offset(obj); | ||
300 | |||
301 | drm_gem_object_unreference_unlocked(obj); | ||
302 | |||
303 | return ret; | ||
304 | } | ||
305 | |||
306 | struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = { | ||
307 | DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH), | ||
308 | DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | ||
309 | DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH), | ||
310 | DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH), | ||
311 | DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH), | ||
312 | DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH), | ||
313 | }; | ||
314 | |||
315 | /* | ||
316 | * drm driver funcs | ||
317 | */ | ||
318 | |||
319 | /** | ||
320 | * load - setup chip and create an initial config | ||
321 | * @dev: DRM device | ||
322 | * @flags: startup flags | ||
323 | * | ||
324 | * The driver load routine has to do several things: | ||
325 | * - initialize the memory manager | ||
326 | * - allocate initial config memory | ||
327 | * - setup the DRM framebuffer with the allocated memory | ||
328 | */ | ||
329 | static int dev_load(struct drm_device *dev, unsigned long flags) | ||
330 | { | ||
331 | struct omap_drm_platform_data *pdata = dev->dev->platform_data; | ||
332 | struct omap_drm_private *priv; | ||
333 | int ret; | ||
334 | |||
335 | DBG("load: dev=%p", dev); | ||
336 | |||
337 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
338 | if (!priv) | ||
339 | return -ENOMEM; | ||
340 | |||
341 | priv->omaprev = pdata->omaprev; | ||
342 | |||
343 | dev->dev_private = priv; | ||
344 | |||
345 | priv->wq = alloc_ordered_workqueue("omapdrm", 0); | ||
346 | |||
347 | INIT_LIST_HEAD(&priv->obj_list); | ||
348 | |||
349 | omap_gem_init(dev); | ||
350 | |||
351 | ret = omap_modeset_init(dev); | ||
352 | if (ret) { | ||
353 | dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret); | ||
354 | dev->dev_private = NULL; | ||
355 | kfree(priv); | ||
356 | return ret; | ||
357 | } | ||
358 | |||
359 | ret = drm_vblank_init(dev, priv->num_crtcs); | ||
360 | if (ret) | ||
361 | dev_warn(dev->dev, "could not init vblank\n"); | ||
362 | |||
363 | priv->fbdev = omap_fbdev_init(dev); | ||
364 | if (!priv->fbdev) { | ||
365 | dev_warn(dev->dev, "omap_fbdev_init failed\n"); | ||
366 | /* well, limp along without an fbdev.. maybe X11 will work? */ | ||
367 | } | ||
368 | |||
369 | /* store off drm_device for use in pm ops */ | ||
370 | dev_set_drvdata(dev->dev, dev); | ||
371 | |||
372 | drm_kms_helper_poll_init(dev); | ||
373 | |||
374 | return 0; | ||
375 | } | ||
376 | |||
377 | static int dev_unload(struct drm_device *dev) | ||
378 | { | ||
379 | struct omap_drm_private *priv = dev->dev_private; | ||
380 | |||
381 | DBG("unload: dev=%p", dev); | ||
382 | |||
383 | drm_kms_helper_poll_fini(dev); | ||
384 | drm_vblank_cleanup(dev); | ||
385 | omap_drm_irq_uninstall(dev); | ||
386 | |||
387 | omap_fbdev_free(dev); | ||
388 | omap_modeset_free(dev); | ||
389 | omap_gem_deinit(dev); | ||
390 | |||
391 | flush_workqueue(priv->wq); | ||
392 | destroy_workqueue(priv->wq); | ||
393 | |||
394 | kfree(dev->dev_private); | ||
395 | dev->dev_private = NULL; | ||
396 | |||
397 | dev_set_drvdata(dev->dev, NULL); | ||
398 | |||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | static int dev_open(struct drm_device *dev, struct drm_file *file) | ||
403 | { | ||
404 | file->driver_priv = NULL; | ||
405 | |||
406 | DBG("open: dev=%p, file=%p", dev, file); | ||
407 | |||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | static int dev_firstopen(struct drm_device *dev) | ||
412 | { | ||
413 | DBG("firstopen: dev=%p", dev); | ||
414 | return 0; | ||
415 | } | ||
416 | |||
417 | /** | ||
418 | * lastclose - clean up after all DRM clients have exited | ||
419 | * @dev: DRM device | ||
420 | * | ||
421 | * Take care of cleaning up after all DRM clients have exited. In the | ||
422 | * mode setting case, we want to restore the kernel's initial mode (just | ||
423 | * in case the last client left us in a bad state). | ||
424 | */ | ||
425 | static void dev_lastclose(struct drm_device *dev) | ||
426 | { | ||
427 | int i; | ||
428 | |||
429 | /* we don't support vga-switcheroo.. so just make sure the fbdev | ||
430 | * mode is active | ||
431 | */ | ||
432 | struct omap_drm_private *priv = dev->dev_private; | ||
433 | int ret; | ||
434 | |||
435 | DBG("lastclose: dev=%p", dev); | ||
436 | |||
437 | if (priv->rotation_prop) { | ||
438 | /* need to restore default rotation state.. not sure | ||
439 | * if there is a cleaner way to restore properties to | ||
440 | * default state? Maybe a flag that properties should | ||
441 | * automatically be restored to default state on | ||
442 | * lastclose? | ||
443 | */ | ||
444 | for (i = 0; i < priv->num_crtcs; i++) { | ||
445 | drm_object_property_set_value(&priv->crtcs[i]->base, | ||
446 | priv->rotation_prop, 0); | ||
447 | } | ||
448 | |||
449 | for (i = 0; i < priv->num_planes; i++) { | ||
450 | drm_object_property_set_value(&priv->planes[i]->base, | ||
451 | priv->rotation_prop, 0); | ||
452 | } | ||
453 | } | ||
454 | |||
455 | drm_modeset_lock_all(dev); | ||
456 | ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev); | ||
457 | drm_modeset_unlock_all(dev); | ||
458 | if (ret) | ||
459 | DBG("failed to restore crtc mode"); | ||
460 | } | ||
461 | |||
462 | static void dev_preclose(struct drm_device *dev, struct drm_file *file) | ||
463 | { | ||
464 | DBG("preclose: dev=%p", dev); | ||
465 | } | ||
466 | |||
467 | static void dev_postclose(struct drm_device *dev, struct drm_file *file) | ||
468 | { | ||
469 | DBG("postclose: dev=%p, file=%p", dev, file); | ||
470 | } | ||
471 | |||
472 | static const struct vm_operations_struct omap_gem_vm_ops = { | ||
473 | .fault = omap_gem_fault, | ||
474 | .open = drm_gem_vm_open, | ||
475 | .close = drm_gem_vm_close, | ||
476 | }; | ||
477 | |||
478 | static const struct file_operations omapdriver_fops = { | ||
479 | .owner = THIS_MODULE, | ||
480 | .open = drm_open, | ||
481 | .unlocked_ioctl = drm_ioctl, | ||
482 | .release = drm_release, | ||
483 | .mmap = omap_gem_mmap, | ||
484 | .poll = drm_poll, | ||
485 | .fasync = drm_fasync, | ||
486 | .read = drm_read, | ||
487 | .llseek = noop_llseek, | ||
488 | }; | ||
489 | |||
490 | static struct drm_driver omap_drm_driver = { | ||
491 | .driver_features = | ||
492 | DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, | ||
493 | .load = dev_load, | ||
494 | .unload = dev_unload, | ||
495 | .open = dev_open, | ||
496 | .firstopen = dev_firstopen, | ||
497 | .lastclose = dev_lastclose, | ||
498 | .preclose = dev_preclose, | ||
499 | .postclose = dev_postclose, | ||
500 | .get_vblank_counter = drm_vblank_count, | ||
501 | .enable_vblank = omap_irq_enable_vblank, | ||
502 | .disable_vblank = omap_irq_disable_vblank, | ||
503 | .irq_preinstall = omap_irq_preinstall, | ||
504 | .irq_postinstall = omap_irq_postinstall, | ||
505 | .irq_uninstall = omap_irq_uninstall, | ||
506 | .irq_handler = omap_irq_handler, | ||
507 | #ifdef CONFIG_DEBUG_FS | ||
508 | .debugfs_init = omap_debugfs_init, | ||
509 | .debugfs_cleanup = omap_debugfs_cleanup, | ||
510 | #endif | ||
511 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
512 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
513 | .gem_prime_export = omap_gem_prime_export, | ||
514 | .gem_prime_import = omap_gem_prime_import, | ||
515 | .gem_init_object = omap_gem_init_object, | ||
516 | .gem_free_object = omap_gem_free_object, | ||
517 | .gem_vm_ops = &omap_gem_vm_ops, | ||
518 | .dumb_create = omap_gem_dumb_create, | ||
519 | .dumb_map_offset = omap_gem_dumb_map_offset, | ||
520 | .dumb_destroy = omap_gem_dumb_destroy, | ||
521 | .ioctls = ioctls, | ||
522 | .num_ioctls = DRM_OMAP_NUM_IOCTLS, | ||
523 | .fops = &omapdriver_fops, | ||
524 | .name = DRIVER_NAME, | ||
525 | .desc = DRIVER_DESC, | ||
526 | .date = DRIVER_DATE, | ||
527 | .major = DRIVER_MAJOR, | ||
528 | .minor = DRIVER_MINOR, | ||
529 | .patchlevel = DRIVER_PATCHLEVEL, | ||
530 | }; | ||
531 | |||
533 | static int pdev_suspend(struct platform_device *device, pm_message_t state) | ||
533 | { | ||
534 | DBG(""); | ||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | static int pdev_resume(struct platform_device *device) | ||
539 | { | ||
540 | DBG(""); | ||
541 | return 0; | ||
542 | } | ||
543 | |||
544 | static void pdev_shutdown(struct platform_device *device) | ||
545 | { | ||
546 | DBG(""); | ||
547 | } | ||
548 | |||
549 | static int pdev_probe(struct platform_device *device) | ||
550 | { | ||
551 | DBG("%s", device->name); | ||
552 | return drm_platform_init(&omap_drm_driver, device); | ||
553 | } | ||
554 | |||
555 | static int pdev_remove(struct platform_device *device) | ||
556 | { | ||
557 | DBG(""); | ||
558 | drm_platform_exit(&omap_drm_driver, device); | ||
559 | |||
560 | platform_driver_unregister(&omap_dmm_driver); | ||
561 | return 0; | ||
562 | } | ||
563 | |||
564 | #ifdef CONFIG_PM | ||
565 | static const struct dev_pm_ops omapdrm_pm_ops = { | ||
566 | .resume = omap_gem_resume, | ||
567 | }; | ||
568 | #endif | ||
569 | |||
570 | struct platform_driver pdev = { | ||
571 | .driver = { | ||
572 | .name = DRIVER_NAME, | ||
573 | .owner = THIS_MODULE, | ||
574 | #ifdef CONFIG_PM | ||
575 | .pm = &omapdrm_pm_ops, | ||
576 | #endif | ||
577 | }, | ||
578 | .probe = pdev_probe, | ||
579 | .remove = pdev_remove, | ||
580 | .suspend = pdev_suspend, | ||
581 | .resume = pdev_resume, | ||
582 | .shutdown = pdev_shutdown, | ||
583 | }; | ||
584 | |||
585 | static int __init omap_drm_init(void) | ||
586 | { | ||
587 | DBG("init"); | ||
588 | if (platform_driver_register(&omap_dmm_driver)) { | ||
589 | /* we can continue on without DMM.. so not fatal */ | ||
590 | pr_err("omapdrm: DMM registration failed\n"); | ||
591 | } | ||
592 | return platform_driver_register(&pdev); | ||
593 | } | ||
594 | |||
595 | static void __exit omap_drm_fini(void) | ||
596 | { | ||
597 | DBG("fini"); | ||
598 | platform_driver_unregister(&pdev); | ||
599 | } | ||
600 | |||
601 | /* need late_initcall() so we load after the dss drivers are loaded */ | ||
602 | late_initcall(omap_drm_init); | ||
603 | module_exit(omap_drm_fini); | ||
604 | |||
605 | MODULE_AUTHOR("Rob Clark <rob@ti.com>"); | ||
606 | MODULE_DESCRIPTION("OMAP DRM Display Driver"); | ||
607 | MODULE_ALIAS("platform:" DRIVER_NAME); | ||
608 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h new file mode 100644 index 000000000000..d4f997bb4ac0 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_drv.h | |||
@@ -0,0 +1,333 @@ | |||
1 | /* | ||
2 | * drivers/gpu/drm/omapdrm/omap_drv.h | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments | ||
5 | * Author: Rob Clark <rob@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #ifndef __OMAP_DRV_H__ | ||
21 | #define __OMAP_DRV_H__ | ||
22 | |||
23 | #include <video/omapdss.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <drm/drmP.h> | ||
27 | #include <drm/drm_crtc_helper.h> | ||
28 | #include <drm/omap_drm.h> | ||
29 | #include <linux/platform_data/omap_drm.h> | ||
30 | |||
31 | |||
32 | #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) | ||
33 | #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */ | ||
34 | |||
35 | #define MODULE_NAME "omapdrm" | ||
36 | |||
37 | /* max # of mapper-id's that can be assigned.. todo, come up with a better | ||
38 | * (but still inexpensive) way to store/access per-buffer mapper private | ||
39 | * data.. | ||
40 | */ | ||
41 | #define MAX_MAPPERS 2 | ||
42 | |||
43 | /* parameters which describe (unrotated) coordinates of scanout within a fb: */ | ||
44 | struct omap_drm_window { | ||
45 | uint32_t rotation; | ||
46 | int32_t crtc_x, crtc_y; /* signed because can be offscreen */ | ||
47 | uint32_t crtc_w, crtc_h; | ||
48 | uint32_t src_x, src_y; | ||
49 | uint32_t src_w, src_h; | ||
50 | }; | ||
51 | |||
52 | /* Once the GO bit is set, we can't make further updates to shadowed registers | ||
53 | * until it is cleared. So the various parts of the kms code that need | ||
54 | * to update shadowed registers queue up a pair of callbacks, pre_apply | ||
55 | * which is called before setting GO bit, and post_apply that is called | ||
56 | * after GO bit is cleared. The crtc manages the queuing, and everyone | ||
57 | * else goes thru omap_crtc_apply() using these callbacks so that the | ||
58 | * code which has to deal w/ GO bit state is centralized. | ||
59 | */ | ||
60 | struct omap_drm_apply { | ||
61 | struct list_head pending_node, queued_node; | ||
62 | bool queued; | ||
63 | void (*pre_apply)(struct omap_drm_apply *apply); | ||
64 | void (*post_apply)(struct omap_drm_apply *apply); | ||
65 | }; | ||
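/*
 * Illustrative sketch (not part of this patch; names are hypothetical):
 * a caller embeds an omap_drm_apply in its own update state and queues
 * it with omap_crtc_apply().  pre_apply programs the shadowed registers
 * before the GO bit is set; post_apply runs after the GO bit clears,
 * which is the point where old scanout buffers may safely be released:
 *
 *	struct my_update {
 *		struct omap_drm_apply apply;
 *		struct drm_framebuffer *old_fb;
 *	};
 *
 *	static void my_pre_apply(struct omap_drm_apply *apply)
 *	{
 *		struct my_update *u =
 *			container_of(apply, struct my_update, apply);
 *		... write shadowed registers from u ...
 *	}
 *
 *	static void my_post_apply(struct omap_drm_apply *apply)
 *	{
 *		... release u->old_fb, free u ...
 *	}
 *
 *	u->apply.pre_apply = my_pre_apply;
 *	u->apply.post_apply = my_post_apply;
 *	omap_crtc_apply(crtc, &u->apply);
 */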
66 | |||
67 | /* For transiently registering for different DSS irqs that various parts | ||
68 | * of the KMS code need during setup/configuration. These are not | ||
69 | * necessarily the same as what drm_vblank_get/put() are requesting, and | ||
70 | * the hysteresis in drm_vblank_put() is not necessarily desirable for | ||
71 | * internal housekeeping-related irq usage. | ||
72 | */ | ||
73 | struct omap_drm_irq { | ||
74 | struct list_head node; | ||
75 | uint32_t irqmask; | ||
76 | bool registered; | ||
77 | void (*irq)(struct omap_drm_irq *irq, uint32_t irqstatus); | ||
78 | }; | ||
79 | |||
80 | /* For KMS code that needs to wait for a certain # of IRQs: | ||
81 | */ | ||
82 | struct omap_irq_wait; | ||
83 | struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev, | ||
84 | uint32_t irqmask, int count); | ||
85 | int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait, | ||
86 | unsigned long timeout); | ||
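/*
 * Usage sketch (not part of this patch): block, with a timeout, until
 * a given number of irqs have fired, e.g. two vsyncs on a channel:
 *
 *	struct omap_irq_wait *wait = omap_irq_wait_init(dev,
 *			dispc_mgr_get_vsync_irq(channel), 2);
 *	... kick off the update ...
 *	ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
 */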
87 | |||
88 | struct omap_drm_private { | ||
89 | uint32_t omaprev; | ||
90 | |||
91 | unsigned int num_crtcs; | ||
92 | struct drm_crtc *crtcs[8]; | ||
93 | |||
94 | unsigned int num_planes; | ||
95 | struct drm_plane *planes[8]; | ||
96 | |||
97 | unsigned int num_encoders; | ||
98 | struct drm_encoder *encoders[8]; | ||
99 | |||
100 | unsigned int num_connectors; | ||
101 | struct drm_connector *connectors[8]; | ||
102 | |||
103 | struct drm_fb_helper *fbdev; | ||
104 | |||
105 | struct workqueue_struct *wq; | ||
106 | |||
107 | /* list of GEM objects: */ | ||
108 | struct list_head obj_list; | ||
109 | |||
110 | bool has_dmm; | ||
111 | |||
112 | /* properties: */ | ||
113 | struct drm_property *rotation_prop; | ||
114 | struct drm_property *zorder_prop; | ||
115 | |||
116 | /* irq handling: */ | ||
117 | struct list_head irq_list; /* list of omap_drm_irq */ | ||
118 | uint32_t vblank_mask; /* irq bits set for userspace vblank */ | ||
119 | struct omap_drm_irq error_handler; | ||
120 | }; | ||
121 | |||
122 | /* this should probably be in drm-core to standardize amongst drivers */ | ||
123 | #define DRM_ROTATE_0 0 | ||
124 | #define DRM_ROTATE_90 1 | ||
125 | #define DRM_ROTATE_180 2 | ||
126 | #define DRM_ROTATE_270 3 | ||
127 | #define DRM_REFLECT_X 4 | ||
128 | #define DRM_REFLECT_Y 5 | ||
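/* Note: these are bit positions, so e.g. a 90-degree rotation combined
 * with a horizontal flip is expressed as the property value
 * (BIT(DRM_ROTATE_90) | BIT(DRM_REFLECT_X)); compare the win->rotation
 * decoding in omap_framebuffer_update_scanout().
 */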
129 | |||
130 | #ifdef CONFIG_DEBUG_FS | ||
131 | int omap_debugfs_init(struct drm_minor *minor); | ||
132 | void omap_debugfs_cleanup(struct drm_minor *minor); | ||
133 | void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); | ||
134 | void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m); | ||
135 | void omap_gem_describe_objects(struct list_head *list, struct seq_file *m); | ||
136 | #endif | ||
137 | |||
138 | #ifdef CONFIG_PM | ||
139 | int omap_gem_resume(struct device *dev); | ||
140 | #endif | ||
141 | |||
142 | int omap_irq_enable_vblank(struct drm_device *dev, int crtc); | ||
143 | void omap_irq_disable_vblank(struct drm_device *dev, int crtc); | ||
144 | irqreturn_t omap_irq_handler(DRM_IRQ_ARGS); | ||
145 | void omap_irq_preinstall(struct drm_device *dev); | ||
146 | int omap_irq_postinstall(struct drm_device *dev); | ||
147 | void omap_irq_uninstall(struct drm_device *dev); | ||
148 | void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq); | ||
149 | void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq); | ||
150 | int omap_drm_irq_uninstall(struct drm_device *dev); | ||
151 | int omap_drm_irq_install(struct drm_device *dev); | ||
152 | |||
153 | struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev); | ||
154 | void omap_fbdev_free(struct drm_device *dev); | ||
155 | |||
156 | const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc); | ||
157 | enum omap_channel omap_crtc_channel(struct drm_crtc *crtc); | ||
158 | int omap_crtc_apply(struct drm_crtc *crtc, | ||
159 | struct omap_drm_apply *apply); | ||
160 | struct drm_crtc *omap_crtc_init(struct drm_device *dev, | ||
161 | struct drm_plane *plane, enum omap_channel channel, int id); | ||
162 | |||
163 | struct drm_plane *omap_plane_init(struct drm_device *dev, | ||
164 | int plane_id, bool private_plane); | ||
165 | int omap_plane_dpms(struct drm_plane *plane, int mode); | ||
166 | int omap_plane_mode_set(struct drm_plane *plane, | ||
167 | struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
168 | int crtc_x, int crtc_y, | ||
169 | unsigned int crtc_w, unsigned int crtc_h, | ||
170 | uint32_t src_x, uint32_t src_y, | ||
171 | uint32_t src_w, uint32_t src_h, | ||
172 | void (*fxn)(void *), void *arg); | ||
173 | void omap_plane_install_properties(struct drm_plane *plane, | ||
174 | struct drm_mode_object *obj); | ||
175 | int omap_plane_set_property(struct drm_plane *plane, | ||
176 | struct drm_property *property, uint64_t val); | ||
177 | |||
178 | struct drm_encoder *omap_encoder_init(struct drm_device *dev, | ||
179 | struct omap_dss_device *dssdev); | ||
180 | int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled); | ||
181 | int omap_encoder_update(struct drm_encoder *encoder, | ||
182 | struct omap_overlay_manager *mgr, | ||
183 | struct omap_video_timings *timings); | ||
184 | |||
185 | struct drm_connector *omap_connector_init(struct drm_device *dev, | ||
186 | int connector_type, struct omap_dss_device *dssdev, | ||
187 | struct drm_encoder *encoder); | ||
188 | struct drm_encoder *omap_connector_attached_encoder( | ||
189 | struct drm_connector *connector); | ||
190 | void omap_connector_flush(struct drm_connector *connector, | ||
191 | int x, int y, int w, int h); | ||
192 | |||
193 | void copy_timings_omap_to_drm(struct drm_display_mode *mode, | ||
194 | struct omap_video_timings *timings); | ||
195 | void copy_timings_drm_to_omap(struct omap_video_timings *timings, | ||
196 | struct drm_display_mode *mode); | ||
197 | |||
198 | uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats, | ||
199 | uint32_t max_formats, enum omap_color_mode supported_modes); | ||
200 | struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, | ||
201 | struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd); | ||
202 | struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, | ||
203 | struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); | ||
204 | struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p); | ||
205 | int omap_framebuffer_replace(struct drm_framebuffer *a, | ||
206 | struct drm_framebuffer *b, void *arg, | ||
207 | void (*unpin)(void *arg, struct drm_gem_object *bo)); | ||
208 | void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, | ||
209 | struct omap_drm_window *win, struct omap_overlay_info *info); | ||
210 | struct drm_connector *omap_framebuffer_get_next_connector( | ||
211 | struct drm_framebuffer *fb, struct drm_connector *from); | ||
212 | void omap_framebuffer_flush(struct drm_framebuffer *fb, | ||
213 | int x, int y, int w, int h); | ||
214 | |||
215 | void omap_gem_init(struct drm_device *dev); | ||
216 | void omap_gem_deinit(struct drm_device *dev); | ||
217 | |||
218 | struct drm_gem_object *omap_gem_new(struct drm_device *dev, | ||
219 | union omap_gem_size gsize, uint32_t flags); | ||
220 | int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, | ||
221 | union omap_gem_size gsize, uint32_t flags, uint32_t *handle); | ||
222 | void omap_gem_free_object(struct drm_gem_object *obj); | ||
223 | int omap_gem_init_object(struct drm_gem_object *obj); | ||
224 | void *omap_gem_vaddr(struct drm_gem_object *obj); | ||
225 | int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, | ||
226 | uint32_t handle, uint64_t *offset); | ||
227 | int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, | ||
228 | uint32_t handle); | ||
229 | int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, | ||
230 | struct drm_mode_create_dumb *args); | ||
231 | int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma); | ||
232 | int omap_gem_mmap_obj(struct drm_gem_object *obj, | ||
233 | struct vm_area_struct *vma); | ||
234 | int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | ||
235 | int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op); | ||
236 | int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op); | ||
237 | int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op); | ||
238 | int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op, | ||
239 | void (*fxn)(void *arg), void *arg); | ||
240 | int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll); | ||
241 | void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff); | ||
242 | void omap_gem_dma_sync(struct drm_gem_object *obj, | ||
243 | enum dma_data_direction dir); | ||
244 | int omap_gem_get_paddr(struct drm_gem_object *obj, | ||
245 | dma_addr_t *paddr, bool remap); | ||
246 | int omap_gem_put_paddr(struct drm_gem_object *obj); | ||
247 | int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages, | ||
248 | bool remap); | ||
249 | int omap_gem_put_pages(struct drm_gem_object *obj); | ||
250 | uint32_t omap_gem_flags(struct drm_gem_object *obj); | ||
251 | int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient, | ||
252 | int x, int y, dma_addr_t *paddr); | ||
253 | uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj); | ||
254 | size_t omap_gem_mmap_size(struct drm_gem_object *obj); | ||
255 | int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h); | ||
256 | int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient); | ||
257 | |||
258 | struct dma_buf *omap_gem_prime_export(struct drm_device *dev, | ||
259 | struct drm_gem_object *obj, int flags); | ||
260 | struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, | ||
261 | struct dma_buf *buffer); | ||
262 | |||
263 | static inline int align_pitch(int pitch, int width, int bpp) | ||
264 | { | ||
265 | int bytespp = (bpp + 7) / 8; | ||
266 | /* in case someone tries to feed us a completely bogus stride: */ | ||
267 | pitch = max(pitch, width * bytespp); | ||
268 | /* PVR needs alignment to 8 pixels.. right now that is the most | ||
269 | * restrictive stride requirement.. | ||
270 | */ | ||
271 | return ALIGN(pitch, 8 * bytespp); | ||
272 | } | ||
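/* Worked example: align_pitch(0, 1280, 16) gives bytespp = 2 and
 * pitch = max(0, 1280 * 2) = 2560, already a multiple of 8 * 2 = 16;
 * an odd width like 1281 would round 2562 up to 2576.
 */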
273 | |||
274 | static inline enum omap_channel pipe2chan(int pipe) | ||
275 | { | ||
276 | int num_mgrs = dss_feat_get_num_mgrs(); | ||
277 | |||
278 | /* | ||
279 | * We usually don't want to create a CRTC for each manager, | ||
280 | * at least not until we have a way to expose private planes | ||
281 | * to userspace. Otherwise there would not be enough video | ||
282 | * pipes left for drm planes. The higher #'d managers tend | ||
283 | * to have more features so start in reverse order. | ||
284 | */ | ||
285 | return num_mgrs - pipe - 1; | ||
286 | } | ||
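/* E.g. with num_mgrs == 3, pipe 0 maps to channel 2 and pipe 1 to
 * channel 1: CRTCs consume the highest-numbered (most featureful)
 * managers first, per the comment above.
 */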
287 | |||
288 | /* map crtc to vblank mask */ | ||
289 | static inline uint32_t pipe2vbl(int crtc) | ||
290 | { | ||
291 | enum omap_channel channel = pipe2chan(crtc); | ||
292 | return dispc_mgr_get_vsync_irq(channel); | ||
293 | } | ||
294 | |||
295 | static inline int crtc2pipe(struct drm_device *dev, struct drm_crtc *crtc) | ||
296 | { | ||
297 | struct omap_drm_private *priv = dev->dev_private; | ||
298 | int i; | ||
299 | |||
300 | for (i = 0; i < ARRAY_SIZE(priv->crtcs); i++) | ||
301 | if (priv->crtcs[i] == crtc) | ||
302 | return i; | ||
303 | |||
304 | BUG(); /* bogus CRTC ptr */ | ||
305 | return -1; | ||
306 | } | ||
307 | |||
308 | /* should these be made into common util helpers? | ||
309 | */ | ||
310 | |||
311 | static inline int objects_lookup(struct drm_device *dev, | ||
312 | struct drm_file *filp, uint32_t pixel_format, | ||
313 | struct drm_gem_object **bos, uint32_t *handles) | ||
314 | { | ||
315 | int i, n = drm_format_num_planes(pixel_format); | ||
316 | |||
317 | for (i = 0; i < n; i++) { | ||
318 | bos[i] = drm_gem_object_lookup(dev, filp, handles[i]); | ||
319 | if (!bos[i]) | ||
320 | goto fail; | ||
321 | |||
322 | } | ||
323 | |||
324 | return 0; | ||
325 | |||
326 | fail: | ||
327 | while (--i >= 0) | ||
328 | drm_gem_object_unreference_unlocked(bos[i]); | ||
329 | |||
330 | return -ENOENT; | ||
331 | } | ||
332 | |||
333 | #endif /* __OMAP_DRV_H__ */ | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c new file mode 100644 index 000000000000..21d126d0317e --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c | |||
@@ -0,0 +1,168 @@ | |||
1 | /* | ||
2 | * drivers/gpu/drm/omapdrm/omap_encoder.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments | ||
5 | * Author: Rob Clark <rob@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include "omap_drv.h" | ||
21 | |||
22 | #include "drm_crtc.h" | ||
23 | #include "drm_crtc_helper.h" | ||
24 | |||
25 | #include <linux/list.h> | ||
26 | |||
27 | |||
28 | /* | ||
29 | * encoder funcs | ||
30 | */ | ||
31 | |||
32 | #define to_omap_encoder(x) container_of(x, struct omap_encoder, base) | ||
33 | |||
34 | /* The encoder and connector both map to the same dssdev.. the encoder | ||
35 | * handles the 'active' parts, i.e. anything that modifies the state | ||
36 | * of the hw, and the connector handles the 'read-only' parts, like | ||
37 | * detecting connection and reading edid. | ||
38 | */ | ||
39 | struct omap_encoder { | ||
40 | struct drm_encoder base; | ||
41 | struct omap_dss_device *dssdev; | ||
42 | }; | ||
43 | |||
44 | static void omap_encoder_destroy(struct drm_encoder *encoder) | ||
45 | { | ||
46 | struct omap_encoder *omap_encoder = to_omap_encoder(encoder); | ||
47 | drm_encoder_cleanup(encoder); | ||
48 | kfree(omap_encoder); | ||
49 | } | ||
50 | |||
51 | static const struct drm_encoder_funcs omap_encoder_funcs = { | ||
52 | .destroy = omap_encoder_destroy, | ||
53 | }; | ||
54 | |||
55 | /* | ||
56 | * drm_crtc_helper_set_mode() doesn't really call us in the right | ||
57 | * order.. the easiest way to work around this for now is to make all | ||
58 | * the encoder helpers no-ops and have the omap_crtc code take care | ||
59 | * of the sequencing and call us at the right points. | ||
60 | * | ||
61 | * Eventually, to handle connecting CRTCs to different encoders properly, | ||
62 | * either the CRTC helpers need to change or we need to replace | ||
63 | * drm_crtc_helper_set_mode(), but let's wait until atomic modeset for | ||
64 | * that. | ||
65 | */ | ||
66 | |||
67 | static void omap_encoder_dpms(struct drm_encoder *encoder, int mode) | ||
68 | { | ||
69 | } | ||
70 | |||
71 | static bool omap_encoder_mode_fixup(struct drm_encoder *encoder, | ||
72 | const struct drm_display_mode *mode, | ||
73 | struct drm_display_mode *adjusted_mode) | ||
74 | { | ||
75 | return true; | ||
76 | } | ||
77 | |||
78 | static void omap_encoder_mode_set(struct drm_encoder *encoder, | ||
79 | struct drm_display_mode *mode, | ||
80 | struct drm_display_mode *adjusted_mode) | ||
81 | { | ||
82 | } | ||
83 | |||
84 | static void omap_encoder_prepare(struct drm_encoder *encoder) | ||
85 | { | ||
86 | } | ||
87 | |||
88 | static void omap_encoder_commit(struct drm_encoder *encoder) | ||
89 | { | ||
90 | } | ||
91 | |||
92 | static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = { | ||
93 | .dpms = omap_encoder_dpms, | ||
94 | .mode_fixup = omap_encoder_mode_fixup, | ||
95 | .mode_set = omap_encoder_mode_set, | ||
96 | .prepare = omap_encoder_prepare, | ||
97 | .commit = omap_encoder_commit, | ||
98 | }; | ||
99 | |||
100 | /* | ||
101 | * Instead of relying on the helpers for modeset, the omap_crtc code | ||
102 | * calls these functions in the proper sequence. | ||
103 | */ | ||
104 | |||
105 | int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled) | ||
106 | { | ||
107 | struct omap_encoder *omap_encoder = to_omap_encoder(encoder); | ||
108 | struct omap_dss_device *dssdev = omap_encoder->dssdev; | ||
109 | struct omap_dss_driver *dssdrv = dssdev->driver; | ||
110 | |||
111 | if (enabled) { | ||
112 | return dssdrv->enable(dssdev); | ||
113 | } else { | ||
114 | dssdrv->disable(dssdev); | ||
115 | return 0; | ||
116 | } | ||
117 | } | ||
118 | |||
119 | int omap_encoder_update(struct drm_encoder *encoder, | ||
120 | struct omap_overlay_manager *mgr, | ||
121 | struct omap_video_timings *timings) | ||
122 | { | ||
123 | struct drm_device *dev = encoder->dev; | ||
124 | struct omap_encoder *omap_encoder = to_omap_encoder(encoder); | ||
125 | struct omap_dss_device *dssdev = omap_encoder->dssdev; | ||
126 | struct omap_dss_driver *dssdrv = dssdev->driver; | ||
127 | int ret; | ||
128 | |||
129 | dssdev->output->manager = mgr; | ||
130 | |||
131 | ret = dssdrv->check_timings(dssdev, timings); | ||
132 | if (ret) { | ||
133 | dev_err(dev->dev, "could not set timings: %d\n", ret); | ||
134 | return ret; | ||
135 | } | ||
136 | |||
137 | dssdrv->set_timings(dssdev, timings); | ||
138 | |||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | /* initialize encoder */ | ||
143 | struct drm_encoder *omap_encoder_init(struct drm_device *dev, | ||
144 | struct omap_dss_device *dssdev) | ||
145 | { | ||
146 | struct drm_encoder *encoder = NULL; | ||
147 | struct omap_encoder *omap_encoder; | ||
148 | |||
149 | omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL); | ||
150 | if (!omap_encoder) | ||
151 | goto fail; | ||
152 | |||
153 | omap_encoder->dssdev = dssdev; | ||
154 | |||
155 | encoder = &omap_encoder->base; | ||
156 | |||
157 | drm_encoder_init(dev, encoder, &omap_encoder_funcs, | ||
158 | DRM_MODE_ENCODER_TMDS); | ||
159 | drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs); | ||
160 | |||
161 | return encoder; | ||
162 | |||
163 | fail: | ||
164 | if (encoder) | ||
165 | omap_encoder_destroy(encoder); | ||
166 | |||
167 | return NULL; | ||
168 | } | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c new file mode 100644 index 000000000000..8031402e7951 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_fb.c | |||
@@ -0,0 +1,471 @@ | |||
1 | /* | ||
2 | * drivers/gpu/drm/omapdrm/omap_fb.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments | ||
5 | * Author: Rob Clark <rob@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include "omap_drv.h" | ||
21 | #include "omap_dmm_tiler.h" | ||
22 | |||
23 | #include "drm_crtc.h" | ||
24 | #include "drm_crtc_helper.h" | ||
25 | |||
26 | /* | ||
27 | * framebuffer funcs | ||
28 | */ | ||
29 | |||
30 | /* per-format info: */ | ||
31 | struct format { | ||
32 | enum omap_color_mode dss_format; | ||
33 | uint32_t pixel_format; | ||
34 | struct { | ||
35 | int stride_bpp; /* this times width is stride */ | ||
36 | int sub_y; /* sub-sample in y dimension */ | ||
37 | } planes[4]; | ||
38 | bool yuv; | ||
39 | }; | ||
40 | |||
41 | static const struct format formats[] = { | ||
42 | /* 16bpp [A]RGB: */ | ||
43 | { OMAP_DSS_COLOR_RGB16, DRM_FORMAT_RGB565, {{2, 1}}, false }, /* RGB16-565 */ | ||
44 | { OMAP_DSS_COLOR_RGB12U, DRM_FORMAT_RGBX4444, {{2, 1}}, false }, /* RGB12x-4444 */ | ||
45 | { OMAP_DSS_COLOR_RGBX16, DRM_FORMAT_XRGB4444, {{2, 1}}, false }, /* xRGB12-4444 */ | ||
46 | { OMAP_DSS_COLOR_RGBA16, DRM_FORMAT_RGBA4444, {{2, 1}}, false }, /* RGBA12-4444 */ | ||
47 | { OMAP_DSS_COLOR_ARGB16, DRM_FORMAT_ARGB4444, {{2, 1}}, false }, /* ARGB16-4444 */ | ||
48 | { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555, {{2, 1}}, false }, /* xRGB15-1555 */ | ||
49 | { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555, {{2, 1}}, false }, /* ARGB16-1555 */ | ||
50 | /* 24bpp RGB: */ | ||
51 | { OMAP_DSS_COLOR_RGB24P, DRM_FORMAT_RGB888, {{3, 1}}, false }, /* RGB24-888 */ | ||
52 | /* 32bpp [A]RGB: */ | ||
53 | { OMAP_DSS_COLOR_RGBX32, DRM_FORMAT_RGBX8888, {{4, 1}}, false }, /* RGBx24-8888 */ | ||
54 | { OMAP_DSS_COLOR_RGB24U, DRM_FORMAT_XRGB8888, {{4, 1}}, false }, /* xRGB24-8888 */ | ||
55 | { OMAP_DSS_COLOR_RGBA32, DRM_FORMAT_RGBA8888, {{4, 1}}, false }, /* RGBA32-8888 */ | ||
56 | { OMAP_DSS_COLOR_ARGB32, DRM_FORMAT_ARGB8888, {{4, 1}}, false }, /* ARGB32-8888 */ | ||
57 | /* YUV: */ | ||
58 | { OMAP_DSS_COLOR_NV12, DRM_FORMAT_NV12, {{1, 1}, {1, 2}}, true }, | ||
59 | { OMAP_DSS_COLOR_YUV2, DRM_FORMAT_YUYV, {{2, 1}}, true }, | ||
60 | { OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY, {{2, 1}}, true }, | ||
61 | }; | ||
62 | |||
63 | /* convert from overlay's pixel formats bitmask to an array of fourcc's */ | ||
64 | uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats, | ||
65 | uint32_t max_formats, enum omap_color_mode supported_modes) | ||
66 | { | ||
67 | uint32_t nformats = 0; | ||
68 | int i = 0; | ||
69 | |||
70 | for (i = 0; i < ARRAY_SIZE(formats) && nformats < max_formats; i++) | ||
71 | if (formats[i].dss_format & supported_modes) | ||
72 | pixel_formats[nformats++] = formats[i].pixel_format; | ||
73 | |||
74 | return nformats; | ||
75 | } | ||
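/*
 * Usage sketch (hypothetical caller, not part of this patch): plane
 * init code can build the fourcc list that drm_plane_init() expects:
 *
 *	uint32_t formats[32];
 *	uint32_t nformats = omap_framebuffer_get_formats(formats,
 *			ARRAY_SIZE(formats), ovl->supported_modes);
 */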
76 | |||
77 | /* per-plane info for the fb: */ | ||
78 | struct plane { | ||
79 | struct drm_gem_object *bo; | ||
80 | uint32_t pitch; | ||
81 | uint32_t offset; | ||
82 | dma_addr_t paddr; | ||
83 | }; | ||
84 | |||
85 | #define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base) | ||
86 | |||
87 | struct omap_framebuffer { | ||
88 | struct drm_framebuffer base; | ||
89 | const struct format *format; | ||
90 | struct plane planes[4]; | ||
91 | }; | ||
92 | |||
93 | static int omap_framebuffer_create_handle(struct drm_framebuffer *fb, | ||
94 | struct drm_file *file_priv, | ||
95 | unsigned int *handle) | ||
96 | { | ||
97 | struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); | ||
98 | return drm_gem_handle_create(file_priv, | ||
99 | omap_fb->planes[0].bo, handle); | ||
100 | } | ||
101 | |||
102 | static void omap_framebuffer_destroy(struct drm_framebuffer *fb) | ||
103 | { | ||
104 | struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); | ||
105 | int i, n = drm_format_num_planes(fb->pixel_format); | ||
106 | |||
107 | DBG("destroy: FB ID: %d (%p)", fb->base.id, fb); | ||
108 | |||
109 | drm_framebuffer_cleanup(fb); | ||
110 | |||
111 | for (i = 0; i < n; i++) { | ||
112 | struct plane *plane = &omap_fb->planes[i]; | ||
113 | if (plane->bo) | ||
114 | drm_gem_object_unreference_unlocked(plane->bo); | ||
115 | } | ||
116 | |||
117 | kfree(omap_fb); | ||
118 | } | ||
119 | |||
120 | static int omap_framebuffer_dirty(struct drm_framebuffer *fb, | ||
121 | struct drm_file *file_priv, unsigned flags, unsigned color, | ||
122 | struct drm_clip_rect *clips, unsigned num_clips) | ||
123 | { | ||
124 | int i; | ||
125 | |||
126 | for (i = 0; i < num_clips; i++) { | ||
127 | omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1, | ||
128 | clips[i].x2 - clips[i].x1, | ||
129 | clips[i].y2 - clips[i].y1); | ||
130 | } | ||
131 | |||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | static const struct drm_framebuffer_funcs omap_framebuffer_funcs = { | ||
136 | .create_handle = omap_framebuffer_create_handle, | ||
137 | .destroy = omap_framebuffer_destroy, | ||
138 | .dirty = omap_framebuffer_dirty, | ||
139 | }; | ||
140 | |||
141 | static uint32_t get_linear_addr(struct plane *plane, | ||
142 | const struct format *format, int n, int x, int y) | ||
143 | { | ||
144 | uint32_t offset; | ||
145 | |||
146 | offset = plane->offset + | ||
147 | (x * format->planes[n].stride_bpp) + | ||
148 | (y * plane->pitch / format->planes[n].sub_y); | ||
149 | |||
150 | return plane->paddr + offset; | ||
151 | } | ||
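/* Worked example: for the NV12 UV plane (n == 1, stride_bpp == 1,
 * sub_y == 2) this yields offset + x + y * pitch / 2, i.e. the chroma
 * plane advances half a line of bytes per source line, as expected
 * for 2x2-subsampled chroma.
 */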
152 | |||
153 | /* update ovl info for scanout, handles cases of multi-planar fb's, etc. | ||
154 | */ | ||
155 | void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, | ||
156 | struct omap_drm_window *win, struct omap_overlay_info *info) | ||
157 | { | ||
158 | struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); | ||
159 | const struct format *format = omap_fb->format; | ||
160 | struct plane *plane = &omap_fb->planes[0]; | ||
161 | uint32_t x, y, orient = 0; | ||
162 | |||
163 | info->color_mode = format->dss_format; | ||
164 | |||
165 | info->pos_x = win->crtc_x; | ||
166 | info->pos_y = win->crtc_y; | ||
167 | info->out_width = win->crtc_w; | ||
168 | info->out_height = win->crtc_h; | ||
169 | info->width = win->src_w; | ||
170 | info->height = win->src_h; | ||
171 | |||
172 | x = win->src_x; | ||
173 | y = win->src_y; | ||
174 | |||
175 | if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) { | ||
176 | uint32_t w = win->src_w; | ||
177 | uint32_t h = win->src_h; | ||
178 | |||
179 | switch (win->rotation & 0xf) { | ||
180 | default: | ||
181 | dev_err(fb->dev->dev, "invalid rotation: %02x\n", | ||
182 | (uint32_t)win->rotation); | ||
183 | /* fallthru to default to no rotation */ | ||
184 | case 0: | ||
185 | case BIT(DRM_ROTATE_0): | ||
186 | orient = 0; | ||
187 | break; | ||
188 | case BIT(DRM_ROTATE_90): | ||
189 | orient = MASK_XY_FLIP | MASK_X_INVERT; | ||
190 | break; | ||
191 | case BIT(DRM_ROTATE_180): | ||
192 | orient = MASK_X_INVERT | MASK_Y_INVERT; | ||
193 | break; | ||
194 | case BIT(DRM_ROTATE_270): | ||
195 | orient = MASK_XY_FLIP | MASK_Y_INVERT; | ||
196 | break; | ||
197 | } | ||
198 | |||
199 | if (win->rotation & BIT(DRM_REFLECT_X)) | ||
200 | orient ^= MASK_X_INVERT; | ||
201 | |||
202 | if (win->rotation & BIT(DRM_REFLECT_Y)) | ||
203 | orient ^= MASK_Y_INVERT; | ||
204 | |||
205 | /* adjust x,y offset for flip/invert: */ | ||
206 | if (orient & MASK_XY_FLIP) | ||
207 | swap(w, h); | ||
208 | if (orient & MASK_Y_INVERT) | ||
209 | y += h - 1; | ||
210 | if (orient & MASK_X_INVERT) | ||
211 | x += w - 1; | ||
212 | |||
213 | omap_gem_rotated_paddr(plane->bo, orient, x, y, &info->paddr); | ||
214 | info->rotation_type = OMAP_DSS_ROT_TILER; | ||
215 | info->screen_width = omap_gem_tiled_stride(plane->bo, orient); | ||
216 | } else { | ||
217 | info->paddr = get_linear_addr(plane, format, 0, x, y); | ||
218 | info->rotation_type = OMAP_DSS_ROT_DMA; | ||
219 | info->screen_width = plane->pitch; | ||
220 | } | ||
221 | |||
222 | /* convert to pixels: */ | ||
223 | info->screen_width /= format->planes[0].stride_bpp; | ||
224 | |||
225 | if (format->dss_format == OMAP_DSS_COLOR_NV12) { | ||
226 | plane = &omap_fb->planes[1]; | ||
227 | |||
228 | if (info->rotation_type == OMAP_DSS_ROT_TILER) { | ||
229 | WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED)); | ||
230 | omap_gem_rotated_paddr(plane->bo, orient, | ||
231 | x/2, y/2, &info->p_uv_addr); | ||
232 | } else { | ||
233 | info->p_uv_addr = get_linear_addr(plane, format, 1, x, y); | ||
234 | } | ||
235 | } else { | ||
236 | info->p_uv_addr = 0; | ||
237 | } | ||
238 | } | ||
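/* To illustrate the offset fixups above: for BIT(DRM_ROTATE_90),
 * orient becomes MASK_XY_FLIP | MASK_X_INVERT, so w and h are swapped
 * first and x is then advanced by the (swapped) w - 1, pointing TILER
 * at the corner pixel it must start walking from.
 */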
239 | |||
240 | /* Called to unpin 'a' (if not NULL) and pin 'b' (if not NULL). Note that | ||
241 | * buffers to unpin are just pushed to the unpin fifo so that the | ||
242 | * caller can defer the actual unpin until vblank. | ||
243 | * | ||
244 | * Note if this fails (ie. something went very wrong!), all buffers are | ||
245 | * unpinned, and the caller disables the overlay. We could have tried | ||
246 | * to revert back to the previous set of pinned buffers but if things are | ||
247 | * hosed there is no guarantee that would succeed. | ||
248 | */ | ||
249 | int omap_framebuffer_replace(struct drm_framebuffer *a, | ||
250 | struct drm_framebuffer *b, void *arg, | ||
251 | void (*unpin)(void *arg, struct drm_gem_object *bo)) | ||
252 | { | ||
253 | int ret = 0, i, na, nb; | ||
254 | struct omap_framebuffer *ofba = to_omap_framebuffer(a); | ||
255 | struct omap_framebuffer *ofbb = to_omap_framebuffer(b); | ||
256 | uint32_t pinned_mask = 0; | ||
257 | |||
258 | na = a ? drm_format_num_planes(a->pixel_format) : 0; | ||
259 | nb = b ? drm_format_num_planes(b->pixel_format) : 0; | ||
260 | |||
261 | for (i = 0; i < max(na, nb); i++) { | ||
262 | struct plane *pa, *pb; | ||
263 | |||
264 | pa = (i < na) ? &ofba->planes[i] : NULL; | ||
265 | pb = (i < nb) ? &ofbb->planes[i] : NULL; | ||
266 | |||
267 | if (pa) | ||
268 | unpin(arg, pa->bo); | ||
269 | |||
270 | if (pb && !ret) { | ||
271 | ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true); | ||
272 | if (!ret) { | ||
273 | omap_gem_dma_sync(pb->bo, DMA_TO_DEVICE); | ||
274 | pinned_mask |= (1 << i); | ||
275 | } | ||
276 | } | ||
277 | } | ||
278 | |||
279 | if (ret) { | ||
280 | /* something went wrong.. unpin what has been pinned */ | ||
281 | for (i = 0; i < nb; i++) { | ||
282 | if (pinned_mask & (1 << i)) { | ||
283 | struct plane *pb = &ofbb->planes[i]; | ||
284 | unpin(arg, pb->bo); | ||
285 | } | ||
286 | } | ||
287 | } | ||
288 | |||
289 | return ret; | ||
290 | } | ||
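/*
 * Sketch of a deferred-unpin callback as described above (hypothetical
 * names; the real fifo lives in the caller):
 *
 *	static void defer_unpin(void *arg, struct drm_gem_object *bo)
 *	{
 *		struct my_crtc *c = arg;
 *		drm_gem_object_reference(bo);
 *		kfifo_put(&c->unpin_fifo, &bo);
 *	}
 *
 * with the fifo drained after vblank, calling omap_gem_put_paddr() and
 * dropping the reference on each entry.
 */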
291 | |||
292 | struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p) | ||
293 | { | ||
294 | struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); | ||
295 | if (p >= drm_format_num_planes(fb->pixel_format)) | ||
296 | return NULL; | ||
297 | return omap_fb->planes[p].bo; | ||
298 | } | ||
299 | |||
300 | /* iterate thru all the connectors, returning ones that are attached | ||
301 | * to the same fb.. | ||
302 | */ | ||
303 | struct drm_connector *omap_framebuffer_get_next_connector( | ||
304 | struct drm_framebuffer *fb, struct drm_connector *from) | ||
305 | { | ||
306 | struct drm_device *dev = fb->dev; | ||
307 | struct list_head *connector_list = &dev->mode_config.connector_list; | ||
308 | struct drm_connector *connector = from; | ||
309 | |||
310 | if (!from) | ||
311 | return list_first_entry(connector_list, typeof(*from), head); | ||
312 | |||
313 | list_for_each_entry_from(connector, connector_list, head) { | ||
314 | if (connector != from) { | ||
315 | struct drm_encoder *encoder = connector->encoder; | ||
316 | struct drm_crtc *crtc = encoder ? encoder->crtc : NULL; | ||
317 | if (crtc && crtc->fb == fb) | ||
318 | return connector; | ||
319 | |||
320 | } | ||
321 | } | ||
322 | |||
323 | return NULL; | ||
324 | } | ||
325 | |||
326 | /* flush an area of the framebuffer (in the case of a manual update display | ||
327 | * that is not automatically flushed) | ||
328 | */ | ||
329 | void omap_framebuffer_flush(struct drm_framebuffer *fb, | ||
330 | int x, int y, int w, int h) | ||
331 | { | ||
332 | struct drm_connector *connector = NULL; | ||
333 | |||
334 | VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb); | ||
335 | |||
336 | while ((connector = omap_framebuffer_get_next_connector(fb, connector))) { | ||
337 | /* only consider connectors that are part of a chain */ | ||
338 | if (connector->encoder && connector->encoder->crtc) { | ||
339 | /* TODO: maybe this should propagate thru the crtc who | ||
340 | * could do the coordinate translation.. | ||
341 | */ | ||
342 | struct drm_crtc *crtc = connector->encoder->crtc; | ||
343 | int cx = max(0, x - crtc->x); | ||
344 | int cy = max(0, y - crtc->y); | ||
345 | int cw = w + (x - crtc->x) - cx; | ||
346 | int ch = h + (y - crtc->y) - cy; | ||
347 | |||
348 | omap_connector_flush(connector, cx, cy, cw, ch); | ||
349 | } | ||
350 | } | ||
351 | } | ||
352 | |||
353 | #ifdef CONFIG_DEBUG_FS | ||
354 | void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) | ||
355 | { | ||
356 | struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); | ||
357 | int i, n = drm_format_num_planes(fb->pixel_format); | ||
358 | |||
359 | seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height, | ||
360 | (char *)&fb->pixel_format); | ||
361 | |||
362 | for (i = 0; i < n; i++) { | ||
363 | struct plane *plane = &omap_fb->planes[i]; | ||
364 | seq_printf(m, " %d: offset=%d pitch=%d, obj: ", | ||
365 | i, plane->offset, plane->pitch); | ||
366 | omap_gem_describe(plane->bo, m); | ||
367 | } | ||
368 | } | ||
369 | #endif | ||
370 | |||
371 | struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, | ||
372 | struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd) | ||
373 | { | ||
374 | struct drm_gem_object *bos[4]; | ||
375 | struct drm_framebuffer *fb; | ||
376 | int ret; | ||
377 | |||
378 | ret = objects_lookup(dev, file, mode_cmd->pixel_format, | ||
379 | bos, mode_cmd->handles); | ||
380 | if (ret) | ||
381 | return ERR_PTR(ret); | ||
382 | |||
383 | fb = omap_framebuffer_init(dev, mode_cmd, bos); | ||
384 | if (IS_ERR(fb)) { | ||
385 | int i, n = drm_format_num_planes(mode_cmd->pixel_format); | ||
386 | for (i = 0; i < n; i++) | ||
387 | drm_gem_object_unreference_unlocked(bos[i]); | ||
388 | return fb; | ||
389 | } | ||
390 | return fb; | ||
391 | } | ||
392 | |||
393 | struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, | ||
394 | struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) | ||
395 | { | ||
396 | struct omap_framebuffer *omap_fb; | ||
397 | struct drm_framebuffer *fb = NULL; | ||
398 | const struct format *format = NULL; | ||
399 | int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format); | ||
400 | |||
401 | DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)", | ||
402 | dev, mode_cmd, mode_cmd->width, mode_cmd->height, | ||
403 | (char *)&mode_cmd->pixel_format); | ||
404 | |||
405 | for (i = 0; i < ARRAY_SIZE(formats); i++) { | ||
406 | if (formats[i].pixel_format == mode_cmd->pixel_format) { | ||
407 | format = &formats[i]; | ||
408 | break; | ||
409 | } | ||
410 | } | ||
411 | |||
412 | if (!format) { | ||
413 | dev_err(dev->dev, "unsupported pixel format: %4.4s\n", | ||
414 | (char *)&mode_cmd->pixel_format); | ||
415 | ret = -EINVAL; | ||
416 | goto fail; | ||
417 | } | ||
418 | |||
419 | omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL); | ||
420 | if (!omap_fb) { | ||
421 | ret = -ENOMEM; | ||
422 | goto fail; | ||
423 | } | ||
424 | |||
425 | fb = &omap_fb->base; | ||
426 | omap_fb->format = format; | ||
427 | |||
428 | for (i = 0; i < n; i++) { | ||
429 | struct plane *plane = &omap_fb->planes[i]; | ||
430 | int size, pitch = mode_cmd->pitches[i]; | ||
431 | |||
432 | if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) { | ||
433 | dev_err(dev->dev, "provided buffer pitch is too small! %d < %d\n", | ||
434 | pitch, mode_cmd->width * format->planes[i].stride_bpp); | ||
435 | ret = -EINVAL; | ||
436 | goto fail; | ||
437 | } | ||
438 | |||
439 | size = pitch * mode_cmd->height / format->planes[i].sub_y; | ||
440 | |||
441 | if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) { | ||
442 | dev_err(dev->dev, "provided buffer object is too small! %d < %d\n", | ||
443 | bos[i]->size - mode_cmd->offsets[i], size); | ||
444 | ret = -EINVAL; | ||
445 | goto fail; | ||
446 | } | ||
447 | |||
448 | plane->bo = bos[i]; | ||
449 | plane->offset = mode_cmd->offsets[i]; | ||
450 | plane->pitch = pitch; | ||
451 | plane->paddr = 0; | ||
452 | } | ||
453 | |||
454 | drm_helper_mode_fill_fb_struct(fb, mode_cmd); | ||
455 | |||
456 | ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs); | ||
457 | if (ret) { | ||
458 | dev_err(dev->dev, "framebuffer init failed: %d\n", ret); | ||
459 | goto fail; | ||
460 | } | ||
461 | |||
462 | DBG("create: FB ID: %d (%p)", fb->base.id, fb); | ||
463 | |||
464 | return fb; | ||
465 | |||
466 | fail: | ||
467 | if (fb) | ||
468 | omap_framebuffer_destroy(fb); | ||
469 | |||
470 | return ERR_PTR(ret); | ||
471 | } | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c new file mode 100644 index 000000000000..b11ce609fcc2 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c | |||
@@ -0,0 +1,397 @@ | |||
1 | /* | ||
2 | * drivers/gpu/drm/omapdrm/omap_fbdev.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments | ||
5 | * Author: Rob Clark <rob@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include "omap_drv.h" | ||
21 | |||
22 | #include "drm_crtc.h" | ||
23 | #include "drm_fb_helper.h" | ||
24 | |||
25 | MODULE_PARM_DESC(ywrap, "Enable ywrap scrolling (omap44xx and later, default 'y')"); | ||
26 | static bool ywrap_enabled = true; | ||
27 | module_param_named(ywrap, ywrap_enabled, bool, 0644); | ||
28 | |||
29 | /* | ||
30 | * fbdev funcs, to implement legacy fbdev interface on top of drm driver | ||
31 | */ | ||
32 | |||
33 | #define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base) | ||
34 | |||
35 | struct omap_fbdev { | ||
36 | struct drm_fb_helper base; | ||
37 | struct drm_framebuffer *fb; | ||
38 | struct drm_gem_object *bo; | ||
39 | bool ywrap_enabled; | ||
40 | |||
41 | /* for deferred dmm roll when getting called in atomic ctx */ | ||
42 | struct work_struct work; | ||
43 | }; | ||
44 | |||
45 | static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h); | ||
46 | static struct drm_fb_helper *get_fb(struct fb_info *fbi); | ||
47 | |||
48 | static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf, | ||
49 | size_t count, loff_t *ppos) | ||
50 | { | ||
51 | ssize_t res; | ||
52 | |||
53 | res = fb_sys_write(fbi, buf, count, ppos); | ||
54 | omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres); | ||
55 | |||
56 | return res; | ||
57 | } | ||
58 | |||
59 | static void omap_fbdev_fillrect(struct fb_info *fbi, | ||
60 | const struct fb_fillrect *rect) | ||
61 | { | ||
62 | sys_fillrect(fbi, rect); | ||
63 | omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height); | ||
64 | } | ||
65 | |||
66 | static void omap_fbdev_copyarea(struct fb_info *fbi, | ||
67 | const struct fb_copyarea *area) | ||
68 | { | ||
69 | sys_copyarea(fbi, area); | ||
70 | omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height); | ||
71 | } | ||
72 | |||
73 | static void omap_fbdev_imageblit(struct fb_info *fbi, | ||
74 | const struct fb_image *image) | ||
75 | { | ||
76 | sys_imageblit(fbi, image); | ||
77 | omap_fbdev_flush(fbi, image->dx, image->dy, | ||
78 | image->width, image->height); | ||
79 | } | ||
80 | |||
81 | static void pan_worker(struct work_struct *work) | ||
82 | { | ||
83 | struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work); | ||
84 | struct fb_info *fbi = fbdev->base.fbdev; | ||
85 | int npages; | ||
86 | |||
87 | /* DMM roll shifts in 4K pages: */ | ||
88 | npages = fbi->fix.line_length >> PAGE_SHIFT; | ||
89 | omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages); | ||
90 | } | ||
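/* Worked example: with an 8192-byte line_length, npages is
 * 8192 >> PAGE_SHIFT = 2 pages per line, so panning to yoffset 600
 * rolls the DMM mapping by 1200 pages; no pixels are copied.
 */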
91 | |||
92 | static int omap_fbdev_pan_display(struct fb_var_screeninfo *var, | ||
93 | struct fb_info *fbi) | ||
94 | { | ||
95 | struct drm_fb_helper *helper = get_fb(fbi); | ||
96 | struct omap_fbdev *fbdev; | ||
97 | |||
98 | if (!helper) | ||
99 | goto fallback; | ||
100 | fbdev = to_omap_fbdev(helper); | ||
101 | if (!fbdev->ywrap_enabled) | ||
102 | goto fallback; | ||
103 | |||
104 | if (drm_can_sleep()) { | ||
105 | pan_worker(&fbdev->work); | ||
106 | } else { | ||
107 | struct omap_drm_private *priv = helper->dev->dev_private; | ||
108 | queue_work(priv->wq, &fbdev->work); | ||
109 | } | ||
110 | |||
111 | return 0; | ||
112 | |||
113 | fallback: | ||
114 | return drm_fb_helper_pan_display(var, fbi); | ||
115 | } | ||
116 | |||
117 | static struct fb_ops omap_fb_ops = { | ||
118 | .owner = THIS_MODULE, | ||
119 | |||
120 | /* Note: to properly handle manual update displays, we wrap the | ||
121 | * basic fbdev ops which write to the framebuffer | ||
122 | */ | ||
123 | .fb_read = fb_sys_read, | ||
124 | .fb_write = omap_fbdev_write, | ||
125 | .fb_fillrect = omap_fbdev_fillrect, | ||
126 | .fb_copyarea = omap_fbdev_copyarea, | ||
127 | .fb_imageblit = omap_fbdev_imageblit, | ||
128 | |||
129 | .fb_check_var = drm_fb_helper_check_var, | ||
130 | .fb_set_par = drm_fb_helper_set_par, | ||
131 | .fb_pan_display = omap_fbdev_pan_display, | ||
132 | .fb_blank = drm_fb_helper_blank, | ||
133 | .fb_setcmap = drm_fb_helper_setcmap, | ||
134 | }; | ||
135 | |||
136 | static int omap_fbdev_create(struct drm_fb_helper *helper, | ||
137 | struct drm_fb_helper_surface_size *sizes) | ||
138 | { | ||
139 | struct omap_fbdev *fbdev = to_omap_fbdev(helper); | ||
140 | struct drm_device *dev = helper->dev; | ||
141 | struct omap_drm_private *priv = dev->dev_private; | ||
142 | struct drm_framebuffer *fb = NULL; | ||
143 | union omap_gem_size gsize; | ||
144 | struct fb_info *fbi = NULL; | ||
145 | struct drm_mode_fb_cmd2 mode_cmd = {0}; | ||
146 | dma_addr_t paddr; | ||
147 | int ret; | ||
148 | |||
149 | /* only doing ARGB32 since this is what is needed to alpha-blend | ||
150 | * with video overlays: | ||
151 | */ | ||
152 | sizes->surface_bpp = 32; | ||
153 | sizes->surface_depth = 32; | ||
154 | |||
155 | DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, | ||
156 | sizes->surface_height, sizes->surface_bpp, | ||
157 | sizes->fb_width, sizes->fb_height); | ||
158 | |||
159 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, | ||
160 | sizes->surface_depth); | ||
161 | |||
162 | mode_cmd.width = sizes->surface_width; | ||
163 | mode_cmd.height = sizes->surface_height; | ||
164 | |||
165 | mode_cmd.pitches[0] = align_pitch( | ||
166 | mode_cmd.width * ((sizes->surface_bpp + 7) / 8), | ||
167 | mode_cmd.width, sizes->surface_bpp); | ||
168 | |||
169 | fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled; | ||
170 | if (fbdev->ywrap_enabled) { | ||
171 | /* need to align pitch to page size if using DMM scrolling */ | ||
172 | mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE); | ||
173 | } | ||
174 | |||
175 | /* allocate backing bo */ | ||
176 | gsize = (union omap_gem_size){ | ||
177 | .bytes = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height), | ||
178 | }; | ||
179 | DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index); | ||
180 | fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC); | ||
181 | if (!fbdev->bo) { | ||
182 | dev_err(dev->dev, "failed to allocate buffer object\n"); | ||
183 | ret = -ENOMEM; | ||
184 | goto fail; | ||
185 | } | ||
186 | |||
187 | fb = omap_framebuffer_init(dev, &mode_cmd, &fbdev->bo); | ||
188 | if (IS_ERR(fb)) { | ||
189 | dev_err(dev->dev, "failed to allocate fb\n"); | ||
190 | /* note: if fb creation failed, we can't rely on fb destroy | ||
191 | * to unref the bo: | ||
192 | */ | ||
193 | drm_gem_object_unreference(fbdev->bo); | ||
194 | ret = PTR_ERR(fb); | ||
195 | goto fail; | ||
196 | } | ||
197 | |||
198 | /* note: this keeps the bo pinned.. which is perhaps not ideal, | ||
199 | * but is needed as long as we use fb_mmap() to mmap to userspace | ||
200 | * (since this happens using fix.smem_start). Possibly we could | ||
201 | * implement our own mmap using GEM mmap support to avoid this | ||
202 | * (non-tiled buffer doesn't need to be pinned for fbcon to write | ||
203 | * to it). Then we just need to be sure that we are able to re- | ||
204 | * pin it in case of an oops. | ||
205 | */ | ||
206 | ret = omap_gem_get_paddr(fbdev->bo, &paddr, true); | ||
207 | if (ret) { | ||
208 | dev_err(dev->dev, | ||
209 | "could not map (paddr)! Skipping framebuffer alloc\n"); | ||
210 | ret = -ENOMEM; | ||
211 | goto fail; | ||
212 | } | ||
213 | |||
214 | mutex_lock(&dev->struct_mutex); | ||
215 | |||
216 | fbi = framebuffer_alloc(0, dev->dev); | ||
217 | if (!fbi) { | ||
218 | dev_err(dev->dev, "failed to allocate fb info\n"); | ||
219 | ret = -ENOMEM; | ||
220 | goto fail_unlock; | ||
221 | } | ||
222 | |||
223 | DBG("fbi=%p, dev=%p", fbi, dev); | ||
224 | |||
225 | fbdev->fb = fb; | ||
226 | helper->fb = fb; | ||
227 | helper->fbdev = fbi; | ||
228 | |||
229 | fbi->par = helper; | ||
230 | fbi->flags = FBINFO_DEFAULT; | ||
231 | fbi->fbops = &omap_fb_ops; | ||
232 | |||
233 | strcpy(fbi->fix.id, MODULE_NAME); | ||
234 | |||
235 | ret = fb_alloc_cmap(&fbi->cmap, 256, 0); | ||
236 | if (ret) { | ||
237 | ret = -ENOMEM; | ||
238 | goto fail_unlock; | ||
239 | } | ||
240 | |||
241 | drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); | ||
242 | drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); | ||
243 | |||
244 | dev->mode_config.fb_base = paddr; | ||
245 | |||
246 | fbi->screen_base = omap_gem_vaddr(fbdev->bo); | ||
247 | fbi->screen_size = fbdev->bo->size; | ||
248 | fbi->fix.smem_start = paddr; | ||
249 | fbi->fix.smem_len = fbdev->bo->size; | ||
250 | |||
251 | /* if we have DMM, then we can use it for scrolling by just | ||
252 | * shuffling pages around in DMM rather than doing sw blit. | ||
253 | */ | ||
254 | if (fbdev->ywrap_enabled) { | ||
255 | DRM_INFO("Enabling DMM ywrap scrolling\n"); | ||
256 | fbi->flags |= FBINFO_HWACCEL_YWRAP | FBINFO_READS_FAST; | ||
257 | fbi->fix.ywrapstep = 1; | ||
258 | } | ||
259 | |||
260 | |||
261 | DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres); | ||
262 | DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height); | ||
263 | |||
264 | mutex_unlock(&dev->struct_mutex); | ||
265 | |||
266 | return 0; | ||
267 | |||
268 | fail_unlock: | ||
269 | mutex_unlock(&dev->struct_mutex); | ||
270 | fail: | ||
271 | |||
272 | if (ret) { | ||
273 | if (fbi) | ||
274 | framebuffer_release(fbi); | ||
275 | if (fb) { | ||
276 | drm_framebuffer_unregister_private(fb); | ||
277 | drm_framebuffer_remove(fb); | ||
278 | } | ||
279 | } | ||
280 | |||
281 | return ret; | ||
282 | } | ||
283 | |||
284 | static void omap_crtc_fb_gamma_set(struct drm_crtc *crtc, | ||
285 | u16 red, u16 green, u16 blue, int regno) | ||
286 | { | ||
287 | DBG("fbdev: set gamma"); | ||
288 | } | ||
289 | |||
290 | static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc, | ||
291 | u16 *red, u16 *green, u16 *blue, int regno) | ||
292 | { | ||
293 | DBG("fbdev: get gamma"); | ||
294 | } | ||
295 | |||
296 | static struct drm_fb_helper_funcs omap_fb_helper_funcs = { | ||
297 | .gamma_set = omap_crtc_fb_gamma_set, | ||
298 | .gamma_get = omap_crtc_fb_gamma_get, | ||
299 | .fb_probe = omap_fbdev_create, | ||
300 | }; | ||
301 | |||
302 | static struct drm_fb_helper *get_fb(struct fb_info *fbi) | ||
303 | { | ||
304 | if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) { | ||
305 | /* these are not the fb's you're looking for */ | ||
306 | return NULL; | ||
307 | } | ||
308 | return fbi->par; | ||
309 | } | ||
310 | |||
311 | /* flush an area of the framebuffer (in the case of a manual update display | ||
312 | * that is not automatically flushed) | ||
313 | */ | ||
314 | static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h) | ||
315 | { | ||
316 | struct drm_fb_helper *helper = get_fb(fbi); | ||
317 | |||
318 | if (!helper) | ||
319 | return; | ||
320 | |||
321 | VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi); | ||
322 | |||
323 | omap_framebuffer_flush(helper->fb, x, y, w, h); | ||
324 | } | ||
325 | |||
326 | /* initialize fbdev helper */ | ||
327 | struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev) | ||
328 | { | ||
329 | struct omap_drm_private *priv = dev->dev_private; | ||
330 | struct omap_fbdev *fbdev = NULL; | ||
331 | struct drm_fb_helper *helper; | ||
332 | int ret = 0; | ||
333 | |||
334 | fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); | ||
335 | if (!fbdev) | ||
336 | goto fail; | ||
337 | |||
338 | INIT_WORK(&fbdev->work, pan_worker); | ||
339 | |||
340 | helper = &fbdev->base; | ||
341 | |||
342 | helper->funcs = &omap_fb_helper_funcs; | ||
343 | |||
344 | ret = drm_fb_helper_init(dev, helper, | ||
345 | priv->num_crtcs, priv->num_connectors); | ||
346 | if (ret) { | ||
347 | dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret); | ||
348 | goto fail; | ||
349 | } | ||
350 | |||
351 | drm_fb_helper_single_add_all_connectors(helper); | ||
352 | |||
353 | /* disable all the possible outputs/crtcs before entering KMS mode */ | ||
354 | drm_helper_disable_unused_functions(dev); | ||
355 | |||
356 | drm_fb_helper_initial_config(helper, 32); | ||
357 | |||
358 | priv->fbdev = helper; | ||
359 | |||
360 | return helper; | ||
361 | |||
362 | fail: | ||
363 | kfree(fbdev); | ||
364 | return NULL; | ||
365 | } | ||
366 | |||
367 | void omap_fbdev_free(struct drm_device *dev) | ||
368 | { | ||
369 | struct omap_drm_private *priv = dev->dev_private; | ||
370 | struct drm_fb_helper *helper = priv->fbdev; | ||
371 | struct omap_fbdev *fbdev; | ||
372 | struct fb_info *fbi; | ||
373 | |||
374 | DBG(); | ||
375 | |||
376 | fbi = helper->fbdev; | ||
377 | |||
378 | /* only cleanup framebuffer if it is present */ | ||
379 | if (fbi) { | ||
380 | unregister_framebuffer(fbi); | ||
381 | framebuffer_release(fbi); | ||
382 | } | ||
383 | |||
384 | drm_fb_helper_fini(helper); | ||
385 | |||
386 | fbdev = to_omap_fbdev(priv->fbdev); | ||
387 | |||
388 | /* this will free the backing object */ | ||
389 | if (fbdev->fb) { | ||
390 | drm_framebuffer_unregister_private(fbdev->fb); | ||
391 | drm_framebuffer_remove(fbdev->fb); | ||
392 | } | ||
393 | |||
394 | kfree(fbdev); | ||
395 | |||
396 | priv->fbdev = NULL; | ||
397 | } | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c new file mode 100644 index 000000000000..ebbdf4132e9c --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_gem.c | |||
@@ -0,0 +1,1507 @@ | |||
1 | /* | ||
2 | * drivers/gpu/drm/omapdrm/omap_gem.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments | ||
5 | * Author: Rob Clark <rob.clark@linaro.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | |||
21 | #include <linux/spinlock.h> | ||
22 | #include <linux/shmem_fs.h> | ||
23 | |||
24 | #include "omap_drv.h" | ||
25 | #include "omap_dmm_tiler.h" | ||
26 | |||
27 | /* remove these once drm core helpers are merged */ | ||
28 | struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); | ||
29 | void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, | ||
30 | bool dirty, bool accessed); | ||
31 | int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); | ||
32 | |||
33 | /* | ||
34 | * GEM buffer object implementation. | ||
35 | */ | ||
36 | |||
37 | #define to_omap_bo(x) container_of(x, struct omap_gem_object, base) | ||
38 | |||
39 | /* note: we use upper 8 bits of flags for driver-internal flags: */ | ||
40 | #define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */ | ||
41 | #define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */ | ||
42 | #define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */ | ||
43 | |||
44 | |||
45 | struct omap_gem_object { | ||
46 | struct drm_gem_object base; | ||
47 | |||
48 | struct list_head mm_list; | ||
49 | |||
50 | uint32_t flags; | ||
51 | |||
52 | /** width/height for tiled formats (rounded up to slot boundaries) */ | ||
53 | uint16_t width, height; | ||
54 | |||
55 | /** roll applied when mapping to DMM */ | ||
56 | uint32_t roll; | ||
57 | |||
58 | /** | ||
59 | * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag | ||
60 | * is set and the paddr is valid. Also if the buffer is remapped in | ||
61 | * TILER and paddr_cnt > 0, then paddr is valid. But if you are using | ||
62 | * the physical address and OMAP_BO_DMA is not set, then you should | ||
63 | * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is | ||
64 | * not removed from under your feet. | ||
65 | * | ||
66 | * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA capable | ||
67 | * buffer is requested, but doesn't guarantee that it is. Use the | ||
68 | * OMAP_BO_DMA flag to determine if the buffer has a DMA capable | ||
69 | * physical address. | ||
70 | */ | ||
71 | dma_addr_t paddr; | ||
72 | |||
73 | /** | ||
74 | * # of users of paddr | ||
75 | */ | ||
76 | uint32_t paddr_cnt; | ||
77 | |||
78 | /** | ||
79 | * tiler block used when buffer is remapped in DMM/TILER. | ||
80 | */ | ||
81 | struct tiler_block *block; | ||
82 | |||
83 | /** | ||
84 | * Array of backing pages, if allocated. Note that pages are never | ||
85 | * allocated for buffers originally allocated from contiguous memory | ||
86 | */ | ||
87 | struct page **pages; | ||
88 | |||
89 | /** addresses corresponding to pages in above array */ | ||
90 | dma_addr_t *addrs; | ||
91 | |||
92 | /** | ||
93 | * Virtual address, if mapped. | ||
94 | */ | ||
95 | void *vaddr; | ||
96 | |||
97 | /** | ||
98 | * sync-object allocated on demand (if needed) | ||
99 | * | ||
100 | * Per-buffer sync-object for tracking pending and completed hw/dma | ||
101 | * read and write operations. The layout in memory is dictated by | ||
102 | * the SGX firmware, which uses this information to stall the command | ||
103 | * stream if a surface is not ready yet. | ||
104 | * | ||
105 | * Note that when buffer is used by SGX, the sync-object needs to be | ||
106 | * allocated from a special heap of sync-objects. This way many sync | ||
107 | * objects can be packed in a page, and not waste GPU virtual address | ||
108 | * space. Because of this we have to have a omap_gem_set_sync_object() | ||
109 | * API to allow replacement of the syncobj after it has (potentially) | ||
110 | * already been allocated. A bit ugly but I haven't thought of a | ||
111 | * better alternative. | ||
112 | */ | ||
113 | struct { | ||
114 | uint32_t write_pending; | ||
115 | uint32_t write_complete; | ||
116 | uint32_t read_pending; | ||
117 | uint32_t read_complete; | ||
118 | } *sync; | ||
119 | }; | ||
120 | |||
121 | static int get_pages(struct drm_gem_object *obj, struct page ***pages); | ||
122 | static uint64_t mmap_offset(struct drm_gem_object *obj); | ||
123 | |||
124 | /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are | ||
125 | * not necessarily pinned in TILER all the time, and (b) when they are | ||
126 | * they are not necessarily page aligned, we reserve one or more small | ||
127 | * regions in each of the 2d containers to use as a user-GART where we | ||
128 | * can create a second page-aligned mapping of parts of the buffer | ||
129 | * being accessed from userspace. | ||
130 | * | ||
131 | * Note that we could optimize slightly when we know that multiple | ||
132 | * tiler containers are backed by the same PAT.. but I'll leave that | ||
133 | * for later.. | ||
134 | */ | ||
135 | #define NUM_USERGART_ENTRIES 2 | ||
136 | struct usergart_entry { | ||
137 | struct tiler_block *block; /* the reserved tiler block */ | ||
138 | dma_addr_t paddr; | ||
139 | struct drm_gem_object *obj; /* the current pinned obj */ | ||
140 | pgoff_t obj_pgoff; /* page offset of obj currently | ||
141 | mapped in */ | ||
142 | }; | ||
143 | static struct { | ||
144 | struct usergart_entry entry[NUM_USERGART_ENTRIES]; | ||
145 | int height; /* height in rows */ | ||
146 | int height_shift; /* ilog2(height in rows) */ | ||
147 | int slot_shift; /* ilog2(width per slot) */ | ||
148 | int stride_pfn; /* stride in pages */ | ||
149 | int last; /* index of last used entry */ | ||
150 | } *usergart; | ||
151 | |||
152 | static void evict_entry(struct drm_gem_object *obj, | ||
153 | enum tiler_fmt fmt, struct usergart_entry *entry) | ||
154 | { | ||
155 | if (obj->dev->dev_mapping) { | ||
156 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
157 | int n = usergart[fmt].height; | ||
158 | size_t size = PAGE_SIZE * n; | ||
159 | loff_t off = mmap_offset(obj) + | ||
160 | (entry->obj_pgoff << PAGE_SHIFT); | ||
161 | const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); | ||
162 | if (m > 1) { | ||
163 | int i; | ||
164 | /* if stride is larger than PAGE_SIZE, the mapping is sparse: unmap one page per row */ | ||
165 | for (i = n; i > 0; i--) { | ||
166 | unmap_mapping_range(obj->dev->dev_mapping, | ||
167 | off, PAGE_SIZE, 1); | ||
168 | off += PAGE_SIZE * m; | ||
169 | } | ||
170 | } else { | ||
171 | unmap_mapping_range(obj->dev->dev_mapping, off, size, 1); | ||
172 | } | ||
173 | } | ||
174 | |||
175 | entry->obj = NULL; | ||
176 | } | ||
177 | |||
178 | /* Evict a buffer from usergart, if it is mapped there */ | ||
179 | static void evict(struct drm_gem_object *obj) | ||
180 | { | ||
181 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
182 | |||
183 | if (omap_obj->flags & OMAP_BO_TILED) { | ||
184 | enum tiler_fmt fmt = gem2fmt(omap_obj->flags); | ||
185 | int i; | ||
186 | |||
187 | if (!usergart) | ||
188 | return; | ||
189 | |||
190 | for (i = 0; i < NUM_USERGART_ENTRIES; i++) { | ||
191 | struct usergart_entry *entry = &usergart[fmt].entry[i]; | ||
192 | if (entry->obj == obj) | ||
193 | evict_entry(obj, fmt, entry); | ||
194 | } | ||
195 | } | ||
196 | } | ||
197 | |||
198 | /* GEM objects can either be allocated from contiguous memory (in which | ||
199 | * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). Non- | ||
200 | * contiguous buffers can still be remapped in TILER/DMM if they need to | ||
201 | * be contiguous... but to reduce pressure on TILER/DMM space we don't | ||
202 | * do this all the time; instead we allocate contiguous memory up front | ||
203 | * when we know at allocation time that the buffer will be scanned out. | ||
204 | */ | ||
205 | static inline bool is_shmem(struct drm_gem_object *obj) | ||
206 | { | ||
207 | return obj->filp != NULL; | ||
208 | } | ||
209 | |||
210 | /** | ||
211 | * shmem buffers that are mapped cached can simulate coherency by using | ||
212 | * page faults to keep track of dirty pages | ||
213 | */ | ||
214 | static inline bool is_cached_coherent(struct drm_gem_object *obj) | ||
215 | { | ||
216 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
217 | return is_shmem(obj) && | ||
218 | ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED); | ||
219 | } | ||
220 | |||
221 | static DEFINE_SPINLOCK(sync_lock); | ||
222 | |||
223 | /** ensure backing pages are allocated */ | ||
224 | static int omap_gem_attach_pages(struct drm_gem_object *obj) | ||
225 | { | ||
226 | struct drm_device *dev = obj->dev; | ||
227 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
228 | struct page **pages; | ||
229 | int npages = obj->size >> PAGE_SHIFT; | ||
230 | int i, ret; | ||
231 | dma_addr_t *addrs; | ||
232 | |||
233 | WARN_ON(omap_obj->pages); | ||
234 | |||
235 | /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the | ||
236 | * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably | ||
237 | * we actually want CMA memory for it all anyways.. | ||
238 | */ | ||
239 | pages = _drm_gem_get_pages(obj, GFP_KERNEL); | ||
240 | if (IS_ERR(pages)) { | ||
241 | dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages)); | ||
242 | return PTR_ERR(pages); | ||
243 | } | ||
244 | |||
245 | /* for non-cached buffers, ensure the new pages are clean because | ||
246 | * DSS, GPU, etc. are not cache coherent: | ||
247 | */ | ||
248 | if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) { | ||
249 | addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL); | ||
250 | if (!addrs) { | ||
251 | ret = -ENOMEM; | ||
252 | goto free_pages; | ||
253 | } | ||
254 | |||
255 | for (i = 0; i < npages; i++) { | ||
256 | addrs[i] = dma_map_page(dev->dev, pages[i], | ||
257 | 0, PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
258 | } | ||
259 | } else { | ||
260 | addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL); | ||
261 | if (!addrs) { | ||
262 | ret = -ENOMEM; | ||
263 | goto free_pages; | ||
264 | } | ||
265 | } | ||
266 | |||
267 | omap_obj->addrs = addrs; | ||
268 | omap_obj->pages = pages; | ||
269 | |||
270 | return 0; | ||
271 | |||
272 | free_pages: | ||
273 | _drm_gem_put_pages(obj, pages, true, false); | ||
274 | |||
275 | return ret; | ||
276 | } | ||
277 | |||
278 | /** release backing pages */ | ||
279 | static void omap_gem_detach_pages(struct drm_gem_object *obj) | ||
280 | { | ||
281 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
282 | |||
283 | /* for non-cached buffers, ensure the new pages are clean because | ||
284 | * DSS, GPU, etc. are not cache coherent: | ||
285 | */ | ||
286 | if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) { | ||
287 | int i, npages = obj->size >> PAGE_SHIFT; | ||
288 | for (i = 0; i < npages; i++) { | ||
289 | dma_unmap_page(obj->dev->dev, omap_obj->addrs[i], | ||
290 | PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
291 | } | ||
292 | } | ||
293 | |||
294 | kfree(omap_obj->addrs); | ||
295 | omap_obj->addrs = NULL; | ||
296 | |||
297 | _drm_gem_put_pages(obj, omap_obj->pages, true, false); | ||
298 | omap_obj->pages = NULL; | ||
299 | } | ||
300 | |||
301 | /* get buffer flags */ | ||
302 | uint32_t omap_gem_flags(struct drm_gem_object *obj) | ||
303 | { | ||
304 | return to_omap_bo(obj)->flags; | ||
305 | } | ||
306 | |||
307 | /** get mmap offset */ | ||
308 | static uint64_t mmap_offset(struct drm_gem_object *obj) | ||
309 | { | ||
310 | struct drm_device *dev = obj->dev; | ||
311 | |||
312 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
313 | |||
314 | if (!obj->map_list.map) { | ||
315 | /* Make it mmapable */ | ||
316 | size_t size = omap_gem_mmap_size(obj); | ||
317 | int ret = _drm_gem_create_mmap_offset_size(obj, size); | ||
318 | |||
319 | if (ret) { | ||
320 | dev_err(dev->dev, "could not allocate mmap offset\n"); | ||
321 | return 0; | ||
322 | } | ||
323 | } | ||
324 | |||
325 | return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT; | ||
326 | } | ||
327 | |||
328 | uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj) | ||
329 | { | ||
330 | uint64_t offset; | ||
331 | mutex_lock(&obj->dev->struct_mutex); | ||
332 | offset = mmap_offset(obj); | ||
333 | mutex_unlock(&obj->dev->struct_mutex); | ||
334 | return offset; | ||
335 | } | ||
336 | |||
337 | /** get mmap size */ | ||
338 | size_t omap_gem_mmap_size(struct drm_gem_object *obj) | ||
339 | { | ||
340 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
341 | size_t size = obj->size; | ||
342 | |||
343 | if (omap_obj->flags & OMAP_BO_TILED) { | ||
344 | /* for tiled buffers, the virtual size has stride rounded up | ||
345 | * to 4kb.. (to hide the fact that row n+1 might start 16kb or | ||
346 | * 32kb later!). But we don't back the entire buffer with | ||
347 | * pages, only the valid picture part.. so we need to adjust for | ||
348 | * this in the size used to mmap and generate mmap offset | ||
349 | */ | ||
350 | size = tiler_vsize(gem2fmt(omap_obj->flags), | ||
351 | omap_obj->width, omap_obj->height); | ||
352 | } | ||
353 | |||
354 | return size; | ||
355 | } | ||
356 | |||
357 | /* get tiled size, returns -EINVAL if not tiled buffer */ | ||
358 | int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h) | ||
359 | { | ||
360 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
361 | if (omap_obj->flags & OMAP_BO_TILED) { | ||
362 | *w = omap_obj->width; | ||
363 | *h = omap_obj->height; | ||
364 | return 0; | ||
365 | } | ||
366 | return -EINVAL; | ||
367 | } | ||
368 | |||
369 | /* Normal handling for the case of faulting in non-tiled buffers */ | ||
370 | static int fault_1d(struct drm_gem_object *obj, | ||
371 | struct vm_area_struct *vma, struct vm_fault *vmf) | ||
372 | { | ||
373 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
374 | unsigned long pfn; | ||
375 | pgoff_t pgoff; | ||
376 | |||
377 | /* We don't use vmf->pgoff since that has the fake offset: */ | ||
378 | pgoff = ((unsigned long)vmf->virtual_address - | ||
379 | vma->vm_start) >> PAGE_SHIFT; | ||
380 | |||
381 | if (omap_obj->pages) { | ||
382 | omap_gem_cpu_sync(obj, pgoff); | ||
383 | pfn = page_to_pfn(omap_obj->pages[pgoff]); | ||
384 | } else { | ||
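/* physically contiguous case: the pfn is computed directly from the | ||
* base physical address, no page array needed */ | ||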
385 | BUG_ON(!(omap_obj->flags & OMAP_BO_DMA)); | ||
386 | pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff; | ||
387 | } | ||
388 | |||
389 | VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, | ||
390 | pfn, pfn << PAGE_SHIFT); | ||
391 | |||
392 | return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); | ||
393 | } | ||
394 | |||
395 | /* Special handling for the case of faulting in 2d tiled buffers */ | ||
396 | static int fault_2d(struct drm_gem_object *obj, | ||
397 | struct vm_area_struct *vma, struct vm_fault *vmf) | ||
398 | { | ||
399 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
400 | struct usergart_entry *entry; | ||
401 | enum tiler_fmt fmt = gem2fmt(omap_obj->flags); | ||
402 | struct page *pages[64]; /* XXX is this too much to have on stack? */ | ||
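/* (64 pointers is 256 bytes on 32-bit ARM, so the stack usage here | ||
* is probably modest in practice) */ | ||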
403 | unsigned long pfn; | ||
404 | pgoff_t pgoff, base_pgoff; | ||
405 | void __user *vaddr; | ||
406 | int i, ret, slots; | ||
407 | |||
408 | /* | ||
409 | * Note the height of the slot is also equal to the number of pages | ||
410 | * that need to be mapped in to fill a 4kb-wide CPU page. If the slot | ||
411 | * height is 64, then 64 pages fill a 4kb-wide by 64-row region. | ||
412 | */ | ||
413 | const int n = usergart[fmt].height; | ||
414 | const int n_shift = usergart[fmt].height_shift; | ||
415 | |||
416 | /* | ||
417 | * If buffer width in bytes > PAGE_SIZE then the virtual stride is | ||
418 | * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken | ||
419 | * into account in some of the math, so figure out virtual stride | ||
420 | * in pages | ||
421 | */ | ||
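/* (worked example: a buffer 4000 bytes wide gives m = 1 since a row | ||
* fits in one page; a buffer 5000 bytes wide gives m = 2) */ | ||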
422 | const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); | ||
423 | |||
424 | /* We don't use vmf->pgoff since that has the fake offset: */ | ||
425 | pgoff = ((unsigned long)vmf->virtual_address - | ||
426 | vma->vm_start) >> PAGE_SHIFT; | ||
427 | |||
428 | /* | ||
429 | * Actual address we start mapping at is rounded down to previous slot | ||
430 | * boundary in the y direction: | ||
431 | */ | ||
432 | base_pgoff = round_down(pgoff, m << n_shift); | ||
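/* (each slot-row spans m << n_shift == m * n pages of the virtual | ||
* mapping, so this rounds down to the start of a slot-row) */ | ||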
433 | |||
434 | /* figure out buffer width in slots */ | ||
435 | slots = omap_obj->width >> usergart[fmt].slot_shift; | ||
436 | |||
437 | vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT); | ||
438 | |||
439 | entry = &usergart[fmt].entry[usergart[fmt].last]; | ||
440 | |||
441 | /* evict previous buffer using this usergart entry, if any: */ | ||
442 | if (entry->obj) | ||
443 | evict_entry(entry->obj, fmt, entry); | ||
444 | |||
445 | entry->obj = obj; | ||
446 | entry->obj_pgoff = base_pgoff; | ||
447 | |||
448 | /* now convert base_pgoff to phys offset from virt offset: */ | ||
449 | base_pgoff = (base_pgoff >> n_shift) * slots; | ||
450 | |||
451 | /* for wider-than 4k.. figure out which part of the slot-row we want: */ | ||
452 | if (m > 1) { | ||
453 | int off = pgoff % m; | ||
454 | entry->obj_pgoff += off; | ||
455 | base_pgoff /= m; | ||
456 | slots = min(slots - (off << n_shift), n); | ||
457 | base_pgoff += off << n_shift; | ||
458 | vaddr += off << PAGE_SHIFT; | ||
459 | } | ||
460 | |||
461 | /* | ||
462 | * Map in pages. Beyond the valid pixel part of the buffer, we set | ||
463 | * pages[i] to NULL to get a dummy page mapped in.. if someone | ||
464 | * reads/writes it they will get random/undefined content, but at | ||
465 | * least it won't be corrupting whatever other random page used to | ||
466 | * be mapped in, or other undefined behavior. | ||
467 | */ | ||
468 | memcpy(pages, &omap_obj->pages[base_pgoff], | ||
469 | sizeof(struct page *) * slots); | ||
470 | memset(pages + slots, 0, | ||
471 | sizeof(struct page *) * (n - slots)); | ||
472 | |||
473 | ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true); | ||
474 | if (ret) { | ||
475 | dev_err(obj->dev->dev, "failed to pin: %d\n", ret); | ||
476 | return ret; | ||
477 | } | ||
478 | |||
479 | pfn = entry->paddr >> PAGE_SHIFT; | ||
480 | |||
481 | VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, | ||
482 | pfn, pfn << PAGE_SHIFT); | ||
483 | |||
484 | for (i = n; i > 0; i--) { | ||
485 | vm_insert_mixed(vma, (unsigned long)vaddr, pfn); | ||
486 | pfn += usergart[fmt].stride_pfn; | ||
487 | vaddr += PAGE_SIZE * m; | ||
488 | } | ||
489 | |||
490 | /* simple round-robin: */ | ||
491 | usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES; | ||
492 | |||
493 | return 0; | ||
494 | } | ||
495 | |||
496 | /** | ||
497 | * omap_gem_fault - pagefault handler for GEM objects | ||
498 | * @vma: the VMA of the GEM object | ||
499 | * @vmf: fault detail | ||
500 | * | ||
501 | * Invoked when a fault occurs on an mmap of a GEM managed area. GEM | ||
502 | * does most of the work for us including the actual map/unmap calls, | ||
503 | * but we need to do the page-level work ourselves. | ||
504 | * | ||
505 | * The VMA was set up by GEM. In doing so it also ensured that the | ||
506 | * vma->vm_private_data points to the GEM object that is backing this | ||
507 | * mapping. | ||
508 | */ | ||
509 | int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
510 | { | ||
511 | struct drm_gem_object *obj = vma->vm_private_data; | ||
512 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
513 | struct drm_device *dev = obj->dev; | ||
514 | struct page **pages; | ||
515 | int ret; | ||
516 | |||
517 | /* Make sure we don't parallel update on a fault, nor move or remove | ||
518 | * something from beneath our feet | ||
519 | */ | ||
520 | mutex_lock(&dev->struct_mutex); | ||
521 | |||
522 | /* if a shmem backed object, make sure we have pages attached now */ | ||
523 | ret = get_pages(obj, &pages); | ||
524 | if (ret) | ||
525 | goto fail; | ||
526 | |||
527 | /* where should we do corresponding put_pages().. we are mapping | ||
528 | * the original page, rather than thru a GART, so we can't rely | ||
529 | * on eviction to trigger this. But munmap() of all mappings should | ||
530 | * probably trigger put_pages()? | ||
531 | */ | ||
532 | |||
533 | if (omap_obj->flags & OMAP_BO_TILED) | ||
534 | ret = fault_2d(obj, vma, vmf); | ||
535 | else | ||
536 | ret = fault_1d(obj, vma, vmf); | ||
537 | |||
538 | |||
539 | fail: | ||
540 | mutex_unlock(&dev->struct_mutex); | ||
541 | switch (ret) { | ||
542 | case 0: | ||
543 | case -ERESTARTSYS: | ||
544 | case -EINTR: | ||
545 | return VM_FAULT_NOPAGE; | ||
546 | case -ENOMEM: | ||
547 | return VM_FAULT_OOM; | ||
548 | default: | ||
549 | return VM_FAULT_SIGBUS; | ||
550 | } | ||
551 | } | ||
552 | |||
553 | /** We override mainly to fix up some of the vm mapping flags.. */ | ||
554 | int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma) | ||
555 | { | ||
556 | int ret; | ||
557 | |||
558 | ret = drm_gem_mmap(filp, vma); | ||
559 | if (ret) { | ||
560 | DBG("mmap failed: %d", ret); | ||
561 | return ret; | ||
562 | } | ||
563 | |||
564 | return omap_gem_mmap_obj(vma->vm_private_data, vma); | ||
565 | } | ||
566 | |||
567 | int omap_gem_mmap_obj(struct drm_gem_object *obj, | ||
568 | struct vm_area_struct *vma) | ||
569 | { | ||
570 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
571 | |||
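/* VM_MIXEDMAP is needed because the fault handlers insert both real | ||
* struct pages (shmem case) and raw pfns (contiguous/TILER case) via | ||
* vm_insert_mixed() */ | ||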
572 | vma->vm_flags &= ~VM_PFNMAP; | ||
573 | vma->vm_flags |= VM_MIXEDMAP; | ||
574 | |||
575 | if (omap_obj->flags & OMAP_BO_WC) { | ||
576 | vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); | ||
577 | } else if (omap_obj->flags & OMAP_BO_UNCACHED) { | ||
578 | vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags)); | ||
579 | } else { | ||
580 | /* | ||
581 | * We do have some private objects, at least for scanout buffers | ||
582 | * on hardware without DMM/TILER. But those are allocated write- | ||
583 | * combined, so they are handled by the OMAP_BO_WC case above. | ||
584 | */ | ||
585 | if (WARN_ON(!obj->filp)) | ||
586 | return -EINVAL; | ||
587 | |||
588 | /* | ||
589 | * Shunt off cached objs to shmem file so they have their own | ||
590 | * address_space (so unmap_mapping_range does what we want, | ||
591 | * in particular in the case of mmap'd dmabufs) | ||
592 | */ | ||
593 | fput(vma->vm_file); | ||
594 | vma->vm_pgoff = 0; | ||
595 | vma->vm_file = get_file(obj->filp); | ||
596 | |||
597 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); | ||
598 | } | ||
599 | |||
600 | return 0; | ||
601 | } | ||
602 | |||
603 | |||
604 | /** | ||
605 | * omap_gem_dumb_create - create a dumb buffer | ||
606 | * @file: our client file | ||
607 | * @dev: our device | ||
608 | * @args: the requested arguments copied from userspace | ||
609 | * | ||
610 | * Allocate a buffer suitable for use for a frame buffer of the | ||
611 | * form described by user space. Give userspace a handle by which | ||
612 | * to reference it. | ||
613 | */ | ||
614 | int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, | ||
615 | struct drm_mode_create_dumb *args) | ||
616 | { | ||
617 | union omap_gem_size gsize; | ||
618 | |||
619 | /* in case someone tries to feed us a completely bogus stride: */ | ||
620 | args->pitch = align_pitch(args->pitch, args->width, args->bpp); | ||
621 | args->size = PAGE_ALIGN(args->pitch * args->height); | ||
622 | |||
623 | gsize = (union omap_gem_size){ | ||
624 | .bytes = args->size, | ||
625 | }; | ||
626 | |||
627 | return omap_gem_new_handle(dev, file, gsize, | ||
628 | OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle); | ||
629 | } | ||
630 | |||
631 | /** | ||
632 | * omap_gem_dumb_destroy - destroy a dumb buffer | ||
633 | * @file: client file | ||
634 | * @dev: our DRM device | ||
635 | * @handle: the object handle | ||
636 | * | ||
637 | * Destroy a handle that was created via omap_gem_dumb_create. | ||
638 | */ | ||
639 | int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, | ||
640 | uint32_t handle) | ||
641 | { | ||
642 | /* No special work needed, drop the reference and see what falls out */ | ||
643 | return drm_gem_handle_delete(file, handle); | ||
644 | } | ||
645 | |||
646 | /** | ||
647 | * omap_gem_dumb_map_offset - buffer mapping for dumb interface | ||
648 | * @file: our drm client file | ||
649 | * @dev: drm device | ||
650 | * @handle: GEM handle to the object (from dumb_create) | ||
651 | * | ||
652 | * Do the necessary setup to allow the mapping of the frame buffer | ||
653 | * into user memory. We don't have to do much here at the moment. | ||
654 | */ | ||
655 | int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, | ||
656 | uint32_t handle, uint64_t *offset) | ||
657 | { | ||
658 | struct drm_gem_object *obj; | ||
659 | int ret = 0; | ||
660 | |||
661 | /* GEM does all our handle to object mapping */ | ||
662 | obj = drm_gem_object_lookup(dev, file, handle); | ||
663 | if (obj == NULL) { | ||
664 | ret = -ENOENT; | ||
665 | goto fail; | ||
666 | } | ||
667 | |||
668 | *offset = omap_gem_mmap_offset(obj); | ||
669 | |||
670 | drm_gem_object_unreference_unlocked(obj); | ||
671 | |||
672 | fail: | ||
673 | return ret; | ||
674 | } | ||
675 | |||
676 | /* Set scrolling position. This allows us to implement fast scrolling | ||
677 | * for console. | ||
678 | * | ||
679 | * Call only from non-atomic contexts. | ||
680 | */ | ||
681 | int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll) | ||
682 | { | ||
683 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
684 | uint32_t npages = obj->size >> PAGE_SHIFT; | ||
685 | int ret = 0; | ||
686 | |||
687 | if (roll > npages) { | ||
688 | dev_err(obj->dev->dev, "invalid roll: %u\n", roll); | ||
689 | return -EINVAL; | ||
690 | } | ||
691 | |||
692 | omap_obj->roll = roll; | ||
693 | |||
694 | mutex_lock(&obj->dev->struct_mutex); | ||
695 | |||
696 | /* if we aren't mapped yet, we don't need to do anything */ | ||
697 | if (omap_obj->block) { | ||
698 | struct page **pages; | ||
699 | ret = get_pages(obj, &pages); | ||
700 | if (ret) | ||
701 | goto fail; | ||
702 | ret = tiler_pin(omap_obj->block, pages, npages, roll, true); | ||
703 | if (ret) | ||
704 | dev_err(obj->dev->dev, "could not repin: %d\n", ret); | ||
705 | } | ||
706 | |||
707 | fail: | ||
708 | mutex_unlock(&obj->dev->struct_mutex); | ||
709 | |||
710 | return ret; | ||
711 | } | ||
712 | |||
713 | /* Sync the buffer for CPU access.. note pages should already be | ||
714 | * attached, ie. omap_gem_get_pages() | ||
715 | */ | ||
716 | void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff) | ||
717 | { | ||
718 | struct drm_device *dev = obj->dev; | ||
719 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
720 | |||
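/* a non-zero addrs[pgoff] means the page is currently mapped for the | ||
* device; unmapping it hands ownership back to the CPU (and on ARM | ||
* performs the required cache maintenance) */ | ||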
721 | if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) { | ||
722 | dma_unmap_page(dev->dev, omap_obj->addrs[pgoff], | ||
723 | PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
724 | omap_obj->addrs[pgoff] = 0; | ||
725 | } | ||
726 | } | ||
727 | |||
728 | /* sync the buffer for DMA access */ | ||
729 | void omap_gem_dma_sync(struct drm_gem_object *obj, | ||
730 | enum dma_data_direction dir) | ||
731 | { | ||
732 | struct drm_device *dev = obj->dev; | ||
733 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
734 | |||
735 | if (is_cached_coherent(obj)) { | ||
736 | int i, npages = obj->size >> PAGE_SHIFT; | ||
737 | struct page **pages = omap_obj->pages; | ||
738 | bool dirty = false; | ||
739 | |||
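/* map back any page the CPU has touched since the last sync (its | ||
* addrs[] slot was cleared by omap_gem_cpu_sync()), then zap the | ||
* userspace mappings so the next CPU access faults and re-syncs: | ||
*/ | ||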
740 | for (i = 0; i < npages; i++) { | ||
741 | if (!omap_obj->addrs[i]) { | ||
742 | omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0, | ||
743 | PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
744 | dirty = true; | ||
745 | } | ||
746 | } | ||
747 | |||
748 | if (dirty) { | ||
749 | unmap_mapping_range(obj->filp->f_mapping, 0, | ||
750 | omap_gem_mmap_size(obj), 1); | ||
751 | } | ||
752 | } | ||
753 | } | ||
754 | |||
755 | /* Get physical address for DMA.. if 'remap' is true, and the buffer is not | ||
756 | * already contiguous, remap it to pin in physically contiguous memory.. (ie. | ||
757 | * map in TILER) | ||
758 | */ | ||
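/* Typical usage pairs this with omap_gem_put_paddr() once the DMA is | ||
* done (a sketch, error handling elided): | ||
* | ||
*	dma_addr_t paddr; | ||
*	if (!omap_gem_get_paddr(obj, &paddr, true)) { | ||
*		... program scanout/DMA with paddr ... | ||
*		omap_gem_put_paddr(obj); | ||
*	} | ||
*/ | ||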
759 | int omap_gem_get_paddr(struct drm_gem_object *obj, | ||
760 | dma_addr_t *paddr, bool remap) | ||
761 | { | ||
762 | struct omap_drm_private *priv = obj->dev->dev_private; | ||
763 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
764 | int ret = 0; | ||
765 | |||
766 | mutex_lock(&obj->dev->struct_mutex); | ||
767 | |||
768 | if (remap && is_shmem(obj) && priv->has_dmm) { | ||
769 | if (omap_obj->paddr_cnt == 0) { | ||
770 | struct page **pages; | ||
771 | uint32_t npages = obj->size >> PAGE_SHIFT; | ||
772 | enum tiler_fmt fmt = gem2fmt(omap_obj->flags); | ||
773 | struct tiler_block *block; | ||
774 | |||
775 | BUG_ON(omap_obj->block); | ||
776 | |||
777 | ret = get_pages(obj, &pages); | ||
778 | if (ret) | ||
779 | goto fail; | ||
780 | |||
781 | if (omap_obj->flags & OMAP_BO_TILED) { | ||
782 | block = tiler_reserve_2d(fmt, | ||
783 | omap_obj->width, | ||
784 | omap_obj->height, 0); | ||
785 | } else { | ||
786 | block = tiler_reserve_1d(obj->size); | ||
787 | } | ||
788 | |||
789 | if (IS_ERR(block)) { | ||
790 | ret = PTR_ERR(block); | ||
791 | dev_err(obj->dev->dev, | ||
792 | "could not remap: %d (%d)\n", ret, fmt); | ||
793 | goto fail; | ||
794 | } | ||
795 | |||
796 | /* TODO: enable async refill.. */ | ||
797 | ret = tiler_pin(block, pages, npages, | ||
798 | omap_obj->roll, true); | ||
799 | if (ret) { | ||
800 | tiler_release(block); | ||
801 | dev_err(obj->dev->dev, | ||
802 | "could not pin: %d\n", ret); | ||
803 | goto fail; | ||
804 | } | ||
805 | |||
806 | omap_obj->paddr = tiler_ssptr(block); | ||
807 | omap_obj->block = block; | ||
808 | |||
809 | DBG("got paddr: %08x", omap_obj->paddr); | ||
810 | } | ||
811 | |||
812 | omap_obj->paddr_cnt++; | ||
813 | |||
814 | *paddr = omap_obj->paddr; | ||
815 | } else if (omap_obj->flags & OMAP_BO_DMA) { | ||
816 | *paddr = omap_obj->paddr; | ||
817 | } else { | ||
818 | ret = -EINVAL; | ||
819 | goto fail; | ||
820 | } | ||
821 | |||
822 | fail: | ||
823 | mutex_unlock(&obj->dev->struct_mutex); | ||
824 | |||
825 | return ret; | ||
826 | } | ||
827 | |||
828 | /* Release the physical address when DMA is no longer being performed.. this | ||
829 | * could potentially unpin and unmap buffers from TILER | ||
830 | */ | ||
831 | int omap_gem_put_paddr(struct drm_gem_object *obj) | ||
832 | { | ||
833 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
834 | int ret = 0; | ||
835 | |||
836 | mutex_lock(&obj->dev->struct_mutex); | ||
837 | if (omap_obj->paddr_cnt > 0) { | ||
838 | omap_obj->paddr_cnt--; | ||
839 | if (omap_obj->paddr_cnt == 0) { | ||
840 | ret = tiler_unpin(omap_obj->block); | ||
841 | if (ret) { | ||
842 | dev_err(obj->dev->dev, | ||
843 | "could not unpin pages: %d\n", ret); | ||
844 | goto fail; | ||
845 | } | ||
846 | ret = tiler_release(omap_obj->block); | ||
847 | if (ret) { | ||
848 | dev_err(obj->dev->dev, | ||
849 | "could not release unmap: %d\n", ret); | ||
850 | } | ||
851 | omap_obj->block = NULL; | ||
852 | } | ||
853 | } | ||
854 | fail: | ||
855 | mutex_unlock(&obj->dev->struct_mutex); | ||
856 | return ret; | ||
857 | } | ||
858 | |||
859 | /* Get rotated scanout address (only valid if already pinned), at the | ||
860 | * specified orientation and x,y offset from top-left corner of buffer | ||
861 | * (only valid for tiled 2d buffers) | ||
862 | */ | ||
863 | int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient, | ||
864 | int x, int y, dma_addr_t *paddr) | ||
865 | { | ||
866 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
867 | int ret = -EINVAL; | ||
868 | |||
869 | mutex_lock(&obj->dev->struct_mutex); | ||
870 | if ((omap_obj->paddr_cnt > 0) && omap_obj->block && | ||
871 | (omap_obj->flags & OMAP_BO_TILED)) { | ||
872 | *paddr = tiler_tsptr(omap_obj->block, orient, x, y); | ||
873 | ret = 0; | ||
874 | } | ||
875 | mutex_unlock(&obj->dev->struct_mutex); | ||
876 | return ret; | ||
877 | } | ||
878 | |||
879 | /* Get tiler stride for the buffer (only valid for 2d tiled buffers) */ | ||
880 | int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient) | ||
881 | { | ||
882 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
883 | int ret = -EINVAL; | ||
884 | if (omap_obj->flags & OMAP_BO_TILED) | ||
885 | ret = tiler_stride(gem2fmt(omap_obj->flags), orient); | ||
886 | return ret; | ||
887 | } | ||
888 | |||
889 | /* acquire pages when needed (for example, for DMA where a physically | ||
890 | * contiguous buffer is not required) | ||
891 | */ | ||
892 | static int get_pages(struct drm_gem_object *obj, struct page ***pages) | ||
893 | { | ||
894 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
895 | int ret = 0; | ||
896 | |||
897 | if (is_shmem(obj) && !omap_obj->pages) { | ||
898 | ret = omap_gem_attach_pages(obj); | ||
899 | if (ret) { | ||
900 | dev_err(obj->dev->dev, "could not attach pages\n"); | ||
901 | return ret; | ||
902 | } | ||
903 | } | ||
904 | |||
905 | /* TODO: even phys-contig.. we should have a list of pages? */ | ||
906 | *pages = omap_obj->pages; | ||
907 | |||
908 | return 0; | ||
909 | } | ||
910 | |||
911 | /* if !remap, and we don't have pages backing, then fail, rather than | ||
912 | * increasing the pin count (which we don't really do yet anyways, | ||
913 | * because we don't support swapping pages back out). And 'remap' | ||
914 | * might not be quite the right name, but I wanted to keep it working | ||
915 | * similarly to omap_gem_get_paddr(). Note though that mutex is not | ||
916 | * acquired if !remap (because this can be called in atomic ctxt), | ||
917 | * but probably omap_gem_get_paddr() should be changed to work in the | ||
918 | * same way. If !remap, a matching omap_gem_put_pages() call is not | ||
919 | * required (and should not be made). | ||
920 | */ | ||
921 | int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages, | ||
922 | bool remap) | ||
923 | { | ||
924 | int ret; | ||
925 | if (!remap) { | ||
926 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
927 | if (!omap_obj->pages) | ||
928 | return -ENOMEM; | ||
929 | *pages = omap_obj->pages; | ||
930 | return 0; | ||
931 | } | ||
932 | mutex_lock(&obj->dev->struct_mutex); | ||
933 | ret = get_pages(obj, pages); | ||
934 | mutex_unlock(&obj->dev->struct_mutex); | ||
935 | return ret; | ||
936 | } | ||
937 | |||
938 | /* release pages when DMA no longer being performed */ | ||
939 | int omap_gem_put_pages(struct drm_gem_object *obj) | ||
940 | { | ||
941 | /* do something here if we dynamically attach/detach pages.. at | ||
942 | * least they would no longer need to be pinned if everyone has | ||
943 | * released the pages.. | ||
944 | */ | ||
945 | return 0; | ||
946 | } | ||
947 | |||
948 | /* Get kernel virtual address for CPU access.. this more or less only | ||
949 | * exists for omap_fbdev. This should be called with struct_mutex | ||
950 | * held. | ||
951 | */ | ||
952 | void *omap_gem_vaddr(struct drm_gem_object *obj) | ||
953 | { | ||
954 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
955 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); | ||
956 | if (!omap_obj->vaddr) { | ||
957 | struct page **pages; | ||
958 | int ret = get_pages(obj, &pages); | ||
959 | if (ret) | ||
960 | return ERR_PTR(ret); | ||
961 | omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, | ||
962 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); | ||
963 | } | ||
964 | return omap_obj->vaddr; | ||
965 | } | ||
966 | |||
967 | #ifdef CONFIG_PM | ||
968 | /* re-pin objects in DMM in resume path: */ | ||
969 | int omap_gem_resume(struct device *dev) | ||
970 | { | ||
971 | struct drm_device *drm_dev = dev_get_drvdata(dev); | ||
972 | struct omap_drm_private *priv = drm_dev->dev_private; | ||
973 | struct omap_gem_object *omap_obj; | ||
974 | int ret = 0; | ||
975 | |||
976 | list_for_each_entry(omap_obj, &priv->obj_list, mm_list) { | ||
977 | if (omap_obj->block) { | ||
978 | struct drm_gem_object *obj = &omap_obj->base; | ||
979 | uint32_t npages = obj->size >> PAGE_SHIFT; | ||
980 | WARN_ON(!omap_obj->pages); /* this can't happen */ | ||
981 | ret = tiler_pin(omap_obj->block, | ||
982 | omap_obj->pages, npages, | ||
983 | omap_obj->roll, true); | ||
984 | if (ret) { | ||
985 | dev_err(dev, "could not repin: %d\n", ret); | ||
986 | return ret; | ||
987 | } | ||
988 | } | ||
989 | } | ||
990 | |||
991 | return 0; | ||
992 | } | ||
993 | #endif | ||
994 | |||
995 | #ifdef CONFIG_DEBUG_FS | ||
996 | void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) | ||
997 | { | ||
998 | struct drm_device *dev = obj->dev; | ||
999 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
1000 | uint64_t off = 0; | ||
1001 | |||
1002 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
1003 | |||
1004 | if (obj->map_list.map) | ||
1005 | off = (uint64_t)obj->map_list.hash.key; | ||
1006 | |||
1007 | seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d", | ||
1008 | omap_obj->flags, obj->name, obj->refcount.refcount.counter, | ||
1009 | off, omap_obj->paddr, omap_obj->paddr_cnt, | ||
1010 | omap_obj->vaddr, omap_obj->roll); | ||
1011 | |||
1012 | if (omap_obj->flags & OMAP_BO_TILED) { | ||
1013 | seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height); | ||
1014 | if (omap_obj->block) { | ||
1015 | struct tcm_area *area = &omap_obj->block->area; | ||
1016 | seq_printf(m, " (%dx%d, %dx%d)", | ||
1017 | area->p0.x, area->p0.y, | ||
1018 | area->p1.x, area->p1.y); | ||
1019 | } | ||
1020 | } else { | ||
1021 | seq_printf(m, " %d", obj->size); | ||
1022 | } | ||
1023 | |||
1024 | seq_printf(m, "\n"); | ||
1025 | } | ||
1026 | |||
1027 | void omap_gem_describe_objects(struct list_head *list, struct seq_file *m) | ||
1028 | { | ||
1029 | struct omap_gem_object *omap_obj; | ||
1030 | int count = 0; | ||
1031 | size_t size = 0; | ||
1032 | |||
1033 | list_for_each_entry(omap_obj, list, mm_list) { | ||
1034 | struct drm_gem_object *obj = &omap_obj->base; | ||
1035 | seq_printf(m, " "); | ||
1036 | omap_gem_describe(obj, m); | ||
1037 | count++; | ||
1038 | size += obj->size; | ||
1039 | } | ||
1040 | |||
1041 | seq_printf(m, "Total %d objects, %zu bytes\n", count, size); | ||
1042 | } | ||
1043 | #endif | ||
1044 | |||
1045 | /* Buffer Synchronization: | ||
1046 | */ | ||
1047 | |||
1048 | struct omap_gem_sync_waiter { | ||
1049 | struct list_head list; | ||
1050 | struct omap_gem_object *omap_obj; | ||
1051 | enum omap_gem_op op; | ||
1052 | uint32_t read_target, write_target; | ||
1053 | /* notify called w/ sync_lock held */ | ||
1054 | void (*notify)(void *arg); | ||
1055 | void *arg; | ||
1056 | }; | ||
1057 | |||
1058 | /* list of omap_gem_sync_waiter.. the notify fxn gets called back when | ||
1059 | * the read and/or write target count is reached, which can invoke a user | ||
1060 | * callback (ex. to kick 3d and/or 2d), wake up a blocked task (prep for | ||
1061 | * cpu access), etc. | ||
1062 | */ | ||
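/* counter flow: omap_gem_op_start() bumps {read,write}_pending and | ||
* omap_gem_op_finish() bumps {read,write}_complete; a waiter snapshots | ||
* the pending counts as its targets and is notified once the complete | ||
* counts catch up */ | ||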
1063 | static LIST_HEAD(waiters); | ||
1064 | |||
1065 | static inline bool is_waiting(struct omap_gem_sync_waiter *waiter) | ||
1066 | { | ||
1067 | struct omap_gem_object *omap_obj = waiter->omap_obj; | ||
1068 | if ((waiter->op & OMAP_GEM_READ) && | ||
1069 | (omap_obj->sync->read_complete < waiter->read_target)) | ||
1070 | return true; | ||
1071 | if ((waiter->op & OMAP_GEM_WRITE) && | ||
1072 | (omap_obj->sync->write_complete < waiter->write_target)) | ||
1073 | return true; | ||
1074 | return false; | ||
1075 | } | ||
1076 | |||
1077 | /* macro for sync debug.. */ | ||
1078 | #define SYNCDBG 0 | ||
1079 | #define SYNC(fmt, ...) do { if (SYNCDBG) \ | ||
1080 | printk(KERN_ERR "%s:%d: "fmt"\n", \ | ||
1081 | __func__, __LINE__, ##__VA_ARGS__); \ | ||
1082 | } while (0) | ||
1083 | |||
1084 | |||
1085 | static void sync_op_update(void) | ||
1086 | { | ||
1087 | struct omap_gem_sync_waiter *waiter, *n; | ||
1088 | list_for_each_entry_safe(waiter, n, &waiters, list) { | ||
1089 | if (!is_waiting(waiter)) { | ||
1090 | list_del(&waiter->list); | ||
1091 | SYNC("notify: %p", waiter); | ||
1092 | waiter->notify(waiter->arg); | ||
1093 | kfree(waiter); | ||
1094 | } | ||
1095 | } | ||
1096 | } | ||
1097 | |||
1098 | static inline int sync_op(struct drm_gem_object *obj, | ||
1099 | enum omap_gem_op op, bool start) | ||
1100 | { | ||
1101 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
1102 | int ret = 0; | ||
1103 | |||
1104 | spin_lock(&sync_lock); | ||
1105 | |||
1106 | if (!omap_obj->sync) { | ||
1107 | omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC); | ||
1108 | if (!omap_obj->sync) { | ||
1109 | ret = -ENOMEM; | ||
1110 | goto unlock; | ||
1111 | } | ||
1112 | } | ||
1113 | |||
1114 | if (start) { | ||
1115 | if (op & OMAP_GEM_READ) | ||
1116 | omap_obj->sync->read_pending++; | ||
1117 | if (op & OMAP_GEM_WRITE) | ||
1118 | omap_obj->sync->write_pending++; | ||
1119 | } else { | ||
1120 | if (op & OMAP_GEM_READ) | ||
1121 | omap_obj->sync->read_complete++; | ||
1122 | if (op & OMAP_GEM_WRITE) | ||
1123 | omap_obj->sync->write_complete++; | ||
1124 | sync_op_update(); | ||
1125 | } | ||
1126 | |||
1127 | unlock: | ||
1128 | spin_unlock(&sync_lock); | ||
1129 | |||
1130 | return ret; | ||
1131 | } | ||
1132 | |||
1133 | /* it is a bit lame to handle updates in this sort of polling way, but | ||
1134 | * in case of PVR, the GPU can directly update read/write complete | ||
1135 | * values, and not really tell us which ones it updated.. this also | ||
1136 | * means that sync_lock is not quite sufficient. So we'll need to | ||
1137 | * do something a bit better when it comes time to add support for | ||
1138 | * separate 2d hw.. | ||
1139 | */ | ||
1140 | void omap_gem_op_update(void) | ||
1141 | { | ||
1142 | spin_lock(&sync_lock); | ||
1143 | sync_op_update(); | ||
1144 | spin_unlock(&sync_lock); | ||
1145 | } | ||
1146 | |||
1147 | /* mark the start of read and/or write operation */ | ||
1148 | int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op) | ||
1149 | { | ||
1150 | return sync_op(obj, op, true); | ||
1151 | } | ||
1152 | |||
1153 | int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op) | ||
1154 | { | ||
1155 | return sync_op(obj, op, false); | ||
1156 | } | ||
1157 | |||
1158 | static DECLARE_WAIT_QUEUE_HEAD(sync_event); | ||
1159 | |||
1160 | static void sync_notify(void *arg) | ||
1161 | { | ||
1162 | struct task_struct **waiter_task = arg; | ||
1163 | *waiter_task = NULL; | ||
1164 | wake_up_all(&sync_event); | ||
1165 | } | ||
1166 | |||
1167 | int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op) | ||
1168 | { | ||
1169 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
1170 | int ret = 0; | ||
1171 | if (omap_obj->sync) { | ||
1172 | struct task_struct *waiter_task = current; | ||
1173 | struct omap_gem_sync_waiter *waiter = | ||
1174 | kzalloc(sizeof(*waiter), GFP_KERNEL); | ||
1175 | |||
1176 | if (!waiter) | ||
1177 | return -ENOMEM; | ||
1178 | |||
1179 | waiter->omap_obj = omap_obj; | ||
1180 | waiter->op = op; | ||
1181 | waiter->read_target = omap_obj->sync->read_pending; | ||
1182 | waiter->write_target = omap_obj->sync->write_pending; | ||
1183 | waiter->notify = sync_notify; | ||
1184 | waiter->arg = &waiter_task; | ||
1185 | |||
1186 | spin_lock(&sync_lock); | ||
1187 | if (is_waiting(waiter)) { | ||
1188 | SYNC("waited: %p", waiter); | ||
1189 | list_add_tail(&waiter->list, &waiters); | ||
1190 | spin_unlock(&sync_lock); | ||
1191 | ret = wait_event_interruptible(sync_event, | ||
1192 | (waiter_task == NULL)); | ||
1193 | spin_lock(&sync_lock); | ||
1194 | if (waiter_task) { | ||
1195 | SYNC("interrupted: %p", waiter); | ||
1196 | /* we were interrupted */ | ||
1197 | list_del(&waiter->list); | ||
1198 | waiter_task = NULL; | ||
1199 | } else { | ||
1200 | /* freed in sync_op_update() */ | ||
1201 | waiter = NULL; | ||
1202 | } | ||
1203 | } | ||
1204 | spin_unlock(&sync_lock); | ||
1205 | |||
1206 | /* kfree(NULL) is a no-op, so no need to check: */ | ||
1207 | kfree(waiter); | ||
1208 | } | ||
1209 | return ret; | ||
1210 | } | ||
1211 | |||
1212 | /* call fxn(arg), either synchronously or asynchronously if the op | ||
1213 | * is currently blocked.. fxn() can be called from any context | ||
1214 | * | ||
1215 | * (TODO for now fxn is called back from whichever context calls | ||
1216 | * omap_gem_op_update().. but this could be better defined later | ||
1217 | * if needed) | ||
1218 | * | ||
1219 | * TODO more code in common w/ _sync().. | ||
1220 | */ | ||
1221 | int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op, | ||
1222 | void (*fxn)(void *arg), void *arg) | ||
1223 | { | ||
1224 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
1225 | if (omap_obj->sync) { | ||
1226 | struct omap_gem_sync_waiter *waiter = | ||
1227 | kzalloc(sizeof(*waiter), GFP_ATOMIC); | ||
1228 | |||
1229 | if (!waiter) | ||
1230 | return -ENOMEM; | ||
1231 | |||
1232 | waiter->omap_obj = omap_obj; | ||
1233 | waiter->op = op; | ||
1234 | waiter->read_target = omap_obj->sync->read_pending; | ||
1235 | waiter->write_target = omap_obj->sync->write_pending; | ||
1236 | waiter->notify = fxn; | ||
1237 | waiter->arg = arg; | ||
1238 | |||
1239 | spin_lock(&sync_lock); | ||
1240 | if (is_waiting(waiter)) { | ||
1241 | SYNC("waited: %p", waiter); | ||
1242 | list_add_tail(&waiter->list, &waiters); | ||
1243 | spin_unlock(&sync_lock); | ||
1244 | return 0; | ||
1245 | } | ||
1246 | |||
1247 | spin_unlock(&sync_lock); | ||
1248 | } | ||
1249 | |||
1250 | /* no waiting.. */ | ||
1251 | fxn(arg); | ||
1252 | |||
1253 | return 0; | ||
1254 | } | ||
1255 | |||
1256 | /* special API so PVR can update the buffer to use a sync-object allocated | ||
1257 | * from its sync-obj heap. Only used for a newly allocated (from PVR's | ||
1258 | * perspective) sync-object, so we overwrite the new syncobj w/ values | ||
1259 | * from the already allocated syncobj (if there is one) | ||
1260 | */ | ||
1261 | int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj) | ||
1262 | { | ||
1263 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
1264 | int ret = 0; | ||
1265 | |||
1266 | spin_lock(&sync_lock); | ||
1267 | |||
1268 | if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) { | ||
1269 | /* clearing a previously set syncobj */ | ||
1270 | syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync), | ||
1271 | GFP_ATOMIC); | ||
1272 | if (!syncobj) { | ||
1273 | ret = -ENOMEM; | ||
1274 | goto unlock; | ||
1275 | } | ||
1276 | omap_obj->flags &= ~OMAP_BO_EXT_SYNC; | ||
1277 | omap_obj->sync = syncobj; | ||
1278 | } else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) { | ||
1279 | /* replacing an existing syncobj */ | ||
1280 | if (omap_obj->sync) { | ||
1281 | memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync)); | ||
1282 | kfree(omap_obj->sync); | ||
1283 | } | ||
1284 | omap_obj->flags |= OMAP_BO_EXT_SYNC; | ||
1285 | omap_obj->sync = syncobj; | ||
1286 | } | ||
1287 | |||
1288 | unlock: | ||
1289 | spin_unlock(&sync_lock); | ||
1290 | return ret; | ||
1291 | } | ||
1292 | |||
1293 | int omap_gem_init_object(struct drm_gem_object *obj) | ||
1294 | { | ||
1295 | return -EINVAL; /* unused */ | ||
1296 | } | ||
1297 | |||
1298 | /* don't call directly.. called from GEM core when it is time to actually | ||
1299 | * free the object.. | ||
1300 | */ | ||
1301 | void omap_gem_free_object(struct drm_gem_object *obj) | ||
1302 | { | ||
1303 | struct drm_device *dev = obj->dev; | ||
1304 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
1305 | |||
1306 | evict(obj); | ||
1307 | |||
1308 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
1309 | |||
1310 | list_del(&omap_obj->mm_list); | ||
1311 | |||
1312 | if (obj->map_list.map) | ||
1313 | drm_gem_free_mmap_offset(obj); | ||
1314 | |||
1315 | /* this means the object is still pinned.. which really should | ||
1316 | * not happen. I think.. | ||
1317 | */ | ||
1318 | WARN_ON(omap_obj->paddr_cnt > 0); | ||
1319 | |||
1320 | /* don't free externally allocated backing memory */ | ||
1321 | if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) { | ||
1322 | if (omap_obj->pages) | ||
1323 | omap_gem_detach_pages(obj); | ||
1324 | |||
1325 | if (!is_shmem(obj)) { | ||
1326 | dma_free_writecombine(dev->dev, obj->size, | ||
1327 | omap_obj->vaddr, omap_obj->paddr); | ||
1328 | } else if (omap_obj->vaddr) { | ||
1329 | vunmap(omap_obj->vaddr); | ||
1330 | } | ||
1331 | } | ||
1332 | |||
1333 | /* don't free externally allocated syncobj */ | ||
1334 | if (!(omap_obj->flags & OMAP_BO_EXT_SYNC)) | ||
1335 | kfree(omap_obj->sync); | ||
1336 | |||
1337 | drm_gem_object_release(obj); | ||
1338 | |||
1339 | kfree(obj); | ||
1340 | } | ||
1341 | |||
1342 | /* convenience method to construct a GEM buffer object, and userspace handle */ | ||
1343 | int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, | ||
1344 | union omap_gem_size gsize, uint32_t flags, uint32_t *handle) | ||
1345 | { | ||
1346 | struct drm_gem_object *obj; | ||
1347 | int ret; | ||
1348 | |||
1349 | obj = omap_gem_new(dev, gsize, flags); | ||
1350 | if (!obj) | ||
1351 | return -ENOMEM; | ||
1352 | |||
1353 | ret = drm_gem_handle_create(file, obj, handle); | ||
1354 | if (ret) { | ||
1355 | drm_gem_object_release(obj); | ||
1356 | kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */ | ||
1357 | return ret; | ||
1358 | } | ||
1359 | |||
1360 | /* drop reference from allocate - handle holds it now */ | ||
1361 | drm_gem_object_unreference_unlocked(obj); | ||
1362 | |||
1363 | return 0; | ||
1364 | } | ||
1365 | |||
1366 | /* GEM buffer object constructor */ | ||
1367 | struct drm_gem_object *omap_gem_new(struct drm_device *dev, | ||
1368 | union omap_gem_size gsize, uint32_t flags) | ||
1369 | { | ||
1370 | struct omap_drm_private *priv = dev->dev_private; | ||
1371 | struct omap_gem_object *omap_obj; | ||
1372 | struct drm_gem_object *obj = NULL; | ||
1373 | size_t size; | ||
1374 | int ret; | ||
1375 | |||
1376 | if (flags & OMAP_BO_TILED) { | ||
1377 | if (!usergart) { | ||
1378 | dev_err(dev->dev, "Tiled buffers require DMM\n"); | ||
1379 | goto fail; | ||
1380 | } | ||
1381 | |||
1382 | /* tiled buffers are always backed by shmem pages.. when they are | ||
1383 | * scanned out, they are remapped into DMM/TILER | ||
1384 | */ | ||
1385 | flags &= ~OMAP_BO_SCANOUT; | ||
1386 | |||
1387 | /* currently don't allow cached buffers.. there is some caching | ||
1388 | * stuff that needs to be handled better | ||
1389 | */ | ||
1390 | flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED); | ||
1391 | flags |= OMAP_BO_WC; | ||
1392 | |||
1393 | /* align dimensions to slot boundaries... */ | ||
1394 | tiler_align(gem2fmt(flags), | ||
1395 | &gsize.tiled.width, &gsize.tiled.height); | ||
1396 | |||
1397 | /* ...and calculate size based on aligned dimensions */ | ||
1398 | size = tiler_size(gem2fmt(flags), | ||
1399 | gsize.tiled.width, gsize.tiled.height); | ||
1400 | } else { | ||
1401 | size = PAGE_ALIGN(gsize.bytes); | ||
1402 | } | ||
1403 | |||
1404 | omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL); | ||
1405 | if (!omap_obj) | ||
1406 | goto fail; | ||
1407 | |||
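/* track the object on the global obj_list (used by omap_gem_resume() | ||
* to re-pin after suspend, and by debugfs) */ | ||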
1408 | list_add(&omap_obj->mm_list, &priv->obj_list); | ||
1409 | |||
1410 | obj = &omap_obj->base; | ||
1411 | |||
1412 | if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) { | ||
1413 | /* attempt to allocate contiguous memory if we don't | ||
1414 | * have DMM for remapping discontiguous buffers | ||
1415 | */ | ||
1416 | omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size, | ||
1417 | &omap_obj->paddr, GFP_KERNEL); | ||
1418 | if (omap_obj->vaddr) | ||
1419 | flags |= OMAP_BO_DMA; | ||
1420 | |||
1421 | } | ||
1422 | |||
1423 | omap_obj->flags = flags; | ||
1424 | |||
1425 | if (flags & OMAP_BO_TILED) { | ||
1426 | omap_obj->width = gsize.tiled.width; | ||
1427 | omap_obj->height = gsize.tiled.height; | ||
1428 | } | ||
1429 | |||
1430 | if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) | ||
1431 | ret = drm_gem_private_object_init(dev, obj, size); | ||
1432 | else | ||
1433 | ret = drm_gem_object_init(dev, obj, size); | ||
1434 | |||
1435 | if (ret) | ||
1436 | goto fail; | ||
1437 | |||
1438 | return obj; | ||
1439 | |||
1440 | fail: | ||
1441 | if (obj) | ||
1442 | omap_gem_free_object(obj); | ||
1443 | |||
1444 | return NULL; | ||
1445 | } | ||
1446 | |||
1447 | /* init/cleanup.. if DMM is used, we need to set some stuff up.. */ | ||
1448 | void omap_gem_init(struct drm_device *dev) | ||
1449 | { | ||
1450 | struct omap_drm_private *priv = dev->dev_private; | ||
1451 | const enum tiler_fmt fmts[] = { | ||
1452 | TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT | ||
1453 | }; | ||
1454 | int i, j; | ||
1455 | |||
1456 | if (!dmm_is_available()) { | ||
1457 | /* DMM only supported on OMAP4 and later, so this isn't fatal */ | ||
1458 | dev_warn(dev->dev, "DMM not available, disabling DMM support\n"); | ||
1459 | return; | ||
1460 | } | ||
1461 | |||
1462 | usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL); | ||
1463 | if (!usergart) | ||
1464 | return; | ||
1465 | |||
1466 | /* reserve 4k aligned/wide regions for userspace mappings: */ | ||
1467 | for (i = 0; i < ARRAY_SIZE(fmts); i++) { | ||
1468 | uint16_t h = 1, w = PAGE_SIZE >> i; | ||
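/* fmts[] is ordered by bytes-per-pixel == 1 << i, so PAGE_SIZE >> i | ||
* is the width in pixels of a region that is one page wide in bytes */ | ||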
1469 | tiler_align(fmts[i], &w, &h); | ||
1470 | /* note: since each region is 1 4kb page wide and the minimum | ||
1471 | * number of rows high, the height ends up being the same as the | ||
1472 | * # of pages in the region | ||
1473 | */ | ||
1474 | usergart[i].height = h; | ||
1475 | usergart[i].height_shift = ilog2(h); | ||
1476 | usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT; | ||
1477 | usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i); | ||
1478 | for (j = 0; j < NUM_USERGART_ENTRIES; j++) { | ||
1479 | struct usergart_entry *entry = &usergart[i].entry[j]; | ||
1480 | struct tiler_block *block = | ||
1481 | tiler_reserve_2d(fmts[i], w, h, | ||
1482 | PAGE_SIZE); | ||
1483 | if (IS_ERR(block)) { | ||
1484 | dev_err(dev->dev, | ||
1485 | "reserve failed: %d, %d, %ld\n", | ||
1486 | i, j, PTR_ERR(block)); | ||
1487 | return; | ||
1488 | } | ||
1489 | entry->paddr = tiler_ssptr(block); | ||
1490 | entry->block = block; | ||
1491 | |||
1492 | DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h, | ||
1493 | entry->paddr, | ||
1494 | usergart[i].stride_pfn << PAGE_SHIFT); | ||
1495 | } | ||
1496 | } | ||
1497 | |||
1498 | priv->has_dmm = true; | ||
1499 | } | ||
1500 | |||
1501 | void omap_gem_deinit(struct drm_device *dev) | ||
1502 | { | ||
1503 | /* I believe we can rely on there being no more outstanding GEM | ||
1504 | * objects which could depend on usergart/dmm at this point. | ||
1505 | */ | ||
1506 | kfree(usergart); | ||
1507 | } | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c new file mode 100644 index 000000000000..ac74d1bc67bf --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | |||
@@ -0,0 +1,225 @@ | |||
1 | /* | ||
2 | * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments | ||
5 | * Author: Rob Clark <rob.clark@linaro.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include "omap_drv.h" | ||
21 | |||
22 | #include <linux/dma-buf.h> | ||
23 | |||
24 | static struct sg_table *omap_gem_map_dma_buf( | ||
25 | struct dma_buf_attachment *attachment, | ||
26 | enum dma_data_direction dir) | ||
27 | { | ||
28 | struct drm_gem_object *obj = attachment->dmabuf->priv; | ||
29 | struct sg_table *sg; | ||
30 | dma_addr_t paddr; | ||
31 | int ret; | ||
32 | |||
33 | sg = kzalloc(sizeof(*sg), GFP_KERNEL); | ||
34 | if (!sg) | ||
35 | return ERR_PTR(-ENOMEM); | ||
36 | |||
37 | /* camera, etc, need physically contiguous buffers.. but we need a | ||
38 | * better way to know this.. | ||
39 | */ | ||
40 | ret = omap_gem_get_paddr(obj, &paddr, true); | ||
41 | if (ret) | ||
42 | goto out; | ||
43 | |||
44 | ret = sg_alloc_table(sg, 1, GFP_KERNEL); | ||
45 | if (ret) | ||
46 | goto out; | ||
47 | |||
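/* a single-entry table suffices: omap_gem_get_paddr() above pinned the | ||
* buffer physically contiguous (remapping through TILER if necessary) */ | ||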
48 | sg_init_table(sg->sgl, 1); | ||
49 | sg_dma_len(sg->sgl) = obj->size; | ||
50 | sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0); | ||
51 | sg_dma_address(sg->sgl) = paddr; | ||
52 | |||
53 | /* this should be after _get_paddr() to ensure we have pages attached */ | ||
54 | omap_gem_dma_sync(obj, dir); | ||
55 | |||
56 | return sg; | ||
57 | out: | ||
58 | kfree(sg); | ||
59 | return ERR_PTR(ret); | ||
60 | } | ||
61 | |||
62 | static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, | ||
63 | struct sg_table *sg, enum dma_data_direction dir) | ||
64 | { | ||
65 | struct drm_gem_object *obj = attachment->dmabuf->priv; | ||
66 | omap_gem_put_paddr(obj); | ||
67 | sg_free_table(sg); | ||
68 | kfree(sg); | ||
69 | } | ||
70 | |||
71 | static void omap_gem_dmabuf_release(struct dma_buf *buffer) | ||
72 | { | ||
73 | struct drm_gem_object *obj = buffer->priv; | ||
74 | /* release reference that was taken when dmabuf was exported | ||
75 | * in omap_gem_prime_set().. | ||
76 | */ | ||
77 | drm_gem_object_unreference_unlocked(obj); | ||
78 | } | ||
79 | |||
80 | |||
81 | static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, | ||
82 | size_t start, size_t len, enum dma_data_direction dir) | ||
83 | { | ||
84 | struct drm_gem_object *obj = buffer->priv; | ||
85 | struct page **pages; | ||
86 | if (omap_gem_flags(obj) & OMAP_BO_TILED) { | ||
87 | /* TODO we would need to pin at least part of the buffer to | ||
88 | * get de-tiled view. For now just reject it. | ||
89 | */ | ||
90 | return -ENOMEM; | ||
91 | } | ||
92 | /* make sure we have the pages: */ | ||
93 | return omap_gem_get_pages(obj, &pages, true); | ||
94 | } | ||
95 | |||
96 | static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer, | ||
97 | size_t start, size_t len, enum dma_data_direction dir) | ||
98 | { | ||
99 | struct drm_gem_object *obj = buffer->priv; | ||
100 | omap_gem_put_pages(obj); | ||
101 | } | ||
102 | |||
103 | |||
104 | static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer, | ||
105 | unsigned long page_num) | ||
106 | { | ||
107 | struct drm_gem_object *obj = buffer->priv; | ||
108 | struct page **pages; | ||
109 | omap_gem_get_pages(obj, &pages, false); | ||
110 | omap_gem_cpu_sync(obj, page_num); | ||
111 | return kmap_atomic(pages[page_num]); | ||
112 | } | ||
113 | |||
114 | static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer, | ||
115 | unsigned long page_num, void *addr) | ||
116 | { | ||
117 | kunmap_atomic(addr); | ||
118 | } | ||
119 | |||
120 | static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer, | ||
121 | unsigned long page_num) | ||
122 | { | ||
123 | struct drm_gem_object *obj = buffer->priv; | ||
124 | struct page **pages; | ||
125 | omap_gem_get_pages(obj, &pages, false); | ||
126 | omap_gem_cpu_sync(obj, page_num); | ||
127 | return kmap(pages[page_num]); | ||
128 | } | ||
129 | |||
130 | static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer, | ||
131 | unsigned long page_num, void *addr) | ||
132 | { | ||
133 | struct drm_gem_object *obj = buffer->priv; | ||
134 | struct page **pages; | ||
135 | omap_gem_get_pages(obj, &pages, false); | ||
136 | kunmap(pages[page_num]); | ||
137 | } | ||
138 | |||
139 | /* | ||
140 | * TODO maybe we can split up drm_gem_mmap to avoid duplicating | ||
141 | * some here.. or at least have a drm_dmabuf_mmap helper. | ||
142 | */ | ||
143 | static int omap_gem_dmabuf_mmap(struct dma_buf *buffer, | ||
144 | struct vm_area_struct *vma) | ||
145 | { | ||
146 | struct drm_gem_object *obj = buffer->priv; | ||
147 | int ret = 0; | ||
148 | |||
149 | if (WARN_ON(!obj->filp)) | ||
150 | return -EINVAL; | ||
151 | |||
152 | /* Check for valid size. */ | ||
153 | if (omap_gem_mmap_size(obj) < vma->vm_end - vma->vm_start) { | ||
154 | ret = -EINVAL; | ||
155 | goto out_unlock; | ||
156 | } | ||
157 | |||
158 | if (!obj->dev->driver->gem_vm_ops) { | ||
159 | ret = -EINVAL; | ||
160 | goto out_unlock; | ||
161 | } | ||
162 | |||
163 | vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; | ||
164 | vma->vm_ops = obj->dev->driver->gem_vm_ops; | ||
165 | vma->vm_private_data = obj; | ||
166 | vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); | ||
167 | |||
168 | /* Take a ref for this mapping of the object, so that the fault | ||
169 | * handler can dereference the mmap offset's pointer to the object. | ||
170 | * This reference is cleaned up by the corresponding vm_close | ||
171 | * (which should happen whether the vma was created by this call, or | ||
172 | * by a vm_open due to mremap or partial unmap or whatever). | ||
173 | */ | ||
174 | vma->vm_ops->open(vma); | ||
175 | |||
176 | out_unlock: | ||
177 | /* on failure, propagate the error instead of mapping anyway */ | ||
178 | return ret ? ret : omap_gem_mmap_obj(obj, vma); | ||
179 | } | ||
180 | |||
181 | struct dma_buf_ops omap_dmabuf_ops = { | ||
182 | .map_dma_buf = omap_gem_map_dma_buf, | ||
183 | .unmap_dma_buf = omap_gem_unmap_dma_buf, | ||
184 | .release = omap_gem_dmabuf_release, | ||
185 | .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access, | ||
186 | .end_cpu_access = omap_gem_dmabuf_end_cpu_access, | ||
187 | .kmap_atomic = omap_gem_dmabuf_kmap_atomic, | ||
188 | .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic, | ||
189 | .kmap = omap_gem_dmabuf_kmap, | ||
190 | .kunmap = omap_gem_dmabuf_kunmap, | ||
191 | .mmap = omap_gem_dmabuf_mmap, | ||
192 | }; | ||
193 | |||
194 | struct dma_buf *omap_gem_prime_export(struct drm_device *dev, | ||
195 | struct drm_gem_object *obj, int flags) | ||
196 | { | ||
197 | return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags); | ||
198 | } | ||
199 | |||
200 | struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, | ||
201 | struct dma_buf *buffer) | ||
202 | { | ||
203 | struct drm_gem_object *obj; | ||
204 | |||
205 | /* is this one of our own objects? */ | ||
206 | if (buffer->ops == &omap_dmabuf_ops) { | ||
207 | obj = buffer->priv; | ||
208 | /* is it from our device? */ | ||
209 | if (obj->dev == dev) { | ||
210 | /* | ||
211 | * Importing a dmabuf exported from our own gem increases the | ||
212 | * refcount on the gem itself instead of the f_count of the dmabuf. | ||
213 | */ | ||
214 | drm_gem_object_reference(obj); | ||
215 | dma_buf_put(buffer); | ||
216 | return obj; | ||
217 | } | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * TODO add support for importing buffers from other devices.. | ||
222 | * for now we don't need this but would be nice to add eventually | ||
223 | */ | ||
224 | return ERR_PTR(-EINVAL); | ||
225 | } | ||
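Taken together, the export and import hooks above implement the PRIME round trip: dma_buf_export() wraps the GEM object, and a later import of that same buffer just takes a GEM reference rather than going through an sg_table. A minimal sketch of the flow, using the era-appropriate dma-buf API shown above (my_selfshare_test() and its error handling are hypothetical, not part of omapdrm):

	/* sketch: export a bo as a dma-buf fd, then re-import it */
	static int my_selfshare_test(struct drm_device *dev,
			struct drm_gem_object *obj)
	{
		struct dma_buf *buf;
		struct drm_gem_object *again;
		int fd;

		buf = omap_gem_prime_export(dev, obj, 0);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		fd = dma_buf_fd(buf, O_CLOEXEC);  /* fd takes over the export ref */
		if (fd < 0)
			return fd;

		buf = dma_buf_get(fd);            /* grab a ref for the import */
		/* self-import path: gem ref taken, dma-buf ref dropped */
		again = omap_gem_prime_import(dev, buf);
		return IS_ERR(again) ? PTR_ERR(again) : 0;
	}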
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c new file mode 100644 index 000000000000..e4a66a35fc6a --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c | |||
@@ -0,0 +1,169 @@ | |||
1 | /* | ||
2 | * drivers/gpu/drm/omapdrm/omap_gem_helpers.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments | ||
5 | * Author: Rob Clark <rob.clark@linaro.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | /* temporary copy of drm_gem_{get,put}_pages() until the | ||
21 | * "drm/gem: add functions to get/put pages" patch is merged.. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/shmem_fs.h> | ||
27 | |||
28 | #include <drm/drmP.h> | ||
29 | |||
30 | /** | ||
31 | * drm_gem_get_pages - helper to allocate backing pages for a GEM object | ||
32 | * @obj: obj in question | ||
33 | * @gfpmask: gfp mask of requested pages | ||
34 | */ | ||
35 | struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) | ||
36 | { | ||
37 | struct inode *inode; | ||
38 | struct address_space *mapping; | ||
39 | struct page *p, **pages; | ||
40 | int i, npages; | ||
41 | |||
42 | /* This is the shared memory object that backs the GEM resource */ | ||
43 | inode = obj->filp->f_path.dentry->d_inode; | ||
44 | mapping = inode->i_mapping; | ||
45 | |||
46 | npages = obj->size >> PAGE_SHIFT; | ||
47 | |||
48 | pages = drm_malloc_ab(npages, sizeof(struct page *)); | ||
49 | if (pages == NULL) | ||
50 | return ERR_PTR(-ENOMEM); | ||
51 | |||
52 | gfpmask |= mapping_gfp_mask(mapping); | ||
53 | |||
54 | for (i = 0; i < npages; i++) { | ||
55 | p = shmem_read_mapping_page_gfp(mapping, i, gfpmask); | ||
56 | if (IS_ERR(p)) | ||
57 | goto fail; | ||
58 | pages[i] = p; | ||
59 | |||
60 | /* There is a hypothetical issue w/ drivers that require | ||
61 | * buffer memory in the low 4GB.. if the pages are un- | ||
62 | * pinned, and swapped out, they can end up swapped back | ||
63 | * in above 4GB. If pages are already in memory, then | ||
64 | * shmem_read_mapping_page_gfp will ignore the gfpmask, | ||
65 | * even if the already in-memory page disobeys the mask. | ||
66 | * | ||
67 | * It is only a theoretical issue today, because none of | ||
68 | * the devices with this limitation can be populated with | ||
69 | * enough memory to trigger the issue. But this BUG_ON() | ||
70 | * is here as a reminder in case the problem with | ||
71 | * shmem_read_mapping_page_gfp() isn't solved by the time | ||
72 | * it does become a real issue. | ||
73 | * | ||
74 | * See this thread: http://lkml.org/lkml/2011/7/11/238 | ||
75 | */ | ||
76 | BUG_ON((gfpmask & __GFP_DMA32) && | ||
77 | (page_to_pfn(p) >= 0x00100000UL)); | ||
78 | } | ||
79 | |||
80 | return pages; | ||
81 | |||
82 | fail: | ||
83 | while (i--) | ||
84 | page_cache_release(pages[i]); | ||
85 | |||
86 | drm_free_large(pages); | ||
87 | return ERR_CAST(p); | ||
88 | } | ||
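For reference, the magic constant in the BUG_ON() above works out to exactly the 4GB boundary (assuming 4 KiB pages):

	0x00100000 pages * 4096 bytes/page = 2^20 * 2^12 = 2^32 bytes = 4 GiB

so the check fires for any page whose PFN lands at or above 4 GiB while __GFP_DMA32 was requested.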
89 | |||
90 | /** | ||
91 | * drm_gem_put_pages - helper to free backing pages for a GEM object | ||
92 | * @obj: obj in question | ||
93 | * @pages: pages to free | ||
94 | */ | ||
95 | void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, | ||
96 | bool dirty, bool accessed) | ||
97 | { | ||
98 | int i, npages; | ||
99 | |||
100 | npages = obj->size >> PAGE_SHIFT; | ||
101 | |||
102 | for (i = 0; i < npages; i++) { | ||
103 | if (dirty) | ||
104 | set_page_dirty(pages[i]); | ||
105 | |||
106 | if (accessed) | ||
107 | mark_page_accessed(pages[i]); | ||
108 | |||
109 | /* Undo the reference we took when populating the table */ | ||
110 | page_cache_release(pages[i]); | ||
111 | } | ||
112 | |||
113 | drm_free_large(pages); | ||
114 | } | ||
115 | |||
116 | int | ||
117 | _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size) | ||
118 | { | ||
119 | struct drm_device *dev = obj->dev; | ||
120 | struct drm_gem_mm *mm = dev->mm_private; | ||
121 | struct drm_map_list *list; | ||
122 | struct drm_local_map *map; | ||
123 | int ret = 0; | ||
124 | |||
125 | /* Set the object up for mmap'ing */ | ||
126 | list = &obj->map_list; | ||
127 | list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); | ||
128 | if (!list->map) | ||
129 | return -ENOMEM; | ||
130 | |||
131 | map = list->map; | ||
132 | map->type = _DRM_GEM; | ||
133 | map->size = size; | ||
134 | map->handle = obj; | ||
135 | |||
136 | /* Get a DRM GEM mmap offset allocated... */ | ||
137 | list->file_offset_node = drm_mm_search_free(&mm->offset_manager, | ||
138 | size / PAGE_SIZE, 0, 0); | ||
139 | |||
140 | if (!list->file_offset_node) { | ||
141 | DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); | ||
142 | ret = -ENOSPC; | ||
143 | goto out_free_list; | ||
144 | } | ||
145 | |||
146 | list->file_offset_node = drm_mm_get_block(list->file_offset_node, | ||
147 | size / PAGE_SIZE, 0); | ||
148 | if (!list->file_offset_node) { | ||
149 | ret = -ENOMEM; | ||
150 | goto out_free_list; | ||
151 | } | ||
152 | |||
153 | list->hash.key = list->file_offset_node->start; | ||
154 | ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); | ||
155 | if (ret) { | ||
156 | DRM_ERROR("failed to add to map hash\n"); | ||
157 | goto out_free_mm; | ||
158 | } | ||
159 | |||
160 | return 0; | ||
161 | |||
162 | out_free_mm: | ||
163 | drm_mm_put_block(list->file_offset_node); | ||
164 | out_free_list: | ||
165 | kfree(list->map); | ||
166 | list->map = NULL; | ||
167 | |||
168 | return ret; | ||
169 | } | ||
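These helpers are meant to bracket any access to the shmem-backed pages; a minimal usage sketch (my_touch_pages() is a hypothetical consumer):

	struct page **pages = _drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	my_touch_pages(pages, obj->size >> PAGE_SHIFT);	/* hypothetical */

	/* mark dirty so contents survive a swap-out, then drop the page refs */
	_drm_gem_put_pages(obj, pages, true, true);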
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c new file mode 100644 index 000000000000..e01303ee00c3 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_irq.c | |||
@@ -0,0 +1,322 @@ | |||
1 | /* | ||
2 | * drivers/gpu/drm/omapdrm/omap_irq.c | ||
3 | * | ||
4 | * Copyright (C) 2012 Texas Instruments | ||
5 | * Author: Rob Clark <rob.clark@linaro.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include "omap_drv.h" | ||
21 | |||
22 | static DEFINE_SPINLOCK(list_lock); | ||
23 | |||
24 | static void omap_irq_error_handler(struct omap_drm_irq *irq, | ||
25 | uint32_t irqstatus) | ||
26 | { | ||
27 | DRM_ERROR("errors: %08x\n", irqstatus); | ||
28 | } | ||
29 | |||
30 | /* call with list_lock and dispc runtime held */ | ||
31 | static void omap_irq_update(struct drm_device *dev) | ||
32 | { | ||
33 | struct omap_drm_private *priv = dev->dev_private; | ||
34 | struct omap_drm_irq *irq; | ||
35 | uint32_t irqmask = priv->vblank_mask; | ||
36 | |||
37 | BUG_ON(!spin_is_locked(&list_lock)); | ||
38 | |||
39 | list_for_each_entry(irq, &priv->irq_list, node) | ||
40 | irqmask |= irq->irqmask; | ||
41 | |||
42 | DBG("irqmask=%08x", irqmask); | ||
43 | |||
44 | dispc_write_irqenable(irqmask); | ||
45 | dispc_read_irqenable(); /* flush posted write */ | ||
46 | } | ||
47 | |||
48 | void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq) | ||
49 | { | ||
50 | struct omap_drm_private *priv = dev->dev_private; | ||
51 | unsigned long flags; | ||
52 | |||
53 | dispc_runtime_get(); | ||
54 | spin_lock_irqsave(&list_lock, flags); | ||
55 | |||
56 | if (!WARN_ON(irq->registered)) { | ||
57 | irq->registered = true; | ||
58 | list_add(&irq->node, &priv->irq_list); | ||
59 | omap_irq_update(dev); | ||
60 | } | ||
61 | |||
62 | spin_unlock_irqrestore(&list_lock, flags); | ||
63 | dispc_runtime_put(); | ||
64 | } | ||
65 | |||
66 | void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq) | ||
67 | { | ||
68 | unsigned long flags; | ||
69 | |||
70 | dispc_runtime_get(); | ||
71 | spin_lock_irqsave(&list_lock, flags); | ||
72 | |||
73 | if (!WARN_ON(!irq->registered)) { | ||
74 | irq->registered = false; | ||
75 | list_del(&irq->node); | ||
76 | omap_irq_update(dev); | ||
77 | } | ||
78 | |||
79 | spin_unlock_irqrestore(&list_lock, flags); | ||
80 | dispc_runtime_put(); | ||
81 | } | ||
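A consumer hooks into the shared DISPC interrupt by filling in a struct omap_drm_irq and registering it; a sketch, where my_framedone_irq() is hypothetical and DISPC_IRQ_FRAMEDONE is one of the omapdss status bits:

	static void my_framedone_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
	{
		/* runs from omap_irq_handler() with the matching status bits */
	}

	static struct omap_drm_irq framedone_irq = {
		.irqmask = DISPC_IRQ_FRAMEDONE,
		.irq = my_framedone_irq,
	};

	omap_irq_register(dev, &framedone_irq);
	/* ... */
	omap_irq_unregister(dev, &framedone_irq);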
82 | |||
83 | struct omap_irq_wait { | ||
84 | struct omap_drm_irq irq; | ||
85 | int count; | ||
86 | }; | ||
87 | |||
88 | static DECLARE_WAIT_QUEUE_HEAD(wait_event); | ||
89 | |||
90 | static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus) | ||
91 | { | ||
92 | struct omap_irq_wait *wait = | ||
93 | container_of(irq, struct omap_irq_wait, irq); | ||
94 | wait->count--; | ||
95 | wake_up_all(&wait_event); | ||
96 | } | ||
97 | |||
98 | struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev, | ||
99 | uint32_t irqmask, int count) | ||
100 | { | ||
101 | struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL); | ||
102 | wait->irq.irq = wait_irq; | ||
103 | wait->irq.irqmask = irqmask; | ||
104 | wait->count = count; | ||
105 | omap_irq_register(dev, &wait->irq); | ||
106 | return wait; | ||
107 | } | ||
108 | |||
109 | int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait, | ||
110 | unsigned long timeout) | ||
111 | { | ||
112 | int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout); | ||
113 | omap_irq_unregister(dev, &wait->irq); | ||
114 | kfree(wait); | ||
115 | if (ret == 0) | ||
116 | return -1; | ||
117 | return 0; | ||
118 | } | ||
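The two wait helpers combine into a one-shot blocking wait; e.g. waiting up to 50 ms for one FRAMEDONE interrupt (a sketch; note that omap_irq_wait() frees the wait object itself):

	struct omap_irq_wait *wait =
		omap_irq_wait_init(dev, DISPC_IRQ_FRAMEDONE, 1);
	if (omap_irq_wait(dev, wait, msecs_to_jiffies(50)))
		dev_err(dev->dev, "timeout waiting for framedone\n");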
119 | |||
120 | /** | ||
121 | * enable_vblank - enable vblank interrupt events | ||
122 | * @dev: DRM device | ||
123 | * @crtc: which irq to enable | ||
124 | * | ||
125 | * Enable vblank interrupts for @crtc. If the device doesn't have | ||
126 | * a hardware vblank counter, this routine should be a no-op, since | ||
127 | * interrupts will have to stay on to keep the count accurate. | ||
128 | * | ||
129 | * RETURNS | ||
130 | * Zero on success, appropriate errno if the given @crtc's vblank | ||
131 | * interrupt cannot be enabled. | ||
132 | */ | ||
133 | int omap_irq_enable_vblank(struct drm_device *dev, int crtc) | ||
134 | { | ||
135 | struct omap_drm_private *priv = dev->dev_private; | ||
136 | unsigned long flags; | ||
137 | |||
138 | DBG("dev=%p, crtc=%d", dev, crtc); | ||
139 | |||
140 | dispc_runtime_get(); | ||
141 | spin_lock_irqsave(&list_lock, flags); | ||
142 | priv->vblank_mask |= pipe2vbl(crtc); | ||
143 | omap_irq_update(dev); | ||
144 | spin_unlock_irqrestore(&list_lock, flags); | ||
145 | dispc_runtime_put(); | ||
146 | |||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * disable_vblank - disable vblank interrupt events | ||
152 | * @dev: DRM device | ||
153 | * @crtc: which irq to disable | ||
154 | * | ||
155 | * Disable vblank interrupts for @crtc. If the device doesn't have | ||
156 | * a hardware vblank counter, this routine should be a no-op, since | ||
157 | * interrupts will have to stay on to keep the count accurate. | ||
158 | */ | ||
159 | void omap_irq_disable_vblank(struct drm_device *dev, int crtc) | ||
160 | { | ||
161 | struct omap_drm_private *priv = dev->dev_private; | ||
162 | unsigned long flags; | ||
163 | |||
164 | DBG("dev=%p, crtc=%d", dev, crtc); | ||
165 | |||
166 | dispc_runtime_get(); | ||
167 | spin_lock_irqsave(&list_lock, flags); | ||
168 | priv->vblank_mask &= ~pipe2vbl(crtc); | ||
169 | omap_irq_update(dev); | ||
170 | spin_unlock_irqrestore(&list_lock, flags); | ||
171 | dispc_runtime_put(); | ||
172 | } | ||
173 | |||
174 | irqreturn_t omap_irq_handler(DRM_IRQ_ARGS) | ||
175 | { | ||
176 | struct drm_device *dev = (struct drm_device *) arg; | ||
177 | struct omap_drm_private *priv = dev->dev_private; | ||
178 | struct omap_drm_irq *handler, *n; | ||
179 | unsigned long flags; | ||
180 | unsigned int id; | ||
181 | u32 irqstatus; | ||
182 | |||
183 | irqstatus = dispc_read_irqstatus(); | ||
184 | dispc_clear_irqstatus(irqstatus); | ||
185 | dispc_read_irqstatus(); /* flush posted write */ | ||
186 | |||
187 | VERB("irqs: %08x", irqstatus); | ||
188 | |||
189 | for (id = 0; id < priv->num_crtcs; id++) | ||
190 | if (irqstatus & pipe2vbl(id)) | ||
191 | drm_handle_vblank(dev, id); | ||
192 | |||
193 | spin_lock_irqsave(&list_lock, flags); | ||
194 | list_for_each_entry_safe(handler, n, &priv->irq_list, node) { | ||
195 | if (handler->irqmask & irqstatus) { | ||
196 | spin_unlock_irqrestore(&list_lock, flags); | ||
197 | handler->irq(handler, handler->irqmask & irqstatus); | ||
198 | spin_lock_irqsave(&list_lock, flags); | ||
199 | } | ||
200 | } | ||
201 | spin_unlock_irqrestore(&list_lock, flags); | ||
202 | |||
203 | return IRQ_HANDLED; | ||
204 | } | ||
205 | |||
206 | void omap_irq_preinstall(struct drm_device *dev) | ||
207 | { | ||
208 | DBG("dev=%p", dev); | ||
209 | dispc_runtime_get(); | ||
210 | dispc_clear_irqstatus(0xffffffff); | ||
211 | dispc_runtime_put(); | ||
212 | } | ||
213 | |||
214 | int omap_irq_postinstall(struct drm_device *dev) | ||
215 | { | ||
216 | struct omap_drm_private *priv = dev->dev_private; | ||
217 | struct omap_drm_irq *error_handler = &priv->error_handler; | ||
218 | |||
219 | DBG("dev=%p", dev); | ||
220 | |||
221 | INIT_LIST_HEAD(&priv->irq_list); | ||
222 | |||
223 | error_handler->irq = omap_irq_error_handler; | ||
224 | error_handler->irqmask = DISPC_IRQ_OCP_ERR; | ||
225 | |||
226 | /* for now ignore DISPC_IRQ_SYNC_LOST_DIGIT.. really I think | ||
227 | * we just need to ignore it while enabling tv-out | ||
228 | */ | ||
229 | error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT; | ||
230 | |||
231 | omap_irq_register(dev, error_handler); | ||
232 | |||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | void omap_irq_uninstall(struct drm_device *dev) | ||
237 | { | ||
238 | DBG("dev=%p", dev); | ||
239 | // TODO probably need to call drm_irq_uninstall() somewhere too | ||
240 | } | ||
241 | |||
242 | /* | ||
243 | * We need a special version, instead of just using drm_irq_install(), | ||
244 | * because we need to register the irq via omapdss. Once omapdss and | ||
245 | * omapdrm are merged together we can assign the dispc hwmod data to | ||
246 | * ourselves and drop these and just use drm_irq_{install,uninstall}() | ||
247 | */ | ||
248 | |||
249 | int omap_drm_irq_install(struct drm_device *dev) | ||
250 | { | ||
251 | int ret; | ||
252 | |||
253 | mutex_lock(&dev->struct_mutex); | ||
254 | |||
255 | if (dev->irq_enabled) { | ||
256 | mutex_unlock(&dev->struct_mutex); | ||
257 | return -EBUSY; | ||
258 | } | ||
259 | dev->irq_enabled = 1; | ||
260 | mutex_unlock(&dev->struct_mutex); | ||
261 | |||
262 | /* Before installing handler */ | ||
263 | if (dev->driver->irq_preinstall) | ||
264 | dev->driver->irq_preinstall(dev); | ||
265 | |||
266 | ret = dispc_request_irq(dev->driver->irq_handler, dev); | ||
267 | |||
268 | if (ret < 0) { | ||
269 | mutex_lock(&dev->struct_mutex); | ||
270 | dev->irq_enabled = 0; | ||
271 | mutex_unlock(&dev->struct_mutex); | ||
272 | return ret; | ||
273 | } | ||
274 | |||
275 | /* After installing handler */ | ||
276 | if (dev->driver->irq_postinstall) | ||
277 | ret = dev->driver->irq_postinstall(dev); | ||
278 | |||
279 | if (ret < 0) { | ||
280 | mutex_lock(&dev->struct_mutex); | ||
281 | dev->irq_enabled = 0; | ||
282 | mutex_unlock(&dev->struct_mutex); | ||
283 | dispc_free_irq(dev); | ||
284 | } | ||
285 | |||
286 | return ret; | ||
287 | } | ||
288 | |||
289 | int omap_drm_irq_uninstall(struct drm_device *dev) | ||
290 | { | ||
291 | unsigned long irqflags; | ||
292 | int irq_enabled, i; | ||
293 | |||
294 | mutex_lock(&dev->struct_mutex); | ||
295 | irq_enabled = dev->irq_enabled; | ||
296 | dev->irq_enabled = 0; | ||
297 | mutex_unlock(&dev->struct_mutex); | ||
298 | |||
299 | /* | ||
300 | * Wake up any waiters so they don't hang. | ||
301 | */ | ||
302 | if (dev->num_crtcs) { | ||
303 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
304 | for (i = 0; i < dev->num_crtcs; i++) { | ||
305 | DRM_WAKEUP(&dev->vbl_queue[i]); | ||
306 | dev->vblank_enabled[i] = 0; | ||
307 | dev->last_vblank[i] = | ||
308 | dev->driver->get_vblank_counter(dev, i); | ||
309 | } | ||
310 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
311 | } | ||
312 | |||
313 | if (!irq_enabled) | ||
314 | return -EINVAL; | ||
315 | |||
316 | if (dev->driver->irq_uninstall) | ||
317 | dev->driver->irq_uninstall(dev); | ||
318 | |||
319 | dispc_free_irq(dev); | ||
320 | |||
321 | return 0; | ||
322 | } | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c new file mode 100644 index 000000000000..2882cda6ea19 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_plane.c | |||
@@ -0,0 +1,448 @@ | |||
1 | /* | ||
2 | * drivers/gpu/drm/omapdrm/omap_plane.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments | ||
5 | * Author: Rob Clark <rob.clark@linaro.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/kfifo.h> | ||
21 | |||
22 | #include "omap_drv.h" | ||
23 | #include "omap_dmm_tiler.h" | ||
24 | |||
25 | /* some hackery because omapdss has an 'enum omap_plane' (which would be | ||
26 | * better named omap_plane_id).. and the compiler seems unhappy about having | ||
27 | * both a 'struct omap_plane' and 'enum omap_plane' | ||
28 | */ | ||
29 | #define omap_plane _omap_plane | ||
30 | |||
31 | /* | ||
32 | * plane funcs | ||
33 | */ | ||
34 | |||
35 | struct callback { | ||
36 | void (*fxn)(void *); | ||
37 | void *arg; | ||
38 | }; | ||
39 | |||
40 | #define to_omap_plane(x) container_of(x, struct omap_plane, base) | ||
41 | |||
42 | struct omap_plane { | ||
43 | struct drm_plane base; | ||
44 | int id; /* TODO rename omap_plane -> omap_plane_id in omapdss so I can use the enum */ | ||
45 | const char *name; | ||
46 | struct omap_overlay_info info; | ||
47 | struct omap_drm_apply apply; | ||
48 | |||
49 | /* position/orientation of scanout within the fb: */ | ||
50 | struct omap_drm_window win; | ||
51 | bool enabled; | ||
52 | |||
53 | /* last fb that we pinned: */ | ||
54 | struct drm_framebuffer *pinned_fb; | ||
55 | |||
56 | uint32_t nformats; | ||
57 | uint32_t formats[32]; | ||
58 | |||
59 | struct omap_drm_irq error_irq; | ||
60 | |||
61 | /* set of bo's pending unpin until next post_apply() */ | ||
62 | DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *); | ||
63 | |||
64 | // XXX maybe get rid of this and handle vblank in crtc too? | ||
65 | struct callback apply_done_cb; | ||
66 | }; | ||
67 | |||
68 | static void unpin(void *arg, struct drm_gem_object *bo) | ||
69 | { | ||
70 | struct drm_plane *plane = arg; | ||
71 | struct omap_plane *omap_plane = to_omap_plane(plane); | ||
72 | |||
73 | if (kfifo_put(&omap_plane->unpin_fifo, | ||
74 | (const struct drm_gem_object **)&bo)) { | ||
75 | /* also hold a ref so it isn't free'd while pinned */ | ||
76 | drm_gem_object_reference(bo); | ||
77 | } else { | ||
78 | dev_err(plane->dev->dev, "unpin fifo full!\n"); | ||
79 | omap_gem_put_paddr(bo); | ||
80 | } | ||
81 | } | ||
82 | |||
83 | /* update which fb (if any) is pinned for scanout */ | ||
84 | static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb) | ||
85 | { | ||
86 | struct omap_plane *omap_plane = to_omap_plane(plane); | ||
87 | struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb; | ||
88 | |||
89 | if (pinned_fb != fb) { | ||
90 | int ret; | ||
91 | |||
92 | DBG("%p -> %p", pinned_fb, fb); | ||
93 | |||
94 | if (fb) | ||
95 | drm_framebuffer_reference(fb); | ||
96 | |||
97 | ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin); | ||
98 | |||
99 | if (pinned_fb) | ||
100 | drm_framebuffer_unreference(pinned_fb); | ||
101 | |||
102 | if (ret) { | ||
103 | dev_err(plane->dev->dev, "could not swap %p -> %p\n", | ||
104 | omap_plane->pinned_fb, fb); | ||
105 | if (fb) | ||
106 | drm_framebuffer_unreference(fb); | ||
107 | omap_plane->pinned_fb = NULL; | ||
108 | return ret; | ||
109 | } | ||
110 | |||
111 | omap_plane->pinned_fb = fb; | ||
112 | } | ||
113 | |||
114 | return 0; | ||
115 | } | ||
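update_pin() works together with the unpin() callback above to defer releasing the old scanout buffer: omap_framebuffer_replace() pins the new fb right away but routes the old fb's buffers through unpin(), where they stay pinned (holding an extra GEM reference) until post_apply() drains the fifo after the hardware has latched the new address. Roughly, in kfifo terms:

	/* producer, at swap time (see unpin() above): */
	kfifo_put(&omap_plane->unpin_fifo, &bo);	/* plus a gem ref */

	/* consumer, once the flip has taken effect (see post_apply below): */
	while (kfifo_get(&omap_plane->unpin_fifo, &bo)) {
		omap_gem_put_paddr(bo);
		drm_gem_object_unreference_unlocked(bo);
	}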
116 | |||
117 | static void omap_plane_pre_apply(struct omap_drm_apply *apply) | ||
118 | { | ||
119 | struct omap_plane *omap_plane = | ||
120 | container_of(apply, struct omap_plane, apply); | ||
121 | struct omap_drm_window *win = &omap_plane->win; | ||
122 | struct drm_plane *plane = &omap_plane->base; | ||
123 | struct drm_device *dev = plane->dev; | ||
124 | struct omap_overlay_info *info = &omap_plane->info; | ||
125 | struct drm_crtc *crtc = plane->crtc; | ||
126 | enum omap_channel channel; | ||
127 | bool enabled = omap_plane->enabled && crtc; | ||
128 | bool ilace, replication; | ||
129 | int ret; | ||
130 | |||
131 | DBG("%s, enabled=%d", omap_plane->name, enabled); | ||
132 | |||
133 | /* if fb has changed, pin new fb: */ | ||
134 | update_pin(plane, enabled ? plane->fb : NULL); | ||
135 | |||
136 | if (!enabled) { | ||
137 | dispc_ovl_enable(omap_plane->id, false); | ||
138 | return; | ||
139 | } | ||
140 | |||
141 | channel = omap_crtc_channel(crtc); | ||
142 | |||
143 | /* update scanout: */ | ||
144 | omap_framebuffer_update_scanout(plane->fb, win, info); | ||
145 | |||
146 | DBG("%dx%d -> %dx%d (%d)", info->width, info->height, | ||
147 | info->out_width, info->out_height, | ||
148 | info->screen_width); | ||
149 | DBG("%d,%d %08x %08x", info->pos_x, info->pos_y, | ||
150 | info->paddr, info->p_uv_addr); | ||
151 | |||
152 | /* TODO: */ | ||
153 | ilace = false; | ||
154 | replication = false; | ||
155 | |||
156 | /* and finally, update omapdss: */ | ||
157 | ret = dispc_ovl_setup(omap_plane->id, info, | ||
158 | replication, omap_crtc_timings(crtc), false); | ||
159 | if (ret) { | ||
160 | dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret); | ||
161 | return; | ||
162 | } | ||
163 | |||
164 | dispc_ovl_enable(omap_plane->id, true); | ||
165 | dispc_ovl_set_channel_out(omap_plane->id, channel); | ||
166 | } | ||
167 | |||
168 | static void omap_plane_post_apply(struct omap_drm_apply *apply) | ||
169 | { | ||
170 | struct omap_plane *omap_plane = | ||
171 | container_of(apply, struct omap_plane, apply); | ||
172 | struct drm_plane *plane = &omap_plane->base; | ||
173 | struct omap_overlay_info *info = &omap_plane->info; | ||
174 | struct drm_gem_object *bo = NULL; | ||
175 | struct callback cb; | ||
176 | |||
177 | cb = omap_plane->apply_done_cb; | ||
178 | omap_plane->apply_done_cb.fxn = NULL; | ||
179 | |||
180 | while (kfifo_get(&omap_plane->unpin_fifo, &bo)) { | ||
181 | omap_gem_put_paddr(bo); | ||
182 | drm_gem_object_unreference_unlocked(bo); | ||
183 | } | ||
184 | |||
185 | if (cb.fxn) | ||
186 | cb.fxn(cb.arg); | ||
187 | |||
188 | if (omap_plane->enabled) { | ||
189 | omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y, | ||
190 | info->out_width, info->out_height); | ||
191 | } | ||
192 | } | ||
193 | |||
194 | static int apply(struct drm_plane *plane) | ||
195 | { | ||
196 | if (plane->crtc) { | ||
197 | struct omap_plane *omap_plane = to_omap_plane(plane); | ||
198 | return omap_crtc_apply(plane->crtc, &omap_plane->apply); | ||
199 | } | ||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | int omap_plane_mode_set(struct drm_plane *plane, | ||
204 | struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
205 | int crtc_x, int crtc_y, | ||
206 | unsigned int crtc_w, unsigned int crtc_h, | ||
207 | uint32_t src_x, uint32_t src_y, | ||
208 | uint32_t src_w, uint32_t src_h, | ||
209 | void (*fxn)(void *), void *arg) | ||
210 | { | ||
211 | struct omap_plane *omap_plane = to_omap_plane(plane); | ||
212 | struct omap_drm_window *win = &omap_plane->win; | ||
213 | |||
214 | win->crtc_x = crtc_x; | ||
215 | win->crtc_y = crtc_y; | ||
216 | win->crtc_w = crtc_w; | ||
217 | win->crtc_h = crtc_h; | ||
218 | |||
219 | /* src values are in Q16 fixed point, convert to integer: */ | ||
220 | win->src_x = src_x >> 16; | ||
221 | win->src_y = src_y >> 16; | ||
222 | win->src_w = src_w >> 16; | ||
223 | win->src_h = src_h >> 16; | ||
224 | |||
225 | if (fxn) { | ||
226 | /* omap_crtc should ensure that a new page flip | ||
227 | * isn't permitted while there is one pending: | ||
228 | */ | ||
229 | BUG_ON(omap_plane->apply_done_cb.fxn); | ||
230 | |||
231 | omap_plane->apply_done_cb.fxn = fxn; | ||
232 | omap_plane->apply_done_cb.arg = arg; | ||
233 | } | ||
234 | |||
235 | plane->fb = fb; | ||
236 | plane->crtc = crtc; | ||
237 | |||
238 | return apply(plane); | ||
239 | } | ||
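The src_* arguments follow the drm plane convention of Q16.16 fixed point, so the >> 16 above simply truncates the fractional part; a quick worked example:

	uint32_t src_w = 640 << 16;	/* 640.0 in Q16.16 == 0x02800000 */
	win->src_w = src_w >> 16;	/* back to 640 integer pixels */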
240 | |||
241 | static int omap_plane_update(struct drm_plane *plane, | ||
242 | struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
243 | int crtc_x, int crtc_y, | ||
244 | unsigned int crtc_w, unsigned int crtc_h, | ||
245 | uint32_t src_x, uint32_t src_y, | ||
246 | uint32_t src_w, uint32_t src_h) | ||
247 | { | ||
248 | struct omap_plane *omap_plane = to_omap_plane(plane); | ||
249 | omap_plane->enabled = true; | ||
250 | return omap_plane_mode_set(plane, crtc, fb, | ||
251 | crtc_x, crtc_y, crtc_w, crtc_h, | ||
252 | src_x, src_y, src_w, src_h, | ||
253 | NULL, NULL); | ||
254 | } | ||
255 | |||
256 | static int omap_plane_disable(struct drm_plane *plane) | ||
257 | { | ||
258 | struct omap_plane *omap_plane = to_omap_plane(plane); | ||
259 | omap_plane->win.rotation = BIT(DRM_ROTATE_0); | ||
260 | return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF); | ||
261 | } | ||
262 | |||
263 | static void omap_plane_destroy(struct drm_plane *plane) | ||
264 | { | ||
265 | struct omap_plane *omap_plane = to_omap_plane(plane); | ||
266 | |||
267 | DBG("%s", omap_plane->name); | ||
268 | |||
269 | omap_irq_unregister(plane->dev, &omap_plane->error_irq); | ||
270 | |||
271 | omap_plane_disable(plane); | ||
272 | drm_plane_cleanup(plane); | ||
273 | |||
274 | WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo)); | ||
275 | kfifo_free(&omap_plane->unpin_fifo); | ||
276 | |||
277 | kfree(omap_plane); | ||
278 | } | ||
279 | |||
280 | int omap_plane_dpms(struct drm_plane *plane, int mode) | ||
281 | { | ||
282 | struct omap_plane *omap_plane = to_omap_plane(plane); | ||
283 | bool enabled = (mode == DRM_MODE_DPMS_ON); | ||
284 | int ret = 0; | ||
285 | |||
286 | if (enabled != omap_plane->enabled) { | ||
287 | omap_plane->enabled = enabled; | ||
288 | ret = apply(plane); | ||
289 | } | ||
290 | |||
291 | return ret; | ||
292 | } | ||
293 | |||
294 | /* helper to install properties which are common to planes and crtcs */ | ||
295 | void omap_plane_install_properties(struct drm_plane *plane, | ||
296 | struct drm_mode_object *obj) | ||
297 | { | ||
298 | struct drm_device *dev = plane->dev; | ||
299 | struct omap_drm_private *priv = dev->dev_private; | ||
300 | struct drm_property *prop; | ||
301 | |||
302 | if (priv->has_dmm) { | ||
303 | prop = priv->rotation_prop; | ||
304 | if (!prop) { | ||
305 | const struct drm_prop_enum_list props[] = { | ||
306 | { DRM_ROTATE_0, "rotate-0" }, | ||
307 | { DRM_ROTATE_90, "rotate-90" }, | ||
308 | { DRM_ROTATE_180, "rotate-180" }, | ||
309 | { DRM_ROTATE_270, "rotate-270" }, | ||
310 | { DRM_REFLECT_X, "reflect-x" }, | ||
311 | { DRM_REFLECT_Y, "reflect-y" }, | ||
312 | }; | ||
313 | prop = drm_property_create_bitmask(dev, 0, "rotation", | ||
314 | props, ARRAY_SIZE(props)); | ||
315 | if (prop == NULL) | ||
316 | return; | ||
317 | priv->rotation_prop = prop; | ||
318 | } | ||
319 | drm_object_attach_property(obj, prop, 0); | ||
320 | } | ||
321 | |||
322 | prop = priv->zorder_prop; | ||
323 | if (!prop) { | ||
324 | prop = drm_property_create_range(dev, 0, "zorder", 0, 3); | ||
325 | if (prop == NULL) | ||
326 | return; | ||
327 | priv->zorder_prop = prop; | ||
328 | } | ||
329 | drm_object_attach_property(obj, prop, 0); | ||
330 | } | ||
331 | |||
332 | int omap_plane_set_property(struct drm_plane *plane, | ||
333 | struct drm_property *property, uint64_t val) | ||
334 | { | ||
335 | struct omap_plane *omap_plane = to_omap_plane(plane); | ||
336 | struct omap_drm_private *priv = plane->dev->dev_private; | ||
337 | int ret = -EINVAL; | ||
338 | |||
339 | if (property == priv->rotation_prop) { | ||
340 | DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val); | ||
341 | omap_plane->win.rotation = val; | ||
342 | ret = apply(plane); | ||
343 | } else if (property == priv->zorder_prop) { | ||
344 | DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val); | ||
345 | omap_plane->info.zorder = val; | ||
346 | ret = apply(plane); | ||
347 | } | ||
348 | |||
349 | return ret; | ||
350 | } | ||
351 | |||
352 | static const struct drm_plane_funcs omap_plane_funcs = { | ||
353 | .update_plane = omap_plane_update, | ||
354 | .disable_plane = omap_plane_disable, | ||
355 | .destroy = omap_plane_destroy, | ||
356 | .set_property = omap_plane_set_property, | ||
357 | }; | ||
358 | |||
359 | static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus) | ||
360 | { | ||
361 | struct omap_plane *omap_plane = | ||
362 | container_of(irq, struct omap_plane, error_irq); | ||
363 | DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus); | ||
364 | } | ||
365 | |||
366 | static const char *plane_names[] = { | ||
367 | [OMAP_DSS_GFX] = "gfx", | ||
368 | [OMAP_DSS_VIDEO1] = "vid1", | ||
369 | [OMAP_DSS_VIDEO2] = "vid2", | ||
370 | [OMAP_DSS_VIDEO3] = "vid3", | ||
371 | }; | ||
372 | |||
373 | static const uint32_t error_irqs[] = { | ||
374 | [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW, | ||
375 | [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW, | ||
376 | [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW, | ||
377 | [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW, | ||
378 | }; | ||
379 | |||
380 | /* initialize plane */ | ||
381 | struct drm_plane *omap_plane_init(struct drm_device *dev, | ||
382 | int id, bool private_plane) | ||
383 | { | ||
384 | struct omap_drm_private *priv = dev->dev_private; | ||
385 | struct drm_plane *plane = NULL; | ||
386 | struct omap_plane *omap_plane; | ||
387 | struct omap_overlay_info *info; | ||
388 | int ret; | ||
389 | |||
390 | DBG("%s: priv=%d", plane_names[id], private_plane); | ||
391 | |||
392 | omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL); | ||
393 | if (!omap_plane) | ||
394 | goto fail; | ||
395 | |||
396 | ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL); | ||
397 | if (ret) { | ||
398 | dev_err(dev->dev, "could not allocate unpin FIFO\n"); | ||
399 | goto fail; | ||
400 | } | ||
401 | |||
402 | omap_plane->nformats = omap_framebuffer_get_formats( | ||
403 | omap_plane->formats, ARRAY_SIZE(omap_plane->formats), | ||
404 | dss_feat_get_supported_color_modes(id)); | ||
405 | omap_plane->id = id; | ||
406 | omap_plane->name = plane_names[id]; | ||
407 | |||
408 | plane = &omap_plane->base; | ||
409 | |||
410 | omap_plane->apply.pre_apply = omap_plane_pre_apply; | ||
411 | omap_plane->apply.post_apply = omap_plane_post_apply; | ||
412 | |||
413 | omap_plane->error_irq.irqmask = error_irqs[id]; | ||
414 | omap_plane->error_irq.irq = omap_plane_error_irq; | ||
415 | omap_irq_register(dev, &omap_plane->error_irq); | ||
416 | |||
417 | drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs, | ||
418 | omap_plane->formats, omap_plane->nformats, private_plane); | ||
419 | |||
420 | omap_plane_install_properties(plane, &plane->base); | ||
421 | |||
422 | /* get our starting configuration, set defaults for parameters | ||
423 | * we don't currently use, etc: | ||
424 | */ | ||
425 | info = &omap_plane->info; | ||
426 | info->rotation_type = OMAP_DSS_ROT_DMA; | ||
427 | info->rotation = OMAP_DSS_ROT_0; | ||
428 | info->global_alpha = 0xff; | ||
429 | info->mirror = 0; | ||
430 | |||
431 | /* Set defaults depending on whether we are a CRTC or overlay | ||
432 | * layer. | ||
433 | * TODO add ioctl to give userspace an API to change this.. this | ||
434 | * will come in a subsequent patch. | ||
435 | */ | ||
436 | if (private_plane) | ||
437 | omap_plane->info.zorder = 0; | ||
438 | else | ||
439 | omap_plane->info.zorder = id; | ||
440 | |||
441 | return plane; | ||
442 | |||
443 | fail: | ||
444 | if (plane) | ||
445 | omap_plane_destroy(plane); | ||
446 | |||
447 | return NULL; | ||
448 | } | ||
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c new file mode 100644 index 000000000000..efb609510540 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/tcm-sita.c | |||
@@ -0,0 +1,703 @@ | |||
1 | /* | ||
2 | * tcm-sita.c | ||
3 | * | ||
4 | * SImple Tiler Allocator (SiTA): 2D and 1D allocation (reservation) algorithm | ||
5 | * | ||
6 | * Authors: Ravi Ramachandra <r.ramachandra@ti.com>, | ||
7 | * Lajos Molnar <molnar@ti.com> | ||
8 | * | ||
9 | * Copyright (C) 2009-2010 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | * | ||
19 | */ | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/spinlock.h> | ||
22 | |||
23 | #include "tcm-sita.h" | ||
24 | |||
25 | #define ALIGN_DOWN(value, align) ((value) & ~((align) - 1)) | ||
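ALIGN_DOWN() masks off the low bits, so it rounds down to the nearest multiple of align and is only valid for power-of-two alignments; for example:

	ALIGN_DOWN(37, 32) == (37 & ~31) == 32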
26 | |||
27 | /* Individual selection criteria for different scan areas */ | ||
28 | static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL; | ||
29 | static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE; | ||
30 | |||
31 | /********************************************* | ||
32 | * TCM API - Sita Implementation | ||
33 | *********************************************/ | ||
34 | static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align, | ||
35 | struct tcm_area *area); | ||
36 | static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area); | ||
37 | static s32 sita_free(struct tcm *tcm, struct tcm_area *area); | ||
38 | static void sita_deinit(struct tcm *tcm); | ||
39 | |||
40 | /********************************************* | ||
41 | * Main Scanner functions | ||
42 | *********************************************/ | ||
43 | static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align, | ||
44 | struct tcm_area *area); | ||
45 | |||
46 | static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align, | ||
47 | struct tcm_area *field, struct tcm_area *area); | ||
48 | |||
49 | static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align, | ||
50 | struct tcm_area *field, struct tcm_area *area); | ||
51 | |||
52 | static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots, | ||
53 | struct tcm_area *field, struct tcm_area *area); | ||
54 | |||
55 | /********************************************* | ||
56 | * Support Infrastructure Methods | ||
57 | *********************************************/ | ||
58 | static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h); | ||
59 | |||
60 | static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h, | ||
61 | struct tcm_area *field, s32 criteria, | ||
62 | struct score *best); | ||
63 | |||
64 | static void get_nearness_factor(struct tcm_area *field, | ||
65 | struct tcm_area *candidate, | ||
66 | struct nearness_factor *nf); | ||
67 | |||
68 | static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area, | ||
69 | struct neighbor_stats *stat); | ||
70 | |||
71 | static void fill_area(struct tcm *tcm, | ||
72 | struct tcm_area *area, struct tcm_area *parent); | ||
73 | |||
74 | |||
75 | /*********************************************/ | ||
76 | |||
77 | /********************************************* | ||
78 | * Utility Methods | ||
79 | *********************************************/ | ||
80 | struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr) | ||
81 | { | ||
82 | struct tcm *tcm; | ||
83 | struct sita_pvt *pvt; | ||
84 | struct tcm_area area = {0}; | ||
85 | s32 i; | ||
86 | |||
87 | if (width == 0 || height == 0) | ||
88 | return NULL; | ||
89 | |||
90 | tcm = kmalloc(sizeof(*tcm), GFP_KERNEL); | ||
91 | pvt = kmalloc(sizeof(*pvt), GFP_KERNEL); | ||
92 | if (!tcm || !pvt) | ||
93 | goto error; | ||
94 | |||
95 | memset(tcm, 0, sizeof(*tcm)); | ||
96 | memset(pvt, 0, sizeof(*pvt)); | ||
97 | |||
98 | /* Updating the pointers to SiTA implementation APIs */ | ||
99 | tcm->height = height; | ||
100 | tcm->width = width; | ||
101 | tcm->reserve_2d = sita_reserve_2d; | ||
102 | tcm->reserve_1d = sita_reserve_1d; | ||
103 | tcm->free = sita_free; | ||
104 | tcm->deinit = sita_deinit; | ||
105 | tcm->pvt = (void *)pvt; | ||
106 | |||
107 | spin_lock_init(&(pvt->lock)); | ||
108 | |||
109 | /* Creating the tcm map */ | ||
110 | pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL); | ||
111 | if (!pvt->map) | ||
112 | goto error; | ||
113 | |||
114 | for (i = 0; i < tcm->width; i++) { | ||
115 | pvt->map[i] = | ||
116 | kmalloc(sizeof(**pvt->map) * tcm->height, | ||
117 | GFP_KERNEL); | ||
118 | if (pvt->map[i] == NULL) { | ||
119 | while (i--) | ||
120 | kfree(pvt->map[i]); | ||
121 | kfree(pvt->map); | ||
122 | goto error; | ||
123 | } | ||
124 | } | ||
125 | |||
126 | if (attr && attr->x <= tcm->width && attr->y <= tcm->height) { | ||
127 | pvt->div_pt.x = attr->x; | ||
128 | pvt->div_pt.y = attr->y; | ||
129 | |||
130 | } else { | ||
131 | /* Defaulting to 3:1 ratio on width for 2D area split */ | ||
132 | /* Defaulting to 3:1 ratio on height for 2D and 1D split */ | ||
133 | pvt->div_pt.x = (tcm->width * 3) / 4; | ||
134 | pvt->div_pt.y = (tcm->height * 3) / 4; | ||
135 | } | ||
136 | |||
137 | spin_lock(&(pvt->lock)); | ||
138 | assign(&area, 0, 0, width - 1, height - 1); | ||
139 | fill_area(tcm, &area, NULL); | ||
140 | spin_unlock(&(pvt->lock)); | ||
141 | return tcm; | ||
142 | |||
143 | error: | ||
144 | kfree(tcm); | ||
145 | kfree(pvt); | ||
146 | return NULL; | ||
147 | } | ||
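Callers hand sita_init() an optional division point that splits the container between the aligned-2D, unaligned-2D and 1D scan regions; a sketch with made-up dimensions:

	struct tcm_pt div = { .x = 192, .y = 96 };	/* hypothetical split */
	struct tcm *tcm = sita_init(256, 128, &div);	/* 256x128-slot container */
	if (!tcm)
		return -ENOMEM;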
148 | |||
149 | static void sita_deinit(struct tcm *tcm) | ||
150 | { | ||
151 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | ||
152 | struct tcm_area area = {0}; | ||
153 | s32 i; | ||
154 | |||
155 | area.p1.x = tcm->width - 1; | ||
156 | area.p1.y = tcm->height - 1; | ||
157 | |||
158 | spin_lock(&(pvt->lock)); | ||
159 | fill_area(tcm, &area, NULL); | ||
160 | spin_unlock(&(pvt->lock)); | ||
161 | |||
162 | for (i = 0; i < tcm->width; i++) | ||
163 | kfree(pvt->map[i]); | ||
164 | kfree(pvt->map); | ||
165 | kfree(pvt); | ||
166 | } | ||
167 | |||
168 | /** | ||
169 | * Reserve a 1D area in the container | ||
170 | * | ||
171 | * @param num_slots size of 1D area | ||
172 | * @param area pointer to the area that will be populated with the | ||
173 | * reserved area | ||
174 | * | ||
175 | * @return 0 on success, non-0 error value on failure. | ||
176 | */ | ||
177 | static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots, | ||
178 | struct tcm_area *area) | ||
179 | { | ||
180 | s32 ret; | ||
181 | struct tcm_area field = {0}; | ||
182 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | ||
183 | |||
184 | spin_lock(&(pvt->lock)); | ||
185 | |||
186 | /* Scanning entire container */ | ||
187 | assign(&field, tcm->width - 1, tcm->height - 1, 0, 0); | ||
188 | |||
189 | ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area); | ||
190 | if (!ret) | ||
191 | /* update map */ | ||
192 | fill_area(tcm, area, area); | ||
193 | |||
194 | spin_unlock(&(pvt->lock)); | ||
195 | return ret; | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * Reserve a 2D area in the container | ||
200 | * | ||
201 | * @param w width | ||
202 | * @param h height | ||
203 | * @param area pointer to the area that will be populated with the reserved | ||
204 | * area | ||
205 | * | ||
206 | * @return 0 on success, non-0 error value on failure. | ||
207 | */ | ||
208 | static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align, | ||
209 | struct tcm_area *area) | ||
210 | { | ||
211 | s32 ret; | ||
212 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | ||
213 | |||
214 | /* not supporting more than 64 as alignment */ | ||
215 | if (align > 64) | ||
216 | return -EINVAL; | ||
217 | |||
218 | /* we prefer 1, 32 and 64 as alignment */ | ||
219 | align = align <= 1 ? 1 : align <= 32 ? 32 : 64; | ||
220 | |||
221 | spin_lock(&(pvt->lock)); | ||
222 | ret = scan_areas_and_find_fit(tcm, w, h, align, area); | ||
223 | if (!ret) | ||
224 | /* update map */ | ||
225 | fill_area(tcm, area, area); | ||
226 | |||
227 | spin_unlock(&(pvt->lock)); | ||
228 | return ret; | ||
229 | } | ||
230 | |||
231 | /** | ||
232 | * Unreserve a previously allocated 2D or 1D area | ||
233 | * @param area area to be freed | ||
234 | * @return 0 - success | ||
235 | */ | ||
236 | static s32 sita_free(struct tcm *tcm, struct tcm_area *area) | ||
237 | { | ||
238 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | ||
239 | |||
240 | spin_lock(&(pvt->lock)); | ||
241 | |||
242 | /* check that this is in fact an existing area */ | ||
243 | WARN_ON(pvt->map[area->p0.x][area->p0.y] != area || | ||
244 | pvt->map[area->p1.x][area->p1.y] != area); | ||
245 | |||
246 | /* Clear the contents of the associated tiles in the map */ | ||
247 | fill_area(tcm, area, NULL); | ||
248 | |||
249 | spin_unlock(&(pvt->lock)); | ||
250 | |||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | /** | ||
255 | * Note: In general the coordinates in the scan field are relative to the scan | ||
256 | * sweep directions. The scan origin (e.g. top-left corner) will always be | ||
257 | * the p0 member of the field. Therefore, for a scan from top-left p0.x <= p1.x | ||
258 | * and p0.y <= p1.y; whereas, for a scan from bottom-right p1.x <= p0.x and p1.y | ||
259 | * <= p0.y | ||
260 | */ | ||
261 | |||
262 | /** | ||
263 | * Raster scan horizontally right to left from top to bottom to find a place for | ||
264 | * a 2D area of given size inside a scan field. | ||
265 | * | ||
266 | * @param w width of desired area | ||
267 | * @param h height of desired area | ||
268 | * @param align desired area alignment | ||
269 | * @param area pointer to the area that will be set to the best position | ||
270 | * @param field area to scan (inclusive) | ||
271 | * | ||
272 | * @return 0 on success, non-0 error value on failure. | ||
273 | */ | ||
274 | static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align, | ||
275 | struct tcm_area *field, struct tcm_area *area) | ||
276 | { | ||
277 | s32 x, y; | ||
278 | s16 start_x, end_x, start_y, end_y, found_x = -1; | ||
279 | struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map; | ||
280 | struct score best = {{0}, {0}, {0}, 0}; | ||
281 | |||
282 | start_x = field->p0.x; | ||
283 | end_x = field->p1.x; | ||
284 | start_y = field->p0.y; | ||
285 | end_y = field->p1.y; | ||
286 | |||
287 | /* check scan area co-ordinates */ | ||
288 | if (field->p0.x < field->p1.x || | ||
289 | field->p1.y < field->p0.y) | ||
290 | return -EINVAL; | ||
291 | |||
292 | /* check if allocation would fit in scan area */ | ||
293 | if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y)) | ||
294 | return -ENOSPC; | ||
295 | |||
296 | /* adjust start_x and end_y, as allocation would not fit beyond */ | ||
297 | start_x = ALIGN_DOWN(start_x - w + 1, align); /* - 1 to be inclusive */ | ||
298 | end_y = end_y - h + 1; | ||
299 | |||
300 | /* check if allocation would still fit in scan area */ | ||
301 | if (start_x < end_x) | ||
302 | return -ENOSPC; | ||
303 | |||
304 | /* scan field top-to-bottom, right-to-left */ | ||
305 | for (y = start_y; y <= end_y; y++) { | ||
306 | for (x = start_x; x >= end_x; x -= align) { | ||
307 | if (is_area_free(map, x, y, w, h)) { | ||
308 | found_x = x; | ||
309 | |||
310 | /* update best candidate */ | ||
311 | if (update_candidate(tcm, x, y, w, h, field, | ||
312 | CR_R2L_T2B, &best)) | ||
313 | goto done; | ||
314 | |||
315 | /* change upper x bound */ | ||
316 | end_x = x + 1; | ||
317 | break; | ||
318 | } else if (map[x][y] && map[x][y]->is2d) { | ||
319 | /* step over 2D areas */ | ||
320 | x = ALIGN(map[x][y]->p0.x - w + 1, align); | ||
321 | } | ||
322 | } | ||
323 | |||
324 | /* break if you find a free area shouldering the scan field */ | ||
325 | if (found_x == start_x) | ||
326 | break; | ||
327 | } | ||
328 | |||
329 | if (!best.a.tcm) | ||
330 | return -ENOSPC; | ||
331 | done: | ||
332 | assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y); | ||
333 | return 0; | ||
334 | } | ||
335 | |||
336 | /** | ||
337 | * Raster scan horizontally left to right from top to bottom to find a place for | ||
338 | * a 2D area of given size inside a scan field. | ||
339 | * | ||
340 | * @param w width of desired area | ||
341 | * @param h height of desired area | ||
342 | * @param align desired area alignment | ||
343 | * @param area pointer to the area that will be set to the best position | ||
344 | * @param field area to scan (inclusive) | ||
345 | * | ||
346 | * @return 0 on success, non-0 error value on failure. | ||
347 | */ | ||
348 | static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align, | ||
349 | struct tcm_area *field, struct tcm_area *area) | ||
350 | { | ||
351 | s32 x, y; | ||
352 | s16 start_x, end_x, start_y, end_y, found_x = -1; | ||
353 | struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map; | ||
354 | struct score best = {{0}, {0}, {0}, 0}; | ||
355 | |||
356 | start_x = field->p0.x; | ||
357 | end_x = field->p1.x; | ||
358 | start_y = field->p0.y; | ||
359 | end_y = field->p1.y; | ||
360 | |||
361 | /* check scan area co-ordinates */ | ||
362 | if (field->p1.x < field->p0.x || | ||
363 | field->p1.y < field->p0.y) | ||
364 | return -EINVAL; | ||
365 | |||
366 | /* check if allocation would fit in scan area */ | ||
367 | if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y)) | ||
368 | return -ENOSPC; | ||
369 | |||
370 | start_x = ALIGN(start_x, align); | ||
371 | |||
372 | /* check if allocation would still fit in scan area */ | ||
373 | if (w > LEN(end_x, start_x)) | ||
374 | return -ENOSPC; | ||
375 | |||
376 | /* adjust end_x and end_y, as allocation would not fit beyond */ | ||
377 | end_x = end_x - w + 1; /* + 1 to be inclusive */ | ||
378 | end_y = end_y - h + 1; | ||
379 | |||
380 | /* scan field top-to-bottom, left-to-right */ | ||
381 | for (y = start_y; y <= end_y; y++) { | ||
382 | for (x = start_x; x <= end_x; x += align) { | ||
383 | if (is_area_free(map, x, y, w, h)) { | ||
384 | found_x = x; | ||
385 | |||
386 | /* update best candidate */ | ||
387 | if (update_candidate(tcm, x, y, w, h, field, | ||
388 | CR_L2R_T2B, &best)) | ||
389 | goto done; | ||
390 | /* change upper x bound */ | ||
391 | end_x = x - 1; | ||
392 | |||
393 | break; | ||
394 | } else if (map[x][y] && map[x][y]->is2d) { | ||
395 | /* step over 2D areas */ | ||
396 | x = ALIGN_DOWN(map[x][y]->p1.x, align); | ||
397 | } | ||
398 | } | ||
399 | |||
400 | /* break if you find a free area shouldering the scan field */ | ||
401 | if (found_x == start_x) | ||
402 | break; | ||
403 | } | ||
404 | |||
405 | if (!best.a.tcm) | ||
406 | return -ENOSPC; | ||
407 | done: | ||
408 | assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y); | ||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | /** | ||
413 | * Raster scan horizontally right to left from bottom to top to find a place | ||
414 | * for a 1D area of given size inside a scan field. | ||
415 | * | ||
416 | * @param num_slots size of desired area | ||
417 | * @param align desired area alignment | ||
418 | * @param area pointer to the area that will be set to the best | ||
419 | * position | ||
420 | * @param field area to scan (inclusive) | ||
421 | * | ||
422 | * @return 0 on success, non-0 error value on failure. | ||
423 | */ | ||
424 | static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots, | ||
425 | struct tcm_area *field, struct tcm_area *area) | ||
426 | { | ||
427 | s32 found = 0; | ||
428 | s16 x, y; | ||
429 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | ||
430 | struct tcm_area *p; | ||
431 | |||
432 | /* check scan area co-ordinates */ | ||
433 | if (field->p0.y < field->p1.y) | ||
434 | return -EINVAL; | ||
435 | |||
436 | /** | ||
437 | * Currently we only support full width 1D scan field, which makes sense | ||
438 | * since 1D slot-ordering spans the full container width. | ||
439 | */ | ||
440 | if (tcm->width != field->p0.x - field->p1.x + 1) | ||
441 | return -EINVAL; | ||
442 | |||
443 | /* check if allocation would fit in scan area */ | ||
444 | if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y)) | ||
445 | return -ENOSPC; | ||
446 | |||
447 | x = field->p0.x; | ||
448 | y = field->p0.y; | ||
449 | |||
450 | /* find num_slots consecutive free slots to the left */ | ||
451 | while (found < num_slots) { | ||
452 | if (y < 0) | ||
453 | return -ENOSPC; | ||
454 | |||
455 | /* remember bottom-right corner */ | ||
456 | if (found == 0) { | ||
457 | area->p1.x = x; | ||
458 | area->p1.y = y; | ||
459 | } | ||
460 | |||
461 | /* skip busy regions */ | ||
462 | p = pvt->map[x][y]; | ||
463 | if (p) { | ||
464 | /* move to left of 2D areas, top left of 1D */ | ||
465 | x = p->p0.x; | ||
466 | if (!p->is2d) | ||
467 | y = p->p0.y; | ||
468 | |||
469 | /* start over */ | ||
470 | found = 0; | ||
471 | } else { | ||
472 | /* count consecutive free slots */ | ||
473 | found++; | ||
474 | if (found == num_slots) | ||
475 | break; | ||
476 | } | ||
477 | |||
478 | /* move to the left */ | ||
479 | if (x == 0) | ||
480 | y--; | ||
481 | x = (x ? : tcm->width) - 1; | ||
482 | |||
483 | } | ||
484 | |||
485 | /* set top-left corner */ | ||
486 | area->p0.x = x; | ||
487 | area->p0.y = y; | ||
488 | return 0; | ||
489 | } | ||
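The wrap-around step at the bottom of the loop leans on the GCC '?:' extension; written out long-hand it is equivalent to:

	if (x == 0) {
		y--;			/* stepped off the left edge: up one row */
		x = tcm->width - 1;	/* ...restarting at the right edge */
	} else {
		x--;
	}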
490 | |||
491 | /** | ||
492 | * Find a place for a 2D area of given size inside a scan field based on its | ||
493 | * alignment needs. | ||
494 | * | ||
495 | * @param w width of desired area | ||
496 | * @param h height of desired area | ||
497 | * @param align desired area alignment | ||
498 | * @param area pointer to the area that will be set to the best position | ||
499 | * | ||
500 | * @return 0 on success, non-0 error value on failure. | ||
501 | */ | ||
502 | static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align, | ||
503 | struct tcm_area *area) | ||
504 | { | ||
505 | s32 ret = 0; | ||
506 | struct tcm_area field = {0}; | ||
507 | u16 boundary_x, boundary_y; | ||
508 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | ||
509 | |||
510 | if (align > 1) { | ||
511 | /* prefer top-left corner */ | ||
512 | boundary_x = pvt->div_pt.x - 1; | ||
513 | boundary_y = pvt->div_pt.y - 1; | ||
514 | |||
515 | /* expand width and height if needed */ | ||
516 | if (w > pvt->div_pt.x) | ||
517 | boundary_x = tcm->width - 1; | ||
518 | if (h > pvt->div_pt.y) | ||
519 | boundary_y = tcm->height - 1; | ||
520 | |||
521 | assign(&field, 0, 0, boundary_x, boundary_y); | ||
522 | ret = scan_l2r_t2b(tcm, w, h, align, &field, area); | ||
523 | |||
524 | /* scan whole container if failed, but do not scan 2x */ | ||
525 | if (ret != 0 && (boundary_x != tcm->width - 1 || | ||
526 | boundary_y != tcm->height - 1)) { | ||
527 | /* scan the entire container if nothing found */ | ||
528 | assign(&field, 0, 0, tcm->width - 1, tcm->height - 1); | ||
529 | ret = scan_l2r_t2b(tcm, w, h, align, &field, area); | ||
530 | } | ||
531 | } else if (align == 1) { | ||
532 | /* prefer top-right corner */ | ||
533 | boundary_x = pvt->div_pt.x; | ||
534 | boundary_y = pvt->div_pt.y - 1; | ||
535 | |||
536 | /* expand width and height if needed */ | ||
537 | if (w > (tcm->width - pvt->div_pt.x)) | ||
538 | boundary_x = 0; | ||
539 | if (h > pvt->div_pt.y) | ||
540 | boundary_y = tcm->height - 1; | ||
541 | |||
542 | assign(&field, tcm->width - 1, 0, boundary_x, boundary_y); | ||
543 | ret = scan_r2l_t2b(tcm, w, h, align, &field, area); | ||
544 | |||
545 | /* scan whole container if failed, but do not scan 2x */ | ||
546 | if (ret != 0 && (boundary_x != 0 || | ||
547 | boundary_y != tcm->height - 1)) { | ||
548 | /* scan the entire container if nothing found */ | ||
549 | assign(&field, tcm->width - 1, 0, 0, tcm->height - 1); | ||
550 | ret = scan_r2l_t2b(tcm, w, h, align, &field, | ||
551 | area); | ||
552 | } | ||
553 | } | ||
554 | |||
555 | return ret; | ||
556 | } | ||
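For illustration (all numbers made up, not from the driver): with a 256x128 container and a divider point div_pt = (192, 96), an aligned request (align > 1) for a 16x8 area first scans only the top-left region (0,0)..(191,95); an unaligned request of the same size first scans the top-right region (255,0)..(192,95) right-to-left. Only if that preferred scan fails does the code rescan the full container, and the boundary comparison ensures the preferred field is not scanned a second time when it already covered the whole container.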
557 | |||
558 | /* check if an entire area is free */ | ||
559 | static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h) | ||
560 | { | ||
561 | u16 x = 0, y = 0; | ||
562 | for (y = y0; y < y0 + h; y++) { | ||
563 | for (x = x0; x < x0 + w; x++) { | ||
564 | if (map[x][y]) | ||
565 | return false; | ||
566 | } | ||
567 | } | ||
568 | return true; | ||
569 | } | ||
570 | |||
571 | /* fills an area with a parent tcm_area */ | ||
572 | static void fill_area(struct tcm *tcm, struct tcm_area *area, | ||
573 | struct tcm_area *parent) | ||
574 | { | ||
575 | s32 x, y; | ||
576 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | ||
577 | struct tcm_area a, a_; | ||
578 | |||
579 | /* set area's tcm; otherwise, enumerator considers it invalid */ | ||
580 | area->tcm = tcm; | ||
581 | |||
582 | tcm_for_each_slice(a, *area, a_) { | ||
583 | for (x = a.p0.x; x <= a.p1.x; ++x) | ||
584 | for (y = a.p0.y; y <= a.p1.y; ++y) | ||
585 | pvt->map[x][y] = parent; | ||
586 | |||
587 | } | ||
588 | } | ||
589 | |||
590 | /** | ||
591 | * Compares a candidate area to the current best area, and if it is a better | ||
592 | * fit, it updates the best to this one. | ||
593 | * | ||
594 | * @param x0, y0, w, h left, top, width, height of candidate area | ||
595 | * @param field scan field | ||
596 | * @param criteria scan criteria | ||
597 | * @param best best candidate and its scores | ||
598 | * | ||
599 | * @return 1 (true) if the candidate area is known to be the final best, so no | ||
600 | * more searching should be performed | ||
601 | */ | ||
602 | static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h, | ||
603 | struct tcm_area *field, s32 criteria, | ||
604 | struct score *best) | ||
605 | { | ||
606 | struct score me; /* score for area */ | ||
607 | |||
608 | /* | ||
609 | * NOTE: For horizontal bias we always take the first candidate found, | ||
610 | * because our scan is horizontal-raster-based and the first candidate | ||
611 | * therefore automatically satisfies the horizontal bias. | ||
612 | */ | ||
613 | bool first = criteria & CR_BIAS_HORIZONTAL; | ||
614 | |||
615 | assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1); | ||
616 | |||
617 | /* calculate score for current candidate */ | ||
618 | if (!first) { | ||
619 | get_neighbor_stats(tcm, &me.a, &me.n); | ||
620 | me.neighs = me.n.edge + me.n.busy; | ||
621 | get_nearness_factor(field, &me.a, &me.f); | ||
622 | } | ||
623 | |||
624 | /* the 1st candidate is always the best */ | ||
625 | if (!best->a.tcm) | ||
626 | goto better; | ||
627 | |||
628 | BUG_ON(first); | ||
629 | |||
630 | /* diagonal balance check */ | ||
631 | if ((criteria & CR_DIAGONAL_BALANCE) && | ||
632 | best->neighs <= me.neighs && | ||
633 | (best->neighs < me.neighs || | ||
634 | /* best->neighs == me.neighs at this point */ | ||
635 | best->n.busy < me.n.busy || | ||
636 | (best->n.busy == me.n.busy && | ||
637 | /* check the nearness factor */ | ||
638 | best->f.x + best->f.y > me.f.x + me.f.y))) | ||
639 | goto better; | ||
640 | |||
641 | /* not better, keep going */ | ||
642 | return 0; | ||
643 | |||
644 | better: | ||
645 | /* save current area as best */ | ||
646 | memcpy(best, &me, sizeof(me)); | ||
647 | best->a.tcm = tcm; | ||
648 | return first; | ||
649 | } | ||
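The diagonal-balance chain above is easier to see with concrete numbers. Below is a pared-down, self-contained sketch of just the comparison (simplified stand-in types and made-up scores; not the driver's own structures):

	#include <stdbool.h>
	#include <stdio.h>

	/* stand-in for struct score: neighbor count, busy count, nearness */
	struct cand { int neighs, busy, fx, fy; };

	/* mirrors the CR_DIAGONAL_BALANCE comparison chain above */
	static bool beats(const struct cand *best, const struct cand *me)
	{
		return best->neighs <= me->neighs &&
		       (best->neighs < me->neighs ||
			best->busy < me->busy ||
			(best->busy == me->busy &&
			 best->fx + best->fy > me->fx + me->fy));
	}

	int main(void)
	{
		/* equal neighbor and busy counts ... */
		struct cand best = { .neighs = 2, .busy = 1, .fx = 500, .fy = 500 };
		struct cand me   = { .neighs = 2, .busy = 1, .fx = 250, .fy = 250 };

		/* ... so the candidate nearer the scan origin wins */
		printf("candidate replaces best: %s\n",
		       beats(&best, &me) ? "yes" : "no");
		return 0;
	}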
650 | |||
651 | /** | ||
652 | * Calculate the nearness factor of an area in a search field. The nearness | ||
653 | * factor is smaller if the area is closer to the search origin. | ||
654 | */ | ||
655 | static void get_nearness_factor(struct tcm_area *field, struct tcm_area *area, | ||
656 | struct nearness_factor *nf) | ||
657 | { | ||
658 | /* | ||
659 | * Using signed math as field coordinates may be reversed if | ||
660 | * search direction is right-to-left or bottom-to-top. | ||
661 | */ | ||
662 | nf->x = (s32)(area->p0.x - field->p0.x) * 1000 / | ||
663 | (field->p1.x - field->p0.x); | ||
664 | nf->y = (s32)(area->p0.y - field->p0.y) * 1000 / | ||
665 | (field->p1.y - field->p0.y); | ||
666 | } | ||
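Two hypothetical examples of the factor: for a left-to-right field with p0.x = 0 and p1.x = 100, a candidate starting at x = 25 scores nf->x = (25 - 0) * 1000 / (100 - 0) = 250, i.e. a quarter of the way into the field. For a right-to-left field with p0.x = 255 and p1.x = 192, a candidate at x = 240 scores (240 - 255) * 1000 / (192 - 255) = 238: numerator and denominator are both negative, which is why the signed math above keeps the factor positive.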
667 | |||
668 | /* get neighbor statistics */ | ||
669 | static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area, | ||
670 | struct neighbor_stats *stat) | ||
671 | { | ||
672 | s16 x = 0, y = 0; | ||
673 | struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt; | ||
674 | |||
675 | /* clear any existing values */ | ||
676 | memset(stat, 0, sizeof(*stat)); | ||
677 | |||
678 | /* process top & bottom edges */ | ||
679 | for (x = area->p0.x; x <= area->p1.x; x++) { | ||
680 | if (area->p0.y == 0) | ||
681 | stat->edge++; | ||
682 | else if (pvt->map[x][area->p0.y - 1]) | ||
683 | stat->busy++; | ||
684 | |||
685 | if (area->p1.y == tcm->height - 1) | ||
686 | stat->edge++; | ||
687 | else if (pvt->map[x][area->p1.y + 1]) | ||
688 | stat->busy++; | ||
689 | } | ||
690 | |||
691 | /* process left & right edges */ | ||
692 | for (y = area->p0.y; y <= area->p1.y; ++y) { | ||
693 | if (area->p0.x == 0) | ||
694 | stat->edge++; | ||
695 | else if (pvt->map[area->p0.x - 1][y]) | ||
696 | stat->busy++; | ||
697 | |||
698 | if (area->p1.x == tcm->width - 1) | ||
699 | stat->edge++; | ||
700 | else if (pvt->map[area->p1.x + 1][y]) | ||
701 | stat->busy++; | ||
702 | } | ||
703 | } | ||
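As a quick sanity check with made-up numbers: a 2x2 area placed in the top-left corner of an otherwise empty container scores edge = 4 (two segments on its top border plus two on its left border, all on the container boundary) and busy = 0. The same area placed flush against an existing allocation trades edge segments for busy counts, which the diagonal-balance criterion in update_candidate() above rewards.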
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.h b/drivers/gpu/drm/omapdrm/tcm-sita.h new file mode 100644 index 000000000000..0444f868671c --- /dev/null +++ b/drivers/gpu/drm/omapdrm/tcm-sita.h | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * tcm-sita.h | ||
3 | * | ||
4 | * SImple Tiler Allocator (SiTA) private structures. | ||
5 | * | ||
6 | * Author: Ravi Ramachandra <r.ramachandra@ti.com> | ||
7 | * | ||
8 | * Copyright (C) 2009-2011 Texas Instruments, Inc. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * * Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * | ||
18 | * * Redistributions in binary form must reproduce the above copyright | ||
19 | * notice, this list of conditions and the following disclaimer in the | ||
20 | * documentation and/or other materials provided with the distribution. | ||
21 | * | ||
22 | * * Neither the name of Texas Instruments Incorporated nor the names of | ||
23 | * its contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
27 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, | ||
28 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | ||
29 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR | ||
30 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | ||
31 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | ||
32 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; | ||
33 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
34 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | ||
35 | * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, | ||
36 | * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
37 | */ | ||
38 | |||
39 | #ifndef _TCM_SITA_H | ||
40 | #define _TCM_SITA_H | ||
41 | |||
42 | #include "tcm.h" | ||
43 | |||
44 | /* length between two coordinates */ | ||
45 | #define LEN(a, b) ((a) > (b) ? (a) - (b) + 1 : (b) - (a) + 1) | ||
46 | |||
47 | enum criteria { | ||
48 | CR_MAX_NEIGHS = 0x01, | ||
49 | CR_FIRST_FOUND = 0x10, | ||
50 | CR_BIAS_HORIZONTAL = 0x20, | ||
51 | CR_BIAS_VERTICAL = 0x40, | ||
52 | CR_DIAGONAL_BALANCE = 0x80 | ||
53 | }; | ||
54 | |||
55 | /* nearness to the beginning of the search field from 0 to 1000 */ | ||
56 | struct nearness_factor { | ||
57 | s32 x; | ||
58 | s32 y; | ||
59 | }; | ||
60 | |||
61 | /* | ||
62 | * Statistics on immediately neighboring slots. Edge is the number of | ||
63 | * border segments that lie on the container boundary. Busy is the | ||
64 | * number of neighboring slots that are occupied. | ||
65 | */ | ||
66 | struct neighbor_stats { | ||
67 | u16 edge; | ||
68 | u16 busy; | ||
69 | }; | ||
70 | |||
71 | /* structure to keep the score of a potential allocation */ | ||
72 | struct score { | ||
73 | struct nearness_factor f; | ||
74 | struct neighbor_stats n; | ||
75 | struct tcm_area a; | ||
76 | u16 neighs; /* number of busy neighbors */ | ||
77 | }; | ||
78 | |||
79 | struct sita_pvt { | ||
80 | spinlock_t lock; /* spinlock to protect access */ | ||
81 | struct tcm_pt div_pt; /* divider point splitting container */ | ||
82 | struct tcm_area ***map; /* pointers to the parent area for each slot */ | ||
83 | }; | ||
84 | |||
85 | /* assign coordinates to area */ | ||
86 | static inline | ||
87 | void assign(struct tcm_area *a, u16 x0, u16 y0, u16 x1, u16 y1) | ||
88 | { | ||
89 | a->p0.x = x0; | ||
90 | a->p0.y = y0; | ||
91 | a->p1.x = x1; | ||
92 | a->p1.y = y1; | ||
93 | } | ||
94 | |||
95 | #endif | ||
diff --git a/drivers/gpu/drm/omapdrm/tcm.h b/drivers/gpu/drm/omapdrm/tcm.h new file mode 100644 index 000000000000..a8d5ce47686f --- /dev/null +++ b/drivers/gpu/drm/omapdrm/tcm.h | |||
@@ -0,0 +1,328 @@ | |||
1 | /* | ||
2 | * tcm.h | ||
3 | * | ||
4 | * TILER container manager specification and support functions for TI | ||
5 | * TILER driver. | ||
6 | * | ||
7 | * Author: Lajos Molnar <molnar@ti.com> | ||
8 | * | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * * Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * | ||
18 | * * Redistributions in binary form must reproduce the above copyright | ||
19 | * notice, this list of conditions and the following disclaimer in the | ||
20 | * documentation and/or other materials provided with the distribution. | ||
21 | * | ||
22 | * * Neither the name of Texas Instruments Incorporated nor the names of | ||
23 | * its contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
27 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, | ||
28 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | ||
29 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR | ||
30 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | ||
31 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | ||
32 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; | ||
33 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
34 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | ||
35 | * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, | ||
36 | * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
37 | */ | ||
38 | |||
39 | #ifndef TCM_H | ||
40 | #define TCM_H | ||
41 | |||
42 | struct tcm; | ||
43 | |||
44 | /* point */ | ||
45 | struct tcm_pt { | ||
46 | u16 x; | ||
47 | u16 y; | ||
48 | }; | ||
49 | |||
50 | /* 1d or 2d area */ | ||
51 | struct tcm_area { | ||
52 | bool is2d; /* whether area is 1d or 2d */ | ||
53 | struct tcm *tcm; /* parent */ | ||
54 | struct tcm_pt p0; | ||
55 | struct tcm_pt p1; | ||
56 | }; | ||
57 | |||
58 | struct tcm { | ||
59 | u16 width, height; /* container dimensions */ | ||
60 | int lut_id; /* Lookup table identifier */ | ||
61 | |||
62 | unsigned int y_offset; /* offset to use for y coordinates */ | ||
63 | |||
64 | /* The 'pvt' structure holds any implementation-specific details (attr), | ||
65 | along with a linked list of allocated areas and a lock for mutually | ||
66 | exclusive access to the list. It may also keep copies of width and | ||
67 | height to detect changes to the publicly visible fields above. */ | ||
68 | void *pvt; | ||
69 | |||
70 | /* function table */ | ||
71 | s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u8 align, | ||
72 | struct tcm_area *area); | ||
73 | s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area); | ||
74 | s32 (*free) (struct tcm *tcm, struct tcm_area *area); | ||
75 | void (*deinit) (struct tcm *tcm); | ||
76 | }; | ||
77 | |||
78 | /*============================================================================= | ||
79 | BASIC TILER CONTAINER MANAGER INTERFACE | ||
80 | =============================================================================*/ | ||
81 | |||
82 | /* | ||
83 | * NOTE: | ||
84 | * | ||
85 | * Since some basic parameter checking is done outside the TCM algorithms, | ||
86 | * TCM implementations do NOT have to check the following: | ||
87 | * | ||
88 | * whether the area pointer is NULL | ||
89 | * whether width and height fit within the container | ||
90 | * whether the number of pages exceeds the size of the container | ||
91 | * | ||
92 | */ | ||
93 | |||
94 | struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr); | ||
95 | |||
96 | |||
97 | /** | ||
98 | * Deinitialize tiler container manager. | ||
99 | * | ||
100 | * @param tcm Pointer to container manager. | ||
101 | * | ||
102 | * Returns no value. The call frees as much memory as | ||
103 | * possible, and passing a NULL manager pointer is a | ||
104 | * safe no-op, so there is no error to report. | ||
105 | * | ||
106 | */ | ||
107 | static inline void tcm_deinit(struct tcm *tcm) | ||
108 | { | ||
109 | if (tcm) | ||
110 | tcm->deinit(tcm); | ||
111 | } | ||
112 | |||
113 | /** | ||
114 | * Reserves a 2D area in the container. | ||
115 | * | ||
116 | * @param tcm Pointer to container manager. | ||
117 | * @param width Width (in pages) of area to be reserved. | ||
118 | * @param height Height (in pages) of area to be reserved. | ||
119 | * @param align Alignment requirement for the top-left corner of the | ||
120 | * area. Not all values may be supported by the container | ||
121 | * manager, but it must support 0, 32 and 64. | ||
122 | * A value of 0 is equivalent to 1. | ||
123 | * @param area Pointer to where the reserved area should be stored. | ||
124 | * | ||
125 | * @return 0 on success. Non-0 error code on failure. Also, | ||
126 | * the tcm field of the area will be set to NULL on | ||
127 | * failure. Some error codes: -ENODEV: invalid manager, | ||
128 | * -EINVAL: invalid area, -ENOMEM: not enough space for | ||
129 | * allocation. | ||
130 | */ | ||
131 | static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height, | ||
132 | u16 align, struct tcm_area *area) | ||
133 | { | ||
134 | /* perform rudimentary error checking */ | ||
135 | s32 res = tcm == NULL ? -ENODEV : | ||
136 | (area == NULL || width == 0 || height == 0 || | ||
137 | /* align must be a power of 2 */ | ||
138 | (align & (align - 1))) ? -EINVAL : | ||
139 | (height > tcm->height || width > tcm->width) ? -ENOMEM : 0; | ||
140 | |||
141 | if (!res) { | ||
142 | area->is2d = true; | ||
143 | res = tcm->reserve_2d(tcm, height, width, align, area); | ||
144 | area->tcm = res ? NULL : tcm; | ||
145 | } | ||
146 | |||
147 | return res; | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * Reserves a 1D area in the container. | ||
152 | * | ||
153 | * @param tcm Pointer to container manager. | ||
154 | * @param slots Number of (contiguous) slots to reserve. | ||
155 | * @param area Pointer to where the reserved area should be stored. | ||
156 | * | ||
157 | * @return 0 on success. Non-0 error code on failure. Also, | ||
158 | * the tcm field of the area will be set to NULL on | ||
159 | * failure. Some error codes: -ENODEV: invalid manager, | ||
160 | * -EINVAL: invalid area, -ENOMEM: not enough space for | ||
161 | * allocation. | ||
162 | */ | ||
163 | static inline s32 tcm_reserve_1d(struct tcm *tcm, u32 slots, | ||
164 | struct tcm_area *area) | ||
165 | { | ||
166 | /* perform rudimentary error checking */ | ||
167 | s32 res = tcm == NULL ? -ENODEV : | ||
168 | (area == NULL || slots == 0) ? -EINVAL : | ||
169 | slots > (tcm->width * (u32) tcm->height) ? -ENOMEM : 0; | ||
170 | |||
171 | if (!res) { | ||
172 | area->is2d = false; | ||
173 | res = tcm->reserve_1d(tcm, slots, area); | ||
174 | area->tcm = res ? NULL : tcm; | ||
175 | } | ||
176 | |||
177 | return res; | ||
178 | } | ||
179 | |||
180 | /** | ||
181 | * Free a previously reserved area from the container. | ||
182 | * | ||
183 | * @param area Pointer to area reserved by a prior call to | ||
184 | * tcm_reserve_1d or tcm_reserve_2d call, whether | ||
185 | * it was successful or not. (Note: all fields of | ||
186 | * the structure must match.) | ||
187 | * | ||
188 | * @return 0 on success. Non-0 error code on failure. Also, the tcm | ||
189 | * field of the area is set to NULL on success to avoid subsequent | ||
190 | * freeing. This call will succeed even when supplied | ||
191 | * with the area from a failed reserve call. | ||
192 | */ | ||
193 | static inline s32 tcm_free(struct tcm_area *area) | ||
194 | { | ||
195 | s32 res = 0; /* free succeeds by default */ | ||
196 | |||
197 | if (area && area->tcm) { | ||
198 | res = area->tcm->free(area->tcm, area); | ||
199 | if (res == 0) | ||
200 | area->tcm = NULL; | ||
201 | } | ||
202 | |||
203 | return res; | ||
204 | } | ||
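Taken together, a typical client lifecycle looks like the sketch below. This is a hypothetical in-kernel fragment, not compilable on its own: the container size, divider point and alignment are made up, and error handling is abbreviated.

	struct tcm_pt attr = { .x = 192, .y = 96 };	/* hypothetical SiTA divider point */
	struct tcm *container = sita_init(256, 128, &attr);
	struct tcm_area area;

	if (container && !tcm_reserve_2d(container, 16, 8, 32, &area)) {
		/* area.p0/area.p1 now bound a 16x8, 32-aligned rectangle */
		tcm_free(&area);	/* clears area.tcm, preventing a double free */
	}
	tcm_deinit(container);	/* NULL-safe */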
205 | |||
206 | /*============================================================================= | ||
207 | HELPER FUNCTION FOR ANY TILER CONTAINER MANAGER | ||
208 | =============================================================================*/ | ||
209 | |||
210 | /** | ||
211 | * This method slices off the topmost 2D slice from the parent area, and stores | ||
212 | * it in the 'slice' parameter. The 'parent' parameter will get modified to | ||
213 | * contain the remaining portion of the area. If the whole parent area can | ||
214 | * fit in a 2D slice, its tcm pointer is set to NULL to mark that it is no | ||
215 | * longer a valid area. | ||
216 | * | ||
217 | * @param parent Pointer to a VALID parent area that will get modified | ||
218 | * @param slice Pointer to the slice area that will get modified | ||
219 | */ | ||
220 | static inline void tcm_slice(struct tcm_area *parent, struct tcm_area *slice) | ||
221 | { | ||
222 | *slice = *parent; | ||
223 | |||
224 | /* check if we need to slice */ | ||
225 | if (slice->tcm && !slice->is2d && | ||
226 | slice->p0.y != slice->p1.y && | ||
227 | (slice->p0.x || (slice->p1.x != slice->tcm->width - 1))) { | ||
228 | /* set end point of slice (start always remains) */ | ||
229 | slice->p1.x = slice->tcm->width - 1; | ||
230 | slice->p1.y = (slice->p0.x) ? slice->p0.y : slice->p1.y - 1; | ||
231 | /* adjust remaining area */ | ||
232 | parent->p0.x = 0; | ||
233 | parent->p0.y = slice->p1.y + 1; | ||
234 | } else { | ||
235 | /* mark this as the last slice */ | ||
236 | parent->tcm = NULL; | ||
237 | } | ||
238 | } | ||
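To make the slicing concrete, here is a self-contained re-statement of the same rule with pared-down types, restricted to 1D areas (an illustration, not the driver code), tracing a span (5,2)..(3,4) in a hypothetical 8-slot-wide container:

	#include <stdio.h>

	struct pt { int x, y; };
	struct ar { int valid; struct pt p0, p1; };	/* 'valid' plays the role of a->tcm */

	/* same slicing rule as tcm_slice() above, for 1D areas only */
	static void slice(struct ar *parent, struct ar *s, int width)
	{
		*s = *parent;
		if (s->valid && s->p0.y != s->p1.y &&
		    (s->p0.x || s->p1.x != width - 1)) {
			/* set end point of slice (start always remains) */
			s->p1.x = width - 1;
			s->p1.y = s->p0.x ? s->p0.y : s->p1.y - 1;
			/* adjust remaining area */
			parent->p0.x = 0;
			parent->p0.y = s->p1.y + 1;
		} else {
			parent->valid = 0;	/* mark this as the last slice */
		}
	}

	int main(void)
	{
		struct ar a = { 1, { 5, 2 }, { 3, 4 } }, s;

		for (slice(&a, &s, 8); s.valid; slice(&a, &s, 8))
			printf("slice (%d,%d)..(%d,%d)\n",
			       s.p0.x, s.p0.y, s.p1.x, s.p1.y);
		return 0;
	}

This prints (5,2)..(7,2), then (0,3)..(7,3), then (0,4)..(3,4): the first slice stops at the end of its starting row because the span does not begin at column 0, the full rows in between collapse into one full-width slice, and the final partial row is emitted once the remainder can no longer be split.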
239 | |||
240 | /* Verify if a tcm area is logically valid */ | ||
241 | static inline bool tcm_area_is_valid(struct tcm_area *area) | ||
242 | { | ||
243 | return area && area->tcm && | ||
244 | /* coordinate bounds */ | ||
245 | area->p1.x < area->tcm->width && | ||
246 | area->p1.y < area->tcm->height && | ||
247 | area->p0.y <= area->p1.y && | ||
248 | /* 1D coordinate relationship + p0.x check */ | ||
249 | ((!area->is2d && | ||
250 | area->p0.x < area->tcm->width && | ||
251 | area->p0.x + area->p0.y * area->tcm->width <= | ||
252 | area->p1.x + area->p1.y * area->tcm->width) || | ||
253 | /* 2D coordinate relationship */ | ||
254 | (area->is2d && | ||
255 | area->p0.x <= area->p1.x)); | ||
256 | } | ||
257 | |||
258 | /* see if a coordinate is within an area */ | ||
259 | static inline bool __tcm_is_in(struct tcm_pt *p, struct tcm_area *a) | ||
260 | { | ||
261 | u16 i; | ||
262 | |||
263 | if (a->is2d) { | ||
264 | return p->x >= a->p0.x && p->x <= a->p1.x && | ||
265 | p->y >= a->p0.y && p->y <= a->p1.y; | ||
266 | } else { | ||
267 | i = p->x + p->y * a->tcm->width; | ||
268 | return i >= a->p0.x + a->p0.y * a->tcm->width && | ||
269 | i <= a->p1.x + a->p1.y * a->tcm->width; | ||
270 | } | ||
271 | } | ||
272 | |||
273 | /* calculate area width */ | ||
274 | static inline u16 __tcm_area_width(struct tcm_area *area) | ||
275 | { | ||
276 | return area->p1.x - area->p0.x + 1; | ||
277 | } | ||
278 | |||
279 | /* calculate area height */ | ||
280 | static inline u16 __tcm_area_height(struct tcm_area *area) | ||
281 | { | ||
282 | return area->p1.y - area->p0.y + 1; | ||
283 | } | ||
284 | |||
285 | /* calculate number of slots in an area */ | ||
286 | static inline u16 __tcm_sizeof(struct tcm_area *area) | ||
287 | { | ||
288 | return area->is2d ? | ||
289 | __tcm_area_width(area) * __tcm_area_height(area) : | ||
290 | (area->p1.x - area->p0.x + 1) + (area->p1.y - area->p0.y) * | ||
291 | area->tcm->width; | ||
292 | } | ||
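A worked example for the 1D branch (hypothetical numbers): in a container 8 slots wide, an area spanning (5,2)..(3,4) covers slot indices 21 through 35, and the formula gives (3 - 5 + 1) + (4 - 2) * 8 = 15 slots. The negative first term is harmless, because the two terms together telescope to the difference of the end indices plus one.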
293 | #define tcm_sizeof(area) __tcm_sizeof(&(area)) | ||
294 | #define tcm_awidth(area) __tcm_area_width(&(area)) | ||
295 | #define tcm_aheight(area) __tcm_area_height(&(area)) | ||
296 | #define tcm_is_in(pt, area) __tcm_is_in(&(pt), &(area)) | ||
297 | |||
298 | /* limit a 1D area to the first N pages */ | ||
299 | static inline s32 tcm_1d_limit(struct tcm_area *a, u32 num_pg) | ||
300 | { | ||
301 | if (__tcm_sizeof(a) < num_pg) | ||
302 | return -ENOMEM; | ||
303 | if (!num_pg) | ||
304 | return -EINVAL; | ||
305 | |||
306 | a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width; | ||
307 | a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width); | ||
308 | return 0; | ||
309 | } | ||
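For instance (made-up numbers), with width = 8 and a 1D area whose p0 = (5,2), tcm_1d_limit(a, 11) sets p1.x = (5 + 10) % 8 = 7 and p1.y = 2 + (5 + 10) / 8 = 3, so the area now spans slot indices 21..31: exactly 11 pages.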
310 | |||
311 | /** | ||
312 | * Iterate through 2D slices of a valid area. Behaves | ||
313 | * syntactically as a for(;;) statement. | ||
314 | * | ||
315 | * @param var Name of a local variable of type 'struct | ||
316 | * tcm_area' (not a pointer) that will get | ||
317 | * modified to contain each slice. | ||
318 | * @param area The VALID parent area, passed by value; it | ||
319 | * does not get modified throughout the loop. | ||
320 | * @param safe Name of a local 'struct tcm_area' used as | ||
321 | * scratch space to track the remaining area. | ||
322 | */ | ||
323 | #define tcm_for_each_slice(var, area, safe) \ | ||
324 | for (safe = area, \ | ||
325 | tcm_slice(&safe, &var); \ | ||
326 | var.tcm; tcm_slice(&safe, &var)) | ||
327 | |||
328 | #endif | ||