aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2013-04-22 06:49:07 -0400
committerDave Airlie <airlied@redhat.com>2013-04-22 06:49:07 -0400
commitf9df7ea33c87291365d943828bec852874f15c2f (patch)
tree53afb4d74f0af01555a13d440d978dff3318c6f3
parentce83adf78bbbe6bdcd99f0b97212337ce6b84940 (diff)
parente1041ca41670dc5502deee1fa3517dbaf9c0a09e (diff)
Merge tag 'drm/tegra/for-3.10' of git://anongit.freedesktop.org/tegra/linux into drm-next
drm/tegra: Changes for v3.10-rc1 The bulk of this pull-request is the host1x series that has been in the works for a few months. The current implementation looks good and has been tested by several independent parties. So far no issues have been found. To be on the safe side, the new Tegra-specific DRM IOCTLs depend on staging in order to give some amount of flexibility to change them just in case. The plan is to remove that dependency once more userspace exists to verify the adequacy of the IOCTLs. Currently only the 2D engine is supported, but patches are in the works to enable 3D support on top of this framework as well. Various bits of open-source userspace exist to test the 2D and 3D support[0]. This is still a bit immature but it allows to verify that the kernel interfaces work properly. To round things off there are two smaller cleanup patches, one of them adding a new pixel format and the other removing a redundent Kconfig dependency. [0]: https://github.com/grate-driver * tag 'drm/tegra/for-3.10' of git://anongit.freedesktop.org/tegra/linux: drm/tegra: don't depend on OF drm/tegra: Support the XBGR8888 pixelformat drm/tegra: Add gr2d device gpu: host1x: drm: Add memory manager and fb gpu: host1x: Remove second host1x driver gpu: host1x: drm: Rename host1x to host1x_drm drm/tegra: Move drm to live under host1x gpu: host1x: Add debug support gpu: host1x: Add channel support gpu: host1x: Add syncpoint wait and interrupts gpu: host1x: Add host1x driver
-rw-r--r--drivers/gpu/Makefile1
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/Makefile7
-rw-r--r--drivers/gpu/drm/tegra/drm.c217
-rw-r--r--drivers/gpu/drm/tegra/fb.c52
-rw-r--r--drivers/gpu/drm/tegra/host1x.c327
-rw-r--r--drivers/gpu/host1x/Kconfig24
-rw-r--r--drivers/gpu/host1x/Makefile20
-rw-r--r--drivers/gpu/host1x/cdma.c491
-rw-r--r--drivers/gpu/host1x/cdma.h100
-rw-r--r--drivers/gpu/host1x/channel.c126
-rw-r--r--drivers/gpu/host1x/channel.h52
-rw-r--r--drivers/gpu/host1x/debug.c210
-rw-r--r--drivers/gpu/host1x/debug.h51
-rw-r--r--drivers/gpu/host1x/dev.c246
-rw-r--r--drivers/gpu/host1x/dev.h308
-rw-r--r--drivers/gpu/host1x/drm/Kconfig (renamed from drivers/gpu/drm/tegra/Kconfig)20
-rw-r--r--drivers/gpu/host1x/drm/dc.c (renamed from drivers/gpu/drm/tegra/dc.c)31
-rw-r--r--drivers/gpu/host1x/drm/dc.h (renamed from drivers/gpu/drm/tegra/dc.h)0
-rw-r--r--drivers/gpu/host1x/drm/drm.c640
-rw-r--r--drivers/gpu/host1x/drm/drm.h (renamed from drivers/gpu/drm/tegra/drm.h)68
-rw-r--r--drivers/gpu/host1x/drm/fb.c374
-rw-r--r--drivers/gpu/host1x/drm/gem.c270
-rw-r--r--drivers/gpu/host1x/drm/gem.h59
-rw-r--r--drivers/gpu/host1x/drm/gr2d.c339
-rw-r--r--drivers/gpu/host1x/drm/hdmi.c (renamed from drivers/gpu/drm/tegra/hdmi.c)5
-rw-r--r--drivers/gpu/host1x/drm/hdmi.h (renamed from drivers/gpu/drm/tegra/hdmi.h)0
-rw-r--r--drivers/gpu/host1x/drm/output.c (renamed from drivers/gpu/drm/tegra/output.c)0
-rw-r--r--drivers/gpu/host1x/drm/rgb.c (renamed from drivers/gpu/drm/tegra/rgb.c)0
-rw-r--r--drivers/gpu/host1x/host1x.h30
-rw-r--r--drivers/gpu/host1x/host1x_bo.h87
-rw-r--r--drivers/gpu/host1x/host1x_client.h35
-rw-r--r--drivers/gpu/host1x/hw/Makefile6
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c326
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c168
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c322
-rw-r--r--drivers/gpu/host1x/hw/host1x01.c42
-rw-r--r--drivers/gpu/host1x/hw/host1x01.h25
-rw-r--r--drivers/gpu/host1x/hw/host1x01_hardware.h143
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x01_channel.h120
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x01_sync.h243
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x01_uclass.h174
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c143
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c114
-rw-r--r--drivers/gpu/host1x/intr.c354
-rw-r--r--drivers/gpu/host1x/intr.h102
-rw-r--r--drivers/gpu/host1x/job.c603
-rw-r--r--drivers/gpu/host1x/job.h162
-rw-r--r--drivers/gpu/host1x/syncpt.c387
-rw-r--r--drivers/gpu/host1x/syncpt.h165
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--include/trace/events/host1x.h253
-rw-r--r--include/uapi/drm/Kbuild1
-rw-r--r--include/uapi/drm/tegra_drm.h136
55 files changed, 7540 insertions, 644 deletions
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 30879df3daea..d8a22c2a579d 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1,2 @@
1obj-y += drm/ vga/ 1obj-y += drm/ vga/
2obj-$(CONFIG_TEGRA_HOST1X) += host1x/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 19b8e0d5d910..b16c50ee769c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -215,8 +215,6 @@ source "drivers/gpu/drm/cirrus/Kconfig"
215 215
216source "drivers/gpu/drm/shmobile/Kconfig" 216source "drivers/gpu/drm/shmobile/Kconfig"
217 217
218source "drivers/gpu/drm/tegra/Kconfig"
219
220source "drivers/gpu/drm/omapdrm/Kconfig" 218source "drivers/gpu/drm/omapdrm/Kconfig"
221 219
222source "drivers/gpu/drm/tilcdc/Kconfig" 220source "drivers/gpu/drm/tilcdc/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6a4211521011..1c9f24396002 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -49,7 +49,6 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
49obj-$(CONFIG_DRM_UDL) += udl/ 49obj-$(CONFIG_DRM_UDL) += udl/
50obj-$(CONFIG_DRM_AST) += ast/ 50obj-$(CONFIG_DRM_AST) += ast/
51obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ 51obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
52obj-$(CONFIG_DRM_TEGRA) += tegra/
53obj-$(CONFIG_DRM_OMAP) += omapdrm/ 52obj-$(CONFIG_DRM_OMAP) += omapdrm/
54obj-$(CONFIG_DRM_TILCDC) += tilcdc/ 53obj-$(CONFIG_DRM_TILCDC) += tilcdc/
55obj-$(CONFIG_DRM_QXL) += qxl/ 54obj-$(CONFIG_DRM_QXL) += qxl/
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
deleted file mode 100644
index 80f73d1315d0..000000000000
--- a/drivers/gpu/drm/tegra/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
1ccflags-y := -Iinclude/drm
2ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
3
4tegra-drm-y := drm.o fb.o dc.o host1x.o
5tegra-drm-y += output.o rgb.o hdmi.o
6
7obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
deleted file mode 100644
index 9d452df5bcad..000000000000
--- a/drivers/gpu/drm/tegra/drm.c
+++ /dev/null
@@ -1,217 +0,0 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/of_address.h>
12#include <linux/of_platform.h>
13
14#include <linux/dma-mapping.h>
15#include <asm/dma-iommu.h>
16
17#include "drm.h"
18
19#define DRIVER_NAME "tegra"
20#define DRIVER_DESC "NVIDIA Tegra graphics"
21#define DRIVER_DATE "20120330"
22#define DRIVER_MAJOR 0
23#define DRIVER_MINOR 0
24#define DRIVER_PATCHLEVEL 0
25
26static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
27{
28 struct device *dev = drm->dev;
29 struct host1x *host1x;
30 int err;
31
32 host1x = dev_get_drvdata(dev);
33 drm->dev_private = host1x;
34 host1x->drm = drm;
35
36 drm_mode_config_init(drm);
37
38 err = host1x_drm_init(host1x, drm);
39 if (err < 0)
40 return err;
41
42 err = drm_vblank_init(drm, drm->mode_config.num_crtc);
43 if (err < 0)
44 return err;
45
46 err = tegra_drm_fb_init(drm);
47 if (err < 0)
48 return err;
49
50 drm_kms_helper_poll_init(drm);
51
52 return 0;
53}
54
55static int tegra_drm_unload(struct drm_device *drm)
56{
57 drm_kms_helper_poll_fini(drm);
58 tegra_drm_fb_exit(drm);
59
60 drm_mode_config_cleanup(drm);
61
62 return 0;
63}
64
65static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
66{
67 return 0;
68}
69
70static void tegra_drm_lastclose(struct drm_device *drm)
71{
72 struct host1x *host1x = drm->dev_private;
73
74 drm_fbdev_cma_restore_mode(host1x->fbdev);
75}
76
77static struct drm_ioctl_desc tegra_drm_ioctls[] = {
78};
79
80static const struct file_operations tegra_drm_fops = {
81 .owner = THIS_MODULE,
82 .open = drm_open,
83 .release = drm_release,
84 .unlocked_ioctl = drm_ioctl,
85 .mmap = drm_gem_cma_mmap,
86 .poll = drm_poll,
87 .fasync = drm_fasync,
88 .read = drm_read,
89#ifdef CONFIG_COMPAT
90 .compat_ioctl = drm_compat_ioctl,
91#endif
92 .llseek = noop_llseek,
93};
94
95static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
96{
97 struct drm_crtc *crtc;
98
99 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
100 struct tegra_dc *dc = to_tegra_dc(crtc);
101
102 if (dc->pipe == pipe)
103 return crtc;
104 }
105
106 return NULL;
107}
108
109static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
110{
111 /* TODO: implement real hardware counter using syncpoints */
112 return drm_vblank_count(dev, crtc);
113}
114
115static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
116{
117 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
118 struct tegra_dc *dc = to_tegra_dc(crtc);
119
120 if (!crtc)
121 return -ENODEV;
122
123 tegra_dc_enable_vblank(dc);
124
125 return 0;
126}
127
128static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
129{
130 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
131 struct tegra_dc *dc = to_tegra_dc(crtc);
132
133 if (crtc)
134 tegra_dc_disable_vblank(dc);
135}
136
137static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
138{
139 struct drm_crtc *crtc;
140
141 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
142 tegra_dc_cancel_page_flip(crtc, file);
143}
144
145#ifdef CONFIG_DEBUG_FS
146static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
147{
148 struct drm_info_node *node = (struct drm_info_node *)s->private;
149 struct drm_device *drm = node->minor->dev;
150 struct drm_framebuffer *fb;
151
152 mutex_lock(&drm->mode_config.fb_lock);
153
154 list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
155 seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
156 fb->base.id, fb->width, fb->height, fb->depth,
157 fb->bits_per_pixel,
158 atomic_read(&fb->refcount.refcount));
159 }
160
161 mutex_unlock(&drm->mode_config.fb_lock);
162
163 return 0;
164}
165
166static struct drm_info_list tegra_debugfs_list[] = {
167 { "framebuffers", tegra_debugfs_framebuffers, 0 },
168};
169
170static int tegra_debugfs_init(struct drm_minor *minor)
171{
172 return drm_debugfs_create_files(tegra_debugfs_list,
173 ARRAY_SIZE(tegra_debugfs_list),
174 minor->debugfs_root, minor);
175}
176
177static void tegra_debugfs_cleanup(struct drm_minor *minor)
178{
179 drm_debugfs_remove_files(tegra_debugfs_list,
180 ARRAY_SIZE(tegra_debugfs_list), minor);
181}
182#endif
183
184struct drm_driver tegra_drm_driver = {
185 .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
186 .load = tegra_drm_load,
187 .unload = tegra_drm_unload,
188 .open = tegra_drm_open,
189 .preclose = tegra_drm_preclose,
190 .lastclose = tegra_drm_lastclose,
191
192 .get_vblank_counter = tegra_drm_get_vblank_counter,
193 .enable_vblank = tegra_drm_enable_vblank,
194 .disable_vblank = tegra_drm_disable_vblank,
195
196#if defined(CONFIG_DEBUG_FS)
197 .debugfs_init = tegra_debugfs_init,
198 .debugfs_cleanup = tegra_debugfs_cleanup,
199#endif
200
201 .gem_free_object = drm_gem_cma_free_object,
202 .gem_vm_ops = &drm_gem_cma_vm_ops,
203 .dumb_create = drm_gem_cma_dumb_create,
204 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
205 .dumb_destroy = drm_gem_cma_dumb_destroy,
206
207 .ioctls = tegra_drm_ioctls,
208 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
209 .fops = &tegra_drm_fops,
210
211 .name = DRIVER_NAME,
212 .desc = DRIVER_DESC,
213 .date = DRIVER_DATE,
214 .major = DRIVER_MAJOR,
215 .minor = DRIVER_MINOR,
216 .patchlevel = DRIVER_PATCHLEVEL,
217};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
deleted file mode 100644
index 03914953cb1c..000000000000
--- a/drivers/gpu/drm/tegra/fb.c
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include "drm.h"
11
12static void tegra_drm_fb_output_poll_changed(struct drm_device *drm)
13{
14 struct host1x *host1x = drm->dev_private;
15
16 drm_fbdev_cma_hotplug_event(host1x->fbdev);
17}
18
19static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
20 .fb_create = drm_fb_cma_create,
21 .output_poll_changed = tegra_drm_fb_output_poll_changed,
22};
23
24int tegra_drm_fb_init(struct drm_device *drm)
25{
26 struct host1x *host1x = drm->dev_private;
27 struct drm_fbdev_cma *fbdev;
28
29 drm->mode_config.min_width = 0;
30 drm->mode_config.min_height = 0;
31
32 drm->mode_config.max_width = 4096;
33 drm->mode_config.max_height = 4096;
34
35 drm->mode_config.funcs = &tegra_drm_mode_funcs;
36
37 fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
38 drm->mode_config.num_connector);
39 if (IS_ERR(fbdev))
40 return PTR_ERR(fbdev);
41
42 host1x->fbdev = fbdev;
43
44 return 0;
45}
46
47void tegra_drm_fb_exit(struct drm_device *drm)
48{
49 struct host1x *host1x = drm->dev_private;
50
51 drm_fbdev_cma_fini(host1x->fbdev);
52}
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
deleted file mode 100644
index 92e25a7e00ea..000000000000
--- a/drivers/gpu/drm/tegra/host1x.c
+++ /dev/null
@@ -1,327 +0,0 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/err.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15
16#include "drm.h"
17
18struct host1x_drm_client {
19 struct host1x_client *client;
20 struct device_node *np;
21 struct list_head list;
22};
23
24static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np)
25{
26 struct host1x_drm_client *client;
27
28 client = kzalloc(sizeof(*client), GFP_KERNEL);
29 if (!client)
30 return -ENOMEM;
31
32 INIT_LIST_HEAD(&client->list);
33 client->np = of_node_get(np);
34
35 list_add_tail(&client->list, &host1x->drm_clients);
36
37 return 0;
38}
39
40static int host1x_activate_drm_client(struct host1x *host1x,
41 struct host1x_drm_client *drm,
42 struct host1x_client *client)
43{
44 mutex_lock(&host1x->drm_clients_lock);
45 list_del_init(&drm->list);
46 list_add_tail(&drm->list, &host1x->drm_active);
47 drm->client = client;
48 mutex_unlock(&host1x->drm_clients_lock);
49
50 return 0;
51}
52
53static int host1x_remove_drm_client(struct host1x *host1x,
54 struct host1x_drm_client *client)
55{
56 mutex_lock(&host1x->drm_clients_lock);
57 list_del_init(&client->list);
58 mutex_unlock(&host1x->drm_clients_lock);
59
60 of_node_put(client->np);
61 kfree(client);
62
63 return 0;
64}
65
66static int host1x_parse_dt(struct host1x *host1x)
67{
68 static const char * const compat[] = {
69 "nvidia,tegra20-dc",
70 "nvidia,tegra20-hdmi",
71 "nvidia,tegra30-dc",
72 "nvidia,tegra30-hdmi",
73 };
74 unsigned int i;
75 int err;
76
77 for (i = 0; i < ARRAY_SIZE(compat); i++) {
78 struct device_node *np;
79
80 for_each_child_of_node(host1x->dev->of_node, np) {
81 if (of_device_is_compatible(np, compat[i]) &&
82 of_device_is_available(np)) {
83 err = host1x_add_drm_client(host1x, np);
84 if (err < 0)
85 return err;
86 }
87 }
88 }
89
90 return 0;
91}
92
93static int tegra_host1x_probe(struct platform_device *pdev)
94{
95 struct host1x *host1x;
96 struct resource *regs;
97 int err;
98
99 host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
100 if (!host1x)
101 return -ENOMEM;
102
103 mutex_init(&host1x->drm_clients_lock);
104 INIT_LIST_HEAD(&host1x->drm_clients);
105 INIT_LIST_HEAD(&host1x->drm_active);
106 mutex_init(&host1x->clients_lock);
107 INIT_LIST_HEAD(&host1x->clients);
108 host1x->dev = &pdev->dev;
109
110 err = host1x_parse_dt(host1x);
111 if (err < 0) {
112 dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
113 return err;
114 }
115
116 host1x->clk = devm_clk_get(&pdev->dev, NULL);
117 if (IS_ERR(host1x->clk))
118 return PTR_ERR(host1x->clk);
119
120 err = clk_prepare_enable(host1x->clk);
121 if (err < 0)
122 return err;
123
124 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
125 if (!regs) {
126 err = -ENXIO;
127 goto err;
128 }
129
130 err = platform_get_irq(pdev, 0);
131 if (err < 0)
132 goto err;
133
134 host1x->syncpt = err;
135
136 err = platform_get_irq(pdev, 1);
137 if (err < 0)
138 goto err;
139
140 host1x->irq = err;
141
142 host1x->regs = devm_ioremap_resource(&pdev->dev, regs);
143 if (IS_ERR(host1x->regs)) {
144 err = PTR_ERR(host1x->regs);
145 goto err;
146 }
147
148 platform_set_drvdata(pdev, host1x);
149
150 return 0;
151
152err:
153 clk_disable_unprepare(host1x->clk);
154 return err;
155}
156
157static int tegra_host1x_remove(struct platform_device *pdev)
158{
159 struct host1x *host1x = platform_get_drvdata(pdev);
160
161 clk_disable_unprepare(host1x->clk);
162
163 return 0;
164}
165
166int host1x_drm_init(struct host1x *host1x, struct drm_device *drm)
167{
168 struct host1x_client *client;
169
170 mutex_lock(&host1x->clients_lock);
171
172 list_for_each_entry(client, &host1x->clients, list) {
173 if (client->ops && client->ops->drm_init) {
174 int err = client->ops->drm_init(client, drm);
175 if (err < 0) {
176 dev_err(host1x->dev,
177 "DRM setup failed for %s: %d\n",
178 dev_name(client->dev), err);
179 return err;
180 }
181 }
182 }
183
184 mutex_unlock(&host1x->clients_lock);
185
186 return 0;
187}
188
189int host1x_drm_exit(struct host1x *host1x)
190{
191 struct platform_device *pdev = to_platform_device(host1x->dev);
192 struct host1x_client *client;
193
194 if (!host1x->drm)
195 return 0;
196
197 mutex_lock(&host1x->clients_lock);
198
199 list_for_each_entry_reverse(client, &host1x->clients, list) {
200 if (client->ops && client->ops->drm_exit) {
201 int err = client->ops->drm_exit(client);
202 if (err < 0) {
203 dev_err(host1x->dev,
204 "DRM cleanup failed for %s: %d\n",
205 dev_name(client->dev), err);
206 return err;
207 }
208 }
209 }
210
211 mutex_unlock(&host1x->clients_lock);
212
213 drm_platform_exit(&tegra_drm_driver, pdev);
214 host1x->drm = NULL;
215
216 return 0;
217}
218
219int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
220{
221 struct host1x_drm_client *drm, *tmp;
222 int err;
223
224 mutex_lock(&host1x->clients_lock);
225 list_add_tail(&client->list, &host1x->clients);
226 mutex_unlock(&host1x->clients_lock);
227
228 list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
229 if (drm->np == client->dev->of_node)
230 host1x_activate_drm_client(host1x, drm, client);
231
232 if (list_empty(&host1x->drm_clients)) {
233 struct platform_device *pdev = to_platform_device(host1x->dev);
234
235 err = drm_platform_init(&tegra_drm_driver, pdev);
236 if (err < 0) {
237 dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
238 return err;
239 }
240 }
241
242 client->host1x = host1x;
243
244 return 0;
245}
246
247int host1x_unregister_client(struct host1x *host1x,
248 struct host1x_client *client)
249{
250 struct host1x_drm_client *drm, *tmp;
251 int err;
252
253 list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
254 if (drm->client == client) {
255 err = host1x_drm_exit(host1x);
256 if (err < 0) {
257 dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
258 err);
259 return err;
260 }
261
262 host1x_remove_drm_client(host1x, drm);
263 break;
264 }
265 }
266
267 mutex_lock(&host1x->clients_lock);
268 list_del_init(&client->list);
269 mutex_unlock(&host1x->clients_lock);
270
271 return 0;
272}
273
274static struct of_device_id tegra_host1x_of_match[] = {
275 { .compatible = "nvidia,tegra30-host1x", },
276 { .compatible = "nvidia,tegra20-host1x", },
277 { },
278};
279MODULE_DEVICE_TABLE(of, tegra_host1x_of_match);
280
281struct platform_driver tegra_host1x_driver = {
282 .driver = {
283 .name = "tegra-host1x",
284 .owner = THIS_MODULE,
285 .of_match_table = tegra_host1x_of_match,
286 },
287 .probe = tegra_host1x_probe,
288 .remove = tegra_host1x_remove,
289};
290
291static int __init tegra_host1x_init(void)
292{
293 int err;
294
295 err = platform_driver_register(&tegra_host1x_driver);
296 if (err < 0)
297 return err;
298
299 err = platform_driver_register(&tegra_dc_driver);
300 if (err < 0)
301 goto unregister_host1x;
302
303 err = platform_driver_register(&tegra_hdmi_driver);
304 if (err < 0)
305 goto unregister_dc;
306
307 return 0;
308
309unregister_dc:
310 platform_driver_unregister(&tegra_dc_driver);
311unregister_host1x:
312 platform_driver_unregister(&tegra_host1x_driver);
313 return err;
314}
315module_init(tegra_host1x_init);
316
317static void __exit tegra_host1x_exit(void)
318{
319 platform_driver_unregister(&tegra_hdmi_driver);
320 platform_driver_unregister(&tegra_dc_driver);
321 platform_driver_unregister(&tegra_host1x_driver);
322}
323module_exit(tegra_host1x_exit);
324
325MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
326MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
327MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
new file mode 100644
index 000000000000..ccfd42b23606
--- /dev/null
+++ b/drivers/gpu/host1x/Kconfig
@@ -0,0 +1,24 @@
1config TEGRA_HOST1X
2 tristate "NVIDIA Tegra host1x driver"
3 depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
4 help
5 Driver for the NVIDIA Tegra host1x hardware.
6
7 The Tegra host1x module is the DMA engine for register access to
8 Tegra's graphics- and multimedia-related modules. The modules served
9 by host1x are referred to as clients. host1x includes some other
10 functionality, such as synchronization.
11
12if TEGRA_HOST1X
13
14config TEGRA_HOST1X_FIREWALL
15 bool "Enable HOST1X security firewall"
16 default y
17 help
18 Say yes if kernel should protect command streams from tampering.
19
20 If unsure, choose Y.
21
22source "drivers/gpu/host1x/drm/Kconfig"
23
24endif
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
new file mode 100644
index 000000000000..3b037b6e0298
--- /dev/null
+++ b/drivers/gpu/host1x/Makefile
@@ -0,0 +1,20 @@
1ccflags-y = -Idrivers/gpu/host1x
2
3host1x-y = \
4 syncpt.o \
5 dev.o \
6 intr.o \
7 cdma.o \
8 channel.o \
9 job.o \
10 debug.o \
11 hw/host1x01.o
12
13ccflags-y += -Iinclude/drm
14ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
15
16host1x-$(CONFIG_DRM_TEGRA) += drm/drm.o drm/fb.o drm/dc.o
17host1x-$(CONFIG_DRM_TEGRA) += drm/output.o drm/rgb.o drm/hdmi.o
18host1x-$(CONFIG_DRM_TEGRA) += drm/gem.o
19host1x-$(CONFIG_DRM_TEGRA) += drm/gr2d.o
20obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
new file mode 100644
index 000000000000..de72172d3b5f
--- /dev/null
+++ b/drivers/gpu/host1x/cdma.c
@@ -0,0 +1,491 @@
1/*
2 * Tegra host1x Command DMA
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19
20#include <asm/cacheflush.h>
21#include <linux/device.h>
22#include <linux/dma-mapping.h>
23#include <linux/interrupt.h>
24#include <linux/kernel.h>
25#include <linux/kfifo.h>
26#include <linux/slab.h>
27#include <trace/events/host1x.h>
28
29#include "cdma.h"
30#include "channel.h"
31#include "dev.h"
32#include "debug.h"
33#include "host1x_bo.h"
34#include "job.h"
35
36/*
37 * push_buffer
38 *
39 * The push buffer is a circular array of words to be fetched by command DMA.
40 * Note that it works slightly differently to the sync queue; fence == pos
41 * means that the push buffer is full, not empty.
42 */
43
44#define HOST1X_PUSHBUFFER_SLOTS 512
45
46/*
47 * Clean up push buffer resources
48 */
49static void host1x_pushbuffer_destroy(struct push_buffer *pb)
50{
51 struct host1x_cdma *cdma = pb_to_cdma(pb);
52 struct host1x *host1x = cdma_to_host1x(cdma);
53
54 if (pb->phys != 0)
55 dma_free_writecombine(host1x->dev, pb->size_bytes + 4,
56 pb->mapped, pb->phys);
57
58 pb->mapped = NULL;
59 pb->phys = 0;
60}
61
62/*
63 * Init push buffer resources
64 */
65static int host1x_pushbuffer_init(struct push_buffer *pb)
66{
67 struct host1x_cdma *cdma = pb_to_cdma(pb);
68 struct host1x *host1x = cdma_to_host1x(cdma);
69
70 pb->mapped = NULL;
71 pb->phys = 0;
72 pb->size_bytes = HOST1X_PUSHBUFFER_SLOTS * 8;
73
74 /* initialize buffer pointers */
75 pb->fence = pb->size_bytes - 8;
76 pb->pos = 0;
77
78 /* allocate and map pushbuffer memory */
79 pb->mapped = dma_alloc_writecombine(host1x->dev, pb->size_bytes + 4,
80 &pb->phys, GFP_KERNEL);
81 if (!pb->mapped)
82 goto fail;
83
84 host1x_hw_pushbuffer_init(host1x, pb);
85
86 return 0;
87
88fail:
89 host1x_pushbuffer_destroy(pb);
90 return -ENOMEM;
91}
92
93/*
94 * Push two words to the push buffer
95 * Caller must ensure push buffer is not full
96 */
97static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
98{
99 u32 pos = pb->pos;
100 u32 *p = (u32 *)((u32)pb->mapped + pos);
101 WARN_ON(pos == pb->fence);
102 *(p++) = op1;
103 *(p++) = op2;
104 pb->pos = (pos + 8) & (pb->size_bytes - 1);
105}
106
107/*
108 * Pop a number of two word slots from the push buffer
109 * Caller must ensure push buffer is not empty
110 */
111static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
112{
113 /* Advance the next write position */
114 pb->fence = (pb->fence + slots * 8) & (pb->size_bytes - 1);
115}
116
117/*
118 * Return the number of two word slots free in the push buffer
119 */
120static u32 host1x_pushbuffer_space(struct push_buffer *pb)
121{
122 return ((pb->fence - pb->pos) & (pb->size_bytes - 1)) / 8;
123}
124
125/*
126 * Sleep (if necessary) until the requested event happens
127 * - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
128 * - Returns 1
129 * - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
130 * - Return the amount of space (> 0)
131 * Must be called with the cdma lock held.
132 */
133unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
134 enum cdma_event event)
135{
136 for (;;) {
137 unsigned int space;
138
139 if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
140 space = list_empty(&cdma->sync_queue) ? 1 : 0;
141 else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) {
142 struct push_buffer *pb = &cdma->push_buffer;
143 space = host1x_pushbuffer_space(pb);
144 } else {
145 WARN_ON(1);
146 return -EINVAL;
147 }
148
149 if (space)
150 return space;
151
152 trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
153 event);
154
155 /* If somebody has managed to already start waiting, yield */
156 if (cdma->event != CDMA_EVENT_NONE) {
157 mutex_unlock(&cdma->lock);
158 schedule();
159 mutex_lock(&cdma->lock);
160 continue;
161 }
162 cdma->event = event;
163
164 mutex_unlock(&cdma->lock);
165 down(&cdma->sem);
166 mutex_lock(&cdma->lock);
167 }
168 return 0;
169}
170
171/*
172 * Start timer that tracks the time spent by the job.
173 * Must be called with the cdma lock held.
174 */
175static void cdma_start_timer_locked(struct host1x_cdma *cdma,
176 struct host1x_job *job)
177{
178 struct host1x *host = cdma_to_host1x(cdma);
179
180 if (cdma->timeout.client) {
181 /* timer already started */
182 return;
183 }
184
185 cdma->timeout.client = job->client;
186 cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
187 cdma->timeout.syncpt_val = job->syncpt_end;
188 cdma->timeout.start_ktime = ktime_get();
189
190 schedule_delayed_work(&cdma->timeout.wq,
191 msecs_to_jiffies(job->timeout));
192}
193
194/*
195 * Stop timer when a buffer submission completes.
196 * Must be called with the cdma lock held.
197 */
198static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
199{
200 cancel_delayed_work(&cdma->timeout.wq);
201 cdma->timeout.client = 0;
202}
203
204/*
205 * For all sync queue entries that have already finished according to the
206 * current sync point registers:
207 * - unpin & unref their mems
208 * - pop their push buffer slots
209 * - remove them from the sync queue
210 * This is normally called from the host code's worker thread, but can be
211 * called manually if necessary.
212 * Must be called with the cdma lock held.
213 */
214static void update_cdma_locked(struct host1x_cdma *cdma)
215{
216 bool signal = false;
217 struct host1x *host1x = cdma_to_host1x(cdma);
218 struct host1x_job *job, *n;
219
220 /* If CDMA is stopped, queue is cleared and we can return */
221 if (!cdma->running)
222 return;
223
224 /*
225 * Walk the sync queue, reading the sync point registers as necessary,
226 * to consume as many sync queue entries as possible without blocking
227 */
228 list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
229 struct host1x_syncpt *sp =
230 host1x_syncpt_get(host1x, job->syncpt_id);
231
232 /* Check whether this syncpt has completed, and bail if not */
233 if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
234 /* Start timer on next pending syncpt */
235 if (job->timeout)
236 cdma_start_timer_locked(cdma, job);
237 break;
238 }
239
240 /* Cancel timeout, when a buffer completes */
241 if (cdma->timeout.client)
242 stop_cdma_timer_locked(cdma);
243
244 /* Unpin the memory */
245 host1x_job_unpin(job);
246
247 /* Pop push buffer slots */
248 if (job->num_slots) {
249 struct push_buffer *pb = &cdma->push_buffer;
250 host1x_pushbuffer_pop(pb, job->num_slots);
251 if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
252 signal = true;
253 }
254
255 list_del(&job->list);
256 host1x_job_put(job);
257 }
258
259 if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY &&
260 list_empty(&cdma->sync_queue))
261 signal = true;
262
263 if (signal) {
264 cdma->event = CDMA_EVENT_NONE;
265 up(&cdma->sem);
266 }
267}
268
/*
 * host1x_cdma_update_sync_queue() - recover the sync queue after a timeout
 * @cdma: CDMA instance whose channel has been frozen by the timeout handler
 * @dev: device used for diagnostic (dev_dbg/dump) output
 *
 * Skips buffers that already completed, CPU-increments the syncpoints of
 * the timed-out client's remaining buffers so they appear finished, caps
 * the timeout of its later submits, then resumes DMA past the bad submit.
 * Called with the channel stopped; runs under the CDMA timeout machinery.
 */
void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
				   struct device *dev)
{
	u32 restart_addr;
	u32 syncpt_incrs;
	struct host1x_job *job = NULL;
	u32 syncpt_val;
	struct host1x *host1x = cdma_to_host1x(cdma);

	syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);

	dev_dbg(dev, "%s: starting cleanup (thresh %d)\n",
		__func__, syncpt_val);

	/*
	 * Move the sync_queue read pointer to the first entry that hasn't
	 * completed based on the current HW syncpt value. It's likely there
	 * won't be any (i.e. we're still at the head), but covers the case
	 * where a syncpt incr happens just prior/during the teardown.
	 */

	dev_dbg(dev, "%s: skip completed buffers still in sync_queue\n",
		__func__);

	list_for_each_entry(job, &cdma->sync_queue, list) {
		if (syncpt_val < job->syncpt_end)
			break;

		host1x_job_dump(dev, job);
	}

	/*
	 * Walk the sync_queue, first incrementing with the CPU syncpts that
	 * are partially executed (the first buffer) or fully skipped while
	 * still in the current context (slots are also NOP-ed).
	 *
	 * At the point contexts are interleaved, syncpt increments must be
	 * done inline with the pushbuffer from a GATHER buffer to maintain
	 * the order (slots are modified to be a GATHER of syncpt incrs).
	 *
	 * Note: save in restart_addr the location where the timed out buffer
	 * started in the PB, so we can start the refetch from there (with the
	 * modified NOP-ed PB slots). This lets things appear to have completed
	 * properly for this buffer and resources are freed.
	 */

	dev_dbg(dev, "%s: perform CPU incr on pending same ctx buffers\n",
		__func__);

	/*
	 * NOTE(review): if the skip loop above ran to completion, "job"
	 * points at the list head (not a real entry) and job->first_get
	 * below reads past it. This appears to rely on the timed-out job
	 * never having completed — TODO confirm that invariant.
	 */
	if (!list_empty(&cdma->sync_queue))
		restart_addr = job->first_get;
	else
		restart_addr = cdma->last_pos;

	/* do CPU increments as long as this context continues */
	list_for_each_entry_from(job, &cdma->sync_queue, list) {
		/* different context, gets us out of this loop */
		if (job->client != cdma->timeout.client)
			break;

		/* won't need a timeout when replayed */
		job->timeout = 0;

		syncpt_incrs = job->syncpt_end - syncpt_val;
		dev_dbg(dev, "%s: CPU incr (%d)\n", __func__, syncpt_incrs);

		host1x_job_dump(dev, job);

		/* safe to use CPU to incr syncpts */
		host1x_hw_cdma_timeout_cpu_incr(host1x, cdma, job->first_get,
						syncpt_incrs, job->syncpt_end,
						job->num_slots);

		syncpt_val += syncpt_incrs;
	}

	/* The following submits from the same client may be dependent on the
	 * failed submit and therefore they may fail. Force a small timeout
	 * to make the queue cleanup faster */

	list_for_each_entry_from(job, &cdma->sync_queue, list)
		if (job->client == cdma->timeout.client)
			job->timeout = min_t(unsigned int, job->timeout, 500);

	dev_dbg(dev, "%s: finished sync_queue modification\n", __func__);

	/* roll back DMAGET and start up channel again */
	host1x_hw_cdma_resume(host1x, cdma, restart_addr);
}
358
359/*
360 * Create a cdma
361 */
362int host1x_cdma_init(struct host1x_cdma *cdma)
363{
364 int err;
365
366 mutex_init(&cdma->lock);
367 sema_init(&cdma->sem, 0);
368
369 INIT_LIST_HEAD(&cdma->sync_queue);
370
371 cdma->event = CDMA_EVENT_NONE;
372 cdma->running = false;
373 cdma->torndown = false;
374
375 err = host1x_pushbuffer_init(&cdma->push_buffer);
376 if (err)
377 return err;
378 return 0;
379}
380
381/*
382 * Destroy a cdma
383 */
384int host1x_cdma_deinit(struct host1x_cdma *cdma)
385{
386 struct push_buffer *pb = &cdma->push_buffer;
387 struct host1x *host1x = cdma_to_host1x(cdma);
388
389 if (cdma->running) {
390 pr_warn("%s: CDMA still running\n", __func__);
391 return -EBUSY;
392 }
393
394 host1x_pushbuffer_destroy(pb);
395 host1x_hw_cdma_timeout_destroy(host1x, cdma);
396
397 return 0;
398}
399
400/*
401 * Begin a cdma submit
402 */
403int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
404{
405 struct host1x *host1x = cdma_to_host1x(cdma);
406
407 mutex_lock(&cdma->lock);
408
409 if (job->timeout) {
410 /* init state on first submit with timeout value */
411 if (!cdma->timeout.initialized) {
412 int err;
413 err = host1x_hw_cdma_timeout_init(host1x, cdma,
414 job->syncpt_id);
415 if (err) {
416 mutex_unlock(&cdma->lock);
417 return err;
418 }
419 }
420 }
421 if (!cdma->running)
422 host1x_hw_cdma_start(host1x, cdma);
423
424 cdma->slots_free = 0;
425 cdma->slots_used = 0;
426 cdma->first_get = cdma->push_buffer.pos;
427
428 trace_host1x_cdma_begin(dev_name(job->channel->dev));
429 return 0;
430}
431
432/*
433 * Push two words into a push buffer slot
434 * Blocks as necessary if the push buffer is full.
435 */
436void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
437{
438 struct host1x *host1x = cdma_to_host1x(cdma);
439 struct push_buffer *pb = &cdma->push_buffer;
440 u32 slots_free = cdma->slots_free;
441
442 if (host1x_debug_trace_cmdbuf)
443 trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
444 op1, op2);
445
446 if (slots_free == 0) {
447 host1x_hw_cdma_flush(host1x, cdma);
448 slots_free = host1x_cdma_wait_locked(cdma,
449 CDMA_EVENT_PUSH_BUFFER_SPACE);
450 }
451 cdma->slots_free = slots_free - 1;
452 cdma->slots_used++;
453 host1x_pushbuffer_push(pb, op1, op2);
454}
455
456/*
457 * End a cdma submit
458 * Kick off DMA, add job to the sync queue, and a number of slots to be freed
459 * from the pushbuffer. The handles for a submit must all be pinned at the same
460 * time, but they can be unpinned in smaller chunks.
461 */
462void host1x_cdma_end(struct host1x_cdma *cdma,
463 struct host1x_job *job)
464{
465 struct host1x *host1x = cdma_to_host1x(cdma);
466 bool idle = list_empty(&cdma->sync_queue);
467
468 host1x_hw_cdma_flush(host1x, cdma);
469
470 job->first_get = cdma->first_get;
471 job->num_slots = cdma->slots_used;
472 host1x_job_get(job);
473 list_add_tail(&job->list, &cdma->sync_queue);
474
475 /* start timer on idle -> active transitions */
476 if (job->timeout && idle)
477 cdma_start_timer_locked(cdma, job);
478
479 trace_host1x_cdma_end(dev_name(job->channel->dev));
480 mutex_unlock(&cdma->lock);
481}
482
/*
 * Update CDMA state according to current sync point values: takes the
 * CDMA lock and lets update_cdma_locked() reclaim completed sync-queue
 * entries and push buffer slots.
 */
void host1x_cdma_update(struct host1x_cdma *cdma)
{
	mutex_lock(&cdma->lock);
	update_cdma_locked(cdma);
	mutex_unlock(&cdma->lock);
}
diff --git a/drivers/gpu/host1x/cdma.h b/drivers/gpu/host1x/cdma.h
new file mode 100644
index 000000000000..313c4b784348
--- /dev/null
+++ b/drivers/gpu/host1x/cdma.h
@@ -0,0 +1,100 @@
1/*
2 * Tegra host1x Command DMA
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_CDMA_H
20#define __HOST1X_CDMA_H
21
22#include <linux/sched.h>
23#include <linux/semaphore.h>
24#include <linux/list.h>
25
26struct host1x_syncpt;
27struct host1x_userctx_timeout;
28struct host1x_job;
29
30/*
31 * cdma
32 *
33 * This is in charge of a host command DMA channel.
34 * Sends ops to a push buffer, and takes responsibility for unpinning
35 * (& possibly freeing) of memory after those ops have completed.
36 * Producer:
37 * begin
38 * push - send ops to the push buffer
39 * end - start command DMA and enqueue handles to be unpinned
40 * Consumer:
41 * update - call to update sync queue and push buffer, unpin memory
42 */
43
/* Circular buffer of opcode pairs fed to the channel's command DMA. */
struct push_buffer {
	u32 *mapped;			/* mapped pushbuffer memory */
	dma_addr_t phys;		/* physical address of pushbuffer */
	u32 fence;			/* index we've written */
	u32 pos;			/* index to write to */
	u32 size_bytes;
};

/* Per-channel submit-timeout bookkeeping. */
struct buffer_timeout {
	struct delayed_work wq;		/* work queue */
	bool initialized;		/* timer one-time setup flag */
	struct host1x_syncpt *syncpt;	/* buffer completion syncpt */
	u32 syncpt_val;			/* syncpt value when completed */
	ktime_t start_ktime;		/* starting time */
	/* context timeout information */
	int client;
};

/* What a thread sleeping on host1x_cdma::sem is waiting for. */
enum cdma_event {
	CDMA_EVENT_NONE,		/* not waiting for any event */
	CDMA_EVENT_SYNC_QUEUE_EMPTY,	/* wait for empty sync queue */
	CDMA_EVENT_PUSH_BUFFER_SPACE	/* wait for space in push buffer */
};

struct host1x_cdma {
	struct mutex lock;		/* controls access to shared state */
	struct semaphore sem;		/* signalled when event occurs */
	enum cdma_event event;		/* event that sem is waiting for */
	unsigned int slots_used;	/* pb slots used in current submit */
	unsigned int slots_free;	/* pb slots free in current submit */
	unsigned int first_get;		/* DMAGET value, where submit begins */
	unsigned int last_pos;		/* last value written to DMAPUT */
	struct push_buffer push_buffer;	/* channel's push buffer */
	struct list_head sync_queue;	/* job queue */
	struct buffer_timeout timeout;	/* channel's timeout state/wq */
	bool running;
	bool torndown;
};

/* navigate between a cdma, its owning channel and the host1x instance */
#define cdma_to_channel(cdma) container_of(cdma, struct host1x_channel, cdma)
#define cdma_to_host1x(cdma) dev_get_drvdata(cdma_to_channel(cdma)->dev->parent)
#define pb_to_cdma(pb) container_of(pb, struct host1x_cdma, push_buffer)

int host1x_cdma_init(struct host1x_cdma *cdma);
int host1x_cdma_deinit(struct host1x_cdma *cdma);
void host1x_cdma_stop(struct host1x_cdma *cdma);
int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job);
void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2);
void host1x_cdma_end(struct host1x_cdma *cdma, struct host1x_job *job);
void host1x_cdma_update(struct host1x_cdma *cdma);
void host1x_cdma_peek(struct host1x_cdma *cdma, u32 dmaget, int slot,
		      u32 *out);
unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
				     enum cdma_event event);
void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
				   struct device *dev);
#endif
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
new file mode 100644
index 000000000000..83ea51b9f0fc
--- /dev/null
+++ b/drivers/gpu/host1x/channel.c
@@ -0,0 +1,126 @@
1/*
2 * Tegra host1x Channel
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/slab.h>
20#include <linux/module.h>
21
22#include "channel.h"
23#include "dev.h"
24#include "job.h"
25
26/* Constructor for the host1x device list */
27int host1x_channel_list_init(struct host1x *host)
28{
29 INIT_LIST_HEAD(&host->chlist.list);
30 mutex_init(&host->chlist_mutex);
31
32 if (host->info->nb_channels > BITS_PER_LONG) {
33 WARN(1, "host1x hardware has more channels than supported by the driver\n");
34 return -ENOSYS;
35 }
36
37 return 0;
38}
39
40int host1x_job_submit(struct host1x_job *job)
41{
42 struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
43
44 return host1x_hw_channel_submit(host, job);
45}
46
47struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
48{
49 int err = 0;
50
51 mutex_lock(&channel->reflock);
52
53 if (channel->refcount == 0)
54 err = host1x_cdma_init(&channel->cdma);
55
56 if (!err)
57 channel->refcount++;
58
59 mutex_unlock(&channel->reflock);
60
61 return err ? NULL : channel;
62}
63
64void host1x_channel_put(struct host1x_channel *channel)
65{
66 mutex_lock(&channel->reflock);
67
68 if (channel->refcount == 1) {
69 struct host1x *host = dev_get_drvdata(channel->dev->parent);
70
71 host1x_hw_cdma_stop(host, &channel->cdma);
72 host1x_cdma_deinit(&channel->cdma);
73 }
74
75 channel->refcount--;
76
77 mutex_unlock(&channel->reflock);
78}
79
80struct host1x_channel *host1x_channel_request(struct device *dev)
81{
82 struct host1x *host = dev_get_drvdata(dev->parent);
83 int max_channels = host->info->nb_channels;
84 struct host1x_channel *channel = NULL;
85 int index, err;
86
87 mutex_lock(&host->chlist_mutex);
88
89 index = find_first_zero_bit(&host->allocated_channels, max_channels);
90 if (index >= max_channels)
91 goto fail;
92
93 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
94 if (!channel)
95 goto fail;
96
97 err = host1x_hw_channel_init(host, channel, index);
98 if (err < 0)
99 goto fail;
100
101 /* Link device to host1x_channel */
102 channel->dev = dev;
103
104 /* Add to channel list */
105 list_add_tail(&channel->list, &host->chlist.list);
106
107 host->allocated_channels |= BIT(index);
108
109 mutex_unlock(&host->chlist_mutex);
110 return channel;
111
112fail:
113 dev_err(dev, "failed to init channel\n");
114 kfree(channel);
115 mutex_unlock(&host->chlist_mutex);
116 return NULL;
117}
118
119void host1x_channel_free(struct host1x_channel *channel)
120{
121 struct host1x *host = dev_get_drvdata(channel->dev->parent);
122
123 host->allocated_channels &= ~BIT(channel->id);
124 list_del(&channel->list);
125 kfree(channel);
126}
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
new file mode 100644
index 000000000000..48723b8eea42
--- /dev/null
+++ b/drivers/gpu/host1x/channel.h
@@ -0,0 +1,52 @@
1/*
2 * Tegra host1x Channel
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_CHANNEL_H
20#define __HOST1X_CHANNEL_H
21
22#include <linux/io.h>
23
24#include "cdma.h"
25
26struct host1x;
27
/*
 * One hardware command stream: its register window, its CDMA state and
 * the client device currently bound to it. Reference counted via
 * host1x_channel_get()/host1x_channel_put().
 */
struct host1x_channel {
	struct list_head list;		/* entry in the host's channel list */

	unsigned int refcount;		/* users holding the channel */
	unsigned int id;		/* index into allocated_channels bitmap */
	struct mutex reflock;		/* protects refcount */
	struct mutex submitlock;	/* presumably serializes submits — confirm in hw backend */
	void __iomem *regs;		/* channel register window */
	struct device *dev;		/* client device owning this channel */
	struct host1x_cdma cdma;	/* command DMA state */
};

/* channel list operations */
int host1x_channel_list_init(struct host1x *host);

struct host1x_channel *host1x_channel_request(struct device *dev);
void host1x_channel_free(struct host1x_channel *channel);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);

#define host1x_for_each_channel(host, channel)	\
	list_for_each_entry(channel, &host->chlist.list, list)

#endif
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
new file mode 100644
index 000000000000..3ec7d77de24d
--- /dev/null
+++ b/drivers/gpu/host1x/debug.c
@@ -0,0 +1,210 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 * Author: Erik Gilling <konkers@android.com>
4 *
5 * Copyright (C) 2011-2013 NVIDIA Corporation
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/debugfs.h>
19#include <linux/seq_file.h>
20#include <linux/uaccess.h>
21
22#include <linux/io.h>
23
24#include "dev.h"
25#include "debug.h"
26#include "channel.h"
27
28unsigned int host1x_debug_trace_cmdbuf;
29
30static pid_t host1x_debug_force_timeout_pid;
31static u32 host1x_debug_force_timeout_val;
32static u32 host1x_debug_force_timeout_channel;
33
34void host1x_debug_output(struct output *o, const char *fmt, ...)
35{
36 va_list args;
37 int len;
38
39 va_start(args, fmt);
40 len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
41 va_end(args);
42 o->fn(o->ctx, o->buf, len);
43}
44
45static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
46{
47 struct host1x *m = dev_get_drvdata(ch->dev->parent);
48 struct output *o = data;
49
50 mutex_lock(&ch->reflock);
51 if (ch->refcount) {
52 mutex_lock(&ch->cdma.lock);
53 if (show_fifo)
54 host1x_hw_show_channel_fifo(m, ch, o);
55 host1x_hw_show_channel_cdma(m, ch, o);
56 mutex_unlock(&ch->cdma.lock);
57 }
58 mutex_unlock(&ch->reflock);
59
60 return 0;
61}
62
63static void show_syncpts(struct host1x *m, struct output *o)
64{
65 int i;
66 host1x_debug_output(o, "---- syncpts ----\n");
67 for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
68 u32 max = host1x_syncpt_read_max(m->syncpt + i);
69 u32 min = host1x_syncpt_load(m->syncpt + i);
70 if (!min && !max)
71 continue;
72 host1x_debug_output(o, "id %d (%s) min %d max %d\n",
73 i, m->syncpt[i].name, min, max);
74 }
75
76 for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
77 u32 base_val;
78 base_val = host1x_syncpt_load_wait_base(m->syncpt + i);
79 if (base_val)
80 host1x_debug_output(o, "waitbase id %d val %d\n", i,
81 base_val);
82 }
83
84 host1x_debug_output(o, "\n");
85}
86
87static void show_all(struct host1x *m, struct output *o)
88{
89 struct host1x_channel *ch;
90
91 host1x_hw_show_mlocks(m, o);
92 show_syncpts(m, o);
93 host1x_debug_output(o, "---- channels ----\n");
94
95 host1x_for_each_channel(m, ch)
96 show_channels(ch, o, true);
97}
98
99#ifdef CONFIG_DEBUG_FS
100static void show_all_no_fifo(struct host1x *host1x, struct output *o)
101{
102 struct host1x_channel *ch;
103
104 host1x_hw_show_mlocks(host1x, o);
105 show_syncpts(host1x, o);
106 host1x_debug_output(o, "---- channels ----\n");
107
108 host1x_for_each_channel(host1x, ch)
109 show_channels(ch, o, false);
110}
111
112static int host1x_debug_show_all(struct seq_file *s, void *unused)
113{
114 struct output o = {
115 .fn = write_to_seqfile,
116 .ctx = s
117 };
118 show_all(s->private, &o);
119 return 0;
120}
121
122static int host1x_debug_show(struct seq_file *s, void *unused)
123{
124 struct output o = {
125 .fn = write_to_seqfile,
126 .ctx = s
127 };
128 show_all_no_fifo(s->private, &o);
129 return 0;
130}
131
/* debugfs open: bind the full (FIFO-included) dump to this file */
static int host1x_debug_open_all(struct inode *inode, struct file *file)
{
	return single_open(file, host1x_debug_show_all, inode->i_private);
}

/* file operations for the "status_all" debugfs entry */
static const struct file_operations host1x_debug_all_fops = {
	.open = host1x_debug_open_all,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
143
/* debugfs open: bind the FIFO-less dump to this file */
static int host1x_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, host1x_debug_show, inode->i_private);
}

/* file operations for the "status" debugfs entry */
static const struct file_operations host1x_debug_fops = {
	.open = host1x_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
155
156void host1x_debug_init(struct host1x *host1x)
157{
158 struct dentry *de = debugfs_create_dir("tegra-host1x", NULL);
159
160 if (!de)
161 return;
162
163 /* Store the created entry */
164 host1x->debugfs = de;
165
166 debugfs_create_file("status", S_IRUGO, de, host1x, &host1x_debug_fops);
167 debugfs_create_file("status_all", S_IRUGO, de, host1x,
168 &host1x_debug_all_fops);
169
170 debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR, de,
171 &host1x_debug_trace_cmdbuf);
172
173 host1x_hw_debug_init(host1x, de);
174
175 debugfs_create_u32("force_timeout_pid", S_IRUGO|S_IWUSR, de,
176 &host1x_debug_force_timeout_pid);
177 debugfs_create_u32("force_timeout_val", S_IRUGO|S_IWUSR, de,
178 &host1x_debug_force_timeout_val);
179 debugfs_create_u32("force_timeout_channel", S_IRUGO|S_IWUSR, de,
180 &host1x_debug_force_timeout_channel);
181}
182
/* Remove the debugfs directory created by host1x_debug_init(). */
void host1x_debug_deinit(struct host1x *host1x)
{
	debugfs_remove_recursive(host1x->debugfs);
}
#else
/* no-op stubs when debugfs is disabled */
void host1x_debug_init(struct host1x *host1x)
{
}
void host1x_debug_deinit(struct host1x *host1x)
{
}
#endif
195
196void host1x_debug_dump(struct host1x *host1x)
197{
198 struct output o = {
199 .fn = write_to_printk
200 };
201 show_all(host1x, &o);
202}
203
204void host1x_debug_dump_syncpts(struct host1x *host1x)
205{
206 struct output o = {
207 .fn = write_to_printk
208 };
209 show_syncpts(host1x, &o);
210}
diff --git a/drivers/gpu/host1x/debug.h b/drivers/gpu/host1x/debug.h
new file mode 100644
index 000000000000..4595b2e0799f
--- /dev/null
+++ b/drivers/gpu/host1x/debug.h
@@ -0,0 +1,51 @@
1/*
2 * Tegra host1x Debug
3 *
4 * Copyright (c) 2011-2013 NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __HOST1X_DEBUG_H
19#define __HOST1X_DEBUG_H
20
21#include <linux/debugfs.h>
22#include <linux/seq_file.h>
23
24struct host1x;
25
/* Sink for formatted debug text plus a scratch formatting buffer. */
struct output {
	void (*fn)(void *ctx, const char *str, size_t len);
	void *ctx;			/* opaque context passed to fn */
	char buf[256];			/* scratch buffer for host1x_debug_output() */
};

/* output sink that appends to a seq_file (debugfs reads) */
static inline void write_to_seqfile(void *ctx, const char *str, size_t len)
{
	seq_write((struct seq_file *)ctx, str, len);
}

/* output sink that forwards to the kernel log; str must be NUL-terminated */
static inline void write_to_printk(void *ctx, const char *str, size_t len)
{
	pr_info("%s", str);
}

void __printf(2, 3) host1x_debug_output(struct output *o, const char *fmt, ...);

/* non-zero enables tracing of pushed command words (debugfs-controlled) */
extern unsigned int host1x_debug_trace_cmdbuf;

void host1x_debug_init(struct host1x *host1x);
void host1x_debug_deinit(struct host1x *host1x);
void host1x_debug_dump(struct host1x *host1x);
void host1x_debug_dump_syncpts(struct host1x *host1x);

#endif
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
new file mode 100644
index 000000000000..28e28a23d444
--- /dev/null
+++ b/drivers/gpu/host1x/dev.c
@@ -0,0 +1,246 @@
1/*
2 * Tegra host1x driver
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/module.h>
20#include <linux/list.h>
21#include <linux/slab.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/clk.h>
25#include <linux/io.h>
26
27#define CREATE_TRACE_POINTS
28#include <trace/events/host1x.h>
29
30#include "dev.h"
31#include "intr.h"
32#include "channel.h"
33#include "debug.h"
34#include "hw/host1x01.h"
35#include "host1x_client.h"
36
/* Stash the DRM layer's opaque per-host1x state on the host1x device. */
void host1x_set_drm_data(struct device *dev, void *data)
{
	struct host1x *host1x = dev_get_drvdata(dev);
	host1x->drm_data = data;
}

/* Retrieve the state previously set by host1x_set_drm_data(). */
void *host1x_get_drm_data(struct device *dev)
{
	struct host1x *host1x = dev_get_drvdata(dev);
	return host1x->drm_data;
}
48
/* Write a sync register; @r is an offset into the sync aperture. */
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

/* Read a sync register; @r is an offset into the sync aperture. */
u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

/* Write a per-channel register at offset @r. */
void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

/* Read a per-channel register at offset @r. */
u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}
72
/* Hardware description for host1x01 (Tegra20/Tegra30). */
static const struct host1x_info host1x01_info = {
	.nb_channels = 8,	/* hardware channels */
	.nb_pts = 32,		/* syncpoints */
	.nb_mlocks = 16,	/* module locks */
	.nb_bases = 8,		/* wait bases */
	.init = host1x01_init,
	.sync_offset = 0x3000,	/* sync aperture offset from regs base */
};
81
82static struct of_device_id host1x_of_match[] = {
83 { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
84 { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
85 { },
86};
87MODULE_DEVICE_TABLE(of, host1x_of_match);
88
89static int host1x_probe(struct platform_device *pdev)
90{
91 const struct of_device_id *id;
92 struct host1x *host;
93 struct resource *regs;
94 int syncpt_irq;
95 int err;
96
97 id = of_match_device(host1x_of_match, &pdev->dev);
98 if (!id)
99 return -EINVAL;
100
101 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
102 if (!regs) {
103 dev_err(&pdev->dev, "failed to get registers\n");
104 return -ENXIO;
105 }
106
107 syncpt_irq = platform_get_irq(pdev, 0);
108 if (syncpt_irq < 0) {
109 dev_err(&pdev->dev, "failed to get IRQ\n");
110 return -ENXIO;
111 }
112
113 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
114 if (!host)
115 return -ENOMEM;
116
117 host->dev = &pdev->dev;
118 host->info = id->data;
119
120 /* set common host1x device data */
121 platform_set_drvdata(pdev, host);
122
123 host->regs = devm_ioremap_resource(&pdev->dev, regs);
124 if (IS_ERR(host->regs))
125 return PTR_ERR(host->regs);
126
127 if (host->info->init) {
128 err = host->info->init(host);
129 if (err)
130 return err;
131 }
132
133 host->clk = devm_clk_get(&pdev->dev, NULL);
134 if (IS_ERR(host->clk)) {
135 dev_err(&pdev->dev, "failed to get clock\n");
136 err = PTR_ERR(host->clk);
137 return err;
138 }
139
140 err = host1x_channel_list_init(host);
141 if (err) {
142 dev_err(&pdev->dev, "failed to initialize channel list\n");
143 return err;
144 }
145
146 err = clk_prepare_enable(host->clk);
147 if (err < 0) {
148 dev_err(&pdev->dev, "failed to enable clock\n");
149 return err;
150 }
151
152 err = host1x_syncpt_init(host);
153 if (err) {
154 dev_err(&pdev->dev, "failed to initialize syncpts\n");
155 return err;
156 }
157
158 err = host1x_intr_init(host, syncpt_irq);
159 if (err) {
160 dev_err(&pdev->dev, "failed to initialize interrupts\n");
161 goto fail_deinit_syncpt;
162 }
163
164 host1x_debug_init(host);
165
166 host1x_drm_alloc(pdev);
167
168 return 0;
169
170fail_deinit_syncpt:
171 host1x_syncpt_deinit(host);
172 return err;
173}
174
/* Undo probe: interrupts and syncpoints first, then gate the clock. */
static int __exit host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	clk_disable_unprepare(host->clk);

	return 0;
}
185
static struct platform_driver tegra_host1x_driver = {
	.probe = host1x_probe,
	/* __exit_p() compiles to NULL when the driver cannot be unloaded */
	.remove = __exit_p(host1x_remove),
	.driver = {
		.owner = THIS_MODULE,
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
	},
};
195
/*
 * Module init: register the host1x platform driver first and, when the
 * Tegra DRM support is built in, the DRM client drivers that depend on
 * host1x being available. Failures unwind in reverse order.
 */
static int __init tegra_host1x_init(void)
{
	int err;

	err = platform_driver_register(&tegra_host1x_driver);
	if (err < 0)
		return err;

#ifdef CONFIG_DRM_TEGRA
	err = platform_driver_register(&tegra_dc_driver);
	if (err < 0)
		goto unregister_host1x;

	err = platform_driver_register(&tegra_hdmi_driver);
	if (err < 0)
		goto unregister_dc;

	err = platform_driver_register(&tegra_gr2d_driver);
	if (err < 0)
		goto unregister_hdmi;
#endif

	return 0;

	/* error unwind: unregister in reverse registration order */
#ifdef CONFIG_DRM_TEGRA
unregister_hdmi:
	platform_driver_unregister(&tegra_hdmi_driver);
unregister_dc:
	platform_driver_unregister(&tegra_dc_driver);
unregister_host1x:
	platform_driver_unregister(&tegra_host1x_driver);
	return err;
#endif
}
module_init(tegra_host1x_init);
231
/* Module exit: DRM client drivers first, then the host1x driver itself. */
static void __exit tegra_host1x_exit(void)
{
#ifdef CONFIG_DRM_TEGRA
	platform_driver_unregister(&tegra_gr2d_driver);
	platform_driver_unregister(&tegra_hdmi_driver);
	platform_driver_unregister(&tegra_dc_driver);
#endif
	platform_driver_unregister(&tegra_host1x_driver);
}
module_exit(tegra_host1x_exit);
242
243MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
244MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
245MODULE_DESCRIPTION("Host1x driver for Tegra products");
246MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
new file mode 100644
index 000000000000..a1607d6e135b
--- /dev/null
+++ b/drivers/gpu/host1x/dev.h
@@ -0,0 +1,308 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef HOST1X_DEV_H
18#define HOST1X_DEV_H
19
20#include <linux/platform_device.h>
21#include <linux/device.h>
22
23#include "channel.h"
24#include "syncpt.h"
25#include "intr.h"
26#include "cdma.h"
27#include "job.h"
28
29struct host1x_syncpt;
30struct host1x_channel;
31struct host1x_cdma;
32struct host1x_job;
33struct push_buffer;
34struct output;
35struct dentry;
36
37struct host1x_channel_ops {
38 int (*init)(struct host1x_channel *channel, struct host1x *host,
39 unsigned int id);
40 int (*submit)(struct host1x_job *job);
41};
42
43struct host1x_cdma_ops {
44 void (*start)(struct host1x_cdma *cdma);
45 void (*stop)(struct host1x_cdma *cdma);
46 void (*flush)(struct host1x_cdma *cdma);
47 int (*timeout_init)(struct host1x_cdma *cdma, u32 syncpt_id);
48 void (*timeout_destroy)(struct host1x_cdma *cdma);
49 void (*freeze)(struct host1x_cdma *cdma);
50 void (*resume)(struct host1x_cdma *cdma, u32 getptr);
51 void (*timeout_cpu_incr)(struct host1x_cdma *cdma, u32 getptr,
52 u32 syncpt_incrs, u32 syncval, u32 nr_slots);
53};
54
55struct host1x_pushbuffer_ops {
56 void (*init)(struct push_buffer *pb);
57};
58
59struct host1x_debug_ops {
60 void (*debug_init)(struct dentry *de);
61 void (*show_channel_cdma)(struct host1x *host,
62 struct host1x_channel *ch,
63 struct output *o);
64 void (*show_channel_fifo)(struct host1x *host,
65 struct host1x_channel *ch,
66 struct output *o);
67 void (*show_mlocks)(struct host1x *host, struct output *output);
68
69};
70
71struct host1x_syncpt_ops {
72 void (*restore)(struct host1x_syncpt *syncpt);
73 void (*restore_wait_base)(struct host1x_syncpt *syncpt);
74 void (*load_wait_base)(struct host1x_syncpt *syncpt);
75 u32 (*load)(struct host1x_syncpt *syncpt);
76 void (*cpu_incr)(struct host1x_syncpt *syncpt);
77 int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr);
78};
79
80struct host1x_intr_ops {
81 int (*init_host_sync)(struct host1x *host, u32 cpm,
82 void (*syncpt_thresh_work)(struct work_struct *work));
83 void (*set_syncpt_threshold)(
84 struct host1x *host, u32 id, u32 thresh);
85 void (*enable_syncpt_intr)(struct host1x *host, u32 id);
86 void (*disable_syncpt_intr)(struct host1x *host, u32 id);
87 void (*disable_all_syncpt_intrs)(struct host1x *host);
88 int (*free_syncpt_irq)(struct host1x *host);
89};
90
91struct host1x_info {
92 int nb_channels; /* host1x: num channels supported */
93 int nb_pts; /* host1x: num syncpoints supported */
94 int nb_bases; /* host1x: num syncpoints supported */
95 int nb_mlocks; /* host1x: number of mlocks */
96 int (*init)(struct host1x *); /* initialize per SoC ops */
97 int sync_offset;
98};
99
100struct host1x {
101 const struct host1x_info *info;
102
103 void __iomem *regs;
104 struct host1x_syncpt *syncpt;
105 struct device *dev;
106 struct clk *clk;
107
108 struct mutex intr_mutex;
109 struct workqueue_struct *intr_wq;
110 int intr_syncpt_irq;
111
112 const struct host1x_syncpt_ops *syncpt_op;
113 const struct host1x_intr_ops *intr_op;
114 const struct host1x_channel_ops *channel_op;
115 const struct host1x_cdma_ops *cdma_op;
116 const struct host1x_pushbuffer_ops *cdma_pb_op;
117 const struct host1x_debug_ops *debug_op;
118
119 struct host1x_syncpt *nop_sp;
120
121 struct mutex chlist_mutex;
122 struct host1x_channel chlist;
123 unsigned long allocated_channels;
124 unsigned int num_allocated_channels;
125
126 struct dentry *debugfs;
127
128 void *drm_data;
129};
130
131void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
132u32 host1x_sync_readl(struct host1x *host1x, u32 r);
133void host1x_ch_writel(struct host1x_channel *ch, u32 r, u32 v);
134u32 host1x_ch_readl(struct host1x_channel *ch, u32 r);
135
136static inline void host1x_hw_syncpt_restore(struct host1x *host,
137 struct host1x_syncpt *sp)
138{
139 host->syncpt_op->restore(sp);
140}
141
142static inline void host1x_hw_syncpt_restore_wait_base(struct host1x *host,
143 struct host1x_syncpt *sp)
144{
145 host->syncpt_op->restore_wait_base(sp);
146}
147
148static inline void host1x_hw_syncpt_load_wait_base(struct host1x *host,
149 struct host1x_syncpt *sp)
150{
151 host->syncpt_op->load_wait_base(sp);
152}
153
154static inline u32 host1x_hw_syncpt_load(struct host1x *host,
155 struct host1x_syncpt *sp)
156{
157 return host->syncpt_op->load(sp);
158}
159
160static inline void host1x_hw_syncpt_cpu_incr(struct host1x *host,
161 struct host1x_syncpt *sp)
162{
163 host->syncpt_op->cpu_incr(sp);
164}
165
166static inline int host1x_hw_syncpt_patch_wait(struct host1x *host,
167 struct host1x_syncpt *sp,
168 void *patch_addr)
169{
170 return host->syncpt_op->patch_wait(sp, patch_addr);
171}
172
173static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
174 void (*syncpt_thresh_work)(struct work_struct *))
175{
176 return host->intr_op->init_host_sync(host, cpm, syncpt_thresh_work);
177}
178
179static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
180 u32 id, u32 thresh)
181{
182 host->intr_op->set_syncpt_threshold(host, id, thresh);
183}
184
185static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
186 u32 id)
187{
188 host->intr_op->enable_syncpt_intr(host, id);
189}
190
191static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
192 u32 id)
193{
194 host->intr_op->disable_syncpt_intr(host, id);
195}
196
197static inline void host1x_hw_intr_disable_all_syncpt_intrs(struct host1x *host)
198{
199 host->intr_op->disable_all_syncpt_intrs(host);
200}
201
202static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
203{
204 return host->intr_op->free_syncpt_irq(host);
205}
206
207static inline int host1x_hw_channel_init(struct host1x *host,
208 struct host1x_channel *channel,
209 int chid)
210{
211 return host->channel_op->init(channel, host, chid);
212}
213
214static inline int host1x_hw_channel_submit(struct host1x *host,
215 struct host1x_job *job)
216{
217 return host->channel_op->submit(job);
218}
219
220static inline void host1x_hw_cdma_start(struct host1x *host,
221 struct host1x_cdma *cdma)
222{
223 host->cdma_op->start(cdma);
224}
225
226static inline void host1x_hw_cdma_stop(struct host1x *host,
227 struct host1x_cdma *cdma)
228{
229 host->cdma_op->stop(cdma);
230}
231
232static inline void host1x_hw_cdma_flush(struct host1x *host,
233 struct host1x_cdma *cdma)
234{
235 host->cdma_op->flush(cdma);
236}
237
238static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
239 struct host1x_cdma *cdma,
240 u32 syncpt_id)
241{
242 return host->cdma_op->timeout_init(cdma, syncpt_id);
243}
244
245static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
246 struct host1x_cdma *cdma)
247{
248 host->cdma_op->timeout_destroy(cdma);
249}
250
251static inline void host1x_hw_cdma_freeze(struct host1x *host,
252 struct host1x_cdma *cdma)
253{
254 host->cdma_op->freeze(cdma);
255}
256
257static inline void host1x_hw_cdma_resume(struct host1x *host,
258 struct host1x_cdma *cdma, u32 getptr)
259{
260 host->cdma_op->resume(cdma, getptr);
261}
262
263static inline void host1x_hw_cdma_timeout_cpu_incr(struct host1x *host,
264 struct host1x_cdma *cdma,
265 u32 getptr,
266 u32 syncpt_incrs,
267 u32 syncval, u32 nr_slots)
268{
269 host->cdma_op->timeout_cpu_incr(cdma, getptr, syncpt_incrs, syncval,
270 nr_slots);
271}
272
273static inline void host1x_hw_pushbuffer_init(struct host1x *host,
274 struct push_buffer *pb)
275{
276 host->cdma_pb_op->init(pb);
277}
278
279static inline void host1x_hw_debug_init(struct host1x *host, struct dentry *de)
280{
281 if (host->debug_op && host->debug_op->debug_init)
282 host->debug_op->debug_init(de);
283}
284
285static inline void host1x_hw_show_channel_cdma(struct host1x *host,
286 struct host1x_channel *channel,
287 struct output *o)
288{
289 host->debug_op->show_channel_cdma(host, channel, o);
290}
291
292static inline void host1x_hw_show_channel_fifo(struct host1x *host,
293 struct host1x_channel *channel,
294 struct output *o)
295{
296 host->debug_op->show_channel_fifo(host, channel, o);
297}
298
299static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
300{
301 host->debug_op->show_mlocks(host, o);
302}
303
304extern struct platform_driver tegra_hdmi_driver;
305extern struct platform_driver tegra_dc_driver;
306extern struct platform_driver tegra_gr2d_driver;
307
308#endif
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/host1x/drm/Kconfig
index be1daf7344d3..69853a4de40a 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/host1x/drm/Kconfig
@@ -1,12 +1,10 @@
1config DRM_TEGRA 1config DRM_TEGRA
2 tristate "NVIDIA Tegra DRM" 2 bool "NVIDIA Tegra DRM"
3 depends on DRM && OF && ARCH_TEGRA 3 depends on DRM
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_GEM_CMA_HELPER 5 select FB_SYS_FILLRECT
6 select DRM_KMS_CMA_HELPER 6 select FB_SYS_COPYAREA
7 select FB_CFB_FILLRECT 7 select FB_SYS_IMAGEBLIT
8 select FB_CFB_COPYAREA
9 select FB_CFB_IMAGEBLIT
10 help 8 help
11 Choose this option if you have an NVIDIA Tegra SoC. 9 Choose this option if you have an NVIDIA Tegra SoC.
12 10
@@ -15,6 +13,14 @@ config DRM_TEGRA
15 13
16if DRM_TEGRA 14if DRM_TEGRA
17 15
16config DRM_TEGRA_STAGING
17 bool "Enable HOST1X interface"
18 depends on STAGING
19 help
20 Say yes if HOST1X should be available for userspace DRM users.
21
22 If unsure, choose N.
23
18config DRM_TEGRA_DEBUG 24config DRM_TEGRA_DEBUG
19 bool "NVIDIA Tegra DRM debug support" 25 bool "NVIDIA Tegra DRM debug support"
20 help 26 help
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/host1x/drm/dc.c
index de94707b9dbe..1e2060324f02 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/host1x/drm/dc.c
@@ -14,8 +14,10 @@
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/clk/tegra.h> 15#include <linux/clk/tegra.h>
16 16
17#include "drm.h" 17#include "host1x_client.h"
18#include "dc.h" 18#include "dc.h"
19#include "drm.h"
20#include "gem.h"
19 21
20struct tegra_plane { 22struct tegra_plane {
21 struct drm_plane base; 23 struct drm_plane base;
@@ -51,9 +53,9 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
51 window.bits_per_pixel = fb->bits_per_pixel; 53 window.bits_per_pixel = fb->bits_per_pixel;
52 54
53 for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { 55 for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
54 struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i); 56 struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
55 57
56 window.base[i] = gem->paddr + fb->offsets[i]; 58 window.base[i] = bo->paddr + fb->offsets[i];
57 59
58 /* 60 /*
59 * Tegra doesn't support different strides for U and V planes 61 * Tegra doesn't support different strides for U and V planes
@@ -103,7 +105,9 @@ static const struct drm_plane_funcs tegra_plane_funcs = {
103}; 105};
104 106
105static const uint32_t plane_formats[] = { 107static const uint32_t plane_formats[] = {
108 DRM_FORMAT_XBGR8888,
106 DRM_FORMAT_XRGB8888, 109 DRM_FORMAT_XRGB8888,
110 DRM_FORMAT_RGB565,
107 DRM_FORMAT_UYVY, 111 DRM_FORMAT_UYVY,
108 DRM_FORMAT_YUV420, 112 DRM_FORMAT_YUV420,
109 DRM_FORMAT_YUV422, 113 DRM_FORMAT_YUV422,
@@ -136,7 +140,7 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
136static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y, 140static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
137 struct drm_framebuffer *fb) 141 struct drm_framebuffer *fb)
138{ 142{
139 struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, 0); 143 struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
140 unsigned long value; 144 unsigned long value;
141 145
142 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); 146 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -144,7 +148,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
144 value = fb->offsets[0] + y * fb->pitches[0] + 148 value = fb->offsets[0] + y * fb->pitches[0] +
145 x * fb->bits_per_pixel / 8; 149 x * fb->bits_per_pixel / 8;
146 150
147 tegra_dc_writel(dc, gem->paddr + value, DC_WINBUF_START_ADDR); 151 tegra_dc_writel(dc, bo->paddr + value, DC_WINBUF_START_ADDR);
148 tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE); 152 tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
149 153
150 value = GENERAL_UPDATE | WIN_A_UPDATE; 154 value = GENERAL_UPDATE | WIN_A_UPDATE;
@@ -186,20 +190,20 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
186{ 190{
187 struct drm_device *drm = dc->base.dev; 191 struct drm_device *drm = dc->base.dev;
188 struct drm_crtc *crtc = &dc->base; 192 struct drm_crtc *crtc = &dc->base;
189 struct drm_gem_cma_object *gem;
190 unsigned long flags, base; 193 unsigned long flags, base;
194 struct tegra_bo *bo;
191 195
192 if (!dc->event) 196 if (!dc->event)
193 return; 197 return;
194 198
195 gem = drm_fb_cma_get_gem_obj(crtc->fb, 0); 199 bo = tegra_fb_get_plane(crtc->fb, 0);
196 200
197 /* check if new start address has been latched */ 201 /* check if new start address has been latched */
198 tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS); 202 tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
199 base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR); 203 base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
200 tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS); 204 tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
201 205
202 if (base == gem->paddr + crtc->fb->offsets[0]) { 206 if (base == bo->paddr + crtc->fb->offsets[0]) {
203 spin_lock_irqsave(&drm->event_lock, flags); 207 spin_lock_irqsave(&drm->event_lock, flags);
204 drm_send_vblank_event(drm, dc->pipe, dc->event); 208 drm_send_vblank_event(drm, dc->pipe, dc->event);
205 drm_vblank_put(drm, dc->pipe); 209 drm_vblank_put(drm, dc->pipe);
@@ -541,6 +545,9 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
541unsigned int tegra_dc_format(uint32_t format) 545unsigned int tegra_dc_format(uint32_t format)
542{ 546{
543 switch (format) { 547 switch (format) {
548 case DRM_FORMAT_XBGR8888:
549 return WIN_COLOR_DEPTH_R8G8B8A8;
550
544 case DRM_FORMAT_XRGB8888: 551 case DRM_FORMAT_XRGB8888:
545 return WIN_COLOR_DEPTH_B8G8R8A8; 552 return WIN_COLOR_DEPTH_B8G8R8A8;
546 553
@@ -569,7 +576,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
569 struct drm_display_mode *adjusted, 576 struct drm_display_mode *adjusted,
570 int x, int y, struct drm_framebuffer *old_fb) 577 int x, int y, struct drm_framebuffer *old_fb)
571{ 578{
572 struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(crtc->fb, 0); 579 struct tegra_bo *bo = tegra_fb_get_plane(crtc->fb, 0);
573 struct tegra_dc *dc = to_tegra_dc(crtc); 580 struct tegra_dc *dc = to_tegra_dc(crtc);
574 struct tegra_dc_window window; 581 struct tegra_dc_window window;
575 unsigned long div, value; 582 unsigned long div, value;
@@ -616,7 +623,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
616 window.format = tegra_dc_format(crtc->fb->pixel_format); 623 window.format = tegra_dc_format(crtc->fb->pixel_format);
617 window.bits_per_pixel = crtc->fb->bits_per_pixel; 624 window.bits_per_pixel = crtc->fb->bits_per_pixel;
618 window.stride[0] = crtc->fb->pitches[0]; 625 window.stride[0] = crtc->fb->pitches[0];
619 window.base[0] = gem->paddr; 626 window.base[0] = bo->paddr;
620 627
621 err = tegra_dc_setup_window(dc, 0, &window); 628 err = tegra_dc_setup_window(dc, 0, &window);
622 if (err < 0) 629 if (err < 0)
@@ -1097,7 +1104,7 @@ static const struct host1x_client_ops dc_client_ops = {
1097 1104
1098static int tegra_dc_probe(struct platform_device *pdev) 1105static int tegra_dc_probe(struct platform_device *pdev)
1099{ 1106{
1100 struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); 1107 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
1101 struct resource *regs; 1108 struct resource *regs;
1102 struct tegra_dc *dc; 1109 struct tegra_dc *dc;
1103 int err; 1110 int err;
@@ -1160,7 +1167,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
1160 1167
1161static int tegra_dc_remove(struct platform_device *pdev) 1168static int tegra_dc_remove(struct platform_device *pdev)
1162{ 1169{
1163 struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); 1170 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
1164 struct tegra_dc *dc = platform_get_drvdata(pdev); 1171 struct tegra_dc *dc = platform_get_drvdata(pdev);
1165 int err; 1172 int err;
1166 1173
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/host1x/drm/dc.h
index 79eaec9aac77..79eaec9aac77 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/host1x/drm/dc.h
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
new file mode 100644
index 000000000000..2b561c9118c6
--- /dev/null
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -0,0 +1,640 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/of_address.h>
12#include <linux/of_platform.h>
13
14#include <linux/dma-mapping.h>
15#include <asm/dma-iommu.h>
16
17#include <drm/drm.h>
18#include <drm/drmP.h>
19
20#include "host1x_client.h"
21#include "dev.h"
22#include "drm.h"
23#include "gem.h"
24#include "syncpt.h"
25
26#define DRIVER_NAME "tegra"
27#define DRIVER_DESC "NVIDIA Tegra graphics"
28#define DRIVER_DATE "20120330"
29#define DRIVER_MAJOR 0
30#define DRIVER_MINOR 0
31#define DRIVER_PATCHLEVEL 0
32
33struct host1x_drm_client {
34 struct host1x_client *client;
35 struct device_node *np;
36 struct list_head list;
37};
38
39static int host1x_add_drm_client(struct host1x_drm *host1x,
40 struct device_node *np)
41{
42 struct host1x_drm_client *client;
43
44 client = kzalloc(sizeof(*client), GFP_KERNEL);
45 if (!client)
46 return -ENOMEM;
47
48 INIT_LIST_HEAD(&client->list);
49 client->np = of_node_get(np);
50
51 list_add_tail(&client->list, &host1x->drm_clients);
52
53 return 0;
54}
55
56static int host1x_activate_drm_client(struct host1x_drm *host1x,
57 struct host1x_drm_client *drm,
58 struct host1x_client *client)
59{
60 mutex_lock(&host1x->drm_clients_lock);
61 list_del_init(&drm->list);
62 list_add_tail(&drm->list, &host1x->drm_active);
63 drm->client = client;
64 mutex_unlock(&host1x->drm_clients_lock);
65
66 return 0;
67}
68
69static int host1x_remove_drm_client(struct host1x_drm *host1x,
70 struct host1x_drm_client *client)
71{
72 mutex_lock(&host1x->drm_clients_lock);
73 list_del_init(&client->list);
74 mutex_unlock(&host1x->drm_clients_lock);
75
76 of_node_put(client->np);
77 kfree(client);
78
79 return 0;
80}
81
82static int host1x_parse_dt(struct host1x_drm *host1x)
83{
84 static const char * const compat[] = {
85 "nvidia,tegra20-dc",
86 "nvidia,tegra20-hdmi",
87 "nvidia,tegra20-gr2d",
88 "nvidia,tegra30-dc",
89 "nvidia,tegra30-hdmi",
90 "nvidia,tegra30-gr2d",
91 };
92 unsigned int i;
93 int err;
94
95 for (i = 0; i < ARRAY_SIZE(compat); i++) {
96 struct device_node *np;
97
98 for_each_child_of_node(host1x->dev->of_node, np) {
99 if (of_device_is_compatible(np, compat[i]) &&
100 of_device_is_available(np)) {
101 err = host1x_add_drm_client(host1x, np);
102 if (err < 0)
103 return err;
104 }
105 }
106 }
107
108 return 0;
109}
110
111int host1x_drm_alloc(struct platform_device *pdev)
112{
113 struct host1x_drm *host1x;
114 int err;
115
116 host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
117 if (!host1x)
118 return -ENOMEM;
119
120 mutex_init(&host1x->drm_clients_lock);
121 INIT_LIST_HEAD(&host1x->drm_clients);
122 INIT_LIST_HEAD(&host1x->drm_active);
123 mutex_init(&host1x->clients_lock);
124 INIT_LIST_HEAD(&host1x->clients);
125 host1x->dev = &pdev->dev;
126
127 err = host1x_parse_dt(host1x);
128 if (err < 0) {
129 dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
130 return err;
131 }
132
133 host1x_set_drm_data(&pdev->dev, host1x);
134
135 return 0;
136}
137
138int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm)
139{
140 struct host1x_client *client;
141
142 mutex_lock(&host1x->clients_lock);
143
144 list_for_each_entry(client, &host1x->clients, list) {
145 if (client->ops && client->ops->drm_init) {
146 int err = client->ops->drm_init(client, drm);
147 if (err < 0) {
148 dev_err(host1x->dev,
149 "DRM setup failed for %s: %d\n",
150 dev_name(client->dev), err);
151 return err;
152 }
153 }
154 }
155
156 mutex_unlock(&host1x->clients_lock);
157
158 return 0;
159}
160
161int host1x_drm_exit(struct host1x_drm *host1x)
162{
163 struct platform_device *pdev = to_platform_device(host1x->dev);
164 struct host1x_client *client;
165
166 if (!host1x->drm)
167 return 0;
168
169 mutex_lock(&host1x->clients_lock);
170
171 list_for_each_entry_reverse(client, &host1x->clients, list) {
172 if (client->ops && client->ops->drm_exit) {
173 int err = client->ops->drm_exit(client);
174 if (err < 0) {
175 dev_err(host1x->dev,
176 "DRM cleanup failed for %s: %d\n",
177 dev_name(client->dev), err);
178 return err;
179 }
180 }
181 }
182
183 mutex_unlock(&host1x->clients_lock);
184
185 drm_platform_exit(&tegra_drm_driver, pdev);
186 host1x->drm = NULL;
187
188 return 0;
189}
190
191int host1x_register_client(struct host1x_drm *host1x,
192 struct host1x_client *client)
193{
194 struct host1x_drm_client *drm, *tmp;
195 int err;
196
197 mutex_lock(&host1x->clients_lock);
198 list_add_tail(&client->list, &host1x->clients);
199 mutex_unlock(&host1x->clients_lock);
200
201 list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
202 if (drm->np == client->dev->of_node)
203 host1x_activate_drm_client(host1x, drm, client);
204
205 if (list_empty(&host1x->drm_clients)) {
206 struct platform_device *pdev = to_platform_device(host1x->dev);
207
208 err = drm_platform_init(&tegra_drm_driver, pdev);
209 if (err < 0) {
210 dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
211 return err;
212 }
213 }
214
215 return 0;
216}
217
218int host1x_unregister_client(struct host1x_drm *host1x,
219 struct host1x_client *client)
220{
221 struct host1x_drm_client *drm, *tmp;
222 int err;
223
224 list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
225 if (drm->client == client) {
226 err = host1x_drm_exit(host1x);
227 if (err < 0) {
228 dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
229 err);
230 return err;
231 }
232
233 host1x_remove_drm_client(host1x, drm);
234 break;
235 }
236 }
237
238 mutex_lock(&host1x->clients_lock);
239 list_del_init(&client->list);
240 mutex_unlock(&host1x->clients_lock);
241
242 return 0;
243}
244
245static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
246{
247 struct host1x_drm *host1x;
248 int err;
249
250 host1x = host1x_get_drm_data(drm->dev);
251 drm->dev_private = host1x;
252 host1x->drm = drm;
253
254 drm_mode_config_init(drm);
255
256 err = host1x_drm_init(host1x, drm);
257 if (err < 0)
258 return err;
259
260 err = drm_vblank_init(drm, drm->mode_config.num_crtc);
261 if (err < 0)
262 return err;
263
264 err = tegra_drm_fb_init(drm);
265 if (err < 0)
266 return err;
267
268 drm_kms_helper_poll_init(drm);
269
270 return 0;
271}
272
273static int tegra_drm_unload(struct drm_device *drm)
274{
275 drm_kms_helper_poll_fini(drm);
276 tegra_drm_fb_exit(drm);
277
278 drm_mode_config_cleanup(drm);
279
280 return 0;
281}
282
283static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
284{
285 struct host1x_drm_file *fpriv;
286
287 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
288 if (!fpriv)
289 return -ENOMEM;
290
291 INIT_LIST_HEAD(&fpriv->contexts);
292 filp->driver_priv = fpriv;
293
294 return 0;
295}
296
297static void host1x_drm_context_free(struct host1x_drm_context *context)
298{
299 context->client->ops->close_channel(context);
300 kfree(context);
301}
302
303static void tegra_drm_lastclose(struct drm_device *drm)
304{
305 struct host1x_drm *host1x = drm->dev_private;
306
307 tegra_fbdev_restore_mode(host1x->fbdev);
308}
309
310#ifdef CONFIG_DRM_TEGRA_STAGING
311static bool host1x_drm_file_owns_context(struct host1x_drm_file *file,
312 struct host1x_drm_context *context)
313{
314 struct host1x_drm_context *ctx;
315
316 list_for_each_entry(ctx, &file->contexts, list)
317 if (ctx == context)
318 return true;
319
320 return false;
321}
322
323static int tegra_gem_create(struct drm_device *drm, void *data,
324 struct drm_file *file)
325{
326 struct drm_tegra_gem_create *args = data;
327 struct tegra_bo *bo;
328
329 bo = tegra_bo_create_with_handle(file, drm, args->size,
330 &args->handle);
331 if (IS_ERR(bo))
332 return PTR_ERR(bo);
333
334 return 0;
335}
336
337static int tegra_gem_mmap(struct drm_device *drm, void *data,
338 struct drm_file *file)
339{
340 struct drm_tegra_gem_mmap *args = data;
341 struct drm_gem_object *gem;
342 struct tegra_bo *bo;
343
344 gem = drm_gem_object_lookup(drm, file, args->handle);
345 if (!gem)
346 return -EINVAL;
347
348 bo = to_tegra_bo(gem);
349
350 args->offset = tegra_bo_get_mmap_offset(bo);
351
352 drm_gem_object_unreference(gem);
353
354 return 0;
355}
356
357static int tegra_syncpt_read(struct drm_device *drm, void *data,
358 struct drm_file *file)
359{
360 struct drm_tegra_syncpt_read *args = data;
361 struct host1x *host = dev_get_drvdata(drm->dev);
362 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
363
364 if (!sp)
365 return -EINVAL;
366
367 args->value = host1x_syncpt_read_min(sp);
368 return 0;
369}
370
371static int tegra_syncpt_incr(struct drm_device *drm, void *data,
372 struct drm_file *file)
373{
374 struct drm_tegra_syncpt_incr *args = data;
375 struct host1x *host = dev_get_drvdata(drm->dev);
376 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
377
378 if (!sp)
379 return -EINVAL;
380
381 host1x_syncpt_incr(sp);
382 return 0;
383}
384
385static int tegra_syncpt_wait(struct drm_device *drm, void *data,
386 struct drm_file *file)
387{
388 struct drm_tegra_syncpt_wait *args = data;
389 struct host1x *host = dev_get_drvdata(drm->dev);
390 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
391
392 if (!sp)
393 return -EINVAL;
394
395 return host1x_syncpt_wait(sp, args->thresh, args->timeout,
396 &args->value);
397}
398
399static int tegra_open_channel(struct drm_device *drm, void *data,
400 struct drm_file *file)
401{
402 struct drm_tegra_open_channel *args = data;
403 struct host1x_client *client;
404 struct host1x_drm_context *context;
405 struct host1x_drm_file *fpriv = file->driver_priv;
406 struct host1x_drm *host1x = drm->dev_private;
407 int err = -ENODEV;
408
409 context = kzalloc(sizeof(*context), GFP_KERNEL);
410 if (!context)
411 return -ENOMEM;
412
413 list_for_each_entry(client, &host1x->clients, list)
414 if (client->class == args->client) {
415 err = client->ops->open_channel(client, context);
416 if (err)
417 break;
418
419 context->client = client;
420 list_add(&context->list, &fpriv->contexts);
421 args->context = (uintptr_t)context;
422 return 0;
423 }
424
425 kfree(context);
426 return err;
427}
428
429static int tegra_close_channel(struct drm_device *drm, void *data,
430 struct drm_file *file)
431{
432 struct drm_tegra_close_channel *args = data;
433 struct host1x_drm_file *fpriv = file->driver_priv;
434 struct host1x_drm_context *context =
435 (struct host1x_drm_context *)(uintptr_t)args->context;
436
437 if (!host1x_drm_file_owns_context(fpriv, context))
438 return -EINVAL;
439
440 list_del(&context->list);
441 host1x_drm_context_free(context);
442
443 return 0;
444}
445
446static int tegra_get_syncpt(struct drm_device *drm, void *data,
447 struct drm_file *file)
448{
449 struct drm_tegra_get_syncpt *args = data;
450 struct host1x_drm_file *fpriv = file->driver_priv;
451 struct host1x_drm_context *context =
452 (struct host1x_drm_context *)(uintptr_t)args->context;
453 struct host1x_syncpt *syncpt;
454
455 if (!host1x_drm_file_owns_context(fpriv, context))
456 return -ENODEV;
457
458 if (args->index >= context->client->num_syncpts)
459 return -EINVAL;
460
461 syncpt = context->client->syncpts[args->index];
462 args->id = host1x_syncpt_id(syncpt);
463
464 return 0;
465}
466
467static int tegra_submit(struct drm_device *drm, void *data,
468 struct drm_file *file)
469{
470 struct drm_tegra_submit *args = data;
471 struct host1x_drm_file *fpriv = file->driver_priv;
472 struct host1x_drm_context *context =
473 (struct host1x_drm_context *)(uintptr_t)args->context;
474
475 if (!host1x_drm_file_owns_context(fpriv, context))
476 return -ENODEV;
477
478 return context->client->ops->submit(context, args, drm, file);
479}
480#endif
481
482static struct drm_ioctl_desc tegra_drm_ioctls[] = {
483#ifdef CONFIG_DRM_TEGRA_STAGING
484 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
485 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
486 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
487 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
488 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
489 DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
490 DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
491 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
492 DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
493#endif
494};
495
496static const struct file_operations tegra_drm_fops = {
497 .owner = THIS_MODULE,
498 .open = drm_open,
499 .release = drm_release,
500 .unlocked_ioctl = drm_ioctl,
501 .mmap = tegra_drm_mmap,
502 .poll = drm_poll,
503 .fasync = drm_fasync,
504 .read = drm_read,
505#ifdef CONFIG_COMPAT
506 .compat_ioctl = drm_compat_ioctl,
507#endif
508 .llseek = noop_llseek,
509};
510
511static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
512{
513 struct drm_crtc *crtc;
514
515 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
516 struct tegra_dc *dc = to_tegra_dc(crtc);
517
518 if (dc->pipe == pipe)
519 return crtc;
520 }
521
522 return NULL;
523}
524
525static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
526{
527 /* TODO: implement real hardware counter using syncpoints */
528 return drm_vblank_count(dev, crtc);
529}
530
531static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
532{
533 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
534 struct tegra_dc *dc = to_tegra_dc(crtc);
535
536 if (!crtc)
537 return -ENODEV;
538
539 tegra_dc_enable_vblank(dc);
540
541 return 0;
542}
543
544static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
545{
546 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
547 struct tegra_dc *dc = to_tegra_dc(crtc);
548
549 if (crtc)
550 tegra_dc_disable_vblank(dc);
551}
552
553static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
554{
555 struct host1x_drm_file *fpriv = file->driver_priv;
556 struct host1x_drm_context *context, *tmp;
557 struct drm_crtc *crtc;
558
559 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
560 tegra_dc_cancel_page_flip(crtc, file);
561
562 list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
563 host1x_drm_context_free(context);
564
565 kfree(fpriv);
566}
567
568#ifdef CONFIG_DEBUG_FS
569static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
570{
571 struct drm_info_node *node = (struct drm_info_node *)s->private;
572 struct drm_device *drm = node->minor->dev;
573 struct drm_framebuffer *fb;
574
575 mutex_lock(&drm->mode_config.fb_lock);
576
577 list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
578 seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
579 fb->base.id, fb->width, fb->height, fb->depth,
580 fb->bits_per_pixel,
581 atomic_read(&fb->refcount.refcount));
582 }
583
584 mutex_unlock(&drm->mode_config.fb_lock);
585
586 return 0;
587}
588
589static struct drm_info_list tegra_debugfs_list[] = {
590 { "framebuffers", tegra_debugfs_framebuffers, 0 },
591};
592
593static int tegra_debugfs_init(struct drm_minor *minor)
594{
595 return drm_debugfs_create_files(tegra_debugfs_list,
596 ARRAY_SIZE(tegra_debugfs_list),
597 minor->debugfs_root, minor);
598}
599
600static void tegra_debugfs_cleanup(struct drm_minor *minor)
601{
602 drm_debugfs_remove_files(tegra_debugfs_list,
603 ARRAY_SIZE(tegra_debugfs_list), minor);
604}
605#endif
606
607struct drm_driver tegra_drm_driver = {
608 .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
609 .load = tegra_drm_load,
610 .unload = tegra_drm_unload,
611 .open = tegra_drm_open,
612 .preclose = tegra_drm_preclose,
613 .lastclose = tegra_drm_lastclose,
614
615 .get_vblank_counter = tegra_drm_get_vblank_counter,
616 .enable_vblank = tegra_drm_enable_vblank,
617 .disable_vblank = tegra_drm_disable_vblank,
618
619#if defined(CONFIG_DEBUG_FS)
620 .debugfs_init = tegra_debugfs_init,
621 .debugfs_cleanup = tegra_debugfs_cleanup,
622#endif
623
624 .gem_free_object = tegra_bo_free_object,
625 .gem_vm_ops = &tegra_bo_vm_ops,
626 .dumb_create = tegra_bo_dumb_create,
627 .dumb_map_offset = tegra_bo_dumb_map_offset,
628 .dumb_destroy = tegra_bo_dumb_destroy,
629
630 .ioctls = tegra_drm_ioctls,
631 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
632 .fops = &tegra_drm_fops,
633
634 .name = DRIVER_NAME,
635 .desc = DRIVER_DESC,
636 .date = DRIVER_DATE,
637 .major = DRIVER_MAJOR,
638 .minor = DRIVER_MINOR,
639 .patchlevel = DRIVER_PATCHLEVEL,
640};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/host1x/drm/drm.h
index 6dd75a2600eb..02ce020f2575 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/host1x/drm/drm.h
@@ -1,24 +1,36 @@
1/* 1/*
2 * Copyright (C) 2012 Avionic Design GmbH 2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. 3 * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#ifndef TEGRA_DRM_H 10#ifndef HOST1X_DRM_H
11#define TEGRA_DRM_H 1 11#define HOST1X_DRM_H 1
12 12
13#include <drm/drmP.h> 13#include <drm/drmP.h>
14#include <drm/drm_crtc_helper.h> 14#include <drm/drm_crtc_helper.h>
15#include <drm/drm_edid.h> 15#include <drm/drm_edid.h>
16#include <drm/drm_fb_helper.h> 16#include <drm/drm_fb_helper.h>
17#include <drm/drm_gem_cma_helper.h>
18#include <drm/drm_fb_cma_helper.h>
19#include <drm/drm_fixed.h> 17#include <drm/drm_fixed.h>
18#include <uapi/drm/tegra_drm.h>
20 19
21struct host1x { 20#include "host1x.h"
21
22struct tegra_fb {
23 struct drm_framebuffer base;
24 struct tegra_bo **planes;
25 unsigned int num_planes;
26};
27
28struct tegra_fbdev {
29 struct drm_fb_helper base;
30 struct tegra_fb *fb;
31};
32
33struct host1x_drm {
22 struct drm_device *drm; 34 struct drm_device *drm;
23 struct device *dev; 35 struct device *dev;
24 void __iomem *regs; 36 void __iomem *regs;
@@ -33,31 +45,53 @@ struct host1x {
33 struct mutex clients_lock; 45 struct mutex clients_lock;
34 struct list_head clients; 46 struct list_head clients;
35 47
36 struct drm_fbdev_cma *fbdev; 48 struct tegra_fbdev *fbdev;
37}; 49};
38 50
39struct host1x_client; 51struct host1x_client;
40 52
53struct host1x_drm_context {
54 struct host1x_client *client;
55 struct host1x_channel *channel;
56 struct list_head list;
57};
58
41struct host1x_client_ops { 59struct host1x_client_ops {
42 int (*drm_init)(struct host1x_client *client, struct drm_device *drm); 60 int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
43 int (*drm_exit)(struct host1x_client *client); 61 int (*drm_exit)(struct host1x_client *client);
62 int (*open_channel)(struct host1x_client *client,
63 struct host1x_drm_context *context);
64 void (*close_channel)(struct host1x_drm_context *context);
65 int (*submit)(struct host1x_drm_context *context,
66 struct drm_tegra_submit *args, struct drm_device *drm,
67 struct drm_file *file);
68};
69
70struct host1x_drm_file {
71 struct list_head contexts;
44}; 72};
45 73
46struct host1x_client { 74struct host1x_client {
47 struct host1x *host1x; 75 struct host1x_drm *host1x;
48 struct device *dev; 76 struct device *dev;
49 77
50 const struct host1x_client_ops *ops; 78 const struct host1x_client_ops *ops;
51 79
80 enum host1x_class class;
81 struct host1x_channel *channel;
82
83 struct host1x_syncpt **syncpts;
84 unsigned int num_syncpts;
85
52 struct list_head list; 86 struct list_head list;
53}; 87};
54 88
55extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm); 89extern int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm);
56extern int host1x_drm_exit(struct host1x *host1x); 90extern int host1x_drm_exit(struct host1x_drm *host1x);
57 91
58extern int host1x_register_client(struct host1x *host1x, 92extern int host1x_register_client(struct host1x_drm *host1x,
59 struct host1x_client *client); 93 struct host1x_client *client);
60extern int host1x_unregister_client(struct host1x *host1x, 94extern int host1x_unregister_client(struct host1x_drm *host1x,
61 struct host1x_client *client); 95 struct host1x_client *client);
62 96
63struct tegra_output; 97struct tegra_output;
@@ -66,7 +100,7 @@ struct tegra_dc {
66 struct host1x_client client; 100 struct host1x_client client;
67 spinlock_t lock; 101 spinlock_t lock;
68 102
69 struct host1x *host1x; 103 struct host1x_drm *host1x;
70 struct device *dev; 104 struct device *dev;
71 105
72 struct drm_crtc base; 106 struct drm_crtc base;
@@ -226,12 +260,12 @@ extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output
226extern int tegra_output_exit(struct tegra_output *output); 260extern int tegra_output_exit(struct tegra_output *output);
227 261
228/* from fb.c */ 262/* from fb.c */
263struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
264 unsigned int index);
229extern int tegra_drm_fb_init(struct drm_device *drm); 265extern int tegra_drm_fb_init(struct drm_device *drm);
230extern void tegra_drm_fb_exit(struct drm_device *drm); 266extern void tegra_drm_fb_exit(struct drm_device *drm);
267extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
231 268
232extern struct platform_driver tegra_host1x_driver;
233extern struct platform_driver tegra_hdmi_driver;
234extern struct platform_driver tegra_dc_driver;
235extern struct drm_driver tegra_drm_driver; 269extern struct drm_driver tegra_drm_driver;
236 270
237#endif /* TEGRA_DRM_H */ 271#endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/host1x/drm/fb.c b/drivers/gpu/host1x/drm/fb.c
new file mode 100644
index 000000000000..979a3e32b78b
--- /dev/null
+++ b/drivers/gpu/host1x/drm/fb.c
@@ -0,0 +1,374 @@
1/*
2 * Copyright (C) 2012-2013 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * Based on the KMS/FB CMA helpers
6 * Copyright (C) 2012 Analog Device Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14
15#include "drm.h"
16#include "gem.h"
17
/* Convert a DRM framebuffer pointer to the enclosing tegra_fb. */
static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb)
{
	return container_of(fb, struct tegra_fb, base);
}
22
/* Convert an fb helper pointer to the enclosing tegra_fbdev. */
static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
{
	return container_of(helper, struct tegra_fbdev, base);
}
27
28struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
29 unsigned int index)
30{
31 struct tegra_fb *fb = to_tegra_fb(framebuffer);
32
33 if (index >= drm_format_num_planes(framebuffer->pixel_format))
34 return NULL;
35
36 return fb->planes[index];
37}
38
39static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
40{
41 struct tegra_fb *fb = to_tegra_fb(framebuffer);
42 unsigned int i;
43
44 for (i = 0; i < fb->num_planes; i++) {
45 struct tegra_bo *bo = fb->planes[i];
46
47 if (bo)
48 drm_gem_object_unreference_unlocked(&bo->gem);
49 }
50
51 drm_framebuffer_cleanup(framebuffer);
52 kfree(fb->planes);
53 kfree(fb);
54}
55
/*
 * Create a GEM handle for the framebuffer's first plane so userspace
 * can reference the framebuffer's backing memory (GETFB IOCTL path).
 */
static int tegra_fb_create_handle(struct drm_framebuffer *framebuffer,
				  struct drm_file *file, unsigned int *handle)
{
	struct tegra_fb *fb = to_tegra_fb(framebuffer);

	return drm_gem_handle_create(file, &fb->planes[0]->gem, handle);
}
63
/* drm_framebuffer vtable for Tegra framebuffers */
static struct drm_framebuffer_funcs tegra_fb_funcs = {
	.destroy = tegra_fb_destroy,
	.create_handle = tegra_fb_create_handle,
};
68
69static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
70 struct drm_mode_fb_cmd2 *mode_cmd,
71 struct tegra_bo **planes,
72 unsigned int num_planes)
73{
74 struct tegra_fb *fb;
75 unsigned int i;
76 int err;
77
78 fb = kzalloc(sizeof(*fb), GFP_KERNEL);
79 if (!fb)
80 return ERR_PTR(-ENOMEM);
81
82 fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL);
83 if (!fb->planes)
84 return ERR_PTR(-ENOMEM);
85
86 fb->num_planes = num_planes;
87
88 drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
89
90 for (i = 0; i < fb->num_planes; i++)
91 fb->planes[i] = planes[i];
92
93 err = drm_framebuffer_init(drm, &fb->base, &tegra_fb_funcs);
94 if (err < 0) {
95 dev_err(drm->dev, "failed to initialize framebuffer: %d\n",
96 err);
97 kfree(fb->planes);
98 kfree(fb);
99 return ERR_PTR(err);
100 }
101
102 return fb;
103}
104
/*
 * .fb_create handler: validate the userspace framebuffer description,
 * look up and size-check the GEM object backing each plane, then wrap
 * them in a tegra_fb. On any failure the GEM references taken so far
 * are dropped again.
 */
static struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
					       struct drm_file *file,
					       struct drm_mode_fb_cmd2 *cmd)
{
	unsigned int hsub, vsub, i;
	struct tegra_bo *planes[4];
	struct drm_gem_object *gem;
	struct tegra_fb *fb;
	int err;

	hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format);
	vsub = drm_format_vert_chroma_subsampling(cmd->pixel_format);

	for (i = 0; i < drm_format_num_planes(cmd->pixel_format); i++) {
		/* chroma planes (i > 0) are subsampled; plane 0 is full size */
		unsigned int width = cmd->width / (i ? hsub : 1);
		unsigned int height = cmd->height / (i ? vsub : 1);
		unsigned int size, bpp;

		gem = drm_gem_object_lookup(drm, file, cmd->handles[i]);
		if (!gem) {
			err = -ENXIO;
			goto unreference;
		}

		bpp = drm_format_plane_cpp(cmd->pixel_format, i);

		/* minimum buffer size needed to back this plane */
		size = (height - 1) * cmd->pitches[i] +
		       width * bpp + cmd->offsets[i];

		if (gem->size < size) {
			err = -EINVAL;
			goto unreference;
		}

		planes[i] = to_tegra_bo(gem);
	}

	fb = tegra_fb_alloc(drm, cmd, planes, i);
	if (IS_ERR(fb)) {
		err = PTR_ERR(fb);
		goto unreference;
	}

	return &fb->base;

unreference:
	/* drop the lookup references taken for all planes collected so far */
	while (i--)
		drm_gem_object_unreference_unlocked(&planes[i]->gem);

	return ERR_PTR(err);
}
156
/* fbdev operations; the buffer is CPU-mapped, so the sys_* helpers apply */
static struct fb_ops tegra_fb_ops = {
	.owner = THIS_MODULE,
	.fb_fillrect = sys_fillrect,
	.fb_copyarea = sys_copyarea,
	.fb_imageblit = sys_imageblit,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_blank = drm_fb_helper_blank,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_setcmap = drm_fb_helper_setcmap,
};
168
169static int tegra_fbdev_probe(struct drm_fb_helper *helper,
170 struct drm_fb_helper_surface_size *sizes)
171{
172 struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
173 struct drm_device *drm = helper->dev;
174 struct drm_mode_fb_cmd2 cmd = { 0 };
175 unsigned int bytes_per_pixel;
176 struct drm_framebuffer *fb;
177 unsigned long offset;
178 struct fb_info *info;
179 struct tegra_bo *bo;
180 size_t size;
181 int err;
182
183 bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
184
185 cmd.width = sizes->surface_width;
186 cmd.height = sizes->surface_height;
187 cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
188 cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
189 sizes->surface_depth);
190
191 size = cmd.pitches[0] * cmd.height;
192
193 bo = tegra_bo_create(drm, size);
194 if (IS_ERR(bo))
195 return PTR_ERR(bo);
196
197 info = framebuffer_alloc(0, drm->dev);
198 if (!info) {
199 dev_err(drm->dev, "failed to allocate framebuffer info\n");
200 tegra_bo_free_object(&bo->gem);
201 return -ENOMEM;
202 }
203
204 fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
205 if (IS_ERR(fbdev->fb)) {
206 dev_err(drm->dev, "failed to allocate DRM framebuffer\n");
207 err = PTR_ERR(fbdev->fb);
208 goto release;
209 }
210
211 fb = &fbdev->fb->base;
212 helper->fb = fb;
213 helper->fbdev = info;
214
215 info->par = helper;
216 info->flags = FBINFO_FLAG_DEFAULT;
217 info->fbops = &tegra_fb_ops;
218
219 err = fb_alloc_cmap(&info->cmap, 256, 0);
220 if (err < 0) {
221 dev_err(drm->dev, "failed to allocate color map: %d\n", err);
222 goto destroy;
223 }
224
225 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
226 drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
227
228 offset = info->var.xoffset * bytes_per_pixel +
229 info->var.yoffset * fb->pitches[0];
230
231 drm->mode_config.fb_base = (resource_size_t)bo->paddr;
232 info->screen_base = bo->vaddr + offset;
233 info->screen_size = size;
234 info->fix.smem_start = (unsigned long)(bo->paddr + offset);
235 info->fix.smem_len = size;
236
237 return 0;
238
239destroy:
240 drm_framebuffer_unregister_private(fb);
241 tegra_fb_destroy(fb);
242release:
243 framebuffer_release(info);
244 return err;
245}
246
/* fb helper vtable; only the surface-probe callback is needed */
static struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
	.fb_probe = tegra_fbdev_probe,
};
250
/*
 * Create and initialize the fbdev emulation: allocate the helper,
 * register it with the DRM core, attach all connectors and apply the
 * initial configuration. Returns the fbdev or an ERR_PTR() on failure.
 */
static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
					      unsigned int preferred_bpp,
					      unsigned int num_crtc,
					      unsigned int max_connectors)
{
	struct drm_fb_helper *helper;
	struct tegra_fbdev *fbdev;
	int err;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev) {
		dev_err(drm->dev, "failed to allocate DRM fbdev\n");
		return ERR_PTR(-ENOMEM);
	}

	fbdev->base.funcs = &tegra_fb_helper_funcs;
	helper = &fbdev->base;

	err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
	if (err < 0) {
		dev_err(drm->dev, "failed to initialize DRM FB helper\n");
		goto free;
	}

	err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
	if (err < 0) {
		dev_err(drm->dev, "failed to add connectors\n");
		goto fini;
	}

	/* turn off anything the bootloader may have left enabled */
	drm_helper_disable_unused_functions(drm);

	err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
	if (err < 0) {
		dev_err(drm->dev, "failed to set initial configuration\n");
		goto fini;
	}

	return fbdev;

fini:
	drm_fb_helper_fini(&fbdev->base);
free:
	kfree(fbdev);
	return ERR_PTR(err);
}
297
/*
 * Tear down the fbdev emulation: unregister the fb_info and release
 * its color map, destroy the wrapped framebuffer (dropping the BO
 * reference) and free the helper.
 */
static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
{
	struct fb_info *info = fbdev->base.fbdev;

	if (info) {
		int err;

		err = unregister_framebuffer(info);
		if (err < 0)
			DRM_DEBUG_KMS("failed to unregister framebuffer\n");

		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);

		framebuffer_release(info);
	}

	if (fbdev->fb) {
		drm_framebuffer_unregister_private(&fbdev->fb->base);
		tegra_fb_destroy(&fbdev->fb->base);
	}

	drm_fb_helper_fini(&fbdev->base);
	kfree(fbdev);
}
323
/* Forward output hotplug events to the fbdev helper, if one exists. */
static void tegra_fb_output_poll_changed(struct drm_device *drm)
{
	struct host1x_drm *host1x = drm->dev_private;

	if (host1x->fbdev)
		drm_fb_helper_hotplug_event(&host1x->fbdev->base);
}
331
/* mode config hooks: userspace framebuffer creation and hotplug */
static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
	.fb_create = tegra_fb_create,
	.output_poll_changed = tegra_fb_output_poll_changed,
};
336
/*
 * Set up the mode-config limits and hooks and create the fbdev
 * emulation. Returns 0 on success or a negative error code.
 */
int tegra_drm_fb_init(struct drm_device *drm)
{
	struct host1x_drm *host1x = drm->dev_private;
	struct tegra_fbdev *fbdev;

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	/* hardware limit for scanout surfaces */
	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.funcs = &tegra_drm_mode_funcs;

	fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
				   drm->mode_config.num_connector);
	if (IS_ERR(fbdev))
		return PTR_ERR(fbdev);

	host1x->fbdev = fbdev;

	return 0;
}
359
/* Counterpart to tegra_drm_fb_init(): tear down the fbdev emulation. */
void tegra_drm_fb_exit(struct drm_device *drm)
{
	struct host1x_drm *host1x = drm->dev_private;

	tegra_fbdev_free(host1x->fbdev);
}
366
367void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
368{
369 if (fbdev) {
370 drm_modeset_lock_all(fbdev->base.dev);
371 drm_fb_helper_restore_fbdev_mode(&fbdev->base);
372 drm_modeset_unlock_all(fbdev->base.dev);
373 }
374}
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
new file mode 100644
index 000000000000..c5e9a9b494c2
--- /dev/null
+++ b/drivers/gpu/host1x/drm/gem.c
@@ -0,0 +1,270 @@
1/*
2 * NVIDIA Tegra DRM GEM helper functions
3 *
4 * Copyright (C) 2012 Sascha Hauer, Pengutronix
5 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
6 *
7 * Based on the GEM/CMA helpers
8 *
9 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21#include <linux/mm.h>
22#include <linux/slab.h>
23#include <linux/mutex.h>
24#include <linux/export.h>
25#include <linux/dma-mapping.h>
26
27#include <drm/drmP.h>
28#include <drm/drm.h>
29
30#include "gem.h"
31
/* Convert a host1x_bo pointer to the enclosing tegra_bo. */
static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}
36
/* host1x_bo .put: drop a GEM reference (takes struct_mutex). */
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_drm_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_unreference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);
}
46
/*
 * host1x_bo .pin: the buffer is contiguous DMA memory that is always
 * resident, so no actual pinning is needed; sgt is unused and the
 * physical address is returned directly.
 */
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_drm_bo(bo);

	return obj->paddr;
}
53
/* host1x_bo .unpin: nothing to do, pin did not take any resources. */
static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}
57
/* host1x_bo .mmap: the kernel mapping exists for the BO's lifetime. */
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_drm_bo(bo);

	return obj->vaddr;
}
64
/* host1x_bo .munmap: nothing to do, mmap did not create a mapping. */
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}
68
/* host1x_bo .kmap: return the kernel address of one page of the BO. */
static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_drm_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}
75
/* host1x_bo .kunmap: nothing to do, kmap did not create a mapping. */
static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}
80
/* host1x_bo .get: take a GEM reference (takes struct_mutex). */
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_drm_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_reference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);

	return bo;
}
92
/* host1x buffer-object operations backing BOs with contiguous DMA memory */
const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};
103
/* Release the BO's write-combined DMA memory (size taken from the GEM). */
static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
{
	dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}
108
/*
 * Return the fake mmap offset userspace passes to mmap() for this BO,
 * derived from the hash key set up by drm_gem_create_mmap_offset().
 */
unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
{
	return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
}
113
114struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
115{
116 struct tegra_bo *bo;
117 int err;
118
119 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
120 if (!bo)
121 return ERR_PTR(-ENOMEM);
122
123 host1x_bo_init(&bo->base, &tegra_bo_ops);
124 size = round_up(size, PAGE_SIZE);
125
126 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
127 GFP_KERNEL | __GFP_NOWARN);
128 if (!bo->vaddr) {
129 dev_err(drm->dev, "failed to allocate buffer with size %u\n",
130 size);
131 err = -ENOMEM;
132 goto err_dma;
133 }
134
135 err = drm_gem_object_init(drm, &bo->gem, size);
136 if (err)
137 goto err_init;
138
139 err = drm_gem_create_mmap_offset(&bo->gem);
140 if (err)
141 goto err_mmap;
142
143 return bo;
144
145err_mmap:
146 drm_gem_object_release(&bo->gem);
147err_init:
148 tegra_bo_destroy(drm, bo);
149err_dma:
150 kfree(bo);
151
152 return ERR_PTR(err);
153
154}
155
/*
 * Create a BO and a userspace handle for it. The creation reference is
 * dropped after the handle is created, so the returned BO stays alive
 * only through the file's handle -- callers must not assume they own a
 * reference on the returned pointer.
 */
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     unsigned int size,
					     unsigned int *handle)
{
	struct tegra_bo *bo;
	int ret;

	bo = tegra_bo_create(drm, size);
	if (IS_ERR(bo))
		return bo;

	ret = drm_gem_handle_create(file, &bo->gem, handle);
	if (ret)
		goto err;

	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;

err:
	tegra_bo_free_object(&bo->gem);
	return ERR_PTR(ret);
}
180
/*
 * GEM free callback: invoked when the last reference to the object is
 * dropped. Removes the mmap offset, releases the GEM core state and
 * frees the backing DMA memory and the BO itself.
 */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (gem->map_list.map)
		drm_gem_free_mmap_offset(gem);

	drm_gem_object_release(gem);
	tegra_bo_destroy(gem->dev, bo);

	kfree(bo);
}
193
194int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
195 struct drm_mode_create_dumb *args)
196{
197 int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
198 struct tegra_bo *bo;
199
200 if (args->pitch < min_pitch)
201 args->pitch = min_pitch;
202
203 if (args->size < args->pitch * args->height)
204 args->size = args->pitch * args->height;
205
206 bo = tegra_bo_create_with_handle(file, drm, args->size,
207 &args->handle);
208 if (IS_ERR(bo))
209 return PTR_ERR(bo);
210
211 return 0;
212}
213
214int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
215 uint32_t handle, uint64_t *offset)
216{
217 struct drm_gem_object *gem;
218 struct tegra_bo *bo;
219
220 mutex_lock(&drm->struct_mutex);
221
222 gem = drm_gem_object_lookup(drm, file, handle);
223 if (!gem) {
224 dev_err(drm->dev, "failed to lookup GEM object\n");
225 mutex_unlock(&drm->struct_mutex);
226 return -EINVAL;
227 }
228
229 bo = to_tegra_bo(gem);
230
231 *offset = tegra_bo_get_mmap_offset(bo);
232
233 drm_gem_object_unreference(gem);
234
235 mutex_unlock(&drm->struct_mutex);
236
237 return 0;
238}
239
/* VMA ops for mmap'ed BOs: GEM reference tracking on open/close */
const struct vm_operations_struct tegra_bo_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
244
/*
 * mmap file operation: let the GEM core validate the offset and set up
 * the VMA, then map the BO's contiguous memory into userspace.
 *
 * NOTE(review): the mapping uses vma->vm_page_prot as set up by
 * drm_gem_mmap() -- presumably that should match the write-combined
 * kernel mapping of the buffer; confirm there is no attribute aliasing.
 */
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	/* map the whole contiguous allocation in one go */
	ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
265
/* .dumb_destroy handler: dropping the handle releases the BO reference. */
int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
			  unsigned int handle)
{
	return drm_gem_handle_delete(file, handle);
}
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h
new file mode 100644
index 000000000000..34de2b486eb7
--- /dev/null
+++ b/drivers/gpu/host1x/drm/gem.h
@@ -0,0 +1,59 @@
1/*
2 * Tegra host1x GEM implementation
3 *
4 * Copyright (c) 2012-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_GEM_H
20#define __HOST1X_GEM_H
21
22#include <drm/drm.h>
23#include <drm/drmP.h>
24
25#include "host1x_bo.h"
26
27struct tegra_bo {
28 struct drm_gem_object gem;
29 struct host1x_bo base;
30 dma_addr_t paddr;
31 void *vaddr;
32};
33
34static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
35{
36 return container_of(gem, struct tegra_bo, gem);
37}
38
39extern const struct host1x_bo_ops tegra_bo_ops;
40
41struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size);
42struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
43 struct drm_device *drm,
44 unsigned int size,
45 unsigned int *handle);
46void tegra_bo_free_object(struct drm_gem_object *gem);
47unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo);
48int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
49 struct drm_mode_create_dumb *args);
50int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
51 uint32_t handle, uint64_t *offset);
52int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
53 unsigned int handle);
54
55int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
56
57extern const struct vm_operations_struct tegra_bo_vm_ops;
58
59#endif
diff --git a/drivers/gpu/host1x/drm/gr2d.c b/drivers/gpu/host1x/drm/gr2d.c
new file mode 100644
index 000000000000..6a45ae090ee7
--- /dev/null
+++ b/drivers/gpu/host1x/drm/gr2d.c
@@ -0,0 +1,339 @@
1/*
2 * drivers/video/tegra/host/gr2d/gr2d.c
3 *
4 * Tegra Graphics 2D
5 *
6 * Copyright (c) 2012-2013, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/export.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/clk.h>
25
26#include "channel.h"
27#include "drm.h"
28#include "gem.h"
29#include "job.h"
30#include "host1x.h"
31#include "host1x_bo.h"
32#include "host1x_client.h"
33#include "syncpt.h"
34
/* per-device state for the Tegra 2D engine */
struct gr2d {
	struct host1x_client client;	/* registration with the host1x DRM core */
	struct clk *clk;		/* module clock */
	struct host1x_channel *channel;	/* command channel owned by this engine */
	unsigned long *addr_regs;	/* bitmap of registers that take buffer addresses */
};
41
/* Convert a host1x_client pointer to the enclosing gr2d. */
static inline struct gr2d *to_gr2d(struct host1x_client *client)
{
	return container_of(client, struct gr2d, client);
}
46
47static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg);
48
/* host1x client .drm_init: no per-DRM-device setup needed for gr2d. */
static int gr2d_client_init(struct host1x_client *client,
			    struct drm_device *drm)
{
	return 0;
}
54
/* host1x client .drm_exit: nothing to undo, see gr2d_client_init(). */
static int gr2d_client_exit(struct host1x_client *client)
{
	return 0;
}
59
60static int gr2d_open_channel(struct host1x_client *client,
61 struct host1x_drm_context *context)
62{
63 struct gr2d *gr2d = to_gr2d(client);
64
65 context->channel = host1x_channel_get(gr2d->channel);
66
67 if (!context->channel)
68 return -ENOMEM;
69
70 return 0;
71}
72
/* host1x client .close_channel: drop the reference taken on open. */
static void gr2d_close_channel(struct host1x_drm_context *context)
{
	host1x_channel_put(context->channel);
}
77
78static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
79 struct drm_file *file,
80 u32 handle)
81{
82 struct drm_gem_object *gem;
83 struct tegra_bo *bo;
84
85 gem = drm_gem_object_lookup(drm, file, handle);
86 if (!gem)
87 return 0;
88
89 mutex_lock(&drm->struct_mutex);
90 drm_gem_object_unreference(gem);
91 mutex_unlock(&drm->struct_mutex);
92
93 bo = to_tegra_bo(gem);
94 return &bo->base;
95}
96
97static int gr2d_submit(struct host1x_drm_context *context,
98 struct drm_tegra_submit *args, struct drm_device *drm,
99 struct drm_file *file)
100{
101 struct host1x_job *job;
102 unsigned int num_cmdbufs = args->num_cmdbufs;
103 unsigned int num_relocs = args->num_relocs;
104 unsigned int num_waitchks = args->num_waitchks;
105 struct drm_tegra_cmdbuf __user *cmdbufs =
106 (void * __user)(uintptr_t)args->cmdbufs;
107 struct drm_tegra_reloc __user *relocs =
108 (void * __user)(uintptr_t)args->relocs;
109 struct drm_tegra_waitchk __user *waitchks =
110 (void * __user)(uintptr_t)args->waitchks;
111 struct drm_tegra_syncpt syncpt;
112 int err;
113
114 /* We don't yet support other than one syncpt_incr struct per submit */
115 if (args->num_syncpts != 1)
116 return -EINVAL;
117
118 job = host1x_job_alloc(context->channel, args->num_cmdbufs,
119 args->num_relocs, args->num_waitchks);
120 if (!job)
121 return -ENOMEM;
122
123 job->num_relocs = args->num_relocs;
124 job->num_waitchk = args->num_waitchks;
125 job->client = (u32)args->context;
126 job->class = context->client->class;
127 job->serialize = true;
128
129 while (num_cmdbufs) {
130 struct drm_tegra_cmdbuf cmdbuf;
131 struct host1x_bo *bo;
132
133 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
134 if (err)
135 goto fail;
136
137 bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
138 if (!bo)
139 goto fail;
140
141 host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
142 num_cmdbufs--;
143 cmdbufs++;
144 }
145
146 err = copy_from_user(job->relocarray, relocs,
147 sizeof(*relocs) * num_relocs);
148 if (err)
149 goto fail;
150
151 while (num_relocs--) {
152 struct host1x_reloc *reloc = &job->relocarray[num_relocs];
153 struct host1x_bo *cmdbuf, *target;
154
155 cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
156 target = host1x_bo_lookup(drm, file, (u32)reloc->target);
157
158 reloc->cmdbuf = cmdbuf;
159 reloc->target = target;
160
161 if (!reloc->target || !reloc->cmdbuf)
162 goto fail;
163 }
164
165 err = copy_from_user(job->waitchk, waitchks,
166 sizeof(*waitchks) * num_waitchks);
167 if (err)
168 goto fail;
169
170 err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
171 sizeof(syncpt));
172 if (err)
173 goto fail;
174
175 job->syncpt_id = syncpt.id;
176 job->syncpt_incrs = syncpt.incrs;
177 job->timeout = 10000;
178 job->is_addr_reg = gr2d_is_addr_reg;
179
180 if (args->timeout && args->timeout < 10000)
181 job->timeout = args->timeout;
182
183 err = host1x_job_pin(job, context->client->dev);
184 if (err)
185 goto fail;
186
187 err = host1x_job_submit(job);
188 if (err)
189 goto fail_submit;
190
191 args->fence = job->syncpt_end;
192
193 host1x_job_put(job);
194 return 0;
195
196fail_submit:
197 host1x_job_unpin(job);
198fail:
199 host1x_job_put(job);
200 return err;
201}
202
/* host1x client operations implemented by the 2D engine */
static struct host1x_client_ops gr2d_client_ops = {
	.drm_init = gr2d_client_init,
	.drm_exit = gr2d_client_exit,
	.open_channel = gr2d_open_channel,
	.close_channel = gr2d_close_channel,
	.submit = gr2d_submit,
};
210
211static void gr2d_init_addr_reg_map(struct device *dev, struct gr2d *gr2d)
212{
213 const u32 gr2d_addr_regs[] = {0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d, 0x31,
214 0x32, 0x48, 0x49, 0x4a, 0x4b, 0x4c};
215 unsigned long *bitmap;
216 int i;
217
218 bitmap = devm_kzalloc(dev, DIV_ROUND_UP(256, BITS_PER_BYTE),
219 GFP_KERNEL);
220
221 for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); ++i) {
222 u32 reg = gr2d_addr_regs[i];
223 bitmap[BIT_WORD(reg)] |= BIT_MASK(reg);
224 }
225
226 gr2d->addr_regs = bitmap;
227}
228
/*
 * Firewall callback: report whether writes to the given register of
 * the given class carry a buffer address and thus need relocation.
 */
static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg)
{
	struct gr2d *gr2d = dev_get_drvdata(dev);

	switch (class) {
	case HOST1X_CLASS_HOST1X:
		/* 0x2b is the only host1x-class register carrying an address */
		return reg == 0x2b;
	case HOST1X_CLASS_GR2D:
	case HOST1X_CLASS_GR2D_SB:
		reg &= 0xff;
		if (gr2d->addr_regs[BIT_WORD(reg)] & BIT_MASK(reg))
			return 1;
		/* fall through */
	default:
		return 0;
	}
}
245
/* device-tree compatible strings this driver binds to */
static const struct of_device_id gr2d_match[] = {
	{ .compatible = "nvidia,tegra30-gr2d" },
	{ .compatible = "nvidia,tegra20-gr2d" },
	{ },
};
251
252static int gr2d_probe(struct platform_device *pdev)
253{
254 struct device *dev = &pdev->dev;
255 struct host1x_drm *host1x = host1x_get_drm_data(dev->parent);
256 int err;
257 struct gr2d *gr2d = NULL;
258 struct host1x_syncpt **syncpts;
259
260 gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
261 if (!gr2d)
262 return -ENOMEM;
263
264 syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
265 if (!syncpts)
266 return -ENOMEM;
267
268 gr2d->clk = devm_clk_get(dev, NULL);
269 if (IS_ERR(gr2d->clk)) {
270 dev_err(dev, "cannot get clock\n");
271 return PTR_ERR(gr2d->clk);
272 }
273
274 err = clk_prepare_enable(gr2d->clk);
275 if (err) {
276 dev_err(dev, "cannot turn on clock\n");
277 return err;
278 }
279
280 gr2d->channel = host1x_channel_request(dev);
281 if (!gr2d->channel)
282 return -ENOMEM;
283
284 *syncpts = host1x_syncpt_request(dev, 0);
285 if (!(*syncpts)) {
286 host1x_channel_free(gr2d->channel);
287 return -ENOMEM;
288 }
289
290 gr2d->client.ops = &gr2d_client_ops;
291 gr2d->client.dev = dev;
292 gr2d->client.class = HOST1X_CLASS_GR2D;
293 gr2d->client.syncpts = syncpts;
294 gr2d->client.num_syncpts = 1;
295
296 err = host1x_register_client(host1x, &gr2d->client);
297 if (err < 0) {
298 dev_err(dev, "failed to register host1x client: %d\n", err);
299 return err;
300 }
301
302 gr2d_init_addr_reg_map(dev, gr2d);
303
304 platform_set_drvdata(pdev, gr2d);
305
306 return 0;
307}
308
/*
 * Platform remove: unregister the host1x client and release the
 * syncpoints, channel and clock acquired in probe. Memory allocated
 * with devm_* is released automatically.
 */
static int __exit gr2d_remove(struct platform_device *pdev)
{
	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
	struct gr2d *gr2d = platform_get_drvdata(pdev);
	unsigned int i;
	int err;

	err = host1x_unregister_client(host1x, &gr2d->client);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister client: %d\n", err);
		return err;
	}

	for (i = 0; i < gr2d->client.num_syncpts; i++)
		host1x_syncpt_free(gr2d->client.syncpts[i]);

	host1x_channel_free(gr2d->channel);
	clk_disable_unprepare(gr2d->clk);

	return 0;
}
330
/* platform driver for the Tegra 2D engine (child of the host1x bus) */
struct platform_driver tegra_gr2d_driver = {
	.probe = gr2d_probe,
	.remove = __exit_p(gr2d_remove),
	.driver = {
		.owner = THIS_MODULE,
		.name = "gr2d",
		.of_match_table = gr2d_match,
	}
};
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/host1x/drm/hdmi.c
index bb747f6cd1a4..01097da09f7f 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/host1x/drm/hdmi.c
@@ -22,6 +22,7 @@
22#include "hdmi.h" 22#include "hdmi.h"
23#include "drm.h" 23#include "drm.h"
24#include "dc.h" 24#include "dc.h"
25#include "host1x_client.h"
25 26
26struct tegra_hdmi { 27struct tegra_hdmi {
27 struct host1x_client client; 28 struct host1x_client client;
@@ -1189,7 +1190,7 @@ static const struct host1x_client_ops hdmi_client_ops = {
1189 1190
1190static int tegra_hdmi_probe(struct platform_device *pdev) 1191static int tegra_hdmi_probe(struct platform_device *pdev)
1191{ 1192{
1192 struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); 1193 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
1193 struct tegra_hdmi *hdmi; 1194 struct tegra_hdmi *hdmi;
1194 struct resource *regs; 1195 struct resource *regs;
1195 int err; 1196 int err;
@@ -1278,7 +1279,7 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
1278 1279
1279static int tegra_hdmi_remove(struct platform_device *pdev) 1280static int tegra_hdmi_remove(struct platform_device *pdev)
1280{ 1281{
1281 struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); 1282 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
1282 struct tegra_hdmi *hdmi = platform_get_drvdata(pdev); 1283 struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
1283 int err; 1284 int err;
1284 1285
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/host1x/drm/hdmi.h
index 52ac36e08ccb..52ac36e08ccb 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/host1x/drm/hdmi.h
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/host1x/drm/output.c
index 8140fc6c34d8..8140fc6c34d8 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/host1x/drm/output.c
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/host1x/drm/rgb.c
index ed4416f20260..ed4416f20260 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/host1x/drm/rgb.c
diff --git a/drivers/gpu/host1x/host1x.h b/drivers/gpu/host1x/host1x.h
new file mode 100644
index 000000000000..a2bc1e65e972
--- /dev/null
+++ b/drivers/gpu/host1x/host1x.h
@@ -0,0 +1,30 @@
1/*
2 * Tegra host1x driver
3 *
4 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#ifndef __LINUX_HOST1X_H
22#define __LINUX_HOST1X_H
23
24enum host1x_class {
25 HOST1X_CLASS_HOST1X = 0x1,
26 HOST1X_CLASS_GR2D = 0x51,
27 HOST1X_CLASS_GR2D_SB = 0x52
28};
29
30#endif
diff --git a/drivers/gpu/host1x/host1x_bo.h b/drivers/gpu/host1x/host1x_bo.h
new file mode 100644
index 000000000000..4c1f10bd773d
--- /dev/null
+++ b/drivers/gpu/host1x/host1x_bo.h
@@ -0,0 +1,87 @@
1/*
2 * Tegra host1x Memory Management Abstraction header
3 *
4 * Copyright (c) 2012-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _HOST1X_BO_H
20#define _HOST1X_BO_H
21
22struct host1x_bo;
23
24struct host1x_bo_ops {
25 struct host1x_bo *(*get)(struct host1x_bo *bo);
26 void (*put)(struct host1x_bo *bo);
27 dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
28 void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
29 void *(*mmap)(struct host1x_bo *bo);
30 void (*munmap)(struct host1x_bo *bo, void *addr);
31 void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
32 void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
33};
34
35struct host1x_bo {
36 const struct host1x_bo_ops *ops;
37};
38
39static inline void host1x_bo_init(struct host1x_bo *bo,
40 const struct host1x_bo_ops *ops)
41{
42 bo->ops = ops;
43}
44
45static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
46{
47 return bo->ops->get(bo);
48}
49
50static inline void host1x_bo_put(struct host1x_bo *bo)
51{
52 bo->ops->put(bo);
53}
54
55static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
56 struct sg_table **sgt)
57{
58 return bo->ops->pin(bo, sgt);
59}
60
61static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
62{
63 bo->ops->unpin(bo, sgt);
64}
65
66static inline void *host1x_bo_mmap(struct host1x_bo *bo)
67{
68 return bo->ops->mmap(bo);
69}
70
71static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
72{
73 bo->ops->munmap(bo, addr);
74}
75
76static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
77{
78 return bo->ops->kmap(bo, pagenum);
79}
80
81static inline void host1x_bo_kunmap(struct host1x_bo *bo,
82 unsigned int pagenum, void *addr)
83{
84 bo->ops->kunmap(bo, pagenum, addr);
85}
86
87#endif
diff --git a/drivers/gpu/host1x/host1x_client.h b/drivers/gpu/host1x/host1x_client.h
new file mode 100644
index 000000000000..9b85f10f4a44
--- /dev/null
+++ b/drivers/gpu/host1x/host1x_client.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright (c) 2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef HOST1X_CLIENT_H
18#define HOST1X_CLIENT_H
19
20struct device;
21struct platform_device;
22
23#ifdef CONFIG_DRM_TEGRA
24int host1x_drm_alloc(struct platform_device *pdev);
25#else
26static inline int host1x_drm_alloc(struct platform_device *pdev)
27{
28 return 0;
29}
30#endif
31
32void host1x_set_drm_data(struct device *dev, void *data);
33void *host1x_get_drm_data(struct device *dev);
34
35#endif
diff --git a/drivers/gpu/host1x/hw/Makefile b/drivers/gpu/host1x/hw/Makefile
new file mode 100644
index 000000000000..9b50863a2236
--- /dev/null
+++ b/drivers/gpu/host1x/hw/Makefile
@@ -0,0 +1,6 @@
1ccflags-y = -Idrivers/gpu/host1x
2
3host1x-hw-objs = \
4 host1x01.o
5
6obj-$(CONFIG_TEGRA_HOST1X) += host1x-hw.o
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
new file mode 100644
index 000000000000..590b69d91dab
--- /dev/null
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -0,0 +1,326 @@
1/*
2 * Tegra host1x Command DMA
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/slab.h>
20#include <linux/scatterlist.h>
21#include <linux/dma-mapping.h>
22
23#include "cdma.h"
24#include "channel.h"
25#include "dev.h"
26#include "debug.h"
27
28/*
29 * Put the restart at the end of pushbuffer memor
30 */
31static void push_buffer_init(struct push_buffer *pb)
32{
33 *(pb->mapped + (pb->size_bytes >> 2)) = host1x_opcode_restart(0);
34}
35
36/*
37 * Increment timedout buffer's syncpt via CPU.
38 */
39static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
40 u32 syncpt_incrs, u32 syncval, u32 nr_slots)
41{
42 struct host1x *host1x = cdma_to_host1x(cdma);
43 struct push_buffer *pb = &cdma->push_buffer;
44 u32 i;
45
46 for (i = 0; i < syncpt_incrs; i++)
47 host1x_syncpt_cpu_incr(cdma->timeout.syncpt);
48
49 /* after CPU incr, ensure shadow is up to date */
50 host1x_syncpt_load(cdma->timeout.syncpt);
51
52 /* NOP all the PB slots */
53 while (nr_slots--) {
54 u32 *p = (u32 *)((u32)pb->mapped + getptr);
55 *(p++) = HOST1X_OPCODE_NOP;
56 *(p++) = HOST1X_OPCODE_NOP;
57 dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__,
58 pb->phys + getptr);
59 getptr = (getptr + 8) & (pb->size_bytes - 1);
60 }
61 wmb();
62}
63
64/*
65 * Start channel DMA
66 */
67static void cdma_start(struct host1x_cdma *cdma)
68{
69 struct host1x_channel *ch = cdma_to_channel(cdma);
70
71 if (cdma->running)
72 return;
73
74 cdma->last_pos = cdma->push_buffer.pos;
75
76 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
77 HOST1X_CHANNEL_DMACTRL);
78
79 /* set base, put and end pointer */
80 host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
81 host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
82 host1x_ch_writel(ch, cdma->push_buffer.phys +
83 cdma->push_buffer.size_bytes + 4,
84 HOST1X_CHANNEL_DMAEND);
85
86 /* reset GET */
87 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
88 HOST1X_CHANNEL_DMACTRL_DMAGETRST |
89 HOST1X_CHANNEL_DMACTRL_DMAINITGET,
90 HOST1X_CHANNEL_DMACTRL);
91
92 /* start the command DMA */
93 host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
94
95 cdma->running = true;
96}
97
98/*
99 * Similar to cdma_start(), but rather than starting from an idle
100 * state (where DMA GET is set to DMA PUT), on a timeout we restore
101 * DMA GET from an explicit value (so DMA may again be pending).
102 */
103static void cdma_timeout_restart(struct host1x_cdma *cdma, u32 getptr)
104{
105 struct host1x *host1x = cdma_to_host1x(cdma);
106 struct host1x_channel *ch = cdma_to_channel(cdma);
107
108 if (cdma->running)
109 return;
110
111 cdma->last_pos = cdma->push_buffer.pos;
112
113 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
114 HOST1X_CHANNEL_DMACTRL);
115
116 /* set base, end pointer (all of memory) */
117 host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
118 host1x_ch_writel(ch, cdma->push_buffer.phys +
119 cdma->push_buffer.size_bytes,
120 HOST1X_CHANNEL_DMAEND);
121
122 /* set GET, by loading the value in PUT (then reset GET) */
123 host1x_ch_writel(ch, getptr, HOST1X_CHANNEL_DMAPUT);
124 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
125 HOST1X_CHANNEL_DMACTRL_DMAGETRST |
126 HOST1X_CHANNEL_DMACTRL_DMAINITGET,
127 HOST1X_CHANNEL_DMACTRL);
128
129 dev_dbg(host1x->dev,
130 "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", __func__,
131 host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
132 host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
133 cdma->last_pos);
134
135 /* deassert GET reset and set PUT */
136 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
137 HOST1X_CHANNEL_DMACTRL);
138 host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
139
140 /* start the command DMA */
141 host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
142
143 cdma->running = true;
144}
145
146/*
147 * Kick channel DMA into action by writing its PUT offset (if it has changed)
148 */
149static void cdma_flush(struct host1x_cdma *cdma)
150{
151 struct host1x_channel *ch = cdma_to_channel(cdma);
152
153 if (cdma->push_buffer.pos != cdma->last_pos) {
154 host1x_ch_writel(ch, cdma->push_buffer.pos,
155 HOST1X_CHANNEL_DMAPUT);
156 cdma->last_pos = cdma->push_buffer.pos;
157 }
158}
159
160static void cdma_stop(struct host1x_cdma *cdma)
161{
162 struct host1x_channel *ch = cdma_to_channel(cdma);
163
164 mutex_lock(&cdma->lock);
165 if (cdma->running) {
166 host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
167 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
168 HOST1X_CHANNEL_DMACTRL);
169 cdma->running = false;
170 }
171 mutex_unlock(&cdma->lock);
172}
173
174/*
175 * Stops both channel's command processor and CDMA immediately.
176 * Also, tears down the channel and resets corresponding module.
177 */
178static void cdma_freeze(struct host1x_cdma *cdma)
179{
180 struct host1x *host = cdma_to_host1x(cdma);
181 struct host1x_channel *ch = cdma_to_channel(cdma);
182 u32 cmdproc_stop;
183
184 if (cdma->torndown && !cdma->running) {
185 dev_warn(host->dev, "Already torn down\n");
186 return;
187 }
188
189 dev_dbg(host->dev, "freezing channel (id %d)\n", ch->id);
190
191 cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP);
192 cmdproc_stop |= BIT(ch->id);
193 host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
194
195 dev_dbg(host->dev, "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
196 __func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
197 host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
198 cdma->last_pos);
199
200 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
201 HOST1X_CHANNEL_DMACTRL);
202
203 host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN);
204
205 cdma->running = false;
206 cdma->torndown = true;
207}
208
209static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
210{
211 struct host1x *host1x = cdma_to_host1x(cdma);
212 struct host1x_channel *ch = cdma_to_channel(cdma);
213 u32 cmdproc_stop;
214
215 dev_dbg(host1x->dev,
216 "resuming channel (id %d, DMAGET restart = 0x%x)\n",
217 ch->id, getptr);
218
219 cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
220 cmdproc_stop &= ~(BIT(ch->id));
221 host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
222
223 cdma->torndown = false;
224 cdma_timeout_restart(cdma, getptr);
225}
226
227/*
228 * If this timeout fires, it indicates the current sync_queue entry has
229 * exceeded its TTL and the userctx should be timed out and remaining
230 * submits already issued cleaned up (future submits return an error).
231 */
232static void cdma_timeout_handler(struct work_struct *work)
233{
234 struct host1x_cdma *cdma;
235 struct host1x *host1x;
236 struct host1x_channel *ch;
237
238 u32 syncpt_val;
239
240 u32 prev_cmdproc, cmdproc_stop;
241
242 cdma = container_of(to_delayed_work(work), struct host1x_cdma,
243 timeout.wq);
244 host1x = cdma_to_host1x(cdma);
245 ch = cdma_to_channel(cdma);
246
247 host1x_debug_dump(cdma_to_host1x(cdma));
248
249 mutex_lock(&cdma->lock);
250
251 if (!cdma->timeout.client) {
252 dev_dbg(host1x->dev,
253 "cdma_timeout: expired, but has no clientid\n");
254 mutex_unlock(&cdma->lock);
255 return;
256 }
257
258 /* stop processing to get a clean snapshot */
259 prev_cmdproc = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
260 cmdproc_stop = prev_cmdproc | BIT(ch->id);
261 host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
262
263 dev_dbg(host1x->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
264 prev_cmdproc, cmdproc_stop);
265
266 syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
267
268 /* has buffer actually completed? */
269 if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) {
270 dev_dbg(host1x->dev,
271 "cdma_timeout: expired, but buffer had completed\n");
272 /* restore */
273 cmdproc_stop = prev_cmdproc & ~(BIT(ch->id));
274 host1x_sync_writel(host1x, cmdproc_stop,
275 HOST1X_SYNC_CMDPROC_STOP);
276 mutex_unlock(&cdma->lock);
277 return;
278 }
279
280 dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n",
281 __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
282 syncpt_val, cdma->timeout.syncpt_val);
283
284 /* stop HW, resetting channel/module */
285 host1x_hw_cdma_freeze(host1x, cdma);
286
287 host1x_cdma_update_sync_queue(cdma, ch->dev);
288 mutex_unlock(&cdma->lock);
289}
290
291/*
292 * Init timeout resources
293 */
294static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id)
295{
296 INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
297 cdma->timeout.initialized = true;
298
299 return 0;
300}
301
302/*
303 * Clean up timeout resources
304 */
305static void cdma_timeout_destroy(struct host1x_cdma *cdma)
306{
307 if (cdma->timeout.initialized)
308 cancel_delayed_work(&cdma->timeout.wq);
309 cdma->timeout.initialized = false;
310}
311
312static const struct host1x_cdma_ops host1x_cdma_ops = {
313 .start = cdma_start,
314 .stop = cdma_stop,
315 .flush = cdma_flush,
316
317 .timeout_init = cdma_timeout_init,
318 .timeout_destroy = cdma_timeout_destroy,
319 .freeze = cdma_freeze,
320 .resume = cdma_resume,
321 .timeout_cpu_incr = cdma_timeout_cpu_incr,
322};
323
324static const struct host1x_pushbuffer_ops host1x_pushbuffer_ops = {
325 .init = push_buffer_init,
326};
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
new file mode 100644
index 000000000000..ee199623e365
--- /dev/null
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -0,0 +1,168 @@
1/*
2 * Tegra host1x Channel
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/slab.h>
20#include <trace/events/host1x.h>
21
22#include "host1x.h"
23#include "host1x_bo.h"
24#include "channel.h"
25#include "dev.h"
26#include "intr.h"
27#include "job.h"
28
29#define HOST1X_CHANNEL_SIZE 16384
30#define TRACE_MAX_LENGTH 128U
31
32static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
33 u32 offset, u32 words)
34{
35 void *mem = NULL;
36
37 if (host1x_debug_trace_cmdbuf)
38 mem = host1x_bo_mmap(bo);
39
40 if (mem) {
41 u32 i;
42 /*
43 * Write in batches of 128 as there seems to be a limit
44 * of how much you can output to ftrace at once.
45 */
46 for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
47 trace_host1x_cdma_push_gather(
48 dev_name(cdma_to_channel(cdma)->dev),
49 (u32)bo, min(words - i, TRACE_MAX_LENGTH),
50 offset + i * sizeof(u32), mem);
51 }
52 host1x_bo_munmap(bo, mem);
53 }
54}
55
56static void submit_gathers(struct host1x_job *job)
57{
58 struct host1x_cdma *cdma = &job->channel->cdma;
59 unsigned int i;
60
61 for (i = 0; i < job->num_gathers; i++) {
62 struct host1x_job_gather *g = &job->gathers[i];
63 u32 op1 = host1x_opcode_gather(g->words);
64 u32 op2 = g->base + g->offset;
65 trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
66 host1x_cdma_push(cdma, op1, op2);
67 }
68}
69
70static int channel_submit(struct host1x_job *job)
71{
72 struct host1x_channel *ch = job->channel;
73 struct host1x_syncpt *sp;
74 u32 user_syncpt_incrs = job->syncpt_incrs;
75 u32 prev_max = 0;
76 u32 syncval;
77 int err;
78 struct host1x_waitlist *completed_waiter = NULL;
79 struct host1x *host = dev_get_drvdata(ch->dev->parent);
80
81 sp = host->syncpt + job->syncpt_id;
82 trace_host1x_channel_submit(dev_name(ch->dev),
83 job->num_gathers, job->num_relocs,
84 job->num_waitchk, job->syncpt_id,
85 job->syncpt_incrs);
86
87 /* before error checks, return current max */
88 prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);
89
90 /* get submit lock */
91 err = mutex_lock_interruptible(&ch->submitlock);
92 if (err)
93 goto error;
94
95 completed_waiter = kzalloc(sizeof(*completed_waiter), GFP_KERNEL);
96 if (!completed_waiter) {
97 mutex_unlock(&ch->submitlock);
98 err = -ENOMEM;
99 goto error;
100 }
101
102 /* begin a CDMA submit */
103 err = host1x_cdma_begin(&ch->cdma, job);
104 if (err) {
105 mutex_unlock(&ch->submitlock);
106 goto error;
107 }
108
109 if (job->serialize) {
110 /*
111 * Force serialization by inserting a host wait for the
112 * previous job to finish before this one can commence.
113 */
114 host1x_cdma_push(&ch->cdma,
115 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
116 host1x_uclass_wait_syncpt_r(), 1),
117 host1x_class_host_wait_syncpt(job->syncpt_id,
118 host1x_syncpt_read_max(sp)));
119 }
120
121 syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
122
123 job->syncpt_end = syncval;
124
125 /* add a setclass for modules that require it */
126 if (job->class)
127 host1x_cdma_push(&ch->cdma,
128 host1x_opcode_setclass(job->class, 0, 0),
129 HOST1X_OPCODE_NOP);
130
131 submit_gathers(job);
132
133 /* end CDMA submit & stash pinned hMems into sync queue */
134 host1x_cdma_end(&ch->cdma, job);
135
136 trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval);
137
138 /* schedule a submit complete interrupt */
139 err = host1x_intr_add_action(host, job->syncpt_id, syncval,
140 HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch,
141 completed_waiter, NULL);
142 completed_waiter = NULL;
143 WARN(err, "Failed to set submit complete interrupt");
144
145 mutex_unlock(&ch->submitlock);
146
147 return 0;
148
149error:
150 kfree(completed_waiter);
151 return err;
152}
153
154static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
155 unsigned int index)
156{
157 ch->id = index;
158 mutex_init(&ch->reflock);
159 mutex_init(&ch->submitlock);
160
161 ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE;
162 return 0;
163}
164
165static const struct host1x_channel_ops host1x_channel_ops = {
166 .init = host1x_channel_init,
167 .submit = channel_submit,
168};
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
new file mode 100644
index 000000000000..334c038052f5
--- /dev/null
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -0,0 +1,322 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 * Author: Erik Gilling <konkers@android.com>
4 *
5 * Copyright (C) 2011-2013 NVIDIA Corporation
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/debugfs.h>
19#include <linux/seq_file.h>
20#include <linux/mm.h>
21#include <linux/scatterlist.h>
22
23#include <linux/io.h>
24
25#include "dev.h"
26#include "debug.h"
27#include "cdma.h"
28#include "channel.h"
29#include "host1x_bo.h"
30
31#define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
32
33enum {
34 HOST1X_OPCODE_SETCLASS = 0x00,
35 HOST1X_OPCODE_INCR = 0x01,
36 HOST1X_OPCODE_NONINCR = 0x02,
37 HOST1X_OPCODE_MASK = 0x03,
38 HOST1X_OPCODE_IMM = 0x04,
39 HOST1X_OPCODE_RESTART = 0x05,
40 HOST1X_OPCODE_GATHER = 0x06,
41 HOST1X_OPCODE_EXTEND = 0x0e,
42};
43
44enum {
45 HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK = 0x00,
46 HOST1X_OPCODE_EXTEND_RELEASE_MLOCK = 0x01,
47};
48
49static unsigned int show_channel_command(struct output *o, u32 val)
50{
51 unsigned mask;
52 unsigned subop;
53
54 switch (val >> 28) {
55 case HOST1X_OPCODE_SETCLASS:
56 mask = val & 0x3f;
57 if (mask) {
58 host1x_debug_output(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
59 val >> 6 & 0x3ff,
60 val >> 16 & 0xfff, mask);
61 return hweight8(mask);
62 } else {
63 host1x_debug_output(o, "SETCL(class=%03x)\n",
64 val >> 6 & 0x3ff);
65 return 0;
66 }
67
68 case HOST1X_OPCODE_INCR:
69 host1x_debug_output(o, "INCR(offset=%03x, [",
70 val >> 16 & 0xfff);
71 return val & 0xffff;
72
73 case HOST1X_OPCODE_NONINCR:
74 host1x_debug_output(o, "NONINCR(offset=%03x, [",
75 val >> 16 & 0xfff);
76 return val & 0xffff;
77
78 case HOST1X_OPCODE_MASK:
79 mask = val & 0xffff;
80 host1x_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
81 val >> 16 & 0xfff, mask);
82 return hweight16(mask);
83
84 case HOST1X_OPCODE_IMM:
85 host1x_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
86 val >> 16 & 0xfff, val & 0xffff);
87 return 0;
88
89 case HOST1X_OPCODE_RESTART:
90 host1x_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
91 return 0;
92
93 case HOST1X_OPCODE_GATHER:
94 host1x_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
95 val >> 16 & 0xfff, val >> 15 & 0x1,
96 val >> 14 & 0x1, val & 0x3fff);
97 return 1;
98
99 case HOST1X_OPCODE_EXTEND:
100 subop = val >> 24 & 0xf;
101 if (subop == HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK)
102 host1x_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
103 val & 0xff);
104 else if (subop == HOST1X_OPCODE_EXTEND_RELEASE_MLOCK)
105 host1x_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
106 val & 0xff);
107 else
108 host1x_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
109 return 0;
110
111 default:
112 return 0;
113 }
114}
115
116static void show_gather(struct output *o, phys_addr_t phys_addr,
117 unsigned int words, struct host1x_cdma *cdma,
118 phys_addr_t pin_addr, u32 *map_addr)
119{
120 /* Map dmaget cursor to corresponding mem handle */
121 u32 offset = phys_addr - pin_addr;
122 unsigned int data_count = 0, i;
123
124 /*
125 * Sometimes we're given different hardware address to the same
126 * page - in these cases the offset will get an invalid number and
127 * we just have to bail out.
128 */
129 if (offset > HOST1X_DEBUG_MAX_PAGE_OFFSET) {
130 host1x_debug_output(o, "[address mismatch]\n");
131 return;
132 }
133
134 for (i = 0; i < words; i++) {
135 u32 addr = phys_addr + i * 4;
136 u32 val = *(map_addr + offset / 4 + i);
137
138 if (!data_count) {
139 host1x_debug_output(o, "%08x: %08x:", addr, val);
140 data_count = show_channel_command(o, val);
141 } else {
142 host1x_debug_output(o, "%08x%s", val,
143 data_count > 0 ? ", " : "])\n");
144 data_count--;
145 }
146 }
147}
148
149static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
150{
151 struct host1x_job *job;
152
153 list_for_each_entry(job, &cdma->sync_queue, list) {
154 int i;
155 host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
156 job, job->syncpt_id, job->syncpt_end,
157 job->first_get, job->timeout,
158 job->num_slots, job->num_unpins);
159
160 for (i = 0; i < job->num_gathers; i++) {
161 struct host1x_job_gather *g = &job->gathers[i];
162 u32 *mapped;
163
164 if (job->gather_copy_mapped)
165 mapped = (u32 *)job->gather_copy_mapped;
166 else
167 mapped = host1x_bo_mmap(g->bo);
168
169 if (!mapped) {
170 host1x_debug_output(o, "[could not mmap]\n");
171 continue;
172 }
173
174 host1x_debug_output(o, " GATHER at %08x+%04x, %d words\n",
175 g->base, g->offset, g->words);
176
177 show_gather(o, g->base + g->offset, g->words, cdma,
178 g->base, mapped);
179
180 if (!job->gather_copy_mapped)
181 host1x_bo_munmap(g->bo, mapped);
182 }
183 }
184}
185
186static void host1x_debug_show_channel_cdma(struct host1x *host,
187 struct host1x_channel *ch,
188 struct output *o)
189{
190 struct host1x_cdma *cdma = &ch->cdma;
191 u32 dmaput, dmaget, dmactrl;
192 u32 cbstat, cbread;
193 u32 val, base, baseval;
194
195 dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
196 dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
197 dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
198 cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
199 cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
200
201 host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev));
202
203 if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
204 !ch->cdma.push_buffer.mapped) {
205 host1x_debug_output(o, "inactive\n\n");
206 return;
207 }
208
209 if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
210 HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
211 HOST1X_UCLASS_WAIT_SYNCPT)
212 host1x_debug_output(o, "waiting on syncpt %d val %d\n",
213 cbread >> 24, cbread & 0xffffff);
214 else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
215 HOST1X_CLASS_HOST1X &&
216 HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
217 HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
218
219 base = (cbread >> 16) & 0xff;
220 baseval =
221 host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
222 val = cbread & 0xffff;
223 host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n",
224 cbread >> 24, baseval + val, base,
225 baseval, val);
226 } else
227 host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n",
228 HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat),
229 HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat),
230 cbread);
231
232 host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
233 dmaput, dmaget, dmactrl);
234 host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
235
236 show_channel_gathers(o, cdma);
237 host1x_debug_output(o, "\n");
238}
239
240static void host1x_debug_show_channel_fifo(struct host1x *host,
241 struct host1x_channel *ch,
242 struct output *o)
243{
244 u32 val, rd_ptr, wr_ptr, start, end;
245 unsigned int data_count = 0;
246
247 host1x_debug_output(o, "%d: fifo:\n", ch->id);
248
249 val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
250 host1x_debug_output(o, "FIFOSTAT %08x\n", val);
251 if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) {
252 host1x_debug_output(o, "[empty]\n");
253 return;
254 }
255
256 host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
257 host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
258 HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id),
259 HOST1X_SYNC_CFPEEK_CTRL);
260
261 val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS);
262 rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val);
263 wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val);
264
265 val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id));
266 start = HOST1X_SYNC_CF_SETUP_BASE_V(val);
267 end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val);
268
269 do {
270 host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
271 host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
272 HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) |
273 HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr),
274 HOST1X_SYNC_CFPEEK_CTRL);
275 val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ);
276
277 if (!data_count) {
278 host1x_debug_output(o, "%08x:", val);
279 data_count = show_channel_command(o, val);
280 } else {
281 host1x_debug_output(o, "%08x%s", val,
282 data_count > 0 ? ", " : "])\n");
283 data_count--;
284 }
285
286 if (rd_ptr == end)
287 rd_ptr = start;
288 else
289 rd_ptr++;
290 } while (rd_ptr != wr_ptr);
291
292 if (data_count)
293 host1x_debug_output(o, ", ...])\n");
294 host1x_debug_output(o, "\n");
295
296 host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
297}
298
299static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
300{
301 int i;
302
303 host1x_debug_output(o, "---- mlocks ----\n");
304 for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
305 u32 owner =
306 host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
307 if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
308 host1x_debug_output(o, "%d: locked by channel %d\n",
309 i, HOST1X_SYNC_MLOCK_OWNER_CHID_F(owner));
310 else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
311 host1x_debug_output(o, "%d: locked by cpu\n", i);
312 else
313 host1x_debug_output(o, "%d: unlocked\n", i);
314 }
315 host1x_debug_output(o, "\n");
316}
317
318static const struct host1x_debug_ops host1x_debug_ops = {
319 .show_channel_cdma = host1x_debug_show_channel_cdma,
320 .show_channel_fifo = host1x_debug_show_channel_fifo,
321 .show_mlocks = host1x_debug_show_mlocks,
322};
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
new file mode 100644
index 000000000000..a14e91cd1e58
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x01.c
@@ -0,0 +1,42 @@
1/*
2 * Host1x init for T20 and T30 Architecture Chips
3 *
4 * Copyright (c) 2011-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19/* include hw specification */
20#include "hw/host1x01.h"
21#include "hw/host1x01_hardware.h"
22
23/* include code */
24#include "hw/cdma_hw.c"
25#include "hw/channel_hw.c"
26#include "hw/debug_hw.c"
27#include "hw/intr_hw.c"
28#include "hw/syncpt_hw.c"
29
30#include "dev.h"
31
/*
 * host1x01_init() - install the Tegra20/Tegra30 (host1x01) op tables
 * @host: host1x instance to initialize
 *
 * Wires the host's operation vectors to the implementations pulled in
 * by the unity-build #includes above.  Always returns 0.
 */
int host1x01_init(struct host1x *host)
{
	host->channel_op = &host1x_channel_ops;
	host->cdma_op = &host1x_cdma_ops;
	host->cdma_pb_op = &host1x_pushbuffer_ops;
	host->syncpt_op = &host1x_syncpt_ops;
	host->intr_op = &host1x_intr_ops;
	host->debug_op = &host1x_debug_ops;

	return 0;
}
diff --git a/drivers/gpu/host1x/hw/host1x01.h b/drivers/gpu/host1x/hw/host1x01.h
new file mode 100644
index 000000000000..2706b6743250
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x01.h
@@ -0,0 +1,25 @@
1/*
2 * Host1x init for T20 and T30 Architecture Chips
3 *
4 * Copyright (c) 2011-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef HOST1X_HOST1X01_H
19#define HOST1X_HOST1X01_H
20
21struct host1x;
22
23int host1x01_init(struct host1x *host);
24
#endif /* HOST1X_HOST1X01_H */
diff --git a/drivers/gpu/host1x/hw/host1x01_hardware.h b/drivers/gpu/host1x/hw/host1x01_hardware.h
new file mode 100644
index 000000000000..5f0fb866efa8
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x01_hardware.h
@@ -0,0 +1,143 @@
1/*
2 * Tegra host1x Register Offsets for Tegra20 and Tegra30
3 *
4 * Copyright (c) 2010-2013 NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_HOST1X01_HARDWARE_H
20#define __HOST1X_HOST1X01_HARDWARE_H
21
22#include <linux/types.h>
23#include <linux/bitops.h>
24
25#include "hw_host1x01_channel.h"
26#include "hw_host1x01_sync.h"
27#include "hw_host1x01_uclass.h"
28
29static inline u32 host1x_class_host_wait_syncpt(
30 unsigned indx, unsigned threshold)
31{
32 return host1x_uclass_wait_syncpt_indx_f(indx)
33 | host1x_uclass_wait_syncpt_thresh_f(threshold);
34}
35
36static inline u32 host1x_class_host_load_syncpt_base(
37 unsigned indx, unsigned threshold)
38{
39 return host1x_uclass_load_syncpt_base_base_indx_f(indx)
40 | host1x_uclass_load_syncpt_base_value_f(threshold);
41}
42
43static inline u32 host1x_class_host_wait_syncpt_base(
44 unsigned indx, unsigned base_indx, unsigned offset)
45{
46 return host1x_uclass_wait_syncpt_base_indx_f(indx)
47 | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
48 | host1x_uclass_wait_syncpt_base_offset_f(offset);
49}
50
51static inline u32 host1x_class_host_incr_syncpt_base(
52 unsigned base_indx, unsigned offset)
53{
54 return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
55 | host1x_uclass_incr_syncpt_base_offset_f(offset);
56}
57
58static inline u32 host1x_class_host_incr_syncpt(
59 unsigned cond, unsigned indx)
60{
61 return host1x_uclass_incr_syncpt_cond_f(cond)
62 | host1x_uclass_incr_syncpt_indx_f(indx);
63}
64
65static inline u32 host1x_class_host_indoff_reg_write(
66 unsigned mod_id, unsigned offset, bool auto_inc)
67{
68 u32 v = host1x_uclass_indoff_indbe_f(0xf)
69 | host1x_uclass_indoff_indmodid_f(mod_id)
70 | host1x_uclass_indoff_indroffset_f(offset);
71 if (auto_inc)
72 v |= host1x_uclass_indoff_autoinc_f(1);
73 return v;
74}
75
76static inline u32 host1x_class_host_indoff_reg_read(
77 unsigned mod_id, unsigned offset, bool auto_inc)
78{
79 u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
80 | host1x_uclass_indoff_indroffset_f(offset)
81 | host1x_uclass_indoff_rwn_read_v();
82 if (auto_inc)
83 v |= host1x_uclass_indoff_autoinc_f(1);
84 return v;
85}
86
87
/* cdma opcodes */
/*
 * Command-stream opcode builders.  In every encoding below, bits [31:28]
 * select the opcode; the remaining bits carry opcode-specific operands.
 */

/* SETCLASS (opcode 0): select @class_id, with @offset and write @mask */
static inline u32 host1x_opcode_setclass(
	unsigned class_id, unsigned offset, unsigned mask)
{
	return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
}

/* INCR (opcode 1): write @count words to consecutive registers from @offset */
static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
{
	return (1 << 28) | (offset << 16) | count;
}

/* NONINCR (opcode 2): write @count words, all to the same register @offset */
static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
{
	return (2 << 28) | (offset << 16) | count;
}

/* MASK (opcode 3): write to the registers selected by the bits of @mask */
static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
{
	return (3 << 28) | (offset << 16) | mask;
}

/* IMM (opcode 4): write the 16-bit immediate @value to register @offset */
static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
{
	return (4 << 28) | (offset << 16) | value;
}

/* IMM write to INCR_SYNCPT: increment syncpoint @indx on condition @cond */
static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
{
	return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
		host1x_class_host_incr_syncpt(cond, indx));
}

/* RESTART (opcode 5): operand is @address in 16-byte units (hence >> 4) */
static inline u32 host1x_opcode_restart(unsigned address)
{
	return (5 << 28) | (address >> 4);
}

/* GATHER (opcode 6): fetch @count words from the address in the next word */
static inline u32 host1x_opcode_gather(unsigned count)
{
	return (6 << 28) | count;
}

/* GATHER, non-incrementing writes to @offset (bit 15 set, bit 14 clear) */
static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
{
	return (6 << 28) | (offset << 16) | BIT(15) | count;
}

/* GATHER, incrementing writes starting at @offset (bits 15 and 14 set) */
static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
{
	return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
}

/* A NONINCR of zero words to register 0 is the canonical no-op */
#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
142
143#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_channel.h b/drivers/gpu/host1x/hw/hw_host1x01_channel.h
new file mode 100644
index 000000000000..b4bc7ca4e051
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x01_channel.h
@@ -0,0 +1,120 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef __hw_host1x_channel_host1x_h__
52#define __hw_host1x_channel_host1x_h__
53
54static inline u32 host1x_channel_fifostat_r(void)
55{
56 return 0x0;
57}
58#define HOST1X_CHANNEL_FIFOSTAT \
59 host1x_channel_fifostat_r()
60static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
61{
62 return (r >> 10) & 0x1;
63}
64#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
65 host1x_channel_fifostat_cfempty_v(r)
66static inline u32 host1x_channel_dmastart_r(void)
67{
68 return 0x14;
69}
70#define HOST1X_CHANNEL_DMASTART \
71 host1x_channel_dmastart_r()
72static inline u32 host1x_channel_dmaput_r(void)
73{
74 return 0x18;
75}
76#define HOST1X_CHANNEL_DMAPUT \
77 host1x_channel_dmaput_r()
78static inline u32 host1x_channel_dmaget_r(void)
79{
80 return 0x1c;
81}
82#define HOST1X_CHANNEL_DMAGET \
83 host1x_channel_dmaget_r()
84static inline u32 host1x_channel_dmaend_r(void)
85{
86 return 0x20;
87}
88#define HOST1X_CHANNEL_DMAEND \
89 host1x_channel_dmaend_r()
90static inline u32 host1x_channel_dmactrl_r(void)
91{
92 return 0x24;
93}
94#define HOST1X_CHANNEL_DMACTRL \
95 host1x_channel_dmactrl_r()
96static inline u32 host1x_channel_dmactrl_dmastop(void)
97{
98 return 1 << 0;
99}
100#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
101 host1x_channel_dmactrl_dmastop()
102static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
103{
104 return (r >> 0) & 0x1;
105}
106#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
107 host1x_channel_dmactrl_dmastop_v(r)
108static inline u32 host1x_channel_dmactrl_dmagetrst(void)
109{
110 return 1 << 1;
111}
112#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
113 host1x_channel_dmactrl_dmagetrst()
114static inline u32 host1x_channel_dmactrl_dmainitget(void)
115{
116 return 1 << 2;
117}
118#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
119 host1x_channel_dmactrl_dmainitget()
120#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_sync.h b/drivers/gpu/host1x/hw/hw_host1x01_sync.h
new file mode 100644
index 000000000000..ac704e579977
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x01_sync.h
@@ -0,0 +1,243 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef __hw_host1x01_sync_h__
52#define __hw_host1x01_sync_h__
53
54#define REGISTER_STRIDE 4
55
56static inline u32 host1x_sync_syncpt_r(unsigned int id)
57{
58 return 0x400 + id * REGISTER_STRIDE;
59}
60#define HOST1X_SYNC_SYNCPT(id) \
61 host1x_sync_syncpt_r(id)
62static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
63{
64 return 0x40 + id * REGISTER_STRIDE;
65}
66#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
67 host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
68static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
69{
70 return 0x60 + id * REGISTER_STRIDE;
71}
72#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
73 host1x_sync_syncpt_thresh_int_disable_r(id)
74static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
75{
76 return 0x68 + id * REGISTER_STRIDE;
77}
78#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
79 host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
80static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
81{
82 return 0x80 + channel * REGISTER_STRIDE;
83}
84#define HOST1X_SYNC_CF_SETUP(channel) \
85 host1x_sync_cf_setup_r(channel)
86static inline u32 host1x_sync_cf_setup_base_v(u32 r)
87{
88 return (r >> 0) & 0x1ff;
89}
90#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
91 host1x_sync_cf_setup_base_v(r)
92static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
93{
94 return (r >> 16) & 0x1ff;
95}
96#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
97 host1x_sync_cf_setup_limit_v(r)
98static inline u32 host1x_sync_cmdproc_stop_r(void)
99{
100 return 0xac;
101}
102#define HOST1X_SYNC_CMDPROC_STOP \
103 host1x_sync_cmdproc_stop_r()
104static inline u32 host1x_sync_ch_teardown_r(void)
105{
106 return 0xb0;
107}
108#define HOST1X_SYNC_CH_TEARDOWN \
109 host1x_sync_ch_teardown_r()
110static inline u32 host1x_sync_usec_clk_r(void)
111{
112 return 0x1a4;
113}
114#define HOST1X_SYNC_USEC_CLK \
115 host1x_sync_usec_clk_r()
116static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
117{
118 return 0x1a8;
119}
120#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
121 host1x_sync_ctxsw_timeout_cfg_r()
122static inline u32 host1x_sync_ip_busy_timeout_r(void)
123{
124 return 0x1bc;
125}
126#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
127 host1x_sync_ip_busy_timeout_r()
128static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
129{
130 return 0x340 + id * REGISTER_STRIDE;
131}
132#define HOST1X_SYNC_MLOCK_OWNER(id) \
133 host1x_sync_mlock_owner_r(id)
134static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
135{
136 return (v & 0xf) << 8;
137}
138#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
139 host1x_sync_mlock_owner_chid_f(v)
140static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
141{
142 return (r >> 1) & 0x1;
143}
144#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
145 host1x_sync_mlock_owner_cpu_owns_v(r)
146static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
147{
148 return (r >> 0) & 0x1;
149}
150#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
151 host1x_sync_mlock_owner_ch_owns_v(r)
152static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
153{
154 return 0x500 + id * REGISTER_STRIDE;
155}
156#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
157 host1x_sync_syncpt_int_thresh_r(id)
158static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
159{
160 return 0x600 + id * REGISTER_STRIDE;
161}
162#define HOST1X_SYNC_SYNCPT_BASE(id) \
163 host1x_sync_syncpt_base_r(id)
164static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
165{
166 return 0x700 + id * REGISTER_STRIDE;
167}
168#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
169 host1x_sync_syncpt_cpu_incr_r(id)
170static inline u32 host1x_sync_cbread_r(unsigned int channel)
171{
172 return 0x720 + channel * REGISTER_STRIDE;
173}
174#define HOST1X_SYNC_CBREAD(channel) \
175 host1x_sync_cbread_r(channel)
176static inline u32 host1x_sync_cfpeek_ctrl_r(void)
177{
178 return 0x74c;
179}
180#define HOST1X_SYNC_CFPEEK_CTRL \
181 host1x_sync_cfpeek_ctrl_r()
182static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
183{
184 return (v & 0x1ff) << 0;
185}
186#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
187 host1x_sync_cfpeek_ctrl_addr_f(v)
188static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
189{
190 return (v & 0x7) << 16;
191}
192#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
193 host1x_sync_cfpeek_ctrl_channr_f(v)
194static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
195{
196 return (v & 0x1) << 31;
197}
198#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
199 host1x_sync_cfpeek_ctrl_ena_f(v)
200static inline u32 host1x_sync_cfpeek_read_r(void)
201{
202 return 0x750;
203}
204#define HOST1X_SYNC_CFPEEK_READ \
205 host1x_sync_cfpeek_read_r()
206static inline u32 host1x_sync_cfpeek_ptrs_r(void)
207{
208 return 0x754;
209}
210#define HOST1X_SYNC_CFPEEK_PTRS \
211 host1x_sync_cfpeek_ptrs_r()
212static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
213{
214 return (r >> 0) & 0x1ff;
215}
216#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
217 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
218static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
219{
220 return (r >> 16) & 0x1ff;
221}
222#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
223 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
224static inline u32 host1x_sync_cbstat_r(unsigned int channel)
225{
226 return 0x758 + channel * REGISTER_STRIDE;
227}
228#define HOST1X_SYNC_CBSTAT(channel) \
229 host1x_sync_cbstat_r(channel)
230static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
231{
232 return (r >> 0) & 0xffff;
233}
234#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
235 host1x_sync_cbstat_cboffset_v(r)
236static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
237{
238 return (r >> 16) & 0x3ff;
239}
240#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
241 host1x_sync_cbstat_cbclass_v(r)
242
243#endif /* __hw_host1x01_sync_h__ */
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
new file mode 100644
index 000000000000..42f3ce19ca32
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
@@ -0,0 +1,174 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef __hw_host1x_uclass_host1x_h__
52#define __hw_host1x_uclass_host1x_h__
53
54static inline u32 host1x_uclass_incr_syncpt_r(void)
55{
56 return 0x0;
57}
58#define HOST1X_UCLASS_INCR_SYNCPT \
59 host1x_uclass_incr_syncpt_r()
60static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
61{
62 return (v & 0xff) << 8;
63}
64#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
65 host1x_uclass_incr_syncpt_cond_f(v)
66static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
67{
68 return (v & 0xff) << 0;
69}
70#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
71 host1x_uclass_incr_syncpt_indx_f(v)
72static inline u32 host1x_uclass_wait_syncpt_r(void)
73{
74 return 0x8;
75}
76#define HOST1X_UCLASS_WAIT_SYNCPT \
77 host1x_uclass_wait_syncpt_r()
78static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
79{
80 return (v & 0xff) << 24;
81}
82#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
83 host1x_uclass_wait_syncpt_indx_f(v)
84static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
85{
86 return (v & 0xffffff) << 0;
87}
88#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
89 host1x_uclass_wait_syncpt_thresh_f(v)
90static inline u32 host1x_uclass_wait_syncpt_base_r(void)
91{
92 return 0x9;
93}
94#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
95 host1x_uclass_wait_syncpt_base_r()
96static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
97{
98 return (v & 0xff) << 24;
99}
100#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
101 host1x_uclass_wait_syncpt_base_indx_f(v)
102static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
103{
104 return (v & 0xff) << 16;
105}
106#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
107 host1x_uclass_wait_syncpt_base_base_indx_f(v)
108static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
109{
110 return (v & 0xffff) << 0;
111}
112#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
113 host1x_uclass_wait_syncpt_base_offset_f(v)
114static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
115{
116 return (v & 0xff) << 24;
117}
118#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
119 host1x_uclass_load_syncpt_base_base_indx_f(v)
120static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
121{
122 return (v & 0xffffff) << 0;
123}
124#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
125 host1x_uclass_load_syncpt_base_value_f(v)
126static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
127{
128 return (v & 0xff) << 24;
129}
130#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
131 host1x_uclass_incr_syncpt_base_base_indx_f(v)
132static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
133{
134 return (v & 0xffffff) << 0;
135}
136#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
137 host1x_uclass_incr_syncpt_base_offset_f(v)
138static inline u32 host1x_uclass_indoff_r(void)
139{
140 return 0x2d;
141}
142#define HOST1X_UCLASS_INDOFF \
143 host1x_uclass_indoff_r()
144static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
145{
146 return (v & 0xf) << 28;
147}
148#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
149 host1x_uclass_indoff_indbe_f(v)
150static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
151{
152 return (v & 0x1) << 27;
153}
154#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
155 host1x_uclass_indoff_autoinc_f(v)
156static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
157{
158 return (v & 0xff) << 18;
159}
160#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
161 host1x_uclass_indoff_indmodid_f(v)
162static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
163{
164 return (v & 0xffff) << 2;
165}
166#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
167 host1x_uclass_indoff_indroffset_f(v)
168static inline u32 host1x_uclass_indoff_rwn_read_v(void)
169{
170 return 1;
171}
172#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
173 host1x_uclass_indoff_indroffset_f(v)
174#endif
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
new file mode 100644
index 000000000000..b592eef1efcb
--- /dev/null
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -0,0 +1,143 @@
1/*
2 * Tegra host1x Interrupt Management
3 *
4 * Copyright (C) 2010 Google, Inc.
5 * Copyright (c) 2010-2013, NVIDIA Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/interrupt.h>
21#include <linux/irq.h>
22#include <linux/io.h>
23#include <asm/mach/irq.h>
24
25#include "intr.h"
26#include "dev.h"
27
/*
 * Sync point threshold interrupt service function
 * Handles sync point threshold triggers, in interrupt context
 *
 * Masks further threshold interrupts for this syncpoint, acknowledges the
 * pending status bit, then defers the actual completion processing to the
 * host's interrupt workqueue.
 *
 * NOTE(review): BIT_MASK()/BIT_WORD() are BITS_PER_LONG-based while the
 * status/disable registers are 32-bit words — correct only where
 * BITS_PER_LONG == 32 (the 32-bit Tegra20/30 targets); confirm before
 * reusing on a 64-bit build.
 */
static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
{
	unsigned int id = syncpt->id;
	struct host1x *host = syncpt->host;

	/* mask this syncpoint's threshold interrupt */
	host1x_sync_writel(host, BIT_MASK(id),
		HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
	/* ack the latched status (presumably write-1-to-clear — confirm) */
	host1x_sync_writel(host, BIT_MASK(id),
		HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));

	queue_work(host->intr_wq, &syncpt->intr.work);
}
44
/*
 * Top-half handler for the syncpoint threshold IRQ: walks every CPU0
 * interrupt status word and dispatches each pending syncpoint to
 * host1x_intr_syncpt_handle().
 *
 * NOTE(review): the BIT_WORD()/BITS_PER_LONG indexing matches the 32-bit
 * per-register hardware layout only when BITS_PER_LONG == 32 (true on the
 * ARM Tegra20/30 targets) — confirm before reusing on 64-bit.
 */
static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
	struct host1x *host = dev_id;
	unsigned long reg;
	int i, id;

	for (i = 0; i <= BIT_WORD(host->info->nb_pts); i++) {
		reg = host1x_sync_readl(host,
			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
		for_each_set_bit(id, &reg, BITS_PER_LONG) {
			struct host1x_syncpt *syncpt =
				host->syncpt + (i * BITS_PER_LONG + id);
			host1x_intr_syncpt_handle(syncpt);
		}
	}

	return IRQ_HANDLED;
}
63
64static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
65{
66 u32 i;
67
68 for (i = 0; i <= BIT_WORD(host->info->nb_pts); ++i) {
69 host1x_sync_writel(host, 0xffffffffu,
70 HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
71 host1x_sync_writel(host, 0xffffffffu,
72 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
73 }
74}
75
76static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
77 void (*syncpt_thresh_work)(struct work_struct *))
78{
79 int i, err;
80
81 host1x_hw_intr_disable_all_syncpt_intrs(host);
82
83 for (i = 0; i < host->info->nb_pts; i++)
84 INIT_WORK(&host->syncpt[i].intr.work, syncpt_thresh_work);
85
86 err = devm_request_irq(host->dev, host->intr_syncpt_irq,
87 syncpt_thresh_isr, IRQF_SHARED,
88 "host1x_syncpt", host);
89 if (IS_ERR_VALUE(err)) {
90 WARN_ON(1);
91 return err;
92 }
93
94 /* disable the ip_busy_timeout. this prevents write drops */
95 host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);
96
97 /*
98 * increase the auto-ack timout to the maximum value. 2d will hang
99 * otherwise on Tegra2.
100 */
101 host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
102
103 /* update host clocks per usec */
104 host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
105
106 return 0;
107}
108
/* Program the hardware comparison threshold for syncpoint @id. */
static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
	u32 id, u32 thresh)
{
	host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
}
114
/*
 * Unmask the CPU0 threshold interrupt for syncpoint @id.
 * NOTE(review): BIT_MASK/BIT_WORD assume BITS_PER_LONG == 32 here
 * (see syncpt_thresh_isr) — confirm before use on 64-bit.
 */
static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id)
{
	host1x_sync_writel(host, BIT_MASK(id),
		HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
}
120
/*
 * Mask the threshold interrupt for syncpoint @id and acknowledge any
 * status bit that is already latched for it.
 */
static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
{
	host1x_sync_writel(host, BIT_MASK(id),
		HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
	host1x_sync_writel(host, BIT_MASK(id),
		HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
}
128
/*
 * Release the syncpoint IRQ and drain any handler work still queued on
 * the interrupt workqueue.  Always returns 0.
 */
static int _host1x_free_syncpt_irq(struct host1x *host)
{
	devm_free_irq(host->dev, host->intr_syncpt_irq, host);
	flush_workqueue(host->intr_wq);
	return 0;
}
135
/*
 * Interrupt op table for host1x01-class hardware; installed on the host
 * instance by host1x01_init() (host->intr_op).
 */
static const struct host1x_intr_ops host1x_intr_ops = {
	.init_host_sync = _host1x_intr_init_host_sync,
	.set_syncpt_threshold = _host1x_intr_set_syncpt_threshold,
	.enable_syncpt_intr = _host1x_intr_enable_syncpt_intr,
	.disable_syncpt_intr = _host1x_intr_disable_syncpt_intr,
	.disable_all_syncpt_intrs = _host1x_intr_disable_all_syncpt_intrs,
	.free_syncpt_irq = _host1x_free_syncpt_irq,
};
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
new file mode 100644
index 000000000000..61174990102a
--- /dev/null
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -0,0 +1,114 @@
1/*
2 * Tegra host1x Syncpoints
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/io.h>
20
21#include "dev.h"
22#include "syncpt.h"
23
/*
 * Write the current syncpoint value back to hw.
 *
 * The value written is the software shadow minimum
 * (host1x_syncpt_read_min()).
 */
static void syncpt_restore(struct host1x_syncpt *sp)
{
	struct host1x *host = sp->host;
	int min = host1x_syncpt_read_min(sp);
	host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
}
33
/*
 * Write the current waitbase value back to hw.
 *
 * Restores the cached sp->base_val into the SYNCPT_BASE register.
 */
static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
{
	struct host1x *host = sp->host;
	host1x_sync_writel(host, sp->base_val,
		HOST1X_SYNC_SYNCPT_BASE(sp->id));
}
43
/*
 * Read waitbase value from hw.
 *
 * Refreshes the cached sp->base_val from the SYNCPT_BASE register.
 */
static void syncpt_read_wait_base(struct host1x_syncpt *sp)
{
	struct host1x *host = sp->host;
	sp->base_val =
		host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
}
53
/*
 * Updates the last value read from hardware.
 *
 * Reads the live syncpoint value from the SYNCPT register and publishes
 * it into the atomic shadow sp->min_val; returns the live value.  An
 * error is logged if the live value fails host1x_syncpt_check_max()
 * (hardware value outside the range software believes is outstanding).
 */
static u32 syncpt_load(struct host1x_syncpt *sp)
{
	struct host1x *host = sp->host;
	u32 old, live;

	/* Loop in case there's a race writing to min_val */
	do {
		old = host1x_syncpt_read_min(sp);
		live = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(sp->id));
	} while ((u32)atomic_cmpxchg(&sp->min_val, old, live) != old);

	if (!host1x_syncpt_check_max(sp, live))
		dev_err(host->dev, "%s failed: id=%u, min=%d, max=%d\n",
			__func__, sp->id, host1x_syncpt_read_min(sp),
			host1x_syncpt_read_max(sp));

	return live;
}
75
76/*
77 * Write a cpu syncpoint increment to the hardware, without touching
78 * the cache.
79 */
80static void syncpt_cpu_incr(struct host1x_syncpt *sp)
81{
82 struct host1x *host = sp->host;
83 u32 reg_offset = sp->id / 32;
84
85 if (!host1x_syncpt_client_managed(sp) &&
86 host1x_syncpt_idle(sp)) {
87 dev_err(host->dev, "Trying to increment syncpoint id %d beyond max\n",
88 sp->id);
89 host1x_debug_dump(sp->host);
90 return;
91 }
92 host1x_sync_writel(host, BIT_MASK(sp->id),
93 HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
94 wmb();
95}
96
/* remove a wait pointed to by patch_addr */
/*
 * Overwrites the command word at @patch_addr with a WAIT_SYNCPT on the
 * reserved syncpoint at threshold 0 (presumably always satisfied, which
 * neutralizes the wait — confirm against HOST1X_SYNCPT_RESERVED usage).
 * Always returns 0.
 */
static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
	u32 override = host1x_class_host_wait_syncpt(
		HOST1X_SYNCPT_RESERVED, 0);

	*((u32 *)patch_addr) = override;
	return 0;
}
106
/*
 * Syncpoint op table for host1x01-class hardware; installed on the host
 * instance by host1x01_init() (host->syncpt_op).
 */
static const struct host1x_syncpt_ops host1x_syncpt_ops = {
	.restore = syncpt_restore,
	.restore_wait_base = syncpt_restore_wait_base,
	.load_wait_base = syncpt_read_wait_base,
	.load = syncpt_load,
	.cpu_incr = syncpt_cpu_incr,
	.patch_wait = syncpt_patch_wait,
};
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
new file mode 100644
index 000000000000..2491bf82e30c
--- /dev/null
+++ b/drivers/gpu/host1x/intr.c
@@ -0,0 +1,354 @@
1/*
2 * Tegra host1x Interrupt Management
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/clk.h>
20#include <linux/interrupt.h>
21#include <linux/slab.h>
22#include <linux/irq.h>
23
24#include <trace/events/host1x.h>
25#include "channel.h"
26#include "dev.h"
27#include "intr.h"
28
/* Wait list management */

/*
 * Lifecycle states of a waiter; transitions happen via atomic
 * inc/xchg/cmpxchg on host1x_waitlist.state:
 *   PENDING   -> REMOVED    threshold reached, moved to a completed list
 *   PENDING   -> CANCELLED  cancelled before completion
 *   CANCELLED -> HANDLED    cancellation observed, waiter dropped
 *   REMOVED   -> HANDLED    action handler has run
 */
enum waitlist_state {
	WLS_PENDING,
	WLS_REMOVED,
	WLS_CANCELLED,
	WLS_HANDLED
};
37
38static void waiter_release(struct kref *kref)
39{
40 kfree(container_of(kref, struct host1x_waitlist, refcount));
41}
42
43/*
44 * add a waiter to a waiter queue, sorted by threshold
45 * returns true if it was added at the head of the queue
46 */
47static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
48 struct list_head *queue)
49{
50 struct host1x_waitlist *pos;
51 u32 thresh = waiter->thresh;
52
53 list_for_each_entry_reverse(pos, queue, list)
54 if ((s32)(pos->thresh - thresh) <= 0) {
55 list_add(&waiter->list, &pos->list);
56 return false;
57 }
58
59 list_add(&waiter->list, queue);
60 return true;
61}
62
/*
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
	struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *dest;
	struct host1x_waitlist *waiter, *next, *prev;

	list_for_each_entry_safe(waiter, next, head, list) {
		/* queue is sorted: first unexpired threshold ends the scan */
		if ((s32)(waiter->thresh - sync) > 0)
			break;

		dest = completed + waiter->action;

		/* consolidate submit cleanups */
		if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
		    !list_empty(dest)) {
			prev = list_entry(dest->prev,
					  struct host1x_waitlist, list);
			if (prev->data == waiter->data) {
				/* same channel: fold into previous waiter,
				 * signal the merge via dest = NULL */
				prev->count++;
				dest = NULL;
			}
		}

		/* PENDING->REMOVED or CANCELLED->HANDLED */
		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
			/* already cancelled, or folded: drop it here */
			list_del(&waiter->list);
			kref_put(&waiter->refcount, waiter_release);
		} else
			list_move_tail(&waiter->list, dest);
	}
}
98
99static void reset_threshold_interrupt(struct host1x *host,
100 struct list_head *head,
101 unsigned int id)
102{
103 u32 thresh =
104 list_first_entry(head, struct host1x_waitlist, list)->thresh;
105
106 host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
107 host1x_hw_intr_enable_syncpt_intr(host, id);
108}
109
110static void action_submit_complete(struct host1x_waitlist *waiter)
111{
112 struct host1x_channel *channel = waiter->data;
113
114 host1x_cdma_update(&channel->cdma);
115
116 /* Add nr_completed to trace */
117 trace_host1x_channel_submit_complete(dev_name(channel->dev),
118 waiter->count, waiter->thresh);
119
120}
121
122static void action_wakeup(struct host1x_waitlist *waiter)
123{
124 wait_queue_head_t *wq = waiter->data;
125 wake_up(wq);
126}
127
128static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
129{
130 wait_queue_head_t *wq = waiter->data;
131 wake_up_interruptible(wq);
132}
133
134typedef void (*action_handler)(struct host1x_waitlist *waiter);
135
136static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
137 action_submit_complete,
138 action_wakeup,
139 action_wakeup_interruptible,
140};
141
/*
 * Invoke the per-action handler for every waiter on the completed
 * lists, mark each waiter HANDLED and drop the list's reference.
 */
static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *head = completed;
	int i;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
		action_handler handler = action_handlers[i];
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next, head, list) {
			list_del(&waiter->list);
			handler(waiter);
			/* expect REMOVED -> HANDLED; anything else is a bug */
			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
				WLS_REMOVED);
			kref_put(&waiter->refcount, waiter_release);
		}
	}
}
160
/*
 * Remove & handle all waiters that have completed for the given syncpt.
 * Returns non-zero when the wait list is left empty (interrupt was
 * disabled), zero otherwise (interrupt re-armed for the next threshold).
 */
static int process_wait_list(struct host1x *host,
			     struct host1x_syncpt *syncpt,
			     u32 threshold)
{
	struct list_head completed[HOST1X_INTR_ACTION_COUNT];
	unsigned int i;
	int empty;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
		INIT_LIST_HEAD(completed + i);

	spin_lock(&syncpt->intr.lock);

	remove_completed_waiters(&syncpt->intr.wait_head, threshold,
				 completed);

	empty = list_empty(&syncpt->intr.wait_head);
	if (empty)
		host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
	else
		reset_threshold_interrupt(host, &syncpt->intr.wait_head,
					  syncpt->id);

	spin_unlock(&syncpt->intr.lock);

	/* run action handlers only after the lock has been dropped */
	run_handlers(completed);

	return empty;
}
193
/*
 * Sync point threshold interrupt service thread function
 * Handles sync point threshold triggers, in thread context
 */

static void syncpt_thresh_work(struct work_struct *work)
{
	struct host1x_syncpt_intr *syncpt_intr =
		container_of(work, struct host1x_syncpt_intr, work);
	struct host1x_syncpt *syncpt =
		container_of(syncpt_intr, struct host1x_syncpt, intr);
	unsigned int id = syncpt->id;
	struct host1x *host = syncpt->host;

	/* refresh the cached value and reap everything now satisfied */
	(void)process_wait_list(host, syncpt,
				host1x_syncpt_load(host->syncpt + id));
}
211
/*
 * Schedule @action to run once syncpoint @id reaches @thresh.
 * Takes ownership of @waiter; stores it in *@ref (with an extra
 * reference) when the caller wants to be able to cancel.
 */
int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
			   enum host1x_intr_action action, void *data,
			   struct host1x_waitlist *waiter, void **ref)
{
	struct host1x_syncpt *syncpt;
	int queue_was_empty;

	if (waiter == NULL) {
		pr_warn("%s: NULL waiter\n", __func__);
		return -EINVAL;
	}

	/* initialize a new waiter */
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	/* second reference for the caller's cancellation handle */
	if (ref)
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	syncpt = host->syncpt + id;

	spin_lock(&syncpt->intr.lock);

	queue_was_empty = list_empty(&syncpt->intr.wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
		/* added at head of list - new threshold value */
		host1x_hw_intr_set_syncpt_threshold(host, id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			host1x_hw_intr_enable_syncpt_intr(host, id);
	}

	spin_unlock(&syncpt->intr.lock);

	if (ref)
		*ref = waiter;
	return 0;
}
256
/* Cancel (or reap) a waiter previously handed out via @ref. */
void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
{
	struct host1x_waitlist *waiter = ref;
	struct host1x_syncpt *syncpt;

	/*
	 * Move the waiter PENDING->CANCELLED. While it is REMOVED (on a
	 * completed list, handler about to run) spin with schedule()
	 * until the handler has marked it HANDLED.
	 * NOTE(review): this is a busy-wait without a wait queue -
	 * confirm it cannot livelock if the handler work is starved.
	 */
	while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
	       WLS_REMOVED)
		schedule();

	syncpt = host->syncpt + id;
	/* flush the queue so a CANCELLED waiter is reaped promptly */
	(void)process_wait_list(host, syncpt,
				host1x_syncpt_load(host->syncpt + id));

	/* drop the reference taken for the caller in add_action() */
	kref_put(&waiter->refcount, waiter_release);
}
272
/*
 * Initialize syncpoint interrupt handling: per-syncpoint locks, wait
 * lists and names, a shared workqueue, then start interrupt delivery.
 */
int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
{
	unsigned int id;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_init(&host->intr_mutex);
	host->intr_syncpt_irq = irq_sync;
	/* one workqueue shared by every syncpoint threshold worker */
	host->intr_wq = create_workqueue("host_syncpt");
	if (!host->intr_wq)
		return -ENOMEM;

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_syncpt *syncpt = host->syncpt + id;

		spin_lock_init(&syncpt->intr.lock);
		INIT_LIST_HEAD(&syncpt->intr.wait_head);
		snprintf(syncpt->intr.thresh_irq_name,
			 sizeof(syncpt->intr.thresh_irq_name),
			 "host1x_sp_%02d", id);
	}

	host1x_intr_start(host);

	return 0;
}
298
/* Tear down interrupt handling: stop delivery, then drop the workqueue. */
void host1x_intr_deinit(struct host1x *host)
{
	host1x_intr_stop(host);
	destroy_workqueue(host->intr_wq);
}
304
305void host1x_intr_start(struct host1x *host)
306{
307 u32 hz = clk_get_rate(host->clk);
308 int err;
309
310 mutex_lock(&host->intr_mutex);
311 err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
312 syncpt_thresh_work);
313 if (err) {
314 mutex_unlock(&host->intr_mutex);
315 return;
316 }
317 mutex_unlock(&host->intr_mutex);
318}
319
/* Disable syncpoint interrupts and reap all cancelled waiters. */
void host1x_intr_stop(struct host1x *host)
{
	unsigned int id;
	struct host1x_syncpt *syncpt = host->syncpt;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_lock(&host->intr_mutex);

	host1x_hw_intr_disable_all_syncpt_intrs(host);

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_waitlist *waiter, *next;

		/* drop waiters whose cancellation was still pending */
		list_for_each_entry_safe(waiter, next,
			&syncpt[id].intr.wait_head, list) {
			if (atomic_cmpxchg(&waiter->state,
			    WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
				list_del(&waiter->list);
				kref_put(&waiter->refcount, waiter_release);
			}
		}

		/* bail out (unlocked) if live waiters remain on this id */
		if (!list_empty(&syncpt[id].intr.wait_head)) {
			/* output diagnostics */
			mutex_unlock(&host->intr_mutex);
			pr_warn("%s cannot stop syncpt intr id=%d\n",
				__func__, id);
			return;
		}
	}

	host1x_hw_intr_free_syncpt_irq(host);

	mutex_unlock(&host->intr_mutex);
}
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
new file mode 100644
index 000000000000..2b8adf016a05
--- /dev/null
+++ b/drivers/gpu/host1x/intr.h
@@ -0,0 +1,102 @@
1/*
2 * Tegra host1x Interrupt Management
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_INTR_H
20#define __HOST1X_INTR_H
21
22#include <linux/interrupt.h>
23#include <linux/workqueue.h>
24
25struct host1x;
26
/*
 * Actions that can be scheduled on a syncpoint threshold. The values
 * also index the per-action completed[] lists and the handler dispatch
 * table in intr.c, so HOST1X_INTR_ACTION_COUNT must stay last.
 */
enum host1x_intr_action {
	/*
	 * Perform cleanup after a submit has completed.
	 * 'data' points to a channel
	 */
	HOST1X_INTR_ACTION_SUBMIT_COMPLETE = 0,

	/*
	 * Wake up a  task.
	 * 'data' points to a wait_queue_head_t
	 */
	HOST1X_INTR_ACTION_WAKEUP,

	/*
	 * Wake up a interruptible task.
	 * 'data' points to a wait_queue_head_t
	 */
	HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,

	HOST1X_INTR_ACTION_COUNT
};
48
/* Per-syncpoint interrupt state embedded in struct host1x_syncpt. */
struct host1x_syncpt_intr {
	spinlock_t lock;		/* protects wait_head */
	struct list_head wait_head;	/* waiters, sorted by threshold */
	char thresh_irq_name[12];	/* "host1x_sp_NN" */
	struct work_struct work;	/* threshold bottom half */
};
55
/* One scheduled threshold action; lifecycle is driven by 'state'. */
struct host1x_waitlist {
	struct list_head list;		/* wait_head / completed linkage */
	struct kref refcount;		/* list ref + optional caller ref */
	u32 thresh;			/* syncpoint value to wait for */
	enum host1x_intr_action action;
	atomic_t state;			/* enum waitlist_state (intr.c) */
	void *data;			/* action-specific, see enum above */
	int count;			/* folded submit-complete count */
};
65
66/*
67 * Schedule an action to be taken when a sync point reaches the given threshold.
68 *
69 * @id the sync point
70 * @thresh the threshold
71 * @action the action to take
72 * @data a pointer to extra data depending on action, see above
73 * @waiter waiter structure - assumes ownership
74 * @ref must be passed if cancellation is possible, else NULL
75 *
76 * This is a non-blocking api.
77 */
78int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
79 enum host1x_intr_action action, void *data,
80 struct host1x_waitlist *waiter, void **ref);
81
82/*
83 * Unreference an action submitted to host1x_intr_add_action().
84 * You must call this if you passed non-NULL as ref.
85 * @ref the ref returned from host1x_intr_add_action()
86 */
87void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref);
88
89/* Initialize host1x sync point interrupt */
90int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
91
92/* Deinitialize host1x sync point interrupt */
93void host1x_intr_deinit(struct host1x *host);
94
95/* Enable host1x sync point interrupt */
96void host1x_intr_start(struct host1x *host);
97
98/* Disable host1x sync point interrupt */
99void host1x_intr_stop(struct host1x *host);
100
101irqreturn_t host1x_syncpt_thresh_fn(void *dev_id);
102#endif
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
new file mode 100644
index 000000000000..f665d679031c
--- /dev/null
+++ b/drivers/gpu/host1x/job.c
@@ -0,0 +1,603 @@
1/*
2 * Tegra host1x Job
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/dma-mapping.h>
20#include <linux/err.h>
21#include <linux/kref.h>
22#include <linux/module.h>
23#include <linux/scatterlist.h>
24#include <linux/slab.h>
25#include <linux/vmalloc.h>
26#include <trace/events/host1x.h>
27
28#include "channel.h"
29#include "dev.h"
30#include "host1x_bo.h"
31#include "job.h"
32#include "syncpt.h"
33
34struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
35 u32 num_cmdbufs, u32 num_relocs,
36 u32 num_waitchks)
37{
38 struct host1x_job *job = NULL;
39 unsigned int num_unpins = num_cmdbufs + num_relocs;
40 u64 total;
41 void *mem;
42
43 /* Check that we're not going to overflow */
44 total = sizeof(struct host1x_job) +
45 num_relocs * sizeof(struct host1x_reloc) +
46 num_unpins * sizeof(struct host1x_job_unpin_data) +
47 num_waitchks * sizeof(struct host1x_waitchk) +
48 num_cmdbufs * sizeof(struct host1x_job_gather) +
49 num_unpins * sizeof(dma_addr_t) +
50 num_unpins * sizeof(u32 *);
51 if (total > ULONG_MAX)
52 return NULL;
53
54 mem = job = kzalloc(total, GFP_KERNEL);
55 if (!job)
56 return NULL;
57
58 kref_init(&job->ref);
59 job->channel = ch;
60
61 /* Redistribute memory to the structs */
62 mem += sizeof(struct host1x_job);
63 job->relocarray = num_relocs ? mem : NULL;
64 mem += num_relocs * sizeof(struct host1x_reloc);
65 job->unpins = num_unpins ? mem : NULL;
66 mem += num_unpins * sizeof(struct host1x_job_unpin_data);
67 job->waitchk = num_waitchks ? mem : NULL;
68 mem += num_waitchks * sizeof(struct host1x_waitchk);
69 job->gathers = num_cmdbufs ? mem : NULL;
70 mem += num_cmdbufs * sizeof(struct host1x_job_gather);
71 job->addr_phys = num_unpins ? mem : NULL;
72
73 job->reloc_addr_phys = job->addr_phys;
74 job->gather_addr_phys = &job->addr_phys[num_relocs];
75
76 return job;
77}
78
/* Take an extra reference on @job; returns @job for call chaining. */
struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
84
/* kref release callback: last reference gone, free the job memory. */
static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	kfree(job);
}
91
/* Drop a reference on @job; frees it when the count reaches zero. */
void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
96
97void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
98 u32 words, u32 offset)
99{
100 struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers];
101
102 cur_gather->words = words;
103 cur_gather->bo = bo;
104 cur_gather->offset = offset;
105 job->num_gathers++;
106}
107
108/*
109 * NULL an already satisfied WAIT_SYNCPT host method, by patching its
110 * args in the command stream. The method data is changed to reference
111 * a reserved (never given out or incr) HOST1X_SYNCPT_RESERVED syncpt
112 * with a matching threshold value of 0, so is guaranteed to be popped
113 * by the host HW.
114 */
115static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp,
116 struct host1x_bo *h, u32 offset)
117{
118 void *patch_addr = NULL;
119
120 /* patch the wait */
121 patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT);
122 if (patch_addr) {
123 host1x_syncpt_patch_wait(sp,
124 patch_addr + (offset & ~PAGE_MASK));
125 host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr);
126 } else
127 pr_err("Could not map cmdbuf for wait check\n");
128}
129
130/*
131 * Check driver supplied waitchk structs for syncpt thresholds
132 * that have already been satisfied and NULL the comparison (to
133 * avoid a wrap condition in the HW).
134 */
135static int do_waitchks(struct host1x_job *job, struct host1x *host,
136 struct host1x_bo *patch)
137{
138 int i;
139
140 /* compare syncpt vs wait threshold */
141 for (i = 0; i < job->num_waitchk; i++) {
142 struct host1x_waitchk *wait = &job->waitchk[i];
143 struct host1x_syncpt *sp =
144 host1x_syncpt_get(host, wait->syncpt_id);
145
146 /* validate syncpt id */
147 if (wait->syncpt_id > host1x_syncpt_nb_pts(host))
148 continue;
149
150 /* skip all other gathers */
151 if (patch != wait->bo)
152 continue;
153
154 trace_host1x_syncpt_wait_check(wait->bo, wait->offset,
155 wait->syncpt_id, wait->thresh,
156 host1x_syncpt_read_min(sp));
157
158 if (host1x_syncpt_is_expired(sp, wait->thresh)) {
159 dev_dbg(host->dev,
160 "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
161 wait->syncpt_id, sp->name, wait->thresh,
162 host1x_syncpt_read_min(sp));
163
164 host1x_syncpt_patch_offset(sp, patch, wait->offset);
165 }
166
167 wait->bo = NULL;
168 }
169
170 return 0;
171}
172
/*
 * Pin every reloc target and gather BO into device address space and
 * record each pin in job->unpins so host1x_job_unpin() can undo it.
 * Returns the number of pinned handles, or 0 on failure (everything
 * pinned so far is unpinned again before returning).
 */
static unsigned int pin_job(struct host1x_job *job)
{
	unsigned int i;

	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocarray[i];
		struct sg_table *sgt;
		dma_addr_t phys_addr;

		reloc->target = host1x_bo_get(reloc->target);
		if (!reloc->target)
			goto unpin;

		phys_addr = host1x_bo_pin(reloc->target, &sgt);
		if (!phys_addr)
			goto unpin;

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		struct sg_table *sgt;
		dma_addr_t phys_addr;

		g->bo = host1x_bo_get(g->bo);
		if (!g->bo)
			goto unpin;

		phys_addr = host1x_bo_pin(g->bo, &sgt);
		if (!phys_addr)
			goto unpin;

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return job->num_unpins;

unpin:
	/* roll back everything pinned so far */
	host1x_job_unpin(job);
	return 0;
}
223
224static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
225{
226 int i = 0;
227 u32 last_page = ~0;
228 void *cmdbuf_page_addr = NULL;
229
230 /* pin & patch the relocs for one gather */
231 while (i < job->num_relocs) {
232 struct host1x_reloc *reloc = &job->relocarray[i];
233 u32 reloc_addr = (job->reloc_addr_phys[i] +
234 reloc->target_offset) >> reloc->shift;
235 u32 *target;
236
237 /* skip all other gathers */
238 if (!(reloc->cmdbuf && cmdbuf == reloc->cmdbuf)) {
239 i++;
240 continue;
241 }
242
243 if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) {
244 if (cmdbuf_page_addr)
245 host1x_bo_kunmap(cmdbuf, last_page,
246 cmdbuf_page_addr);
247
248 cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
249 reloc->cmdbuf_offset >> PAGE_SHIFT);
250 last_page = reloc->cmdbuf_offset >> PAGE_SHIFT;
251
252 if (unlikely(!cmdbuf_page_addr)) {
253 pr_err("Could not map cmdbuf for relocation\n");
254 return -ENOMEM;
255 }
256 }
257
258 target = cmdbuf_page_addr + (reloc->cmdbuf_offset & ~PAGE_MASK);
259 *target = reloc_addr;
260
261 /* mark this gather as handled */
262 reloc->cmdbuf = 0;
263 }
264
265 if (cmdbuf_page_addr)
266 host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);
267
268 return 0;
269}
270
271static int check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
272 unsigned int offset)
273{
274 offset *= sizeof(u32);
275
276 if (reloc->cmdbuf != cmdbuf || reloc->cmdbuf_offset != offset)
277 return -EINVAL;
278
279 return 0;
280}
281
/* Running state of the command-stream firewall (see validate()). */
struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;	/* relocs not yet consumed */
	struct host1x_reloc *reloc;	/* next expected reloc */

	struct host1x_bo *cmdbuf_id;	/* gather being validated */
	unsigned int offset;		/* current word in the gather */

	u32 words;			/* words remaining in the gather */
	u32 class;			/* current engine class */
	u32 reg;			/* register of current op */
	u32 mask;			/* write mask of current op */
	u32 count;			/* word count of current op */
};
298
/*
 * Verify a masked write: each set bit in fw->mask writes one data word
 * to register (fw->reg + bit index). Every write to an address
 * register must be backed by the next reloc entry. Consumes
 * words/offset as it walks the data.
 */
static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;

	while (mask) {
		/* opcode claims more data words than the gather holds */
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
				bool bad_reloc = check_reloc(fw->reloc,
							     fw->cmdbuf_id,
							     fw->offset);
				if (!fw->num_relocs || bad_reloc)
					return -EINVAL;
				fw->reloc++;
				fw->num_relocs--;
			}
			fw->words--;
			fw->offset++;
		}
		mask >>= 1;
		reg++;
	}

	return 0;
}
327
328static int check_incr(struct host1x_firewall *fw)
329{
330 u32 count = fw->count;
331 u32 reg = fw->reg;
332
333 while (fw) {
334 if (fw->words == 0)
335 return -EINVAL;
336
337 if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
338 bool bad_reloc = check_reloc(fw->reloc, fw->cmdbuf_id,
339 fw->offset);
340 if (!fw->num_relocs || bad_reloc)
341 return -EINVAL;
342 fw->reloc++;
343 fw->num_relocs--;
344 }
345 reg++;
346 fw->words--;
347 fw->offset++;
348 count--;
349 }
350
351 return 0;
352}
353
354static int check_nonincr(struct host1x_firewall *fw)
355{
356 int is_addr_reg = fw->job->is_addr_reg(fw->dev, fw->class, fw->reg);
357 u32 count = fw->count;
358
359 while (count) {
360 if (fw->words == 0)
361 return -EINVAL;
362
363 if (is_addr_reg) {
364 bool bad_reloc = check_reloc(fw->reloc, fw->cmdbuf_id,
365 fw->offset);
366 if (!fw->num_relocs || bad_reloc)
367 return -EINVAL;
368 fw->reloc++;
369 fw->num_relocs--;
370 }
371 fw->words--;
372 fw->offset++;
373 count--;
374 }
375
376 return 0;
377}
378
/*
 * Firewall: walk the opcode stream of one gather and verify that every
 * write to an address register is covered by a matching reloc entry.
 * Returns 0 if the gather is acceptable, -EINVAL/-ENOMEM otherwise.
 */
static int validate(struct host1x_job *job, struct device *dev,
		    struct host1x_job_gather *g)
{
	u32 *cmdbuf_base;
	int err = 0;
	struct host1x_firewall fw;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocarray;
	fw.num_relocs = job->num_relocs;
	fw.cmdbuf_id = g->bo;

	fw.offset = 0;
	fw.class = 0;

	/* no is_addr_reg callback means nothing to firewall */
	if (!job->is_addr_reg)
		return 0;

	cmdbuf_base = host1x_bo_mmap(g->bo);
	if (!cmdbuf_base)
		return -ENOMEM;

	fw.words = g->words;
	while (fw.words && !err) {
		u32 word = cmdbuf_base[fw.offset];
		/* top nibble selects the host1x opcode */
		u32 opcode = (word & 0xf0000000) >> 28;

		fw.mask = 0;
		fw.reg = 0;
		fw.count = 0;
		fw.words--;
		fw.offset++;

		switch (opcode) {
		case 0:
			/* set class + masked register writes */
			fw.class = word >> 6 & 0x3ff;
			fw.mask = word & 0x3f;
			fw.reg = word >> 16 & 0xfff;
			err = check_mask(&fw);
			if (err)
				goto out;
			break;
		case 1:
			/* incrementing write: count words, consecutive regs */
			fw.reg = word >> 16 & 0xfff;
			fw.count = word & 0xffff;
			err = check_incr(&fw);
			if (err)
				goto out;
			break;

		case 2:
			/* non-incrementing write: count words, one reg */
			fw.reg = word >> 16 & 0xfff;
			fw.count = word & 0xffff;
			err = check_nonincr(&fw);
			if (err)
				goto out;
			break;

		case 3:
			/* masked write with a 16-bit mask */
			fw.mask = word & 0xffff;
			fw.reg = word >> 16 & 0xfff;
			err = check_mask(&fw);
			if (err)
				goto out;
			break;
		case 4:
		case 5:
		case 14:
			/*
			 * NOTE(review): these opcodes are passed through
			 * without any data-word accounting - confirm
			 * against the host1x opcode reference.
			 */
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

	/* No relocs should remain at this point */
	if (fw.num_relocs)
		err = -EINVAL;

out:
	host1x_bo_munmap(g->bo, cmdbuf_base);

	return err;
}
464
465static inline int copy_gathers(struct host1x_job *job, struct device *dev)
466{
467 size_t size = 0;
468 size_t offset = 0;
469 int i;
470
471 for (i = 0; i < job->num_gathers; i++) {
472 struct host1x_job_gather *g = &job->gathers[i];
473 size += g->words * sizeof(u32);
474 }
475
476 job->gather_copy_mapped = dma_alloc_writecombine(dev, size,
477 &job->gather_copy,
478 GFP_KERNEL);
479 if (!job->gather_copy_mapped) {
480 int err = PTR_ERR(job->gather_copy_mapped);
481 job->gather_copy_mapped = NULL;
482 return err;
483 }
484
485 job->gather_copy_size = size;
486
487 for (i = 0; i < job->num_gathers; i++) {
488 struct host1x_job_gather *g = &job->gathers[i];
489 void *gather;
490
491 gather = host1x_bo_mmap(g->bo);
492 memcpy(job->gather_copy_mapped + offset, gather + g->offset,
493 g->words * sizeof(u32));
494 host1x_bo_munmap(g->bo, gather);
495
496 g->base = job->gather_copy;
497 g->offset = offset;
498 g->bo = NULL;
499
500 offset += g->words * sizeof(u32);
501 }
502
503 return 0;
504}
505
506int host1x_job_pin(struct host1x_job *job, struct device *dev)
507{
508 int err;
509 unsigned int i, j;
510 struct host1x *host = dev_get_drvdata(dev->parent);
511 DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));
512
513 bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
514 for (i = 0; i < job->num_waitchk; i++) {
515 u32 syncpt_id = job->waitchk[i].syncpt_id;
516 if (syncpt_id < host1x_syncpt_nb_pts(host))
517 set_bit(syncpt_id, waitchk_mask);
518 }
519
520 /* get current syncpt values for waitchk */
521 for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
522 host1x_syncpt_load(host->syncpt + i);
523
524 /* pin memory */
525 err = pin_job(job);
526 if (!err)
527 goto out;
528
529 /* patch gathers */
530 for (i = 0; i < job->num_gathers; i++) {
531 struct host1x_job_gather *g = &job->gathers[i];
532
533 /* process each gather mem only once */
534 if (g->handled)
535 continue;
536
537 g->base = job->gather_addr_phys[i];
538
539 for (j = 0; j < job->num_gathers; j++)
540 if (job->gathers[j].bo == g->bo)
541 job->gathers[j].handled = true;
542
543 err = 0;
544
545 if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
546 err = validate(job, dev, g);
547
548 if (err)
549 dev_err(dev, "Job invalid (err=%d)\n", err);
550
551 if (!err)
552 err = do_relocs(job, g->bo);
553
554 if (!err)
555 err = do_waitchks(job, host, g->bo);
556
557 if (err)
558 break;
559 }
560
561 if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !err) {
562 err = copy_gathers(job, dev);
563 if (err) {
564 host1x_job_unpin(job);
565 return err;
566 }
567 }
568
569out:
570 wmb();
571
572 return err;
573}
574
575void host1x_job_unpin(struct host1x_job *job)
576{
577 unsigned int i;
578
579 for (i = 0; i < job->num_unpins; i++) {
580 struct host1x_job_unpin_data *unpin = &job->unpins[i];
581 host1x_bo_unpin(unpin->bo, unpin->sgt);
582 host1x_bo_put(unpin->bo);
583 }
584 job->num_unpins = 0;
585
586 if (job->gather_copy_size)
587 dma_free_writecombine(job->channel->dev, job->gather_copy_size,
588 job->gather_copy_mapped,
589 job->gather_copy);
590}
591
/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
new file mode 100644
index 000000000000..fba45f20458e
--- /dev/null
+++ b/drivers/gpu/host1x/job.h
@@ -0,0 +1,162 @@
1/*
2 * Tegra host1x Job
3 *
4 * Copyright (c) 2011-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_JOB_H
20#define __HOST1X_JOB_H
21
/* One command-buffer fragment to be fetched by the hardware. */
struct host1x_job_gather {
	u32 words;		/* length in 32-bit words */
	dma_addr_t base;	/* pinned device address */
	struct host1x_bo *bo;	/* backing buffer object */
	int offset;		/* byte offset into bo */
	bool handled;		/* set once patched in host1x_job_pin() */
};
29
/* Userspace-facing command buffer descriptor (fixed-size, padded). */
struct host1x_cmdbuf {
	u32 handle;
	u32 offset;
	u32 words;
	u32 pad;
};
36
/* Address relocation: patch a word in cmdbuf with target's address. */
struct host1x_reloc {
	struct host1x_bo *cmdbuf;	/* buffer containing the word */
	u32 cmdbuf_offset;		/* byte offset of the word */
	struct host1x_bo *target;	/* buffer whose address is written */
	u32 target_offset;		/* byte offset added to the address */
	u32 shift;			/* right-shift applied to the address */
	u32 pad;
};
45
/* A WAIT_SYNCPT in a command buffer to be checked/patched at submit. */
struct host1x_waitchk {
	struct host1x_bo *bo;	/* buffer containing the wait */
	u32 offset;		/* byte offset of the wait */
	u32 syncpt_id;		/* syncpoint being waited on */
	u32 thresh;		/* threshold being waited for */
};
52
/* Bookkeeping for one pinned BO so it can be unpinned later. */
struct host1x_job_unpin_data {
	struct host1x_bo *bo;
	struct sg_table *sgt;
};
57
/*
 * Each submit is tracked as a host1x_job.
 */
struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct host1x_channel *channel;

	u32 client;

	/* Gathers and their memory */
	struct host1x_job_gather *gathers;
	unsigned int num_gathers;

	/* Wait checks to be processed at submit time */
	struct host1x_waitchk *waitchk;
	unsigned int num_waitchk;
	u32 waitchk_mask;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocarray;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	/* pinned device addresses; relocs first, then gathers */
	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Sync point id, number of increments and end related to the submit */
	u32 syncpt_id;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/*
	 * Check if register is marked as an address reg.
	 * NOTE(review): job.c invokes this as is_addr_reg(dev, class, reg)
	 * while the parameter names here read (dev, reg, class) - the
	 * names look swapped; confirm against the client implementations.
	 */
	int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;
};
118/*
119 * Allocate memory for a job. Just enough memory will be allocated to
120 * accomodate the submit.
121 */
122struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
123 u32 num_cmdbufs, u32 num_relocs,
124 u32 num_waitchks);
125
126/*
127 * Add a gather to a job.
128 */
129void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
130 u32 words, u32 offset);
131
132/*
133 * Increment reference going to host1x_job.
134 */
135struct host1x_job *host1x_job_get(struct host1x_job *job);
136
137/*
138 * Decrement reference job, free if goes to zero.
139 */
140void host1x_job_put(struct host1x_job *job);
141
142/*
143 * Pin memory related to job. This handles relocation of addresses to the
144 * host1x address space. Handles both the gather memory and any other memory
145 * referred to from the gather buffers.
146 *
147 * Handles also patching out host waits that would wait for an expired sync
148 * point value.
149 */
150int host1x_job_pin(struct host1x_job *job, struct device *dev);
151
152/*
153 * Unpin memory related to job.
154 */
155void host1x_job_unpin(struct host1x_job *job);
156
157/*
158 * Dump contents of job to debug output.
159 */
160void host1x_job_dump(struct device *dev, struct host1x_job *job);
161
162#endif
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
new file mode 100644
index 000000000000..4b493453e805
--- /dev/null
+++ b/drivers/gpu/host1x/syncpt.c
@@ -0,0 +1,387 @@
1/*
2 * Tegra host1x Syncpoints
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/slab.h>
22
23#include <trace/events/host1x.h>
24
25#include "syncpt.h"
26#include "dev.h"
27#include "intr.h"
28#include "debug.h"
29
30#define SYNCPT_CHECK_PERIOD (2 * HZ)
31#define MAX_STUCK_CHECK_COUNT 15
32
33static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
34 struct device *dev,
35 int client_managed)
36{
37 int i;
38 struct host1x_syncpt *sp = host->syncpt;
39 char *name;
40
41 for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
42 ;
43 if (sp->dev)
44 return NULL;
45
46 name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
47 dev ? dev_name(dev) : NULL);
48 if (!name)
49 return NULL;
50
51 sp->dev = dev;
52 sp->name = name;
53 sp->client_managed = client_managed;
54
55 return sp;
56}
57
/* Return the hardware id of the sync point. */
u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
	return sp->id;
}
62
63/*
64 * Updates the value sent to hardware.
65 */
66u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
67{
68 return (u32)atomic_add_return(incrs, &sp->max_val);
69}
70
71 /*
72 * Write cached syncpoint and waitbase values to hardware.
73 */
74void host1x_syncpt_restore(struct host1x *host)
75{
76 struct host1x_syncpt *sp_base = host->syncpt;
77 u32 i;
78
79 for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
80 host1x_hw_syncpt_restore(host, sp_base + i);
81 for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
82 host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
83 wmb();
84}
85
86/*
87 * Update the cached syncpoint and waitbase values by reading them
88 * from the registers.
89 */
90void host1x_syncpt_save(struct host1x *host)
91{
92 struct host1x_syncpt *sp_base = host->syncpt;
93 u32 i;
94
95 for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
96 if (host1x_syncpt_client_managed(sp_base + i))
97 host1x_hw_syncpt_load(host, sp_base + i);
98 else
99 WARN_ON(!host1x_syncpt_idle(sp_base + i));
100 }
101
102 for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
103 host1x_hw_syncpt_load_wait_base(host, sp_base + i);
104}
105
106/*
107 * Updates the cached syncpoint value by reading a new value from the hardware
108 * register
109 */
110u32 host1x_syncpt_load(struct host1x_syncpt *sp)
111{
112 u32 val;
113 val = host1x_hw_syncpt_load(sp->host, sp);
114 trace_host1x_syncpt_load_min(sp->id, val);
115
116 return val;
117}
118
119/*
120 * Get the current syncpoint base
121 */
122u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
123{
124 u32 val;
125 host1x_hw_syncpt_load_wait_base(sp->host, sp);
126 val = sp->base_val;
127 return val;
128}
129
/*
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp)
{
	/* thin wrapper around the hardware-specific increment operation */
	host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
138
/*
 * Increment the syncpoint value from the CPU, keeping the cached maximum
 * in step for client-managed sync points.
 */
void host1x_syncpt_incr(struct host1x_syncpt *sp)
{
	int managed = host1x_syncpt_client_managed(sp);

	if (managed)
		host1x_syncpt_incr_max(sp, 1);

	host1x_syncpt_cpu_incr(sp);
}
148
149/*
150 * Updated sync point form hardware, and returns true if syncpoint is expired,
151 * false if we may need to wait
152 */
153static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
154{
155 host1x_hw_syncpt_load(sp->host, sp);
156 return host1x_syncpt_is_expired(sp, thresh);
157}
158
/*
 * Main entrypoint for syncpoint value waits.
 *
 * Waits until the sync point reaches @thresh or until @timeout (in
 * jiffies) expires. A negative timeout means wait indefinitely; a zero
 * timeout turns the call into a non-blocking poll. On success, the
 * current syncpoint value is stored in *@value when @value is non-NULL.
 *
 * Returns 0 when the threshold was reached, -EAGAIN on timeout (or on a
 * zero-timeout poll that did not hit the threshold), -ENOMEM if the
 * waiter could not be allocated, or a negative error from the
 * interruptible wait (e.g. -ERESTARTSYS on signal).
 */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
			u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	struct host1x_waitlist *waiter;
	int err = 0, check_count = 0;
	u32 val;

	if (value)
		*value = 0;

	/* first check cache */
	if (host1x_syncpt_is_expired(sp, thresh)) {
		if (value)
			*value = host1x_syncpt_load(sp);
		return 0;
	}

	/* try to read from register */
	val = host1x_hw_syncpt_load(sp->host, sp);
	if (host1x_syncpt_is_expired(sp, thresh)) {
		if (value)
			*value = val;
		goto done;
	}

	/* non-blocking mode: report that the caller would have to wait */
	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* allocate a waiter */
	waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = host1x_intr_add_action(sp->host, sp->id, thresh,
				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
				     &wq, waiter, &ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* Caller-specified timeout may be impractically low */
	if (timeout < 0)
		timeout = LONG_MAX;

	/*
	 * Wait for the syncpoint, or timeout, or signal. The wait is
	 * chunked into SYNCPT_CHECK_PERIOD slices so a stuck syncpoint
	 * can be detected and diagnostic state dumped.
	 */
	while (timeout) {
		long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
				syncpt_load_min_is_expired(sp, thresh),
				check);
		if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
			if (value)
				*value = host1x_syncpt_load(sp);
			err = 0;
			break;
		}
		if (remain < 0) {
			/* interrupted by a signal */
			err = remain;
			break;
		}
		timeout -= check;
		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
			dev_warn(sp->host->dev,
				"%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
				 current->comm, sp->id, sp->name,
				 thresh, timeout);

			host1x_debug_dump_syncpts(sp->host);
			/* full dump only once, on the last warning */
			if (check_count == MAX_STUCK_CHECK_COUNT)
				host1x_debug_dump(sp->host);
			check_count++;
		}
	}
	host1x_intr_put_ref(sp->host, sp->id, ref);

done:
	return err;
}
EXPORT_SYMBOL(host1x_syncpt_wait);
248
/*
 * Returns true if syncpoint is expired, false if we may need to wait
 */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	u32 current_val;
	u32 future_val;
	/* pair with the writers' barriers so we see fresh min/max */
	smp_rmb();
	current_val = (u32)atomic_read(&sp->min_val);
	future_val = (u32)atomic_read(&sp->max_val);

	/* Note the use of unsigned arithmetic here (mod 1<<32).
	 *
	 * c = current_val = min_val = the current value of the syncpoint.
	 * t = thresh = the value we are checking
	 * f = future_val = max_val = the value c will reach when all
	 * outstanding increments have completed.
	 *
	 * Note that c always chases f until it reaches f.
	 *
	 * Dtf = (f - t)
	 * Dtc = (c - t)
	 *
	 * Consider all cases:
	 *
	 * A) .....c..t..f..... Dtf < Dtc need to wait
	 * B) .....c.....f..t.. Dtf > Dtc expired
	 * C) ..t..c.....f..... Dtf > Dtc expired (Dct very large)
	 *
	 * Any case where f==c: always expired (for any t). Dtf == Dcf
	 * Any case where t==c: always expired (for any f). Dtf >= Dtc (because Dtc==0)
	 * Any case where t==f!=c: always wait. Dtf < Dtc (because Dtf==0,
	 * Dtc!=0)
	 *
	 * Other cases:
	 *
	 * A) .....t..f..c..... Dtf < Dtc need to wait
	 * A) .....f..c..t..... Dtf < Dtc need to wait
	 * A) .....f..t..c..... Dtf > Dtc expired
	 *
	 * So:
	 * Dtf >= Dtc implies EXPIRED (return true)
	 * Dtf < Dtc implies WAIT (return false)
	 *
	 * Note: If t is expired then we *cannot* wait on it. We would wait
	 * forever (hang the system).
	 *
	 * Note: do NOT get clever and remove the -thresh from both sides. It
	 * is NOT the same.
	 *
	 * If the sync point is client managed, max_val is not tracked by
	 * software, so the wraparound reasoning above does not apply and
	 * we fall back to a direct signed comparison against the cached
	 * minimum instead.
	 */
	if (!host1x_syncpt_client_managed(sp))
		return future_val - thresh >= current_val - thresh;
	else
		return (s32)(current_val - thresh) >= 0;
}
307
/*
 * Remove (NOP out) a host wait pointed to by @patch_addr; used to patch
 * out waits on already-expired fence values. Delegates to the
 * hardware-specific implementation.
 */
int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
	return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
}
313
314int host1x_syncpt_init(struct host1x *host)
315{
316 struct host1x_syncpt *syncpt;
317 int i;
318
319 syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
320 GFP_KERNEL);
321 if (!syncpt)
322 return -ENOMEM;
323
324 for (i = 0; i < host->info->nb_pts; ++i) {
325 syncpt[i].id = i;
326 syncpt[i].host = host;
327 }
328
329 host->syncpt = syncpt;
330
331 host1x_syncpt_restore(host);
332
333 /* Allocate sync point to use for clearing waits for expired fences */
334 host->nop_sp = _host1x_syncpt_alloc(host, NULL, 0);
335 if (!host->nop_sp)
336 return -ENOMEM;
337
338 return 0;
339}
340
341struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
342 int client_managed)
343{
344 struct host1x *host = dev_get_drvdata(dev->parent);
345 return _host1x_syncpt_alloc(host, dev, client_managed);
346}
347
348void host1x_syncpt_free(struct host1x_syncpt *sp)
349{
350 if (!sp)
351 return;
352
353 kfree(sp->name);
354 sp->dev = NULL;
355 sp->name = NULL;
356 sp->client_managed = 0;
357}
358
359void host1x_syncpt_deinit(struct host1x *host)
360{
361 int i;
362 struct host1x_syncpt *sp = host->syncpt;
363 for (i = 0; i < host->info->nb_pts; i++, sp++)
364 kfree(sp->name);
365}
366
/* Number of sync points supported by this host1x instance. */
int host1x_syncpt_nb_pts(struct host1x *host)
{
	return host->info->nb_pts;
}

/* Number of wait bases supported by this host1x instance. */
int host1x_syncpt_nb_bases(struct host1x *host)
{
	return host->info->nb_bases;
}

/* Number of mlocks supported by this host1x instance. */
int host1x_syncpt_nb_mlocks(struct host1x *host)
{
	return host->info->nb_mlocks;
}
381
382struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
383{
384 if (host->info->nb_pts < id)
385 return NULL;
386 return host->syncpt + id;
387}
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
new file mode 100644
index 000000000000..c99806130f2e
--- /dev/null
+++ b/drivers/gpu/host1x/syncpt.h
@@ -0,0 +1,165 @@
1/*
2 * Tegra host1x Syncpoints
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_SYNCPT_H
20#define __HOST1X_SYNCPT_H
21
22#include <linux/atomic.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25
26#include "intr.h"
27
28struct host1x;
29
30/* Reserved for replacing an expired wait with a NOP */
31#define HOST1X_SYNCPT_RESERVED 0
32
/* Software state tracking a single hardware sync point. */
struct host1x_syncpt {
	int id;				/* hardware sync point index */
	atomic_t min_val;		/* shadow of the current hardware value */
	atomic_t max_val;		/* highest value queued by software */
	u32 base_val;			/* shadow of the wait base register */
	const char *name;		/* "id-devname", kasprintf-allocated */
	int client_managed;		/* non-zero: client tracks max itself */
	struct host1x *host;		/* owning host1x instance */
	struct device *dev;		/* client device, NULL for host-internal */

	/* interrupt data */
	struct host1x_syncpt_intr intr;
};
46
47/* Initialize sync point array */
48int host1x_syncpt_init(struct host1x *host);
49
50/* Free sync point array */
51void host1x_syncpt_deinit(struct host1x *host);
52
/*
 * Read max. It indicates how many operations there are in queue, either in
 * channel or in a software thread.
 */
static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
	/* pair with writers' barriers so a fresh value is observed */
	smp_rmb();
	return (u32)atomic_read(&sp->max_val);
}

/*
 * Read min, which is a shadow of the current sync point value in hardware.
 */
static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
	/* pair with writers' barriers so a fresh value is observed */
	smp_rmb();
	return (u32)atomic_read(&sp->min_val);
}
71
72/* Return number of sync point supported. */
73int host1x_syncpt_nb_pts(struct host1x *host);
74
75/* Return number of wait bases supported. */
76int host1x_syncpt_nb_bases(struct host1x *host);
77
78/* Return number of mlocks supported. */
79int host1x_syncpt_nb_mlocks(struct host1x *host);
80
81/*
82 * Check sync point sanity. If max is larger than min, there have too many
83 * sync point increments.
84 *
85 * Client managed sync point are not tracked.
86 * */
87static inline bool host1x_syncpt_check_max(struct host1x_syncpt *sp, u32 real)
88{
89 u32 max;
90 if (sp->client_managed)
91 return true;
92 max = host1x_syncpt_read_max(sp);
93 return (s32)(max - real) >= 0;
94}
95
/* Return non-zero if the sync point's max value is managed by the client. */
static inline int host1x_syncpt_client_managed(struct host1x_syncpt *sp)
{
	return sp->client_managed;
}
101
102/*
103 * Returns true if syncpoint min == max, which means that there are no
104 * outstanding operations.
105 */
106static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
107{
108 int min, max;
109 smp_rmb();
110 min = atomic_read(&sp->min_val);
111 max = atomic_read(&sp->max_val);
112 return (min == max);
113}
114
115/* Return pointer to struct denoting sync point id. */
116struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
117
118/* Request incrementing a sync point. */
119void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp);
120
121/* Load current value from hardware to the shadow register. */
122u32 host1x_syncpt_load(struct host1x_syncpt *sp);
123
124/* Check if the given syncpoint value has already passed */
125bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh);
126
127/* Save host1x sync point state into shadow registers. */
128void host1x_syncpt_save(struct host1x *host);
129
130/* Reset host1x sync point state from shadow registers. */
131void host1x_syncpt_restore(struct host1x *host);
132
133/* Read current wait base value into shadow register and return it. */
134u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
135
136/* Increment sync point and its max. */
137void host1x_syncpt_incr(struct host1x_syncpt *sp);
138
139/* Indicate future operations by incrementing the sync point max. */
140u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
141
142/* Wait until sync point reaches a threshold value, or a timeout. */
143int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
144 long timeout, u32 *value);
145
/* Check if sync point id is within the range supported by its host. */
static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
{
	return sp->id < host1x_syncpt_nb_pts(sp->host);
}
151
152/* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
153int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
154
155/* Return id of the sync point */
156u32 host1x_syncpt_id(struct host1x_syncpt *sp);
157
158/* Allocate a sync point for a device. */
159struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
160 int client_managed);
161
162/* Free a sync point. */
163void host1x_syncpt_free(struct host1x_syncpt *sp);
164
165#endif
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 4c1546f71d56..3e0e3f088686 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -21,6 +21,8 @@ source "drivers/gpu/vga/Kconfig"
21 21
22source "drivers/gpu/drm/Kconfig" 22source "drivers/gpu/drm/Kconfig"
23 23
24source "drivers/gpu/host1x/Kconfig"
25
24config VGASTATE 26config VGASTATE
25 tristate 27 tristate
26 default n 28 default n
diff --git a/include/trace/events/host1x.h b/include/trace/events/host1x.h
new file mode 100644
index 000000000000..94db6a2c3540
--- /dev/null
+++ b/include/trace/events/host1x.h
@@ -0,0 +1,253 @@
1/*
2 * include/trace/events/host1x.h
3 *
4 * host1x event logging to ftrace.
5 *
6 * Copyright (c) 2010-2013, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
21 */
22
23#undef TRACE_SYSTEM
24#define TRACE_SYSTEM host1x
25
26#if !defined(_TRACE_HOST1X_H) || defined(TRACE_HEADER_MULTI_READ)
27#define _TRACE_HOST1X_H
28
29#include <linux/ktime.h>
30#include <linux/tracepoint.h>
31
/* Common event class: records only the (channel) name. */
DECLARE_EVENT_CLASS(host1x,
	TP_PROTO(const char *name),
	TP_ARGS(name),
	TP_STRUCT__entry(__field(const char *, name)),
	TP_fast_assign(__entry->name = name;),
	TP_printk("name=%s", __entry->name)
);

/* A channel was opened. */
DEFINE_EVENT(host1x, host1x_channel_open,
	TP_PROTO(const char *name),
	TP_ARGS(name)
);

/* A channel was released. */
DEFINE_EVENT(host1x, host1x_channel_release,
	TP_PROTO(const char *name),
	TP_ARGS(name)
);

/* Start of a command DMA push sequence. */
DEFINE_EVENT(host1x, host1x_cdma_begin,
	TP_PROTO(const char *name),
	TP_ARGS(name)
);

/* End of a command DMA push sequence. */
DEFINE_EVENT(host1x, host1x_cdma_end,
	TP_PROTO(const char *name),
	TP_ARGS(name)
);
59
/* A pair of opcodes was pushed to the command DMA push buffer. */
TRACE_EVENT(host1x_cdma_push,
	TP_PROTO(const char *name, u32 op1, u32 op2),

	TP_ARGS(name, op1, op2),

	TP_STRUCT__entry(
		__field(const char *, name)
		__field(u32, op1)
		__field(u32, op2)
	),

	TP_fast_assign(
		__entry->name = name;
		__entry->op1 = op1;
		__entry->op2 = op2;
	),

	TP_printk("name=%s, op1=%08x, op2=%08x",
		__entry->name, __entry->op1, __entry->op2)
);
80
/*
 * A gather was pushed to the command DMA. When a mapped @cmdbuf pointer
 * is supplied, the gather contents are captured into the trace record.
 *
 * NOTE(review): the bool __field and the __dynamic_array are both named
 * "cmdbuf"; they do not collide because the dynamic array generates a
 * separately named data-location member.
 */
TRACE_EVENT(host1x_cdma_push_gather,
	TP_PROTO(const char *name, u32 mem_id,
			u32 words, u32 offset, void *cmdbuf),

	TP_ARGS(name, mem_id, words, offset, cmdbuf),

	TP_STRUCT__entry(
		__field(const char *, name)
		__field(u32, mem_id)
		__field(u32, words)
		__field(u32, offset)
		__field(bool, cmdbuf)
		__dynamic_array(u32, cmdbuf, words)
	),

	TP_fast_assign(
		/* copy the gather contents only when a mapping exists */
		if (cmdbuf) {
			memcpy(__get_dynamic_array(cmdbuf), cmdbuf+offset,
					words * sizeof(u32));
		}
		__entry->cmdbuf = cmdbuf;
		__entry->name = name;
		__entry->mem_id = mem_id;
		__entry->words = words;
		__entry->offset = offset;
	),

	TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d, contents=[%s]",
		__entry->name, __entry->mem_id,
		__entry->words, __entry->offset,
		__print_hex(__get_dynamic_array(cmdbuf),
				__entry->cmdbuf ? __entry->words * 4 : 0))
);
114
115TRACE_EVENT(host1x_channel_submit,
116 TP_PROTO(const char *name, u32 cmdbufs, u32 relocs, u32 waitchks,
117 u32 syncpt_id, u32 syncpt_incrs),
118
119 TP_ARGS(name, cmdbufs, relocs, waitchks, syncpt_id, syncpt_incrs),
120
121 TP_STRUCT__entry(
122 __field(const char *, name)
123 __field(u32, cmdbufs)
124 __field(u32, relocs)
125 __field(u32, waitchks)
126 __field(u32, syncpt_id)
127 __field(u32, syncpt_incrs)
128 ),
129
130 TP_fast_assign(
131 __entry->name = name;
132 __entry->cmdbufs = cmdbufs;
133 __entry->relocs = relocs;
134 __entry->waitchks = waitchks;
135 __entry->syncpt_id = syncpt_id;
136 __entry->syncpt_incrs = syncpt_incrs;
137 ),
138
139 TP_printk("name=%s, cmdbufs=%u, relocs=%u, waitchks=%d,"
140 "syncpt_id=%u, syncpt_incrs=%u",
141 __entry->name, __entry->cmdbufs, __entry->relocs, __entry->waitchks,
142 __entry->syncpt_id, __entry->syncpt_incrs)
143);
144
/* A submitted job was written to the push buffer. */
TRACE_EVENT(host1x_channel_submitted,
	TP_PROTO(const char *name, u32 syncpt_base, u32 syncpt_max),

	TP_ARGS(name, syncpt_base, syncpt_max),

	TP_STRUCT__entry(
		__field(const char *, name)
		__field(u32, syncpt_base)
		__field(u32, syncpt_max)
	),

	TP_fast_assign(
		__entry->name = name;
		__entry->syncpt_base = syncpt_base;
		__entry->syncpt_max = syncpt_max;
	),

	TP_printk("name=%s, syncpt_base=%d, syncpt_max=%d",
		__entry->name, __entry->syncpt_base, __entry->syncpt_max)
);

/* A job finished: its syncpoint threshold was reached. */
TRACE_EVENT(host1x_channel_submit_complete,
	TP_PROTO(const char *name, int count, u32 thresh),

	TP_ARGS(name, count, thresh),

	TP_STRUCT__entry(
		__field(const char *, name)
		__field(int, count)
		__field(u32, thresh)
	),

	TP_fast_assign(
		__entry->name = name;
		__entry->count = count;
		__entry->thresh = thresh;
	),

	TP_printk("name=%s, count=%d, thresh=%d",
		__entry->name, __entry->count, __entry->thresh)
);

/* The command DMA is waiting for an event (e.g. push buffer space). */
TRACE_EVENT(host1x_wait_cdma,
	TP_PROTO(const char *name, u32 eventid),

	TP_ARGS(name, eventid),

	TP_STRUCT__entry(
		__field(const char *, name)
		__field(u32, eventid)
	),

	TP_fast_assign(
		__entry->name = name;
		__entry->eventid = eventid;
	),

	TP_printk("name=%s, event=%d", __entry->name, __entry->eventid)
);

/* The cached minimum of a syncpoint was refreshed from hardware. */
TRACE_EVENT(host1x_syncpt_load_min,
	TP_PROTO(u32 id, u32 val),

	TP_ARGS(id, val),

	TP_STRUCT__entry(
		__field(u32, id)
		__field(u32, val)
	),

	TP_fast_assign(
		__entry->id = id;
		__entry->val = val;
	),

	TP_printk("id=%d, val=%d", __entry->id, __entry->val)
);

/* A host wait in a command buffer was checked against the syncpt value. */
TRACE_EVENT(host1x_syncpt_wait_check,
	TP_PROTO(void *mem_id, u32 offset, u32 syncpt_id, u32 thresh, u32 min),

	TP_ARGS(mem_id, offset, syncpt_id, thresh, min),

	TP_STRUCT__entry(
		__field(void *, mem_id)
		__field(u32, offset)
		__field(u32, syncpt_id)
		__field(u32, thresh)
		__field(u32, min)
	),

	TP_fast_assign(
		__entry->mem_id = mem_id;
		__entry->offset = offset;
		__entry->syncpt_id = syncpt_id;
		__entry->thresh = thresh;
		__entry->min = min;
	),

	TP_printk("mem_id=%p, offset=%05x, id=%d, thresh=%d, current=%d",
		__entry->mem_id, __entry->offset,
		__entry->syncpt_id, __entry->thresh,
		__entry->min)
);
249
250#endif /* _TRACE_HOST1X_H */
251
252/* This part must be outside protection */
253#include <trace/define_trace.h>
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild
index a042a957296d..119487e05e65 100644
--- a/include/uapi/drm/Kbuild
+++ b/include/uapi/drm/Kbuild
@@ -13,5 +13,6 @@ header-y += r128_drm.h
13header-y += radeon_drm.h 13header-y += radeon_drm.h
14header-y += savage_drm.h 14header-y += savage_drm.h
15header-y += sis_drm.h 15header-y += sis_drm.h
16header-y += tegra_drm.h
16header-y += via_drm.h 17header-y += via_drm.h
17header-y += vmwgfx_drm.h 18header-y += vmwgfx_drm.h
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
new file mode 100644
index 000000000000..6e132a2f7420
--- /dev/null
+++ b/include/uapi/drm/tegra_drm.h
@@ -0,0 +1,136 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef _UAPI_TEGRA_DRM_H_
18#define _UAPI_TEGRA_DRM_H_
19
/* Create a GEM buffer object of @size bytes; returns a handle. */
struct drm_tegra_gem_create {
	__u64 size;	/* in: size of the buffer in bytes */
	__u32 flags;	/* in: creation flags */
	__u32 handle;	/* out: GEM handle */
};

/* Query the mmap offset for a GEM object. */
struct drm_tegra_gem_mmap {
	__u32 handle;	/* in: GEM handle */
	__u32 offset;	/* out: offset to pass to mmap() */
};

/* Read the current value of a sync point. */
struct drm_tegra_syncpt_read {
	__u32 id;	/* in: sync point id */
	__u32 value;	/* out: current value */
};

/* Increment a sync point from the CPU. */
struct drm_tegra_syncpt_incr {
	__u32 id;	/* in: sync point id */
	__u32 pad;	/* explicit padding; keep zero */
};

/* Wait for a sync point to reach a threshold. */
struct drm_tegra_syncpt_wait {
	__u32 id;	/* in: sync point id */
	__u32 thresh;	/* in: value to wait for */
	__u32 timeout;	/* in: timeout in ms, or DRM_TEGRA_NO_TIMEOUT */
	__u32 value;	/* out: value at wait completion */
};

#define DRM_TEGRA_NO_TIMEOUT	(0xffffffff)

/* Open a channel to a client engine (e.g. gr2d). */
struct drm_tegra_open_channel {
	__u32 client;	/* in: client/module id */
	__u32 pad;	/* explicit padding; keep zero */
	__u64 context;	/* out: opaque channel context */
};

/* Close a previously opened channel. */
struct drm_tegra_close_channel {
	__u64 context;	/* in: context from open_channel */
};

/* Query the id of the @index-th sync point of a channel. */
struct drm_tegra_get_syncpt {
	__u64 context;	/* in: channel context */
	__u32 index;	/* in: sync point index within the channel */
	__u32 id;	/* out: sync point id */
};

/* Sync point increment declaration for a submit. */
struct drm_tegra_syncpt {
	__u32 id;	/* in: sync point id */
	__u32 incrs;	/* in: number of increments the job performs */
};

/* A command buffer gather within a submit. */
struct drm_tegra_cmdbuf {
	__u32 handle;	/* in: GEM handle of the command buffer */
	__u32 offset;	/* in: offset of the gather, in bytes */
	__u32 words;	/* in: number of 32-bit words */
	__u32 pad;	/* explicit padding; keep zero */
};

/* Buffer-address relocation patched into a command buffer at submit. */
struct drm_tegra_reloc {
	struct {
		__u32 handle;	/* in: command buffer GEM handle */
		__u32 offset;	/* in: location of the address word */
	} cmdbuf;
	struct {
		__u32 handle;	/* in: target buffer GEM handle */
		__u32 offset;	/* in: offset within the target buffer */
	} target;
	__u32 shift;	/* in: right-shift applied to the patched address */
	__u32 pad;	/* explicit padding; keep zero */
};

/* A host wait inside a command buffer to be checked/patched at submit. */
struct drm_tegra_waitchk {
	__u32 handle;	/* in: command buffer GEM handle */
	__u32 offset;	/* in: location of the wait within the buffer */
	__u32 syncpt;	/* in: sync point id being waited on */
	__u32 thresh;	/* in: threshold being waited for */
};

/* Job submission descriptor; pointer fields hold user-space addresses. */
struct drm_tegra_submit {
	__u64 context;		/* in: channel context */
	__u32 num_syncpts;	/* in: entries in @syncpts */
	__u32 num_cmdbufs;	/* in: entries in @cmdbufs */
	__u32 num_relocs;	/* in: entries in @relocs */
	__u32 num_waitchks;	/* in: entries in @waitchks */
	__u32 waitchk_mask;	/* in: mask of wait checks to apply */
	__u32 timeout;		/* in: job timeout */
	__u32 pad;		/* explicit padding; keep zero */
	__u64 syncpts;		/* in: pointer to drm_tegra_syncpt array */
	__u64 cmdbufs;		/* in: pointer to drm_tegra_cmdbuf array */
	__u64 relocs;		/* in: pointer to drm_tegra_reloc array */
	__u64 waitchks;		/* in: pointer to drm_tegra_waitchk array */
	__u32 fence;		/* Return value */

	__u32 reserved[5];	/* future expansion */
};

#define DRM_TEGRA_GEM_CREATE	0x00
#define DRM_TEGRA_GEM_MMAP	0x01
#define DRM_TEGRA_SYNCPT_READ	0x02
#define DRM_TEGRA_SYNCPT_INCR	0x03
#define DRM_TEGRA_SYNCPT_WAIT	0x04
#define DRM_TEGRA_OPEN_CHANNEL	0x05
#define DRM_TEGRA_CLOSE_CHANNEL	0x06
#define DRM_TEGRA_GET_SYNCPT	0x07
#define DRM_TEGRA_SUBMIT	0x08

#define DRM_IOCTL_TEGRA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_CREATE, struct drm_tegra_gem_create)
#define DRM_IOCTL_TEGRA_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_MMAP, struct drm_tegra_gem_mmap)
#define DRM_IOCTL_TEGRA_SYNCPT_READ DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_READ, struct drm_tegra_syncpt_read)
#define DRM_IOCTL_TEGRA_SYNCPT_INCR DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_INCR, struct drm_tegra_syncpt_incr)
#define DRM_IOCTL_TEGRA_SYNCPT_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_WAIT, struct drm_tegra_syncpt_wait)
#define DRM_IOCTL_TEGRA_OPEN_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_OPEN_CHANNEL, struct drm_tegra_open_channel)
/*
 * NOTE(review): CLOSE_CHANNEL deliberately reuses struct
 * drm_tegra_open_channel (only the context field is used); this is part
 * of the uapi ABI and must not be changed.
 */
#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_open_channel)
#define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
#define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
135
136#endif