diff options
author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
commit | fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch) | |
tree | a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/video | |
parent | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff) |
Diffstat (limited to 'drivers/video')
127 files changed, 43774 insertions, 0 deletions
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c new file mode 100644 index 00000000000..c861c41af44 --- /dev/null +++ b/drivers/video/backlight/adx_bl.c | |||
@@ -0,0 +1,182 @@ | |||
1 | /* | ||
2 | * linux/drivers/video/backlight/adx.c | ||
3 | * | ||
4 | * Copyright (C) 2009 Avionic Design GmbH | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Written by Thierry Reding <thierry.reding@avionic-design.de> | ||
11 | */ | ||
12 | |||
13 | #include <linux/backlight.h> | ||
14 | #include <linux/fb.h> | ||
15 | #include <linux/gfp.h> | ||
16 | #include <linux/io.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | |||
20 | /* register definitions */ | ||
21 | #define ADX_BACKLIGHT_CONTROL 0x00 | ||
22 | #define ADX_BACKLIGHT_CONTROL_ENABLE (1 << 0) | ||
23 | #define ADX_BACKLIGHT_BRIGHTNESS 0x08 | ||
24 | #define ADX_BACKLIGHT_STATUS 0x10 | ||
25 | #define ADX_BACKLIGHT_ERROR 0x18 | ||
26 | |||
27 | struct adxbl { | ||
28 | void __iomem *base; | ||
29 | }; | ||
30 | |||
31 | static int adx_backlight_update_status(struct backlight_device *bldev) | ||
32 | { | ||
33 | struct adxbl *bl = bl_get_data(bldev); | ||
34 | u32 value; | ||
35 | |||
36 | value = bldev->props.brightness; | ||
37 | writel(value, bl->base + ADX_BACKLIGHT_BRIGHTNESS); | ||
38 | |||
39 | value = readl(bl->base + ADX_BACKLIGHT_CONTROL); | ||
40 | |||
41 | if (bldev->props.state & BL_CORE_FBBLANK) | ||
42 | value &= ~ADX_BACKLIGHT_CONTROL_ENABLE; | ||
43 | else | ||
44 | value |= ADX_BACKLIGHT_CONTROL_ENABLE; | ||
45 | |||
46 | writel(value, bl->base + ADX_BACKLIGHT_CONTROL); | ||
47 | |||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | static int adx_backlight_get_brightness(struct backlight_device *bldev) | ||
52 | { | ||
53 | struct adxbl *bl = bl_get_data(bldev); | ||
54 | u32 brightness; | ||
55 | |||
56 | brightness = readl(bl->base + ADX_BACKLIGHT_BRIGHTNESS); | ||
57 | return brightness & 0xff; | ||
58 | } | ||
59 | |||
/*
 * fb match hook: this backlight is not bound to a particular
 * framebuffer, so report a match for every fb device.
 */
static int adx_backlight_check_fb(struct backlight_device *bldev, struct fb_info *fb)
{
	return 1;
}

/* Operations handed to backlight_device_register() in probe. */
static const struct backlight_ops adx_backlight_ops = {
	.options = 0,
	.update_status = adx_backlight_update_status,
	.get_brightness = adx_backlight_get_brightness,
	.check_fb = adx_backlight_check_fb,
};
71 | |||
72 | static int __devinit adx_backlight_probe(struct platform_device *pdev) | ||
73 | { | ||
74 | struct backlight_properties props; | ||
75 | struct backlight_device *bldev; | ||
76 | struct resource *res; | ||
77 | struct adxbl *bl; | ||
78 | int ret = 0; | ||
79 | |||
80 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
81 | if (!res) { | ||
82 | ret = -ENXIO; | ||
83 | goto out; | ||
84 | } | ||
85 | |||
86 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
87 | resource_size(res), res->name); | ||
88 | if (!res) { | ||
89 | ret = -ENXIO; | ||
90 | goto out; | ||
91 | } | ||
92 | |||
93 | bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL); | ||
94 | if (!bl) { | ||
95 | ret = -ENOMEM; | ||
96 | goto out; | ||
97 | } | ||
98 | |||
99 | bl->base = devm_ioremap_nocache(&pdev->dev, res->start, | ||
100 | resource_size(res)); | ||
101 | if (!bl->base) { | ||
102 | ret = -ENXIO; | ||
103 | goto out; | ||
104 | } | ||
105 | |||
106 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
107 | props.type = BACKLIGHT_RAW; | ||
108 | props.max_brightness = 0xff; | ||
109 | bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, | ||
110 | bl, &adx_backlight_ops, &props); | ||
111 | if (IS_ERR(bldev)) { | ||
112 | ret = PTR_ERR(bldev); | ||
113 | goto out; | ||
114 | } | ||
115 | |||
116 | bldev->props.brightness = 0xff; | ||
117 | bldev->props.power = FB_BLANK_UNBLANK; | ||
118 | |||
119 | platform_set_drvdata(pdev, bldev); | ||
120 | |||
121 | out: | ||
122 | return ret; | ||
123 | } | ||
124 | |||
125 | static int __devexit adx_backlight_remove(struct platform_device *pdev) | ||
126 | { | ||
127 | struct backlight_device *bldev; | ||
128 | int ret = 0; | ||
129 | |||
130 | bldev = platform_get_drvdata(pdev); | ||
131 | bldev->props.power = FB_BLANK_UNBLANK; | ||
132 | bldev->props.brightness = 0xff; | ||
133 | backlight_update_status(bldev); | ||
134 | backlight_device_unregister(bldev); | ||
135 | platform_set_drvdata(pdev, NULL); | ||
136 | |||
137 | return ret; | ||
138 | } | ||
139 | |||
#ifdef CONFIG_PM
/*
 * Legacy platform-bus PM hooks.  Both are no-ops here -- nothing is
 * saved or restored; they exist only so the fields below are non-NULL
 * when CONFIG_PM is set.
 */
static int adx_backlight_suspend(struct platform_device *pdev,
		pm_message_t state)
{
	return 0;
}

static int adx_backlight_resume(struct platform_device *pdev)
{
	return 0;
}
#else
#define adx_backlight_suspend NULL
#define adx_backlight_resume NULL
#endif

/* Platform driver using the legacy (pre-dev_pm_ops) suspend/resume fields. */
static struct platform_driver adx_backlight_driver = {
	.probe = adx_backlight_probe,
	.remove = __devexit_p(adx_backlight_remove),
	.suspend = adx_backlight_suspend,
	.resume = adx_backlight_resume,
	.driver = {
		.name = "adx-backlight",
		.owner = THIS_MODULE,
	},
};
166 | |||
167 | static int __init adx_backlight_init(void) | ||
168 | { | ||
169 | return platform_driver_register(&adx_backlight_driver); | ||
170 | } | ||
171 | |||
172 | static void __exit adx_backlight_exit(void) | ||
173 | { | ||
174 | platform_driver_unregister(&adx_backlight_driver); | ||
175 | } | ||
176 | |||
177 | module_init(adx_backlight_init); | ||
178 | module_exit(adx_backlight_exit); | ||
179 | |||
180 | MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); | ||
181 | MODULE_DESCRIPTION("Avionic Design Xanthos Backlight Driver"); | ||
182 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c new file mode 100644 index 00000000000..6af183d6465 --- /dev/null +++ b/drivers/video/backlight/progear_bl.c | |||
@@ -0,0 +1,160 @@ | |||
1 | /* | ||
2 | * Backlight Driver for Frontpath ProGear HX1050+ | ||
3 | * | ||
4 | * Copyright (c) 2006 Marcin Juszkiewicz | ||
5 | * | ||
6 | * Based on Progear LCD driver by M Schacht | ||
7 | * <mschacht at alumni dot washington dot edu> | ||
8 | * | ||
9 | * Based on Sharp's Corgi Backlight Driver | ||
10 | * Based on Backlight Driver for HP Jornada 680 | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/mutex.h> | ||
23 | #include <linux/fb.h> | ||
24 | #include <linux/backlight.h> | ||
25 | #include <linux/pci.h> | ||
26 | |||
27 | #define PMU_LPCR 0xB0 | ||
28 | #define SB_MPS1 0x61 | ||
29 | #define HW_LEVEL_MAX 0x77 | ||
30 | #define HW_LEVEL_MIN 0x4f | ||
31 | |||
/*
 * PCI devices looked up in probe: the ALi M7101 PMU (brightness
 * register) and M1533 south bridge (brightness-enable bit).
 * Statics are zero-initialised by the C runtime; explicit "= NULL"
 * initialisers are a kernel checkpatch violation and were removed.
 */
static struct pci_dev *pmu_dev;
static struct pci_dev *sb_dev;
34 | |||
35 | static int progearbl_set_intensity(struct backlight_device *bd) | ||
36 | { | ||
37 | int intensity = bd->props.brightness; | ||
38 | |||
39 | if (bd->props.power != FB_BLANK_UNBLANK) | ||
40 | intensity = 0; | ||
41 | if (bd->props.fb_blank != FB_BLANK_UNBLANK) | ||
42 | intensity = 0; | ||
43 | |||
44 | pci_write_config_byte(pmu_dev, PMU_LPCR, intensity + HW_LEVEL_MIN); | ||
45 | |||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | static int progearbl_get_intensity(struct backlight_device *bd) | ||
50 | { | ||
51 | u8 intensity; | ||
52 | pci_read_config_byte(pmu_dev, PMU_LPCR, &intensity); | ||
53 | |||
54 | return intensity - HW_LEVEL_MIN; | ||
55 | } | ||
56 | |||
/* Backlight core callbacks; registered in progearbl_probe(). */
static const struct backlight_ops progearbl_ops = {
	.get_brightness = progearbl_get_intensity,
	.update_status = progearbl_set_intensity,
};
61 | |||
62 | static int progearbl_probe(struct platform_device *pdev) | ||
63 | { | ||
64 | struct backlight_properties props; | ||
65 | u8 temp; | ||
66 | struct backlight_device *progear_backlight_device; | ||
67 | int ret; | ||
68 | |||
69 | pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL); | ||
70 | if (!pmu_dev) { | ||
71 | printk("ALI M7101 PMU not found.\n"); | ||
72 | return -ENODEV; | ||
73 | } | ||
74 | |||
75 | sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); | ||
76 | if (!sb_dev) { | ||
77 | printk("ALI 1533 SB not found.\n"); | ||
78 | ret = -ENODEV; | ||
79 | goto put_pmu; | ||
80 | } | ||
81 | |||
82 | /* Set SB_MPS1 to enable brightness control. */ | ||
83 | pci_read_config_byte(sb_dev, SB_MPS1, &temp); | ||
84 | pci_write_config_byte(sb_dev, SB_MPS1, temp | 0x20); | ||
85 | |||
86 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
87 | props.type = BACKLIGHT_RAW; | ||
88 | props.max_brightness = HW_LEVEL_MAX - HW_LEVEL_MIN; | ||
89 | progear_backlight_device = backlight_device_register("progear-bl", | ||
90 | &pdev->dev, NULL, | ||
91 | &progearbl_ops, | ||
92 | &props); | ||
93 | if (IS_ERR(progear_backlight_device)) { | ||
94 | ret = PTR_ERR(progear_backlight_device); | ||
95 | goto put_sb; | ||
96 | } | ||
97 | |||
98 | platform_set_drvdata(pdev, progear_backlight_device); | ||
99 | |||
100 | progear_backlight_device->props.power = FB_BLANK_UNBLANK; | ||
101 | progear_backlight_device->props.brightness = HW_LEVEL_MAX - HW_LEVEL_MIN; | ||
102 | progearbl_set_intensity(progear_backlight_device); | ||
103 | |||
104 | return 0; | ||
105 | put_sb: | ||
106 | pci_dev_put(sb_dev); | ||
107 | put_pmu: | ||
108 | pci_dev_put(pmu_dev); | ||
109 | return ret; | ||
110 | } | ||
111 | |||
/* Unregister the backlight device created in probe. */
static int progearbl_remove(struct platform_device *pdev)
{
	struct backlight_device *bd = platform_get_drvdata(pdev);
	backlight_device_unregister(bd);

	return 0;
}

/* Platform driver; bound to the device created in progearbl_init(). */
static struct platform_driver progearbl_driver = {
	.probe = progearbl_probe,
	.remove = progearbl_remove,
	.driver = {
		.name = "progear-bl",
	},
};
127 | |||
/* Self-created platform device (this hardware is not bus-enumerated). */
static struct platform_device *progearbl_device;

/*
 * Module init: register the driver, then instantiate the single
 * "progear-bl" device so probe runs immediately.  Unwinds the driver
 * registration if device creation fails.
 */
static int __init progearbl_init(void)
{
	int ret = platform_driver_register(&progearbl_driver);

	if (ret)
		return ret;
	progearbl_device = platform_device_register_simple("progear-bl", -1,
							NULL, 0);
	if (IS_ERR(progearbl_device)) {
		platform_driver_unregister(&progearbl_driver);
		return PTR_ERR(progearbl_device);
	}

	return 0;
}
145 | |||
146 | static void __exit progearbl_exit(void) | ||
147 | { | ||
148 | pci_dev_put(pmu_dev); | ||
149 | pci_dev_put(sb_dev); | ||
150 | |||
151 | platform_device_unregister(progearbl_device); | ||
152 | platform_driver_unregister(&progearbl_driver); | ||
153 | } | ||
154 | |||
155 | module_init(progearbl_init); | ||
156 | module_exit(progearbl_exit); | ||
157 | |||
158 | MODULE_AUTHOR("Marcin Juszkiewicz <linux@hrw.one.pl>"); | ||
159 | MODULE_DESCRIPTION("ProGear Backlight Driver"); | ||
160 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/backlight/tegra_pwm_bl.c b/drivers/video/backlight/tegra_pwm_bl.c new file mode 100644 index 00000000000..4be691c54d3 --- /dev/null +++ b/drivers/video/backlight/tegra_pwm_bl.c | |||
@@ -0,0 +1,177 @@ | |||
1 | /* | ||
2 | * linux/drivers/video/backlight/tegra_pwm_bl.c | ||
3 | * | ||
4 | * Tegra pwm backlight driver | ||
5 | * | ||
6 | * Copyright (C) 2011 NVIDIA Corporation | ||
7 | * Author: Renuka Apte <rapte@nvidia.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/fb.h> | ||
19 | #include <linux/backlight.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/tegra_pwm_bl.h> | ||
23 | #include <mach/dc.h> | ||
24 | |||
/* Per-device state, attached to the backlight device as driver data. */
struct tegra_pwm_bl_data {
	struct device *dev;	/* the platform device's struct device */
	int which_dc;		/* index of the display controller driving the PWM */
	int (*notify)(struct device *, int brightness);	/* optional board hook filtering brightness */
	struct tegra_dc_pwm_params params;	/* PWM config handed to tegra_dc_config_pwm() */
	int (*check_fb)(struct device *dev, struct fb_info *info);	/* optional fb match hook */
};
32 | |||
/*
 * Push the current brightness to the display controller's PWM.
 *
 * Brightness is forced to zero when the core has powered down or
 * blanked the framebuffer, then optionally filtered through the
 * board's notify() hook.  NOTE(review): a value above max_brightness
 * is only reported via dev_err() and still used for the duty cycle --
 * confirm whether clamping is intended.
 */
static int tegra_pwm_backlight_update_status(struct backlight_device *bl)
{
	struct tegra_pwm_bl_data *tbl = dev_get_drvdata(&bl->dev);
	int brightness = bl->props.brightness;
	int max = bl->props.max_brightness;
	struct tegra_dc *dc;

	if (bl->props.power != FB_BLANK_UNBLANK)
		brightness = 0;

	if (bl->props.fb_blank != FB_BLANK_UNBLANK)
		brightness = 0;

	if (tbl->notify)
		brightness = tbl->notify(tbl->dev, brightness);

	if (brightness > max)
		dev_err(&bl->dev, "Invalid brightness value: %d max: %d\n",
			brightness, max);

#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
	/* map API brightness range from (0~255) to hw range (0~128) */
	tbl->params.duty_cycle = (brightness * 128) / 255;
#else
	tbl->params.duty_cycle = brightness & 0xFF;
#endif

	/* Call tegra display controller function to update backlight */
	dc = tegra_dc_get_dc(tbl->which_dc);
	if (dc)
		tegra_dc_config_pwm(dc, &tbl->params);
	else
		dev_err(&bl->dev, "tegra display controller not available\n");

	return 0;
}
69 | |||
/* Report the cached brightness; the hardware is never read back. */
static int tegra_pwm_backlight_get_brightness(struct backlight_device *bl)
{
	return bl->props.brightness;
}
74 | |||
75 | static int tegra_pwm_backlight_check_fb(struct backlight_device *bl, | ||
76 | struct fb_info *info) | ||
77 | { | ||
78 | struct tegra_pwm_bl_data *tbl = dev_get_drvdata(&bl->dev); | ||
79 | return !tbl->check_fb || tbl->check_fb(tbl->dev, info); | ||
80 | } | ||
81 | |||
/* Operations handed to backlight_device_register() in probe. */
static const struct backlight_ops tegra_pwm_backlight_ops = {
	.update_status = tegra_pwm_backlight_update_status,
	.get_brightness = tegra_pwm_backlight_get_brightness,
	.check_fb = tegra_pwm_backlight_check_fb,
};
87 | |||
/*
 * Probe: snapshot the board configuration from platform_data into a
 * freshly allocated state structure, register the backlight device,
 * and push the default brightness to the hardware.
 */
static int tegra_pwm_backlight_probe(struct platform_device *pdev)
{
	struct backlight_properties props;
	struct platform_tegra_pwm_backlight_data *data;
	struct backlight_device *bl;
	struct tegra_pwm_bl_data *tbl;
	int ret;

	data = pdev->dev.platform_data;
	if (!data) {
		dev_err(&pdev->dev, "failed to find platform data\n");
		return -EINVAL;
	}

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl) {
		dev_err(&pdev->dev, "no memory for state\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* Copy the board-supplied PWM configuration. */
	tbl->dev = &pdev->dev;
	tbl->which_dc = data->which_dc;
	tbl->notify = data->notify;
	tbl->check_fb = data->check_fb;
	tbl->params.which_pwm = data->which_pwm;
	tbl->params.gpio_conf_to_sfio = data->gpio_conf_to_sfio;
	tbl->params.switch_to_sfio = data->switch_to_sfio;
	tbl->params.period = data->period;
	tbl->params.clk_div = data->clk_div;
	tbl->params.clk_select = data->clk_select;

	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = data->max_brightness;
	bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, tbl,
				       &tegra_pwm_backlight_ops, &props);
	if (IS_ERR(bl)) {
		dev_err(&pdev->dev, "failed to register backlight\n");
		ret = PTR_ERR(bl);
		goto err_bl;
	}

	/* Apply the default brightness immediately. */
	bl->props.brightness = data->dft_brightness;
	backlight_update_status(bl);

	platform_set_drvdata(pdev, bl);
	return 0;

err_bl:
	kfree(tbl);
err_alloc:
	return ret;
}
142 | |||
/* Unregister the backlight device and free the state allocated in probe. */
static int tegra_pwm_backlight_remove(struct platform_device *pdev)
{
	struct backlight_device *bl = platform_get_drvdata(pdev);
	struct tegra_pwm_bl_data *tbl = dev_get_drvdata(&bl->dev);

	backlight_device_unregister(bl);
	kfree(tbl);
	return 0;
}
152 | |||
/* Platform driver glue. */
static struct platform_driver tegra_pwm_backlight_driver = {
	.driver = {
		.name = "tegra-pwm-bl",
		.owner = THIS_MODULE,
	},
	.probe = tegra_pwm_backlight_probe,
	.remove = tegra_pwm_backlight_remove,
};

/*
 * Registered via late_initcall() rather than module_init(): the tegra
 * display controller this driver talks to must be up first, so do NOT
 * convert this to module_platform_driver().
 */
static int __init tegra_pwm_backlight_init(void)
{
	return platform_driver_register(&tegra_pwm_backlight_driver);
}
late_initcall(tegra_pwm_backlight_init);

static void __exit tegra_pwm_backlight_exit(void)
{
	platform_driver_unregister(&tegra_pwm_backlight_driver);
}
module_exit(tegra_pwm_backlight_exit);

MODULE_DESCRIPTION("Tegra PWM Backlight Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:tegra-pwm-backlight");

diff --git a/drivers/video/display/Kconfig b/drivers/video/display/Kconfig new file mode 100644 index 00000000000..f99af931d4f --- /dev/null +++ b/drivers/video/display/Kconfig | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Display drivers configuration | ||
3 | # | ||
4 | |||
5 | menu "Display device support" | ||
6 | |||
7 | config DISPLAY_SUPPORT | ||
8 | tristate "Display panel/monitor support" | ||
9 | ---help--- | ||
10 | This framework adds support for low-level control of a display. | ||
11 | This includes support for power. | ||
12 | |||
13 | Enable this to be able to choose the drivers for controlling the | ||
14 | physical display panel/monitor on some platforms. This not only | ||
15 | covers LCD displays for PDAs but also other types of displays | ||
16 | such as CRT, TVout etc. | ||
17 | |||
18 | To have support for your specific display panel you will have to | ||
19 | select the proper drivers which depend on this option. | ||
20 | |||
21 | comment "Display hardware drivers" | ||
22 | depends on DISPLAY_SUPPORT | ||
23 | |||
24 | endmenu | ||
diff --git a/drivers/video/display/Makefile b/drivers/video/display/Makefile new file mode 100644 index 00000000000..c0ea832bf17 --- /dev/null +++ b/drivers/video/display/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | # Display drivers | ||
2 | |||
3 | display-objs := display-sysfs.o | ||
4 | |||
5 | obj-$(CONFIG_DISPLAY_SUPPORT) += display.o | ||
6 | |||
diff --git a/drivers/video/display/display-sysfs.c b/drivers/video/display/display-sysfs.c new file mode 100644 index 00000000000..0c647d7af0e --- /dev/null +++ b/drivers/video/display/display-sysfs.c | |||
@@ -0,0 +1,219 @@ | |||
1 | /* | ||
2 | * display-sysfs.c - Display output driver sysfs interface | ||
3 | * | ||
4 | * Copyright (C) 2007 James Simmons <jsimmons@infradead.org> | ||
5 | * | ||
6 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or (at | ||
11 | * your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
21 | * | ||
22 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
23 | */ | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/display.h> | ||
26 | #include <linux/ctype.h> | ||
27 | #include <linux/idr.h> | ||
28 | #include <linux/err.h> | ||
29 | #include <linux/kdev_t.h> | ||
30 | #include <linux/slab.h> | ||
31 | |||
32 | static ssize_t display_show_name(struct device *dev, | ||
33 | struct device_attribute *attr, char *buf) | ||
34 | { | ||
35 | struct display_device *dsp = dev_get_drvdata(dev); | ||
36 | return snprintf(buf, PAGE_SIZE, "%s\n", dsp->name); | ||
37 | } | ||
38 | |||
39 | static ssize_t display_show_type(struct device *dev, | ||
40 | struct device_attribute *attr, char *buf) | ||
41 | { | ||
42 | struct display_device *dsp = dev_get_drvdata(dev); | ||
43 | return snprintf(buf, PAGE_SIZE, "%s\n", dsp->type); | ||
44 | } | ||
45 | |||
46 | static ssize_t display_show_contrast(struct device *dev, | ||
47 | struct device_attribute *attr, char *buf) | ||
48 | { | ||
49 | struct display_device *dsp = dev_get_drvdata(dev); | ||
50 | ssize_t rc = -ENXIO; | ||
51 | |||
52 | mutex_lock(&dsp->lock); | ||
53 | if (likely(dsp->driver) && dsp->driver->get_contrast) | ||
54 | rc = sprintf(buf, "%d\n", dsp->driver->get_contrast(dsp)); | ||
55 | mutex_unlock(&dsp->lock); | ||
56 | return rc; | ||
57 | } | ||
58 | |||
59 | static ssize_t display_store_contrast(struct device *dev, | ||
60 | struct device_attribute *attr, | ||
61 | const char *buf, size_t count) | ||
62 | { | ||
63 | struct display_device *dsp = dev_get_drvdata(dev); | ||
64 | ssize_t ret = -EINVAL, size; | ||
65 | int contrast; | ||
66 | char *endp; | ||
67 | |||
68 | contrast = simple_strtoul(buf, &endp, 0); | ||
69 | size = endp - buf; | ||
70 | |||
71 | if (isspace(*endp)) | ||
72 | size++; | ||
73 | |||
74 | if (size != count) | ||
75 | return ret; | ||
76 | |||
77 | mutex_lock(&dsp->lock); | ||
78 | if (likely(dsp->driver && dsp->driver->set_contrast)) { | ||
79 | pr_debug("display: set contrast to %d\n", contrast); | ||
80 | dsp->driver->set_contrast(dsp, contrast); | ||
81 | ret = count; | ||
82 | } | ||
83 | mutex_unlock(&dsp->lock); | ||
84 | return ret; | ||
85 | } | ||
86 | |||
87 | static ssize_t display_show_max_contrast(struct device *dev, | ||
88 | struct device_attribute *attr, | ||
89 | char *buf) | ||
90 | { | ||
91 | struct display_device *dsp = dev_get_drvdata(dev); | ||
92 | ssize_t rc = -ENXIO; | ||
93 | |||
94 | mutex_lock(&dsp->lock); | ||
95 | if (likely(dsp->driver)) | ||
96 | rc = sprintf(buf, "%d\n", dsp->driver->max_contrast); | ||
97 | mutex_unlock(&dsp->lock); | ||
98 | return rc; | ||
99 | } | ||
100 | |||
/* Attributes attached to every device in the "display" class. */
static struct device_attribute display_attrs[] = {
	__ATTR(name, S_IRUGO, display_show_name, NULL),
	__ATTR(type, S_IRUGO, display_show_type, NULL),
	__ATTR(contrast, S_IRUGO | S_IWUSR, display_show_contrast, display_store_contrast),
	__ATTR(max_contrast, S_IRUGO, display_show_max_contrast, NULL),
};
107 | |||
/*
 * Class-level PM: forward suspend to the panel driver under the
 * device lock.  NOTE(review): dsp->driver is dereferenced without a
 * NULL check (unlike the show/store handlers); this relies on
 * display_device_register() never exposing a device without a driver.
 */
static int display_suspend(struct device *dev, pm_message_t state)
{
	struct display_device *dsp = dev_get_drvdata(dev);

	mutex_lock(&dsp->lock);
	if (likely(dsp->driver->suspend))
		dsp->driver->suspend(dsp, state);
	mutex_unlock(&dsp->lock);
	return 0;
};

/* Class-level PM: forward resume to the panel driver under the lock. */
static int display_resume(struct device *dev)
{
	struct display_device *dsp = dev_get_drvdata(dev);

	mutex_lock(&dsp->lock);
	if (likely(dsp->driver->resume))
		dsp->driver->resume(dsp);
	mutex_unlock(&dsp->lock);
	return 0;
};
129 | |||
/* Protects allocated_dsp; initialised in display_class_init(). */
static struct mutex allocated_dsp_lock;
static DEFINE_IDR(allocated_dsp);	/* maps display index -> display_device */
static struct class *display_class;
133 | |||
134 | struct display_device *display_device_register(struct display_driver *driver, | ||
135 | struct device *parent, void *devdata) | ||
136 | { | ||
137 | struct display_device *new_dev = NULL; | ||
138 | int ret = -EINVAL; | ||
139 | |||
140 | if (unlikely(!driver)) | ||
141 | return ERR_PTR(ret); | ||
142 | |||
143 | mutex_lock(&allocated_dsp_lock); | ||
144 | ret = idr_pre_get(&allocated_dsp, GFP_KERNEL); | ||
145 | mutex_unlock(&allocated_dsp_lock); | ||
146 | if (!ret) | ||
147 | return ERR_PTR(ret); | ||
148 | |||
149 | new_dev = kzalloc(sizeof(struct display_device), GFP_KERNEL); | ||
150 | if (likely(new_dev) && unlikely(driver->probe(new_dev, devdata))) { | ||
151 | // Reserve the index for this display | ||
152 | mutex_lock(&allocated_dsp_lock); | ||
153 | ret = idr_get_new(&allocated_dsp, new_dev, &new_dev->idx); | ||
154 | mutex_unlock(&allocated_dsp_lock); | ||
155 | |||
156 | if (!ret) { | ||
157 | new_dev->dev = device_create(display_class, parent, | ||
158 | MKDEV(0, 0), new_dev, | ||
159 | "display%d", new_dev->idx); | ||
160 | if (!IS_ERR(new_dev->dev)) { | ||
161 | new_dev->parent = parent; | ||
162 | new_dev->driver = driver; | ||
163 | mutex_init(&new_dev->lock); | ||
164 | return new_dev; | ||
165 | } | ||
166 | mutex_lock(&allocated_dsp_lock); | ||
167 | idr_remove(&allocated_dsp, new_dev->idx); | ||
168 | mutex_unlock(&allocated_dsp_lock); | ||
169 | ret = -EINVAL; | ||
170 | } | ||
171 | } | ||
172 | kfree(new_dev); | ||
173 | return ERR_PTR(ret); | ||
174 | } | ||
175 | EXPORT_SYMBOL(display_device_register); | ||
176 | |||
/*
 * display_device_unregister - tear down a registered display device
 * @ddev: device returned by display_device_register(); NULL is a no-op
 *
 * Removes the sysfs device, releases the idr index and frees the
 * structure.  Callers must ensure no other thread still uses @ddev.
 */
void display_device_unregister(struct display_device *ddev)
{
	if (!ddev)
		return;
	/* Free device */
	mutex_lock(&ddev->lock);
	device_unregister(ddev->dev);
	mutex_unlock(&ddev->lock);
	/* Mark device index as available */
	mutex_lock(&allocated_dsp_lock);
	idr_remove(&allocated_dsp, ddev->idx);
	mutex_unlock(&allocated_dsp_lock);
	kfree(ddev);
}
EXPORT_SYMBOL(display_device_unregister);
192 | |||
193 | static int __init display_class_init(void) | ||
194 | { | ||
195 | display_class = class_create(THIS_MODULE, "display"); | ||
196 | if (IS_ERR(display_class)) { | ||
197 | printk(KERN_ERR "Failed to create display class\n"); | ||
198 | display_class = NULL; | ||
199 | return -EINVAL; | ||
200 | } | ||
201 | display_class->dev_attrs = display_attrs; | ||
202 | display_class->suspend = display_suspend; | ||
203 | display_class->resume = display_resume; | ||
204 | mutex_init(&allocated_dsp_lock); | ||
205 | return 0; | ||
206 | } | ||
207 | |||
/* Module teardown: remove the "display" sysfs class. */
static void __exit display_class_exit(void)
{
	class_destroy(display_class);
}

module_init(display_class_init);
module_exit(display_class_exit);

MODULE_DESCRIPTION("Display Hardware handling");
MODULE_AUTHOR("James Simmons <jsimmons@infradead.org>");
MODULE_LICENSE("GPL");

diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c new file mode 100644 index 00000000000..a268cbf1cbe --- /dev/null +++ b/drivers/video/epson1355fb.c | |||
@@ -0,0 +1,749 @@ | |||
1 | /* | ||
2 | * linux/drivers/video/epson1355fb.c -- Epson S1D13505 frame buffer for 2.5. | ||
3 | * | ||
4 | * Epson Research S1D13505 Embedded RAMDAC LCD/CRT Controller | ||
5 | * (previously known as SED1355) | ||
6 | * | ||
7 | * Cf. http://vdc.epson.com/ | ||
8 | * | ||
9 | * | ||
10 | * Copyright (C) Hewlett-Packard Company. All rights reserved. | ||
11 | * | ||
12 | * Written by Christopher Hoover <ch@hpl.hp.com> | ||
13 | * | ||
14 | * Adapted from: | ||
15 | * | ||
16 | * linux/drivers/video/skeletonfb.c | ||
17 | * Modified to new api Jan 2001 by James Simmons (jsimmons@infradead.org) | ||
18 | * Created 28 Dec 1997 by Geert Uytterhoeven | ||
19 | * | ||
20 | * linux/drivers/video/epson1355fb.c (2.4 driver) | ||
21 | * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> | ||
22 | * | ||
23 | * This file is subject to the terms and conditions of the GNU General Public | ||
24 | * License. See the file COPYING in the main directory of this archive for | ||
25 | * more details. | ||
26 | * | ||
27 | * | ||
28 | * Noteworthy Issues | ||
29 | * ----------------- | ||
30 | * | ||
31 | * This driver is complicated by the fact that this is a 16-bit chip | ||
32 | * and, on at least one platform (ceiva), we can only do 16-bit reads | ||
33 | * and writes to the framebuffer. We hide this from user space | ||
34 | * except in the case of mmap(). | ||
35 | * | ||
36 | * | ||
37 | * To Do | ||
38 | * ----- | ||
39 | * | ||
40 | * - Test 8-bit pseudocolor mode | ||
41 | * - Allow setting bpp, virtual resolution | ||
42 | * - Implement horizontal panning | ||
43 | * - (maybe) Implement hardware cursor | ||
44 | */ | ||
45 | |||
46 | #include <linux/module.h> | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/errno.h> | ||
49 | #include <linux/string.h> | ||
50 | #include <linux/mm.h> | ||
51 | #include <linux/delay.h> | ||
52 | #include <linux/fb.h> | ||
53 | #include <linux/init.h> | ||
54 | #include <linux/ioport.h> | ||
55 | #include <linux/platform_device.h> | ||
56 | |||
57 | #include <asm/types.h> | ||
58 | #include <asm/io.h> | ||
59 | #include <linux/uaccess.h> | ||
60 | |||
61 | #include <video/epson1355.h> | ||
62 | |||
/* Per-device private state, allocated alongside the fb_info. */
struct epson1355_par {
	unsigned long reg_addr;		/* ioremap()ed base of the register window */
	u32 pseudo_palette[16];		/* palette for FB_VISUAL_TRUECOLOR (16 entries) */
};
67 | |||
68 | /* ------------------------------------------------------------------------- */ | ||
69 | |||
#if defined(CONFIG_ARM)

# ifdef CONFIG_ARCH_CEIVA
#  include <mach/hardware.h>
#  define EPSON1355FB_BASE_PHYS	(CEIVA_PHYS_SED1355)
# endif

/* Read one 8-bit chip register at byte offset @index from the mapped window. */
static inline u8 epson1355_read_reg(struct epson1355_par *par, int index)
{
	return __raw_readb(par->reg_addr + index);
}

/* Write one 8-bit chip register at byte offset @index. */
static inline void epson1355_write_reg(struct epson1355_par *par, u8 data, int index)
{
	__raw_writeb(data, par->reg_addr + index);
}

#else
# error "no architecture-specific epson1355_{read,write}_reg"
#endif

#ifndef EPSON1355FB_BASE_PHYS
# error "EPSON1355FB_BASE_PHYS is not defined"
#endif
94 | |||
95 | #define EPSON1355FB_REGS_OFS (0) | ||
96 | #define EPSON1355FB_REGS_PHYS (EPSON1355FB_BASE_PHYS + EPSON1355FB_REGS_OFS) | ||
97 | #define EPSON1355FB_REGS_LEN (64) | ||
98 | |||
99 | #define EPSON1355FB_FB_OFS (0x00200000) | ||
100 | #define EPSON1355FB_FB_PHYS (EPSON1355FB_BASE_PHYS + EPSON1355FB_FB_OFS) | ||
101 | #define EPSON1355FB_FB_LEN (2 * 1024 * 1024) | ||
102 | |||
103 | /* ------------------------------------------------------------------------- */ | ||
104 | |||
105 | static inline u16 epson1355_read_reg16(struct epson1355_par *par, int index) | ||
106 | { | ||
107 | u8 lo = epson1355_read_reg(par, index); | ||
108 | u8 hi = epson1355_read_reg(par, index + 1); | ||
109 | |||
110 | return (hi << 8) | lo; | ||
111 | } | ||
112 | |||
113 | static inline void epson1355_write_reg16(struct epson1355_par *par, u16 data, int index) | ||
114 | { | ||
115 | u8 lo = data & 0xff; | ||
116 | u8 hi = (data >> 8) & 0xff; | ||
117 | |||
118 | epson1355_write_reg(par, lo, index); | ||
119 | epson1355_write_reg(par, hi, index + 1); | ||
120 | } | ||
121 | |||
122 | static inline u32 epson1355_read_reg20(struct epson1355_par *par, int index) | ||
123 | { | ||
124 | u8 b0 = epson1355_read_reg(par, index); | ||
125 | u8 b1 = epson1355_read_reg(par, index + 1); | ||
126 | u8 b2 = epson1355_read_reg(par, index + 2); | ||
127 | |||
128 | return (b2 & 0x0f) << 16 | (b1 << 8) | b0; | ||
129 | } | ||
130 | |||
131 | static inline void epson1355_write_reg20(struct epson1355_par *par, u32 data, int index) | ||
132 | { | ||
133 | u8 b0 = data & 0xff; | ||
134 | u8 b1 = (data >> 8) & 0xff; | ||
135 | u8 b2 = (data >> 16) & 0x0f; | ||
136 | |||
137 | epson1355_write_reg(par, b0, index); | ||
138 | epson1355_write_reg(par, b1, index + 1); | ||
139 | epson1355_write_reg(par, b2, index + 2); | ||
140 | } | ||
141 | |||
142 | /* ------------------------------------------------------------------------- */ | ||
143 | |||
/*
 * Program one color-lookup-table entry: latch the entry index, then feed
 * R, G and B through three consecutive REG_LUT_DATA writes -- the write
 * order below is therefore significant.
 */
static void set_lut(struct epson1355_par *par, u8 index, u8 r, u8 g, u8 b)
{
	epson1355_write_reg(par, index, REG_LUT_ADDR);
	epson1355_write_reg(par, r, REG_LUT_DATA);
	epson1355_write_reg(par, g, REG_LUT_DATA);
	epson1355_write_reg(par, b, REG_LUT_DATA);
}
151 | |||
152 | |||
153 | /** | ||
154 | * epson1355fb_setcolreg - sets a color register. | ||
155 | * @regno: Which register in the CLUT we are programming | ||
156 | * @red: The red value which can be up to 16 bits wide | ||
157 | * @green: The green value which can be up to 16 bits wide | ||
158 | * @blue: The blue value which can be up to 16 bits wide. | ||
159 | * @transp: If supported the alpha value which can be up to 16 bits wide. | ||
160 | * @info: frame buffer info structure | ||
161 | * | ||
162 | * Returns negative errno on error, or zero on success. | ||
163 | */ | ||
/**
 * epson1355fb_setcolreg - sets a color register.
 * @regno: Which register in the CLUT we are programming
 * @r: The red value which can be up to 16 bits wide
 * @g: The green value which can be up to 16 bits wide
 * @b: The blue value which can be up to 16 bits wide.
 * @transp: If supported the alpha value which can be up to 16 bits wide.
 *	(ignored here -- the hardware has no alpha channel)
 * @info: frame buffer info structure
 *
 * In truecolor mode the entry is packed as RGB565 into the software
 * pseudo-palette; in pseudocolor mode it is written to the hardware LUT.
 *
 * Returns negative errno on error, or zero on success.
 */
static int epson1355fb_setcolreg(unsigned regno, unsigned r, unsigned g,
				 unsigned b, unsigned transp,
				 struct fb_info *info)
{
	struct epson1355_par *par = info->par;

	/* ITU-R 601 luma weights (x65536) to collapse RGB to gray */
	if (info->var.grayscale)
		r = g = b = (19595 * r + 38470 * g + 7471 * b) >> 16;

	switch (info->fix.visual) {
	case FB_VISUAL_TRUECOLOR:
		if (regno >= 16)
			return -EINVAL;

		/* keep the top 5/6/5 bits of the 16-bit components -> RGB565 */
		((u32 *) info->pseudo_palette)[regno] =
		    (r & 0xf800) | (g & 0xfc00) >> 5 | (b & 0xf800) >> 11;

		break;
	case FB_VISUAL_PSEUDOCOLOR:
		if (regno >= 256)
			return -EINVAL;

		/* hardware LUT takes 8-bit components */
		set_lut(par, regno, r >> 8, g >> 8, b >> 8);

		break;
	default:
		return -ENOSYS;
	}
	return 0;
}
194 | |||
195 | /* ------------------------------------------------------------------------- */ | ||
196 | |||
197 | /** | ||
198 | * epson1355fb_pan_display - Pans the display. | ||
199 | * @var: frame buffer variable screen structure | ||
200 | * @info: frame buffer structure that represents a single frame buffer | ||
201 | * | ||
202 | * Pan (or wrap, depending on the `vmode' field) the display using the | ||
203 | * `xoffset' and `yoffset' fields of the `var' structure. | ||
204 | * If the values don't fit, return -EINVAL. | ||
205 | * | ||
206 | * Returns negative errno on error, or zero on success. | ||
207 | */ | ||
/**
 * epson1355fb_pan_display - Pans the display.
 * @var: frame buffer variable screen structure
 * @info: frame buffer structure that represents a single frame buffer
 *
 * Pan (or wrap, depending on the `vmode' field) the display using the
 * `xoffset' and `yoffset' fields of the `var' structure.
 * If the values don't fit, return -EINVAL.
 *
 * Only vertical panning is implemented (see the To Do list above).
 *
 * Returns negative errno on error, or zero on success.
 */
static int epson1355fb_pan_display(struct fb_var_screeninfo *var,
				   struct fb_info *info)
{
	struct epson1355_par *par = info->par;
	u32 start;

	if (var->xoffset != 0)	/* not yet ... */
		return -EINVAL;

	if (var->yoffset + info->var.yres > info->var.yres_virtual)
		return -EINVAL;

	/* start address is programmed in 16-bit words, hence line_length/2 */
	start = (info->fix.line_length >> 1) * var->yoffset;

	epson1355_write_reg20(par, start, REG_SCRN1_DISP_START_ADDR0);

	return 0;
}
226 | |||
227 | /* ------------------------------------------------------------------------- */ | ||
228 | |||
229 | static void lcd_enable(struct epson1355_par *par, int enable) | ||
230 | { | ||
231 | u8 mode = epson1355_read_reg(par, REG_DISPLAY_MODE); | ||
232 | |||
233 | if (enable) | ||
234 | mode |= 1; | ||
235 | else | ||
236 | mode &= ~1; | ||
237 | |||
238 | epson1355_write_reg(par, mode, REG_DISPLAY_MODE); | ||
239 | } | ||
240 | |||
#if defined(CONFIG_ARCH_CEIVA)
/* Drive the ceiva backlight GPIO (port D bit 5); no-op on other platforms. */
static void backlight_enable(int enable)
{
	/* ### this should be protected by a spinlock ...
	 * (read-modify-write of a shared port register) */
	u8 pddr = clps_readb(PDDR);
	if (enable)
		pddr |= (1 << 5);
	else
		pddr &= ~(1 << 5);
	clps_writeb(pddr, PDDR);
}
#else
static void backlight_enable(int enable)
{
}
#endif
257 | |||
258 | |||
259 | /** | ||
260 | * epson1355fb_blank - blanks the display. | ||
261 | * @blank_mode: the blank mode we want. | ||
262 | * @info: frame buffer structure that represents a single frame buffer | ||
263 | * | ||
264 | * Blank the screen if blank_mode != 0, else unblank. Return 0 if | ||
265 | * blanking succeeded, != 0 if un-/blanking failed due to e.g. a | ||
266 | * video mode which doesn't support it. Implements VESA suspend | ||
267 | * and powerdown modes on hardware that supports disabling hsync/vsync: | ||
268 | * blank_mode == 2: suspend vsync | ||
269 | * blank_mode == 3: suspend hsync | ||
270 | * blank_mode == 4: powerdown | ||
271 | * | ||
272 | * Returns negative errno on error, or zero on success. | ||
273 | * | ||
274 | */ | ||
/**
 * epson1355fb_blank - blanks the display.
 * @blank_mode: the blank mode we want.
 * @info: frame buffer structure that represents a single frame buffer
 *
 * Blank the screen if blank_mode != 0, else unblank. Return 0 if
 * blanking succeeded, != 0 if un-/blanking failed due to e.g. a
 * video mode which doesn't support it. Implements VESA suspend
 * and powerdown modes on hardware that supports disabling hsync/vsync:
 *	blank_mode == 2: suspend vsync
 *	blank_mode == 3: suspend hsync
 *	blank_mode == 4: powerdown
 *
 * Returns negative errno on error, or zero on success.
 *
 */
static int epson1355fb_blank(int blank_mode, struct fb_info *info)
{
	struct epson1355_par *par = info->par;

	switch (blank_mode) {
	case FB_BLANK_UNBLANK:
	case FB_BLANK_NORMAL:
		/* full power: panel and backlight on */
		lcd_enable(par, 1);
		backlight_enable(1);
		break;
	case FB_BLANK_VSYNC_SUSPEND:
	case FB_BLANK_HSYNC_SUSPEND:
		/* suspend: backlight off, panel kept running */
		backlight_enable(0);
		break;
	case FB_BLANK_POWERDOWN:
		backlight_enable(0);
		lcd_enable(par, 0);
		break;
	default:
		return -EINVAL;
	}

	/* let fbcon do a soft blank for us */
	return (blank_mode == FB_BLANK_NORMAL) ? 1 : 0;
}
300 | |||
301 | /* ------------------------------------------------------------------------- */ | ||
302 | |||
303 | /* | ||
304 | * We can't use the cfb generic routines, as we have to limit | ||
305 | * ourselves to 16-bit or 8-bit loads and stores to this 16-bit | ||
306 | * chip. | ||
307 | */ | ||
308 | |||
309 | static inline void epson1355fb_fb_writel(unsigned long v, unsigned long *a) | ||
310 | { | ||
311 | u16 *p = (u16 *) a; | ||
312 | u16 l = v & 0xffff; | ||
313 | u16 h = v >> 16; | ||
314 | |||
315 | fb_writew(l, p); | ||
316 | fb_writew(h, p + 1); | ||
317 | } | ||
318 | |||
319 | static inline unsigned long epson1355fb_fb_readl(const unsigned long *a) | ||
320 | { | ||
321 | const u16 *p = (u16 *) a; | ||
322 | u16 l = fb_readw(p); | ||
323 | u16 h = fb_readw(p + 1); | ||
324 | |||
325 | return (h << 16) | l; | ||
326 | } | ||
327 | |||
328 | #define FB_READL epson1355fb_fb_readl | ||
329 | #define FB_WRITEL epson1355fb_fb_writel | ||
330 | |||
331 | /* ------------------------------------------------------------------------- */ | ||
332 | |||
/*
 * copy_from_user() analogue that only issues 16-bit (plus one trailing
 * 8-bit) stores to the framebuffer, since the chip cannot take wider
 * accesses on some platforms.  Like the real copy_from_user(), returns
 * the number of bytes NOT copied (0 on full success).
 */
static inline unsigned long copy_from_user16(void *to, const void *from,
					     unsigned long n)
{
	u16 *dst = (u16 *) to;
	u16 *src = (u16 *) from;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	/* bulk of the transfer in 16-bit units */
	while (n > 1) {
		u16 v;
		if (__get_user(v, src))
			return n;

		fb_writew(v, dst);

		src++, dst++;
		n -= 2;
	}

	/* odd trailing byte, if any */
	if (n) {
		u8 v;

		if (__get_user(v, ((u8 *) src)))
			return n;

		fb_writeb(v, dst);
	}
	return 0;
}
363 | |||
/*
 * copy_to_user() analogue that only issues 16-bit (plus one trailing
 * 8-bit) loads from the framebuffer -- see copy_from_user16() above.
 * Returns the number of bytes NOT copied (0 on full success).
 */
static inline unsigned long copy_to_user16(void *to, const void *from,
					   unsigned long n)
{
	u16 *dst = (u16 *) to;
	u16 *src = (u16 *) from;

	if (!access_ok(VERIFY_WRITE, to, n))
		return n;

	/* bulk of the transfer in 16-bit units */
	while (n > 1) {
		u16 v = fb_readw(src);

		if (__put_user(v, dst))
			return n;

		src++, dst++;
		n -= 2;
	}

	/* odd trailing byte, if any */
	if (n) {
		u8 v = fb_readb(src);

		if (__put_user(v, ((u8 *) dst)))
			return n;
	}
	return 0;
}
391 | |||
392 | |||
/*
 * fb_read hook: copy framebuffer contents to user space using the
 * 16-bit-safe copy_to_user16().  Clamps the request to the framebuffer
 * size, advances *ppos by the amount actually copied and returns it;
 * returns -EFAULT only when nothing at all could be copied.
 * NOTE(review): @buf is a user pointer and should be __user-annotated;
 * the prototype is dictated by the fb_ops signature of this kernel.
 */
static ssize_t
epson1355fb_read(struct fb_info *info, char *buf, size_t count, loff_t * ppos)
{
	unsigned long p = *ppos;

	if (p >= info->fix.smem_len)
		return 0;
	if (count >= info->fix.smem_len)
		count = info->fix.smem_len;
	if (count + p > info->fix.smem_len)
		count = info->fix.smem_len - p;

	if (count) {
		char *base_addr;

		base_addr = info->screen_base;
		/* copy_to_user16() returns bytes NOT copied */
		count -= copy_to_user16(buf, base_addr + p, count);
		if (!count)
			return -EFAULT;
		*ppos += count;
	}
	return count;
}
416 | |||
/*
 * fb_write hook: copy user data into the framebuffer using the
 * 16-bit-safe copy_from_user16().  Clamps the request to the
 * framebuffer size (-ENOSPC when truncated), advances *ppos by the
 * amount actually written and returns it; returns -EFAULT when
 * nothing at all could be copied.
 * NOTE(review): @buf is a user pointer and should be __user-annotated;
 * the prototype is dictated by the fb_ops signature of this kernel.
 */
static ssize_t
epson1355fb_write(struct fb_info *info, const char *buf,
		  size_t count, loff_t * ppos)
{
	unsigned long p = *ppos;
	int err;

	/* from fbmem.c except for our own copy_*_user */
	if (p > info->fix.smem_len)
		return -ENOSPC;
	if (count >= info->fix.smem_len)
		count = info->fix.smem_len;
	err = 0;
	if (count + p > info->fix.smem_len) {
		count = info->fix.smem_len - p;
		err = -ENOSPC;
	}

	if (count) {
		char *base_addr;

		base_addr = info->screen_base;
		/* copy_from_user16() returns bytes NOT copied */
		count -= copy_from_user16(base_addr + p, buf, count);
		*ppos += count;
		err = -EFAULT;	/* only reported if count dropped to 0 below */
	}
	if (count)
		return count;
	return err;
}
447 | |||
448 | /* ------------------------------------------------------------------------- */ | ||
449 | |||
/* Standard cfb drawing ops plus our 16-bit-safe read/write replacements. */
static struct fb_ops epson1355fb_fbops = {
	.owner = THIS_MODULE,
	.fb_setcolreg = epson1355fb_setcolreg,
	.fb_pan_display = epson1355fb_pan_display,
	.fb_blank = epson1355fb_blank,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_read = epson1355fb_read,
	.fb_write = epson1355fb_write,
};
461 | |||
462 | /* ------------------------------------------------------------------------- */ | ||
463 | |||
/*
 * Probe the amount of display memory (512 KiB or 2 MiB) by checking
 * whether a write at offset 0x80000 aliases back to offset 0.
 * The probed locations are zeroed again before returning.
 */
static __init unsigned int get_fb_size(struct fb_info *info)
{
	unsigned int size = 2 * 1024 * 1024;
	char *p = info->screen_base;

	/* the 512k framebuffer is aliased at start + 0x80000 * n */
	fb_writeb(1, p);
	fb_writeb(0, p + 0x80000);
	if (!fb_readb(p))
		size = 512 * 1024;

	fb_writeb(0, p);

	return size;
}
479 | |||
/* LCD data-interface width in bits, indexed as [is_tft][panel width field]. */
static int epson1355_width_tab[2][4] __initdata =
    { {4, 8, 16, -1}, {9, 12, 16, -1} };
/* Bits per pixel by the 3-bit display-mode field.  Entries 6 and 7 are
 * implicitly zero (unsupported modes) and would hit the BUG() check in
 * fetch_hw_state(). */
static int epson1355_bpp_tab[8] __initdata = { 1, 2, 4, 8, 15, 16 };
483 | |||
/*
 * Read the geometry/format the boot loader programmed into the chip and
 * fill in info->var and info->fix accordingly.  Only 8 bpp (pseudocolor)
 * and 16 bpp (RGB565 truecolor) modes are supported; anything else BUG()s.
 * NOTE(review): relies on fix->smem_len being set (done in probe before
 * this is called) to derive yres_virtual.
 */
static void __init fetch_hw_state(struct fb_info *info, struct epson1355_par *par)
{
	struct fb_var_screeninfo *var = &info->var;
	struct fb_fix_screeninfo *fix = &info->fix;
	u8 panel, display;
	u16 offset;
	u32 xres, yres;
	u32 xres_virtual, yres_virtual;
	int bpp, lcd_bpp;
	int is_color, is_dual, is_tft;
	int lcd_enabled, crt_enabled;

	fix->type = FB_TYPE_PACKED_PIXELS;

	/* pixel depth comes from bits 4:2 of the display mode register */
	display = epson1355_read_reg(par, REG_DISPLAY_MODE);
	bpp = epson1355_bpp_tab[(display >> 2) & 7];

	switch (bpp) {
	case 8:
		fix->visual = FB_VISUAL_PSEUDOCOLOR;
		var->bits_per_pixel = 8;
		var->red.offset = var->green.offset = var->blue.offset = 0;
		var->red.length = var->green.length = var->blue.length = 8;
		break;
	case 16:
		/* 5-6-5 RGB */
		fix->visual = FB_VISUAL_TRUECOLOR;
		var->bits_per_pixel = 16;
		var->red.offset = 11;
		var->red.length = 5;
		var->green.offset = 5;
		var->green.length = 6;
		var->blue.offset = 0;
		var->blue.length = 5;
		break;
	default:
		BUG();
	}
	fb_alloc_cmap(&(info->cmap), 256, 0);

	/* decode the panel type register */
	panel = epson1355_read_reg(par, REG_PANEL_TYPE);
	is_color = (panel & 0x04) != 0;
	is_dual = (panel & 0x02) != 0;
	is_tft = (panel & 0x01) != 0;
	crt_enabled = (display & 0x02) != 0;
	lcd_enabled = (display & 0x01) != 0;
	lcd_bpp = epson1355_width_tab[is_tft][(panel >> 4) & 3];

	/* geometry: widths are in units of 8 pixels, dual panels double yres */
	xres = (epson1355_read_reg(par, REG_HORZ_DISP_WIDTH) + 1) * 8;
	yres = (epson1355_read_reg16(par, REG_VERT_DISP_HEIGHT0) + 1) *
	    ((is_dual && !crt_enabled) ? 2 : 1);
	/* line offset is in 16-bit words */
	offset = epson1355_read_reg16(par, REG_MEM_ADDR_OFFSET0) & 0x7ff;
	xres_virtual = offset * 16 / bpp;
	yres_virtual = fix->smem_len / (offset * 2);

	var->xres = xres;
	var->yres = yres;
	var->xres_virtual = xres_virtual;
	var->yres_virtual = yres_virtual;
	var->xoffset = var->yoffset = 0;

	fix->line_length = offset * 2;

	fix->xpanstep = 0;	/* no pan yet */
	fix->ypanstep = 1;
	fix->ywrapstep = 0;
	fix->accel = FB_ACCEL_NONE;

	var->grayscale = !is_color;

#ifdef DEBUG
	printk(KERN_INFO
	       "epson1355fb: xres=%d, yres=%d, "
	       "is_color=%d, is_dual=%d, is_tft=%d\n",
	       xres, yres, is_color, is_dual, is_tft);
	printk(KERN_INFO
	       "epson1355fb: bpp=%d, lcd_bpp=%d, "
	       "crt_enabled=%d, lcd_enabled=%d\n",
	       bpp, lcd_bpp, crt_enabled, lcd_enabled);
#endif
}
565 | |||
566 | |||
567 | static void clearfb16(struct fb_info *info) | ||
568 | { | ||
569 | u16 *dst = (u16 *) info->screen_base; | ||
570 | unsigned long n = info->fix.smem_len; | ||
571 | |||
572 | while (n > 1) { | ||
573 | fb_writew(0, dst); | ||
574 | dst++, n -= 2; | ||
575 | } | ||
576 | |||
577 | if (n) | ||
578 | fb_writeb(0, dst); | ||
579 | } | ||
580 | |||
581 | static int epson1355fb_remove(struct platform_device *dev) | ||
582 | { | ||
583 | struct fb_info *info = platform_get_drvdata(dev); | ||
584 | struct epson1355_par *par = info->par; | ||
585 | |||
586 | backlight_enable(0); | ||
587 | if (par) { | ||
588 | lcd_enable(par, 0); | ||
589 | if (par && par->reg_addr) | ||
590 | iounmap((void *) par->reg_addr); | ||
591 | } | ||
592 | |||
593 | if (info) { | ||
594 | fb_dealloc_cmap(&info->cmap); | ||
595 | if (info->screen_base) | ||
596 | iounmap(info->screen_base); | ||
597 | framebuffer_release(info); | ||
598 | } | ||
599 | release_mem_region(EPSON1355FB_FB_PHYS, EPSON1355FB_FB_LEN); | ||
600 | release_mem_region(EPSON1355FB_REGS_PHYS, EPSON1355FB_REGS_LEN); | ||
601 | return 0; | ||
602 | } | ||
603 | |||
/*
 * Probe: reserve and map the register and framebuffer windows, verify the
 * chip revision, pick up the boot-loader-programmed video mode, clear and
 * enable the display, and register the frame buffer.
 *
 * All error paths funnel through "bail", which delegates unwinding to
 * epson1355fb_remove().  NOTE(review): at that point drvdata is not yet
 * set and not every resource has been acquired, so the remove path must
 * tolerate missing resources -- verify.
 */
int __devinit epson1355fb_probe(struct platform_device *dev)
{
	struct epson1355_par *default_par;
	struct fb_info *info;
	u8 revision;
	int rc = 0;

	if (!request_mem_region(EPSON1355FB_REGS_PHYS, EPSON1355FB_REGS_LEN, "S1D13505 registers")) {
		printk(KERN_ERR "epson1355fb: unable to reserve "
		       "registers at 0x%0x\n", EPSON1355FB_REGS_PHYS);
		rc = -EBUSY;
		goto bail;
	}

	if (!request_mem_region(EPSON1355FB_FB_PHYS, EPSON1355FB_FB_LEN,
				"S1D13505 framebuffer")) {
		printk(KERN_ERR "epson1355fb: unable to reserve "
		       "framebuffer at 0x%0x\n", EPSON1355FB_FB_PHYS);
		rc = -EBUSY;
		goto bail;
	}

	/* fb_info and our epson1355_par are allocated in one block */
	info = framebuffer_alloc(sizeof(struct epson1355_par), &dev->dev);
	if (!info) {
		rc = -ENOMEM;
		goto bail;
	}

	default_par = info->par;
	default_par->reg_addr = (unsigned long) ioremap(EPSON1355FB_REGS_PHYS, EPSON1355FB_REGS_LEN);
	if (!default_par->reg_addr) {
		printk(KERN_ERR "epson1355fb: unable to map registers\n");
		rc = -ENOMEM;
		goto bail;
	}
	info->pseudo_palette = default_par->pseudo_palette;

	info->screen_base = ioremap(EPSON1355FB_FB_PHYS, EPSON1355FB_FB_LEN);
	if (!info->screen_base) {
		printk(KERN_ERR "epson1355fb: unable to map framebuffer\n");
		rc = -ENOMEM;
		goto bail;
	}

	/* sanity-check that an S1D13505 is really there */
	revision = epson1355_read_reg(default_par, REG_REVISION_CODE);
	if ((revision >> 2) != 3) {
		printk(KERN_INFO "epson1355fb: epson1355 not found\n");
		rc = -ENODEV;
		goto bail;
	}

	info->fix.mmio_start = EPSON1355FB_REGS_PHYS;
	info->fix.mmio_len = EPSON1355FB_REGS_LEN;
	info->fix.smem_start = EPSON1355FB_FB_PHYS;
	info->fix.smem_len = get_fb_size(info);

	printk(KERN_INFO "epson1355fb: regs mapped at 0x%lx, fb %d KiB mapped at 0x%p\n",
	       default_par->reg_addr, info->fix.smem_len / 1024, info->screen_base);

	strcpy(info->fix.id, "S1D13505");
	info->par = default_par;
	info->fbops = &epson1355fb_fbops;
	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;

	/* we expect the boot loader to have initialized the chip
	   with appropriate parameters from which we can determine
	   the flavor of lcd panel attached */
	fetch_hw_state(info, default_par);

	/* turn this puppy on ... */
	clearfb16(info);
	backlight_enable(1);
	lcd_enable(default_par, 1);

	if (register_framebuffer(info) < 0) {
		rc = -EINVAL;
		goto bail;
	}
	/*
	 * Our driver data.
	 */
	platform_set_drvdata(dev, info);

	printk(KERN_INFO "fb%d: %s frame buffer device\n",
	       info->node, info->fix.id);

	return 0;

      bail:
	epson1355fb_remove(dev);
	return rc;
}
696 | |||
/* Platform glue: bound to the "epson1355fb" device created in init. */
static struct platform_driver epson1355fb_driver = {
	.probe = epson1355fb_probe,
	.remove = epson1355fb_remove,
	.driver = {
		   .name = "epson1355fb",
		   },
};
704 | |||
705 | static struct platform_device *epson1355fb_device; | ||
706 | |||
/*
 * Module init: honour "video=epson1355fb:off"-style disabling via
 * fb_get_options(), register the platform driver and create/add the
 * matching platform device.  On failure both registrations are undone.
 */
int __init epson1355fb_init(void)
{
	int ret = 0;

	if (fb_get_options("epson1355fb", NULL))
		return -ENODEV;

	ret = platform_driver_register(&epson1355fb_driver);

	if (!ret) {
		epson1355fb_device = platform_device_alloc("epson1355fb", 0);

		if (epson1355fb_device)
			ret = platform_device_add(epson1355fb_device);
		else
			ret = -ENOMEM;

		if (ret) {
			/* platform_device_put() also frees a never-added device */
			platform_device_put(epson1355fb_device);
			platform_driver_unregister(&epson1355fb_driver);
		}
	}

	return ret;
}
732 | |||
733 | module_init(epson1355fb_init); | ||
734 | |||
735 | #ifdef MODULE | ||
/* Module unload: drop the device first, then the driver. */
static void __exit epson1355fb_exit(void)
{
	platform_device_unregister(epson1355fb_device);
	platform_driver_unregister(&epson1355fb_driver);
}
741 | |||
742 | /* ------------------------------------------------------------------------- */ | ||
743 | |||
744 | module_exit(epson1355fb_exit); | ||
745 | #endif | ||
746 | |||
747 | MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>"); | ||
748 | MODULE_DESCRIPTION("Framebuffer driver for Epson S1D13505"); | ||
749 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c new file mode 100644 index 00000000000..c0504a8a507 --- /dev/null +++ b/drivers/video/omap/blizzard.c | |||
@@ -0,0 +1,1648 @@ | |||
1 | /* | ||
2 | * Epson Blizzard LCD controller driver | ||
3 | * | ||
4 | * Copyright (C) 2004-2005 Nokia Corporation | ||
5 | * Authors: Juha Yrjola <juha.yrjola@nokia.com> | ||
6 | * Imre Deak <imre.deak@nokia.com> | ||
7 | * YUV support: Jussi Laako <jussi.laako@nokia.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the | ||
11 | * Free Software Foundation; either version 2 of the License, or (at your | ||
12 | * option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
17 | * General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along | ||
20 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
21 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
22 | */ | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/fb.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/clk.h> | ||
28 | |||
29 | #include <plat/dma.h> | ||
30 | #include <plat/blizzard.h> | ||
31 | |||
32 | #include "omapfb.h" | ||
33 | #include "dispc.h" | ||
34 | |||
35 | #define MODULE_NAME "blizzard" | ||
36 | |||
37 | #define BLIZZARD_REV_CODE 0x00 | ||
38 | #define BLIZZARD_CONFIG 0x02 | ||
39 | #define BLIZZARD_PLL_DIV 0x04 | ||
40 | #define BLIZZARD_PLL_LOCK_RANGE 0x06 | ||
41 | #define BLIZZARD_PLL_CLOCK_SYNTH_0 0x08 | ||
42 | #define BLIZZARD_PLL_CLOCK_SYNTH_1 0x0a | ||
43 | #define BLIZZARD_PLL_MODE 0x0c | ||
44 | #define BLIZZARD_CLK_SRC 0x0e | ||
45 | #define BLIZZARD_MEM_BANK0_ACTIVATE 0x10 | ||
46 | #define BLIZZARD_MEM_BANK0_STATUS 0x14 | ||
47 | #define BLIZZARD_PANEL_CONFIGURATION 0x28 | ||
48 | #define BLIZZARD_HDISP 0x2a | ||
49 | #define BLIZZARD_HNDP 0x2c | ||
50 | #define BLIZZARD_VDISP0 0x2e | ||
51 | #define BLIZZARD_VDISP1 0x30 | ||
52 | #define BLIZZARD_VNDP 0x32 | ||
53 | #define BLIZZARD_HSW 0x34 | ||
54 | #define BLIZZARD_VSW 0x38 | ||
55 | #define BLIZZARD_DISPLAY_MODE 0x68 | ||
56 | #define BLIZZARD_INPUT_WIN_X_START_0 0x6c | ||
57 | #define BLIZZARD_DATA_SOURCE_SELECT 0x8e | ||
58 | #define BLIZZARD_DISP_MEM_DATA_PORT 0x90 | ||
59 | #define BLIZZARD_DISP_MEM_READ_ADDR0 0x92 | ||
60 | #define BLIZZARD_POWER_SAVE 0xE6 | ||
61 | #define BLIZZARD_NDISP_CTRL_STATUS 0xE8 | ||
62 | |||
63 | /* Data source select */ | ||
64 | /* For S1D13745 */ | ||
65 | #define BLIZZARD_SRC_WRITE_LCD_BACKGROUND 0x00 | ||
66 | #define BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE 0x01 | ||
67 | #define BLIZZARD_SRC_WRITE_OVERLAY_ENABLE 0x04 | ||
68 | #define BLIZZARD_SRC_DISABLE_OVERLAY 0x05 | ||
69 | /* For S1D13744 */ | ||
70 | #define BLIZZARD_SRC_WRITE_LCD 0x00 | ||
71 | #define BLIZZARD_SRC_BLT_LCD 0x06 | ||
72 | |||
73 | #define BLIZZARD_COLOR_RGB565 0x01 | ||
74 | #define BLIZZARD_COLOR_YUV420 0x09 | ||
75 | |||
76 | #define BLIZZARD_VERSION_S1D13745 0x01 /* Hailstorm */ | ||
77 | #define BLIZZARD_VERSION_S1D13744 0x02 /* Blizzard */ | ||
78 | |||
79 | #define BLIZZARD_AUTO_UPDATE_TIME (HZ / 20) | ||
80 | |||
81 | /* Reserve 4 request slots for requests in irq context */ | ||
82 | #define REQ_POOL_SIZE 24 | ||
83 | #define IRQ_REQ_POOL_SIZE 4 | ||
84 | |||
85 | #define REQ_FROM_IRQ_POOL 0x01 | ||
86 | |||
87 | #define REQ_COMPLETE 0 | ||
88 | #define REQ_PENDING 1 | ||
89 | |||
/* Inclusive range of controller register addresses, for save/restore. */
struct blizzard_reg_list {
	int	start;
	int	end;
};

/* These need to be saved / restored separately from the rest. */
static const struct blizzard_reg_list blizzard_pll_regs[] = {
	{
		.start	= 0x04,		/* Don't save PLL ctrl (0x0C) */
		.end	= 0x0a,
	},
	{
		.start	= 0x0e,		/* Clock configuration */
		.end	= 0x0e,
	},
};

static const struct blizzard_reg_list blizzard_gen_regs[] = {
	{
		.start	= 0x18,		/* SDRAM control */
		.end	= 0x20,
	},
	{
		.start	= 0x28,		/* LCD Panel configuration */
		.end	= 0x5a,		/* HSSI interface, TV configuration */
	},
};

/* Shadow storage for the saved registers (addresses up to 0x5a);
 * indexing scheme is defined by the save/restore code further below. */
static u8 blizzard_reg_cache[0x5a / 2];
119 | |||
/* Parameters of one display-update request (source and output windows). */
struct update_param {
	int	plane;				/* OMAPFB plane index */
	int	x, y, width, height;		/* source rectangle */
	int	out_x, out_y;			/* destination position */
	int	out_width, out_height;		/* destination size (scaling) */
	int	color_mode;
	int	bpp;
	int	flags;
};

/* One queued request; lives on either the free or the pending list. */
struct blizzard_request {
	struct list_head entry;
	unsigned int	 flags;			/* e.g. REQ_FROM_IRQ_POOL */

	int		 (*handler)(struct blizzard_request *req);
	void		 (*complete)(void *data);	/* completion callback */
	void		 *complete_data;

	union {
		struct update_param	update;
		struct completion	*sync;
	} par;
};

/* Cached per-plane configuration. */
struct plane_info {
	unsigned long offset;			/* start offset in display memory */
	int pos_x, pos_y;
	int width, height;
	int out_width, out_height;
	int scr_width;
	int color_mode;
	int bpp;
};
153 | |||
/* Driver-wide state; a single static instance ("blizzard") below. */
struct blizzard_struct {
	enum omapfb_update_mode	update_mode;
	enum omapfb_update_mode	update_mode_before_suspend;

	struct timer_list	auto_update_timer;	/* periodic refresh */
	int			stop_auto_update;
	struct omapfb_update_window	auto_update_window;
	int			enabled_planes;		/* bitmask of active planes */
	int			vid_nonstd_color;
	int			vid_scaled;
	int			last_color_mode;
	int			zoom_on;
	int			zoom_area_gx1;		/* accumulated zoom area */
	int			zoom_area_gx2;
	int			zoom_area_gy1;
	int			zoom_area_gy2;
	int			screen_width;
	int			screen_height;
	unsigned		te_connected:1;		/* tearing-effect line wired up */
	unsigned		vsync_only:1;

	struct plane_info	plane[OMAPFB_PLANE_NUM];

	/* request queue: fixed pool, free/pending lists, counted by a semaphore */
	struct blizzard_request	req_pool[REQ_POOL_SIZE];
	struct list_head	pending_req_list;
	struct list_head	free_req_list;
	struct semaphore	req_sema;
	spinlock_t		req_lock;

	unsigned long		sys_ck_rate;
	struct extif_timings	reg_timings, lut_timings;

	u32			max_transmit_size;
	u32			extif_clk_period;
	int			extif_clk_div;
	unsigned long		pix_tx_time;		/* time to push one pixel */
	unsigned long		line_upd_time;

	struct omapfb_device	*fbdev;
	struct lcd_ctrl_extif	*extif;			/* external interface ops */
	const struct lcd_ctrl	*int_ctrl;		/* internal LCD controller */

	void			(*power_up)(struct device *dev);
	void			(*power_down)(struct device *dev);

	int			version;	/* BLIZZARD_VERSION_S1D1374x */
} blizzard;
201 | |||
202 | struct lcd_ctrl blizzard_ctrl; | ||
203 | |||
204 | static u8 blizzard_read_reg(u8 reg) | ||
205 | { | ||
206 | u8 data; | ||
207 | |||
208 | blizzard.extif->set_bits_per_cycle(8); | ||
209 | blizzard.extif->write_command(®, 1); | ||
210 | blizzard.extif->read_data(&data, 1); | ||
211 | |||
212 | return data; | ||
213 | } | ||
214 | |||
215 | static void blizzard_write_reg(u8 reg, u8 val) | ||
216 | { | ||
217 | blizzard.extif->set_bits_per_cycle(8); | ||
218 | blizzard.extif->write_command(®, 1); | ||
219 | blizzard.extif->write_data(&val, 1); | ||
220 | } | ||
221 | |||
222 | static void blizzard_restart_sdram(void) | ||
223 | { | ||
224 | unsigned long tmo; | ||
225 | |||
226 | blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0); | ||
227 | udelay(50); | ||
228 | blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 1); | ||
229 | tmo = jiffies + msecs_to_jiffies(200); | ||
230 | while (!(blizzard_read_reg(BLIZZARD_MEM_BANK0_STATUS) & 0x01)) { | ||
231 | if (time_after(jiffies, tmo)) { | ||
232 | dev_err(blizzard.fbdev->dev, | ||
233 | "s1d1374x: SDRAM not ready\n"); | ||
234 | break; | ||
235 | } | ||
236 | msleep(1); | ||
237 | } | ||
238 | } | ||
239 | |||
/* Deactivate the controller's SDRAM bank (counterpart of
 * blizzard_restart_sdram()).
 */
static void blizzard_stop_sdram(void)
{
	blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0);
}
244 | |||
245 | /* Wait until the last window was completely written into the controllers | ||
246 | * SDRAM and we can start transferring the next window. | ||
247 | */ | ||
248 | static void blizzard_wait_line_buffer(void) | ||
249 | { | ||
250 | unsigned long tmo = jiffies + msecs_to_jiffies(30); | ||
251 | |||
252 | while (blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS) & (1 << 7)) { | ||
253 | if (time_after(jiffies, tmo)) { | ||
254 | if (printk_ratelimit()) | ||
255 | dev_err(blizzard.fbdev->dev, | ||
256 | "s1d1374x: line buffer not ready\n"); | ||
257 | break; | ||
258 | } | ||
259 | } | ||
260 | } | ||
261 | |||
262 | /* Wait until the YYC color space converter is idle. */ | ||
263 | static void blizzard_wait_yyc(void) | ||
264 | { | ||
265 | unsigned long tmo = jiffies + msecs_to_jiffies(30); | ||
266 | |||
267 | while (blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS) & (1 << 4)) { | ||
268 | if (time_after(jiffies, tmo)) { | ||
269 | if (printk_ratelimit()) | ||
270 | dev_err(blizzard.fbdev->dev, | ||
271 | "s1d1374x: YYC not ready\n"); | ||
272 | break; | ||
273 | } | ||
274 | } | ||
275 | } | ||
276 | |||
/* Tell the controller to stop sourcing pixels from the overlay window. */
static void disable_overlay(void)
{
	blizzard_write_reg(BLIZZARD_DATA_SOURCE_SELECT,
			   BLIZZARD_SRC_DISABLE_OVERLAY);
}
282 | |||
/* Program the input/output window registers for the next transfer.
 *
 * The 18-byte payload is the controller's window register block,
 * little-endian 16-bit coordinates (end coordinates are inclusive,
 * hence the decrements), followed by the color mode and the data
 * source select.  It is written in one burst starting at
 * BLIZZARD_INPUT_WIN_X_START_0; the register index is left pointing
 * past the block, which the callers rely on before transfer_area().
 */
static void set_window_regs(int x_start, int y_start, int x_end, int y_end,
			    int x_out_start, int y_out_start,
			    int x_out_end, int y_out_end, int color_mode,
			    int zoom_off, int flags)
{
	u8 tmp[18];
	u8 cmd;

	/* Input window, end coordinates inclusive. */
	x_end--;
	y_end--;
	tmp[0] = x_start;
	tmp[1] = x_start >> 8;
	tmp[2] = y_start;
	tmp[3] = y_start >> 8;
	tmp[4] = x_end;
	tmp[5] = x_end >> 8;
	tmp[6] = y_end;
	tmp[7] = y_end >> 8;

	/* Output window, end coordinates inclusive. */
	x_out_end--;
	y_out_end--;
	tmp[8] = x_out_start;
	tmp[9] = x_out_start >> 8;
	tmp[10] = y_out_start;
	tmp[11] = y_out_start >> 8;
	tmp[12] = x_out_end;
	tmp[13] = x_out_end >> 8;
	tmp[14] = y_out_end;
	tmp[15] = y_out_end >> 8;

	tmp[16] = color_mode;
	/* Select the write mode: a 'background' write disables scaling
	 * (S1D13745 only), otherwise overlay or plain/destructive LCD
	 * write depending on flags and controller version.
	 */
	if (zoom_off && blizzard.version == BLIZZARD_VERSION_S1D13745)
		tmp[17] = BLIZZARD_SRC_WRITE_LCD_BACKGROUND;
	else if (flags & OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY)
		tmp[17] = BLIZZARD_SRC_WRITE_OVERLAY_ENABLE;
	else
		tmp[17] = blizzard.version == BLIZZARD_VERSION_S1D13744 ?
			BLIZZARD_SRC_WRITE_LCD :
			BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE;

	blizzard.extif->set_bits_per_cycle(8);
	cmd = BLIZZARD_INPUT_WIN_X_START_0;
	blizzard.extif->write_command(&cmd, 1);
	blizzard.extif->write_data(tmp, 18);
}
328 | |||
/* Enable tear synchronization for the coming transfer and choose
 * whether to sync to plain vsync or to a specific panel line.
 *
 * The timing heuristics fall back to vsync-only when the host transfer
 * is fast enough relative to the panel refresh; otherwise the transfer
 * is started once the refresh has passed line y (line-based sync).
 * Units of pix_tx_time / line_upd_time are not visible here —
 * NOTE(review): presumably picoseconds, matching extif_clk_period;
 * confirm against the timing setup code.
 */
static void enable_tearsync(int y, int width, int height, int screen_height,
			    int out_height, int force_vsync)
{
	u8 b;

	/* Bit 3 matches the bit cleared in disable_tearsync(); it
	 * presumably enables the TE signal — TODO confirm in datasheet.
	 */
	b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
	b |= 1 << 3;
	blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);

	if (likely(blizzard.vsync_only || force_vsync)) {
		blizzard.extif->enable_tearsync(1, 0);
		return;
	}

	/* One transferred line is faster than one panel line: plain
	 * vsync cannot be outrun, no line sync needed.
	 */
	if (width * blizzard.pix_tx_time < blizzard.line_upd_time) {
		blizzard.extif->enable_tearsync(1, 0);
		return;
	}

	/* Whole-window transfer finishes before the refresh reaches the
	 * end of the output area: vsync is still sufficient.
	 */
	if ((width * blizzard.pix_tx_time / 1000) * height <
	    (y + out_height) * (blizzard.line_upd_time / 1000)) {
		blizzard.extif->enable_tearsync(1, 0);
		return;
	}

	blizzard.extif->enable_tearsync(1, y + 1);
}
356 | |||
/* Disable tear synchronization on both the host interface and the
 * controller.
 */
static void disable_tearsync(void)
{
	u8 b;

	blizzard.extif->enable_tearsync(0, 0);
	b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
	b &= ~(1 << 3);
	blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
	/* Trailing read with unused result; presumably a deliberate
	 * read-back to settle the write — NOTE(review): confirm against
	 * the S1D1374x datasheet before removing.
	 */
	b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
}
367 | |||
368 | static inline void set_extif_timings(const struct extif_timings *t); | ||
369 | |||
/* Take a request from the free pool.
 *
 * In process context a semaphore slot is acquired first (may sleep),
 * which bounds the number of outstanding process-context requests so
 * the pool can never run dry and the BUG_ON below cannot trigger.  In
 * interrupt context we cannot sleep, so the request is accounted to
 * the IRQ reserve via REQ_FROM_IRQ_POOL and free_req() will not
 * release a semaphore slot for it.
 */
static inline struct blizzard_request *alloc_req(void)
{
	unsigned long flags;
	struct blizzard_request *req;
	int req_flags = 0;

	if (!in_interrupt())
		down(&blizzard.req_sema);
	else
		req_flags = REQ_FROM_IRQ_POOL;

	spin_lock_irqsave(&blizzard.req_lock, flags);
	BUG_ON(list_empty(&blizzard.free_req_list));
	req = list_entry(blizzard.free_req_list.next,
			 struct blizzard_request, entry);
	list_del(&req->entry);
	spin_unlock_irqrestore(&blizzard.req_lock, flags);

	INIT_LIST_HEAD(&req->entry);
	req->flags = req_flags;

	return req;
}
393 | |||
/* Return a request to the free pool.  Releases the semaphore slot
 * taken in alloc_req() unless the request came from the IRQ reserve.
 */
static inline void free_req(struct blizzard_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&blizzard.req_lock, flags);

	/* list_move() also works when req is still on the pending list. */
	list_move(&req->entry, &blizzard.free_req_list);
	if (!(req->flags & REQ_FROM_IRQ_POOL))
		up(&blizzard.req_sema);

	spin_unlock_irqrestore(&blizzard.req_lock, flags);
}
406 | |||
/* Run queued requests until one starts an asynchronous transfer.
 *
 * The request lock is dropped around ->handler() and the completion
 * callback, as both may take time or re-enter the request machinery.
 * If a handler returns REQ_PENDING an asynchronous transfer is in
 * flight; processing stops here and resumes from request_complete().
 * Note the request stays on the pending list while its handler runs,
 * which is what keeps concurrent submitters from starting a second
 * processing loop (see submit_req_list()).
 */
static void process_pending_requests(void)
{
	unsigned long flags;

	spin_lock_irqsave(&blizzard.req_lock, flags);

	while (!list_empty(&blizzard.pending_req_list)) {
		struct blizzard_request *req;
		void (*complete)(void *);
		void *complete_data;

		req = list_entry(blizzard.pending_req_list.next,
				 struct blizzard_request, entry);
		spin_unlock_irqrestore(&blizzard.req_lock, flags);

		if (req->handler(req) == REQ_PENDING)
			return;

		/* Copy the callback before freeing: the callback may
		 * allocate a request itself.
		 */
		complete = req->complete;
		complete_data = req->complete_data;
		free_req(req);

		if (complete)
			complete(complete_data);

		spin_lock_irqsave(&blizzard.req_lock, flags);
	}

	spin_unlock_irqrestore(&blizzard.req_lock, flags);
}
437 | |||
438 | static void submit_req_list(struct list_head *head) | ||
439 | { | ||
440 | unsigned long flags; | ||
441 | int process = 1; | ||
442 | |||
443 | spin_lock_irqsave(&blizzard.req_lock, flags); | ||
444 | if (likely(!list_empty(&blizzard.pending_req_list))) | ||
445 | process = 0; | ||
446 | list_splice_init(head, blizzard.pending_req_list.prev); | ||
447 | spin_unlock_irqrestore(&blizzard.req_lock, flags); | ||
448 | |||
449 | if (process) | ||
450 | process_pending_requests(); | ||
451 | } | ||
452 | |||
/* Transfer-complete callback: recycle the finished request, invoke its
 * completion callback (after freeing, so the callback may allocate a
 * request itself), then continue with any queued work.
 */
static void request_complete(void *data)
{
	struct blizzard_request *req = (struct blizzard_request *)data;
	void (*complete)(void *);
	void *complete_data;

	complete = req->complete;
	complete_data = req->complete_data;

	free_req(req);

	if (complete)
		complete(complete_data);

	process_pending_requests();
}
469 | |||
470 | |||
471 | static int do_full_screen_update(struct blizzard_request *req) | ||
472 | { | ||
473 | int i; | ||
474 | int flags; | ||
475 | |||
476 | for (i = 0; i < 3; i++) { | ||
477 | struct plane_info *p = &blizzard.plane[i]; | ||
478 | if (!(blizzard.enabled_planes & (1 << i))) { | ||
479 | blizzard.int_ctrl->enable_plane(i, 0); | ||
480 | continue; | ||
481 | } | ||
482 | dev_dbg(blizzard.fbdev->dev, "pw %d ph %d\n", | ||
483 | p->width, p->height); | ||
484 | blizzard.int_ctrl->setup_plane(i, | ||
485 | OMAPFB_CHANNEL_OUT_LCD, p->offset, | ||
486 | p->scr_width, p->pos_x, p->pos_y, | ||
487 | p->width, p->height, | ||
488 | p->color_mode); | ||
489 | blizzard.int_ctrl->enable_plane(i, 1); | ||
490 | } | ||
491 | |||
492 | dev_dbg(blizzard.fbdev->dev, "sw %d sh %d\n", | ||
493 | blizzard.screen_width, blizzard.screen_height); | ||
494 | blizzard_wait_line_buffer(); | ||
495 | flags = req->par.update.flags; | ||
496 | if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC) | ||
497 | enable_tearsync(0, blizzard.screen_width, | ||
498 | blizzard.screen_height, | ||
499 | blizzard.screen_height, | ||
500 | blizzard.screen_height, | ||
501 | flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC); | ||
502 | else | ||
503 | disable_tearsync(); | ||
504 | |||
505 | set_window_regs(0, 0, blizzard.screen_width, blizzard.screen_height, | ||
506 | 0, 0, blizzard.screen_width, blizzard.screen_height, | ||
507 | BLIZZARD_COLOR_RGB565, blizzard.zoom_on, flags); | ||
508 | blizzard.zoom_on = 0; | ||
509 | |||
510 | blizzard.extif->set_bits_per_cycle(16); | ||
511 | /* set_window_regs has left the register index at the right | ||
512 | * place, so no need to set it here. | ||
513 | */ | ||
514 | blizzard.extif->transfer_area(blizzard.screen_width, | ||
515 | blizzard.screen_height, | ||
516 | request_complete, req); | ||
517 | return REQ_PENDING; | ||
518 | } | ||
519 | |||
/* Return non-zero iff the half-open intervals [a1, a2) and [b1, b2)
 * overlap: each interval must start before the other one ends.
 */
static int check_1d_intersect(int a1, int a2, int b1, int b2)
{
	return (a1 < b2) && (b1 < a2);
}
526 | |||
/* Setup all planes with an overlapping area with the update window,
 * then transfer the (x, y, w, h) window of `plane` to the output
 * rectangle (x_out, y_out, w_out, h_out).  Handles embedded windows
 * with a different color mode (wnd_color_mode != 0) and the scaling
 * bookkeeping needed to avoid stale zoom state on the controller.
 * Always returns REQ_PENDING; request_complete() fires when done.
 */
static int do_partial_update(struct blizzard_request *req, int plane,
			     int x, int y, int w, int h,
			     int x_out, int y_out, int w_out, int h_out,
			     int wnd_color_mode, int bpp)
{
	int i;
	int gx1, gy1, gx2, gy2;
	int gx1_out, gy1_out, gx2_out, gy2_out;
	int color_mode;
	int flags;
	int zoom_off;
	int have_zoom_for_this_update = 0;

	/* Global coordinates, relative to pixel 0,0 of the LCD */
	gx1 = x + blizzard.plane[plane].pos_x;
	gy1 = y + blizzard.plane[plane].pos_y;
	gx2 = gx1 + w;
	gy2 = gy1 + h;

	flags = req->par.update.flags;
	if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) {
		/* Pixel-doubled output: twice the source size, anchored
		 * at the source position.
		 */
		gx1_out = gx1;
		gy1_out = gy1;
		gx2_out = gx1 + w * 2;
		gy2_out = gy1 + h * 2;
	} else {
		gx1_out = x_out + blizzard.plane[plane].pos_x;
		gy1_out = y_out + blizzard.plane[plane].pos_y;
		gx2_out = gx1_out + w_out;
		gy2_out = gy1_out + h_out;
	}

	for (i = 0; i < OMAPFB_PLANE_NUM; i++) {
		struct plane_info *p = &blizzard.plane[i];
		int px1, py1;
		int px2, py2;
		int pw, ph;
		int pposx, pposy;
		unsigned long offset;

		/* With an embedded non-standard-color window only the
		 * target plane itself takes part in the update.
		 */
		if (!(blizzard.enabled_planes & (1 << i)) ||
		    (wnd_color_mode && i != plane)) {
			blizzard.int_ctrl->enable_plane(i, 0);
			continue;
		}
		/* Plane coordinates */
		if (i == plane) {
			/* Plane in which we are doing the update.
			 * Local coordinates are the one in the update
			 * request.
			 */
			px1 = x;
			py1 = y;
			px2 = x + w;
			py2 = y + h;
			pposx = 0;
			pposy = 0;
		} else {
			/* Check if this plane has an overlapping part */
			px1 = gx1 - p->pos_x;
			py1 = gy1 - p->pos_y;
			px2 = gx2 - p->pos_x;
			py2 = gy2 - p->pos_y;
			if (px1 >= p->width || py1 >= p->height ||
			    px2 <= 0 || py2 <= 0) {
				blizzard.int_ctrl->enable_plane(i, 0);
				continue;
			}
			/* Calculate the coordinates for the overlapping
			 * part in the plane's local coordinates.
			 */
			pposx = -px1;
			pposy = -py1;
			if (px1 < 0)
				px1 = 0;
			if (py1 < 0)
				py1 = 0;
			if (px2 > p->width)
				px2 = p->width;
			if (py2 > p->height)
				py2 = p->height;
			if (pposx < 0)
				pposx = 0;
			if (pposy < 0)
				pposy = 0;
		}
		pw = px2 - px1;
		ph = py2 - py1;
		offset = p->offset + (p->scr_width * py1 + px1) * p->bpp / 8;
		if (wnd_color_mode)
			/* Window embedded in the plane with a differing
			 * color mode / bpp. Calculate the number of DMA
			 * transfer elements in terms of the plane's bpp.
			 */
			pw = (pw + 1) * bpp / p->bpp;
#ifdef VERBOSE
		dev_dbg(blizzard.fbdev->dev,
			"plane %d offset %#08lx pposx %d pposy %d "
			"px1 %d py1 %d pw %d ph %d\n",
			i, offset, pposx, pposy, px1, py1, pw, ph);
#endif
		blizzard.int_ctrl->setup_plane(i,
				OMAPFB_CHANNEL_OUT_LCD, offset,
				p->scr_width,
				pposx, pposy, pw, ph,
				p->color_mode);

		blizzard.int_ctrl->enable_plane(i, 1);
	}

	switch (wnd_color_mode) {
	case OMAPFB_COLOR_YUV420:
		color_mode = BLIZZARD_COLOR_YUV420;
		/* Currently only the 16 bits/pixel cycle format is
		 * supported on the external interface. Adjust the number
		 * of transfer elements per line for 12bpp format.
		 */
		w = (w + 1) * 3 / 4;
		break;
	default:
		color_mode = BLIZZARD_COLOR_RGB565;
		break;
	}

	blizzard_wait_line_buffer();
	/* The YYC converter must be idle before switching away from
	 * YUV420 input.
	 */
	if (blizzard.last_color_mode == BLIZZARD_COLOR_YUV420)
		blizzard_wait_yyc();
	blizzard.last_color_mode = color_mode;
	if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
		enable_tearsync(gy1, w, h,
				blizzard.screen_height,
				h_out,
				flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
	else
		disable_tearsync();

	if ((gx2_out - gx1_out) != (gx2 - gx1) ||
	    (gy2_out - gy1_out) != (gy2 - gy1))
		have_zoom_for_this_update = 1;

	/* 'background' type of screen update (as opposed to 'destructive')
	   can be used to disable scaling if scaling is active */
	zoom_off = blizzard.zoom_on && !have_zoom_for_this_update &&
	    (gx1_out == 0) && (gx2_out == blizzard.screen_width) &&
	    (gy1_out == 0) && (gy2_out == blizzard.screen_height) &&
	    (gx1 == 0) && (gy1 == 0);

	if (blizzard.zoom_on && !have_zoom_for_this_update && !zoom_off &&
	    check_1d_intersect(blizzard.zoom_area_gx1, blizzard.zoom_area_gx2,
			       gx1_out, gx2_out) &&
	    check_1d_intersect(blizzard.zoom_area_gy1, blizzard.zoom_area_gy2,
			       gy1_out, gy2_out)) {
		/* Previous screen update was using scaling, current update
		 * is not using it. Additionally, current screen update is
		 * going to overlap with the scaled area. Scaling needs to be
		 * disabled in order to avoid 'magnifying glass' effect.
		 * Dummy setup of background window can be used for this.
		 */
		set_window_regs(0, 0, blizzard.screen_width,
				blizzard.screen_height,
				0, 0, blizzard.screen_width,
				blizzard.screen_height,
				BLIZZARD_COLOR_RGB565, 1, flags);
		blizzard.zoom_on = 0;
	}

	/* remember scaling settings if we have scaled update */
	if (have_zoom_for_this_update) {
		blizzard.zoom_on = 1;
		blizzard.zoom_area_gx1 = gx1_out;
		blizzard.zoom_area_gx2 = gx2_out;
		blizzard.zoom_area_gy1 = gy1_out;
		blizzard.zoom_area_gy2 = gy2_out;
	}

	set_window_regs(gx1, gy1, gx2, gy2, gx1_out, gy1_out, gx2_out, gy2_out,
			color_mode, zoom_off, flags);
	if (zoom_off)
		blizzard.zoom_on = 0;

	blizzard.extif->set_bits_per_cycle(16);
	/* set_window_regs has left the register index at the right
	 * place, so no need to set it here.
	 */
	blizzard.extif->transfer_area(w, h, request_complete, req);

	return REQ_PENDING;
}
716 | |||
/* Request handler for window ("send frame") updates: route to a
 * full-screen update when a video plane is scaled or uses a
 * non-standard color mode, otherwise do a partial update of just the
 * requested window.
 */
static int send_frame_handler(struct blizzard_request *req)
{
	struct update_param *par = &req->par.update;
	int plane = par->plane;

#ifdef VERBOSE
	dev_dbg(blizzard.fbdev->dev,
		"send_frame: x %d y %d w %d h %d "
		"x_out %d y_out %d w_out %d h_out %d "
		"color_mode %04x flags %04x planes %01x\n",
		par->x, par->y, par->width, par->height,
		par->out_x, par->out_y, par->out_width, par->out_height,
		par->color_mode, par->flags, blizzard.enabled_planes);
#endif
	if (par->flags & OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY)
		disable_overlay();

	if ((blizzard.enabled_planes & blizzard.vid_nonstd_color) ||
	     (blizzard.enabled_planes & blizzard.vid_scaled))
		return do_full_screen_update(req);

	return do_partial_update(req, plane, par->x, par->y,
				 par->width, par->height,
				 par->out_x, par->out_y,
				 par->out_width, par->out_height,
				 par->color_mode, par->bpp);
}
744 | |||
/* Per-request completion for intermediate frame slices: nothing to do.
 * The last request of a window update carries the caller's completion
 * callback instead (see create_req_list() callers).
 */
static void send_frame_complete(void *data)
{
}
748 | |||
/* Allocate a request, fill it in as a send-frame request from the
 * macro arguments plus surrounding locals (plane_idx, bpp, color_mode,
 * flags, req_head), and append it to the caller's list.  Only used by
 * create_req_list() below.
 */
#define ADD_PREQ(_x, _y, _w, _h, _x_out, _y_out, _w_out, _h_out) do {	\
	req = alloc_req();			\
	req->handler	= send_frame_handler;	\
	req->complete	= send_frame_complete;	\
	req->par.update.plane = plane_idx;	\
	req->par.update.x = _x;			\
	req->par.update.y = _y;			\
	req->par.update.width  = _w;		\
	req->par.update.height = _h;		\
	req->par.update.out_x = _x_out;		\
	req->par.update.out_y = _y_out;		\
	req->par.update.out_width = _w_out;	\
	req->par.update.out_height = _h_out;	\
	req->par.update.bpp = bpp;		\
	req->par.update.color_mode = color_mode;\
	req->par.update.flags = flags;		\
	list_add_tail(&req->entry, req_head);	\
} while(0)
767 | |||
/* Build the request list for one update window.
 *
 * YUV420 windows are aligned (x, y, height to 2 pixels, width to 4)
 * and use 12 bpp.  If the window data exceeds the controller's maximum
 * single transmit size, the window is split into two vertical slices;
 * tearsync is cleared for the second slice since it no longer starts
 * at the window top.
 */
static void create_req_list(int plane_idx,
			    struct omapfb_update_window *win,
			    struct list_head *req_head)
{
	struct blizzard_request *req;
	int x = win->x;
	int y = win->y;
	int width = win->width;
	int height = win->height;
	int x_out = win->out_x;
	int y_out = win->out_y;
	int width_out = win->out_width;
	int height_out = win->out_height;
	int color_mode;
	int bpp;
	int flags;
	unsigned int ystart = y;
	unsigned int yspan = height;
	unsigned int ystart_out = y_out;
	unsigned int yspan_out = height_out;

	flags = win->format & ~OMAPFB_FORMAT_MASK;
	color_mode = win->format & OMAPFB_FORMAT_MASK;
	switch (color_mode) {
	case OMAPFB_COLOR_YUV420:
		/* Embedded window with different color mode */
		bpp = 12;
		/* X, Y, height must be aligned at 2, width at 4 pixels */
		x &= ~1;
		y &= ~1;
		height = yspan = height & ~1;
		width = width & ~3;
		break;
	default:
		/* Same as the plane color mode */
		bpp = blizzard.plane[plane_idx].bpp;
		break;
	}
	if (width * height * bpp / 8 > blizzard.max_transmit_size) {
		/* First slice: as many full lines as fit in one
		 * transfer; output span scaled proportionally.
		 */
		yspan = blizzard.max_transmit_size / (width * bpp / 8);
		yspan_out = yspan * height_out / height;
		ADD_PREQ(x, ystart, width, yspan, x_out, ystart_out,
			 width_out, yspan_out);
		ystart += yspan;
		ystart_out += yspan_out;
		yspan = height - yspan;
		yspan_out = height_out - yspan_out;
		flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
	}

	ADD_PREQ(x, ystart, width, yspan, x_out, ystart_out,
		 width_out, yspan_out);
}
821 | |||
822 | static void auto_update_complete(void *data) | ||
823 | { | ||
824 | if (!blizzard.stop_auto_update) | ||
825 | mod_timer(&blizzard.auto_update_timer, | ||
826 | jiffies + BLIZZARD_AUTO_UPDATE_TIME); | ||
827 | } | ||
828 | |||
/* Timer callback for auto-update mode: queue an update of the whole
 * auto-update window for fb_info[0].  The last request re-arms the
 * timer via auto_update_complete().
 */
static void blizzard_update_window_auto(unsigned long arg)
{
	LIST_HEAD(req_list);
	struct blizzard_request *last;
	struct omapfb_plane_struct *plane;

	plane = blizzard.fbdev->fb_info[0]->par;
	create_req_list(plane->idx,
			&blizzard.auto_update_window, &req_list);
	last = list_entry(req_list.prev, struct blizzard_request, entry);

	last->complete = auto_update_complete;
	last->complete_data = NULL;

	submit_req_list(&req_list);
}
845 | |||
/* Queue an asynchronous update of a window on the given framebuffer.
 *
 * Only valid in manual update mode; tearsync may only be requested if
 * the tearing-effect line is actually wired up.  complete_callback (if
 * non-NULL) is invoked with complete_callback_data once the last
 * request of the update has been transferred.  Returns 0 or -EINVAL.
 */
int blizzard_update_window_async(struct fb_info *fbi,
				 struct omapfb_update_window *win,
				 void (*complete_callback)(void *arg),
				 void *complete_callback_data)
{
	LIST_HEAD(req_list);
	struct blizzard_request *last;
	struct omapfb_plane_struct *plane = fbi->par;

	if (unlikely(blizzard.update_mode != OMAPFB_MANUAL_UPDATE))
		return -EINVAL;
	if (unlikely(!blizzard.te_connected &&
		     (win->format & OMAPFB_FORMAT_FLAG_TEARSYNC)))
		return -EINVAL;

	create_req_list(plane->idx, win, &req_list);
	last = list_entry(req_list.prev, struct blizzard_request, entry);

	/* Attach the caller's callback to the final slice only. */
	last->complete = complete_callback;
	last->complete_data = (void *)complete_callback_data;

	submit_req_list(&req_list);

	return 0;
}
EXPORT_SYMBOL(blizzard_update_window_async);
872 | |||
/* Queue an update covering the whole auto-update window, with no
 * completion callback.
 */
static int update_full_screen(void)
{
	return blizzard_update_window_async(blizzard.fbdev->fb_info[0],
			&blizzard.auto_update_window, NULL, NULL);

}
879 | |||
880 | static int blizzard_setup_plane(int plane, int channel_out, | ||
881 | unsigned long offset, int screen_width, | ||
882 | int pos_x, int pos_y, int width, int height, | ||
883 | int color_mode) | ||
884 | { | ||
885 | struct plane_info *p; | ||
886 | |||
887 | #ifdef VERBOSE | ||
888 | dev_dbg(blizzard.fbdev->dev, | ||
889 | "plane %d ch_out %d offset %#08lx scr_width %d " | ||
890 | "pos_x %d pos_y %d width %d height %d color_mode %d\n", | ||
891 | plane, channel_out, offset, screen_width, | ||
892 | pos_x, pos_y, width, height, color_mode); | ||
893 | #endif | ||
894 | if ((unsigned)plane > OMAPFB_PLANE_NUM) | ||
895 | return -EINVAL; | ||
896 | p = &blizzard.plane[plane]; | ||
897 | |||
898 | switch (color_mode) { | ||
899 | case OMAPFB_COLOR_YUV422: | ||
900 | case OMAPFB_COLOR_YUY422: | ||
901 | p->bpp = 16; | ||
902 | blizzard.vid_nonstd_color &= ~(1 << plane); | ||
903 | break; | ||
904 | case OMAPFB_COLOR_YUV420: | ||
905 | p->bpp = 12; | ||
906 | blizzard.vid_nonstd_color |= 1 << plane; | ||
907 | break; | ||
908 | case OMAPFB_COLOR_RGB565: | ||
909 | p->bpp = 16; | ||
910 | blizzard.vid_nonstd_color &= ~(1 << plane); | ||
911 | break; | ||
912 | default: | ||
913 | return -EINVAL; | ||
914 | } | ||
915 | |||
916 | p->offset = offset; | ||
917 | p->pos_x = pos_x; | ||
918 | p->pos_y = pos_y; | ||
919 | p->width = width; | ||
920 | p->height = height; | ||
921 | p->scr_width = screen_width; | ||
922 | if (!p->out_width) | ||
923 | p->out_width = width; | ||
924 | if (!p->out_height) | ||
925 | p->out_height = height; | ||
926 | |||
927 | p->color_mode = color_mode; | ||
928 | |||
929 | return 0; | ||
930 | } | ||
931 | |||
932 | static int blizzard_set_scale(int plane, int orig_w, int orig_h, | ||
933 | int out_w, int out_h) | ||
934 | { | ||
935 | struct plane_info *p = &blizzard.plane[plane]; | ||
936 | int r; | ||
937 | |||
938 | dev_dbg(blizzard.fbdev->dev, | ||
939 | "plane %d orig_w %d orig_h %d out_w %d out_h %d\n", | ||
940 | plane, orig_w, orig_h, out_w, out_h); | ||
941 | if ((unsigned)plane > OMAPFB_PLANE_NUM) | ||
942 | return -ENODEV; | ||
943 | |||
944 | r = blizzard.int_ctrl->set_scale(plane, orig_w, orig_h, out_w, out_h); | ||
945 | if (r < 0) | ||
946 | return r; | ||
947 | |||
948 | p->width = orig_w; | ||
949 | p->height = orig_h; | ||
950 | p->out_width = out_w; | ||
951 | p->out_height = out_h; | ||
952 | if (orig_w == out_w && orig_h == out_h) | ||
953 | blizzard.vid_scaled &= ~(1 << plane); | ||
954 | else | ||
955 | blizzard.vid_scaled |= 1 << plane; | ||
956 | |||
957 | return 0; | ||
958 | } | ||
959 | |||
960 | static int blizzard_set_rotate(int angle) | ||
961 | { | ||
962 | u32 l; | ||
963 | |||
964 | l = blizzard_read_reg(BLIZZARD_PANEL_CONFIGURATION); | ||
965 | l &= ~0x03; | ||
966 | |||
967 | switch (angle) { | ||
968 | case 0: | ||
969 | l = l | 0x00; | ||
970 | break; | ||
971 | case 90: | ||
972 | l = l | 0x03; | ||
973 | break; | ||
974 | case 180: | ||
975 | l = l | 0x02; | ||
976 | break; | ||
977 | case 270: | ||
978 | l = l | 0x01; | ||
979 | break; | ||
980 | default: | ||
981 | return -EINVAL; | ||
982 | } | ||
983 | |||
984 | blizzard_write_reg(BLIZZARD_PANEL_CONFIGURATION, l); | ||
985 | |||
986 | return 0; | ||
987 | } | ||
988 | |||
989 | static int blizzard_enable_plane(int plane, int enable) | ||
990 | { | ||
991 | if (enable) | ||
992 | blizzard.enabled_planes |= 1 << plane; | ||
993 | else | ||
994 | blizzard.enabled_planes &= ~(1 << plane); | ||
995 | |||
996 | return 0; | ||
997 | } | ||
998 | |||
/* Request handler used by blizzard_sync(): wake the waiter.  Runs only
 * after everything queued before it has been handled.
 */
static int sync_handler(struct blizzard_request *req)
{
	complete(req->par.sync);
	return REQ_COMPLETE;
}
1004 | |||
/* Wait until all previously queued requests have been processed, by
 * appending a sync request and blocking until its handler runs.
 */
static void blizzard_sync(void)
{
	LIST_HEAD(req_list);
	struct blizzard_request *req;
	struct completion comp;

	req = alloc_req();

	req->handler = sync_handler;
	req->complete = NULL;
	init_completion(&comp);
	req->par.sync = &comp;

	list_add(&req->entry, &req_list);
	submit_req_list(&req_list);

	wait_for_completion(&comp);
}
1023 | |||
1024 | |||
1025 | static void blizzard_bind_client(struct omapfb_notifier_block *nb) | ||
1026 | { | ||
1027 | if (blizzard.update_mode == OMAPFB_MANUAL_UPDATE) { | ||
1028 | omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY); | ||
1029 | } | ||
1030 | } | ||
1031 | |||
/* Switch between manual, automatic and disabled update modes.
 *
 * Tears down the old mode (stopping the auto-update timer and/or
 * notifying manual-mode clients), drains all in-flight requests via
 * blizzard_sync(), then brings up the new mode.  Returns 0 or -EINVAL.
 */
static int blizzard_set_update_mode(enum omapfb_update_mode mode)
{
	if (unlikely(mode != OMAPFB_MANUAL_UPDATE &&
		     mode != OMAPFB_AUTO_UPDATE &&
		     mode != OMAPFB_UPDATE_DISABLED))
		return -EINVAL;

	if (mode == blizzard.update_mode)
		return 0;

	dev_info(blizzard.fbdev->dev, "s1d1374x: setting update mode to %s\n",
			mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
			(mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));

	switch (blizzard.update_mode) {
	case OMAPFB_MANUAL_UPDATE:
		omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_DISABLED);
		break;
	case OMAPFB_AUTO_UPDATE:
		/* stop_auto_update keeps auto_update_complete() from
		 * re-arming the timer while it is being deleted.
		 */
		blizzard.stop_auto_update = 1;
		del_timer_sync(&blizzard.auto_update_timer);
		break;
	case OMAPFB_UPDATE_DISABLED:
		break;
	}

	blizzard.update_mode = mode;
	blizzard_sync();	/* drain everything queued in the old mode */
	blizzard.stop_auto_update = 0;

	switch (mode) {
	case OMAPFB_MANUAL_UPDATE:
		omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY);
		break;
	case OMAPFB_AUTO_UPDATE:
		/* Kick off the first update; its completion re-arms the
		 * timer.
		 */
		blizzard_update_window_auto(0);
		break;
	case OMAPFB_UPDATE_DISABLED:
		break;
	}

	return 0;
}
1075 | |||
/* Return the currently active update mode. */
static enum omapfb_update_mode blizzard_get_update_mode(void)
{
	return blizzard.update_mode;
}
1080 | |||
/* Apply the given (pre-converted) timings to the external interface. */
static inline void set_extif_timings(const struct extif_timings *t)
{
	blizzard.extif->set_timings(t);
}
1085 | |||
1086 | static inline unsigned long round_to_extif_ticks(unsigned long ps, int div) | ||
1087 | { | ||
1088 | int bus_tick = blizzard.extif_clk_period * div; | ||
1089 | return (ps + bus_tick - 1) / bus_tick * bus_tick; | ||
1090 | } | ||
1091 | |||
/* Compute the external-interface timings for register access at the
 * given system clock rate and clock divider, rounding every value up
 * to whole bus ticks, and hand them to the interface driver for
 * conversion into its native format.  Returns the converter's result
 * (0 or a negative error).
 */
static int calc_reg_timing(unsigned long sysclk, int div)
{
	struct extif_timings *t;
	unsigned long systim;

	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
	 * AccessTime 2 ns + 12.2 ns (regs),
	 * WEOffTime = WEOnTime + 1 ns,
	 * REOffTime = REOnTime + 12 ns (regs),
	 * CSOffTime = REOffTime + 1 ns
	 * ReadCycle = 2ns + 2*SYSCLK (regs),
	 * WriteCycle = 2*SYSCLK + 2 ns,
	 * CSPulseWidth = 10 ns */

	/* System clock period in picoseconds. */
	systim = 1000000000 / (sysclk / 1000);
	dev_dbg(blizzard.fbdev->dev,
		  "Blizzard systim %lu ps extif_clk_period %u div %d\n",
		  systim, blizzard.extif_clk_period, div);

	t = &blizzard.reg_timings;
	memset(t, 0, sizeof(*t));

	t->clk_div = div;

	t->cs_on_time = 0;
	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
	t->re_off_time = round_to_extif_ticks(t->re_on_time + 13000, div);
	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
	/* Cycle times may not be shorter than the respective off times. */
	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	if (t->we_cycle_time < t->we_off_time)
		t->we_cycle_time = t->we_off_time;
	t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	if (t->re_cycle_time < t->re_off_time)
		t->re_cycle_time = t->re_off_time;
	t->cs_pulse_width = 0;

	dev_dbg(blizzard.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n",
		 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
	dev_dbg(blizzard.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n",
		 t->we_on_time, t->we_off_time, t->re_cycle_time,
		 t->we_cycle_time);
	dev_dbg(blizzard.fbdev->dev, "[reg]rdaccess %d cspulse %d\n",
		 t->access_time, t->cs_pulse_width);

	return blizzard.extif->convert_timings(t);
}
1141 | |||
/*
 * Like calc_reg_timing(), but for (slower) LUT accesses: the read access
 * and cycle times include an extra 4*SYSCLK + 26 ns.  Results go into
 * blizzard.lut_timings.
 *
 * Returns the result of extif->convert_timings(): 0 when the timings can
 * be realized with this divider, non-zero otherwise.
 */
static int calc_lut_timing(unsigned long sysclk, int div)
{
	struct extif_timings *t;
	unsigned long systim;

	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
	 * AccessTime 2 ns + 4 * SYSCLK + 26 (lut),
	 * WEOffTime = WEOnTime + 1 ns,
	 * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut),
	 * CSOffTime = REOffTime + 1 ns
	 * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut),
	 * WriteCycle = 2*SYSCLK + 2 ns,
	 * CSPulseWidth = 10 ns */

	/* One controller system clock period, in picoseconds. */
	systim = 1000000000 / (sysclk / 1000);
	dev_dbg(blizzard.fbdev->dev,
		"Blizzard systim %lu ps extif_clk_period %u div %d\n",
		systim, blizzard.extif_clk_period, div);

	t = &blizzard.lut_timings;
	memset(t, 0, sizeof(*t));

	t->clk_div = div;

	/* All values below are in ps, rounded up to whole bus ticks. */
	t->cs_on_time = 0;
	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
					      26000, div);
	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
	t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
					      26000, div);
	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
	/* Cycle times must not end before the corresponding off times. */
	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	if (t->we_cycle_time < t->we_off_time)
		t->we_cycle_time = t->we_off_time;
	t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
	if (t->re_cycle_time < t->re_off_time)
		t->re_cycle_time = t->re_off_time;
	t->cs_pulse_width = 0;

	dev_dbg(blizzard.fbdev->dev,
		"[lut]cson %d csoff %d reon %d reoff %d\n",
		t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
	dev_dbg(blizzard.fbdev->dev,
		"[lut]weon %d weoff %d recyc %d wecyc %d\n",
		t->we_on_time, t->we_off_time, t->re_cycle_time,
		t->we_cycle_time);
	dev_dbg(blizzard.fbdev->dev, "[lut]rdaccess %d cspulse %d\n",
		t->access_time, t->cs_pulse_width);

	return blizzard.extif->convert_timings(t);
}
1195 | |||
1196 | static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div) | ||
1197 | { | ||
1198 | int max_clk_div; | ||
1199 | int div; | ||
1200 | |||
1201 | blizzard.extif->get_clk_info(&blizzard.extif_clk_period, &max_clk_div); | ||
1202 | for (div = 1; div <= max_clk_div; div++) { | ||
1203 | if (calc_reg_timing(sysclk, div) == 0) | ||
1204 | break; | ||
1205 | } | ||
1206 | if (div > max_clk_div) { | ||
1207 | dev_dbg(blizzard.fbdev->dev, "reg timing failed\n"); | ||
1208 | goto err; | ||
1209 | } | ||
1210 | *extif_mem_div = div; | ||
1211 | |||
1212 | for (div = 1; div <= max_clk_div; div++) { | ||
1213 | if (calc_lut_timing(sysclk, div) == 0) | ||
1214 | break; | ||
1215 | } | ||
1216 | |||
1217 | if (div > max_clk_div) | ||
1218 | goto err; | ||
1219 | |||
1220 | blizzard.extif_clk_div = div; | ||
1221 | |||
1222 | return 0; | ||
1223 | err: | ||
1224 | dev_err(blizzard.fbdev->dev, "can't setup timings\n"); | ||
1225 | return -1; | ||
1226 | } | ||
1227 | |||
/*
 * Derive the controller's system and pixel clock rates (in Hz) from the
 * external input clock by reading back the clock source / PLL divider
 * registers programmed by the bootloader.
 *
 * @ext_clk:  external input clock rate in Hz
 * @sys_clk:  out: controller system clock rate
 * @pix_clk:  out: pixel clock rate (sys_clk / pixel divider)
 */
static void calc_blizzard_clk_rates(unsigned long ext_clk,
				    unsigned long *sys_clk, unsigned long *pix_clk)
{
	int pix_clk_src;
	int sys_div = 0, sys_mul = 0;
	int pix_div;

	pix_clk_src = blizzard_read_reg(BLIZZARD_CLK_SRC);
	/* Pixel divider field is bits 7:3, stored as divider - 1. */
	pix_div = ((pix_clk_src >> 3) & 0x1f) + 1;
	if ((pix_clk_src & (0x3 << 1)) == 0) {
		/* Source is the PLL */
		sys_div = (blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x3f) + 1;
		/* PLL multiplier is split across two synth registers. */
		sys_mul = blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_0);
		sys_mul |= ((blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_1)
				& 0x0f) << 11);
		*sys_clk = ext_clk * sys_mul / sys_div;
	} else	/* else source is ext clk, or oscillator */
		*sys_clk = ext_clk;

	*pix_clk = *sys_clk / pix_div;		/* HZ */
	dev_dbg(blizzard.fbdev->dev,
		"ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n",
		ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul);
	dev_dbg(blizzard.fbdev->dev, "sys_clk %ld pix_clk %ld\n",
		*sys_clk, *pix_clk);
}
1254 | |||
/*
 * Configure the tearing-effect (TE) synchronization signal based on the
 * panel timings read back from the controller and the achievable
 * transfer rate of the external interface.  Chooses between VS-only
 * sync, HS|VS sync, and the HNDP/VNDP-based active-high TE signal, then
 * programs the controller and the external interface accordingly.
 *
 * Returns 0 on success, -EDOM if the derived VS period is not longer
 * than the HS period, or the result of extif->setup_tearsync().
 */
static int setup_tearsync(unsigned long pix_clk, int extif_div)
{
	int hdisp, vdisp;
	int hndp, vndp;
	int hsw, vsw;
	int hs, vs;
	int hs_pol_inv, vs_pol_inv;
	int use_hsvs, use_ndp;
	u8 b;

	/* Sync widths; bit 7 of each register encodes the polarity. */
	hsw = blizzard_read_reg(BLIZZARD_HSW);
	vsw = blizzard_read_reg(BLIZZARD_VSW);
	hs_pol_inv = !(hsw & 0x80);
	vs_pol_inv = !(vsw & 0x80);
	hsw = hsw & 0x7f;
	vsw = vsw & 0x3f;

	/* Active display size; HDISP is stored in units of 8 pixels. */
	hdisp = blizzard_read_reg(BLIZZARD_HDISP) * 8;
	vdisp = blizzard_read_reg(BLIZZARD_VDISP0) +
		((blizzard_read_reg(BLIZZARD_VDISP1) & 0x3) << 8);

	/* Non-display (blanking) periods. */
	hndp = blizzard_read_reg(BLIZZARD_HNDP) & 0x3f;
	vndp = blizzard_read_reg(BLIZZARD_VNDP);

	/* time to transfer one pixel (16bpp) in ps */
	blizzard.pix_tx_time = blizzard.reg_timings.we_cycle_time;
	if (blizzard.extif->get_max_tx_rate != NULL) {
		/* The external interface might have a rate limitation,
		 * if so, we have to maximize our transfer rate.
		 */
		unsigned long min_tx_time;
		unsigned long max_tx_rate = blizzard.extif->get_max_tx_rate();

		dev_dbg(blizzard.fbdev->dev, "max_tx_rate %ld HZ\n",
			max_tx_rate);
		min_tx_time = 1000000000 / (max_tx_rate / 1000);  /* ps */
		if (blizzard.pix_tx_time < min_tx_time)
			blizzard.pix_tx_time = min_tx_time;
	}

	/* time to update one line in ps */
	blizzard.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000);
	blizzard.line_upd_time *= 1000;
	if (hdisp * blizzard.pix_tx_time > blizzard.line_upd_time)
		/* transfer speed too low, we might have to use both
		 * HS and VS */
		use_hsvs = 1;
	else
		/* decent transfer speed, we'll always use only VS */
		use_hsvs = 0;

	if (use_hsvs && (hs_pol_inv || vs_pol_inv)) {
		/* HS or'ed with VS doesn't work, use the active high
		 * TE signal based on HNDP / VNDP */
		use_ndp = 1;
		hs_pol_inv = 0;
		vs_pol_inv = 0;
		hs = hndp;
		vs = vndp;
	} else {
		/* Use HS or'ed with VS as a TE signal if both are needed
		 * or VNDP if only vsync is needed. */
		use_ndp = 0;
		hs = hsw;
		vs = vsw;
		if (!use_hsvs) {
			hs_pol_inv = 0;
			vs_pol_inv = 0;
		}
	}

	/* Convert the HS/VS pulse widths from pixel clocks to ps.
	 * NOTE(review): hs/vs are int; the * 1000000 step looks like it
	 * could overflow for large widths at low pix_clk -- confirm the
	 * value ranges for supported panels. */
	hs = hs * 1000000 / (pix_clk / 1000);		/* ps */
	hs *= 1000;

	vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000);	/* ps */
	vs *= 1000;

	if (vs <= hs)
		return -EDOM;
	/* set VS to 120% of HS to minimize VS detection time */
	vs = hs * 12 / 10;
	/* minimize HS too */
	if (hs > 10000)
		hs = 10000;

	/* Program the TE mode bits (1:0) of the NDISP control register. */
	b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
	b &= ~0x3;
	b |= use_hsvs ? 1 : 0;
	b |= (use_ndp && use_hsvs) ? 0 : 2;
	blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);

	blizzard.vsync_only = !use_hsvs;

	dev_dbg(blizzard.fbdev->dev,
		"pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n",
		pix_clk, blizzard.pix_tx_time, blizzard.line_upd_time);
	dev_dbg(blizzard.fbdev->dev,
		"hs %d ps vs %d ps mode %d vsync_only %d\n",
		hs, vs, b & 0x3, !use_hsvs);

	return blizzard.extif->setup_tearsync(1, hs, vs,
					      hs_pol_inv, vs_pol_inv,
					      extif_div);
}
1359 | |||
1360 | static void blizzard_get_caps(int plane, struct omapfb_caps *caps) | ||
1361 | { | ||
1362 | blizzard.int_ctrl->get_caps(plane, caps); | ||
1363 | caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE | | ||
1364 | OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE | | ||
1365 | OMAPFB_CAPS_WINDOW_SCALE | | ||
1366 | OMAPFB_CAPS_WINDOW_OVERLAY | | ||
1367 | OMAPFB_CAPS_WINDOW_ROTATE; | ||
1368 | if (blizzard.te_connected) | ||
1369 | caps->ctrl |= OMAPFB_CAPS_TEARSYNC; | ||
1370 | caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) | | ||
1371 | (1 << OMAPFB_COLOR_YUV420); | ||
1372 | } | ||
1373 | |||
1374 | static void _save_regs(const struct blizzard_reg_list *list, int cnt) | ||
1375 | { | ||
1376 | int i; | ||
1377 | |||
1378 | for (i = 0; i < cnt; i++, list++) { | ||
1379 | int reg; | ||
1380 | for (reg = list->start; reg <= list->end; reg += 2) | ||
1381 | blizzard_reg_cache[reg / 2] = blizzard_read_reg(reg); | ||
1382 | } | ||
1383 | } | ||
1384 | |||
1385 | static void _restore_regs(const struct blizzard_reg_list *list, int cnt) | ||
1386 | { | ||
1387 | int i; | ||
1388 | |||
1389 | for (i = 0; i < cnt; i++, list++) { | ||
1390 | int reg; | ||
1391 | for (reg = list->start; reg <= list->end; reg += 2) | ||
1392 | blizzard_write_reg(reg, blizzard_reg_cache[reg / 2]); | ||
1393 | } | ||
1394 | } | ||
1395 | |||
/* Save both the PLL and the general register banks before suspend. */
static void blizzard_save_all_regs(void)
{
	_save_regs(blizzard_pll_regs, ARRAY_SIZE(blizzard_pll_regs));
	_save_regs(blizzard_gen_regs, ARRAY_SIZE(blizzard_gen_regs));
}
1401 | |||
/* Restore the PLL registers (done first on resume, before the PLL locks). */
static void blizzard_restore_pll_regs(void)
{
	_restore_regs(blizzard_pll_regs, ARRAY_SIZE(blizzard_pll_regs));
}
1406 | |||
/* Restore the general registers (done after the PLL is locked on resume). */
static void blizzard_restore_gen_regs(void)
{
	_restore_regs(blizzard_gen_regs, ARRAY_SIZE(blizzard_gen_regs));
}
1411 | |||
/*
 * Power down the controller: flush any pending frame, stop updates,
 * save all registers, stop the SDRAM, put the chip into standby/sleep,
 * wait (up to 100 ms) for the PLL to report sleep, then cut external
 * power if a power_down hook was provided.
 */
static void blizzard_suspend(void)
{
	u32 l;
	unsigned long tmo;

	/* Push out the last frame so the panel content is consistent. */
	if (blizzard.last_color_mode) {
		update_full_screen();
		blizzard_sync();
	}
	blizzard.update_mode_before_suspend = blizzard.update_mode;
	/* the following will disable clocks as well */
	blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED);

	blizzard_save_all_regs();

	blizzard_stop_sdram();

	l = blizzard_read_reg(BLIZZARD_POWER_SAVE);
	/* Standby, Sleep. We assume we use an external clock. */
	l |= 0x03;
	blizzard_write_reg(BLIZZARD_POWER_SAVE, l);

	/* Wait for the PLL-asleep bit; on timeout force the PLL off. */
	tmo = jiffies + msecs_to_jiffies(100);
	while (!(blizzard_read_reg(BLIZZARD_PLL_MODE) & (1 << 1))) {
		if (time_after(jiffies, tmo)) {
			dev_err(blizzard.fbdev->dev,
				"s1d1374x: sleep timeout, stopping PLL manually\n");
			l = blizzard_read_reg(BLIZZARD_PLL_MODE);
			l &= ~0x03;
			/* Disable PLL, counter function */
			l |= 0x2;
			blizzard_write_reg(BLIZZARD_PLL_MODE, l);
			break;
		}
		msleep(1);
	}

	if (blizzard.power_down != NULL)
		blizzard.power_down(blizzard.fbdev->dev);
}
1452 | |||
/*
 * Power the controller back up, mirroring blizzard_suspend(): restore
 * external power, leave standby/sleep, restore and lock the PLL,
 * restart the SDRAM, restore the remaining registers, re-enable the
 * display and the previous update mode, and force a full refresh.
 */
static void blizzard_resume(void)
{
	u32 l;

	if (blizzard.power_up != NULL)
		blizzard.power_up(blizzard.fbdev->dev);

	l = blizzard_read_reg(BLIZZARD_POWER_SAVE);
	/* Standby, Sleep */
	l &= ~0x03;
	blizzard_write_reg(BLIZZARD_POWER_SAVE, l);

	blizzard_restore_pll_regs();
	l = blizzard_read_reg(BLIZZARD_PLL_MODE);
	l &= ~0x03;
	/* Enable PLL, counter function */
	l |= 0x1;
	blizzard_write_reg(BLIZZARD_PLL_MODE, l);

	/* Wait for PLL lock.
	 * NOTE(review): unlike the suspend path, this poll has no timeout
	 * and would hang if the PLL never locks -- confirm acceptable. */
	while (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & (1 << 7)))
		msleep(1);

	blizzard_restart_sdram();

	blizzard_restore_gen_regs();

	/* Enable display */
	blizzard_write_reg(BLIZZARD_DISPLAY_MODE, 0x01);

	/* the following will enable clocks as necessary */
	blizzard_set_update_mode(blizzard.update_mode_before_suspend);

	/* Force a background update */
	blizzard.zoom_on = 1;
	update_full_screen();
	blizzard_sync();
}
1490 | |||
1491 | static int blizzard_init(struct omapfb_device *fbdev, int ext_mode, | ||
1492 | struct omapfb_mem_desc *req_vram) | ||
1493 | { | ||
1494 | int r = 0, i; | ||
1495 | u8 rev, conf; | ||
1496 | unsigned long ext_clk; | ||
1497 | int extif_div; | ||
1498 | unsigned long sys_clk, pix_clk; | ||
1499 | struct omapfb_platform_data *omapfb_conf; | ||
1500 | struct blizzard_platform_data *ctrl_conf; | ||
1501 | |||
1502 | blizzard.fbdev = fbdev; | ||
1503 | |||
1504 | BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl); | ||
1505 | |||
1506 | blizzard.fbdev = fbdev; | ||
1507 | blizzard.extif = fbdev->ext_if; | ||
1508 | blizzard.int_ctrl = fbdev->int_ctrl; | ||
1509 | |||
1510 | omapfb_conf = fbdev->dev->platform_data; | ||
1511 | ctrl_conf = omapfb_conf->ctrl_platform_data; | ||
1512 | if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) { | ||
1513 | dev_err(fbdev->dev, "s1d1374x: missing platform data\n"); | ||
1514 | r = -ENOENT; | ||
1515 | goto err1; | ||
1516 | } | ||
1517 | |||
1518 | blizzard.power_down = ctrl_conf->power_down; | ||
1519 | blizzard.power_up = ctrl_conf->power_up; | ||
1520 | |||
1521 | spin_lock_init(&blizzard.req_lock); | ||
1522 | |||
1523 | if ((r = blizzard.int_ctrl->init(fbdev, 1, req_vram)) < 0) | ||
1524 | goto err1; | ||
1525 | |||
1526 | if ((r = blizzard.extif->init(fbdev)) < 0) | ||
1527 | goto err2; | ||
1528 | |||
1529 | blizzard_ctrl.set_color_key = blizzard.int_ctrl->set_color_key; | ||
1530 | blizzard_ctrl.get_color_key = blizzard.int_ctrl->get_color_key; | ||
1531 | blizzard_ctrl.setup_mem = blizzard.int_ctrl->setup_mem; | ||
1532 | blizzard_ctrl.mmap = blizzard.int_ctrl->mmap; | ||
1533 | |||
1534 | ext_clk = ctrl_conf->get_clock_rate(fbdev->dev); | ||
1535 | if ((r = calc_extif_timings(ext_clk, &extif_div)) < 0) | ||
1536 | goto err3; | ||
1537 | |||
1538 | set_extif_timings(&blizzard.reg_timings); | ||
1539 | |||
1540 | if (blizzard.power_up != NULL) | ||
1541 | blizzard.power_up(fbdev->dev); | ||
1542 | |||
1543 | calc_blizzard_clk_rates(ext_clk, &sys_clk, &pix_clk); | ||
1544 | |||
1545 | if ((r = calc_extif_timings(sys_clk, &extif_div)) < 0) | ||
1546 | goto err3; | ||
1547 | set_extif_timings(&blizzard.reg_timings); | ||
1548 | |||
1549 | if (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x80)) { | ||
1550 | dev_err(fbdev->dev, | ||
1551 | "controller not initialized by the bootloader\n"); | ||
1552 | r = -ENODEV; | ||
1553 | goto err3; | ||
1554 | } | ||
1555 | |||
1556 | if (ctrl_conf->te_connected) { | ||
1557 | if ((r = setup_tearsync(pix_clk, extif_div)) < 0) | ||
1558 | goto err3; | ||
1559 | blizzard.te_connected = 1; | ||
1560 | } | ||
1561 | |||
1562 | rev = blizzard_read_reg(BLIZZARD_REV_CODE); | ||
1563 | conf = blizzard_read_reg(BLIZZARD_CONFIG); | ||
1564 | |||
1565 | switch (rev & 0xfc) { | ||
1566 | case 0x9c: | ||
1567 | blizzard.version = BLIZZARD_VERSION_S1D13744; | ||
1568 | pr_info("omapfb: s1d13744 LCD controller rev %d " | ||
1569 | "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07); | ||
1570 | break; | ||
1571 | case 0xa4: | ||
1572 | blizzard.version = BLIZZARD_VERSION_S1D13745; | ||
1573 | pr_info("omapfb: s1d13745 LCD controller rev %d " | ||
1574 | "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07); | ||
1575 | break; | ||
1576 | default: | ||
1577 | dev_err(fbdev->dev, "invalid s1d1374x revision %02x\n", | ||
1578 | rev); | ||
1579 | r = -ENODEV; | ||
1580 | goto err3; | ||
1581 | } | ||
1582 | |||
1583 | blizzard.max_transmit_size = blizzard.extif->max_transmit_size; | ||
1584 | |||
1585 | blizzard.update_mode = OMAPFB_UPDATE_DISABLED; | ||
1586 | |||
1587 | blizzard.auto_update_window.x = 0; | ||
1588 | blizzard.auto_update_window.y = 0; | ||
1589 | blizzard.auto_update_window.width = fbdev->panel->x_res; | ||
1590 | blizzard.auto_update_window.height = fbdev->panel->y_res; | ||
1591 | blizzard.auto_update_window.out_x = 0; | ||
1592 | blizzard.auto_update_window.out_y = 0; | ||
1593 | blizzard.auto_update_window.out_width = fbdev->panel->x_res; | ||
1594 | blizzard.auto_update_window.out_height = fbdev->panel->y_res; | ||
1595 | blizzard.auto_update_window.format = 0; | ||
1596 | |||
1597 | blizzard.screen_width = fbdev->panel->x_res; | ||
1598 | blizzard.screen_height = fbdev->panel->y_res; | ||
1599 | |||
1600 | init_timer(&blizzard.auto_update_timer); | ||
1601 | blizzard.auto_update_timer.function = blizzard_update_window_auto; | ||
1602 | blizzard.auto_update_timer.data = 0; | ||
1603 | |||
1604 | INIT_LIST_HEAD(&blizzard.free_req_list); | ||
1605 | INIT_LIST_HEAD(&blizzard.pending_req_list); | ||
1606 | for (i = 0; i < ARRAY_SIZE(blizzard.req_pool); i++) | ||
1607 | list_add(&blizzard.req_pool[i].entry, &blizzard.free_req_list); | ||
1608 | BUG_ON(i <= IRQ_REQ_POOL_SIZE); | ||
1609 | sema_init(&blizzard.req_sema, i - IRQ_REQ_POOL_SIZE); | ||
1610 | |||
1611 | return 0; | ||
1612 | err3: | ||
1613 | if (blizzard.power_down != NULL) | ||
1614 | blizzard.power_down(fbdev->dev); | ||
1615 | blizzard.extif->cleanup(); | ||
1616 | err2: | ||
1617 | blizzard.int_ctrl->cleanup(); | ||
1618 | err1: | ||
1619 | return r; | ||
1620 | } | ||
1621 | |||
/*
 * Tear down in reverse of blizzard_init(): stop updates, release the
 * external interface and internal controller, then cut power last.
 */
static void blizzard_cleanup(void)
{
	blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED);
	blizzard.extif->cleanup();
	blizzard.int_ctrl->cleanup();
	if (blizzard.power_down != NULL)
		blizzard.power_down(blizzard.fbdev->dev);
}
1630 | |||
/*
 * LCD controller operations exported to the omapfb core.  Note that
 * set_color_key, get_color_key, setup_mem and mmap are filled in at
 * runtime by blizzard_init() from the internal controller's ops.
 */
struct lcd_ctrl blizzard_ctrl = {
	.name = "blizzard",
	.init = blizzard_init,
	.cleanup = blizzard_cleanup,
	.bind_client = blizzard_bind_client,
	.get_caps = blizzard_get_caps,
	.set_update_mode = blizzard_set_update_mode,
	.get_update_mode = blizzard_get_update_mode,
	.setup_plane = blizzard_setup_plane,
	.set_scale = blizzard_set_scale,
	.enable_plane = blizzard_enable_plane,
	.set_rotate = blizzard_set_rotate,
	.update_window = blizzard_update_window_async,
	.sync = blizzard_sync,
	.suspend = blizzard_suspend,
	.resume = blizzard_resume,
};
1648 | |||
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c new file mode 100644 index 00000000000..0ccd7adf47b --- /dev/null +++ b/drivers/video/omap/dispc.c | |||
@@ -0,0 +1,1546 @@ | |||
1 | /* | ||
2 | * OMAP2 display controller support | ||
3 | * | ||
4 | * Copyright (C) 2005 Nokia Corporation | ||
5 | * Author: Imre Deak <imre.deak@nokia.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the | ||
9 | * Free Software Foundation; either version 2 of the License, or (at your | ||
10 | * option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/dma-mapping.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/vmalloc.h> | ||
25 | #include <linux/clk.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/platform_device.h> | ||
28 | #include <linux/slab.h> | ||
29 | |||
30 | #include <plat/sram.h> | ||
31 | #include <plat/board.h> | ||
32 | |||
33 | #include "omapfb.h" | ||
34 | #include "dispc.h" | ||
35 | |||
36 | #define MODULE_NAME "dispc" | ||
37 | |||
38 | #define DSS_BASE 0x48050000 | ||
39 | #define DSS_SYSCONFIG 0x0010 | ||
40 | |||
41 | #define DISPC_BASE 0x48050400 | ||
42 | |||
43 | /* DISPC common */ | ||
44 | #define DISPC_REVISION 0x0000 | ||
45 | #define DISPC_SYSCONFIG 0x0010 | ||
46 | #define DISPC_SYSSTATUS 0x0014 | ||
47 | #define DISPC_IRQSTATUS 0x0018 | ||
48 | #define DISPC_IRQENABLE 0x001C | ||
49 | #define DISPC_CONTROL 0x0040 | ||
50 | #define DISPC_CONFIG 0x0044 | ||
51 | #define DISPC_CAPABLE 0x0048 | ||
52 | #define DISPC_DEFAULT_COLOR0 0x004C | ||
53 | #define DISPC_DEFAULT_COLOR1 0x0050 | ||
54 | #define DISPC_TRANS_COLOR0 0x0054 | ||
55 | #define DISPC_TRANS_COLOR1 0x0058 | ||
56 | #define DISPC_LINE_STATUS 0x005C | ||
57 | #define DISPC_LINE_NUMBER 0x0060 | ||
58 | #define DISPC_TIMING_H 0x0064 | ||
59 | #define DISPC_TIMING_V 0x0068 | ||
60 | #define DISPC_POL_FREQ 0x006C | ||
61 | #define DISPC_DIVISOR 0x0070 | ||
62 | #define DISPC_SIZE_DIG 0x0078 | ||
63 | #define DISPC_SIZE_LCD 0x007C | ||
64 | |||
65 | #define DISPC_DATA_CYCLE1 0x01D4 | ||
66 | #define DISPC_DATA_CYCLE2 0x01D8 | ||
67 | #define DISPC_DATA_CYCLE3 0x01DC | ||
68 | |||
69 | /* DISPC GFX plane */ | ||
70 | #define DISPC_GFX_BA0 0x0080 | ||
71 | #define DISPC_GFX_BA1 0x0084 | ||
72 | #define DISPC_GFX_POSITION 0x0088 | ||
73 | #define DISPC_GFX_SIZE 0x008C | ||
74 | #define DISPC_GFX_ATTRIBUTES 0x00A0 | ||
75 | #define DISPC_GFX_FIFO_THRESHOLD 0x00A4 | ||
76 | #define DISPC_GFX_FIFO_SIZE_STATUS 0x00A8 | ||
77 | #define DISPC_GFX_ROW_INC 0x00AC | ||
78 | #define DISPC_GFX_PIXEL_INC 0x00B0 | ||
79 | #define DISPC_GFX_WINDOW_SKIP 0x00B4 | ||
80 | #define DISPC_GFX_TABLE_BA 0x00B8 | ||
81 | |||
82 | /* DISPC Video plane 1/2 */ | ||
83 | #define DISPC_VID1_BASE 0x00BC | ||
84 | #define DISPC_VID2_BASE 0x014C | ||
85 | |||
86 | /* Offsets into DISPC_VID1/2_BASE */ | ||
87 | #define DISPC_VID_BA0 0x0000 | ||
88 | #define DISPC_VID_BA1 0x0004 | ||
89 | #define DISPC_VID_POSITION 0x0008 | ||
90 | #define DISPC_VID_SIZE 0x000C | ||
91 | #define DISPC_VID_ATTRIBUTES 0x0010 | ||
92 | #define DISPC_VID_FIFO_THRESHOLD 0x0014 | ||
93 | #define DISPC_VID_FIFO_SIZE_STATUS 0x0018 | ||
94 | #define DISPC_VID_ROW_INC 0x001C | ||
95 | #define DISPC_VID_PIXEL_INC 0x0020 | ||
96 | #define DISPC_VID_FIR 0x0024 | ||
97 | #define DISPC_VID_PICTURE_SIZE 0x0028 | ||
98 | #define DISPC_VID_ACCU0 0x002C | ||
99 | #define DISPC_VID_ACCU1 0x0030 | ||
100 | |||
101 | /* 8 elements in 8 byte increments */ | ||
102 | #define DISPC_VID_FIR_COEF_H0 0x0034 | ||
103 | /* 8 elements in 8 byte increments */ | ||
104 | #define DISPC_VID_FIR_COEF_HV0 0x0038 | ||
105 | /* 5 elements in 4 byte increments */ | ||
106 | #define DISPC_VID_CONV_COEF0 0x0074 | ||
107 | |||
108 | #define DISPC_IRQ_FRAMEMASK 0x0001 | ||
109 | #define DISPC_IRQ_VSYNC 0x0002 | ||
110 | #define DISPC_IRQ_EVSYNC_EVEN 0x0004 | ||
111 | #define DISPC_IRQ_EVSYNC_ODD 0x0008 | ||
112 | #define DISPC_IRQ_ACBIAS_COUNT_STAT 0x0010 | ||
113 | #define DISPC_IRQ_PROG_LINE_NUM 0x0020 | ||
114 | #define DISPC_IRQ_GFX_FIFO_UNDERFLOW 0x0040 | ||
115 | #define DISPC_IRQ_GFX_END_WIN 0x0080 | ||
116 | #define DISPC_IRQ_PAL_GAMMA_MASK 0x0100 | ||
117 | #define DISPC_IRQ_OCP_ERR 0x0200 | ||
118 | #define DISPC_IRQ_VID1_FIFO_UNDERFLOW 0x0400 | ||
119 | #define DISPC_IRQ_VID1_END_WIN 0x0800 | ||
120 | #define DISPC_IRQ_VID2_FIFO_UNDERFLOW 0x1000 | ||
121 | #define DISPC_IRQ_VID2_END_WIN 0x2000 | ||
122 | #define DISPC_IRQ_SYNC_LOST 0x4000 | ||
123 | |||
124 | #define DISPC_IRQ_MASK_ALL 0x7fff | ||
125 | |||
126 | #define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \ | ||
127 | DISPC_IRQ_VID1_FIFO_UNDERFLOW | \ | ||
128 | DISPC_IRQ_VID2_FIFO_UNDERFLOW | \ | ||
129 | DISPC_IRQ_SYNC_LOST) | ||
130 | |||
131 | #define RFBI_CONTROL 0x48050040 | ||
132 | |||
133 | #define MAX_PALETTE_SIZE (256 * 16) | ||
134 | |||
135 | #define FLD_MASK(pos, len) (((1 << len) - 1) << pos) | ||
136 | |||
137 | #define MOD_REG_FLD(reg, mask, val) \ | ||
138 | dispc_write_reg((reg), (dispc_read_reg(reg) & ~(mask)) | (val)); | ||
139 | |||
140 | #define OMAP2_SRAM_START 0x40200000 | ||
141 | /* Maximum size, in reality this is smaller if SRAM is partially locked. */ | ||
142 | #define OMAP2_SRAM_SIZE 0xa0000 /* 640k */ | ||
143 | |||
144 | /* We support the SDRAM / SRAM types. See OMAPFB_PLANE_MEMTYPE_* in omapfb.h */ | ||
145 | #define DISPC_MEMTYPE_NUM 2 | ||
146 | |||
147 | #define RESMAP_SIZE(_page_cnt) \ | ||
148 | ((_page_cnt + (sizeof(unsigned long) * 8) - 1) / 8) | ||
149 | #define RESMAP_PTR(_res_map, _page_nr) \ | ||
150 | (((_res_map)->map) + (_page_nr) / (sizeof(unsigned long) * 8)) | ||
151 | #define RESMAP_MASK(_page_nr) \ | ||
152 | (1 << ((_page_nr) & (sizeof(unsigned long) * 8 - 1))) | ||
153 | |||
/*
 * Page-granularity reservation bitmap for one memory type region.
 * @start: base address of the tracked region
 * @page_cnt: number of pages covered by @map
 * @map: one bit per page (see RESMAP_PTR/RESMAP_MASK); presumably a set
 *       bit means "page reserved" -- confirm against the alloc helpers.
 */
struct resmap {
	unsigned long start;
	unsigned page_cnt;
	unsigned long *map;
};
159 | |||
160 | #define MAX_IRQ_HANDLERS 4 | ||
161 | |||
/* Driver-wide DISPC state (single instance). */
static struct {
	void __iomem *base;		/* mapped DISPC register base */

	struct omapfb_mem_desc mem_desc;
	/* Reservation maps per memory type (SDRAM / SRAM). */
	struct resmap *res_map[DISPC_MEMTYPE_NUM];
	atomic_t map_count[OMAPFB_PLANE_NUM];

	/* Palette/gamma table buffer (DMA-coherent). */
	dma_addr_t palette_paddr;
	void *palette_vaddr;

	int ext_mode;			/* non-zero: RFBI/external mode */

	/* Registered DISPC interrupt callbacks and their masks. */
	struct {
		u32 irq_mask;
		void (*callback)(void *);
		void *data;
	} irq_handlers[MAX_IRQ_HANDLERS];
	struct completion frame_done;

	/* Cached scaling (FIR) increments per plane. */
	int fir_hinc[OMAPFB_PLANE_NUM];
	int fir_vinc[OMAPFB_PLANE_NUM];

	/* DSS interface and functional clocks. */
	struct clk *dss_ick, *dss1_fck;
	struct clk *dss_54m_fck;

	enum omapfb_update_mode update_mode;
	struct omapfb_device *fbdev;

	struct omapfb_color_key color_key;
} dispc;
192 | |||
193 | static void enable_lcd_clocks(int enable); | ||
194 | |||
195 | static void inline dispc_write_reg(int idx, u32 val) | ||
196 | { | ||
197 | __raw_writel(val, dispc.base + idx); | ||
198 | } | ||
199 | |||
200 | static u32 inline dispc_read_reg(int idx) | ||
201 | { | ||
202 | u32 l = __raw_readl(dispc.base + idx); | ||
203 | return l; | ||
204 | } | ||
205 | |||
206 | /* Select RFBI or bypass mode */ | ||
/*
 * Select RFBI or bypass mode: program the DISPC_CONTROL RFBI/GPIO bits
 * and the bypass bit in the (separately mapped) RFBI module.
 */
static void enable_rfbi_mode(int enable)
{
	void __iomem *rfbi_control;
	u32 l;

	l = dispc_read_reg(DISPC_CONTROL);
	/* Enable RFBI, GPIO0/1 */
	l &= ~((1 << 11) | (1 << 15) | (1 << 16));
	l |= enable ? (1 << 11) : 0;
	/* RFBI En: GPIO0/1=10 RFBI Dis: GPIO0/1=11 */
	l |= 1 << 15;
	l |= enable ? 0 : (1 << 16);
	dispc_write_reg(DISPC_CONTROL, l);

	/* Set bypass mode in RFBI module.
	 * NOTE(review): the bypass bit is only ever set (when disabling),
	 * never cleared when enabling -- confirm the reset state makes
	 * this safe. */
	rfbi_control = ioremap(RFBI_CONTROL, SZ_1K);
	if (!rfbi_control) {
		pr_err("Unable to ioremap rfbi_control\n");
		return;
	}
	l = __raw_readl(rfbi_control);
	l |= enable ? 0 : (1 << 1);
	__raw_writel(l, rfbi_control);
	iounmap(rfbi_control);
}
232 | |||
233 | static void set_lcd_data_lines(int data_lines) | ||
234 | { | ||
235 | u32 l; | ||
236 | int code = 0; | ||
237 | |||
238 | switch (data_lines) { | ||
239 | case 12: | ||
240 | code = 0; | ||
241 | break; | ||
242 | case 16: | ||
243 | code = 1; | ||
244 | break; | ||
245 | case 18: | ||
246 | code = 2; | ||
247 | break; | ||
248 | case 24: | ||
249 | code = 3; | ||
250 | break; | ||
251 | default: | ||
252 | BUG(); | ||
253 | } | ||
254 | |||
255 | l = dispc_read_reg(DISPC_CONTROL); | ||
256 | l &= ~(0x03 << 8); | ||
257 | l |= code << 8; | ||
258 | dispc_write_reg(DISPC_CONTROL, l); | ||
259 | } | ||
260 | |||
/*
 * Select the CLUT/frame data load mode in DISPC_CONFIG bits 2:1.
 * @mode must be a combination of the DISPC_LOAD_* values only.
 */
static void set_load_mode(int mode)
{
	BUG_ON(mode & ~(DISPC_LOAD_CLUT_ONLY | DISPC_LOAD_FRAME_ONLY |
			DISPC_LOAD_CLUT_ONCE_FRAME));
	MOD_REG_FLD(DISPC_CONFIG, 0x03 << 1, mode << 1);
}
267 | |||
/*
 * Set the active display size for the LCD output.  The hardware fields
 * are 11 bits wide and store size - 1, hence the 2048 upper bound.
 */
void omap_dispc_set_lcd_size(int x, int y)
{
	BUG_ON((x > (1 << 11)) || (y > (1 << 11)));
	enable_lcd_clocks(1);
	MOD_REG_FLD(DISPC_SIZE_LCD, FLD_MASK(16, 11) | FLD_MASK(0, 11),
			((y - 1) << 16) | (x - 1));
	enable_lcd_clocks(0);
}
EXPORT_SYMBOL(omap_dispc_set_lcd_size);
277 | |||
/*
 * Set the active display size for the digital (TV) output.  Same
 * 11-bit, size - 1 register encoding as the LCD output.
 */
void omap_dispc_set_digit_size(int x, int y)
{
	BUG_ON((x > (1 << 11)) || (y > (1 << 11)));
	enable_lcd_clocks(1);
	MOD_REG_FLD(DISPC_SIZE_DIG, FLD_MASK(16, 11) | FLD_MASK(0, 11),
			((y - 1) << 16) | (x - 1));
	enable_lcd_clocks(0);
}
EXPORT_SYMBOL(omap_dispc_set_digit_size);
287 | |||
/*
 * Program the FIFO low/high thresholds for a plane (0 = GFX, 1/2 = VID)
 * based on the FIFO size reported by the hardware.  In external (RFBI)
 * mode the thresholds are set higher (3/4 .. full) than in bypass mode
 * (1/4 .. 3/4).
 */
static void setup_plane_fifo(int plane, int ext_mode)
{
	const u32 ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD,
				DISPC_VID1_BASE + DISPC_VID_FIFO_THRESHOLD,
				DISPC_VID2_BASE + DISPC_VID_FIFO_THRESHOLD };
	const u32 fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS,
				DISPC_VID1_BASE + DISPC_VID_FIFO_SIZE_STATUS,
				DISPC_VID2_BASE + DISPC_VID_FIFO_SIZE_STATUS };
	int low, high;
	u32 l;

	BUG_ON(plane > 2);

	/* FIFO size lives in the low 11 bits of the size/status register. */
	l = dispc_read_reg(fsz_reg[plane]);
	l &= FLD_MASK(0, 11);
	if (ext_mode) {
		low = l * 3 / 4;
		high = l;
	} else {
		low = l / 4;
		high = l * 3 / 4;
	}
	MOD_REG_FLD(ftrs_reg[plane], FLD_MASK(16, 12) | FLD_MASK(0, 12),
			(high << 16) | low);
}
313 | |||
314 | void omap_dispc_enable_lcd_out(int enable) | ||
315 | { | ||
316 | enable_lcd_clocks(1); | ||
317 | MOD_REG_FLD(DISPC_CONTROL, 1, enable ? 1 : 0); | ||
318 | enable_lcd_clocks(0); | ||
319 | } | ||
320 | EXPORT_SYMBOL(omap_dispc_enable_lcd_out); | ||
321 | |||
322 | void omap_dispc_enable_digit_out(int enable) | ||
323 | { | ||
324 | enable_lcd_clocks(1); | ||
325 | MOD_REG_FLD(DISPC_CONTROL, 1 << 1, enable ? 1 << 1 : 0); | ||
326 | enable_lcd_clocks(0); | ||
327 | } | ||
328 | EXPORT_SYMBOL(omap_dispc_enable_digit_out); | ||
329 | |||
/*
 * Program one DISPC pipeline: color format, color-space conversion,
 * burst size, output channel, base address, position, size and row
 * increment.
 *
 * @plane: OMAPFB_PLANE_GFX/VID1/VID2 — indexes the register tables below
 * @channel_out: OMAPFB_CHANNEL_OUT_LCD or _DIGIT
 * @paddr: physical base address of the frame data
 * @screen_width: full scanline width in pixels (>= @width)
 *
 * Returns the frame's size in bytes on success, -EINVAL for an invalid
 * plane/channel/color-mode combination (YUV modes are rejected on GFX).
 */
static inline int _setup_plane(int plane, int channel_out,
			       u32 paddr, int screen_width,
			       int pos_x, int pos_y, int width, int height,
			       int color_mode)
{
	/* Per-plane register addresses, indexed by plane number. */
	const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES,
				DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
				DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
	const u32 ba_reg[] = { DISPC_GFX_BA0, DISPC_VID1_BASE + DISPC_VID_BA0,
				DISPC_VID2_BASE + DISPC_VID_BA0 };
	const u32 ps_reg[] = { DISPC_GFX_POSITION,
				DISPC_VID1_BASE + DISPC_VID_POSITION,
				DISPC_VID2_BASE + DISPC_VID_POSITION };
	const u32 sz_reg[] = { DISPC_GFX_SIZE,
				DISPC_VID1_BASE + DISPC_VID_PICTURE_SIZE,
				DISPC_VID2_BASE + DISPC_VID_PICTURE_SIZE };
	const u32 ri_reg[] = { DISPC_GFX_ROW_INC,
				DISPC_VID1_BASE + DISPC_VID_ROW_INC,
				DISPC_VID2_BASE + DISPC_VID_ROW_INC };
	/* Index 0 is a placeholder: GFX has no scaled-output size register. */
	const u32 vs_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_SIZE,
				DISPC_VID2_BASE + DISPC_VID_SIZE };

	int chout_shift, burst_shift;
	int chout_val;
	int color_code;
	int bpp;
	int cconv_en;
	int set_vsize;
	u32 l;

#ifdef VERBOSE
	dev_dbg(dispc.fbdev->dev, "plane %d channel %d paddr %#08x scr_width %d"
		" pos_x %d pos_y %d width %d height %d color_mode %d\n",
		plane, channel_out, paddr, screen_width, pos_x, pos_y,
		width, height, color_mode);
#endif

	/* GFX and VID attribute registers place the burst-size and
	 * channel-out fields at different bit positions. */
	set_vsize = 0;
	switch (plane) {
	case OMAPFB_PLANE_GFX:
		burst_shift = 6;
		chout_shift = 8;
		break;
	case OMAPFB_PLANE_VID1:
	case OMAPFB_PLANE_VID2:
		burst_shift = 14;
		chout_shift = 16;
		set_vsize = 1;
		break;
	default:
		return -EINVAL;
	}

	switch (channel_out) {
	case OMAPFB_CHANNEL_OUT_LCD:
		chout_val = 0;
		break;
	case OMAPFB_CHANNEL_OUT_DIGIT:
		chout_val = 1;
		break;
	default:
		return -EINVAL;
	}

	/* YUV formats need the color-space converter; plane 0 (GFX)
	 * has none, so they are only accepted on the video planes. */
	cconv_en = 0;
	switch (color_mode) {
	case OMAPFB_COLOR_RGB565:
		color_code = DISPC_RGB_16_BPP;
		bpp = 16;
		break;
	case OMAPFB_COLOR_YUV422:
		if (plane == 0)
			return -EINVAL;
		color_code = DISPC_UYVY_422;
		cconv_en = 1;
		bpp = 16;
		break;
	case OMAPFB_COLOR_YUY422:
		if (plane == 0)
			return -EINVAL;
		color_code = DISPC_YUV2_422;
		cconv_en = 1;
		bpp = 16;
		break;
	default:
		return -EINVAL;
	}

	l = dispc_read_reg(at_reg[plane]);

	/* Color format in bits 4:1, color-conversion enable in bit 9. */
	l &= ~(0x0f << 1);
	l |= color_code << 1;
	l &= ~(1 << 9);
	l |= cconv_en << 9;

	l &= ~(0x03 << burst_shift);
	l |= DISPC_BURST_8x32 << burst_shift;

	l &= ~(1 << chout_shift);
	l |= chout_val << chout_shift;

	dispc_write_reg(at_reg[plane], l);

	dispc_write_reg(ba_reg[plane], paddr);
	MOD_REG_FLD(ps_reg[plane],
		    FLD_MASK(16, 11) | FLD_MASK(0, 11), (pos_y << 16) | pos_x);

	/* Input picture size; fields hold (value - 1). */
	MOD_REG_FLD(sz_reg[plane], FLD_MASK(16, 11) | FLD_MASK(0, 11),
			((height - 1) << 16) | (width - 1));

	if (set_vsize) {
		/* Set video size if set_scale hasn't set it */
		if (!dispc.fir_vinc[plane])
			MOD_REG_FLD(vs_reg[plane],
				FLD_MASK(16, 11), (height - 1) << 16);
		if (!dispc.fir_hinc[plane])
			MOD_REG_FLD(vs_reg[plane],
				FLD_MASK(0, 11), width - 1);
	}

	/* Row increment skips the part of each scanline outside the
	 * visible window; the hardware expects the value offset by one. */
	dispc_write_reg(ri_reg[plane], (screen_width - width) * bpp / 8 + 1);

	return height * screen_width * bpp / 8;
}
454 | |||
455 | static int omap_dispc_setup_plane(int plane, int channel_out, | ||
456 | unsigned long offset, | ||
457 | int screen_width, | ||
458 | int pos_x, int pos_y, int width, int height, | ||
459 | int color_mode) | ||
460 | { | ||
461 | u32 paddr; | ||
462 | int r; | ||
463 | |||
464 | if ((unsigned)plane > dispc.mem_desc.region_cnt) | ||
465 | return -EINVAL; | ||
466 | paddr = dispc.mem_desc.region[plane].paddr + offset; | ||
467 | enable_lcd_clocks(1); | ||
468 | r = _setup_plane(plane, channel_out, paddr, | ||
469 | screen_width, | ||
470 | pos_x, pos_y, width, height, color_mode); | ||
471 | enable_lcd_clocks(0); | ||
472 | return r; | ||
473 | } | ||
474 | |||
475 | static void write_firh_reg(int plane, int reg, u32 value) | ||
476 | { | ||
477 | u32 base; | ||
478 | |||
479 | if (plane == 1) | ||
480 | base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_H0; | ||
481 | else | ||
482 | base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_H0; | ||
483 | dispc_write_reg(base + reg * 8, value); | ||
484 | } | ||
485 | |||
486 | static void write_firhv_reg(int plane, int reg, u32 value) | ||
487 | { | ||
488 | u32 base; | ||
489 | |||
490 | if (plane == 1) | ||
491 | base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_HV0; | ||
492 | else | ||
493 | base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_HV0; | ||
494 | dispc_write_reg(base + reg * 8, value); | ||
495 | } | ||
496 | |||
497 | static void set_upsampling_coef_table(int plane) | ||
498 | { | ||
499 | const u32 coef[][2] = { | ||
500 | { 0x00800000, 0x00800000 }, | ||
501 | { 0x0D7CF800, 0x037B02FF }, | ||
502 | { 0x1E70F5FF, 0x0C6F05FE }, | ||
503 | { 0x335FF5FE, 0x205907FB }, | ||
504 | { 0xF74949F7, 0x00404000 }, | ||
505 | { 0xF55F33FB, 0x075920FE }, | ||
506 | { 0xF5701EFE, 0x056F0CFF }, | ||
507 | { 0xF87C0DFF, 0x027B0300 }, | ||
508 | }; | ||
509 | int i; | ||
510 | |||
511 | for (i = 0; i < 8; i++) { | ||
512 | write_firh_reg(plane, i, coef[i][0]); | ||
513 | write_firhv_reg(plane, i, coef[i][1]); | ||
514 | } | ||
515 | } | ||
516 | |||
517 | static int omap_dispc_set_scale(int plane, | ||
518 | int orig_width, int orig_height, | ||
519 | int out_width, int out_height) | ||
520 | { | ||
521 | const u32 at_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES, | ||
522 | DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES }; | ||
523 | const u32 vs_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_SIZE, | ||
524 | DISPC_VID2_BASE + DISPC_VID_SIZE }; | ||
525 | const u32 fir_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_FIR, | ||
526 | DISPC_VID2_BASE + DISPC_VID_FIR }; | ||
527 | |||
528 | u32 l; | ||
529 | int fir_hinc; | ||
530 | int fir_vinc; | ||
531 | |||
532 | if ((unsigned)plane > OMAPFB_PLANE_NUM) | ||
533 | return -ENODEV; | ||
534 | |||
535 | if (plane == OMAPFB_PLANE_GFX && | ||
536 | (out_width != orig_width || out_height != orig_height)) | ||
537 | return -EINVAL; | ||
538 | |||
539 | enable_lcd_clocks(1); | ||
540 | if (orig_width < out_width) { | ||
541 | /* | ||
542 | * Upsampling. | ||
543 | * Currently you can only scale both dimensions in one way. | ||
544 | */ | ||
545 | if (orig_height > out_height || | ||
546 | orig_width * 8 < out_width || | ||
547 | orig_height * 8 < out_height) { | ||
548 | enable_lcd_clocks(0); | ||
549 | return -EINVAL; | ||
550 | } | ||
551 | set_upsampling_coef_table(plane); | ||
552 | } else if (orig_width > out_width) { | ||
553 | /* Downsampling not yet supported | ||
554 | */ | ||
555 | |||
556 | enable_lcd_clocks(0); | ||
557 | return -EINVAL; | ||
558 | } | ||
559 | if (!orig_width || orig_width == out_width) | ||
560 | fir_hinc = 0; | ||
561 | else | ||
562 | fir_hinc = 1024 * orig_width / out_width; | ||
563 | if (!orig_height || orig_height == out_height) | ||
564 | fir_vinc = 0; | ||
565 | else | ||
566 | fir_vinc = 1024 * orig_height / out_height; | ||
567 | dispc.fir_hinc[plane] = fir_hinc; | ||
568 | dispc.fir_vinc[plane] = fir_vinc; | ||
569 | |||
570 | MOD_REG_FLD(fir_reg[plane], | ||
571 | FLD_MASK(16, 12) | FLD_MASK(0, 12), | ||
572 | ((fir_vinc & 4095) << 16) | | ||
573 | (fir_hinc & 4095)); | ||
574 | |||
575 | dev_dbg(dispc.fbdev->dev, "out_width %d out_height %d orig_width %d " | ||
576 | "orig_height %d fir_hinc %d fir_vinc %d\n", | ||
577 | out_width, out_height, orig_width, orig_height, | ||
578 | fir_hinc, fir_vinc); | ||
579 | |||
580 | MOD_REG_FLD(vs_reg[plane], | ||
581 | FLD_MASK(16, 11) | FLD_MASK(0, 11), | ||
582 | ((out_height - 1) << 16) | (out_width - 1)); | ||
583 | |||
584 | l = dispc_read_reg(at_reg[plane]); | ||
585 | l &= ~(0x03 << 5); | ||
586 | l |= fir_hinc ? (1 << 5) : 0; | ||
587 | l |= fir_vinc ? (1 << 6) : 0; | ||
588 | dispc_write_reg(at_reg[plane], l); | ||
589 | |||
590 | enable_lcd_clocks(0); | ||
591 | return 0; | ||
592 | } | ||
593 | |||
594 | static int omap_dispc_enable_plane(int plane, int enable) | ||
595 | { | ||
596 | const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES, | ||
597 | DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES, | ||
598 | DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES }; | ||
599 | if ((unsigned int)plane > dispc.mem_desc.region_cnt) | ||
600 | return -EINVAL; | ||
601 | |||
602 | enable_lcd_clocks(1); | ||
603 | MOD_REG_FLD(at_reg[plane], 1, enable ? 1 : 0); | ||
604 | enable_lcd_clocks(0); | ||
605 | |||
606 | return 0; | ||
607 | } | ||
608 | |||
609 | static int omap_dispc_set_color_key(struct omapfb_color_key *ck) | ||
610 | { | ||
611 | u32 df_reg, tr_reg; | ||
612 | int shift, val; | ||
613 | |||
614 | switch (ck->channel_out) { | ||
615 | case OMAPFB_CHANNEL_OUT_LCD: | ||
616 | df_reg = DISPC_DEFAULT_COLOR0; | ||
617 | tr_reg = DISPC_TRANS_COLOR0; | ||
618 | shift = 10; | ||
619 | break; | ||
620 | case OMAPFB_CHANNEL_OUT_DIGIT: | ||
621 | df_reg = DISPC_DEFAULT_COLOR1; | ||
622 | tr_reg = DISPC_TRANS_COLOR1; | ||
623 | shift = 12; | ||
624 | break; | ||
625 | default: | ||
626 | return -EINVAL; | ||
627 | } | ||
628 | switch (ck->key_type) { | ||
629 | case OMAPFB_COLOR_KEY_DISABLED: | ||
630 | val = 0; | ||
631 | break; | ||
632 | case OMAPFB_COLOR_KEY_GFX_DST: | ||
633 | val = 1; | ||
634 | break; | ||
635 | case OMAPFB_COLOR_KEY_VID_SRC: | ||
636 | val = 3; | ||
637 | break; | ||
638 | default: | ||
639 | return -EINVAL; | ||
640 | } | ||
641 | enable_lcd_clocks(1); | ||
642 | MOD_REG_FLD(DISPC_CONFIG, FLD_MASK(shift, 2), val << shift); | ||
643 | |||
644 | if (val != 0) | ||
645 | dispc_write_reg(tr_reg, ck->trans_key); | ||
646 | dispc_write_reg(df_reg, ck->background); | ||
647 | enable_lcd_clocks(0); | ||
648 | |||
649 | dispc.color_key = *ck; | ||
650 | |||
651 | return 0; | ||
652 | } | ||
653 | |||
/* Return the color-key settings cached by omap_dispc_set_color_key(). */
static int omap_dispc_get_color_key(struct omapfb_color_key *ck)
{
	*ck = dispc.color_key;
	return 0;
}
659 | |||
/*
 * Palette (CLUT) load hook — intentionally empty.
 * NOTE(review): palette RAM is allocated in alloc_palette_ram() but no
 * upload happens here; confirm the CLUT is loaded elsewhere before
 * relying on palette modes.
 */
static void load_palette(void)
{
}
663 | |||
/*
 * Switch between auto/manual update and disabled mode.
 *
 * Entering AUTO/MANUAL takes an LCD clock reference that is only dropped
 * when UPDATE_DISABLED is entered later, so the clocks keep running for
 * as long as updates are enabled (omap_dispc_suspend() relies on this).
 */
static int omap_dispc_set_update_mode(enum omapfb_update_mode mode)
{
	int r = 0;

	if (mode != dispc.update_mode) {
		switch (mode) {
		case OMAPFB_AUTO_UPDATE:
		case OMAPFB_MANUAL_UPDATE:
			enable_lcd_clocks(1);
			omap_dispc_enable_lcd_out(1);
			dispc.update_mode = mode;
			break;
		case OMAPFB_UPDATE_DISABLED:
			/* Stop output and wait up to 500 ms for the frame
			 * in flight to finish before cutting the clocks. */
			init_completion(&dispc.frame_done);
			omap_dispc_enable_lcd_out(0);
			if (!wait_for_completion_timeout(&dispc.frame_done,
					msecs_to_jiffies(500))) {
				dev_err(dispc.fbdev->dev,
					 "timeout waiting for FRAME DONE\n");
			}
			dispc.update_mode = mode;
			enable_lcd_clocks(0);
			break;
		default:
			r = -EINVAL;
		}
	}

	return r;
}
694 | |||
695 | static void omap_dispc_get_caps(int plane, struct omapfb_caps *caps) | ||
696 | { | ||
697 | caps->ctrl |= OMAPFB_CAPS_PLANE_RELOCATE_MEM; | ||
698 | if (plane > 0) | ||
699 | caps->ctrl |= OMAPFB_CAPS_PLANE_SCALE; | ||
700 | caps->plane_color |= (1 << OMAPFB_COLOR_RGB565) | | ||
701 | (1 << OMAPFB_COLOR_YUV422) | | ||
702 | (1 << OMAPFB_COLOR_YUY422); | ||
703 | if (plane == 0) | ||
704 | caps->plane_color |= (1 << OMAPFB_COLOR_CLUT_8BPP) | | ||
705 | (1 << OMAPFB_COLOR_CLUT_4BPP) | | ||
706 | (1 << OMAPFB_COLOR_CLUT_2BPP) | | ||
707 | (1 << OMAPFB_COLOR_CLUT_1BPP) | | ||
708 | (1 << OMAPFB_COLOR_RGB444); | ||
709 | } | ||
710 | |||
/* Return the update mode last set via omap_dispc_set_update_mode(). */
static enum omapfb_update_mode omap_dispc_get_update_mode(void)
{
	return dispc.update_mode;
}
715 | |||
/*
 * Load the YUV->RGB color-space conversion coefficients (BT.601-5, per
 * the table's name) into both video pipelines and set their
 * limited/full-range flag.
 */
static void setup_color_conv_coef(void)
{
	u32 mask = FLD_MASK(16, 11) | FLD_MASK(0, 11);
	int cf1_reg = DISPC_VID1_BASE + DISPC_VID_CONV_COEF0;
	int cf2_reg = DISPC_VID2_BASE + DISPC_VID_CONV_COEF0;
	int at1_reg = DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES;
	int at2_reg = DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES;
	/* 3x3 conversion matrix plus a range flag (0 = limited range). */
	const struct color_conv_coef {
		int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb;
		int full_range;
	} ctbl_bt601_5 = {
		298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
	};
	const struct color_conv_coef *ct;
/* Pack two signed coefficients, truncated to 11 bits, into one register. */
#define CVAL(x, y) (((x & 2047) << 16) | (y & 2047))

	ct = &ctbl_bt601_5;

	/* Five coefficient registers per pipeline, two values each. */
	MOD_REG_FLD(cf1_reg, mask, CVAL(ct->rcr, ct->ry));
	MOD_REG_FLD(cf1_reg + 4, mask, CVAL(ct->gy, ct->rcb));
	MOD_REG_FLD(cf1_reg + 8, mask, CVAL(ct->gcb, ct->gcr));
	MOD_REG_FLD(cf1_reg + 12, mask, CVAL(ct->bcr, ct->by));
	MOD_REG_FLD(cf1_reg + 16, mask, CVAL(0, ct->bcb));

	MOD_REG_FLD(cf2_reg, mask, CVAL(ct->rcr, ct->ry));
	MOD_REG_FLD(cf2_reg + 4, mask, CVAL(ct->gy, ct->rcb));
	MOD_REG_FLD(cf2_reg + 8, mask, CVAL(ct->gcb, ct->gcr));
	MOD_REG_FLD(cf2_reg + 12, mask, CVAL(ct->bcr, ct->by));
	MOD_REG_FLD(cf2_reg + 16, mask, CVAL(0, ct->bcb));
#undef CVAL

	/* Bit 11 of the attribute registers selects full-range input. */
	MOD_REG_FLD(at1_reg, (1 << 11), ct->full_range);
	MOD_REG_FLD(at2_reg, (1 << 11), ct->full_range);
}
750 | |||
/*
 * Derive the logic (lck_div) and pixel (pck_div) clock divisors for the
 * requested pixel clock @pck (Hz) from the DSS1 functional clock.
 * TFT panels need pck_div >= 2, passive panels >= 3. Both divisor
 * fields are 8 bits wide (see set_lcd_timings()), so each is clamped
 * to 255; if the requested rate is still unreachable a warning is
 * printed and the closest achievable rate is used.
 */
static void calc_ck_div(int is_tft, int pck, int *lck_div, int *pck_div)
{
	unsigned long fck, lck;

	*lck_div = 1;
	pck = max(1, pck);	/* guard against division by zero below */
	fck = clk_get_rate(dispc.dss1_fck);
	lck = fck;
	/* Round up so the generated pixel clock never exceeds @pck. */
	*pck_div = (lck + pck - 1) / pck;
	if (is_tft)
		*pck_div = max(2, *pck_div);
	else
		*pck_div = max(3, *pck_div);
	if (*pck_div > 255) {
		/* Pixel divisor saturated: divide the logic clock too. */
		*pck_div = 255;
		lck = pck * *pck_div;
		*lck_div = fck / lck;
		BUG_ON(*lck_div < 1);
		if (*lck_div > 255) {
			*lck_div = 255;
			dev_warn(dispc.fbdev->dev, "pixclock %d kHz too low.\n",
				 pck / 1000);
		}
	}
}
776 | |||
777 | static void set_lcd_tft_mode(int enable) | ||
778 | { | ||
779 | u32 mask; | ||
780 | |||
781 | mask = 1 << 3; | ||
782 | MOD_REG_FLD(DISPC_CONTROL, mask, enable ? mask : 0); | ||
783 | } | ||
784 | |||
/*
 * Program horizontal/vertical panel timings, signal polarities and the
 * pixel/logic clock divisors from the attached panel's parameters, then
 * write the pixel clock rate actually achieved back into the panel.
 */
static void set_lcd_timings(void)
{
	u32 l;
	int lck_div, pck_div;
	struct lcd_panel *panel = dispc.fbdev->panel;
	int is_tft = panel->config & OMAP_LCDC_PANEL_TFT;
	unsigned long fck;

	/* Horizontal: sync width (6 bits), front porch (8 bits), back
	 * porch (8 bits); each field stores (clamped value - 1). */
	l = dispc_read_reg(DISPC_TIMING_H);
	l &= ~(FLD_MASK(0, 6) | FLD_MASK(8, 8) | FLD_MASK(20, 8));
	l |= ( max(1, (min(64, panel->hsw))) - 1 ) << 0;
	l |= ( max(1, (min(256, panel->hfp))) - 1 ) << 8;
	l |= ( max(1, (min(256, panel->hbp))) - 1 ) << 20;
	dispc_write_reg(DISPC_TIMING_H, l);

	/* Vertical: sync width stored minus one, but the porches are
	 * stored as-is (0..255), hence the different clamping. */
	l = dispc_read_reg(DISPC_TIMING_V);
	l &= ~(FLD_MASK(0, 6) | FLD_MASK(8, 8) | FLD_MASK(20, 8));
	l |= ( max(1, (min(64, panel->vsw))) - 1 ) << 0;
	l |= ( max(0, (min(255, panel->vfp))) - 0 ) << 8;
	l |= ( max(0, (min(255, panel->vbp))) - 0 ) << 20;
	dispc_write_reg(DISPC_TIMING_V, l);

	/* Signal polarities (bits 17:12) and AC-bias frequency (low byte). */
	l = dispc_read_reg(DISPC_POL_FREQ);
	l &= ~FLD_MASK(12, 6);
	l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 12;
	l |= panel->acb & 0xff;
	dispc_write_reg(DISPC_POL_FREQ, l);

	calc_ck_div(is_tft, panel->pixel_clock * 1000, &lck_div, &pck_div);

	/* Logic clock divisor in bits 23:16, pixel clock divisor in 7:0. */
	l = dispc_read_reg(DISPC_DIVISOR);
	l &= ~(FLD_MASK(16, 8) | FLD_MASK(0, 8));
	l |= (lck_div << 16) | (pck_div << 0);
	dispc_write_reg(DISPC_DIVISOR, l);

	/* update panel info with the exact clock */
	fck = clk_get_rate(dispc.dss1_fck);
	panel->pixel_clock = fck / lck_div / pck_div / 1000;
}
824 | |||
825 | static void recalc_irq_mask(void) | ||
826 | { | ||
827 | int i; | ||
828 | unsigned long irq_mask = DISPC_IRQ_MASK_ERROR; | ||
829 | |||
830 | for (i = 0; i < MAX_IRQ_HANDLERS; i++) { | ||
831 | if (!dispc.irq_handlers[i].callback) | ||
832 | continue; | ||
833 | |||
834 | irq_mask |= dispc.irq_handlers[i].irq_mask; | ||
835 | } | ||
836 | |||
837 | enable_lcd_clocks(1); | ||
838 | MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask); | ||
839 | enable_lcd_clocks(0); | ||
840 | } | ||
841 | |||
842 | int omap_dispc_request_irq(unsigned long irq_mask, void (*callback)(void *data), | ||
843 | void *data) | ||
844 | { | ||
845 | int i; | ||
846 | |||
847 | BUG_ON(callback == NULL); | ||
848 | |||
849 | for (i = 0; i < MAX_IRQ_HANDLERS; i++) { | ||
850 | if (dispc.irq_handlers[i].callback) | ||
851 | continue; | ||
852 | |||
853 | dispc.irq_handlers[i].irq_mask = irq_mask; | ||
854 | dispc.irq_handlers[i].callback = callback; | ||
855 | dispc.irq_handlers[i].data = data; | ||
856 | recalc_irq_mask(); | ||
857 | |||
858 | return 0; | ||
859 | } | ||
860 | |||
861 | return -EBUSY; | ||
862 | } | ||
863 | EXPORT_SYMBOL(omap_dispc_request_irq); | ||
864 | |||
865 | void omap_dispc_free_irq(unsigned long irq_mask, void (*callback)(void *data), | ||
866 | void *data) | ||
867 | { | ||
868 | int i; | ||
869 | |||
870 | for (i = 0; i < MAX_IRQ_HANDLERS; i++) { | ||
871 | if (dispc.irq_handlers[i].callback == callback && | ||
872 | dispc.irq_handlers[i].data == data) { | ||
873 | dispc.irq_handlers[i].irq_mask = 0; | ||
874 | dispc.irq_handlers[i].callback = NULL; | ||
875 | dispc.irq_handlers[i].data = NULL; | ||
876 | recalc_irq_mask(); | ||
877 | return; | ||
878 | } | ||
879 | } | ||
880 | |||
881 | BUG(); | ||
882 | } | ||
883 | EXPORT_SYMBOL(omap_dispc_free_irq); | ||
884 | |||
/*
 * DISPC interrupt handler: completes frame_done on frame IRQs,
 * rate-limits error reports, dispatches to every registered handler
 * whose mask overlaps the status, then acks all status bits.
 */
static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
{
	u32 stat;
	int i = 0;

	enable_lcd_clocks(1);

	stat = dispc_read_reg(DISPC_IRQSTATUS);
	if (stat & DISPC_IRQ_FRAMEMASK)
		complete(&dispc.frame_done);

	if (stat & DISPC_IRQ_MASK_ERROR) {
		if (printk_ratelimit()) {
			dev_err(dispc.fbdev->dev, "irq error status %04x\n",
				stat & 0x7fff);
		}
	}

	for (i = 0; i < MAX_IRQ_HANDLERS; i++) {
		if (unlikely(dispc.irq_handlers[i].callback &&
			     (stat & dispc.irq_handlers[i].irq_mask)))
			dispc.irq_handlers[i].callback(
					dispc.irq_handlers[i].data);
	}

	/* Writing the status bits back acknowledges them. */
	dispc_write_reg(DISPC_IRQSTATUS, stat);

	enable_lcd_clocks(0);

	return IRQ_HANDLED;
}
916 | |||
917 | static int get_dss_clocks(void) | ||
918 | { | ||
919 | dispc.dss_ick = clk_get(&dispc.fbdev->dssdev->dev, "ick"); | ||
920 | if (IS_ERR(dispc.dss_ick)) { | ||
921 | dev_err(dispc.fbdev->dev, "can't get ick\n"); | ||
922 | return PTR_ERR(dispc.dss_ick); | ||
923 | } | ||
924 | |||
925 | dispc.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "fck"); | ||
926 | if (IS_ERR(dispc.dss1_fck)) { | ||
927 | dev_err(dispc.fbdev->dev, "can't get dss1_fck\n"); | ||
928 | clk_put(dispc.dss_ick); | ||
929 | return PTR_ERR(dispc.dss1_fck); | ||
930 | } | ||
931 | |||
932 | dispc.dss_54m_fck = clk_get(&dispc.fbdev->dssdev->dev, "tv_clk"); | ||
933 | if (IS_ERR(dispc.dss_54m_fck)) { | ||
934 | dev_err(dispc.fbdev->dev, "can't get tv_fck\n"); | ||
935 | clk_put(dispc.dss_ick); | ||
936 | clk_put(dispc.dss1_fck); | ||
937 | return PTR_ERR(dispc.dss_54m_fck); | ||
938 | } | ||
939 | |||
940 | return 0; | ||
941 | } | ||
942 | |||
/* Release the DSS clocks in the reverse order of get_dss_clocks(). */
static void put_dss_clocks(void)
{
	clk_put(dispc.dss_54m_fck);
	clk_put(dispc.dss1_fck);
	clk_put(dispc.dss_ick);
}
949 | |||
/*
 * Enable or disable the DSS interface (ick) and DSS1 functional (fck)
 * clocks as a pair. The interface clock is enabled first and disabled
 * last. Calls nest via the clk framework's reference counting.
 */
static void enable_lcd_clocks(int enable)
{
	if (enable) {
		clk_enable(dispc.dss_ick);
		clk_enable(dispc.dss1_fck);
	} else {
		clk_disable(dispc.dss1_fck);
		clk_disable(dispc.dss_ick);
	}
}
960 | |||
/* Gate the 54 MHz clock used by the digit (TV) output path. */
static void enable_digit_clocks(int enable)
{
	if (enable)
		clk_enable(dispc.dss_54m_fck);
	else
		clk_disable(dispc.dss_54m_fck);
}
968 | |||
/*
 * Suspend: in auto-update mode stop the LCD output, wait up to 500 ms
 * for the frame in flight to finish, then drop the clock reference that
 * was taken when auto-update mode was entered (see
 * omap_dispc_set_update_mode()).
 */
static void omap_dispc_suspend(void)
{
	if (dispc.update_mode == OMAPFB_AUTO_UPDATE) {
		init_completion(&dispc.frame_done);
		omap_dispc_enable_lcd_out(0);
		if (!wait_for_completion_timeout(&dispc.frame_done,
				msecs_to_jiffies(500))) {
			dev_err(dispc.fbdev->dev,
				"timeout waiting for FRAME DONE\n");
		}
		enable_lcd_clocks(0);
	}
}
982 | |||
/*
 * Resume: in auto-update mode re-take the clock reference dropped by
 * omap_dispc_suspend(), reprogram panel timings and palette for
 * internally-controlled panels, and restart LCD output.
 */
static void omap_dispc_resume(void)
{
	if (dispc.update_mode == OMAPFB_AUTO_UPDATE) {
		enable_lcd_clocks(1);
		if (!dispc.ext_mode) {
			set_lcd_timings();
			load_palette();
		}
		omap_dispc_enable_lcd_out(1);
	}
}
994 | |||
995 | |||
996 | static int omap_dispc_update_window(struct fb_info *fbi, | ||
997 | struct omapfb_update_window *win, | ||
998 | void (*complete_callback)(void *arg), | ||
999 | void *complete_callback_data) | ||
1000 | { | ||
1001 | return dispc.update_mode == OMAPFB_UPDATE_DISABLED ? -ENODEV : 0; | ||
1002 | } | ||
1003 | |||
/*
 * Create a kernel-space, write-combined mapping for a framebuffer
 * region whose physical memory exists but has no kernel virtual
 * address yet, using a throwaway on-stack vma for io_remap_pfn_range().
 *
 * NOTE(review): if io_remap_pfn_range() fails, the vm_struct obtained
 * from get_vm_area() is leaked — the error path should release it.
 */
static int mmap_kern(struct omapfb_mem_region *region)
{
	struct vm_struct *kvma;
	struct vm_area_struct vma;	/* temporary, only for the remap call */
	pgprot_t pgprot;
	unsigned long vaddr;

	kvma = get_vm_area(region->size, VM_IOREMAP);
	if (kvma == NULL) {
		dev_err(dispc.fbdev->dev, "can't get kernel vm area\n");
		return -ENOMEM;
	}
	vma.vm_mm = &init_mm;

	vaddr = (unsigned long)kvma->addr;

	pgprot = pgprot_writecombine(pgprot_kernel);
	vma.vm_start = vaddr;
	vma.vm_end = vaddr + region->size;
	if (io_remap_pfn_range(&vma, vaddr, region->paddr >> PAGE_SHIFT,
			       region->size, pgprot) < 0) {
		dev_err(dispc.fbdev->dev, "kernel mmap for FBMEM failed\n");
		return -EAGAIN;
	}
	region->vaddr = (void *)vaddr;

	return 0;
}
1032 | |||
/* VMA open: count one more live user mapping of this plane. */
static void mmap_user_open(struct vm_area_struct *vma)
{
	/* Plane index was stashed in vm_private_data at mmap time. */
	int plane = (int)vma->vm_private_data;

	atomic_inc(&dispc.map_count[plane]);
}
1039 | |||
/* VMA close: one user mapping of this plane went away. */
static void mmap_user_close(struct vm_area_struct *vma)
{
	/* Plane index was stashed in vm_private_data at mmap time. */
	int plane = (int)vma->vm_private_data;

	atomic_dec(&dispc.map_count[plane]);
}
1046 | |||
/* VM callbacks keeping dispc.map_count[] in sync with user mappings. */
static const struct vm_operations_struct mmap_user_ops = {
	.open = mmap_user_open,
	.close = mmap_user_close,
};
1051 | |||
/*
 * fb mmap() implementation: map the plane's framebuffer memory into
 * user space with write-combine attributes and install VM callbacks so
 * dispc.map_count[] tracks the number of live mappings
 * (omap_dispc_setup_mem() refuses to move a region while mapped).
 */
static int omap_dispc_mmap_user(struct fb_info *info,
				struct vm_area_struct *vma)
{
	struct omapfb_plane_struct *plane = info->par;
	unsigned long off;
	unsigned long start;
	u32 len;

	if (vma->vm_end - vma->vm_start == 0)
		return 0;
	/* Reject offsets that would overflow when shifted to bytes. */
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;
	off = vma->vm_pgoff << PAGE_SHIFT;

	/* The requested window must lie entirely inside the framebuffer. */
	start = info->fix.smem_start;
	len = info->fix.smem_len;
	if (off >= len)
		return -EINVAL;
	if ((vma->vm_end - vma->vm_start + off) > len)
		return -EINVAL;
	off += start;
	vma->vm_pgoff = off >> PAGE_SHIFT;
	vma->vm_flags |= VM_IO | VM_RESERVED;
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &mmap_user_ops;
	vma->vm_private_data = (void *)plane->idx;
	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	/* vm_ops.open won't be called for mmap itself. */
	atomic_inc(&dispc.map_count[plane->idx]);
	return 0;
}
1085 | |||
/* Undo mmap_kern(): release the kernel mapping of a region. */
static void unmap_kern(struct omapfb_mem_region *region)
{
	vunmap(region->vaddr);
}
1090 | |||
1091 | static int alloc_palette_ram(void) | ||
1092 | { | ||
1093 | dispc.palette_vaddr = dma_alloc_writecombine(dispc.fbdev->dev, | ||
1094 | MAX_PALETTE_SIZE, &dispc.palette_paddr, GFP_KERNEL); | ||
1095 | if (dispc.palette_vaddr == NULL) { | ||
1096 | dev_err(dispc.fbdev->dev, "failed to alloc palette memory\n"); | ||
1097 | return -ENOMEM; | ||
1098 | } | ||
1099 | |||
1100 | return 0; | ||
1101 | } | ||
1102 | |||
/* Free the palette buffer allocated by alloc_palette_ram(). */
static void free_palette_ram(void)
{
	dma_free_writecombine(dispc.fbdev->dev, MAX_PALETTE_SIZE,
			dispc.palette_vaddr, dispc.palette_paddr);
}
1108 | |||
1109 | static int alloc_fbmem(struct omapfb_mem_region *region) | ||
1110 | { | ||
1111 | region->vaddr = dma_alloc_writecombine(dispc.fbdev->dev, | ||
1112 | region->size, ®ion->paddr, GFP_KERNEL); | ||
1113 | |||
1114 | if (region->vaddr == NULL) { | ||
1115 | dev_err(dispc.fbdev->dev, "unable to allocate FB DMA memory\n"); | ||
1116 | return -ENOMEM; | ||
1117 | } | ||
1118 | |||
1119 | return 0; | ||
1120 | } | ||
1121 | |||
/* Free a framebuffer region allocated by alloc_fbmem(). */
static void free_fbmem(struct omapfb_mem_region *region)
{
	dma_free_writecombine(dispc.fbdev->dev, region->size,
			region->vaddr, region->paddr);
}
1127 | |||
1128 | static struct resmap *init_resmap(unsigned long start, size_t size) | ||
1129 | { | ||
1130 | unsigned page_cnt; | ||
1131 | struct resmap *res_map; | ||
1132 | |||
1133 | page_cnt = PAGE_ALIGN(size) / PAGE_SIZE; | ||
1134 | res_map = | ||
1135 | kzalloc(sizeof(struct resmap) + RESMAP_SIZE(page_cnt), GFP_KERNEL); | ||
1136 | if (res_map == NULL) | ||
1137 | return NULL; | ||
1138 | res_map->start = start; | ||
1139 | res_map->page_cnt = page_cnt; | ||
1140 | res_map->map = (unsigned long *)(res_map + 1); | ||
1141 | return res_map; | ||
1142 | } | ||
1143 | |||
/* Free a map created by init_resmap() (the bitmap is inline, one kfree). */
static void cleanup_resmap(struct resmap *res_map)
{
	kfree(res_map);
}
1148 | |||
1149 | static inline int resmap_mem_type(unsigned long start) | ||
1150 | { | ||
1151 | if (start >= OMAP2_SRAM_START && | ||
1152 | start < OMAP2_SRAM_START + OMAP2_SRAM_SIZE) | ||
1153 | return OMAPFB_MEMTYPE_SRAM; | ||
1154 | else | ||
1155 | return OMAPFB_MEMTYPE_SDRAM; | ||
1156 | } | ||
1157 | |||
1158 | static inline int resmap_page_reserved(struct resmap *res_map, unsigned page_nr) | ||
1159 | { | ||
1160 | return *RESMAP_PTR(res_map, page_nr) & RESMAP_MASK(page_nr) ? 1 : 0; | ||
1161 | } | ||
1162 | |||
/* Mark @page_nr reserved; reserving an already-reserved page is a bug. */
static inline void resmap_reserve_page(struct resmap *res_map, unsigned page_nr)
{
	BUG_ON(resmap_page_reserved(res_map, page_nr));
	*RESMAP_PTR(res_map, page_nr) |= RESMAP_MASK(page_nr);
}
1168 | |||
/* Mark @page_nr free; freeing a page that is not reserved is a bug. */
static inline void resmap_free_page(struct resmap *res_map, unsigned page_nr)
{
	BUG_ON(!resmap_page_reserved(res_map, page_nr));
	*RESMAP_PTR(res_map, page_nr) &= ~RESMAP_MASK(page_nr);
}
1174 | |||
1175 | static void resmap_reserve_region(unsigned long start, size_t size) | ||
1176 | { | ||
1177 | |||
1178 | struct resmap *res_map; | ||
1179 | unsigned start_page; | ||
1180 | unsigned end_page; | ||
1181 | int mtype; | ||
1182 | unsigned i; | ||
1183 | |||
1184 | mtype = resmap_mem_type(start); | ||
1185 | res_map = dispc.res_map[mtype]; | ||
1186 | dev_dbg(dispc.fbdev->dev, "reserve mem type %d start %08lx size %d\n", | ||
1187 | mtype, start, size); | ||
1188 | start_page = (start - res_map->start) / PAGE_SIZE; | ||
1189 | end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE; | ||
1190 | for (i = start_page; i < end_page; i++) | ||
1191 | resmap_reserve_page(res_map, i); | ||
1192 | } | ||
1193 | |||
1194 | static void resmap_free_region(unsigned long start, size_t size) | ||
1195 | { | ||
1196 | struct resmap *res_map; | ||
1197 | unsigned start_page; | ||
1198 | unsigned end_page; | ||
1199 | unsigned i; | ||
1200 | int mtype; | ||
1201 | |||
1202 | mtype = resmap_mem_type(start); | ||
1203 | res_map = dispc.res_map[mtype]; | ||
1204 | dev_dbg(dispc.fbdev->dev, "free mem type %d start %08lx size %d\n", | ||
1205 | mtype, start, size); | ||
1206 | start_page = (start - res_map->start) / PAGE_SIZE; | ||
1207 | end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE; | ||
1208 | for (i = start_page; i < end_page; i++) | ||
1209 | resmap_free_page(res_map, i); | ||
1210 | } | ||
1211 | |||
/*
 * First-fit allocator over the per-memory-type page bitmap: find a run
 * of free pages large enough for @size bytes, reserve it, and return
 * its start address. Returns 0 when no run is large enough.
 */
static unsigned long resmap_alloc_region(int mtype, size_t size)
{
	unsigned i;
	unsigned total;
	unsigned start_page;
	unsigned long start;
	struct resmap *res_map = dispc.res_map[mtype];

	BUG_ON(mtype >= DISPC_MEMTYPE_NUM || res_map == NULL || !size);

	/* From here on, size counts pages rather than bytes. */
	size = PAGE_ALIGN(size) / PAGE_SIZE;
	start_page = 0;
	total = 0;
	for (i = 0; i < res_map->page_cnt; i++) {
		if (resmap_page_reserved(res_map, i)) {
			/* Run broken; restart counting after this page. */
			start_page = i + 1;
			total = 0;
		} else if (++total == size)
			break;
	}
	if (total < size)
		return 0;

	start = res_map->start + start_page * PAGE_SIZE;
	resmap_reserve_region(start, size * PAGE_SIZE);

	return start;
}
1240 | |||
1241 | /* Note that this will only work for user mappings, we don't deal with | ||
1242 | * kernel mappings here, so fbcon will keep using the old region. | ||
1243 | */ | ||
1244 | static int omap_dispc_setup_mem(int plane, size_t size, int mem_type, | ||
1245 | unsigned long *paddr) | ||
1246 | { | ||
1247 | struct omapfb_mem_region *rg; | ||
1248 | unsigned long new_addr = 0; | ||
1249 | |||
1250 | if ((unsigned)plane > dispc.mem_desc.region_cnt) | ||
1251 | return -EINVAL; | ||
1252 | if (mem_type >= DISPC_MEMTYPE_NUM) | ||
1253 | return -EINVAL; | ||
1254 | if (dispc.res_map[mem_type] == NULL) | ||
1255 | return -ENOMEM; | ||
1256 | rg = &dispc.mem_desc.region[plane]; | ||
1257 | if (size == rg->size && mem_type == rg->type) | ||
1258 | return 0; | ||
1259 | if (atomic_read(&dispc.map_count[plane])) | ||
1260 | return -EBUSY; | ||
1261 | if (rg->size != 0) | ||
1262 | resmap_free_region(rg->paddr, rg->size); | ||
1263 | if (size != 0) { | ||
1264 | new_addr = resmap_alloc_region(mem_type, size); | ||
1265 | if (!new_addr) { | ||
1266 | /* Reallocate old region. */ | ||
1267 | resmap_reserve_region(rg->paddr, rg->size); | ||
1268 | return -ENOMEM; | ||
1269 | } | ||
1270 | } | ||
1271 | rg->paddr = new_addr; | ||
1272 | rg->size = size; | ||
1273 | rg->type = mem_type; | ||
1274 | |||
1275 | *paddr = new_addr; | ||
1276 | |||
1277 | return 0; | ||
1278 | } | ||
1279 | |||
1280 | static int setup_fbmem(struct omapfb_mem_desc *req_md) | ||
1281 | { | ||
1282 | struct omapfb_mem_region *rg; | ||
1283 | int i; | ||
1284 | int r; | ||
1285 | unsigned long mem_start[DISPC_MEMTYPE_NUM]; | ||
1286 | unsigned long mem_end[DISPC_MEMTYPE_NUM]; | ||
1287 | |||
1288 | if (!req_md->region_cnt) { | ||
1289 | dev_err(dispc.fbdev->dev, "no memory regions defined\n"); | ||
1290 | return -ENOENT; | ||
1291 | } | ||
1292 | |||
1293 | rg = &req_md->region[0]; | ||
1294 | memset(mem_start, 0xff, sizeof(mem_start)); | ||
1295 | memset(mem_end, 0, sizeof(mem_end)); | ||
1296 | |||
1297 | for (i = 0; i < req_md->region_cnt; i++, rg++) { | ||
1298 | int mtype; | ||
1299 | if (rg->paddr) { | ||
1300 | rg->alloc = 0; | ||
1301 | if (rg->vaddr == NULL) { | ||
1302 | rg->map = 1; | ||
1303 | if ((r = mmap_kern(rg)) < 0) | ||
1304 | return r; | ||
1305 | } | ||
1306 | } else { | ||
1307 | if (rg->type != OMAPFB_MEMTYPE_SDRAM) { | ||
1308 | dev_err(dispc.fbdev->dev, | ||
1309 | "unsupported memory type\n"); | ||
1310 | return -EINVAL; | ||
1311 | } | ||
1312 | rg->alloc = rg->map = 1; | ||
1313 | if ((r = alloc_fbmem(rg)) < 0) | ||
1314 | return r; | ||
1315 | } | ||
1316 | mtype = rg->type; | ||
1317 | |||
1318 | if (rg->paddr < mem_start[mtype]) | ||
1319 | mem_start[mtype] = rg->paddr; | ||
1320 | if (rg->paddr + rg->size > mem_end[mtype]) | ||
1321 | mem_end[mtype] = rg->paddr + rg->size; | ||
1322 | } | ||
1323 | |||
1324 | for (i = 0; i < DISPC_MEMTYPE_NUM; i++) { | ||
1325 | unsigned long start; | ||
1326 | size_t size; | ||
1327 | if (mem_end[i] == 0) | ||
1328 | continue; | ||
1329 | start = mem_start[i]; | ||
1330 | size = mem_end[i] - start; | ||
1331 | dispc.res_map[i] = init_resmap(start, size); | ||
1332 | r = -ENOMEM; | ||
1333 | if (dispc.res_map[i] == NULL) | ||
1334 | goto fail; | ||
1335 | /* Initial state is that everything is reserved. This | ||
1336 | * includes possible holes as well, which will never be | ||
1337 | * freed. | ||
1338 | */ | ||
1339 | resmap_reserve_region(start, size); | ||
1340 | } | ||
1341 | |||
1342 | dispc.mem_desc = *req_md; | ||
1343 | |||
1344 | return 0; | ||
1345 | fail: | ||
1346 | for (i = 0; i < DISPC_MEMTYPE_NUM; i++) { | ||
1347 | if (dispc.res_map[i] != NULL) | ||
1348 | cleanup_resmap(dispc.res_map[i]); | ||
1349 | } | ||
1350 | return r; | ||
1351 | } | ||
1352 | |||
/*
 * Tear down everything set up by setup_fbmem(): destroy the per-type
 * resource maps, then release each region — freeing those we allocated
 * ourselves, and only unmapping those we merely kernel-mapped.
 */
static void cleanup_fbmem(void)
{
	struct omapfb_mem_region *rg;
	int i;

	for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
		if (dispc.res_map[i] != NULL)
			cleanup_resmap(dispc.res_map[i]);
	}
	rg = &dispc.mem_desc.region[0];
	for (i = 0; i < dispc.mem_desc.region_cnt; i++, rg++) {
		if (rg->alloc)
			free_fbmem(rg);		/* allocated in setup_fbmem() */
		else {
			if (rg->map)
				unmap_kern(rg);	/* mapped, not owned, by us */
		}
	}
}
1372 | |||
1373 | static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode, | ||
1374 | struct omapfb_mem_desc *req_vram) | ||
1375 | { | ||
1376 | int r; | ||
1377 | u32 l; | ||
1378 | struct lcd_panel *panel = fbdev->panel; | ||
1379 | void __iomem *ram_fw_base; | ||
1380 | int tmo = 10000; | ||
1381 | int skip_init = 0; | ||
1382 | int i; | ||
1383 | |||
1384 | memset(&dispc, 0, sizeof(dispc)); | ||
1385 | |||
1386 | dispc.base = ioremap(DISPC_BASE, SZ_1K); | ||
1387 | if (!dispc.base) { | ||
1388 | dev_err(fbdev->dev, "can't ioremap DISPC\n"); | ||
1389 | return -ENOMEM; | ||
1390 | } | ||
1391 | |||
1392 | dispc.fbdev = fbdev; | ||
1393 | dispc.ext_mode = ext_mode; | ||
1394 | |||
1395 | init_completion(&dispc.frame_done); | ||
1396 | |||
1397 | if ((r = get_dss_clocks()) < 0) | ||
1398 | goto fail0; | ||
1399 | |||
1400 | enable_lcd_clocks(1); | ||
1401 | |||
1402 | #ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT | ||
1403 | l = dispc_read_reg(DISPC_CONTROL); | ||
1404 | /* LCD enabled ? */ | ||
1405 | if (l & 1) { | ||
1406 | pr_info("omapfb: skipping hardware initialization\n"); | ||
1407 | skip_init = 1; | ||
1408 | } | ||
1409 | #endif | ||
1410 | |||
1411 | if (!skip_init) { | ||
1412 | /* Reset monitoring works only w/ the 54M clk */ | ||
1413 | enable_digit_clocks(1); | ||
1414 | |||
1415 | /* Soft reset */ | ||
1416 | MOD_REG_FLD(DISPC_SYSCONFIG, 1 << 1, 1 << 1); | ||
1417 | |||
1418 | while (!(dispc_read_reg(DISPC_SYSSTATUS) & 1)) { | ||
1419 | if (!--tmo) { | ||
1420 | dev_err(dispc.fbdev->dev, "soft reset failed\n"); | ||
1421 | r = -ENODEV; | ||
1422 | enable_digit_clocks(0); | ||
1423 | goto fail1; | ||
1424 | } | ||
1425 | } | ||
1426 | |||
1427 | enable_digit_clocks(0); | ||
1428 | } | ||
1429 | |||
1430 | /* Enable smart standby/idle, autoidle and wakeup */ | ||
1431 | l = dispc_read_reg(DISPC_SYSCONFIG); | ||
1432 | l &= ~((3 << 12) | (3 << 3)); | ||
1433 | l |= (2 << 12) | (2 << 3) | (1 << 2) | (1 << 0); | ||
1434 | dispc_write_reg(DISPC_SYSCONFIG, l); | ||
1435 | omap_writel(1 << 0, DSS_BASE + DSS_SYSCONFIG); | ||
1436 | |||
1437 | /* Set functional clock autogating */ | ||
1438 | l = dispc_read_reg(DISPC_CONFIG); | ||
1439 | l |= 1 << 9; | ||
1440 | dispc_write_reg(DISPC_CONFIG, l); | ||
1441 | |||
1442 | l = dispc_read_reg(DISPC_IRQSTATUS); | ||
1443 | dispc_write_reg(DISPC_IRQSTATUS, l); | ||
1444 | |||
1445 | recalc_irq_mask(); | ||
1446 | |||
1447 | if ((r = request_irq(INT_24XX_DSS_IRQ, omap_dispc_irq_handler, | ||
1448 | 0, MODULE_NAME, fbdev)) < 0) { | ||
1449 | dev_err(dispc.fbdev->dev, "can't get DSS IRQ\n"); | ||
1450 | goto fail1; | ||
1451 | } | ||
1452 | |||
1453 | /* L3 firewall setting: enable access to OCM RAM */ | ||
1454 | ram_fw_base = ioremap(0x68005000, SZ_1K); | ||
1455 | if (!ram_fw_base) { | ||
1456 | dev_err(dispc.fbdev->dev, "Cannot ioremap to enable OCM RAM\n"); | ||
1457 | goto fail1; | ||
1458 | } | ||
1459 | __raw_writel(0x402000b0, ram_fw_base + 0xa0); | ||
1460 | iounmap(ram_fw_base); | ||
1461 | |||
1462 | if ((r = alloc_palette_ram()) < 0) | ||
1463 | goto fail2; | ||
1464 | |||
1465 | if ((r = setup_fbmem(req_vram)) < 0) | ||
1466 | goto fail3; | ||
1467 | |||
1468 | if (!skip_init) { | ||
1469 | for (i = 0; i < dispc.mem_desc.region_cnt; i++) { | ||
1470 | memset(dispc.mem_desc.region[i].vaddr, 0, | ||
1471 | dispc.mem_desc.region[i].size); | ||
1472 | } | ||
1473 | |||
1474 | /* Set logic clock to fck, pixel clock to fck/2 for now */ | ||
1475 | MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(16, 8), 1 << 16); | ||
1476 | MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(0, 8), 2 << 0); | ||
1477 | |||
1478 | setup_plane_fifo(0, ext_mode); | ||
1479 | setup_plane_fifo(1, ext_mode); | ||
1480 | setup_plane_fifo(2, ext_mode); | ||
1481 | |||
1482 | setup_color_conv_coef(); | ||
1483 | |||
1484 | set_lcd_tft_mode(panel->config & OMAP_LCDC_PANEL_TFT); | ||
1485 | set_load_mode(DISPC_LOAD_FRAME_ONLY); | ||
1486 | |||
1487 | if (!ext_mode) { | ||
1488 | set_lcd_data_lines(panel->data_lines); | ||
1489 | omap_dispc_set_lcd_size(panel->x_res, panel->y_res); | ||
1490 | set_lcd_timings(); | ||
1491 | } else | ||
1492 | set_lcd_data_lines(panel->bpp); | ||
1493 | enable_rfbi_mode(ext_mode); | ||
1494 | } | ||
1495 | |||
1496 | l = dispc_read_reg(DISPC_REVISION); | ||
1497 | pr_info("omapfb: DISPC version %d.%d initialized\n", | ||
1498 | l >> 4 & 0x0f, l & 0x0f); | ||
1499 | enable_lcd_clocks(0); | ||
1500 | |||
1501 | return 0; | ||
1502 | fail3: | ||
1503 | free_palette_ram(); | ||
1504 | fail2: | ||
1505 | free_irq(INT_24XX_DSS_IRQ, fbdev); | ||
1506 | fail1: | ||
1507 | enable_lcd_clocks(0); | ||
1508 | put_dss_clocks(); | ||
1509 | fail0: | ||
1510 | iounmap(dispc.base); | ||
1511 | return r; | ||
1512 | } | ||
1513 | |||
/*
 * Undo omap_dispc_init(): stop updates, disable every plane, then
 * release memory, palette RAM, the IRQ, clocks and the register
 * mapping — strictly the reverse of the init order.
 */
static void omap_dispc_cleanup(void)
{
	int i;

	omap_dispc_set_update_mode(OMAPFB_UPDATE_DISABLED);
	/* This will also disable clocks that are on */
	for (i = 0; i < dispc.mem_desc.region_cnt; i++)
		omap_dispc_enable_plane(i, 0);
	cleanup_fbmem();
	free_palette_ram();
	free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
	put_dss_clocks();
	iounmap(dispc.base);
}
1528 | |||
/*
 * lcd_ctrl operations for the internal OMAP2/3 display controller;
 * the omapfb core uses this table when no external controller is
 * configured (see the ext_mode flag passed to omap_dispc_init()).
 */
const struct lcd_ctrl omap2_int_ctrl = {
	.name			= "internal",
	.init			= omap_dispc_init,
	.cleanup		= omap_dispc_cleanup,
	.get_caps		= omap_dispc_get_caps,
	.set_update_mode	= omap_dispc_set_update_mode,
	.get_update_mode	= omap_dispc_get_update_mode,
	.update_window		= omap_dispc_update_window,
	.suspend		= omap_dispc_suspend,
	.resume			= omap_dispc_resume,
	.setup_plane		= omap_dispc_setup_plane,
	.setup_mem		= omap_dispc_setup_mem,
	.set_scale		= omap_dispc_set_scale,
	.enable_plane		= omap_dispc_enable_plane,
	.set_color_key		= omap_dispc_set_color_key,
	.get_color_key		= omap_dispc_get_color_key,
	.mmap			= omap_dispc_mmap_user,
};
diff --git a/drivers/video/omap/dispc.h b/drivers/video/omap/dispc.h new file mode 100644 index 00000000000..c15ea77f060 --- /dev/null +++ b/drivers/video/omap/dispc.h | |||
@@ -0,0 +1,46 @@ | |||
/* Internal interface to the OMAP2/3 display controller (DISPC). */
#ifndef _DISPC_H
#define _DISPC_H

#include <linux/interrupt.h>

/* Hardware pipeline (plane) indices */
#define DISPC_PLANE_GFX			0
#define DISPC_PLANE_VID1		1
#define DISPC_PLANE_VID2		2

/* Pixel format codes programmed into the plane attribute registers */
#define DISPC_RGB_1_BPP			0x00
#define DISPC_RGB_2_BPP			0x01
#define DISPC_RGB_4_BPP			0x02
#define DISPC_RGB_8_BPP			0x03
#define DISPC_RGB_12_BPP		0x04
#define DISPC_RGB_16_BPP		0x06
#define DISPC_RGB_24_BPP		0x08
#define DISPC_RGB_24_BPP_UNPACK_32	0x09
#define DISPC_YUV2_422			0x0a
#define DISPC_UYVY_422			0x0b

/* DMA burst size selectors */
#define DISPC_BURST_4x32		0
#define DISPC_BURST_8x32		1
#define DISPC_BURST_16x32		2

/* Frame/CLUT load modes (see set_load_mode()) */
#define DISPC_LOAD_CLUT_AND_FRAME	0x00
#define DISPC_LOAD_CLUT_ONLY		0x01
#define DISPC_LOAD_FRAME_ONLY		0x02
#define DISPC_LOAD_CLUT_ONCE_FRAME	0x03

/* Number of active TFT data lines on the LCD interface */
#define DISPC_TFT_DATA_LINES_12		0
#define DISPC_TFT_DATA_LINES_16		1
#define DISPC_TFT_DATA_LINES_18		2
#define DISPC_TFT_DATA_LINES_24		3

extern void omap_dispc_set_lcd_size(int width, int height);

extern void omap_dispc_enable_lcd_out(int enable);
extern void omap_dispc_enable_digit_out(int enable);

extern int  omap_dispc_request_irq(unsigned long irq_mask,
				   void (*callback)(void *data), void *data);
extern void omap_dispc_free_irq(unsigned long irq_mask,
				void (*callback)(void *data), void *data);

/* Operations table exported to the omapfb core */
extern const struct lcd_ctrl omap2_int_ctrl;
#endif
diff --git a/drivers/video/omap/lcd_2430sdp.c b/drivers/video/omap/lcd_2430sdp.c new file mode 100644 index 00000000000..e3eccc9af78 --- /dev/null +++ b/drivers/video/omap/lcd_2430sdp.c | |||
@@ -0,0 +1,203 @@ | |||
1 | /* | ||
2 | * LCD panel support for the TI 2430SDP board | ||
3 | * | ||
4 | * Copyright (C) 2007 MontaVista | ||
5 | * Author: Hunyue Yau <hyau@mvista.com> | ||
6 | * | ||
7 | * Derived from drivers/video/omap/lcd-apollon.c | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the | ||
11 | * Free Software Foundation; either version 2 of the License, or (at your | ||
12 | * option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
17 | * General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along | ||
20 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
21 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/gpio.h> | ||
28 | #include <linux/i2c/twl.h> | ||
29 | |||
30 | #include <plat/mux.h> | ||
31 | #include <asm/mach-types.h> | ||
32 | |||
33 | #include "omapfb.h" | ||
34 | |||
35 | #define SDP2430_LCD_PANEL_BACKLIGHT_GPIO 91 | ||
36 | #define SDP2430_LCD_PANEL_ENABLE_GPIO 154 | ||
37 | #define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 24 | ||
38 | #define SDP3430_LCD_PANEL_ENABLE_GPIO 28 | ||
39 | |||
40 | static unsigned backlight_gpio; | ||
41 | static unsigned enable_gpio; | ||
42 | |||
43 | #define LCD_PIXCLOCK_MAX 5400 /* freq 5.4 MHz */ | ||
44 | #define PM_RECEIVER TWL4030_MODULE_PM_RECEIVER | ||
45 | #define ENABLE_VAUX2_DEDICATED 0x09 | ||
46 | #define ENABLE_VAUX2_DEV_GRP 0x20 | ||
47 | #define ENABLE_VAUX3_DEDICATED 0x03 | ||
48 | #define ENABLE_VAUX3_DEV_GRP 0x20 | ||
49 | |||
50 | #define ENABLE_VPLL2_DEDICATED 0x05 | ||
51 | #define ENABLE_VPLL2_DEV_GRP 0xE0 | ||
52 | #define TWL4030_VPLL2_DEV_GRP 0x33 | ||
53 | #define TWL4030_VPLL2_DEDICATED 0x36 | ||
54 | |||
55 | #define t2_out(c, r, v) twl_i2c_write_u8(c, r, v) | ||
56 | |||
57 | |||
58 | static int sdp2430_panel_init(struct lcd_panel *panel, | ||
59 | struct omapfb_device *fbdev) | ||
60 | { | ||
61 | if (machine_is_omap_3430sdp()) { | ||
62 | enable_gpio = SDP3430_LCD_PANEL_ENABLE_GPIO; | ||
63 | backlight_gpio = SDP3430_LCD_PANEL_BACKLIGHT_GPIO; | ||
64 | } else { | ||
65 | enable_gpio = SDP2430_LCD_PANEL_ENABLE_GPIO; | ||
66 | backlight_gpio = SDP2430_LCD_PANEL_BACKLIGHT_GPIO; | ||
67 | } | ||
68 | |||
69 | gpio_request(enable_gpio, "LCD enable"); /* LCD panel */ | ||
70 | gpio_request(backlight_gpio, "LCD bl"); /* LCD backlight */ | ||
71 | gpio_direction_output(enable_gpio, 0); | ||
72 | gpio_direction_output(backlight_gpio, 0); | ||
73 | |||
74 | return 0; | ||
75 | } | ||
76 | |||
/* Release the GPIOs claimed in sdp2430_panel_init(). */
static void sdp2430_panel_cleanup(struct lcd_panel *panel)
{
	gpio_free(backlight_gpio);
	gpio_free(enable_gpio);
}
82 | |||
/*
 * Power the panel up: enable VPLL2 on ES2+ 3430 boards, raise the
 * enable and backlight GPIOs, then program the VAUX regulator for the
 * board variant through the TWL4030 PM receiver.
 *
 * NOTE(review): t2_out(c, r, v) expands to twl_i2c_write_u8(c, r, v),
 * so despite the macro's parameter names the second argument is the
 * value and the third the register — the calls below pass
 * (module, value, register), which matches that expansion.
 */
static int sdp2430_panel_enable(struct lcd_panel *panel)
{
	u8 ded_val, ded_reg;
	u8 grp_val, grp_reg;

	if (machine_is_omap_3430sdp()) {
		ded_reg = TWL4030_VAUX3_DEDICATED;
		ded_val = ENABLE_VAUX3_DEDICATED;
		grp_reg = TWL4030_VAUX3_DEV_GRP;
		grp_val = ENABLE_VAUX3_DEV_GRP;

		if (omap_rev() > OMAP3430_REV_ES1_0) {
			t2_out(PM_RECEIVER, ENABLE_VPLL2_DEDICATED,
					TWL4030_VPLL2_DEDICATED);
			t2_out(PM_RECEIVER, ENABLE_VPLL2_DEV_GRP,
					TWL4030_VPLL2_DEV_GRP);
		}
	} else {
		ded_reg = TWL4030_VAUX2_DEDICATED;
		ded_val = ENABLE_VAUX2_DEDICATED;
		grp_reg = TWL4030_VAUX2_DEV_GRP;
		grp_val = ENABLE_VAUX2_DEV_GRP;
	}

	gpio_set_value(enable_gpio, 1);
	gpio_set_value(backlight_gpio, 1);

	if (0 != t2_out(PM_RECEIVER, ded_val, ded_reg))
		return -EIO;
	if (0 != t2_out(PM_RECEIVER, grp_val, grp_reg))
		return -EIO;

	return 0;
}
117 | |||
/*
 * Power the panel down: drop the GPIOs, and on ES2+ 3430 boards also
 * switch VPLL2 off, waiting 4 ms for the supply to settle.
 */
static void sdp2430_panel_disable(struct lcd_panel *panel)
{
	gpio_set_value(enable_gpio, 0);
	gpio_set_value(backlight_gpio, 0);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEDICATED);
		t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEV_GRP);
		msleep(4);
	}
}
128 | |||
/* This panel advertises no optional capabilities. */
static unsigned long sdp2430_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}
133 | |||
/* 240x320 TFT panel description for the 2430/3430 SDP boards;
 * registered with the omapfb core at probe time. */
struct lcd_panel sdp2430_panel = {
	.name		= "sdp2430",
	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
			  OMAP_LCDC_INV_HSYNC,

	.bpp		= 16,
	.data_lines	= 16,
	.x_res		= 240,
	.y_res		= 320,
	.hsw		= 3,		/* hsync_len (4) - 1 */
	.hfp		= 3,		/* right_margin (4) - 1 */
	.hbp		= 39,		/* left_margin (40) - 1 */
	.vsw		= 1,		/* vsync_len (2) - 1 */
	.vfp		= 2,		/* lower_margin */
	.vbp		= 7,		/* upper_margin (8) - 1 */

	.pixel_clock	= LCD_PIXCLOCK_MAX,

	.init		= sdp2430_panel_init,
	.cleanup	= sdp2430_panel_cleanup,
	.enable		= sdp2430_panel_enable,
	.disable	= sdp2430_panel_disable,
	.get_caps	= sdp2430_panel_get_caps,
};
158 | |||
/* Register the panel description with the omapfb core. */
static int sdp2430_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&sdp2430_panel);
	return 0;
}
164 | |||
/* Nothing to undo: the panel stays registered with the core. */
static int sdp2430_panel_remove(struct platform_device *pdev)
{
	return 0;
}
169 | |||
/* Power handling is done via enable()/disable(); nothing extra here. */
static int sdp2430_panel_suspend(struct platform_device *pdev,
				   pm_message_t mesg)
{
	return 0;
}
175 | |||
/* Counterpart of suspend(); intentionally a no-op. */
static int sdp2430_panel_resume(struct platform_device *pdev)
{
	return 0;
}
180 | |||
/* Platform glue; bound against the "sdp2430_lcd" platform device. */
struct platform_driver sdp2430_panel_driver = {
	.probe		= sdp2430_panel_probe,
	.remove		= sdp2430_panel_remove,
	.suspend	= sdp2430_panel_suspend,
	.resume		= sdp2430_panel_resume,
	.driver		= {
		.name	= "sdp2430_lcd",
		.owner	= THIS_MODULE,
	},
};
191 | |||
/* Module entry: register the platform driver. */
static int __init sdp2430_panel_drv_init(void)
{
	return platform_driver_register(&sdp2430_panel_driver);
}
196 | |||
/* Module exit: unregister the platform driver. */
static void __exit sdp2430_panel_drv_exit(void)
{
	platform_driver_unregister(&sdp2430_panel_driver);
}
201 | |||
202 | module_init(sdp2430_panel_drv_init); | ||
203 | module_exit(sdp2430_panel_drv_exit); | ||
diff --git a/drivers/video/omap/lcd_apollon.c b/drivers/video/omap/lcd_apollon.c new file mode 100644 index 00000000000..10459d8bd9a --- /dev/null +++ b/drivers/video/omap/lcd_apollon.c | |||
@@ -0,0 +1,136 @@ | |||
1 | /* | ||
2 | * LCD panel support for the Samsung OMAP2 Apollon board | ||
3 | * | ||
4 | * Copyright (C) 2005,2006 Samsung Electronics | ||
5 | * Author: Kyungmin Park <kyungmin.park@samsung.com> | ||
6 | * | ||
7 | * Derived from drivers/video/omap/lcd-h4.c | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the | ||
11 | * Free Software Foundation; either version 2 of the License, or (at your | ||
12 | * option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
17 | * General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along | ||
20 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
21 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | |||
27 | #include <mach/gpio.h> | ||
28 | |||
29 | #include "omapfb.h" | ||
30 | |||
31 | /* #define USE_35INCH_LCD 1 */ | ||
32 | |||
/* The Apollon panel needs no board-level setup. */
static int apollon_panel_init(struct lcd_panel *panel,
				struct omapfb_device *fbdev)
{
	return 0;
}
38 | |||
/* Nothing was acquired in init(); nothing to release. */
static void apollon_panel_cleanup(struct lcd_panel *panel)
{
}
42 | |||
/* Panel power is not software-controlled on this board. */
static int apollon_panel_enable(struct lcd_panel *panel)
{
	return 0;
}
47 | |||
/* Counterpart of enable(); intentionally a no-op. */
static void apollon_panel_disable(struct lcd_panel *panel)
{
}
51 | |||
/* This panel advertises no optional capabilities. */
static unsigned long apollon_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}
56 | |||
/* Panel description for the Samsung Apollon board.  Two fits are
 * provided: a 3.5" 240x320 panel (compile-time USE_35INCH_LCD) and
 * the default 480x272 panel. */
struct lcd_panel apollon_panel = {
	.name		= "apollon",
	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
			  OMAP_LCDC_INV_HSYNC,

	.bpp		= 16,
	.data_lines	= 18,
#ifdef USE_35INCH_LCD
	.x_res		= 240,
	.y_res		= 320,
	.hsw		= 2,
	.hfp		= 3,
	.hbp		= 9,
	.vsw		= 4,
	.vfp		= 3,
	.vbp		= 5,
#else
	.x_res		= 480,
	.y_res		= 272,
	.hsw		= 41,
	.hfp		= 2,
	.hbp		= 2,
	.vsw		= 10,
	.vfp		= 2,
	.vbp		= 2,
#endif
	.pixel_clock	= 6250,

	.init		= apollon_panel_init,
	.cleanup	= apollon_panel_cleanup,
	.enable		= apollon_panel_enable,
	.disable	= apollon_panel_disable,
	.get_caps	= apollon_panel_get_caps,
};
91 | |||
/* Register the panel description with the omapfb core. */
static int apollon_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&apollon_panel);
	return 0;
}
97 | |||
/* Nothing to undo: the panel stays registered with the core. */
static int apollon_panel_remove(struct platform_device *pdev)
{
	return 0;
}
102 | |||
/* No power handling needed for this panel. */
static int apollon_panel_suspend(struct platform_device *pdev,
				  pm_message_t mesg)
{
	return 0;
}
108 | |||
/* Counterpart of suspend(); intentionally a no-op. */
static int apollon_panel_resume(struct platform_device *pdev)
{
	return 0;
}
113 | |||
/* Platform glue; bound against the "apollon_lcd" platform device. */
struct platform_driver apollon_panel_driver = {
	.probe		= apollon_panel_probe,
	.remove		= apollon_panel_remove,
	.suspend	= apollon_panel_suspend,
	.resume		= apollon_panel_resume,
	.driver		= {
		.name	= "apollon_lcd",
		.owner	= THIS_MODULE,
	},
};
124 | |||
/* Module entry: register the platform driver. */
static int __init apollon_panel_drv_init(void)
{
	return platform_driver_register(&apollon_panel_driver);
}
129 | |||
/* Module exit: unregister the platform driver. */
static void __exit apollon_panel_drv_exit(void)
{
	platform_driver_unregister(&apollon_panel_driver);
}
134 | |||
135 | module_init(apollon_panel_drv_init); | ||
136 | module_exit(apollon_panel_drv_exit); | ||
diff --git a/drivers/video/omap/lcd_h4.c b/drivers/video/omap/lcd_h4.c new file mode 100644 index 00000000000..03a06a98275 --- /dev/null +++ b/drivers/video/omap/lcd_h4.c | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * LCD panel support for the TI OMAP H4 board | ||
3 | * | ||
4 | * Copyright (C) 2004 Nokia Corporation | ||
5 | * Author: Imre Deak <imre.deak@nokia.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the | ||
9 | * Free Software Foundation; either version 2 of the License, or (at your | ||
10 | * option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/module.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | |||
25 | #include "omapfb.h" | ||
26 | |||
/* The H4 panel needs no board-level setup. */
static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
{
	return 0;
}
31 | |||
/* Nothing was acquired in init(); nothing to release. */
static void h4_panel_cleanup(struct lcd_panel *panel)
{
}
35 | |||
/* Panel power is not software-controlled on this board. */
static int h4_panel_enable(struct lcd_panel *panel)
{
	return 0;
}
40 | |||
/* Counterpart of enable(); intentionally a no-op. */
static void h4_panel_disable(struct lcd_panel *panel)
{
}
44 | |||
/* This panel advertises no optional capabilities. */
static unsigned long h4_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}
49 | |||
/* 240x320 TFT panel description for the OMAP H4 board. */
static struct lcd_panel h4_panel = {
	.name		= "h4",
	.config		= OMAP_LCDC_PANEL_TFT,

	.bpp		= 16,
	.data_lines	= 16,
	.x_res		= 240,
	.y_res		= 320,
	.pixel_clock	= 6250,
	.hsw		= 15,
	.hfp		= 15,
	.hbp		= 60,
	.vsw		= 1,
	.vfp		= 1,
	.vbp		= 1,

	.init		= h4_panel_init,
	.cleanup	= h4_panel_cleanup,
	.enable		= h4_panel_enable,
	.disable	= h4_panel_disable,
	.get_caps	= h4_panel_get_caps,
};
72 | |||
/* Register the panel description with the omapfb core. */
static int h4_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&h4_panel);
	return 0;
}
78 | |||
/* Nothing to undo: the panel stays registered with the core. */
static int h4_panel_remove(struct platform_device *pdev)
{
	return 0;
}
83 | |||
/* No power handling needed for this panel. */
static int h4_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	return 0;
}
88 | |||
/* Counterpart of suspend(); intentionally a no-op. */
static int h4_panel_resume(struct platform_device *pdev)
{
	return 0;
}
93 | |||
/* Platform glue; bound against the "lcd_h4" platform device. */
static struct platform_driver h4_panel_driver = {
	.probe		= h4_panel_probe,
	.remove		= h4_panel_remove,
	.suspend	= h4_panel_suspend,
	.resume		= h4_panel_resume,
	.driver		= {
		.name	= "lcd_h4",
		.owner	= THIS_MODULE,
	},
};
104 | |||
/* Module entry: register the platform driver. */
static int __init h4_panel_drv_init(void)
{
	return platform_driver_register(&h4_panel_driver);
}
109 | |||
/* Module exit: unregister the platform driver. */
static void __exit h4_panel_drv_cleanup(void)
{
	platform_driver_unregister(&h4_panel_driver);
}
114 | |||
115 | module_init(h4_panel_drv_init); | ||
116 | module_exit(h4_panel_drv_cleanup); | ||
117 | |||
diff --git a/drivers/video/omap/lcd_ldp.c b/drivers/video/omap/lcd_ldp.c new file mode 100644 index 00000000000..0f5952cae85 --- /dev/null +++ b/drivers/video/omap/lcd_ldp.c | |||
@@ -0,0 +1,201 @@ | |||
1 | /* | ||
2 | * LCD panel support for the TI LDP board | ||
3 | * | ||
4 | * Copyright (C) 2007 WindRiver | ||
5 | * Author: Stanley Miao <stanley.miao@windriver.com> | ||
6 | * | ||
7 | * Derived from drivers/video/omap/lcd-2430sdp.c | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the | ||
11 | * Free Software Foundation; either version 2 of the License, or (at your | ||
12 | * option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
17 | * General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along | ||
20 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
21 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/i2c/twl.h> | ||
28 | |||
29 | #include <mach/gpio.h> | ||
30 | #include <plat/mux.h> | ||
31 | #include <asm/mach-types.h> | ||
32 | |||
33 | #include "omapfb.h" | ||
34 | |||
35 | #define LCD_PANEL_BACKLIGHT_GPIO (15 + OMAP_MAX_GPIO_LINES) | ||
36 | #define LCD_PANEL_ENABLE_GPIO (7 + OMAP_MAX_GPIO_LINES) | ||
37 | |||
38 | #define LCD_PANEL_RESET_GPIO 55 | ||
39 | #define LCD_PANEL_QVGA_GPIO 56 | ||
40 | |||
41 | #ifdef CONFIG_FB_OMAP_LCD_VGA | ||
42 | #define LCD_XRES 480 | ||
43 | #define LCD_YRES 640 | ||
44 | #define LCD_PIXCLOCK_MAX 41700 | ||
45 | #else | ||
46 | #define LCD_XRES 240 | ||
47 | #define LCD_YRES 320 | ||
48 | #define LCD_PIXCLOCK_MAX 185186 | ||
49 | #endif | ||
50 | |||
51 | #define PM_RECEIVER TWL4030_MODULE_PM_RECEIVER | ||
52 | #define ENABLE_VAUX2_DEDICATED 0x09 | ||
53 | #define ENABLE_VAUX2_DEV_GRP 0x20 | ||
54 | #define ENABLE_VAUX3_DEDICATED 0x03 | ||
55 | #define ENABLE_VAUX3_DEV_GRP 0x20 | ||
56 | |||
57 | #define ENABLE_VPLL2_DEDICATED 0x05 | ||
58 | #define ENABLE_VPLL2_DEV_GRP 0xE0 | ||
59 | #define TWL4030_VPLL2_DEV_GRP 0x33 | ||
60 | #define TWL4030_VPLL2_DEDICATED 0x36 | ||
61 | |||
62 | #define t2_out(c, r, v) twl_i2c_write_u8(c, r, v) | ||
63 | |||
64 | |||
65 | static int ldp_panel_init(struct lcd_panel *panel, | ||
66 | struct omapfb_device *fbdev) | ||
67 | { | ||
68 | gpio_request(LCD_PANEL_RESET_GPIO, "lcd reset"); | ||
69 | gpio_request(LCD_PANEL_QVGA_GPIO, "lcd qvga"); | ||
70 | gpio_request(LCD_PANEL_ENABLE_GPIO, "lcd panel"); | ||
71 | gpio_request(LCD_PANEL_BACKLIGHT_GPIO, "lcd backlight"); | ||
72 | |||
73 | gpio_direction_output(LCD_PANEL_QVGA_GPIO, 0); | ||
74 | gpio_direction_output(LCD_PANEL_RESET_GPIO, 0); | ||
75 | gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 0); | ||
76 | gpio_direction_output(LCD_PANEL_BACKLIGHT_GPIO, 0); | ||
77 | |||
78 | #ifdef CONFIG_FB_OMAP_LCD_VGA | ||
79 | gpio_set_value(LCD_PANEL_QVGA_GPIO, 0); | ||
80 | #else | ||
81 | gpio_set_value(LCD_PANEL_QVGA_GPIO, 1); | ||
82 | #endif | ||
83 | gpio_set_value(LCD_PANEL_RESET_GPIO, 1); | ||
84 | |||
85 | return 0; | ||
86 | } | ||
87 | |||
/* Release the GPIOs claimed in ldp_panel_init(), in reverse order. */
static void ldp_panel_cleanup(struct lcd_panel *panel)
{
	gpio_free(LCD_PANEL_BACKLIGHT_GPIO);
	gpio_free(LCD_PANEL_ENABLE_GPIO);
	gpio_free(LCD_PANEL_QVGA_GPIO);
	gpio_free(LCD_PANEL_RESET_GPIO);
}
95 | |||
/*
 * Power the panel up: enable VPLL2, raise the enable and backlight
 * GPIOs, then switch the VAUX3 supply on through the TWL4030.
 * NOTE(review): t2_out(c, r, v) == twl_i2c_write_u8(c, r, v), i.e.
 * second argument is the value, third the register; the calls below
 * follow that (module, value, register) order.
 */
static int ldp_panel_enable(struct lcd_panel *panel)
{
	if (0 != t2_out(PM_RECEIVER, ENABLE_VPLL2_DEDICATED,
			TWL4030_VPLL2_DEDICATED))
		return -EIO;
	if (0 != t2_out(PM_RECEIVER, ENABLE_VPLL2_DEV_GRP,
			TWL4030_VPLL2_DEV_GRP))
		return -EIO;

	gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 1);
	gpio_direction_output(LCD_PANEL_BACKLIGHT_GPIO, 1);

	if (0 != t2_out(PM_RECEIVER, ENABLE_VAUX3_DEDICATED,
			TWL4030_VAUX3_DEDICATED))
		return -EIO;
	if (0 != t2_out(PM_RECEIVER, ENABLE_VAUX3_DEV_GRP,
			TWL4030_VAUX3_DEV_GRP))
		return -EIO;

	return 0;
}
117 | |||
/*
 * Power the panel down: drop the GPIOs, switch VPLL2 off and wait
 * 4 ms for the supply to settle.
 */
static void ldp_panel_disable(struct lcd_panel *panel)
{
	gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 0);
	gpio_direction_output(LCD_PANEL_BACKLIGHT_GPIO, 0);

	t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEDICATED);
	t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEV_GRP);
	msleep(4);
}
127 | |||
/* This panel advertises no optional capabilities. */
static unsigned long ldp_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}
132 | |||
/* TFT panel description for the TI LDP board; resolution and pixel
 * clock are selected at build time via CONFIG_FB_OMAP_LCD_VGA. */
struct lcd_panel ldp_panel = {
	.name		= "ldp",
	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
			  OMAP_LCDC_INV_HSYNC,

	.bpp		= 16,
	.data_lines	= 18,
	.x_res		= LCD_XRES,
	.y_res		= LCD_YRES,
	.hsw		= 3,		/* hsync_len (4) - 1 */
	.hfp		= 3,		/* right_margin (4) - 1 */
	.hbp		= 39,		/* left_margin (40) - 1 */
	.vsw		= 1,		/* vsync_len (2) - 1 */
	.vfp		= 2,		/* lower_margin */
	.vbp		= 7,		/* upper_margin (8) - 1 */

	.pixel_clock	= LCD_PIXCLOCK_MAX,

	.init		= ldp_panel_init,
	.cleanup	= ldp_panel_cleanup,
	.enable		= ldp_panel_enable,
	.disable	= ldp_panel_disable,
	.get_caps	= ldp_panel_get_caps,
};
157 | |||
/* Register the panel description with the omapfb core. */
static int ldp_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&ldp_panel);
	return 0;
}
163 | |||
/* Nothing to undo: the panel stays registered with the core. */
static int ldp_panel_remove(struct platform_device *pdev)
{
	return 0;
}
168 | |||
/* Power handling is done via enable()/disable(); nothing extra here. */
static int ldp_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	return 0;
}
173 | |||
/* Counterpart of suspend(); intentionally a no-op. */
static int ldp_panel_resume(struct platform_device *pdev)
{
	return 0;
}
178 | |||
/* Platform glue; bound against the "ldp_lcd" platform device. */
struct platform_driver ldp_panel_driver = {
	.probe		= ldp_panel_probe,
	.remove		= ldp_panel_remove,
	.suspend	= ldp_panel_suspend,
	.resume		= ldp_panel_resume,
	.driver		= {
		.name	= "ldp_lcd",
		.owner	= THIS_MODULE,
	},
};
189 | |||
/* Module entry: register the platform driver. */
static int __init ldp_panel_drv_init(void)
{
	return platform_driver_register(&ldp_panel_driver);
}
194 | |||
/* Module exit: unregister the platform driver. */
static void __exit ldp_panel_drv_exit(void)
{
	platform_driver_unregister(&ldp_panel_driver);
}
199 | |||
200 | module_init(ldp_panel_drv_init); | ||
201 | module_exit(ldp_panel_drv_exit); | ||
diff --git a/drivers/video/omap/lcd_omap3beagle.c b/drivers/video/omap/lcd_omap3beagle.c new file mode 100644 index 00000000000..d7c6c3e0afc --- /dev/null +++ b/drivers/video/omap/lcd_omap3beagle.c | |||
@@ -0,0 +1,130 @@ | |||
1 | /* | ||
2 | * LCD panel support for the TI OMAP3 Beagle board | ||
3 | * | ||
4 | * Author: Koen Kooi <koen@openembedded.org> | ||
5 | * | ||
6 | * Derived from drivers/video/omap/lcd-omap3evm.c | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/gpio.h> | ||
26 | #include <linux/i2c/twl.h> | ||
27 | |||
28 | #include <asm/mach-types.h> | ||
29 | |||
30 | #include "omapfb.h" | ||
31 | |||
32 | #define LCD_PANEL_ENABLE_GPIO 170 | ||
33 | |||
34 | static int omap3beagle_panel_init(struct lcd_panel *panel, | ||
35 | struct omapfb_device *fbdev) | ||
36 | { | ||
37 | gpio_request(LCD_PANEL_ENABLE_GPIO, "LCD enable"); | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | static void omap3beagle_panel_cleanup(struct lcd_panel *panel) | ||
42 | { | ||
43 | gpio_free(LCD_PANEL_ENABLE_GPIO); | ||
44 | } | ||
45 | |||
46 | static int omap3beagle_panel_enable(struct lcd_panel *panel) | ||
47 | { | ||
48 | gpio_set_value(LCD_PANEL_ENABLE_GPIO, 1); | ||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | static void omap3beagle_panel_disable(struct lcd_panel *panel) | ||
53 | { | ||
54 | gpio_set_value(LCD_PANEL_ENABLE_GPIO, 0); | ||
55 | } | ||
56 | |||
/* This panel advertises no optional omapfb capabilities. */
static unsigned long omap3beagle_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}
61 | |||
/* 1024x768 TFT panel description handed to omapfb; timing fields are in
 * LCD controller units (register value = modeline value - 1, as noted). */
struct lcd_panel omap3beagle_panel = {
	.name		= "omap3beagle",
	.config		= OMAP_LCDC_PANEL_TFT,

	.bpp		= 16,
	.data_lines	= 24,
	.x_res		= 1024,
	.y_res		= 768,
	.hsw		= 3,		/* hsync_len (4) - 1 */
	.hfp		= 3,		/* right_margin (4) - 1 */
	.hbp		= 39,		/* left_margin (40) - 1 */
	.vsw		= 1,		/* vsync_len (2) - 1 */
	.vfp		= 2,		/* lower_margin */
	.vbp		= 7,		/* upper_margin (8) - 1 */

	.pixel_clock	= 64000,	/* kHz */

	.init		= omap3beagle_panel_init,
	.cleanup	= omap3beagle_panel_cleanup,
	.enable		= omap3beagle_panel_enable,
	.disable	= omap3beagle_panel_disable,
	.get_caps	= omap3beagle_panel_get_caps,
};
85 | |||
86 | static int omap3beagle_panel_probe(struct platform_device *pdev) | ||
87 | { | ||
88 | omapfb_register_panel(&omap3beagle_panel); | ||
89 | return 0; | ||
90 | } | ||
91 | |||
/* Nothing to undo: omapfb has no panel-unregister call. */
static int omap3beagle_panel_remove(struct platform_device *pdev)
{
	return 0;
}
96 | |||
97 | static int omap3beagle_panel_suspend(struct platform_device *pdev, | ||
98 | pm_message_t mesg) | ||
99 | { | ||
100 | return 0; | ||
101 | } | ||
102 | |||
/* Power management is a no-op for this panel. */
static int omap3beagle_panel_resume(struct platform_device *pdev)
{
	return 0;
}
107 | |||
/* Platform-driver glue for "omap3beagle_lcd"; PM callbacks are stubs. */
struct platform_driver omap3beagle_panel_driver = {
	.probe		= omap3beagle_panel_probe,
	.remove		= omap3beagle_panel_remove,
	.suspend	= omap3beagle_panel_suspend,
	.resume		= omap3beagle_panel_resume,
	.driver		= {
		.name	= "omap3beagle_lcd",
		.owner	= THIS_MODULE,
	},
};
118 | |||
119 | static int __init omap3beagle_panel_drv_init(void) | ||
120 | { | ||
121 | return platform_driver_register(&omap3beagle_panel_driver); | ||
122 | } | ||
123 | |||
124 | static void __exit omap3beagle_panel_drv_exit(void) | ||
125 | { | ||
126 | platform_driver_unregister(&omap3beagle_panel_driver); | ||
127 | } | ||
128 | |||
129 | module_init(omap3beagle_panel_drv_init); | ||
130 | module_exit(omap3beagle_panel_drv_exit); | ||
diff --git a/drivers/video/omap/lcd_omap3evm.c b/drivers/video/omap/lcd_omap3evm.c new file mode 100644 index 00000000000..06840da0b09 --- /dev/null +++ b/drivers/video/omap/lcd_omap3evm.c | |||
@@ -0,0 +1,193 @@ | |||
1 | /* | ||
2 | * LCD panel support for the TI OMAP3 EVM board | ||
3 | * | ||
4 | * Author: Steve Sakoman <steve@sakoman.com> | ||
5 | * | ||
6 | * Derived from drivers/video/omap/lcd-apollon.c | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/gpio.h> | ||
26 | #include <linux/i2c/twl.h> | ||
27 | |||
28 | #include <plat/mux.h> | ||
29 | #include <asm/mach-types.h> | ||
30 | |||
31 | #include "omapfb.h" | ||
32 | |||
33 | #define LCD_PANEL_ENABLE_GPIO 153 | ||
34 | #define LCD_PANEL_LR 2 | ||
35 | #define LCD_PANEL_UD 3 | ||
36 | #define LCD_PANEL_INI 152 | ||
37 | #define LCD_PANEL_QVGA 154 | ||
38 | #define LCD_PANEL_RESB 155 | ||
39 | |||
40 | #define ENABLE_VDAC_DEDICATED 0x03 | ||
41 | #define ENABLE_VDAC_DEV_GRP 0x20 | ||
42 | #define ENABLE_VPLL2_DEDICATED 0x05 | ||
43 | #define ENABLE_VPLL2_DEV_GRP 0xE0 | ||
44 | |||
45 | #define TWL_LED_LEDEN 0x00 | ||
46 | #define TWL_PWMA_PWMAON 0x00 | ||
47 | #define TWL_PWMA_PWMAOFF 0x01 | ||
48 | |||
/* Last backlight level (0-100) programmed through the TWL4030 PWMA. */
static unsigned int bklight_level;
50 | |||
51 | static int omap3evm_panel_init(struct lcd_panel *panel, | ||
52 | struct omapfb_device *fbdev) | ||
53 | { | ||
54 | gpio_request(LCD_PANEL_LR, "LCD lr"); | ||
55 | gpio_request(LCD_PANEL_UD, "LCD ud"); | ||
56 | gpio_request(LCD_PANEL_INI, "LCD ini"); | ||
57 | gpio_request(LCD_PANEL_RESB, "LCD resb"); | ||
58 | gpio_request(LCD_PANEL_QVGA, "LCD qvga"); | ||
59 | |||
60 | gpio_direction_output(LCD_PANEL_RESB, 1); | ||
61 | gpio_direction_output(LCD_PANEL_INI, 1); | ||
62 | gpio_direction_output(LCD_PANEL_QVGA, 0); | ||
63 | gpio_direction_output(LCD_PANEL_LR, 1); | ||
64 | gpio_direction_output(LCD_PANEL_UD, 1); | ||
65 | |||
66 | twl_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN); | ||
67 | twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON); | ||
68 | twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF); | ||
69 | bklight_level = 100; | ||
70 | |||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | static void omap3evm_panel_cleanup(struct lcd_panel *panel) | ||
75 | { | ||
76 | gpio_free(LCD_PANEL_QVGA); | ||
77 | gpio_free(LCD_PANEL_RESB); | ||
78 | gpio_free(LCD_PANEL_INI); | ||
79 | gpio_free(LCD_PANEL_UD); | ||
80 | gpio_free(LCD_PANEL_LR); | ||
81 | } | ||
82 | |||
83 | static int omap3evm_panel_enable(struct lcd_panel *panel) | ||
84 | { | ||
85 | gpio_set_value(LCD_PANEL_ENABLE_GPIO, 0); | ||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | static void omap3evm_panel_disable(struct lcd_panel *panel) | ||
90 | { | ||
91 | gpio_set_value(LCD_PANEL_ENABLE_GPIO, 1); | ||
92 | } | ||
93 | |||
/* This panel advertises no optional omapfb capabilities. */
static unsigned long omap3evm_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}
98 | |||
99 | static int omap3evm_bklight_setlevel(struct lcd_panel *panel, | ||
100 | unsigned int level) | ||
101 | { | ||
102 | u8 c; | ||
103 | if ((level >= 0) && (level <= 100)) { | ||
104 | c = (125 * (100 - level)) / 100 + 2; | ||
105 | twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF); | ||
106 | bklight_level = level; | ||
107 | } | ||
108 | return 0; | ||
109 | } | ||
110 | |||
111 | static unsigned int omap3evm_bklight_getlevel(struct lcd_panel *panel) | ||
112 | { | ||
113 | return bklight_level; | ||
114 | } | ||
115 | |||
/* Backlight levels run from 0 to 100 (percent). */
static unsigned int omap3evm_bklight_getmaxlevel(struct lcd_panel *panel)
{
	return 100;
}
120 | |||
/* 480x640 portrait TFT panel with inverted syncs and TWL4030-driven
 * backlight; timing fields are in LCD controller units (value - 1). */
struct lcd_panel omap3evm_panel = {
	.name		= "omap3evm",
	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
			  OMAP_LCDC_INV_HSYNC,

	.bpp		= 16,
	.data_lines	= 18,
	.x_res		= 480,
	.y_res		= 640,
	.hsw		= 3,		/* hsync_len (4) - 1 */
	.hfp		= 3,		/* right_margin (4) - 1 */
	.hbp		= 39,		/* left_margin (40) - 1 */
	.vsw		= 1,		/* vsync_len (2) - 1 */
	.vfp		= 2,		/* lower_margin */
	.vbp		= 7,		/* upper_margin (8) - 1 */

	.pixel_clock	= 26000,	/* kHz */

	.init		= omap3evm_panel_init,
	.cleanup	= omap3evm_panel_cleanup,
	.enable		= omap3evm_panel_enable,
	.disable	= omap3evm_panel_disable,
	.get_caps	= omap3evm_panel_get_caps,
	.set_bklight_level	= omap3evm_bklight_setlevel,
	.get_bklight_level	= omap3evm_bklight_getlevel,
	.get_bklight_max	= omap3evm_bklight_getmaxlevel,
};
148 | |||
149 | static int omap3evm_panel_probe(struct platform_device *pdev) | ||
150 | { | ||
151 | omapfb_register_panel(&omap3evm_panel); | ||
152 | return 0; | ||
153 | } | ||
154 | |||
/* Nothing to undo: omapfb has no panel-unregister call. */
static int omap3evm_panel_remove(struct platform_device *pdev)
{
	return 0;
}
159 | |||
160 | static int omap3evm_panel_suspend(struct platform_device *pdev, | ||
161 | pm_message_t mesg) | ||
162 | { | ||
163 | return 0; | ||
164 | } | ||
165 | |||
/* Power management is a no-op for this panel. */
static int omap3evm_panel_resume(struct platform_device *pdev)
{
	return 0;
}
170 | |||
/* Platform-driver glue for "omap3evm_lcd"; PM callbacks are stubs. */
struct platform_driver omap3evm_panel_driver = {
	.probe		= omap3evm_panel_probe,
	.remove		= omap3evm_panel_remove,
	.suspend	= omap3evm_panel_suspend,
	.resume		= omap3evm_panel_resume,
	.driver		= {
		.name	= "omap3evm_lcd",
		.owner	= THIS_MODULE,
	},
};
181 | |||
182 | static int __init omap3evm_panel_drv_init(void) | ||
183 | { | ||
184 | return platform_driver_register(&omap3evm_panel_driver); | ||
185 | } | ||
186 | |||
187 | static void __exit omap3evm_panel_drv_exit(void) | ||
188 | { | ||
189 | platform_driver_unregister(&omap3evm_panel_driver); | ||
190 | } | ||
191 | |||
192 | module_init(omap3evm_panel_drv_init); | ||
193 | module_exit(omap3evm_panel_drv_exit); | ||
diff --git a/drivers/video/omap/lcd_overo.c b/drivers/video/omap/lcd_overo.c new file mode 100644 index 00000000000..564933ffac6 --- /dev/null +++ b/drivers/video/omap/lcd_overo.c | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * LCD panel support for the Gumstix Overo | ||
3 | * | ||
4 | * Author: Steve Sakoman <steve@sakoman.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along | ||
17 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/module.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/i2c/twl.h> | ||
25 | |||
26 | #include <mach/gpio.h> | ||
27 | #include <plat/mux.h> | ||
28 | #include <asm/mach-types.h> | ||
29 | |||
30 | #include "omapfb.h" | ||
31 | |||
32 | #define LCD_ENABLE 144 | ||
33 | |||
34 | static int overo_panel_init(struct lcd_panel *panel, | ||
35 | struct omapfb_device *fbdev) | ||
36 | { | ||
37 | if ((gpio_request(LCD_ENABLE, "LCD_ENABLE") == 0) && | ||
38 | (gpio_direction_output(LCD_ENABLE, 1) == 0)) | ||
39 | gpio_export(LCD_ENABLE, 0); | ||
40 | else | ||
41 | printk(KERN_ERR "could not obtain gpio for LCD_ENABLE\n"); | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | static void overo_panel_cleanup(struct lcd_panel *panel) | ||
47 | { | ||
48 | gpio_free(LCD_ENABLE); | ||
49 | } | ||
50 | |||
51 | static int overo_panel_enable(struct lcd_panel *panel) | ||
52 | { | ||
53 | gpio_set_value(LCD_ENABLE, 1); | ||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static void overo_panel_disable(struct lcd_panel *panel) | ||
58 | { | ||
59 | gpio_set_value(LCD_ENABLE, 0); | ||
60 | } | ||
61 | |||
/* This panel advertises no optional omapfb capabilities. */
static unsigned long overo_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}
66 | |||
/* Overo panel description: resolution and timings are selected at build
 * time via the CONFIG_FB_OMAP_* VESA CVT reduced-blanking options,
 * defaulting to 640x480 when none is set.  pixel_clock is in kHz. */
struct lcd_panel overo_panel = {
	.name			= "overo",
	.config			= OMAP_LCDC_PANEL_TFT,
	.bpp			= 16,
	.data_lines		= 24,

#if defined CONFIG_FB_OMAP_031M3R

	/* 640 x 480 @ 60 Hz  Reduced blanking VESA CVT 0.31M3-R */
	.x_res			= 640,
	.y_res			= 480,
	.hfp			= 48,
	.hsw			= 32,
	.hbp			= 80,
	.vfp			= 3,
	.vsw			= 4,
	.vbp			= 7,
	.pixel_clock		= 23500,

#elif defined CONFIG_FB_OMAP_048M3R

	/* 800 x 600 @ 60 Hz  Reduced blanking VESA CVT 0.48M3-R */
	.x_res			= 800,
	.y_res			= 600,
	.hfp			= 48,
	.hsw			= 32,
	.hbp			= 80,
	.vfp			= 3,
	.vsw			= 4,
	.vbp			= 11,
	.pixel_clock		= 35500,

#elif defined CONFIG_FB_OMAP_079M3R

	/* 1024 x 768 @ 60 Hz  Reduced blanking VESA CVT 0.79M3-R */
	.x_res			= 1024,
	.y_res			= 768,
	.hfp			= 48,
	.hsw			= 32,
	.hbp			= 80,
	.vfp			= 3,
	.vsw			= 4,
	.vbp			= 15,
	.pixel_clock		= 56000,

#elif defined CONFIG_FB_OMAP_092M9R

	/* 1280 x 720 @ 60 Hz  Reduced blanking VESA CVT 0.92M9-R */
	.x_res			= 1280,
	.y_res			= 720,
	.hfp			= 48,
	.hsw			= 32,
	.hbp			= 80,
	.vfp			= 3,
	.vsw			= 5,
	.vbp			= 13,
	.pixel_clock		= 64000,

#else

	/* use 640 x 480 if no config option */
	/* 640 x 480 @ 60 Hz  Reduced blanking VESA CVT 0.31M3-R */
	.x_res			= 640,
	.y_res			= 480,
	.hfp			= 48,
	.hsw			= 32,
	.hbp			= 80,
	.vfp			= 3,
	.vsw			= 4,
	.vbp			= 7,
	.pixel_clock		= 23500,

#endif

	.init			= overo_panel_init,
	.cleanup		= overo_panel_cleanup,
	.enable			= overo_panel_enable,
	.disable		= overo_panel_disable,
	.get_caps		= overo_panel_get_caps,
};
147 | |||
148 | static int overo_panel_probe(struct platform_device *pdev) | ||
149 | { | ||
150 | omapfb_register_panel(&overo_panel); | ||
151 | return 0; | ||
152 | } | ||
153 | |||
/* omapfb does not have unregister_panel, so removal is a no-op. */
static int overo_panel_remove(struct platform_device *pdev)
{
	return 0;
}
159 | |||
/* Platform-driver glue for "overo_lcd"; no PM callbacks are provided. */
static struct platform_driver overo_panel_driver = {
	.probe		= overo_panel_probe,
	.remove		= overo_panel_remove,
	.driver		= {
		.name	= "overo_lcd",
		.owner	= THIS_MODULE,
	},
};
168 | |||
169 | static int __init overo_panel_drv_init(void) | ||
170 | { | ||
171 | return platform_driver_register(&overo_panel_driver); | ||
172 | } | ||
173 | |||
174 | static void __exit overo_panel_drv_exit(void) | ||
175 | { | ||
176 | platform_driver_unregister(&overo_panel_driver); | ||
177 | } | ||
178 | |||
179 | module_init(overo_panel_drv_init); | ||
180 | module_exit(overo_panel_drv_exit); | ||
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c new file mode 100644 index 00000000000..0c6981f1a4a --- /dev/null +++ b/drivers/video/omap/rfbi.c | |||
@@ -0,0 +1,598 @@ | |||
1 | /* | ||
2 | * OMAP2 Remote Frame Buffer Interface support | ||
3 | * | ||
4 | * Copyright (C) 2005 Nokia Corporation | ||
5 | * Author: Juha Yrjölä <juha.yrjola@nokia.com> | ||
6 | * Imre Deak <imre.deak@nokia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
21 | */ | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/i2c.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/clk.h> | ||
28 | #include <linux/io.h> | ||
29 | #include <linux/platform_device.h> | ||
30 | |||
31 | #include "omapfb.h" | ||
32 | #include "dispc.h" | ||
33 | |||
34 | /* To work around an RFBI transfer rate limitation */ | ||
35 | #define OMAP_RFBI_RATE_LIMIT 1 | ||
36 | |||
37 | #define RFBI_BASE 0x48050800 | ||
38 | #define RFBI_REVISION 0x0000 | ||
39 | #define RFBI_SYSCONFIG 0x0010 | ||
40 | #define RFBI_SYSSTATUS 0x0014 | ||
41 | #define RFBI_CONTROL 0x0040 | ||
42 | #define RFBI_PIXEL_CNT 0x0044 | ||
43 | #define RFBI_LINE_NUMBER 0x0048 | ||
44 | #define RFBI_CMD 0x004c | ||
45 | #define RFBI_PARAM 0x0050 | ||
46 | #define RFBI_DATA 0x0054 | ||
47 | #define RFBI_READ 0x0058 | ||
48 | #define RFBI_STATUS 0x005c | ||
49 | #define RFBI_CONFIG0 0x0060 | ||
50 | #define RFBI_ONOFF_TIME0 0x0064 | ||
51 | #define RFBI_CYCLE_TIME0 0x0068 | ||
52 | #define RFBI_DATA_CYCLE1_0 0x006c | ||
53 | #define RFBI_DATA_CYCLE2_0 0x0070 | ||
54 | #define RFBI_DATA_CYCLE3_0 0x0074 | ||
55 | #define RFBI_VSYNC_WIDTH 0x0090 | ||
56 | #define RFBI_HSYNC_WIDTH 0x0094 | ||
57 | |||
58 | #define DISPC_BASE 0x48050400 | ||
59 | #define DISPC_CONTROL 0x0040 | ||
60 | #define DISPC_IRQ_FRAMEMASK 0x0001 | ||
61 | |||
/* Driver-global RFBI state; this module drives a single controller. */
static struct {
	void __iomem *base;		/* ioremapped RFBI register window */
	void (*lcdc_callback)(void *data); /* transfer-complete hook */
	void *lcdc_callback_data;
	unsigned long l4_khz;		/* L4 interface clock, in kHz */
	int bits_per_cycle;		/* bus cycle width: 8 or 16 */
	struct omapfb_device *fbdev;
	struct clk *dss_ick;
	struct clk *dss1_fck;
	unsigned tearsync_pin_cnt;	/* 1 or 2, set by setup_tearsync() */
	unsigned tearsync_mode;		/* 0 = tearsync disabled */
} rfbi;
74 | |||
75 | static inline void rfbi_write_reg(int idx, u32 val) | ||
76 | { | ||
77 | __raw_writel(val, rfbi.base + idx); | ||
78 | } | ||
79 | |||
80 | static inline u32 rfbi_read_reg(int idx) | ||
81 | { | ||
82 | return __raw_readl(rfbi.base + idx); | ||
83 | } | ||
84 | |||
85 | static int rfbi_get_clocks(void) | ||
86 | { | ||
87 | rfbi.dss_ick = clk_get(&rfbi.fbdev->dssdev->dev, "ick"); | ||
88 | if (IS_ERR(rfbi.dss_ick)) { | ||
89 | dev_err(rfbi.fbdev->dev, "can't get ick\n"); | ||
90 | return PTR_ERR(rfbi.dss_ick); | ||
91 | } | ||
92 | |||
93 | rfbi.dss1_fck = clk_get(&rfbi.fbdev->dssdev->dev, "fck"); | ||
94 | if (IS_ERR(rfbi.dss1_fck)) { | ||
95 | dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n"); | ||
96 | clk_put(rfbi.dss_ick); | ||
97 | return PTR_ERR(rfbi.dss1_fck); | ||
98 | } | ||
99 | |||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | static void rfbi_put_clocks(void) | ||
104 | { | ||
105 | clk_put(rfbi.dss1_fck); | ||
106 | clk_put(rfbi.dss_ick); | ||
107 | } | ||
108 | |||
109 | static void rfbi_enable_clocks(int enable) | ||
110 | { | ||
111 | if (enable) { | ||
112 | clk_enable(rfbi.dss_ick); | ||
113 | clk_enable(rfbi.dss1_fck); | ||
114 | } else { | ||
115 | clk_disable(rfbi.dss1_fck); | ||
116 | clk_disable(rfbi.dss_ick); | ||
117 | } | ||
118 | } | ||
119 | |||
120 | |||
#ifdef VERBOSE
/* Debug dump of the programmed RFBI timing registers (VERBOSE builds). */
static void rfbi_print_timings(void)
{
	u32 l;
	u32 time;

	l = rfbi_read_reg(RFBI_CONFIG0);
	/* Tick time in ps; bit 4 selects the divide-by-2 extif clock. */
	time = 1000000000 / rfbi.l4_khz;
	if (l & (1 << 4))
		time *= 2;

	dev_dbg(rfbi.fbdev->dev, "Tick time %u ps\n", time);
	l = rfbi_read_reg(RFBI_ONOFF_TIME0);
	dev_dbg(rfbi.fbdev->dev,
		"CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, "
		"REONTIME %d, REOFFTIME %d\n",
		l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f,
		(l >> 20) & 0x0f, (l >> 24) & 0x3f);

	l = rfbi_read_reg(RFBI_CYCLE_TIME0);
	dev_dbg(rfbi.fbdev->dev,
		"WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, "
		"ACCESSTIME %d\n",
		(l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f,
		(l >> 22) & 0x3f);
}
#else
/* No-op stub when VERBOSE is not defined. */
static void rfbi_print_timings(void) {}
#endif
150 | |||
151 | static void rfbi_set_timings(const struct extif_timings *t) | ||
152 | { | ||
153 | u32 l; | ||
154 | |||
155 | BUG_ON(!t->converted); | ||
156 | |||
157 | rfbi_enable_clocks(1); | ||
158 | rfbi_write_reg(RFBI_ONOFF_TIME0, t->tim[0]); | ||
159 | rfbi_write_reg(RFBI_CYCLE_TIME0, t->tim[1]); | ||
160 | |||
161 | l = rfbi_read_reg(RFBI_CONFIG0); | ||
162 | l &= ~(1 << 4); | ||
163 | l |= (t->tim[2] ? 1 : 0) << 4; | ||
164 | rfbi_write_reg(RFBI_CONFIG0, l); | ||
165 | |||
166 | rfbi_print_timings(); | ||
167 | rfbi_enable_clocks(0); | ||
168 | } | ||
169 | |||
170 | static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div) | ||
171 | { | ||
172 | *clk_period = 1000000000 / rfbi.l4_khz; | ||
173 | *max_clk_div = 2; | ||
174 | } | ||
175 | |||
176 | static int ps_to_rfbi_ticks(int time, int div) | ||
177 | { | ||
178 | unsigned long tick_ps; | ||
179 | int ret; | ||
180 | |||
181 | /* Calculate in picosecs to yield more exact results */ | ||
182 | tick_ps = 1000000000 / (rfbi.l4_khz) * div; | ||
183 | |||
184 | ret = (time + tick_ps - 1) / tick_ps; | ||
185 | |||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | #ifdef OMAP_RFBI_RATE_LIMIT | ||
190 | static unsigned long rfbi_get_max_tx_rate(void) | ||
191 | { | ||
192 | unsigned long l4_rate, dss1_rate; | ||
193 | int min_l4_ticks = 0; | ||
194 | int i; | ||
195 | |||
196 | /* According to TI this can't be calculated so make the | ||
197 | * adjustments for a couple of known frequencies and warn for | ||
198 | * others. | ||
199 | */ | ||
200 | static const struct { | ||
201 | unsigned long l4_clk; /* HZ */ | ||
202 | unsigned long dss1_clk; /* HZ */ | ||
203 | unsigned long min_l4_ticks; | ||
204 | } ftab[] = { | ||
205 | { 55, 132, 7, }, /* 7.86 MPix/s */ | ||
206 | { 110, 110, 12, }, /* 9.16 MPix/s */ | ||
207 | { 110, 132, 10, }, /* 11 Mpix/s */ | ||
208 | { 120, 120, 10, }, /* 12 Mpix/s */ | ||
209 | { 133, 133, 10, }, /* 13.3 Mpix/s */ | ||
210 | }; | ||
211 | |||
212 | l4_rate = rfbi.l4_khz / 1000; | ||
213 | dss1_rate = clk_get_rate(rfbi.dss1_fck) / 1000000; | ||
214 | |||
215 | for (i = 0; i < ARRAY_SIZE(ftab); i++) { | ||
216 | /* Use a window instead of an exact match, to account | ||
217 | * for different DPLL multiplier / divider pairs. | ||
218 | */ | ||
219 | if (abs(ftab[i].l4_clk - l4_rate) < 3 && | ||
220 | abs(ftab[i].dss1_clk - dss1_rate) < 3) { | ||
221 | min_l4_ticks = ftab[i].min_l4_ticks; | ||
222 | break; | ||
223 | } | ||
224 | } | ||
225 | if (i == ARRAY_SIZE(ftab)) { | ||
226 | /* Can't be sure, return anyway the maximum not | ||
227 | * rate-limited. This might cause a problem only for the | ||
228 | * tearing synchronisation. | ||
229 | */ | ||
230 | dev_err(rfbi.fbdev->dev, | ||
231 | "can't determine maximum RFBI transfer rate\n"); | ||
232 | return rfbi.l4_khz * 1000; | ||
233 | } | ||
234 | return rfbi.l4_khz * 1000 / min_l4_ticks; | ||
235 | } | ||
236 | #else | ||
237 | static int rfbi_get_max_tx_rate(void) | ||
238 | { | ||
239 | return rfbi.l4_khz * 1000; | ||
240 | } | ||
241 | #endif | ||
242 | |||
243 | |||
/*
 * Convert the generic extif timings in @t (picoseconds) into packed
 * RFBI_ONOFF_TIME0 / RFBI_CYCLE_TIME0 register values plus the clock
 * divider flag, stored in t->tim[0..2].  Returns 0 and sets
 * t->converted on success, or -1 when the divider is out of range or a
 * converted value overflows its register field.
 */
static int rfbi_convert_timings(struct extif_timings *t)
{
	u32 l;
	int reon, reoff, weon, weoff, cson, csoff, cs_pulse;
	int actim, recyc, wecyc;
	int div = t->clk_div;

	/* Only divide-by-1 and divide-by-2 extif clocks exist. */
	if (div <= 0 || div > 2)
		return -1;

	/* Make sure that after conversion it still holds that:
	 * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff,
	 * csoff > cson, csoff >= max(weoff, reoff), actim > reon
	 */
	weon = ps_to_rfbi_ticks(t->we_on_time, div);
	weoff = ps_to_rfbi_ticks(t->we_off_time, div);
	if (weoff <= weon)
		weoff = weon + 1;
	if (weon > 0x0f)
		return -1;
	if (weoff > 0x3f)
		return -1;

	reon = ps_to_rfbi_ticks(t->re_on_time, div);
	reoff = ps_to_rfbi_ticks(t->re_off_time, div);
	if (reoff <= reon)
		reoff = reon + 1;
	if (reon > 0x0f)
		return -1;
	if (reoff > 0x3f)
		return -1;

	cson = ps_to_rfbi_ticks(t->cs_on_time, div);
	csoff = ps_to_rfbi_ticks(t->cs_off_time, div);
	if (csoff <= cson)
		csoff = cson + 1;
	if (csoff < max(weoff, reoff))
		csoff = max(weoff, reoff);
	if (cson > 0x0f)
		return -1;
	if (csoff > 0x3f)
		return -1;

	/* Pack the on/off times into RFBI_ONOFF_TIME0 field layout. */
	l = cson;
	l |= csoff << 4;
	l |= weon << 10;
	l |= weoff << 14;
	l |= reon << 20;
	l |= reoff << 24;

	t->tim[0] = l;

	actim = ps_to_rfbi_ticks(t->access_time, div);
	if (actim <= reon)
		actim = reon + 1;
	if (actim > 0x3f)
		return -1;

	wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div);
	if (wecyc < weoff)
		wecyc = weoff;
	if (wecyc > 0x3f)
		return -1;

	recyc = ps_to_rfbi_ticks(t->re_cycle_time, div);
	if (recyc < reoff)
		recyc = reoff;
	if (recyc > 0x3f)
		return -1;

	cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div);
	if (cs_pulse > 0x3f)
		return -1;

	/* Pack the cycle times into RFBI_CYCLE_TIME0 field layout. */
	l = wecyc;
	l |= recyc << 6;
	l |= cs_pulse << 12;
	l |= actim << 22;

	t->tim[1] = l;

	/* Non-zero tim[2] selects the divide-by-2 clock (CONFIG0 bit 4). */
	t->tim[2] = div - 1;

	t->converted = 1;

	return 0;
}
331 | |||
332 | static int rfbi_setup_tearsync(unsigned pin_cnt, | ||
333 | unsigned hs_pulse_time, unsigned vs_pulse_time, | ||
334 | int hs_pol_inv, int vs_pol_inv, int extif_div) | ||
335 | { | ||
336 | int hs, vs; | ||
337 | int min; | ||
338 | u32 l; | ||
339 | |||
340 | if (pin_cnt != 1 && pin_cnt != 2) | ||
341 | return -EINVAL; | ||
342 | |||
343 | hs = ps_to_rfbi_ticks(hs_pulse_time, 1); | ||
344 | vs = ps_to_rfbi_ticks(vs_pulse_time, 1); | ||
345 | if (hs < 2) | ||
346 | return -EDOM; | ||
347 | if (pin_cnt == 2) | ||
348 | min = 2; | ||
349 | else | ||
350 | min = 4; | ||
351 | if (vs < min) | ||
352 | return -EDOM; | ||
353 | if (vs == hs) | ||
354 | return -EINVAL; | ||
355 | rfbi.tearsync_pin_cnt = pin_cnt; | ||
356 | dev_dbg(rfbi.fbdev->dev, | ||
357 | "setup_tearsync: pins %d hs %d vs %d hs_inv %d vs_inv %d\n", | ||
358 | pin_cnt, hs, vs, hs_pol_inv, vs_pol_inv); | ||
359 | |||
360 | rfbi_enable_clocks(1); | ||
361 | rfbi_write_reg(RFBI_HSYNC_WIDTH, hs); | ||
362 | rfbi_write_reg(RFBI_VSYNC_WIDTH, vs); | ||
363 | |||
364 | l = rfbi_read_reg(RFBI_CONFIG0); | ||
365 | if (hs_pol_inv) | ||
366 | l &= ~(1 << 21); | ||
367 | else | ||
368 | l |= 1 << 21; | ||
369 | if (vs_pol_inv) | ||
370 | l &= ~(1 << 20); | ||
371 | else | ||
372 | l |= 1 << 20; | ||
373 | rfbi_enable_clocks(0); | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | static int rfbi_enable_tearsync(int enable, unsigned line) | ||
379 | { | ||
380 | u32 l; | ||
381 | |||
382 | dev_dbg(rfbi.fbdev->dev, "tearsync %d line %d mode %d\n", | ||
383 | enable, line, rfbi.tearsync_mode); | ||
384 | if (line > (1 << 11) - 1) | ||
385 | return -EINVAL; | ||
386 | |||
387 | rfbi_enable_clocks(1); | ||
388 | l = rfbi_read_reg(RFBI_CONFIG0); | ||
389 | l &= ~(0x3 << 2); | ||
390 | if (enable) { | ||
391 | rfbi.tearsync_mode = rfbi.tearsync_pin_cnt; | ||
392 | l |= rfbi.tearsync_mode << 2; | ||
393 | } else | ||
394 | rfbi.tearsync_mode = 0; | ||
395 | rfbi_write_reg(RFBI_CONFIG0, l); | ||
396 | rfbi_write_reg(RFBI_LINE_NUMBER, line); | ||
397 | rfbi_enable_clocks(0); | ||
398 | |||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | static void rfbi_write_command(const void *buf, unsigned int len) | ||
403 | { | ||
404 | rfbi_enable_clocks(1); | ||
405 | if (rfbi.bits_per_cycle == 16) { | ||
406 | const u16 *w = buf; | ||
407 | BUG_ON(len & 1); | ||
408 | for (; len; len -= 2) | ||
409 | rfbi_write_reg(RFBI_CMD, *w++); | ||
410 | } else { | ||
411 | const u8 *b = buf; | ||
412 | BUG_ON(rfbi.bits_per_cycle != 8); | ||
413 | for (; len; len--) | ||
414 | rfbi_write_reg(RFBI_CMD, *b++); | ||
415 | } | ||
416 | rfbi_enable_clocks(0); | ||
417 | } | ||
418 | |||
419 | static void rfbi_read_data(void *buf, unsigned int len) | ||
420 | { | ||
421 | rfbi_enable_clocks(1); | ||
422 | if (rfbi.bits_per_cycle == 16) { | ||
423 | u16 *w = buf; | ||
424 | BUG_ON(len & ~1); | ||
425 | for (; len; len -= 2) { | ||
426 | rfbi_write_reg(RFBI_READ, 0); | ||
427 | *w++ = rfbi_read_reg(RFBI_READ); | ||
428 | } | ||
429 | } else { | ||
430 | u8 *b = buf; | ||
431 | BUG_ON(rfbi.bits_per_cycle != 8); | ||
432 | for (; len; len--) { | ||
433 | rfbi_write_reg(RFBI_READ, 0); | ||
434 | *b++ = rfbi_read_reg(RFBI_READ); | ||
435 | } | ||
436 | } | ||
437 | rfbi_enable_clocks(0); | ||
438 | } | ||
439 | |||
440 | static void rfbi_write_data(const void *buf, unsigned int len) | ||
441 | { | ||
442 | rfbi_enable_clocks(1); | ||
443 | if (rfbi.bits_per_cycle == 16) { | ||
444 | const u16 *w = buf; | ||
445 | BUG_ON(len & 1); | ||
446 | for (; len; len -= 2) | ||
447 | rfbi_write_reg(RFBI_PARAM, *w++); | ||
448 | } else { | ||
449 | const u8 *b = buf; | ||
450 | BUG_ON(rfbi.bits_per_cycle != 8); | ||
451 | for (; len; len--) | ||
452 | rfbi_write_reg(RFBI_PARAM, *b++); | ||
453 | } | ||
454 | rfbi_enable_clocks(0); | ||
455 | } | ||
456 | |||
/*
 * Start pushing a width x height pixel area from the DISPC out through
 * the RFBI interface.  @callback (required) is invoked from the
 * DISPC_IRQ_FRAMEMASK handler (rfbi_dma_callback, registered in
 * rfbi_init) once the frame is done.  The clock reference taken here is
 * released by _stop_transfer() on the completion path.
 */
static void rfbi_transfer_area(int width, int height,
		void (callback)(void * data), void *data)
{
	u32 w;

	BUG_ON(callback == NULL);

	rfbi_enable_clocks(1);
	omap_dispc_set_lcd_size(width, height);

	/* Remember the completion callback for rfbi_dma_callback(). */
	rfbi.lcdc_callback = callback;
	rfbi.lcdc_callback_data = data;

	rfbi_write_reg(RFBI_PIXEL_CNT, width * height);

	w = rfbi_read_reg(RFBI_CONTROL);
	w |= 1; /* enable */
	if (!rfbi.tearsync_mode)
		w |= 1 << 4; /* internal trigger, reset by HW */
	rfbi_write_reg(RFBI_CONTROL, w);

	omap_dispc_enable_lcd_out(1);
}
480 | |||
481 | static inline void _stop_transfer(void) | ||
482 | { | ||
483 | u32 w; | ||
484 | |||
485 | w = rfbi_read_reg(RFBI_CONTROL); | ||
486 | rfbi_write_reg(RFBI_CONTROL, w & ~(1 << 0)); | ||
487 | rfbi_enable_clocks(0); | ||
488 | } | ||
489 | |||
/*
 * DISPC_IRQ_FRAMEMASK handler (registered in rfbi_init): the frame has
 * been transferred, so stop the RFBI output and notify the client that
 * started the transfer via rfbi_transfer_area().
 */
static void rfbi_dma_callback(void *data)
{
	_stop_transfer();
	rfbi.lcdc_callback(rfbi.lcdc_callback_data);
}
495 | |||
496 | static void rfbi_set_bits_per_cycle(int bpc) | ||
497 | { | ||
498 | u32 l; | ||
499 | |||
500 | rfbi_enable_clocks(1); | ||
501 | l = rfbi_read_reg(RFBI_CONFIG0); | ||
502 | l &= ~(0x03 << 0); | ||
503 | |||
504 | switch (bpc) { | ||
505 | case 8: | ||
506 | break; | ||
507 | case 16: | ||
508 | l |= 3; | ||
509 | break; | ||
510 | default: | ||
511 | BUG(); | ||
512 | } | ||
513 | rfbi_write_reg(RFBI_CONFIG0, l); | ||
514 | rfbi.bits_per_cycle = bpc; | ||
515 | rfbi_enable_clocks(0); | ||
516 | } | ||
517 | |||
518 | static int rfbi_init(struct omapfb_device *fbdev) | ||
519 | { | ||
520 | u32 l; | ||
521 | int r; | ||
522 | |||
523 | rfbi.fbdev = fbdev; | ||
524 | rfbi.base = ioremap(RFBI_BASE, SZ_1K); | ||
525 | if (!rfbi.base) { | ||
526 | dev_err(fbdev->dev, "can't ioremap RFBI\n"); | ||
527 | return -ENOMEM; | ||
528 | } | ||
529 | |||
530 | if ((r = rfbi_get_clocks()) < 0) | ||
531 | return r; | ||
532 | rfbi_enable_clocks(1); | ||
533 | |||
534 | rfbi.l4_khz = clk_get_rate(rfbi.dss_ick) / 1000; | ||
535 | |||
536 | /* Reset */ | ||
537 | rfbi_write_reg(RFBI_SYSCONFIG, 1 << 1); | ||
538 | while (!(rfbi_read_reg(RFBI_SYSSTATUS) & (1 << 0))); | ||
539 | |||
540 | l = rfbi_read_reg(RFBI_SYSCONFIG); | ||
541 | /* Enable autoidle and smart-idle */ | ||
542 | l |= (1 << 0) | (2 << 3); | ||
543 | rfbi_write_reg(RFBI_SYSCONFIG, l); | ||
544 | |||
545 | /* 16-bit interface, ITE trigger mode, 16-bit data */ | ||
546 | l = (0x03 << 0) | (0x00 << 2) | (0x01 << 5) | (0x02 << 7); | ||
547 | l |= (0 << 9) | (1 << 20) | (1 << 21); | ||
548 | rfbi_write_reg(RFBI_CONFIG0, l); | ||
549 | |||
550 | rfbi_write_reg(RFBI_DATA_CYCLE1_0, 0x00000010); | ||
551 | |||
552 | l = rfbi_read_reg(RFBI_CONTROL); | ||
553 | /* Select CS0, clear bypass mode */ | ||
554 | l = (0x01 << 2); | ||
555 | rfbi_write_reg(RFBI_CONTROL, l); | ||
556 | |||
557 | r = omap_dispc_request_irq(DISPC_IRQ_FRAMEMASK, rfbi_dma_callback, | ||
558 | NULL); | ||
559 | if (r < 0) { | ||
560 | dev_err(fbdev->dev, "can't get DISPC irq\n"); | ||
561 | rfbi_enable_clocks(0); | ||
562 | return r; | ||
563 | } | ||
564 | |||
565 | l = rfbi_read_reg(RFBI_REVISION); | ||
566 | pr_info("omapfb: RFBI version %d.%d initialized\n", | ||
567 | (l >> 4) & 0x0f, l & 0x0f); | ||
568 | |||
569 | rfbi_enable_clocks(0); | ||
570 | |||
571 | return 0; | ||
572 | } | ||
573 | |||
/*
 * Undo rfbi_init(): release the DISPC frame-done interrupt, the clock
 * references and the register mapping.
 */
static void rfbi_cleanup(void)
{
	omap_dispc_free_irq(DISPC_IRQ_FRAMEMASK, rfbi_dma_callback, NULL);
	rfbi_put_clocks();
	iounmap(rfbi.base);
}
580 | |||
/*
 * External LCD controller interface ops for the OMAP2 RFBI module,
 * consumed by the omapfb core.  The clock/timing/tearsync helpers are
 * defined earlier in this file.
 */
const struct lcd_ctrl_extif omap2_ext_if = {
	.init			= rfbi_init,
	.cleanup		= rfbi_cleanup,
	.get_clk_info		= rfbi_get_clk_info,
	.get_max_tx_rate	= rfbi_get_max_tx_rate,
	.set_bits_per_cycle	= rfbi_set_bits_per_cycle,
	.convert_timings	= rfbi_convert_timings,
	.set_timings		= rfbi_set_timings,
	.write_command		= rfbi_write_command,
	.read_data		= rfbi_read_data,
	.write_data		= rfbi_write_data,
	.transfer_area		= rfbi_transfer_area,
	.setup_tearsync		= rfbi_setup_tearsync,
	.enable_tearsync	= rfbi_enable_tearsync,

	/* No hardware limit on a single transfer. */
	.max_transmit_size	= (u32) ~0,
};
598 | |||
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/hdmi.h new file mode 100644 index 00000000000..c885f9cb065 --- /dev/null +++ b/drivers/video/omap2/dss/hdmi.h | |||
@@ -0,0 +1,631 @@ | |||
1 | /* | ||
2 | * hdmi.h | ||
3 | * | ||
4 | * HDMI driver definition for TI OMAP4 processors. | ||
5 | * | ||
6 | * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License version 2 as published by | ||
10 | * the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
#ifndef _OMAP4_DSS_HDMI_H_
#define _OMAP4_DSS_HDMI_H_

#include <linux/string.h>
#include <video/omapdss.h>

/* Byte offsets of the HDMI sub-blocks inside the HDMI register space */
#define HDMI_WP			0x0
#define HDMI_CORE_SYS		0x400
#define HDMI_CORE_AV		0x900
#define HDMI_PLLCTRL		0x200
#define HDMI_PHY		0x300

/*
 * Register indices are wrapped in a struct so that mixing them up with
 * plain integers is a compile-time type error.
 */
struct hdmi_reg { u16 idx; };

#define HDMI_REG(idx) ((const struct hdmi_reg) { idx })
/* HDMI Wrapper (register offsets relative to HDMI_WP) */
#define HDMI_WP_REG(idx)			HDMI_REG(HDMI_WP + idx)

#define HDMI_WP_REVISION			HDMI_WP_REG(0x0)
#define HDMI_WP_SYSCONFIG			HDMI_WP_REG(0x10)
#define HDMI_WP_IRQSTATUS_RAW			HDMI_WP_REG(0x24)
#define HDMI_WP_IRQSTATUS			HDMI_WP_REG(0x28)
#define HDMI_WP_PWR_CTRL			HDMI_WP_REG(0x40)
#define HDMI_WP_IRQENABLE_SET			HDMI_WP_REG(0x2C)
#define HDMI_WP_VIDEO_CFG			HDMI_WP_REG(0x50)
#define HDMI_WP_VIDEO_SIZE			HDMI_WP_REG(0x60)
#define HDMI_WP_VIDEO_TIMING_H			HDMI_WP_REG(0x68)
#define HDMI_WP_VIDEO_TIMING_V			HDMI_WP_REG(0x6C)
#define HDMI_WP_WP_CLK				HDMI_WP_REG(0x70)
#define HDMI_WP_AUDIO_CFG			HDMI_WP_REG(0x80)
#define HDMI_WP_AUDIO_CFG2			HDMI_WP_REG(0x84)
#define HDMI_WP_AUDIO_CTRL			HDMI_WP_REG(0x88)
#define HDMI_WP_AUDIO_DATA			HDMI_WP_REG(0x8C)

/* HDMI IP Core System (register offsets relative to HDMI_CORE_SYS) */
#define HDMI_CORE_SYS_REG(idx)			HDMI_REG(HDMI_CORE_SYS + idx)

#define HDMI_CORE_SYS_VND_IDL			HDMI_CORE_SYS_REG(0x0)
#define HDMI_CORE_SYS_DEV_IDL			HDMI_CORE_SYS_REG(0x8)
#define HDMI_CORE_SYS_DEV_IDH			HDMI_CORE_SYS_REG(0xC)
#define HDMI_CORE_SYS_DEV_REV			HDMI_CORE_SYS_REG(0x10)
#define HDMI_CORE_SYS_SRST			HDMI_CORE_SYS_REG(0x14)
#define HDMI_CORE_CTRL1				HDMI_CORE_SYS_REG(0x20)
#define HDMI_CORE_SYS_SYS_STAT			HDMI_CORE_SYS_REG(0x24)
#define HDMI_CORE_SYS_VID_ACEN			HDMI_CORE_SYS_REG(0x124)
#define HDMI_CORE_SYS_VID_MODE			HDMI_CORE_SYS_REG(0x128)
#define HDMI_CORE_SYS_INTR_STATE		HDMI_CORE_SYS_REG(0x1C0)
#define HDMI_CORE_SYS_INTR1			HDMI_CORE_SYS_REG(0x1C4)
#define HDMI_CORE_SYS_INTR2			HDMI_CORE_SYS_REG(0x1C8)
#define HDMI_CORE_SYS_INTR3			HDMI_CORE_SYS_REG(0x1CC)
#define HDMI_CORE_SYS_INTR4			HDMI_CORE_SYS_REG(0x1D0)
#define HDMI_CORE_SYS_UMASK1			HDMI_CORE_SYS_REG(0x1D4)
#define HDMI_CORE_SYS_TMDS_CTRL			HDMI_CORE_SYS_REG(0x208)
#define HDMI_CORE_SYS_DE_DLY			HDMI_CORE_SYS_REG(0xC8)
#define HDMI_CORE_SYS_DE_CTRL			HDMI_CORE_SYS_REG(0xCC)
#define HDMI_CORE_SYS_DE_TOP			HDMI_CORE_SYS_REG(0xD0)
#define HDMI_CORE_SYS_DE_CNTL			HDMI_CORE_SYS_REG(0xD8)
#define HDMI_CORE_SYS_DE_CNTH			HDMI_CORE_SYS_REG(0xDC)
#define HDMI_CORE_SYS_DE_LINL			HDMI_CORE_SYS_REG(0xE0)
#define HDMI_CORE_SYS_DE_LINH_1			HDMI_CORE_SYS_REG(0xE4)
/* Field values for HDMI_CORE_CTRL1 */
#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC		0x1
#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC		0x1
#define HDMI_CORE_CTRL1_BSEL_24BITBUS		0x1
#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE		0x1

/* HDMI DDC E-DID (EDID read engine, inside the CORE_SYS block) */
#define HDMI_CORE_DDC_CMD			HDMI_CORE_SYS_REG(0x3CC)
#define HDMI_CORE_DDC_STATUS			HDMI_CORE_SYS_REG(0x3C8)
#define HDMI_CORE_DDC_ADDR			HDMI_CORE_SYS_REG(0x3B4)
#define HDMI_CORE_DDC_OFFSET			HDMI_CORE_SYS_REG(0x3BC)
#define HDMI_CORE_DDC_COUNT1			HDMI_CORE_SYS_REG(0x3C0)
#define HDMI_CORE_DDC_COUNT2			HDMI_CORE_SYS_REG(0x3C4)
#define HDMI_CORE_DDC_DATA			HDMI_CORE_SYS_REG(0x3D0)
#define HDMI_CORE_DDC_SEGM			HDMI_CORE_SYS_REG(0x3B8)
96 | |||
/* HDMI IP Core Audio Video (register offsets relative to HDMI_CORE_AV) */
#define HDMI_CORE_AV_REG(idx)			HDMI_REG(HDMI_CORE_AV + idx)

/*
 * NOTE: the original list defined HDMI_CORE_AV_HDMI_CTRL, _DPD,
 * _PB_CTRL1/2 and the AVI_TYPE/VERS/LEN/CHSUM registers twice with
 * identical values; the redundant duplicates have been removed.
 */
#define HDMI_CORE_AV_HDMI_CTRL			HDMI_CORE_AV_REG(0xBC)
#define HDMI_CORE_AV_DPD			HDMI_CORE_AV_REG(0xF4)
#define HDMI_CORE_AV_PB_CTRL1			HDMI_CORE_AV_REG(0xF8)
#define HDMI_CORE_AV_PB_CTRL2			HDMI_CORE_AV_REG(0xFC)
#define HDMI_CORE_AV_AVI_TYPE			HDMI_CORE_AV_REG(0x100)
#define HDMI_CORE_AV_AVI_VERS			HDMI_CORE_AV_REG(0x104)
#define HDMI_CORE_AV_AVI_LEN			HDMI_CORE_AV_REG(0x108)
#define HDMI_CORE_AV_AVI_CHSUM			HDMI_CORE_AV_REG(0x10C)
#define HDMI_CORE_AV_AVI_DBYTE(n)		HDMI_CORE_AV_REG(n * 4 + 0x110)
#define HDMI_CORE_AV_AVI_DBYTE_NELEMS		HDMI_CORE_AV_REG(15)
#define HDMI_CORE_AV_SPD_DBYTE			HDMI_CORE_AV_REG(0x190)
#define HDMI_CORE_AV_SPD_DBYTE_NELEMS		HDMI_CORE_AV_REG(27)
#define HDMI_CORE_AV_AUD_DBYTE(n)		HDMI_CORE_AV_REG(n * 4 + 0x210)
#define HDMI_CORE_AV_AUD_DBYTE_NELEMS		HDMI_CORE_AV_REG(10)
#define HDMI_CORE_AV_MPEG_DBYTE			HDMI_CORE_AV_REG(0x290)
#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS		HDMI_CORE_AV_REG(27)
#define HDMI_CORE_AV_GEN_DBYTE			HDMI_CORE_AV_REG(0x300)
#define HDMI_CORE_AV_GEN_DBYTE_NELEMS		HDMI_CORE_AV_REG(31)
#define HDMI_CORE_AV_GEN2_DBYTE			HDMI_CORE_AV_REG(0x380)
#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS		HDMI_CORE_AV_REG(31)
#define HDMI_CORE_AV_ACR_CTRL			HDMI_CORE_AV_REG(0x4)
#define HDMI_CORE_AV_FREQ_SVAL			HDMI_CORE_AV_REG(0x8)
#define HDMI_CORE_AV_N_SVAL1			HDMI_CORE_AV_REG(0xC)
#define HDMI_CORE_AV_N_SVAL2			HDMI_CORE_AV_REG(0x10)
#define HDMI_CORE_AV_N_SVAL3			HDMI_CORE_AV_REG(0x14)
#define HDMI_CORE_AV_CTS_SVAL1			HDMI_CORE_AV_REG(0x18)
#define HDMI_CORE_AV_CTS_SVAL2			HDMI_CORE_AV_REG(0x1C)
#define HDMI_CORE_AV_CTS_SVAL3			HDMI_CORE_AV_REG(0x20)
#define HDMI_CORE_AV_CTS_HVAL1			HDMI_CORE_AV_REG(0x24)
#define HDMI_CORE_AV_CTS_HVAL2			HDMI_CORE_AV_REG(0x28)
#define HDMI_CORE_AV_CTS_HVAL3			HDMI_CORE_AV_REG(0x2C)
#define HDMI_CORE_AV_AUD_MODE			HDMI_CORE_AV_REG(0x50)
#define HDMI_CORE_AV_SPDIF_CTRL			HDMI_CORE_AV_REG(0x54)
#define HDMI_CORE_AV_HW_SPDIF_FS		HDMI_CORE_AV_REG(0x60)
#define HDMI_CORE_AV_SWAP_I2S			HDMI_CORE_AV_REG(0x64)
#define HDMI_CORE_AV_SPDIF_ERTH			HDMI_CORE_AV_REG(0x6C)
#define HDMI_CORE_AV_I2S_IN_MAP			HDMI_CORE_AV_REG(0x70)
#define HDMI_CORE_AV_I2S_IN_CTRL		HDMI_CORE_AV_REG(0x74)
#define HDMI_CORE_AV_I2S_CHST0			HDMI_CORE_AV_REG(0x78)
#define HDMI_CORE_AV_I2S_CHST1			HDMI_CORE_AV_REG(0x7C)
#define HDMI_CORE_AV_I2S_CHST2			HDMI_CORE_AV_REG(0x80)
#define HDMI_CORE_AV_I2S_CHST4			HDMI_CORE_AV_REG(0x84)
#define HDMI_CORE_AV_I2S_CHST5			HDMI_CORE_AV_REG(0x88)
#define HDMI_CORE_AV_ASRC			HDMI_CORE_AV_REG(0x8C)
#define HDMI_CORE_AV_I2S_IN_LEN			HDMI_CORE_AV_REG(0x90)
#define HDMI_CORE_AV_AUDO_TXSTAT		HDMI_CORE_AV_REG(0xC0)
#define HDMI_CORE_AV_AUD_PAR_BUSCLK_1		HDMI_CORE_AV_REG(0xCC)
#define HDMI_CORE_AV_AUD_PAR_BUSCLK_2		HDMI_CORE_AV_REG(0xD0)
#define HDMI_CORE_AV_AUD_PAR_BUSCLK_3		HDMI_CORE_AV_REG(0xD4)
#define HDMI_CORE_AV_TEST_TXCTRL		HDMI_CORE_AV_REG(0xF0)
#define HDMI_CORE_AV_SPD_TYPE			HDMI_CORE_AV_REG(0x180)
#define HDMI_CORE_AV_SPD_VERS			HDMI_CORE_AV_REG(0x184)
#define HDMI_CORE_AV_SPD_LEN			HDMI_CORE_AV_REG(0x188)
#define HDMI_CORE_AV_SPD_CHSUM			HDMI_CORE_AV_REG(0x18C)
#define HDMI_CORE_AV_AUDIO_TYPE			HDMI_CORE_AV_REG(0x200)
#define HDMI_CORE_AV_AUDIO_VERS			HDMI_CORE_AV_REG(0x204)
#define HDMI_CORE_AV_AUDIO_LEN			HDMI_CORE_AV_REG(0x208)
#define HDMI_CORE_AV_AUDIO_CHSUM		HDMI_CORE_AV_REG(0x20C)
#define HDMI_CORE_AV_MPEG_TYPE			HDMI_CORE_AV_REG(0x280)
#define HDMI_CORE_AV_MPEG_VERS			HDMI_CORE_AV_REG(0x284)
#define HDMI_CORE_AV_MPEG_LEN			HDMI_CORE_AV_REG(0x288)
#define HDMI_CORE_AV_MPEG_CHSUM			HDMI_CORE_AV_REG(0x28C)
#define HDMI_CORE_AV_CP_BYTE1			HDMI_CORE_AV_REG(0x37C)
#define HDMI_CORE_AV_CEC_ADDR_ID		HDMI_CORE_AV_REG(0x3FC)
/* Element sizes (bytes) of the infoframe data-byte register arrays */
#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE		0x4
#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE		0x4
#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE		0x4
#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE		0x4
176 | |||
/* PLL control (register offsets relative to HDMI_PLLCTRL) */
#define HDMI_PLL_REG(idx)			HDMI_REG(HDMI_PLLCTRL + idx)

#define PLLCTRL_PLL_CONTROL			HDMI_PLL_REG(0x0)
#define PLLCTRL_PLL_STATUS			HDMI_PLL_REG(0x4)
#define PLLCTRL_PLL_GO				HDMI_PLL_REG(0x8)
#define PLLCTRL_CFG1				HDMI_PLL_REG(0xC)
#define PLLCTRL_CFG2				HDMI_PLL_REG(0x10)
#define PLLCTRL_CFG3				HDMI_PLL_REG(0x14)
#define PLLCTRL_CFG4				HDMI_PLL_REG(0x20)

/* HDMI PHY (register offsets relative to HDMI_PHY) */
#define HDMI_PHY_REG(idx)			HDMI_REG(HDMI_PHY + idx)

#define HDMI_TXPHY_TX_CTRL			HDMI_PHY_REG(0x0)
#define HDMI_TXPHY_DIGITAL_CTRL		HDMI_PHY_REG(0x4)
#define HDMI_TXPHY_POWER_CTRL			HDMI_PHY_REG(0x8)
#define HDMI_TXPHY_PAD_CFG_CTRL		HDMI_PHY_REG(0xC)

/* HDMI EDID Length and block layout constants */
#define HDMI_EDID_MAX_LENGTH			256
#define EDID_TIMING_DESCRIPTOR_SIZE		0x12
#define EDID_DESCRIPTOR_BLOCK0_ADDRESS		0x36
#define EDID_DESCRIPTOR_BLOCK1_ADDRESS		0x80
#define EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR	4
#define EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR	4

/* Number of entries in the driver's static timings table */
#define OMAP_HDMI_TIMINGS_NB			34

/* Read-modify-write helper: set bits [start:end] of register idx to val */
#define REG_FLD_MOD(idx, val, start, end) \
	hdmi_write_reg(idx, FLD_MOD(hdmi_read_reg(idx), val, start, end))
/* Extract bits [start:end] of register idx */
#define REG_GET(idx, start, end) \
	FLD_GET(hdmi_read_reg(idx), start, end)
210 | |||
/*
 * HDMI timing structure: standard video timings plus the sync polarities
 * which omap_video_timings does not carry.
 */
struct hdmi_timings {
	struct omap_video_timings timings;
	int vsync_pol;	/* vertical sync polarity */
	int hsync_pol;	/* horizontal sync polarity */
};
217 | |||
/* PHY power-state commands (presumably written via HDMI_WP_PWR_CTRL —
 * TODO confirm against the .c file) */
enum hdmi_phy_pwr {
	HDMI_PHYPWRCMD_OFF = 0,
	HDMI_PHYPWRCMD_LDOON = 1,
	HDMI_PHYPWRCMD_TXON = 2
};

/* PLL power-state commands */
enum hdmi_pll_pwr {
	HDMI_PLLPWRCMD_ALLOFF = 0,
	HDMI_PLLPWRCMD_PLLONLY = 1,
	HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
	HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
};

/* PLL reference clock selection */
enum hdmi_clk_refsel {
	HDMI_REFSEL_PCLK = 0,
	HDMI_REFSEL_REF1 = 1,
	HDMI_REFSEL_REF2 = 2,
	HDMI_REFSEL_SYSCLK = 3
};

/* Input bus width of the core video path */
enum hdmi_core_inputbus_width {
	HDMI_INPUT_8BIT = 0,
	HDMI_INPUT_10BIT = 1,
	HDMI_INPUT_12BIT = 2
};

/* Output dithering / truncation mode */
enum hdmi_core_dither_trunc {
	HDMI_OUTPUTTRUNCATION_8BIT = 0,
	HDMI_OUTPUTTRUNCATION_10BIT = 1,
	HDMI_OUTPUTTRUNCATION_12BIT = 2,
	HDMI_OUTPUTDITHER_8BIT = 3,
	HDMI_OUTPUTDITHER_10BIT = 4,
	HDMI_OUTPUTDITHER_12BIT = 5
};

/* Deep-color packet enable (note: "PACKECT" typo is kept — the names
 * are part of the interface used elsewhere in the driver) */
enum hdmi_core_deepcolor_ed {
	HDMI_DEEPCOLORPACKECTDISABLE = 0,
	HDMI_DEEPCOLORPACKECTENABLE = 1
};

/* Bits-per-pixel packing mode of the core */
enum hdmi_core_packet_mode {
	HDMI_PACKETMODERESERVEDVALUE = 0,
	HDMI_PACKETMODE24BITPERPIXEL = 4,
	HDMI_PACKETMODE30BITPERPIXEL = 5,
	HDMI_PACKETMODE36BITPERPIXEL = 6,
	HDMI_PACKETMODE48BITPERPIXEL = 7
};

/* Output signalling: plain DVI or full HDMI */
enum hdmi_core_hdmi_dvi {
	HDMI_DVI = 0,
	HDMI_HDMI = 1
};

/* TMDS clock multiplier relative to the input pixel clock (IDCK) */
enum hdmi_core_tclkselclkmult {
	HDMI_FPLL05IDCK = 0,
	HDMI_FPLL10IDCK = 1,
	HDMI_FPLL20IDCK = 2,
	HDMI_FPLL40IDCK = 3
};

/* Per-packet enable / repeat-on-every-frame flags */
enum hdmi_core_packet_ctrl {
	HDMI_PACKETENABLE = 1,
	HDMI_PACKETDISABLE = 0,
	HDMI_PACKETREPEATON = 1,
	HDMI_PACKETREPEATOFF = 0
};
284 | |||
/*
 * INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions.
 * Field encodings for the AVI and Audio infoframe data bytes; the
 * DBnX suffix names the data byte (DBn) and bit field (X) as laid out
 * in the HDMI specification (see struct hdmi_core_infoframe_avi below).
 */
enum hdmi_core_infoframe {
	HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
	HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
	HDMI_INFOFRAME_AVI_DB1Y_YUV444 = 2,
	HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0,
	HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON = 1,
	HDMI_INFOFRAME_AVI_DB1B_NO = 0,
	HDMI_INFOFRAME_AVI_DB1B_VERT = 1,
	HDMI_INFOFRAME_AVI_DB1B_HORI = 2,
	HDMI_INFOFRAME_AVI_DB1B_VERTHORI = 3,
	HDMI_INFOFRAME_AVI_DB1S_0 = 0,
	HDMI_INFOFRAME_AVI_DB1S_1 = 1,
	HDMI_INFOFRAME_AVI_DB1S_2 = 2,
	HDMI_INFOFRAME_AVI_DB2C_NO = 0,
	HDMI_INFOFRAME_AVI_DB2C_ITU601 = 1,
	HDMI_INFOFRAME_AVI_DB2C_ITU709 = 2,
	HDMI_INFOFRAME_AVI_DB2C_EC_EXTENDED = 3,
	HDMI_INFOFRAME_AVI_DB2M_NO = 0,
	HDMI_INFOFRAME_AVI_DB2M_43 = 1,
	HDMI_INFOFRAME_AVI_DB2M_169 = 2,
	HDMI_INFOFRAME_AVI_DB2R_SAME = 8,
	HDMI_INFOFRAME_AVI_DB2R_43 = 9,
	HDMI_INFOFRAME_AVI_DB2R_169 = 10,
	HDMI_INFOFRAME_AVI_DB2R_149 = 11,
	HDMI_INFOFRAME_AVI_DB3ITC_NO = 0,
	HDMI_INFOFRAME_AVI_DB3ITC_YES = 1,
	HDMI_INFOFRAME_AVI_DB3EC_XVYUV601 = 0,
	HDMI_INFOFRAME_AVI_DB3EC_XVYUV709 = 1,
	HDMI_INFOFRAME_AVI_DB3Q_DEFAULT = 0,
	HDMI_INFOFRAME_AVI_DB3Q_LR = 1,
	HDMI_INFOFRAME_AVI_DB3Q_FR = 2,
	HDMI_INFOFRAME_AVI_DB3SC_NO = 0,
	HDMI_INFOFRAME_AVI_DB3SC_HORI = 1,
	HDMI_INFOFRAME_AVI_DB3SC_VERT = 2,
	HDMI_INFOFRAME_AVI_DB3SC_HORIVERT = 3,
	HDMI_INFOFRAME_AVI_DB5PR_NO = 0,
	HDMI_INFOFRAME_AVI_DB5PR_2 = 1,
	HDMI_INFOFRAME_AVI_DB5PR_3 = 2,
	HDMI_INFOFRAME_AVI_DB5PR_4 = 3,
	HDMI_INFOFRAME_AVI_DB5PR_5 = 4,
	HDMI_INFOFRAME_AVI_DB5PR_6 = 5,
	HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
	HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
	HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
	HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
	HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0,
	HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1,
	HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2,
	HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3,
	HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4,
	HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5,
	HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 6,
	HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7,
	HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8,
	HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9,
	HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10,
	HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11,
	HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12,
	HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13,
	HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14,
	HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0,
	HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1,
	HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2,
	HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3,
	HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4,
	HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5,
	HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6,
	HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7,
	HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0,
	HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1,
	HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2,
	HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3,
	HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0,
	HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1
};
361 | |||
/* Deep-color packing modes of the wrapper video path */
enum hdmi_packing_mode {
	HDMI_PACK_10b_RGB_YUV444 = 0,
	HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
	HDMI_PACK_20b_YUV422 = 2,
	HDMI_PACK_ALREADYPACKED = 7
};

/* Audio sample-frequency field encodings (hardware register values,
 * not IEC channel-status codes — note the non-monotonic numbering) */
enum hdmi_core_audio_sample_freq {
	HDMI_AUDIO_FS_32000 = 0x3,
	HDMI_AUDIO_FS_44100 = 0x0,
	HDMI_AUDIO_FS_48000 = 0x2,
	HDMI_AUDIO_FS_88200 = 0x8,
	HDMI_AUDIO_FS_96000 = 0xA,
	HDMI_AUDIO_FS_176400 = 0xC,
	HDMI_AUDIO_FS_192000 = 0xE,
	HDMI_AUDIO_FS_NOT_INDICATED = 0x1
};

/* Audio sample layout: 2-channel stereo or 8-channel */
enum hdmi_core_audio_layout {
	HDMI_AUDIO_LAYOUT_2CH = 0,
	HDMI_AUDIO_LAYOUT_8CH = 1
};

/* CTS (Cycle Time Stamp) generation: hardware or software computed */
enum hdmi_core_cts_mode {
	HDMI_AUDIO_CTS_MODE_HW = 0,
	HDMI_AUDIO_CTS_MODE_SW = 1
};

enum hdmi_stereo_channels {
	HDMI_AUDIO_STEREO_NOCHANNELS = 0,
	HDMI_AUDIO_STEREO_ONECHANNEL = 1,
	HDMI_AUDIO_STEREO_TWOCHANNELS = 2,
	HDMI_AUDIO_STEREO_THREECHANNELS = 3,
	HDMI_AUDIO_STEREO_FOURCHANNELS = 4
};

enum hdmi_audio_type {
	HDMI_AUDIO_TYPE_LPCM = 0,
	HDMI_AUDIO_TYPE_IEC = 1
};

enum hdmi_audio_justify {
	HDMI_AUDIO_JUSTIFY_LEFT = 0,
	HDMI_AUDIO_JUSTIFY_RIGHT = 1
};

enum hdmi_audio_sample_order {
	HDMI_AUDIO_SAMPLE_RIGHT_FIRST = 0,
	HDMI_AUDIO_SAMPLE_LEFT_FIRST = 1
};

enum hdmi_audio_samples_perword {
	HDMI_AUDIO_ONEWORD_ONESAMPLE = 0,
	HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1
};

enum hdmi_audio_sample_size {
	HDMI_AUDIO_SAMPLE_16BITS = 0,
	HDMI_AUDIO_SAMPLE_24BITS = 1
};

/* How audio samples reach the wrapper FIFO: DMA or interrupt driven */
enum hdmi_audio_transf_mode {
	HDMI_AUDIO_TRANSF_DMA = 0,
	HDMI_AUDIO_TRANSF_IRQ = 1
};

enum hdmi_audio_blk_strt_end_sig {
	HDMI_AUDIO_BLOCK_SIG_STARTEND_ON = 0,
	HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF = 1
};

/* I2S input configuration field values; several fields share this enum,
 * which is why some numeric values repeat */
enum hdmi_audio_i2s_config {
	HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0,
	HDMI_AUDIO_I2S_WS_POLARIT_YLOW_IS_RIGHT = 1,
	HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
	HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
	HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0,
	HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1,
	HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0,
	HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1,
	HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6,
	HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2,
	HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4,
	HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5,
	HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1,
	HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6,
	HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2,
	HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4,
	HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5,
	HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
	HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
	HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
	HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
	HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0,
	HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2,
	HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12,
	HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4,
	HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8,
	HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10,
	HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13,
	HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5,
	HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9,
	HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11,
	HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
	HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
	/* Serial-data line enables: a bitmask, one bit per SD line */
	HDMI_AUDIO_I2S_SD0_EN = 1,
	HDMI_AUDIO_I2S_SD1_EN = 1 << 1,
	HDMI_AUDIO_I2S_SD2_EN = 1 << 2,
	HDMI_AUDIO_I2S_SD3_EN = 1 << 3,
};

/* Master clock rate as a multiple of the audio sample rate (fs) */
enum hdmi_audio_mclk_mode {
	HDMI_AUDIO_MCLK_128FS = 0,
	HDMI_AUDIO_MCLK_256FS = 1,
	HDMI_AUDIO_MCLK_384FS = 2,
	HDMI_AUDIO_MCLK_512FS = 3,
	HDMI_AUDIO_MCLK_768FS = 4,
	HDMI_AUDIO_MCLK_1024FS = 5,
	HDMI_AUDIO_MCLK_1152FS = 6,
	HDMI_AUDIO_MCLK_192FS = 7
};
483 | |||
/* Aggregate configuration for the HDMI core video path */
struct hdmi_core_video_config {
	enum hdmi_core_inputbus_width	ip_bus_width;
	enum hdmi_core_dither_trunc	op_dither_truc;
	enum hdmi_core_deepcolor_ed	deep_color_pkt;
	enum hdmi_core_packet_mode	pkt_mode;
	enum hdmi_core_hdmi_dvi		hdmi_dvi;
	enum hdmi_core_tclkselclkmult	tclk_sel_clkmult;
};

/*
 * Refer to section 8.2 in HDMI 1.3 specification for
 * details about infoframe databytes
 */
struct hdmi_core_infoframe_avi {
	u8	db1_format;
		/* Y0, Y1 rgb,yCbCr */
	u8	db1_active_info;
		/* A0  Active information Present */
	u8	db1_bar_info_dv;
		/* B0, B1 Bar info data valid */
	u8	db1_scan_info;
		/* S0, S1 scan information */
	u8	db2_colorimetry;
		/* C0, C1 colorimetry */
	u8	db2_aspect_ratio;
		/* M0, M1 Aspect ratio (4:3, 16:9) */
	u8	db2_active_fmt_ar;
		/* R0...R3 Active format aspect ratio */
	u8	db3_itc;
		/* ITC IT content. */
	u8	db3_ec;
		/* EC0, EC1, EC2 Extended colorimetry */
	u8	db3_q_range;
		/* Q1, Q0 Quantization range */
	u8	db3_nup_scaling;
		/* SC1, SC0 Non-uniform picture scaling */
	u8	db4_videocode;
		/* VIC0..6 Video format identification */
	u8	db5_pixel_repeat;
		/* PR0..PR3 Pixel repetition factor */
	u16	db6_7_line_eoftop;
		/* Line number end of top bar */
	u16	db8_9_line_sofbottom;
		/* Line number start of bottom bar */
	u16	db10_11_pixel_eofleft;
		/* Pixel number end of left bar */
	u16	db12_13_pixel_sofright;
		/* Pixel number start of right bar */
};
/*
 * Refer to section 8.2 in HDMI 1.3 specification for
 * details about infoframe databytes
 */
struct hdmi_core_infoframe_audio {
	u8	db1_coding_type;
	u8	db1_channel_count;
	u8	db2_sample_freq;
	u8	db2_sample_size;
	u8	db4_channel_alloc;
	bool	db5_downmix_inh;
	u8	db5_lsv;	/* Level shift values for downmix */
};

/* Enable/repeat flags for each packet type the core can transmit */
struct hdmi_core_packet_enable_repeat {
	u32	audio_pkt;
	u32	audio_pkt_repeat;
	u32	avi_infoframe;
	u32	avi_infoframe_repeat;
	u32	gen_cntrl_pkt;
	u32	gen_cntrl_pkt_repeat;
	u32	generic_pkt;
	u32	generic_pkt_repeat;
};

struct hdmi_video_format {
	enum hdmi_packing_mode	packing_mode;
	u32			y_res;	/* Line per panel */
	u32			x_res;	/* pixel per line */
};

struct hdmi_video_interface {
	int	vsp;	/* Vsync polarity */
	int	hsp;	/* Hsync polarity */
	int	interlacing;
	int	tm;	/* Timing mode */
};

/* CEA code and DVI/HDMI mode pair identifying a display timing */
struct hdmi_cm {
	int	code;
	int	mode;
};

struct hdmi_config {
	struct hdmi_timings	timings;
	u16			interlace;
	struct hdmi_cm		cm;
};

/* Audio sample format seen by the wrapper */
struct hdmi_audio_format {
	enum hdmi_stereo_channels		stereo_channels;
	u8					active_chnnls_msk;
	enum hdmi_audio_type			type;
	enum hdmi_audio_justify			justification;
	enum hdmi_audio_sample_order		sample_order;
	enum hdmi_audio_samples_perword		samples_per_word;
	enum hdmi_audio_sample_size		sample_size;
	enum hdmi_audio_blk_strt_end_sig	en_sig_blk_strt_end;
};

/* DMA settings for feeding the wrapper audio FIFO */
struct hdmi_audio_dma {
	u8				transfer_size;
	u8				block_size;
	enum hdmi_audio_transf_mode	mode;
	u16				fifo_threshold;
};

/* I2S input configuration for the core audio path */
struct hdmi_core_audio_i2s_config {
	u8	word_max_length;
	u8	word_length;
	u8	in_length_bits;
	u8	justification;
	u8	en_high_bitrate_aud;
	u8	sck_edge_mode;
	u8	cbit_order;
	u8	vbit;
	u8	ws_polarity;
	u8	direction;
	u8	shift;
	u8	active_sds;
};

/* Top-level core audio configuration (ACR, clocking, input selection) */
struct hdmi_core_audio_config {
	struct hdmi_core_audio_i2s_config	i2s_cfg;
	enum hdmi_core_audio_sample_freq	freq_sample;
	bool					fs_override;
	u32					n;	/* ACR N parameter */
	u32					cts;	/* ACR CTS parameter */
	u32					aud_par_busclk;
	enum hdmi_core_audio_layout		layout;
	enum hdmi_core_cts_mode			cts_mode;
	bool					use_mclk;
	enum hdmi_audio_mclk_mode		mclk_mode;
	bool					en_acr_pkt;
	bool					en_dsd_audio;
	bool					en_parallel_aud_input;
	bool					en_spdif;
};
#endif
diff --git a/drivers/video/omap2/dss/hdmi_omap4_panel.c b/drivers/video/omap2/dss/hdmi_omap4_panel.c new file mode 100644 index 00000000000..7d4f2bd7c50 --- /dev/null +++ b/drivers/video/omap2/dss/hdmi_omap4_panel.c | |||
@@ -0,0 +1,222 @@ | |||
1 | /* | ||
2 | * hdmi_omap4_panel.c | ||
3 | * | ||
4 | * HDMI library support functions for TI OMAP4 processors. | ||
5 | * | ||
6 | * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ | ||
7 | * Authors: Mythri P k <mythripk@ti.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License version 2 as published by | ||
11 | * the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along with | ||
19 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/err.h> | ||
24 | #include <linux/io.h> | ||
25 | #include <linux/mutex.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <video/omapdss.h> | ||
28 | |||
29 | #include "dss.h" | ||
30 | |||
31 | static struct { | ||
32 | struct mutex hdmi_lock; | ||
33 | } hdmi; | ||
34 | |||
35 | |||
/*
 * Panel probe: set the default LCD config bits and seed the panel
 * timings with 640x480.  The real TV timings are programmed only when
 * the display is enabled.
 */
static int hdmi_panel_probe(struct omap_dss_device *dssdev)
{
	DSSDBG("ENTER hdmi_panel_probe\n");

	dssdev->panel.config = OMAP_DSS_LCD_TFT |
			OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IHS;

	/*
	 * Initialize the timings to 640 * 480
	 * This is only for framebuffer update not for TV timing setting
	 * Setting TV timing will be done only on enable
	 */
	dssdev->panel.timings.x_res = 640;
	dssdev->panel.timings.y_res = 480;

	DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n",
		dssdev->panel.timings.x_res,
		dssdev->panel.timings.y_res);
	return 0;
}
56 | |||
/* Nothing to undo: probe() only wrote defaults into *dssdev. */
static void hdmi_panel_remove(struct omap_dss_device *dssdev)
{
}
61 | |||
/*
 * Power on the HDMI output.  Only legal from the DISABLED state;
 * returns -EINVAL otherwise.  Serialized against the other panel ops
 * by hdmi.hdmi_lock.
 */
static int hdmi_panel_enable(struct omap_dss_device *dssdev)
{
	int r = 0;
	DSSDBG("ENTER hdmi_panel_enable\n");

	mutex_lock(&hdmi.hdmi_lock);

	if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
		r = -EINVAL;
		goto err;
	}

	r = omapdss_hdmi_display_enable(dssdev);
	if (r) {
		DSSERR("failed to power on\n");
		goto err;
	}

	/* Only advance the state once the hardware is actually up. */
	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;

err:
	mutex_unlock(&hdmi.hdmi_lock);

	return r;
}
87 | |||
/*
 * Power off the HDMI output.  The hardware is only touched when the
 * display is ACTIVE, but the state is unconditionally forced to
 * DISABLED (this also cancels a SUSPENDED state).
 */
static void hdmi_panel_disable(struct omap_dss_device *dssdev)
{
	mutex_lock(&hdmi.hdmi_lock);

	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
		omapdss_hdmi_display_disable(dssdev);

	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;

	mutex_unlock(&hdmi.hdmi_lock);
}
99 | |||
/*
 * Suspend: move an ACTIVE display to SUSPENDED and power the output
 * down.  Returns -EINVAL if the display was not active.
 */
static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
{
	int r = 0;

	mutex_lock(&hdmi.hdmi_lock);

	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
		r = -EINVAL;
		goto err;
	}

	/* State is flipped before the power-down; both happen under the
	 * lock, so no other panel op can observe the intermediate step. */
	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;

	omapdss_hdmi_display_disable(dssdev);

err:
	mutex_unlock(&hdmi.hdmi_lock);

	return r;
}
120 | |||
/*
 * Resume: re-enable a SUSPENDED display.  Mirrors hdmi_panel_enable()
 * but requires the SUSPENDED state instead of DISABLED.
 */
static int hdmi_panel_resume(struct omap_dss_device *dssdev)
{
	int r = 0;

	mutex_lock(&hdmi.hdmi_lock);

	if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
		r = -EINVAL;
		goto err;
	}

	r = omapdss_hdmi_display_enable(dssdev);
	if (r) {
		DSSERR("failed to power on\n");
		goto err;
	}

	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;

err:
	mutex_unlock(&hdmi.hdmi_lock);

	return r;
}
145 | |||
/* Copy the current panel timings to the caller under the panel lock. */
static void hdmi_get_timings(struct omap_dss_device *dssdev,
			struct omap_video_timings *timings)
{
	mutex_lock(&hdmi.hdmi_lock);

	*timings = dssdev->panel.timings;

	mutex_unlock(&hdmi.hdmi_lock);
}
155 | |||
/*
 * Store new panel timings and, if the display is live, cycle the HDMI
 * output so the new timings take effect.
 */
static void hdmi_set_timings(struct omap_dss_device *dssdev,
			struct omap_video_timings *timings)
{
	DSSDBG("hdmi_set_timings\n");

	mutex_lock(&hdmi.hdmi_lock);

	dssdev->panel.timings = *timings;

	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
		/* turn the hdmi off and on to get new timings to use */
		/* NOTE(review): no explicit re-enable here - presumably
		 * omapdss_hdmi_display_set_timing() powers the link back
		 * up; confirm against the dss core. */
		omapdss_hdmi_display_disable(dssdev);
		omapdss_hdmi_display_set_timing(dssdev);
	}

	mutex_unlock(&hdmi.hdmi_lock);
}
173 | |||
174 | static int hdmi_check_timings(struct omap_dss_device *dssdev, | ||
175 | struct omap_video_timings *timings) | ||
176 | { | ||
177 | int r = 0; | ||
178 | |||
179 | DSSDBG("hdmi_check_timings\n"); | ||
180 | |||
181 | mutex_lock(&hdmi.hdmi_lock); | ||
182 | |||
183 | r = omapdss_hdmi_display_check_timing(dssdev, timings); | ||
184 | if (r) { | ||
185 | DSSERR("Timing cannot be applied\n"); | ||
186 | goto err; | ||
187 | } | ||
188 | err: | ||
189 | mutex_unlock(&hdmi.hdmi_lock); | ||
190 | return r; | ||
191 | } | ||
192 | |||
/* omapdss driver hooks for the "hdmi_panel" display device. */
static struct omap_dss_driver hdmi_driver = {
	.probe		= hdmi_panel_probe,
	.remove		= hdmi_panel_remove,
	.enable		= hdmi_panel_enable,
	.disable	= hdmi_panel_disable,
	.suspend	= hdmi_panel_suspend,
	.resume		= hdmi_panel_resume,
	.get_timings	= hdmi_get_timings,
	.set_timings	= hdmi_set_timings,
	.check_timings	= hdmi_check_timings,
	.driver			= {
		.name   = "hdmi_panel",
		.owner  = THIS_MODULE,
	},
};
208 | |||
/*
 * Module-level init: prepare the panel lock and register the driver
 * with the omapdss core.  Called from the dss core, not an initcall.
 */
int hdmi_panel_init(void)
{
	mutex_init(&hdmi.hdmi_lock);

	omap_dss_register_driver(&hdmi_driver);

	return 0;
}
217 | |||
218 | void hdmi_panel_exit(void) | ||
219 | { | ||
220 | omap_dss_unregister_driver(&hdmi_driver); | ||
221 | |||
222 | } | ||
diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c new file mode 100644 index 00000000000..9441e2eb3de --- /dev/null +++ b/drivers/video/omap2/vram.c | |||
@@ -0,0 +1,659 @@ | |||
1 | /* | ||
2 | * VRAM manager for OMAP | ||
3 | * | ||
4 | * Copyright (C) 2009 Nokia Corporation | ||
5 | * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along | ||
17 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | /*#define DEBUG*/ | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/list.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/seq_file.h> | ||
28 | #include <linux/memblock.h> | ||
29 | #include <linux/completion.h> | ||
30 | #include <linux/debugfs.h> | ||
31 | #include <linux/jiffies.h> | ||
32 | #include <linux/module.h> | ||
33 | |||
34 | #include <asm/setup.h> | ||
35 | |||
36 | #include <plat/sram.h> | ||
37 | #include <plat/vram.h> | ||
38 | #include <plat/dma.h> | ||
39 | |||
40 | #ifdef DEBUG | ||
41 | #define DBG(format, ...) pr_debug("VRAM: " format, ## __VA_ARGS__) | ||
42 | #else | ||
43 | #define DBG(format, ...) | ||
44 | #endif | ||
45 | |||
#define OMAP2_SRAM_START		0x40200000
/* Maximum size, in reality this is smaller if SRAM is partially locked. */
#define OMAP2_SRAM_SIZE			0xa0000		/* 640k */

/* postponed regions are used to temporarily store region information at boot
 * time when we cannot yet allocate the region list */
#define MAX_POSTPONED_REGIONS 10

/* Set once omap_vram_init() has run and regions can be kmalloc'd. */
static bool vram_initialized;
static int postponed_cnt;
static struct {
	unsigned long paddr;
	size_t size;
} postponed_regions[MAX_POSTPONED_REGIONS];

/* One allocation inside a region; kept sorted by paddr on the owning
 * region's alloc_list (see omap_vram_create_allocation()). */
struct vram_alloc {
	struct list_head list;
	unsigned long paddr;
	unsigned pages;
};

/* One contiguous chunk of physical memory managed by the VRAM code. */
struct vram_region {
	struct list_head list;		/* link on region_list */
	struct list_head alloc_list;	/* allocations inside this region */
	unsigned long paddr;
	unsigned pages;
};

/* Protects region_list and every alloc_list hanging off it. */
static DEFINE_MUTEX(region_mutex);
static LIST_HEAD(region_list);
76 | |||
77 | static inline int region_mem_type(unsigned long paddr) | ||
78 | { | ||
79 | if (paddr >= OMAP2_SRAM_START && | ||
80 | paddr < OMAP2_SRAM_START + OMAP2_SRAM_SIZE) | ||
81 | return OMAP_VRAM_MEMTYPE_SRAM; | ||
82 | else | ||
83 | return OMAP_VRAM_MEMTYPE_SDRAM; | ||
84 | } | ||
85 | |||
86 | static struct vram_region *omap_vram_create_region(unsigned long paddr, | ||
87 | unsigned pages) | ||
88 | { | ||
89 | struct vram_region *rm; | ||
90 | |||
91 | rm = kzalloc(sizeof(*rm), GFP_KERNEL); | ||
92 | |||
93 | if (rm) { | ||
94 | INIT_LIST_HEAD(&rm->alloc_list); | ||
95 | rm->paddr = paddr; | ||
96 | rm->pages = pages; | ||
97 | } | ||
98 | |||
99 | return rm; | ||
100 | } | ||
101 | |||
#if 0
/* Currently unused: regions are never removed once added. */
static void omap_vram_free_region(struct vram_region *vr)
{
	list_del(&vr->list);
	kfree(vr);
}
#endif
109 | |||
/*
 * Insert a new allocation record into vr's alloc_list, keeping the
 * list sorted by physical address.  Returns the record or NULL on OOM.
 * Caller must hold region_mutex.
 */
static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
		unsigned long paddr, unsigned pages)
{
	struct vram_alloc *va;
	struct vram_alloc *new;

	new = kzalloc(sizeof(*va), GFP_KERNEL);

	if (!new)
		return NULL;

	new->paddr = paddr;
	new->pages = pages;

	/* Find the first existing allocation above the new one. */
	list_for_each_entry(va, &vr->alloc_list, list) {
		if (va->paddr > new->paddr)
			break;
	}

	/*
	 * Insert before 'va'.  If the loop ran to completion, 'va' is the
	 * container of the list head and the entry lands at the tail -
	 * this deliberately relies on list_for_each_entry()'s exit value.
	 */
	list_add_tail(&new->list, &va->list);

	return new;
}
133 | |||
/* Unlink and free one allocation record.  Caller holds region_mutex. */
static void omap_vram_free_allocation(struct vram_alloc *va)
{
	list_del(&va->list);
	kfree(va);
}
139 | |||
/*
 * Register a physical memory range with the VRAM manager.  Before the
 * core has initialized (arch_initcall), the range is queued in
 * postponed_regions[] and replayed later by omap_vram_init().
 */
int omap_vram_add_region(unsigned long paddr, size_t size)
{
	struct vram_region *rm;
	unsigned pages;

	if (vram_initialized) {
		/* NOTE(review): %d paired with a size_t - should be %zu;
		 * only benign while DBG() compiles out. */
		DBG("adding region paddr %08lx size %d\n",
				paddr, size);

		/* Truncates (not rounds up) to whole pages. */
		size &= PAGE_MASK;
		pages = size >> PAGE_SHIFT;

		rm = omap_vram_create_region(paddr, pages);
		if (rm == NULL)
			return -ENOMEM;

		list_add(&rm->list, &region_list);
	} else {
		if (postponed_cnt == MAX_POSTPONED_REGIONS)
			return -ENOMEM;

		postponed_regions[postponed_cnt].paddr = paddr;
		postponed_regions[postponed_cnt].size = size;

		++postponed_cnt;
	}
	return 0;
}
168 | |||
169 | int omap_vram_free(unsigned long paddr, size_t size) | ||
170 | { | ||
171 | struct vram_region *rm; | ||
172 | struct vram_alloc *alloc; | ||
173 | unsigned start, end; | ||
174 | |||
175 | DBG("free mem paddr %08lx size %d\n", paddr, size); | ||
176 | |||
177 | size = PAGE_ALIGN(size); | ||
178 | |||
179 | mutex_lock(®ion_mutex); | ||
180 | |||
181 | list_for_each_entry(rm, ®ion_list, list) { | ||
182 | list_for_each_entry(alloc, &rm->alloc_list, list) { | ||
183 | start = alloc->paddr; | ||
184 | end = alloc->paddr + (alloc->pages >> PAGE_SHIFT); | ||
185 | |||
186 | if (start >= paddr && end < paddr + size) | ||
187 | goto found; | ||
188 | } | ||
189 | } | ||
190 | |||
191 | mutex_unlock(®ion_mutex); | ||
192 | return -EINVAL; | ||
193 | |||
194 | found: | ||
195 | omap_vram_free_allocation(alloc); | ||
196 | |||
197 | mutex_unlock(®ion_mutex); | ||
198 | return 0; | ||
199 | } | ||
200 | EXPORT_SYMBOL(omap_vram_free); | ||
201 | |||
/*
 * Try to mark the fixed range [paddr, paddr + pages*PAGE_SIZE) as
 * allocated inside one of the registered regions.  Caller must hold
 * region_mutex.  Returns 0 on success, -ENOMEM when the range does not
 * fit in any free gap (or the bookkeeping allocation fails).
 */
static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
{
	struct vram_region *rm;
	struct vram_alloc *alloc;
	size_t size;

	size = pages << PAGE_SHIFT;

	list_for_each_entry(rm, &region_list, list) {
		unsigned long start, end;

		DBG("checking region %lx %d\n", rm->paddr, rm->pages);

		/* SRAM addresses can only come from SRAM regions and
		 * vice versa. */
		if (region_mem_type(rm->paddr) != region_mem_type(paddr))
			continue;

		/* Whole range must lie inside this region at all. */
		start = rm->paddr;
		end = start + (rm->pages << PAGE_SHIFT) - 1;
		if (start > paddr || end < paddr + size - 1)
			continue;

		DBG("block ok, checking allocs\n");

		/* Walk the sorted alloc list; [start, end] tracks each
		 * free gap between consecutive allocations. */
		list_for_each_entry(alloc, &rm->alloc_list, list) {
			end = alloc->paddr - 1;

			if (start <= paddr && end >= paddr + size - 1)
				goto found;

			start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
		}

		/* Final gap: after the last allocation to region end. */
		end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;

		if (!(start <= paddr && end >= paddr + size - 1))
			continue;
found:
		/* Note: 'found' jumps back into the loop body on purpose. */
		DBG("found area start %lx, end %lx\n", start, end);

		if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
			return -ENOMEM;

		return 0;
	}

	return -ENOMEM;
}
249 | |||
/*
 * Reserve a fixed physical range for a driver.  The range is rounded
 * up to whole pages and must fit in a free gap of a registered region.
 */
int omap_vram_reserve(unsigned long paddr, size_t size)
{
	unsigned pages;
	int r;

	/* NOTE(review): %d with size_t - should be %zu. */
	DBG("reserve mem paddr %08lx size %d\n", paddr, size);

	size = PAGE_ALIGN(size);
	pages = size >> PAGE_SHIFT;

	mutex_lock(&region_mutex);

	r = _omap_vram_reserve(paddr, pages);

	mutex_unlock(&region_mutex);

	return r;
}
EXPORT_SYMBOL(omap_vram_reserve);
269 | |||
270 | static void _omap_vram_dma_cb(int lch, u16 ch_status, void *data) | ||
271 | { | ||
272 | struct completion *compl = data; | ||
273 | complete(compl); | ||
274 | } | ||
275 | |||
/*
 * Zero 'pages' pages at physical address 'paddr' using the system DMA
 * engine in constant-fill mode (fill value 0), waiting at most one
 * second for completion.  Returns 0 on success, -EBUSY if no DMA
 * channel is available, -EIO on timeout.
 */
static int _omap_vram_clear(u32 paddr, unsigned pages)
{
	struct completion compl;
	unsigned elem_count;
	unsigned frame_count;
	int r;
	int lch;

	init_completion(&compl);

	r = omap_request_dma(OMAP_DMA_NO_DEVICE, "VRAM DMA",
			_omap_vram_dma_cb,
			&compl, &lch);
	if (r) {
		pr_err("VRAM: request_dma failed for memory clear\n");
		return -EBUSY;
	}

	/* One frame of 32-bit elements covering the whole range. */
	elem_count = pages * PAGE_SIZE / 4;
	frame_count = 1;

	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
			elem_count, frame_count,
			OMAP_DMA_SYNC_ELEMENT,
			0, 0);

	omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
			paddr, 0, 0);

	omap_set_dma_color_mode(lch, OMAP_DMA_CONSTANT_FILL, 0x000000);

	omap_start_dma(lch);

	if (wait_for_completion_timeout(&compl, msecs_to_jiffies(1000)) == 0) {
		omap_stop_dma(lch);
		pr_err("VRAM: dma timeout while clearing memory\n");
		r = -EIO;
		goto err;
	}

	r = 0;
err:
	omap_free_dma(lch);

	return r;
}
322 | |||
/*
 * First-fit allocator: find the first free gap of at least 'pages'
 * pages in a region of memory type 'mtype', record the allocation and
 * return its physical address in *paddr.  The new memory is zeroed via
 * DMA.  Caller must hold region_mutex.
 */
static int _omap_vram_alloc(int mtype, unsigned pages, unsigned long *paddr)
{
	struct vram_region *rm;
	struct vram_alloc *alloc;

	list_for_each_entry(rm, &region_list, list) {
		unsigned long start, end;

		DBG("checking region %lx %d\n", rm->paddr, rm->pages);

		if (region_mem_type(rm->paddr) != mtype)
			continue;

		start = rm->paddr;

		/* Scan the gaps before each existing allocation. */
		list_for_each_entry(alloc, &rm->alloc_list, list) {
			end = alloc->paddr;

			if (end - start >= pages << PAGE_SHIFT)
				goto found;

			start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
		}

		/* Trailing gap after the last allocation. */
		end = rm->paddr + (rm->pages << PAGE_SHIFT);
found:
		/* 'found' jumps back into the loop body on purpose; the
		 * size is re-checked here for the trailing-gap path. */
		if (end - start < pages << PAGE_SHIFT)
			continue;

		DBG("found %lx, end %lx\n", start, end);

		alloc = omap_vram_create_allocation(rm, start, pages);
		if (alloc == NULL)
			return -ENOMEM;

		*paddr = start;

		/* Best effort: a failed DMA clear is not treated as an
		 * allocation failure. */
		_omap_vram_clear(start, pages);

		return 0;
	}

	return -ENOMEM;
}
367 | |||
/*
 * Public allocator entry point: allocate 'size' bytes (rounded up to
 * whole pages) of VRAM of the given memory type.  Returns 0 and the
 * physical address in *paddr, or -ENOMEM.
 */
int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr)
{
	unsigned pages;
	int r;

	BUG_ON(mtype > OMAP_VRAM_MEMTYPE_MAX || !size);

	/* NOTE(review): %d with size_t - should be %zu. */
	DBG("alloc mem type %d size %d\n", mtype, size);

	size = PAGE_ALIGN(size);
	pages = size >> PAGE_SHIFT;

	mutex_lock(&region_mutex);

	r = _omap_vram_alloc(mtype, pages, paddr);

	mutex_unlock(&region_mutex);

	return r;
}
EXPORT_SYMBOL(omap_vram_alloc);
389 | |||
/*
 * Report total VRAM, total free bytes and the largest contiguous free
 * block, summed over all regions.  Relies on each alloc_list being
 * sorted by paddr so the gap between 'pa' and the next allocation is
 * free space.
 */
void omap_vram_get_info(unsigned long *vram,
		unsigned long *free_vram,
		unsigned long *largest_free_block)
{
	struct vram_region *vr;
	struct vram_alloc *va;

	*vram = 0;
	*free_vram = 0;
	*largest_free_block = 0;

	mutex_lock(&region_mutex);

	list_for_each_entry(vr, &region_list, list) {
		unsigned free;
		unsigned long pa;

		pa = vr->paddr;
		*vram += vr->pages << PAGE_SHIFT;

		/* Gap before each allocation is free. */
		list_for_each_entry(va, &vr->alloc_list, list) {
			free = va->paddr - pa;
			*free_vram += free;
			if (free > *largest_free_block)
				*largest_free_block = free;
			pa = va->paddr + (va->pages << PAGE_SHIFT);
		}

		/* Remainder of the region after the last allocation. */
		free = vr->paddr + (vr->pages << PAGE_SHIFT) - pa;
		*free_vram += free;
		if (free > *largest_free_block)
			*largest_free_block = free;
	}

	mutex_unlock(&region_mutex);
}
EXPORT_SYMBOL(omap_vram_get_info);
427 | |||
428 | #if defined(CONFIG_DEBUG_FS) | ||
/*
 * debugfs dump: one line per region, indented lines for the
 * allocations inside it, all as [start-end (bytes)] ranges.
 */
static int vram_debug_show(struct seq_file *s, void *unused)
{
	struct vram_region *vr;
	struct vram_alloc *va;
	unsigned size;

	mutex_lock(&region_mutex);

	list_for_each_entry(vr, &region_list, list) {
		size = vr->pages << PAGE_SHIFT;
		seq_printf(s, "%08lx-%08lx (%d bytes)\n",
				vr->paddr, vr->paddr + size - 1,
				size);

		list_for_each_entry(va, &vr->alloc_list, list) {
			size = va->pages << PAGE_SHIFT;
			seq_printf(s, "    %08lx-%08lx (%d bytes)\n",
					va->paddr, va->paddr + size - 1,
					size);
		}
	}

	mutex_unlock(&region_mutex);

	return 0;
}
455 | |||
/* Standard single_open() wrapper for the debugfs "vram" file. */
static int vram_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vram_debug_show, inode->i_private);
}
460 | |||
/* File operations for /sys/kernel/debug/vram (seq_file based). */
static const struct file_operations vram_debug_fops = {
	.open           = vram_debug_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
467 | |||
/* Create the top-level "vram" debugfs file. */
static int __init omap_vram_create_debugfs(void)
{
	struct dentry *d;

	d = debugfs_create_file("vram", S_IRUGO, NULL,
			NULL, &vram_debug_fops);
	/* NOTE(review): debugfs_create_file() of this era returns NULL on
	 * plain failure and ERR_PTR only when debugfs is disabled, so
	 * IS_ERR() misses the NULL case - confirm against the debugfs
	 * version in use. */
	if (IS_ERR(d))
		return PTR_ERR(d);

	return 0;
}
479 | #endif | ||
480 | |||
/*
 * arch_initcall: flip vram_initialized so omap_vram_add_region() can
 * kmalloc, replay the regions queued before init, and create the
 * debugfs file.
 */
static __init int omap_vram_init(void)
{
	int i;

	vram_initialized = 1;

	for (i = 0; i < postponed_cnt; i++)
		omap_vram_add_region(postponed_regions[i].paddr,
				postponed_regions[i].size);

#ifdef CONFIG_DEBUG_FS
	if (omap_vram_create_debugfs())
		pr_err("VRAM: Failed to create debugfs file\n");
#endif

	return 0;
}

arch_initcall(omap_vram_init);
500 | |||
/* boottime vram alloc stuff */

/* set from board file */
static u32 omap_vram_sram_start __initdata;
static u32 omap_vram_sram_size __initdata;

/* set from board file */
static u32 omap_vram_sdram_start __initdata;
static u32 omap_vram_sdram_size __initdata;

/* set from kernel cmdline */
static u32 omap_vram_def_sdram_size __initdata;
static u32 omap_vram_def_sdram_start __initdata;

/*
 * Parse the "vram=<size>[,<physaddr>]" kernel command line option.
 * The optional start address is parsed as hexadecimal.
 */
static int __init omap_vram_early_vram(char *p)
{
	omap_vram_def_sdram_size = memparse(p, &p);
	if (*p == ',')
		omap_vram_def_sdram_start = simple_strtoul(p + 1, &p, 16);
	return 0;
}
early_param("vram", omap_vram_early_vram);
523 | |||
/*
 * Called from map_io. We need to call to this early enough so that we
 * can reserve the fixed SDRAM regions before VM could get hold of them.
 */
void __init omap_vram_reserve_sdram_memblock(void)
{
	u32 paddr;
	u32 size = 0;

	/* cmdline arg overrides the board file definition */
	if (omap_vram_def_sdram_size) {
		size = omap_vram_def_sdram_size;
		paddr = omap_vram_def_sdram_start;
	}

	if (!size) {
		size = omap_vram_sdram_size;
		paddr = omap_vram_sdram_start;
	}

#ifdef CONFIG_OMAP2_VRAM_SIZE
	/* Kconfig default: dynamic placement (paddr == 0). */
	if (!size) {
		size = CONFIG_OMAP2_VRAM_SIZE * 1024 * 1024;
		paddr = 0;
	}
#endif

	if (!size)
		return;

	size = ALIGN(size, SZ_2M);

	if (paddr) {
		/* Fixed placement: validate and reserve the exact range. */
		if (paddr & ~PAGE_MASK) {
			pr_err("VRAM start address 0x%08x not page aligned\n",
					paddr);
			return;
		}

		if (!memblock_is_region_memory(paddr, size)) {
			pr_err("Illegal SDRAM region 0x%08x..0x%08x for VRAM\n",
					paddr, paddr + size - 1);
			return;
		}

		if (memblock_is_region_reserved(paddr, size)) {
			pr_err("FB: failed to reserve VRAM - busy\n");
			return;
		}

		if (memblock_reserve(paddr, size) < 0) {
			pr_err("FB: failed to reserve VRAM - no memory\n");
			return;
		}
	} else {
		/* NOTE(review): memblock_alloc() result is not checked -
		 * presumably it cannot fail this early, but confirm. */
		paddr = memblock_alloc(size, SZ_2M);
	}

	/* Remove the area from the kernel's memory view entirely so the
	 * VM never maps it; the VRAM manager owns it from here on. */
	memblock_free(paddr, size);
	memblock_remove(paddr, size);

	omap_vram_add_region(paddr, size);

	pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
}
589 | |||
/*
 * Called at sram init time, before anything is pushed to the SRAM stack.
 * Because of the stack scheme, we will allocate everything from the
 * start of the lowest address region to the end of SRAM. This will also
 * include padding for page alignment and possible holes between regions.
 *
 * As opposed to the SDRAM case, we'll also do any dynamic allocations at
 * this point, since the driver built as a module would have problem with
 * freeing / reallocating the regions.
 *
 * Returns the number of bytes taken from the available SRAM window
 * [pstart_avail, pstart_avail + size_avail), or 0 when no SRAM VRAM is
 * configured or the request cannot be satisfied.
 */
unsigned long __init omap_vram_reserve_sram(unsigned long sram_pstart,
				  unsigned long sram_vstart,
				  unsigned long sram_size,
				  unsigned long pstart_avail,
				  unsigned long size_avail)
{
	unsigned long pend_avail;
	unsigned long reserved;
	u32 paddr;
	u32 size;

	paddr = omap_vram_sram_start;
	size = omap_vram_sram_size;

	if (!size)
		return 0;

	reserved = 0;
	pend_avail = pstart_avail + size_avail;

	if (!paddr) {
		/* Dynamic allocation */
		if ((size_avail & PAGE_MASK) < size) {
			pr_err("Not enough SRAM for VRAM\n");
			return 0;
		}
		/* Take the VRAM from the top of the available window,
		 * page aligned. */
		size_avail = (size_avail - size) & PAGE_MASK;
		paddr = pstart_avail + size_avail;
	}

	if (paddr < sram_pstart ||
			paddr + size > sram_pstart + sram_size) {
		pr_err("Illegal SRAM region for VRAM\n");
		return 0;
	}

	/* Reserve everything above the start of the region. */
	if (pend_avail - paddr > reserved)
		reserved = pend_avail - paddr;
	size_avail = pend_avail - reserved - pstart_avail;

	omap_vram_add_region(paddr, size);

	if (reserved)
		pr_info("Reserving %lu bytes SRAM for VRAM\n", reserved);

	return reserved;
}
648 | |||
649 | void __init omap_vram_set_sdram_vram(u32 size, u32 start) | ||
650 | { | ||
651 | omap_vram_sdram_start = start; | ||
652 | omap_vram_sdram_size = size; | ||
653 | } | ||
654 | |||
655 | void __init omap_vram_set_sram_vram(u32 size, u32 start) | ||
656 | { | ||
657 | omap_vram_sram_start = start; | ||
658 | omap_vram_sram_size = size; | ||
659 | } | ||
diff --git a/drivers/video/pnx4008/Makefile b/drivers/video/pnx4008/Makefile new file mode 100644 index 00000000000..636aaccf01f --- /dev/null +++ b/drivers/video/pnx4008/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | # | ||
2 | # Makefile for the new PNX4008 framebuffer device driver | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_FB_PNX4008_DUM) += sdum.o | ||
6 | obj-$(CONFIG_FB_PNX4008_DUM_RGB) += pnxrgbfb.o | ||
7 | |||
diff --git a/drivers/video/pnx4008/dum.h b/drivers/video/pnx4008/dum.h new file mode 100644 index 00000000000..1234d4375d9 --- /dev/null +++ b/drivers/video/pnx4008/dum.h | |||
@@ -0,0 +1,211 @@ | |||
1 | /* | ||
2 | * linux/drivers/video/pnx4008/dum.h | ||
3 | * | ||
4 | * Internal header for SDUM | ||
5 | * | ||
6 | * 2005 (c) Koninklijke Philips N.V. This file is licensed under | ||
7 | * the terms of the GNU General Public License version 2. This program | ||
8 | * is licensed "as is" without any warranty of any kind, whether express | ||
9 | * or implied. | ||
10 | */ | ||
11 | |||
#ifndef __PNX008_DUM_H__
#define __PNX008_DUM_H__

#include <mach/platform.h>

/* Virtual base addresses of the two DUM register banks. */
#define PNX4008_DUMCONF_VA_BASE		IO_ADDRESS(PNX4008_DUMCONF_BASE)
#define PNX4008_DUM_MAIN_VA_BASE	IO_ADDRESS(PNX4008_DUM_MAINCFG_BASE)

/* DUM CFG ADDRESSES */
#define DUM_CH_BASE_ADR		(PNX4008_DUMCONF_VA_BASE + 0x00)
#define DUM_CH_MIN_ADR		(PNX4008_DUMCONF_VA_BASE + 0x00)
#define DUM_CH_MAX_ADR		(PNX4008_DUMCONF_VA_BASE + 0x04)
#define DUM_CH_CONF_ADR		(PNX4008_DUMCONF_VA_BASE + 0x08)
#define DUM_CH_STAT_ADR		(PNX4008_DUMCONF_VA_BASE + 0x0C)
#define DUM_CH_CTRL_ADR		(PNX4008_DUMCONF_VA_BASE + 0x10)

/* Per-channel register blocks are spaced 0x100 bytes apart. */
#define CH_MARG		(0x100 / sizeof(u32))
/* Direct volatile accessors for channel i's registers. */
#define DUM_CH_MIN(i)	(*((volatile u32 *)DUM_CH_MIN_ADR + (i) * CH_MARG))
#define DUM_CH_MAX(i)	(*((volatile u32 *)DUM_CH_MAX_ADR + (i) * CH_MARG))
#define DUM_CH_CONF(i)	(*((volatile u32 *)DUM_CH_CONF_ADR + (i) * CH_MARG))
#define DUM_CH_STAT(i)	(*((volatile u32 *)DUM_CH_STAT_ADR + (i) * CH_MARG))
#define DUM_CH_CTRL(i)	(*((volatile u32 *)DUM_CH_CTRL_ADR + (i) * CH_MARG))

/* Main-configuration register addresses. */
#define DUM_CONF_ADR		(PNX4008_DUM_MAIN_VA_BASE + 0x00)
#define DUM_CTRL_ADR		(PNX4008_DUM_MAIN_VA_BASE + 0x04)
#define DUM_STAT_ADR		(PNX4008_DUM_MAIN_VA_BASE + 0x08)
#define DUM_DECODE_ADR		(PNX4008_DUM_MAIN_VA_BASE + 0x0C)
#define DUM_COM_BASE_ADR	(PNX4008_DUM_MAIN_VA_BASE + 0x10)
#define DUM_SYNC_C_ADR		(PNX4008_DUM_MAIN_VA_BASE + 0x14)
#define DUM_CLK_DIV_ADR		(PNX4008_DUM_MAIN_VA_BASE + 0x18)
#define DUM_DIRTY_LOW_ADR	(PNX4008_DUM_MAIN_VA_BASE + 0x20)
#define DUM_DIRTY_HIGH_ADR	(PNX4008_DUM_MAIN_VA_BASE + 0x24)
#define DUM_FORMAT_ADR		(PNX4008_DUM_MAIN_VA_BASE + 0x28)
#define DUM_WTCFG1_ADR		(PNX4008_DUM_MAIN_VA_BASE + 0x30)
#define DUM_RTCFG1_ADR		(PNX4008_DUM_MAIN_VA_BASE + 0x34)
#define DUM_WTCFG2_ADR		(PNX4008_DUM_MAIN_VA_BASE + 0x38)
#define DUM_RTCFG2_ADR		(PNX4008_DUM_MAIN_VA_BASE + 0x3C)
#define DUM_TCFG_ADR		(PNX4008_DUM_MAIN_VA_BASE + 0x40)
#define DUM_OUTP_FORMAT1_ADR	(PNX4008_DUM_MAIN_VA_BASE + 0x44)
#define DUM_OUTP_FORMAT2_ADR	(PNX4008_DUM_MAIN_VA_BASE + 0x48)
#define DUM_SYNC_MODE_ADR	(PNX4008_DUM_MAIN_VA_BASE + 0x4C)
#define DUM_SYNC_OUT_C_ADR	(PNX4008_DUM_MAIN_VA_BASE + 0x50)

/* Volatile lvalue accessors for the main-configuration registers. */
#define DUM_CONF	(*(volatile u32 *)(DUM_CONF_ADR))
#define DUM_CTRL	(*(volatile u32 *)(DUM_CTRL_ADR))
#define DUM_STAT	(*(volatile u32 *)(DUM_STAT_ADR))
#define DUM_DECODE	(*(volatile u32 *)(DUM_DECODE_ADR))
#define DUM_COM_BASE	(*(volatile u32 *)(DUM_COM_BASE_ADR))
#define DUM_SYNC_C	(*(volatile u32 *)(DUM_SYNC_C_ADR))
#define DUM_CLK_DIV	(*(volatile u32 *)(DUM_CLK_DIV_ADR))
#define DUM_DIRTY_LOW	(*(volatile u32 *)(DUM_DIRTY_LOW_ADR))
#define DUM_DIRTY_HIGH	(*(volatile u32 *)(DUM_DIRTY_HIGH_ADR))
#define DUM_FORMAT	(*(volatile u32 *)(DUM_FORMAT_ADR))
#define DUM_WTCFG1	(*(volatile u32 *)(DUM_WTCFG1_ADR))
#define DUM_RTCFG1	(*(volatile u32 *)(DUM_RTCFG1_ADR))
#define DUM_WTCFG2	(*(volatile u32 *)(DUM_WTCFG2_ADR))
#define DUM_RTCFG2	(*(volatile u32 *)(DUM_RTCFG2_ADR))
#define DUM_TCFG	(*(volatile u32 *)(DUM_TCFG_ADR))
#define DUM_OUTP_FORMAT1	(*(volatile u32 *)(DUM_OUTP_FORMAT1_ADR))
#define DUM_OUTP_FORMAT2	(*(volatile u32 *)(DUM_OUTP_FORMAT2_ADR))
#define DUM_SYNC_MODE	(*(volatile u32 *)(DUM_SYNC_MODE_ADR))
#define DUM_SYNC_OUT_C	(*(volatile u32 *)(DUM_SYNC_OUT_C_ADR))

/* DUM SLAVE ADDRESSES (physical; callers ioremap or write via slave base) */
#define DUM_SLAVE_WRITE_ADR	(PNX4008_DUM_MAINCFG_BASE + 0x0000000)
#define DUM_SLAVE_READ1_I_ADR	(PNX4008_DUM_MAINCFG_BASE + 0x1000000)
#define DUM_SLAVE_READ1_R_ADR	(PNX4008_DUM_MAINCFG_BASE + 0x1000004)
#define DUM_SLAVE_READ2_I_ADR	(PNX4008_DUM_MAINCFG_BASE + 0x1000008)
#define DUM_SLAVE_READ2_R_ADR	(PNX4008_DUM_MAINCFG_BASE + 0x100000C)

#define DUM_SLAVE_WRITE_W	((volatile u32 *)(DUM_SLAVE_WRITE_ADR))
#define DUM_SLAVE_WRITE_HW	((volatile u16 *)(DUM_SLAVE_WRITE_ADR))
#define DUM_SLAVE_READ1_I	((volatile u8 *)(DUM_SLAVE_READ1_I_ADR))
#define DUM_SLAVE_READ1_R	((volatile u16 *)(DUM_SLAVE_READ1_R_ADR))
#define DUM_SLAVE_READ2_I	((volatile u8 *)(DUM_SLAVE_READ2_I_ADR))
#define DUM_SLAVE_READ2_R	((volatile u16 *)(DUM_SLAVE_READ2_R_ADR))

/* Sony display register addresses */
#define DISP_0_REG			(0x00)
#define DISP_1_REG			(0x01)
#define DISP_CAL_REG			(0x20)
#define DISP_ID_REG			(0x2A)
#define DISP_XMIN_L_REG			(0x30)
#define DISP_XMIN_H_REG			(0x31)
#define DISP_YMIN_REG			(0x32)
#define DISP_XMAX_L_REG			(0x34)
#define DISP_XMAX_H_REG			(0x35)
#define DISP_YMAX_REG			(0x36)
#define DISP_SYNC_EN_REG		(0x38)
#define DISP_SYNC_RISE_L_REG		(0x3C)
#define DISP_SYNC_RISE_H_REG		(0x3D)
#define DISP_SYNC_FALL_L_REG		(0x3E)
#define DISP_SYNC_FALL_H_REG		(0x3F)
#define DISP_PIXEL_REG			(0x0B)
#define DISP_DUMMY1_REG			(0x28)
#define DISP_DUMMY2_REG			(0x29)
#define DISP_TIMING_REG			(0x98)
#define DISP_DUMP_REG			(0x99)

/* Sony display constants */
#define SONY_ID1			(0x22)
#define SONY_ID2			(0x23)

/* Philips display register addresses */
#define PH_DISP_ORIENT_REG		(0x003)
#define PH_DISP_YPOINT_REG		(0x200)
#define PH_DISP_XPOINT_REG		(0x201)
#define PH_DISP_PIXEL_REG		(0x202)
#define PH_DISP_YMIN_REG		(0x406)
#define PH_DISP_YMAX_REG		(0x407)
#define PH_DISP_XMIN_REG		(0x408)
#define PH_DISP_XMAX_REG		(0x409)

/* Misc constants */
#define NO_VALID_DISPLAY_FOUND		(0)
#define DISPLAY2_IS_NOT_CONNECTED	(0)

/* register values (bits of DUM_CTRL / channel status) */
#define V_BAC_ENABLE		(BIT(0))
#define V_BAC_DISABLE_IDLE	(BIT(1))
#define V_BAC_DISABLE_TRIG	(BIT(2))
#define V_DUM_RESET		(BIT(3))
#define V_MUX_RESET		(BIT(4))
#define BAC_ENABLED		(BIT(0))
#define BAC_DISABLED		0

/* Sony LCD commands: BIT(25) marks a command word, bits 23..16 carry the
 * data value and the low byte the target display register. */
#define V_LCD_STANDBY_OFF	((BIT(25)) | (0 << 16) | DISP_0_REG)
#define V_LCD_USE_9BIT_BUS	((BIT(25)) | (2 << 16) | DISP_1_REG)
#define V_LCD_SYNC_RISE_L	((BIT(25)) | (0 << 16) | DISP_SYNC_RISE_L_REG)
#define V_LCD_SYNC_RISE_H	((BIT(25)) | (0 << 16) | DISP_SYNC_RISE_H_REG)
#define V_LCD_SYNC_FALL_L	((BIT(25)) | (160 << 16) | DISP_SYNC_FALL_L_REG)
#define V_LCD_SYNC_FALL_H	((BIT(25)) | (0 << 16) | DISP_SYNC_FALL_H_REG)
#define V_LCD_SYNC_ENABLE	((BIT(25)) | (128 << 16) | DISP_SYNC_EN_REG)
#define V_LCD_DISPLAY_ON	((BIT(25)) | (64 << 16) | DISP_0_REG)

/* Line padding of the framebuffer in memory. */
enum {
	PAD_NONE,
	PAD_512,
	PAD_1024
};

/* Pixel formats understood by the DUM. */
enum {
	RGB888,
	RGB666,
	RGB565,
	BGR565,
	ARGB1555,
	ABGR1555,
	ARGB4444,
	ABGR4444
};

/* Global DUM configuration (interrupt enables, sync generation, command
 * string location). */
struct dum_setup {
	int sync_neg_edge;
	int round_robin;
	int mux_int;
	int synced_dirty_flag_int;
	int dirty_flag_int;
	int error_int;
	int pf_empty_int;
	int sf_empty_int;
	int bac_dis_int;
	u32 dirty_base_adr;
	u32 command_base_adr;
	u32 sync_clk_div;
	int sync_output;
	u32 sync_restart_val;
	u32 set_sync_high;
	u32 set_sync_low;
};

/* Per-channel setup: display window (inclusive bounds), mirroring/rotation,
 * framebuffer address range and pixel format. */
struct dum_ch_setup {
	int disp_no;
	u32 xmin;
	u32 ymin;
	u32 xmax;
	u32 ymax;
	int xmirror;
	int ymirror;
	int rotate;
	u32 minadr;
	u32 maxadr;
	u32 dirtybuffer;
	int pad;
	int format;
	int hwdirty;
	int slave_trans;
};

/* Display window split into the byte-wide register fields the panel expects
 * (X coordinates are 9 bits: low byte + high bit). */
struct disp_window {
	u32 xmin_l;
	u32 xmin_h;
	u32 ymin;
	u32 xmax_l;
	u32 xmax_h;
	u32 ymax;
};

#endif /* #ifndef __PNX008_DUM_H__ */
diff --git a/drivers/video/pnx4008/fbcommon.h b/drivers/video/pnx4008/fbcommon.h new file mode 100644 index 00000000000..4ebc87dafaf --- /dev/null +++ b/drivers/video/pnx4008/fbcommon.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 Philips Semiconductors | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2, or (at your option) | ||
7 | * any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; see the file COPYING. If not, write to | ||
16 | * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
17 | * Boston, MA 02111-1307, USA, or http://www.gnu.org/licenses/gpl.html | ||
18 | */ | ||
19 | |||
/* Standard video frame sizes. */
#define QCIF_W  (176)
#define QCIF_H  (144)

#define CIF_W   (352)
#define CIF_H   (288)

/* Panel geometry: 208x320 visible, lines padded to 256 pixels in memory. */
#define LCD_X_RES	208
#define LCD_Y_RES	320
#define LCD_X_PAD	256
#define LCD_BBP	4 /* Bytes Per Pixel */

#define DISP_MAX_X_SIZE     (320)
#define DISP_MAX_Y_SIZE     (208)

#define RETURNVAL_BASE (0x400)

/* Driver-specific ioctl error codes, offset past the errno range. */
enum fb_ioctl_returntype {
	ENORESOURCESLEFT = RETURNVAL_BASE,
	ERESOURCESNOTFREED,
	EPROCNOTOWNER,
	EFBNOTOWNER,
	ECOPYFAILED,
	EIOREMAPFAILED,
};
diff --git a/drivers/video/pnx4008/pnxrgbfb.c b/drivers/video/pnx4008/pnxrgbfb.c new file mode 100644 index 00000000000..b2252fea285 --- /dev/null +++ b/drivers/video/pnx4008/pnxrgbfb.c | |||
@@ -0,0 +1,209 @@ | |||
1 | /* | ||
2 | * drivers/video/pnx4008/pnxrgbfb.c | ||
3 | * | ||
4 | * PNX4008's framebuffer support | ||
5 | * | ||
6 | * Author: Grigory Tolstolytkin <gtolstolytkin@ru.mvista.com> | ||
7 | * Based on Philips Semiconductors's code | ||
8 | * | ||
9 | * Copyrght (c) 2005 MontaVista Software, Inc. | ||
10 | * Copyright (c) 2005 Philips Semiconductors | ||
11 | * This file is licensed under the terms of the GNU General Public License | ||
12 | * version 2. This program is licensed "as is" without any warranty of any | ||
13 | * kind, whether express or implied. | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/vmalloc.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/fb.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | |||
28 | #include "sdum.h" | ||
29 | #include "fbcommon.h" | ||
30 | |||
/* Pseudo-palette storage filled by rgbfb_setcolreg (16 truecolor entries). */
static u32 colreg[16];

/* Fixed 32bpp XRGB888 mode matching the panel resolution. */
static struct fb_var_screeninfo rgbfb_var __initdata = {
	.xres = LCD_X_RES,
	.yres = LCD_Y_RES,
	.xres_virtual = LCD_X_RES,
	.yres_virtual = LCD_Y_RES,
	.bits_per_pixel = 32,
	.red.offset = 16,
	.red.length = 8,
	.green.offset = 8,
	.green.length = 8,
	.blue.offset = 0,
	.blue.length = 8,
	.left_margin = 0,
	.right_margin = 0,
	.upper_margin = 0,
	.lower_margin = 0,
	.vmode = FB_VMODE_NONINTERLACED,
};
static struct fb_fix_screeninfo rgbfb_fix __initdata = {
	.id = "RGBFB",
	.line_length = LCD_X_RES * LCD_BBP,
	.type = FB_TYPE_PACKED_PIXELS,
	.visual = FB_VISUAL_TRUECOLOR,
	.xpanstep = 0,
	.ypanstep = 0,
	.ywrapstep = 0,
	.accel = FB_ACCEL_NONE,
};

/* DUM channel number returned by pnx4008_alloc_dum_channel() in probe. */
static int channel_owned;
63 | |||
/* fb_cursor hook that draws nothing; installed when the "nocursor" option
 * is given on the command line (see rgbfb_probe). */
static int no_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	return 0;
}
68 | |||
69 | static int rgbfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, | ||
70 | u_int transp, struct fb_info *info) | ||
71 | { | ||
72 | if (regno > 15) | ||
73 | return 1; | ||
74 | |||
75 | colreg[regno] = ((red & 0xff00) << 8) | (green & 0xff00) | | ||
76 | ((blue & 0xff00) >> 8); | ||
77 | return 0; | ||
78 | } | ||
79 | |||
/* Map the framebuffer into user space; the SDUM core does the real work. */
static int rgbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	return pnx4008_sdum_mmap(info, vma, NULL);
}
84 | |||
/* Frame buffer operations: custom mmap/palette, generic cfb drawing. */
static struct fb_ops rgbfb_ops = {
	.fb_mmap = rgbfb_mmap,
	.fb_setcolreg = rgbfb_setcolreg,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
};
92 | |||
93 | static int rgbfb_remove(struct platform_device *pdev) | ||
94 | { | ||
95 | struct fb_info *info = platform_get_drvdata(pdev); | ||
96 | |||
97 | if (info) { | ||
98 | unregister_framebuffer(info); | ||
99 | fb_dealloc_cmap(&info->cmap); | ||
100 | framebuffer_release(info); | ||
101 | platform_set_drvdata(pdev, NULL); | ||
102 | } | ||
103 | |||
104 | pnx4008_free_dum_channel(channel_owned, pdev->id); | ||
105 | pnx4008_set_dum_exit_notification(pdev->id); | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | |||
/*
 * Probe: allocate the fb_info, obtain the RGB framebuffer region from the
 * SDUM core, claim and configure a DUM channel, then register the
 * framebuffer.  Error paths unwind in reverse order (err2..err).
 */
static int __devinit rgbfb_probe(struct platform_device *pdev)
{
	struct fb_info *info;
	struct dumchannel_uf chan_uf;
	int ret;
	char *option;

	/* par area holds the 16-entry pseudo palette (see below). */
	info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev);
	if (!info) {
		ret = -ENOMEM;
		goto err;
	}

	/* SDUM core owns the memory; we just get virt/phys/length back. */
	pnx4008_get_fb_addresses(FB_TYPE_RGB, (void **)&info->screen_base,
				 (dma_addr_t *) &rgbfb_fix.smem_start,
				 &rgbfb_fix.smem_len);

	if ((ret = pnx4008_alloc_dum_channel(pdev->id)) < 0)
		goto err0;
	else {
		/* Whole-screen channel, no dirty buffer, sync + dirty
		 * detection enabled. */
		channel_owned = ret;
		chan_uf.channelnr = channel_owned;
		chan_uf.dirty = (u32 *) NULL;
		chan_uf.source = (u32 *) rgbfb_fix.smem_start;
		chan_uf.x_offset = 0;
		chan_uf.y_offset = 0;
		chan_uf.width = LCD_X_RES;
		chan_uf.height = LCD_Y_RES;

		if ((ret = pnx4008_put_dum_channel_uf(chan_uf, pdev->id))< 0)
			goto err1;

		if ((ret =
		     pnx4008_set_dum_channel_sync(channel_owned, CONF_SYNC_ON,
						  pdev->id)) < 0)
			goto err1;

		if ((ret =
		     pnx4008_set_dum_channel_dirty_detect(channel_owned,
							 CONF_DIRTYDETECTION_ON,
							 pdev->id)) < 0)
			goto err1;
	}

	/* "video=pnxrgbfb:nocursor" disables the software cursor. */
	if (!fb_get_options("pnxrgbfb", &option) && option &&
			!strcmp(option, "nocursor"))
		rgbfb_ops.fb_cursor = no_cursor;

	info->node = -1;
	info->flags = FBINFO_FLAG_DEFAULT;
	info->fbops = &rgbfb_ops;
	info->fix = rgbfb_fix;
	info->var = rgbfb_var;
	info->screen_size = rgbfb_fix.smem_len;
	/* Reuse the par allocation as the pseudo palette, then clear par
	 * so nothing treats it as private driver state. */
	info->pseudo_palette = info->par;
	info->par = NULL;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret < 0)
		goto err1;

	ret = register_framebuffer(info);
	if (ret < 0)
		goto err2;
	platform_set_drvdata(pdev, info);

	return 0;

err2:
	fb_dealloc_cmap(&info->cmap);
err1:
	pnx4008_free_dum_channel(channel_owned, pdev->id);
err0:
	framebuffer_release(info);
err:
	return ret;
}
187 | |||
/* Platform driver bound to the "pnx4008-rgbfb" platform device. */
static struct platform_driver rgbfb_driver = {
	.driver = {
		.name = "pnx4008-rgbfb",
	},
	.probe = rgbfb_probe,
	.remove = rgbfb_remove,
};
195 | |||
/* Module entry point: register the platform driver. */
static int __init rgbfb_init(void)
{
	return platform_driver_register(&rgbfb_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit rgbfb_exit(void)
{
	platform_driver_unregister(&rgbfb_driver);
}

module_init(rgbfb_init);
module_exit(rgbfb_exit);

MODULE_LICENSE("GPL");
diff --git a/drivers/video/pnx4008/sdum.c b/drivers/video/pnx4008/sdum.c new file mode 100644 index 00000000000..5ec4f2d439c --- /dev/null +++ b/drivers/video/pnx4008/sdum.c | |||
@@ -0,0 +1,872 @@ | |||
1 | /* | ||
2 | * drivers/video/pnx4008/sdum.c | ||
3 | * | ||
4 | * Display Update Master support | ||
5 | * | ||
6 | * Authors: Grigory Tolstolytkin <gtolstolytkin@ru.mvista.com> | ||
7 | * Vitaly Wool <vitalywool@gmail.com> | ||
8 | * Based on Philips Semiconductors's code | ||
9 | * | ||
10 | * Copyrght (c) 2005-2006 MontaVista Software, Inc. | ||
11 | * Copyright (c) 2005 Philips Semiconductors | ||
12 | * This file is licensed under the terms of the GNU General Public License | ||
13 | * version 2. This program is licensed "as is" without any warranty of any | ||
14 | * kind, whether express or implied. | ||
15 | */ | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/tty.h> | ||
23 | #include <linux/vmalloc.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/fb.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/dma-mapping.h> | ||
30 | #include <linux/clk.h> | ||
31 | #include <linux/gfp.h> | ||
32 | #include <asm/uaccess.h> | ||
33 | #include <mach/gpio.h> | ||
34 | |||
35 | #include "sdum.h" | ||
36 | #include "fbcommon.h" | ||
37 | #include "dum.h" | ||
38 | |||
39 | /* Framebuffers we have */ | ||
40 | |||
/* Layout of the two framebuffers inside the SDUM memory region:
 * YUV buffer first, RGB buffer directly after it. Offsets are relative
 * to the start of the region; lengths are in bytes. */
static struct pnx4008_fb_addr {
	int fb_type;
	long addr_offset;
	long fb_length;
} fb_addr[] = {
	[0] = {
		FB_TYPE_YUV, 0, 0xB0000
	},
	[1] = {
		FB_TYPE_RGB, 0xB0000, 0x50000
	},
};
53 | |||
/* Global SDUM driver state: LCD framebuffer region (phys/virt), the DUM
 * slave interface mapping, and per-channel ownership/configuration. */
static struct dum_data {
	u32 lcd_phys_start;
	u32 lcd_virt_start;
	u32 slave_phys_base;
	u32 *slave_virt_base;
	int fb_owning_channel[MAX_DUM_CHANNELS];
	struct dumchannel_uf chan_uf_store[MAX_DUM_CHANNELS];
} dum_data;
62 | |||
63 | /* Different local helper functions */ | ||
64 | |||
65 | static u32 nof_pixels_dx(struct dum_ch_setup *ch_setup) | ||
66 | { | ||
67 | return (ch_setup->xmax - ch_setup->xmin + 1); | ||
68 | } | ||
69 | |||
70 | static u32 nof_pixels_dy(struct dum_ch_setup *ch_setup) | ||
71 | { | ||
72 | return (ch_setup->ymax - ch_setup->ymin + 1); | ||
73 | } | ||
74 | |||
75 | static u32 nof_pixels_dxy(struct dum_ch_setup *ch_setup) | ||
76 | { | ||
77 | return (nof_pixels_dx(ch_setup) * nof_pixels_dy(ch_setup)); | ||
78 | } | ||
79 | |||
80 | static u32 nof_bytes(struct dum_ch_setup *ch_setup) | ||
81 | { | ||
82 | u32 r = nof_pixels_dxy(ch_setup); | ||
83 | switch (ch_setup->format) { | ||
84 | case RGB888: | ||
85 | case RGB666: | ||
86 | r *= 4; | ||
87 | break; | ||
88 | |||
89 | default: | ||
90 | r *= 2; | ||
91 | break; | ||
92 | } | ||
93 | return r; | ||
94 | } | ||
95 | |||
96 | static u32 build_command(int disp_no, u32 reg, u32 val) | ||
97 | { | ||
98 | return ((disp_no << 26) | BIT(25) | (val << 16) | (disp_no << 10) | | ||
99 | (reg << 0)); | ||
100 | } | ||
101 | |||
102 | static u32 build_double_index(int disp_no, u32 val) | ||
103 | { | ||
104 | return ((disp_no << 26) | (val << 16) | (disp_no << 10) | (val << 0)); | ||
105 | } | ||
106 | |||
107 | static void build_disp_window(struct dum_ch_setup * ch_setup, struct disp_window * dw) | ||
108 | { | ||
109 | dw->ymin = ch_setup->ymin; | ||
110 | dw->ymax = ch_setup->ymax; | ||
111 | dw->xmin_l = ch_setup->xmin & 0xFF; | ||
112 | dw->xmin_h = (ch_setup->xmin & BIT(8)) >> 8; | ||
113 | dw->xmax_l = ch_setup->xmax & 0xFF; | ||
114 | dw->xmax_h = (ch_setup->xmax & BIT(8)) >> 8; | ||
115 | } | ||
116 | |||
117 | static int put_channel(struct dumchannel chan) | ||
118 | { | ||
119 | int i = chan.channelnr; | ||
120 | |||
121 | if (i < 0 || i > MAX_DUM_CHANNELS) | ||
122 | return -EINVAL; | ||
123 | else { | ||
124 | DUM_CH_MIN(i) = chan.dum_ch_min; | ||
125 | DUM_CH_MAX(i) = chan.dum_ch_max; | ||
126 | DUM_CH_CONF(i) = chan.dum_ch_conf; | ||
127 | DUM_CH_CTRL(i) = chan.dum_ch_ctrl; | ||
128 | } | ||
129 | |||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | static void clear_channel(int channr) | ||
134 | { | ||
135 | struct dumchannel chan; | ||
136 | |||
137 | chan.channelnr = channr; | ||
138 | chan.dum_ch_min = 0; | ||
139 | chan.dum_ch_max = 0; | ||
140 | chan.dum_ch_conf = 0; | ||
141 | chan.dum_ch_ctrl = 0; | ||
142 | |||
143 | put_channel(chan); | ||
144 | } | ||
145 | |||
146 | static int put_cmd_string(struct cmdstring cmds) | ||
147 | { | ||
148 | u16 *cmd_str_virtaddr; | ||
149 | u32 *cmd_ptr0_virtaddr; | ||
150 | u32 cmd_str_physaddr; | ||
151 | |||
152 | int i = cmds.channelnr; | ||
153 | |||
154 | if (i < 0 || i > MAX_DUM_CHANNELS) | ||
155 | return -EINVAL; | ||
156 | else if ((cmd_ptr0_virtaddr = | ||
157 | (int *)ioremap_nocache(DUM_COM_BASE, | ||
158 | sizeof(int) * MAX_DUM_CHANNELS)) == | ||
159 | NULL) | ||
160 | return -EIOREMAPFAILED; | ||
161 | else { | ||
162 | cmd_str_physaddr = ioread32(&cmd_ptr0_virtaddr[cmds.channelnr]); | ||
163 | if ((cmd_str_virtaddr = | ||
164 | (u16 *) ioremap_nocache(cmd_str_physaddr, | ||
165 | sizeof(cmds))) == NULL) { | ||
166 | iounmap(cmd_ptr0_virtaddr); | ||
167 | return -EIOREMAPFAILED; | ||
168 | } else { | ||
169 | int t; | ||
170 | for (t = 0; t < 8; t++) | ||
171 | iowrite16(*((u16 *)&cmds.prestringlen + t), | ||
172 | cmd_str_virtaddr + t); | ||
173 | |||
174 | for (t = 0; t < cmds.prestringlen / 2; t++) | ||
175 | iowrite16(*((u16 *)&cmds.precmd + t), | ||
176 | cmd_str_virtaddr + t + 8); | ||
177 | |||
178 | for (t = 0; t < cmds.poststringlen / 2; t++) | ||
179 | iowrite16(*((u16 *)&cmds.postcmd + t), | ||
180 | cmd_str_virtaddr + t + 8 + | ||
181 | cmds.prestringlen / 2); | ||
182 | |||
183 | iounmap(cmd_ptr0_virtaddr); | ||
184 | iounmap(cmd_str_virtaddr); | ||
185 | } | ||
186 | } | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
/*
 * Build and install the command string for one DUM channel, then program
 * the channel registers.
 *
 * "Standard" setups (no mirroring/rotation) use a short 32-byte
 * pre-string; mirrored/rotated setups use a longer pre-string plus a
 * post-string that restores the panel orientation.  Returns 0 on
 * success or the error from put_cmd_string().
 */
static u32 dum_ch_setup(int ch_no, struct dum_ch_setup * ch_setup)
{
	struct cmdstring cmds_c;
	struct cmdstring *cmds = &cmds_c;
	struct disp_window dw;
	int standard;
	u32 orientation = 0;
	struct dumchannel chan = { 0 };
	int ret;

	if ((ch_setup->xmirror) || (ch_setup->ymirror) || (ch_setup->rotate)) {
		standard = 0;

		orientation = BIT(1);	/* always set 9-bit-bus */
		if (ch_setup->xmirror)
			orientation |= BIT(4);
		if (ch_setup->ymirror)
			orientation |= BIT(3);
		if (ch_setup->rotate)
			orientation |= BIT(0);
	} else
		standard = 1;

	cmds->channelnr = ch_no;

	/* build command string header */
	if (standard) {
		cmds->prestringlen = 32;
		cmds->poststringlen = 0;
	} else {
		cmds->prestringlen = 48;
		cmds->poststringlen = 16;
	}

	cmds->format =
	    (u16) ((ch_setup->disp_no << 4) | (BIT(3)) | (ch_setup->format));
	cmds->reserved = 0x0;
	cmds->startaddr_low = (ch_setup->minadr & 0xFFFF);
	cmds->startaddr_high = (ch_setup->minadr >> 16);

	/* An all-zero window means "no pixel data" -> zero length. */
	if ((ch_setup->minadr == 0) && (ch_setup->maxadr == 0)
	    && (ch_setup->xmin == 0)
	    && (ch_setup->ymin == 0) && (ch_setup->xmax == 0)
	    && (ch_setup->ymax == 0)) {
		cmds->pixdatlen_low = 0;
		cmds->pixdatlen_high = 0;
	} else {
		u32 nbytes = nof_bytes(ch_setup);
		cmds->pixdatlen_low = (nbytes & 0xFFFF);
		cmds->pixdatlen_high = (nbytes >> 16);
	}

	/* Top bit of the high length word flags a slave transfer. */
	if (ch_setup->slave_trans)
		cmds->pixdatlen_high |= BIT(15);

	/* build pre-string */
	build_disp_window(ch_setup, &dw);

	if (standard) {
		/* Window bounds, then a double pixel-register index that
		 * starts the pixel data phase. */
		cmds->precmd[0] =
		    build_command(ch_setup->disp_no, DISP_XMIN_L_REG, 0x99);
		cmds->precmd[1] =
		    build_command(ch_setup->disp_no, DISP_XMIN_L_REG,
				  dw.xmin_l);
		cmds->precmd[2] =
		    build_command(ch_setup->disp_no, DISP_XMIN_H_REG,
				  dw.xmin_h);
		cmds->precmd[3] =
		    build_command(ch_setup->disp_no, DISP_YMIN_REG, dw.ymin);
		cmds->precmd[4] =
		    build_command(ch_setup->disp_no, DISP_XMAX_L_REG,
				  dw.xmax_l);
		cmds->precmd[5] =
		    build_command(ch_setup->disp_no, DISP_XMAX_H_REG,
				  dw.xmax_h);
		cmds->precmd[6] =
		    build_command(ch_setup->disp_no, DISP_YMAX_REG, dw.ymax);
		cmds->precmd[7] =
		    build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
	} else {
		/* NOTE(review): the first word apparently must differ from
		 * ch_no; 0x99 is substituted when they collide — confirm
		 * against the DUM documentation. */
		if (dw.xmin_l == ch_no)
			cmds->precmd[0] =
			    build_command(ch_setup->disp_no, DISP_XMIN_L_REG,
					  0x99);
		else
			cmds->precmd[0] =
			    build_command(ch_setup->disp_no, DISP_XMIN_L_REG,
					  ch_no);

		cmds->precmd[1] =
		    build_command(ch_setup->disp_no, DISP_XMIN_L_REG,
				  dw.xmin_l);
		cmds->precmd[2] =
		    build_command(ch_setup->disp_no, DISP_XMIN_H_REG,
				  dw.xmin_h);
		cmds->precmd[3] =
		    build_command(ch_setup->disp_no, DISP_YMIN_REG, dw.ymin);
		cmds->precmd[4] =
		    build_command(ch_setup->disp_no, DISP_XMAX_L_REG,
				  dw.xmax_l);
		cmds->precmd[5] =
		    build_command(ch_setup->disp_no, DISP_XMAX_H_REG,
				  dw.xmax_h);
		cmds->precmd[6] =
		    build_command(ch_setup->disp_no, DISP_YMAX_REG, dw.ymax);
		/* Apply the orientation, then pad with pixel-register
		 * indices to fill the 48-byte pre-string. */
		cmds->precmd[7] =
		    build_command(ch_setup->disp_no, DISP_1_REG, orientation);
		cmds->precmd[8] =
		    build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
		cmds->precmd[9] =
		    build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
		cmds->precmd[0xA] =
		    build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
		cmds->precmd[0xB] =
		    build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
		/* Post-string: restore 9-bit bus and flush with dummies. */
		cmds->postcmd[0] =
		    build_command(ch_setup->disp_no, DISP_1_REG, BIT(1));
		cmds->postcmd[1] =
		    build_command(ch_setup->disp_no, DISP_DUMMY1_REG, 1);
		cmds->postcmd[2] =
		    build_command(ch_setup->disp_no, DISP_DUMMY1_REG, 2);
		cmds->postcmd[3] =
		    build_command(ch_setup->disp_no, DISP_DUMMY1_REG, 3);
	}

	if ((ret = put_cmd_string(cmds_c)) != 0) {
		return ret;
	}

	/* Channel watches the dirty-buffer copy of the frame range. */
	chan.channelnr = cmds->channelnr;
	chan.dum_ch_min = ch_setup->dirtybuffer + ch_setup->minadr;
	chan.dum_ch_max = ch_setup->dirtybuffer + ch_setup->maxadr;
	chan.dum_ch_conf = 0x002;
	chan.dum_ch_ctrl = 0x04;

	put_channel(chan);

	return 0;
}
330 | |||
/*
 * Open a display window on a DUM channel: clamp the requested rectangle
 * to the panel, fill in a default (unmirrored, unrotated, RGB888,
 * 1024-padded) channel setup and program the channel.
 * Returns the result of dum_ch_setup().
 */
static u32 display_open(int ch_no, int auto_update, u32 * dirty_buffer,
			u32 * frame_buffer, u32 xpos, u32 ypos, u32 w, u32 h)
{

	struct dum_ch_setup k;
	int ret;

	/* keep width & height within display area */
	if ((xpos + w) > DISP_MAX_X_SIZE)
		w = DISP_MAX_X_SIZE - xpos;

	if ((ypos + h) > DISP_MAX_Y_SIZE)
		h = DISP_MAX_Y_SIZE - ypos;

	/* assume 1 display only */
	k.disp_no = 0;
	k.xmin = xpos;
	k.ymin = ypos;
	k.xmax = xpos + (w - 1);
	k.ymax = ypos + (h - 1);

	/* adjust min and max values if necessary */
	if (k.xmin > DISP_MAX_X_SIZE - 1)
		k.xmin = DISP_MAX_X_SIZE - 1;
	if (k.ymin > DISP_MAX_Y_SIZE - 1)
		k.ymin = DISP_MAX_Y_SIZE - 1;

	if (k.xmax > DISP_MAX_X_SIZE - 1)
		k.xmax = DISP_MAX_X_SIZE - 1;
	if (k.ymax > DISP_MAX_Y_SIZE - 1)
		k.ymax = DISP_MAX_Y_SIZE - 1;

	k.xmirror = 0;
	k.ymirror = 0;
	k.rotate = 0;
	k.minadr = (u32) frame_buffer;
	/* End address encodes (width-1) in the line field and the last
	 * 32bpp pixel of the last line in the low field (1024-byte pad). */
	k.maxadr = (u32) frame_buffer + (((w - 1) << 10) | ((h << 2) - 2));
	k.pad = PAD_1024;
	k.dirtybuffer = (u32) dirty_buffer;
	k.format = RGB888;
	k.hwdirty = 0;
	k.slave_trans = 0;

	ret = dum_ch_setup(ch_no, &k);

	return ret;
}
378 | |||
/*
 * Pulse the LCD reset line via PIO bit 19: assert through one PIO
 * register, deassert through the other, with 1us settle delays.
 * NOTE(review): dum_pio_base[1]/[2] look like the PIO set/clear
 * registers — confirm against the PNX4008 PIO block documentation.
 */
static void lcd_reset(void)
{
	u32 *dum_pio_base = (u32 *)IO_ADDRESS(PNX4008_PIO_BASE);

	udelay(1);
	iowrite32(BIT(19), &dum_pio_base[2]);
	udelay(1);
	iowrite32(BIT(19), &dum_pio_base[1]);
	udelay(1);
}
389 | |||
/*
 * One-time DUM core init: enable the DUM clock, reset the block and
 * program the global configuration and display-1 timing registers.
 * Returns 0 on success or the clk_get() error.
 */
static int dum_init(struct platform_device *pdev)
{
	struct clk *clk;

	/* enable DUM clock */
	clk = clk_get(&pdev->dev, "dum_ck");
	if (IS_ERR(clk)) {
		printk(KERN_ERR "pnx4008_dum: Unable to access DUM clock\n");
		return PTR_ERR(clk);
	}

	/* Rate 1 acts as the clock-enable for this clock. */
	clk_set_rate(clk, 1);
	clk_put(clk);

	DUM_CTRL = V_DUM_RESET;

	/* set priority to "round-robin". All other params to "false" */
	DUM_CONF = BIT(9);

	/* Display 1 */
	DUM_WTCFG1 = PNX4008_DUM_WT_CFG;
	DUM_RTCFG1 = PNX4008_DUM_RT_CFG;
	DUM_TCFG = PNX4008_DUM_T_CFG;

	return 0;
}
416 | |||
/*
 * Initialize the channel machinery: place the command-pointer table
 * right after the command strings, point each entry at its string,
 * clear all channel registers and zero the command-string memory.
 * Mapping failures are silently ignored (void return).
 */
static void dum_chan_init(void)
{
	int i = 0, ch = 0;
	u32 *cmdptrs;
	u32 *cmdstrings;

	DUM_COM_BASE =
	    CMDSTRING_BASEADDR + BYTES_PER_CMDSTRING * NR_OF_CMDSTRINGS;

	if ((cmdptrs =
	     (u32 *) ioremap_nocache(DUM_COM_BASE,
				     sizeof(u32) * NR_OF_CMDSTRINGS)) == NULL)
		return;

	/* One pointer per channel, each to its own command string slot. */
	for (ch = 0; ch < NR_OF_CMDSTRINGS; ch++)
		iowrite32(CMDSTRING_BASEADDR + BYTES_PER_CMDSTRING * ch,
			  cmdptrs + ch);

	for (ch = 0; ch < MAX_DUM_CHANNELS; ch++)
		clear_channel(ch);

	/* Clear the cmdstrings */
	cmdstrings =
	    (u32 *)ioremap_nocache(*cmdptrs,
				   BYTES_PER_CMDSTRING * NR_OF_CMDSTRINGS);

	if (!cmdstrings)
		goto out;

	for (i = 0; i < NR_OF_CMDSTRINGS * BYTES_PER_CMDSTRING / sizeof(u32);
	     i++)
		iowrite32(0, cmdstrings + i);

	iounmap((u32 *)cmdstrings);

out:
	iounmap((u32 *)cmdptrs);
}
455 | |||
/*
 * lcd_init - reset the panel and send its power-up command sequence.
 *
 * Each V_LCD_* command word is written to the single memory-mapped LCD
 * slave register (dum_data.slave_virt_base), with a 1 us delay between
 * writes; the panel presumably needs that settling time -- no datasheet
 * reference is available in the driver.  The DUM output format is set
 * to RGB666 first.  Also called from sdum_resume().
 */
static void lcd_init(void)
{
	lcd_reset();

	DUM_OUTP_FORMAT1 = 0;	/* RGB666 */

	udelay(1);
	iowrite32(V_LCD_STANDBY_OFF, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_USE_9BIT_BUS, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_SYNC_RISE_L, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_SYNC_RISE_H, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_SYNC_FALL_L, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_SYNC_FALL_H, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_SYNC_ENABLE, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_DISPLAY_ON, dum_data.slave_virt_base);
	udelay(1);
}
480 | |||
481 | /* Interface exported to framebuffer drivers */ | ||
482 | |||
483 | int pnx4008_get_fb_addresses(int fb_type, void **virt_addr, | ||
484 | dma_addr_t *phys_addr, int *fb_length) | ||
485 | { | ||
486 | int i; | ||
487 | int ret = -1; | ||
488 | for (i = 0; i < ARRAY_SIZE(fb_addr); i++) | ||
489 | if (fb_addr[i].fb_type == fb_type) { | ||
490 | *virt_addr = (void *)(dum_data.lcd_virt_start + | ||
491 | fb_addr[i].addr_offset); | ||
492 | *phys_addr = | ||
493 | dum_data.lcd_phys_start + fb_addr[i].addr_offset; | ||
494 | *fb_length = fb_addr[i].fb_length; | ||
495 | ret = 0; | ||
496 | break; | ||
497 | } | ||
498 | |||
499 | return ret; | ||
500 | } | ||
501 | |||
502 | EXPORT_SYMBOL(pnx4008_get_fb_addresses); | ||
503 | |||
504 | int pnx4008_alloc_dum_channel(int dev_id) | ||
505 | { | ||
506 | int i = 0; | ||
507 | |||
508 | while ((i < MAX_DUM_CHANNELS) && (dum_data.fb_owning_channel[i] != -1)) | ||
509 | i++; | ||
510 | |||
511 | if (i == MAX_DUM_CHANNELS) | ||
512 | return -ENORESOURCESLEFT; | ||
513 | else { | ||
514 | dum_data.fb_owning_channel[i] = dev_id; | ||
515 | return i; | ||
516 | } | ||
517 | } | ||
518 | |||
519 | EXPORT_SYMBOL(pnx4008_alloc_dum_channel); | ||
520 | |||
521 | int pnx4008_free_dum_channel(int channr, int dev_id) | ||
522 | { | ||
523 | if (channr < 0 || channr > MAX_DUM_CHANNELS) | ||
524 | return -EINVAL; | ||
525 | else if (dum_data.fb_owning_channel[channr] != dev_id) | ||
526 | return -EFBNOTOWNER; | ||
527 | else { | ||
528 | clear_channel(channr); | ||
529 | dum_data.fb_owning_channel[channr] = -1; | ||
530 | } | ||
531 | |||
532 | return 0; | ||
533 | } | ||
534 | |||
535 | EXPORT_SYMBOL(pnx4008_free_dum_channel); | ||
536 | |||
537 | int pnx4008_put_dum_channel_uf(struct dumchannel_uf chan_uf, int dev_id) | ||
538 | { | ||
539 | int i = chan_uf.channelnr; | ||
540 | int ret; | ||
541 | |||
542 | if (i < 0 || i > MAX_DUM_CHANNELS) | ||
543 | return -EINVAL; | ||
544 | else if (dum_data.fb_owning_channel[i] != dev_id) | ||
545 | return -EFBNOTOWNER; | ||
546 | else if ((ret = | ||
547 | display_open(chan_uf.channelnr, 0, chan_uf.dirty, | ||
548 | chan_uf.source, chan_uf.y_offset, | ||
549 | chan_uf.x_offset, chan_uf.height, | ||
550 | chan_uf.width)) != 0) | ||
551 | return ret; | ||
552 | else { | ||
553 | dum_data.chan_uf_store[i].dirty = chan_uf.dirty; | ||
554 | dum_data.chan_uf_store[i].source = chan_uf.source; | ||
555 | dum_data.chan_uf_store[i].x_offset = chan_uf.x_offset; | ||
556 | dum_data.chan_uf_store[i].y_offset = chan_uf.y_offset; | ||
557 | dum_data.chan_uf_store[i].width = chan_uf.width; | ||
558 | dum_data.chan_uf_store[i].height = chan_uf.height; | ||
559 | } | ||
560 | |||
561 | return 0; | ||
562 | } | ||
563 | |||
564 | EXPORT_SYMBOL(pnx4008_put_dum_channel_uf); | ||
565 | |||
566 | int pnx4008_set_dum_channel_sync(int channr, int val, int dev_id) | ||
567 | { | ||
568 | if (channr < 0 || channr > MAX_DUM_CHANNELS) | ||
569 | return -EINVAL; | ||
570 | else if (dum_data.fb_owning_channel[channr] != dev_id) | ||
571 | return -EFBNOTOWNER; | ||
572 | else { | ||
573 | if (val == CONF_SYNC_ON) { | ||
574 | DUM_CH_CONF(channr) |= CONF_SYNCENABLE; | ||
575 | DUM_CH_CONF(channr) |= DUM_CHANNEL_CFG_SYNC_MASK | | ||
576 | DUM_CHANNEL_CFG_SYNC_MASK_SET; | ||
577 | } else if (val == CONF_SYNC_OFF) | ||
578 | DUM_CH_CONF(channr) &= ~CONF_SYNCENABLE; | ||
579 | else | ||
580 | return -EINVAL; | ||
581 | } | ||
582 | |||
583 | return 0; | ||
584 | } | ||
585 | |||
586 | EXPORT_SYMBOL(pnx4008_set_dum_channel_sync); | ||
587 | |||
588 | int pnx4008_set_dum_channel_dirty_detect(int channr, int val, int dev_id) | ||
589 | { | ||
590 | if (channr < 0 || channr > MAX_DUM_CHANNELS) | ||
591 | return -EINVAL; | ||
592 | else if (dum_data.fb_owning_channel[channr] != dev_id) | ||
593 | return -EFBNOTOWNER; | ||
594 | else { | ||
595 | if (val == CONF_DIRTYDETECTION_ON) | ||
596 | DUM_CH_CONF(channr) |= CONF_DIRTYENABLE; | ||
597 | else if (val == CONF_DIRTYDETECTION_OFF) | ||
598 | DUM_CH_CONF(channr) &= ~CONF_DIRTYENABLE; | ||
599 | else | ||
600 | return -EINVAL; | ||
601 | } | ||
602 | |||
603 | return 0; | ||
604 | } | ||
605 | |||
606 | EXPORT_SYMBOL(pnx4008_set_dum_channel_dirty_detect); | ||
607 | |||
608 | #if 0 /* Functions not used currently, but likely to be used in future */ | ||
609 | |||
610 | static int get_channel(struct dumchannel *p_chan) | ||
611 | { | ||
612 | int i = p_chan->channelnr; | ||
613 | |||
614 | if (i < 0 || i > MAX_DUM_CHANNELS) | ||
615 | return -EINVAL; | ||
616 | else { | ||
617 | p_chan->dum_ch_min = DUM_CH_MIN(i); | ||
618 | p_chan->dum_ch_max = DUM_CH_MAX(i); | ||
619 | p_chan->dum_ch_conf = DUM_CH_CONF(i); | ||
620 | p_chan->dum_ch_stat = DUM_CH_STAT(i); | ||
621 | p_chan->dum_ch_ctrl = 0; /* WriteOnly control register */ | ||
622 | } | ||
623 | |||
624 | return 0; | ||
625 | } | ||
626 | |||
627 | int pnx4008_get_dum_channel_uf(struct dumchannel_uf *p_chan_uf, int dev_id) | ||
628 | { | ||
629 | int i = p_chan_uf->channelnr; | ||
630 | |||
631 | if (i < 0 || i > MAX_DUM_CHANNELS) | ||
632 | return -EINVAL; | ||
633 | else if (dum_data.fb_owning_channel[i] != dev_id) | ||
634 | return -EFBNOTOWNER; | ||
635 | else { | ||
636 | p_chan_uf->dirty = dum_data.chan_uf_store[i].dirty; | ||
637 | p_chan_uf->source = dum_data.chan_uf_store[i].source; | ||
638 | p_chan_uf->x_offset = dum_data.chan_uf_store[i].x_offset; | ||
639 | p_chan_uf->y_offset = dum_data.chan_uf_store[i].y_offset; | ||
640 | p_chan_uf->width = dum_data.chan_uf_store[i].width; | ||
641 | p_chan_uf->height = dum_data.chan_uf_store[i].height; | ||
642 | } | ||
643 | |||
644 | return 0; | ||
645 | } | ||
646 | |||
647 | EXPORT_SYMBOL(pnx4008_get_dum_channel_uf); | ||
648 | |||
649 | int pnx4008_get_dum_channel_config(int channr, int dev_id) | ||
650 | { | ||
651 | int ret; | ||
652 | struct dumchannel chan; | ||
653 | |||
654 | if (channr < 0 || channr > MAX_DUM_CHANNELS) | ||
655 | return -EINVAL; | ||
656 | else if (dum_data.fb_owning_channel[channr] != dev_id) | ||
657 | return -EFBNOTOWNER; | ||
658 | else { | ||
659 | chan.channelnr = channr; | ||
660 | if ((ret = get_channel(&chan)) != 0) | ||
661 | return ret; | ||
662 | } | ||
663 | |||
664 | return (chan.dum_ch_conf & DUM_CHANNEL_CFG_MASK); | ||
665 | } | ||
666 | |||
667 | EXPORT_SYMBOL(pnx4008_get_dum_channel_config); | ||
668 | |||
669 | int pnx4008_force_update_dum_channel(int channr, int dev_id) | ||
670 | { | ||
671 | if (channr < 0 || channr > MAX_DUM_CHANNELS) | ||
672 | return -EINVAL; | ||
673 | |||
674 | else if (dum_data.fb_owning_channel[channr] != dev_id) | ||
675 | return -EFBNOTOWNER; | ||
676 | else | ||
677 | DUM_CH_CTRL(channr) = CTRL_SETDIRTY; | ||
678 | |||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | EXPORT_SYMBOL(pnx4008_force_update_dum_channel); | ||
683 | |||
684 | #endif | ||
685 | |||
686 | int pnx4008_sdum_mmap(struct fb_info *info, struct vm_area_struct *vma, | ||
687 | struct device *dev) | ||
688 | { | ||
689 | unsigned long off = vma->vm_pgoff << PAGE_SHIFT; | ||
690 | |||
691 | if (off < info->fix.smem_len) { | ||
692 | vma->vm_pgoff += 1; | ||
693 | return dma_mmap_writecombine(dev, vma, | ||
694 | (void *)dum_data.lcd_virt_start, | ||
695 | dum_data.lcd_phys_start, | ||
696 | FB_DMA_SIZE); | ||
697 | } | ||
698 | return -EINVAL; | ||
699 | } | ||
700 | |||
701 | EXPORT_SYMBOL(pnx4008_sdum_mmap); | ||
702 | |||
703 | int pnx4008_set_dum_exit_notification(int dev_id) | ||
704 | { | ||
705 | int i; | ||
706 | |||
707 | for (i = 0; i < MAX_DUM_CHANNELS; i++) | ||
708 | if (dum_data.fb_owning_channel[i] == dev_id) | ||
709 | return -ERESOURCESNOTFREED; | ||
710 | |||
711 | return 0; | ||
712 | } | ||
713 | |||
714 | EXPORT_SYMBOL(pnx4008_set_dum_exit_notification); | ||
715 | |||
716 | /* Platform device driver for DUM */ | ||
717 | |||
/*
 * sdum_suspend - legacy platform PM suspend hook.
 *
 * Gates the DUM clock off (clk_set_rate(clk, 0) is this platform's
 * "clock off" convention), disables the bus-access controller and
 * puts the LCD into standby via lcd_reset().
 *
 * Note that the hardware is still touched even when clk_get() fails;
 * only the error code is propagated to the PM core.
 */
static int sdum_suspend(struct platform_device *pdev, pm_message_t state)
{
	int retval = 0;
	struct clk *clk;

	clk = clk_get(0, "dum_ck");
	if (!IS_ERR(clk)) {
		clk_set_rate(clk, 0);
		clk_put(clk);
	} else
		retval = PTR_ERR(clk);

	/* disable BAC */
	DUM_CTRL = V_BAC_DISABLE_IDLE;

	/* LCD standby & turn off display */
	lcd_reset();

	return retval;
}
738 | |||
739 | static int sdum_resume(struct platform_device *pdev) | ||
740 | { | ||
741 | int retval = 0; | ||
742 | struct clk *clk; | ||
743 | |||
744 | clk = clk_get(0, "dum_ck"); | ||
745 | if (!IS_ERR(clk)) { | ||
746 | clk_set_rate(clk, 1); | ||
747 | clk_put(clk); | ||
748 | } else | ||
749 | retval = PTR_ERR(clk); | ||
750 | |||
751 | /* wait for BAC disable */ | ||
752 | DUM_CTRL = V_BAC_DISABLE_TRIG; | ||
753 | |||
754 | while (DUM_CTRL & BAC_ENABLED) | ||
755 | udelay(10); | ||
756 | |||
757 | /* re-init LCD */ | ||
758 | lcd_init(); | ||
759 | |||
760 | /* enable BAC and reset MUX */ | ||
761 | DUM_CTRL = V_BAC_ENABLE; | ||
762 | udelay(1); | ||
763 | DUM_CTRL = V_MUX_RESET; | ||
764 | return 0; | ||
765 | } | ||
766 | |||
/*
 * sdum_probe - allocate the framebuffer DMA area, map the LCD slave
 * register and initialize the DUM hardware, command strings and panel.
 *
 * Resources are released in reverse order through the goto chain on
 * any failure.  Returns 0 on success or a negative errno.
 */
static int __devinit sdum_probe(struct platform_device *pdev)
{
	int ret = 0, i = 0;

	/* map frame buffer */
	dum_data.lcd_virt_start = (u32) dma_alloc_writecombine(&pdev->dev,
						       FB_DMA_SIZE,
						       &dum_data.lcd_phys_start,
						       GFP_KERNEL);

	if (!dum_data.lcd_virt_start) {
		ret = -ENOMEM;
		goto out_3;
	}

	/* map slave registers */
	dum_data.slave_phys_base = PNX4008_DUM_SLAVE_BASE;
	dum_data.slave_virt_base =
	    (u32 *) ioremap_nocache(dum_data.slave_phys_base, sizeof(u32));

	if (dum_data.slave_virt_base == NULL) {
		ret = -ENOMEM;
		goto out_2;
	}

	/* initialize DUM and LCD display */
	ret = dum_init(pdev);
	if (ret)
		goto out_1;

	dum_chan_init();
	lcd_init();

	DUM_CTRL = V_BAC_ENABLE;
	udelay(1);
	DUM_CTRL = V_MUX_RESET;

	/* set decode address and sync clock divider */
	DUM_DECODE = dum_data.lcd_phys_start & DUM_DECODE_MASK;
	DUM_CLK_DIV = PNX4008_DUM_CLK_DIV;

	/* no channel is owned until a client allocates one */
	for (i = 0; i < MAX_DUM_CHANNELS; i++)
		dum_data.fb_owning_channel[i] = -1;

	/*setup wakeup interrupt */
	start_int_set_rising_edge(SE_DISP_SYNC_INT);
	start_int_ack(SE_DISP_SYNC_INT);
	start_int_umask(SE_DISP_SYNC_INT);

	return 0;

out_1:
	iounmap((void *)dum_data.slave_virt_base);
out_2:
	dma_free_writecombine(&pdev->dev, FB_DMA_SIZE,
			      (void *)dum_data.lcd_virt_start,
			      dum_data.lcd_phys_start);
out_3:
	return ret;
}
827 | |||
/*
 * sdum_remove - undo sdum_probe(): mask the wakeup interrupt, gate the
 * DUM clock off, unmap the slave register and free the framebuffer
 * DMA area.  Always returns 0.
 */
static int sdum_remove(struct platform_device *pdev)
{
	struct clk *clk;

	start_int_mask(SE_DISP_SYNC_INT);

	clk = clk_get(0, "dum_ck");
	if (!IS_ERR(clk)) {
		clk_set_rate(clk, 0);
		clk_put(clk);
	}

	iounmap((void *)dum_data.slave_virt_base);

	dma_free_writecombine(&pdev->dev, FB_DMA_SIZE,
			      (void *)dum_data.lcd_virt_start,
			      dum_data.lcd_phys_start);

	return 0;
}
848 | |||
/* Platform driver glue: probe/remove plus legacy suspend/resume hooks. */
static struct platform_driver sdum_driver = {
	.driver = {
		.name = "pnx4008-sdum",
	},
	.probe = sdum_probe,
	.remove = sdum_remove,
	.suspend = sdum_suspend,
	.resume = sdum_resume,
};
858 | |||
/*
 * Module init hook: register the platform driver.
 * NOTE(review): deliberately non-static -- presumably also called
 * directly from board setup code; confirm before making it static.
 */
int __init sdum_init(void)
{
	return platform_driver_register(&sdum_driver);
}
864 | static void __exit sdum_exit(void) | ||
865 | { | ||
866 | platform_driver_unregister(&sdum_driver); | ||
867 | }; | ||
868 | |||
869 | module_init(sdum_init); | ||
870 | module_exit(sdum_exit); | ||
871 | |||
872 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/pnx4008/sdum.h b/drivers/video/pnx4008/sdum.h new file mode 100644 index 00000000000..189c3d64138 --- /dev/null +++ b/drivers/video/pnx4008/sdum.h | |||
@@ -0,0 +1,136 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 Philips Semiconductors | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2, or (at your option) | ||
7 | * any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; see the file COPYING. If not, write to | ||
16 | * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
17 | * Boston, MA 02111-1307, USA, or http://www.gnu.org/licenses/gpl.html | ||
18 | */ | ||
19 | |||
20 | #define MAX_DUM_CHANNELS 64 | ||
21 | |||
22 | #define RGB_MEM_WINDOW(x) (0x10000000 + (x)*0x00100000) | ||
23 | |||
24 | #define QCIF_OFFSET(x) (((x) == 0) ? 0x00000: ((x) == 1) ? 0x30000: -1) | ||
25 | #define CIF_OFFSET(x) (((x) == 0) ? 0x00000: ((x) == 1) ? 0x60000: -1) | ||
26 | |||
27 | #define CTRL_SETDIRTY (0x00000001) | ||
28 | #define CONF_DIRTYENABLE (0x00000020) | ||
29 | #define CONF_SYNCENABLE (0x00000004) | ||
30 | |||
31 | #define DIRTY_ENABLED(conf) ((conf) & 0x0020) | ||
32 | #define SYNC_ENABLED(conf) ((conf) & 0x0004) | ||
33 | |||
34 | /* Display 1 & 2 Write Timing Configuration */ | ||
35 | #define PNX4008_DUM_WT_CFG 0x00372000 | ||
36 | |||
37 | /* Display 1 & 2 Read Timing Configuration */ | ||
38 | #define PNX4008_DUM_RT_CFG 0x00003A47 | ||
39 | |||
40 | /* DUM Transit State Timing Configuration */ | ||
41 | #define PNX4008_DUM_T_CFG 0x1D /* 29 HCLK cycles */ | ||
42 | |||
43 | /* DUM Sync count clock divider */ | ||
44 | #define PNX4008_DUM_CLK_DIV 0x02DD | ||
45 | |||
46 | /* Memory size for framebuffer, allocated through dma_alloc_writecombine(). | ||
47 | * Must be PAGE aligned | ||
48 | */ | ||
49 | #define FB_DMA_SIZE (PAGE_ALIGN(SZ_1M + PAGE_SIZE)) | ||
50 | |||
51 | #define OFFSET_RGBBUFFER (0xB0000) | ||
52 | #define OFFSET_YUVBUFFER (0x00000) | ||
53 | |||
54 | #define YUVBUFFER (lcd_video_start + OFFSET_YUVBUFFER) | ||
55 | #define RGBBUFFER (lcd_video_start + OFFSET_RGBBUFFER) | ||
56 | |||
57 | #define CMDSTRING_BASEADDR (0x00C000) /* iram */ | ||
58 | #define BYTES_PER_CMDSTRING (0x80) | ||
59 | #define NR_OF_CMDSTRINGS (64) | ||
60 | |||
61 | #define MAX_NR_PRESTRINGS (0x40) | ||
62 | #define MAX_NR_POSTSTRINGS (0x40) | ||
63 | |||
64 | /* various mask definitions */ | ||
65 | #define DUM_CLK_ENABLE 0x01 | ||
66 | #define DUM_CLK_DISABLE 0 | ||
67 | #define DUM_DECODE_MASK 0x1FFFFFFF | ||
68 | #define DUM_CHANNEL_CFG_MASK 0x01FF | ||
69 | #define DUM_CHANNEL_CFG_SYNC_MASK 0xFFFE00FF | ||
70 | #define DUM_CHANNEL_CFG_SYNC_MASK_SET 0x0CA00 | ||
71 | |||
72 | #define SDUM_RETURNVAL_BASE (0x500) | ||
73 | |||
74 | #define CONF_SYNC_OFF (0x602) | ||
75 | #define CONF_SYNC_ON (0x603) | ||
76 | |||
77 | #define CONF_DIRTYDETECTION_OFF (0x600) | ||
78 | #define CONF_DIRTYDETECTION_ON (0x601) | ||
79 | |||
/* Per-channel framebuffer description passed between framebuffer
 * drivers and the DUM core (see pnx4008_put_dum_channel_uf()). */
struct dumchannel_uf {
	int channelnr;		/* DUM channel index, 0..MAX_DUM_CHANNELS-1 */
	u32 *dirty;		/* dirty-flag buffer for this channel */
	u32 *source;		/* source pixel data */
	u32 x_offset;
	u32 y_offset;
	u32 width;
	u32 height;
};

/* Framebuffer pixel-layout types used by pnx4008_get_fb_addresses(). */
enum {
	FB_TYPE_YUV,
	FB_TYPE_RGB
};

/* Command string consumed by the DUM engine.
 * NOTE(review): field layout presumably mirrors the hardware's
 * in-IRAM command-string format -- confirm against the PNX4008 DUM
 * documentation before changing it. */
struct cmdstring {
	int channelnr;
	uint16_t prestringlen;
	uint16_t poststringlen;
	uint16_t format;
	uint16_t reserved;
	uint16_t startaddr_low;
	uint16_t startaddr_high;
	uint16_t pixdatlen_low;
	uint16_t pixdatlen_high;
	u32 precmd[MAX_NR_PRESTRINGS];
	u32 postcmd[MAX_NR_POSTSTRINGS];

};

/* Snapshot of one DUM channel's register set (filled by get_channel()
 * in sdum.c). */
struct dumchannel {
	int channelnr;
	int dum_ch_min;
	int dum_ch_max;
	int dum_ch_conf;
	int dum_ch_stat;
	int dum_ch_ctrl;	/* write-only in hardware; read back as 0 */
};
118 | |||
119 | int pnx4008_alloc_dum_channel(int dev_id); | ||
120 | int pnx4008_free_dum_channel(int channr, int dev_id); | ||
121 | |||
122 | int pnx4008_get_dum_channel_uf(struct dumchannel_uf *pChan_uf, int dev_id); | ||
123 | int pnx4008_put_dum_channel_uf(struct dumchannel_uf chan_uf, int dev_id); | ||
124 | |||
125 | int pnx4008_set_dum_channel_sync(int channr, int val, int dev_id); | ||
126 | int pnx4008_set_dum_channel_dirty_detect(int channr, int val, int dev_id); | ||
127 | |||
128 | int pnx4008_force_dum_update_channel(int channr, int dev_id); | ||
129 | |||
130 | int pnx4008_get_dum_channel_config(int channr, int dev_id); | ||
131 | |||
132 | int pnx4008_sdum_mmap(struct fb_info *info, struct vm_area_struct *vma, struct device *dev); | ||
133 | int pnx4008_set_dum_exit_notification(int dev_id); | ||
134 | |||
135 | int pnx4008_get_fb_addresses(int fb_type, void **virt_addr, | ||
136 | dma_addr_t * phys_addr, int *fb_length); | ||
diff --git a/drivers/video/sh_mobile_meram.h b/drivers/video/sh_mobile_meram.h new file mode 100644 index 00000000000..82c54fbce8b --- /dev/null +++ b/drivers/video/sh_mobile_meram.h | |||
@@ -0,0 +1,41 @@ | |||
1 | #ifndef __sh_mobile_meram_h__ | ||
2 | #define __sh_mobile_meram_h__ | ||
3 | |||
4 | #include <linux/mutex.h> | ||
5 | #include <video/sh_mobile_meram.h> | ||
6 | |||
7 | /* | ||
8 | * MERAM private | ||
9 | */ | ||
10 | |||
11 | #define MERAM_ICB_Y 0x1 | ||
12 | #define MERAM_ICB_C 0x2 | ||
13 | |||
14 | /* MERAM cache size */ | ||
15 | #define SH_MOBILE_MERAM_ICB_NUM 32 | ||
16 | |||
17 | #define SH_MOBILE_MERAM_CACHE_OFFSET(p) ((p) >> 16) | ||
18 | #define SH_MOBILE_MERAM_CACHE_SIZE(p) ((p) & 0xffff) | ||
19 | |||
20 | struct sh_mobile_meram_priv { | ||
21 | void __iomem *base; | ||
22 | struct mutex lock; | ||
23 | unsigned long used_icb; | ||
24 | int used_meram_cache_regions; | ||
25 | unsigned long used_meram_cache[SH_MOBILE_MERAM_ICB_NUM]; | ||
26 | }; | ||
27 | |||
28 | int sh_mobile_meram_alloc_icb(const struct sh_mobile_meram_cfg *cfg, | ||
29 | int xres, | ||
30 | int yres, | ||
31 | unsigned int base_addr, | ||
32 | int yuv_mode, | ||
33 | int *marker_icb, | ||
34 | int *out_pitch); | ||
35 | |||
36 | void sh_mobile_meram_free_icb(int marker_icb); | ||
37 | |||
38 | #define SH_MOBILE_MERAM_START(ind, ab) \ | ||
39 | (0xC0000000 | ((ab & 0x1) << 23) | ((ind & 0x1F) << 24)) | ||
40 | |||
41 | #endif /* !__sh_mobile_meram_h__ */ | ||
diff --git a/drivers/video/tegra/Kconfig b/drivers/video/tegra/Kconfig new file mode 100644 index 00000000000..7de26267155 --- /dev/null +++ b/drivers/video/tegra/Kconfig | |||
@@ -0,0 +1,126 @@ | |||
1 | if ARCH_TEGRA | ||
2 | |||
3 | comment "NVIDIA Tegra Display Driver options" | ||
4 | |||
5 | config TEGRA_GRHOST | ||
6 | tristate "Tegra graphics host driver" | ||
7 | help | ||
8 | Driver for the Tegra graphics host hardware. | ||
9 | |||
10 | config TEGRA_DC | ||
	tristate "Tegra Display Controller"
12 | depends on ARCH_TEGRA && TEGRA_GRHOST | ||
13 | select FB_MODE_HELPERS | ||
14 | select I2C | ||
15 | help | ||
16 | Tegra display controller support. | ||
17 | |||
18 | config FB_TEGRA | ||
19 | tristate "Tegra Framebuffer driver" | ||
20 | depends on TEGRA_DC && FB = y | ||
21 | select FB_CFB_FILLRECT | ||
22 | select FB_CFB_COPYAREA | ||
23 | select FB_CFB_IMAGEBLIT | ||
24 | default FB | ||
25 | help | ||
26 | Framebuffer device support for the Tegra display controller. | ||
27 | |||
28 | config TEGRA_DC_EXTENSIONS | ||
29 | bool "Tegra Display Controller Extensions" | ||
30 | depends on TEGRA_DC | ||
31 | default y | ||
32 | help | ||
33 | This exposes support for extended capabilities of the Tegra display | ||
34 | controller to userspace drivers. | ||
35 | |||
36 | config TEGRA_NVMAP | ||
37 | bool "Tegra GPU memory management driver (nvmap)" | ||
38 | default y | ||
39 | help | ||
40 | Say Y here to include the memory management driver for the Tegra | ||
41 | GPU, multimedia and display subsystems | ||
42 | |||
43 | config NVMAP_RECLAIM_UNPINNED_VM | ||
44 | bool "Virtualize IOVMM memory in nvmap" | ||
45 | depends on TEGRA_NVMAP && TEGRA_IOVMM | ||
46 | default y | ||
47 | help | ||
48 | Say Y here to enable nvmap to reclaim I/O virtual memory after | ||
49 | it has been unpinned, and re-use it for other handles. This can | ||
50 | allow a larger virtual I/O VM space than would normally be | ||
51 | supported by the hardware, at a slight cost in performance. | ||
52 | |||
53 | config NVMAP_ALLOW_SYSMEM | ||
54 | bool "Allow physical system memory to be used by nvmap" | ||
55 | depends on TEGRA_NVMAP | ||
56 | default y | ||
57 | help | ||
58 | Say Y here to allow nvmap to use physical system memory (i.e., | ||
59 | shared with the operating system but not translated through | ||
60 | an IOVMM device) for allocations. | ||
61 | |||
62 | config NVMAP_HIGHMEM_ONLY | ||
63 | bool "Use only HIGHMEM for nvmap" | ||
64 | depends on TEGRA_NVMAP && (NVMAP_ALLOW_SYSMEM || TEGRA_IOVMM) && HIGHMEM | ||
65 | default n | ||
66 | help | ||
67 | Say Y here to restrict nvmap system memory allocations (both | ||
68 | physical system memory and IOVMM) to just HIGHMEM pages. | ||
69 | |||
70 | config NVMAP_CARVEOUT_KILLER | ||
71 | bool "Reclaim nvmap carveout by killing processes" | ||
72 | depends on TEGRA_NVMAP | ||
73 | default n | ||
74 | help | ||
75 | Say Y here to allow the system to reclaim carveout space by killing | ||
76 | processes. This will kill the largest consumers of lowest priority | ||
77 | first. | ||
78 | |||
79 | config NVMAP_CARVEOUT_COMPACTOR | ||
80 | bool "Compact carveout when it gets fragmented" | ||
81 | depends on TEGRA_NVMAP | ||
82 | default y | ||
83 | help | ||
	  When a carveout allocation attempt fails, the compactor defragments
	  the heap and retries the failed allocation.
	  Say Y here to let nvmap keep carveout fragmentation under control.
87 | |||
88 | |||
89 | config NVMAP_VPR | ||
90 | bool "Enable VPR Heap." | ||
91 | depends on TEGRA_NVMAP | ||
92 | default n | ||
93 | help | ||
94 | Say Y here to enable Video Protection Region(VPR) heap. | ||
	  If unsure, say N.
96 | |||
97 | config TEGRA_DSI | ||
98 | bool "Enable DSI panel." | ||
99 | default n | ||
100 | help | ||
101 | Say Y here to enable the DSI panel. | ||
102 | |||
103 | config NVMAP_CONVERT_CARVEOUT_TO_IOVMM | ||
104 | bool "Convert carveout to IOVMM" | ||
105 | depends on TEGRA_NVMAP && TEGRA_IOVMM_SMMU | ||
106 | default y | ||
107 | help | ||
108 | Say Y here to force to convert carveout memory requests to | ||
109 | I/O virtual memory requests. | ||
110 | |||
111 | config TEGRA_NVHDCP | ||
112 | bool "Support NVHDCP content protection on HDMI" | ||
113 | default n | ||
114 | help | ||
115 | Say Y here to support NVHDCP upstream and downstream protocols, this | ||
116 | requires a correctly fused chip to negotiate keys. | ||
117 | |||
118 | config TEGRA_HDMI_74MHZ_LIMIT | ||
119 | bool "Support only up to 74.25 MHz HDMI pixel frequency" | ||
120 | default n | ||
121 | help | ||
122 | Say Y here to make kernel report only low bandwidth modes. | ||
123 | Useful only for boards which can't deliver 148.50 MHz. | ||
124 | |||
125 | endif | ||
126 | |||
diff --git a/drivers/video/tegra/Makefile b/drivers/video/tegra/Makefile new file mode 100644 index 00000000000..2299a3c5eaa --- /dev/null +++ b/drivers/video/tegra/Makefile | |||
@@ -0,0 +1,5 @@ | |||
# Collect gcov coverage data for this directory when profiling is enabled.
GCOV_PROFILE := y
obj-$(CONFIG_TEGRA_GRHOST) += host/
obj-$(CONFIG_TEGRA_DC) += dc/
obj-$(CONFIG_FB_TEGRA) += fb.o
obj-$(CONFIG_TEGRA_NVMAP) += nvmap/
diff --git a/drivers/video/tegra/dc/Makefile b/drivers/video/tegra/dc/Makefile new file mode 100644 index 00000000000..01f13918ca6 --- /dev/null +++ b/drivers/video/tegra/dc/Makefile | |||
@@ -0,0 +1,10 @@ | |||
# Collect gcov coverage data for this directory when profiling is enabled.
GCOV_PROFILE := y
obj-y += dc.o
obj-y += rgb.o
obj-y += hdmi.o
obj-$(CONFIG_TEGRA_NVHDCP) += nvhdcp.o
obj-y += edid.o
obj-y += nvsd.o
obj-y += dsi.o
obj-y += dc_sysfs.o
obj-$(CONFIG_TEGRA_DC_EXTENSIONS) += ext/
diff --git a/drivers/video/tegra/dc/dc.c b/drivers/video/tegra/dc/dc.c new file mode 100644 index 00000000000..8b3bf041a7d --- /dev/null +++ b/drivers/video/tegra/dc/dc.c | |||
@@ -0,0 +1,3120 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/dc.c | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * Author: Erik Gilling <konkers@android.com> | ||
6 | * | ||
7 | * Copyright (C) 2010-2012 NVIDIA Corporation | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/clk.h> | ||
28 | #include <linux/mutex.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/workqueue.h> | ||
32 | #include <linux/ktime.h> | ||
33 | #include <linux/debugfs.h> | ||
34 | #include <linux/seq_file.h> | ||
35 | #include <linux/backlight.h> | ||
36 | #include <video/tegrafb.h> | ||
37 | #include <drm/drm_fixed.h> | ||
38 | #ifdef CONFIG_SWITCH | ||
39 | #include <linux/switch.h> | ||
40 | #endif | ||
41 | |||
42 | |||
43 | #include <mach/clk.h> | ||
44 | #include <mach/dc.h> | ||
45 | #include <mach/fb.h> | ||
46 | #include <mach/mc.h> | ||
47 | #include <linux/nvhost.h> | ||
48 | #include <mach/latency_allowance.h> | ||
49 | |||
50 | #include "dc_reg.h" | ||
51 | #include "dc_priv.h" | ||
52 | #include "nvsd.h" | ||
53 | |||
54 | #define TEGRA_CRC_LATCHED_DELAY 34 | ||
55 | |||
56 | #define DC_COM_PIN_OUTPUT_POLARITY1_INIT_VAL 0x01000000 | ||
57 | #define DC_COM_PIN_OUTPUT_POLARITY3_INIT_VAL 0x0 | ||
58 | |||
59 | #ifndef CONFIG_TEGRA_FPGA_PLATFORM | ||
60 | #define ALL_UF_INT (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT) | ||
61 | #else | ||
62 | /* ignore underflows when on simulation and fpga platform */ | ||
63 | #define ALL_UF_INT (0) | ||
64 | #endif | ||
65 | |||
66 | static int no_vsync; | ||
67 | |||
68 | static void _tegra_dc_controller_disable(struct tegra_dc *dc); | ||
69 | |||
70 | module_param_named(no_vsync, no_vsync, int, S_IRUGO | S_IWUSR); | ||
71 | |||
72 | static int use_dynamic_emc = 1; | ||
73 | |||
74 | module_param_named(use_dynamic_emc, use_dynamic_emc, int, S_IRUGO | S_IWUSR); | ||
75 | |||
76 | struct tegra_dc *tegra_dcs[TEGRA_MAX_DC]; | ||
77 | |||
78 | DEFINE_MUTEX(tegra_dc_lock); | ||
79 | DEFINE_MUTEX(shared_lock); | ||
80 | |||
/*
 * Per-window scaling filter capabilities, indexed by window index
 * (A = 0, B = 1, C = 2).  h/v flag whether the hardware window supports
 * horizontal/vertical filtering.
 */
static const struct {
	bool h;
	bool v;
} can_filter[] = {
	/* Window A has no filtering */
	{ false, false },
	/* Window B has both H and V filtering */
	{ true, true },
	/* Window C has only V filtering */
	{ false, true },
};

/* True when the window supports V filtering and is actually scaling
 * vertically (fixed-point source height != output height). */
static inline bool win_use_v_filter(const struct tegra_dc_win *win)
{
	return can_filter[win->idx].v &&
		win->h.full != dfixed_const(win->out_h);
}

/* True when the window supports H filtering and is actually scaling
 * horizontally (fixed-point source width != output width). */
static inline bool win_use_h_filter(const struct tegra_dc_win *win)
{
	return can_filter[win->idx].h &&
		win->w.full != dfixed_const(win->out_w);
}
102 | |||
103 | static inline int tegra_dc_fmt_bpp(int fmt) | ||
104 | { | ||
105 | switch (fmt) { | ||
106 | case TEGRA_WIN_FMT_P1: | ||
107 | return 1; | ||
108 | |||
109 | case TEGRA_WIN_FMT_P2: | ||
110 | return 2; | ||
111 | |||
112 | case TEGRA_WIN_FMT_P4: | ||
113 | return 4; | ||
114 | |||
115 | case TEGRA_WIN_FMT_P8: | ||
116 | return 8; | ||
117 | |||
118 | case TEGRA_WIN_FMT_B4G4R4A4: | ||
119 | case TEGRA_WIN_FMT_B5G5R5A: | ||
120 | case TEGRA_WIN_FMT_B5G6R5: | ||
121 | case TEGRA_WIN_FMT_AB5G5R5: | ||
122 | return 16; | ||
123 | |||
124 | case TEGRA_WIN_FMT_B8G8R8A8: | ||
125 | case TEGRA_WIN_FMT_R8G8B8A8: | ||
126 | case TEGRA_WIN_FMT_B6x2G6x2R6x2A8: | ||
127 | case TEGRA_WIN_FMT_R6x2G6x2B6x2A8: | ||
128 | return 32; | ||
129 | |||
130 | /* for planar formats, size of the Y plane, 8bit */ | ||
131 | case TEGRA_WIN_FMT_YCbCr420P: | ||
132 | case TEGRA_WIN_FMT_YUV420P: | ||
133 | case TEGRA_WIN_FMT_YCbCr422P: | ||
134 | case TEGRA_WIN_FMT_YUV422P: | ||
135 | case TEGRA_WIN_FMT_YCbCr422R: | ||
136 | case TEGRA_WIN_FMT_YUV422R: | ||
137 | case TEGRA_WIN_FMT_YCbCr422RA: | ||
138 | case TEGRA_WIN_FMT_YUV422RA: | ||
139 | return 8; | ||
140 | |||
141 | case TEGRA_WIN_FMT_YCbCr422: | ||
142 | case TEGRA_WIN_FMT_YUV422: | ||
143 | /* FIXME: need to know the bpp of these formats */ | ||
144 | return 0; | ||
145 | } | ||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static inline bool tegra_dc_is_yuv_planar(int fmt) | ||
150 | { | ||
151 | switch (fmt) { | ||
152 | case TEGRA_WIN_FMT_YUV420P: | ||
153 | case TEGRA_WIN_FMT_YCbCr420P: | ||
154 | case TEGRA_WIN_FMT_YCbCr422P: | ||
155 | case TEGRA_WIN_FMT_YUV422P: | ||
156 | case TEGRA_WIN_FMT_YCbCr422R: | ||
157 | case TEGRA_WIN_FMT_YUV422R: | ||
158 | case TEGRA_WIN_FMT_YCbCr422RA: | ||
159 | case TEGRA_WIN_FMT_YUV422RA: | ||
160 | return true; | ||
161 | } | ||
162 | return false; | ||
163 | } | ||
164 | |||
/*
 * DUMP_REG(a) - format one DC register as "<name>\t<offset>\t<value>"
 * and emit it through the print() callback.  Relies on the local
 * variables "dc", "buff" and "data" of the enclosing _dump_regs().
 */
#define DUMP_REG(a) do { \
	snprintf(buff, sizeof(buff), "%-32s\t%03x\t%08lx\n", \
		 #a, a, tegra_dc_readl(dc, a)); \
	print(data, buff); \
	} while (0)

/*
 * Dump every interesting DC register through the supplied print
 * callback (used by both the dev_dbg and debugfs paths).  Takes the DC
 * out of low-power I/O state and enables its clock for the duration of
 * the dump.
 */
static void _dump_regs(struct tegra_dc *dc, void *data,
		       void (* print)(void *data, const char *str))
{
	int i;
	char buff[256];

	tegra_dc_io_start(dc);
	clk_enable(dc->clk);

	/* command/interrupt registers */
	DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
	DUMP_REG(DC_CMD_DISPLAY_COMMAND);
	DUMP_REG(DC_CMD_SIGNAL_RAISE);
	DUMP_REG(DC_CMD_INT_STATUS);
	DUMP_REG(DC_CMD_INT_MASK);
	DUMP_REG(DC_CMD_INT_ENABLE);
	DUMP_REG(DC_CMD_INT_TYPE);
	DUMP_REG(DC_CMD_INT_POLARITY);
	DUMP_REG(DC_CMD_SIGNAL_RAISE1);
	DUMP_REG(DC_CMD_SIGNAL_RAISE2);
	DUMP_REG(DC_CMD_SIGNAL_RAISE3);
	DUMP_REG(DC_CMD_STATE_ACCESS);
	DUMP_REG(DC_CMD_STATE_CONTROL);
	DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
	DUMP_REG(DC_CMD_REG_ACT_CONTROL);

	/* display timing / output registers */
	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
	DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
	DUMP_REG(DC_DISP_MEM_HIGH_PRIORITY);
	DUMP_REG(DC_DISP_MEM_HIGH_PRIORITY_TIMER);
	DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
	DUMP_REG(DC_DISP_REF_TO_SYNC);
	DUMP_REG(DC_DISP_SYNC_WIDTH);
	DUMP_REG(DC_DISP_BACK_PORCH);
	DUMP_REG(DC_DISP_DISP_ACTIVE);
	DUMP_REG(DC_DISP_FRONT_PORCH);
	DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
	DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
	DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
	DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
	DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
	DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
	DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
	DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
	DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
	DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
	DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
	DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
	DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
	DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
	DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
	DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
	DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
	DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
	DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
	DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
	DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
	DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
	DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
	DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
	DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
	DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
	DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
	DUMP_REG(DC_DISP_M0_CONTROL);
	DUMP_REG(DC_DISP_M1_CONTROL);
	DUMP_REG(DC_DISP_DI_CONTROL);
	DUMP_REG(DC_DISP_PP_CONTROL);
	DUMP_REG(DC_DISP_PP_SELECT_A);
	DUMP_REG(DC_DISP_PP_SELECT_B);
	DUMP_REG(DC_DISP_PP_SELECT_C);
	DUMP_REG(DC_DISP_PP_SELECT_D);
	DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
	DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
	DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
	DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
	DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
	DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
	DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
	DUMP_REG(DC_DISP_BORDER_COLOR);
	DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
	DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
	DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
	DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
	DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
	DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
	DUMP_REG(DC_DISP_CURSOR_START_ADDR);
	DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
	DUMP_REG(DC_DISP_CURSOR_POSITION);
	DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
	DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
	DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
	DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
	DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
	DUMP_REG(DC_DISP_MCCIF_DISPLAY0C_HYST);
	DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
	DUMP_REG(DC_DISP_DAC_CRT_CTRL);
	DUMP_REG(DC_DISP_DISP_MISC_CONTROL);


	/* per-window registers: select each window in turn, then read its
	 * banked register set */
	for (i = 0; i < 3; i++) {
		print(data, "\n");
		snprintf(buff, sizeof(buff), "WINDOW %c:\n", 'A' + i);
		print(data, buff);

		tegra_dc_writel(dc, WINDOW_A_SELECT << i,
				DC_CMD_DISPLAY_WINDOW_HEADER);
		DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
		DUMP_REG(DC_WIN_WIN_OPTIONS);
		DUMP_REG(DC_WIN_BYTE_SWAP);
		DUMP_REG(DC_WIN_BUFFER_CONTROL);
		DUMP_REG(DC_WIN_COLOR_DEPTH);
		DUMP_REG(DC_WIN_POSITION);
		DUMP_REG(DC_WIN_SIZE);
		DUMP_REG(DC_WIN_PRESCALED_SIZE);
		DUMP_REG(DC_WIN_H_INITIAL_DDA);
		DUMP_REG(DC_WIN_V_INITIAL_DDA);
		DUMP_REG(DC_WIN_DDA_INCREMENT);
		DUMP_REG(DC_WIN_LINE_STRIDE);
		DUMP_REG(DC_WIN_BUF_STRIDE);
		DUMP_REG(DC_WIN_UV_BUF_STRIDE);
		DUMP_REG(DC_WIN_BLEND_NOKEY);
		DUMP_REG(DC_WIN_BLEND_1WIN);
		DUMP_REG(DC_WIN_BLEND_2WIN_X);
		DUMP_REG(DC_WIN_BLEND_2WIN_Y);
		DUMP_REG(DC_WIN_BLEND_3WIN_XY);
		DUMP_REG(DC_WINBUF_START_ADDR);
		DUMP_REG(DC_WINBUF_START_ADDR_U);
		DUMP_REG(DC_WINBUF_START_ADDR_V);
		DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
		DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
		DUMP_REG(DC_WINBUF_UFLOW_STATUS);
		DUMP_REG(DC_WIN_CSC_YOF);
		DUMP_REG(DC_WIN_CSC_KYRGB);
		DUMP_REG(DC_WIN_CSC_KUR);
		DUMP_REG(DC_WIN_CSC_KVR);
		DUMP_REG(DC_WIN_CSC_KUG);
		DUMP_REG(DC_WIN_CSC_KVG);
		DUMP_REG(DC_WIN_CSC_KUB);
		DUMP_REG(DC_WIN_CSC_KVB);
	}

	/* power / pin-mux / smart-dimmer registers */
	DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL);
	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE2);
	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY2);
	DUMP_REG(DC_COM_PIN_OUTPUT_DATA2);
	DUMP_REG(DC_COM_PIN_INPUT_ENABLE2);
	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT5);
	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
	DUMP_REG(DC_DISP_M1_CONTROL);
	DUMP_REG(DC_COM_PM1_CONTROL);
	DUMP_REG(DC_COM_PM1_DUTY_CYCLE);
	DUMP_REG(DC_DISP_SD_CONTROL);

	clk_disable(dc->clk);
	tegra_dc_io_end(dc);
}

#undef DUMP_REG
333 | |||
#ifdef DEBUG
/* Print callback routing register dumps to the device's debug log. */
static void dump_regs_print(void *data, const char *str)
{
	struct tegra_dc *dc = data;
	dev_dbg(&dc->ndev->dev, "%s", str);
}

/* Dump all DC registers via dev_dbg(); compiled to a no-op when DEBUG
 * is not defined. */
static void dump_regs(struct tegra_dc *dc)
{
	_dump_regs(dc, dc, dump_regs_print);
}
#else /* !DEBUG */

static void dump_regs(struct tegra_dc *dc) {}

#endif /* DEBUG */
350 | |||
351 | #ifdef CONFIG_DEBUG_FS | ||
352 | |||
/* Print callback routing register dumps into a debugfs seq_file. */
static void dbg_regs_print(void *data, const char *str)
{
	struct seq_file *seq = data;
	seq_printf(seq, "%s", str);
}
359 | |||
360 | #undef DUMP_REG | ||
361 | |||
362 | static int dbg_dc_show(struct seq_file *s, void *unused) | ||
363 | { | ||
364 | struct tegra_dc *dc = s->private; | ||
365 | |||
366 | _dump_regs(dc, s, dbg_regs_print); | ||
367 | |||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | |||
372 | static int dbg_dc_open(struct inode *inode, struct file *file) | ||
373 | { | ||
374 | return single_open(file, dbg_dc_show, inode->i_private); | ||
375 | } | ||
376 | |||
377 | static const struct file_operations regs_fops = { | ||
378 | .open = dbg_dc_open, | ||
379 | .read = seq_read, | ||
380 | .llseek = seq_lseek, | ||
381 | .release = single_release, | ||
382 | }; | ||
383 | |||
/* debugfs "mode" file: print the currently-programmed display timing
 * under dc->lock so it cannot change mid-dump. */
static int dbg_dc_mode_show(struct seq_file *s, void *unused)
{
	struct tegra_dc *dc = s->private;
	struct tegra_dc_mode *m;

	mutex_lock(&dc->lock);
	m = &dc->mode;
	seq_printf(s,
		"pclk: %d\n"
		"h_ref_to_sync: %d\n"
		"v_ref_to_sync: %d\n"
		"h_sync_width: %d\n"
		"v_sync_width: %d\n"
		"h_back_porch: %d\n"
		"v_back_porch: %d\n"
		"h_active: %d\n"
		"v_active: %d\n"
		"h_front_porch: %d\n"
		"v_front_porch: %d\n"
		"stereo_mode: %d\n",
		m->pclk, m->h_ref_to_sync, m->v_ref_to_sync,
		m->h_sync_width, m->v_sync_width,
		m->h_back_porch, m->v_back_porch,
		m->h_active, m->v_active,
		m->h_front_porch, m->v_front_porch,
		m->stereo_mode);
	mutex_unlock(&dc->lock);
	return 0;
}

static int dbg_dc_mode_open(struct inode *inode, struct file *file)
{
	return single_open(file, dbg_dc_mode_show, inode->i_private);
}

static const struct file_operations mode_fops = {
	.open = dbg_dc_mode_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
425 | |||
/* debugfs "stats" file: print accumulated underflow counters (total and
 * per-window) under dc->lock. */
static int dbg_dc_stats_show(struct seq_file *s, void *unused)
{
	struct tegra_dc *dc = s->private;

	mutex_lock(&dc->lock);
	seq_printf(s,
		"underflows: %llu\n"
		"underflows_a: %llu\n"
		"underflows_b: %llu\n"
		"underflows_c: %llu\n",
		dc->stats.underflows,
		dc->stats.underflows_a,
		dc->stats.underflows_b,
		dc->stats.underflows_c);
	mutex_unlock(&dc->lock);

	return 0;
}

static int dbg_dc_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, dbg_dc_stats_show, inode->i_private);
}

static const struct file_operations stats_fops = {
	.open = dbg_dc_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
456 | |||
457 | static void __devexit tegra_dc_remove_debugfs(struct tegra_dc *dc) | ||
458 | { | ||
459 | if (dc->debugdir) | ||
460 | debugfs_remove_recursive(dc->debugdir); | ||
461 | dc->debugdir = NULL; | ||
462 | } | ||
463 | |||
464 | static void tegra_dc_create_debugfs(struct tegra_dc *dc) | ||
465 | { | ||
466 | struct dentry *retval; | ||
467 | |||
468 | dc->debugdir = debugfs_create_dir(dev_name(&dc->ndev->dev), NULL); | ||
469 | if (!dc->debugdir) | ||
470 | goto remove_out; | ||
471 | |||
472 | retval = debugfs_create_file("regs", S_IRUGO, dc->debugdir, dc, | ||
473 | ®s_fops); | ||
474 | if (!retval) | ||
475 | goto remove_out; | ||
476 | |||
477 | retval = debugfs_create_file("mode", S_IRUGO, dc->debugdir, dc, | ||
478 | &mode_fops); | ||
479 | if (!retval) | ||
480 | goto remove_out; | ||
481 | |||
482 | retval = debugfs_create_file("stats", S_IRUGO, dc->debugdir, dc, | ||
483 | &stats_fops); | ||
484 | if (!retval) | ||
485 | goto remove_out; | ||
486 | |||
487 | return; | ||
488 | remove_out: | ||
489 | dev_err(&dc->ndev->dev, "could not create debugfs\n"); | ||
490 | tegra_dc_remove_debugfs(dc); | ||
491 | } | ||
492 | |||
493 | #else /* !CONFIG_DEBUGFS */ | ||
494 | static inline void tegra_dc_create_debugfs(struct tegra_dc *dc) { }; | ||
495 | static inline void __devexit tegra_dc_remove_debugfs(struct tegra_dc *dc) { }; | ||
496 | #endif /* CONFIG_DEBUGFS */ | ||
497 | |||
498 | static int tegra_dc_set(struct tegra_dc *dc, int index) | ||
499 | { | ||
500 | int ret = 0; | ||
501 | |||
502 | mutex_lock(&tegra_dc_lock); | ||
503 | if (index >= TEGRA_MAX_DC) { | ||
504 | ret = -EINVAL; | ||
505 | goto out; | ||
506 | } | ||
507 | |||
508 | if (dc != NULL && tegra_dcs[index] != NULL) { | ||
509 | ret = -EBUSY; | ||
510 | goto out; | ||
511 | } | ||
512 | |||
513 | tegra_dcs[index] = dc; | ||
514 | |||
515 | out: | ||
516 | mutex_unlock(&tegra_dc_lock); | ||
517 | |||
518 | return ret; | ||
519 | } | ||
520 | |||
521 | static unsigned int tegra_dc_has_multiple_dc(void) | ||
522 | { | ||
523 | unsigned int idx; | ||
524 | unsigned int cnt = 0; | ||
525 | struct tegra_dc *dc; | ||
526 | |||
527 | mutex_lock(&tegra_dc_lock); | ||
528 | for (idx = 0; idx < TEGRA_MAX_DC; idx++) | ||
529 | cnt += ((dc = tegra_dcs[idx]) != NULL && dc->enabled) ? 1 : 0; | ||
530 | mutex_unlock(&tegra_dc_lock); | ||
531 | |||
532 | return (cnt > 1); | ||
533 | } | ||
534 | |||
535 | struct tegra_dc *tegra_dc_get_dc(unsigned idx) | ||
536 | { | ||
537 | if (idx < TEGRA_MAX_DC) | ||
538 | return tegra_dcs[idx]; | ||
539 | else | ||
540 | return NULL; | ||
541 | } | ||
542 | EXPORT_SYMBOL(tegra_dc_get_dc); | ||
543 | |||
544 | struct tegra_dc_win *tegra_dc_get_window(struct tegra_dc *dc, unsigned win) | ||
545 | { | ||
546 | if (win >= dc->n_windows) | ||
547 | return NULL; | ||
548 | |||
549 | return &dc->windows[win]; | ||
550 | } | ||
551 | EXPORT_SYMBOL(tegra_dc_get_window); | ||
552 | |||
553 | static int get_topmost_window(u32 *depths, unsigned long *wins) | ||
554 | { | ||
555 | int idx, best = -1; | ||
556 | |||
557 | for_each_set_bit(idx, wins, DC_N_WINDOWS) { | ||
558 | if (best == -1 || depths[idx] < depths[best]) | ||
559 | best = idx; | ||
560 | } | ||
561 | clear_bit(best, wins); | ||
562 | return best; | ||
563 | } | ||
564 | |||
/* Report whether a display is currently connected to this controller. */
bool tegra_dc_get_connected(struct tegra_dc *dc)
{
	return dc->connected;
}
EXPORT_SYMBOL(tegra_dc_get_connected);
570 | |||
571 | static u32 blend_topwin(u32 flags) | ||
572 | { | ||
573 | if (flags & TEGRA_WIN_FLAG_BLEND_COVERAGE) | ||
574 | return BLEND(NOKEY, ALPHA, 0xff, 0xff); | ||
575 | else if (flags & TEGRA_WIN_FLAG_BLEND_PREMULT) | ||
576 | return BLEND(NOKEY, PREMULT, 0xff, 0xff); | ||
577 | else | ||
578 | return BLEND(NOKEY, FIX, 0xff, 0xff); | ||
579 | } | ||
580 | |||
/*
 * Blend register value for window @idx overlapping exactly one other
 * window.  @xy (0 or 1) selects which of the two other windows is the
 * partner: the loop walks window indices skipping @idx and stops at the
 * (xy+1)-th remaining one.  If the partner is behind us we blend as the
 * top window; if it blends at all we are dependant; otherwise opaque.
 * NOTE(review): assumes DC_N_WINDOWS == 3 so "other" always lands in
 * range — confirm if the window count ever changes.
 */
static u32 blend_2win(int idx, unsigned long behind_mask, u32* flags, int xy)
{
	int other;

	for (other = 0; other < DC_N_WINDOWS; other++) {
		if (other != idx && (xy-- == 0))
			break;
	}
	if (BIT(other) & behind_mask)
		return blend_topwin(flags[idx]);
	else if (flags[other])
		return BLEND(NOKEY, DEPENDANT, 0x00, 0x00);
	else
		return BLEND(NOKEY, FIX, 0x00, 0x00);
}
596 | |||
/*
 * Blend register value for window @idx when all three windows overlap.
 * infront_mask holds the windows stacked in front of @idx; "first" is
 * the lowest-numbered of those (-1 when none).  Topmost window blends
 * by its own flags; otherwise it is dependant when sandwiched between a
 * blending window in front and anything behind, else opaque.
 */
static u32 blend_3win(int idx, unsigned long behind_mask, u32* flags)
{
	unsigned long infront_mask;
	int first;

	infront_mask = ~(behind_mask | BIT(idx));
	infront_mask &= (BIT(DC_N_WINDOWS) - 1);
	first = ffs(infront_mask) - 1;

	if (!infront_mask)
		return blend_topwin(flags[idx]);
	else if (behind_mask && first != -1 && flags[first])
		return BLEND(NOKEY, DEPENDANT, 0x00, 0x00);
	else
		return BLEND(NOKEY, FIX, 0x0, 0x0);
}
613 | |||
/*
 * Program the blend registers of every window, walking the windows from
 * topmost to bottommost depth.  get_topmost_window() clears each
 * window's bit from "mask" as it is processed, so the mask passed to
 * blend_2win()/blend_3win() is exactly the set of windows behind the
 * current one.
 */
static void tegra_dc_set_blending(struct tegra_dc *dc, struct tegra_dc_blend *blend)
{
	unsigned long mask = BIT(DC_N_WINDOWS) - 1;

	while (mask) {
		int idx = get_topmost_window(blend->z, &mask);

		tegra_dc_writel(dc, WINDOW_A_SELECT << idx,
				DC_CMD_DISPLAY_WINDOW_HEADER);
		tegra_dc_writel(dc, BLEND(NOKEY, FIX, 0xff, 0xff),
				DC_WIN_BLEND_NOKEY);
		tegra_dc_writel(dc, BLEND(NOKEY, FIX, 0xff, 0xff),
				DC_WIN_BLEND_1WIN);
		tegra_dc_writel(dc, blend_2win(idx, mask, blend->flags, 0),
				DC_WIN_BLEND_2WIN_X);
		tegra_dc_writel(dc, blend_2win(idx, mask, blend->flags, 1),
				DC_WIN_BLEND_2WIN_Y);
		tegra_dc_writel(dc, blend_3win(idx, mask, blend->flags),
				DC_WIN_BLEND_3WIN_XY);
	}
}
635 | |||
/* Fill @csc with the default color-space-conversion coefficients.
 * NOTE(review): values appear to be fixed-point YUV->RGB (BT.601-style)
 * coefficients — confirm against the Tegra TRM before relying on it. */
static void tegra_dc_init_csc_defaults(struct tegra_dc_csc *csc)
{
	csc->yof = 0x00f0;
	csc->kyrgb = 0x012a;
	csc->kur = 0x0000;
	csc->kvr = 0x0198;
	csc->kug = 0x039b;
	csc->kvg = 0x032f;
	csc->kub = 0x0204;
	csc->kvb = 0x0000;
}
647 | |||
/* Write @csc into the CSC registers of the currently selected window;
 * the caller must have programmed DC_CMD_DISPLAY_WINDOW_HEADER first. */
static void tegra_dc_set_csc(struct tegra_dc *dc, struct tegra_dc_csc *csc)
{
	tegra_dc_writel(dc, csc->yof, DC_WIN_CSC_YOF);
	tegra_dc_writel(dc, csc->kyrgb, DC_WIN_CSC_KYRGB);
	tegra_dc_writel(dc, csc->kur, DC_WIN_CSC_KUR);
	tegra_dc_writel(dc, csc->kvr, DC_WIN_CSC_KVR);
	tegra_dc_writel(dc, csc->kug, DC_WIN_CSC_KUG);
	tegra_dc_writel(dc, csc->kvg, DC_WIN_CSC_KVG);
	tegra_dc_writel(dc, csc->kub, DC_WIN_CSC_KUB);
	tegra_dc_writel(dc, csc->kvb, DC_WIN_CSC_KVB);
}
659 | |||
/*
 * Re-program window @win_idx's CSC registers from the cached values in
 * dc->windows[win_idx].csc.  Returns -EFAULT when the controller is
 * disabled (registers inaccessible).  Serialized by dc->lock.
 */
int tegra_dc_update_csc(struct tegra_dc *dc, int win_idx)
{
	mutex_lock(&dc->lock);

	if (!dc->enabled) {
		mutex_unlock(&dc->lock);
		return -EFAULT;
	}

	/* select the target window before touching its banked registers */
	tegra_dc_writel(dc, WINDOW_A_SELECT << win_idx,
			DC_CMD_DISPLAY_WINDOW_HEADER);

	tegra_dc_set_csc(dc, &dc->windows[win_idx].csc);

	mutex_unlock(&dc->lock);

	return 0;
}
EXPORT_SYMBOL(tegra_dc_update_csc);
679 | |||
680 | static void tegra_dc_init_lut_defaults(struct tegra_dc_lut *lut) | ||
681 | { | ||
682 | int i; | ||
683 | for (i = 0; i < 256; i++) | ||
684 | lut->r[i] = lut->g[i] = lut->b[i] = (u8)i; | ||
685 | } | ||
686 | |||
/*
 * Iterate over all 256 palette entries of @win's LUT, composing each
 * with the global framebuffer LUT unless the window overrides it, and
 * pass the packed 0x00BBGGRR value to @lambda.  Stops early and returns
 * 0 as soon as a lambda returns 0; returns 1 when all entries were
 * visited.
 */
static int tegra_dc_loop_lut(struct tegra_dc *dc,
			     struct tegra_dc_win *win,
			     int(*lambda)(struct tegra_dc *dc, int i, u32 rgb))
{
	struct tegra_dc_lut *lut = &win->lut;
	struct tegra_dc_lut *global_lut = &dc->fb_lut;
	int i;
	for (i = 0; i < 256; i++) {

		u32 r = (u32)lut->r[i];
		u32 g = (u32)lut->g[i];
		u32 b = (u32)lut->b[i];

		/* chain through the global LUT unless overridden */
		if (!(win->ppflags & TEGRA_WIN_PPFLAG_CP_FBOVERRIDE)) {
			r = (u32)global_lut->r[r];
			g = (u32)global_lut->g[g];
			b = (u32)global_lut->b[b];
		}

		if (!lambda(dc, i, r | (g<<8) | (b<<16)))
			return 0;
	}
	return 1;
}
711 | |||
712 | static int tegra_dc_lut_isdefaults_lambda(struct tegra_dc *dc, int i, u32 rgb) | ||
713 | { | ||
714 | if (rgb != (i | (i<<8) | (i<<16))) | ||
715 | return 0; | ||
716 | return 1; | ||
717 | } | ||
718 | |||
/* tegra_dc_loop_lut() lambda: write entry @i of the composed LUT into
 * the currently selected window's palette registers; never aborts. */
static int tegra_dc_set_lut_setreg_lambda(struct tegra_dc *dc, int i, u32 rgb)
{
	tegra_dc_writel(dc, rgb, DC_WIN_COLOR_PALETTE(i));
	return 1;
}
724 | |||
/*
 * Program @win's palette into the hardware and toggle the window's
 * color-palette enable bit to match its ppflags.  The caller must have
 * selected the window via DC_CMD_DISPLAY_WINDOW_HEADER.
 */
static void tegra_dc_set_lut(struct tegra_dc *dc, struct tegra_dc_win* win)
{
	unsigned long val = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);

	tegra_dc_loop_lut(dc, win, tegra_dc_set_lut_setreg_lambda);

	if (win->ppflags & TEGRA_WIN_PPFLAG_CP_ENABLE)
		val |= CP_ENABLE;
	else
		val &= ~CP_ENABLE;

	tegra_dc_writel(dc, val, DC_WIN_WIN_OPTIONS);
}
738 | |||
/*
 * Update the palette of window @win_idx.  @fbovr is tri-state:
 *   > 0 - set the framebuffer-override flag (use window LUT alone),
 *   = 0 - clear it (compose with the global fb LUT),
 *   < 0 - leave the flag unchanged.
 * The palette stage is auto-enabled only when the composed LUT differs
 * from the identity mapping.  Returns -EFAULT when the DC is disabled.
 */
static int tegra_dc_update_winlut(struct tegra_dc *dc, int win_idx, int fbovr)
{
	struct tegra_dc_win *win = &dc->windows[win_idx];

	mutex_lock(&dc->lock);

	if (!dc->enabled) {
		mutex_unlock(&dc->lock);
		return -EFAULT;
	}

	if (fbovr > 0)
		win->ppflags |= TEGRA_WIN_PPFLAG_CP_FBOVERRIDE;
	else if (fbovr == 0)
		win->ppflags &= ~TEGRA_WIN_PPFLAG_CP_FBOVERRIDE;

	/* only burn palette bandwidth when the LUT is non-trivial */
	if (!tegra_dc_loop_lut(dc, win, tegra_dc_lut_isdefaults_lambda))
		win->ppflags |= TEGRA_WIN_PPFLAG_CP_ENABLE;
	else
		win->ppflags &= ~TEGRA_WIN_PPFLAG_CP_ENABLE;

	tegra_dc_writel(dc, WINDOW_A_SELECT << win_idx,
			DC_CMD_DISPLAY_WINDOW_HEADER);

	tegra_dc_set_lut(dc, win);

	mutex_unlock(&dc->lock);

	return 0;
}
769 | |||
770 | int tegra_dc_update_lut(struct tegra_dc *dc, int win_idx, int fboveride) | ||
771 | { | ||
772 | if (win_idx > -1) | ||
773 | return tegra_dc_update_winlut(dc, win_idx, fboveride); | ||
774 | |||
775 | for (win_idx = 0; win_idx < DC_N_WINDOWS; win_idx++) { | ||
776 | int err = tegra_dc_update_winlut(dc, win_idx, fboveride); | ||
777 | if (err) | ||
778 | return err; | ||
779 | } | ||
780 | |||
781 | return 0; | ||
782 | } | ||
783 | EXPORT_SYMBOL(tegra_dc_update_lut); | ||
784 | |||
/*
 * Program linear (triangle) coefficients into the 16-phase horizontal
 * and vertical scaling filter tables: phase i gets weights
 * c0 = 128 - 8*i and c1 = 8*i.
 */
static void tegra_dc_set_scaling_filter(struct tegra_dc *dc)
{
	unsigned phase;

	for (phase = 0; phase < 16; phase++) {
		unsigned c0 = 128 - phase * 8;
		unsigned c1 = phase * 8;

		tegra_dc_writel(dc, (c1 << 16) | (c0 << 8),
				DC_WIN_H_FILTER_P(phase));
		tegra_dc_writel(dc, c0,
				DC_WIN_V_FILTER_P(phase));
	}
}
801 | |||
/*
 * Propagate window @w's computed bandwidth to the memory-controller
 * latency-allowance framework and record it as the window's current
 * bandwidth.  Bandwidth is converted from bytes/sec to MBps (rounded
 * up) as required by the LA API.
 */
static void tegra_dc_set_latency_allowance(struct tegra_dc *dc,
	struct tegra_dc_win *w)
{
	/* windows A, B, C for first and second display */
	static const enum tegra_la_id la_id_tab[2][3] = {
		/* first display */
		{ TEGRA_LA_DISPLAY_0A, TEGRA_LA_DISPLAY_0B,
			TEGRA_LA_DISPLAY_0C },
		/* second display */
		{ TEGRA_LA_DISPLAY_0AB, TEGRA_LA_DISPLAY_0BB,
			TEGRA_LA_DISPLAY_0CB },
	};
	/* window B V-filter tap for first and second display. */
	static const enum tegra_la_id vfilter_tab[2] = {
		TEGRA_LA_DISPLAY_1B, TEGRA_LA_DISPLAY_1BB,
	};
	unsigned long bw;

	BUG_ON(dc->ndev->id >= ARRAY_SIZE(la_id_tab));
	BUG_ON(dc->ndev->id >= ARRAY_SIZE(vfilter_tab));
	BUG_ON(w->idx >= ARRAY_SIZE(*la_id_tab));

	bw = w->new_bandwidth;

	/* tegra_dc_get_bandwidth() treats V filter windows as double
	 * bandwidth, but LA has a separate client for V filter */
	if (w->idx == 1 && win_use_v_filter(w))
		bw /= 2;

	/* our bandwidth is in bytes/sec, but LA takes MBps.
	 * round up bandwidth to 1MBps */
	bw = bw / 1000000 + 1;

#ifdef CONFIG_TEGRA_SILICON_PLATFORM
	tegra_set_latency_allowance(la_id_tab[dc->ndev->id][w->idx], bw);
	/* if window B, also set the 1B client for the 2-tap V filter. */
	if (w->idx == 1)
		tegra_set_latency_allowance(vfilter_tab[dc->ndev->id], bw);
#endif

	w->bandwidth = w->new_bandwidth;
}
844 | |||
845 | static unsigned int tegra_dc_windows_is_overlapped(struct tegra_dc_win *a, | ||
846 | struct tegra_dc_win *b) | ||
847 | { | ||
848 | if (!WIN_IS_ENABLED(a) || !WIN_IS_ENABLED(b)) | ||
849 | return 0; | ||
850 | |||
851 | /* because memory access to load the fifo can overlap, only care | ||
852 | * if windows overlap vertically */ | ||
853 | return ((a->out_y + a->out_h > b->out_y) && (a->out_y <= b->out_y)) || | ||
854 | ((b->out_y + b->out_h > a->out_y) && (b->out_y <= a->out_y)); | ||
855 | } | ||
856 | |||
857 | static unsigned long tegra_dc_find_max_bandwidth(struct tegra_dc_win *wins[], | ||
858 | int n) | ||
859 | { | ||
860 | unsigned i; | ||
861 | unsigned j; | ||
862 | unsigned overlap_count; | ||
863 | unsigned max_bw = 0; | ||
864 | |||
865 | WARN_ONCE(n > 3, "Code assumes at most 3 windows, bandwidth is likely" | ||
866 | "inaccurate.\n"); | ||
867 | |||
868 | /* If we had a large number of windows, we would compute adjacency | ||
869 | * graph representing 2 window overlaps, find all cliques in the graph, | ||
870 | * assign bandwidth to each clique, and then select the clique with | ||
871 | * maximum bandwidth. But because we have at most 3 windows, | ||
872 | * implementing proper Bron-Kerbosh algorithm would be an overkill, | ||
873 | * brute force will suffice. | ||
874 | * | ||
875 | * Thus: find maximum bandwidth for either single or a pair of windows | ||
876 | * and count number of window pair overlaps. If there are three | ||
877 | * pairs, all 3 window overlap. | ||
878 | */ | ||
879 | |||
880 | overlap_count = 0; | ||
881 | for (i = 0; i < n; i++) { | ||
882 | unsigned int bw1; | ||
883 | |||
884 | if (wins[i] == NULL) | ||
885 | continue; | ||
886 | bw1 = wins[i]->new_bandwidth; | ||
887 | if (bw1 > max_bw) | ||
888 | /* Single window */ | ||
889 | max_bw = bw1; | ||
890 | |||
891 | for (j = i + 1; j < n; j++) { | ||
892 | if (wins[j] == NULL) | ||
893 | continue; | ||
894 | if (tegra_dc_windows_is_overlapped(wins[i], wins[j])) { | ||
895 | unsigned int bw2 = wins[j]->new_bandwidth; | ||
896 | if (bw1 + bw2 > max_bw) | ||
897 | /* Window pair overlaps */ | ||
898 | max_bw = bw1 + bw2; | ||
899 | overlap_count++; | ||
900 | } | ||
901 | } | ||
902 | } | ||
903 | |||
904 | if (overlap_count == 3) | ||
905 | /* All three windows overlap */ | ||
906 | max_bw = wins[0]->new_bandwidth + wins[1]->new_bandwidth + | ||
907 | wins[2]->new_bandwidth; | ||
908 | |||
909 | return max_bw; | ||
910 | } | ||
911 | |||
912 | /* | ||
913 | * Calculate peak EMC bandwidth for each enabled window = | ||
914 | * pixel_clock * win_bpp * (use_v_filter ? 2 : 1)) * H_scale_factor * | ||
915 | * (windows_tiling ? 2 : 1) | ||
916 | * | ||
917 | * | ||
918 | * note: | ||
919 | * (*) We use 2 tap V filter, so need double BW if use V filter | ||
920 | * (*) Tiling mode on T30 and DDR3 requires double BW | ||
921 | */ | ||
/* Compute the peak EMC bandwidth consumed by window @w on head @dc.
 * The result is internally scaled (the pixel clock is used >>16 and the
 * final value is <<16 again); returns 0 for disabled or zero-sized
 * windows, ULONG_MAX if the intermediate value overflows. */
static unsigned long tegra_dc_calc_win_bandwidth(struct tegra_dc *dc,
	struct tegra_dc_win *w)
{
	unsigned long ret;
	int tiled_windows_bw_multiplier;
	unsigned long bpp;

	/* a disabled window fetches nothing */
	if (!WIN_IS_ENABLED(w))
		return 0;

	/* zero source or destination extent: no bandwidth, and avoids a
	 * divide-by-zero in the out_w division below */
	if (dfixed_trunc(w->w) == 0 || dfixed_trunc(w->h) == 0 ||
	    w->out_w == 0 || w->out_h == 0)
		return 0;

	tiled_windows_bw_multiplier =
		tegra_mc_get_tiled_memory_bandwidth_multiplier();

	/* all of tegra's YUV formats(420 and 422) fetch 2 bytes per pixel,
	 * but the size reported by tegra_dc_fmt_bpp for the planar version
	 * is of the luma plane's size only. */
	bpp = tegra_dc_is_yuv_planar(w->fmt) ?
		2 * tegra_dc_fmt_bpp(w->fmt) : tegra_dc_fmt_bpp(w->fmt);
	/* perform calculations with most significant bits of pixel clock
	 * to prevent overflow of long. */
	ret = (unsigned long)(dc->mode.pclk >> 16) *
		bpp / 8 *
		(win_use_v_filter(w) ? 2 : 1) * dfixed_trunc(w->w) / w->out_w *
		(WIN_IS_TILED(w) ? tiled_windows_bw_multiplier : 1);

	/*
	 * Assuming 48% efficiency: i.e. if we calculate we need 70MBps, we
	 * will request 147MBps from EMC.
	 */
	ret = ret * 2 + ret / 10;

	/* if overflowed */
	/* NOTE(review): on a 32-bit unsigned long, any ret above 2^15 still
	 * overflows the "<< 16" below even though it passes this check --
	 * confirm whether the threshold is intentional. */
	if (ret > (1UL << 31))
		return ULONG_MAX;

	return ret << 16; /* restore the scaling we did above */
}
963 | |||
964 | static unsigned long tegra_dc_get_bandwidth( | ||
965 | struct tegra_dc_win *windows[], int n) | ||
966 | { | ||
967 | int i; | ||
968 | |||
969 | BUG_ON(n > DC_N_WINDOWS); | ||
970 | |||
971 | /* emc rate and latency allowance both need to know per window | ||
972 | * bandwidths */ | ||
973 | for (i = 0; i < n; i++) { | ||
974 | struct tegra_dc_win *w = windows[i]; | ||
975 | if (w) | ||
976 | w->new_bandwidth = tegra_dc_calc_win_bandwidth(w->dc, w); | ||
977 | } | ||
978 | |||
979 | return tegra_dc_find_max_bandwidth(windows, n); | ||
980 | } | ||
981 | |||
982 | /* to save power, call when display memory clients would be idle */ | ||
983 | static void tegra_dc_clear_bandwidth(struct tegra_dc *dc) | ||
984 | { | ||
985 | if (tegra_is_clk_enabled(dc->emc_clk)) | ||
986 | clk_disable(dc->emc_clk); | ||
987 | dc->emc_clk_rate = 0; | ||
988 | } | ||
989 | |||
/* Apply dc->new_emc_clk_rate to the EMC clock (enabling the clock when
 * the rate goes 0 -> non-zero, disabling it on non-zero -> 0) and
 * reprogram the latency allowance of every window whose computed
 * bandwidth changed.  NOTE(review): callers in this file hold dc->lock
 * when invoking this from the update path -- confirm it is required. */
static void tegra_dc_program_bandwidth(struct tegra_dc *dc)
{
	unsigned i;

	if (dc->emc_clk_rate != dc->new_emc_clk_rate) {
		/* going from 0 to non-zero */
		if (!dc->emc_clk_rate && !tegra_is_clk_enabled(dc->emc_clk))
			clk_enable(dc->emc_clk);

		dc->emc_clk_rate = dc->new_emc_clk_rate;
		clk_set_rate(dc->emc_clk, dc->emc_clk_rate);

		if (!dc->new_emc_clk_rate) /* going from non-zero to 0 */
			clk_disable(dc->emc_clk);
	}

	/* only windows whose bandwidth actually changed (and is non-zero)
	 * need a new latency allowance */
	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_win *w = &dc->windows[i];
		if (w->bandwidth != w->new_bandwidth && w->new_bandwidth != 0)
			tegra_dc_set_latency_allowance(dc, w);
	}
}
1012 | |||
1013 | static int tegra_dc_set_dynamic_emc(struct tegra_dc_win *windows[], int n) | ||
1014 | { | ||
1015 | unsigned long new_rate; | ||
1016 | struct tegra_dc *dc; | ||
1017 | |||
1018 | if (!use_dynamic_emc) | ||
1019 | return 0; | ||
1020 | |||
1021 | dc = windows[0]->dc; | ||
1022 | |||
1023 | /* calculate the new rate based on this POST */ | ||
1024 | new_rate = tegra_dc_get_bandwidth(windows, n); | ||
1025 | new_rate = EMC_BW_TO_FREQ(new_rate); | ||
1026 | |||
1027 | if (tegra_dc_has_multiple_dc()) | ||
1028 | new_rate = ULONG_MAX; | ||
1029 | |||
1030 | dc->new_emc_clk_rate = new_rate; | ||
1031 | |||
1032 | return 0; | ||
1033 | } | ||
1034 | |||
1035 | static inline u32 compute_dda_inc(fixed20_12 in, unsigned out_int, | ||
1036 | bool v, unsigned Bpp) | ||
1037 | { | ||
1038 | /* | ||
1039 | * min(round((prescaled_size_in_pixels - 1) * 0x1000 / | ||
1040 | * (post_scaled_size_in_pixels - 1)), MAX) | ||
1041 | * Where the value of MAX is as follows: | ||
1042 | * For V_DDA_INCREMENT: 15.0 (0xF000) | ||
1043 | * For H_DDA_INCREMENT: 4.0 (0x4000) for 4 Bytes/pix formats. | ||
1044 | * 8.0 (0x8000) for 2 Bytes/pix formats. | ||
1045 | */ | ||
1046 | |||
1047 | fixed20_12 out = dfixed_init(out_int); | ||
1048 | u32 dda_inc; | ||
1049 | int max; | ||
1050 | |||
1051 | if (v) { | ||
1052 | max = 15; | ||
1053 | } else { | ||
1054 | switch (Bpp) { | ||
1055 | default: | ||
1056 | WARN_ON_ONCE(1); | ||
1057 | /* fallthrough */ | ||
1058 | case 4: | ||
1059 | max = 4; | ||
1060 | break; | ||
1061 | case 2: | ||
1062 | max = 8; | ||
1063 | break; | ||
1064 | } | ||
1065 | } | ||
1066 | |||
1067 | out.full = max_t(u32, out.full - dfixed_const(1), dfixed_const(1)); | ||
1068 | in.full -= dfixed_const(1); | ||
1069 | |||
1070 | dda_inc = dfixed_div(in, out); | ||
1071 | |||
1072 | dda_inc = min_t(u32, dda_inc, dfixed_const(max)); | ||
1073 | |||
1074 | return dda_inc; | ||
1075 | } | ||
1076 | |||
/* Initial DDA accumulator value: the fractional part of the 20.12
 * fixed-point source start coordinate. */
static inline u32 compute_initial_dda(fixed20_12 in)
{
	return dfixed_frac(in);
}
1081 | |||
/* does not support updating windows on multiple dcs in one call */
/*
 * Program the window set @windows (all on one head) into the controller
 * and request an update.  When vsync-ed (!no_vsync) the writes go to
 * the ASSEMBLY registers and latch at the next frame boundary; windows
 * are marked dirty until the frame-end interrupt clears them.  With
 * no_vsync the ACTIVE registers are written directly and nothing is
 * marked dirty.  Returns -EFAULT if the head is disabled, 0 otherwise.
 */
int tegra_dc_update_windows(struct tegra_dc_win *windows[], int n)
{
	struct tegra_dc *dc;
	unsigned long update_mask = GENERAL_ACT_REQ;
	unsigned long val;
	bool update_blend = false;
	int i;

	dc = windows[0]->dc;

	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) {
		/* Acquire one_shot_lock to avoid race condition between
		 * cancellation of old delayed work and schedule of new
		 * delayed work. */
		mutex_lock(&dc->one_shot_lock);
		cancel_delayed_work_sync(&dc->one_shot_work);
	}
	mutex_lock(&dc->lock);

	if (!dc->enabled) {
		mutex_unlock(&dc->lock);
		if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
			mutex_unlock(&dc->one_shot_lock);
		return -EFAULT;
	}

	/* choose which register copy subsequent writes target */
	if (no_vsync)
		tegra_dc_writel(dc, WRITE_MUX_ACTIVE | READ_MUX_ACTIVE, DC_CMD_STATE_ACCESS);
	else
		tegra_dc_writel(dc, WRITE_MUX_ASSEMBLY | READ_MUX_ASSEMBLY, DC_CMD_STATE_ACCESS);

	/* clear the options of every window; the loop below re-enables the
	 * ones being updated */
	for (i = 0; i < DC_N_WINDOWS; i++) {
		tegra_dc_writel(dc, WINDOW_A_SELECT << i,
			DC_CMD_DISPLAY_WINDOW_HEADER);
		tegra_dc_writel(dc, 0, DC_WIN_WIN_OPTIONS);
		if (!no_vsync)
			update_mask |= WIN_A_ACT_REQ << i;
	}

	for (i = 0; i < n; i++) {
		struct tegra_dc_win *win = windows[i];
		unsigned h_dda;
		unsigned v_dda;
		fixed20_12 h_offset, v_offset;
		bool invert_h = (win->flags & TEGRA_WIN_FLAG_INVERT_H) != 0;
		bool invert_v = (win->flags & TEGRA_WIN_FLAG_INVERT_V) != 0;
		bool yuvp = tegra_dc_is_yuv_planar(win->fmt);
		unsigned Bpp = tegra_dc_fmt_bpp(win->fmt) / 8;
		/* Bytes per pixel of bandwidth, used for dda_inc calculation */
		unsigned Bpp_bw = Bpp * (yuvp ? 2 : 1);
		const bool filter_h = win_use_h_filter(win);
		const bool filter_v = win_use_v_filter(win);

		/* track depth/blend-flag changes so blending is reprogrammed
		 * once after the loop */
		if (win->z != dc->blend.z[win->idx]) {
			dc->blend.z[win->idx] = win->z;
			update_blend = true;
		}
		if ((win->flags & TEGRA_WIN_BLEND_FLAGS_MASK) !=
			dc->blend.flags[win->idx]) {
			dc->blend.flags[win->idx] =
				win->flags & TEGRA_WIN_BLEND_FLAGS_MASK;
			update_blend = true;
		}

		tegra_dc_writel(dc, WINDOW_A_SELECT << win->idx,
				DC_CMD_DISPLAY_WINDOW_HEADER);

		if (!no_vsync)
			update_mask |= WIN_A_ACT_REQ << win->idx;

		if (!WIN_IS_ENABLED(win)) {
			tegra_dc_writel(dc, 0, DC_WIN_WIN_OPTIONS);
			continue;
		}

		tegra_dc_writel(dc, win->fmt, DC_WIN_COLOR_DEPTH);
		tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);

		/* output position/size and prescaled source size */
		tegra_dc_writel(dc,
				V_POSITION(win->out_y) | H_POSITION(win->out_x),
				DC_WIN_POSITION);
		tegra_dc_writel(dc,
				V_SIZE(win->out_h) | H_SIZE(win->out_w),
				DC_WIN_SIZE);
		tegra_dc_writel(dc,
				V_PRESCALED_SIZE(dfixed_trunc(win->h)) |
				H_PRESCALED_SIZE(dfixed_trunc(win->w) * Bpp),
				DC_WIN_PRESCALED_SIZE);

		/* scaler increments and initial phase */
		h_dda = compute_dda_inc(win->w, win->out_w, false, Bpp_bw);
		v_dda = compute_dda_inc(win->h, win->out_h, true, Bpp_bw);
		tegra_dc_writel(dc, V_DDA_INC(v_dda) | H_DDA_INC(h_dda),
				DC_WIN_DDA_INCREMENT);
		h_dda = compute_initial_dda(win->x);
		v_dda = compute_initial_dda(win->y);
		tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
		tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);

		tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
		tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
		tegra_dc_writel(dc,
				(unsigned long)win->phys_addr,
				DC_WINBUF_START_ADDR);

		/* planar YUV additionally carries chroma plane addresses and
		 * a combined luma/chroma stride */
		if (!yuvp) {
			tegra_dc_writel(dc, win->stride, DC_WIN_LINE_STRIDE);
		} else {
			tegra_dc_writel(dc,
					(unsigned long)win->phys_addr_u,
					DC_WINBUF_START_ADDR_U);
			tegra_dc_writel(dc,
					(unsigned long)win->phys_addr_v,
					DC_WINBUF_START_ADDR_V);
			tegra_dc_writel(dc,
					LINE_STRIDE(win->stride) |
					UV_LINE_STRIDE(win->stride_uv),
					DC_WIN_LINE_STRIDE);
		}

		/* for inverted scan-out, start fetching from the far edge */
		h_offset = win->x;
		if (invert_h) {
			h_offset.full += win->w.full - dfixed_const(1);
		}

		v_offset = win->y;
		if (invert_v) {
			v_offset.full += win->h.full - dfixed_const(1);
		}

		tegra_dc_writel(dc, dfixed_trunc(h_offset) * Bpp,
				DC_WINBUF_ADDR_H_OFFSET);
		tegra_dc_writel(dc, dfixed_trunc(v_offset),
				DC_WINBUF_ADDR_V_OFFSET);

		if (WIN_IS_TILED(win))
			tegra_dc_writel(dc,
					DC_WIN_BUFFER_ADDR_MODE_TILE |
					DC_WIN_BUFFER_ADDR_MODE_TILE_UV,
					DC_WIN_BUFFER_ADDR_MODE);
		else
			tegra_dc_writel(dc,
					DC_WIN_BUFFER_ADDR_MODE_LINEAR |
					DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV,
					DC_WIN_BUFFER_ADDR_MODE);

		/* finally assemble the window options word */
		val = WIN_ENABLE;
		if (yuvp)
			val |= CSC_ENABLE;
		else if (tegra_dc_fmt_bpp(win->fmt) < 24)
			val |= COLOR_EXPAND;

		if (win->ppflags & TEGRA_WIN_PPFLAG_CP_ENABLE)
			val |= CP_ENABLE;

		if (filter_h)
			val |= H_FILTER_ENABLE;
		if (filter_v)
			val |= V_FILTER_ENABLE;

		if (invert_h)
			val |= H_DIRECTION_DECREMENT;
		if (invert_v)
			val |= V_DIRECTION_DECREMENT;

		tegra_dc_writel(dc, val, DC_WIN_WIN_OPTIONS);

		/* dirty until the vblank/frame-end handler latches it */
		win->dirty = no_vsync ? 0 : 1;

		dev_dbg(&dc->ndev->dev, "%s():idx=%d z=%d x=%d y=%d w=%d h=%d "
			"out_x=%u out_y=%u out_w=%u out_h=%u "
			"fmt=%d yuvp=%d Bpp=%u filter_h=%d filter_v=%d",
			__func__, win->idx, win->z,
			dfixed_trunc(win->x), dfixed_trunc(win->y),
			dfixed_trunc(win->w), dfixed_trunc(win->h),
			win->out_x, win->out_y, win->out_w, win->out_h,
			win->fmt, yuvp, Bpp, filter_h, filter_v);
	}

	/* a depth or flag change affects all windows' blend setup */
	if (update_blend) {
		tegra_dc_set_blending(dc, &dc->blend);
		for (i = 0; i < DC_N_WINDOWS; i++) {
			if (!no_vsync)
				dc->windows[i].dirty = 1;
			update_mask |= WIN_A_ACT_REQ << i;
		}
	}

	tegra_dc_set_dynamic_emc(windows, n);

	/* the <<8 positions the same bits in the UPDATE field */
	tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);

	/* ack stale frame-end/vblank, then unmask (vsync) or mask
	 * (no_vsync) the completion interrupts */
	tegra_dc_writel(dc, FRAME_END_INT | V_BLANK_INT, DC_CMD_INT_STATUS);
	if (!no_vsync) {
		val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
		val |= (FRAME_END_INT | V_BLANK_INT | ALL_UF_INT);
		tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
	} else {
		val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
		val &= ~(FRAME_END_INT | V_BLANK_INT | ALL_UF_INT);
		tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
	}

	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
		schedule_delayed_work(&dc->one_shot_work,
				msecs_to_jiffies(dc->one_shot_delay_ms));

	/* update EMC clock if calculated bandwidth has changed */
	tegra_dc_program_bandwidth(dc);

	/* one-shot panels need an explicit host trigger to scan out */
	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
		update_mask |= NC_HOST_TRIG;

	tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);

	mutex_unlock(&dc->lock);
	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
		mutex_unlock(&dc->one_shot_lock);

	return 0;
}
EXPORT_SYMBOL(tegra_dc_update_windows);
1304 | |||
/* Return the nvhost syncpoint id backing window @i of head @dc. */
u32 tegra_dc_get_syncpt_id(const struct tegra_dc *dc, int i)
{
	return dc->syncpt[i].id;
}
EXPORT_SYMBOL(tegra_dc_get_syncpt_id);
1310 | |||
/* Reserve the next syncpoint fence value for window @i and return it.
 * The max is advanced by 1 only while the head is enabled, so a
 * disabled head hands back the current max unchanged. */
u32 tegra_dc_incr_syncpt_max(struct tegra_dc *dc, int i)
{
	u32 max;

	mutex_lock(&dc->lock);
	max = nvhost_syncpt_incr_max(&nvhost_get_host(dc->ndev)->syncpt,
		dc->syncpt[i].id, ((dc->enabled) ? 1 : 0));
	dc->syncpt[i].max = max;
	mutex_unlock(&dc->lock);

	return max;
}
1323 | |||
1324 | void tegra_dc_incr_syncpt_min(struct tegra_dc *dc, int i, u32 val) | ||
1325 | { | ||
1326 | mutex_lock(&dc->lock); | ||
1327 | if ( dc->enabled ) | ||
1328 | while (dc->syncpt[i].min < val) { | ||
1329 | dc->syncpt[i].min++; | ||
1330 | nvhost_syncpt_cpu_incr( | ||
1331 | &nvhost_get_host(dc->ndev)->syncpt, | ||
1332 | dc->syncpt[i].id); | ||
1333 | } | ||
1334 | mutex_unlock(&dc->lock); | ||
1335 | } | ||
1336 | |||
1337 | static bool tegra_dc_windows_are_clean(struct tegra_dc_win *windows[], | ||
1338 | int n) | ||
1339 | { | ||
1340 | int i; | ||
1341 | |||
1342 | for (i = 0; i < n; i++) { | ||
1343 | if (windows[i]->dirty) | ||
1344 | return false; | ||
1345 | } | ||
1346 | |||
1347 | return true; | ||
1348 | } | ||
1349 | |||
/* does not support syncing windows on multiple dcs in one call */
/* Sleep until every window in @windows is clean (its posted update has
 * been latched and the dirty flag cleared by the interrupt path).
 * Returns -EINVAL for an out-of-range window count, -EFAULT when the
 * head is disabled, otherwise the wait_event result (on real hardware
 * the wait is bounded by a 1 s timeout). */
int tegra_dc_sync_windows(struct tegra_dc_win *windows[], int n)
{
	if (n < 1 || n > DC_N_WINDOWS)
		return -EINVAL;

	if (!windows[0]->dc->enabled)
		return -EFAULT;

#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
	/* Don't want to timeout on simulator */
	return wait_event_interruptible(windows[0]->dc->wq,
		tegra_dc_windows_are_clean(windows, n));
#else
	return wait_event_interruptible_timeout(windows[0]->dc->wq,
		tegra_dc_windows_are_clean(windows, n),
		HZ);
#endif
}
EXPORT_SYMBOL(tegra_dc_sync_windows);
1370 | |||
/* Current display-controller clock rate.  Off silicon (FPGA/simulator)
 * the clock tree is not real, so a fixed 27 MHz is reported. */
static unsigned long tegra_dc_clk_get_rate(struct tegra_dc *dc)
{
#ifdef CONFIG_TEGRA_SILICON_PLATFORM
	return clk_get_rate(dc->clk);
#else
	return 27000000;
#endif
}
1379 | |||
/* Shared divider math for the two pclk helpers below (previously
 * duplicated line-for-line in both).  The shift clock divider works in
 * half steps, so pick the closest rate * 2 / div with div >= 2;
 * returns 0 when @pclk is more than about twice the source @rate. */
static unsigned long tegra_dc_pclk_div_rate(unsigned long rate, int pclk)
{
	unsigned long div = DIV_ROUND_CLOSEST(rate * 2, pclk);

	if (div < 2)
		return 0;

	return rate * 2 / div;
}

/* Closest achievable pixel clock to @pclk from dc's current clock. */
static unsigned long tegra_dc_pclk_round_rate(struct tegra_dc *dc, int pclk)
{
	return tegra_dc_pclk_div_rate(tegra_dc_clk_get_rate(dc), pclk);
}

/* Closest achievable pixel clock to @pclk were @parent the source. */
static unsigned long tegra_dc_pclk_predict_rate(struct clk *parent, int pclk)
{
	return tegra_dc_pclk_div_rate(clk_get_rate(parent), pclk);
}
1409 | |||
/* Select and program the parent/base clocks appropriate for @dc's
 * output type (RGB, HDMI or DSI) and inform DVFS of the resulting
 * pixel-clock requirement.  @clk is the display module clock being
 * configured (for DSI it may differ from dc->clk, selecting the
 * pll_d2 path for the second instance). */
void tegra_dc_setup_clk(struct tegra_dc *dc, struct clk *clk)
{
	int pclk;

	if (dc->out->type == TEGRA_DC_OUT_RGB) {
		unsigned long rate;
		struct clk *parent_clk =
			clk_get_sys(NULL, dc->out->parent_clk ? : "pll_p");

		/* if pll_p cannot get within -1%/+9% of the requested pixel
		 * clock, fall back to the board-provided backup parent */
		if (dc->out->parent_clk_backup &&
		    (parent_clk == clk_get_sys(NULL, "pll_p"))) {
			rate = tegra_dc_pclk_predict_rate(
				parent_clk, dc->mode.pclk);
			/* use pll_d as last resort */
			if (rate < (dc->mode.pclk / 100 * 99) ||
			    rate > (dc->mode.pclk / 100 * 109))
				parent_clk = clk_get_sys(
					NULL, dc->out->parent_clk_backup);
		}

		if (clk_get_parent(clk) != parent_clk)
			clk_set_parent(clk, parent_clk);

		/* non-pll_p parents are dedicated, so retune their base PLL
		 * to exactly twice the pixel clock */
		if (parent_clk != clk_get_sys(NULL, "pll_p")) {
			struct clk *base_clk = clk_get_parent(parent_clk);

			/* Assuming either pll_d or pll_d2 is used */
			rate = dc->mode.pclk * 2;

			if (rate != clk_get_rate(base_clk))
				clk_set_rate(base_clk, rate);
		}
	}

	if (dc->out->type == TEGRA_DC_OUT_HDMI) {
		unsigned long rate;
		struct clk *parent_clk =
			clk_get_sys(NULL, dc->out->parent_clk ? : "pll_d_out0");
		struct clk *base_clk = clk_get_parent(parent_clk);

		/*
		 * Providing dynamic frequency rate setting for T20/T30 HDMI.
		 * The required rate needs to be setup at 4x multiplier,
		 * as out0 is 1/2 of the actual PLL output.
		 */

		rate = dc->mode.pclk * 4;
		if (rate != clk_get_rate(base_clk))
			clk_set_rate(base_clk, rate);

		if (clk_get_parent(clk) != parent_clk)
			clk_set_parent(clk, parent_clk);
	}

	if (dc->out->type == TEGRA_DC_OUT_DSI) {
		unsigned long rate;
		struct clk *parent_clk;
		struct clk *base_clk;

		/* the DC clock itself always hangs off pll_d; the DSI module
		 * clock picks pll_d or pll_d2 by instance */
		if (clk == dc->clk) {
			parent_clk = clk_get_sys(NULL,
					dc->out->parent_clk ? : "pll_d_out0");
			base_clk = clk_get_parent(parent_clk);
			tegra_clk_cfg_ex(base_clk,
					TEGRA_CLK_PLLD_DSI_OUT_ENB, 1);
		} else {
			if (dc->pdata->default_out->dsi->dsi_instance) {
				parent_clk = clk_get_sys(NULL,
					dc->out->parent_clk ? : "pll_d2_out0");
				base_clk = clk_get_parent(parent_clk);
				tegra_clk_cfg_ex(base_clk,
						TEGRA_CLK_PLLD_CSI_OUT_ENB, 1);
			} else {
				parent_clk = clk_get_sys(NULL,
					dc->out->parent_clk ? : "pll_d_out0");
				base_clk = clk_get_parent(parent_clk);
				tegra_clk_cfg_ex(base_clk,
						TEGRA_CLK_PLLD_DSI_OUT_ENB, 1);
			}
		}

		rate = dc->mode.pclk * dc->shift_clk_div * 2;
		if (rate != clk_get_rate(base_clk))
			clk_set_rate(base_clk, rate);

		if (clk_get_parent(clk) != parent_clk)
			clk_set_parent(clk, parent_clk);
	}

	/* tell DVFS what this module clock must now sustain */
	pclk = tegra_dc_pclk_round_rate(dc, dc->mode.pclk);
	tegra_dvfs_set_rate(clk, pclk);
}
1502 | |||
1503 | /* return non-zero if constraint is violated */ | ||
1504 | static int calc_h_ref_to_sync(const struct tegra_dc_mode *mode, int *href) | ||
1505 | { | ||
1506 | long a, b; | ||
1507 | |||
1508 | /* Constraint 5: H_REF_TO_SYNC >= 0 */ | ||
1509 | a = 0; | ||
1510 | |||
1511 | /* Constraint 6: H_FRONT_PORT >= (H_REF_TO_SYNC + 1) */ | ||
1512 | b = mode->h_front_porch - 1; | ||
1513 | |||
1514 | /* Constraint 1: H_REF_TO_SYNC + H_SYNC_WIDTH + H_BACK_PORCH > 11 */ | ||
1515 | if (a + mode->h_sync_width + mode->h_back_porch <= 11) | ||
1516 | a = 1 + 11 - mode->h_sync_width - mode->h_back_porch; | ||
1517 | /* check Constraint 1 and 6 */ | ||
1518 | if (a > b) | ||
1519 | return 1; | ||
1520 | |||
1521 | /* Constraint 4: H_SYNC_WIDTH >= 1 */ | ||
1522 | if (mode->h_sync_width < 1) | ||
1523 | return 4; | ||
1524 | |||
1525 | /* Constraint 7: H_DISP_ACTIVE >= 16 */ | ||
1526 | if (mode->h_active < 16) | ||
1527 | return 7; | ||
1528 | |||
1529 | if (href) { | ||
1530 | if (b > a && a % 2) | ||
1531 | *href = a + 1; /* use smallest even value */ | ||
1532 | else | ||
1533 | *href = a; /* even or only possible value */ | ||
1534 | } | ||
1535 | |||
1536 | return 0; | ||
1537 | } | ||
1538 | |||
1539 | static int calc_v_ref_to_sync(const struct tegra_dc_mode *mode, int *vref) | ||
1540 | { | ||
1541 | long a; | ||
1542 | a = 1; /* Constraint 5: V_REF_TO_SYNC >= 1 */ | ||
1543 | |||
1544 | /* Constraint 2: V_REF_TO_SYNC + V_SYNC_WIDTH + V_BACK_PORCH > 1 */ | ||
1545 | if (a + mode->v_sync_width + mode->v_back_porch <= 1) | ||
1546 | a = 1 + 1 - mode->v_sync_width - mode->v_back_porch; | ||
1547 | |||
1548 | /* Constraint 6 */ | ||
1549 | if (mode->v_front_porch < a + 1) | ||
1550 | a = mode->v_front_porch - 1; | ||
1551 | |||
1552 | /* Constraint 4: V_SYNC_WIDTH >= 1 */ | ||
1553 | if (mode->v_sync_width < 1) | ||
1554 | return 4; | ||
1555 | |||
1556 | /* Constraint 7: V_DISP_ACTIVE >= 16 */ | ||
1557 | if (mode->v_active < 16) | ||
1558 | return 7; | ||
1559 | |||
1560 | if (vref) | ||
1561 | *vref = a; | ||
1562 | return 0; | ||
1563 | } | ||
1564 | |||
1565 | static int calc_ref_to_sync(struct tegra_dc_mode *mode) | ||
1566 | { | ||
1567 | int ret; | ||
1568 | ret = calc_h_ref_to_sync(mode, &mode->h_ref_to_sync); | ||
1569 | if (ret) | ||
1570 | return ret; | ||
1571 | ret = calc_v_ref_to_sync(mode, &mode->v_ref_to_sync); | ||
1572 | if (ret) | ||
1573 | return ret; | ||
1574 | |||
1575 | return 0; | ||
1576 | } | ||
1577 | |||
1578 | static bool check_ref_to_sync(struct tegra_dc_mode *mode) | ||
1579 | { | ||
1580 | /* Constraint 1: H_REF_TO_SYNC + H_SYNC_WIDTH + H_BACK_PORCH > 11. */ | ||
1581 | if (mode->h_ref_to_sync + mode->h_sync_width + mode->h_back_porch <= 11) | ||
1582 | return false; | ||
1583 | |||
1584 | /* Constraint 2: V_REF_TO_SYNC + V_SYNC_WIDTH + V_BACK_PORCH > 1. */ | ||
1585 | if (mode->v_ref_to_sync + mode->v_sync_width + mode->v_back_porch <= 1) | ||
1586 | return false; | ||
1587 | |||
1588 | /* Constraint 3: V_FRONT_PORCH + V_SYNC_WIDTH + V_BACK_PORCH > 1 | ||
1589 | * (vertical blank). */ | ||
1590 | if (mode->v_front_porch + mode->v_sync_width + mode->v_back_porch <= 1) | ||
1591 | return false; | ||
1592 | |||
1593 | /* Constraint 4: V_SYNC_WIDTH >= 1; H_SYNC_WIDTH >= 1. */ | ||
1594 | if (mode->v_sync_width < 1 || mode->h_sync_width < 1) | ||
1595 | return false; | ||
1596 | |||
1597 | /* Constraint 5: V_REF_TO_SYNC >= 1; H_REF_TO_SYNC >= 0. */ | ||
1598 | if (mode->v_ref_to_sync < 1 || mode->h_ref_to_sync < 0) | ||
1599 | return false; | ||
1600 | |||
1601 | /* Constraint 6: V_FRONT_PORT >= (V_REF_TO_SYNC + 1); | ||
1602 | * H_FRONT_PORT >= (H_REF_TO_SYNC + 1). */ | ||
1603 | if (mode->v_front_porch < mode->v_ref_to_sync + 1 || | ||
1604 | mode->h_front_porch < mode->h_ref_to_sync + 1) | ||
1605 | return false; | ||
1606 | |||
1607 | /* Constraint 7: H_DISP_ACTIVE >= 16; V_DISP_ACTIVE >= 16. */ | ||
1608 | if (mode->h_active < 16 || mode->v_active < 16) | ||
1609 | return false; | ||
1610 | |||
1611 | return true; | ||
1612 | } | ||
1613 | |||
#ifdef DEBUG
/* return in 1000ths of a Hertz */
static int calc_refresh(const struct tegra_dc_mode *m)
{
	long h_total, v_total, refresh;

	h_total = m->h_active + m->h_front_porch + m->h_back_porch +
		m->h_sync_width;
	v_total = m->v_active + m->v_front_porch + m->v_back_porch +
		m->v_sync_width;
	refresh = m->pclk / h_total;
	refresh *= 1000;
	refresh /= v_total;

	return refresh;
}

/* Log @mode (with its computed refresh rate) tagged with @note. */
static void print_mode(struct tegra_dc *dc,
			const struct tegra_dc_mode *mode, const char *note)
{
	if (mode) {
		/* Bug fix: calc_refresh() takes only the mode pointer;
		 * calc_refresh(dc, mode) did not compile with DEBUG set. */
		int refresh = calc_refresh(mode);

		dev_info(&dc->ndev->dev, "%s():MODE:%dx%d@%d.%03uHz pclk=%d\n",
			note ? note : "",
			mode->h_active, mode->v_active,
			refresh / 1000, refresh % 1000,
			mode->pclk);
	}
}
#else /* !DEBUG */
static inline void print_mode(struct tegra_dc *dc,
			const struct tegra_dc_mode *mode, const char *note) { }
#endif /* DEBUG */
1645 | |||
/* Enable the DC interrupt line; on FPGA platforms DC interrupts are
 * forced off instead. */
static inline void enable_dc_irq(unsigned int irq)
{
#ifndef CONFIG_TEGRA_FPGA_PLATFORM
	enable_irq(irq);
#else
	/* Always disable DC interrupts on FPGA. */
	disable_irq(irq);
#endif
}
1655 | |||
/* Counterpart to enable_dc_irq(): unconditionally mask the line. */
static inline void disable_dc_irq(unsigned int irq)
{
	disable_irq(irq);
}
1660 | |||
/* Push @mode's timings into the display timing registers, pick a shift
 * clock divider landing within -1%/+9% of the requested pixel clock,
 * and issue GENERAL_UPDATE/GENERAL_ACT_REQ to latch the new state.
 * Returns -EINVAL when no acceptable divider exists. */
static int tegra_dc_program_mode(struct tegra_dc *dc, struct tegra_dc_mode *mode)
{
	unsigned long val;
	unsigned long rate;
	unsigned long div;
	unsigned long pclk;

	print_mode(dc, mode, __func__);

	/* use default EMC rate when switching modes */
	dc->new_emc_clk_rate = tegra_dc_get_default_emc_clk_rate(dc);
	tegra_dc_program_bandwidth(dc);

	/* raw timing registers: h in the low half-word, v in the high */
	tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
	tegra_dc_writel(dc, mode->h_ref_to_sync | (mode->v_ref_to_sync << 16),
			DC_DISP_REF_TO_SYNC);
	tegra_dc_writel(dc, mode->h_sync_width | (mode->v_sync_width << 16),
			DC_DISP_SYNC_WIDTH);
	tegra_dc_writel(dc, mode->h_back_porch | (mode->v_back_porch << 16),
			DC_DISP_BACK_PORCH);
	tegra_dc_writel(dc, mode->h_active | (mode->v_active << 16),
			DC_DISP_DISP_ACTIVE);
	tegra_dc_writel(dc, mode->h_front_porch | (mode->v_front_porch << 16),
			DC_DISP_FRONT_PORCH);

	tegra_dc_writel(dc, DE_SELECT_ACTIVE | DE_CONTROL_NORMAL,
			DC_DISP_DATA_ENABLE_OPTIONS);

	/* TODO: MIPI/CRT/HDMI clock cals */

	/* data format, alignment and color order from board data */
	val = DISP_DATA_FORMAT_DF1P1C;

	if (dc->out->align == TEGRA_DC_ALIGN_MSB)
		val |= DISP_DATA_ALIGNMENT_MSB;
	else
		val |= DISP_DATA_ALIGNMENT_LSB;

	if (dc->out->order == TEGRA_DC_ORDER_RED_BLUE)
		val |= DISP_DATA_ORDER_RED_BLUE;
	else
		val |= DISP_DATA_ORDER_BLUE_RED;

	tegra_dc_writel(dc, val, DC_DISP_DISP_INTERFACE_CONTROL);

	rate = tegra_dc_clk_get_rate(dc);

	/* only accept a divided rate within -1% .. +9% of the request */
	pclk = tegra_dc_pclk_round_rate(dc, mode->pclk);
	if (pclk < (mode->pclk / 100 * 99) ||
	    pclk > (mode->pclk / 100 * 109)) {
		dev_err(&dc->ndev->dev,
			"can't divide %ld clock to %d -1/+9%% %ld %d %d\n",
			rate, mode->pclk,
			pclk, (mode->pclk / 100 * 99),
			(mode->pclk / 100 * 109));
		return -EINVAL;
	}

	/* the shift clock divider field encodes (2 * rate / pclk) - 2 */
	div = (rate * 2 / pclk) - 2;

	tegra_dc_writel(dc, 0x00010001,
			DC_DISP_SHIFT_CLOCK_OPTIONS);
	tegra_dc_writel(dc, PIXEL_CLK_DIVIDER_PCD1 | SHIFT_CLK_DIVIDER(div),
			DC_DISP_DISP_CLOCK_CONTROL);

#ifdef CONFIG_SWITCH
	switch_set_state(&dc->modeset_switch,
			 (mode->h_active << 16) | mode->v_active);
#endif

	tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	return 0;
}
1735 | |||
1736 | |||
1737 | int tegra_dc_set_mode(struct tegra_dc *dc, const struct tegra_dc_mode *mode) | ||
1738 | { | ||
1739 | memcpy(&dc->mode, mode, sizeof(dc->mode)); | ||
1740 | |||
1741 | print_mode(dc, mode, __func__); | ||
1742 | |||
1743 | return 0; | ||
1744 | } | ||
1745 | EXPORT_SYMBOL(tegra_dc_set_mode); | ||
1746 | |||
1747 | int tegra_dc_set_fb_mode(struct tegra_dc *dc, | ||
1748 | const struct fb_videomode *fbmode, bool stereo_mode) | ||
1749 | { | ||
1750 | struct tegra_dc_mode mode; | ||
1751 | |||
1752 | if (!fbmode->pixclock) | ||
1753 | return -EINVAL; | ||
1754 | |||
1755 | mode.pclk = PICOS2KHZ(fbmode->pixclock) * 1000; | ||
1756 | mode.h_sync_width = fbmode->hsync_len; | ||
1757 | mode.v_sync_width = fbmode->vsync_len; | ||
1758 | mode.h_back_porch = fbmode->left_margin; | ||
1759 | mode.v_back_porch = fbmode->upper_margin; | ||
1760 | mode.h_active = fbmode->xres; | ||
1761 | mode.v_active = fbmode->yres; | ||
1762 | mode.h_front_porch = fbmode->right_margin; | ||
1763 | mode.v_front_porch = fbmode->lower_margin; | ||
1764 | mode.stereo_mode = stereo_mode; | ||
1765 | if (dc->out->type == TEGRA_DC_OUT_HDMI) { | ||
1766 | /* HDMI controller requires h_ref=1, v_ref=1 */ | ||
1767 | mode.h_ref_to_sync = 1; | ||
1768 | mode.v_ref_to_sync = 1; | ||
1769 | } else { | ||
1770 | calc_ref_to_sync(&mode); | ||
1771 | } | ||
1772 | if (!check_ref_to_sync(&mode)) { | ||
1773 | dev_err(&dc->ndev->dev, | ||
1774 | "Display timing doesn't meet restrictions.\n"); | ||
1775 | return -EINVAL; | ||
1776 | } | ||
1777 | dev_info(&dc->ndev->dev, "Using mode %dx%d pclk=%d href=%d vref=%d\n", | ||
1778 | mode.h_active, mode.v_active, mode.pclk, | ||
1779 | mode.h_ref_to_sync, mode.v_ref_to_sync | ||
1780 | ); | ||
1781 | |||
1782 | #ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT | ||
1783 | /* Double the pixel clock and update v_active only for frame packed mode */ | ||
1784 | if (mode.stereo_mode) { | ||
1785 | mode.pclk *= 2; | ||
1786 | /* total v_active = yres*2 + activespace */ | ||
1787 | mode.v_active = fbmode->yres*2 + | ||
1788 | fbmode->vsync_len + | ||
1789 | fbmode->upper_margin + | ||
1790 | fbmode->lower_margin; | ||
1791 | } | ||
1792 | #endif | ||
1793 | |||
1794 | mode.flags = 0; | ||
1795 | |||
1796 | if (!(fbmode->sync & FB_SYNC_HOR_HIGH_ACT)) | ||
1797 | mode.flags |= TEGRA_DC_MODE_FLAG_NEG_H_SYNC; | ||
1798 | |||
1799 | if (!(fbmode->sync & FB_SYNC_VERT_HIGH_ACT)) | ||
1800 | mode.flags |= TEGRA_DC_MODE_FLAG_NEG_V_SYNC; | ||
1801 | |||
1802 | return tegra_dc_set_mode(dc, &mode); | ||
1803 | } | ||
1804 | EXPORT_SYMBOL(tegra_dc_set_fb_mode); | ||
1805 | |||
/*
 * Program one of the display controller's PWM outputs (PM0 or PM1)
 * with the period / clock-divider / clock-select and duty cycle given
 * in @cfg.  Silently returns if the controller is not enabled.
 * Takes and releases dc->lock.
 */
void
tegra_dc_config_pwm(struct tegra_dc *dc, struct tegra_dc_pwm_params *cfg)
{
	unsigned int ctrl;
	unsigned long out_sel;
	unsigned long cmd_state;

	mutex_lock(&dc->lock);
	if (!dc->enabled) {
		mutex_unlock(&dc->lock);
		return;
	}

	ctrl = ((cfg->period << PM_PERIOD_SHIFT) |
		(cfg->clk_div << PM_CLK_DIVIDER_SHIFT) |
		cfg->clk_select);

	/* The new value should be effected immediately */
	cmd_state = tegra_dc_readl(dc, DC_CMD_STATE_ACCESS);
	tegra_dc_writel(dc, (cmd_state | (1 << 2)), DC_CMD_STATE_ACCESS);

	/* reconfigure the pad from GPIO to SFIO so the PWM can drive it;
	 * both callback and its argument must be supplied by the board */
	if (cfg->switch_to_sfio && cfg->gpio_conf_to_sfio)
		cfg->switch_to_sfio(cfg->gpio_conf_to_sfio);
	else
		dev_err(&dc->ndev->dev, "Error: Need gpio_conf_to_sfio\n");

	switch (cfg->which_pwm) {
	case TEGRA_PWM_PM0:
		/* Select the LM0 on PM0 */
		out_sel = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_SELECT5);
		out_sel &= ~(7 << 0);
		out_sel |= (3 << 0);
		tegra_dc_writel(dc, out_sel, DC_COM_PIN_OUTPUT_SELECT5);
		tegra_dc_writel(dc, ctrl, DC_COM_PM0_CONTROL);
		tegra_dc_writel(dc, cfg->duty_cycle, DC_COM_PM0_DUTY_CYCLE);
		break;
	case TEGRA_PWM_PM1:
		/* Select the LM1 on PM1 */
		out_sel = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_SELECT5);
		out_sel &= ~(7 << 4);
		out_sel |= (3 << 4);
		tegra_dc_writel(dc, out_sel, DC_COM_PIN_OUTPUT_SELECT5);
		tegra_dc_writel(dc, ctrl, DC_COM_PM1_CONTROL);
		tegra_dc_writel(dc, cfg->duty_cycle, DC_COM_PM1_DUTY_CYCLE);
		break;
	default:
		dev_err(&dc->ndev->dev, "Error: Need which_pwm\n");
		break;
	}
	/* restore the previous state-access mode */
	tegra_dc_writel(dc, cmd_state, DC_CMD_STATE_ACCESS);
	mutex_unlock(&dc->lock);
}
EXPORT_SYMBOL(tegra_dc_config_pwm);
1859 | |||
1860 | void tegra_dc_set_out_pin_polars(struct tegra_dc *dc, | ||
1861 | const struct tegra_dc_out_pin *pins, | ||
1862 | const unsigned int n_pins) | ||
1863 | { | ||
1864 | unsigned int i; | ||
1865 | |||
1866 | int name; | ||
1867 | int pol; | ||
1868 | |||
1869 | u32 pol1, pol3; | ||
1870 | |||
1871 | u32 set1, unset1; | ||
1872 | u32 set3, unset3; | ||
1873 | |||
1874 | set1 = set3 = unset1 = unset3 = 0; | ||
1875 | |||
1876 | for (i = 0; i < n_pins; i++) { | ||
1877 | name = (pins + i)->name; | ||
1878 | pol = (pins + i)->pol; | ||
1879 | |||
1880 | /* set polarity by name */ | ||
1881 | switch (name) { | ||
1882 | case TEGRA_DC_OUT_PIN_DATA_ENABLE: | ||
1883 | if (pol == TEGRA_DC_OUT_PIN_POL_LOW) | ||
1884 | set3 |= LSPI_OUTPUT_POLARITY_LOW; | ||
1885 | else | ||
1886 | unset3 |= LSPI_OUTPUT_POLARITY_LOW; | ||
1887 | break; | ||
1888 | case TEGRA_DC_OUT_PIN_H_SYNC: | ||
1889 | if (pol == TEGRA_DC_OUT_PIN_POL_LOW) | ||
1890 | set1 |= LHS_OUTPUT_POLARITY_LOW; | ||
1891 | else | ||
1892 | unset1 |= LHS_OUTPUT_POLARITY_LOW; | ||
1893 | break; | ||
1894 | case TEGRA_DC_OUT_PIN_V_SYNC: | ||
1895 | if (pol == TEGRA_DC_OUT_PIN_POL_LOW) | ||
1896 | set1 |= LVS_OUTPUT_POLARITY_LOW; | ||
1897 | else | ||
1898 | unset1 |= LVS_OUTPUT_POLARITY_LOW; | ||
1899 | break; | ||
1900 | case TEGRA_DC_OUT_PIN_PIXEL_CLOCK: | ||
1901 | if (pol == TEGRA_DC_OUT_PIN_POL_LOW) | ||
1902 | set1 |= LSC0_OUTPUT_POLARITY_LOW; | ||
1903 | else | ||
1904 | unset1 |= LSC0_OUTPUT_POLARITY_LOW; | ||
1905 | break; | ||
1906 | default: | ||
1907 | printk("Invalid argument in function %s\n", | ||
1908 | __FUNCTION__); | ||
1909 | break; | ||
1910 | } | ||
1911 | } | ||
1912 | |||
1913 | pol1 = DC_COM_PIN_OUTPUT_POLARITY1_INIT_VAL; | ||
1914 | pol3 = DC_COM_PIN_OUTPUT_POLARITY3_INIT_VAL; | ||
1915 | |||
1916 | pol1 |= set1; | ||
1917 | pol1 &= ~unset1; | ||
1918 | |||
1919 | pol3 |= set3; | ||
1920 | pol3 &= ~unset3; | ||
1921 | |||
1922 | tegra_dc_writel(dc, pol1, DC_COM_PIN_OUTPUT_POLARITY1); | ||
1923 | tegra_dc_writel(dc, pol3, DC_COM_PIN_OUTPUT_POLARITY3); | ||
1924 | } | ||
1925 | |||
1926 | static void tegra_dc_set_out(struct tegra_dc *dc, struct tegra_dc_out *out) | ||
1927 | { | ||
1928 | dc->out = out; | ||
1929 | |||
1930 | if (out->n_modes > 0) | ||
1931 | tegra_dc_set_mode(dc, &dc->out->modes[0]); | ||
1932 | |||
1933 | switch (out->type) { | ||
1934 | case TEGRA_DC_OUT_RGB: | ||
1935 | dc->out_ops = &tegra_dc_rgb_ops; | ||
1936 | break; | ||
1937 | |||
1938 | case TEGRA_DC_OUT_HDMI: | ||
1939 | dc->out_ops = &tegra_dc_hdmi_ops; | ||
1940 | break; | ||
1941 | |||
1942 | case TEGRA_DC_OUT_DSI: | ||
1943 | dc->out_ops = &tegra_dc_dsi_ops; | ||
1944 | break; | ||
1945 | |||
1946 | default: | ||
1947 | dc->out_ops = NULL; | ||
1948 | break; | ||
1949 | } | ||
1950 | |||
1951 | if (dc->out_ops && dc->out_ops->init) | ||
1952 | dc->out_ops->init(dc); | ||
1953 | |||
1954 | } | ||
1955 | |||
1956 | unsigned tegra_dc_get_out_height(const struct tegra_dc *dc) | ||
1957 | { | ||
1958 | if (dc->out) | ||
1959 | return dc->out->height; | ||
1960 | else | ||
1961 | return 0; | ||
1962 | } | ||
1963 | EXPORT_SYMBOL(tegra_dc_get_out_height); | ||
1964 | |||
1965 | unsigned tegra_dc_get_out_width(const struct tegra_dc *dc) | ||
1966 | { | ||
1967 | if (dc->out) | ||
1968 | return dc->out->width; | ||
1969 | else | ||
1970 | return 0; | ||
1971 | } | ||
1972 | EXPORT_SYMBOL(tegra_dc_get_out_width); | ||
1973 | |||
1974 | unsigned tegra_dc_get_out_max_pixclock(const struct tegra_dc *dc) | ||
1975 | { | ||
1976 | if (dc->out && dc->out->max_pixclock) | ||
1977 | return dc->out->max_pixclock; | ||
1978 | else | ||
1979 | return 0; | ||
1980 | } | ||
1981 | EXPORT_SYMBOL(tegra_dc_get_out_max_pixclock); | ||
1982 | |||
/*
 * Start latching a CRC of the active frame data.  Holds the DC io
 * reference (tegra_dc_io_start) until the matching
 * tegra_dc_disable_crc call releases it.
 */
void tegra_dc_enable_crc(struct tegra_dc *dc)
{
	u32 val;
	tegra_dc_io_start(dc);

	val = CRC_ALWAYS_ENABLE | CRC_INPUT_DATA_ACTIVE_DATA |
		CRC_ENABLE_ENABLE;
	tegra_dc_writel(dc, val, DC_COM_CRC_CONTROL);
	/* latch the new control value into the active register set */
	tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
}
1994 | |||
/*
 * Stop CRC latching and drop the io reference taken by
 * tegra_dc_enable_crc.
 */
void tegra_dc_disable_crc(struct tegra_dc *dc)
{
	tegra_dc_writel(dc, 0x0, DC_COM_CRC_CONTROL);
	tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	tegra_dc_io_end(dc);
}
2003 | |||
2004 | u32 tegra_dc_read_checksum_latched(struct tegra_dc *dc) | ||
2005 | { | ||
2006 | int crc = 0; | ||
2007 | |||
2008 | if(!dc) { | ||
2009 | dev_err(&dc->ndev->dev, "Failed to get dc.\n"); | ||
2010 | goto crc_error; | ||
2011 | } | ||
2012 | |||
2013 | /* TODO: Replace mdelay with code to sync VBlANK, since | ||
2014 | * DC_COM_CRC_CHECKSUM_LATCHED is available after VBLANK */ | ||
2015 | mdelay(TEGRA_CRC_LATCHED_DELAY); | ||
2016 | |||
2017 | crc = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM_LATCHED); | ||
2018 | crc_error: | ||
2019 | return crc; | ||
2020 | } | ||
2021 | |||
/*
 * Vblank bottom half: recompute the smart-dimmer brightness under
 * dc->lock, then push it to the backlight device with the lock
 * dropped (backlight_update_status may sleep / re-enter the driver).
 */
static void tegra_dc_vblank(struct work_struct *work)
{
	struct tegra_dc *dc = container_of(work, struct tegra_dc, vblank_work);
	bool nvsd_updated = false;

	mutex_lock(&dc->lock);

	/* Update the SD brightness */
	if (dc->enabled && dc->out->sd_settings)
		nvsd_updated = nvsd_update_brightness(dc);

	mutex_unlock(&dc->lock);

	/* Do the actual brightness update outside of the mutex */
	if (nvsd_updated && dc->out->sd_settings &&
	    dc->out->sd_settings->bl_device) {

		struct platform_device *pdev = dc->out->sd_settings->bl_device;
		struct backlight_device *bl = platform_get_drvdata(pdev);
		if (bl)
			backlight_update_status(bl);
	}
}
2045 | |||
/* Must acquire dc lock and dc one-shot lock before invoking this function.
 * Acquire dc one-shot lock first and then dc lock. */
void tegra_dc_host_trigger(struct tegra_dc *dc)
{
	/* We release the lock here to prevent deadlock between
	 * cancel_delayed_work_sync and one-shot work. */
	mutex_unlock(&dc->lock);

	/* push back the idle timeout: cancel any pending one-shot work
	 * and re-arm it one_shot_delay_ms from now */
	cancel_delayed_work_sync(&dc->one_shot_work);
	mutex_lock(&dc->lock);

	schedule_delayed_work(&dc->one_shot_work,
			msecs_to_jiffies(dc->one_shot_delay_ms));
	tegra_dc_program_bandwidth(dc);
	/* kick the controller to scan out one frame */
	tegra_dc_writel(dc, NC_HOST_TRIG, DC_CMD_STATE_CONTROL);
}
2062 | |||
/*
 * Delayed work armed by tegra_dc_host_trigger: after one_shot_delay_ms
 * of inactivity the memory client is assumed idle, so release the
 * bandwidth reservation.
 */
static void tegra_dc_one_shot_worker(struct work_struct *work)
{
	struct tegra_dc *dc = container_of(
		to_delayed_work(work), struct tegra_dc, one_shot_work);
	mutex_lock(&dc->lock);
	/* memory client has gone idle */
	tegra_dc_clear_bandwidth(dc);
	mutex_unlock(&dc->lock);
}
2072 | |||
2073 | /* return an arbitrarily large number if count overflow occurs. | ||
2074 | * make it a nice base-10 number to show up in stats output */ | ||
2075 | static u64 tegra_dc_underflow_count(struct tegra_dc *dc, unsigned reg) | ||
2076 | { | ||
2077 | unsigned count = tegra_dc_readl(dc, reg); | ||
2078 | tegra_dc_writel(dc, 0, reg); | ||
2079 | return ((count & 0x80000000) == 0) ? count : 10000000000ll; | ||
2080 | } | ||
2081 | |||
/*
 * Bottom half for underflow interrupts (runs from underflow_work with
 * dc->lock held).  Accumulates per-window underflow statistics,
 * schedules a controller reset on Tegra2 when a window underflows on
 * more than 4 consecutive checks, then clears and re-enables the
 * underflow interrupts that tegra_dc_irq masked off.
 */
static void tegra_dc_underflow_handler(struct tegra_dc *dc)
{
	u32 val;
	int i;

	dc->stats.underflows++;
	if (dc->underflow_mask & WIN_A_UF_INT)
		dc->stats.underflows_a += tegra_dc_underflow_count(dc,
			DC_WINBUF_AD_UFLOW_STATUS);
	if (dc->underflow_mask & WIN_B_UF_INT)
		dc->stats.underflows_b += tegra_dc_underflow_count(dc,
			DC_WINBUF_BD_UFLOW_STATUS);
	if (dc->underflow_mask & WIN_C_UF_INT)
		dc->stats.underflows_c += tegra_dc_underflow_count(dc,
			DC_WINBUF_CD_UFLOW_STATUS);

	/* Check for any underflow reset conditions */
	for (i = 0; i < DC_N_WINDOWS; i++) {
		if (dc->underflow_mask & (WIN_A_UF_INT << i)) {
			dc->windows[i].underflows++;

#ifdef CONFIG_ARCH_TEGRA_2x_SOC
			if (dc->windows[i].underflows > 4)
				schedule_work(&dc->reset_work);
#endif
		} else {
			/* window recovered: restart the consecutive count */
			dc->windows[i].underflows = 0;
		}
	}

	/* Clear the underflow mask now that we've checked it. */
	tegra_dc_writel(dc, dc->underflow_mask, DC_CMD_INT_STATUS);
	dc->underflow_mask = 0;
	/* unmask the underflow interrupts masked off in tegra_dc_irq */
	val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
	tegra_dc_writel(dc, val | ALL_UF_INT, DC_CMD_INT_MASK);
}
2118 | |||
2119 | #ifndef CONFIG_TEGRA_FPGA_PLATFORM | ||
2120 | static bool tegra_dc_windows_are_dirty(struct tegra_dc *dc) | ||
2121 | { | ||
2122 | #ifndef CONFIG_TEGRA_SIMULATION_PLATFORM | ||
2123 | u32 val; | ||
2124 | |||
2125 | val = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); | ||
2126 | if (val & (WIN_A_UPDATE | WIN_B_UPDATE | WIN_C_UPDATE)) | ||
2127 | return true; | ||
2128 | #endif | ||
2129 | return false; | ||
2130 | } | ||
2131 | |||
/*
 * Called on vblank/frame-end: check which window flips have latched in
 * hardware, clear their dirty flags, adjust the interrupt mask
 * accordingly and wake anyone sleeping in tegra_dc_sync_windows.
 */
static void tegra_dc_trigger_windows(struct tegra_dc *dc)
{
	u32 val, i;
	u32 completed = 0;
	u32 dirty = 0;

	val = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
	for (i = 0; i < DC_N_WINDOWS; i++) {
#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
		/* FIXME: this is not needed when the simulator
		   clears WIN_x_UPDATE bits as in HW */
		dc->windows[i].dirty = 0;
		completed = 1;
#else
		/* WIN_x_UPDATE still set means the flip hasn't latched */
		if (!(val & (WIN_A_UPDATE << i))) {
			dc->windows[i].dirty = 0;
			completed = 1;
		} else {
			dirty = 1;
		}
#endif
	}

	if (!dirty) {
		/* nothing pending: stop the interrupt that drives this
		 * function (vblank in one-shot mode, frame-end otherwise) */
		val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
		if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
			val &= ~V_BLANK_INT;
		else
			val &= ~FRAME_END_INT;
		tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
	}

	if (completed) {
		if (!dirty) {
			/* With the last completed window, go ahead
			   and enable the vblank interrupt for nvsd. */
			val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
			val |= V_BLANK_INT;
			tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
		}

		/* wake waiters in tegra_dc_sync_windows */
		wake_up(&dc->wq);
	}
}
2176 | |||
2177 | static void tegra_dc_one_shot_irq(struct tegra_dc *dc, unsigned long status) | ||
2178 | { | ||
2179 | if (status & V_BLANK_INT) { | ||
2180 | /* Sync up windows. */ | ||
2181 | tegra_dc_trigger_windows(dc); | ||
2182 | |||
2183 | /* Schedule any additional bottom-half vblank actvities. */ | ||
2184 | schedule_work(&dc->vblank_work); | ||
2185 | } | ||
2186 | |||
2187 | if (status & FRAME_END_INT) { | ||
2188 | /* Mark the frame_end as complete. */ | ||
2189 | if (!completion_done(&dc->frame_end_complete)) | ||
2190 | complete(&dc->frame_end_complete); | ||
2191 | } | ||
2192 | } | ||
2193 | |||
2194 | static void tegra_dc_continuous_irq(struct tegra_dc *dc, unsigned long status) | ||
2195 | { | ||
2196 | if (status & V_BLANK_INT) { | ||
2197 | /* Schedule any additional bottom-half vblank actvities. */ | ||
2198 | schedule_work(&dc->vblank_work); | ||
2199 | |||
2200 | /* All windows updated. Mask subsequent V_BLANK interrupts */ | ||
2201 | if (!tegra_dc_windows_are_dirty(dc)) { | ||
2202 | u32 val; | ||
2203 | |||
2204 | val = tegra_dc_readl(dc, DC_CMD_INT_MASK); | ||
2205 | val &= ~V_BLANK_INT; | ||
2206 | tegra_dc_writel(dc, val, DC_CMD_INT_MASK); | ||
2207 | } | ||
2208 | } | ||
2209 | |||
2210 | if (status & FRAME_END_INT) { | ||
2211 | /* Mark the frame_end as complete. */ | ||
2212 | if (!completion_done(&dc->frame_end_complete)) | ||
2213 | complete(&dc->frame_end_complete); | ||
2214 | |||
2215 | tegra_dc_trigger_windows(dc); | ||
2216 | } | ||
2217 | } | ||
2218 | #endif | ||
2219 | |||
/*
 * Top-half interrupt handler.  Acks everything except underflow bits
 * (those are left for the delayed underflow worker), then dispatches
 * to the one-shot or continuous handler depending on the output mode.
 */
static irqreturn_t tegra_dc_irq(int irq, void *ptr)
{
#ifndef CONFIG_TEGRA_FPGA_PLATFORM
	struct tegra_dc *dc = ptr;
	unsigned long status;
	unsigned long underflow_mask;
	u32 val;

	/* spurious interrupt while the module is powered down: just ack
	 * the status so it does not fire again */
	if (!nvhost_module_powered(nvhost_get_host(dc->ndev)->dev)) {
		WARN(1, "IRQ when DC not powered!\n");
		tegra_dc_io_start(dc);
		status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
		tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
		tegra_dc_io_end(dc);
		return IRQ_HANDLED;
	}

	/* clear all status flags except underflow, save those for the worker */
	status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
	tegra_dc_writel(dc, status & ~ALL_UF_INT, DC_CMD_INT_STATUS);
	val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
	tegra_dc_writel(dc, val & ~ALL_UF_INT, DC_CMD_INT_MASK);

	/*
	 * Overlays can get their internal state corrupted during an underflow
	 * condition. The only way to fix this state is to reset the DC.
	 * if we get 4 consecutive frames with underflows, assume we're
	 * hosed and reset.
	 */
	underflow_mask = status & ALL_UF_INT;

	/* Check underflow */
	if (underflow_mask) {
		dc->underflow_mask |= underflow_mask;
		schedule_delayed_work(&dc->underflow_work,
			msecs_to_jiffies(1));
	}

	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
		tegra_dc_one_shot_irq(dc, status);
	else
		tegra_dc_continuous_irq(dc, status);

	return IRQ_HANDLED;
#else /* CONFIG_TEGRA_FPGA_PLATFORM */
	return IRQ_NONE;
#endif /* !CONFIG_TEGRA_FPGA_PLATFORM */
}
2268 | |||
2269 | static void tegra_dc_set_color_control(struct tegra_dc *dc) | ||
2270 | { | ||
2271 | u32 color_control; | ||
2272 | |||
2273 | switch (dc->out->depth) { | ||
2274 | case 3: | ||
2275 | color_control = BASE_COLOR_SIZE111; | ||
2276 | break; | ||
2277 | |||
2278 | case 6: | ||
2279 | color_control = BASE_COLOR_SIZE222; | ||
2280 | break; | ||
2281 | |||
2282 | case 8: | ||
2283 | color_control = BASE_COLOR_SIZE332; | ||
2284 | break; | ||
2285 | |||
2286 | case 9: | ||
2287 | color_control = BASE_COLOR_SIZE333; | ||
2288 | break; | ||
2289 | |||
2290 | case 12: | ||
2291 | color_control = BASE_COLOR_SIZE444; | ||
2292 | break; | ||
2293 | |||
2294 | case 15: | ||
2295 | color_control = BASE_COLOR_SIZE555; | ||
2296 | break; | ||
2297 | |||
2298 | case 16: | ||
2299 | color_control = BASE_COLOR_SIZE565; | ||
2300 | break; | ||
2301 | |||
2302 | case 18: | ||
2303 | color_control = BASE_COLOR_SIZE666; | ||
2304 | break; | ||
2305 | |||
2306 | default: | ||
2307 | color_control = BASE_COLOR_SIZE888; | ||
2308 | break; | ||
2309 | } | ||
2310 | |||
2311 | switch (dc->out->dither) { | ||
2312 | case TEGRA_DC_DISABLE_DITHER: | ||
2313 | color_control |= DITHER_CONTROL_DISABLE; | ||
2314 | break; | ||
2315 | case TEGRA_DC_ORDERED_DITHER: | ||
2316 | color_control |= DITHER_CONTROL_ORDERED; | ||
2317 | break; | ||
2318 | case TEGRA_DC_ERRDIFF_DITHER: | ||
2319 | /* The line buffer for error-diffusion dither is limited | ||
2320 | * to 1280 pixels per line. This limits the maximum | ||
2321 | * horizontal active area size to 1280 pixels when error | ||
2322 | * diffusion is enabled. | ||
2323 | */ | ||
2324 | BUG_ON(dc->mode.h_active > 1280); | ||
2325 | color_control |= DITHER_CONTROL_ERRDIFF; | ||
2326 | break; | ||
2327 | } | ||
2328 | |||
2329 | tegra_dc_writel(dc, color_control, DC_DISP_DISP_COLOR_CONTROL); | ||
2330 | } | ||
2331 | |||
2332 | static u32 get_syncpt(struct tegra_dc *dc, int idx) | ||
2333 | { | ||
2334 | u32 syncpt_id; | ||
2335 | |||
2336 | switch (dc->ndev->id) { | ||
2337 | case 0: | ||
2338 | switch (idx) { | ||
2339 | case 0: | ||
2340 | syncpt_id = NVSYNCPT_DISP0_A; | ||
2341 | break; | ||
2342 | case 1: | ||
2343 | syncpt_id = NVSYNCPT_DISP0_B; | ||
2344 | break; | ||
2345 | case 2: | ||
2346 | syncpt_id = NVSYNCPT_DISP0_C; | ||
2347 | break; | ||
2348 | default: | ||
2349 | BUG(); | ||
2350 | break; | ||
2351 | } | ||
2352 | break; | ||
2353 | case 1: | ||
2354 | switch (idx) { | ||
2355 | case 0: | ||
2356 | syncpt_id = NVSYNCPT_DISP1_A; | ||
2357 | break; | ||
2358 | case 1: | ||
2359 | syncpt_id = NVSYNCPT_DISP1_B; | ||
2360 | break; | ||
2361 | case 2: | ||
2362 | syncpt_id = NVSYNCPT_DISP1_C; | ||
2363 | break; | ||
2364 | default: | ||
2365 | BUG(); | ||
2366 | break; | ||
2367 | } | ||
2368 | break; | ||
2369 | default: | ||
2370 | BUG(); | ||
2371 | break; | ||
2372 | } | ||
2373 | |||
2374 | return syncpt_id; | ||
2375 | } | ||
2376 | |||
/*
 * One-time hardware initialization after the controller is powered and
 * clocked: memory-client priorities, vsync syncpoint, interrupt setup,
 * per-window CSC/LUT/scaling state, syncpoint bookkeeping, and finally
 * the modeset plus smart-dimmer init.  Returns 0 or -EINVAL if the
 * mode could not be programmed.
 */
static int tegra_dc_init(struct tegra_dc *dc)
{
	int i;

	tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
	/* raise memory-client arbitration priority for this head's
	 * display clients; the cursor/host client gets HIGH */
	if (dc->ndev->id == 0) {
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0A,
				      TEGRA_MC_PRIO_MED);
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0B,
				      TEGRA_MC_PRIO_MED);
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0C,
				      TEGRA_MC_PRIO_MED);
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY1B,
				      TEGRA_MC_PRIO_MED);
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAYHC,
				      TEGRA_MC_PRIO_HIGH);
	} else if (dc->ndev->id == 1) {
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0AB,
				      TEGRA_MC_PRIO_MED);
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0BB,
				      TEGRA_MC_PRIO_MED);
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0CB,
				      TEGRA_MC_PRIO_MED);
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY1BB,
				      TEGRA_MC_PRIO_MED);
		tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAYHCB,
				      TEGRA_MC_PRIO_HIGH);
	}
	tegra_dc_writel(dc, 0x00000100 | dc->vblank_syncpt,
			DC_CMD_CONT_SYNCPT_VSYNC);
	tegra_dc_writel(dc, 0x00004700, DC_CMD_INT_TYPE);
	tegra_dc_writel(dc, 0x0001c700, DC_CMD_INT_POLARITY);
	tegra_dc_writel(dc, 0x00202020, DC_DISP_MEM_HIGH_PRIORITY);
	tegra_dc_writel(dc, 0x00010101, DC_DISP_MEM_HIGH_PRIORITY_TIMER);

	/* enable interrupts for vblank, frame_end and underflows */
	tegra_dc_writel(dc, (FRAME_END_INT | V_BLANK_INT | ALL_UF_INT),
		DC_CMD_INT_ENABLE);
	/* but initially unmask only the underflow interrupts */
	tegra_dc_writel(dc, ALL_UF_INT, DC_CMD_INT_MASK);

	tegra_dc_writel(dc, 0x00000000, DC_DISP_BORDER_COLOR);

	tegra_dc_set_color_control(dc);
	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_win *win = &dc->windows[i];
		tegra_dc_writel(dc, WINDOW_A_SELECT << i,
				DC_CMD_DISPLAY_WINDOW_HEADER);
		tegra_dc_set_csc(dc, &win->csc);
		tegra_dc_set_lut(dc, win);
		tegra_dc_set_scaling_filter(dc);
	}


	/* seed min/max from the current hardware syncpoint value */
	for (i = 0; i < dc->n_windows; i++) {
		u32 syncpt = get_syncpt(dc, i);

		dc->syncpt[i].id = syncpt;

		dc->syncpt[i].min = dc->syncpt[i].max =
			nvhost_syncpt_read(&nvhost_get_host(dc->ndev)->syncpt,
					syncpt);
	}

	print_mode(dc, &dc->mode, __func__);

	if (dc->mode.pclk)
		if (tegra_dc_program_mode(dc, &dc->mode))
			return -EINVAL;

	/* Initialize SD AFTER the modeset.
	   nvsd_init handles the sd_settings = NULL case. */
	nvsd_init(dc, dc->out->sd_settings);

	return 0;
}
2452 | |||
/*
 * Power up and initialize the controller: board enable hook, clocks,
 * interrupts, hardware init, output-specific enable hooks.  Returns
 * false (after tearing back down) if tegra_dc_init fails.
 * Caller holds dc->lock.
 */
static bool _tegra_dc_controller_enable(struct tegra_dc *dc)
{
	int failed_init = 0;

	if (dc->out->enable)
		dc->out->enable();

	tegra_dc_setup_clk(dc, dc->clk);
	clk_enable(dc->clk);

	/* do not accept interrupts during initialization */
	tegra_dc_writel(dc, 0, DC_CMD_INT_ENABLE);
	tegra_dc_writel(dc, 0, DC_CMD_INT_MASK);

	enable_dc_irq(dc->irq);

	failed_init = tegra_dc_init(dc);
	if (failed_init) {
		_tegra_dc_controller_disable(dc);
		return false;
	}

	if (dc->out_ops && dc->out_ops->enable)
		dc->out_ops->enable(dc);

	if (dc->out->postpoweron)
		dc->out->postpoweron();

	/* force a full blending update */
	dc->blend.z[0] = -1;

	tegra_dc_ext_enable(dc->ext);

	return true;
}
2488 | |||
2489 | #ifdef CONFIG_ARCH_TEGRA_2x_SOC | ||
/*
 * Tegra2-only recovery path used by the reset worker: pulse the
 * peripheral reset for this head and re-run the enable sequence.
 * The sibling controller's interrupt is disabled (under its lock)
 * across the reset pulse.  Returns false if re-init failed (the
 * controller is then disabled again).
 */
static bool _tegra_dc_controller_reset_enable(struct tegra_dc *dc)
{
	bool ret = true;

	if (dc->out->enable)
		dc->out->enable();

	tegra_dc_setup_clk(dc, dc->clk);
	clk_enable(dc->clk);

	/* quiesce the other head while this one is being reset */
	if (dc->ndev->id == 0 && tegra_dcs[1] != NULL) {
		mutex_lock(&tegra_dcs[1]->lock);
		disable_irq(tegra_dcs[1]->irq);
	} else if (dc->ndev->id == 1 && tegra_dcs[0] != NULL) {
		mutex_lock(&tegra_dcs[0]->lock);
		disable_irq(tegra_dcs[0]->irq);
	}

	msleep(5);
	tegra_periph_reset_assert(dc->clk);
	msleep(2);
#ifdef CONFIG_TEGRA_SILICON_PLATFORM
	tegra_periph_reset_deassert(dc->clk);
	msleep(1);
#endif

	if (dc->ndev->id == 0 && tegra_dcs[1] != NULL) {
		enable_dc_irq(tegra_dcs[1]->irq);
		mutex_unlock(&tegra_dcs[1]->lock);
	} else if (dc->ndev->id == 1 && tegra_dcs[0] != NULL) {
		enable_dc_irq(tegra_dcs[0]->irq);
		mutex_unlock(&tegra_dcs[0]->lock);
	}

	enable_dc_irq(dc->irq);

	if (tegra_dc_init(dc)) {
		dev_err(&dc->ndev->dev, "cannot initialize\n");
		ret = false;
	}

	if (dc->out_ops && dc->out_ops->enable)
		dc->out_ops->enable(dc);

	if (dc->out->postpoweron)
		dc->out->postpoweron();

	/* force a full blending update */
	dc->blend.z[0] = -1;

	tegra_dc_ext_enable(dc->ext);

	if (!ret) {
		dev_err(&dc->ndev->dev, "initialization failed,disabling");
		_tegra_dc_controller_disable(dc);
	}

	return ret;
}
2549 | #endif | ||
2550 | |||
2551 | static bool _tegra_dc_enable(struct tegra_dc *dc) | ||
2552 | { | ||
2553 | if (dc->mode.pclk == 0) | ||
2554 | return false; | ||
2555 | |||
2556 | if (!dc->out) | ||
2557 | return false; | ||
2558 | |||
2559 | tegra_dc_io_start(dc); | ||
2560 | |||
2561 | return _tegra_dc_controller_enable(dc); | ||
2562 | } | ||
2563 | |||
/*
 * Public enable entry point: idempotent — does nothing when the
 * controller is already enabled.  Serialized by dc->lock.
 */
void tegra_dc_enable(struct tegra_dc *dc)
{
	mutex_lock(&dc->lock);

	if (!dc->enabled)
		dc->enabled = _tegra_dc_enable(dc);

	mutex_unlock(&dc->lock);
}
2573 | |||
/*
 * Power down the controller: output disable hooks, interrupts off,
 * bandwidth released, clock gated, then per-window state reset and
 * any outstanding syncpoint waits flushed so clients don't hang.
 */
static void _tegra_dc_controller_disable(struct tegra_dc *dc)
{
	unsigned i;

	if (dc->out_ops && dc->out_ops->disable)
		dc->out_ops->disable(dc);

	tegra_dc_writel(dc, 0, DC_CMD_INT_MASK);
	tegra_dc_writel(dc, 0, DC_CMD_INT_ENABLE);
	disable_irq(dc->irq);

	tegra_dc_clear_bandwidth(dc);
	clk_disable(dc->clk);
	tegra_dvfs_set_rate(dc->clk, 0);

	if (dc->out && dc->out->disable)
		dc->out->disable();

	for (i = 0; i < dc->n_windows; i++) {
		struct tegra_dc_win *w = &dc->windows[i];

		/* reset window bandwidth */
		w->bandwidth = 0;
		w->new_bandwidth = 0;

		/* disable windows */
		w->flags &= ~TEGRA_WIN_FLAG_ENABLED;

		/* flush any pending syncpt waits */
		while (dc->syncpt[i].min < dc->syncpt[i].max) {
			dc->syncpt[i].min++;
			nvhost_syncpt_cpu_incr(
				&nvhost_get_host(dc->ndev)->syncpt,
				dc->syncpt[i].id);
		}
	}
}
2611 | |||
/*
 * Toggle underflow-statistics interrupts.  Currently a no-op: the
 * compiled-out body below is kept as documentation of the intended
 * register sequence.
 */
void tegra_dc_stats_enable(struct tegra_dc *dc, bool enable)
{
#if 0 /* underflow interrupt is already enabled by dc reset worker */
	u32 val;
	if (dc->enabled) {
		val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
		if (enable)
			val |= (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT);
		else
			val &= ~(WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT);
		tegra_dc_writel(dc, val, DC_CMD_INT_ENABLE);
	}
#endif
}
2626 | |||
/*
 * Report whether underflow statistics are being collected.  Always
 * true at present; the compiled-out body shows how the real check
 * would read the interrupt-enable register.
 */
bool tegra_dc_stats_get(struct tegra_dc *dc)
{
#if 0 /* right now it is always enabled */
	u32 val;
	bool res;

	if (dc->enabled) {
		val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
		res = !!(val & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT));
	} else {
		res = false;
	}

	return res;
#endif
	return true;
}
2644 | |||
2645 | /* make the screen blank by disabling all windows */ | ||
2646 | void tegra_dc_blank(struct tegra_dc *dc) | ||
2647 | { | ||
2648 | struct tegra_dc_win *dcwins[DC_N_WINDOWS]; | ||
2649 | unsigned i; | ||
2650 | |||
2651 | for (i = 0; i < DC_N_WINDOWS; i++) { | ||
2652 | dcwins[i] = tegra_dc_get_window(dc, i); | ||
2653 | dcwins[i]->flags &= ~TEGRA_WIN_FLAG_ENABLED; | ||
2654 | } | ||
2655 | |||
2656 | tegra_dc_update_windows(dcwins, DC_N_WINDOWS); | ||
2657 | tegra_dc_sync_windows(dcwins, DC_N_WINDOWS); | ||
2658 | } | ||
2659 | |||
/* Disable helper: power down the controller and drop the io reference
 * taken in _tegra_dc_enable.  Caller holds dc->lock. */
static void _tegra_dc_disable(struct tegra_dc *dc)
{
	_tegra_dc_controller_disable(dc);
	tegra_dc_io_end(dc);
}
2665 | |||
/*
 * Public disable entry point.  Cancels pending underflow/one-shot work
 * before taking dc->lock (lock order: one_shot_lock, then dc->lock —
 * see tegra_dc_host_trigger).  Skips the hardware teardown while
 * suspended, since the suspend path already did it.
 */
void tegra_dc_disable(struct tegra_dc *dc)
{
	tegra_dc_ext_disable(dc->ext);

	/* it's important that new underflow work isn't scheduled before the
	 * lock is acquired. */
	cancel_delayed_work_sync(&dc->underflow_work);
	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) {
		mutex_lock(&dc->one_shot_lock);
		cancel_delayed_work_sync(&dc->one_shot_work);
	}

	mutex_lock(&dc->lock);

	if (dc->enabled) {
		dc->enabled = false;

		if (!dc->suspended)
			_tegra_dc_disable(dc);
	}

#ifdef CONFIG_SWITCH
	/* report "offline" through the modeset switch device */
	switch_set_state(&dc->modeset_switch, 0);
#endif

	mutex_unlock(&dc->lock);
	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
		mutex_unlock(&dc->one_shot_lock);
}
2695 | |||
2696 | #ifdef CONFIG_ARCH_TEGRA_2x_SOC | ||
/*
 * Tegra2 recovery worker scheduled by the underflow handler after
 * repeated underflows: stop the controller, pulse its reset and bring
 * it back up.  shared_lock serializes resets across both heads.
 */
static void tegra_dc_reset_worker(struct work_struct *work)
{
	struct tegra_dc *dc =
		container_of(work, struct tegra_dc, reset_work);

	unsigned long val = 0;

	mutex_lock(&shared_lock);

	dev_warn(&dc->ndev->dev, "overlay stuck in underflow state. resetting.\n");

	tegra_dc_ext_disable(dc->ext);

	mutex_lock(&dc->lock);

	/* raced with a disable: nothing to reset */
	if (dc->enabled == false)
		goto unlock;

	dc->enabled = false;

	/*
	 * off host read bus
	 */
	val = tegra_dc_readl(dc, DC_CMD_CONT_SYNCPT_VSYNC);
	val &= ~(0x00000100);
	tegra_dc_writel(dc, val, DC_CMD_CONT_SYNCPT_VSYNC);

	/*
	 * set DC to STOP mode
	 */
	tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);

	msleep(10);

	_tegra_dc_controller_disable(dc);

	/* _tegra_dc_controller_reset_enable deasserts reset */
	_tegra_dc_controller_reset_enable(dc);

	dc->enabled = true;
unlock:
	mutex_unlock(&dc->lock);
	mutex_unlock(&shared_lock);
}
2741 | #endif | ||
2742 | |||
2743 | static void tegra_dc_underflow_worker(struct work_struct *work) | ||
2744 | { | ||
2745 | struct tegra_dc *dc = container_of( | ||
2746 | to_delayed_work(work), struct tegra_dc, underflow_work); | ||
2747 | |||
2748 | mutex_lock(&dc->lock); | ||
2749 | if (dc->enabled) { | ||
2750 | tegra_dc_underflow_handler(dc); | ||
2751 | } | ||
2752 | mutex_unlock(&dc->lock); | ||
2753 | } | ||
2754 | |||
#ifdef CONFIG_SWITCH
/*
 * Render the modeset switch state for sysfs: the active resolution when a
 * mode is set, "offline" otherwise.
 */
static ssize_t switch_modeset_print_mode(struct switch_dev *sdev, char *buf)
{
	struct tegra_dc *dc =
		container_of(sdev, struct tegra_dc, modeset_switch);

	if (sdev->state)
		return sprintf(buf, "%dx%d\n",
			       dc->mode.h_active, dc->mode.v_active);

	return sprintf(buf, "offline\n");
}
#endif
2767 | |||
2768 | static int tegra_dc_probe(struct nvhost_device *ndev) | ||
2769 | { | ||
2770 | struct tegra_dc *dc; | ||
2771 | struct clk *clk; | ||
2772 | struct clk *emc_clk; | ||
2773 | struct resource *res; | ||
2774 | struct resource *base_res; | ||
2775 | struct resource *fb_mem = NULL; | ||
2776 | int ret = 0; | ||
2777 | void __iomem *base; | ||
2778 | int irq; | ||
2779 | int i; | ||
2780 | |||
2781 | if (!ndev->dev.platform_data) { | ||
2782 | dev_err(&ndev->dev, "no platform data\n"); | ||
2783 | return -ENOENT; | ||
2784 | } | ||
2785 | |||
2786 | dc = kzalloc(sizeof(struct tegra_dc), GFP_KERNEL); | ||
2787 | if (!dc) { | ||
2788 | dev_err(&ndev->dev, "can't allocate memory for tegra_dc\n"); | ||
2789 | return -ENOMEM; | ||
2790 | } | ||
2791 | |||
2792 | irq = nvhost_get_irq_byname(ndev, "irq"); | ||
2793 | if (irq <= 0) { | ||
2794 | dev_err(&ndev->dev, "no irq\n"); | ||
2795 | ret = -ENOENT; | ||
2796 | goto err_free; | ||
2797 | } | ||
2798 | |||
2799 | res = nvhost_get_resource_byname(ndev, IORESOURCE_MEM, "regs"); | ||
2800 | if (!res) { | ||
2801 | dev_err(&ndev->dev, "no mem resource\n"); | ||
2802 | ret = -ENOENT; | ||
2803 | goto err_free; | ||
2804 | } | ||
2805 | |||
2806 | base_res = request_mem_region(res->start, resource_size(res), ndev->name); | ||
2807 | if (!base_res) { | ||
2808 | dev_err(&ndev->dev, "request_mem_region failed\n"); | ||
2809 | ret = -EBUSY; | ||
2810 | goto err_free; | ||
2811 | } | ||
2812 | |||
2813 | base = ioremap(res->start, resource_size(res)); | ||
2814 | if (!base) { | ||
2815 | dev_err(&ndev->dev, "registers can't be mapped\n"); | ||
2816 | ret = -EBUSY; | ||
2817 | goto err_release_resource_reg; | ||
2818 | } | ||
2819 | |||
2820 | fb_mem = nvhost_get_resource_byname(ndev, IORESOURCE_MEM, "fbmem"); | ||
2821 | |||
2822 | clk = clk_get(&ndev->dev, NULL); | ||
2823 | if (IS_ERR_OR_NULL(clk)) { | ||
2824 | dev_err(&ndev->dev, "can't get clock\n"); | ||
2825 | ret = -ENOENT; | ||
2826 | goto err_iounmap_reg; | ||
2827 | } | ||
2828 | |||
2829 | emc_clk = clk_get(&ndev->dev, "emc"); | ||
2830 | if (IS_ERR_OR_NULL(emc_clk)) { | ||
2831 | dev_err(&ndev->dev, "can't get emc clock\n"); | ||
2832 | ret = -ENOENT; | ||
2833 | goto err_put_clk; | ||
2834 | } | ||
2835 | |||
2836 | dc->clk = clk; | ||
2837 | dc->emc_clk = emc_clk; | ||
2838 | dc->shift_clk_div = 1; | ||
2839 | /* Initialize one shot work delay, it will be assigned by dsi | ||
2840 | * according to refresh rate later. */ | ||
2841 | dc->one_shot_delay_ms = 40; | ||
2842 | |||
2843 | dc->base_res = base_res; | ||
2844 | dc->base = base; | ||
2845 | dc->irq = irq; | ||
2846 | dc->ndev = ndev; | ||
2847 | dc->pdata = ndev->dev.platform_data; | ||
2848 | |||
2849 | /* | ||
2850 | * The emc is a shared clock, it will be set based on | ||
2851 | * the requirements for each user on the bus. | ||
2852 | */ | ||
2853 | dc->emc_clk_rate = 0; | ||
2854 | |||
2855 | if (dc->pdata->flags & TEGRA_DC_FLAG_ENABLED) | ||
2856 | dc->enabled = true; | ||
2857 | |||
2858 | mutex_init(&dc->lock); | ||
2859 | mutex_init(&dc->one_shot_lock); | ||
2860 | init_completion(&dc->frame_end_complete); | ||
2861 | init_waitqueue_head(&dc->wq); | ||
2862 | #ifdef CONFIG_ARCH_TEGRA_2x_SOC | ||
2863 | INIT_WORK(&dc->reset_work, tegra_dc_reset_worker); | ||
2864 | #endif | ||
2865 | INIT_WORK(&dc->vblank_work, tegra_dc_vblank); | ||
2866 | INIT_DELAYED_WORK(&dc->underflow_work, tegra_dc_underflow_worker); | ||
2867 | INIT_DELAYED_WORK(&dc->one_shot_work, tegra_dc_one_shot_worker); | ||
2868 | |||
2869 | tegra_dc_init_lut_defaults(&dc->fb_lut); | ||
2870 | |||
2871 | dc->n_windows = DC_N_WINDOWS; | ||
2872 | for (i = 0; i < dc->n_windows; i++) { | ||
2873 | struct tegra_dc_win *win = &dc->windows[i]; | ||
2874 | win->idx = i; | ||
2875 | win->dc = dc; | ||
2876 | tegra_dc_init_csc_defaults(&win->csc); | ||
2877 | tegra_dc_init_lut_defaults(&win->lut); | ||
2878 | } | ||
2879 | |||
2880 | ret = tegra_dc_set(dc, ndev->id); | ||
2881 | if (ret < 0) { | ||
2882 | dev_err(&ndev->dev, "can't add dc\n"); | ||
2883 | goto err_free_irq; | ||
2884 | } | ||
2885 | |||
2886 | nvhost_set_drvdata(ndev, dc); | ||
2887 | |||
2888 | #ifdef CONFIG_SWITCH | ||
2889 | dc->modeset_switch.name = dev_name(&ndev->dev); | ||
2890 | dc->modeset_switch.state = 0; | ||
2891 | dc->modeset_switch.print_state = switch_modeset_print_mode; | ||
2892 | switch_dev_register(&dc->modeset_switch); | ||
2893 | #endif | ||
2894 | |||
2895 | if (dc->pdata->default_out) | ||
2896 | tegra_dc_set_out(dc, dc->pdata->default_out); | ||
2897 | else | ||
2898 | dev_err(&ndev->dev, "No default output specified. Leaving output disabled.\n"); | ||
2899 | |||
2900 | dc->vblank_syncpt = (dc->ndev->id == 0) ? | ||
2901 | NVSYNCPT_VBLANK0 : NVSYNCPT_VBLANK1; | ||
2902 | |||
2903 | dc->ext = tegra_dc_ext_register(ndev, dc); | ||
2904 | if (IS_ERR_OR_NULL(dc->ext)) { | ||
2905 | dev_warn(&ndev->dev, "Failed to enable Tegra DC extensions.\n"); | ||
2906 | dc->ext = NULL; | ||
2907 | } | ||
2908 | |||
2909 | /* interrupt handler must be registered before tegra_fb_register() */ | ||
2910 | if (request_irq(irq, tegra_dc_irq, IRQF_DISABLED, | ||
2911 | dev_name(&ndev->dev), dc)) { | ||
2912 | dev_err(&ndev->dev, "request_irq %d failed\n", irq); | ||
2913 | ret = -EBUSY; | ||
2914 | goto err_put_emc_clk; | ||
2915 | } | ||
2916 | |||
2917 | /* hack to balance enable_irq calls in _tegra_dc_enable() */ | ||
2918 | disable_dc_irq(dc->irq); | ||
2919 | |||
2920 | mutex_lock(&dc->lock); | ||
2921 | if (dc->enabled) | ||
2922 | _tegra_dc_enable(dc); | ||
2923 | mutex_unlock(&dc->lock); | ||
2924 | |||
2925 | tegra_dc_create_debugfs(dc); | ||
2926 | |||
2927 | dev_info(&ndev->dev, "probed\n"); | ||
2928 | |||
2929 | if (dc->pdata->fb) { | ||
2930 | if (dc->pdata->fb->bits_per_pixel == -1) { | ||
2931 | unsigned long fmt; | ||
2932 | tegra_dc_writel(dc, | ||
2933 | WINDOW_A_SELECT << dc->pdata->fb->win, | ||
2934 | DC_CMD_DISPLAY_WINDOW_HEADER); | ||
2935 | |||
2936 | fmt = tegra_dc_readl(dc, DC_WIN_COLOR_DEPTH); | ||
2937 | dc->pdata->fb->bits_per_pixel = | ||
2938 | tegra_dc_fmt_bpp(fmt); | ||
2939 | } | ||
2940 | |||
2941 | dc->fb = tegra_fb_register(ndev, dc, dc->pdata->fb, fb_mem); | ||
2942 | if (IS_ERR_OR_NULL(dc->fb)) | ||
2943 | dc->fb = NULL; | ||
2944 | } | ||
2945 | |||
2946 | if (dc->out && dc->out->hotplug_init) | ||
2947 | dc->out->hotplug_init(); | ||
2948 | |||
2949 | if (dc->out_ops && dc->out_ops->detect) | ||
2950 | dc->out_ops->detect(dc); | ||
2951 | else | ||
2952 | dc->connected = true; | ||
2953 | |||
2954 | tegra_dc_create_sysfs(&dc->ndev->dev); | ||
2955 | |||
2956 | return 0; | ||
2957 | |||
2958 | err_free_irq: | ||
2959 | free_irq(irq, dc); | ||
2960 | err_put_emc_clk: | ||
2961 | clk_put(emc_clk); | ||
2962 | err_put_clk: | ||
2963 | clk_put(clk); | ||
2964 | err_iounmap_reg: | ||
2965 | iounmap(base); | ||
2966 | if (fb_mem) | ||
2967 | release_resource(fb_mem); | ||
2968 | err_release_resource_reg: | ||
2969 | release_resource(base_res); | ||
2970 | err_free: | ||
2971 | kfree(dc); | ||
2972 | |||
2973 | return ret; | ||
2974 | } | ||
2975 | |||
2976 | static int tegra_dc_remove(struct nvhost_device *ndev) | ||
2977 | { | ||
2978 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
2979 | |||
2980 | tegra_dc_remove_sysfs(&dc->ndev->dev); | ||
2981 | tegra_dc_remove_debugfs(dc); | ||
2982 | |||
2983 | if (dc->fb) { | ||
2984 | tegra_fb_unregister(dc->fb); | ||
2985 | if (dc->fb_mem) | ||
2986 | release_resource(dc->fb_mem); | ||
2987 | } | ||
2988 | |||
2989 | tegra_dc_ext_disable(dc->ext); | ||
2990 | |||
2991 | if (dc->ext) | ||
2992 | tegra_dc_ext_unregister(dc->ext); | ||
2993 | |||
2994 | if (dc->enabled) | ||
2995 | _tegra_dc_disable(dc); | ||
2996 | |||
2997 | #ifdef CONFIG_SWITCH | ||
2998 | switch_dev_unregister(&dc->modeset_switch); | ||
2999 | #endif | ||
3000 | free_irq(dc->irq, dc); | ||
3001 | clk_put(dc->emc_clk); | ||
3002 | clk_put(dc->clk); | ||
3003 | iounmap(dc->base); | ||
3004 | if (dc->fb_mem) | ||
3005 | release_resource(dc->base_res); | ||
3006 | kfree(dc); | ||
3007 | tegra_dc_set(NULL, ndev->id); | ||
3008 | return 0; | ||
3009 | } | ||
3010 | |||
#ifdef CONFIG_PM
/*
 * tegra_dc_suspend - nvhost suspend hook for a head.
 *
 * Disables the userspace extension interface, then under dc->lock: gives
 * the output driver its suspend callback, disables the head if enabled
 * (recording that in dc->suspended), and runs the output's postsuspend
 * hook.  Always returns 0.
 */
static int tegra_dc_suspend(struct nvhost_device *ndev, pm_message_t state)
{
	struct tegra_dc *dc = nvhost_get_drvdata(ndev);

	dev_info(&ndev->dev, "suspend\n");

	tegra_dc_ext_disable(dc->ext);

	mutex_lock(&dc->lock);

	if (dc->out_ops && dc->out_ops->suspend)
		dc->out_ops->suspend(dc);

	if (dc->enabled) {
		_tegra_dc_disable(dc);

		/* remembered so resume knows the head was up; also checked
		 * by the disable path (see tegra_dc_disable) */
		dc->suspended = true;
	}

	if (dc->out && dc->out->postsuspend) {
		dc->out->postsuspend();
		if (dc->out->type && dc->out->type == TEGRA_DC_OUT_HDMI)
			/*
			 * avoid resume event due to voltage falling
			 */
			msleep(100);
	}

	mutex_unlock(&dc->lock);

	return 0;
}
3044 | |||
/*
 * tegra_dc_resume - nvhost resume hook; inverse of tegra_dc_suspend().
 *
 * Under dc->lock: clears the suspended flag, re-enables the head if it
 * was enabled before suspend, re-runs the output's hotplug init, and
 * gives the output driver its resume callback.  Always returns 0.
 */
static int tegra_dc_resume(struct nvhost_device *ndev)
{
	struct tegra_dc *dc = nvhost_get_drvdata(ndev);

	dev_info(&ndev->dev, "resume\n");

	mutex_lock(&dc->lock);
	dc->suspended = false;

	if (dc->enabled)
		_tegra_dc_enable(dc);

	if (dc->out && dc->out->hotplug_init)
		dc->out->hotplug_init();

	if (dc->out_ops && dc->out_ops->resume)
		dc->out_ops->resume(dc);
	mutex_unlock(&dc->lock);

	return 0;
}

#endif /* CONFIG_PM */
3068 | |||
3069 | extern int suspend_set(const char *val, struct kernel_param *kp) | ||
3070 | { | ||
3071 | if (!strcmp(val, "dump")) | ||
3072 | dump_regs(tegra_dcs[0]); | ||
3073 | #ifdef CONFIG_PM | ||
3074 | else if (!strcmp(val, "suspend")) | ||
3075 | tegra_dc_suspend(tegra_dcs[0]->ndev, PMSG_SUSPEND); | ||
3076 | else if (!strcmp(val, "resume")) | ||
3077 | tegra_dc_resume(tegra_dcs[0]->ndev); | ||
3078 | #endif | ||
3079 | |||
3080 | return 0; | ||
3081 | } | ||
3082 | |||
/* suspend_get - the "suspend" parameter is write-only; reads yield nothing. */
extern int suspend_get(char *buffer, struct kernel_param *kp)
{
	return 0;
}

/* Backing variable required by module_param_call; its value is never read. */
int suspend;

module_param_call(suspend, suspend_set, suspend_get, &suspend, 0644);
3091 | |||
/* nvhost driver glue for the "tegradc" device; PM hooks only with CONFIG_PM. */
struct nvhost_driver tegra_dc_driver = {
	.driver = {
		.name = "tegradc",
		.owner = THIS_MODULE,
	},
	.probe = tegra_dc_probe,
	.remove = tegra_dc_remove,
#ifdef CONFIG_PM
	.suspend = tegra_dc_suspend,
	.resume = tegra_dc_resume,
#endif
};
3104 | |||
3105 | static int __init tegra_dc_module_init(void) | ||
3106 | { | ||
3107 | int ret = tegra_dc_ext_module_init(); | ||
3108 | if (ret) | ||
3109 | return ret; | ||
3110 | return nvhost_driver_register(&tegra_dc_driver); | ||
3111 | } | ||
3112 | |||
/* Module exit: unregister in reverse order of tegra_dc_module_init(). */
static void __exit tegra_dc_module_exit(void)
{
	nvhost_driver_unregister(&tegra_dc_driver);
	tegra_dc_ext_module_exit();
}

module_exit(tegra_dc_module_exit);
module_init(tegra_dc_module_init);
diff --git a/drivers/video/tegra/dc/dc_priv.h b/drivers/video/tegra/dc/dc_priv.h new file mode 100644 index 00000000000..a10e648debc --- /dev/null +++ b/drivers/video/tegra/dc/dc_priv.h | |||
@@ -0,0 +1,220 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/dc_priv.h | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * Author: Erik Gilling <konkers@android.com> | ||
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #ifndef __DRIVERS_VIDEO_TEGRA_DC_DC_PRIV_H | ||
19 | #define __DRIVERS_VIDEO_TEGRA_DC_DC_PRIV_H | ||
20 | |||
21 | #include <linux/io.h> | ||
22 | #include <linux/mutex.h> | ||
23 | #include <linux/wait.h> | ||
24 | #include <linux/completion.h> | ||
25 | #include <linux/switch.h> | ||
26 | |||
27 | #include <mach/dc.h> | ||
28 | |||
29 | #include "../host/dev.h" | ||
30 | #include "../host/host1x/host1x_syncpt.h" | ||
31 | |||
32 | #include <mach/tegra_dc_ext.h> | ||
33 | |||
34 | #define WIN_IS_TILED(win) ((win)->flags & TEGRA_WIN_FLAG_TILED) | ||
35 | #define WIN_IS_ENABLED(win) ((win)->flags & TEGRA_WIN_FLAG_ENABLED) | ||
36 | |||
37 | #define NEED_UPDATE_EMC_ON_EVERY_FRAME (windows_idle_detection_time == 0) | ||
38 | |||
39 | /* DDR: 8 bytes transfer per clock */ | ||
40 | #define DDR_BW_TO_FREQ(bw) ((bw) / 8) | ||
41 | |||
42 | #if defined(CONFIG_TEGRA_EMC_TO_DDR_CLOCK) | ||
43 | #define EMC_BW_TO_FREQ(bw) (DDR_BW_TO_FREQ(bw) * CONFIG_TEGRA_EMC_TO_DDR_CLOCK) | ||
44 | #else | ||
45 | #define EMC_BW_TO_FREQ(bw) (DDR_BW_TO_FREQ(bw) * 2) | ||
46 | #endif | ||
47 | |||
48 | struct tegra_dc; | ||
49 | |||
/* Per-head blend state: a depth value (z) and flag word for each window. */
struct tegra_dc_blend {
	unsigned z[DC_N_WINDOWS];
	unsigned flags[DC_N_WINDOWS];
};
54 | |||
/*
 * Per-output-type operations (RGB/HDMI/DSI — see the extern ops tables at
 * the end of this header).  Each hook's comment states whether the DC
 * clocks are running when it is called; all hooks are optional.
 */
struct tegra_dc_out_ops {
	/* initialize output.  dc clocks are not on at this point */
	int (*init)(struct tegra_dc *dc);
	/* destroy output.  dc clocks are not on at this point */
	void (*destroy)(struct tegra_dc *dc);
	/* detect connected display.  can sleep.*/
	bool (*detect)(struct tegra_dc *dc);
	/* enable output.  dc clocks are on at this point */
	void (*enable)(struct tegra_dc *dc);
	/* disable output.  dc clocks are on at this point */
	void (*disable)(struct tegra_dc *dc);

	/* suspend output.  dc clocks are on at this point */
	void (*suspend)(struct tegra_dc *dc);
	/* resume output.  dc clocks are on at this point */
	void (*resume)(struct tegra_dc *dc);
};
72 | |||
/* Per-head driver state; one instance per display controller. */
struct tegra_dc {
	struct nvhost_device *ndev;
	struct tegra_dc_platform_data *pdata;

	/* register aperture (requested + mapped in probe) */
	struct resource *base_res;
	void __iomem *base;
	int irq;

	/* module clock and shared EMC (memory) clock */
	struct clk *clk;
	struct clk *emc_clk;
	int emc_clk_rate;
	int new_emc_clk_rate;
	u32 shift_clk_div;

	/* NOTE(review): these flags appear to be protected by 'lock'
	 * (see dc.c enable/disable paths) — confirm */
	bool connected;
	bool enabled;
	bool suspended;

	/* active output, its type-specific ops and private data */
	struct tegra_dc_out *out;
	struct tegra_dc_out_ops *out_ops;
	void *out_data;

	struct tegra_dc_mode mode;

	struct tegra_dc_win windows[DC_N_WINDOWS];
	struct tegra_dc_blend blend;
	int n_windows;

	wait_queue_head_t wq;

	struct mutex lock;
	struct mutex one_shot_lock;

	/* optional "fbmem" resource and registered framebuffer */
	struct resource *fb_mem;
	struct tegra_fb_info *fb;

	/* per-window syncpoint id plus min/max bookkeeping */
	struct {
		u32 id;
		u32 min;
		u32 max;
	} syncpt[DC_N_WINDOWS];
	u32 vblank_syncpt;

	unsigned long underflow_mask;
	struct work_struct reset_work;

#ifdef CONFIG_SWITCH
	struct switch_dev modeset_switch;
#endif

	struct completion frame_end_complete;

	struct work_struct vblank_work;

	/* underflow counters, total and per window */
	struct {
		u64 underflows;
		u64 underflows_a;
		u64 underflows_b;
		u64 underflows_c;
	} stats;

	struct tegra_dc_ext *ext;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debugdir;
#endif
	struct tegra_dc_lut fb_lut;
	struct delayed_work underflow_work;
	/* one-shot refresh delay; defaulted in probe, overridden by dsi */
	u32 one_shot_delay_ms;
	struct delayed_work one_shot_work;
};
144 | |||
/* Hold the host1x module active for a span of DC register accesses. */
static inline void tegra_dc_io_start(struct tegra_dc *dc)
{
	nvhost_module_busy(nvhost_get_host(dc->ndev)->dev);
}
149 | |||
/* Release the host1x busy reference taken by tegra_dc_io_start(). */
static inline void tegra_dc_io_end(struct tegra_dc *dc)
{
	nvhost_module_idle(nvhost_get_host(dc->ndev)->dev);
}
154 | |||
/*
 * Read a DC register.  'reg' is a 32-bit word offset (the DC_* defines in
 * dc_reg.h), hence the "* 4" byte scaling.  The caller must hold the
 * module active (tegra_dc_io_start()); the BUG_ON enforces that host1x is
 * powered before the aperture is touched.
 */
static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
					unsigned long reg)
{
	BUG_ON(!nvhost_module_powered(nvhost_get_host(dc->ndev)->dev));
	return readl(dc->base + reg * 4);
}
161 | |||
/*
 * Write a DC register.  'reg' is a 32-bit word offset (the DC_* defines),
 * hence the "* 4" byte scaling.  Same power precondition as
 * tegra_dc_readl(): host1x must be powered, enforced via BUG_ON.
 */
static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long val,
					unsigned long reg)
{
	BUG_ON(!nvhost_module_powered(nvhost_get_host(dc->ndev)->dev));
	writel(val, dc->base + reg * 4);
}
168 | |||
169 | static inline void _tegra_dc_write_table(struct tegra_dc *dc, const u32 *table, | ||
170 | unsigned len) | ||
171 | { | ||
172 | int i; | ||
173 | |||
174 | for (i = 0; i < len; i++) | ||
175 | tegra_dc_writel(dc, table[i * 2 + 1], table[i * 2]); | ||
176 | } | ||
177 | |||
178 | #define tegra_dc_write_table(dc, table) \ | ||
179 | _tegra_dc_write_table(dc, table, ARRAY_SIZE(table) / 2) | ||
180 | |||
/* Attach opaque output-driver private data to the head. */
static inline void tegra_dc_set_outdata(struct tegra_dc *dc, void *data)
{
	dc->out_data = data;
}
185 | |||
/* Retrieve the private data stored by tegra_dc_set_outdata(). */
static inline void *tegra_dc_get_outdata(struct tegra_dc *dc)
{
	return dc->out_data;
}
190 | |||
191 | static inline unsigned long tegra_dc_get_default_emc_clk_rate( | ||
192 | struct tegra_dc *dc) | ||
193 | { | ||
194 | return dc->pdata->emc_clk_rate ? dc->pdata->emc_clk_rate : ULONG_MAX; | ||
195 | } | ||
196 | |||
197 | void tegra_dc_setup_clk(struct tegra_dc *dc, struct clk *clk); | ||
198 | |||
199 | extern struct tegra_dc_out_ops tegra_dc_rgb_ops; | ||
200 | extern struct tegra_dc_out_ops tegra_dc_hdmi_ops; | ||
201 | extern struct tegra_dc_out_ops tegra_dc_dsi_ops; | ||
202 | |||
203 | /* defined in dc_sysfs.c, used by dc.c */ | ||
204 | void __devexit tegra_dc_remove_sysfs(struct device *dev); | ||
205 | void tegra_dc_create_sysfs(struct device *dev); | ||
206 | |||
207 | /* defined in dc.c, used by dc_sysfs.c */ | ||
208 | void tegra_dc_stats_enable(struct tegra_dc *dc, bool enable); | ||
209 | bool tegra_dc_stats_get(struct tegra_dc *dc); | ||
210 | |||
211 | /* defined in dc.c, used by dc_sysfs.c */ | ||
212 | u32 tegra_dc_read_checksum_latched(struct tegra_dc *dc); | ||
213 | void tegra_dc_enable_crc(struct tegra_dc *dc); | ||
214 | void tegra_dc_disable_crc(struct tegra_dc *dc); | ||
215 | |||
216 | void tegra_dc_set_out_pin_polars(struct tegra_dc *dc, | ||
217 | const struct tegra_dc_out_pin *pins, | ||
218 | const unsigned int n_pins); | ||
219 | #endif | ||
220 | |||
diff --git a/drivers/video/tegra/dc/dc_reg.h b/drivers/video/tegra/dc/dc_reg.h new file mode 100644 index 00000000000..22379a19408 --- /dev/null +++ b/drivers/video/tegra/dc/dc_reg.h | |||
@@ -0,0 +1,555 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/dc_reg.h | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * Author: Erik Gilling <konkers@android.com> | ||
6 | * | ||
7 | * Copyright (C) 2010-2011 NVIDIA Corporation | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef __DRIVERS_VIDEO_TEGRA_DC_DC_REG_H | ||
21 | #define __DRIVERS_VIDEO_TEGRA_DC_DC_REG_H | ||
22 | |||
23 | #define DC_CMD_GENERAL_INCR_SYNCPT 0x000 | ||
24 | #define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001 | ||
25 | #define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002 | ||
26 | #define DC_CMD_WIN_A_INCR_SYNCPT 0x008 | ||
27 | #define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009 | ||
28 | #define DC_CMD_WIN_A_INCR_SYNCPT_ERROR 0x00a | ||
29 | #define DC_CMD_WIN_B_INCR_SYNCPT 0x010 | ||
30 | #define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL 0x011 | ||
31 | #define DC_CMD_WIN_B_INCR_SYNCPT_ERROR 0x012 | ||
32 | #define DC_CMD_WIN_C_INCR_SYNCPT 0x018 | ||
33 | #define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019 | ||
34 | #define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a | ||
35 | #define DC_CMD_CONT_SYNCPT_VSYNC 0x028 | ||
36 | #define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031 | ||
37 | #define MSF_POLARITY_HIGH (0 << 0) | ||
38 | #define MSF_POLARITY_LOW (1 << 0) | ||
39 | #define MSF_DISABLE (0 << 1) | ||
40 | #define MSF_ENABLE (1 << 1) | ||
41 | #define MSF_LSPI (0 << 2) | ||
42 | #define MSF_LDC (1 << 2) | ||
43 | #define MSF_LSDI (2 << 2) | ||
44 | |||
45 | #define DC_CMD_DISPLAY_COMMAND 0x032 | ||
46 | #define DISP_COMMAND_RAISE (1 << 0) | ||
47 | #define DISP_CTRL_MODE_STOP (0 << 5) | ||
48 | #define DISP_CTRL_MODE_C_DISPLAY (1 << 5) | ||
49 | #define DISP_CTRL_MODE_NC_DISPLAY (2 << 5) | ||
50 | #define DISP_COMMAND_RAISE_VECTOR(x) (((x) & 0x1f) << 22) | ||
51 | #define DISP_COMMAND_RAISE_CHANNEL_ID(x) (((x) & 0xf) << 27) | ||
52 | |||
53 | #define DC_CMD_SIGNAL_RAISE 0x033 | ||
54 | #define DC_CMD_DISPLAY_POWER_CONTROL 0x036 | ||
55 | #define PW0_ENABLE (1 << 0) | ||
56 | #define PW1_ENABLE (1 << 2) | ||
57 | #define PW2_ENABLE (1 << 4) | ||
58 | #define PW3_ENABLE (1 << 6) | ||
59 | #define PW4_ENABLE (1 << 8) | ||
60 | #define PM0_ENABLE (1 << 16) | ||
61 | #define PM1_ENABLE (1 << 18) | ||
62 | #define SPI_ENABLE (1 << 24) | ||
63 | #define HSPI_ENABLE (1 << 25) | ||
64 | |||
65 | #define DC_CMD_INT_STATUS 0x037 | ||
66 | #define DC_CMD_INT_MASK 0x038 | ||
67 | #define DC_CMD_INT_ENABLE 0x039 | ||
68 | #define DC_CMD_INT_TYPE 0x03a | ||
69 | #define DC_CMD_INT_POLARITY 0x03b | ||
70 | #define CTXSW_INT (1 << 0) | ||
71 | #define FRAME_END_INT (1 << 1) | ||
72 | #define V_BLANK_INT (1 << 2) | ||
73 | #define H_BLANK_INT (1 << 3) | ||
74 | #define V_PULSE3_INT (1 << 4) | ||
75 | #define SPI_BUSY_INT (1 << 7) | ||
76 | #define WIN_A_UF_INT (1 << 8) | ||
77 | #define WIN_B_UF_INT (1 << 9) | ||
78 | #define WIN_C_UF_INT (1 << 10) | ||
79 | #define MSF_INT (1 << 12) | ||
80 | #define SSF_INT (1 << 13) | ||
81 | #define WIN_A_OF_INT (1 << 14) | ||
82 | #define WIN_B_OF_INT (1 << 15) | ||
83 | #define WIN_C_OF_INT (1 << 16) | ||
84 | #define GPIO_0_INT (1 << 18) | ||
85 | #define GPIO_1_INT (1 << 19) | ||
86 | #define GPIO_2_INT (1 << 20) | ||
87 | |||
88 | #define DC_CMD_SIGNAL_RAISE1 0x03c | ||
89 | #define DC_CMD_SIGNAL_RAISE2 0x03d | ||
90 | #define DC_CMD_SIGNAL_RAISE3 0x03e | ||
91 | #define DC_CMD_STATE_ACCESS 0x040 | ||
92 | #define READ_MUX_ASSEMBLY (0 << 0) | ||
93 | #define READ_MUX_ACTIVE (1 << 0) | ||
94 | #define WRITE_MUX_ASSEMBLY (0 << 2) | ||
95 | #define WRITE_MUX_ACTIVE (1 << 2) | ||
96 | |||
97 | #define DC_CMD_STATE_CONTROL 0x041 | ||
98 | #define GENERAL_ACT_REQ (1 << 0) | ||
99 | #define WIN_A_ACT_REQ (1 << 1) | ||
100 | #define WIN_B_ACT_REQ (1 << 2) | ||
101 | #define WIN_C_ACT_REQ (1 << 3) | ||
102 | #define GENERAL_UPDATE (1 << 8) | ||
103 | #define WIN_A_UPDATE (1 << 9) | ||
104 | #define WIN_B_UPDATE (1 << 10) | ||
105 | #define WIN_C_UPDATE (1 << 11) | ||
106 | #define NC_HOST_TRIG (1 << 24) | ||
107 | |||
108 | #define DC_CMD_DISPLAY_WINDOW_HEADER 0x042 | ||
109 | #define WINDOW_A_SELECT (1 << 4) | ||
110 | #define WINDOW_B_SELECT (1 << 5) | ||
111 | #define WINDOW_C_SELECT (1 << 6) | ||
112 | |||
113 | #define DC_CMD_REG_ACT_CONTROL 0x043 | ||
114 | |||
115 | #define DC_COM_CRC_CONTROL 0x300 | ||
116 | #define CRC_ALWAYS_ENABLE (1 << 3) | ||
117 | #define CRC_ALWAYS_DISABLE (0 << 3) | ||
118 | #define CRC_INPUT_DATA_ACTIVE_DATA (1 << 2) | ||
119 | #define CRC_INPUT_DATA_FULL_FRAME (0 << 2) | ||
120 | #define CRC_WAIT_TWO_VSYNC (1 << 1) | ||
121 | #define CRC_WAIT_ONE_VSYNC (0 << 1) | ||
122 | #define CRC_ENABLE_ENABLE (1 << 0) | ||
123 | #define CRC_ENABLE_DISABLE (0 << 0) | ||
124 | #define DC_COM_CRC_CHECKSUM 0x301 | ||
125 | #define DC_COM_PIN_OUTPUT_ENABLE0 0x302 | ||
126 | #define DC_COM_PIN_OUTPUT_ENABLE1 0x303 | ||
127 | #define DC_COM_PIN_OUTPUT_ENABLE2 0x304 | ||
128 | #define DC_COM_PIN_OUTPUT_ENABLE3 0x305 | ||
129 | #define PIN_OUTPUT_LSPI_OUTPUT_EN (1 << 8) | ||
130 | #define PIN_OUTPUT_LSPI_OUTPUT_DIS (1 << 8) | ||
131 | #define DC_COM_PIN_OUTPUT_POLARITY0 0x306 | ||
132 | |||
133 | #define DC_COM_PIN_OUTPUT_POLARITY1 0x307 | ||
134 | #define LHS_OUTPUT_POLARITY_LOW (1 << 30) | ||
135 | #define LVS_OUTPUT_POLARITY_LOW (1 << 28) | ||
136 | #define LSC0_OUTPUT_POLARITY_LOW (1 << 24) | ||
137 | |||
138 | #define DC_COM_PIN_OUTPUT_POLARITY2 0x308 | ||
139 | |||
140 | #define DC_COM_PIN_OUTPUT_POLARITY3 0x309 | ||
141 | #define LSPI_OUTPUT_POLARITY_LOW (1 << 8) | ||
142 | |||
143 | #define DC_COM_PIN_OUTPUT_DATA0 0x30a | ||
144 | #define DC_COM_PIN_OUTPUT_DATA1 0x30b | ||
145 | #define DC_COM_PIN_OUTPUT_DATA2 0x30c | ||
146 | #define DC_COM_PIN_OUTPUT_DATA3 0x30d | ||
147 | #define DC_COM_PIN_INPUT_ENABLE0 0x30e | ||
148 | #define DC_COM_PIN_INPUT_ENABLE1 0x30f | ||
149 | #define DC_COM_PIN_INPUT_ENABLE2 0x310 | ||
150 | #define DC_COM_PIN_INPUT_ENABLE3 0x311 | ||
151 | #define PIN_INPUT_LSPI_INPUT_EN (1 << 8) | ||
152 | #define PIN_INPUT_LSPI_INPUT_DIS (1 << 8) | ||
153 | #define DC_COM_PIN_INPUT_DATA0 0x312 | ||
154 | #define DC_COM_PIN_INPUT_DATA1 0x313 | ||
155 | #define DC_COM_PIN_OUTPUT_SELECT0 0x314 | ||
156 | #define DC_COM_PIN_OUTPUT_SELECT1 0x315 | ||
157 | #define DC_COM_PIN_OUTPUT_SELECT2 0x316 | ||
158 | #define DC_COM_PIN_OUTPUT_SELECT3 0x317 | ||
159 | #define DC_COM_PIN_OUTPUT_SELECT4 0x318 | ||
160 | #define DC_COM_PIN_OUTPUT_SELECT5 0x319 | ||
161 | #define DC_COM_PIN_OUTPUT_SELECT6 0x31a | ||
162 | |||
163 | #define PIN5_LM1_LCD_M1_OUTPUT_MASK (7 << 4) | ||
164 | #define PIN5_LM1_LCD_M1_OUTPUT_M1 (0 << 4) | ||
165 | #define PIN5_LM1_LCD_M1_OUTPUT_LD21 (2 << 4) | ||
166 | #define PIN5_LM1_LCD_M1_OUTPUT_PM1 (3 << 4) | ||
167 | |||
168 | #define PIN1_LHS_OUTPUT (1 << 30) | ||
169 | #define PIN1_LVS_OUTPUT (1 << 28) | ||
170 | |||
171 | #define DC_COM_PIN_MISC_CONTROL 0x31b | ||
172 | #define DC_COM_PM0_CONTROL 0x31c | ||
173 | #define DC_COM_PM0_DUTY_CYCLE 0x31d | ||
174 | #define DC_COM_PM1_CONTROL 0x31e | ||
175 | #define DC_COM_PM1_DUTY_CYCLE 0x31f | ||
176 | |||
177 | #define PM_PERIOD_SHIFT 18 | ||
178 | #define PM_CLK_DIVIDER_SHIFT 4 | ||
179 | |||
180 | #define DC_COM_SPI_CONTROL 0x320 | ||
181 | #define DC_COM_SPI_START_BYTE 0x321 | ||
182 | #define DC_COM_HSPI_WRITE_DATA_AB 0x322 | ||
183 | #define DC_COM_HSPI_WRITE_DATA_CD 0x323 | ||
184 | #define DC_COM_HSPI_CS_DC 0x324 | ||
185 | #define DC_COM_SCRATCH_REGISTER_A 0x325 | ||
186 | #define DC_COM_SCRATCH_REGISTER_B 0x326 | ||
187 | #define DC_COM_GPIO_CTRL 0x327 | ||
188 | #define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328 | ||
189 | #define DC_COM_CRC_CHECKSUM_LATCHED 0x329 | ||
190 | |||
191 | #define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400 | ||
192 | #define H_PULSE_0_ENABLE (1 << 8) | ||
193 | #define H_PULSE_1_ENABLE (1 << 10) | ||
194 | #define H_PULSE_2_ENABLE (1 << 12) | ||
195 | #define V_PULSE_0_ENABLE (1 << 16) | ||
196 | #define V_PULSE_1_ENABLE (1 << 18) | ||
197 | #define V_PULSE_2_ENABLE (1 << 19) | ||
198 | #define V_PULSE_3_ENABLE (1 << 20) | ||
199 | #define M0_ENABLE (1 << 24) | ||
200 | #define M1_ENABLE (1 << 26) | ||
201 | |||
202 | #define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401 | ||
203 | #define DI_ENABLE (1 << 16) | ||
204 | #define PP_ENABLE (1 << 18) | ||
205 | |||
206 | #define DC_DISP_DISP_WIN_OPTIONS 0x402 | ||
207 | #define CURSOR_ENABLE (1 << 16) | ||
208 | #define TVO_ENABLE (1 << 28) | ||
209 | #define DSI_ENABLE (1 << 29) | ||
210 | #define HDMI_ENABLE (1 << 30) | ||
211 | |||
212 | #define DC_DISP_MEM_HIGH_PRIORITY 0x403 | ||
213 | #define DC_DISP_MEM_HIGH_PRIORITY_TIMER 0x404 | ||
214 | #define DC_DISP_DISP_TIMING_OPTIONS 0x405 | ||
215 | #define VSYNC_H_POSITION(x) ((x) & 0xfff) | ||
216 | |||
217 | #define DC_DISP_REF_TO_SYNC 0x406 | ||
218 | #define DC_DISP_SYNC_WIDTH 0x407 | ||
219 | #define DC_DISP_BACK_PORCH 0x408 | ||
220 | #define DC_DISP_DISP_ACTIVE 0x409 | ||
221 | #define DC_DISP_FRONT_PORCH 0x40a | ||
222 | #define DC_DISP_H_PULSE0_CONTROL 0x40b | ||
223 | #define DC_DISP_H_PULSE0_POSITION_A 0x40c | ||
224 | #define DC_DISP_H_PULSE0_POSITION_B 0x40d | ||
225 | #define DC_DISP_H_PULSE0_POSITION_C 0x40e | ||
226 | #define DC_DISP_H_PULSE0_POSITION_D 0x40f | ||
227 | #define DC_DISP_H_PULSE1_CONTROL 0x410 | ||
228 | #define DC_DISP_H_PULSE1_POSITION_A 0x411 | ||
229 | #define DC_DISP_H_PULSE1_POSITION_B 0x412 | ||
/*
 * Tail of the Tegra DC register map: H/V pulse generators, display clock
 * and interface control, cursor, window (DC_WIN_*/DC_WINBUF_*) and
 * smart-dimmer (DC_DISP_SD_*) registers.  Offsets are 32-bit word
 * offsets from the display controller register base.
 */
#define DC_DISP_H_PULSE1_POSITION_C		0x413
#define DC_DISP_H_PULSE1_POSITION_D		0x414
#define DC_DISP_H_PULSE2_CONTROL		0x415
#define DC_DISP_H_PULSE2_POSITION_A		0x416
#define DC_DISP_H_PULSE2_POSITION_B		0x417
#define DC_DISP_H_PULSE2_POSITION_C		0x418
#define DC_DISP_H_PULSE2_POSITION_D		0x419
#define DC_DISP_V_PULSE0_CONTROL		0x41a
#define DC_DISP_V_PULSE0_POSITION_A		0x41b
#define DC_DISP_V_PULSE0_POSITION_B		0x41c
#define DC_DISP_V_PULSE0_POSITION_C		0x41d
#define DC_DISP_V_PULSE1_CONTROL		0x41e
#define DC_DISP_V_PULSE1_POSITION_A		0x41f
#define DC_DISP_V_PULSE1_POSITION_B		0x420
#define DC_DISP_V_PULSE1_POSITION_C		0x421
#define DC_DISP_V_PULSE2_CONTROL		0x422
#define DC_DISP_V_PULSE2_POSITION_A		0x423
#define DC_DISP_V_PULSE3_CONTROL		0x424
#define DC_DISP_V_PULSE3_POSITION_A		0x425
#define DC_DISP_M0_CONTROL			0x426
#define DC_DISP_M1_CONTROL			0x427
#define DC_DISP_DI_CONTROL			0x428
#define DC_DISP_PP_CONTROL			0x429
#define DC_DISP_PP_SELECT_A			0x42a
#define DC_DISP_PP_SELECT_B			0x42b
#define DC_DISP_PP_SELECT_C			0x42c
#define DC_DISP_PP_SELECT_D			0x42d

/* Fields of the H/V pulse *_CONTROL registers above. */
#define PULSE_MODE_NORMAL			(0 << 3)
#define PULSE_MODE_ONE_CLOCK			(1 << 3)
#define PULSE_POLARITY_HIGH			(0 << 4)
#define PULSE_POLARITY_LOW			(1 << 4)
#define PULSE_QUAL_ALWAYS			(0 << 6)
#define PULSE_QUAL_VACTIVE			(2 << 6)
#define PULSE_QUAL_VACTIVE1			(3 << 6)
#define PULSE_LAST_START_A			(0 << 8)
#define PULSE_LAST_END_A			(1 << 8)
#define PULSE_LAST_START_B			(2 << 8)
#define PULSE_LAST_END_B			(3 << 8)
#define PULSE_LAST_START_C			(4 << 8)
#define PULSE_LAST_END_C			(5 << 8)
#define PULSE_LAST_START_D			(6 << 8)
#define PULSE_LAST_END_D			(7 << 8)

/* Fields of the pulse *_POSITION_* registers: 12-bit start/end points. */
#define PULSE_START(x)				((x) & 0xfff)
#define PULSE_END(x)				(((x) & 0xfff) << 16)

#define DC_DISP_DISP_CLOCK_CONTROL		0x42e
/* Pixel clock divider select (bits 11:8); PCDn divides by n (PCD1H = 1.5). */
#define   PIXEL_CLK_DIVIDER_PCD1		(0 << 8)
#define   PIXEL_CLK_DIVIDER_PCD1H		(1 << 8)
#define   PIXEL_CLK_DIVIDER_PCD2		(2 << 8)
#define   PIXEL_CLK_DIVIDER_PCD3		(3 << 8)
#define   PIXEL_CLK_DIVIDER_PCD4		(4 << 8)
#define   PIXEL_CLK_DIVIDER_PCD6		(5 << 8)
#define   PIXEL_CLK_DIVIDER_PCD8		(6 << 8)
#define   PIXEL_CLK_DIVIDER_PCD9		(7 << 8)
#define   PIXEL_CLK_DIVIDER_PCD12		(8 << 8)
#define   PIXEL_CLK_DIVIDER_PCD16		(9 << 8)
#define   PIXEL_CLK_DIVIDER_PCD18		(10 << 8)
#define   PIXEL_CLK_DIVIDER_PCD24		(11 << 8)
#define   PIXEL_CLK_DIVIDER_PCD13		(12 << 8)
#define   SHIFT_CLK_DIVIDER(x)			((x) & 0xff)

#define DC_DISP_DISP_INTERFACE_CONTROL		0x42f
/* Parallel/serial data format select (bits 3:0). */
#define   DISP_DATA_FORMAT_DF1P1C		(0 << 0)
#define   DISP_DATA_FORMAT_DF1P2C24B		(1 << 0)
#define   DISP_DATA_FORMAT_DF1P2C18B		(2 << 0)
#define   DISP_DATA_FORMAT_DF1P2C16B		(3 << 0)
#define   DISP_DATA_FORMAT_DF2S			(5 << 0)
#define   DISP_DATA_FORMAT_DF3S			(6 << 0)
#define   DISP_DATA_FORMAT_DFSPI		(7 << 0)
#define   DISP_DATA_FORMAT_DF1P3C24B		(8 << 0)
#define   DISP_DATA_FORMAT_DF1P3C18B		(9 << 0)
#define   DISP_DATA_ALIGNMENT_MSB		(0 << 8)
#define   DISP_DATA_ALIGNMENT_LSB		(1 << 8)
#define   DISP_DATA_ORDER_RED_BLUE		(0 << 9)
#define   DISP_DATA_ORDER_BLUE_RED		(1 << 9)

#define DC_DISP_DISP_COLOR_CONTROL		0x430
/* Base color depth, bits per R/G/B component (bits 3:0). */
#define   BASE_COLOR_SIZE666			(0 << 0)
#define   BASE_COLOR_SIZE111			(1 << 0)
#define   BASE_COLOR_SIZE222			(2 << 0)
#define   BASE_COLOR_SIZE333			(3 << 0)
#define   BASE_COLOR_SIZE444			(4 << 0)
#define   BASE_COLOR_SIZE555			(5 << 0)
#define   BASE_COLOR_SIZE565			(6 << 0)
#define   BASE_COLOR_SIZE332			(7 << 0)
#define   BASE_COLOR_SIZE888			(8 << 0)

#define   DITHER_CONTROL_DISABLE		(0 << 8)
#define   DITHER_CONTROL_ORDERED		(2 << 8)
#define   DITHER_CONTROL_ERRDIFF		(3 << 8)

#define DC_DISP_SHIFT_CLOCK_OPTIONS		0x431
#define DC_DISP_DATA_ENABLE_OPTIONS		0x432
#define   DE_SELECT_ACTIVE_BLANK		0x0
#define   DE_SELECT_ACTIVE			0x1
#define   DE_SELECT_ACTIVE_IS			0x2
#define   DE_CONTROL_ONECLK			(0 << 2)
#define   DE_CONTROL_NORMAL			(1 << 2)
#define   DE_CONTROL_EARLY_EXT			(2 << 2)
#define   DE_CONTROL_EARLY			(3 << 2)
#define   DE_CONTROL_ACTIVE_BLANK		(4 << 2)

#define DC_DISP_SERIAL_INTERFACE_OPTIONS	0x433
#define DC_DISP_LCD_SPI_OPTIONS			0x434
#define DC_DISP_BORDER_COLOR			0x435
#define DC_DISP_COLOR_KEY0_LOWER		0x436
#define DC_DISP_COLOR_KEY0_UPPER		0x437
#define DC_DISP_COLOR_KEY1_LOWER		0x438
#define DC_DISP_COLOR_KEY1_UPPER		0x439

/* Hardware cursor: colors are packed 8-bit R/G/B. */
#define DC_DISP_CURSOR_FOREGROUND		0x43c
#define DC_DISP_CURSOR_BACKGROUND		0x43d
#define   CURSOR_COLOR(_r, _g, _b) ((_r) | ((_g) << 8) | ((_b) << 16))

/* Cursor image base: 1 KiB-aligned address, stored shifted right by 10. */
#define DC_DISP_CURSOR_START_ADDR		0x43e
#define DC_DISP_CURSOR_START_ADDR_NS		0x43f
#define   CURSOR_START_ADDR_MASK	(((1 << 22) - 1) << 10)
#define   CURSOR_START_ADDR(_addr)	((_addr) >> 10)
#define   CURSOR_SIZE_64			(1 << 24)

#define DC_DISP_CURSOR_POSITION			0x440
#define   CURSOR_POSITION(_x, _y)		\
	(((_x) & ((1 << 16) - 1)) |		\
	(((_y) & ((1 << 16) - 1)) << 16))

#define DC_DISP_CURSOR_POSITION_NS		0x441
#define DC_DISP_INIT_SEQ_CONTROL		0x442
#define DC_DISP_SPI_INIT_SEQ_DATA_A		0x443
#define DC_DISP_SPI_INIT_SEQ_DATA_B		0x444
#define DC_DISP_SPI_INIT_SEQ_DATA_C		0x445
#define DC_DISP_SPI_INIT_SEQ_DATA_D		0x446
/* Memory-client interface FIFO control and per-window hysteresis. */
#define DC_DISP_DC_MCCIF_FIFOCTRL		0x480
#define DC_DISP_MCCIF_DISPLAY0A_HYST		0x481
#define DC_DISP_MCCIF_DISPLAY0B_HYST		0x482
#define DC_DISP_MCCIF_DISPLAY0C_HYST		0x483
#define DC_DISP_MCCIF_DISPLAY1B_HYST		0x484
#define DC_DISP_DAC_CRT_CTRL			0x4c0
#define DC_DISP_DISP_MISC_CONTROL		0x4c1

/* Per-window registers (selected via the window header elsewhere). */
#define DC_WIN_COLOR_PALETTE(x)			(0x500 + (x))

#define DC_WIN_PALETTE_COLOR_EXT		0x600
#define DC_WIN_H_FILTER_P(x)			(0x601 + (x))
/* Color-space conversion coefficients (YUV -> RGB). */
#define DC_WIN_CSC_YOF				0x611
#define DC_WIN_CSC_KYRGB			0x612
#define DC_WIN_CSC_KUR				0x613
#define DC_WIN_CSC_KVR				0x614
#define DC_WIN_CSC_KUG				0x615
#define DC_WIN_CSC_KVG				0x616
#define DC_WIN_CSC_KUB				0x617
#define DC_WIN_CSC_KVB				0x618
#define DC_WIN_V_FILTER_P(x)			(0x619 + (x))
#define DC_WIN_WIN_OPTIONS			0x700
#define   H_DIRECTION_INCREMENT			(0 << 0)
#define   H_DIRECTION_DECREMENT			(1 << 0)
#define   V_DIRECTION_INCREMENT			(0 << 2)
#define   V_DIRECTION_DECREMENT			(1 << 2)
#define   COLOR_EXPAND				(1 << 6)
#define   H_FILTER_ENABLE			(1 << 8)
#define   V_FILTER_ENABLE			(1 << 10)
#define   CP_ENABLE				(1 << 16)
#define   CSC_ENABLE				(1 << 18)
#define   DV_ENABLE				(1 << 20)
#define   WIN_ENABLE				(1 << 30)

#define DC_WIN_BYTE_SWAP			0x701
#define   BYTE_SWAP_NOSWAP			0
#define   BYTE_SWAP_SWAP2			1
#define   BYTE_SWAP_SWAP4			2
#define   BYTE_SWAP_SWAP4HW			3

/* Which hardware unit feeds this window's buffer. */
#define DC_WIN_BUFFER_CONTROL			0x702
#define   BUFFER_CONTROL_HOST			0
#define   BUFFER_CONTROL_VI			1
#define   BUFFER_CONTROL_EPP			2
#define   BUFFER_CONTROL_MPEGE			3
#define   BUFFER_CONTROL_SB2D			4

#define DC_WIN_COLOR_DEPTH			0x703

#define DC_WIN_POSITION				0x704
#define   H_POSITION(x)				(((x) & 0xfff) << 0)
#define   V_POSITION(x)				(((x) & 0xfff) << 16)

#define DC_WIN_SIZE				0x705
#define   H_SIZE(x)				(((x) & 0xfff) << 0)
#define   V_SIZE(x)				(((x) & 0xfff) << 16)

#define DC_WIN_PRESCALED_SIZE			0x706
#define   H_PRESCALED_SIZE(x)			(((x) & 0x3fff) << 0)
#define   V_PRESCALED_SIZE(x)			(((x) & 0xfff) << 16)

/* Scaling DDA: initial phase and 16.16-style per-axis increments. */
#define DC_WIN_H_INITIAL_DDA			0x707
#define DC_WIN_V_INITIAL_DDA			0x708
#define DC_WIN_DDA_INCREMENT			0x709
#define   H_DDA_INC(x)				(((x) & 0xffff) << 0)
#define   V_DDA_INC(x)				(((x) & 0xffff) << 16)

#define DC_WIN_LINE_STRIDE			0x70a
#define   LINE_STRIDE(x)			(x)
#define   UV_LINE_STRIDE(x)			(((x) & 0xffff) << 16)
#define DC_WIN_BUF_STRIDE			0x70b
#define DC_WIN_UV_BUF_STRIDE			0x70c
#define DC_WIN_BUFFER_ADDR_MODE			0x70d
#define   DC_WIN_BUFFER_ADDR_MODE_LINEAR	(0 << 0)
#define   DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV	(0 << 16)
#define   DC_WIN_BUFFER_ADDR_MODE_TILE		(1 << 0)
#define   DC_WIN_BUFFER_ADDR_MODE_TILE_UV	(1 << 16)
#define DC_WIN_DV_CONTROL			0x70e
/* Blending: one register per overlap combination with other windows. */
#define DC_WIN_BLEND_NOKEY			0x70f
#define DC_WIN_BLEND_1WIN			0x710
#define DC_WIN_BLEND_2WIN_X			0x711
#define DC_WIN_BLEND_2WIN_Y			0x712
#define DC_WIN_BLEND_3WIN_XY			0x713
#define   CKEY_NOKEY				(0 << 0)
#define   CKEY_KEY0				(1 << 0)
#define   CKEY_KEY1				(2 << 0)
#define   CKEY_KEY01				(3 << 0)
#define   BLEND_CONTROL_FIX			(0 << 2)
#define   BLEND_CONTROL_ALPHA			(1 << 2)
#define   BLEND_CONTROL_DEPENDANT		(2 << 2)
#define   BLEND_CONTROL_PREMULT			(3 << 2)
#define   BLEND_WEIGHT0(x)			(((x) & 0xff) << 8)
#define   BLEND_WEIGHT1(x)			(((x) & 0xff) << 16)
/* Compose a full blend-register value from token-pasted key/control names. */
#define   BLEND(key, control, weight0, weight1)			\
	  (CKEY_ ## key | BLEND_CONTROL_ ## control |		\
	   BLEND_WEIGHT0(weight0) | BLEND_WEIGHT1(weight1))


#define DC_WIN_HP_FETCH_CONTROL			0x714
/* Window framebuffer addressing; *_NS variants are "no shadow" (immediate). */
#define DC_WINBUF_START_ADDR			0x800
#define DC_WINBUF_START_ADDR_NS			0x801
#define DC_WINBUF_START_ADDR_U			0x802
#define DC_WINBUF_START_ADDR_U_NS		0x803
#define DC_WINBUF_START_ADDR_V			0x804
#define DC_WINBUF_START_ADDR_V_NS		0x805
#define DC_WINBUF_ADDR_H_OFFSET			0x806
#define DC_WINBUF_ADDR_H_OFFSET_NS		0x807
#define DC_WINBUF_ADDR_V_OFFSET			0x808
#define DC_WINBUF_ADDR_V_OFFSET_NS		0x809
#define DC_WINBUF_UFLOW_STATUS			0x80a

/* direct versions of DC_WINBUF_UFLOW_STATUS */
#define DC_WINBUF_AD_UFLOW_STATUS		0xbca
#define DC_WINBUF_BD_UFLOW_STATUS		0xdca
#define DC_WINBUF_CD_UFLOW_STATUS		0xfca

/* Smart dimmer (SD): content-adaptive backlight control block. */
#define DC_DISP_SD_CONTROL			0x4c2
#define   SD_ENABLE_NORMAL			(1 << 0)
#define   SD_ENABLE_ONESHOT			(2 << 0)
#define   SD_USE_VID_LUMA			(1 << 2)
#define   SD_BIN_WIDTH_ONE			(0 << 3)
#define   SD_BIN_WIDTH_TWO			(1 << 3)
#define   SD_BIN_WIDTH_FOUR			(2 << 3)
#define   SD_BIN_WIDTH_EIGHT			(3 << 3)
#define   SD_BIN_WIDTH_MASK			(3 << 3)
#define   SD_AGGRESSIVENESS(x)			(((x) & 0x7) << 5)
#define   SD_HW_UPDATE_DLY(x)			(((x) & 0x3) << 8)
#define   SD_ONESHOT_ENABLE			(1 << 10)
#define   SD_CORRECTION_MODE_AUTO		(0 << 11)
#define   SD_CORRECTION_MODE_MAN		(1 << 11)

/* Software tuning constants for the smart-dimmer state machine. */
#define NUM_BIN_WIDTHS 4
#define STEPS_PER_AGG_LVL 64
#define STEPS_PER_AGG_CHG_LOG2 5
#define STEPS_PER_AGG_CHG (1<<STEPS_PER_AGG_CHG_LOG2)
#define ADJ_PHASE_STEP 8
#define K_STEP 4

#define DC_DISP_SD_CSC_COEFF			0x4c3
#define   SD_CSC_COEFF_R(x)			(((x) & 0xf) << 4)
#define   SD_CSC_COEFF_G(x)			(((x) & 0xf) << 12)
#define   SD_CSC_COEFF_B(x)			(((x) & 0xf) << 20)

#define DC_DISP_SD_LUT(i)			(0x4c4 + i)
#define DC_DISP_SD_LUT_NUM			9
#define   SD_LUT_R(x)				(((x) & 0xff) << 0)
#define   SD_LUT_G(x)				(((x) & 0xff) << 8)
#define   SD_LUT_B(x)				(((x) & 0xff) << 16)

#define DC_DISP_SD_FLICKER_CONTROL		0x4cd
#define   SD_FC_TIME_LIMIT(x)			(((x) & 0xff) << 0)
#define   SD_FC_THRESHOLD(x)			(((x) & 0xff) << 8)

#define DC_DISP_SD_PIXEL_COUNT			0x4ce

/* Luminance histogram: 8 registers x 4 packed 8-bit bins = 32 bins. */
#define DC_DISP_SD_HISTOGRAM(i)			(0x4cf + i)
#define DC_DISP_SD_HISTOGRAM_NUM		8
#define   SD_HISTOGRAM_BIN_0(val)		(((val) & (0xff << 0)) >> 0)
#define   SD_HISTOGRAM_BIN_1(val)		(((val) & (0xff << 8)) >> 8)
#define   SD_HISTOGRAM_BIN_2(val)		(((val) & (0xff << 16)) >> 16)
#define   SD_HISTOGRAM_BIN_3(val)		(((val) & (0xff << 24)) >> 24)

#define DC_DISP_SD_BL_PARAMETERS		0x4d7
#define   SD_BLP_TIME_CONSTANT(x)		(((x) & 0x7ff) << 0)
#define   SD_BLP_STEP(x)			(((x) & 0xff) << 16)

/* Backlight transfer function: 4 registers x 4 packed 8-bit points. */
#define DC_DISP_SD_BL_TF(i)			(0x4d8 + i)
#define DC_DISP_SD_BL_TF_NUM			4
#define   SD_BL_TF_POINT_0(x)			(((x) & 0xff) << 0)
#define   SD_BL_TF_POINT_1(x)			(((x) & 0xff) << 8)
#define   SD_BL_TF_POINT_2(x)			(((x) & 0xff) << 16)
#define   SD_BL_TF_POINT_3(x)			(((x) & 0xff) << 24)

#define DC_DISP_SD_BL_CONTROL			0x4dc
#define   SD_BLC_MODE_MAN			(0 << 0)
/* NOTE(review): AUTO uses bit 1 while MAN clears bit 0 — looks asymmetric;
 * matches the original source, verify against the TRM before relying on it. */
#define   SD_BLC_MODE_AUTO			(1 << 1)
#define   SD_BLC_BRIGHTNESS(val)		(((val) & (0xff << 8)) >> 8)

#define DC_DISP_SD_HW_K_VALUES			0x4dd
#define   SD_HW_K_R(val)			(((val) & (0x3ff << 0)) >> 0)
#define   SD_HW_K_G(val)			(((val) & (0x3ff << 10)) >> 10)
#define   SD_HW_K_B(val)			(((val) & (0x3ff << 20)) >> 20)

#define DC_DISP_SD_MAN_K_VALUES			0x4de
#define   SD_MAN_K_R(x)				(((x) & 0x3ff) << 0)
#define   SD_MAN_K_G(x)				(((x) & 0x3ff) << 10)
#define   SD_MAN_K_B(x)				(((x) & 0x3ff) << 20)

/* Aggressiveness priority helpers for the smart-dimmer software. */
#define NUM_AGG_PRI_LVLS			4
#define SD_AGG_PRI_LVL(x)			((x) >> 3)
#define SD_GET_AGG(x)				((x) & 0x7)

#endif
diff --git a/drivers/video/tegra/dc/dc_sysfs.c b/drivers/video/tegra/dc/dc_sysfs.c new file mode 100644 index 00000000000..6bb18382e6e --- /dev/null +++ b/drivers/video/tegra/dc/dc_sysfs.c | |||
@@ -0,0 +1,327 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/dc_sysfs.c | ||
3 | * | ||
4 | * Copyright (c) 2011, NVIDIA Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along | ||
17 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
19 | */ | ||
20 | |||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/kernel.h> | ||
23 | |||
24 | #include <mach/dc.h> | ||
25 | #include <mach/fb.h> | ||
26 | |||
27 | #include "dc_reg.h" | ||
28 | #include "dc_priv.h" | ||
29 | #include "nvsd.h" | ||
30 | |||
31 | static ssize_t mode_show(struct device *device, | ||
32 | struct device_attribute *attr, char *buf) | ||
33 | { | ||
34 | struct nvhost_device *ndev = to_nvhost_device(device); | ||
35 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
36 | struct tegra_dc_mode *m; | ||
37 | ssize_t res; | ||
38 | |||
39 | mutex_lock(&dc->lock); | ||
40 | m = &dc->mode; | ||
41 | res = snprintf(buf, PAGE_SIZE, | ||
42 | "pclk: %d\n" | ||
43 | "h_ref_to_sync: %d\n" | ||
44 | "v_ref_to_sync: %d\n" | ||
45 | "h_sync_width: %d\n" | ||
46 | "v_sync_width: %d\n" | ||
47 | "h_back_porch: %d\n" | ||
48 | "v_back_porch: %d\n" | ||
49 | "h_active: %d\n" | ||
50 | "v_active: %d\n" | ||
51 | "h_front_porch: %d\n" | ||
52 | "v_front_porch: %d\n" | ||
53 | "stereo_mode: %d\n", | ||
54 | m->pclk, m->h_ref_to_sync, m->v_ref_to_sync, | ||
55 | m->h_sync_width, m->v_sync_width, | ||
56 | m->h_back_porch, m->v_back_porch, | ||
57 | m->h_active, m->v_active, | ||
58 | m->h_front_porch, m->v_front_porch, | ||
59 | m->stereo_mode); | ||
60 | mutex_unlock(&dc->lock); | ||
61 | |||
62 | return res; | ||
63 | } | ||
64 | |||
65 | static DEVICE_ATTR(mode, S_IRUGO, mode_show, NULL); | ||
66 | |||
67 | static ssize_t stats_enable_show(struct device *dev, | ||
68 | struct device_attribute *attr, char *buf) | ||
69 | { | ||
70 | struct nvhost_device *ndev = to_nvhost_device(dev); | ||
71 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
72 | bool enabled; | ||
73 | |||
74 | if (mutex_lock_killable(&dc->lock)) | ||
75 | return -EINTR; | ||
76 | enabled = tegra_dc_stats_get(dc); | ||
77 | mutex_unlock(&dc->lock); | ||
78 | |||
79 | return snprintf(buf, PAGE_SIZE, "%d", enabled); | ||
80 | } | ||
81 | |||
82 | static ssize_t stats_enable_store(struct device *dev, | ||
83 | struct device_attribute *attr, const char *buf, size_t count) | ||
84 | { | ||
85 | struct nvhost_device *ndev = to_nvhost_device(dev); | ||
86 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
87 | unsigned long val = 0; | ||
88 | |||
89 | if (strict_strtoul(buf, 10, &val) < 0) | ||
90 | return -EINVAL; | ||
91 | |||
92 | if (mutex_lock_killable(&dc->lock)) | ||
93 | return -EINTR; | ||
94 | tegra_dc_stats_enable(dc, !!val); | ||
95 | mutex_unlock(&dc->lock); | ||
96 | |||
97 | return count; | ||
98 | } | ||
99 | |||
100 | static DEVICE_ATTR(stats_enable, S_IRUGO|S_IWUSR, | ||
101 | stats_enable_show, stats_enable_store); | ||
102 | |||
103 | static ssize_t enable_show(struct device *device, | ||
104 | struct device_attribute *attr, char *buf) | ||
105 | { | ||
106 | struct nvhost_device *ndev = to_nvhost_device(device); | ||
107 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
108 | ssize_t res; | ||
109 | |||
110 | mutex_lock(&dc->lock); | ||
111 | res = snprintf(buf, PAGE_SIZE, "%d\n", dc->enabled); | ||
112 | mutex_unlock(&dc->lock); | ||
113 | return res; | ||
114 | } | ||
115 | |||
116 | static ssize_t enable_store(struct device *dev, | ||
117 | struct device_attribute *attr, const char *buf, size_t count) | ||
118 | { | ||
119 | struct nvhost_device *ndev = to_nvhost_device(dev); | ||
120 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
121 | unsigned long val = 0; | ||
122 | |||
123 | if (strict_strtoul(buf, 10, &val) < 0) | ||
124 | return -EINVAL; | ||
125 | |||
126 | if (val) { | ||
127 | tegra_dc_enable(dc); | ||
128 | } else { | ||
129 | tegra_dc_disable(dc); | ||
130 | } | ||
131 | |||
132 | return count; | ||
133 | } | ||
134 | |||
135 | static DEVICE_ATTR(enable, S_IRUGO|S_IWUSR, enable_show, enable_store); | ||
136 | |||
137 | static ssize_t crc_checksum_latched_show(struct device *device, | ||
138 | struct device_attribute *attr, char *buf) | ||
139 | { | ||
140 | struct nvhost_device *ndev = to_nvhost_device(device); | ||
141 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
142 | |||
143 | u32 crc; | ||
144 | |||
145 | if (!dc->enabled) { | ||
146 | dev_err(&dc->ndev->dev, "Failed to get dc.\n"); | ||
147 | return -EFAULT; | ||
148 | } | ||
149 | |||
150 | crc = tegra_dc_read_checksum_latched(dc); | ||
151 | |||
152 | return snprintf(buf, PAGE_SIZE, "%u", crc); | ||
153 | } | ||
154 | |||
155 | static ssize_t crc_checksum_latched_store(struct device *dev, | ||
156 | struct device_attribute *attr, const char *buf, size_t count) | ||
157 | { | ||
158 | struct nvhost_device *ndev = to_nvhost_device(dev); | ||
159 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
160 | unsigned long val = 0; | ||
161 | |||
162 | if (!dc->enabled) { | ||
163 | dev_err(&dc->ndev->dev, "Failed to get dc.\n"); | ||
164 | return -EFAULT; | ||
165 | } | ||
166 | |||
167 | if (strict_strtoul(buf, 10, &val) < 0) | ||
168 | return -EINVAL; | ||
169 | |||
170 | if (val == 1) { | ||
171 | tegra_dc_enable_crc(dc); | ||
172 | dev_err(&dc->ndev->dev, "crc is enabled.\n"); | ||
173 | } else if (val == 0) { | ||
174 | tegra_dc_disable_crc(dc); | ||
175 | dev_err(&dc->ndev->dev, "crc is disabled.\n"); | ||
176 | } else | ||
177 | dev_err(&dc->ndev->dev, "Invalid input.\n"); | ||
178 | |||
179 | return count; | ||
180 | } | ||
181 | static DEVICE_ATTR(crc_checksum_latched, S_IRUGO|S_IWUSR, | ||
182 | crc_checksum_latched_show, crc_checksum_latched_store); | ||
183 | |||
184 | #define ORIENTATION_PORTRAIT "portrait" | ||
185 | #define ORIENTATION_LANDSCAPE "landscape" | ||
186 | |||
187 | static ssize_t orientation_3d_show(struct device *dev, | ||
188 | struct device_attribute *attr, char *buf) | ||
189 | { | ||
190 | struct nvhost_device *ndev = to_nvhost_device(dev); | ||
191 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
192 | struct tegra_dc_out *dc_out = dc->out; | ||
193 | const char *orientation; | ||
194 | switch (dc_out->stereo->orientation) { | ||
195 | case TEGRA_DC_STEREO_LANDSCAPE: | ||
196 | orientation = ORIENTATION_LANDSCAPE; | ||
197 | break; | ||
198 | case TEGRA_DC_STEREO_PORTRAIT: | ||
199 | orientation = ORIENTATION_PORTRAIT; | ||
200 | break; | ||
201 | default: | ||
202 | pr_err("Invalid value is stored for stereo_orientation.\n"); | ||
203 | return -EINVAL; | ||
204 | } | ||
205 | return snprintf(buf, PAGE_SIZE, "%s\n", orientation); | ||
206 | } | ||
207 | |||
208 | static ssize_t orientation_3d_store(struct device *dev, | ||
209 | struct device_attribute *attr, const char *buf, size_t cnt) | ||
210 | { | ||
211 | struct nvhost_device *ndev = to_nvhost_device(dev); | ||
212 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
213 | struct tegra_dc_out *dc_out = dc->out; | ||
214 | struct tegra_stereo_out *stereo = dc_out->stereo; | ||
215 | int orientation; | ||
216 | |||
217 | if (0 == strncmp(buf, ORIENTATION_PORTRAIT, | ||
218 | min(cnt, ARRAY_SIZE(ORIENTATION_PORTRAIT) - 1))) { | ||
219 | orientation = TEGRA_DC_STEREO_PORTRAIT; | ||
220 | } else if (0 == strncmp(buf, ORIENTATION_LANDSCAPE, | ||
221 | min(cnt, ARRAY_SIZE(ORIENTATION_LANDSCAPE) - 1))) { | ||
222 | orientation = TEGRA_DC_STEREO_LANDSCAPE; | ||
223 | } else { | ||
224 | pr_err("Invalid property value for stereo_orientation.\n"); | ||
225 | return -EINVAL; | ||
226 | } | ||
227 | stereo->orientation = orientation; | ||
228 | stereo->set_orientation(orientation); | ||
229 | return cnt; | ||
230 | } | ||
231 | |||
232 | static DEVICE_ATTR(stereo_orientation, | ||
233 | S_IRUGO|S_IWUSR, orientation_3d_show, orientation_3d_store); | ||
234 | |||
235 | #define MODE_2D "2d" | ||
236 | #define MODE_3D "3d" | ||
237 | |||
238 | static ssize_t mode_3d_show(struct device *dev, | ||
239 | struct device_attribute *attr, char *buf) | ||
240 | { | ||
241 | struct nvhost_device *ndev = to_nvhost_device(dev); | ||
242 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
243 | struct tegra_dc_out *dc_out = dc->out; | ||
244 | const char *mode; | ||
245 | switch (dc_out->stereo->mode_2d_3d) { | ||
246 | case TEGRA_DC_STEREO_MODE_2D: | ||
247 | mode = MODE_2D; | ||
248 | break; | ||
249 | case TEGRA_DC_STEREO_MODE_3D: | ||
250 | mode = MODE_3D; | ||
251 | break; | ||
252 | default: | ||
253 | pr_err("Invalid value is stored for stereo_mode.\n"); | ||
254 | return -EINVAL; | ||
255 | } | ||
256 | return snprintf(buf, PAGE_SIZE, "%s\n", mode); | ||
257 | } | ||
258 | |||
259 | static ssize_t mode_3d_store(struct device *dev, | ||
260 | struct device_attribute *attr, const char *buf, size_t cnt) | ||
261 | { | ||
262 | struct nvhost_device *ndev = to_nvhost_device(dev); | ||
263 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
264 | struct tegra_dc_out *dc_out = dc->out; | ||
265 | struct tegra_stereo_out *stereo = dc_out->stereo; | ||
266 | int mode; | ||
267 | |||
268 | if (0 == strncmp(buf, MODE_2D, min(cnt, ARRAY_SIZE(MODE_2D) - 1))) { | ||
269 | mode = TEGRA_DC_STEREO_MODE_2D; | ||
270 | } else if (0 == strncmp(buf, MODE_3D, | ||
271 | min(cnt, ARRAY_SIZE(MODE_3D) - 1))) { | ||
272 | mode = TEGRA_DC_STEREO_MODE_3D; | ||
273 | } else { | ||
274 | pr_err("Invalid property value for stereo_mode.\n"); | ||
275 | return -EINVAL; | ||
276 | } | ||
277 | stereo->mode_2d_3d = mode; | ||
278 | stereo->set_mode(mode); | ||
279 | return cnt; | ||
280 | } | ||
281 | |||
282 | static DEVICE_ATTR(stereo_mode, | ||
283 | S_IRUGO|S_IWUSR, mode_3d_show, mode_3d_store); | ||
284 | |||
285 | void __devexit tegra_dc_remove_sysfs(struct device *dev) | ||
286 | { | ||
287 | struct nvhost_device *ndev = to_nvhost_device(dev); | ||
288 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
289 | struct tegra_dc_sd_settings *sd_settings = dc->out->sd_settings; | ||
290 | |||
291 | device_remove_file(dev, &dev_attr_mode); | ||
292 | device_remove_file(dev, &dev_attr_enable); | ||
293 | device_remove_file(dev, &dev_attr_stats_enable); | ||
294 | device_remove_file(dev, &dev_attr_crc_checksum_latched); | ||
295 | |||
296 | if (dc->out->stereo) { | ||
297 | device_remove_file(dev, &dev_attr_stereo_orientation); | ||
298 | device_remove_file(dev, &dev_attr_stereo_mode); | ||
299 | } | ||
300 | |||
301 | if (sd_settings) | ||
302 | nvsd_remove_sysfs(dev); | ||
303 | } | ||
304 | |||
305 | void tegra_dc_create_sysfs(struct device *dev) | ||
306 | { | ||
307 | struct nvhost_device *ndev = to_nvhost_device(dev); | ||
308 | struct tegra_dc *dc = nvhost_get_drvdata(ndev); | ||
309 | struct tegra_dc_sd_settings *sd_settings = dc->out->sd_settings; | ||
310 | int error = 0; | ||
311 | |||
312 | error |= device_create_file(dev, &dev_attr_mode); | ||
313 | error |= device_create_file(dev, &dev_attr_enable); | ||
314 | error |= device_create_file(dev, &dev_attr_stats_enable); | ||
315 | error |= device_create_file(dev, &dev_attr_crc_checksum_latched); | ||
316 | |||
317 | if (dc->out->stereo) { | ||
318 | error |= device_create_file(dev, &dev_attr_stereo_orientation); | ||
319 | error |= device_create_file(dev, &dev_attr_stereo_mode); | ||
320 | } | ||
321 | |||
322 | if (sd_settings) | ||
323 | error |= nvsd_create_sysfs(dev); | ||
324 | |||
325 | if (error) | ||
326 | dev_err(&ndev->dev, "Failed to create sysfs attributes!\n"); | ||
327 | } | ||
diff --git a/drivers/video/tegra/dc/dsi.c b/drivers/video/tegra/dc/dsi.c new file mode 100644 index 00000000000..c33d6e0a58b --- /dev/null +++ b/drivers/video/tegra/dc/dsi.c | |||
@@ -0,0 +1,3042 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/dsi.c | ||
3 | * | ||
4 | * Copyright (c) 2011, NVIDIA Corporation. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/clk.h> | ||
18 | #include <linux/delay.h> | ||
19 | #include <linux/err.h> | ||
20 | #include <linux/fb.h> | ||
21 | #include <linux/gpio.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/spinlock.h> | ||
26 | #include <linux/workqueue.h> | ||
27 | |||
28 | #include <mach/clk.h> | ||
29 | #include <mach/dc.h> | ||
30 | #include <mach/fb.h> | ||
31 | #include <mach/csi.h> | ||
32 | #include <linux/nvhost.h> | ||
33 | |||
34 | #include "dc_reg.h" | ||
35 | #include "dc_priv.h" | ||
36 | #include "dsi_regs.h" | ||
37 | #include "dsi.h" | ||
38 | |||
/* Use nvhost sync points (rather than polling) to wait for DSI events. */
#define DSI_USE_SYNC_POINTS 1
#define S_TO_MS(x) (1000 * (x))

/* Values for the 2-bit state fields of struct dsi_status below. */
#define DSI_MODULE_NOT_INIT 0x0
#define DSI_MODULE_INIT 0x1

/* Lane state: low-power (LP) vs high-speed (HS) signalling. */
#define DSI_LPHS_NOT_INIT 0x0
#define DSI_LPHS_IN_LP_MODE 0x1
#define DSI_LPHS_IN_HS_MODE 0x2

#define DSI_VIDEO_TYPE_NOT_INIT 0x0
#define DSI_VIDEO_TYPE_VIDEO_MODE 0x1
#define DSI_VIDEO_TYPE_CMD_MODE 0x2

/* Who supplies pixels: the display controller or the host CPU. */
#define DSI_DRIVEN_MODE_NOT_INIT 0x0
#define DSI_DRIVEN_MODE_DC 0x1
#define DSI_DRIVEN_MODE_HOST 0x2

#define DSI_PHYCLK_OUT_DIS 0x0
#define DSI_PHYCLK_OUT_EN 0x1

/* PHY clock behaviour: always running vs only during transmission. */
#define DSI_PHYCLK_NOT_INIT 0x0
#define DSI_PHYCLK_CONTINUOUS 0x1
#define DSI_PHYCLK_TX_ONLY 0x2

#define DSI_CLK_BURST_NOT_INIT 0x0
#define DSI_CLK_BURST_NONE_BURST 0x1
#define DSI_CLK_BURST_BURST_MODE 0x2

#define DSI_DC_STREAM_DISABLE 0x0
#define DSI_DC_STREAM_ENABLE 0x1

/* Pending low-power bus operation. */
#define DSI_LP_OP_NOT_INIT 0x0
#define DSI_LP_OP_WRITE 0x1
#define DSI_LP_OP_READ 0x2

/* Debug knob (module parameter): dump the read FIFO and packet type of
 * DSI read responses to the kernel log. */
static bool enable_read_debug;
module_param(enable_read_debug, bool, 0644);
MODULE_PARM_DESC(enable_read_debug,
	"Enable to print read fifo and return packet type");
79 | |||
/*
 * Current operating state of the DSI controller, packed into small
 * bitfields.  Each field takes one of the corresponding DSI_* constants
 * defined above (e.g. lphs holds a DSI_LPHS_* value).
 */
struct dsi_status {
	unsigned init:2;	/* DSI_MODULE_NOT_INIT / DSI_MODULE_INIT */

	unsigned lphs:2;	/* lane mode: DSI_LPHS_* (LP vs HS) */

	unsigned vtype:2;	/* DSI_VIDEO_TYPE_* (video vs command mode) */
	unsigned driven:2;	/* DSI_DRIVEN_MODE_* (DC- vs host-driven) */

	unsigned clk_out:2;	/* DSI_PHYCLK_OUT_* (PHY clock output on/off) */
	unsigned clk_mode:2;	/* DSI_PHYCLK_* (continuous vs TX-only) */
	unsigned clk_burst:2;	/* DSI_CLK_BURST_* */

	unsigned lp_op:2;	/* DSI_LP_OP_* (pending LP read/write) */

	unsigned dc_stream:1;	/* DSI_DC_STREAM_ENABLE / _DISABLE */
};
96 | |||
/* source of video data: the display controller pushes pixels, or the
 * host CPU writes them through the DSI host FIFO. */
enum {
	TEGRA_DSI_DRIVEN_BY_DC,
	TEGRA_DSI_DRIVEN_BY_HOST,
};
102 | |||
/* Per-controller driver state for one DSI output attached to a DC. */
struct tegra_dc_dsi_data {
	struct tegra_dc *dc;		/* owning display controller */
	void __iomem *base;		/* mapped DSI register aperture */
	struct resource *base_res;	/* MMIO resource backing 'base' */

	struct clk *dc_clk;
	struct clk *dsi_clk;
	bool clk_ref;			/* presumably: we hold a clk reference
					 * — TODO confirm against clk users */

	struct mutex lock;		/* serializes access to this struct */

	/* data from board info */
	struct tegra_dsi_out info;

	struct dsi_status status;	/* current controller state (above) */

	struct dsi_phy_timing_inclk phy_timing;

	u8 driven_mode;			/* TEGRA_DSI_DRIVEN_BY_* */
	u8 controller_index;		/* which DSI instance this is */

	/* pixel-to-byte-clock scaling ratio (multiplier/divider pair) */
	u8 pixel_scaler_mul;
	u8 pixel_scaler_div;

	/* defaults derived at init time, before any mode-specific target */
	u32 default_shift_clk_div;
	u32 default_pixel_clk_khz;
	u32 default_hs_clk_khz;

	u32 shift_clk_div;
	u32 target_hs_clk_khz;		/* requested high-speed clock */
	u32 target_lp_clk_khz;		/* requested low-power clock */

	/* nvhost sync point used to wait for DSI completion events */
	u32 syncpt_id;
	u32 syncpt_val;

	u16 current_bit_clk_ns;		/* current bit clock period, ns */
	u32 current_dsi_clk_khz;

	u32 dsi_control_val;		/* cached DSI_CONTROL register value */

	bool ulpm;			/* in ultra-low-power mode */
	bool enabled;
};
146 | |||
/*
 * Register offsets of the six packet-sequencer LO/HI pairs, in the same
 * order as the NUMOF_PKT_SEQ entries of the dsi_pkt_seq_* tables below;
 * each table is programmed into these registers element by element.
 */
const u32 dsi_pkt_seq_reg[NUMOF_PKT_SEQ] = {
	DSI_PKT_SEQ_0_LO,
	DSI_PKT_SEQ_0_HI,
	DSI_PKT_SEQ_1_LO,
	DSI_PKT_SEQ_1_HI,
	DSI_PKT_SEQ_2_LO,
	DSI_PKT_SEQ_2_HI,
	DSI_PKT_SEQ_3_LO,
	DSI_PKT_SEQ_3_HI,
	DSI_PKT_SEQ_4_LO,
	DSI_PKT_SEQ_4_HI,
	DSI_PKT_SEQ_5_LO,
	DSI_PKT_SEQ_5_HI,
};
161 | |||
/*
 * Packet sequence: video mode, non-burst, with sync-end packets
 * ("syne" — presumably "sync end": VE/HE end markers are emitted in
 * addition to VS/HS — confirm against the DSI spec's mode names).
 * Entries are LO/HI pairs per sequencer slot; PKT_LP returns the bus
 * to low-power state after the line.
 */
const u32 dsi_pkt_seq_video_non_burst_syne[NUMOF_PKT_SEQ] = {
	PKT_ID0(CMD_VS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
	0,
	PKT_ID0(CMD_VE) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(1) |
	PKT_ID2(CMD_HE) | PKT_LEN2(0),
	PKT_ID3(CMD_BLNK) | PKT_LEN3(2) | PKT_ID4(CMD_RGB) | PKT_LEN4(3) |
	PKT_ID5(CMD_BLNK) | PKT_LEN5(4),
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(1) |
	PKT_ID2(CMD_HE) | PKT_LEN2(0),
	PKT_ID3(CMD_BLNK) | PKT_LEN3(2) | PKT_ID4(CMD_RGB) | PKT_LEN4(3) |
	PKT_ID5(CMD_BLNK) | PKT_LEN5(4),
};
180 | |||
/*
 * Packet sequence: video mode, non-burst, without sync-end packets
 * (only VS/HS sync starts are sent; blanking is carried by BLNK packets).
 */
const u32 dsi_pkt_seq_video_non_burst[NUMOF_PKT_SEQ] = {
	PKT_ID0(CMD_VS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2) |
	PKT_ID2(CMD_RGB) | PKT_LEN2(3),
	PKT_ID3(CMD_BLNK) | PKT_LEN3(4),
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2) |
	PKT_ID2(CMD_RGB) | PKT_LEN2(3),
	PKT_ID3(CMD_BLNK) | PKT_LEN3(4),
};
197 | |||
/*
 * Packet sequence: video mode, burst, with EOT packets (note the EOT
 * lengths of 7 vs 0 in the no-EOT variant below).
 */
static const u32 dsi_pkt_seq_video_burst[NUMOF_PKT_SEQ] = {
	PKT_ID0(CMD_VS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(7) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(7) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(7) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2)|
	PKT_ID2(CMD_RGB) | PKT_LEN2(3) | PKT_LP,
	PKT_ID0(CMD_EOT) | PKT_LEN0(7),
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(7) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2)|
	PKT_ID2(CMD_RGB) | PKT_LEN2(3) | PKT_LP,
	PKT_ID0(CMD_EOT) | PKT_LEN0(7),
};
214 | |||
/*
 * DSI packet sequencer table: video burst mode without EOT packets
 * (same layout as dsi_pkt_seq_video_burst but all EOT lengths are 0).
 */
static const u32 dsi_pkt_seq_video_burst_no_eot[NUMOF_PKT_SEQ] = {
	PKT_ID0(CMD_VS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2)|
		PKT_ID2(CMD_RGB) | PKT_LEN2(3) | PKT_LP,
	PKT_ID0(CMD_EOT) | PKT_LEN0(0),
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_EOT) | PKT_LEN1(0) | PKT_LP,
	0,
	PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(2)|
		PKT_ID2(CMD_RGB) | PKT_LEN2(3) | PKT_LP,
	PKT_ID0(CMD_EOT) | PKT_LEN0(0),
};
231 | |||
/*
 * DSI packet sequencer table: command (one-shot) mode — only two long-write
 * slots carry pixel data, everything else is idle.
 * TODO: verify with hw about this format
 */
const u32 dsi_pkt_seq_cmd_mode[NUMOF_PKT_SEQ] = {
	0,
	0,
	0,
	0,
	0,
	0,
	PKT_ID0(CMD_LONGW) | PKT_LEN0(3) | PKT_ID1(CMD_EOT) | PKT_LEN1(7),
	0,
	0,
	0,
	PKT_ID0(CMD_LONGW) | PKT_LEN0(3) | PKT_ID1(CMD_EOT) | PKT_LEN1(7),
	0,
};
247 | |||
/*
 * DSI register offsets grouped for bulk initialization.
 * NOTE(review): the consumer of this table is outside this chunk —
 * presumably iterated to reset/program each register during init; confirm
 * at the usage site.
 */
const u32 init_reg[] = {
	DSI_INT_ENABLE,
	DSI_INT_STATUS,
	DSI_INT_MASK,
	DSI_INIT_SEQ_DATA_0,
	DSI_INIT_SEQ_DATA_1,
	DSI_INIT_SEQ_DATA_2,
	DSI_INIT_SEQ_DATA_3,
	DSI_INIT_SEQ_DATA_4,
	DSI_INIT_SEQ_DATA_5,
	DSI_INIT_SEQ_DATA_6,
	DSI_INIT_SEQ_DATA_7,
	DSI_DCS_CMDS,
	DSI_PKT_SEQ_0_LO,
	DSI_PKT_SEQ_1_LO,
	DSI_PKT_SEQ_2_LO,
	DSI_PKT_SEQ_3_LO,
	DSI_PKT_SEQ_4_LO,
	DSI_PKT_SEQ_5_LO,
	DSI_PKT_SEQ_0_HI,
	DSI_PKT_SEQ_1_HI,
	DSI_PKT_SEQ_2_HI,
	DSI_PKT_SEQ_3_HI,
	DSI_PKT_SEQ_4_HI,
	DSI_PKT_SEQ_5_HI,
	DSI_CONTROL,
	DSI_HOST_DSI_CONTROL,
	DSI_PAD_CONTROL,
	DSI_PAD_CONTROL_CD,
	DSI_SOL_DELAY,
	DSI_MAX_THRESHOLD,
	DSI_TRIGGER,
	DSI_TX_CRC,
	DSI_INIT_SEQ_CONTROL,
	DSI_PKT_LEN_0_1,
	DSI_PKT_LEN_2_3,
	DSI_PKT_LEN_4_5,
	DSI_PKT_LEN_6_7,
};
287 | |||
/*
 * Read a 32-bit DSI register. @reg is a 32-bit word offset, hence the
 * "* 4" byte conversion. BUGs if the host1x module is not powered, since
 * register access would hang otherwise.
 */
inline unsigned long tegra_dsi_readl(struct tegra_dc_dsi_data *dsi, u32 reg)
{
	BUG_ON(!nvhost_module_powered(nvhost_get_host(dsi->dc->ndev)->dev));
	return readl(dsi->base + reg * 4);
}
EXPORT_SYMBOL(tegra_dsi_readl);
294 | |||
/*
 * Write a 32-bit DSI register. @reg is a 32-bit word offset, hence the
 * "* 4" byte conversion. BUGs if the host1x module is not powered.
 */
inline void tegra_dsi_writel(struct tegra_dc_dsi_data *dsi, u32 val, u32 reg)
{
	BUG_ON(!nvhost_module_powered(nvhost_get_host(dsi->dc->ndev)->dev));
	writel(val, dsi->base + reg * 4);
}
EXPORT_SYMBOL(tegra_dsi_writel);
301 | |||
302 | static int tegra_dsi_syncpt(struct tegra_dc_dsi_data *dsi) | ||
303 | { | ||
304 | u32 val; | ||
305 | int ret; | ||
306 | |||
307 | ret = 0; | ||
308 | |||
309 | dsi->syncpt_val = nvhost_syncpt_read( | ||
310 | &nvhost_get_host(dsi->dc->ndev)->syncpt, | ||
311 | dsi->syncpt_id); | ||
312 | |||
313 | val = DSI_INCR_SYNCPT_COND(OP_DONE) | | ||
314 | DSI_INCR_SYNCPT_INDX(dsi->syncpt_id); | ||
315 | tegra_dsi_writel(dsi, val, DSI_INCR_SYNCPT); | ||
316 | |||
317 | /* TODO: Use interrupt rather than polling */ | ||
318 | ret = nvhost_syncpt_wait(&nvhost_get_host(dsi->dc->ndev)->syncpt, | ||
319 | dsi->syncpt_id, dsi->syncpt_val + 1); | ||
320 | if (ret < 0) { | ||
321 | dev_err(&dsi->dc->ndev->dev, "DSI sync point failure\n"); | ||
322 | goto fail; | ||
323 | } | ||
324 | |||
325 | (dsi->syncpt_val)++; | ||
326 | return 0; | ||
327 | fail: | ||
328 | return ret; | ||
329 | } | ||
330 | |||
331 | static u32 tegra_dsi_get_hs_clk_rate(struct tegra_dc_dsi_data *dsi) | ||
332 | { | ||
333 | u32 dsi_clock_rate_khz; | ||
334 | |||
335 | switch (dsi->info.video_burst_mode) { | ||
336 | case TEGRA_DSI_VIDEO_BURST_MODE_LOW_SPEED: | ||
337 | case TEGRA_DSI_VIDEO_BURST_MODE_MEDIUM_SPEED: | ||
338 | case TEGRA_DSI_VIDEO_BURST_MODE_FAST_SPEED: | ||
339 | case TEGRA_DSI_VIDEO_BURST_MODE_FASTEST_SPEED: | ||
340 | /* Calculate DSI HS clock rate for DSI burst mode */ | ||
341 | dsi_clock_rate_khz = dsi->default_pixel_clk_khz * | ||
342 | dsi->shift_clk_div; | ||
343 | break; | ||
344 | case TEGRA_DSI_VIDEO_NONE_BURST_MODE: | ||
345 | case TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END: | ||
346 | case TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED: | ||
347 | default: | ||
348 | /* Clock rate is default DSI clock rate for non-burst mode */ | ||
349 | dsi_clock_rate_khz = dsi->default_hs_clk_khz; | ||
350 | break; | ||
351 | } | ||
352 | |||
353 | return dsi_clock_rate_khz; | ||
354 | } | ||
355 | |||
356 | static u32 tegra_dsi_get_lp_clk_rate(struct tegra_dc_dsi_data *dsi, u8 lp_op) | ||
357 | { | ||
358 | u32 dsi_clock_rate_khz; | ||
359 | |||
360 | if (dsi->info.enable_hs_clock_on_lp_cmd_mode) | ||
361 | if (dsi->info.hs_clk_in_lp_cmd_mode_freq_khz) | ||
362 | dsi_clock_rate_khz = | ||
363 | dsi->info.hs_clk_in_lp_cmd_mode_freq_khz; | ||
364 | else | ||
365 | dsi_clock_rate_khz = tegra_dsi_get_hs_clk_rate(dsi); | ||
366 | else | ||
367 | if (lp_op == DSI_LP_OP_READ) | ||
368 | dsi_clock_rate_khz = | ||
369 | dsi->info.lp_read_cmd_mode_freq_khz; | ||
370 | else | ||
371 | dsi_clock_rate_khz = | ||
372 | dsi->info.lp_cmd_mode_freq_khz; | ||
373 | |||
374 | return dsi_clock_rate_khz; | ||
375 | } | ||
376 | |||
377 | static u32 tegra_dsi_get_shift_clk_div(struct tegra_dc_dsi_data *dsi) | ||
378 | { | ||
379 | u32 shift_clk_div; | ||
380 | u32 max_shift_clk_div; | ||
381 | u32 burst_width; | ||
382 | u32 burst_width_max; | ||
383 | |||
384 | /* Get the real value of default shift_clk_div. default_shift_clk_div | ||
385 | * holds the real value of shift_clk_div. | ||
386 | */ | ||
387 | shift_clk_div = dsi->default_shift_clk_div; | ||
388 | |||
389 | /* Calculate shift_clk_div which can matche the video_burst_mode. */ | ||
390 | if (dsi->info.video_burst_mode >= | ||
391 | TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED) { | ||
392 | /* The max_shift_clk_div is multiplied by 10 to save the | ||
393 | * fraction | ||
394 | */ | ||
395 | if (dsi->info.max_panel_freq_khz >= dsi->default_hs_clk_khz) | ||
396 | max_shift_clk_div = dsi->info.max_panel_freq_khz | ||
397 | * shift_clk_div * 10 / dsi->default_hs_clk_khz; | ||
398 | else | ||
399 | max_shift_clk_div = shift_clk_div * 10; | ||
400 | |||
401 | burst_width = dsi->info.video_burst_mode | ||
402 | - TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED; | ||
403 | burst_width_max = TEGRA_DSI_VIDEO_BURST_MODE_FASTEST_SPEED | ||
404 | - TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED; | ||
405 | |||
406 | shift_clk_div = (max_shift_clk_div - shift_clk_div * 10) * | ||
407 | burst_width / (burst_width_max * 10) + shift_clk_div; | ||
408 | } | ||
409 | |||
410 | return shift_clk_div; | ||
411 | } | ||
412 | |||
413 | static void tegra_dsi_init_sw(struct tegra_dc *dc, | ||
414 | struct tegra_dc_dsi_data *dsi) | ||
415 | { | ||
416 | u32 h_width_pixels; | ||
417 | u32 v_width_lines; | ||
418 | u32 pixel_clk_hz; | ||
419 | u32 byte_clk_hz; | ||
420 | u32 plld_clk_mhz; | ||
421 | |||
422 | switch (dsi->info.pixel_format) { | ||
423 | case TEGRA_DSI_PIXEL_FORMAT_16BIT_P: | ||
424 | /* 2 bytes per pixel */ | ||
425 | dsi->pixel_scaler_mul = 2; | ||
426 | dsi->pixel_scaler_div = 1; | ||
427 | break; | ||
428 | case TEGRA_DSI_PIXEL_FORMAT_18BIT_P: | ||
429 | /* 2.25 bytes per pixel */ | ||
430 | dsi->pixel_scaler_mul = 9; | ||
431 | dsi->pixel_scaler_div = 4; | ||
432 | break; | ||
433 | case TEGRA_DSI_PIXEL_FORMAT_18BIT_NP: | ||
434 | case TEGRA_DSI_PIXEL_FORMAT_24BIT_P: | ||
435 | /* 3 bytes per pixel */ | ||
436 | dsi->pixel_scaler_mul = 3; | ||
437 | dsi->pixel_scaler_div = 1; | ||
438 | break; | ||
439 | default: | ||
440 | break; | ||
441 | } | ||
442 | |||
443 | dsi->controller_index = dc->ndev->id; | ||
444 | dsi->ulpm = false; | ||
445 | dsi->enabled = false; | ||
446 | dsi->clk_ref = false; | ||
447 | |||
448 | dsi->dsi_control_val = | ||
449 | DSI_CONTROL_VIRTUAL_CHANNEL(dsi->info.virtual_channel) | | ||
450 | DSI_CONTROL_NUM_DATA_LANES(dsi->info.n_data_lanes - 1) | | ||
451 | DSI_CONTROL_VID_SOURCE(dsi->controller_index) | | ||
452 | DSI_CONTROL_DATA_FORMAT(dsi->info.pixel_format); | ||
453 | |||
454 | /* Below we are going to calculate dsi and dc clock rate. | ||
455 | * Calcuate the horizontal and vertical width. | ||
456 | */ | ||
457 | h_width_pixels = dc->mode.h_back_porch + dc->mode.h_front_porch + | ||
458 | dc->mode.h_sync_width + dc->mode.h_active; | ||
459 | v_width_lines = dc->mode.v_back_porch + dc->mode.v_front_porch + | ||
460 | dc->mode.v_sync_width + dc->mode.v_active; | ||
461 | |||
462 | /* Calculate minimum required pixel rate. */ | ||
463 | pixel_clk_hz = h_width_pixels * v_width_lines * dsi->info.refresh_rate; | ||
464 | if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) { | ||
465 | if (dsi->info.rated_refresh_rate >= dsi->info.refresh_rate) | ||
466 | dev_info(&dc->ndev->dev, "DSI: measured refresh rate " | ||
467 | "should be larger than rated refresh rate.\n"); | ||
468 | dc->mode.rated_pclk = h_width_pixels * v_width_lines * | ||
469 | dsi->info.rated_refresh_rate; | ||
470 | } | ||
471 | |||
472 | /* Calculate minimum byte rate on DSI interface. */ | ||
473 | byte_clk_hz = (pixel_clk_hz * dsi->pixel_scaler_mul) / | ||
474 | (dsi->pixel_scaler_div * dsi->info.n_data_lanes); | ||
475 | |||
476 | /* Round up to multiple of mega hz. */ | ||
477 | plld_clk_mhz = DIV_ROUND_UP((byte_clk_hz * NUMOF_BIT_PER_BYTE), | ||
478 | 1000000); | ||
479 | |||
480 | /* Calculate default real shift_clk_div. */ | ||
481 | dsi->default_shift_clk_div = (NUMOF_BIT_PER_BYTE / 2) * | ||
482 | dsi->pixel_scaler_mul / (dsi->pixel_scaler_div * | ||
483 | dsi->info.n_data_lanes); | ||
484 | /* Calculate default DSI hs clock. DSI interface is double data rate. | ||
485 | * Data is transferred on both rising and falling edge of clk, div by 2 | ||
486 | * to get the actual clock rate. | ||
487 | */ | ||
488 | dsi->default_hs_clk_khz = plld_clk_mhz * 1000 / 2; | ||
489 | dsi->default_pixel_clk_khz = plld_clk_mhz * 1000 / 2 | ||
490 | / dsi->default_shift_clk_div; | ||
491 | |||
492 | /* Get the actual shift_clk_div and clock rates. */ | ||
493 | dsi->shift_clk_div = tegra_dsi_get_shift_clk_div(dsi); | ||
494 | dsi->target_lp_clk_khz = | ||
495 | tegra_dsi_get_lp_clk_rate(dsi, DSI_LP_OP_WRITE); | ||
496 | dsi->target_hs_clk_khz = tegra_dsi_get_hs_clk_rate(dsi); | ||
497 | |||
498 | dev_info(&dc->ndev->dev, "DSI: HS clock rate is %d\n", | ||
499 | dsi->target_hs_clk_khz); | ||
500 | |||
501 | dsi->controller_index = dc->ndev->id; | ||
502 | |||
503 | #if DSI_USE_SYNC_POINTS | ||
504 | dsi->syncpt_id = NVSYNCPT_DSI; | ||
505 | #endif | ||
506 | |||
507 | /* | ||
508 | * Force video clock to be continuous mode if | ||
509 | * enable_hs_clock_on_lp_cmd_mode is set | ||
510 | */ | ||
511 | if (dsi->info.enable_hs_clock_on_lp_cmd_mode) { | ||
512 | if (dsi->info.video_clock_mode != | ||
513 | TEGRA_DSI_VIDEO_CLOCK_CONTINUOUS) | ||
514 | dev_warn(&dc->ndev->dev, | ||
515 | "Force clock continuous mode\n"); | ||
516 | |||
517 | dsi->info.video_clock_mode = TEGRA_DSI_VIDEO_CLOCK_CONTINUOUS; | ||
518 | } | ||
519 | |||
520 | } | ||
521 | |||
/*
 * Pick a T_PHY register value: if the platform supplied a timing in ns,
 * convert it to HW units (clamped at 0 if the conversion goes negative);
 * otherwise use the precomputed default (also clamped at 0).
 */
#define SELECT_T_PHY(platform_t_phy_ns, default_phy, clk_ns, hw_inc) ( \
(platform_t_phy_ns) ? ( \
((DSI_CONVERT_T_PHY_NS_TO_T_PHY(platform_t_phy_ns, clk_ns, hw_inc)) < 0 ? 0 : \
(DSI_CONVERT_T_PHY_NS_TO_T_PHY(platform_t_phy_ns, clk_ns, hw_inc)))) : \
((default_phy) < 0 ? 0 : (default_phy)))
527 | |||
/*
 * Fill in the clock-lane phy timings (tlpx, clk trail/post/zero/prepare/pre),
 * preferring board-supplied ns values over the computed defaults.
 */
static void tegra_dsi_get_clk_phy_timing(struct tegra_dc_dsi_data *dsi,
		struct dsi_phy_timing_inclk *phy_timing_clk, u32 clk_ns)
{
	phy_timing_clk->t_tlpx = SELECT_T_PHY(
		dsi->info.phy_timing.t_tlpx_ns,
		T_TLPX_DEFAULT(clk_ns), clk_ns, T_TLPX_HW_INC);

	phy_timing_clk->t_clktrail = SELECT_T_PHY(
		dsi->info.phy_timing.t_clktrail_ns,
		T_CLKTRAIL_DEFAULT(clk_ns), clk_ns, T_CLKTRAIL_HW_INC);

	phy_timing_clk->t_clkpost = SELECT_T_PHY(
		dsi->info.phy_timing.t_clkpost_ns,
		T_CLKPOST_DEFAULT(clk_ns), clk_ns, T_CLKPOST_HW_INC);

	phy_timing_clk->t_clkzero = SELECT_T_PHY(
		dsi->info.phy_timing.t_clkzero_ns,
		T_CLKZERO_DEFAULT(clk_ns), clk_ns, T_CLKZERO_HW_INC);

	phy_timing_clk->t_clkprepare = SELECT_T_PHY(
		dsi->info.phy_timing.t_clkprepare_ns,
		T_CLKPREPARE_DEFAULT(clk_ns), clk_ns, T_CLKPREPARE_HW_INC);

	phy_timing_clk->t_clkpre = SELECT_T_PHY(
		dsi->info.phy_timing.t_clkpre_ns,
		T_CLKPRE_DEFAULT, clk_ns, T_CLKPRE_HW_INC);
}
555 | |||
/*
 * Fill in the data-lane HS phy timings (tlpx, hs exit/trail/zero/prepare),
 * preferring board-supplied ns values over the computed defaults.
 */
static void tegra_dsi_get_hs_phy_timing(struct tegra_dc_dsi_data *dsi,
		struct dsi_phy_timing_inclk *phy_timing_clk, u32 clk_ns)
{
	phy_timing_clk->t_tlpx = SELECT_T_PHY(
		dsi->info.phy_timing.t_tlpx_ns,
		T_TLPX_DEFAULT(clk_ns), clk_ns, T_TLPX_HW_INC);

	phy_timing_clk->t_hsdexit = SELECT_T_PHY(
		dsi->info.phy_timing.t_hsdexit_ns,
		T_HSEXIT_DEFAULT(clk_ns), clk_ns, T_HSEXIT_HW_INC);

	phy_timing_clk->t_hstrail = SELECT_T_PHY(
		dsi->info.phy_timing.t_hstrail_ns,
		T_HSTRAIL_DEFAULT(clk_ns), clk_ns, T_HSTRAIL_HW_INC);

	phy_timing_clk->t_datzero = SELECT_T_PHY(
		dsi->info.phy_timing.t_datzero_ns,
		T_DATZERO_DEFAULT(clk_ns), clk_ns, T_DATZERO_HW_INC);

	phy_timing_clk->t_hsprepare = SELECT_T_PHY(
		dsi->info.phy_timing.t_hsprepare_ns,
		T_HSPREPARE_DEFAULT(clk_ns), clk_ns, T_HSPREPARE_HW_INC);
}
579 | |||
/* Fill in the escape-mode phy timing (only tlpx is needed). */
static void tegra_dsi_get_escape_phy_timing(struct tegra_dc_dsi_data *dsi,
		struct dsi_phy_timing_inclk *phy_timing_clk, u32 clk_ns)
{
	phy_timing_clk->t_tlpx = SELECT_T_PHY(
		dsi->info.phy_timing.t_tlpx_ns,
		T_TLPX_DEFAULT(clk_ns), clk_ns, T_TLPX_HW_INC);
}
587 | |||
/*
 * Fill in the bus-turnaround (BTA) phy timings: tlpx plus the
 * ta-get/ta-sure/ta-go handshake periods.
 */
static void tegra_dsi_get_bta_phy_timing(struct tegra_dc_dsi_data *dsi,
		struct dsi_phy_timing_inclk *phy_timing_clk, u32 clk_ns)
{
	phy_timing_clk->t_tlpx = SELECT_T_PHY(
		dsi->info.phy_timing.t_tlpx_ns,
		T_TLPX_DEFAULT(clk_ns), clk_ns, T_TLPX_HW_INC);

	phy_timing_clk->t_taget = SELECT_T_PHY(
		dsi->info.phy_timing.t_taget_ns,
		T_TAGET_DEFAULT(clk_ns), clk_ns, T_TAGET_HW_INC);

	phy_timing_clk->t_tasure = SELECT_T_PHY(
		dsi->info.phy_timing.t_tasure_ns,
		T_TASURE_DEFAULT(clk_ns), clk_ns, T_TASURE_HW_INC);

	phy_timing_clk->t_tago = SELECT_T_PHY(
		dsi->info.phy_timing.t_tago_ns,
		T_TAGO_DEFAULT(clk_ns), clk_ns, T_TAGO_HW_INC);
}
607 | |||
/* Fill in the ULPS (ultra-low-power state) phy timings: tlpx and wakeup. */
static void tegra_dsi_get_ulps_phy_timing(struct tegra_dc_dsi_data *dsi,
		struct dsi_phy_timing_inclk *phy_timing_clk, u32 clk_ns)
{
	phy_timing_clk->t_tlpx = SELECT_T_PHY(
		dsi->info.phy_timing.t_tlpx_ns,
		T_TLPX_DEFAULT(clk_ns), clk_ns, T_TLPX_HW_INC);

	phy_timing_clk->t_wakeup = SELECT_T_PHY(
		dsi->info.phy_timing.t_wakeup_ns,
		T_WAKEUP_DEFAULT, clk_ns, T_WAKEUP_HW_INC);
}
619 | |||
620 | #undef SELECT_T_PHY | ||
621 | |||
622 | static void tegra_dsi_get_phy_timing(struct tegra_dc_dsi_data *dsi, | ||
623 | struct dsi_phy_timing_inclk *phy_timing_clk, | ||
624 | u32 clk_ns, u8 lphs) | ||
625 | { | ||
626 | if (lphs == DSI_LPHS_IN_HS_MODE) { | ||
627 | tegra_dsi_get_clk_phy_timing(dsi, phy_timing_clk, clk_ns); | ||
628 | tegra_dsi_get_hs_phy_timing(dsi, phy_timing_clk, clk_ns); | ||
629 | } else { | ||
630 | /* default is LP mode */ | ||
631 | tegra_dsi_get_escape_phy_timing(dsi, phy_timing_clk, clk_ns); | ||
632 | tegra_dsi_get_bta_phy_timing(dsi, phy_timing_clk, clk_ns); | ||
633 | tegra_dsi_get_ulps_phy_timing(dsi, phy_timing_clk, clk_ns); | ||
634 | if (dsi->info.enable_hs_clock_on_lp_cmd_mode) | ||
635 | tegra_dsi_get_clk_phy_timing | ||
636 | (dsi, phy_timing_clk, clk_ns); | ||
637 | } | ||
638 | } | ||
639 | |||
/*
 * Validate the computed phy timings against the MIPI-mandated ranges.
 * Checks the common tlpx first, then either the HS-mode data-lane timings
 * or the LP-mode wakeup/turnaround timings, and finally the clock-lane
 * timings whenever the clock lane runs in HS (HS mode, or LP with
 * enable_hs_clock_on_lp_cmd_mode). Returns 0 if all checked timings are in
 * range, -EINVAL (after a dev_warn) on the first violation.
 */
static int tegra_dsi_mipi_phy_timing_range(struct tegra_dc_dsi_data *dsi,
				struct dsi_phy_timing_inclk *phy_timing,
				u32 clk_ns, u8 lphs)
{
/* Evaluates to -EINVAL when val falls outside [min, max]; a bound equal
 * to NOT_DEFINED is treated as unbounded on that side. */
#define CHECK_RANGE(val, min, max) ( \
		((min) == NOT_DEFINED ? 0 : (val) < (min)) || \
		((max) == NOT_DEFINED ? 0 : (val) > (max)) ? -EINVAL : 0)

	int err = 0;

	err = CHECK_RANGE(
	DSI_CONVERT_T_PHY_TO_T_PHY_NS(
			phy_timing->t_tlpx, clk_ns, T_TLPX_HW_INC),
			MIPI_T_TLPX_NS_MIN, MIPI_T_TLPX_NS_MAX);
	if (err < 0) {
		dev_warn(&dsi->dc->ndev->dev,
			"dsi: Tlpx mipi range violated\n");
		goto fail;
	}

	if (lphs == DSI_LPHS_IN_HS_MODE) {
		err = CHECK_RANGE(
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_hsdexit, clk_ns, T_HSEXIT_HW_INC),
				MIPI_T_HSEXIT_NS_MIN, MIPI_T_HSEXIT_NS_MAX);
		if (err < 0) {
			dev_warn(&dsi->dc->ndev->dev,
				"dsi: HsExit mipi range violated\n");
			goto fail;
		}

		err = CHECK_RANGE(
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_hstrail, clk_ns, T_HSTRAIL_HW_INC),
				MIPI_T_HSTRAIL_NS_MIN(clk_ns), MIPI_T_HSTRAIL_NS_MAX);
		if (err < 0) {
			dev_warn(&dsi->dc->ndev->dev,
				"dsi: HsTrail mipi range violated\n");
			goto fail;
		}

		err = CHECK_RANGE(
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_datzero, clk_ns, T_DATZERO_HW_INC),
				MIPI_T_HSZERO_NS_MIN, MIPI_T_HSZERO_NS_MAX);
		if (err < 0) {
			dev_warn(&dsi->dc->ndev->dev,
				"dsi: HsZero mipi range violated\n");
			goto fail;
		}

		err = CHECK_RANGE(
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_hsprepare, clk_ns, T_HSPREPARE_HW_INC),
				MIPI_T_HSPREPARE_NS_MIN(clk_ns),
				MIPI_T_HSPREPARE_NS_MAX(clk_ns));
		if (err < 0) {
			dev_warn(&dsi->dc->ndev->dev,
				"dsi: HsPrepare mipi range violated\n");
			goto fail;
		}

		/* The spec also bounds the sum of hs-prepare and hs-zero. */
		err = CHECK_RANGE(
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_hsprepare, clk_ns, T_HSPREPARE_HW_INC) +
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_datzero, clk_ns, T_DATZERO_HW_INC),
				MIPI_T_HSPREPARE_ADD_HSZERO_NS_MIN(clk_ns),
				MIPI_T_HSPREPARE_ADD_HSZERO_NS_MAX);
		if (err < 0) {
			dev_warn(&dsi->dc->ndev->dev,
			"dsi: HsPrepare + HsZero mipi range violated\n");
			goto fail;
		}
	} else {
		/* default is LP mode */
		err = CHECK_RANGE(
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_wakeup, clk_ns, T_WAKEUP_HW_INC),
				MIPI_T_WAKEUP_NS_MIN, MIPI_T_WAKEUP_NS_MAX);
		if (err < 0) {
			dev_warn(&dsi->dc->ndev->dev,
				"dsi: WakeUp mipi range violated\n");
			goto fail;
		}

		/* ta-sure bounds are expressed relative to tlpx. */
		err = CHECK_RANGE(
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_tasure, clk_ns, T_TASURE_HW_INC),
				MIPI_T_TASURE_NS_MIN(DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_tlpx, clk_ns, T_TLPX_HW_INC)),
				MIPI_T_TASURE_NS_MAX(DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_tlpx, clk_ns, T_TLPX_HW_INC)));
		if (err < 0) {
			dev_warn(&dsi->dc->ndev->dev,
				"dsi: TaSure mipi range violated\n");
			goto fail;
		}
	}

	/* Clock-lane checks apply whenever the clock lane runs in HS. */
	if (lphs == DSI_LPHS_IN_HS_MODE ||
		dsi->info.enable_hs_clock_on_lp_cmd_mode) {
		err = CHECK_RANGE(
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_clktrail, clk_ns, T_CLKTRAIL_HW_INC),
				MIPI_T_CLKTRAIL_NS_MIN, MIPI_T_CLKTRAIL_NS_MAX);
		if (err < 0) {
			dev_warn(&dsi->dc->ndev->dev,
				"dsi: ClkTrail mipi range violated\n");
			goto fail;
		}

		err = CHECK_RANGE(
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_clkpost, clk_ns, T_CLKPOST_HW_INC),
				MIPI_T_CLKPOST_NS_MIN(clk_ns), MIPI_T_CLKPOST_NS_MAX);
		if (err < 0) {
			dev_warn(&dsi->dc->ndev->dev,
				"dsi: ClkPost mipi range violated\n");
			goto fail;
		}

		err = CHECK_RANGE(
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_clkzero, clk_ns, T_CLKZERO_HW_INC),
				MIPI_T_CLKZERO_NS_MIN, MIPI_T_CLKZERO_NS_MAX);
		if (err < 0) {
			dev_warn(&dsi->dc->ndev->dev,
				"dsi: ClkZero mipi range violated\n");
			goto fail;
		}

		err = CHECK_RANGE(
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_clkprepare, clk_ns, T_CLKPREPARE_HW_INC),
				MIPI_T_CLKPREPARE_NS_MIN, MIPI_T_CLKPREPARE_NS_MAX);
		if (err < 0) {
			dev_warn(&dsi->dc->ndev->dev,
				"dsi: ClkPrepare mipi range violated\n");
			goto fail;
		}

		err = CHECK_RANGE(
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_clkpre, clk_ns, T_CLKPRE_HW_INC),
				MIPI_T_CLKPRE_NS_MIN, MIPI_T_CLKPRE_NS_MAX);
		if (err < 0) {
			dev_warn(&dsi->dc->ndev->dev,
				"dsi: ClkPre mipi range violated\n");
			goto fail;
		}

		/* The spec also bounds the sum of clk-prepare and clk-zero. */
		err = CHECK_RANGE(
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_clkprepare, clk_ns, T_CLKPREPARE_HW_INC) +
		DSI_CONVERT_T_PHY_TO_T_PHY_NS(
				phy_timing->t_clkzero, clk_ns, T_CLKZERO_HW_INC),
				MIPI_T_CLKPREPARE_ADD_CLKZERO_NS_MIN,
				MIPI_T_CLKPREPARE_ADD_CLKZERO_NS_MAX);
		if (err < 0) {
			dev_warn(&dsi->dc->ndev->dev,
			"dsi: ClkPrepare + ClkZero mipi range violated\n");
			goto fail;
		}
	}
fail:
#undef CHECK_RANGE
	return err;
}
809 | |||
810 | static int tegra_dsi_hs_phy_len(struct tegra_dc_dsi_data *dsi, | ||
811 | struct dsi_phy_timing_inclk *phy_timing, | ||
812 | u32 clk_ns, u8 lphs) | ||
813 | { | ||
814 | u32 hs_t_phy_ns; | ||
815 | u32 clk_t_phy_ns; | ||
816 | u32 t_phy_ns; | ||
817 | u32 h_blank_ns; | ||
818 | struct tegra_dc_mode *modes; | ||
819 | u32 t_pix_ns; | ||
820 | int err = 0; | ||
821 | |||
822 | if (!(lphs == DSI_LPHS_IN_HS_MODE)) | ||
823 | goto fail; | ||
824 | |||
825 | modes = dsi->dc->out->modes; | ||
826 | t_pix_ns = clk_ns * BITS_PER_BYTE * | ||
827 | dsi->pixel_scaler_mul / dsi->pixel_scaler_div; | ||
828 | |||
829 | hs_t_phy_ns = | ||
830 | DSI_CONVERT_T_PHY_TO_T_PHY_NS( | ||
831 | phy_timing->t_tlpx, clk_ns, T_TLPX_HW_INC) + | ||
832 | DSI_CONVERT_T_PHY_TO_T_PHY_NS( | ||
833 | phy_timing->t_hsprepare, clk_ns, T_HSPREPARE_HW_INC) + | ||
834 | DSI_CONVERT_T_PHY_TO_T_PHY_NS( | ||
835 | phy_timing->t_datzero, clk_ns, T_DATZERO_HW_INC) + | ||
836 | DSI_CONVERT_T_PHY_TO_T_PHY_NS( | ||
837 | phy_timing->t_hstrail, clk_ns, T_HSTRAIL_HW_INC) + | ||
838 | DSI_CONVERT_T_PHY_TO_T_PHY_NS( | ||
839 | phy_timing->t_hsdexit, clk_ns, T_HSEXIT_HW_INC); | ||
840 | |||
841 | clk_t_phy_ns = | ||
842 | DSI_CONVERT_T_PHY_TO_T_PHY_NS( | ||
843 | phy_timing->t_clkpost, clk_ns, T_CLKPOST_HW_INC) + | ||
844 | DSI_CONVERT_T_PHY_TO_T_PHY_NS( | ||
845 | phy_timing->t_clktrail, clk_ns, T_CLKTRAIL_HW_INC) + | ||
846 | DSI_CONVERT_T_PHY_TO_T_PHY_NS( | ||
847 | phy_timing->t_hsdexit, clk_ns, T_HSEXIT_HW_INC) + | ||
848 | DSI_CONVERT_T_PHY_TO_T_PHY_NS( | ||
849 | phy_timing->t_tlpx, clk_ns, T_TLPX_HW_INC) + | ||
850 | DSI_CONVERT_T_PHY_TO_T_PHY_NS( | ||
851 | phy_timing->t_clkprepare, clk_ns, T_CLKPREPARE_HW_INC) + | ||
852 | DSI_CONVERT_T_PHY_TO_T_PHY_NS( | ||
853 | phy_timing->t_clkzero, clk_ns, T_CLKZERO_HW_INC) + | ||
854 | DSI_CONVERT_T_PHY_TO_T_PHY_NS( | ||
855 | phy_timing->t_clkpre, clk_ns, T_CLKPRE_HW_INC); | ||
856 | |||
857 | h_blank_ns = t_pix_ns * (modes->h_sync_width + modes->h_back_porch + | ||
858 | modes->h_front_porch); | ||
859 | |||
860 | /* Extra tlpx and byte cycle required by dsi HW */ | ||
861 | t_phy_ns = dsi->info.n_data_lanes * (hs_t_phy_ns + clk_t_phy_ns + | ||
862 | DSI_CONVERT_T_PHY_TO_T_PHY_NS( | ||
863 | phy_timing->t_tlpx, clk_ns, T_TLPX_HW_INC) + | ||
864 | clk_ns * BITS_PER_BYTE); | ||
865 | |||
866 | if (h_blank_ns < t_phy_ns) { | ||
867 | err = -EINVAL; | ||
868 | dev_err(&dsi->dc->ndev->dev, | ||
869 | "dsi: Hblank is smaller than HS trans phy timing\n"); | ||
870 | goto fail; | ||
871 | } | ||
872 | |||
873 | return 0; | ||
874 | fail: | ||
875 | return err; | ||
876 | } | ||
877 | |||
878 | static int tegra_dsi_constraint_phy_timing(struct tegra_dc_dsi_data *dsi, | ||
879 | struct dsi_phy_timing_inclk *phy_timing, | ||
880 | u32 clk_ns, u8 lphs) | ||
881 | { | ||
882 | int err = 0; | ||
883 | |||
884 | err = tegra_dsi_mipi_phy_timing_range(dsi, phy_timing, clk_ns, lphs); | ||
885 | if (err < 0) { | ||
886 | dev_warn(&dsi->dc->ndev->dev, "dsi: mipi range violated\n"); | ||
887 | goto fail; | ||
888 | } | ||
889 | |||
890 | err = tegra_dsi_hs_phy_len(dsi, phy_timing, clk_ns, lphs); | ||
891 | if (err < 0) { | ||
892 | dev_err(&dsi->dc->ndev->dev, "dsi: Hblank too short\n"); | ||
893 | goto fail; | ||
894 | } | ||
895 | |||
896 | /* TODO: add more contraints */ | ||
897 | fail: | ||
898 | return err; | ||
899 | } | ||
900 | |||
/*
 * Compute and program the D-PHY timing registers (PHY_TIMING_0/1/2 and
 * BTA_TIMING) for the requested mode, then cache the result back into
 * dsi->phy_timing.
 * NOTE(review): the return value of tegra_dsi_constraint_phy_timing() is
 * ignored, so out-of-range timings are warned about but still programmed —
 * confirm this is intentional.
 */
static void tegra_dsi_set_phy_timing(struct tegra_dc_dsi_data *dsi, u8 lphs)
{
	u32 val;
	/* Work on a local copy; committed back to dsi->phy_timing below. */
	struct dsi_phy_timing_inclk phy_timing = dsi->phy_timing;

	tegra_dsi_get_phy_timing
		(dsi, &phy_timing, dsi->current_bit_clk_ns, lphs);

	tegra_dsi_constraint_phy_timing(dsi, &phy_timing,
					dsi->current_bit_clk_ns, lphs);

	val = DSI_PHY_TIMING_0_THSDEXIT(phy_timing.t_hsdexit) |
			DSI_PHY_TIMING_0_THSTRAIL(phy_timing.t_hstrail) |
			DSI_PHY_TIMING_0_TDATZERO(phy_timing.t_datzero) |
			DSI_PHY_TIMING_0_THSPREPR(phy_timing.t_hsprepare);
	tegra_dsi_writel(dsi, val, DSI_PHY_TIMING_0);

	val = DSI_PHY_TIMING_1_TCLKTRAIL(phy_timing.t_clktrail) |
			DSI_PHY_TIMING_1_TCLKPOST(phy_timing.t_clkpost) |
			DSI_PHY_TIMING_1_TCLKZERO(phy_timing.t_clkzero) |
			DSI_PHY_TIMING_1_TTLPX(phy_timing.t_tlpx);
	tegra_dsi_writel(dsi, val, DSI_PHY_TIMING_1);

	val = DSI_PHY_TIMING_2_TCLKPREPARE(phy_timing.t_clkprepare) |
			DSI_PHY_TIMING_2_TCLKPRE(phy_timing.t_clkpre) |
			DSI_PHY_TIMING_2_TWAKEUP(phy_timing.t_wakeup);
	tegra_dsi_writel(dsi, val, DSI_PHY_TIMING_2);

	val = DSI_BTA_TIMING_TTAGET(phy_timing.t_taget) |
			DSI_BTA_TIMING_TTASURE(phy_timing.t_tasure) |
			DSI_BTA_TIMING_TTAGO(phy_timing.t_tago);
	tegra_dsi_writel(dsi, val, DSI_BTA_TIMING);

	dsi->phy_timing = phy_timing;
}
936 | |||
937 | static u32 tegra_dsi_sol_delay_burst(struct tegra_dc *dc, | ||
938 | struct tegra_dc_dsi_data *dsi) | ||
939 | { | ||
940 | u32 dsi_to_pixel_clk_ratio; | ||
941 | u32 temp; | ||
942 | u32 temp1; | ||
943 | u32 mipi_clk_adj_kHz; | ||
944 | u32 sol_delay; | ||
945 | struct tegra_dc_mode *dc_modes = &dc->mode; | ||
946 | |||
947 | /* Get Fdsi/Fpixel ration (note: Fdsi is in bit format) */ | ||
948 | dsi_to_pixel_clk_ratio = (dsi->current_dsi_clk_khz * 2 + | ||
949 | dsi->default_pixel_clk_khz - 1) / dsi->default_pixel_clk_khz; | ||
950 | |||
951 | /* Convert Fdsi to byte format */ | ||
952 | dsi_to_pixel_clk_ratio *= 1000/8; | ||
953 | |||
954 | /* Multiplying by 1000 so that we don't loose the fraction part */ | ||
955 | temp = dc_modes->h_active * 1000; | ||
956 | temp1 = dc_modes->h_active + dc_modes->h_back_porch + | ||
957 | dc_modes->h_sync_width; | ||
958 | |||
959 | sol_delay = temp1 * dsi_to_pixel_clk_ratio - | ||
960 | temp * dsi->pixel_scaler_mul / | ||
961 | (dsi->pixel_scaler_div * dsi->info.n_data_lanes); | ||
962 | |||
963 | /* Do rounding on sol delay */ | ||
964 | sol_delay = (sol_delay + 1000 - 1)/1000; | ||
965 | |||
966 | /* TODO: | ||
967 | * 1. find out the correct sol fifo depth to use | ||
968 | * 2. verify with hw about the clamping function | ||
969 | */ | ||
970 | if (sol_delay > (480 * 4)) { | ||
971 | sol_delay = (480 * 4); | ||
972 | mipi_clk_adj_kHz = sol_delay + | ||
973 | (dc_modes->h_active * dsi->pixel_scaler_mul) / | ||
974 | (dsi->info.n_data_lanes * dsi->pixel_scaler_div); | ||
975 | |||
976 | mipi_clk_adj_kHz *= (dsi->default_pixel_clk_khz / temp1); | ||
977 | |||
978 | mipi_clk_adj_kHz *= 4; | ||
979 | } | ||
980 | |||
981 | dsi->target_hs_clk_khz = mipi_clk_adj_kHz; | ||
982 | |||
983 | return sol_delay; | ||
984 | } | ||
985 | |||
986 | static void tegra_dsi_set_sol_delay(struct tegra_dc *dc, | ||
987 | struct tegra_dc_dsi_data *dsi) | ||
988 | { | ||
989 | u32 sol_delay; | ||
990 | |||
991 | if (dsi->info.video_burst_mode == TEGRA_DSI_VIDEO_NONE_BURST_MODE || | ||
992 | dsi->info.video_burst_mode == | ||
993 | TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END) { | ||
994 | #define VIDEO_FIFO_LATENCY_PIXEL_CLK 8 | ||
995 | sol_delay = VIDEO_FIFO_LATENCY_PIXEL_CLK * | ||
996 | dsi->pixel_scaler_mul / dsi->pixel_scaler_div; | ||
997 | #undef VIDEO_FIFO_LATENCY_PIXEL_CLK | ||
998 | dsi->status.clk_burst = DSI_CLK_BURST_NONE_BURST; | ||
999 | } else { | ||
1000 | sol_delay = tegra_dsi_sol_delay_burst(dc, dsi); | ||
1001 | dsi->status.clk_burst = DSI_CLK_BURST_BURST_MODE; | ||
1002 | } | ||
1003 | |||
1004 | tegra_dsi_writel(dsi, DSI_SOL_DELAY_SOL_DELAY(sol_delay), | ||
1005 | DSI_SOL_DELAY); | ||
1006 | } | ||
1007 | |||
/*
 * Program the DSI timeout registers: host TX timeout derived from one
 * frame's byte count, LP RX / panel-reset / turnaround timeouts, and the
 * tally (violation counter) register.
 */
static void tegra_dsi_set_timeout(struct tegra_dc_dsi_data *dsi)
{
	u32 val;
	u32 bytes_per_frame;
	u32 timeout = 0;

	/* TODO: verify the following equation */
	bytes_per_frame = dsi->current_dsi_clk_khz * 1000 * 2 /
						(dsi->info.refresh_rate * 8);
	timeout = bytes_per_frame / DSI_CYCLE_COUNTER_VALUE;
	/* Add margin and clamp to the 16-bit register field. */
	timeout = (timeout + DSI_HTX_TO_MARGIN) & 0xffff;

	val = DSI_TIMEOUT_0_LRXH_TO(DSI_LRXH_TO_VALUE) |
			DSI_TIMEOUT_0_HTX_TO(timeout);
	tegra_dsi_writel(dsi, val, DSI_TIMEOUT_0);

	/* Panel reset timeout: board-specified msec converted to bit
	 * clocks, or the driver default. */
	if (dsi->info.panel_reset_timeout_msec)
		timeout = (dsi->info.panel_reset_timeout_msec * 1000*1000)
					/ dsi->current_bit_clk_ns;
	else
		timeout = DSI_PR_TO_VALUE;

	val = DSI_TIMEOUT_1_PR_TO(timeout) |
		DSI_TIMEOUT_1_TA_TO(DSI_TA_TO_VALUE);
	tegra_dsi_writel(dsi, val, DSI_TIMEOUT_1);

	val = DSI_TO_TALLY_P_RESET_STATUS(IN_RESET) |
		DSI_TO_TALLY_TA_TALLY(DSI_TA_TALLY_VALUE)|
		DSI_TO_TALLY_LRXH_TALLY(DSI_LRXH_TALLY_VALUE)|
		DSI_TO_TALLY_HTX_TALLY(DSI_HTX_TALLY_VALUE);
	tegra_dsi_writel(dsi, val, DSI_TO_TALLY);
}
1040 | |||
/* Program the DSI_PKT_LEN_* registers for video mode: convert the
 * horizontal timing intervals (active, sync, back/front porch) from
 * pixels to bytes and subtract the per-interval packet overhead. */
static void tegra_dsi_setup_video_mode_pkt_length(struct tegra_dc *dc,
						struct tegra_dc_dsi_data *dsi)
{
	u32 val;
	u32 hact_pkt_len;
	u32 hsa_pkt_len;
	u32 hbp_pkt_len;
	u32 hfp_pkt_len;

	/* pixel_scaler_mul/div converts pixel counts to byte counts for
	 * the configured pixel format. */
	hact_pkt_len = dc->mode.h_active * dsi->pixel_scaler_mul /
							dsi->pixel_scaler_div;
	hsa_pkt_len = dc->mode.h_sync_width * dsi->pixel_scaler_mul /
							dsi->pixel_scaler_div;
	hbp_pkt_len = dc->mode.h_back_porch * dsi->pixel_scaler_mul /
							dsi->pixel_scaler_div;
	hfp_pkt_len = dc->mode.h_front_porch * dsi->pixel_scaler_mul /
							dsi->pixel_scaler_div;

	/* Without a sync-end pulse the sync interval is folded into the
	 * back porch blanking packet. */
	if (dsi->info.video_burst_mode !=
				TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END)
		hbp_pkt_len += hsa_pkt_len;

	/* NOTE(review): these subtractions assume each interval is at
	 * least as long as its packet overhead — presumably guaranteed
	 * by mode validation elsewhere; underflow otherwise. */
	hsa_pkt_len -= DSI_HSYNC_BLNK_PKT_OVERHEAD;
	hbp_pkt_len -= DSI_HBACK_PORCH_PKT_OVERHEAD;
	hfp_pkt_len -= DSI_HFRONT_PORCH_PKT_OVERHEAD;

	val = DSI_PKT_LEN_0_1_LENGTH_0(0) |
		DSI_PKT_LEN_0_1_LENGTH_1(hsa_pkt_len);
	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_0_1);

	val = DSI_PKT_LEN_2_3_LENGTH_2(hbp_pkt_len) |
		DSI_PKT_LEN_2_3_LENGTH_3(hact_pkt_len);
	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_2_3);

	val = DSI_PKT_LEN_4_5_LENGTH_4(hfp_pkt_len) |
		DSI_PKT_LEN_4_5_LENGTH_5(0);
	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_4_5);

	val = DSI_PKT_LEN_6_7_LENGTH_6(0) | DSI_PKT_LEN_6_7_LENGTH_7(0);
	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_6_7);
}
1082 | |||
/* Program the DSI_PKT_LEN_* registers for DC-driven command mode.  The
 * active line length in bytes (+1, presumably for the DCS command byte —
 * TODO confirm) is used for both LENGTH_3 and LENGTH_5 slots. */
static void tegra_dsi_setup_cmd_mode_pkt_length(struct tegra_dc *dc,
						struct tegra_dc_dsi_data *dsi)
{
	unsigned long val;
	unsigned long act_bytes;

	act_bytes = dc->mode.h_active * dsi->pixel_scaler_mul /
						dsi->pixel_scaler_div + 1;

	val = DSI_PKT_LEN_0_1_LENGTH_0(0) | DSI_PKT_LEN_0_1_LENGTH_1(0);
	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_0_1);

	val = DSI_PKT_LEN_2_3_LENGTH_2(0) | DSI_PKT_LEN_2_3_LENGTH_3(act_bytes);
	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_2_3);

	val = DSI_PKT_LEN_4_5_LENGTH_4(0) | DSI_PKT_LEN_4_5_LENGTH_5(act_bytes);
	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_4_5);

	/* 0x0f0f: magic value inherited from HW defaults — meaning not
	 * established by this file; verify against TRM before changing. */
	val = DSI_PKT_LEN_6_7_LENGTH_6(0) | DSI_PKT_LEN_6_7_LENGTH_7(0x0f0f);
	tegra_dsi_writel(dsi, val, DSI_PKT_LEN_6_7);
}
1104 | |||
1105 | static void tegra_dsi_set_pkt_length(struct tegra_dc *dc, | ||
1106 | struct tegra_dc_dsi_data *dsi) | ||
1107 | { | ||
1108 | if (dsi->driven_mode == TEGRA_DSI_DRIVEN_BY_HOST) | ||
1109 | return; | ||
1110 | |||
1111 | if (dsi->info.video_data_type == TEGRA_DSI_VIDEO_TYPE_VIDEO_MODE) | ||
1112 | tegra_dsi_setup_video_mode_pkt_length(dc, dsi); | ||
1113 | else | ||
1114 | tegra_dsi_setup_cmd_mode_pkt_length(dc, dsi); | ||
1115 | } | ||
1116 | |||
/* Select and program the DSI packet sequencer table.  The table choice
 * depends on video vs command mode and the burst mode; the RGB pixel
 * stream data type ID (per pixel format) is OR'ed into the PKT_SEQ_3/5
 * LO or HI registers depending on where the table places the pixel
 * packet.  No-op in host-driven mode. */
static void tegra_dsi_set_pkt_seq(struct tegra_dc *dc,
				struct tegra_dc_dsi_data *dsi)
{
	const u32 *pkt_seq;
	u32 rgb_info;
	u32 pkt_seq_3_5_rgb_lo;
	u32 pkt_seq_3_5_rgb_hi;
	u32 val;
	u32 reg;
	u8 i;

	if (dsi->driven_mode == TEGRA_DSI_DRIVEN_BY_HOST)
		return;

	/* Map the configured pixel format to its pixel stream command ID;
	 * unknown formats fall back to packed 24bpp. */
	switch (dsi->info.pixel_format) {
	case TEGRA_DSI_PIXEL_FORMAT_16BIT_P:
		rgb_info = CMD_RGB_16BPP;
		break;
	case TEGRA_DSI_PIXEL_FORMAT_18BIT_P:
		rgb_info = CMD_RGB_18BPP;
		break;
	case TEGRA_DSI_PIXEL_FORMAT_18BIT_NP:
		rgb_info = CMD_RGB_18BPPNP;
		break;
	case TEGRA_DSI_PIXEL_FORMAT_24BIT_P:
	default:
		rgb_info = CMD_RGB_24BPP;
		break;
	}

	pkt_seq_3_5_rgb_lo = 0;
	pkt_seq_3_5_rgb_hi = 0;
	if (dsi->info.video_data_type == TEGRA_DSI_VIDEO_TYPE_COMMAND_MODE)
		pkt_seq = dsi_pkt_seq_cmd_mode;
	else {
		switch (dsi->info.video_burst_mode) {
		case TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED:
		case TEGRA_DSI_VIDEO_BURST_MODE_LOW_SPEED:
		case TEGRA_DSI_VIDEO_BURST_MODE_MEDIUM_SPEED:
		case TEGRA_DSI_VIDEO_BURST_MODE_FAST_SPEED:
		case TEGRA_DSI_VIDEO_BURST_MODE_FASTEST_SPEED:
			/* Burst tables carry the pixel packet in the LO
			 * half; optionally without EOT packets. */
			pkt_seq_3_5_rgb_lo =
					DSI_PKT_SEQ_3_LO_PKT_32_ID(rgb_info);
			if (!dsi->info.no_pkt_seq_eot)
				pkt_seq = dsi_pkt_seq_video_burst;
			else
				pkt_seq = dsi_pkt_seq_video_burst_no_eot;
			break;
		case TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END:
			/* Sync-end table carries the pixel packet in the
			 * HI half instead. */
			pkt_seq_3_5_rgb_hi =
					DSI_PKT_SEQ_3_HI_PKT_34_ID(rgb_info);
			pkt_seq = dsi_pkt_seq_video_non_burst_syne;
			break;
		case TEGRA_DSI_VIDEO_NONE_BURST_MODE:
		default:
			pkt_seq_3_5_rgb_lo =
					DSI_PKT_SEQ_3_LO_PKT_32_ID(rgb_info);
			pkt_seq = dsi_pkt_seq_video_non_burst;
			break;
		}
	}

	/* Write the whole sequencer table, injecting the RGB data type ID
	 * into the PKT_SEQ_3/5 registers as selected above. */
	for (i = 0; i < NUMOF_PKT_SEQ; i++) {
		val = pkt_seq[i];
		reg = dsi_pkt_seq_reg[i];
		if ((reg == DSI_PKT_SEQ_3_LO) || (reg == DSI_PKT_SEQ_5_LO))
			val |= pkt_seq_3_5_rgb_lo;
		if ((reg == DSI_PKT_SEQ_3_HI) || (reg == DSI_PKT_SEQ_5_HI))
			val |= pkt_seq_3_5_rgb_hi;
		tegra_dsi_writel(dsi, val, reg);
	}
}
1189 | |||
/* Check the DSI status register for video line-buffer overflow or
 * underflow and, if either is flagged, reset the FIFO status bits. */
static void tegra_dsi_reset_underflow_overflow
				(struct tegra_dc_dsi_data *dsi)
{
	u32 val;

	val = tegra_dsi_readl(dsi, DSI_STATUS);
	val &= (DSI_STATUS_LB_OVERFLOW(0x1) | DSI_STATUS_LB_UNDERFLOW(0x1));
	if (val) {
		if (val & DSI_STATUS_LB_OVERFLOW(0x1))
			dev_warn(&dsi->dc->ndev->dev,
				"dsi: video fifo overflow. Resetting flag\n");
		if (val & DSI_STATUS_LB_UNDERFLOW(0x1))
			dev_warn(&dsi->dc->ndev->dev,
				"dsi: video fifo underflow. Resetting flag\n");
		/* Pulse the FIFO stat reset bit to clear the condition. */
		val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
		val |= DSI_HOST_CONTROL_FIFO_STAT_RESET(0x1);
		tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
		udelay(5);
	}
}
1210 | |||
/* Immediately stop the display controller from streaming to DSI:
 * put DC in STOP mode, clear the DSI window-enable bit, and latch the
 * changes with an update + activation request. */
static void tegra_dsi_stop_dc_stream(struct tegra_dc *dc,
					struct tegra_dc_dsi_data *dsi)
{
	tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
	tegra_dc_writel(dc, 0, DC_DISP_DISP_WIN_OPTIONS);
	tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ , DC_CMD_STATE_CONTROL);

	dsi->status.dc_stream = DSI_DC_STREAM_DISABLE;
}
1221 | |||
/* Stop the DC stream and then wait (up to two frame periods) for the
 * frame-end interrupt so the stop lands on a frame boundary; finally
 * clear any FIFO overflow/underflow left behind. */
static void tegra_dsi_stop_dc_stream_at_frame_end(struct tegra_dc *dc,
						struct tegra_dc_dsi_data *dsi)
{
	int val;
	long timeout;
	u32 frame_period = DIV_ROUND_UP(S_TO_MS(1), dsi->info.refresh_rate);

	/* stop dc */
	tegra_dsi_stop_dc_stream(dc, dsi);

	/* enable frame end interrupt */
	val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
	val |= FRAME_END_INT;
	tegra_dc_writel(dc, val, DC_CMD_INT_MASK);

	/* wait for frame_end completion.
	 * timeout is 2 frame duration to accommodate for
	 * internal delay.
	 */
	timeout = wait_for_completion_interruptible_timeout(
			&dc->frame_end_complete,
			msecs_to_jiffies(2 * frame_period));

	/* disable frame end interrupt */
	val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
	val &= ~FRAME_END_INT;
	tegra_dc_writel(dc, val, DC_CMD_INT_MASK);

	/* timeout == 0 means the completion never fired; warn but carry
	 * on — the stream was already commanded to stop. */
	if (timeout == 0)
		dev_warn(&dc->ndev->dev,
			"DC doesn't stop at end of frame.\n");

	tegra_dsi_reset_underflow_overflow(dsi);
}
1256 | |||
/* Start the display controller streaming to DSI.  Enables the DSI
 * output window and power partitions, then arms either one-shot
 * (tearing-effect driven) or continuous display mode. */
static void tegra_dsi_start_dc_stream(struct tegra_dc *dc,
					struct tegra_dc_dsi_data *dsi)
{
	u32 val;

	tegra_dc_writel(dc, DSI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);

	/* TODO: clean up */
	tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
			PW4_ENABLE | PM0_ENABLE | PM1_ENABLE,
			DC_CMD_DISPLAY_POWER_CONTROL);

	/* Configure one-shot mode or continuous mode */
	if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) {
		/* disable LSPI/LCD_DE output */
		val = PIN_OUTPUT_LSPI_OUTPUT_DIS;
		tegra_dc_writel(dc, val, DC_COM_PIN_OUTPUT_ENABLE3);

		/* enable MSF & set MSF polarity */
		val = MSF_ENABLE | MSF_LSPI;
		if (!dsi->info.te_polarity_low)
			val |= MSF_POLARITY_HIGH;
		else
			val |= MSF_POLARITY_LOW;
		tegra_dc_writel(dc, val, DC_CMD_DISPLAY_COMMAND_OPTION0);

		/* set non-continuous mode; NC_HOST_TRIG kicks off the
		 * first frame immediately. */
		tegra_dc_writel(dc, DISP_CTRL_MODE_NC_DISPLAY,
						DC_CMD_DISPLAY_COMMAND);
		tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
		tegra_dc_writel(dc, GENERAL_ACT_REQ | NC_HOST_TRIG,
						DC_CMD_STATE_CONTROL);
	} else {
		/* set continuous mode */
		tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY,
						DC_CMD_DISPLAY_COMMAND);
		tegra_dc_writel(dc, GENERAL_UPDATE, DC_CMD_STATE_CONTROL);
		tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
	}

	dsi->status.dc_stream = DSI_DC_STREAM_ENABLE;
}
1299 | |||
/* Program the DC pixel/shift clock divider for the DSI link. */
static void tegra_dsi_set_dc_clk(struct tegra_dc *dc,
				struct tegra_dc_dsi_data *dsi)
{
	u32 shift_clk_div_register;
	u32 val;

	/* Get the corresponding register value of shift_clk_div.
	 * Register encoding: value = 2*div - 2 (i.e. (div-1)*2). */
	shift_clk_div_register = dsi->shift_clk_div * 2 - 2;

#ifndef CONFIG_TEGRA_SILICON_PLATFORM
	/* Fixed divider on simulation/FPGA platforms. */
	shift_clk_div_register = 1;
#endif

	/* TODO: find out if PCD3 option is required */
	val = PIXEL_CLK_DIVIDER_PCD1 |
		SHIFT_CLK_DIVIDER(shift_clk_div_register);
	tegra_dc_writel(dc, val, DC_DISP_DISP_CLOCK_CONTROL);
}
1318 | |||
/* Set the DSI controller clock to @clk (kHz), derive the DC pixel clock
 * and one-shot delay from it, and enable/un-reset the DSI clock on
 * first use.  Updates current_dsi_clk_khz / current_bit_clk_ns. */
static void tegra_dsi_set_dsi_clk(struct tegra_dc *dc,
			struct tegra_dc_dsi_data *dsi, u32 clk)
{
	u32 rm;

	/* Round down to a whole MHz (drop the sub-MHz remainder). */
	rm = clk % 1000;
	if (rm != 0)
		clk -= rm;

	/* Set up pixel clock */
	dc->shift_clk_div = dsi->shift_clk_div;
	dc->mode.pclk = (clk * 1000) / dsi->shift_clk_div;
	/* TODO: Define one shot work delay in board file. */
	/* Since for one-shot mode, refresh rate is usually set larger than
	 * expected refresh rate, it needs at least 3 frame period. Less
	 * delay one shot work is, more powering saving we have. */
	dc->one_shot_delay_ms = 4 *
			DIV_ROUND_UP(S_TO_MS(1), dsi->info.refresh_rate);

	/* Enable DSI clock */
	tegra_dc_setup_clk(dc, dsi->dsi_clk);
	if (!dsi->clk_ref) {
		/* First enable: take the module out of reset as well. */
		dsi->clk_ref = true;
		clk_enable(dsi->dsi_clk);
		tegra_periph_reset_deassert(dsi->dsi_clk);
	}
	/* Bit clock period in ns; the *2 reflects DDR signalling on the
	 * link — TODO confirm against TRM. */
	dsi->current_dsi_clk_khz = clk_get_rate(dsi->dsi_clk) / 1000;
	dsi->current_bit_clk_ns = 1000*1000 / (dsi->current_dsi_clk_khz * 2);
}
1349 | |||
/* Enable the high-speed clock lane output: continuous clock if the
 * board requests it, otherwise clock only during transmissions, and
 * switch host transfers to high-speed. */
static void tegra_dsi_hs_clk_out_enable(struct tegra_dc_dsi_data *dsi)
{
	u32 val;

	val = tegra_dsi_readl(dsi, DSI_CONTROL);
	val &= ~DSI_CONTROL_HS_CLK_CTRL(1);

	if (dsi->info.video_clock_mode == TEGRA_DSI_VIDEO_CLOCK_CONTINUOUS) {
		val |= DSI_CONTROL_HS_CLK_CTRL(CONTINUOUS);
		dsi->status.clk_mode = DSI_PHYCLK_CONTINUOUS;
	} else {
		val |= DSI_CONTROL_HS_CLK_CTRL(TX_ONLY);
		dsi->status.clk_mode = DSI_PHYCLK_TX_ONLY;
	}
	tegra_dsi_writel(dsi, val, DSI_CONTROL);

	/* Host transactions now run in high-speed mode. */
	val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
	val &= ~DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(1);
	val |= DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(TEGRA_DSI_HIGH);
	tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);

	dsi->status.clk_out = DSI_PHYCLK_OUT_EN;
}
1373 | |||
/* Enable the HS clock lane but keep host transactions in low-power
 * mode (overrides the HIGH_SPEED_TRANS bit that
 * tegra_dsi_hs_clk_out_enable() just set). */
static void tegra_dsi_hs_clk_out_enable_in_lp(struct tegra_dc_dsi_data *dsi)
{
	u32 val;
	tegra_dsi_hs_clk_out_enable(dsi);

	val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
	val &= ~DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(1);
	val |= DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(TEGRA_DSI_LOW);
	tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);
}
1384 | |||
/* Disable the high-speed clock lane output.  The DSI interface is
 * power-cycled around the control change (disable, reprogram, enable),
 * with stabilization delays after each power toggle. */
static void tegra_dsi_hs_clk_out_disable(struct tegra_dc *dc,
						struct tegra_dc_dsi_data *dsi)
{
	u32 val;

	/* Must not change the clock lane while DC is streaming. */
	if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
		tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);

	tegra_dsi_writel(dsi, TEGRA_DSI_DISABLE, DSI_POWER_CONTROL);
	/* stabilization delay */
	udelay(300);

	/* Drop host transactions back to low-power mode. */
	val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL);
	val &= ~DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(1);
	val |= DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(TEGRA_DSI_LOW);
	tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL);

	tegra_dsi_writel(dsi, TEGRA_DSI_ENABLE, DSI_POWER_CONTROL);
	/* stabilization delay */
	udelay(300);

	dsi->status.clk_mode = DSI_PHYCLK_NOT_INIT;
	dsi->status.clk_out = DSI_PHYCLK_OUT_DIS;
}
1409 | |||
/* Program the control registers for host-driven low-power (LP) command
 * operation: host FIFO threshold, host-driven control bits, and
 * low-speed host transactions. */
static void tegra_dsi_set_control_reg_lp(struct tegra_dc_dsi_data *dsi)
{
	u32 dsi_control;
	u32 host_dsi_control;
	u32 max_threshold;

	dsi_control = dsi->dsi_control_val | DSI_CTRL_HOST_DRIVEN;
	host_dsi_control = HOST_DSI_CTRL_COMMON |
			HOST_DSI_CTRL_HOST_DRIVEN |
			DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(TEGRA_DSI_LOW);
	max_threshold = DSI_MAX_THRESHOLD_MAX_THRESHOLD(DSI_HOST_FIFO_DEPTH);

	tegra_dsi_writel(dsi, max_threshold, DSI_MAX_THRESHOLD);
	tegra_dsi_writel(dsi, dsi_control, DSI_CONTROL);
	tegra_dsi_writel(dsi, host_dsi_control, DSI_HOST_DSI_CONTROL);

	/* Burst and video-type state must be re-derived on the next
	 * switch back to HS mode. */
	dsi->status.driven = DSI_DRIVEN_MODE_HOST;
	dsi->status.clk_burst = DSI_CLK_BURST_NOT_INIT;
	dsi->status.vtype = DSI_VIDEO_TYPE_NOT_INIT;
}
1430 | |||
/* Program the control registers for high-speed operation.  Chooses
 * host- vs DC-driven control bits and FIFO depth, and for command-mode
 * panels also programs the DCS write-memory commands used to push
 * frame data. */
static void tegra_dsi_set_control_reg_hs(struct tegra_dc_dsi_data *dsi)
{
	u32 dsi_control;
	u32 host_dsi_control;
	u32 max_threshold;
	u32 dcs_cmd;

	dsi_control = dsi->dsi_control_val;
	host_dsi_control = HOST_DSI_CTRL_COMMON;
	max_threshold = 0;
	dcs_cmd = 0;

	if (dsi->driven_mode == TEGRA_DSI_DRIVEN_BY_HOST) {
		dsi_control |= DSI_CTRL_HOST_DRIVEN;
		host_dsi_control |= HOST_DSI_CTRL_HOST_DRIVEN;
		max_threshold =
			DSI_MAX_THRESHOLD_MAX_THRESHOLD(DSI_HOST_FIFO_DEPTH);
		dsi->status.driven = DSI_DRIVEN_MODE_HOST;
	} else {
		dsi_control |= DSI_CTRL_DC_DRIVEN;
		host_dsi_control |= HOST_DSI_CTRL_DC_DRIVEN;
		max_threshold =
			DSI_MAX_THRESHOLD_MAX_THRESHOLD(DSI_VIDEO_FIFO_DEPTH);
		dsi->status.driven = DSI_DRIVEN_MODE_DC;
	}

	if (dsi->info.video_data_type == TEGRA_DSI_VIDEO_TYPE_COMMAND_MODE) {
		dsi_control |= DSI_CTRL_CMD_MODE;
		/* First write of a frame uses WRITE_MEMORY_START, the
		 * rest use WRITE_MEMORY_CONTINUE. */
		dcs_cmd = DSI_DCS_CMDS_LT5_DCS_CMD(DSI_WRITE_MEMORY_START)|
			DSI_DCS_CMDS_LT3_DCS_CMD(DSI_WRITE_MEMORY_CONTINUE);
		dsi->status.vtype = DSI_VIDEO_TYPE_CMD_MODE;

	} else {
		dsi_control |= DSI_CTRL_VIDEO_MODE;
		dsi->status.vtype = DSI_VIDEO_TYPE_VIDEO_MODE;
	}

	tegra_dsi_writel(dsi, max_threshold, DSI_MAX_THRESHOLD);
	tegra_dsi_writel(dsi, dcs_cmd, DSI_DCS_CMDS);
	tegra_dsi_writel(dsi, dsi_control, DSI_CONTROL);
	tegra_dsi_writel(dsi, host_dsi_control, DSI_HOST_DSI_CONTROL);
}
1473 | |||
/* Calibrate the DSI pads and the shared VI/CSI MIPI pads.  The numeric
 * values are hardware calibration settings; their meaning is not
 * derivable from this file — consult the Tegra TRM before changing. */
static void tegra_dsi_pad_calibration(struct tegra_dc_dsi_data *dsi)
{
	u32 val;

	val = DSI_PAD_CONTROL_PAD_LPUPADJ(0x1) |
		DSI_PAD_CONTROL_PAD_LPDNADJ(0x1) |
		DSI_PAD_CONTROL_PAD_PREEMP_EN(0x1) |
		DSI_PAD_CONTROL_PAD_SLEWDNADJ(0x6) |
		DSI_PAD_CONTROL_PAD_SLEWUPADJ(0x6);
	/* Pad power-down / pull-down state depends on whether we are in
	 * ultra-low-power mode. */
	if (!dsi->ulpm) {
		val |= DSI_PAD_CONTROL_PAD_PDIO(0) |
			DSI_PAD_CONTROL_PAD_PDIO_CLK(0) |
			DSI_PAD_CONTROL_PAD_PULLDN_ENAB(TEGRA_DSI_DISABLE);
	} else {
		val |= DSI_PAD_CONTROL_PAD_PDIO(0x3) |
			DSI_PAD_CONTROL_PAD_PDIO_CLK(0x1) |
			DSI_PAD_CONTROL_PAD_PULLDN_ENAB(TEGRA_DSI_ENABLE);
	}
	tegra_dsi_writel(dsi, val, DSI_PAD_CONTROL);

	/* MIPI calibration values programmed via the VI/CSI block. */
	val = MIPI_CAL_TERMOSA(0x4);
	tegra_vi_csi_writel(val, CSI_CILA_MIPI_CAL_CONFIG_0);

	val = MIPI_CAL_TERMOSB(0x4);
	tegra_vi_csi_writel(val, CSI_CILB_MIPI_CAL_CONFIG_0);

	val = MIPI_CAL_HSPUOSD(0x3) | MIPI_CAL_HSPDOSD(0x4);
	tegra_vi_csi_writel(val, CSI_DSI_MIPI_CAL_CONFIG);

	val = PAD_DRIV_DN_REF(0x5) | PAD_DRIV_UP_REF(0x7);
	tegra_vi_csi_writel(val, CSI_MIPIBIAS_PAD_CONFIG);

	val = PAD_CIL_PDVREG(0x0);
	tegra_vi_csi_writel(val, CSI_CIL_PAD_CONFIG);
}
1509 | |||
/* One-time hardware initialization of the DSI controller: power-cycle
 * the interface, set the LP clock, program LP PHY timing, zero the
 * init register set, calibrate pads, and reset all software status to
 * "not initialized".  Always returns 0. */
static int tegra_dsi_init_hw(struct tegra_dc *dc,
				struct tegra_dc_dsi_data *dsi)
{
	u32 i;

	tegra_dsi_writel(dsi,
		DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_DISABLE),
		DSI_POWER_CONTROL);
	/* stabilization delay */
	udelay(300);

	tegra_dsi_set_dsi_clk(dc, dsi, dsi->target_lp_clk_khz);
	if (dsi->info.dsi_instance) {
		/* TODO:Set the misc register*/
	}

	/* TODO: only need to change the timing for bta */
	tegra_dsi_set_phy_timing(dsi, DSI_LPHS_IN_LP_MODE);

	if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
		tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);

	/* Initializing DSI registers */
	for (i = 0; i < ARRAY_SIZE(init_reg); i++)
		tegra_dsi_writel(dsi, 0, init_reg[i]);

	tegra_dsi_writel(dsi, dsi->dsi_control_val, DSI_CONTROL);

	tegra_dsi_pad_calibration(dsi);

	tegra_dsi_writel(dsi,
		DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_ENABLE),
		DSI_POWER_CONTROL);
	/* stabilization delay */
	udelay(300);

	/* All derived state must be recomputed by the mode-switch paths. */
	dsi->status.init = DSI_MODULE_INIT;
	dsi->status.lphs = DSI_LPHS_NOT_INIT;
	dsi->status.vtype = DSI_VIDEO_TYPE_NOT_INIT;
	dsi->status.driven = DSI_DRIVEN_MODE_NOT_INIT;
	dsi->status.clk_out = DSI_PHYCLK_OUT_DIS;
	dsi->status.clk_mode = DSI_PHYCLK_NOT_INIT;
	dsi->status.clk_burst = DSI_CLK_BURST_NOT_INIT;
	dsi->status.dc_stream = DSI_DC_STREAM_DISABLE;
	dsi->status.lp_op = DSI_LP_OP_NOT_INIT;

	return 0;
}
1558 | |||
/* Switch the DSI link into low-power (LP) mode for the given operation
 * (@lp_op: read or write).  No-op if already in LP mode with the same
 * operation.  Returns 0 on success, -EPERM if the module is not
 * initialized. */
static int tegra_dsi_set_to_lp_mode(struct tegra_dc *dc,
			struct tegra_dc_dsi_data *dsi, u8 lp_op)
{
	int err;

	if (dsi->status.init != DSI_MODULE_INIT) {
		err = -EPERM;
		goto fail;
	}

	if (dsi->status.lphs == DSI_LPHS_IN_LP_MODE &&
			dsi->status.lp_op == lp_op)
		goto success;

	/* DC must not be streaming during the mode change. */
	if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
		tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);

	/* disable/enable hs clk according to enable_hs_clock_on_lp_cmd_mode */
	if ((dsi->status.clk_out == DSI_PHYCLK_OUT_EN) &&
		(!dsi->info.enable_hs_clock_on_lp_cmd_mode))
		tegra_dsi_hs_clk_out_disable(dc, dsi);

	/* Reclock only if the LP target rate differs; timeouts depend on
	 * the link clock, so they are reprogrammed together. */
	dsi->target_lp_clk_khz = tegra_dsi_get_lp_clk_rate(dsi, lp_op);
	if (dsi->current_dsi_clk_khz != dsi->target_lp_clk_khz) {
		tegra_dsi_set_dsi_clk(dc, dsi, dsi->target_lp_clk_khz);
		tegra_dsi_set_timeout(dsi);
	}

	tegra_dsi_set_phy_timing(dsi, DSI_LPHS_IN_LP_MODE);

	tegra_dsi_set_control_reg_lp(dsi);

	if ((dsi->status.clk_out == DSI_PHYCLK_OUT_DIS) &&
		(dsi->info.enable_hs_clock_on_lp_cmd_mode))
		tegra_dsi_hs_clk_out_enable_in_lp(dsi);

	dsi->status.lphs = DSI_LPHS_IN_LP_MODE;
	dsi->status.lp_op = lp_op;
success:
	err = 0;
fail:
	return err;
}
1602 | |||
/* Switch the DSI link into high-speed (HS) mode.  No-op if already in
 * HS mode.  For DC-driven links also programs packet sequence, packet
 * lengths, SOL delay and the DC clock.  Returns 0 on success, -EPERM
 * if the module is not initialized. */
static int tegra_dsi_set_to_hs_mode(struct tegra_dc *dc,
					struct tegra_dc_dsi_data *dsi)
{
	int err;

	if (dsi->status.init != DSI_MODULE_INIT) {
		err = -EPERM;
		goto fail;
	}

	if (dsi->status.lphs == DSI_LPHS_IN_HS_MODE)
		goto success;

	/* DC must not be streaming during the mode change. */
	if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
		tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);

	if ((dsi->status.clk_out == DSI_PHYCLK_OUT_EN) &&
		(!dsi->info.enable_hs_clock_on_lp_cmd_mode))
		tegra_dsi_hs_clk_out_disable(dc, dsi);

	/* Reclock only if the HS target rate differs; timeouts depend on
	 * the link clock, so they are reprogrammed together. */
	if (dsi->current_dsi_clk_khz != dsi->target_hs_clk_khz) {
		tegra_dsi_set_dsi_clk(dc, dsi, dsi->target_hs_clk_khz);
		tegra_dsi_set_timeout(dsi);
	}

	tegra_dsi_set_phy_timing(dsi, DSI_LPHS_IN_HS_MODE);

	if (dsi->driven_mode == TEGRA_DSI_DRIVEN_BY_DC) {
		tegra_dsi_set_pkt_seq(dc, dsi);
		tegra_dsi_set_pkt_length(dc, dsi);
		tegra_dsi_set_sol_delay(dc, dsi);
		tegra_dsi_set_dc_clk(dc, dsi);
	}

	tegra_dsi_set_control_reg_hs(dsi);

	if (dsi->status.clk_out == DSI_PHYCLK_OUT_DIS ||
		dsi->info.enable_hs_clock_on_lp_cmd_mode)
		tegra_dsi_hs_clk_out_enable(dsi);

	dsi->status.lphs = DSI_LPHS_IN_HS_MODE;
success:
	/* LP operation state is meaningless in HS mode. */
	dsi->status.lp_op = DSI_LP_OP_NOT_INIT;
	err = 0;
fail:
	return err;
}
1650 | |||
1651 | static bool tegra_dsi_write_busy(struct tegra_dc_dsi_data *dsi) | ||
1652 | { | ||
1653 | u32 timeout = 0; | ||
1654 | bool retVal = true; | ||
1655 | |||
1656 | while (timeout <= DSI_MAX_COMMAND_DELAY_USEC) { | ||
1657 | if (!(DSI_TRIGGER_HOST_TRIGGER(0x1) & | ||
1658 | tegra_dsi_readl(dsi, DSI_TRIGGER))) { | ||
1659 | retVal = false; | ||
1660 | break; | ||
1661 | } | ||
1662 | udelay(DSI_COMMAND_DELAY_STEPS_USEC); | ||
1663 | timeout += DSI_COMMAND_DELAY_STEPS_USEC; | ||
1664 | } | ||
1665 | |||
1666 | return retVal; | ||
1667 | } | ||
1668 | |||
1669 | static bool tegra_dsi_read_busy(struct tegra_dc_dsi_data *dsi) | ||
1670 | { | ||
1671 | u32 timeout = 0; | ||
1672 | bool retVal = true; | ||
1673 | |||
1674 | while (timeout < DSI_STATUS_POLLING_DURATION_USEC) { | ||
1675 | if (!(DSI_HOST_DSI_CONTROL_IMM_BTA(0x1) & | ||
1676 | tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL))) { | ||
1677 | retVal = false; | ||
1678 | break; | ||
1679 | } | ||
1680 | udelay(DSI_STATUS_POLLING_DELAY_USEC); | ||
1681 | timeout += DSI_STATUS_POLLING_DELAY_USEC; | ||
1682 | } | ||
1683 | |||
1684 | return retVal; | ||
1685 | } | ||
1686 | |||
1687 | static bool tegra_dsi_host_busy(struct tegra_dc_dsi_data *dsi) | ||
1688 | { | ||
1689 | int err = 0; | ||
1690 | |||
1691 | if (tegra_dsi_write_busy(dsi)) { | ||
1692 | err = -EBUSY; | ||
1693 | dev_err(&dsi->dc->ndev->dev, | ||
1694 | "DSI trigger bit already set\n"); | ||
1695 | goto fail; | ||
1696 | } | ||
1697 | |||
1698 | if (tegra_dsi_read_busy(dsi)) { | ||
1699 | err = -EBUSY; | ||
1700 | dev_err(&dsi->dc->ndev->dev, | ||
1701 | "DSI immediate bta bit already set\n"); | ||
1702 | goto fail; | ||
1703 | } | ||
1704 | fail: | ||
1705 | return (err < 0 ? true : false); | ||
1706 | } | ||
1707 | |||
/* Soft-reset the DSI interface by toggling the legacy enable bit, with
 * stabilization delays, then manually clear a stale host trigger bit
 * that the hardware leaves set when the host FIFO is empty. */
static void tegra_dsi_soft_reset(struct tegra_dc_dsi_data *dsi)
{
	u32 trigger;
	u32 status;

	tegra_dsi_writel(dsi,
		DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_DISABLE),
		DSI_POWER_CONTROL);
	/* stabilization delay */
	udelay(300);

	tegra_dsi_writel(dsi,
		DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_ENABLE),
		DSI_POWER_CONTROL);
	/* stabilization delay */
	udelay(300);

	/* dsi HW does not clear host trigger bit automatically
	 * on dsi interface disable if host fifo is empty
	 */
	trigger = tegra_dsi_readl(dsi, DSI_TRIGGER);
	status = tegra_dsi_readl(dsi, DSI_STATUS);
	if (trigger & DSI_TRIGGER_HOST_TRIGGER(0x1) &&
		status & DSI_STATUS_IDLE(0x1)) {
		trigger &= ~(DSI_TRIGGER_HOST_TRIGGER(0x1));
		tegra_dsi_writel(dsi, trigger, DSI_TRIGGER);
	}
}
1736 | |||
1737 | static void tegra_dsi_reset_read_count(struct tegra_dc_dsi_data *dsi) | ||
1738 | { | ||
1739 | u32 val; | ||
1740 | |||
1741 | val = tegra_dsi_readl(dsi, DSI_STATUS); | ||
1742 | val &= DSI_STATUS_RD_FIFO_COUNT(0x1f); | ||
1743 | if (val) { | ||
1744 | dev_warn(&dsi->dc->ndev->dev, | ||
1745 | "DSI read count not zero, resetting\n"); | ||
1746 | tegra_dsi_soft_reset(dsi); | ||
1747 | } | ||
1748 | } | ||
1749 | |||
/* Snapshot the current DSI status and switch the link to host-driven
 * command mode for a host transaction.  HS command mode is kept if the
 * panel supports it, otherwise the link drops to LP with @lp_op.
 * Returns the saved status (caller passes it to
 * tegra_dsi_restore_state(), which frees it) or an ERR_PTR. */
static struct dsi_status *tegra_dsi_save_state_switch_to_host_cmd_mode(
					struct tegra_dc_dsi_data *dsi,
					struct tegra_dc *dc,
					u8 lp_op)
{
	struct dsi_status *init_status;
	int err;

	init_status = kzalloc(sizeof(*init_status), GFP_KERNEL);
	if (!init_status)
		return ERR_PTR(-ENOMEM);

	/* Struct copy: snapshot of the pre-transaction state. */
	*init_status = dsi->status;

	if (dsi->status.lphs == DSI_LPHS_IN_HS_MODE) {
		if (dsi->status.driven == DSI_DRIVEN_MODE_DC) {
			if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE)
				tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi);
			dsi->driven_mode = TEGRA_DSI_DRIVEN_BY_HOST;
			if (dsi->info.hs_cmd_mode_supported) {
				err = tegra_dsi_set_to_hs_mode(dc, dsi);
				if (err < 0) {
					dev_err(&dc->ndev->dev,
					"Switch to HS host mode failed\n");
					goto fail;
				}
			}
		}
		/* Panels without HS command support must take the link
		 * down to LP for the transaction. */
		if (!dsi->info.hs_cmd_mode_supported) {
			err =
			tegra_dsi_set_to_lp_mode(dc, dsi, lp_op);
			if (err < 0) {
				dev_err(&dc->ndev->dev,
				"DSI failed to go to LP mode\n");
				goto fail;
			}
		}
	} else if (dsi->status.lphs == DSI_LPHS_IN_LP_MODE) {
		/* Already LP: only re-enter if the operation differs. */
		if (dsi->status.lp_op != lp_op) {
			err = tegra_dsi_set_to_lp_mode(dc, dsi, lp_op);
			if (err < 0) {
				dev_err(&dc->ndev->dev,
				"DSI failed to go to LP mode\n");
				goto fail;
			}
		}
	}

	return init_status;
fail:
	kfree(init_status);
	return ERR_PTR(err);
}
1803 | |||
/* Prepare the DSI link for a host-initiated transaction: verify the
 * module is initialized and not in ULPM, clear a busy host (one soft
 * reset attempt), reset the read FIFO for reads, ensure an LP mode is
 * established, and save the current state for later restore.
 * Returns the saved status (owned by the caller via
 * tegra_dsi_restore_state()) or an ERR_PTR on failure. */
static struct dsi_status *tegra_dsi_prepare_host_transmission(
				struct tegra_dc *dc,
				struct tegra_dc_dsi_data *dsi,
				u8 lp_op)
{
	int err = 0;
	struct dsi_status *init_status;

	if (dsi->status.init != DSI_MODULE_INIT ||
		dsi->ulpm) {
		err = -EPERM;
		goto fail;
	}

	/* One recovery attempt: soft reset, then re-check. */
	if (tegra_dsi_host_busy(dsi)) {
		tegra_dsi_soft_reset(dsi);
		if (tegra_dsi_host_busy(dsi)) {
			err = -EBUSY;
			dev_err(&dc->ndev->dev, "DSI host busy\n");
			goto fail;
		}
	}

	if (lp_op == DSI_LP_OP_READ)
		tegra_dsi_reset_read_count(dsi);

	if (dsi->status.lphs == DSI_LPHS_NOT_INIT) {
		err = tegra_dsi_set_to_lp_mode(dc, dsi, lp_op);
		if (err < 0) {
			dev_err(&dc->ndev->dev, "Failed to config LP write\n");
			goto fail;
		}
	}

	init_status = tegra_dsi_save_state_switch_to_host_cmd_mode
					(dsi, dc, lp_op);
	if (IS_ERR_OR_NULL(init_status)) {
		err = PTR_ERR(init_status);
		dev_err(&dc->ndev->dev, "DSI state saving failed\n");
		goto fail;
	}

	return init_status;
fail:
	return ERR_PTR(err);
}
1850 | |||
/* Restore the DSI link to the state captured in @init_status before a
 * host transaction: return to DC-driven and/or HS mode and restart the
 * DC stream as needed.  Frees @init_status in all cases.  Returns 0 on
 * success or a negative error from the mode switch. */
static int tegra_dsi_restore_state(struct tegra_dc *dc,
				struct tegra_dc_dsi_data *dsi,
				struct dsi_status *init_status)
{
	bool switch_back_to_dc_mode = false;
	bool switch_back_to_hs_mode = false;
	bool restart_dc_stream;
	int err = 0;

	/* Compare current state against the snapshot to decide which
	 * transitions must be undone. */
	switch_back_to_dc_mode = (dsi->status.driven ==
				DSI_DRIVEN_MODE_HOST &&
				init_status->driven ==
				DSI_DRIVEN_MODE_DC);
	switch_back_to_hs_mode = (dsi->status.lphs ==
				DSI_LPHS_IN_LP_MODE &&
				init_status->lphs ==
				DSI_LPHS_IN_HS_MODE);
	restart_dc_stream = (dsi->status.dc_stream ==
				DSI_DC_STREAM_DISABLE &&
				init_status->dc_stream ==
				DSI_DC_STREAM_ENABLE);

	/* Was LP before and is LP now: only the LP operation (read vs
	 * write) may need restoring. */
	if (dsi->status.lphs == DSI_LPHS_IN_LP_MODE &&
		init_status->lphs == DSI_LPHS_IN_LP_MODE) {
		if (dsi->status.lp_op != init_status->lp_op) {
			err =
			tegra_dsi_set_to_lp_mode(dc, dsi, init_status->lp_op);
			if (err < 0) {
				dev_err(&dc->ndev->dev,
					"Failed to config LP mode\n");
				goto fail;
			}
		}
		goto success;
	}

	if (switch_back_to_dc_mode)
		dsi->driven_mode = TEGRA_DSI_DRIVEN_BY_DC;
	if (switch_back_to_dc_mode || switch_back_to_hs_mode) {
		err = tegra_dsi_set_to_hs_mode(dc, dsi);
		if (err < 0) {
			dev_err(&dc->ndev->dev, "Failed to config HS mode\n");
			goto fail;
		}
	}
	if (restart_dc_stream)
		tegra_dsi_start_dc_stream(dc, dsi);

	/* Both paths converge here; the snapshot is always freed. */
success:
fail:
	kfree(init_status);
	return err;
}
1904 | |||
/*
 * Fire the host transmission trigger for a packet already loaded into the
 * write FIFO, then wait for completion.
 *
 * Completion is awaited either through a host1x sync point
 * (DSI_USE_SYNC_POINTS) or by polling the write-busy status.
 * Returns 0 on success, -EBUSY if a trigger is still pending or the
 * write times out, or a negative errno from the syncpt wait.
 */
static int tegra_dsi_host_trigger(struct tegra_dc_dsi_data *dsi)
{
	int status = 0;

	/* A non-zero TRIGGER register means the previous host transfer has
	 * not finished yet; refuse to stack a second trigger. */
	if (tegra_dsi_readl(dsi, DSI_TRIGGER)) {
		status = -EBUSY;
		goto fail;
	}

	tegra_dsi_writel(dsi,
		DSI_TRIGGER_HOST_TRIGGER(TEGRA_DSI_ENABLE), DSI_TRIGGER);

#if DSI_USE_SYNC_POINTS
	status = tegra_dsi_syncpt(dsi);
	if (status < 0) {
		dev_err(&dsi->dc->ndev->dev,
			"DSI syncpt for host trigger failed\n");
		goto fail;
	}
#else
	if (tegra_dsi_write_busy(dsi)) {
		status = -EBUSY;
		dev_err(&dsi->dc->ndev->dev,
			"Timeout waiting on write completion\n");
	}
#endif

fail:
	return status;
}
1935 | |||
1936 | static int _tegra_dsi_write_data(struct tegra_dc_dsi_data *dsi, | ||
1937 | u8 *pdata, u8 data_id, u16 data_len) | ||
1938 | { | ||
1939 | u8 virtual_channel; | ||
1940 | u8 *pval; | ||
1941 | u32 val; | ||
1942 | int err; | ||
1943 | |||
1944 | err = 0; | ||
1945 | |||
1946 | virtual_channel = dsi->info.virtual_channel << | ||
1947 | DSI_VIR_CHANNEL_BIT_POSITION; | ||
1948 | |||
1949 | /* always use hw for ecc */ | ||
1950 | val = (virtual_channel | data_id) << 0 | | ||
1951 | data_len << 8; | ||
1952 | tegra_dsi_writel(dsi, val, DSI_WR_DATA); | ||
1953 | |||
1954 | /* if pdata != NULL, pkt type is long pkt */ | ||
1955 | if (pdata != NULL) { | ||
1956 | while (data_len) { | ||
1957 | if (data_len >= 4) { | ||
1958 | val = ((u32 *) pdata)[0]; | ||
1959 | data_len -= 4; | ||
1960 | pdata += 4; | ||
1961 | } else { | ||
1962 | val = 0; | ||
1963 | pval = (u8 *) &val; | ||
1964 | do | ||
1965 | *pval++ = *pdata++; | ||
1966 | while (--data_len); | ||
1967 | } | ||
1968 | tegra_dsi_writel(dsi, val, DSI_WR_DATA); | ||
1969 | } | ||
1970 | } | ||
1971 | |||
1972 | err = tegra_dsi_host_trigger(dsi); | ||
1973 | if (err < 0) | ||
1974 | dev_err(&dsi->dc->ndev->dev, "DSI host trigger failed\n"); | ||
1975 | |||
1976 | return err; | ||
1977 | } | ||
1978 | |||
1979 | int tegra_dsi_write_data(struct tegra_dc *dc, | ||
1980 | struct tegra_dc_dsi_data *dsi, | ||
1981 | u8 *pdata, u8 data_id, u16 data_len) | ||
1982 | { | ||
1983 | int err = 0; | ||
1984 | struct dsi_status *init_status; | ||
1985 | |||
1986 | tegra_dc_io_start(dc); | ||
1987 | |||
1988 | init_status = tegra_dsi_prepare_host_transmission( | ||
1989 | dc, dsi, DSI_LP_OP_WRITE); | ||
1990 | if (IS_ERR_OR_NULL(init_status)) { | ||
1991 | err = PTR_ERR(init_status); | ||
1992 | dev_err(&dc->ndev->dev, "DSI host config failed\n"); | ||
1993 | goto fail; | ||
1994 | } | ||
1995 | |||
1996 | err = _tegra_dsi_write_data(dsi, pdata, data_id, data_len); | ||
1997 | fail: | ||
1998 | err = tegra_dsi_restore_state(dc, dsi, init_status); | ||
1999 | if (err < 0) | ||
2000 | dev_err(&dc->ndev->dev, "Failed to restore prev state\n"); | ||
2001 | tegra_dc_io_end(dc); | ||
2002 | return err; | ||
2003 | } | ||
2004 | EXPORT_SYMBOL(tegra_dsi_write_data); | ||
2005 | |||
2006 | static int tegra_dsi_send_panel_cmd(struct tegra_dc *dc, | ||
2007 | struct tegra_dc_dsi_data *dsi, | ||
2008 | struct tegra_dsi_cmd *cmd, | ||
2009 | u32 n_cmd) | ||
2010 | { | ||
2011 | u32 i; | ||
2012 | int err; | ||
2013 | |||
2014 | err = 0; | ||
2015 | for (i = 0; i < n_cmd; i++) { | ||
2016 | struct tegra_dsi_cmd *cur_cmd; | ||
2017 | cur_cmd = &cmd[i]; | ||
2018 | |||
2019 | if (cur_cmd->cmd_type == TEGRA_DSI_DELAY_MS) | ||
2020 | mdelay(cur_cmd->sp_len_dly.delay_ms); | ||
2021 | else { | ||
2022 | err = tegra_dsi_write_data(dc, dsi, | ||
2023 | cur_cmd->pdata, | ||
2024 | cur_cmd->data_id, | ||
2025 | cur_cmd->sp_len_dly.data_len); | ||
2026 | if (err < 0) | ||
2027 | break; | ||
2028 | } | ||
2029 | } | ||
2030 | return err; | ||
2031 | } | ||
2032 | |||
2033 | static u8 get_8bit_ecc(u32 header) | ||
2034 | { | ||
2035 | char ecc_parity[24] = { | ||
2036 | 0x07, 0x0b, 0x0d, 0x0e, 0x13, 0x15, 0x16, 0x19, | ||
2037 | 0x1a, 0x1c, 0x23, 0x25, 0x26, 0x29, 0x2a, 0x2c, | ||
2038 | 0x31, 0x32, 0x34, 0x38, 0x1f, 0x2f, 0x37, 0x3b | ||
2039 | }; | ||
2040 | u8 ecc_byte; | ||
2041 | int i; | ||
2042 | |||
2043 | ecc_byte = 0; | ||
2044 | for (i = 0; i < 24; i++) | ||
2045 | ecc_byte ^= ((header >> i) & 1) ? ecc_parity[i] : 0x00; | ||
2046 | |||
2047 | return ecc_byte; | ||
2048 | } | ||
2049 | |||
2050 | /* This function is written to send DCS short write (1 parameter) only. | ||
2051 | * This means the cmd will contain only 1 byte of index and 1 byte of value. | ||
2052 | * The data type ID is fixed at 0x15 and the ECC is calculated based on the | ||
2053 | * data in pdata. | ||
2054 | * The command will be sent by hardware every frame. | ||
2055 | * pdata should contain both the index + value for each cmd. | ||
2056 | * data_len will be the total number of bytes in pdata. | ||
2057 | */ | ||
2058 | int tegra_dsi_send_panel_short_cmd(struct tegra_dc *dc, u8 *pdata, u8 data_len) | ||
2059 | { | ||
2060 | u8 ecc8bits = 0, data_len_orig = 0; | ||
2061 | u32 val = 0, pkthdr = 0; | ||
2062 | int err = 0, count = 0; | ||
2063 | struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc); | ||
2064 | |||
2065 | data_len_orig = data_len; | ||
2066 | if (pdata != NULL) { | ||
2067 | while (data_len) { | ||
2068 | if (data_len >= 2) { | ||
2069 | pkthdr = (CMD_SHORTW | | ||
2070 | (((u16 *)pdata)[0]) << 8 | 0x00 << 24); | ||
2071 | ecc8bits = get_8bit_ecc(pkthdr); | ||
2072 | val = (pkthdr | (ecc8bits << 24)); | ||
2073 | data_len -= 2; | ||
2074 | pdata += 2; | ||
2075 | count++; | ||
2076 | } | ||
2077 | switch (count) { | ||
2078 | case 1: | ||
2079 | tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_0); | ||
2080 | break; | ||
2081 | case 2: | ||
2082 | tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_1); | ||
2083 | break; | ||
2084 | case 3: | ||
2085 | tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_2); | ||
2086 | break; | ||
2087 | case 4: | ||
2088 | tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_3); | ||
2089 | break; | ||
2090 | case 5: | ||
2091 | tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_4); | ||
2092 | break; | ||
2093 | case 6: | ||
2094 | tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_5); | ||
2095 | break; | ||
2096 | case 7: | ||
2097 | tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_6); | ||
2098 | break; | ||
2099 | case 8: | ||
2100 | tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_DATA_7); | ||
2101 | break; | ||
2102 | default: | ||
2103 | err = 1; | ||
2104 | break; | ||
2105 | } | ||
2106 | } | ||
2107 | } | ||
2108 | |||
2109 | val = DSI_INIT_SEQ_CONTROL_DSI_FRAME_INIT_BYTE_COUNT(data_len_orig * 2) | ||
2110 | | DSI_INIT_SEQ_CONTROL_DSI_SEND_INIT_SEQUENCE(1); | ||
2111 | tegra_dsi_writel(dsi, val, DSI_INIT_SEQ_CONTROL); | ||
2112 | |||
2113 | return err; | ||
2114 | } | ||
2115 | EXPORT_SYMBOL(tegra_dsi_send_panel_short_cmd); | ||
2116 | |||
2117 | static int tegra_dsi_bta(struct tegra_dc_dsi_data *dsi) | ||
2118 | { | ||
2119 | u32 val; | ||
2120 | u32 poll_time; | ||
2121 | int err; | ||
2122 | |||
2123 | poll_time = 0; | ||
2124 | err = 0; | ||
2125 | |||
2126 | val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL); | ||
2127 | val |= DSI_HOST_DSI_CONTROL_IMM_BTA(TEGRA_DSI_ENABLE); | ||
2128 | tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL); | ||
2129 | |||
2130 | #if DSI_USE_SYNC_POINTS | ||
2131 | /* FIXME: Workaround for nvhost_syncpt_read */ | ||
2132 | dsi->syncpt_val = nvhost_syncpt_update_min( | ||
2133 | &nvhost_get_host(dsi->dc->ndev)->syncpt, | ||
2134 | dsi->syncpt_id); | ||
2135 | |||
2136 | val = DSI_INCR_SYNCPT_COND(OP_DONE) | | ||
2137 | DSI_INCR_SYNCPT_INDX(dsi->syncpt_id); | ||
2138 | tegra_dsi_writel(dsi, val, DSI_INCR_SYNCPT); | ||
2139 | |||
2140 | /* TODO: Use interrupt rather than polling */ | ||
2141 | err = nvhost_syncpt_wait(&nvhost_get_host(dsi->dc->ndev)->syncpt, | ||
2142 | dsi->syncpt_id, dsi->syncpt_val + 1); | ||
2143 | if (err < 0) | ||
2144 | dev_err(&dsi->dc->ndev->dev, | ||
2145 | "DSI sync point failure\n"); | ||
2146 | else | ||
2147 | (dsi->syncpt_val)++; | ||
2148 | #else | ||
2149 | if (tegra_dsi_read_busy(dsi)) { | ||
2150 | err = -EBUSY; | ||
2151 | dev_err(&dsi->dc->ndev->dev, | ||
2152 | "Timeout wating on read completion\n"); | ||
2153 | } | ||
2154 | #endif | ||
2155 | |||
2156 | return err; | ||
2157 | } | ||
2158 | |||
2159 | static int tegra_dsi_parse_read_response(struct tegra_dc *dc, | ||
2160 | u32 rd_fifo_cnt, u8 *read_fifo) | ||
2161 | { | ||
2162 | int err; | ||
2163 | u32 payload_size; | ||
2164 | |||
2165 | payload_size = 0; | ||
2166 | err = 0; | ||
2167 | |||
2168 | switch (read_fifo[0]) { | ||
2169 | case DSI_ESCAPE_CMD: | ||
2170 | dev_info(&dc->ndev->dev, "escape cmd[0x%x]\n", read_fifo[0]); | ||
2171 | break; | ||
2172 | case DSI_ACK_NO_ERR: | ||
2173 | dev_info(&dc->ndev->dev, | ||
2174 | "Panel ack, no err[0x%x]\n", read_fifo[0]); | ||
2175 | return err; | ||
2176 | default: | ||
2177 | dev_info(&dc->ndev->dev, "Invalid read response\n"); | ||
2178 | break; | ||
2179 | } | ||
2180 | |||
2181 | switch (read_fifo[4] & 0xff) { | ||
2182 | case GEN_LONG_RD_RES: | ||
2183 | /* Fall through */ | ||
2184 | case DCS_LONG_RD_RES: | ||
2185 | payload_size = (read_fifo[5] | | ||
2186 | (read_fifo[6] << 8)) & 0xFFFF; | ||
2187 | dev_info(&dc->ndev->dev, "Long read response Packet\n" | ||
2188 | "payload_size[0x%x]\n", payload_size); | ||
2189 | break; | ||
2190 | case GEN_1_BYTE_SHORT_RD_RES: | ||
2191 | /* Fall through */ | ||
2192 | case DCS_1_BYTE_SHORT_RD_RES: | ||
2193 | payload_size = 1; | ||
2194 | dev_info(&dc->ndev->dev, "Short read response Packet\n" | ||
2195 | "payload_size[0x%x]\n", payload_size); | ||
2196 | break; | ||
2197 | case GEN_2_BYTE_SHORT_RD_RES: | ||
2198 | /* Fall through */ | ||
2199 | case DCS_2_BYTE_SHORT_RD_RES: | ||
2200 | payload_size = 2; | ||
2201 | dev_info(&dc->ndev->dev, "Short read response Packet\n" | ||
2202 | "payload_size[0x%x]\n", payload_size); | ||
2203 | break; | ||
2204 | case ACK_ERR_RES: | ||
2205 | payload_size = 2; | ||
2206 | dev_info(&dc->ndev->dev, "Acknowledge error report response\n" | ||
2207 | "Packet payload_size[0x%x]\n", payload_size); | ||
2208 | break; | ||
2209 | default: | ||
2210 | dev_info(&dc->ndev->dev, "Invalid response packet\n"); | ||
2211 | err = -EINVAL; | ||
2212 | break; | ||
2213 | } | ||
2214 | return err; | ||
2215 | } | ||
2216 | |||
2217 | static int tegra_dsi_read_fifo(struct tegra_dc *dc, | ||
2218 | struct tegra_dc_dsi_data *dsi, | ||
2219 | u8 *read_fifo) | ||
2220 | { | ||
2221 | u32 val; | ||
2222 | u32 i; | ||
2223 | u32 poll_time = 0; | ||
2224 | u32 rd_fifo_cnt; | ||
2225 | int err = 0; | ||
2226 | u8 *read_fifo_cp = read_fifo; | ||
2227 | |||
2228 | while (poll_time < DSI_DELAY_FOR_READ_FIFO) { | ||
2229 | mdelay(1); | ||
2230 | val = tegra_dsi_readl(dsi, DSI_STATUS); | ||
2231 | rd_fifo_cnt = val & DSI_STATUS_RD_FIFO_COUNT(0x1f); | ||
2232 | if (rd_fifo_cnt << 2 > DSI_READ_FIFO_DEPTH) | ||
2233 | dev_err(&dc->ndev->dev, | ||
2234 | "DSI RD_FIFO_CNT is greater than RD_FIFO_DEPTH\n"); | ||
2235 | break; | ||
2236 | poll_time++; | ||
2237 | } | ||
2238 | |||
2239 | if (rd_fifo_cnt == 0) { | ||
2240 | dev_info(&dc->ndev->dev, | ||
2241 | "DSI RD_FIFO_CNT is zero\n"); | ||
2242 | err = -EINVAL; | ||
2243 | goto fail; | ||
2244 | } | ||
2245 | |||
2246 | if (val & (DSI_STATUS_LB_UNDERFLOW(0x1) | | ||
2247 | DSI_STATUS_LB_OVERFLOW(0x1))) { | ||
2248 | dev_warn(&dc->ndev->dev, | ||
2249 | "DSI overflow/underflow error\n"); | ||
2250 | } | ||
2251 | |||
2252 | /* Read data from FIFO */ | ||
2253 | for (i = 0; i < rd_fifo_cnt; i++) { | ||
2254 | val = tegra_dsi_readl(dsi, DSI_RD_DATA); | ||
2255 | if (enable_read_debug) | ||
2256 | dev_info(&dc->ndev->dev, | ||
2257 | "Read data[%d]: 0x%x\n", i, val); | ||
2258 | memcpy(read_fifo, &val, 4); | ||
2259 | read_fifo += 4; | ||
2260 | } | ||
2261 | |||
2262 | /* Make sure all the data is read from the FIFO */ | ||
2263 | val = tegra_dsi_readl(dsi, DSI_STATUS); | ||
2264 | val &= DSI_STATUS_RD_FIFO_COUNT(0x1f); | ||
2265 | if (val) | ||
2266 | dev_err(&dc->ndev->dev, "DSI FIFO_RD_CNT not zero" | ||
2267 | " even after reading FIFO_RD_CNT words from read fifo\n"); | ||
2268 | |||
2269 | if (enable_read_debug) { | ||
2270 | err = | ||
2271 | tegra_dsi_parse_read_response(dc, rd_fifo_cnt, read_fifo_cp); | ||
2272 | if (err < 0) | ||
2273 | dev_warn(&dc->ndev->dev, "Unexpected read data\n"); | ||
2274 | } | ||
2275 | fail: | ||
2276 | return err; | ||
2277 | } | ||
2278 | |||
2279 | int tegra_dsi_read_data(struct tegra_dc *dc, | ||
2280 | struct tegra_dc_dsi_data *dsi, | ||
2281 | u32 max_ret_payload_size, | ||
2282 | u32 panel_reg_addr, u8 *read_data) | ||
2283 | { | ||
2284 | int err = 0; | ||
2285 | struct dsi_status *init_status; | ||
2286 | |||
2287 | tegra_dc_io_start(dc); | ||
2288 | |||
2289 | init_status = tegra_dsi_prepare_host_transmission( | ||
2290 | dc, dsi, DSI_LP_OP_WRITE); | ||
2291 | if (IS_ERR_OR_NULL(init_status)) { | ||
2292 | err = PTR_ERR(init_status); | ||
2293 | dev_err(&dc->ndev->dev, "DSI host config failed\n"); | ||
2294 | goto fail; | ||
2295 | } | ||
2296 | |||
2297 | /* Set max return payload size in words */ | ||
2298 | err = _tegra_dsi_write_data(dsi, NULL, | ||
2299 | dsi_command_max_return_pkt_size, | ||
2300 | max_ret_payload_size); | ||
2301 | if (err < 0) { | ||
2302 | dev_err(&dc->ndev->dev, | ||
2303 | "DSI write failed\n"); | ||
2304 | goto fail; | ||
2305 | } | ||
2306 | |||
2307 | /* DCS to read given panel register */ | ||
2308 | err = _tegra_dsi_write_data(dsi, NULL, | ||
2309 | dsi_command_dcs_read_with_no_params, | ||
2310 | panel_reg_addr); | ||
2311 | if (err < 0) { | ||
2312 | dev_err(&dc->ndev->dev, | ||
2313 | "DSI write failed\n"); | ||
2314 | goto fail; | ||
2315 | } | ||
2316 | |||
2317 | tegra_dsi_reset_read_count(dsi); | ||
2318 | |||
2319 | if (dsi->status.lp_op == DSI_LP_OP_WRITE) { | ||
2320 | err = tegra_dsi_set_to_lp_mode(dc, dsi, DSI_LP_OP_READ); | ||
2321 | if (err < 0) { | ||
2322 | dev_err(&dc->ndev->dev, | ||
2323 | "DSI failed to go to LP read mode\n"); | ||
2324 | goto fail; | ||
2325 | } | ||
2326 | } | ||
2327 | |||
2328 | err = tegra_dsi_bta(dsi); | ||
2329 | if (err < 0) { | ||
2330 | dev_err(&dc->ndev->dev, | ||
2331 | "DSI IMM BTA timeout\n"); | ||
2332 | goto fail; | ||
2333 | } | ||
2334 | |||
2335 | err = tegra_dsi_read_fifo(dc, dsi, read_data); | ||
2336 | if (err < 0) { | ||
2337 | dev_err(&dc->ndev->dev, "DSI read fifo failure\n"); | ||
2338 | goto fail; | ||
2339 | } | ||
2340 | fail: | ||
2341 | err = tegra_dsi_restore_state(dc, dsi, init_status); | ||
2342 | if (err < 0) | ||
2343 | dev_err(&dc->ndev->dev, "Failed to restore prev state\n"); | ||
2344 | tegra_dc_io_end(dc); | ||
2345 | return err; | ||
2346 | } | ||
2347 | EXPORT_SYMBOL(tegra_dsi_read_data); | ||
2348 | |||
2349 | int tegra_dsi_panel_sanity_check(struct tegra_dc *dc, | ||
2350 | struct tegra_dc_dsi_data *dsi) | ||
2351 | { | ||
2352 | int err = 0; | ||
2353 | u8 read_fifo[DSI_READ_FIFO_DEPTH]; | ||
2354 | struct dsi_status *init_status; | ||
2355 | static struct tegra_dsi_cmd dsi_nop_cmd = | ||
2356 | DSI_CMD_SHORT(0x05, 0x0, 0x0); | ||
2357 | |||
2358 | tegra_dc_io_start(dc); | ||
2359 | |||
2360 | init_status = tegra_dsi_prepare_host_transmission( | ||
2361 | dc, dsi, DSI_LP_OP_WRITE); | ||
2362 | if (IS_ERR_OR_NULL(init_status)) { | ||
2363 | err = PTR_ERR(init_status); | ||
2364 | dev_err(&dc->ndev->dev, "DSI host config failed\n"); | ||
2365 | goto fail; | ||
2366 | } | ||
2367 | |||
2368 | err = _tegra_dsi_write_data(dsi, NULL, dsi_nop_cmd.data_id, 0x0); | ||
2369 | if (err < 0) { | ||
2370 | dev_err(&dc->ndev->dev, "DSI nop write failed\n"); | ||
2371 | goto fail; | ||
2372 | } | ||
2373 | |||
2374 | tegra_dsi_reset_read_count(dsi); | ||
2375 | |||
2376 | if (dsi->status.lp_op == DSI_LP_OP_WRITE) { | ||
2377 | err = tegra_dsi_set_to_lp_mode(dc, dsi, DSI_LP_OP_READ); | ||
2378 | if (err < 0) { | ||
2379 | dev_err(&dc->ndev->dev, | ||
2380 | "DSI failed to go to LP read mode\n"); | ||
2381 | goto fail; | ||
2382 | } | ||
2383 | } | ||
2384 | |||
2385 | err = tegra_dsi_bta(dsi); | ||
2386 | if (err < 0) { | ||
2387 | dev_err(&dc->ndev->dev, "DSI BTA failed\n"); | ||
2388 | goto fail; | ||
2389 | } | ||
2390 | |||
2391 | err = tegra_dsi_read_fifo(dc, dsi, read_fifo); | ||
2392 | if (err < 0) { | ||
2393 | dev_err(&dc->ndev->dev, "DSI read fifo failure\n"); | ||
2394 | goto fail; | ||
2395 | } | ||
2396 | |||
2397 | if (read_fifo[0] != DSI_ACK_NO_ERR) { | ||
2398 | dev_warn(&dc->ndev->dev, | ||
2399 | "Ack no error trigger message not received\n"); | ||
2400 | err = -EAGAIN; | ||
2401 | } | ||
2402 | fail: | ||
2403 | err = tegra_dsi_restore_state(dc, dsi, init_status); | ||
2404 | if (err < 0) | ||
2405 | dev_err(&dc->ndev->dev, "Failed to restore prev state\n"); | ||
2406 | tegra_dc_io_end(dc); | ||
2407 | return err; | ||
2408 | } | ||
2409 | EXPORT_SYMBOL(tegra_dsi_panel_sanity_check); | ||
2410 | |||
2411 | static int tegra_dsi_enter_ulpm(struct tegra_dc_dsi_data *dsi) | ||
2412 | { | ||
2413 | u32 val; | ||
2414 | int ret; | ||
2415 | |||
2416 | ret = 0; | ||
2417 | |||
2418 | val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL); | ||
2419 | val &= ~DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(3); | ||
2420 | val |= DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(ENTER_ULPM); | ||
2421 | tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL); | ||
2422 | |||
2423 | #if DSI_USE_SYNC_POINTS | ||
2424 | ret = tegra_dsi_syncpt(dsi); | ||
2425 | if (ret < 0) { | ||
2426 | dev_err(&dsi->dc->ndev->dev, | ||
2427 | "DSI syncpt for ulpm enter failed\n"); | ||
2428 | goto fail; | ||
2429 | } | ||
2430 | #else | ||
2431 | /* TODO: Find exact delay required */ | ||
2432 | mdelay(10); | ||
2433 | #endif | ||
2434 | dsi->ulpm = true; | ||
2435 | fail: | ||
2436 | return ret; | ||
2437 | } | ||
2438 | |||
2439 | static int tegra_dsi_exit_ulpm(struct tegra_dc_dsi_data *dsi) | ||
2440 | { | ||
2441 | u32 val; | ||
2442 | int ret; | ||
2443 | |||
2444 | ret = 0; | ||
2445 | |||
2446 | val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL); | ||
2447 | val &= ~DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(3); | ||
2448 | val |= DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(EXIT_ULPM); | ||
2449 | tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL); | ||
2450 | |||
2451 | #if DSI_USE_SYNC_POINTS | ||
2452 | ret = tegra_dsi_syncpt(dsi); | ||
2453 | if (ret < 0) { | ||
2454 | dev_err(&dsi->dc->ndev->dev, | ||
2455 | "DSI syncpt for ulpm exit failed\n"); | ||
2456 | goto fail; | ||
2457 | } | ||
2458 | #else | ||
2459 | /* TODO: Find exact delay required */ | ||
2460 | mdelay(10); | ||
2461 | #endif | ||
2462 | dsi->ulpm = false; | ||
2463 | |||
2464 | val = tegra_dsi_readl(dsi, DSI_HOST_DSI_CONTROL); | ||
2465 | val &= ~DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(0x3); | ||
2466 | val |= DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(NORMAL); | ||
2467 | tegra_dsi_writel(dsi, val, DSI_HOST_DSI_CONTROL); | ||
2468 | fail: | ||
2469 | return ret; | ||
2470 | |||
2471 | } | ||
2472 | |||
/*
 * Enable the DSI output for @dc.
 *
 * Two paths: if the controller was already enabled (e.g. by the
 * bootloader), only exit ULPM and replay panel init/late-resume commands;
 * otherwise perform full hardware init, ULPM cycling, LP-mode panel init
 * and the switch to HS mode. Finally restart the DC pixel stream if this
 * output is DC-driven. Errors are logged and the enable is abandoned.
 */
static void tegra_dc_dsi_enable(struct tegra_dc *dc)
{
	struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);
	int err;
	u32 val;

	tegra_dc_io_start(dc);
	mutex_lock(&dsi->lock);

	/* Stop DC stream before configuring DSI registers
	 * to avoid visible glitches on panel during transition
	 * from bootloader to kernel driver
	 */
	tegra_dsi_stop_dc_stream(dc, dsi);

	if (dsi->enabled) {
		/* re-enable path: h/w already initialized */
		if (dsi->ulpm) {
			if (tegra_dsi_exit_ulpm(dsi) < 0) {
				dev_err(&dc->ndev->dev,
					"DSI failed to exit ulpm\n");
				goto fail;
			}
		}

		/* a panel that was physically reset needs the full init
		 * sequence; otherwise a late-resume sequence suffices */
		if (dsi->info.panel_reset) {
			err = tegra_dsi_send_panel_cmd(dc, dsi,
							dsi->info.dsi_init_cmd,
							dsi->info.n_init_cmd);
			if (err < 0) {
				dev_err(&dc->ndev->dev,
					"dsi: error sending dsi init cmd\n");
				goto fail;
			}
		} else if (dsi->info.dsi_late_resume_cmd) {
			err = tegra_dsi_send_panel_cmd(dc, dsi,
					dsi->info.dsi_late_resume_cmd,
					dsi->info.n_late_resume_cmd);
			if (err < 0) {
				dev_err(&dc->ndev->dev,
					"dsi: error sending late resume cmd\n");
				goto fail;
			}
		}
	} else {
		/* first enable: bring the controller up from scratch */
		err = tegra_dsi_init_hw(dc, dsi);
		if (err < 0) {
			dev_err(&dc->ndev->dev,
				"dsi: not able to init dsi hardware\n");
			goto fail;
		}

		if (dsi->ulpm) {
			/* cycle through ULPM with pads un-powered-down to
			 * get the lanes into a known state */
			if (tegra_dsi_enter_ulpm(dsi) < 0) {
				dev_err(&dc->ndev->dev,
					"DSI failed to enter ulpm\n");
				goto fail;
			}

			val = tegra_dsi_readl(dsi, DSI_PAD_CONTROL);

			/* erase bits we're about to set */
			val &= ~(DSI_PAD_CONTROL_PAD_PDIO(0x3) |
				DSI_PAD_CONTROL_PAD_PDIO_CLK(0x1) |
				DSI_PAD_CONTROL_PAD_PULLDN_ENAB(0x1));

			val |= (DSI_PAD_CONTROL_PAD_PDIO(0) |
				DSI_PAD_CONTROL_PAD_PDIO_CLK(0) |
				DSI_PAD_CONTROL_PAD_PULLDN_ENAB
						(TEGRA_DSI_DISABLE));

			tegra_dsi_writel(dsi, val, DSI_PAD_CONTROL);
			if (tegra_dsi_exit_ulpm(dsi) < 0) {
				dev_err(&dc->ndev->dev,
					"DSI failed to exit ulpm\n");
				goto fail;
			}
		}

		/* panel init commands are sent in LP mode ... */
		err = tegra_dsi_set_to_lp_mode(dc, dsi, DSI_LP_OP_WRITE);
		if (err < 0) {
			dev_err(&dc->ndev->dev,
				"dsi: not able to set to lp mode\n");
			goto fail;
		}

		err = tegra_dsi_send_panel_cmd(dc, dsi, dsi->info.dsi_init_cmd,
						dsi->info.n_init_cmd);
		if (err < 0) {
			dev_err(&dc->ndev->dev,
				"dsi: error while sending dsi init cmd\n");
			goto fail;
		}

		/* ... then the link is switched to HS for video */
		err = tegra_dsi_set_to_hs_mode(dc, dsi);
		if (err < 0) {
			dev_err(&dc->ndev->dev,
				"dsi: not able to set to hs mode\n");
			goto fail;
		}

		dsi->enabled = true;
	}

	if (dsi->status.driven == DSI_DRIVEN_MODE_DC)
		tegra_dsi_start_dc_stream(dc, dsi);
fail:
	mutex_unlock(&dsi->lock);
	tegra_dc_io_end(dc);
}
2582 | |||
/*
 * One-time software-state initialization for a newly probed DSI output.
 * Hardware is untouched here; tegra_dsi_init_sw() only sets up driver
 * bookkeeping.
 */
static void _tegra_dc_dsi_init(struct tegra_dc *dc)
{
	struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc);

	tegra_dsi_init_sw(dc, dsi);
	/* TODO: Configure the CSI pad configuration */
}
2590 | |||
2591 | static int tegra_dc_dsi_cp_p_cmd(struct tegra_dsi_cmd *src, | ||
2592 | struct tegra_dsi_cmd *dst, u16 n_cmd) | ||
2593 | { | ||
2594 | u16 i; | ||
2595 | u16 len; | ||
2596 | |||
2597 | memcpy(dst, src, sizeof(*dst) * n_cmd); | ||
2598 | |||
2599 | for (i = 0; i < n_cmd; i++) | ||
2600 | if (src[i].pdata) { | ||
2601 | len = sizeof(*src[i].pdata) * | ||
2602 | src[i].sp_len_dly.data_len; | ||
2603 | dst[i].pdata = kzalloc(len, GFP_KERNEL); | ||
2604 | if (!dst[i].pdata) | ||
2605 | goto free_cmd_pdata; | ||
2606 | memcpy(dst[i].pdata, src[i].pdata, len); | ||
2607 | } | ||
2608 | |||
2609 | return 0; | ||
2610 | |||
2611 | free_cmd_pdata: | ||
2612 | for (--i; i >= 0; i--) | ||
2613 | if (dst[i].pdata) | ||
2614 | kfree(dst[i].pdata); | ||
2615 | return -ENOMEM; | ||
2616 | } | ||
2617 | |||
2618 | static int tegra_dc_dsi_cp_info(struct tegra_dc_dsi_data *dsi, | ||
2619 | struct tegra_dsi_out *p_dsi) | ||
2620 | { | ||
2621 | struct tegra_dsi_cmd *p_init_cmd; | ||
2622 | struct tegra_dsi_cmd *p_early_suspend_cmd; | ||
2623 | struct tegra_dsi_cmd *p_late_resume_cmd; | ||
2624 | struct tegra_dsi_cmd *p_suspend_cmd; | ||
2625 | int err; | ||
2626 | |||
2627 | if (p_dsi->n_data_lanes > MAX_DSI_DATA_LANES) | ||
2628 | return -EINVAL; | ||
2629 | |||
2630 | p_init_cmd = kzalloc(sizeof(*p_init_cmd) * | ||
2631 | p_dsi->n_init_cmd, GFP_KERNEL); | ||
2632 | if (!p_init_cmd) | ||
2633 | return -ENOMEM; | ||
2634 | |||
2635 | if (p_dsi->dsi_early_suspend_cmd) { | ||
2636 | p_early_suspend_cmd = kzalloc(sizeof(*p_early_suspend_cmd) * | ||
2637 | p_dsi->n_early_suspend_cmd, | ||
2638 | GFP_KERNEL); | ||
2639 | if (!p_early_suspend_cmd) { | ||
2640 | err = -ENOMEM; | ||
2641 | goto err_free_init_cmd; | ||
2642 | } | ||
2643 | } | ||
2644 | |||
2645 | if (p_dsi->dsi_late_resume_cmd) { | ||
2646 | p_late_resume_cmd = kzalloc(sizeof(*p_late_resume_cmd) * | ||
2647 | p_dsi->n_late_resume_cmd, | ||
2648 | GFP_KERNEL); | ||
2649 | if (!p_late_resume_cmd) { | ||
2650 | err = -ENOMEM; | ||
2651 | goto err_free_p_early_suspend_cmd; | ||
2652 | } | ||
2653 | } | ||
2654 | |||
2655 | p_suspend_cmd = kzalloc(sizeof(*p_suspend_cmd) * p_dsi->n_suspend_cmd, | ||
2656 | GFP_KERNEL); | ||
2657 | if (!p_suspend_cmd) { | ||
2658 | err = -ENOMEM; | ||
2659 | goto err_free_p_late_resume_cmd; | ||
2660 | } | ||
2661 | |||
2662 | memcpy(&dsi->info, p_dsi, sizeof(dsi->info)); | ||
2663 | |||
2664 | /* Copy panel init cmd */ | ||
2665 | err = tegra_dc_dsi_cp_p_cmd(p_dsi->dsi_init_cmd, | ||
2666 | p_init_cmd, p_dsi->n_init_cmd); | ||
2667 | if (err < 0) | ||
2668 | goto err_free; | ||
2669 | dsi->info.dsi_init_cmd = p_init_cmd; | ||
2670 | |||
2671 | /* Copy panel early suspend cmd */ | ||
2672 | if (p_dsi->dsi_early_suspend_cmd) { | ||
2673 | err = tegra_dc_dsi_cp_p_cmd(p_dsi->dsi_early_suspend_cmd, | ||
2674 | p_early_suspend_cmd, | ||
2675 | p_dsi->n_early_suspend_cmd); | ||
2676 | if (err < 0) | ||
2677 | goto err_free; | ||
2678 | dsi->info.dsi_early_suspend_cmd = p_early_suspend_cmd; | ||
2679 | } | ||
2680 | |||
2681 | /* Copy panel late resume cmd */ | ||
2682 | if (p_dsi->dsi_late_resume_cmd) { | ||
2683 | err = tegra_dc_dsi_cp_p_cmd(p_dsi->dsi_late_resume_cmd, | ||
2684 | p_late_resume_cmd, | ||
2685 | p_dsi->n_late_resume_cmd); | ||
2686 | if (err < 0) | ||
2687 | goto err_free; | ||
2688 | dsi->info.dsi_late_resume_cmd = p_late_resume_cmd; | ||
2689 | } | ||
2690 | |||
2691 | /* Copy panel suspend cmd */ | ||
2692 | err = tegra_dc_dsi_cp_p_cmd(p_dsi->dsi_suspend_cmd, p_suspend_cmd, | ||
2693 | p_dsi->n_suspend_cmd); | ||
2694 | if (err < 0) | ||
2695 | goto err_free; | ||
2696 | dsi->info.dsi_suspend_cmd = p_suspend_cmd; | ||
2697 | |||
2698 | if (!dsi->info.panel_reset_timeout_msec) | ||
2699 | dsi->info.panel_reset_timeout_msec = | ||
2700 | DEFAULT_PANEL_RESET_TIMEOUT; | ||
2701 | |||
2702 | if (!dsi->info.panel_buffer_size_byte) | ||
2703 | dsi->info.panel_buffer_size_byte = DEFAULT_PANEL_BUFFER_BYTE; | ||
2704 | |||
2705 | if (!dsi->info.max_panel_freq_khz) { | ||
2706 | dsi->info.max_panel_freq_khz = DEFAULT_MAX_DSI_PHY_CLK_KHZ; | ||
2707 | |||
2708 | if (dsi->info.video_burst_mode > | ||
2709 | TEGRA_DSI_VIDEO_NONE_BURST_MODE_WITH_SYNC_END){ | ||
2710 | dev_err(&dsi->dc->ndev->dev, "DSI: max_panel_freq_khz" | ||
2711 | "is not set for DSI burst mode.\n"); | ||
2712 | dsi->info.video_burst_mode = | ||
2713 | TEGRA_DSI_VIDEO_BURST_MODE_LOWEST_SPEED; | ||
2714 | } | ||
2715 | } | ||
2716 | |||
2717 | if (!dsi->info.lp_cmd_mode_freq_khz) | ||
2718 | dsi->info.lp_cmd_mode_freq_khz = DEFAULT_LP_CMD_MODE_CLK_KHZ; | ||
2719 | |||
2720 | if (!dsi->info.chip_id || !dsi->info.chip_rev) | ||
2721 | dev_warn(&dsi->dc->ndev->dev, | ||
2722 | "DSI: Failed to get chip info\n"); | ||
2723 | |||
2724 | if (!dsi->info.lp_read_cmd_mode_freq_khz) | ||
2725 | dsi->info.lp_read_cmd_mode_freq_khz = | ||
2726 | dsi->info.lp_cmd_mode_freq_khz; | ||
2727 | |||
2728 | /* host mode is for testing only */ | ||
2729 | dsi->driven_mode = TEGRA_DSI_DRIVEN_BY_DC; | ||
2730 | return 0; | ||
2731 | |||
2732 | err_free: | ||
2733 | kfree(p_suspend_cmd); | ||
2734 | err_free_p_late_resume_cmd: | ||
2735 | kfree(p_late_resume_cmd); | ||
2736 | err_free_p_early_suspend_cmd: | ||
2737 | kfree(p_early_suspend_cmd); | ||
2738 | err_free_init_cmd: | ||
2739 | kfree(p_init_cmd); | ||
2740 | return err; | ||
2741 | } | ||
2742 | |||
2743 | static int tegra_dc_dsi_init(struct tegra_dc *dc) | ||
2744 | { | ||
2745 | struct tegra_dc_dsi_data *dsi; | ||
2746 | struct resource *res; | ||
2747 | struct resource *base_res; | ||
2748 | void __iomem *base; | ||
2749 | struct clk *dc_clk = NULL; | ||
2750 | struct clk *dsi_clk = NULL; | ||
2751 | struct tegra_dsi_out *dsi_pdata; | ||
2752 | int err; | ||
2753 | |||
2754 | err = 0; | ||
2755 | |||
2756 | dsi = kzalloc(sizeof(*dsi), GFP_KERNEL); | ||
2757 | if (!dsi) | ||
2758 | return -ENOMEM; | ||
2759 | |||
2760 | res = nvhost_get_resource_byname(dc->ndev, IORESOURCE_MEM, | ||
2761 | "dsi_regs"); | ||
2762 | if (!res) { | ||
2763 | dev_err(&dc->ndev->dev, "dsi: no mem resource\n"); | ||
2764 | err = -ENOENT; | ||
2765 | goto err_free_dsi; | ||
2766 | } | ||
2767 | |||
2768 | base_res = request_mem_region(res->start, resource_size(res), | ||
2769 | dc->ndev->name); | ||
2770 | if (!base_res) { | ||
2771 | dev_err(&dc->ndev->dev, "dsi: request_mem_region failed\n"); | ||
2772 | err = -EBUSY; | ||
2773 | goto err_free_dsi; | ||
2774 | } | ||
2775 | |||
2776 | base = ioremap(res->start, resource_size(res)); | ||
2777 | if (!base) { | ||
2778 | dev_err(&dc->ndev->dev, "dsi: registers can't be mapped\n"); | ||
2779 | err = -EBUSY; | ||
2780 | goto err_release_regs; | ||
2781 | } | ||
2782 | |||
2783 | dsi_pdata = dc->pdata->default_out->dsi; | ||
2784 | if (!dsi_pdata) { | ||
2785 | dev_err(&dc->ndev->dev, "dsi: dsi data not available\n"); | ||
2786 | goto err_release_regs; | ||
2787 | } | ||
2788 | |||
2789 | if (dsi_pdata->dsi_instance) | ||
2790 | dsi_clk = clk_get(&dc->ndev->dev, "dsib"); | ||
2791 | else | ||
2792 | dsi_clk = clk_get(&dc->ndev->dev, "dsia"); | ||
2793 | |||
2794 | if (IS_ERR_OR_NULL(dsi_clk)) { | ||
2795 | dev_err(&dc->ndev->dev, "dsi: can't get clock\n"); | ||
2796 | err = -EBUSY; | ||
2797 | goto err_release_regs; | ||
2798 | } | ||
2799 | |||
2800 | dc_clk = clk_get_sys(dev_name(&dc->ndev->dev), NULL); | ||
2801 | if (IS_ERR_OR_NULL(dc_clk)) { | ||
2802 | dev_err(&dc->ndev->dev, "dsi: dc clock %s unavailable\n", | ||
2803 | dev_name(&dc->ndev->dev)); | ||
2804 | err = -EBUSY; | ||
2805 | goto err_clk_put; | ||
2806 | } | ||
2807 | |||
2808 | mutex_init(&dsi->lock); | ||
2809 | dsi->dc = dc; | ||
2810 | dsi->base = base; | ||
2811 | dsi->base_res = base_res; | ||
2812 | dsi->dc_clk = dc_clk; | ||
2813 | dsi->dsi_clk = dsi_clk; | ||
2814 | |||
2815 | err = tegra_dc_dsi_cp_info(dsi, dsi_pdata); | ||
2816 | if (err < 0) | ||
2817 | goto err_dsi_data; | ||
2818 | |||
2819 | tegra_dc_set_outdata(dc, dsi); | ||
2820 | _tegra_dc_dsi_init(dc); | ||
2821 | |||
2822 | return 0; | ||
2823 | |||
2824 | err_dsi_data: | ||
2825 | err_clk_put: | ||
2826 | clk_put(dsi_clk); | ||
2827 | err_release_regs: | ||
2828 | release_resource(base_res); | ||
2829 | err_free_dsi: | ||
2830 | kfree(dsi); | ||
2831 | |||
2832 | return err; | ||
2833 | } | ||
2834 | |||
2835 | static void tegra_dc_dsi_destroy(struct tegra_dc *dc) | ||
2836 | { | ||
2837 | struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc); | ||
2838 | u16 i; | ||
2839 | u32 val; | ||
2840 | |||
2841 | mutex_lock(&dsi->lock); | ||
2842 | |||
2843 | /* free up the pdata */ | ||
2844 | for (i = 0; i < dsi->info.n_init_cmd; i++) { | ||
2845 | if (dsi->info.dsi_init_cmd[i].pdata) | ||
2846 | kfree(dsi->info.dsi_init_cmd[i].pdata); | ||
2847 | } | ||
2848 | kfree(dsi->info.dsi_init_cmd); | ||
2849 | |||
2850 | /* Disable dc stream */ | ||
2851 | if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE) | ||
2852 | tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi); | ||
2853 | |||
2854 | /* Disable dsi phy clock */ | ||
2855 | if (dsi->status.clk_out == DSI_PHYCLK_OUT_EN) | ||
2856 | tegra_dsi_hs_clk_out_disable(dc, dsi); | ||
2857 | |||
2858 | val = DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_DISABLE); | ||
2859 | tegra_dsi_writel(dsi, val, DSI_POWER_CONTROL); | ||
2860 | |||
2861 | iounmap(dsi->base); | ||
2862 | release_resource(dsi->base_res); | ||
2863 | |||
2864 | clk_put(dsi->dc_clk); | ||
2865 | clk_put(dsi->dsi_clk); | ||
2866 | |||
2867 | mutex_unlock(&dsi->lock); | ||
2868 | |||
2869 | mutex_destroy(&dsi->lock); | ||
2870 | kfree(dsi); | ||
2871 | } | ||
2872 | |||
/*
 * tegra_dsi_deep_sleep - place the DSI link and panel in their lowest
 * power state.
 * @dc:  display controller this DSI output is attached to
 * @dsi: DSI driver data
 *
 * Sequence (order matters): switch the link to LP mode, send the panel's
 * suspend command list, enter ULPM if not already in it, power down the
 * DSI pads and core logic, and finally gate the DSI clocks.
 *
 * Returns 0 on success, -EPERM if the controller is not enabled, or the
 * error of the step that failed.  Steps already completed before a
 * failure are NOT rolled back.
 */
static int tegra_dsi_deep_sleep(struct tegra_dc *dc,
				struct tegra_dc_dsi_data *dsi)
{
	int err = 0;
	int val;
	struct clk *parent_clk = NULL;
	struct clk *base_clk = NULL;

	/* Deep sleep only makes sense while the controller is running */
	if (!dsi->enabled) {
		err = -EPERM;
		goto fail;
	}

	err = tegra_dsi_set_to_lp_mode(dc, dsi, DSI_LP_OP_WRITE);
	if (err < 0) {
		dev_err(&dc->ndev->dev,
		"DSI failed to go to LP mode\n");
		goto fail;
	}

	/* Suspend panel */
	err = tegra_dsi_send_panel_cmd(dc, dsi,
			dsi->info.dsi_suspend_cmd,
			dsi->info.n_suspend_cmd);
	if (err < 0) {
		dev_err(&dc->ndev->dev,
			"dsi: Error sending suspend cmd\n");
		goto fail;
	}

	if (!dsi->ulpm) {
		err = tegra_dsi_enter_ulpm(dsi);
		if (err < 0) {
			dev_err(&dc->ndev->dev,
				"DSI failed to enter ulpm\n");
			goto fail;
		}
	}

	/*
	 * Suspend pad
	 * It is ok to overwrite previous value of DSI_PAD_CONTROL reg
	 * because it will be restored properly in resume sequence
	 */
	val = DSI_PAD_CONTROL_PAD_PDIO(0x3) |
		DSI_PAD_CONTROL_PAD_PDIO_CLK(0x1) |
		DSI_PAD_CONTROL_PAD_PULLDN_ENAB(TEGRA_DSI_ENABLE);
	tegra_dsi_writel(dsi, val, DSI_PAD_CONTROL);

	/* Suspend core-logic */
	val = DSI_POWER_CONTROL_LEG_DSI_ENABLE(TEGRA_DSI_DISABLE);
	tegra_dsi_writel(dsi, val, DSI_POWER_CONTROL);

	/*
	 * Disable dsi fast and slow clock.
	 * Which PLLD output is gated depends on the DSI instance
	 * (instance B uses the CSI output of PLLD).
	 */
	parent_clk = clk_get_parent(dsi->dsi_clk);
	base_clk = clk_get_parent(parent_clk);
	if (dsi->info.dsi_instance)
		tegra_clk_cfg_ex(base_clk,
				TEGRA_CLK_PLLD_CSI_OUT_ENB,
				0);
	else
		tegra_clk_cfg_ex(base_clk,
				TEGRA_CLK_PLLD_DSI_OUT_ENB,
				0);

	/* Disable dsi source clock */
	clk_disable(dsi->dsi_clk);

	dsi->clk_ref = false;
	dsi->enabled = false;

	return 0;
fail:
	return err;
}
2948 | |||
2949 | static void tegra_dc_dsi_disable(struct tegra_dc *dc) | ||
2950 | { | ||
2951 | int err; | ||
2952 | struct tegra_dc_dsi_data *dsi = tegra_dc_get_outdata(dc); | ||
2953 | |||
2954 | tegra_dc_io_start(dc); | ||
2955 | mutex_lock(&dsi->lock); | ||
2956 | |||
2957 | if (dsi->status.dc_stream == DSI_DC_STREAM_ENABLE) | ||
2958 | tegra_dsi_stop_dc_stream_at_frame_end(dc, dsi); | ||
2959 | |||
2960 | if (dsi->info.power_saving_suspend) { | ||
2961 | if (tegra_dsi_deep_sleep(dc, dsi) < 0) { | ||
2962 | dev_err(&dc->ndev->dev, | ||
2963 | "DSI failed to enter deep sleep\n"); | ||
2964 | goto fail; | ||
2965 | } | ||
2966 | } else { | ||
2967 | if (dsi->info.dsi_early_suspend_cmd) { | ||
2968 | err = tegra_dsi_send_panel_cmd(dc, dsi, | ||
2969 | dsi->info.dsi_early_suspend_cmd, | ||
2970 | dsi->info.n_early_suspend_cmd); | ||
2971 | if (err < 0) { | ||
2972 | dev_err(&dc->ndev->dev, | ||
2973 | "dsi: Error sending early suspend cmd\n"); | ||
2974 | goto fail; | ||
2975 | } | ||
2976 | } | ||
2977 | |||
2978 | if (!dsi->ulpm) { | ||
2979 | if (tegra_dsi_enter_ulpm(dsi) < 0) { | ||
2980 | dev_err(&dc->ndev->dev, | ||
2981 | "DSI failed to enter ulpm\n"); | ||
2982 | goto fail; | ||
2983 | } | ||
2984 | } | ||
2985 | } | ||
2986 | |||
2987 | fail: | ||
2988 | mutex_unlock(&dsi->lock); | ||
2989 | tegra_dc_io_end(dc); | ||
2990 | } | ||
2991 | |||
2992 | #ifdef CONFIG_PM | ||
2993 | static void tegra_dc_dsi_suspend(struct tegra_dc *dc) | ||
2994 | { | ||
2995 | struct tegra_dc_dsi_data *dsi; | ||
2996 | |||
2997 | dsi = tegra_dc_get_outdata(dc); | ||
2998 | |||
2999 | if (!dsi->enabled) | ||
3000 | return; | ||
3001 | |||
3002 | tegra_dc_io_start(dc); | ||
3003 | mutex_lock(&dsi->lock); | ||
3004 | |||
3005 | if (!dsi->info.power_saving_suspend) { | ||
3006 | if (dsi->ulpm) { | ||
3007 | if (tegra_dsi_exit_ulpm(dsi) < 0) { | ||
3008 | dev_err(&dc->ndev->dev, | ||
3009 | "DSI failed to exit ulpm"); | ||
3010 | goto fail; | ||
3011 | } | ||
3012 | } | ||
3013 | |||
3014 | if (tegra_dsi_deep_sleep(dc, dsi) < 0) { | ||
3015 | dev_err(&dc->ndev->dev, | ||
3016 | "DSI failed to enter deep sleep\n"); | ||
3017 | goto fail; | ||
3018 | } | ||
3019 | } | ||
3020 | fail: | ||
3021 | mutex_unlock(&dsi->lock); | ||
3022 | tegra_dc_io_end(dc); | ||
3023 | } | ||
3024 | |||
/* Intentionally empty: resume work is unnecessary here. */
static void tegra_dc_dsi_resume(struct tegra_dc *dc)
{
	/* Not required since tegra_dc_dsi_enable
	 * will reconfigure the controller from scratch
	 */
}
3031 | #endif | ||
3032 | |||
/* Output operations registered with the Tegra DC core for DSI panels */
struct tegra_dc_out_ops tegra_dc_dsi_ops = {
	.init = tegra_dc_dsi_init,
	.destroy = tegra_dc_dsi_destroy,
	.enable = tegra_dc_dsi_enable,
	.disable = tegra_dc_dsi_disable,
#ifdef CONFIG_PM
	.suspend = tegra_dc_dsi_suspend,
	.resume = tegra_dc_dsi_resume,
#endif
};
diff --git a/drivers/video/tegra/dc/dsi.h b/drivers/video/tegra/dc/dsi.h new file mode 100644 index 00000000000..18ea9c959e8 --- /dev/null +++ b/drivers/video/tegra/dc/dsi.h | |||
@@ -0,0 +1,375 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/dsi.h | ||
3 | * | ||
4 | * Copyright (c) 2011, NVIDIA Corporation. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
#ifndef __DRIVERS_VIDEO_TEGRA_DC_DSI_H__
#define __DRIVERS_VIDEO_TEGRA_DC_DSI_H__

/* source of video data */
enum {
	TEGRA_DSI_VIDEO_DRIVEN_BY_DC,
	TEGRA_DSI_VIDEO_DRIVEN_BY_HOST,
};

/* Max number of data lanes supported */
#define MAX_DSI_DATA_LANES	2
/* Default Peripheral reset timeout */
#define DSI_PR_TO_VALUE		0x2000

/* DCS commands for command mode */
/* NOTE(review): opcodes appear to follow the MIPI DCS spec — verify there */
#define DSI_ENTER_PARTIAL_MODE	0x12
#define DSI_SET_PIXEL_FORMAT	0x3A
#define DSI_AREA_COLOR_MODE	0x4C
#define DSI_SET_PARTIAL_AREA	0x30
#define DSI_SET_PAGE_ADDRESS	0x2B
#define DSI_SET_ADDRESS_MODE	0x36
#define DSI_SET_COLUMN_ADDRESS	0x2A
#define DSI_WRITE_MEMORY_START	0x2C
#define DSI_WRITE_MEMORY_CONTINUE	0x3C
#define DSI_MAX_COMMAND_DELAY_USEC	250000
#define DSI_COMMAND_DELAY_STEPS_USEC	10

/* Trigger message */
#define DSI_ESCAPE_CMD	0x87
#define DSI_ACK_NO_ERR	0x84

/* DSI return packet types */
#define GEN_LONG_RD_RES 0x1A
#define DCS_LONG_RD_RES 0x1C
#define GEN_1_BYTE_SHORT_RD_RES 0x11
#define DCS_1_BYTE_SHORT_RD_RES 0x21
#define GEN_2_BYTE_SHORT_RD_RES 0x12
#define DCS_2_BYTE_SHORT_RD_RES 0x22
#define ACK_ERR_RES 0x02

/* End of Transmit command for HS mode */
#define DSI_CMD_HS_EOT_PACKAGE		0x000F0F08

/* Delay required after issuing the trigger*/
#define DSI_COMMAND_COMPLETION_DELAY_USEC	5

#define DSI_DELAY_FOR_READ_FIFO 5

/* Dsi virtual channel bit position, refer to the DSI specs */
#define DSI_VIR_CHANNEL_BIT_POSITION	6

/* DSI packet commands from Host to peripherals */
enum {
	dsi_command_v_sync_start		= 0x01,
	dsi_command_v_sync_end			= 0x11,
	dsi_command_h_sync_start		= 0x21,
	dsi_command_h_sync_end			= 0x31,
	dsi_command_end_of_transaction		= 0x08,
	dsi_command_blanking			= 0x19,
	dsi_command_null_packet			= 0x09,
	dsi_command_h_active_length_16bpp	= 0x0E,
	dsi_command_h_active_length_18bpp	= 0x1E,
	dsi_command_h_active_length_18bpp_np	= 0x2E,
	dsi_command_h_active_length_24bpp	= 0x3E,
	/* sync-active and porch periods are all filled with blanking packets */
	dsi_command_h_sync_active		= dsi_command_blanking,
	dsi_command_h_back_porch		= dsi_command_blanking,
	dsi_command_h_front_porch		= dsi_command_blanking,
	dsi_command_writ_no_param		= 0x05,
	dsi_command_long_write			= 0x39,
	dsi_command_max_return_pkt_size		= 0x37,
	dsi_command_generic_read_request_with_2_param	= 0x24,
	dsi_command_dcs_read_with_no_params	= 0x06,
};
90 | |||
/* Maximum polling time for reading the dsi status register */
#define DSI_STATUS_POLLING_DURATION_USEC	100000
#define DSI_STATUS_POLLING_DELAY_USEC		100

/*
 * Horizontal Sync Blank Packet Over head
 * DSI_overhead = size_of(HS packet header)
 *		+ size_of(BLANK packet header) + size_of(checksum)
 * DSI_overhead = 4 + 4 + 2 = 10
 */
#define DSI_HSYNC_BLNK_PKT_OVERHEAD	10

/*
 * Horizontal Front Porch Packet Overhead
 * DSI_overhead = size_of(checksum)
 *		+ size_of(BLANK packet header) + size_of(checksum)
 * DSI_overhead = 2 + 4 + 2 = 8
 */
#define DSI_HFRONT_PORCH_PKT_OVERHEAD	8

/*
 * Horizontal Back Porch Packet
 * DSI_overhead = size_of(HE packet header)
 *		+ size_of(BLANK packet header) + size_of(checksum)
 *		+ size_of(RGB packet header)
 * DSI_overhead = 4 + 4 + 2 + 4 = 14
 */
#define DSI_HBACK_PORCH_PKT_OVERHEAD	14

/* Additional Hs TX timeout margin */
#define DSI_HTX_TO_MARGIN	720

#define DSI_CYCLE_COUNTER_VALUE		512

/* LP Rx timeout terminal count */
#define DSI_LRXH_TO_VALUE	0x2000

/* Turn around timeout terminal count */
#define DSI_TA_TO_VALUE		0x2000

/* Turn around timeout tally */
#define DSI_TA_TALLY_VALUE	0x0
/* LP Rx timeout tally */
#define DSI_LRXH_TALLY_VALUE	0x0
/* HS Tx Timeout tally */
#define DSI_HTX_TALLY_VALUE	0x0

/* DSI Power control settle time 10 micro seconds */
#define DSI_POWER_CONTROL_SETTLE_TIME_US	10

/* FIFO depths in words */
#define DSI_HOST_FIFO_DEPTH	64
#define DSI_VIDEO_FIFO_DEPTH	480
#define DSI_READ_FIFO_DEPTH	(32 << 2)

#define NUMOF_BIT_PER_BYTE	8
#define DEFAULT_LP_CMD_MODE_CLK_KHZ	10000
#define DEFAULT_MAX_DSI_PHY_CLK_KHZ	(500*1000)
#define DEFAULT_PANEL_RESET_TIMEOUT	2
#define DEFAULT_PANEL_BUFFER_BYTE	512

/*
 * TODO: are DSI_HOST_DSI_CONTROL_CRC_RESET(RESET_CRC) and
 * DSI_HOST_DSI_CONTROL_HOST_TX_TRIG_SRC(IMMEDIATE) required for everyone?
 */
/* Pre-composed values for the DSI_HOST_DSI_CONTROL register */
#define HOST_DSI_CTRL_COMMON \
			(DSI_HOST_DSI_CONTROL_PHY_CLK_DIV(DSI_PHY_CLK_DIV1) | \
			DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(NORMAL) | \
			DSI_HOST_DSI_CONTROL_PERIPH_RESET(TEGRA_DSI_DISABLE) | \
			DSI_HOST_DSI_CONTROL_RAW_DATA(TEGRA_DSI_DISABLE) | \
			DSI_HOST_DSI_CONTROL_IMM_BTA(TEGRA_DSI_DISABLE) | \
			DSI_HOST_DSI_CONTROL_PKT_BTA(TEGRA_DSI_DISABLE) | \
			DSI_HOST_DSI_CONTROL_CS_ENABLE(TEGRA_DSI_ENABLE) | \
			DSI_HOST_DSI_CONTROL_ECC_ENABLE(TEGRA_DSI_ENABLE) | \
			DSI_HOST_DSI_CONTROL_PKT_WR_FIFO_SEL(HOST_ONLY))

#define HOST_DSI_CTRL_HOST_DRIVEN \
			(DSI_HOST_DSI_CONTROL_CRC_RESET(RESET_CRC) | \
			DSI_HOST_DSI_CONTROL_HOST_TX_TRIG_SRC(IMMEDIATE))

#define HOST_DSI_CTRL_DC_DRIVEN 0

/* Pre-composed values for the DSI_CONTROL register */
#define DSI_CTRL_HOST_DRIVEN	(DSI_CONTROL_VID_ENABLE(TEGRA_DSI_DISABLE) | \
				DSI_CONTROL_HOST_ENABLE(TEGRA_DSI_ENABLE))

#define DSI_CTRL_DC_DRIVEN	(DSI_CONTROL_VID_TX_TRIG_SRC(SOL) | \
				DSI_CONTROL_VID_ENABLE(TEGRA_DSI_ENABLE) | \
				DSI_CONTROL_HOST_ENABLE(TEGRA_DSI_DISABLE))

#define DSI_CTRL_CMD_MODE	(DSI_CONTROL_VID_DCS_ENABLE(TEGRA_DSI_ENABLE))

#define DSI_CTRL_VIDEO_MODE	(DSI_CONTROL_VID_DCS_ENABLE(TEGRA_DSI_DISABLE))
181 | |||
182 | |||
/* Short aliases for the DSI packet data types used in pkt_seq tables */
enum {
	CMD_VS		= 0x01,
	CMD_VE		= 0x11,

	CMD_HS		= 0x21,
	CMD_HE		= 0x31,

	CMD_EOT		= 0x08,
	CMD_NULL	= 0x09,
	CMD_SHORTW	= 0x15,
	CMD_BLNK	= 0x19,
	CMD_LONGW	= 0x39,

	CMD_RGB		= 0x00,
	CMD_RGB_16BPP	= 0x0E,
	CMD_RGB_18BPP	= 0x1E,
	CMD_RGB_18BPPNP = 0x2E,
	CMD_RGB_24BPP	= 0x3E,
};

/*
 * Helpers for building packet-sequence register values.
 * NOTE(review): each PKT_IDn combines the _ID field of a SEQ_x_LO/HI
 * register with the _EN bit of the paired SEQ_(x+1) register — presumably
 * intentional since the field layouts line up; verify against the TRM.
 */
#define PKT_ID0(id)	(DSI_PKT_SEQ_0_LO_PKT_00_ID(id) | \
			DSI_PKT_SEQ_1_LO_PKT_10_EN(TEGRA_DSI_ENABLE))
#define PKT_LEN0(len)	(DSI_PKT_SEQ_0_LO_PKT_00_SIZE(len))

#define PKT_ID1(id)	(DSI_PKT_SEQ_0_LO_PKT_01_ID(id) | \
			DSI_PKT_SEQ_1_LO_PKT_11_EN(TEGRA_DSI_ENABLE))
#define PKT_LEN1(len)	(DSI_PKT_SEQ_0_LO_PKT_01_SIZE(len))

#define PKT_ID2(id)	(DSI_PKT_SEQ_0_LO_PKT_02_ID(id) | \
			DSI_PKT_SEQ_1_LO_PKT_12_EN(TEGRA_DSI_ENABLE))
#define PKT_LEN2(len)	(DSI_PKT_SEQ_0_LO_PKT_02_SIZE(len))

#define PKT_ID3(id)	(DSI_PKT_SEQ_0_HI_PKT_03_ID(id) | \
			DSI_PKT_SEQ_1_HI_PKT_13_EN(TEGRA_DSI_ENABLE))
#define PKT_LEN3(len)	(DSI_PKT_SEQ_0_HI_PKT_03_SIZE(len))

#define PKT_ID4(id)	(DSI_PKT_SEQ_0_HI_PKT_04_ID(id) | \
			DSI_PKT_SEQ_1_HI_PKT_14_EN(TEGRA_DSI_ENABLE))
#define PKT_LEN4(len)	(DSI_PKT_SEQ_0_HI_PKT_04_SIZE(len))

#define PKT_ID5(id)	(DSI_PKT_SEQ_0_HI_PKT_05_ID(id) | \
			DSI_PKT_SEQ_1_HI_PKT_15_EN(TEGRA_DSI_ENABLE))
#define PKT_LEN5(len)	(DSI_PKT_SEQ_0_HI_PKT_05_SIZE(len))

#define PKT_LP		(DSI_PKT_SEQ_0_LO_SEQ_0_FORCE_LP(TEGRA_DSI_ENABLE))

#define NUMOF_PKT_SEQ	12
230 | |||
/*
 * Mipi v1.00.00 phy timing range.
 * All values are nanoseconds; clk_ns is the bit-clock period.
 * NOT_DEFINED marks limits the spec leaves open.
 */
#define NOT_DEFINED			-1
#define MIPI_T_HSEXIT_NS_MIN		100
#define MIPI_T_HSEXIT_NS_MAX		NOT_DEFINED
#define MIPI_T_HSTRAIL_NS_MIN(clk_ns)	max((8 * (clk_ns)), (60 + 4 * (clk_ns)))
#define MIPI_T_HSTRAIL_NS_MAX		NOT_DEFINED
#define MIPI_T_HSZERO_NS_MIN		NOT_DEFINED
#define MIPI_T_HSZERO_NS_MAX		NOT_DEFINED
#define MIPI_T_HSPREPARE_NS_MIN(clk_ns)	(40 + 4 * (clk_ns))
#define MIPI_T_HSPREPARE_NS_MAX(clk_ns)	(85 + 6 * (clk_ns))
#define MIPI_T_CLKTRAIL_NS_MIN		60
#define MIPI_T_CLKTRAIL_NS_MAX		NOT_DEFINED
#define MIPI_T_CLKPOST_NS_MIN(clk_ns)	(60 + 52 * (clk_ns))
#define MIPI_T_CLKPOST_NS_MAX		NOT_DEFINED
#define MIPI_T_CLKZERO_NS_MIN		NOT_DEFINED
#define MIPI_T_CLKZERO_NS_MAX		NOT_DEFINED
#define MIPI_T_TLPX_NS_MIN		50
#define MIPI_T_TLPX_NS_MAX		NOT_DEFINED
#define MIPI_T_CLKPREPARE_NS_MIN	38
#define MIPI_T_CLKPREPARE_NS_MAX	95
#define MIPI_T_CLKPRE_NS_MIN		8
#define MIPI_T_CLKPRE_NS_MAX		NOT_DEFINED
#define MIPI_T_WAKEUP_NS_MIN		1
#define MIPI_T_WAKEUP_NS_MAX		NOT_DEFINED
#define MIPI_T_TASURE_NS_MIN(tlpx_ns)	(tlpx_ns)
#define MIPI_T_TASURE_NS_MAX(tlpx_ns)	(2 * (tlpx_ns))
#define MIPI_T_HSPREPARE_ADD_HSZERO_NS_MIN(clk_ns)	(145 + 10 * (clk_ns))
#define MIPI_T_HSPREPARE_ADD_HSZERO_NS_MAX		NOT_DEFINED
#define MIPI_T_CLKPREPARE_ADD_CLKZERO_NS_MIN		300
#define MIPI_T_CLKPREPARE_ADD_CLKZERO_NS_MAX		NOT_DEFINED

/* One byte-clock period in ns, given the bit-clock period */
#define DSI_TBYTE(clk_ns)	((clk_ns) * (BITS_PER_BYTE))
/* Convert a phy timing in ns to the register value (byte-clock units) */
#define DSI_CONVERT_T_PHY_NS_TO_T_PHY(t_phy_ns, clk_ns, hw_inc) \
		((int)((DIV_ROUND_CLOSEST((t_phy_ns), \
		(DSI_TBYTE(clk_ns)))) - (hw_inc)))

/* Convert a phy timing register value back to ns */
#define DSI_CONVERT_T_PHY_TO_T_PHY_NS(t_phy, clk_ns, hw_inc) \
		(((t_phy) + (hw_inc)) * (DSI_TBYTE(clk_ns)))
269 | |||
/* Default phy timing in ns */
#define T_HSEXIT_NS_DEFAULT	120
#define T_HSTRAIL_NS_DEFAULT(clk_ns) \
				max((8 * (clk_ns)), (60 + 4 * (clk_ns)))

#define T_DATZERO_NS_DEFAULT(clk_ns)	(145 + 5 * (clk_ns))
#define T_HSPREPARE_NS_DEFAULT(clk_ns)	(65 + 5 * (clk_ns))
#define T_CLKTRAIL_NS_DEFAULT	80
#define T_CLKPOST_NS_DEFAULT(clk_ns)	(70 + 52 * (clk_ns))
#define T_CLKZERO_NS_DEFAULT	260
#define T_TLPX_NS_DEFAULT	60
#define T_CLKPREPARE_NS_DEFAULT	65
#define T_TAGO_NS_DEFAULT	(4 * (T_TLPX_NS_DEFAULT))
#define T_TASURE_NS_DEFAULT	(2 * (T_TLPX_NS_DEFAULT))
#define T_TAGET_NS_DEFAULT	(5 * (T_TLPX_NS_DEFAULT))

/*
 * HW increment to phy register values: the hardware adds this constant
 * to the programmed value, so it is subtracted during conversion.
 */
#define T_HSEXIT_HW_INC		1
#define T_HSTRAIL_HW_INC	0
#define T_DATZERO_HW_INC	3
#define T_HSPREPARE_HW_INC	1
#define T_CLKTRAIL_HW_INC	1
#define T_CLKPOST_HW_INC	1
#define T_CLKZERO_HW_INC	1
#define T_TLPX_HW_INC		1
#define T_CLKPREPARE_HW_INC	1
#define T_TAGO_HW_INC		1
#define T_TASURE_HW_INC		1
#define T_TAGET_HW_INC		1
#define T_CLKPRE_HW_INC		1
#define T_WAKEUP_HW_INC		1

/* Default phy timing reg values (byte-clock units, HW increment applied) */
#define T_HSEXIT_DEFAULT(clk_ns) \
(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
T_HSEXIT_NS_DEFAULT, clk_ns, T_HSEXIT_HW_INC))

/* NOTE(review): extra +3 margin on HSTRAIL only — presumably deliberate */
#define T_HSTRAIL_DEFAULT(clk_ns) \
(3 + (DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
T_HSTRAIL_NS_DEFAULT(clk_ns), clk_ns, T_HSTRAIL_HW_INC)))

#define T_DATZERO_DEFAULT(clk_ns) \
(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
T_DATZERO_NS_DEFAULT(clk_ns), clk_ns, T_DATZERO_HW_INC))

#define T_HSPREPARE_DEFAULT(clk_ns) \
(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
T_HSPREPARE_NS_DEFAULT(clk_ns), clk_ns, T_HSPREPARE_HW_INC))

#define T_CLKTRAIL_DEFAULT(clk_ns) \
(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
T_CLKTRAIL_NS_DEFAULT, clk_ns, T_CLKTRAIL_HW_INC))

#define T_CLKPOST_DEFAULT(clk_ns) \
(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
T_CLKPOST_NS_DEFAULT(clk_ns), clk_ns, T_CLKPOST_HW_INC))

#define T_CLKZERO_DEFAULT(clk_ns) \
(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
T_CLKZERO_NS_DEFAULT, clk_ns, T_CLKZERO_HW_INC))

#define T_TLPX_DEFAULT(clk_ns) \
(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
T_TLPX_NS_DEFAULT, clk_ns, T_TLPX_HW_INC))

#define T_CLKPREPARE_DEFAULT(clk_ns) \
(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
T_CLKPREPARE_NS_DEFAULT, clk_ns, T_CLKPREPARE_HW_INC))

#define T_CLKPRE_DEFAULT	0x1
#define T_WAKEUP_DEFAULT	0x7f

#define T_TAGO_DEFAULT(clk_ns) \
(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
T_TAGO_NS_DEFAULT, clk_ns, T_TAGO_HW_INC))

#define T_TASURE_DEFAULT(clk_ns) \
(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
T_TASURE_NS_DEFAULT, clk_ns, T_TASURE_HW_INC))

#define T_TAGET_DEFAULT(clk_ns) \
(DSI_CONVERT_T_PHY_NS_TO_T_PHY( \
T_TAGET_NS_DEFAULT, clk_ns, T_TAGET_HW_INC))
353 | |||
/*
 * Defines the DSI phy timing parameters.
 * All fields hold register values in byte-clock units (see the
 * DSI_CONVERT_T_PHY_* macros above), not nanoseconds.
 */
struct dsi_phy_timing_inclk {
	/* data-lane HS timings */
	unsigned	t_hsdexit;
	unsigned	t_hstrail;
	unsigned	t_hsprepare;
	unsigned	t_datzero;

	/* clock-lane timings */
	unsigned	t_clktrail;
	unsigned	t_clkpost;
	unsigned	t_clkzero;
	unsigned	t_tlpx;

	unsigned	t_clkpre;
	unsigned	t_clkprepare;
	unsigned	t_wakeup;

	/* bus turn-around timings */
	unsigned	t_taget;
	unsigned	t_tasure;
	unsigned	t_tago;
};
374 | |||
375 | #endif | ||
diff --git a/drivers/video/tegra/dc/dsi_regs.h b/drivers/video/tegra/dc/dsi_regs.h new file mode 100644 index 00000000000..203ac32bd92 --- /dev/null +++ b/drivers/video/tegra/dc/dsi_regs.h | |||
@@ -0,0 +1,351 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/dsi_regs.h | ||
3 | * | ||
4 | * Copyright (c) 2011, NVIDIA Corporation. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
#ifndef __DRIVERS_VIDEO_TEGRA_DC_DSI_REG_H__
#define __DRIVERS_VIDEO_TEGRA_DC_DSI_REG_H__

/* generic enable/disable values used by the field macros below */
enum {
	TEGRA_DSI_DISABLE,
	TEGRA_DSI_ENABLE,
};

/* These are word offsets from base (not byte offsets) */
/* syncpoint condition for DSI_INCR_SYNCPT_COND */
enum {
	OP_DONE = 1,
};
#define DSI_INCR_SYNCPT 0x00
#define DSI_INCR_SYNCPT_COND(x)		(((x) & 0xff) << 8)
#define DSI_INCR_SYNCPT_INDX(x)		(((x) & 0xff) << 0)

#define DSI_INCR_SYNCPT_CNTRL 0x01
#define DSI_INCR_SYNCPT_ERROR 0x02
#define DSI_CTXSW 0x08
#define DSI_RD_DATA 0x09
#define DSI_WR_DATA 0x0a

#define DSI_POWER_CONTROL 0x0b
#define DSI_POWER_CONTROL_LEG_DSI_ENABLE(x)	(((x) & 0x1) << 0)

#define DSI_INT_ENABLE 0x0c
#define DSI_INT_STATUS 0x0d
#define DSI_INT_MASK 0x0e
45 | |||
/*
 * DSI_HOST_DSI_CONTROL: host-driven transfer configuration.
 * The small anonymous enums below name the legal values of the field
 * macro that immediately follows each of them.
 */
#define DSI_HOST_DSI_CONTROL 0x0f
enum {
	RESET_CRC = 1,
};
#define DSI_HOST_CONTROL_FIFO_STAT_RESET(x)	(((x) & 0x1) << 21)
#define DSI_HOST_DSI_CONTROL_CRC_RESET(x)	(((x) & 0x1) << 20)
enum {
	DSI_PHY_CLK_DIV1,
	DSI_PHY_CLK_DIV2,
};
#define DSI_HOST_DSI_CONTROL_PHY_CLK_DIV(x)	(((x) & 0x7) << 16)
enum {
	SOL,
	FIFO_LEVEL,
	IMMEDIATE,
};
#define DSI_HOST_DSI_CONTROL_HOST_TX_TRIG_SRC(x)	(((x) & 0x3) << 12)
enum {
	NORMAL,
	ENTER_ULPM,
	EXIT_ULPM,
};
#define DSI_HOST_DSI_CONTROL_ULTRA_LOW_POWER(x)	(((x) & 0x3) << 8)
#define DSI_HOST_DSI_CONTROL_PERIPH_RESET(x)	(((x) & 0x1) << 7)
#define DSI_HOST_DSI_CONTROL_RAW_DATA(x)	(((x) & 0x1) << 6)
enum {
	TEGRA_DSI_LOW,
	TEGRA_DSI_HIGH,
};
#define DSI_HOST_DSI_CONTROL_HIGH_SPEED_TRANS(x)	(((x) & 0x1) << 5)
enum {
	HOST_ONLY,
	VIDEO_HOST,
};
#define DSI_HOST_DSI_CONTROL_PKT_WR_FIFO_SEL(x)	(((x) & 0x1) << 4)
#define DSI_HOST_DSI_CONTROL_IMM_BTA(x)	(((x) & 0x1) << 3)
#define DSI_HOST_DSI_CONTROL_PKT_BTA(x)	(((x) & 0x1) << 2)
#define DSI_HOST_DSI_CONTROL_CS_ENABLE(x)	(((x) & 0x1) << 1)
#define DSI_HOST_DSI_CONTROL_ECC_ENABLE(x)	(((x) & 0x1) << 0)

/* DSI_CONTROL: top-level controller / video-mode configuration */
#define DSI_CONTROL 0x10
#define DSI_CONTROL_DBG_ENABLE(x)	(((x) & 0x1) << 31)
enum {
	CONTINUOUS,
	TX_ONLY,
};
#define DSI_CONTROL_HS_CLK_CTRL(x)	(((x) & 0x1) << 20)
#define DSI_CONTROL_VIRTUAL_CHANNEL(x)	(((x) & 0x3) << 16)
#define DSI_CONTROL_DATA_FORMAT(x)	(((x) & 0x3) << 12)
#define DSI_CONTROL_VID_TX_TRIG_SRC(x)	(((x) & 0x3) << 8)
#define DSI_CONTROL_NUM_DATA_LANES(x)	(((x) & 0x3) << 4)
#define DSI_CONTROL_VID_DCS_ENABLE(x)	(((x) & 0x1) << 3)
#define DSI_CONTROL_VID_SOURCE(x)	(((x) & 0x1) << 2)
#define DSI_CONTROL_VID_ENABLE(x)	(((x) & 0x1) << 1)
#define DSI_CONTROL_HOST_ENABLE(x)	(((x) & 0x1) << 0)
101 | |||
#define DSI_SOL_DELAY 0x11
#define DSI_SOL_DELAY_SOL_DELAY(x)	(((x) & 0xffff) << 0)

#define DSI_MAX_THRESHOLD 0x12
#define DSI_MAX_THRESHOLD_MAX_THRESHOLD(x)	(((x) & 0xffff) << 0)

#define DSI_TRIGGER 0x13
#define DSI_TRIGGER_HOST_TRIGGER(x)	(((x) & 0x1) << 1)
#define DSI_TRIGGER_VID_TRIGGER(x)	(((x) & 0x1) << 0)

#define DSI_TX_CRC 0x14
#define DSI_TX_CRC_TX_CRC(x)	(((x) & 0xffffffff) << 0)

/* DSI_STATUS: read-only controller state */
#define DSI_STATUS 0x15
#define DSI_STATUS_IDLE(x)	(((x) & 0x1) << 10)
#define DSI_STATUS_LB_UNDERFLOW(x)	(((x) & 0x1) << 9)
#define DSI_STATUS_LB_OVERFLOW(x)	(((x) & 0x1) << 8)
#define DSI_STATUS_RD_FIFO_COUNT(x)	(((x) & 0x1f) << 0)

/* Hardware-driven panel init sequence control and data registers */
#define DSI_INIT_SEQ_CONTROL 0x1a
#define DSI_INIT_SEQ_CONTROL_DSI_FRAME_INIT_BYTE_COUNT(x) \
				(((x) & 0x3f) << 8)
#define DSI_INIT_SEQ_CONTROL_DSI_SEND_INIT_SEQUENCE(x) \
				(((x) & 0xff) << 0)

#define DSI_INIT_SEQ_DATA_0 0x1b
#define DSI_INIT_SEQ_DATA_1 0x1c
#define DSI_INIT_SEQ_DATA_2 0x1d
#define DSI_INIT_SEQ_DATA_3 0x1e
#define DSI_INIT_SEQ_DATA_4 0x1f
#define DSI_INIT_SEQ_DATA_5 0x20
#define DSI_INIT_SEQ_DATA_6 0x21
#define DSI_INIT_SEQ_DATA_7 0x22
135 | |||
/*
 * Packet sequencer registers: six sequences, each split across a LO/HI
 * register pair holding three packet slots (ID, SIZE, EN) apiece.
 */
#define DSI_PKT_SEQ_0_LO 0x23
#define DSI_PKT_SEQ_0_LO_SEQ_0_FORCE_LP(x)	(((x) & 0x1) << 30)
#define DSI_PKT_SEQ_0_LO_PKT_02_EN(x)	(((x) & 0x1) << 29)
#define DSI_PKT_SEQ_0_LO_PKT_02_ID(x)	(((x) & 0x3f) << 23)
#define DSI_PKT_SEQ_0_LO_PKT_02_SIZE(x)	(((x) & 0x7) << 20)
#define DSI_PKT_SEQ_0_LO_PKT_01_EN(x)	(((x) & 0x1) << 19)
#define DSI_PKT_SEQ_0_LO_PKT_01_ID(x)	(((x) & 0x3f) << 13)
#define DSI_PKT_SEQ_0_LO_PKT_01_SIZE(x)	(((x) & 0x7) << 10)
#define DSI_PKT_SEQ_0_LO_PKT_00_EN(x)	(((x) & 0x1) << 9)
#define DSI_PKT_SEQ_0_LO_PKT_00_ID(x)	(((x) & 0x3f) << 3)
#define DSI_PKT_SEQ_0_LO_PKT_00_SIZE(x)	(((x) & 0x7) << 0)

#define DSI_PKT_SEQ_0_HI 0x24
#define DSI_PKT_SEQ_0_HI_PKT_05_EN(x)	(((x) & 0x1) << 29)
#define DSI_PKT_SEQ_0_HI_PKT_05_ID(x)	(((x) & 0x3f) << 23)
#define DSI_PKT_SEQ_0_HI_PKT_05_SIZE(x)	(((x) & 0x7) << 20)
#define DSI_PKT_SEQ_0_HI_PKT_04_EN(x)	(((x) & 0x1) << 19)
#define DSI_PKT_SEQ_0_HI_PKT_04_ID(x)	(((x) & 0x3f) << 13)
#define DSI_PKT_SEQ_0_HI_PKT_04_SIZE(x)	(((x) & 0x7) << 10)
#define DSI_PKT_SEQ_0_HI_PKT_03_EN(x)	(((x) & 0x1) << 9)
#define DSI_PKT_SEQ_0_HI_PKT_03_ID(x)	(((x) & 0x3f) << 3)
#define DSI_PKT_SEQ_0_HI_PKT_03_SIZE(x)	(((x) & 0x7) << 0)

#define DSI_PKT_SEQ_1_LO 0x25
#define DSI_PKT_SEQ_1_LO_SEQ_1_FORCE_LP(x)	(((x) & 0x1) << 30)
#define DSI_PKT_SEQ_1_LO_PKT_12_EN(x)	(((x) & 0x1) << 29)
#define DSI_PKT_SEQ_1_LO_PKT_12_ID(x)	(((x) & 0x3f) << 23)
#define DSI_PKT_SEQ_1_LO_PKT_12_SIZE(x)	(((x) & 0x7) << 20)
#define DSI_PKT_SEQ_1_LO_PKT_11_EN(x)	(((x) & 0x1) << 19)
#define DSI_PKT_SEQ_1_LO_PKT_11_ID(x)	(((x) & 0x3f) << 13)
#define DSI_PKT_SEQ_1_LO_PKT_11_SIZE(x)	(((x) & 0x7) << 10)
#define DSI_PKT_SEQ_1_LO_PKT_10_EN(x)	(((x) & 0x1) << 9)
#define DSI_PKT_SEQ_1_LO_PKT_10_ID(x)	(((x) & 0x3f) << 3)
#define DSI_PKT_SEQ_1_LO_PKT_10_SIZE(x)	(((x) & 0x7) << 0)

#define DSI_PKT_SEQ_1_HI 0x26
#define DSI_PKT_SEQ_1_HI_PKT_15_EN(x)	(((x) & 0x1) << 29)
#define DSI_PKT_SEQ_1_HI_PKT_15_ID(x)	(((x) & 0x3f) << 23)
#define DSI_PKT_SEQ_1_HI_PKT_15_SIZE(x)	(((x) & 0x7) << 20)
#define DSI_PKT_SEQ_1_HI_PKT_14_EN(x)	(((x) & 0x1) << 19)
#define DSI_PKT_SEQ_1_HI_PKT_14_ID(x)	(((x) & 0x3f) << 13)
#define DSI_PKT_SEQ_1_HI_PKT_14_SIZE(x)	(((x) & 0x7) << 10)
#define DSI_PKT_SEQ_1_HI_PKT_13_EN(x)	(((x) & 0x1) << 9)
#define DSI_PKT_SEQ_1_HI_PKT_13_ID(x)	(((x) & 0x3f) << 3)
#define DSI_PKT_SEQ_1_HI_PKT_13_SIZE(x)	(((x) & 0x7) << 0)

#define DSI_PKT_SEQ_2_LO 0x27
#define DSI_PKT_SEQ_2_LO_SEQ_2_FORCE_LP(x)	(((x) & 0x1) << 30)
#define DSI_PKT_SEQ_2_LO_PKT_22_EN(x)	(((x) & 0x1) << 29)
#define DSI_PKT_SEQ_2_LO_PKT_22_ID(x)	(((x) & 0x3f) << 23)
#define DSI_PKT_SEQ_2_LO_PKT_22_SIZE(x)	(((x) & 0x7) << 20)
#define DSI_PKT_SEQ_2_LO_PKT_21_EN(x)	(((x) & 0x1) << 19)
#define DSI_PKT_SEQ_2_LO_PKT_21_ID(x)	(((x) & 0x3f) << 13)
#define DSI_PKT_SEQ_2_LO_PKT_21_SIZE(x)	(((x) & 0x7) << 10)
#define DSI_PKT_SEQ_2_LO_PKT_20_EN(x)	(((x) & 0x1) << 9)
#define DSI_PKT_SEQ_2_LO_PKT_20_ID(x)	(((x) & 0x3f) << 3)
#define DSI_PKT_SEQ_2_LO_PKT_20_SIZE(x)	(((x) & 0x7) << 0)

#define DSI_PKT_SEQ_2_HI 0x28
#define DSI_PKT_SEQ_2_HI_PKT_25_EN(x)	(((x) & 0x1) << 29)
#define DSI_PKT_SEQ_2_HI_PKT_25_ID(x)	(((x) & 0x3f) << 23)
#define DSI_PKT_SEQ_2_HI_PKT_25_SIZE(x)	(((x) & 0x7) << 20)
#define DSI_PKT_SEQ_2_HI_PKT_24_EN(x)	(((x) & 0x1) << 19)
#define DSI_PKT_SEQ_2_HI_PKT_24_ID(x)	(((x) & 0x3f) << 13)
#define DSI_PKT_SEQ_2_HI_PKT_24_SIZE(x)	(((x) & 0x7) << 10)
#define DSI_PKT_SEQ_2_HI_PKT_23_EN(x)	(((x) & 0x1) << 9)
#define DSI_PKT_SEQ_2_HI_PKT_23_ID(x)	(((x) & 0x3f) << 3)
#define DSI_PKT_SEQ_2_HI_PKT_23_SIZE(x)	(((x) & 0x7) << 0)

#define DSI_PKT_SEQ_3_LO 0x29
#define DSI_PKT_SEQ_3_LO_SEQ_3_FORCE_LP(x)	(((x) & 0x1) << 30)
#define DSI_PKT_SEQ_3_LO_PKT_32_EN(x)	(((x) & 0x1) << 29)
#define DSI_PKT_SEQ_3_LO_PKT_32_ID(x)	(((x) & 0x3f) << 23)
#define DSI_PKT_SEQ_3_LO_PKT_32_SIZE(x)	(((x) & 0x7) << 20)
#define DSI_PKT_SEQ_3_LO_PKT_31_EN(x)	(((x) & 0x1) << 19)
#define DSI_PKT_SEQ_3_LO_PKT_31_ID(x)	(((x) & 0x3f) << 13)
#define DSI_PKT_SEQ_3_LO_PKT_31_SIZE(x)	(((x) & 0x7) << 10)
#define DSI_PKT_SEQ_3_LO_PKT_30_EN(x)	(((x) & 0x1) << 9)
#define DSI_PKT_SEQ_3_LO_PKT_30_ID(x)	(((x) & 0x3f) << 3)
#define DSI_PKT_SEQ_3_LO_PKT_30_SIZE(x)	(((x) & 0x7) << 0)

#define DSI_PKT_SEQ_3_HI 0x2a
#define DSI_PKT_SEQ_3_HI_PKT_35_EN(x)	(((x) & 0x1) << 29)
#define DSI_PKT_SEQ_3_HI_PKT_35_ID(x)	(((x) & 0x3f) << 23)
#define DSI_PKT_SEQ_3_HI_PKT_35_SIZE(x)	(((x) & 0x7) << 20)
#define DSI_PKT_SEQ_3_HI_PKT_34_EN(x)	(((x) & 0x1) << 19)
#define DSI_PKT_SEQ_3_HI_PKT_34_ID(x)	(((x) & 0x3f) << 13)
#define DSI_PKT_SEQ_3_HI_PKT_34_SIZE(x)	(((x) & 0x7) << 10)
#define DSI_PKT_SEQ_3_HI_PKT_33_EN(x)	(((x) & 0x1) << 9)
#define DSI_PKT_SEQ_3_HI_PKT_33_ID(x)	(((x) & 0x3f) << 3)
#define DSI_PKT_SEQ_3_HI_PKT_33_SIZE(x)	(((x) & 0x7) << 0)
227 | |||
228 | #define DSI_PKT_SEQ_4_LO 0x2b | ||
229 | #define DSI_PKT_SEQ_4_LO_SEQ_4_FORCE_LP(x) (((x) & 0x1) << 30) | ||
230 | #define DSI_PKT_SEQ_4_LO_PKT_42_EN(x) (((x) & 0x1) << 29) | ||
231 | #define DSI_PKT_SEQ_4_LO_PKT_42_ID(x) (((x) & 0x3f) << 23) | ||
232 | #define DSI_PKT_SEQ_4_LO_PKT_42_SIZE(x) (((x) & 0x7) << 20) | ||
233 | #define DSI_PKT_SEQ_4_LO_PKT_41_EN(x) (((x) & 0x1) << 19) | ||
234 | #define DSI_PKT_SEQ_4_LO_PKT_41_ID(x) (((x) & 0x3f) << 13) | ||
235 | #define DSI_PKT_SEQ_4_LO_PKT_41_SIZE(x) (((x) & 0x7) << 10) | ||
236 | #define DSI_PKT_SEQ_4_LO_PKT_40_EN(x) (((x) & 0x1) << 9) | ||
237 | #define DSI_PKT_SEQ_4_LO_PKT_40_ID(x) (((x) & 0x3f) << 3) | ||
238 | #define DSI_PKT_SEQ_4_LO_PKT_40_SIZE(x) (((x) & 0x7) << 0) | ||
239 | |||
240 | #define DSI_PKT_SEQ_4_HI 0x2c | ||
241 | #define DSI_PKT_SEQ_4_HI_PKT_45_EN(x) (((x) & 0x1) << 29) | ||
242 | #define DSI_PKT_SEQ_4_HI_PKT_45_ID(x) (((x) & 0x3f) << 23) | ||
243 | #define DSI_PKT_SEQ_4_HI_PKT_45_SIZE(x) (((x) & 0x7) << 20) | ||
244 | #define DSI_PKT_SEQ_4_HI_PKT_44_EN(x) (((x) & 0x1) << 19) | ||
245 | #define DSI_PKT_SEQ_4_HI_PKT_44_ID(x) (((x) & 0x3f) << 13) | ||
246 | #define DSI_PKT_SEQ_4_HI_PKT_44_SIZE(x) (((x) & 0x7) << 10) | ||
247 | #define DSI_PKT_SEQ_4_HI_PKT_43_EN(x) (((x) & 0x1) << 9) | ||
248 | #define DSI_PKT_SEQ_4_HI_PKT_43_ID(x) (((x) & 0x3f) << 3) | ||
249 | #define DSI_PKT_SEQ_4_HI_PKT_43_SIZE(x) (((x) & 0x7) << 0) | ||
250 | |||
251 | #define DSI_PKT_SEQ_5_LO 0x2d | ||
252 | #define DSI_PKT_SEQ_5_LO_SEQ_5_FORCE_LP(x) (((x) & 0x1) << 30) | ||
253 | #define DSI_PKT_SEQ_5_LO_PKT_52_EN(x) (((x) & 0x1) << 29) | ||
254 | #define DSI_PKT_SEQ_5_LO_PKT_52_ID(x) (((x) & 0x3f) << 23) | ||
255 | #define DSI_PKT_SEQ_5_LO_PKT_52_SIZE(x) (((x) & 0x7) << 20) | ||
256 | #define DSI_PKT_SEQ_5_LO_PKT_51_EN(x) (((x) & 0x1) << 19) | ||
257 | #define DSI_PKT_SEQ_5_LO_PKT_51_ID(x) (((x) & 0x3f) << 13) | ||
258 | #define DSI_PKT_SEQ_5_LO_PKT_51_SIZE(x) (((x) & 0x7) << 10) | ||
259 | #define DSI_PKT_SEQ_5_LO_PKT_50_EN(x) (((x) & 0x1) << 9) | ||
260 | #define DSI_PKT_SEQ_5_LO_PKT_50_ID(x) (((x) & 0x3f) << 3) | ||
261 | #define DSI_PKT_SEQ_5_LO_PKT_50_SIZE(x) (((x) & 0x7) << 0) | ||
262 | |||
263 | #define DSI_PKT_SEQ_5_HI 0x2e | ||
264 | #define DSI_PKT_SEQ_5_HI_PKT_55_EN(x) (((x) & 0x1) << 29) | ||
265 | #define DSI_PKT_SEQ_5_HI_PKT_55_ID(x) (((x) & 0x3f) << 23) | ||
266 | #define DSI_PKT_SEQ_5_HI_PKT_55_SIZE(x) (((x) & 0x7) << 20) | ||
267 | #define DSI_PKT_SEQ_5_HI_PKT_54_EN(x) (((x) & 0x1) << 19) | ||
268 | #define DSI_PKT_SEQ_5_HI_PKT_54_ID(x) (((x) & 0x3f) << 13) | ||
269 | #define DSI_PKT_SEQ_5_HI_PKT_54_SIZE(x) (((x) & 0x7) << 10) | ||
270 | #define DSI_PKT_SEQ_5_HI_PKT_53_EN(x) (((x) & 0x1) << 9) | ||
271 | #define DSI_PKT_SEQ_5_HI_PKT_53_ID(x) (((x) & 0x3f) << 3) | ||
272 | #define DSI_PKT_SEQ_5_HI_PKT_53_SIZE(x) (((x) & 0x7) << 0) | ||
273 | |||
274 | #define DSI_DCS_CMDS 0x33 | ||
275 | #define DSI_DCS_CMDS_LT5_DCS_CMD(x) (((x) & 0xff) << 8) | ||
276 | #define DSI_DCS_CMDS_LT3_DCS_CMD(x) (((x) & 0xff) << 0) | ||
277 | |||
278 | #define DSI_PKT_LEN_0_1 0x34 | ||
279 | #define DSI_PKT_LEN_0_1_LENGTH_1(x) (((x) & 0xffff) << 16) | ||
280 | #define DSI_PKT_LEN_0_1_LENGTH_0(x) (((x) & 0xffff) << 0) | ||
281 | |||
282 | #define DSI_PKT_LEN_2_3 0x35 | ||
283 | #define DSI_PKT_LEN_2_3_LENGTH_3(x) (((x) & 0xffff) << 16) | ||
284 | #define DSI_PKT_LEN_2_3_LENGTH_2(x) (((x) & 0xffff) << 0) | ||
285 | |||
286 | |||
287 | #define DSI_PKT_LEN_4_5 0x36 | ||
288 | #define DSI_PKT_LEN_4_5_LENGTH_5(x) (((x) & 0xffff) << 16) | ||
289 | #define DSI_PKT_LEN_4_5_LENGTH_4(x) (((x) & 0xffff) << 0) | ||
290 | |||
291 | #define DSI_PKT_LEN_6_7 0x37 | ||
292 | #define DSI_PKT_LEN_6_7_LENGTH_7(x) (((x) & 0xffff) << 16) | ||
293 | #define DSI_PKT_LEN_6_7_LENGTH_6(x) (((x) & 0xffff) << 0) | ||
294 | |||
295 | #define DSI_PHY_TIMING_0 0x3c | ||
296 | #define DSI_PHY_TIMING_0_THSDEXIT(x) (((x) & 0xff) << 24) | ||
297 | #define DSI_PHY_TIMING_0_THSTRAIL(x) (((x) & 0xff) << 16) | ||
298 | #define DSI_PHY_TIMING_0_TDATZERO(x) (((x) & 0xff) << 8) | ||
299 | #define DSI_PHY_TIMING_0_THSPREPR(x) (((x) & 0xff) << 0) | ||
300 | |||
301 | #define DSI_PHY_TIMING_1 0x3d | ||
302 | #define DSI_PHY_TIMING_1_TCLKTRAIL(x) (((x) & 0xff) << 24) | ||
303 | #define DSI_PHY_TIMING_1_TCLKPOST(x) (((x) & 0xff) << 16) | ||
304 | #define DSI_PHY_TIMING_1_TCLKZERO(x) (((x) & 0xff) << 8) | ||
305 | #define DSI_PHY_TIMING_1_TTLPX(x) (((x) & 0xff) << 0) | ||
306 | |||
307 | #define DSI_PHY_TIMING_2 0x3e | ||
308 | #define DSI_PHY_TIMING_2_TCLKPREPARE(x) (((x) & 0xff) << 16) | ||
309 | #define DSI_PHY_TIMING_2_TCLKPRE(x) (((x) & 0xff) << 8) | ||
310 | #define DSI_PHY_TIMING_2_TWAKEUP(x) (((x) & 0xff) << 0) | ||
311 | |||
312 | #define DSI_BTA_TIMING 0x3f | ||
313 | #define DSI_BTA_TIMING_TTAGET(x) (((x) & 0xff) << 16) | ||
314 | #define DSI_BTA_TIMING_TTASURE(x) (((x) & 0xff) << 8) | ||
315 | #define DSI_BTA_TIMING_TTAGO(x) (((x) & 0xff) << 0) | ||
316 | |||
317 | |||
318 | #define DSI_TIMEOUT_0 0x44 | ||
319 | #define DSI_TIMEOUT_0_LRXH_TO(x) (((x) & 0xffff) << 16) | ||
320 | #define DSI_TIMEOUT_0_HTX_TO(x) (((x) & 0xffff) << 0) | ||
321 | |||
322 | #define DSI_TIMEOUT_1 0x45 | ||
323 | #define DSI_TIMEOUT_1_PR_TO(x) (((x) & 0xffff) << 16) | ||
324 | #define DSI_TIMEOUT_1_TA_TO(x) (((x) & 0xffff) << 0) | ||
325 | |||
326 | #define DSI_TO_TALLY 0x46 | ||
327 | enum { | ||
328 | IN_RESET, | ||
329 | READY, | ||
330 | }; | ||
331 | #define DSI_TO_TALLY_P_RESET_STATUS(x) (((x) & 0x1) << 24) | ||
332 | #define DSI_TO_TALLY_TA_TALLY(x) (((x) & 0xff) << 16) | ||
333 | #define DSI_TO_TALLY_LRXH_TALLY(x) (((x) & 0xff) << 8) | ||
334 | #define DSI_TO_TALLY_HTX_TALLY(x) (((x) & 0xff) << 0) | ||
335 | |||
336 | #define DSI_PAD_CONTROL 0x4b | ||
337 | #define DSI_PAD_CONTROL_PAD_PULLDN_ENAB(x) (((x) & 0x1) << 28) | ||
338 | #define DSI_PAD_CONTROL_PAD_SLEWUPADJ(x) (((x) & 0x7) << 24) | ||
339 | #define DSI_PAD_CONTROL_PAD_SLEWDNADJ(x) (((x) & 0x7) << 20) | ||
340 | #define DSI_PAD_CONTROL_PAD_PREEMP_EN(x) (((x) & 0x1) << 19) | ||
341 | #define DSI_PAD_CONTROL_PAD_PDIO_CLK(x) (((x) & 0x1) << 18) | ||
342 | #define DSI_PAD_CONTROL_PAD_PDIO(x) (((x) & 0x3) << 16) | ||
343 | #define DSI_PAD_CONTROL_PAD_LPUPADJ(x) (((x) & 0x3) << 14) | ||
344 | #define DSI_PAD_CONTROL_PAD_LPDNADJ(x) (((x) & 0x3) << 12) | ||
345 | |||
346 | #define DSI_PAD_CONTROL_CD 0x4c | ||
347 | #define DSI_PAD_CD_STATUS 0x4d | ||
348 | #define DSI_VID_MODE_CONTROL 0x4e | ||
349 | |||
350 | #endif | ||
351 | |||
diff --git a/drivers/video/tegra/dc/edid.c b/drivers/video/tegra/dc/edid.c new file mode 100644 index 00000000000..fbcf2cc8e37 --- /dev/null +++ b/drivers/video/tegra/dc/edid.c | |||
@@ -0,0 +1,619 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/edid.c | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * Author: Erik Gilling <konkers@android.com> | ||
6 | * | ||
7 | * Copyright (C) 2010-2011 NVIDIA Corporation | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | |||
21 | #include <linux/debugfs.h> | ||
22 | #include <linux/fb.h> | ||
23 | #include <linux/i2c.h> | ||
24 | #include <linux/seq_file.h> | ||
25 | #include <linux/vmalloc.h> | ||
26 | |||
27 | #include "edid.h" | ||
28 | |||
/* One parsed-EDID snapshot, reference counted so readers can hold it
 * while a re-read replaces tegra_edid::data. */
struct tegra_edid_pvt {
	struct kref refcnt;		/* released via data_release() */
	struct tegra_edid_hdmi_eld eld;	/* audio info parsed from CEA block */
	bool support_stereo;		/* 3D_present bit seen in HDMI VSDB */
	bool support_underscan;		/* underscan bit from CEA extension */
	/* Note: dc_edid must remain the last member */
	struct tegra_dc_edid dc_edid;	/* raw EDID bytes; buf spills into the
					 * extra space vmalloc'd past the
					 * struct (see get_monspecs) */
};

struct tegra_edid {
	struct i2c_client *client;	/* dummy DDC device at address 0x50 */
	struct i2c_board_info info;
	int bus;			/* i2c bus number (also debugfs name) */

	struct tegra_edid_pvt *data;	/* current snapshot, kref-counted */

	struct mutex lock;		/* protects swaps of ->data */
};
47 | |||
#if defined(DEBUG) || defined(CONFIG_DEBUG_FS)
/*
 * Hex-dump the most recently read EDID into a seq_file, 16 bytes per
 * row ("edid[XXX] = aa bb ...").  Takes a reference on the current
 * EDID data via tegra_edid_get_data() and drops it before returning.
 */
static int tegra_edid_show(struct seq_file *s, void *unused)
{
	struct tegra_edid *edid = s->private;
	struct tegra_dc_edid *data;
	u8 *buf;
	int i;

	data = tegra_edid_get_data(edid);
	if (!data) {
		seq_printf(s, "No EDID\n");
		return 0;
	}

	buf = data->buf;

	for (i = 0; i < data->len; i++) {
		if (i % 16 == 0)
			seq_printf(s, "edid[%03x] =", i);

		seq_printf(s, " %02x", buf[i]);

		if (i % 16 == 15)
			seq_printf(s, "\n");
	}

	/* drop the reference taken by tegra_edid_get_data() above */
	tegra_edid_put_data(data);

	return 0;
}
#endif
79 | |||
#ifdef CONFIG_DEBUG_FS
static int tegra_edid_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_edid_show, inode->i_private);
}

static const struct file_operations tegra_edid_debug_fops = {
	.open = tegra_edid_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * Register a read-only debugfs file named "edid<bus>" (root of debugfs)
 * that dumps the current EDID via tegra_edid_show().
 */
void tegra_edid_debug_add(struct tegra_edid *edid)
{
	/* buffer sized for a single-digit bus number ("edidX") */
	char name[] = "edidX";

	snprintf(name, sizeof(name), "edid%1d", edid->bus);
	debugfs_create_file(name, S_IRUGO, NULL, edid, &tegra_edid_debug_fops);
}
#else
/* no-op stub when debugfs is not configured */
void tegra_edid_debug_add(struct tegra_edid *edid)
{
}
#endif
105 | |||
#ifdef DEBUG
static char tegra_edid_dump_buff[16 * 1024];

/*
 * Debug build only: render the EDID hex dump into a static buffer via
 * tegra_edid_show() and printk it.  The dump is emitted in 256-byte
 * chunks (temporarily NUL-terminating each chunk in place) so no single
 * printk is overly long.  Uses a static buffer, so not re-entrant.
 */
static void tegra_edid_dump(struct tegra_edid *edid)
{
	struct seq_file s;
	int i;
	char c;

	memset(&s, 0x0, sizeof(s));

	/* hand-built seq_file backed by the static buffer */
	s.buf = tegra_edid_dump_buff;
	s.size = sizeof(tegra_edid_dump_buff);
	s.private = edid;

	tegra_edid_show(&s, NULL);

	i = 0;
	while (i < s.count ) {
		if ((s.count - i) > 256) {
			/* temporarily terminate, print, then restore */
			c = s.buf[i + 256];
			s.buf[i + 256] = 0;
			printk("%s", s.buf + i);
			s.buf[i + 256] = c;
		} else {
			printk("%s", s.buf + i);
		}
		i += 256;
	}
}
#else
/* no-op stub for non-debug builds */
static void tegra_edid_dump(struct tegra_edid *edid)
{
}
#endif
141 | |||
142 | int tegra_edid_read_block(struct tegra_edid *edid, int block, u8 *data) | ||
143 | { | ||
144 | u8 block_buf[] = {block >> 1}; | ||
145 | u8 cmd_buf[] = {(block & 0x1) * 128}; | ||
146 | int status; | ||
147 | struct i2c_msg msg[] = { | ||
148 | { | ||
149 | .addr = 0x30, | ||
150 | .flags = 0, | ||
151 | .len = 1, | ||
152 | .buf = block_buf, | ||
153 | }, | ||
154 | { | ||
155 | .addr = 0x50, | ||
156 | .flags = 0, | ||
157 | .len = 1, | ||
158 | .buf = cmd_buf, | ||
159 | }, | ||
160 | { | ||
161 | .addr = 0x50, | ||
162 | .flags = I2C_M_RD, | ||
163 | .len = 128, | ||
164 | .buf = data, | ||
165 | }}; | ||
166 | struct i2c_msg *m; | ||
167 | int msg_len; | ||
168 | |||
169 | if (block > 1) { | ||
170 | msg_len = 3; | ||
171 | m = msg; | ||
172 | } else { | ||
173 | msg_len = 2; | ||
174 | m = &msg[1]; | ||
175 | } | ||
176 | |||
177 | status = i2c_transfer(edid->client->adapter, m, msg_len); | ||
178 | |||
179 | if (status < 0) | ||
180 | return status; | ||
181 | |||
182 | if (status != msg_len) | ||
183 | return -EIO; | ||
184 | |||
185 | return 0; | ||
186 | } | ||
187 | |||
/*
 * Parse a 128-byte CEA-861 extension block.
 *
 * @raw:  start of the extension block (raw[0] is the tag byte).
 * @idx:  offset of the first detailed timing descriptor (byte 2 of the
 *        block as passed by the callers), i.e. the end of the data
 *        block collection that is walked below.
 * @edid: parse results are stored here (eld fields, support_stereo,
 *        support_underscan).
 *
 * Always returns 0.
 */
int tegra_edid_parse_ext_block(const u8 *raw, int idx,
			       struct tegra_edid_pvt *edid)
{
	const u8 *ptr;
	u8 tmp;
	u8 code;
	int len;
	int i;
	bool basic_audio = false;

	ptr = &raw[0];

	/* If CEA 861 block get info for eld struct */
	if (edid && ptr) {
		if (*ptr <= 3)
			edid->eld.eld_ver = 0x02;
		edid->eld.cea_edid_ver = ptr[1];

		/* check for basic audio support in CEA 861 block */
		if(raw[3] & (1<<6)) {
			/* For basic audio, set spk_alloc to Left+Right.
			 * If there is a Speaker Alloc block this will
			 * get over written with that value */
			basic_audio = true;
		}
	}

	/* bit 7 of byte 3: sink supports underscan */
	if (raw[3] & 0x80)
		edid->support_underscan = 1;
	else
		edid->support_underscan = 0;

	/* data block collection starts at byte 4 and runs to the first DTD */
	ptr = &raw[4];

	while (ptr < &raw[idx]) {
		/* byte 0 of each data block: tag (bits 7:5), length (4:0);
		 * the length does NOT include this header byte */
		tmp = *ptr;
		len = tmp & 0x1f;

		/* HDMI Specification v1.4a, section 8.3.2:
		 * see Table 8-16 for HDMI VSDB format.
		 * data blocks have tags in top 3 bits:
		 * tag code 2: video data block
		 * tag code 3: vendor specific data block
		 */
		code = (tmp >> 5) & 0x7;
		switch (code) {
		/* tag 1: audio data block -- copy the short audio
		 * descriptors into the ELD */
		case 1:
		{
			edid->eld.sad_count = len;
			edid->eld.conn_type = 0x00;
			edid->eld.support_hdcp = 0x00;
			for (i = 0; (i < len) && (i < ELD_MAX_SAD); i ++)
				edid->eld.sad[i] = ptr[i + 1];
			len++;
			ptr += len; /* adding the header */
			/* Got an audio data block so enable audio */
			if(basic_audio == true)
				edid->eld.spk_alloc = 1;
			break;
		}
		/* case 2 is commented out for now */
		/* tag 3: vendor specific data block -- bytes 1..3 must be
		 * the HDMI IEEE OUI (0x000c03, little-endian) */
		case 3:
		{
			int j = 0;

			if ((ptr[1] == 0x03) &&
				(ptr[2] == 0x0c) &&
				(ptr[3] == 0)) {
				edid->eld.port_id[0] = ptr[4];
				edid->eld.port_id[1] = ptr[5];
			}
			if ((len >= 8) &&
				(ptr[1] == 0x03) &&
				(ptr[2] == 0x0c) &&
				(ptr[3] == 0)) {
				j = 8;
				tmp = ptr[j++];
				/* HDMI_Video_present? */
				if (tmp & 0x20) {
					/* Latency_Fields_present? */
					if (tmp & 0x80)
						j += 2;
					/* I_Latency_Fields_present? */
					if (tmp & 0x40)
						j += 2;
					/* 3D_present? */
					if (j <= len && (ptr[j] & 0x80))
						edid->support_stereo = 1;
				}
			}
			if ((len > 5) &&
				(ptr[1] == 0x03) &&
				(ptr[2] == 0x0c) &&
				(ptr[3] == 0)) {

				edid->eld.support_ai = (ptr[6] & 0x80);
			}

			if ((len > 9) &&
				(ptr[1] == 0x03) &&
				(ptr[2] == 0x0c) &&
				(ptr[3] == 0)) {

				edid->eld.aud_synch_delay = ptr[10];
			}
			len++;
			ptr += len; /* adding the header */
			break;
		}
		/* tag 4: speaker allocation data block */
		case 4:
		{
			edid->eld.spk_alloc = ptr[1];
			len++;
			ptr += len; /* adding the header */
			break;
		}
		default:
			len++; /* len does not include header */
			ptr += len;
			break;
		}
	}

	return 0;
}
313 | |||
314 | int tegra_edid_mode_support_stereo(struct fb_videomode *mode) | ||
315 | { | ||
316 | if (!mode) | ||
317 | return 0; | ||
318 | |||
319 | if (mode->xres == 1280 && | ||
320 | mode->yres == 720 && | ||
321 | ((mode->refresh == 60) || (mode->refresh == 50))) | ||
322 | return 1; | ||
323 | |||
324 | /* Disabling 1080p stereo mode due to bug 869099. */ | ||
325 | /* Must re-enable this to 1 once it is fixed. */ | ||
326 | if (mode->xres == 1920 && mode->yres == 1080 && mode->refresh == 24) | ||
327 | return 0; | ||
328 | |||
329 | return 0; | ||
330 | } | ||
331 | |||
/* kref release callback: frees a tegra_edid_pvt allocated with vmalloc. */
static void data_release(struct kref *ref)
{
	struct tegra_edid_pvt *data =
		container_of(ref, struct tegra_edid_pvt, refcnt);
	vfree(data);
}
338 | |||
339 | int tegra_edid_get_monspecs_test(struct tegra_edid *edid, | ||
340 | struct fb_monspecs *specs, unsigned char *edid_ptr) | ||
341 | { | ||
342 | int i, j, ret; | ||
343 | int extension_blocks; | ||
344 | struct tegra_edid_pvt *new_data, *old_data; | ||
345 | u8 *data; | ||
346 | |||
347 | new_data = vmalloc(SZ_32K + sizeof(struct tegra_edid_pvt)); | ||
348 | if (!new_data) | ||
349 | return -ENOMEM; | ||
350 | |||
351 | kref_init(&new_data->refcnt); | ||
352 | |||
353 | new_data->support_stereo = 0; | ||
354 | new_data->support_underscan = 0; | ||
355 | |||
356 | data = new_data->dc_edid.buf; | ||
357 | memcpy(data, edid_ptr, 128); | ||
358 | |||
359 | memset(specs, 0x0, sizeof(struct fb_monspecs)); | ||
360 | memset(&new_data->eld, 0x0, sizeof(new_data->eld)); | ||
361 | fb_edid_to_monspecs(data, specs); | ||
362 | if (specs->modedb == NULL) { | ||
363 | ret = -EINVAL; | ||
364 | goto fail; | ||
365 | } | ||
366 | |||
367 | memcpy(new_data->eld.monitor_name, specs->monitor, | ||
368 | sizeof(specs->monitor)); | ||
369 | |||
370 | new_data->eld.mnl = strlen(new_data->eld.monitor_name) + 1; | ||
371 | new_data->eld.product_id[0] = data[0x8]; | ||
372 | new_data->eld.product_id[1] = data[0x9]; | ||
373 | new_data->eld.manufacture_id[0] = data[0xA]; | ||
374 | new_data->eld.manufacture_id[1] = data[0xB]; | ||
375 | |||
376 | extension_blocks = data[0x7e]; | ||
377 | for (i = 1; i <= extension_blocks; i++) { | ||
378 | memcpy(data+128, edid_ptr+128, 128); | ||
379 | |||
380 | if (data[i * 128] == 0x2) { | ||
381 | fb_edid_add_monspecs(data + i * 128, specs); | ||
382 | |||
383 | tegra_edid_parse_ext_block(data + i * 128, | ||
384 | data[i * 128 + 2], new_data); | ||
385 | |||
386 | if (new_data->support_stereo) { | ||
387 | for (j = 0; j < specs->modedb_len; j++) { | ||
388 | if (tegra_edid_mode_support_stereo( | ||
389 | &specs->modedb[j])) | ||
390 | specs->modedb[j].vmode |= | ||
391 | #ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT | ||
392 | FB_VMODE_STEREO_FRAME_PACK; | ||
393 | #else | ||
394 | FB_VMODE_STEREO_LEFT_RIGHT; | ||
395 | #endif | ||
396 | } | ||
397 | } | ||
398 | } | ||
399 | } | ||
400 | |||
401 | new_data->dc_edid.len = i * 128; | ||
402 | |||
403 | mutex_lock(&edid->lock); | ||
404 | old_data = edid->data; | ||
405 | edid->data = new_data; | ||
406 | mutex_unlock(&edid->lock); | ||
407 | |||
408 | if (old_data) | ||
409 | kref_put(&old_data->refcnt, data_release); | ||
410 | |||
411 | tegra_edid_dump(edid); | ||
412 | return 0; | ||
413 | fail: | ||
414 | vfree(new_data); | ||
415 | return ret; | ||
416 | } | ||
417 | |||
418 | int tegra_edid_get_monspecs(struct tegra_edid *edid, struct fb_monspecs *specs) | ||
419 | { | ||
420 | int i; | ||
421 | int j; | ||
422 | int ret; | ||
423 | int extension_blocks; | ||
424 | struct tegra_edid_pvt *new_data, *old_data; | ||
425 | u8 *data; | ||
426 | |||
427 | new_data = vmalloc(SZ_32K + sizeof(struct tegra_edid_pvt)); | ||
428 | if (!new_data) | ||
429 | return -ENOMEM; | ||
430 | |||
431 | kref_init(&new_data->refcnt); | ||
432 | |||
433 | new_data->support_stereo = 0; | ||
434 | |||
435 | data = new_data->dc_edid.buf; | ||
436 | |||
437 | ret = tegra_edid_read_block(edid, 0, data); | ||
438 | if (ret) | ||
439 | goto fail; | ||
440 | |||
441 | memset(specs, 0x0, sizeof(struct fb_monspecs)); | ||
442 | memset(&new_data->eld, 0x0, sizeof(new_data->eld)); | ||
443 | fb_edid_to_monspecs(data, specs); | ||
444 | if (specs->modedb == NULL) { | ||
445 | ret = -EINVAL; | ||
446 | goto fail; | ||
447 | } | ||
448 | memcpy(new_data->eld.monitor_name, specs->monitor, sizeof(specs->monitor)); | ||
449 | new_data->eld.mnl = strlen(new_data->eld.monitor_name) + 1; | ||
450 | new_data->eld.product_id[0] = data[0x8]; | ||
451 | new_data->eld.product_id[1] = data[0x9]; | ||
452 | new_data->eld.manufacture_id[0] = data[0xA]; | ||
453 | new_data->eld.manufacture_id[1] = data[0xB]; | ||
454 | |||
455 | extension_blocks = data[0x7e]; | ||
456 | |||
457 | for (i = 1; i <= extension_blocks; i++) { | ||
458 | ret = tegra_edid_read_block(edid, i, data + i * 128); | ||
459 | if (ret < 0) | ||
460 | break; | ||
461 | |||
462 | if (data[i * 128] == 0x2) { | ||
463 | fb_edid_add_monspecs(data + i * 128, specs); | ||
464 | |||
465 | tegra_edid_parse_ext_block(data + i * 128, | ||
466 | data[i * 128 + 2], new_data); | ||
467 | |||
468 | if (new_data->support_stereo) { | ||
469 | for (j = 0; j < specs->modedb_len; j++) { | ||
470 | if (tegra_edid_mode_support_stereo( | ||
471 | &specs->modedb[j])) | ||
472 | specs->modedb[j].vmode |= | ||
473 | #ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT | ||
474 | FB_VMODE_STEREO_FRAME_PACK; | ||
475 | #else | ||
476 | FB_VMODE_STEREO_LEFT_RIGHT; | ||
477 | #endif | ||
478 | } | ||
479 | } | ||
480 | } | ||
481 | } | ||
482 | |||
483 | new_data->dc_edid.len = i * 128; | ||
484 | |||
485 | mutex_lock(&edid->lock); | ||
486 | old_data = edid->data; | ||
487 | edid->data = new_data; | ||
488 | mutex_unlock(&edid->lock); | ||
489 | |||
490 | if (old_data) | ||
491 | kref_put(&old_data->refcnt, data_release); | ||
492 | |||
493 | tegra_edid_dump(edid); | ||
494 | return 0; | ||
495 | |||
496 | fail: | ||
497 | vfree(new_data); | ||
498 | return ret; | ||
499 | } | ||
500 | |||
501 | int tegra_edid_underscan_supported(struct tegra_edid *edid) | ||
502 | { | ||
503 | if ((!edid) || (!edid->data)) | ||
504 | return 0; | ||
505 | |||
506 | return edid->data->support_underscan; | ||
507 | } | ||
508 | |||
509 | int tegra_edid_get_eld(struct tegra_edid *edid, struct tegra_edid_hdmi_eld *elddata) | ||
510 | { | ||
511 | if (!elddata || !edid->data) | ||
512 | return -EFAULT; | ||
513 | |||
514 | memcpy(elddata,&edid->data->eld,sizeof(struct tegra_edid_hdmi_eld)); | ||
515 | |||
516 | return 0; | ||
517 | } | ||
518 | |||
519 | struct tegra_edid *tegra_edid_create(int bus) | ||
520 | { | ||
521 | struct tegra_edid *edid; | ||
522 | struct i2c_adapter *adapter; | ||
523 | int err; | ||
524 | |||
525 | edid = kzalloc(sizeof(struct tegra_edid), GFP_KERNEL); | ||
526 | if (!edid) | ||
527 | return ERR_PTR(-ENOMEM); | ||
528 | |||
529 | mutex_init(&edid->lock); | ||
530 | strlcpy(edid->info.type, "tegra_edid", sizeof(edid->info.type)); | ||
531 | edid->bus = bus; | ||
532 | edid->info.addr = 0x50; | ||
533 | edid->info.platform_data = edid; | ||
534 | |||
535 | adapter = i2c_get_adapter(bus); | ||
536 | if (!adapter) { | ||
537 | pr_err("can't get adpater for bus %d\n", bus); | ||
538 | err = -EBUSY; | ||
539 | goto free_edid; | ||
540 | } | ||
541 | |||
542 | edid->client = i2c_new_device(adapter, &edid->info); | ||
543 | i2c_put_adapter(adapter); | ||
544 | |||
545 | if (!edid->client) { | ||
546 | pr_err("can't create new device\n"); | ||
547 | err = -EBUSY; | ||
548 | goto free_edid; | ||
549 | } | ||
550 | |||
551 | tegra_edid_debug_add(edid); | ||
552 | |||
553 | return edid; | ||
554 | |||
555 | free_edid: | ||
556 | kfree(edid); | ||
557 | |||
558 | return ERR_PTR(err); | ||
559 | } | ||
560 | |||
/*
 * Tear down an object created by tegra_edid_create(): release the I2C
 * client, drop the reference on any parsed EDID snapshot, and free the
 * container.  Outstanding tegra_edid_get_data() references keep the
 * snapshot alive until tegra_edid_put_data() drops the last kref.
 */
void tegra_edid_destroy(struct tegra_edid *edid)
{
	i2c_release_client(edid->client);
	if (edid->data)
		kref_put(&edid->data->refcnt, data_release);
	kfree(edid);
}
568 | |||
569 | struct tegra_dc_edid *tegra_edid_get_data(struct tegra_edid *edid) | ||
570 | { | ||
571 | struct tegra_edid_pvt *data; | ||
572 | |||
573 | mutex_lock(&edid->lock); | ||
574 | data = edid->data; | ||
575 | if (data) | ||
576 | kref_get(&data->refcnt); | ||
577 | mutex_unlock(&edid->lock); | ||
578 | |||
579 | return data ? &data->dc_edid : NULL; | ||
580 | } | ||
581 | |||
582 | void tegra_edid_put_data(struct tegra_dc_edid *data) | ||
583 | { | ||
584 | struct tegra_edid_pvt *pvt; | ||
585 | |||
586 | if (!data) | ||
587 | return; | ||
588 | |||
589 | pvt = container_of(data, struct tegra_edid_pvt, dc_edid); | ||
590 | |||
591 | kref_put(&pvt->refcnt, data_release); | ||
592 | } | ||
593 | |||
/* Matches the "tegra_edid" device registered in tegra_edid_create(). */
static const struct i2c_device_id tegra_edid_id[] = {
	{ "tegra_edid", 0 },
	{ }
};

MODULE_DEVICE_TABLE(i2c, tegra_edid_id);

/* Skeleton driver so the client binds; no probe/remove callbacks --
 * all transfers go through i2c_transfer() in tegra_edid_read_block(). */
static struct i2c_driver tegra_edid_driver = {
	.id_table = tegra_edid_id,
	.driver = {
		.name = "tegra_edid",
	},
};
607 | |||
/* Module entry: register the skeleton I2C driver above. */
static int __init tegra_edid_init(void)
{
	return i2c_add_driver(&tegra_edid_driver);
}

/* Module exit: unregister the I2C driver. */
static void __exit tegra_edid_exit(void)
{
	i2c_del_driver(&tegra_edid_driver);
}

module_init(tegra_edid_init);
module_exit(tegra_edid_exit);
diff --git a/drivers/video/tegra/dc/edid.h b/drivers/video/tegra/dc/edid.h new file mode 100644 index 00000000000..77db36f4adb --- /dev/null +++ b/drivers/video/tegra/dc/edid.h | |||
@@ -0,0 +1,62 @@ | |||
/*
 * drivers/video/tegra/dc/edid.h
 *
 * Copyright (C) 2010 Google, Inc.
 * Author: Erik Gilling <konkers@android.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef __DRIVERS_VIDEO_TEGRA_DC_EDID_H
#define __DRIVERS_VIDEO_TEGRA_DC_EDID_H

#include <linux/i2c.h>
#include <linux/wait.h>
#include <mach/dc.h>

#define ELD_MAX_MNL	16	/* max monitor-name length stored */
#define ELD_MAX_SAD	16	/* max short audio descriptors stored */
struct tegra_edid;		/* opaque; defined in edid.c */

/*
 * ELD: EDID Like Data
 *
 * Audio-related capability data extracted from the EDID's CEA-861
 * extension block, consumed by the HDMI audio path.
 */
struct tegra_edid_hdmi_eld {
	u8 baseline_len;
	u8 eld_ver;			/* ELD structure version */
	u8 cea_edid_ver;		/* CEA extension block revision */
	char monitor_name[ELD_MAX_MNL + 1];	/* NUL-terminated */
	u8 mnl;				/* monitor-name length incl. NUL */
	u8 manufacture_id[2];		/* EDID bytes 0x0A/0x0B */
	u8 product_id[2];		/* EDID bytes 0x08/0x09 */
	u8 port_id[8];			/* from HDMI VSDB */
	u8 support_hdcp;
	u8 support_ai;			/* ACP/ISRC (AI) support bit */
	u8 conn_type;
	u8 aud_synch_delay;		/* audio latency from VSDB */
	u8 spk_alloc;			/* speaker allocation bitmap */
	u8 sad_count;			/* number of valid entries in sad[] */
	u8 sad[ELD_MAX_SAD];		/* short audio descriptor bytes */
};

struct tegra_edid *tegra_edid_create(int bus);
void tegra_edid_destroy(struct tegra_edid *edid);

/* Test-only variant: parses a caller-supplied raw EDID buffer. */
int tegra_edid_get_monspecs_test(struct tegra_edid *edid,
				struct fb_monspecs *specs, u8 *edid_ptr);
int tegra_edid_get_monspecs(struct tegra_edid *edid, struct fb_monspecs *specs);
int tegra_edid_get_eld(struct tegra_edid *edid, struct tegra_edid_hdmi_eld *elddata);

/* Reference-counted access to the raw EDID bytes last read. */
struct tegra_dc_edid *tegra_edid_get_data(struct tegra_edid *edid);
void tegra_edid_put_data(struct tegra_dc_edid *data);

int tegra_edid_underscan_supported(struct tegra_edid *edid);
#endif
diff --git a/drivers/video/tegra/dc/ext/Makefile b/drivers/video/tegra/dc/ext/Makefile new file mode 100644 index 00000000000..19860ab5db1 --- /dev/null +++ b/drivers/video/tegra/dc/ext/Makefile | |||
@@ -0,0 +1,5 @@ | |||
# Tegra DC extension interface: always built into the kernel image.
obj-y += dev.o
obj-y += util.o
obj-y += cursor.o
obj-y += events.o
obj-y += control.o
diff --git a/drivers/video/tegra/dc/ext/control.c b/drivers/video/tegra/dc/ext/control.c new file mode 100644 index 00000000000..9caf3e11c16 --- /dev/null +++ b/drivers/video/tegra/dc/ext/control.c | |||
@@ -0,0 +1,279 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/ext/control.c | ||
3 | * | ||
4 | * Copyright (C) 2011, NVIDIA Corporation | ||
5 | * | ||
6 | * Author: Robert Morell <rmorell@nvidia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | */ | ||
18 | |||
19 | #include <linux/device.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/file.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | |||
27 | #include "tegra_dc_ext_priv.h" | ||
28 | |||
29 | static struct tegra_dc_ext_control g_control; | ||
30 | |||
/*
 * Forward a hotplug notification on @output to the global control
 * device so it can be queued as an event for interested users.
 */
int tegra_dc_ext_process_hotplug(int output)
{
	return tegra_dc_ext_queue_hotplug(&g_control, output);
}
35 | |||
36 | static int | ||
37 | get_output_properties(struct tegra_dc_ext_control_output_properties *properties) | ||
38 | { | ||
39 | struct tegra_dc *dc; | ||
40 | |||
41 | /* TODO: this should be more dynamic */ | ||
42 | if (properties->handle > 2) | ||
43 | return -EINVAL; | ||
44 | |||
45 | switch (properties->handle) { | ||
46 | case 0: | ||
47 | properties->type = TEGRA_DC_EXT_LVDS; | ||
48 | break; | ||
49 | case 1: | ||
50 | properties->type = TEGRA_DC_EXT_HDMI; | ||
51 | break; | ||
52 | default: | ||
53 | return -EINVAL; | ||
54 | } | ||
55 | |||
56 | properties->associated_head = properties->handle; | ||
57 | properties->head_mask = (1 << properties->associated_head); | ||
58 | |||
59 | dc = tegra_dc_get_dc(properties->associated_head); | ||
60 | properties->connected = tegra_dc_get_connected(dc); | ||
61 | |||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static int get_output_edid(struct tegra_dc_ext_control_output_edid *edid) | ||
66 | { | ||
67 | struct tegra_dc *dc; | ||
68 | size_t user_size = edid->size; | ||
69 | struct tegra_dc_edid *dc_edid = NULL; | ||
70 | int ret; | ||
71 | |||
72 | /* TODO: this should be more dynamic */ | ||
73 | if (edid->handle > 2) | ||
74 | return -EINVAL; | ||
75 | |||
76 | dc = tegra_dc_get_dc(edid->handle); | ||
77 | |||
78 | dc_edid = tegra_dc_get_edid(dc); | ||
79 | if (IS_ERR(dc_edid)) | ||
80 | return PTR_ERR(dc_edid); | ||
81 | |||
82 | if (!dc_edid) { | ||
83 | edid->size = 0; | ||
84 | } else { | ||
85 | edid->size = dc_edid->len; | ||
86 | |||
87 | if (user_size < edid->size) { | ||
88 | ret = -EFBIG; | ||
89 | goto done; | ||
90 | } | ||
91 | |||
92 | if (copy_to_user(edid->data, dc_edid->buf, edid->size)) { | ||
93 | ret = -EFAULT; | ||
94 | goto done; | ||
95 | } | ||
96 | |||
97 | } | ||
98 | |||
99 | done: | ||
100 | if (dc_edid) | ||
101 | tegra_dc_put_edid(dc_edid); | ||
102 | |||
103 | return ret; | ||
104 | } | ||
105 | |||
106 | static int set_event_mask(struct tegra_dc_ext_control_user *user, u32 mask) | ||
107 | { | ||
108 | struct list_head *list, *tmp; | ||
109 | |||
110 | if (mask & ~TEGRA_DC_EXT_EVENT_MASK_ALL) | ||
111 | return -EINVAL; | ||
112 | |||
113 | mutex_lock(&user->lock); | ||
114 | |||
115 | user->event_mask = mask; | ||
116 | |||
117 | list_for_each_safe(list, tmp, &user->event_list) { | ||
118 | struct tegra_dc_ext_event_list *ev_list; | ||
119 | ev_list = list_entry(list, struct tegra_dc_ext_event_list, | ||
120 | list); | ||
121 | if (!(mask & ev_list->event.type)) { | ||
122 | list_del(list); | ||
123 | kfree(ev_list); | ||
124 | } | ||
125 | } | ||
126 | mutex_unlock(&user->lock); | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | |||
/* Report the static capability bitmask advertised by this driver build. */
static int get_capabilities(struct tegra_dc_ext_control_capabilities *caps)
{
	caps->caps = TEGRA_DC_EXT_CAPABILITIES;
	return 0;
}
136 | |||
/*
 * ioctl handler for the control device.  Each GET_* command copies an
 * argument struct from userspace (where applicable), fills it in, and
 * copies it back; note that the struct is copied back even when the
 * helper failed, so the helper's partial updates (e.g. edid->size) are
 * visible to the caller alongside the error return.
 */
static long tegra_dc_ext_control_ioctl(struct file *filp, unsigned int cmd,
					unsigned long arg)
{
	void __user *user_arg = (void __user *)arg;
	struct tegra_dc_ext_control_user *user = filp->private_data;

	switch (cmd) {
	case TEGRA_DC_EXT_CONTROL_GET_NUM_OUTPUTS:
	{
		u32 num = tegra_dc_ext_get_num_outputs();

		if (copy_to_user(user_arg, &num, sizeof(num)))
			return -EFAULT;

		return 0;
	}
	case TEGRA_DC_EXT_CONTROL_GET_OUTPUT_PROPERTIES:
	{
		struct tegra_dc_ext_control_output_properties args;
		int ret;

		if (copy_from_user(&args, user_arg, sizeof(args)))
			return -EFAULT;

		ret = get_output_properties(&args);

		/* copy back unconditionally; ret (if any) takes precedence */
		if (copy_to_user(user_arg, &args, sizeof(args)))
			return -EFAULT;

		return ret;
	}
	case TEGRA_DC_EXT_CONTROL_GET_OUTPUT_EDID:
	{
		struct tegra_dc_ext_control_output_edid args;
		int ret;

		if (copy_from_user(&args, user_arg, sizeof(args)))
			return -EFAULT;

		ret = get_output_edid(&args);

		if (copy_to_user(user_arg, &args, sizeof(args)))
			return -EFAULT;

		return ret;
	}
	case TEGRA_DC_EXT_CONTROL_SET_EVENT_MASK:
		/* mask is passed by value, not as a pointer */
		return set_event_mask(user, (u32) arg);
	case TEGRA_DC_EXT_CONTROL_GET_CAPABILITIES:
	{
		struct tegra_dc_ext_control_capabilities args;
		int ret;

		ret = get_capabilities(&args);

		if (copy_to_user(user_arg, &args, sizeof(args)))
			return -EFAULT;

		return ret;
	}
	default:
		return -EINVAL;
	}
}
201 | |||
202 | static int tegra_dc_ext_control_open(struct inode *inode, struct file *filp) | ||
203 | { | ||
204 | struct tegra_dc_ext_control_user *user; | ||
205 | struct tegra_dc_ext_control *control; | ||
206 | |||
207 | user = kzalloc(sizeof(*user), GFP_KERNEL); | ||
208 | if (!user) | ||
209 | return -ENOMEM; | ||
210 | |||
211 | control = container_of(inode->i_cdev, struct tegra_dc_ext_control, | ||
212 | cdev); | ||
213 | user->control = control;; | ||
214 | |||
215 | INIT_LIST_HEAD(&user->event_list); | ||
216 | mutex_init(&user->lock); | ||
217 | |||
218 | filp->private_data = user; | ||
219 | |||
220 | mutex_lock(&control->lock); | ||
221 | list_add(&user->list, &control->users); | ||
222 | mutex_unlock(&control->lock); | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static int tegra_dc_ext_control_release(struct inode *inode, struct file *filp) | ||
228 | { | ||
229 | struct tegra_dc_ext_control_user *user = filp->private_data; | ||
230 | struct tegra_dc_ext_control *control = user->control; | ||
231 | |||
232 | /* This will free any pending events for this user */ | ||
233 | set_event_mask(user, 0); | ||
234 | |||
235 | mutex_lock(&control->lock); | ||
236 | list_del(&user->list); | ||
237 | mutex_unlock(&control->lock); | ||
238 | |||
239 | kfree(user); | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | |||
/* File operations for the control/event char device ("tegra_dc_ctrl");
 * read/poll are implemented by the event queue in events.c. */
static const struct file_operations tegra_dc_ext_event_devops = {
	.owner =		THIS_MODULE,
	.open =			tegra_dc_ext_control_open,
	.release =		tegra_dc_ext_control_release,
	.read =			tegra_dc_ext_event_read,
	.poll =			tegra_dc_ext_event_poll,
	.unlocked_ioctl = tegra_dc_ext_control_ioctl,
};
252 | |||
253 | int tegra_dc_ext_control_init(void) | ||
254 | { | ||
255 | struct tegra_dc_ext_control *control = &g_control; | ||
256 | int ret; | ||
257 | |||
258 | cdev_init(&control->cdev, &tegra_dc_ext_event_devops); | ||
259 | control->cdev.owner = THIS_MODULE; | ||
260 | ret = cdev_add(&control->cdev, tegra_dc_ext_devno, 1); | ||
261 | if (ret) | ||
262 | return ret; | ||
263 | |||
264 | control->dev = device_create(tegra_dc_ext_class, | ||
265 | NULL, | ||
266 | tegra_dc_ext_devno, | ||
267 | NULL, | ||
268 | "tegra_dc_ctrl"); | ||
269 | if (IS_ERR(control->dev)) { | ||
270 | ret = PTR_ERR(control->dev); | ||
271 | cdev_del(&control->cdev); | ||
272 | } | ||
273 | |||
274 | mutex_init(&control->lock); | ||
275 | |||
276 | INIT_LIST_HEAD(&control->users); | ||
277 | |||
278 | return ret; | ||
279 | } | ||
diff --git a/drivers/video/tegra/dc/ext/cursor.c b/drivers/video/tegra/dc/ext/cursor.c new file mode 100644 index 00000000000..d8fa5fd8e6d --- /dev/null +++ b/drivers/video/tegra/dc/ext/cursor.c | |||
@@ -0,0 +1,203 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/ext/cursor.c | ||
3 | * | ||
4 | * Copyright (C) 2011, NVIDIA Corporation | ||
5 | * | ||
6 | * Author: Robert Morell <rmorell@nvidia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | */ | ||
18 | |||
19 | #include <video/tegra_dc_ext.h> | ||
20 | |||
21 | #include "tegra_dc_ext_priv.h" | ||
22 | |||
23 | /* ugh */ | ||
24 | #include "../dc_priv.h" | ||
25 | #include "../dc_reg.h" | ||
26 | |||
27 | int tegra_dc_ext_get_cursor(struct tegra_dc_ext_user *user) | ||
28 | { | ||
29 | struct tegra_dc_ext *ext = user->ext; | ||
30 | int ret = 0; | ||
31 | |||
32 | mutex_lock(&ext->cursor.lock); | ||
33 | |||
34 | if (!ext->cursor.user) | ||
35 | ext->cursor.user = user; | ||
36 | else if (ext->cursor.user != user) | ||
37 | ret = -EBUSY; | ||
38 | |||
39 | mutex_unlock(&ext->cursor.lock); | ||
40 | |||
41 | return ret; | ||
42 | } | ||
43 | |||
44 | int tegra_dc_ext_put_cursor(struct tegra_dc_ext_user *user) | ||
45 | { | ||
46 | struct tegra_dc_ext *ext = user->ext; | ||
47 | int ret = 0; | ||
48 | |||
49 | mutex_lock(&ext->cursor.lock); | ||
50 | |||
51 | if (ext->cursor.user == user) | ||
52 | ext->cursor.user = 0; | ||
53 | else | ||
54 | ret = -EACCES; | ||
55 | |||
56 | mutex_unlock(&ext->cursor.lock); | ||
57 | |||
58 | return ret; | ||
59 | } | ||
60 | |||
/*
 * Program the cursor colors, image address and size into the display
 * controller registers.  Caller must hold dc->lock (see
 * tegra_dc_ext_set_cursor_image); the writes are latched by a
 * GENERAL_ACT_REQ issued by the caller.
 */
static void set_cursor_image_hw(struct tegra_dc *dc,
				struct tegra_dc_ext_cursor_image *args,
				dma_addr_t phys_addr)
{
	tegra_dc_writel(dc,
		CURSOR_COLOR(args->foreground.r,
			     args->foreground.g,
			     args->foreground.b),
		DC_DISP_CURSOR_FOREGROUND);
	tegra_dc_writel(dc,
		CURSOR_COLOR(args->background.r,
			     args->background.g,
			     args->background.b),
		DC_DISP_CURSOR_BACKGROUND);

	/* The HW register only holds the address bits in the mask; a
	 * misaligned buffer here is a driver bug, not user error. */
	BUG_ON(phys_addr & ~CURSOR_START_ADDR_MASK);

	tegra_dc_writel(dc,
		CURSOR_START_ADDR(((unsigned long) phys_addr)) |
		((args->flags & TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64) ?
			CURSOR_SIZE_64 : 0),
		DC_DISP_CURSOR_START_ADDR);
}
84 | |||
/*
 * Pin a new cursor image buffer, program it into the hardware, and
 * release the previously pinned image (if any).
 *
 * Requires: caller owns the cursor (tegra_dc_ext_get_cursor), a valid
 * nvmap client was set, and the head is enabled.  The flags must select
 * exactly one of the 32x32 / 64x64 sizes.
 *
 * Lock order: ext->cursor.lock, then dc->lock.  The old handle is
 * unpinned only after both locks are dropped, so the unpin cannot
 * deadlock against nvmap.
 */
int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
				  struct tegra_dc_ext_cursor_image *args)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc *dc = ext->dc;
	struct nvmap_handle_ref *handle, *old_handle;
	dma_addr_t phys_addr;
	u32 size;
	int ret;

	if (!user->nvmap)
		return -EFAULT;

	/* Exactly one size flag must be set. */
	size = args->flags & (TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 |
			      TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64);

	if (size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 &&
	    size !=  TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64)
		return -EINVAL;

	mutex_lock(&ext->cursor.lock);

	if (ext->cursor.user != user) {
		ret = -EACCES;
		goto unlock;
	}

	if (!ext->enabled) {
		ret = -ENXIO;
		goto unlock;
	}

	old_handle = ext->cursor.cur_handle;

	ret = tegra_dc_ext_pin_window(user, args->buff_id, &handle, &phys_addr);
	if (ret)
		goto unlock;

	ext->cursor.cur_handle = handle;

	mutex_lock(&dc->lock);

	set_cursor_image_hw(dc, args, phys_addr);

	/* Arm then trigger the register update (see dc_reg.h). */
	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	/* XXX sync here? */

	mutex_unlock(&dc->lock);

	mutex_unlock(&ext->cursor.lock);

	/* Drop the previous image outside of all locks. */
	if (old_handle) {
		nvmap_unpin(ext->nvmap, old_handle);
		nvmap_free(ext->nvmap, old_handle);
	}

	return 0;

unlock:
	mutex_unlock(&ext->cursor.lock);

	return ret;
}
150 | |||
/*
 * Show/hide and position the hardware cursor.
 *
 * Requires cursor ownership and an enabled head.  The visibility bit in
 * DC_DISP_DISP_WIN_OPTIONS is only rewritten when it actually changes;
 * the new position is always written.  Lock order matches
 * tegra_dc_ext_set_cursor_image: ext->cursor.lock, then dc->lock.
 */
int tegra_dc_ext_set_cursor(struct tegra_dc_ext_user *user,
			    struct tegra_dc_ext_cursor *args)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc *dc = ext->dc;
	u32 win_options;
	bool enable;
	int ret;

	mutex_lock(&ext->cursor.lock);

	if (ext->cursor.user != user) {
		ret = -EACCES;
		goto unlock;
	}

	if (!ext->enabled) {
		ret = -ENXIO;
		goto unlock;
	}

	enable = !!(args->flags & TEGRA_DC_EXT_CURSOR_FLAGS_VISIBLE);

	mutex_lock(&dc->lock);

	/* Read-modify-write only if the visibility state changes. */
	win_options = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
	if (!!(win_options & CURSOR_ENABLE) != enable) {
		win_options &= ~CURSOR_ENABLE;
		if (enable)
			win_options |= CURSOR_ENABLE;
		tegra_dc_writel(dc, win_options, DC_DISP_DISP_WIN_OPTIONS);
	}

	tegra_dc_writel(dc, CURSOR_POSITION(args->x, args->y),
		DC_DISP_CURSOR_POSITION);

	/* Arm then trigger the register update. */
	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	/* TODO: need to sync here? hopefully can avoid this, but need to
	 * figure out interaction w/ rest of GENERAL_ACT_REQ */

	mutex_unlock(&dc->lock);

	mutex_unlock(&ext->cursor.lock);

	return 0;

unlock:
	mutex_unlock(&ext->cursor.lock);

	return ret;
}
diff --git a/drivers/video/tegra/dc/ext/dev.c b/drivers/video/tegra/dc/ext/dev.c new file mode 100644 index 00000000000..04553e77839 --- /dev/null +++ b/drivers/video/tegra/dc/ext/dev.c | |||
@@ -0,0 +1,975 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/dev.c | ||
3 | * | ||
4 | * Copyright (C) 2011-2012, NVIDIA Corporation | ||
5 | * | ||
6 | * Author: Robert Morell <rmorell@nvidia.com> | ||
7 | * Some code based on fbdev extensions written by: | ||
8 | * Erik Gilling <konkers@android.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
18 | * more details. | ||
19 | */ | ||
20 | |||
21 | #include <linux/file.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/workqueue.h> | ||
26 | |||
27 | #include <video/tegra_dc_ext.h> | ||
28 | |||
29 | #include <mach/dc.h> | ||
30 | #include <mach/nvmap.h> | ||
31 | #include <mach/tegra_dc_ext.h> | ||
32 | |||
33 | /* XXX ew */ | ||
34 | #include "../dc_priv.h" | ||
35 | /* XXX ew 2 */ | ||
36 | #include "../../host/dev.h" | ||
37 | /* XXX ew 3 */ | ||
38 | #include "../../nvmap/nvmap.h" | ||
39 | #include "tegra_dc_ext_priv.h" | ||
40 | |||
/* Char-device number base and sysfs class shared with ext/control.c. */
int tegra_dc_ext_devno;
struct class *tegra_dc_ext_class;
/* Number of registered heads; reported by tegra_dc_ext_get_num_outputs(). */
static int head_count;

/* Per-window snapshot of one flip request: the userspace attributes
 * plus the pinned nvmap handles and DMA addresses for each plane. */
struct tegra_dc_ext_flip_win {
	struct tegra_dc_ext_flip_windowattr	attr;
	struct nvmap_handle_ref			*handle[TEGRA_DC_NUM_PLANES];
	dma_addr_t				phys_addr;	/* Y/packed plane */
	dma_addr_t				phys_addr_u;
	dma_addr_t				phys_addr_v;
	u32					syncpt_max;	/* value to reach when flip completes */
};

/* Work item carrying one whole flip request to a window's flip_wq. */
struct tegra_dc_ext_flip_data {
	struct tegra_dc_ext		*ext;
	struct work_struct		work;
	struct tegra_dc_ext_flip_win	win[DC_N_WINDOWS];
};
59 | |||
/* Number of outputs exposed to userspace; currently one per head. */
int tegra_dc_ext_get_num_outputs(void)
{
	/* TODO: decouple output count from head count */
	return head_count;
}
65 | |||
66 | static int tegra_dc_ext_set_nvmap_fd(struct tegra_dc_ext_user *user, | ||
67 | int fd) | ||
68 | { | ||
69 | struct nvmap_client *nvmap = NULL; | ||
70 | |||
71 | if (fd >= 0) { | ||
72 | nvmap = nvmap_client_get_file(fd); | ||
73 | if (IS_ERR(nvmap)) | ||
74 | return PTR_ERR(nvmap); | ||
75 | } | ||
76 | |||
77 | if (user->nvmap) | ||
78 | nvmap_client_put(user->nvmap); | ||
79 | |||
80 | user->nvmap = nvmap; | ||
81 | |||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | static int tegra_dc_ext_get_window(struct tegra_dc_ext_user *user, | ||
86 | unsigned int n) | ||
87 | { | ||
88 | struct tegra_dc_ext *ext = user->ext; | ||
89 | struct tegra_dc_ext_win *win; | ||
90 | int ret = 0; | ||
91 | |||
92 | if (n >= DC_N_WINDOWS) | ||
93 | return -EINVAL; | ||
94 | |||
95 | win = &ext->win[n]; | ||
96 | |||
97 | mutex_lock(&win->lock); | ||
98 | |||
99 | if (!win->user) | ||
100 | win->user = user; | ||
101 | else if (win->user != user) | ||
102 | ret = -EBUSY; | ||
103 | |||
104 | mutex_unlock(&win->lock); | ||
105 | |||
106 | return ret; | ||
107 | } | ||
108 | |||
109 | static int tegra_dc_ext_put_window(struct tegra_dc_ext_user *user, | ||
110 | unsigned int n) | ||
111 | { | ||
112 | struct tegra_dc_ext *ext = user->ext; | ||
113 | struct tegra_dc_ext_win *win; | ||
114 | int ret = 0; | ||
115 | |||
116 | if (n >= DC_N_WINDOWS) | ||
117 | return -EINVAL; | ||
118 | |||
119 | win = &ext->win[n]; | ||
120 | |||
121 | mutex_lock(&win->lock); | ||
122 | |||
123 | if (win->user == user) { | ||
124 | flush_workqueue(win->flip_wq); | ||
125 | win->user = 0; | ||
126 | } else { | ||
127 | ret = -EACCES; | ||
128 | } | ||
129 | |||
130 | mutex_unlock(&win->lock); | ||
131 | |||
132 | return ret; | ||
133 | } | ||
134 | |||
/*
 * Flip the ext->enabled flag while holding every window lock plus the
 * cursor lock, so no flip or cursor operation can observe a half-way
 * state.  Locks are taken in ascending window order then cursor, and
 * released in the exact reverse order — keep this order in sync with
 * the flip/cursor paths to avoid deadlock.
 */
static void set_enable(struct tegra_dc_ext *ext, bool en)
{
	int i;

	/*
	 * Take all locks to make sure any flip requests or cursor moves are
	 * out of their critical sections
	 */
	for (i = 0; i < ext->dc->n_windows; i++)
		mutex_lock(&ext->win[i].lock);
	mutex_lock(&ext->cursor.lock);

	ext->enabled = en;

	mutex_unlock(&ext->cursor.lock);
	for (i = ext->dc->n_windows - 1; i >= 0 ; i--)
		mutex_unlock(&ext->win[i].lock);
}
153 | |||
/* Allow flip and cursor requests on this head. */
void tegra_dc_ext_enable(struct tegra_dc_ext *ext)
{
	set_enable(ext, true);
}
158 | |||
/*
 * Reject new flip/cursor requests, then drain any flips already queued
 * on the per-window workqueues.
 */
void tegra_dc_ext_disable(struct tegra_dc_ext *ext)
{
	int i;
	set_enable(ext, false);

	/*
	 * Flush the flip queue -- note that this must be called with dc->lock
	 * unlocked or else it will hang.
	 */
	for (i = 0; i < ext->dc->n_windows; i++) {
		struct tegra_dc_ext_win *win = &ext->win[i];

		flush_workqueue(win->flip_wq);
	}
}
174 | |||
/*
 * Translate one flip_win request into the tegra_dc_win that the DC core
 * will program, and record the pinned handles as the window's current
 * buffers.  A request with no Y-plane handle disables the window.
 *
 * If the request carries a valid pre-syncpoint, block (up to 500 ms)
 * until it is reached before letting the flip proceed.  Always
 * returns 0.
 */
static int tegra_dc_ext_set_windowattr(struct tegra_dc_ext *ext,
			       struct tegra_dc_win *win,
			       const struct tegra_dc_ext_flip_win *flip_win)
{
	struct tegra_dc_ext_win *ext_win = &ext->win[win->idx];

	/* No buffer: disable the window and forget its current handles. */
	if (flip_win->handle[TEGRA_DC_Y] == NULL) {
		win->flags = 0;
		memset(ext_win->cur_handle, 0, sizeof(ext_win->cur_handle));
		return 0;
	}

	/* Map the userspace flip flags onto DC window flags. */
	win->flags = TEGRA_WIN_FLAG_ENABLED;
	if (flip_win->attr.blend == TEGRA_DC_EXT_BLEND_PREMULT)
		win->flags |= TEGRA_WIN_FLAG_BLEND_PREMULT;
	else if (flip_win->attr.blend == TEGRA_DC_EXT_BLEND_COVERAGE)
		win->flags |= TEGRA_WIN_FLAG_BLEND_COVERAGE;
	if (flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_TILED)
		win->flags |= TEGRA_WIN_FLAG_TILED;
	if (flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_INVERT_H)
		win->flags |= TEGRA_WIN_FLAG_INVERT_H;
	if (flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_INVERT_V)
		win->flags |= TEGRA_WIN_FLAG_INVERT_V;
	win->fmt = flip_win->attr.pixformat;
	win->x.full = flip_win->attr.x;
	win->y.full = flip_win->attr.y;
	win->w.full = flip_win->attr.w;
	win->h.full = flip_win->attr.h;
	/* XXX verify that this doesn't go outside display's active region */
	win->out_x = flip_win->attr.out_x;
	win->out_y = flip_win->attr.out_y;
	win->out_w = flip_win->attr.out_w;
	win->out_h = flip_win->attr.out_h;
	win->z = flip_win->attr.z;
	memcpy(ext_win->cur_handle, flip_win->handle,
	       sizeof(ext_win->cur_handle));

	/* XXX verify that this won't read outside of the surface */
	win->phys_addr = flip_win->phys_addr + flip_win->attr.offset;

	/* U/V planes fall back to the Y buffer when not supplied
	 * (packed or semi-planar formats). */
	win->phys_addr_u = flip_win->handle[TEGRA_DC_U] ?
		flip_win->phys_addr_u : flip_win->phys_addr;
	win->phys_addr_u += flip_win->attr.offset_u;

	win->phys_addr_v = flip_win->handle[TEGRA_DC_V] ?
		flip_win->phys_addr_v : flip_win->phys_addr;
	win->phys_addr_v += flip_win->attr.offset_v;

	win->stride = flip_win->attr.stride;
	win->stride_uv = flip_win->attr.stride_uv;

	/* pre_syncpt_id < 0 means "no fence to wait for". */
	if ((s32)flip_win->attr.pre_syncpt_id >= 0) {
		nvhost_syncpt_wait_timeout(
				&nvhost_get_host(ext->dc->ndev)->syncpt,
				flip_win->attr.pre_syncpt_id,
				flip_win->attr.pre_syncpt_val,
				msecs_to_jiffies(500), NULL);
	}


	return 0;
}
237 | |||
/*
 * Workqueue handler that actually performs a queued flip: program the
 * window attributes, kick the update, wait for it to latch, advance the
 * per-window syncpoints, and unpin the buffers that were displaced.
 *
 * A cursor-flagged flip is skipped entirely when newer flips are
 * already pending on its window (its own buffers are then unpinned
 * instead of the previous front buffers).
 * NOTE(review): skip_flip is never reset inside the window loop, so
 * once set it also applies to the remaining windows of this request —
 * confirm that this is intentional.
 */
static void tegra_dc_ext_flip_worker(struct work_struct *work)
{
	struct tegra_dc_ext_flip_data *data =
		container_of(work, struct tegra_dc_ext_flip_data, work);
	struct tegra_dc_ext *ext = data->ext;
	struct tegra_dc_win *wins[DC_N_WINDOWS];
	struct nvmap_handle_ref *unpin_handles[DC_N_WINDOWS *
					       TEGRA_DC_NUM_PLANES];
	struct nvmap_handle_ref *old_handle;
	int i, nr_unpin = 0, nr_win = 0;
	bool skip_flip = false;

	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
		int index = flip_win->attr.index;
		struct tegra_dc_win *win;
		struct tegra_dc_ext_win *ext_win;

		/* index < 0 marks an unused slot in the request */
		if (index < 0)
			continue;

		win = tegra_dc_get_window(ext->dc, index);
		ext_win = &ext->win[index];

		/* Skip a cursor flip if newer flips are still pending. */
		if (!(atomic_dec_and_test(&ext_win->nr_pending_flips)) &&
			(flip_win->attr.flags & TEGRA_DC_EXT_FLIP_FLAG_CURSOR))
			skip_flip = true;

		if (win->flags & TEGRA_WIN_FLAG_ENABLED) {
			int j;
			for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) {
				/* Skipped flip: unpin the new buffers; a
				 * performed flip displaces the old ones. */
				if (skip_flip)
					old_handle = flip_win->handle[j];
				else
					old_handle = ext_win->cur_handle[j];

				if (!old_handle)
					continue;

				unpin_handles[nr_unpin++] = old_handle;
			}
		}

		if (!skip_flip)
			tegra_dc_ext_set_windowattr(ext, win, &data->win[i]);

		wins[nr_win++] = win;
	}

	if (!skip_flip) {
		tegra_dc_update_windows(wins, nr_win);
		/* TODO: implement swapinterval here */
		tegra_dc_sync_windows(wins, nr_win);
	}

	/* Signal flip completion on every involved window's syncpoint. */
	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
		int index = flip_win->attr.index;

		if (index < 0)
			continue;

		tegra_dc_incr_syncpt_min(ext->dc, index,
			flip_win->syncpt_max);
	}

	/* unpin and deref previous front buffers */
	for (i = 0; i < nr_unpin; i++) {
		nvmap_unpin(ext->nvmap, unpin_handles[i]);
		nvmap_free(ext->nvmap, unpin_handles[i]);
	}

	kfree(data);
}
312 | |||
/*
 * Take the lock of every window referenced by @args, in ascending index
 * order (the same order set_enable() uses), and verify each window is
 * owned by @user.  On ownership failure, every lock taken so far —
 * including the one for the failing window — is released.
 *
 * Returns 0 with all locks held, or -EACCES with no locks held.
 */
static int lock_windows_for_flip(struct tegra_dc_ext_user *user,
				 struct tegra_dc_ext_flip *args)
{
	struct tegra_dc_ext *ext = user->ext;
	u8 idx_mask = 0;
	int i;

	/* First pass: collect the set of window indices involved. */
	for (i = 0; i < DC_N_WINDOWS; i++) {
		int index = args->win[i].index;

		if (index < 0)
			continue;

		idx_mask |= BIT(index);
	}

	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_ext_win *win;

		if (!(idx_mask & BIT(i)))
			continue;

		win = &ext->win[i];

		mutex_lock(&win->lock);

		if (win->user != user)
			goto fail_unlock;
	}

	return 0;

fail_unlock:
	/* do/while so the lock taken at the current i is dropped too;
	 * the loop then walks i back down to 0. */
	do {
		if (!(idx_mask & BIT(i)))
			continue;

		mutex_unlock(&ext->win[i].lock);
	} while (i--);

	return -EACCES;
}
355 | |||
356 | static void unlock_windows_for_flip(struct tegra_dc_ext_user *user, | ||
357 | struct tegra_dc_ext_flip *args) | ||
358 | { | ||
359 | struct tegra_dc_ext *ext = user->ext; | ||
360 | u8 idx_mask = 0; | ||
361 | int i; | ||
362 | |||
363 | for (i = 0; i < DC_N_WINDOWS; i++) { | ||
364 | int index = args->win[i].index; | ||
365 | |||
366 | if (index < 0) | ||
367 | continue; | ||
368 | |||
369 | idx_mask |= BIT(index); | ||
370 | } | ||
371 | |||
372 | for (i = DC_N_WINDOWS - 1; i >= 0; i--) { | ||
373 | if (!(idx_mask & BIT(i))) | ||
374 | continue; | ||
375 | |||
376 | mutex_unlock(&ext->win[i].lock); | ||
377 | } | ||
378 | } | ||
379 | |||
380 | static int sanitize_flip_args(struct tegra_dc_ext_user *user, | ||
381 | struct tegra_dc_ext_flip *args) | ||
382 | { | ||
383 | int i, used_windows = 0; | ||
384 | |||
385 | for (i = 0; i < DC_N_WINDOWS; i++) { | ||
386 | int index = args->win[i].index; | ||
387 | |||
388 | if (index < 0) | ||
389 | continue; | ||
390 | |||
391 | if (index >= DC_N_WINDOWS) | ||
392 | return -EINVAL; | ||
393 | |||
394 | if (used_windows & BIT(index)) | ||
395 | return -EINVAL; | ||
396 | |||
397 | used_windows |= BIT(index); | ||
398 | } | ||
399 | |||
400 | if (!used_windows) | ||
401 | return -EINVAL; | ||
402 | |||
403 | return 0; | ||
404 | } | ||
405 | |||
/*
 * ioctl entry point for a flip request.
 *
 * Sequence: validate the request, pin every referenced plane buffer,
 * take the involved window locks, advance each window's syncpoint max
 * (reporting one id/value pair back to userspace as the post-flip
 * fence), then hand the request to a flip workqueue.  The actual
 * hardware programming happens asynchronously in
 * tegra_dc_ext_flip_worker().
 *
 * On any failure all buffers pinned so far are unpinned and the work
 * item is freed.  Returns 0 on success or a negative errno.
 */
static int tegra_dc_ext_flip(struct tegra_dc_ext_user *user,
			     struct tegra_dc_ext_flip *args)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc_ext_flip_data *data;
	int work_index;
	int i, ret = 0;

#ifdef CONFIG_ANDROID
	int index_check[DC_N_WINDOWS] = {0, };
	int zero_index_id = 0;
#endif

	if (!user->nvmap)
		return -EFAULT;

	ret = sanitize_flip_args(user, args);
	if (ret)
		return ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_WORK(&data->work, tegra_dc_ext_flip_worker);
	data->ext = ext;

#ifdef CONFIG_ANDROID
	/*
	 * Reorder so the entry with window index 0 ends up in the last
	 * array slot.  NOTE(review): presumably so window 0 is
	 * programmed last and becomes the reported post-syncpt —
	 * confirm against the Android compositor's expectations.
	 */
	for (i = 0; i < DC_N_WINDOWS; i++) {
		index_check[i] = args->win[i].index;
		if (index_check[i] == 0)
			zero_index_id = i;
	}

	if (index_check[DC_N_WINDOWS - 1] != 0) {
		struct tegra_dc_ext_flip_windowattr win_temp;
		win_temp = args->win[DC_N_WINDOWS - 1];
		args->win[DC_N_WINDOWS - 1] = args->win[zero_index_id];
		args->win[zero_index_id] = win_temp;
	}
#endif

	/* Pin the Y (mandatory) and optional U/V planes of each window. */
	for (i = 0; i < DC_N_WINDOWS; i++) {
		struct tegra_dc_ext_flip_win *flip_win = &data->win[i];
		int index = args->win[i].index;

		memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));

		if (index < 0)
			continue;

		ret = tegra_dc_ext_pin_window(user, flip_win->attr.buff_id,
					      &flip_win->handle[TEGRA_DC_Y],
					      &flip_win->phys_addr);
		if (ret)
			goto fail_pin;

		if (flip_win->attr.buff_id_u) {
			ret = tegra_dc_ext_pin_window(user,
					      flip_win->attr.buff_id_u,
					      &flip_win->handle[TEGRA_DC_U],
					      &flip_win->phys_addr_u);
			if (ret)
				goto fail_pin;
		} else {
			flip_win->handle[TEGRA_DC_U] = NULL;
			flip_win->phys_addr_u = 0;
		}

		if (flip_win->attr.buff_id_v) {
			ret = tegra_dc_ext_pin_window(user,
					      flip_win->attr.buff_id_v,
					      &flip_win->handle[TEGRA_DC_V],
					      &flip_win->phys_addr_v);
			if (ret)
				goto fail_pin;
		} else {
			flip_win->handle[TEGRA_DC_V] = NULL;
			flip_win->phys_addr_v = 0;
		}
	}

	ret = lock_windows_for_flip(user, args);
	if (ret)
		goto fail_pin;

	if (!ext->enabled) {
		ret = -ENXIO;
		goto unlock;
	}

	for (i = 0; i < DC_N_WINDOWS; i++) {
		u32 syncpt_max;
		int index = args->win[i].index;
		struct tegra_dc_win *win;
		struct tegra_dc_ext_win *ext_win;

		if (index < 0)
			continue;

		win = tegra_dc_get_window(ext->dc, index);
		ext_win = &ext->win[index];

		syncpt_max = tegra_dc_incr_syncpt_max(ext->dc, index);

		data->win[i].syncpt_max = syncpt_max;

		/*
		 * Any of these windows' syncpoints should be equivalent for
		 * the client, so we just send back an arbitrary one of them
		 */
		args->post_syncpt_val = syncpt_max;
		args->post_syncpt_id = tegra_dc_get_syncpt_id(ext->dc, index);
		/* sanitize_flip_args() guarantees at least one valid
		 * index, so work_index is always assigned here. */
		work_index = index;

		atomic_inc(&ext->win[work_index].nr_pending_flips);
	}
	queue_work(ext->win[work_index].flip_wq, &data->work);

	unlock_windows_for_flip(user, args);

	return 0;

unlock:
	unlock_windows_for_flip(user, args);

fail_pin:
	/* kzalloc zeroed data, so only successfully pinned handles are set. */
	for (i = 0; i < DC_N_WINDOWS; i++) {
		int j;
		for (j = 0; j < TEGRA_DC_NUM_PLANES; j++) {
			if (!data->win[i].handle[j])
				continue;

			nvmap_unpin(ext->nvmap, data->win[i].handle[j]);
			nvmap_free(ext->nvmap, data->win[i].handle[j]);
		}
	}
	kfree(data);

	return ret;
}
547 | |||
548 | static int tegra_dc_ext_set_csc(struct tegra_dc_ext_user *user, | ||
549 | struct tegra_dc_ext_csc *new_csc) | ||
550 | { | ||
551 | unsigned int index = new_csc->win_index; | ||
552 | struct tegra_dc *dc = user->ext->dc; | ||
553 | struct tegra_dc_ext_win *ext_win; | ||
554 | struct tegra_dc_csc *csc; | ||
555 | |||
556 | if (index >= DC_N_WINDOWS) | ||
557 | return -EINVAL; | ||
558 | |||
559 | ext_win = &user->ext->win[index]; | ||
560 | csc = &dc->windows[index].csc; | ||
561 | |||
562 | mutex_lock(&ext_win->lock); | ||
563 | |||
564 | if (ext_win->user != user) { | ||
565 | mutex_unlock(&ext_win->lock); | ||
566 | return -EACCES; | ||
567 | } | ||
568 | |||
569 | csc->yof = new_csc->yof; | ||
570 | csc->kyrgb = new_csc->kyrgb; | ||
571 | csc->kur = new_csc->kur; | ||
572 | csc->kvr = new_csc->kvr; | ||
573 | csc->kug = new_csc->kug; | ||
574 | csc->kvg = new_csc->kvg; | ||
575 | csc->kub = new_csc->kub; | ||
576 | csc->kvb = new_csc->kvb; | ||
577 | |||
578 | tegra_dc_update_csc(dc, index); | ||
579 | |||
580 | mutex_unlock(&ext_win->lock); | ||
581 | |||
582 | return 0; | ||
583 | } | ||
584 | |||
585 | static int set_lut_channel(u16 *channel_from_user, | ||
586 | u8 *channel_to, | ||
587 | u32 start, | ||
588 | u32 len) | ||
589 | { | ||
590 | int i; | ||
591 | u16 lut16bpp[256]; | ||
592 | |||
593 | if (channel_from_user) { | ||
594 | if (copy_from_user(lut16bpp, channel_from_user, len<<1)) | ||
595 | return 1; | ||
596 | |||
597 | for (i = 0; i < len; i++) | ||
598 | channel_to[start+i] = lut16bpp[i]>>8; | ||
599 | } else { | ||
600 | for (i = 0; i < len; i++) | ||
601 | channel_to[start+i] = start+i; | ||
602 | } | ||
603 | |||
604 | return 0; | ||
605 | } | ||
606 | |||
/*
 * Set a window's 256-entry color lookup table from userspace.
 *
 * Validates the window index and the [start, start + len) range, requires
 * that the caller currently owns the window, loads the R/G/B channels via
 * set_lut_channel(), then pushes the result to hardware.
 */
static int tegra_dc_ext_set_lut(struct tegra_dc_ext_user *user,
				struct tegra_dc_ext_lut *new_lut)
{
	int err;
	unsigned int index = new_lut->win_index;
	u32 start = new_lut->start;
	u32 len = new_lut->len;

	struct tegra_dc *dc = user->ext->dc;
	struct tegra_dc_ext_win *ext_win;
	struct tegra_dc_lut *lut;

	if (index >= DC_N_WINDOWS)
		return -EINVAL;

	/* The LUT has 256 entries; reject any range that escapes it */
	if ((start >= 256) || (len > 256) || ((start + len) > 256))
		return -EINVAL;

	ext_win = &user->ext->win[index];
	lut = &dc->windows[index].lut;

	mutex_lock(&ext_win->lock);

	/* Only the window's current owner may change its LUT */
	if (ext_win->user != user) {
		mutex_unlock(&ext_win->lock);
		return -EACCES;
	}

	/*
	 * Bitwise | (not ||) is deliberate: all three channels are
	 * attempted even if an earlier one faults.
	 */
	err = set_lut_channel(new_lut->r, lut->r, start, len) |
		set_lut_channel(new_lut->g, lut->g, start, len) |
		set_lut_channel(new_lut->b, lut->b, start, len);

	/* Any faulting channel maps to a single -EFAULT for the caller */
	if (err) {
		mutex_unlock(&ext_win->lock);
		return -EFAULT;
	}

	tegra_dc_update_lut(dc, index,
			new_lut->flags & TEGRA_DC_EXT_LUT_FLAGS_FBOVERRIDE);

	mutex_unlock(&ext_win->lock);

	return 0;
}
651 | |||
652 | static u32 tegra_dc_ext_get_vblank_syncpt(struct tegra_dc_ext_user *user) | ||
653 | { | ||
654 | struct tegra_dc *dc = user->ext->dc; | ||
655 | |||
656 | return dc->vblank_syncpt; | ||
657 | } | ||
658 | |||
659 | static int tegra_dc_ext_get_status(struct tegra_dc_ext_user *user, | ||
660 | struct tegra_dc_ext_status *status) | ||
661 | { | ||
662 | struct tegra_dc *dc = user->ext->dc; | ||
663 | |||
664 | memset(status, 0, sizeof(*status)); | ||
665 | |||
666 | if (dc->enabled) | ||
667 | status->flags |= TEGRA_DC_EXT_FLAGS_ENABLED; | ||
668 | |||
669 | return 0; | ||
670 | } | ||
671 | |||
/*
 * ioctl dispatcher for the per-head extension device.
 *
 * Commands taking a struct argument are copied in from (and, where the
 * helper produces results, back out to) userspace here; the real work
 * is delegated to the tegra_dc_ext_* helpers.  Unknown commands yield
 * -EINVAL.
 */
static long tegra_dc_ioctl(struct file *filp, unsigned int cmd,
			unsigned long arg)
{
	void __user *user_arg = (void __user *)arg;
	struct tegra_dc_ext_user *user = filp->private_data;

	switch (cmd) {
	/* These three take a scalar argument directly, no copy needed */
	case TEGRA_DC_EXT_SET_NVMAP_FD:
		return tegra_dc_ext_set_nvmap_fd(user, arg);

	case TEGRA_DC_EXT_GET_WINDOW:
		return tegra_dc_ext_get_window(user, arg);
	case TEGRA_DC_EXT_PUT_WINDOW:
		return tegra_dc_ext_put_window(user, arg);

	case TEGRA_DC_EXT_FLIP:
	{
		struct tegra_dc_ext_flip args;
		int ret;

		if (copy_from_user(&args, user_arg, sizeof(args)))
			return -EFAULT;

		ret = tegra_dc_ext_flip(user, &args);

		/* Copied back unconditionally: flip fills in syncpt info */
		if (copy_to_user(user_arg, &args, sizeof(args)))
			return -EFAULT;

		return ret;
	}

	case TEGRA_DC_EXT_GET_CURSOR:
		return tegra_dc_ext_get_cursor(user);
	case TEGRA_DC_EXT_PUT_CURSOR:
		return tegra_dc_ext_put_cursor(user);
	case TEGRA_DC_EXT_SET_CURSOR_IMAGE:
	{
		struct tegra_dc_ext_cursor_image args;

		if (copy_from_user(&args, user_arg, sizeof(args)))
			return -EFAULT;

		return tegra_dc_ext_set_cursor_image(user, &args);
	}
	case TEGRA_DC_EXT_SET_CURSOR:
	{
		struct tegra_dc_ext_cursor args;

		if (copy_from_user(&args, user_arg, sizeof(args)))
			return -EFAULT;

		return tegra_dc_ext_set_cursor(user, &args);
	}

	case TEGRA_DC_EXT_SET_CSC:
	{
		struct tegra_dc_ext_csc args;

		if (copy_from_user(&args, user_arg, sizeof(args)))
			return -EFAULT;

		return tegra_dc_ext_set_csc(user, &args);
	}

	case TEGRA_DC_EXT_GET_VBLANK_SYNCPT:
	{
		u32 syncpt = tegra_dc_ext_get_vblank_syncpt(user);

		if (copy_to_user(user_arg, &syncpt, sizeof(syncpt)))
			return -EFAULT;

		return 0;
	}

	case TEGRA_DC_EXT_GET_STATUS:
	{
		struct tegra_dc_ext_status args;
		int ret;

		ret = tegra_dc_ext_get_status(user, &args);

		if (copy_to_user(user_arg, &args, sizeof(args)))
			return -EFAULT;

		return ret;
	}

	case TEGRA_DC_EXT_SET_LUT:
	{
		struct tegra_dc_ext_lut args;

		if (copy_from_user(&args, user_arg, sizeof(args)))
			return -EFAULT;

		return tegra_dc_ext_set_lut(user, &args);
	}

	default:
		return -EINVAL;
	}
}
773 | |||
774 | static int tegra_dc_open(struct inode *inode, struct file *filp) | ||
775 | { | ||
776 | struct tegra_dc_ext_user *user; | ||
777 | struct tegra_dc_ext *ext; | ||
778 | |||
779 | user = kzalloc(sizeof(*user), GFP_KERNEL); | ||
780 | if (!user) | ||
781 | return -ENOMEM; | ||
782 | |||
783 | ext = container_of(inode->i_cdev, struct tegra_dc_ext, cdev); | ||
784 | user->ext = ext; | ||
785 | |||
786 | filp->private_data = user; | ||
787 | |||
788 | return 0; | ||
789 | } | ||
790 | |||
791 | static int tegra_dc_release(struct inode *inode, struct file *filp) | ||
792 | { | ||
793 | struct tegra_dc_ext_user *user = filp->private_data; | ||
794 | struct tegra_dc_ext *ext = user->ext; | ||
795 | unsigned int i; | ||
796 | |||
797 | for (i = 0; i < DC_N_WINDOWS; i++) { | ||
798 | if (ext->win[i].user == user) | ||
799 | tegra_dc_ext_put_window(user, i); | ||
800 | } | ||
801 | if (ext->cursor.user == user) | ||
802 | tegra_dc_ext_put_cursor(user); | ||
803 | |||
804 | if (user->nvmap) | ||
805 | nvmap_client_put(user->nvmap); | ||
806 | |||
807 | kfree(user); | ||
808 | |||
809 | return 0; | ||
810 | } | ||
811 | |||
812 | static int tegra_dc_ext_setup_windows(struct tegra_dc_ext *ext) | ||
813 | { | ||
814 | int i, ret; | ||
815 | |||
816 | for (i = 0; i < ext->dc->n_windows; i++) { | ||
817 | struct tegra_dc_ext_win *win = &ext->win[i]; | ||
818 | char name[32]; | ||
819 | |||
820 | win->ext = ext; | ||
821 | win->idx = i; | ||
822 | |||
823 | snprintf(name, sizeof(name), "tegradc.%d/%c", | ||
824 | ext->dc->ndev->id, 'a' + i); | ||
825 | win->flip_wq = create_singlethread_workqueue(name); | ||
826 | if (!win->flip_wq) { | ||
827 | ret = -ENOMEM; | ||
828 | goto cleanup; | ||
829 | } | ||
830 | |||
831 | mutex_init(&win->lock); | ||
832 | } | ||
833 | |||
834 | return 0; | ||
835 | |||
836 | cleanup: | ||
837 | while (i--) { | ||
838 | struct tegra_dc_ext_win *win = &ext->win[i]; | ||
839 | destroy_workqueue(win->flip_wq); | ||
840 | } | ||
841 | |||
842 | return ret; | ||
843 | } | ||
844 | |||
/* File operations for the per-head extension device node. */
static const struct file_operations tegra_dc_devops = {
	.owner =		THIS_MODULE,
	.open =			tegra_dc_open,
	.release =		tegra_dc_release,
	.unlocked_ioctl = tegra_dc_ioctl,
};
851 | |||
/*
 * Register the extension interface for one display head.
 *
 * Allocates the tegra_dc_ext, adds a character device at the next free
 * minor, creates the sysfs device node "tegra_dc_<id>", creates a
 * driver-owned nvmap client, and sets up the per-window flip
 * workqueues.  Returns the new tegra_dc_ext or an ERR_PTR on failure.
 */
struct tegra_dc_ext *tegra_dc_ext_register(struct nvhost_device *ndev,
					   struct tegra_dc *dc)
{
	int ret;
	struct tegra_dc_ext *ext;
	int devno;

	ext = kzalloc(sizeof(*ext), GFP_KERNEL);
	if (!ext)
		return ERR_PTR(-ENOMEM);

	BUG_ON(!tegra_dc_ext_devno);
	/*
	 * head_count is the number of heads registered so far; the +1
	 * presumably skips a minor reserved for the control device — see
	 * the "plus the control device" comment in module_init.
	 */
	devno = tegra_dc_ext_devno + head_count + 1;

	cdev_init(&ext->cdev, &tegra_dc_devops);
	ext->cdev.owner = THIS_MODULE;
	ret = cdev_add(&ext->cdev, devno, 1);
	if (ret) {
		dev_err(&ndev->dev, "Failed to create character device\n");
		goto cleanup_alloc;
	}

	ext->dev = device_create(tegra_dc_ext_class,
				 &ndev->dev,
				 devno,
				 NULL,
				 "tegra_dc_%d",
				 ndev->id);

	if (IS_ERR(ext->dev)) {
		ret = PTR_ERR(ext->dev);
		goto cleanup_cdev;
	}

	ext->dc = dc;

	ext->nvmap = nvmap_create_client(nvmap_dev, "tegra_dc_ext");
	if (!ext->nvmap) {
		ret = -ENOMEM;
		goto cleanup_device;
	}

	ret = tegra_dc_ext_setup_windows(ext);
	if (ret)
		goto cleanup_nvmap;

	mutex_init(&ext->cursor.lock);

	head_count++;

	return ext;

	/* Error unwind, in reverse order of acquisition */
cleanup_nvmap:
	nvmap_client_put(ext->nvmap);

cleanup_device:
	/*
	 * NOTE(review): ext->dev came from device_create();
	 * device_destroy() is the usual counterpart — confirm that
	 * device_del() alone is sufficient here.
	 */
	device_del(ext->dev);

cleanup_cdev:
	cdev_del(&ext->cdev);

cleanup_alloc:
	kfree(ext);

	return ERR_PTR(ret);
}
918 | |||
919 | void tegra_dc_ext_unregister(struct tegra_dc_ext *ext) | ||
920 | { | ||
921 | int i; | ||
922 | |||
923 | for (i = 0; i < ext->dc->n_windows; i++) { | ||
924 | struct tegra_dc_ext_win *win = &ext->win[i]; | ||
925 | |||
926 | flush_workqueue(win->flip_wq); | ||
927 | destroy_workqueue(win->flip_wq); | ||
928 | } | ||
929 | |||
930 | nvmap_client_put(ext->nvmap); | ||
931 | device_del(ext->dev); | ||
932 | cdev_del(&ext->cdev); | ||
933 | |||
934 | kfree(ext); | ||
935 | |||
936 | head_count--; | ||
937 | } | ||
938 | |||
939 | int __init tegra_dc_ext_module_init(void) | ||
940 | { | ||
941 | int ret; | ||
942 | |||
943 | tegra_dc_ext_class = class_create(THIS_MODULE, "tegra_dc_ext"); | ||
944 | if (!tegra_dc_ext_class) { | ||
945 | printk(KERN_ERR "tegra_dc_ext: failed to create class\n"); | ||
946 | return -ENOMEM; | ||
947 | } | ||
948 | |||
949 | /* Reserve one character device per head, plus the control device */ | ||
950 | ret = alloc_chrdev_region(&tegra_dc_ext_devno, | ||
951 | 0, TEGRA_MAX_DC + 1, | ||
952 | "tegra_dc_ext"); | ||
953 | if (ret) | ||
954 | goto cleanup_class; | ||
955 | |||
956 | ret = tegra_dc_ext_control_init(); | ||
957 | if (ret) | ||
958 | goto cleanup_region; | ||
959 | |||
960 | return 0; | ||
961 | |||
962 | cleanup_region: | ||
963 | unregister_chrdev_region(tegra_dc_ext_devno, TEGRA_MAX_DC); | ||
964 | |||
965 | cleanup_class: | ||
966 | class_destroy(tegra_dc_ext_class); | ||
967 | |||
968 | return ret; | ||
969 | } | ||
970 | |||
971 | void __exit tegra_dc_ext_module_exit(void) | ||
972 | { | ||
973 | unregister_chrdev_region(tegra_dc_ext_devno, TEGRA_MAX_DC); | ||
974 | class_destroy(tegra_dc_ext_class); | ||
975 | } | ||
diff --git a/drivers/video/tegra/dc/ext/events.c b/drivers/video/tegra/dc/ext/events.c new file mode 100644 index 00000000000..150a1501fce --- /dev/null +++ b/drivers/video/tegra/dc/ext/events.c | |||
@@ -0,0 +1,197 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/ext/events.c | ||
3 | * | ||
4 | * Copyright (C) 2011, NVIDIA Corporation | ||
5 | * | ||
6 | * Author: Robert Morell <rmorell@nvidia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | */ | ||
18 | |||
19 | #include <linux/err.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/list.h> | ||
22 | #include <linux/poll.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | |||
27 | #include "tegra_dc_ext_priv.h" | ||
28 | |||
29 | static DECLARE_WAIT_QUEUE_HEAD(event_wait); | ||
30 | |||
31 | unsigned int tegra_dc_ext_event_poll(struct file *filp, poll_table *wait) | ||
32 | { | ||
33 | struct tegra_dc_ext_control_user *user = filp->private_data; | ||
34 | unsigned int mask = 0; | ||
35 | |||
36 | poll_wait(filp, &event_wait, wait); | ||
37 | |||
38 | if (atomic_read(&user->num_events)) | ||
39 | mask |= POLLIN; | ||
40 | |||
41 | return mask; | ||
42 | } | ||
43 | |||
/*
 * Dequeue the oldest pending event for @user into *event.
 *
 * @block: if true, sleep interruptibly until an event is queued.  A
 *         signal's -ERESTARTSYS is mapped to -EAGAIN — presumably so
 *         the read() returns to userspace instead of being restarted;
 *         confirm intent before changing.
 *
 * Returns 1 when an event was copied out, 0 when non-blocking and no
 * event was pending, or a negative error from the wait.
 */
static int get_next_event(struct tegra_dc_ext_control_user *user,
			  struct tegra_dc_ext_event_list *event,
			  bool block)
{
	struct list_head *list = &user->event_list;
	struct tegra_dc_ext_event_list *next_event;
	int ret;

	if (block) {
		ret = wait_event_interruptible(event_wait,
				atomic_read(&user->num_events));

		if (unlikely(ret)) {
			if (ret == -ERESTARTSYS)
				return -EAGAIN;
			return ret;
		}
	} else {
		if (!atomic_read(&user->num_events))
			return 0;
	}

	mutex_lock(&user->lock);

	/*
	 * NOTE(review): the BUG_ON assumes no other consumer drained the
	 * queue between the num_events check above and taking the lock —
	 * confirm readers of one fd are serialized.
	 */
	BUG_ON(list_empty(list));
	next_event = list_first_entry(list, struct tegra_dc_ext_event_list,
			list);
	/* Copy out by value, then free the queued node */
	*event = *next_event;
	list_del(&next_event->list);
	kfree(next_event);

	atomic_dec(&user->num_events);

	mutex_unlock(&user->lock);

	return 1;
}
81 | |||
/*
 * read() handler for the control device: delivers at most one queued
 * event per call.
 *
 * If a previous read() consumed only part of an event, the remainder of
 * that stashed event is delivered first.  Otherwise the next event is
 * dequeued (blocking unless O_NONBLOCK).  If the user buffer is smaller
 * than what remains of the event, the unsent tail is stashed in
 * user->event_to_copy / user->partial_copy for the next call.
 */
ssize_t tegra_dc_ext_event_read(struct file *filp, char __user *buf,
				size_t size, loff_t *ppos)
{
	struct tegra_dc_ext_control_user *user = filp->private_data;
	struct tegra_dc_ext_event_list event_elem;
	struct tegra_dc_ext_event *event = &event_elem.event;
	ssize_t retval = 0, to_copy, event_size, pending;
	loff_t previously_copied = 0;
	char *to_copy_ptr;

	if (size == 0)
		return 0;

	if (user->partial_copy) {
		/*
		 * We didn't transfer the entire event last time, need to
		 * finish it up
		 */
		event_elem = user->event_to_copy;
		previously_copied = user->partial_copy;
	} else {
		/* Get the next event, if any */
		pending = get_next_event(user, &event_elem,
				!(filp->f_flags & O_NONBLOCK));
		if (pending <= 0)
			return pending;
	}

	/* Write the event to the user */
	/* Bytes remaining: header + payload, minus what was already sent */
	event_size = sizeof(*event) + event->data_size;
	BUG_ON(event_size <= previously_copied);
	event_size -= previously_copied;

	/* The payload immediately follows the event header in event_elem */
	to_copy_ptr = (char *)event + previously_copied;
	to_copy = min_t(ssize_t, size, event_size);
	if (copy_to_user(buf, to_copy_ptr, to_copy)) {
		retval = -EFAULT;
		to_copy = 0;
	}

	/* Note that we currently only deliver one event at a time */

	if (event_size > to_copy) {
		/*
		 * We were only able to copy part of this event. Stash it for
		 * next time.
		 */
		user->event_to_copy = event_elem;
		user->partial_copy = previously_copied + to_copy;
	} else {
		user->partial_copy = 0;
	}

	/* Bytes delivered on (even partial) success, else the -EFAULT */
	return to_copy ? to_copy : retval;
}
137 | |||
/*
 * Deliver @event to every control-device user whose event_mask accepts
 * its type.  Each matching user gets its own heap copy of the event
 * (header + data_size payload bytes) appended to its queue; all poll
 * and read waiters are then woken.
 *
 * Returns 0, or -ENOMEM if the node allocation failed for at least one
 * user (delivery to the remaining users still proceeds).
 *
 * NOTE(review): the memcpy assumes event->data_size fits in the
 * TEGRA_DC_EXT_EVENT_MAX_SZ payload of tegra_dc_ext_event_list; the
 * only visible caller (hotplug) satisfies this — confirm for any new
 * event type.
 */
static int tegra_dc_ext_queue_event(struct tegra_dc_ext_control *control,
				    struct tegra_dc_ext_event *event)
{
	struct list_head *cur;
	int retval = 0;

	mutex_lock(&control->lock);
	list_for_each(cur, &control->users) {
		struct tegra_dc_ext_control_user *user;
		struct tegra_dc_ext_event_list *ev_list;

		user = container_of(cur, struct tegra_dc_ext_control_user,
				list);
		mutex_lock(&user->lock);

		/* Skip users not subscribed to this event type */
		if (!(user->event_mask & event->type)) {
			mutex_unlock(&user->lock);
			continue;
		}

		ev_list = kmalloc(sizeof(*ev_list), GFP_KERNEL);
		if (!ev_list) {
			retval = -ENOMEM;
			mutex_unlock(&user->lock);
			continue;
		}

		/* Copies header plus payload; data[] follows event in the node */
		memcpy(&ev_list->event, event,
				sizeof(*event) + event->data_size);

		list_add_tail(&ev_list->list, &user->event_list);

		atomic_inc(&user->num_events);

		mutex_unlock(&user->lock);
	}
	mutex_unlock(&control->lock);

	/* Is it worth it to track waiters with more granularity? */
	wake_up(&event_wait);

	return retval;
}
181 | |||
182 | int tegra_dc_ext_queue_hotplug(struct tegra_dc_ext_control *control, int output) | ||
183 | { | ||
184 | struct { | ||
185 | struct tegra_dc_ext_event event; | ||
186 | struct tegra_dc_ext_control_event_hotplug hotplug; | ||
187 | } __packed pack; | ||
188 | |||
189 | pack.event.type = TEGRA_DC_EXT_EVENT_HOTPLUG; | ||
190 | pack.event.data_size = sizeof(pack.hotplug); | ||
191 | |||
192 | pack.hotplug.handle = output; | ||
193 | |||
194 | tegra_dc_ext_queue_event(control, &pack.event); | ||
195 | |||
196 | return 0; | ||
197 | } | ||
diff --git a/drivers/video/tegra/dc/ext/tegra_dc_ext_priv.h b/drivers/video/tegra/dc/ext/tegra_dc_ext_priv.h new file mode 100644 index 00000000000..95a637d5a52 --- /dev/null +++ b/drivers/video/tegra/dc/ext/tegra_dc_ext_priv.h | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/ext/tegra_dc_ext_priv.h | ||
3 | * | ||
4 | * Copyright (C) 2011, NVIDIA Corporation | ||
5 | * | ||
6 | * Author: Robert Morell <rmorell@nvidia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | */ | ||
18 | |||
19 | #ifndef __TEGRA_DC_EXT_PRIV_H | ||
20 | #define __TEGRA_DC_EXT_PRIV_H | ||
21 | |||
22 | #include <linux/cdev.h> | ||
23 | #include <linux/list.h> | ||
24 | #include <linux/mutex.h> | ||
25 | #include <linux/poll.h> | ||
26 | |||
27 | #include <mach/dc.h> | ||
28 | #include <mach/nvmap.h> | ||
29 | |||
30 | #include <video/tegra_dc_ext.h> | ||
31 | |||
32 | struct tegra_dc_ext; | ||
33 | |||
/* Per-open-fd state for a head's extension device. */
struct tegra_dc_ext_user {
	struct tegra_dc_ext *ext;	/* owning head's extension state */
	struct nvmap_client *nvmap;	/* user's nvmap client; may be NULL */
};
38 | |||
/* Plane indices for planar (YUV) buffers; TEGRA_DC_NUM_PLANES is the count. */
enum {
	TEGRA_DC_Y,
	TEGRA_DC_U,
	TEGRA_DC_V,
	TEGRA_DC_NUM_PLANES,
};
45 | |||
/* Per-hardware-window state owned by the extension layer. */
struct tegra_dc_ext_win {
	struct tegra_dc_ext *ext;	/* back-pointer to the owning head */

	int idx;			/* hardware window index */

	struct tegra_dc_ext_user *user;	/* current owner fd; NULL if free */

	struct mutex lock;

	/* Current nvmap handle (if any) for Y, U, V planes */
	struct nvmap_handle_ref *cur_handle[TEGRA_DC_NUM_PLANES];

	struct workqueue_struct *flip_wq;	/* serializes this window's flips */

	atomic_t nr_pending_flips;	/* flips queued but not yet completed */
};
62 | |||
/* Extension-layer state for one display head. */
struct tegra_dc_ext {
	struct tegra_dc *dc;		/* underlying display controller */

	struct cdev cdev;		/* per-head character device */
	struct device *dev;		/* sysfs device node */

	struct nvmap_client *nvmap;	/* driver-owned nvmap context */

	struct tegra_dc_ext_win win[DC_N_WINDOWS];

	/* Cursor ownership and its currently pinned buffer */
	struct {
		struct tegra_dc_ext_user *user;
		struct nvmap_handle_ref *cur_handle;
		struct mutex lock;
	} cursor;

	bool enabled;			/* checked before queueing flips */
};
81 | |||
82 | #define TEGRA_DC_EXT_EVENT_MASK_ALL \ | ||
83 | TEGRA_DC_EXT_EVENT_HOTPLUG | ||
84 | |||
85 | #define TEGRA_DC_EXT_EVENT_MAX_SZ 8 | ||
86 | |||
/* One queued event plus its inline payload, linked per user. */
struct tegra_dc_ext_event_list {
	struct tegra_dc_ext_event event;
	/* The data field _must_ follow the event field. */
	char data[TEGRA_DC_EXT_EVENT_MAX_SZ];

	struct list_head list;		/* entry in user->event_list */
};
94 | |||
95 | #define TEGRA_DC_EXT_CAPABILITIES \ | ||
96 | TEGRA_DC_EXT_CAPABILITIES_CURSOR_MODE | ||
97 | |||
/* Per-open-fd state for the control device. */
struct tegra_dc_ext_control_user {
	struct tegra_dc_ext_control *control;	/* owning control device */

	struct list_head event_list;	/* pending events, oldest first */
	atomic_t num_events;		/* length of event_list */

	u32 event_mask;			/* event types this user subscribed to */

	/* Stash for an event only partially consumed by a short read() */
	struct tegra_dc_ext_event_list event_to_copy;
	loff_t partial_copy;		/* bytes of event_to_copy already sent */

	struct mutex lock;

	struct list_head list;		/* entry in control->users */
};
113 | |||
/* Control-device state shared across heads. */
struct tegra_dc_ext_control {
	struct cdev cdev;
	struct device *dev;

	struct list_head users;		/* all open control fds */

	struct mutex lock;		/* protects users */
};
122 | |||
123 | extern int tegra_dc_ext_devno; | ||
124 | extern struct class *tegra_dc_ext_class; | ||
125 | |||
126 | extern int tegra_dc_ext_pin_window(struct tegra_dc_ext_user *user, u32 id, | ||
127 | struct nvmap_handle_ref **handle, | ||
128 | dma_addr_t *phys_addr); | ||
129 | |||
130 | extern int tegra_dc_ext_get_cursor(struct tegra_dc_ext_user *user); | ||
131 | extern int tegra_dc_ext_put_cursor(struct tegra_dc_ext_user *user); | ||
132 | extern int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user, | ||
133 | struct tegra_dc_ext_cursor_image *); | ||
134 | extern int tegra_dc_ext_set_cursor(struct tegra_dc_ext_user *user, | ||
135 | struct tegra_dc_ext_cursor *); | ||
136 | |||
137 | extern int tegra_dc_ext_control_init(void); | ||
138 | |||
139 | extern int tegra_dc_ext_queue_hotplug(struct tegra_dc_ext_control *, | ||
140 | int output); | ||
141 | extern ssize_t tegra_dc_ext_event_read(struct file *filp, char __user *buf, | ||
142 | size_t size, loff_t *ppos); | ||
143 | extern unsigned int tegra_dc_ext_event_poll(struct file *, poll_table *); | ||
144 | |||
145 | extern int tegra_dc_ext_get_num_outputs(void); | ||
146 | |||
147 | #endif /* __TEGRA_DC_EXT_PRIV_H */ | ||
diff --git a/drivers/video/tegra/dc/ext/util.c b/drivers/video/tegra/dc/ext/util.c new file mode 100644 index 00000000000..747085579f1 --- /dev/null +++ b/drivers/video/tegra/dc/ext/util.c | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/ext/util.c | ||
3 | * | ||
4 | * Copyright (C) 2011, NVIDIA Corporation | ||
5 | * | ||
6 | * Author: Robert Morell <rmorell@nvidia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | */ | ||
18 | |||
19 | #include <linux/err.h> | ||
20 | #include <linux/types.h> | ||
21 | |||
22 | #include <mach/dc.h> | ||
23 | #include <mach/nvmap.h> | ||
24 | |||
25 | /* ugh */ | ||
26 | #include "../../nvmap/nvmap.h" | ||
27 | |||
28 | #include "tegra_dc_ext_priv.h" | ||
29 | |||
/*
 * Resolve an nvmap buffer id into a pinned address suitable for display.
 *
 * The id is first looked up in the *user's* nvmap context purely as a
 * permission check, then duplicated into the driver's own context so
 * the buffer outlives the user's handle while it is being scanned out,
 * and finally pinned.  On success *handle and *phys_addr refer to the
 * driver-owned duplicate.  id == 0 means "no buffer": *handle = NULL,
 * *phys_addr = -1.
 */
int tegra_dc_ext_pin_window(struct tegra_dc_ext_user *user, u32 id,
			    struct nvmap_handle_ref **handle,
			    dma_addr_t *phys_addr)
{
	struct tegra_dc_ext *ext = user->ext;
	struct nvmap_handle_ref *win_dup;
	struct nvmap_handle *win_handle;
	dma_addr_t phys;

	if (!id) {
		*handle = NULL;
		*phys_addr = -1;

		return 0;
	}

	/*
	 * Take a reference to the buffer using the user's nvmap context, to
	 * make sure they have permissions to access it.
	 */
	win_handle = nvmap_get_handle_id(user->nvmap, id);
	if (!win_handle)
		return -EACCES;

	/*
	 * Duplicate the buffer's handle into the dc_ext driver's nvmap
	 * context, to ensure that the handle won't be freed as long as it is
	 * in use by display.
	 */
	win_dup = nvmap_duplicate_handle_id(ext->nvmap, id);

	/* Release the reference we took in the user's context above */
	nvmap_handle_put(win_handle);

	if (IS_ERR(win_dup))
		return PTR_ERR(win_dup);

	phys = nvmap_pin(ext->nvmap, win_dup);
	/* XXX this isn't correct for non-pointers... */
	if (IS_ERR((void *)phys)) {
		/* Pin failed: drop the driver-context duplicate too */
		nvmap_free(ext->nvmap, win_dup);
		return PTR_ERR((void *)phys);
	}

	*phys_addr = phys;
	*handle = win_dup;

	return 0;
}
diff --git a/drivers/video/tegra/dc/hdmi.c b/drivers/video/tegra/dc/hdmi.c new file mode 100644 index 00000000000..cb401a167fd --- /dev/null +++ b/drivers/video/tegra/dc/hdmi.c | |||
@@ -0,0 +1,2381 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/hdmi.c | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * Author: Erik Gilling <konkers@android.com> | ||
6 | * | ||
7 | * Copyright (C) 2010-2011 NVIDIA Corporation | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/clk.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <linux/fb.h> | ||
24 | #include <linux/gpio.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #ifdef CONFIG_SWITCH | ||
30 | #include <linux/switch.h> | ||
31 | #endif | ||
32 | #include <linux/workqueue.h> | ||
33 | #include <linux/debugfs.h> | ||
34 | #include <linux/seq_file.h> | ||
35 | #include <linux/device.h> | ||
36 | |||
37 | #include <mach/clk.h> | ||
38 | #include <mach/dc.h> | ||
39 | #include <mach/fb.h> | ||
40 | #include <linux/nvhost.h> | ||
41 | #include <mach/hdmi-audio.h> | ||
42 | |||
43 | #include <video/tegrafb.h> | ||
44 | |||
45 | #include "dc_reg.h" | ||
46 | #include "dc_priv.h" | ||
47 | #include "hdmi_reg.h" | ||
48 | #include "hdmi.h" | ||
49 | #include "edid.h" | ||
50 | #include "nvhdcp.h" | ||
51 | |||
/* datasheet claims this will always be 216MHz */
#define HDMI_AUDIOCLK_FREQ		216000000

/*
 * Default HDMI_CTRL re-key value.
 * NOTE(review): presumed to be the HDCP encryption re-key spacing in
 * pixels — confirm against where it is written to the control register.
 */
#define HDMI_REKEY_DEFAULT		56

/*
 * Byte offsets into the ELD (EDID-Like Data) buffer exported to the
 * audio driver.  NOTE(review): layout presumably follows the HD-Audio
 * specification's ELD memory structure — verify against mach/hdmi-audio.h.
 */
#define HDMI_ELD_RESERVED1_INDEX		1
#define HDMI_ELD_RESERVED2_INDEX		3
#define HDMI_ELD_VER_INDEX			0
#define HDMI_ELD_BASELINE_LEN_INDEX		2
#define HDMI_ELD_CEA_VER_MNL_INDEX		4
#define HDMI_ELD_SAD_CNT_CON_TYP_SAI_HDCP_INDEX	5
#define HDMI_ELD_AUD_SYNC_DELAY_INDEX		6
#define HDMI_ELD_SPK_ALLOC_INDEX		7
#define HDMI_ELD_PORT_ID_INDEX			8
#define HDMI_ELD_MANF_NAME_INDEX		16
#define HDMI_ELD_PRODUCT_CODE_INDEX		18
#define HDMI_ELD_MONITOR_NAME_INDEX		20
69 | |||
/*
 * Per-output state for a Tegra HDMI head.
 */
struct tegra_dc_hdmi_data {
	struct tegra_dc *dc;		/* owning display controller */
	struct tegra_edid *edid;	/* EDID reader for the attached sink */
	struct tegra_edid_hdmi_eld eld;	/* ELD audio-capability data parsed from EDID */
	struct tegra_nvhdcp *nvhdcp;	/* HDCP state */
	struct delayed_work work;	/* deferred work (presumably hotplug handling — confirm where it is scheduled) */

	struct resource *base_res;	/* HDMI register aperture resource */
	void __iomem *base;		/* mapped HDMI registers */
	struct clk *clk;		/* HDMI module clock */

	struct clk *disp1_clk;		/* display controller 1 clock */
	struct clk *disp2_clk;		/* display controller 2 clock */
	struct clk *hda_clk;		/* HD-audio path clocks for audio over HDMI */
	struct clk *hda2codec_clk;
	struct clk *hda2hdmi_clk;

#ifdef CONFIG_SWITCH
	struct switch_dev hpd_switch;	/* hotplug state exported to userspace via the switch class */
#endif

	spinlock_t suspend_lock;	/* protects 'suspended' against hotplug handling */
	bool suspended;
	bool eld_retrieved;		/* ELD was successfully read from the sink */
	bool clk_enabled;
	unsigned audio_freq;		/* current audio sample rate, Hz */
	unsigned audio_source;

	bool dvi;			/* sink treated as DVI (no audio/infoframes) — confirm against mode setup */
};
100 | |||
/*
 * Cached pointer to the HDMI instance.
 * NOTE(review): file-scope but not static — confirm whether another
 * translation unit references this symbol before adding 'static'.
 */
struct tegra_dc_hdmi_data *dc_hdmi;
102 | |||
/*
 * Modes this driver is willing to drive: CEA-861 TV formats first, then
 * a handful of VESA monitor modes for DVI and low-resolution sinks.
 * NOTE(review): presumably matched against modes advertised in the
 * sink's EDID — confirm against the mode-filter code in this file.
 *
 * Field legend (struct fb_videomode): hsync_len/vsync_len are the sync
 * widths; left/upper margins are the horizontal/vertical back porches;
 * right/lower margins are the front porches.
 */
const struct fb_videomode tegra_dc_hdmi_supported_modes[] = {
	/* 1280x720p 60hz: EIA/CEA-861-B Format 4 */
	{
		.xres = 1280,
		.yres = 720,
		.pixclock = KHZ2PICOS(74250),
		.hsync_len = 40,
		.vsync_len = 5,
		.left_margin = 220,
		.upper_margin = 20,
		.right_margin = 110,
		.lower_margin = 5,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1280x720p 60hz: EIA/CEA-861-B Format 4 (Stereo) */
	{
		.xres = 1280,
		.yres = 720,
		.pixclock = KHZ2PICOS(74250),
		.hsync_len = 40,
		.vsync_len = 5,
		.left_margin = 220,
		.upper_margin = 20,
		.right_margin = 110,
		.lower_margin = 5,
		.vmode = FB_VMODE_NONINTERLACED |
/* frame-packed stereo needs double pixel clock; fall back when limited to 74MHz */
#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
				FB_VMODE_STEREO_FRAME_PACK,
#else
				FB_VMODE_STEREO_LEFT_RIGHT,
#endif
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},

	/* 720x480p 59.94hz: EIA/CEA-861-B Formats 2 & 3 */
	{
		.xres = 720,
		.yres = 480,
		.pixclock = KHZ2PICOS(27000),
		.hsync_len = 62,
		.vsync_len = 6,
		.left_margin = 60,
		.upper_margin = 30,
		.right_margin = 16,
		.lower_margin = 9,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = 0,
	},

	/* 640x480p 60hz: EIA/CEA-861-B Format 1 */
	{
		.xres = 640,
		.yres = 480,
		.pixclock = KHZ2PICOS(25200),
		.hsync_len = 96,
		.vsync_len = 2,
		.left_margin = 48,
		.upper_margin = 33,
		.right_margin = 16,
		.lower_margin = 10,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = 0,
	},

	/* 720x576p 50hz EIA/CEA-861-B Formats 17 & 18 */
	{
		.xres = 720,
		.yres = 576,
		.pixclock = KHZ2PICOS(27000),
		.hsync_len = 64,
		.vsync_len = 5,
		.left_margin = 68,
		.upper_margin = 39,
		.right_margin = 12,
		.lower_margin = 5,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = 0,
	},

	/* 1920x1080p 23.98/24hz: EIA/CEA-861-B Format 32 (Stereo) */
	{
		.xres = 1920,
		.yres = 1080,
		.pixclock = KHZ2PICOS(74250),
		.hsync_len = 44,
		.vsync_len = 5,
		.left_margin = 148,
		.upper_margin = 36,
		.right_margin = 638,
		.lower_margin = 4,
		.vmode = FB_VMODE_NONINTERLACED |
#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
				FB_VMODE_STEREO_FRAME_PACK,
#else
				FB_VMODE_STEREO_LEFT_RIGHT,
#endif
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1920x1080p 30Hz EIA/CEA-861-B Format 34 */
	{
		.xres = 1920,
		.yres = 1080,
		.pixclock = KHZ2PICOS(74250),
		.hsync_len = 44,
		.vsync_len = 5,
		.left_margin = 148,
		.upper_margin = 36,
		.right_margin = 88,
		.lower_margin = 4,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1920x1080p 59.94/60hz EIA/CEA-861-B Format 16 */
	{
		.xres = 1920,
		.yres = 1080,
		.pixclock = KHZ2PICOS(148500),
		.hsync_len = 44,
		.vsync_len = 5,
		.left_margin = 148,
		.upper_margin = 36,
		.right_margin = 88,
		.lower_margin = 4,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},

	/*
	 * Few VGA/SVGA modes to support monitors with lower
	 * resolutions or to support HDMI<->DVI connection
	 */

	/* 640x480p 75hz */
	{
		.xres = 640,
		.yres = 480,
		.pixclock = KHZ2PICOS(31500),
		.hsync_len = 96,
		.vsync_len = 2,
		.left_margin = 48,
		.upper_margin = 32,
		.right_margin = 16,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = 0,
	},
	/* 720x400p 59hz */
	{
		.xres = 720,
		.yres = 400,
		.pixclock = KHZ2PICOS(35500),
		.hsync_len = 72,
		.vsync_len = 3,
		.left_margin = 108,
		.upper_margin = 42,
		.right_margin = 36,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 800x600p 60hz */
	{
		.xres = 800,
		.yres = 600,
		.pixclock = KHZ2PICOS(40000),
		.hsync_len = 128,
		.vsync_len = 4,
		.left_margin = 88,
		.upper_margin = 23,
		.right_margin = 40,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},
	/* 800x600p 75hz */
	{
		.xres = 800,
		.yres = 600,
		.pixclock = KHZ2PICOS(49500),
		.hsync_len = 80,
		.vsync_len = 2,
		.left_margin = 160,
		.upper_margin = 21,
		.right_margin = 16,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1024x768p 60hz */
	{
		.xres = 1024,
		.yres = 768,
		.pixclock = KHZ2PICOS(65000),
		.hsync_len = 136,
		.vsync_len = 6,
		.left_margin = 160,
		.upper_margin = 29,
		.right_margin = 24,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = 0,
	},
	/* 1024x768p 75hz */
	{
		.xres = 1024,
		.yres = 768,
		.pixclock = KHZ2PICOS(78800),
		.hsync_len = 96,
		.vsync_len = 3,
		.left_margin = 176,
		.upper_margin = 28,
		.right_margin = 16,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = 0,
	},
	/* 1152x864p 75hz */
	{
		.xres = 1152,
		.yres = 864,
		.pixclock = KHZ2PICOS(108000),
		.hsync_len = 128,
		.vsync_len = 3,
		.left_margin = 256,
		.upper_margin = 32,
		.right_margin = 64,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1280x800p 60hz */
	{
		.xres = 1280,
		.yres = 800,
		.pixclock = KHZ2PICOS(83460),
		.hsync_len = 136,
		.vsync_len = 3,
		.left_margin = 200,
		.upper_margin = 24,
		.right_margin = 64,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1280x960p 60hz */
	{
		.xres = 1280,
		.yres = 960,
		.pixclock = KHZ2PICOS(108000),
		.hsync_len = 136,
		.vsync_len = 3,
		.left_margin = 216,
		.upper_margin = 30,
		.right_margin = 80,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1280x1024p 60hz */
	{
		.xres = 1280,
		.yres = 1024,
		.pixclock = KHZ2PICOS(108000),
		.hsync_len = 112,
		.vsync_len = 3,
		.left_margin = 248,
		.upper_margin = 38,
		.right_margin = 48,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1280x1024p 75hz */
	{
		.xres = 1280,
		.yres = 1024,
		.pixclock = KHZ2PICOS(135000),
		.hsync_len = 144,
		.vsync_len = 3,
		.left_margin = 248,
		.upper_margin = 38,
		.right_margin = 16,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1368x768p 60hz */
	{
		.xres = 1368,
		.yres = 768,
		.pixclock = KHZ2PICOS(85860),
		.hsync_len = 144,
		.vsync_len = 3,
		.left_margin = 216,
		.upper_margin = 23,
		.right_margin = 72,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1440x900p 60hz */
	{
		.xres = 1440,
		.yres = 900,
		.pixclock = KHZ2PICOS(106470),
		.hsync_len = 152,
		.vsync_len = 3,
		.left_margin = 232,
		.upper_margin = 28,
		.right_margin = 80,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1600x1200p 60hz */
	{
		.xres = 1600,
		.yres = 1200,
		.pixclock = KHZ2PICOS(162000),
		.hsync_len = 192,
		.vsync_len = 3,
		.left_margin = 304,
		.upper_margin = 46,
		.right_margin = 64,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1600x1200p 75hz */
	{
		.xres = 1600,
		.yres = 1200,
		.pixclock = KHZ2PICOS(202500),
		.hsync_len = 192,
		.vsync_len = 3,
		.left_margin = 304,
		.upper_margin = 46,
		.right_margin = 64,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1680x1050p 59.94/60hz */
	{
		.xres = 1680,
		.yres = 1050,
		.pixclock = KHZ2PICOS(147140),
		.hsync_len = 184,
		.vsync_len = 3,
		.left_margin = 288,
		.upper_margin = 33,
		.right_margin = 104,
		.lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
};
463 | |||
464 | /* CVT timing representation of VESA modes*/ | ||
/*
 * CVT (Coordinated Video Timings) representations of the supported VESA
 * modes; unlike the table above, each entry also records .refresh.
 *
 * Field legend (struct fb_videomode): hsync_len/vsync_len are the sync
 * widths; left/upper margins are the horizontal/vertical back porches;
 * right/lower margins are the front porches.
 */
const struct fb_videomode tegra_dc_hdmi_supported_cvt_modes[] = {

	/* 640x480p 60hz */
	{
		.refresh = 60,
		.xres = 640,
		.yres = 480,
		.pixclock = KHZ2PICOS(23750),
		.hsync_len = 64,
		.vsync_len = 4,
		.left_margin = 80,
		.upper_margin = 17,
		.right_margin = 16,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 640x480p 75hz */
	{
		.refresh = 75,
		.xres = 640,
		.yres = 480,
		.pixclock = KHZ2PICOS(30750),
		.hsync_len = 64,
		.vsync_len = 4,
		.left_margin = 88,
		.upper_margin = 21,
		.right_margin = 24,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 720x400p 59hz */
	{
		.refresh = 59,
		.xres = 720,
		.yres = 400,
		.pixclock = KHZ2PICOS(22000),
		.hsync_len = 64,
		.vsync_len = 10,
		.left_margin = 88,
		.upper_margin = 14,
		.right_margin = 24,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 800x600p 60hz */
	{
		.refresh = 60,
		.xres = 800,
		.yres = 600,
		.pixclock = KHZ2PICOS(38250),
		.hsync_len = 80,
		.vsync_len = 4,
		.left_margin = 112,
		.upper_margin = 21,
		.right_margin = 32,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 800x600p 75hz */
	{
		.refresh = 75,
		.xres = 800,
		.yres = 600,
		.pixclock = KHZ2PICOS(49000),
		.hsync_len = 80,
		.vsync_len = 4,
		.left_margin = 120,
		.upper_margin = 26,
		.right_margin = 40,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1024x768p 60hz */
	{
		.refresh = 60,
		.xres = 1024,
		.yres = 768,
		.pixclock = KHZ2PICOS(63500),
		.hsync_len = 104,
		.vsync_len = 4,
		.left_margin = 152,
		.upper_margin = 27,
		.right_margin = 48,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1024x768p 75hz */
	{
		.refresh = 75,
		.xres = 1024,
		.yres = 768,
		.pixclock = KHZ2PICOS(82000),
		.hsync_len = 104,
		.vsync_len = 4,
		.left_margin = 168,
		.upper_margin = 34,
		.right_margin = 64,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1152x864p 75hz */
	{
		.refresh = 75,
		.xres = 1152,
		.yres = 864,
		.pixclock = KHZ2PICOS(104500),
		.hsync_len = 120,
		.vsync_len = 10,
		.left_margin = 192,
		.upper_margin = 38,
		.right_margin = 72,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1280x800p 60hz */
	{
		.refresh = 60,
		.xres = 1280,
		.yres = 800,
		.pixclock = KHZ2PICOS(83500),
		.hsync_len = 128,
		.vsync_len = 6,
		.left_margin = 200,
		.upper_margin = 28,
		.right_margin = 72,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1280x960p 60hz */
	{
		.refresh = 60,
		.xres = 1280,
		.yres = 960,
		.pixclock = KHZ2PICOS(101250),
		.hsync_len = 128,
		.vsync_len = 4,
		.left_margin = 208,
		.upper_margin = 33,
		.right_margin = 80,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1280x1024p 60hz */
	{
		.refresh = 60,
		.xres = 1280,
		.yres = 1024,
		.pixclock = KHZ2PICOS(109000),
		.hsync_len = 136,
		.vsync_len = 7,
		.left_margin = 216,
		.upper_margin = 36,
		.right_margin = 80,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1280x1024p 75hz */
	{
		.refresh = 75,
		.xres = 1280,
		.yres = 1024,
		.pixclock = KHZ2PICOS(138750),
		.hsync_len = 136,
		.vsync_len = 7,
		.left_margin = 224,
		.upper_margin = 45,
		.right_margin = 88,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1368x768p 60hz */
	{
		.refresh = 60,
		.xres = 1368,
		.yres = 768,
		.pixclock = KHZ2PICOS(85250),
		.hsync_len = 136,
		.vsync_len = 10,
		.left_margin = 208,
		.upper_margin = 27,
		.right_margin = 72,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1440x900p 60hz */
	{
		.refresh = 60,
		.xres = 1440,
		.yres = 900,
		.pixclock = KHZ2PICOS(106500),
		.hsync_len = 152,
		.vsync_len = 6,
		.left_margin = 232,
		.upper_margin = 31,
		.right_margin = 80,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1600x1200p 60hz */
	{
		.refresh = 60,
		.xres = 1600,
		.yres = 1200,
		.pixclock = KHZ2PICOS(161000),
		.hsync_len = 168,
		.vsync_len = 4,
		.left_margin = 280,
		.upper_margin = 42,
		.right_margin = 112,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1600x1200p 75hz */
	{
		.refresh = 75,
		.xres = 1600,
		.yres = 1200,
		.pixclock = KHZ2PICOS(204750),
		.hsync_len = 168,
		.vsync_len = 4,
		.left_margin = 288,
		.upper_margin = 52,
		.right_margin = 120,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
	/* 1680x1050p 59.94/60hz */
	{
		.refresh = 60,
		.xres = 1680,
		.yres = 1050,
		.pixclock = KHZ2PICOS(140000),
		.hsync_len = 168,
		.vsync_len = 10,
		.left_margin = 272,
		.upper_margin = 36,
		.right_margin = 104,
		.lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
};
724 | |||
/* table of TMDS electrical settings; entries must be in ascending order of pclk. */
/*
 * One TMDS electrical configuration, selected by pixel clock.
 * NOTE(review): "tdms" is almost certainly a typo for TMDS; renaming
 * the identifier would touch every user, so it is kept as-is.
 */
struct tdms_config {
	int pclk;		/* upper pixel-clock bound (Hz) this entry covers */
	u32 pll0;		/* SOR PLL0 register value */
	u32 pll1;		/* SOR PLL1 register value */
	u32 pe_current; /* pre-emphasis */
	u32 drive_current;	/* per-lane drive strength */
};
733 | |||
/*
 * TMDS electrical settings per pixel-clock band (480p / 720p / 1080p).
 * Entries must stay in ascending pclk order; the last entry uses
 * INT_MAX as a catch-all.  Tegra2 needs different drive/pre-emphasis
 * values than later SoCs, hence the two variants.
 */
#ifndef CONFIG_ARCH_TEGRA_2x_SOC
const struct tdms_config tdms_config[] = {
	{ /* 480p modes */
	.pclk = 27000000,
	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
		SOR_PLL_VCOCAP(0) | SOR_PLL_TX_REG_LOAD(0),
	.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
	.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
		PE_CURRENT1(PE_CURRENT_0_0_mA) |
		PE_CURRENT2(PE_CURRENT_0_0_mA) |
		PE_CURRENT3(PE_CURRENT_0_0_mA),
	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
	},
	{ /* 720p modes */
	.pclk = 74250000,
	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
		SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(0),
	.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
	.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
		PE_CURRENT1(PE_CURRENT_5_0_mA) |
		PE_CURRENT2(PE_CURRENT_5_0_mA) |
		PE_CURRENT3(PE_CURRENT_5_0_mA),
	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
	},
	{ /* 1080p modes */
	.pclk = INT_MAX,
	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
		SOR_PLL_VCOCAP(3) | SOR_PLL_TX_REG_LOAD(0),
	.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
	.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
		PE_CURRENT1(PE_CURRENT_5_0_mA) |
		PE_CURRENT2(PE_CURRENT_5_0_mA) |
		PE_CURRENT3(PE_CURRENT_5_0_mA),
	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
	},
};
#else /* CONFIG_ARCH_TEGRA_2x_SOC */
const struct tdms_config tdms_config[] = {
	{ /* 480p modes */
	.pclk = 27000000,
	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
		SOR_PLL_VCOCAP(0) | SOR_PLL_TX_REG_LOAD(3),
	.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
	.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
		PE_CURRENT1(PE_CURRENT_0_0_mA) |
		PE_CURRENT2(PE_CURRENT_0_0_mA) |
		PE_CURRENT3(PE_CURRENT_0_0_mA),
	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
	},
	{ /* 720p modes */
	.pclk = 74250000,
	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
		SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(3),
	.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
	.pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
		PE_CURRENT1(PE_CURRENT_6_0_mA) |
		PE_CURRENT2(PE_CURRENT_6_0_mA) |
		PE_CURRENT3(PE_CURRENT_6_0_mA),
	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
	},
	{ /* 1080p modes */
	.pclk = INT_MAX,
	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
		SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(3),
	.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
	.pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
		PE_CURRENT1(PE_CURRENT_6_0_mA) |
		PE_CURRENT2(PE_CURRENT_6_0_mA) |
		PE_CURRENT3(PE_CURRENT_6_0_mA),
	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
	},
};
#endif
825 | |||
/*
 * Audio clock configuration for one (sample rate, pixel clock) pair.
 * The values of n and cts match the HDMI audio clock regeneration
 * N/CTS parameters; aval is presumably an AVAL register value —
 * TODO(review): confirm against the register programming code.
 */
struct tegra_hdmi_audio_config {
	unsigned pix_clock;	/* pixel clock (Hz) this row applies to */
	unsigned n;
	unsigned cts;
	unsigned aval;
};
832 | |||
833 | |||
/*
 * Per-sample-rate audio configuration tables.  Each row is
 * { pix_clock, n, cts, aval }; a row with pix_clock == 0 terminates the
 * table (remaining fields of the terminator are zero-initialized by C).
 */
const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
	{25200000, 4096, 25200, 24000},
	{27000000, 4096, 27000, 24000},
	{74250000, 4096, 74250, 24000},
	{148500000, 4096, 148500, 24000},
	{0, 0, 0},
};

const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
	{25200000, 5880, 26250, 25000},
	{27000000, 5880, 28125, 25000},
	{74250000, 4704, 61875, 20000},
	{148500000, 4704, 123750, 20000},
	{0, 0, 0},
};

const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
	{25200000, 6144, 25200, 24000},
	{27000000, 6144, 27000, 24000},
	{74250000, 6144, 74250, 24000},
	{148500000, 6144, 148500, 24000},
	{0, 0, 0},
};

const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = {
	{25200000, 11760, 26250, 25000},
	{27000000, 11760, 28125, 25000},
	{74250000, 9408, 61875, 20000},
	{148500000, 9408, 123750, 20000},
	{0, 0, 0},
};

const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = {
	{25200000, 12288, 25200, 24000},
	{27000000, 12288, 27000, 24000},
	{74250000, 12288, 74250, 24000},
	{148500000, 12288, 148500, 24000},
	{0, 0, 0},
};

const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = {
	{25200000, 23520, 26250, 25000},
	{27000000, 23520, 28125, 25000},
	{74250000, 18816, 61875, 20000},
	{148500000, 18816, 123750, 20000},
	{0, 0, 0},
};

const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
	{25200000, 24576, 25200, 24000},
	{27000000, 24576, 27000, 24000},
	{74250000, 24576, 74250, 24000},
	{148500000, 24576, 148500, 24000},
	{0, 0, 0},
};
889 | |||
890 | static const struct tegra_hdmi_audio_config | ||
891 | *tegra_hdmi_get_audio_config(unsigned audio_freq, unsigned pix_clock) | ||
892 | { | ||
893 | const struct tegra_hdmi_audio_config *table; | ||
894 | |||
895 | switch (audio_freq) { | ||
896 | case AUDIO_FREQ_32K: | ||
897 | table = tegra_hdmi_audio_32k; | ||
898 | break; | ||
899 | case AUDIO_FREQ_44_1K: | ||
900 | table = tegra_hdmi_audio_44_1k; | ||
901 | break; | ||
902 | case AUDIO_FREQ_48K: | ||
903 | table = tegra_hdmi_audio_48k; | ||
904 | break; | ||
905 | case AUDIO_FREQ_88_2K: | ||
906 | table = tegra_hdmi_audio_88_2k; | ||
907 | break; | ||
908 | case AUDIO_FREQ_96K: | ||
909 | table = tegra_hdmi_audio_96k; | ||
910 | break; | ||
911 | case AUDIO_FREQ_176_4K: | ||
912 | table = tegra_hdmi_audio_176_4k; | ||
913 | break; | ||
914 | case AUDIO_FREQ_192K: | ||
915 | table = tegra_hdmi_audio_192k; | ||
916 | break; | ||
917 | default: | ||
918 | return NULL; | ||
919 | } | ||
920 | |||
921 | while (table->pix_clock) { | ||
922 | if (table->pix_clock == pix_clock) | ||
923 | return table; | ||
924 | table++; | ||
925 | } | ||
926 | |||
927 | return NULL; | ||
928 | } | ||
929 | |||
930 | |||
/*
 * Read a 32-bit HDMI register.  @reg is a word (32-bit) offset, hence
 * the multiply by 4 to get the byte offset from the mapped base.
 */
unsigned long tegra_hdmi_readl(struct tegra_dc_hdmi_data *hdmi,
					   unsigned long reg)
{
	return readl(hdmi->base + reg * 4);
}
936 | |||
/*
 * Write a 32-bit HDMI register.  @reg is a word offset (see
 * tegra_hdmi_readl for the addressing convention).
 */
void tegra_hdmi_writel(struct tegra_dc_hdmi_data *hdmi,
				     unsigned long val, unsigned long reg)
{
	writel(val, hdmi->base + reg * 4);
}
942 | |||
/*
 * Read-modify-write helper: clear the bits in @clr, then set the bits
 * in @set, in HDMI register @reg.
 */
static inline void tegra_hdmi_clrsetbits(struct tegra_dc_hdmi_data *hdmi,
					 unsigned long reg, unsigned long clr,
					 unsigned long set)
{
	unsigned long v = tegra_hdmi_readl(hdmi, reg);

	v = (v & ~clr) | set;
	tegra_hdmi_writel(hdmi, v, reg);
}
952 | |||
953 | #ifdef CONFIG_DEBUG_FS | ||
/*
 * debugfs "regs" show handler: dump every HDMI register to the seq_file
 * as "name<TAB>offset<TAB>value".  The display I/O window and the HDMI
 * clock are enabled around the reads so the registers are accessible.
 */
static int dbg_hdmi_show(struct seq_file *s, void *unused)
{
	struct tegra_dc_hdmi_data *hdmi = s->private;

/* one line per register: symbolic name, word offset (hex), 32-bit value */
#define DUMP_REG(a) do {						\
		seq_printf(s, "%-32s\t%03x\t%08lx\n",			\
			   #a, a, tegra_hdmi_readl(hdmi, a));		\
	} while (0)

	tegra_dc_io_start(hdmi->dc);
	clk_enable(hdmi->clk);

	DUMP_REG(HDMI_CTXSW);
	DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
	DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
	DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
	DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
	DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
	DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
	DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
	DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
	DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
	DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_CTRL);
	DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
	DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
	DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
	DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
	DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
	DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
	DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
	DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
	DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
	DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
	DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST0);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST1);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST2);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST3);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST4);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST5);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST6);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST7);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST8);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST9);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTA);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTB);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTC);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTD);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTE);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTF);
	DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
	DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
	DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
	DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
	DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
	DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
	DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
	DUMP_REG(HDMI_NV_PDISP_SCRATCH);
	DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
	DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
	DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
#undef DUMP_REG

	clk_disable(hdmi->clk);
	tegra_dc_io_end(hdmi->dc);

	return 0;
}
1127 | |||
/* debugfs open: bind the seq_file to the hdmi instance stashed in i_private */
static int dbg_hdmi_open(struct inode *inode, struct file *file)
{
	return single_open(file, dbg_hdmi_show, inode->i_private);
}
1132 | |||
/* read-only seq_file operations for the debugfs "regs" entry */
static const struct file_operations dbg_fops = {
	.open		= dbg_hdmi_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* debugfs directory "tegra_hdmi"; NULL when creation failed or disabled */
static struct dentry *hdmidir;
1141 | |||
1142 | static void tegra_dc_hdmi_debug_create(struct tegra_dc_hdmi_data *hdmi) | ||
1143 | { | ||
1144 | struct dentry *retval; | ||
1145 | |||
1146 | hdmidir = debugfs_create_dir("tegra_hdmi", NULL); | ||
1147 | if (!hdmidir) | ||
1148 | return; | ||
1149 | retval = debugfs_create_file("regs", S_IRUGO, hdmidir, hdmi, | ||
1150 | &dbg_fops); | ||
1151 | if (!retval) | ||
1152 | goto free_out; | ||
1153 | return; | ||
1154 | free_out: | ||
1155 | debugfs_remove_recursive(hdmidir); | ||
1156 | hdmidir = NULL; | ||
1157 | return; | ||
1158 | } | ||
1159 | #else | ||
/* debugfs disabled: no-op stub */
static inline void tegra_dc_hdmi_debug_create(struct tegra_dc_hdmi_data *hdmi)
{ }
1162 | #endif | ||
1163 | |||
/* pixel-clock match tolerance; NOTE(review): appears unused in this
 * file's visible code - confirm before removing */
#define PIXCLOCK_TOLERANCE 200
1165 | |||
1166 | static int tegra_dc_calc_clock_per_frame(const struct fb_videomode *mode) | ||
1167 | { | ||
1168 | return (mode->left_margin + mode->xres + | ||
1169 | mode->right_margin + mode->hsync_len) * | ||
1170 | (mode->upper_margin + mode->yres + | ||
1171 | mode->lower_margin + mode->vsync_len); | ||
1172 | } | ||
/*
 * Compare two video modes for equivalence: same total clocks per frame,
 * same resolution and vmode, and pixel clocks either identical or close
 * enough that the resulting refresh rates differ by at most ~1 Hz.
 */
static bool tegra_dc_hdmi_mode_equal(const struct fb_videomode *mode1,
					const struct fb_videomode *mode2)
{
	int clock_per_frame1 = tegra_dc_calc_clock_per_frame(mode1);
	int clock_per_frame2 = tegra_dc_calc_clock_per_frame(mode2);

	/* allows up to 1Hz of pixclock difference */
	/* NOTE(review): abs() of a difference of unsigned PICOS2KHZ()
	 * values relies on the kernel abs() macro handling the wrapped
	 * unsigned result correctly - TODO confirm on this kernel/arch */
	return (clock_per_frame1 == clock_per_frame2 &&
		mode1->xres	== mode2->xres &&
		mode1->yres	== mode2->yres &&
		mode1->vmode	== mode2->vmode &&
		(mode1->pixclock == mode2->pixclock ||
		(abs(PICOS2KHZ(mode1->pixclock) -
			PICOS2KHZ(mode2->pixclock)) *
			1000 / clock_per_frame1 <= 1)));
}
1189 | |||
1190 | static bool tegra_dc_hdmi_valid_pixclock(const struct tegra_dc *dc, | ||
1191 | const struct fb_videomode *mode) | ||
1192 | { | ||
1193 | unsigned max_pixclock = tegra_dc_get_out_max_pixclock(dc); | ||
1194 | if (max_pixclock) { | ||
1195 | /* this might look counter-intuitive, | ||
1196 | * but pixclock's unit is picos(not Khz) | ||
1197 | */ | ||
1198 | return mode->pixclock >= max_pixclock; | ||
1199 | } else { | ||
1200 | return true; | ||
1201 | } | ||
1202 | } | ||
1203 | |||
1204 | static bool tegra_dc_cvt_mode_equal(const struct fb_videomode *mode1, | ||
1205 | const struct fb_videomode *mode2) | ||
1206 | { | ||
1207 | return (mode1->xres == mode2->xres && | ||
1208 | mode1->yres == mode2->yres && | ||
1209 | mode1->refresh == mode2->refresh && | ||
1210 | mode1->vmode == mode2->vmode); | ||
1211 | } | ||
1212 | |||
1213 | static bool tegra_dc_reload_mode(struct fb_videomode *mode) | ||
1214 | { | ||
1215 | int i = 0; | ||
1216 | for (i = 0; i < ARRAY_SIZE(tegra_dc_hdmi_supported_cvt_modes); i++) { | ||
1217 | const struct fb_videomode *cvt_mode | ||
1218 | = &tegra_dc_hdmi_supported_cvt_modes[i]; | ||
1219 | if (tegra_dc_cvt_mode_equal(cvt_mode, mode)) { | ||
1220 | memcpy(mode, cvt_mode, sizeof(*mode)); | ||
1221 | return true; | ||
1222 | } | ||
1223 | } | ||
1224 | return false; | ||
1225 | } | ||
1226 | |||
1227 | |||
/*
 * Mode validation callback for tegra_fb_update_monspecs(): accept @mode
 * only if it matches an entry in the supported-mode table and its pixel
 * clock is within the board limit.  On acceptance, @mode is rewritten
 * in place with the canonical timings and its refresh rate recomputed.
 */
static bool tegra_dc_hdmi_mode_filter(const struct tegra_dc *dc,
					struct fb_videomode *mode)
{
	int i;
	int clock_per_frame;

	if (!mode->pixclock)
		return false;

#ifdef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
	/* board config caps the TMDS clock at 74.25 MHz */
	if (PICOS2KHZ(mode->pixclock) > 74250)
		return false;
#endif

	for (i = 0; i < ARRAY_SIZE(tegra_dc_hdmi_supported_modes); i++) {
		const struct fb_videomode *supported_mode
				= &tegra_dc_hdmi_supported_modes[i];
		if (tegra_dc_hdmi_mode_equal(supported_mode, mode) &&
		    tegra_dc_hdmi_valid_pixclock(dc, supported_mode)) {
			if (mode->lower_margin == 1) {
				/* This might be the case for HDMI<->DVI
				 * where the std VESA representation will not
				 * pass the constraint V_FRONT_PORCH >=
				 * V_REF_TO_SYNC + 1.  So reload the mode in
				 * CVT timing standards.
				 */
				if (!tegra_dc_reload_mode(mode))
					return false;
			}
			else
				memcpy(mode, supported_mode, sizeof(*mode));

			mode->flag = FB_MODE_IS_DETAILED;
			/* refresh (Hz) = pixclock (Hz) / clocks-per-frame */
			clock_per_frame = tegra_dc_calc_clock_per_frame(mode);
			mode->refresh = (PICOS2KHZ(mode->pixclock) * 1000)
					/ clock_per_frame;
			return true;
		}
	}

	return false;
}
1270 | |||
1271 | |||
1272 | static bool tegra_dc_hdmi_hpd(struct tegra_dc *dc) | ||
1273 | { | ||
1274 | int sense; | ||
1275 | int level; | ||
1276 | |||
1277 | level = gpio_get_value(dc->out->hotplug_gpio); | ||
1278 | |||
1279 | sense = dc->out->flags & TEGRA_DC_OUT_HOTPLUG_MASK; | ||
1280 | |||
1281 | return (sense == TEGRA_DC_OUT_HOTPLUG_HIGH && level) || | ||
1282 | (sense == TEGRA_DC_OUT_HOTPLUG_LOW && !level); | ||
1283 | } | ||
1284 | |||
1285 | |||
/*
 * Apply monitor specs obtained from EDID to the DC output and announce
 * the new display: updates physical size, HDMI-vs-DVI mode, the fb mode
 * list, the hotplug switch and the extension hotplug notification.
 */
void tegra_dc_hdmi_detect_config(struct tegra_dc *dc,
						struct fb_monspecs *specs)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);

	/* monitors like to lie about these but they are still useful for
	 * detecting aspect ratios
	 */
	dc->out->h_size = specs->max_x * 1000;
	dc->out->v_size = specs->max_y * 1000;

	/* EDID without the HDMI flag => DVI-only sink */
	hdmi->dvi = !(specs->misc & FB_MISC_HDMI);

	tegra_fb_update_monspecs(dc->fb, specs, tegra_dc_hdmi_mode_filter);
#ifdef CONFIG_SWITCH
	/* reset to 0 first, presumably so the set-to-1 below always fires
	 * a notification even on re-detect - TODO confirm intent */
	hdmi->hpd_switch.state = 0;
	switch_set_state(&hdmi->hpd_switch, 1);
#endif
	dev_info(&dc->ndev->dev, "display detected\n");

	dc->connected = true;
	tegra_dc_ext_process_hotplug(dc->ndev->id);
}
1309 | |||
1310 | /* This function is used to enable DC1 and HDMI for the purpose of testing. */ | ||
1311 | bool tegra_dc_hdmi_detect_test(struct tegra_dc *dc, unsigned char *edid_ptr) | ||
1312 | { | ||
1313 | int err; | ||
1314 | struct fb_monspecs specs; | ||
1315 | struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); | ||
1316 | |||
1317 | if (!hdmi || !edid_ptr) { | ||
1318 | dev_err(&dc->ndev->dev, "HDMI test failed to get arguments.\n"); | ||
1319 | return false; | ||
1320 | } | ||
1321 | |||
1322 | err = tegra_edid_get_monspecs_test(hdmi->edid, &specs, edid_ptr); | ||
1323 | if (err < 0) { | ||
1324 | dev_err(&dc->ndev->dev, "error reading edid\n"); | ||
1325 | goto fail; | ||
1326 | } | ||
1327 | |||
1328 | err = tegra_edid_get_eld(hdmi->edid, &hdmi->eld); | ||
1329 | if (err < 0) { | ||
1330 | dev_err(&dc->ndev->dev, "error populating eld\n"); | ||
1331 | goto fail; | ||
1332 | } | ||
1333 | hdmi->eld_retrieved = true; | ||
1334 | |||
1335 | tegra_dc_hdmi_detect_config(dc, &specs); | ||
1336 | |||
1337 | return true; | ||
1338 | |||
1339 | fail: | ||
1340 | hdmi->eld_retrieved = false; | ||
1341 | #ifdef CONFIG_SWITCH | ||
1342 | switch_set_state(&hdmi->hpd_switch, 0); | ||
1343 | #endif | ||
1344 | tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0); | ||
1345 | return false; | ||
1346 | } | ||
1347 | EXPORT_SYMBOL(tegra_dc_hdmi_detect_test); | ||
1348 | |||
/*
 * Probe the attached sink: check HPD, read EDID/ELD over DDC and, on
 * success, apply the monitor configuration.  Returns false (after
 * clearing switch/HDCP state) when no usable sink is present.
 */
static bool tegra_dc_hdmi_detect(struct tegra_dc *dc)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
	struct fb_monspecs specs;
	int err;

	if (!tegra_dc_hdmi_hpd(dc))
		goto fail;

	err = tegra_edid_get_monspecs(hdmi->edid, &specs);
	if (err < 0) {
		dev_err(&dc->ndev->dev, "error reading edid\n");
		goto fail;
	}

	err = tegra_edid_get_eld(hdmi->edid, &hdmi->eld);
	if (err < 0) {
		dev_err(&dc->ndev->dev, "error populating eld\n");
		goto fail;
	}
	hdmi->eld_retrieved = true;

	tegra_dc_hdmi_detect_config(dc, &specs);

	return true;

fail:
	/* failure: mark no ELD, drop the hotplug switch and HDCP state */
	hdmi->eld_retrieved = false;
#ifdef CONFIG_SWITCH
	switch_set_state(&hdmi->hpd_switch, 0);
#endif
	tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0);
	return false;
}
1383 | |||
1384 | |||
/*
 * Deferred hotplug handler (queued from the HPD interrupt): enable the
 * controller, re-run detection, and tear everything back down if no
 * sink is found.
 */
static void tegra_dc_hdmi_detect_worker(struct work_struct *work)
{
	struct tegra_dc_hdmi_data *hdmi =
		container_of(to_delayed_work(work), struct tegra_dc_hdmi_data, work);
	struct tegra_dc *dc = hdmi->dc;

	tegra_dc_enable(dc);
	/* short delay after enable, presumably to let HW settle before
	 * probing - TODO confirm */
	msleep(5);
	if (!tegra_dc_hdmi_detect(dc)) {
		/* no sink: disable the DC and clear the published modes */
		tegra_dc_disable(dc);
		tegra_fb_update_monspecs(dc->fb, NULL, NULL);

		dc->connected = false;
		tegra_dc_ext_process_hotplug(dc->ndev->id);
	}
}
1401 | |||
1402 | static irqreturn_t tegra_dc_hdmi_irq(int irq, void *ptr) | ||
1403 | { | ||
1404 | struct tegra_dc *dc = ptr; | ||
1405 | struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); | ||
1406 | unsigned long flags; | ||
1407 | |||
1408 | spin_lock_irqsave(&hdmi->suspend_lock, flags); | ||
1409 | if (!hdmi->suspended) { | ||
1410 | __cancel_delayed_work(&hdmi->work); | ||
1411 | if (tegra_dc_hdmi_hpd(dc)) | ||
1412 | queue_delayed_work(system_nrt_wq, &hdmi->work, | ||
1413 | msecs_to_jiffies(100)); | ||
1414 | else | ||
1415 | queue_delayed_work(system_nrt_wq, &hdmi->work, | ||
1416 | msecs_to_jiffies(30)); | ||
1417 | } | ||
1418 | spin_unlock_irqrestore(&hdmi->suspend_lock, flags); | ||
1419 | |||
1420 | return IRQ_HANDLED; | ||
1421 | } | ||
1422 | |||
/*
 * System-suspend hook: quiesce HDCP and raise the suspended flag so
 * the HPD interrupt handler stops queueing detect work.
 */
static void tegra_dc_hdmi_suspend(struct tegra_dc *dc)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
	unsigned long flags;

	tegra_nvhdcp_suspend(hdmi->nvhdcp);
	spin_lock_irqsave(&hdmi->suspend_lock, flags);
	/* checked under the same lock by tegra_dc_hdmi_irq() */
	hdmi->suspended = true;
	spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
}
1433 | |||
1434 | static void tegra_dc_hdmi_resume(struct tegra_dc *dc) | ||
1435 | { | ||
1436 | struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); | ||
1437 | unsigned long flags; | ||
1438 | |||
1439 | spin_lock_irqsave(&hdmi->suspend_lock, flags); | ||
1440 | hdmi->suspended = false; | ||
1441 | |||
1442 | if (tegra_dc_hdmi_hpd(dc)) | ||
1443 | queue_delayed_work(system_nrt_wq, &hdmi->work, | ||
1444 | msecs_to_jiffies(100)); | ||
1445 | else | ||
1446 | queue_delayed_work(system_nrt_wq, &hdmi->work, | ||
1447 | msecs_to_jiffies(30)); | ||
1448 | |||
1449 | spin_unlock_irqrestore(&hdmi->suspend_lock, flags); | ||
1450 | tegra_nvhdcp_resume(hdmi->nvhdcp); | ||
1451 | } | ||
1452 | |||
/*
 * sysfs "underscan" attribute: report (as 0/1) whether the attached
 * sink's EDID advertises underscan support.  Returns an empty read
 * when no EDID is available or CONFIG_SWITCH is disabled (the device
 * the attribute hangs off is the hpd_switch device).
 */
static ssize_t underscan_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
#ifdef CONFIG_SWITCH
	struct tegra_dc_hdmi_data *hdmi =
			container_of(dev_get_drvdata(dev), struct tegra_dc_hdmi_data, hpd_switch);

	if (hdmi->edid)
		return sprintf(buf, "%d\n", tegra_edid_underscan_supported(hdmi->edid));
	else
		return 0;
#else
	return 0;
#endif
}
1468 | |||
/* Read-only attribute: the store callback is NULL, so advertising
 * S_IWUSR made the file appear writable while writes could never
 * succeed (and triggers a sysfs permissions warning on newer kernels).
 * Expose it as read-only. */
static DEVICE_ATTR(underscan, S_IRUGO, underscan_show, NULL);
1470 | |||
1471 | static int tegra_dc_hdmi_init(struct tegra_dc *dc) | ||
1472 | { | ||
1473 | struct tegra_dc_hdmi_data *hdmi; | ||
1474 | struct resource *res; | ||
1475 | struct resource *base_res; | ||
1476 | int ret; | ||
1477 | void __iomem *base; | ||
1478 | struct clk *clk = NULL; | ||
1479 | struct clk *disp1_clk = NULL; | ||
1480 | struct clk *disp2_clk = NULL; | ||
1481 | int err; | ||
1482 | |||
1483 | hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL); | ||
1484 | if (!hdmi) | ||
1485 | return -ENOMEM; | ||
1486 | |||
1487 | res = nvhost_get_resource_byname(dc->ndev, IORESOURCE_MEM, "hdmi_regs"); | ||
1488 | if (!res) { | ||
1489 | dev_err(&dc->ndev->dev, "hdmi: no mem resource\n"); | ||
1490 | err = -ENOENT; | ||
1491 | goto err_free_hdmi; | ||
1492 | } | ||
1493 | |||
1494 | base_res = request_mem_region(res->start, resource_size(res), dc->ndev->name); | ||
1495 | if (!base_res) { | ||
1496 | dev_err(&dc->ndev->dev, "hdmi: request_mem_region failed\n"); | ||
1497 | err = -EBUSY; | ||
1498 | goto err_free_hdmi; | ||
1499 | } | ||
1500 | |||
1501 | base = ioremap(res->start, resource_size(res)); | ||
1502 | if (!base) { | ||
1503 | dev_err(&dc->ndev->dev, "hdmi: registers can't be mapped\n"); | ||
1504 | err = -EBUSY; | ||
1505 | goto err_release_resource_reg; | ||
1506 | } | ||
1507 | |||
1508 | clk = clk_get(&dc->ndev->dev, "hdmi"); | ||
1509 | if (IS_ERR_OR_NULL(clk)) { | ||
1510 | dev_err(&dc->ndev->dev, "hdmi: can't get clock\n"); | ||
1511 | err = -ENOENT; | ||
1512 | goto err_iounmap_reg; | ||
1513 | } | ||
1514 | |||
1515 | disp1_clk = clk_get_sys("tegradc.0", NULL); | ||
1516 | if (IS_ERR_OR_NULL(disp1_clk)) { | ||
1517 | dev_err(&dc->ndev->dev, "hdmi: can't disp1 clock\n"); | ||
1518 | err = -ENOENT; | ||
1519 | goto err_put_clock; | ||
1520 | } | ||
1521 | |||
1522 | disp2_clk = clk_get_sys("tegradc.1", NULL); | ||
1523 | if (IS_ERR_OR_NULL(disp2_clk)) { | ||
1524 | dev_err(&dc->ndev->dev, "hdmi: can't disp2 clock\n"); | ||
1525 | err = -ENOENT; | ||
1526 | goto err_put_clock; | ||
1527 | } | ||
1528 | |||
1529 | #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) | ||
1530 | hdmi->hda_clk = clk_get_sys("tegra30-hda", "hda"); | ||
1531 | if (IS_ERR_OR_NULL(hdmi->hda_clk)) { | ||
1532 | dev_err(&dc->ndev->dev, "hdmi: can't get hda clock\n"); | ||
1533 | err = -ENOENT; | ||
1534 | goto err_put_clock; | ||
1535 | } | ||
1536 | |||
1537 | hdmi->hda2codec_clk = clk_get_sys("tegra30-hda", "hda2codec"); | ||
1538 | if (IS_ERR_OR_NULL(hdmi->hda2codec_clk)) { | ||
1539 | dev_err(&dc->ndev->dev, "hdmi: can't get hda2codec clock\n"); | ||
1540 | err = -ENOENT; | ||
1541 | goto err_put_clock; | ||
1542 | } | ||
1543 | |||
1544 | hdmi->hda2hdmi_clk = clk_get_sys("tegra30-hda", "hda2hdmi"); | ||
1545 | if (IS_ERR_OR_NULL(hdmi->hda2hdmi_clk)) { | ||
1546 | dev_err(&dc->ndev->dev, "hdmi: can't get hda2hdmi clock\n"); | ||
1547 | err = -ENOENT; | ||
1548 | goto err_put_clock; | ||
1549 | } | ||
1550 | #endif | ||
1551 | |||
1552 | /* TODO: support non-hotplug */ | ||
1553 | if (request_irq(gpio_to_irq(dc->out->hotplug_gpio), tegra_dc_hdmi_irq, | ||
1554 | IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | ||
1555 | dev_name(&dc->ndev->dev), dc)) { | ||
1556 | dev_err(&dc->ndev->dev, "hdmi: request_irq %d failed\n", | ||
1557 | gpio_to_irq(dc->out->hotplug_gpio)); | ||
1558 | err = -EBUSY; | ||
1559 | goto err_put_clock; | ||
1560 | } | ||
1561 | |||
1562 | hdmi->edid = tegra_edid_create(dc->out->dcc_bus); | ||
1563 | if (IS_ERR_OR_NULL(hdmi->edid)) { | ||
1564 | dev_err(&dc->ndev->dev, "hdmi: can't create edid\n"); | ||
1565 | err = PTR_ERR(hdmi->edid); | ||
1566 | goto err_free_irq; | ||
1567 | } | ||
1568 | |||
1569 | #ifdef CONFIG_TEGRA_NVHDCP | ||
1570 | hdmi->nvhdcp = tegra_nvhdcp_create(hdmi, dc->ndev->id, | ||
1571 | dc->out->dcc_bus); | ||
1572 | if (IS_ERR_OR_NULL(hdmi->nvhdcp)) { | ||
1573 | dev_err(&dc->ndev->dev, "hdmi: can't create nvhdcp\n"); | ||
1574 | err = PTR_ERR(hdmi->nvhdcp); | ||
1575 | goto err_edid_destroy; | ||
1576 | } | ||
1577 | #else | ||
1578 | hdmi->nvhdcp = NULL; | ||
1579 | #endif | ||
1580 | |||
1581 | INIT_DELAYED_WORK(&hdmi->work, tegra_dc_hdmi_detect_worker); | ||
1582 | |||
1583 | hdmi->dc = dc; | ||
1584 | hdmi->base = base; | ||
1585 | hdmi->base_res = base_res; | ||
1586 | hdmi->clk = clk; | ||
1587 | hdmi->disp1_clk = disp1_clk; | ||
1588 | hdmi->disp2_clk = disp2_clk; | ||
1589 | hdmi->suspended = false; | ||
1590 | hdmi->eld_retrieved= false; | ||
1591 | hdmi->clk_enabled = false; | ||
1592 | hdmi->audio_freq = 44100; | ||
1593 | hdmi->audio_source = AUTO; | ||
1594 | spin_lock_init(&hdmi->suspend_lock); | ||
1595 | |||
1596 | #ifdef CONFIG_SWITCH | ||
1597 | hdmi->hpd_switch.name = "hdmi"; | ||
1598 | ret = switch_dev_register(&hdmi->hpd_switch); | ||
1599 | |||
1600 | if (!ret) | ||
1601 | ret = device_create_file(hdmi->hpd_switch.dev, | ||
1602 | &dev_attr_underscan); | ||
1603 | BUG_ON(ret != 0); | ||
1604 | #endif | ||
1605 | |||
1606 | dc->out->depth = 24; | ||
1607 | |||
1608 | tegra_dc_set_outdata(dc, hdmi); | ||
1609 | |||
1610 | dc_hdmi = hdmi; | ||
1611 | /* boards can select default content protection policy */ | ||
1612 | if (dc->out->flags & TEGRA_DC_OUT_NVHDCP_POLICY_ON_DEMAND) | ||
1613 | tegra_nvhdcp_set_policy(hdmi->nvhdcp, | ||
1614 | TEGRA_NVHDCP_POLICY_ON_DEMAND); | ||
1615 | else | ||
1616 | tegra_nvhdcp_set_policy(hdmi->nvhdcp, | ||
1617 | TEGRA_NVHDCP_POLICY_ALWAYS_ON); | ||
1618 | |||
1619 | tegra_dc_hdmi_debug_create(hdmi); | ||
1620 | |||
1621 | return 0; | ||
1622 | |||
1623 | err_edid_destroy: | ||
1624 | tegra_edid_destroy(hdmi->edid); | ||
1625 | err_free_irq: | ||
1626 | free_irq(gpio_to_irq(dc->out->hotplug_gpio), dc); | ||
1627 | err_put_clock: | ||
1628 | #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) | ||
1629 | if (!IS_ERR_OR_NULL(hdmi->hda2hdmi_clk)) | ||
1630 | clk_put(hdmi->hda2hdmi_clk); | ||
1631 | if (!IS_ERR_OR_NULL(hdmi->hda2codec_clk)) | ||
1632 | clk_put(hdmi->hda2codec_clk); | ||
1633 | if (!IS_ERR_OR_NULL(hdmi->hda_clk)) | ||
1634 | clk_put(hdmi->hda_clk); | ||
1635 | #endif | ||
1636 | if (!IS_ERR_OR_NULL(disp2_clk)) | ||
1637 | clk_put(disp2_clk); | ||
1638 | if (!IS_ERR_OR_NULL(disp1_clk)) | ||
1639 | clk_put(disp1_clk); | ||
1640 | if (!IS_ERR_OR_NULL(clk)) | ||
1641 | clk_put(clk); | ||
1642 | err_iounmap_reg: | ||
1643 | iounmap(base); | ||
1644 | err_release_resource_reg: | ||
1645 | release_resource(base_res); | ||
1646 | err_free_hdmi: | ||
1647 | kfree(hdmi); | ||
1648 | return err; | ||
1649 | } | ||
1650 | |||
/*
 * Tear down everything tegra_dc_hdmi_init() set up: IRQ, pending detect
 * work, hotplug switch, register mapping, clocks, EDID and HDCP state.
 * NOTE(review): the debugfs directory created by
 * tegra_dc_hdmi_debug_create() is not removed here - confirm whether
 * that is intentional (e.g. destroy never runs in practice).
 */
static void tegra_dc_hdmi_destroy(struct tegra_dc *dc)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);

	/* free the IRQ first so no new detect work can be queued */
	free_irq(gpio_to_irq(dc->out->hotplug_gpio), dc);
	cancel_delayed_work_sync(&hdmi->work);
#ifdef CONFIG_SWITCH
	switch_dev_unregister(&hdmi->hpd_switch);
#endif
	iounmap(hdmi->base);
	release_resource(hdmi->base_res);
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
	clk_put(hdmi->hda2hdmi_clk);
	clk_put(hdmi->hda2codec_clk);
	clk_put(hdmi->hda_clk);
#endif
	clk_put(hdmi->clk);
	clk_put(hdmi->disp1_clk);
	clk_put(hdmi->disp2_clk);
	tegra_edid_destroy(hdmi->edid);
	tegra_nvhdcp_destroy(hdmi->nvhdcp);

	kfree(hdmi);

}
1676 | |||
1677 | static void tegra_dc_hdmi_setup_audio_fs_tables(struct tegra_dc *dc) | ||
1678 | { | ||
1679 | struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); | ||
1680 | int i; | ||
1681 | unsigned freqs[] = { | ||
1682 | 32000, | ||
1683 | 44100, | ||
1684 | 48000, | ||
1685 | 88200, | ||
1686 | 96000, | ||
1687 | 176400, | ||
1688 | 192000, | ||
1689 | }; | ||
1690 | |||
1691 | for (i = 0; i < ARRAY_SIZE(freqs); i++) { | ||
1692 | unsigned f = freqs[i]; | ||
1693 | unsigned eight_half; | ||
1694 | unsigned delta;; | ||
1695 | |||
1696 | if (f > 96000) | ||
1697 | delta = 2; | ||
1698 | else if (f > 48000) | ||
1699 | delta = 6; | ||
1700 | else | ||
1701 | delta = 9; | ||
1702 | |||
1703 | eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128); | ||
1704 | tegra_hdmi_writel(hdmi, AUDIO_FS_LOW(eight_half - delta) | | ||
1705 | AUDIO_FS_HIGH(eight_half + delta), | ||
1706 | HDMI_NV_PDISP_AUDIO_FS(i)); | ||
1707 | } | ||
1708 | } | ||
1709 | |||
1710 | #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) | ||
/*
 * Copy the ELD (EDID-Like Data) parsed from the sink's EDID into the
 * SOR's HDA ELD buffer so the HDA codec driver can learn the sink's
 * audio capabilities.  Each write to *_ELD_BUFWR_0 carries the byte
 * index in bits 15:8 and the data byte in bits 7:0.
 */
static void tegra_dc_hdmi_setup_eld_buff(struct tegra_dc *dc)
{
	int i;
	int j;
	u8 tmp;

	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);

	/* program ELD stuff */
	for (i = 0; i < HDMI_ELD_MONITOR_NAME_INDEX; i++) {
		switch (i) {
		case HDMI_ELD_VER_INDEX:
			tmp = (hdmi->eld.eld_ver << 3);
			tegra_hdmi_writel(hdmi, (i << 8) | tmp,
				  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
			break;
		case HDMI_ELD_BASELINE_LEN_INDEX:
			/* baseline-length byte intentionally not written */
			break;
		case HDMI_ELD_CEA_VER_MNL_INDEX:
			tmp = (hdmi->eld.cea_edid_ver << 5);
			tmp |= (hdmi->eld.mnl & 0x1f);
			tegra_hdmi_writel(hdmi, (i << 8) | tmp,
				  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
			break;
		case HDMI_ELD_SAD_CNT_CON_TYP_SAI_HDCP_INDEX:
			tmp = (hdmi->eld.sad_count << 4);
			/* NOTE(review): these masks keep bits in place rather
			 * than shifting fields into position — presumably
			 * conn_type/support_ai/support_hdcp are stored
			 * pre-shifted; confirm against the ELD byte-5 layout. */
			tmp |= (hdmi->eld.conn_type & 0xC);
			tmp |= (hdmi->eld.support_ai & 0x2);
			tmp |= (hdmi->eld.support_hdcp & 0x1);
			tegra_hdmi_writel(hdmi, (i << 8) | tmp,
				  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
			break;
		case HDMI_ELD_AUD_SYNC_DELAY_INDEX:
			tegra_hdmi_writel(hdmi, (i << 8) | (hdmi->eld.aud_synch_delay),
				  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
			break;
		case HDMI_ELD_SPK_ALLOC_INDEX:
			tegra_hdmi_writel(hdmi, (i << 8) | (hdmi->eld.spk_alloc),
				  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
			break;
		case HDMI_ELD_PORT_ID_INDEX:
			/* 8-byte port id, one write per byte */
			for (j = 0; j < 8; j++) {
				tegra_hdmi_writel(hdmi, ((i + j) << 8) | (hdmi->eld.port_id[j]),
					  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
			}
			break;
		case HDMI_ELD_MANF_NAME_INDEX:
			/* 2-byte manufacturer id */
			for (j = 0; j < 2; j++) {
				tegra_hdmi_writel(hdmi, ((i + j) << 8) | (hdmi->eld.manufacture_id[j]),
					  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
			}
			break;
		case HDMI_ELD_PRODUCT_CODE_INDEX:
			/* 2-byte product id */
			for (j = 0; j < 2; j++) {
				tegra_hdmi_writel(hdmi, ((i + j) << 8) | (hdmi->eld.product_id[j]),
					  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
			}
			break;
		}
	}
	/* variable-length tail: monitor name, then the Short Audio
	 * Descriptors immediately after it */
	for (j = 0; j < hdmi->eld.mnl; j++) {
		tegra_hdmi_writel(hdmi, ((j + HDMI_ELD_MONITOR_NAME_INDEX) << 8) |
				  (hdmi->eld.monitor_name[j]),
				  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
	}
	for (j = 0; j < hdmi->eld.sad_count; j++) {
		tegra_hdmi_writel(hdmi, ((j + HDMI_ELD_MONITOR_NAME_INDEX + hdmi->eld.mnl) << 8) |
				  (hdmi->eld.sad[j]),
				  HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0);
	}
	/* set the presence-detect and ELD-valid bits */
	tegra_hdmi_writel(hdmi, 3, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0);
}
1784 | #endif | ||
1785 | |||
/*
 * Configure HDMI audio for the given sample rate and source (HDA,
 * SPDIF, or auto-detect).  Programs the ACR (Audio Clock Regeneration)
 * N/CTS values for the current pixel clock and, on Tegra3+, the AVAL
 * register that matches the sample rate.
 *
 * Returns 0 on success, -EINVAL if no audio configuration exists for
 * this sample-rate / pixel-clock combination.
 */
static int tegra_dc_hdmi_setup_audio(struct tegra_dc *dc, unsigned audio_freq,
					unsigned audio_source)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
	const struct tegra_hdmi_audio_config *config;
	unsigned long audio_n;
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
	unsigned long reg_addr = 0;
#endif
	unsigned a_source = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;

	if (HDA == audio_source)
		a_source = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
	else if (SPDIF == audio_source)
		a_source = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;

#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
	/* Tegra3+: source select lives in the SOR audio control register */
	tegra_hdmi_writel(hdmi, a_source | AUDIO_CNTRL0_INJECT_NULLSMPL,
			  HDMI_NV_PDISP_SOR_AUDIO_CNTRL0_0);
	tegra_hdmi_writel(hdmi,
			  AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
			  AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0),
			  HDMI_NV_PDISP_AUDIO_CNTRL0);
#else
	/* Tegra2: everything goes in one control register */
	tegra_hdmi_writel(hdmi,
			  AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
			  AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) |
			  a_source,
			  HDMI_NV_PDISP_AUDIO_CNTRL0);
#endif
	config = tegra_hdmi_get_audio_config(audio_freq, dc->mode.pclk);
	if (!config) {
		dev_err(&dc->ndev->dev,
			"hdmi: can't set audio to %d at %d pix_clock",
			audio_freq, dc->mode.pclk);
		return -EINVAL;
	}

	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);

	/* load N while holding it in reset, then release the reset below */
	audio_n = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNALTE |
		AUDIO_N_VALUE(config->n - 1);
	tegra_hdmi_writel(hdmi, audio_n, HDMI_NV_PDISP_AUDIO_N);

	tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
			  HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);

	tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts),
			  HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);

	tegra_hdmi_writel(hdmi, SPARE_HW_CTS | SPARE_FORCE_SW_CTS |
			  SPARE_CTS_RESET_VAL(1),
			  HDMI_NV_PDISP_HDMI_SPARE);

	audio_n &= ~AUDIO_N_RESETF;
	tegra_hdmi_writel(hdmi, audio_n, HDMI_NV_PDISP_AUDIO_N);

#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
	/* pick the per-rate AVAL register; audio_freq was validated by the
	 * tegra_hdmi_get_audio_config() lookup above */
	switch (audio_freq) {
	case AUDIO_FREQ_32K:
		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320_0;
		break;
	case AUDIO_FREQ_44_1K:
		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441_0;
		break;
	case AUDIO_FREQ_48K:
		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480_0;
		break;
	case AUDIO_FREQ_88_2K:
		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882_0;
		break;
	case AUDIO_FREQ_96K:
		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960_0;
		break;
	case AUDIO_FREQ_176_4K:
		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764_0;
		break;
	case AUDIO_FREQ_192K:
		reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920_0;
		break;
	}

	tegra_hdmi_writel(hdmi, config->aval, reg_addr);
#endif
	tegra_dc_hdmi_setup_audio_fs_tables(dc);

	return 0;
}
1874 | |||
1875 | int tegra_hdmi_setup_audio_freq_source(unsigned audio_freq, unsigned audio_source) | ||
1876 | { | ||
1877 | struct tegra_dc_hdmi_data *hdmi = dc_hdmi; | ||
1878 | |||
1879 | if (!hdmi) | ||
1880 | return -EAGAIN; | ||
1881 | |||
1882 | /* check for know freq */ | ||
1883 | if (AUDIO_FREQ_32K == audio_freq || | ||
1884 | AUDIO_FREQ_44_1K== audio_freq || | ||
1885 | AUDIO_FREQ_48K== audio_freq || | ||
1886 | AUDIO_FREQ_88_2K== audio_freq || | ||
1887 | AUDIO_FREQ_96K== audio_freq || | ||
1888 | AUDIO_FREQ_176_4K== audio_freq || | ||
1889 | AUDIO_FREQ_192K== audio_freq) { | ||
1890 | /* If we can program HDMI, then proceed */ | ||
1891 | if (hdmi->clk_enabled) | ||
1892 | tegra_dc_hdmi_setup_audio(hdmi->dc, audio_freq,audio_source); | ||
1893 | |||
1894 | /* Store it for using it in enable */ | ||
1895 | hdmi->audio_freq = audio_freq; | ||
1896 | hdmi->audio_source = audio_source; | ||
1897 | } | ||
1898 | else | ||
1899 | return -EINVAL; | ||
1900 | |||
1901 | return 0; | ||
1902 | } | ||
1903 | EXPORT_SYMBOL(tegra_hdmi_setup_audio_freq_source); | ||
1904 | |||
1905 | #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) | ||
1906 | int tegra_hdmi_setup_hda_presence() | ||
1907 | { | ||
1908 | struct tegra_dc_hdmi_data *hdmi = dc_hdmi; | ||
1909 | |||
1910 | if (!hdmi) | ||
1911 | return -EAGAIN; | ||
1912 | |||
1913 | if (hdmi->clk_enabled && hdmi->eld_retrieved) { | ||
1914 | /* If HDA_PRESENCE is already set reset it */ | ||
1915 | if (tegra_hdmi_readl(hdmi, | ||
1916 | HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0)) | ||
1917 | tegra_hdmi_writel(hdmi, 0, | ||
1918 | HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0); | ||
1919 | |||
1920 | tegra_dc_hdmi_setup_eld_buff(hdmi->dc); | ||
1921 | } | ||
1922 | else | ||
1923 | return -ENODEV; | ||
1924 | |||
1925 | return 0; | ||
1926 | } | ||
1927 | EXPORT_SYMBOL(tegra_hdmi_setup_hda_presence); | ||
1928 | #endif | ||
1929 | |||
/*
 * Write an HDMI infoframe (header + payload) into the block's infoframe
 * registers starting at header_reg.  The first payload byte is
 * overwritten with the standard checksum so that all header and payload
 * bytes sum to 0 mod 256.
 */
static void tegra_dc_hdmi_write_infopack(struct tegra_dc *dc, int header_reg,
					 u8 type, u8 version, void *data, int len)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
	u32 subpack[2];	/* extra byte for zero padding of subpack */
	int i;
	u8 csum;

	/* first byte of data is the checksum */
	csum = type + version + len - 1;
	for (i = 1; i < len; i++)
		csum += ((u8 *)data)[i];
	/* u8 arithmetic: 0x100 - csum is the two's-complement checksum */
	((u8 *)data)[0] = 0x100 - csum;

	tegra_hdmi_writel(hdmi, INFOFRAME_HEADER_TYPE(type) |
			  INFOFRAME_HEADER_VERSION(version) |
			  INFOFRAME_HEADER_LEN(len - 1),
			  header_reg);

	/* The audio infoframe only has one set of subpack registers.  The hdmi
	 * block pads the rest of the data as per the spec so we have to fixup
	 * the length before filling in the subpacks.
	 */
	if (header_reg == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
		len = 6;

	/* each subpack 7 bytes divided into:
	 *   subpack_low  - bytes 0 - 3
	 *   subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
	 */
	for (i = 0; i < len; i++) {
		int subpack_idx = i % 7;

		if (subpack_idx == 0)
			memset(subpack, 0x0, sizeof(subpack));

		((u8 *)subpack)[subpack_idx] = ((u8 *)data)[i];

		/* flush the pair of registers at the end of each subpack,
		 * or when the payload runs out */
		if (subpack_idx == 6 || (i + 1 == len)) {
			int reg = header_reg + 1 + (i / 7) * 2;

			tegra_hdmi_writel(hdmi, subpack[0], reg);
			tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
		}
	}
}
1976 | |||
/*
 * Build and enable the AVI infoframe (aspect ratio and CEA VIC derived
 * from the current display mode), or disable it entirely for DVI sinks.
 */
static void tegra_dc_hdmi_setup_avi_infoframe(struct tegra_dc *dc, bool dvi)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
	struct hdmi_avi_infoframe avi;

	if (dvi) {
		/* DVI: no infoframes */
		tegra_hdmi_writel(hdmi, 0x0,
				  HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
		return;
	}

	memset(&avi, 0x0, sizeof(avi));

	avi.r = HDMI_AVI_R_SAME;

	/* map the active mode onto a CEA VIC and picture aspect ratio */
	if (dc->mode.v_active == 480) {
		if (dc->mode.h_active == 640) {
			avi.m = HDMI_AVI_M_4_3;
			avi.vic = 1;
		} else {
			avi.m = HDMI_AVI_M_16_9;
			avi.vic = 3;
		}
	} else if (dc->mode.v_active == 576) {
		/* CEA modes 17 and 18 differ only by the physical size of the
		 * screen so we have to calculate the physical aspect
		 * ratio. 4 * 10 / 3 is 13
		 */
		if ((dc->out->h_size * 10) / dc->out->v_size > 14) {
			avi.m = HDMI_AVI_M_16_9;
			avi.vic = 18;
		} else {
			avi.m = HDMI_AVI_M_4_3;
			avi.vic = 17;
		}
	} else if (dc->mode.v_active == 720 ||
		(dc->mode.v_active == 1470 && dc->mode.stereo_mode)) {
		/* VIC for both 720p and 720p 3D mode */
		avi.m = HDMI_AVI_M_16_9;
		if (dc->mode.h_front_porch == 110)
			avi.vic = 4; /* 60 Hz */
		else
			avi.vic = 19; /* 50 Hz */
	} else if (dc->mode.v_active == 1080 ||
		(dc->mode.v_active == 2205 && dc->mode.stereo_mode)) {
		/* VIC for both 1080p and 1080p 3D mode */
		avi.m = HDMI_AVI_M_16_9;
		if (dc->mode.h_front_porch == 88)
			avi.vic = 16; /* 60 Hz */
		else if (dc->mode.h_front_porch == 528)
			avi.vic = 31; /* 50 Hz */
		else
			avi.vic = 32; /* 24 Hz */
	} else {
		/* non-CEA mode: VIC 0 (unknown) */
		avi.m = HDMI_AVI_M_16_9;
		avi.vic = 0;
	}


	tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
				     HDMI_INFOFRAME_TYPE_AVI,
				     HDMI_AVI_VERSION,
				     &avi, sizeof(avi));

	tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
			  HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
}
2044 | |||
/*
 * Enable the HDMI Vendor Specific infoframe announcing the 3D packing
 * of the current stereo mode, or disable it when the mode is not
 * stereo.  Only the ENABLE bit of the generic-infoframe control
 * register is touched on the disable path.
 */
static void tegra_dc_hdmi_setup_stereo_infoframe(struct tegra_dc *dc)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
	struct hdmi_stereo_infoframe stereo;
	u32 val;

	if (!dc->mode.stereo_mode) {
		val = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
		val &= ~GENERIC_CTRL_ENABLE;
		tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
		return;
	}

	memset(&stereo, 0x0, sizeof(stereo));

	/* HDMI LLC IEEE registration id 0x000c03, little-endian bytes */
	stereo.regid0 = 0x03;
	stereo.regid1 = 0x0c;
	stereo.regid2 = 0x00;
	stereo.hdmi_video_format = 2; /* 3D_Structure present */
#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
	stereo._3d_structure = 0; /* frame packing */
#else
	stereo._3d_structure = 8; /* side-by-side (half) */
	stereo._3d_ext_data = 0; /* something which fits into 00XX bit req */
#endif

	/* only the first 6 payload bytes of the vendor frame are sent */
	tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_GENERIC_HEADER,
				     HDMI_INFOFRAME_TYPE_VENDOR,
				     HDMI_VENDOR_VERSION,
				     &stereo, 6);

	val = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
	val |= GENERIC_CTRL_ENABLE;

	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
}
2081 | |||
2082 | static void tegra_dc_hdmi_setup_audio_infoframe(struct tegra_dc *dc, bool dvi) | ||
2083 | { | ||
2084 | struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); | ||
2085 | struct hdmi_audio_infoframe audio; | ||
2086 | |||
2087 | if (dvi) { | ||
2088 | tegra_hdmi_writel(hdmi, 0x0, | ||
2089 | HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); | ||
2090 | return; | ||
2091 | } | ||
2092 | |||
2093 | memset(&audio, 0x0, sizeof(audio)); | ||
2094 | |||
2095 | audio.cc = HDMI_AUDIO_CC_2; | ||
2096 | tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER, | ||
2097 | HDMI_INFOFRAME_TYPE_AUDIO, | ||
2098 | HDMI_AUDIO_VERSION, | ||
2099 | &audio, sizeof(audio)); | ||
2100 | |||
2101 | tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, | ||
2102 | HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); | ||
2103 | } | ||
2104 | |||
/*
 * Apply one TMDS configuration entry (PLL settings, pre-emphasis and
 * lane drive currents) to the SOR.  The entry is selected by pixel
 * clock from the tdms_config[] table in tegra_dc_hdmi_enable().
 */
static void tegra_dc_hdmi_setup_tdms(struct tegra_dc_hdmi_data *hdmi,
		const struct tdms_config *tc)
{
	tegra_hdmi_writel(hdmi, tc->pll0, HDMI_NV_PDISP_SOR_PLL0);
	tegra_hdmi_writel(hdmi, tc->pll1, HDMI_NV_PDISP_SOR_PLL1);

	tegra_hdmi_writel(hdmi, tc->pe_current, HDMI_NV_PDISP_PE_CURRENT);

	/* table value overrides the fuse-programmed drive strength */
	tegra_hdmi_writel(hdmi,
			  tc->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE,
			  HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
}
2117 | |||
/*
 * Full HDMI mode-set: bring up clocks, program the HDMI block and SOR
 * sequencer for the current dc->mode, configure audio and infoframes,
 * power up the SOR and attach it to the display head, then kick HDCP.
 */
static void tegra_dc_hdmi_enable(struct tegra_dc *dc)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
	int pulse_start;
	int dispclk_div_8_2;
	int retries;
	int rekey;
	int err;
	unsigned long val;
	unsigned i;
	unsigned long oldrate;

	/* enable power, clocks, resets, etc. */

	/* The upstream DC needs to be clocked for accesses to HDMI to not
	 * hard lock the system. Because we don't know if HDMI is connected
	 * to disp1 or disp2 we need to enable both until we set the DC mux.
	 */
	clk_enable(hdmi->disp1_clk);
	clk_enable(hdmi->disp2_clk);

#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
	/* Enabling HDA clocks before asserting HDA PD and ELDV bits */
	clk_enable(hdmi->hda_clk);
	clk_enable(hdmi->hda2codec_clk);
	clk_enable(hdmi->hda2hdmi_clk);
#endif

	/* back off multiplier before attaching to parent at new rate. */
	oldrate = clk_get_rate(hdmi->clk);
	clk_set_rate(hdmi->clk, oldrate / 2);

	tegra_dc_setup_clk(dc, hdmi->clk);
	clk_set_rate(hdmi->clk, dc->mode.pclk);

	clk_enable(hdmi->clk);
	tegra_periph_reset_assert(hdmi->clk);
	mdelay(1);
	tegra_periph_reset_deassert(hdmi->clk);

	/* TODO: copy HDCP keys from KFUSE to HDMI */

	/* Program display timing registers: handled by dc */

	/* program HDMI registers and SOR sequencer */

	tegra_dc_writel(dc, VSYNC_H_POSITION(1), DC_DISP_DISP_TIMING_OPTIONS);
	tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
			DC_DISP_DISP_COLOR_CONTROL);

	/* video_preamble uses h_pulse2 */
	pulse_start = dc->mode.h_ref_to_sync + dc->mode.h_sync_width +
		dc->mode.h_back_porch - 10;
	tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
	tegra_dc_writel(dc,
			PULSE_MODE_NORMAL |
			PULSE_POLARITY_HIGH |
			PULSE_QUAL_VACTIVE |
			PULSE_LAST_END_A,
			DC_DISP_H_PULSE2_CONTROL);
	tegra_dc_writel(dc, PULSE_START(pulse_start) | PULSE_END(pulse_start + 8),
			DC_DISP_H_PULSE2_POSITION_A);

	tegra_hdmi_writel(hdmi,
			  VSYNC_WINDOW_END(0x210) |
			  VSYNC_WINDOW_START(0x200) |
			  VSYNC_WINDOW_ENABLE,
			  HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);

	/* route the correct display head into the HDMI block */
	tegra_hdmi_writel(hdmi,
			  (dc->ndev->id ? HDMI_SRC_DISPLAYB : HDMI_SRC_DISPLAYA) |
			  ARM_VIDEO_RANGE_LIMITED,
			  HDMI_NV_PDISP_INPUT_CONTROL);

	/* mux is set now; the borrowed display clocks can be dropped */
	clk_disable(hdmi->disp1_clk);
	clk_disable(hdmi->disp2_clk);

	/* refclk divider in MHz with 2 fractional bits */
	dispclk_div_8_2 = clk_get_rate(hdmi->clk) / 1000000 * 4;
	tegra_hdmi_writel(hdmi,
			  SOR_REFCLK_DIV_INT(dispclk_div_8_2 >> 2) |
			  SOR_REFCLK_DIV_FRAC(dispclk_div_8_2),
			  HDMI_NV_PDISP_SOR_REFCLK);

	hdmi->clk_enabled = true;

	if (!hdmi->dvi) {
		err = tegra_dc_hdmi_setup_audio(dc, hdmi->audio_freq,
						hdmi->audio_source);

		/* fall back to DVI-style (video only) output if audio
		 * cannot be configured for this mode */
		if (err < 0)
			hdmi->dvi = true;
	}

#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
	if (hdmi->eld_retrieved)
		tegra_dc_hdmi_setup_eld_buff(dc);
#endif

	rekey = HDMI_REKEY_DEFAULT;
	val = HDMI_CTRL_REKEY(rekey);
	val |= HDMI_CTRL_MAX_AC_PACKET((dc->mode.h_sync_width +
					dc->mode.h_back_porch +
					dc->mode.h_front_porch -
					rekey - 18) / 32);
	if (!hdmi->dvi)
		val |= HDMI_CTRL_ENABLE;
	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_CTRL);

	if (hdmi->dvi)
		tegra_hdmi_writel(hdmi, 0x0,
				  HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
	else
		tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
				  HDMI_NV_PDISP_HDMI_GENERIC_CTRL);

	tegra_dc_hdmi_setup_avi_infoframe(dc, hdmi->dvi);
	tegra_dc_hdmi_setup_audio_infoframe(dc, hdmi->dvi);
	tegra_dc_hdmi_setup_stereo_infoframe(dc);

	/* TMDS CONFIG: first table entry whose pclk covers the mode wins */
	for (i = 0; i < ARRAY_SIZE(tdms_config); i++) {
		if (dc->mode.pclk <= tdms_config[i].pclk) {
			tegra_dc_hdmi_setup_tdms(hdmi, &tdms_config[i]);
			break;
		}
	}

	tegra_hdmi_writel(hdmi,
			  SOR_SEQ_CTL_PU_PC(0) |
			  SOR_SEQ_PU_PC_ALT(0) |
			  SOR_SEQ_PD_PC(8) |
			  SOR_SEQ_PD_PC_ALT(8),
			  HDMI_NV_PDISP_SOR_SEQ_CTL);

	val = SOR_SEQ_INST_WAIT_TIME(1) |
		SOR_SEQ_INST_WAIT_UNITS_VSYNC |
		SOR_SEQ_INST_HALT |
		SOR_SEQ_INST_PIN_A_LOW |
		SOR_SEQ_INST_PIN_B_LOW |
		SOR_SEQ_INST_DRIVE_PWM_OUT_LO;

	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_SEQ_INST0);
	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_SEQ_INST8);

	val = 0x1c800;
	val &= ~SOR_CSTM_ROTCLK(~0);
	val |= SOR_CSTM_ROTCLK(2);
	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_CSTM);


	/* stop the head while the SOR is brought up */
	tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);


	/* start SOR */
	tegra_hdmi_writel(hdmi,
			  SOR_PWR_NORMAL_STATE_PU |
			  SOR_PWR_NORMAL_START_NORMAL |
			  SOR_PWR_SAFE_STATE_PD |
			  SOR_PWR_SETTING_NEW_TRIGGER,
			  HDMI_NV_PDISP_SOR_PWR);
	tegra_hdmi_writel(hdmi,
			  SOR_PWR_NORMAL_STATE_PU |
			  SOR_PWR_NORMAL_START_NORMAL |
			  SOR_PWR_SAFE_STATE_PD |
			  SOR_PWR_SETTING_NEW_DONE,
			  HDMI_NV_PDISP_SOR_PWR);

	/* busy-wait for the new power setting to latch */
	retries = 1000;
	do {
		BUG_ON(--retries < 0);
		val = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
	} while (val & SOR_PWR_SETTING_NEW_PENDING);

	val = SOR_STATE_ASY_CRCMODE_COMPLETE |
		SOR_STATE_ASY_OWNER_HEAD0 |
		SOR_STATE_ASY_SUBOWNER_BOTH |
		SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
		SOR_STATE_ASY_DEPOL_POS;

	if (dc->mode.flags & TEGRA_DC_MODE_FLAG_NEG_H_SYNC)
		val |= SOR_STATE_ASY_HSYNCPOL_NEG;
	else
		val |= SOR_STATE_ASY_HSYNCPOL_POS;

	if (dc->mode.flags & TEGRA_DC_MODE_FLAG_NEG_V_SYNC)
		val |= SOR_STATE_ASY_VSYNCPOL_NEG;
	else
		val |= SOR_STATE_ASY_VSYNCPOL_POS;

	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_STATE2);

	val = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_STATE1);

	/* latch the new state, then attach the SOR to the head */
	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
	tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
	tegra_hdmi_writel(hdmi, val | SOR_STATE_ATTACHED,
			  HDMI_NV_PDISP_SOR_STATE1);
	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);

	tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);

	tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
			PW4_ENABLE | PM0_ENABLE | PM1_ENABLE,
			DC_CMD_DISPLAY_POWER_CONTROL);

	/* restart the head in continuous-display mode */
	tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);
	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	tegra_nvhdcp_set_plug(hdmi->nvhdcp, 1);
}
2332 | |||
/*
 * Power down the HDMI path: drop HDCP, clear HDA presence, gate the
 * HDA and HDMI clocks and hold the block in reset.
 */
static void tegra_dc_hdmi_disable(struct tegra_dc *dc)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);

	/* tell HDCP the link is going away before touching the hardware */
	tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0);

#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0);
	/* sleep 1ms before disabling clocks to ensure HDA gets the interrupt */
	msleep(1);
	clk_disable(hdmi->hda2hdmi_clk);
	clk_disable(hdmi->hda2codec_clk);
	clk_disable(hdmi->hda_clk);
#endif
	tegra_periph_reset_assert(hdmi->clk);
	hdmi->clk_enabled = false;
	clk_disable(hdmi->clk);
	/* release the DVFS floor now that the module is idle */
	tegra_dvfs_set_rate(hdmi->clk, 0);
}
2352 | |||
/* Output operations the core DC layer uses to drive the HDMI path. */
struct tegra_dc_out_ops tegra_dc_hdmi_ops = {
	.init = tegra_dc_hdmi_init,
	.destroy = tegra_dc_hdmi_destroy,
	.enable = tegra_dc_hdmi_enable,
	.disable = tegra_dc_hdmi_disable,
	.detect = tegra_dc_hdmi_detect,
	.suspend = tegra_dc_hdmi_suspend,
	.resume = tegra_dc_hdmi_resume,
};
2362 | |||
2363 | struct tegra_dc_edid *tegra_dc_get_edid(struct tegra_dc *dc) | ||
2364 | { | ||
2365 | struct tegra_dc_hdmi_data *hdmi; | ||
2366 | |||
2367 | /* TODO: Support EDID on non-HDMI devices */ | ||
2368 | if (dc->out->type != TEGRA_DC_OUT_HDMI) | ||
2369 | return ERR_PTR(-ENODEV); | ||
2370 | |||
2371 | hdmi = tegra_dc_get_outdata(dc); | ||
2372 | |||
2373 | return tegra_edid_get_data(hdmi->edid); | ||
2374 | } | ||
2375 | EXPORT_SYMBOL(tegra_dc_get_edid); | ||
2376 | |||
/* Drop the reference taken by tegra_dc_get_edid(). */
void tegra_dc_put_edid(struct tegra_dc_edid *edid)
{
	tegra_edid_put_data(edid);
}
EXPORT_SYMBOL(tegra_dc_put_edid);
diff --git a/drivers/video/tegra/dc/hdmi.h b/drivers/video/tegra/dc/hdmi.h new file mode 100644 index 00000000000..702ab16e87f --- /dev/null +++ b/drivers/video/tegra/dc/hdmi.h | |||
@@ -0,0 +1,222 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/hdmi.h | ||
3 | * | ||
4 | * non-tegra specific HDMI declarations | ||
5 | * | ||
6 | * Copyright (C) 2010 Google, Inc. | ||
7 | * Author: Erik Gilling <konkers@android.com> | ||
8 | * | ||
9 | * Copyright (C) 2010-2011 NVIDIA Corporation | ||
10 | * | ||
11 | * This software is licensed under the terms of the GNU General Public | ||
12 | * License version 2, as published by the Free Software Foundation, and | ||
13 | * may be copied, distributed, and modified under those terms. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #ifndef __DRIVERS_VIDEO_TEGRA_DC_HDMI_H | ||
23 | #define __DRIVERS_VIDEO_TEGRA_DC_HDMI_H | ||
24 | |||
25 | #define HDMI_INFOFRAME_TYPE_VENDOR 0x81 | ||
26 | #define HDMI_INFOFRAME_TYPE_AVI 0x82 | ||
27 | #define HDMI_INFOFRAME_TYPE_SPD 0x83 | ||
28 | #define HDMI_INFOFRAME_TYPE_AUDIO 0x84 | ||
29 | #define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85 | ||
30 | #define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86 | ||
31 | |||
/* AVI infoframe payload (CEA-861 layout); all fields little endian.
 * The csum byte is filled in by tegra_dc_hdmi_write_infopack(). */
struct hdmi_avi_infoframe {
	/* PB0 */
	u8 csum;

	/* PB1 */
	unsigned s:2;		/* scan information */
	unsigned b:2;		/* bar info data valid */
	unsigned a:1;		/* active info present */
	unsigned y:2;		/* RGB or YCbCr */
	unsigned res1:1;

	/* PB2 */
	unsigned r:4;		/* active format aspect ratio */
	unsigned m:2;		/* picture aspect ratio */
	unsigned c:2;		/* colorimetry */

	/* PB3 */
	unsigned sc:2;		/* scan information */
	unsigned q:2;		/* quantization range */
	unsigned ec:3;		/* extended colorimetry */
	unsigned itc:1;		/* it content */

	/* PB4 */
	unsigned vic:7;		/* video format id code */
	unsigned res4:1;

	/* PB5 */
	unsigned pr:4;		/* pixel repetition factor */
	unsigned cn:2;		/* it content type */
	unsigned yq:2;		/* ycc quantization range */

	/* PB6-7 */
	u16 top_bar_end_line;

	/* PB8-9 */
	u16 bot_bar_start_line;

	/* PB10-11 */
	u16 left_bar_end_pixel;

	/* PB12-13 */
	u16 right_bar_start_pixel;
} __attribute__((packed));
76 | |||
77 | #define HDMI_AVI_VERSION 0x02 | ||
78 | |||
79 | #define HDMI_AVI_Y_RGB 0x0 | ||
80 | #define HDMI_AVI_Y_YCBCR_422 0x1 | ||
81 | #define HDMI_AVI_Y_YCBCR_444 0x2 | ||
82 | |||
83 | #define HDMI_AVI_B_VERT 0x1 | ||
84 | #define HDMI_AVI_B_HORIZ 0x2 | ||
85 | |||
86 | #define HDMI_AVI_S_NONE 0x0 | ||
87 | #define HDMI_AVI_S_OVERSCAN 0x1 | ||
88 | #define HDMI_AVI_S_UNDERSCAN 0x2 | ||
89 | |||
90 | #define HDMI_AVI_C_NONE 0x0 | ||
91 | #define HDMI_AVI_C_SMPTE 0x1 | ||
92 | #define HDMI_AVI_C_ITU_R 0x2 | ||
93 | #define HDMI_AVI_C_EXTENDED 0x4 | ||
94 | |||
95 | #define HDMI_AVI_M_4_3 0x1 | ||
96 | #define HDMI_AVI_M_16_9 0x2 | ||
97 | |||
98 | #define HDMI_AVI_R_SAME 0x8 | ||
99 | #define HDMI_AVI_R_4_3_CENTER 0x9 | ||
100 | #define HDMI_AVI_R_16_9_CENTER 0xa | ||
101 | #define HDMI_AVI_R_14_9_CENTER 0xb | ||
102 | |||
/* Audio infoframe payload (CEA-861 layout); all fields little endian.
 * The csum byte is filled in by tegra_dc_hdmi_write_infopack(). */
struct hdmi_audio_infoframe {
	/* PB0 */
	u8 csum;

	/* PB1 */
	unsigned cc:3;		/* channel count */
	unsigned res1:1;
	unsigned ct:4;		/* coding type */

	/* PB2 */
	unsigned ss:2;		/* sample size */
	unsigned sf:3;		/* sample frequency */
	unsigned res2:3;

	/* PB3 */
	unsigned cxt:5;		/* coding extension type */
	unsigned res3:3;

	/* PB4 */
	u8 ca;			/* channel/speaker allocation */

	/* PB5 */
	unsigned res5:3;
	unsigned lsv:4;		/* level shift value */
	unsigned dm_inh:1;	/* downmix inhibit */

	/* PB6-10 reserved */
	u8 res6;
	u8 res7;
	u8 res8;
	u8 res9;
	u8 res10;
} __attribute__((packed));
137 | |||
138 | #define HDMI_AUDIO_VERSION 0x01 | ||
139 | |||
140 | #define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */ | ||
141 | #define HDMI_AUDIO_CC_2 0x1 | ||
142 | #define HDMI_AUDIO_CC_3 0x2 | ||
143 | #define HDMI_AUDIO_CC_4 0x3 | ||
144 | #define HDMI_AUDIO_CC_5 0x4 | ||
145 | #define HDMI_AUDIO_CC_6 0x5 | ||
146 | #define HDMI_AUDIO_CC_7 0x6 | ||
147 | #define HDMI_AUDIO_CC_8 0x7 | ||
148 | |||
149 | #define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */ | ||
150 | #define HDMI_AUDIO_CT_PCM 0x1 | ||
151 | #define HDMI_AUDIO_CT_AC3 0x2 | ||
152 | #define HDMI_AUDIO_CT_MPEG1 0x3 | ||
153 | #define HDMI_AUDIO_CT_MP3 0x4 | ||
154 | #define HDMI_AUDIO_CT_MPEG2 0x5 | ||
155 | #define HDMI_AUDIO_CT_AAC_LC 0x6 | ||
156 | #define HDMI_AUDIO_CT_DTS 0x7 | ||
157 | #define HDMI_AUDIO_CT_ATRAC 0x8 | ||
158 | #define HDMI_AUDIO_CT_DSD 0x9 | ||
159 | #define HDMI_AUDIO_CT_E_AC3 0xa | ||
160 | #define HDMI_AUDIO_CT_DTS_HD 0xb | ||
161 | #define HDMI_AUDIO_CT_MLP 0xc | ||
162 | #define HDMI_AUDIO_CT_DST 0xd | ||
163 | #define HDMI_AUDIO_CT_WMA_PRO 0xe | ||
164 | #define HDMI_AUDIO_CT_CXT 0xf | ||
165 | |||
166 | #define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */ | ||
167 | #define HDMI_AUIDO_SF_32K 0x1 | ||
168 | #define HDMI_AUDIO_SF_44_1K 0x2 | ||
169 | #define HDMI_AUDIO_SF_48K 0x3 | ||
170 | #define HDMI_AUDIO_SF_88_2K 0x4 | ||
171 | #define HDMI_AUDIO_SF_96K 0x5 | ||
172 | #define HDMI_AUDIO_SF_176_4K 0x6 | ||
173 | #define HDMI_AUDIO_SF_192K 0x7 | ||
174 | |||
175 | #define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */ | ||
176 | #define HDMI_AUDIO_SS_16BIT 0x1 | ||
177 | #define HDMI_AUDIO_SS_20BIT 0x2 | ||
178 | #define HDMI_AUDIO_SS_24BIT 0x3 | ||
179 | |||
180 | #define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */ | ||
181 | #define HDMI_AUDIO_CXT_HE_AAC 0x1 | ||
182 | #define HDMI_AUDIO_CXT_HE_AAC_V2 0x2 | ||
183 | #define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3 | ||
184 | |||
/* HDMI Vendor Specific (stereo/3D) infoframe payload; all fields
 * little endian.  regid0..2 carry the 24-bit IEEE registration id;
 * the csum byte is filled in by tegra_dc_hdmi_write_infopack(). */
struct hdmi_stereo_infoframe {
	/* PB0 */
	u8 csum;

	/* PB1 */
	u8 regid0;

	/* PB2 */
	u8 regid1;

	/* PB3 */
	u8 regid2;

	/* PB4 */
	unsigned res1:5;
	unsigned hdmi_video_format:3;

	/* PB5 */
	unsigned res2:4;
	unsigned _3d_structure:4;

	/* PB6 */
	unsigned res3:4;
	unsigned _3d_ext_data:4;

} __attribute__((packed));
212 | |||
213 | #define HDMI_VENDOR_VERSION 0x01 | ||
214 | |||
215 | struct tegra_dc_hdmi_data; | ||
216 | |||
217 | unsigned long tegra_hdmi_readl(struct tegra_dc_hdmi_data *hdmi, | ||
218 | unsigned long reg); | ||
219 | void tegra_hdmi_writel(struct tegra_dc_hdmi_data *hdmi, | ||
220 | unsigned long val, unsigned long reg); | ||
221 | |||
222 | #endif | ||
diff --git a/drivers/video/tegra/dc/hdmi_reg.h b/drivers/video/tegra/dc/hdmi_reg.h new file mode 100644 index 00000000000..0bdda43199e --- /dev/null +++ b/drivers/video/tegra/dc/hdmi_reg.h | |||
@@ -0,0 +1,478 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/hdmi_reg.h | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * Author: Erik Gilling <konkers@android.com> | ||
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #ifndef __DRIVERS_VIDEO_TEGRA_DC_HDMI_REG_H | ||
19 | #define __DRIVERS_VIDEO_TEGRA_DC_HDMI_REG_H | ||
20 | |||
21 | #define HDMI_CTXSW 0x00 | ||
22 | #define HDMI_NV_PDISP_SOR_STATE0 0x01 | ||
23 | #define SOR_STATE_UPDATE (1 << 0) | ||
24 | |||
25 | #define HDMI_NV_PDISP_SOR_STATE1 0x02 | ||
26 | #define SOR_STATE_ASY_HEAD_OPMODE_SLEEP (0 << 0) | ||
27 | #define SOR_STATE_ASY_HEAD_OPMODE_SNOOSE (1 << 0) | ||
28 | #define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0) | ||
29 | #define SOR_STATE_ASY_ORMODE_SAFE (0 << 2) | ||
30 | #define SOR_STATE_ASY_ORMODE_NORMAL (1 << 2) | ||
31 | #define SOR_STATE_ATTACHED (1 << 3) | ||
32 | #define SOR_STATE_ARM_SHOW_VGA (1 << 4) | ||
33 | |||
34 | #define HDMI_NV_PDISP_SOR_STATE2 0x03 | ||
35 | #define SOR_STATE_ASY_OWNER_NONE (0 << 0) | ||
36 | #define SOR_STATE_ASY_OWNER_HEAD0 (1 << 0) | ||
37 | #define SOR_STATE_ASY_SUBOWNER_NONE (0 << 4) | ||
38 | #define SOR_STATE_ASY_SUBOWNER_SUBHEAD0 (1 << 4) | ||
39 | #define SOR_STATE_ASY_SUBOWNER_SUBHEAD1 (2 << 4) | ||
40 | #define SOR_STATE_ASY_SUBOWNER_BOTH (3 << 4) | ||
41 | #define SOR_STATE_ASY_CRCMODE_ACTIVE (0 << 6) | ||
42 | #define SOR_STATE_ASY_CRCMODE_COMPLETE (1 << 6) | ||
43 | #define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 << 6) | ||
44 | #define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8) | ||
45 | #define SOR_STATE_ASY_PROTOCOL_CUSTOM (15 << 8) | ||
46 | #define SOR_STATE_ASY_HSYNCPOL_POS (0 << 12) | ||
47 | #define SOR_STATE_ASY_HSYNCPOL_NEG (1 << 12) | ||
48 | #define SOR_STATE_ASY_VSYNCPOL_POS (0 << 13) | ||
49 | #define SOR_STATE_ASY_VSYNCPOL_NEG (1 << 13) | ||
50 | #define SOR_STATE_ASY_DEPOL_POS (0 << 14) | ||
51 | #define SOR_STATE_ASY_DEPOL_NEG (1 << 14) | ||
52 | |||
53 | #define HDMI_NV_PDISP_RG_HDCP_AN_MSB 0x04 | ||
54 | #define HDMI_NV_PDISP_RG_HDCP_AN_LSB 0x05 | ||
55 | #define HDMI_NV_PDISP_RG_HDCP_CN_MSB 0x06 | ||
56 | #define HDMI_NV_PDISP_RG_HDCP_CN_LSB 0x07 | ||
57 | #define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB 0x08 | ||
58 | #define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB 0x09 | ||
59 | #define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB 0x0a | ||
60 | #define REPEATER (1 << 31) | ||
61 | #define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB 0x0b | ||
62 | #define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB 0x0c | ||
63 | #define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB 0x0d | ||
64 | #define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB 0x0e | ||
65 | #define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB 0x0f | ||
66 | #define HDMI_NV_PDISP_RG_HDCP_CTRL 0x10 | ||
67 | #define HDCP_RUN_YES (1 << 0) | ||
68 | #define CRYPT_ENABLED (1 << 1) | ||
69 | #define ONEONE_ENABLED (1 << 3) | ||
70 | #define AN_VALID (1 << 8) | ||
71 | #define R0_VALID (1 << 9) | ||
72 | #define SPRIME_VALID (1 << 10) | ||
73 | #define MPRIME_VALID (1 << 11) | ||
74 | #define SROM_ERR (1 << 13) | ||
75 | #define HDMI_NV_PDISP_RG_HDCP_CMODE 0x11 | ||
76 | #define TMDS0_LINK0 (1 << 4) | ||
77 | #define READ_S (1 << 0) | ||
78 | #define READ_M (2 << 0) | ||
79 | #define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB 0x12 | ||
80 | #define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB 0x13 | ||
81 | #define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB 0x14 | ||
82 | #define STATUS_CS (1 << 6) | ||
83 | #define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2 0x15 | ||
84 | #define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1 0x16 | ||
85 | #define HDMI_NV_PDISP_RG_HDCP_RI 0x17 | ||
86 | #define HDMI_NV_PDISP_RG_HDCP_CS_MSB 0x18 | ||
87 | #define HDMI_NV_PDISP_RG_HDCP_CS_LSB 0x19 | ||
88 | #define HDMI_NV_PDISP_HDMI_AUDIO_EMU0 0x1a | ||
89 | #define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0 0x1b | ||
90 | #define HDMI_NV_PDISP_HDMI_AUDIO_EMU1 0x1c | ||
91 | #define HDMI_NV_PDISP_HDMI_AUDIO_EMU2 0x1d | ||
92 | #define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL 0x1e | ||
93 | #define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS 0x1f | ||
94 | #define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER 0x20 | ||
95 | #define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW 0x21 | ||
96 | #define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH 0x22 | ||
97 | #define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL 0x23 | ||
98 | #define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS 0x24 | ||
99 | #define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER 0x25 | ||
100 | #define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW 0x26 | ||
101 | #define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH 0x27 | ||
102 | #define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW 0x28 | ||
103 | #define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH 0x29 | ||
104 | #define INFOFRAME_CTRL_ENABLE (1 << 0) | ||
105 | #define INFOFRAME_CTRL_OTHER (1 << 4) | ||
106 | #define INFOFRAME_CTRL_SINGLE (1 << 8) | ||
107 | |||
108 | #define INFOFRAME_HEADER_TYPE(x) ((x) & 0xff) | ||
109 | #define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8) | ||
110 | #define INFOFRAME_HEADER_LEN(x) (((x) & 0xf) << 16) | ||
111 | |||
112 | #define HDMI_NV_PDISP_HDMI_GENERIC_CTRL 0x2a | ||
113 | #define GENERIC_CTRL_ENABLE (1 << 0) | ||
114 | #define GENERIC_CTRL_OTHER (1 << 4) | ||
115 | #define GENERIC_CTRL_SINGLE (1 << 8) | ||
116 | #define GENERIC_CTRL_HBLANK (1 << 12) | ||
117 | #define GENERIC_CTRL_AUDIO (1 << 16) | ||
118 | |||
119 | #define HDMI_NV_PDISP_HDMI_GENERIC_STATUS 0x2b | ||
120 | #define HDMI_NV_PDISP_HDMI_GENERIC_HEADER 0x2c | ||
121 | #define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW 0x2d | ||
122 | #define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH 0x2e | ||
123 | #define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW 0x2f | ||
124 | #define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH 0x30 | ||
125 | #define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW 0x31 | ||
126 | #define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH 0x32 | ||
127 | #define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW 0x33 | ||
128 | #define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH 0x34 | ||
129 | #define HDMI_NV_PDISP_HDMI_ACR_CTRL 0x35 | ||
130 | #define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW 0x36 | ||
131 | #define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH 0x37 | ||
132 | #define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW 0x38 | ||
133 | #define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH 0x39 | ||
134 | #define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW 0x3a | ||
135 | #define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH 0x3b | ||
136 | #define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW 0x3c | ||
137 | #define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH 0x3d | ||
138 | #define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW 0x3e | ||
139 | #define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH 0x3f | ||
140 | #define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW 0x40 | ||
141 | #define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH 0x41 | ||
142 | #define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW 0x42 | ||
143 | #define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH 0x43 | ||
144 | #define ACR_SB3(x) (((x) & 0xff) << 8) | ||
145 | #define ACR_SB2(x) (((x) & 0xff) << 16) | ||
146 | #define ACR_SB1(x) (((x) & 0xff) << 24) | ||
147 | #define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8) | ||
148 | |||
149 | #define ACR_SB6(x) (((x) & 0xff) << 0) | ||
150 | #define ACR_SB5(x) (((x) & 0xff) << 8) | ||
151 | #define ACR_SB4(x) (((x) & 0xff) << 16) | ||
152 | #define ACR_ENABLE (1 << 31) | ||
153 | #define ACR_SUBPACK_N(x) ((x) & 0xffffff) | ||
154 | |||
155 | #define HDMI_NV_PDISP_HDMI_CTRL 0x44 | ||
156 | #define HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0) | ||
157 | #define HDMI_CTRL_AUDIO_LAYOUT (1 << 8) | ||
158 | #define HDMI_CTRL_SAMPLE_FLAT (1 << 12) | ||
159 | #define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16) | ||
160 | #define HDMI_CTRL_ENABLE (1 << 30) | ||
161 | |||
162 | #define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT 0x45 | ||
163 | #define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW 0x46 | ||
164 | #define VSYNC_WINDOW_END(x) (((x) & 0x3ff) << 0) | ||
165 | #define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16) | ||
166 | #define VSYNC_WINDOW_ENABLE (1 << 31) | ||
167 | |||
168 | #define HDMI_NV_PDISP_HDMI_GCP_CTRL 0x47 | ||
169 | #define HDMI_NV_PDISP_HDMI_GCP_STATUS 0x48 | ||
170 | #define HDMI_NV_PDISP_HDMI_GCP_SUBPACK 0x49 | ||
171 | #define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1 0x4a | ||
172 | #define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2 0x4b | ||
173 | #define HDMI_NV_PDISP_HDMI_EMU0 0x4c | ||
174 | #define HDMI_NV_PDISP_HDMI_EMU1 0x4d | ||
175 | #define HDMI_NV_PDISP_HDMI_EMU1_RDATA 0x4e | ||
176 | #define HDMI_NV_PDISP_HDMI_SPARE 0x4f | ||
177 | #define SPARE_HW_CTS (1 << 0) | ||
178 | #define SPARE_FORCE_SW_CTS (1 << 1) | ||
179 | #define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16) | ||
180 | #define SPARE_ACR_PRIORITY_HIGH (0 << 31) | ||
181 | #define SPARE_ACR_PRIORITY_LOW (1 << 31) | ||
182 | |||
183 | #define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1 0x50 | ||
184 | #define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2 0x51 | ||
185 | #define HDMI_NV_PDISP_HDCPRIF_ROM_CTRL 0x53 | ||
186 | #define HDMI_NV_PDISP_SOR_CAP 0x54 | ||
187 | #define HDMI_NV_PDISP_SOR_PWR 0x55 | ||
188 | #define SOR_PWR_NORMAL_STATE_PD (0 << 0) | ||
189 | #define SOR_PWR_NORMAL_STATE_PU (1 << 0) | ||
190 | #define SOR_PWR_NORMAL_START_NORMAL (0 << 1) | ||
191 | #define SOR_PWR_NORMAL_START_ALT (1 << 1) | ||
192 | #define SOR_PWR_SAFE_STATE_PD (0 << 16) | ||
193 | #define SOR_PWR_SAFE_STATE_PU (1 << 16) | ||
194 | #define SOR_PWR_SAFE_START_NORMAL (0 << 17) | ||
195 | #define SOR_PWR_SAFE_START_ALT (1 << 17) | ||
196 | #define SOR_PWR_HALT_DELAY (1 << 24) | ||
197 | #define SOR_PWR_MODE (1 << 28) | ||
198 | #define SOR_PWR_SETTING_NEW_DONE (0 << 31) | ||
199 | #define SOR_PWR_SETTING_NEW_PENDING (1 << 31) | ||
200 | #define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31) | ||
201 | |||
202 | #define HDMI_NV_PDISP_SOR_TEST 0x56 | ||
203 | #define HDMI_NV_PDISP_SOR_PLL0 0x57 | ||
204 | #define SOR_PLL_PWR (1 << 0) | ||
205 | #define SOR_PLL_PDBG (1 << 1) | ||
206 | #define SOR_PLL_VCOPD (1 << 2) | ||
207 | #define SOR_PLL_PDPORT (1 << 3) | ||
208 | #define SOR_PLL_RESISTORSEL (1 << 4) | ||
209 | #define SOR_PLL_PULLDOWN (1 << 5) | ||
210 | #define SOR_PLL_VCOCAP(x) (((x) & 0xf) << 8) | ||
211 | #define SOR_PLL_BG_V17_S(x) (((x) & 0xf) << 12) | ||
212 | #define SOR_PLL_FILTER(x) (((x) & 0xf) << 16) | ||
213 | #define SOR_PLL_ICHPMP(x) (((x) & 0xf) << 24) | ||
214 | #define SOR_PLL_TX_REG_LOAD(x) (((x) & 0x3) << 28) | ||
215 | |||
216 | #define HDMI_NV_PDISP_SOR_PLL1 0x58 | ||
217 | #define SOR_PLL_TMDS_TERM_ENABLE (1 << 8) | ||
218 | #define SOR_PLL_TMDS_TERMADJ(x) (((x) & 0xf) << 9) | ||
219 | #define SOR_PLL_LOADADJ(x) (((x) & 0xf) << 20) | ||
220 | #define SOR_PLL_PE_EN (1 << 28) | ||
221 | #define SOR_PLL_HALF_FULL_PE (1 << 29) | ||
222 | #define SOR_PLL_S_D_PIN_PE (1 << 30) | ||
223 | |||
224 | #define HDMI_NV_PDISP_SOR_PLL2 0x59 | ||
225 | #define HDMI_NV_PDISP_SOR_CSTM 0x5a | ||
226 | #define SOR_CSTM_PD_TXDA_0 (1 << 0) | ||
227 | #define SOR_CSTM_PD_TXDA_1 (1 << 1) | ||
228 | #define SOR_CSTM_PD_TXDA_2 (1 << 2) | ||
229 | #define SOR_CSTM_PD_TXDA_3 (1 << 3) | ||
230 | #define SOR_CSTM_PD_TXDB_0 (1 << 4) | ||
231 | #define SOR_CSTM_PD_TXDB_1 (1 << 5) | ||
232 | #define SOR_CSTM_PD_TXDB_2 (1 << 6) | ||
233 | #define SOR_CSTM_PD_TXDB_3 (1 << 7) | ||
234 | #define SOR_CSTM_PD_TXCA (1 << 8) | ||
235 | #define SOR_CSTM_PD_TXCB (1 << 9) | ||
236 | #define SOR_CSTM_UPPER (1 << 11) | ||
237 | #define SOR_CSTM_MODE(x) (((x) & 0x3) << 12) | ||
238 | #define SOR_CSTM_LINKACTA (1 << 14) | ||
239 | #define SOR_CSTM_LINKACTB (1 << 15) | ||
240 | #define SOR_CSTM_LVDS_EN (1 << 16) | ||
241 | #define SOR_CSTM_DUP_SYNC (1 << 17) | ||
242 | #define SOR_CSTM_NEW_MODE (1 << 18) | ||
243 | #define SOR_CSTM_BALANCED (1 << 19) | ||
244 | #define SOR_CSTM_PLLDIV (1 << 21) | ||
245 | #define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24) | ||
246 | #define SOR_CSTM_ROTDAT(x) (((x) & 0x7) << 28) | ||
247 | |||
248 | #define HDMI_NV_PDISP_SOR_LVDS 0x5b | ||
249 | #define HDMI_NV_PDISP_SOR_CRCA 0x5c | ||
250 | #define HDMI_NV_PDISP_SOR_CRCB 0x5d | ||
251 | #define HDMI_NV_PDISP_SOR_BLANK 0x5e | ||
252 | #define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f | ||
253 | #define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0) | ||
254 | #define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4) | ||
255 | #define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8) | ||
256 | #define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12) | ||
257 | #define SOR_SEQ_PC(x) (((x) & 0xf) << 16) | ||
258 | #define SOR_SEQ_STATUS (1 << 28) | ||
259 | #define SOR_SEQ_SWITCH (1 << 30) | ||
260 | |||
261 | #define HDMI_NV_PDISP_SOR_SEQ_INST0 0x60 | ||
262 | #define HDMI_NV_PDISP_SOR_SEQ_INST1 0x61 | ||
263 | #define HDMI_NV_PDISP_SOR_SEQ_INST2 0x62 | ||
264 | #define HDMI_NV_PDISP_SOR_SEQ_INST3 0x63 | ||
265 | #define HDMI_NV_PDISP_SOR_SEQ_INST4 0x64 | ||
266 | #define HDMI_NV_PDISP_SOR_SEQ_INST5 0x65 | ||
267 | #define HDMI_NV_PDISP_SOR_SEQ_INST6 0x66 | ||
268 | #define HDMI_NV_PDISP_SOR_SEQ_INST7 0x67 | ||
269 | #define HDMI_NV_PDISP_SOR_SEQ_INST8 0x68 | ||
270 | #define HDMI_NV_PDISP_SOR_SEQ_INST9 0x69 | ||
271 | #define HDMI_NV_PDISP_SOR_SEQ_INSTA 0x6a | ||
272 | #define HDMI_NV_PDISP_SOR_SEQ_INSTB 0x6b | ||
273 | #define HDMI_NV_PDISP_SOR_SEQ_INSTC 0x6c | ||
274 | #define HDMI_NV_PDISP_SOR_SEQ_INSTD 0x6d | ||
275 | #define HDMI_NV_PDISP_SOR_SEQ_INSTE 0x6e | ||
276 | #define HDMI_NV_PDISP_SOR_SEQ_INSTF 0x6f | ||
277 | #define SOR_SEQ_INST_WAIT_TIME(x) (((x) & 0x3ff) << 0) | ||
278 | #define SOR_SEQ_INST_WAIT_UNITS_US (0 << 12) | ||
279 | #define SOR_SEQ_INST_WAIT_UNITS_MS (1 << 12) | ||
280 | #define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12) | ||
281 | #define SOR_SEQ_INST_HALT (1 << 15) | ||
282 | #define SOR_SEQ_INST_PIN_A_LOW (0 << 21) | ||
283 | #define SOR_SEQ_INST_PIN_A_HIGH (1 << 21) | ||
284 | #define SOR_SEQ_INST_PIN_B_LOW (0 << 22) | ||
285 | #define SOR_SEQ_INST_PIN_B_HIGH (1 << 22) | ||
286 | #define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23) | ||
287 | #define SOR_SEQ_INST_TRISTATE_IOS (1 << 24) | ||
288 | #define SOR_SEQ_INST_SOR_SEQ_INST_BLACK_DATA (1 << 25) | ||
289 | #define SOR_SEQ_INST_BLANK_DE (1 << 26) | ||
290 | #define SOR_SEQ_INST_BLANK_H (1 << 27) | ||
291 | #define SOR_SEQ_INST_BLANK_V (1 << 28) | ||
292 | #define SOR_SEQ_INST_ASSERT_PLL_RESETV (1 << 29) | ||
293 | #define SOR_SEQ_INST_POWERDOWN_MACRO (1 << 30) | ||
294 | #define SOR_SEQ_INST_PLL_PULLDOWN (1 << 31) | ||
295 | |||
296 | #define HDMI_NV_PDISP_SOR_VCRCA0 0x72 | ||
297 | #define HDMI_NV_PDISP_SOR_VCRCA1 0x73 | ||
298 | #define HDMI_NV_PDISP_SOR_CCRCA0 0x74 | ||
299 | #define HDMI_NV_PDISP_SOR_CCRCA1 0x75 | ||
300 | #define HDMI_NV_PDISP_SOR_EDATAA0 0x76 | ||
301 | #define HDMI_NV_PDISP_SOR_EDATAA1 0x77 | ||
302 | #define HDMI_NV_PDISP_SOR_COUNTA0 0x78 | ||
303 | #define HDMI_NV_PDISP_SOR_COUNTA1 0x79 | ||
304 | #define HDMI_NV_PDISP_SOR_DEBUGA0 0x7a | ||
305 | #define HDMI_NV_PDISP_SOR_DEBUGA1 0x7b | ||
306 | #define HDMI_NV_PDISP_SOR_TRIG 0x7c | ||
307 | #define HDMI_NV_PDISP_SOR_MSCHECK 0x7d | ||
308 | #define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT 0x7e | ||
309 | #define DRIVE_CURRENT_LANE0(x) (((x) & 0x3f) << 0) | ||
310 | #define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8) | ||
311 | #define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16) | ||
312 | #define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24) | ||
313 | #define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31) | ||
314 | #define DRIVE_CURRENT_1_500_mA 0x00 | ||
315 | #define DRIVE_CURRENT_1_875_mA 0x01 | ||
316 | #define DRIVE_CURRENT_2_250_mA 0x02 | ||
317 | #define DRIVE_CURRENT_2_625_mA 0x03 | ||
318 | #define DRIVE_CURRENT_3_000_mA 0x04 | ||
319 | #define DRIVE_CURRENT_3_375_mA 0x05 | ||
320 | #define DRIVE_CURRENT_3_750_mA 0x06 | ||
321 | #define DRIVE_CURRENT_4_125_mA 0x07 | ||
322 | #define DRIVE_CURRENT_4_500_mA 0x08 | ||
323 | #define DRIVE_CURRENT_4_875_mA 0x09 | ||
324 | #define DRIVE_CURRENT_5_250_mA 0x0a | ||
325 | #define DRIVE_CURRENT_5_625_mA 0x0b | ||
326 | #define DRIVE_CURRENT_6_000_mA 0x0c | ||
327 | #define DRIVE_CURRENT_6_375_mA 0x0d | ||
328 | #define DRIVE_CURRENT_6_750_mA 0x0e | ||
329 | #define DRIVE_CURRENT_7_125_mA 0x0f | ||
330 | #define DRIVE_CURRENT_7_500_mA 0x10 | ||
331 | #define DRIVE_CURRENT_7_875_mA 0x11 | ||
332 | #define DRIVE_CURRENT_8_250_mA 0x12 | ||
333 | #define DRIVE_CURRENT_8_625_mA 0x13 | ||
334 | #define DRIVE_CURRENT_9_000_mA 0x14 | ||
335 | #define DRIVE_CURRENT_9_375_mA 0x15 | ||
336 | #define DRIVE_CURRENT_9_750_mA 0x16 | ||
337 | #define DRIVE_CURRENT_10_125_mA 0x17 | ||
338 | #define DRIVE_CURRENT_10_500_mA 0x18 | ||
339 | #define DRIVE_CURRENT_10_875_mA 0x19 | ||
340 | #define DRIVE_CURRENT_11_250_mA 0x1a | ||
341 | #define DRIVE_CURRENT_11_625_mA 0x1b | ||
342 | #define DRIVE_CURRENT_12_000_mA 0x1c | ||
343 | #define DRIVE_CURRENT_12_375_mA 0x1d | ||
344 | #define DRIVE_CURRENT_12_750_mA 0x1e | ||
345 | #define DRIVE_CURRENT_13_125_mA 0x1f | ||
346 | #define DRIVE_CURRENT_13_500_mA 0x20 | ||
347 | #define DRIVE_CURRENT_13_875_mA 0x21 | ||
348 | #define DRIVE_CURRENT_14_250_mA 0x22 | ||
349 | #define DRIVE_CURRENT_14_625_mA 0x23 | ||
350 | #define DRIVE_CURRENT_15_000_mA 0x24 | ||
351 | #define DRIVE_CURRENT_15_375_mA 0x25 | ||
352 | #define DRIVE_CURRENT_15_750_mA 0x26 | ||
353 | #define DRIVE_CURRENT_16_125_mA 0x27 | ||
354 | #define DRIVE_CURRENT_16_500_mA 0x28 | ||
355 | #define DRIVE_CURRENT_16_875_mA 0x29 | ||
356 | #define DRIVE_CURRENT_17_250_mA 0x2a | ||
357 | #define DRIVE_CURRENT_17_625_mA 0x2b | ||
358 | #define DRIVE_CURRENT_18_000_mA 0x2c | ||
359 | #define DRIVE_CURRENT_18_375_mA 0x2d | ||
360 | #define DRIVE_CURRENT_18_750_mA 0x2e | ||
361 | #define DRIVE_CURRENT_19_125_mA 0x2f | ||
362 | #define DRIVE_CURRENT_19_500_mA 0x30 | ||
363 | #define DRIVE_CURRENT_19_875_mA 0x31 | ||
364 | #define DRIVE_CURRENT_20_250_mA 0x32 | ||
365 | #define DRIVE_CURRENT_20_625_mA 0x33 | ||
366 | #define DRIVE_CURRENT_21_000_mA 0x34 | ||
367 | #define DRIVE_CURRENT_21_375_mA 0x35 | ||
368 | #define DRIVE_CURRENT_21_750_mA 0x36 | ||
369 | #define DRIVE_CURRENT_22_125_mA 0x37 | ||
370 | #define DRIVE_CURRENT_22_500_mA 0x38 | ||
371 | #define DRIVE_CURRENT_22_875_mA 0x39 | ||
372 | #define DRIVE_CURRENT_23_250_mA 0x3a | ||
373 | #define DRIVE_CURRENT_23_625_mA 0x3b | ||
374 | #define DRIVE_CURRENT_24_000_mA 0x3c | ||
375 | #define DRIVE_CURRENT_24_375_mA 0x3d | ||
376 | #define DRIVE_CURRENT_24_750_mA 0x3e | ||
377 | |||
378 | #define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f | ||
379 | #define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80 | ||
380 | #define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81 | ||
381 | /* note: datasheet defines FS1..FS7. we have FS(0)..FS(6) */ | ||
382 | #define HDMI_NV_PDISP_AUDIO_FS(x) (0x82 + (x)) | ||
383 | #define AUDIO_FS_LOW(x) (((x) & 0xfff) << 0) | ||
384 | #define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16) | ||
385 | |||
386 | |||
387 | #define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH 0x89 | ||
388 | #define HDMI_NV_PDISP_AUDIO_THRESHOLD 0x8a | ||
389 | #define HDMI_NV_PDISP_AUDIO_CNTRL0 0x8b | ||
390 | #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) | ||
391 | #define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0_0 0xac | ||
392 | #define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0 0xbc | ||
393 | #define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0 0xbd | ||
394 | #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320_0 0xbf | ||
395 | #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441_0 0xc0 | ||
396 | #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882_0 0xc1 | ||
397 | #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764_0 0xc2 | ||
398 | #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480_0 0xc3 | ||
399 | #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960_0 0xc4 | ||
400 | #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920_0 0xc5 | ||
401 | #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT_0 0xc6 | ||
402 | #endif | ||
403 | #define AUDIO_CNTRL0_ERROR_TOLERANCE(x) (((x) & 0xff) << 0) | ||
404 | #define AUDIO_CNTRL0_SOFT_RESET (1 << 8) | ||
405 | #define AUDIO_CNTRL0_SOFT_RESET_ALL (1 << 12) | ||
406 | #define AUDIO_CNTRL0_SAMPLING_FREQ_UNKNOWN (1 << 16) | ||
407 | #define AUDIO_CNTRL0_SAMPLING_FREQ_32K (2 << 16) | ||
408 | #define AUDIO_CNTRL0_SAMPLING_FREQ_44_1K (0 << 16) | ||
409 | #define AUDIO_CNTRL0_SAMPLING_FREQ_48K (2 << 16) | ||
410 | #define AUDIO_CNTRL0_SAMPLING_FREQ_88_2K (8 << 16) | ||
411 | #define AUDIO_CNTRL0_SAMPLING_FREQ_96K (10 << 16) | ||
412 | #define AUDIO_CNTRL0_SAMPLING_FREQ_176_4K (12 << 16) | ||
413 | #define AUDIO_CNTRL0_SAMPLING_FREQ_192K (14 << 16) | ||
414 | #define AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20) | ||
415 | #define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20) | ||
416 | #define AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20) | ||
417 | #define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29) | ||
418 | #define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24) | ||
419 | |||
420 | #define HDMI_NV_PDISP_AUDIO_N 0x8c | ||
421 | #define AUDIO_N_VALUE(x) (((x) & 0xfffff) << 0) | ||
422 | #define AUDIO_N_RESETF (1 << 20) | ||
423 | #define AUDIO_N_GENERATE_NORMAL (0 << 24) | ||
424 | #define AUDIO_N_GENERATE_ALTERNALTE (1 << 24) | ||
425 | #define AUDIO_N_LOOKUP_ENABLE (1 << 28) | ||
426 | |||
427 | #define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING 0x94 | ||
428 | #define HDMI_NV_PDISP_SOR_REFCLK 0x95 | ||
429 | #define SOR_REFCLK_DIV_INT(x) (((x) & 0xff) << 8) | ||
430 | #define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6) | ||
431 | |||
432 | #define HDMI_NV_PDISP_CRC_CONTROL 0x96 | ||
433 | #define HDMI_NV_PDISP_INPUT_CONTROL 0x97 | ||
434 | #define HDMI_SRC_DISPLAYA (0 << 0) | ||
435 | #define HDMI_SRC_DISPLAYB (1 << 0) | ||
436 | #define ARM_VIDEO_RANGE_FULL (0 << 1) | ||
437 | #define ARM_VIDEO_RANGE_LIMITED (1 << 1) | ||
438 | |||
439 | #define HDMI_NV_PDISP_SCRATCH 0x98 | ||
440 | #define HDMI_NV_PDISP_PE_CURRENT 0x99 | ||
441 | #define PE_CURRENT0(x) (((x) & 0xf) << 0) | ||
442 | #define PE_CURRENT1(x) (((x) & 0xf) << 8) | ||
443 | #define PE_CURRENT2(x) (((x) & 0xf) << 16) | ||
444 | #define PE_CURRENT3(x) (((x) & 0xf) << 24) | ||
445 | #define PE_CURRENT_0_0_mA 0x0 | ||
446 | #define PE_CURRENT_0_5_mA 0x1 | ||
447 | #define PE_CURRENT_1_0_mA 0x2 | ||
448 | #define PE_CURRENT_1_5_mA 0x3 | ||
449 | #define PE_CURRENT_2_0_mA 0x4 | ||
450 | #define PE_CURRENT_2_5_mA 0x5 | ||
451 | #define PE_CURRENT_3_0_mA 0x6 | ||
452 | #define PE_CURRENT_3_5_mA 0x7 | ||
453 | #define PE_CURRENT_4_0_mA 0x8 | ||
454 | #define PE_CURRENT_4_5_mA 0x9 | ||
455 | #define PE_CURRENT_5_0_mA 0xa | ||
456 | #define PE_CURRENT_5_5_mA 0xb | ||
457 | #define PE_CURRENT_6_0_mA 0xc | ||
458 | #define PE_CURRENT_6_5_mA 0xd | ||
459 | #define PE_CURRENT_7_0_mA 0xe | ||
460 | #define PE_CURRENT_7_5_mA 0xf | ||
461 | |||
462 | #define HDMI_NV_PDISP_KEY_CTRL 0x9a | ||
463 | #define LOCAL_KEYS (1 << 0) | ||
464 | #define AUTOINC (1 << 1) | ||
465 | #define WRITE16 (1 << 4) | ||
466 | #define PKEY_REQUEST_RELOAD_TRIGGER (1 << 5) | ||
467 | #define PKEY_LOADED (1 << 6) | ||
468 | #define HDMI_NV_PDISP_KEY_DEBUG0 0x9b | ||
469 | #define HDMI_NV_PDISP_KEY_DEBUG1 0x9c | ||
470 | #define HDMI_NV_PDISP_KEY_DEBUG2 0x9d | ||
471 | #define HDMI_NV_PDISP_KEY_HDCP_KEY_0 0x9e | ||
472 | #define HDMI_NV_PDISP_KEY_HDCP_KEY_1 0x9f | ||
473 | #define HDMI_NV_PDISP_KEY_HDCP_KEY_2 0xa0 | ||
474 | #define HDMI_NV_PDISP_KEY_HDCP_KEY_3 0xa1 | ||
475 | #define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG 0xa2 | ||
476 | #define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3 | ||
477 | |||
478 | #endif | ||
diff --git a/drivers/video/tegra/dc/nvhdcp.c b/drivers/video/tegra/dc/nvhdcp.c new file mode 100644 index 00000000000..263de07a3da --- /dev/null +++ b/drivers/video/tegra/dc/nvhdcp.c | |||
@@ -0,0 +1,1259 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/nvhdcp.c | ||
3 | * | ||
4 | * Copyright (c) 2010-2011, NVIDIA Corporation. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/delay.h> | ||
19 | #include <linux/i2c.h> | ||
20 | #include <linux/miscdevice.h> | ||
21 | #include <linux/poll.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | #include <linux/wait.h> | ||
25 | #include <linux/workqueue.h> | ||
26 | #include <asm/atomic.h> | ||
27 | |||
28 | #include <mach/dc.h> | ||
29 | #include <mach/kfuse.h> | ||
30 | |||
31 | #include <video/nvhdcp.h> | ||
32 | |||
33 | #include "dc_reg.h" | ||
34 | #include "dc_priv.h" | ||
35 | #include "hdmi_reg.h" | ||
36 | #include "hdmi.h" | ||
37 | |||
38 | DECLARE_WAIT_QUEUE_HEAD(wq_worker); | ||
39 | |||
40 | /* for 0x40 Bcaps */ | ||
41 | #define BCAPS_REPEATER (1 << 6) | ||
42 | #define BCAPS_READY (1 << 5) | ||
43 | #define BCAPS_11 (1 << 1) /* used for both Bcaps and Ainfo */ | ||
44 | |||
45 | /* for 0x41 Bstatus */ | ||
46 | #define BSTATUS_MAX_DEVS_EXCEEDED (1 << 7) | ||
47 | #define BSTATUS_MAX_CASCADE_EXCEEDED (1 << 11) | ||
48 | |||
49 | #ifdef VERBOSE_DEBUG | ||
50 | #define nvhdcp_vdbg(...) \ | ||
51 | printk("nvhdcp: " __VA_ARGS__) | ||
52 | #else | ||
53 | #define nvhdcp_vdbg(...) \ | ||
54 | ({ \ | ||
55 | if(0) \ | ||
56 | printk("nvhdcp: " __VA_ARGS__); \ | ||
57 | 0; \ | ||
58 | }) | ||
59 | #endif | ||
60 | #define nvhdcp_debug(...) \ | ||
61 | pr_debug("nvhdcp: " __VA_ARGS__) | ||
62 | #define nvhdcp_err(...) \ | ||
63 | pr_err("nvhdcp: Error: " __VA_ARGS__) | ||
64 | #define nvhdcp_info(...) \ | ||
65 | pr_info("nvhdcp: " __VA_ARGS__) | ||
66 | |||
67 | |||
68 | /* for nvhdcp.state */ | ||
enum tegra_nvhdcp_state {
	STATE_OFF,		/* HDCP machinery disabled */
	STATE_UNAUTHENTICATED,	/* no authenticated downstream link */
	STATE_LINK_VERIFY,	/* authenticated; link is being verified */
	STATE_RENEGOTIATE,	/* verification failed; re-authenticate */
};
75 | |||
/* per-HDMI-head HDCP state */
struct tegra_nvhdcp {
	struct delayed_work work;		/* authentication worker */
	struct tegra_dc_hdmi_data *hdmi;	/* owning HDMI controller */
	struct workqueue_struct *downstream_wq;	/* runs @work */
	struct mutex lock;	/* NOTE(review): appears to guard the fields below — confirm with users */
	struct miscdevice miscdev;		/* userspace control device */
	char name[12];				/* miscdevice name buffer */
	unsigned id;				/* instance number */
	bool plugged; /* true if hotplug detected */
	atomic_t policy; /* set policy */
	enum tegra_nvhdcp_state state; /* STATE_xxx */
	struct i2c_client *client;		/* DDC link to the sink */
	struct i2c_board_info info;		/* board info used for @client */
	int bus;				/* i2c bus number */
	u32 b_status;		/* receiver Bstatus word */
	u64 a_n;		/* An: session random number */
	u64 c_n;		/* Cn: upstream exchange random number */
	u64 a_ksv;		/* Aksv: transmitter key selection vector */
	u64 b_ksv;		/* Bksv: receiver key selection vector */
	u64 c_ksv;		/* Cksv: software key selection vector */
	u64 d_ksv;		/* Dksv: upstream key selection vector */
	u8 v_prime[20];		/* V': repeater SHA-1 verification value */
	u64 m_prime;		/* M': repeater verification value */
	u32 num_bksv_list;	/* number of valid entries in bksv_list[] */
	u64 bksv_list[TEGRA_NVHDCP_MAX_DEVS];	/* downstream KSV list */
	int fail_count;		/* consecutive authentication failures */
};
103 | |||
104 | static inline bool nvhdcp_is_plugged(struct tegra_nvhdcp *nvhdcp) | ||
105 | { | ||
106 | rmb(); | ||
107 | return nvhdcp->plugged; | ||
108 | } | ||
109 | |||
110 | static inline bool nvhdcp_set_plugged(struct tegra_nvhdcp *nvhdcp, bool plugged) | ||
111 | { | ||
112 | nvhdcp->plugged = plugged; | ||
113 | wmb(); | ||
114 | return plugged; | ||
115 | } | ||
116 | |||
/*
 * Read @len bytes from downstream HDCP register @reg over DDC.
 * Uses the fixed HDCP primary-link address (0x74 as an 8-bit address).
 * Retries failed transfers up to 15 extra times, sleeping 250 ms between
 * attempts, and bails out with -EIO if the sink is unplugged mid-sequence.
 * Returns 0 on success or a negative errno from the i2c core.
 */
static int nvhdcp_i2c_read(struct tegra_nvhdcp *nvhdcp, u8 reg,
	size_t len, void *data)
{
	int status;
	int retries = 15;
	struct i2c_msg msg[] = {
		{
			.addr = 0x74 >> 1, /* primary link */
			.flags = 0,
			.len = 1,
			.buf = &reg,	/* write register offset first */
		},
		{
			.addr = 0x74 >> 1, /* primary link */
			.flags = I2C_M_RD,
			.len = len,
			.buf = data,	/* then read back the payload */
		},
	};

	do {
		/* abort quickly if the cable was pulled */
		if (!nvhdcp_is_plugged(nvhdcp)) {
			nvhdcp_err("disconnect during i2c xfer\n");
			return -EIO;
		}
		status = i2c_transfer(nvhdcp->client->adapter,
			msg, ARRAY_SIZE(msg));
		/* back off before every retry except the last */
		if ((status < 0) && (retries > 1))
			msleep(250);
	} while ((status < 0) && retries--);

	if (status < 0) {
		nvhdcp_err("i2c xfer error %d\n", status);
		return status;
	}

	return 0;
}
155 | |||
/*
 * Write @len bytes from @data to downstream HDCP register @reg over DDC.
 * Mirrors nvhdcp_i2c_read(): fixed 0x74 primary-link address, up to 15
 * retries with a 250 ms back-off, -EIO on hot-unplug during the transfer.
 * NOTE(review): the bounce buffer is a VLA sized by the caller; callers in
 * this file write at most 8 bytes, but confirm before adding larger writes.
 */
static int nvhdcp_i2c_write(struct tegra_nvhdcp *nvhdcp, u8 reg,
	size_t len, const void *data)
{
	int status;
	/* register offset byte followed by the payload */
	u8 buf[len + 1];
	struct i2c_msg msg[] = {
		{
			.addr = 0x74 >> 1, /* primary link */
			.flags = 0,
			.len = len + 1,
			.buf = buf,
		},
	};
	int retries = 15;

	buf[0] = reg;
	memcpy(buf + 1, data, len);

	do {
		/* abort quickly if the cable was pulled */
		if (!nvhdcp_is_plugged(nvhdcp)) {
			nvhdcp_err("disconnect during i2c xfer\n");
			return -EIO;
		}
		status = i2c_transfer(nvhdcp->client->adapter,
			msg, ARRAY_SIZE(msg));
		/* back off before every retry except the last */
		if ((status < 0) && (retries > 1))
			msleep(250);
	} while ((status < 0) && retries--);

	if (status < 0) {
		nvhdcp_err("i2c xfer error %d\n", status);
		return status;
	}

	return 0;
}
192 | |||
193 | static inline int nvhdcp_i2c_read8(struct tegra_nvhdcp *nvhdcp, u8 reg, u8 *val) | ||
194 | { | ||
195 | return nvhdcp_i2c_read(nvhdcp, reg, 1, val); | ||
196 | } | ||
197 | |||
198 | static inline int nvhdcp_i2c_write8(struct tegra_nvhdcp *nvhdcp, u8 reg, u8 val) | ||
199 | { | ||
200 | return nvhdcp_i2c_write(nvhdcp, reg, 1, &val); | ||
201 | } | ||
202 | |||
203 | static inline int nvhdcp_i2c_read16(struct tegra_nvhdcp *nvhdcp, | ||
204 | u8 reg, u16 *val) | ||
205 | { | ||
206 | u8 buf[2]; | ||
207 | int e; | ||
208 | |||
209 | e = nvhdcp_i2c_read(nvhdcp, reg, sizeof buf, buf); | ||
210 | if (e) | ||
211 | return e; | ||
212 | |||
213 | if (val) | ||
214 | *val = buf[0] | (u16)buf[1] << 8; | ||
215 | |||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | static int nvhdcp_i2c_read40(struct tegra_nvhdcp *nvhdcp, u8 reg, u64 *val) | ||
220 | { | ||
221 | u8 buf[5]; | ||
222 | int e, i; | ||
223 | u64 n; | ||
224 | |||
225 | e = nvhdcp_i2c_read(nvhdcp, reg, sizeof buf, buf); | ||
226 | if (e) | ||
227 | return e; | ||
228 | |||
229 | for(i = 0, n = 0; i < 5; i++ ) { | ||
230 | n <<= 8; | ||
231 | n |= buf[4 - i]; | ||
232 | } | ||
233 | |||
234 | if (val) | ||
235 | *val = n; | ||
236 | |||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | static int nvhdcp_i2c_write40(struct tegra_nvhdcp *nvhdcp, u8 reg, u64 val) | ||
241 | { | ||
242 | char buf[5]; | ||
243 | int i; | ||
244 | for(i = 0; i < 5; i++ ) { | ||
245 | buf[i] = val; | ||
246 | val >>= 8; | ||
247 | } | ||
248 | return nvhdcp_i2c_write(nvhdcp, reg, sizeof buf, buf); | ||
249 | } | ||
250 | |||
251 | static int nvhdcp_i2c_write64(struct tegra_nvhdcp *nvhdcp, u8 reg, u64 val) | ||
252 | { | ||
253 | char buf[8]; | ||
254 | int i; | ||
255 | for(i = 0; i < 8; i++ ) { | ||
256 | buf[i] = val; | ||
257 | val >>= 8; | ||
258 | } | ||
259 | return nvhdcp_i2c_write(nvhdcp, reg, sizeof buf, buf); | ||
260 | } | ||
261 | |||
262 | |||
263 | /* 64-bit link encryption session random number */ | ||
264 | static inline u64 get_an(struct tegra_dc_hdmi_data *hdmi) | ||
265 | { | ||
266 | u64 r; | ||
267 | r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AN_MSB) << 32; | ||
268 | r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AN_LSB); | ||
269 | return r; | ||
270 | } | ||
271 | |||
272 | /* 64-bit upstream exchange random number */ | ||
273 | static inline void set_cn(struct tegra_dc_hdmi_data *hdmi, u64 c_n) | ||
274 | { | ||
275 | tegra_hdmi_writel(hdmi, (u32)c_n, HDMI_NV_PDISP_RG_HDCP_CN_LSB); | ||
276 | tegra_hdmi_writel(hdmi, c_n >> 32, HDMI_NV_PDISP_RG_HDCP_CN_MSB); | ||
277 | } | ||
278 | |||
279 | |||
280 | /* 40-bit transmitter's key selection vector */ | ||
281 | static inline u64 get_aksv(struct tegra_dc_hdmi_data *hdmi) | ||
282 | { | ||
283 | u64 r; | ||
284 | r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AKSV_MSB) << 32; | ||
285 | r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AKSV_LSB); | ||
286 | return r; | ||
287 | } | ||
288 | |||
289 | /* 40-bit receiver's key selection vector */ | ||
290 | static inline void set_bksv(struct tegra_dc_hdmi_data *hdmi, u64 b_ksv, bool repeater) | ||
291 | { | ||
292 | if (repeater) | ||
293 | b_ksv |= (u64)REPEATER << 32; | ||
294 | tegra_hdmi_writel(hdmi, (u32)b_ksv, HDMI_NV_PDISP_RG_HDCP_BKSV_LSB); | ||
295 | tegra_hdmi_writel(hdmi, b_ksv >> 32, HDMI_NV_PDISP_RG_HDCP_BKSV_MSB); | ||
296 | } | ||
297 | |||
298 | |||
299 | /* 40-bit software's key selection vector */ | ||
300 | static inline void set_cksv(struct tegra_dc_hdmi_data *hdmi, u64 c_ksv) | ||
301 | { | ||
302 | tegra_hdmi_writel(hdmi, (u32)c_ksv, HDMI_NV_PDISP_RG_HDCP_CKSV_LSB); | ||
303 | tegra_hdmi_writel(hdmi, c_ksv >> 32, HDMI_NV_PDISP_RG_HDCP_CKSV_MSB); | ||
304 | } | ||
305 | |||
306 | /* 40-bit connection state */ | ||
307 | static inline u64 get_cs(struct tegra_dc_hdmi_data *hdmi) | ||
308 | { | ||
309 | u64 r; | ||
310 | r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CS_MSB) << 32; | ||
311 | r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CS_LSB); | ||
312 | return r; | ||
313 | } | ||
314 | |||
315 | /* 40-bit upstream key selection vector */ | ||
316 | static inline u64 get_dksv(struct tegra_dc_hdmi_data *hdmi) | ||
317 | { | ||
318 | u64 r; | ||
319 | r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_DKSV_MSB) << 32; | ||
320 | r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_DKSV_LSB); | ||
321 | return r; | ||
322 | } | ||
323 | |||
324 | /* 64-bit encrypted M0 value */ | ||
325 | static inline u64 get_mprime(struct tegra_dc_hdmi_data *hdmi) | ||
326 | { | ||
327 | u64 r; | ||
328 | r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB) << 32; | ||
329 | r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB); | ||
330 | return r; | ||
331 | } | ||
332 | |||
/* Read the transmitter's 16-bit link verification response Ri
 * from the HDCP RI register (value truncated to u16 on return). */
static inline u16 get_transmitter_ri(struct tegra_dc_hdmi_data *hdmi)
{
	return tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_RI);
}
337 | |||
/* Read the receiver's 16-bit link verification response Ri'
 * from receiver offset 0x8 over DDC. Returns 0 or an I2C error. */
static inline int get_receiver_ri(struct tegra_nvhdcp *nvhdcp, u16 *r)
{
	return nvhdcp_i2c_read16(nvhdcp, 0x8, r); /* long read */
}
342 | |||
/* Read the receiver's Bcaps capability byte (offset 0x40);
 * carries BCAPS_REPEATER / BCAPS_READY / BCAPS_11 flags. */
static int get_bcaps(struct tegra_nvhdcp *nvhdcp, u8 *b_caps)
{
	return nvhdcp_i2c_read8(nvhdcp, 0x40, b_caps);
}
347 | |||
348 | static int get_ksvfifo(struct tegra_nvhdcp *nvhdcp, | ||
349 | unsigned num_bksv_list, u64 *ksv_list) | ||
350 | { | ||
351 | u8 *buf, *p; | ||
352 | int e; | ||
353 | unsigned i; | ||
354 | size_t buf_len = num_bksv_list * 5; | ||
355 | |||
356 | if (!ksv_list || num_bksv_list > TEGRA_NVHDCP_MAX_DEVS) | ||
357 | return -EINVAL; | ||
358 | |||
359 | if (num_bksv_list == 0) | ||
360 | return 0; | ||
361 | |||
362 | buf = kmalloc(buf_len, GFP_KERNEL); | ||
363 | if (IS_ERR_OR_NULL(buf)) | ||
364 | return -ENOMEM; | ||
365 | |||
366 | e = nvhdcp_i2c_read(nvhdcp, 0x43, buf_len, buf); | ||
367 | if (e) { | ||
368 | kfree(buf); | ||
369 | return e; | ||
370 | } | ||
371 | |||
372 | /* load 40-bit keys from repeater into array of u64 */ | ||
373 | p = buf; | ||
374 | for (i = 0; i < num_bksv_list; i++) { | ||
375 | ksv_list[i] = p[0] | ((u64)p[1] << 8) | ((u64)p[2] << 16) | ||
376 | | ((u64)p[3] << 24) | ((u64)p[4] << 32); | ||
377 | p += 5; | ||
378 | } | ||
379 | |||
380 | kfree(buf); | ||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | /* get V' 160-bit SHA-1 hash from repeater */ | ||
385 | static int get_vprime(struct tegra_nvhdcp *nvhdcp, u8 *v_prime) | ||
386 | { | ||
387 | int e, i; | ||
388 | |||
389 | for (i = 0; i < 20; i += 4) { | ||
390 | e = nvhdcp_i2c_read(nvhdcp, 0x20 + i, 4, v_prime + i); | ||
391 | if (e) | ||
392 | return e; | ||
393 | } | ||
394 | return 0; | ||
395 | } | ||
396 | |||
397 | |||
398 | /* set or clear RUN_YES */ | ||
399 | static void hdcp_ctrl_run(struct tegra_dc_hdmi_data *hdmi, bool v) | ||
400 | { | ||
401 | u32 ctrl; | ||
402 | |||
403 | if (v) { | ||
404 | ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CTRL); | ||
405 | ctrl |= HDCP_RUN_YES; | ||
406 | } else { | ||
407 | ctrl = 0; | ||
408 | } | ||
409 | |||
410 | tegra_hdmi_writel(hdmi, ctrl, HDMI_NV_PDISP_RG_HDCP_CTRL); | ||
411 | } | ||
412 | |||
413 | /* wait for any bits in mask to be set in HDMI_NV_PDISP_RG_HDCP_CTRL | ||
414 | * sleeps up to 120mS */ | ||
415 | static int wait_hdcp_ctrl(struct tegra_dc_hdmi_data *hdmi, u32 mask, u32 *v) | ||
416 | { | ||
417 | int retries = 13; | ||
418 | u32 ctrl; | ||
419 | |||
420 | do { | ||
421 | ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CTRL); | ||
422 | if ((ctrl & mask)) { | ||
423 | if (v) | ||
424 | *v = ctrl; | ||
425 | break; | ||
426 | } | ||
427 | if (retries > 1) | ||
428 | msleep(10); | ||
429 | } while (--retries); | ||
430 | if (!retries) { | ||
431 | nvhdcp_err("ctrl read timeout (mask=0x%x)\n", mask); | ||
432 | return -EIO; | ||
433 | } | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | /* wait for bits in mask to be set to value in HDMI_NV_PDISP_KEY_CTRL | ||
438 | * waits up to 100mS */ | ||
439 | static int wait_key_ctrl(struct tegra_dc_hdmi_data *hdmi, u32 mask, u32 value) | ||
440 | { | ||
441 | int retries = 101; | ||
442 | u32 ctrl; | ||
443 | |||
444 | do { | ||
445 | msleep(1); | ||
446 | ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_KEY_CTRL); | ||
447 | if (((ctrl ^ value) & mask) == 0) | ||
448 | break; | ||
449 | } while (--retries); | ||
450 | if (!retries) { | ||
451 | nvhdcp_err("key ctrl read timeout (mask=0x%x)\n", mask); | ||
452 | return -EIO; | ||
453 | } | ||
454 | return 0; | ||
455 | } | ||
456 | |||
457 | /* check that key selection vector is well formed. | ||
458 | * NOTE: this function assumes KSV has already been checked against | ||
459 | * revocation list. | ||
460 | */ | ||
461 | static int verify_ksv(u64 k) | ||
462 | { | ||
463 | unsigned i; | ||
464 | |||
465 | /* count set bits, must be exactly 20 set to be valid */ | ||
466 | for(i = 0; k; i++) | ||
467 | k ^= k & -k; | ||
468 | |||
469 | return (i != 20) ? -EINVAL : 0; | ||
470 | } | ||
471 | |||
472 | /* get Status and Kprime signature - READ_S on TMDS0_LINK0 only */ | ||
473 | static int get_s_prime(struct tegra_nvhdcp *nvhdcp, struct tegra_nvhdcp_packet *pkt) | ||
474 | { | ||
475 | struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi; | ||
476 | u32 sp_msb, sp_lsb1, sp_lsb2; | ||
477 | int e; | ||
478 | |||
479 | /* if connection isn't authenticated ... */ | ||
480 | mutex_lock(&nvhdcp->lock); | ||
481 | if (nvhdcp->state != STATE_LINK_VERIFY) { | ||
482 | memset(pkt, 0, sizeof *pkt); | ||
483 | pkt->packet_results = TEGRA_NVHDCP_RESULT_LINK_FAILED; | ||
484 | e = 0; | ||
485 | goto err; | ||
486 | } | ||
487 | |||
488 | pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL; | ||
489 | |||
490 | /* we will be taking c_n, c_ksv as input */ | ||
491 | if (!(pkt->value_flags & TEGRA_NVHDCP_FLAG_CN) | ||
492 | || !(pkt->value_flags & TEGRA_NVHDCP_FLAG_CKSV)) { | ||
493 | nvhdcp_err("missing value_flags (0x%x)\n", pkt->value_flags); | ||
494 | e = -EINVAL; | ||
495 | goto err; | ||
496 | } | ||
497 | |||
498 | pkt->value_flags = 0; | ||
499 | |||
500 | pkt->a_ksv = nvhdcp->a_ksv; | ||
501 | pkt->a_n = nvhdcp->a_n; | ||
502 | pkt->value_flags = TEGRA_NVHDCP_FLAG_AKSV | TEGRA_NVHDCP_FLAG_AN; | ||
503 | |||
504 | nvhdcp_vdbg("%s():cn %llx cksv %llx\n", __func__, pkt->c_n, pkt->c_ksv); | ||
505 | |||
506 | set_cn(hdmi, pkt->c_n); | ||
507 | |||
508 | tegra_hdmi_writel(hdmi, TMDS0_LINK0 | READ_S, | ||
509 | HDMI_NV_PDISP_RG_HDCP_CMODE); | ||
510 | |||
511 | set_cksv(hdmi, pkt->c_ksv); | ||
512 | |||
513 | e = wait_hdcp_ctrl(hdmi, SPRIME_VALID, NULL); | ||
514 | if (e) { | ||
515 | nvhdcp_err("Sprime read timeout\n"); | ||
516 | pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL; | ||
517 | e = -EIO; | ||
518 | goto err; | ||
519 | } | ||
520 | |||
521 | msleep(50); | ||
522 | |||
523 | /* read 56-bit Sprime plus 16 status bits */ | ||
524 | sp_msb = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB); | ||
525 | sp_lsb1 = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1); | ||
526 | sp_lsb2 = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2); | ||
527 | |||
528 | /* top 8 bits of LSB2 and bottom 8 bits of MSB hold status bits. */ | ||
529 | pkt->hdcp_status = ( sp_msb << 8 ) | ( sp_lsb2 >> 24); | ||
530 | pkt->value_flags |= TEGRA_NVHDCP_FLAG_S; | ||
531 | |||
532 | /* 56-bit Kprime */ | ||
533 | pkt->k_prime = ((u64)(sp_lsb2 & 0xffffff) << 32) | sp_lsb1; | ||
534 | pkt->value_flags |= TEGRA_NVHDCP_FLAG_KP; | ||
535 | |||
536 | /* is connection state supported? */ | ||
537 | if (sp_msb & STATUS_CS) { | ||
538 | pkt->cs = get_cs(hdmi); | ||
539 | pkt->value_flags |= TEGRA_NVHDCP_FLAG_CS; | ||
540 | } | ||
541 | |||
542 | /* load Dksv */ | ||
543 | pkt->d_ksv = get_dksv(hdmi); | ||
544 | if (verify_ksv(pkt->d_ksv)) { | ||
545 | nvhdcp_err("Dksv invalid!\n"); | ||
546 | pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL; | ||
547 | e = -EIO; /* treat bad Dksv as I/O error */ | ||
548 | } | ||
549 | pkt->value_flags |= TEGRA_NVHDCP_FLAG_DKSV; | ||
550 | |||
551 | /* copy current Bksv */ | ||
552 | pkt->b_ksv = nvhdcp->b_ksv; | ||
553 | pkt->value_flags |= TEGRA_NVHDCP_FLAG_BKSV; | ||
554 | |||
555 | pkt->packet_results = TEGRA_NVHDCP_RESULT_SUCCESS; | ||
556 | mutex_unlock(&nvhdcp->lock); | ||
557 | return 0; | ||
558 | |||
559 | err: | ||
560 | mutex_unlock(&nvhdcp->lock); | ||
561 | return e; | ||
562 | } | ||
563 | |||
/* get M prime - READ_M on TMDS0_LINK0 only.
 * Fills pkt with Mprime, Bstatus, the cached KSV FIFO and V' from the
 * last repeater exchange, plus Dksv and Bksv. Requires pkt->c_n and
 * pkt->c_ksv as inputs. Returns 0 with RESULT_LINK_FAILED if the link
 * is not verified; -EIO on timeout or invalid Dksv.
 * Takes nvhdcp->lock internally. */
static inline int get_m_prime(struct tegra_nvhdcp *nvhdcp, struct tegra_nvhdcp_packet *pkt)
{
	struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
	int e;

	pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL;

	/* if connection isn't authenticated ... */
	mutex_lock(&nvhdcp->lock);
	if (nvhdcp->state != STATE_LINK_VERIFY) {
		memset(pkt, 0, sizeof *pkt);
		pkt->packet_results = TEGRA_NVHDCP_RESULT_LINK_FAILED;
		e = 0;
		goto err;
	}

	pkt->a_ksv = nvhdcp->a_ksv;
	pkt->a_n = nvhdcp->a_n;
	pkt->value_flags = TEGRA_NVHDCP_FLAG_AKSV | TEGRA_NVHDCP_FLAG_AN;

	set_cn(hdmi, pkt->c_n);

	/* select READ_M mode before triggering the update */
	tegra_hdmi_writel(hdmi, TMDS0_LINK0 | READ_M,
					HDMI_NV_PDISP_RG_HDCP_CMODE);

	/* Cksv write triggers Mprime update */
	set_cksv(hdmi, pkt->c_ksv);

	e = wait_hdcp_ctrl(hdmi, MPRIME_VALID, NULL);
	if (e) {
		nvhdcp_err("Mprime read timeout\n");
		e = -EIO;
		goto err;
	}
	msleep(50);

	/* load Mprime */
	pkt->m_prime = get_mprime(hdmi);
	pkt->value_flags |= TEGRA_NVHDCP_FLAG_MP;

	pkt->b_status = nvhdcp->b_status;
	pkt->value_flags |= TEGRA_NVHDCP_FLAG_BSTATUS;

	/* copy most recent KSVFIFO, if it is non-zero */
	pkt->num_bksv_list = nvhdcp->num_bksv_list;
	if( nvhdcp->num_bksv_list ) {
		BUILD_BUG_ON(sizeof(pkt->bksv_list) != sizeof(nvhdcp->bksv_list));
		memcpy(pkt->bksv_list, nvhdcp->bksv_list,
			nvhdcp->num_bksv_list * sizeof(*pkt->bksv_list));
		pkt->value_flags |= TEGRA_NVHDCP_FLAG_BKSVLIST;
	}

	/* copy v_prime */
	BUILD_BUG_ON(sizeof(pkt->v_prime) != sizeof(nvhdcp->v_prime));
	memcpy(pkt->v_prime, nvhdcp->v_prime, sizeof(nvhdcp->v_prime));
	pkt->value_flags |= TEGRA_NVHDCP_FLAG_V;

	/* load Dksv */
	pkt->d_ksv = get_dksv(hdmi);
	if (verify_ksv(pkt->d_ksv)) {
		nvhdcp_err("Dksv invalid!\n");
		e = -EIO;
		goto err;
	}
	pkt->value_flags |= TEGRA_NVHDCP_FLAG_DKSV;

	/* copy current Bksv */
	pkt->b_ksv = nvhdcp->b_ksv;
	pkt->value_flags |= TEGRA_NVHDCP_FLAG_BKSV;

	pkt->packet_results = TEGRA_NVHDCP_RESULT_SUCCESS;
	mutex_unlock(&nvhdcp->lock);
	return 0;

err:
	mutex_unlock(&nvhdcp->lock);
	return e;
}
643 | |||
/* Read the HDCP key material from kfuse and program it into the HDMI
 * controller's key SRAM, 128 bits (four words) per WRITE16 transaction.
 * Returns 0 on success, or a negative error on kfuse read failure or
 * any of the hardware handshake timeouts. */
static int load_kfuse(struct tegra_dc_hdmi_data *hdmi)
{
	unsigned buf[KFUSE_DATA_SZ / 4];
	int e, i;
	u32 ctrl;
	u32 tmp;
	int retries;

	/* copy load kfuse into buffer - only needed for early Tegra parts */
	e = tegra_kfuse_read(buf, sizeof buf);
	if (e) {
		nvhdcp_err("Kfuse read failure\n");
		return e;
	}

	/* write the kfuse to HDMI SRAM */

	tegra_hdmi_writel(hdmi, 1, HDMI_NV_PDISP_KEY_CTRL); /* LOAD_KEYS */

	/* issue a reload */
	ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_KEY_CTRL);
	tegra_hdmi_writel(hdmi, ctrl | PKEY_REQUEST_RELOAD_TRIGGER
					| LOCAL_KEYS , HDMI_NV_PDISP_KEY_CTRL);

	e = wait_key_ctrl(hdmi, PKEY_LOADED, PKEY_LOADED);
	if (e) {
		nvhdcp_err("key reload timeout\n");
		return -EIO;
	}

	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_KEY_SKEY_INDEX);

	/* wait for SRAM to be cleared */
	retries = 6;
	do {
		tmp = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_KEY_DEBUG0);
		if ((tmp & 1) == 0) break;
		if (retries > 1)
			mdelay(1);
	} while (--retries);
	if (!retries) {
		nvhdcp_err("key SRAM clear timeout\n");
		return -EIO;
	}

	/* each iteration writes four consecutive 32-bit words */
	for (i = 0; i < KFUSE_DATA_SZ / 4; i += 4) {

		/* load 128-bits*/
		tegra_hdmi_writel(hdmi, buf[i], HDMI_NV_PDISP_KEY_HDCP_KEY_0);
		tegra_hdmi_writel(hdmi, buf[i+1], HDMI_NV_PDISP_KEY_HDCP_KEY_1);
		tegra_hdmi_writel(hdmi, buf[i+2], HDMI_NV_PDISP_KEY_HDCP_KEY_2);
		tegra_hdmi_writel(hdmi, buf[i+3], HDMI_NV_PDISP_KEY_HDCP_KEY_3);

		/* trigger LOAD_HDCP_KEY */
		tegra_hdmi_writel(hdmi, 0x100, HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);

		/* AUTOINC only after the first batch, so the SRAM write
		 * pointer starts at index 0 and advances from there */
		tmp = LOCAL_KEYS | WRITE16;
		if (i)
			tmp |= AUTOINC;
		tegra_hdmi_writel(hdmi, tmp, HDMI_NV_PDISP_KEY_CTRL);

		/* wait for WRITE16 to complete */
		e = wait_key_ctrl(hdmi, 0x10, 0); /* WRITE16 */
		if (e) {
			nvhdcp_err("key write timeout\n");
			return -EIO;
		}
	}

	return 0;
}
715 | |||
/* Compare the transmitter's Ri with the receiver's Ri'.
 * @wait_ri: when true, also retry while the transmitter's Ri is still
 * changing (old != tx), sampling Ri before each receiver read.
 * Returns 0 when rx == tx, -EINVAL on mismatch or a zero Ri',
 * -EIO if the HDMI connection was lost during the check. */
static int verify_link(struct tegra_nvhdcp *nvhdcp, bool wait_ri)
{
	struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
	int retries = 3;
	u16 old, rx, tx;
	int e;

	old = 0;
	rx = 0;
	tx = 0;
	/* retry 3 times to deal with I2C link issues */
	do {
		if (wait_ri)
			old = get_transmitter_ri(hdmi);

		e = get_receiver_ri(nvhdcp, &rx);
		if (!e) {
			if (!rx) {
				nvhdcp_err("Ri is 0!\n");
				return -EINVAL;
			}

			tx = get_transmitter_ri(hdmi);
		} else {
			/* I2C read failed: force rx != tx so the final
			 * comparison below cannot spuriously succeed */
			rx = ~tx;
			msleep(50);
		}

	} while (wait_ri && --retries && old != tx);

	nvhdcp_debug("R0 Ri poll:rx=0x%04x tx=0x%04x\n", rx, tx);

	if (!nvhdcp_is_plugged(nvhdcp)) {
		nvhdcp_err("aborting verify links - lost hdmi connection\n");
		return -EIO;
	}

	if (rx != tx)
		return -EINVAL;

	return 0;
}
758 | |||
/* Second-stage authentication for repeaters: wait for the repeater's
 * READY bit, then fetch V', Bstatus and the downstream KSV FIFO into
 * the nvhdcp state (v_prime, b_status, num_bksv_list, bksv_list).
 * Returns 0 on success or a negative error. */
static int get_repeater_info(struct tegra_nvhdcp *nvhdcp)
{
	int e, retries;
	u8 b_caps;
	u16 b_status;

	nvhdcp_vdbg("repeater found:fetching repeater info\n");

	/* wait up to 5 seconds for READY on repeater */
	retries = 51;
	do {
		if (!nvhdcp_is_plugged(nvhdcp)) {
			nvhdcp_err("disconnect while waiting for repeater\n");
			return -EIO;
		}

		e = get_bcaps(nvhdcp, &b_caps);
		if (!e && (b_caps & BCAPS_READY)) {
			nvhdcp_debug("Bcaps READY from repeater\n");
			break;
		}
		if (retries > 1)
			msleep(100);
	} while (--retries);
	if (!retries) {
		nvhdcp_err("repeater Bcaps read timeout\n");
		return -ETIMEDOUT;
	}

	memset(nvhdcp->v_prime, 0, sizeof nvhdcp->v_prime);
	e = get_vprime(nvhdcp, nvhdcp->v_prime);
	if (e) {
		nvhdcp_err("repeater Vprime read failure!\n");
		return e;
	}

	e = nvhdcp_i2c_read16(nvhdcp, 0x41, &b_status);
	if (e) {
		nvhdcp_err("Bstatus read failure!\n");
		return e;
	}

	if (b_status & BSTATUS_MAX_DEVS_EXCEEDED) {
		nvhdcp_err("repeater:max devices (0x%04x)\n", b_status);
		return -EINVAL;
	}

	if (b_status & BSTATUS_MAX_CASCADE_EXCEEDED) {
		nvhdcp_err("repeater:max cascade (0x%04x)\n", b_status);
		return -EINVAL;
	}

	nvhdcp->b_status = b_status;
	/* low 7 bits of Bstatus = downstream device count */
	nvhdcp->num_bksv_list = b_status & 0x7f;
	nvhdcp_vdbg("Bstatus 0x%x (devices: %d)\n",
			b_status, nvhdcp->num_bksv_list);

	memset(nvhdcp->bksv_list, 0, sizeof nvhdcp->bksv_list);
	e = get_ksvfifo(nvhdcp, nvhdcp->num_bksv_list, nvhdcp->bksv_list);
	if (e) {
		nvhdcp_err("repeater:could not read KSVFIFO (err %d)\n", e);
		return e;
	}

	return 0;
}
825 | |||
/* Delayed-work handler that runs the full downstream HDCP authentication
 * sequence and then loops verifying the link every ~1.5s until unplug,
 * state change, or failure. Runs with nvhdcp->lock held except while
 * sleeping in the verification loop.
 *
 * Exit paths:
 *  - failure:   count the failure and (unless > 5 failures or unplugged)
 *               requeue itself in 1 second, then fall through to ...
 *  - lost_hdmi: mark unauthenticated and stop the HDCP engine
 *  - disable:   unrecoverable (bad Aksv): go to STATE_OFF and unplug
 */
static void nvhdcp_downstream_worker(struct work_struct *work)
{
	struct tegra_nvhdcp *nvhdcp =
		container_of(to_delayed_work(work), struct tegra_nvhdcp, work);
	struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
	int e;
	u8 b_caps;
	u32 tmp;
	u32 res;

	nvhdcp_vdbg("%s():started thread %s\n", __func__, nvhdcp->name);

	mutex_lock(&nvhdcp->lock);
	if (nvhdcp->state == STATE_OFF) {
		nvhdcp_err("nvhdcp failure - giving up\n");
		goto err;
	}
	nvhdcp->state = STATE_UNAUTHENTICATED;

	/* check plug state to terminate early in case flush_workqueue() */
	if (!nvhdcp_is_plugged(nvhdcp)) {
		nvhdcp_err("worker started while unplugged!\n");
		goto lost_hdmi;
	}
	nvhdcp_vdbg("%s():hpd=%d\n", __func__, nvhdcp->plugged);

	nvhdcp->a_ksv = 0;
	nvhdcp->b_ksv = 0;
	nvhdcp->a_n = 0;

	e = get_bcaps(nvhdcp, &b_caps);
	if (e) {
		nvhdcp_err("Bcaps read failure\n");
		goto failure;
	}

	nvhdcp_vdbg("read Bcaps = 0x%02x\n", b_caps);

	nvhdcp_vdbg("kfuse loading ...\n");

	/* repeater flag in Bskv must be configured before loading fuses */
	set_bksv(hdmi, 0, (b_caps & BCAPS_REPEATER));

	e = load_kfuse(hdmi);
	if (e) {
		nvhdcp_err("kfuse could not be loaded\n");
		goto failure;
	}

	hdcp_ctrl_run(hdmi, 1);

	nvhdcp_vdbg("wait AN_VALID ...\n");

	/* wait for hardware to generate HDCP values */
	e = wait_hdcp_ctrl(hdmi, AN_VALID | SROM_ERR, &res);
	if (e) {
		nvhdcp_err("An key generation timeout\n");
		goto failure;
	}
	if (res & SROM_ERR) {
		nvhdcp_err("SROM error\n");
		goto failure;
	}

	msleep(25);

	nvhdcp->a_ksv = get_aksv(hdmi);
	nvhdcp->a_n = get_an(hdmi);
	nvhdcp_vdbg("Aksv is 0x%016llx\n", nvhdcp->a_ksv);
	nvhdcp_vdbg("An is 0x%016llx\n", nvhdcp->a_n);
	/* a malformed Aksv means bad key material - not retryable */
	if (verify_ksv(nvhdcp->a_ksv)) {
		nvhdcp_err("Aksv verify failure! (0x%016llx)\n", nvhdcp->a_ksv);
		goto disable;
	}

	/* write Ainfo to receiver - set 1.1 only if b_caps supports it */
	e = nvhdcp_i2c_write8(nvhdcp, 0x15, b_caps & BCAPS_11);
	if (e) {
		nvhdcp_err("Ainfo write failure\n");
		goto failure;
	}

	/* write An to receiver */
	e = nvhdcp_i2c_write64(nvhdcp, 0x18, nvhdcp->a_n);
	if (e) {
		nvhdcp_err("An write failure\n");
		goto failure;
	}

	nvhdcp_vdbg("wrote An = 0x%016llx\n", nvhdcp->a_n);

	/* write Aksv to receiver - triggers auth sequence */
	e = nvhdcp_i2c_write40(nvhdcp, 0x10, nvhdcp->a_ksv);
	if (e) {
		nvhdcp_err("Aksv write failure\n");
		goto failure;
	}

	nvhdcp_vdbg("wrote Aksv = 0x%010llx\n", nvhdcp->a_ksv);

	/* bail out if unplugged in the middle of negotiation */
	if (!nvhdcp_is_plugged(nvhdcp))
		goto lost_hdmi;

	/* get Bksv from receiver */
	e = nvhdcp_i2c_read40(nvhdcp, 0x00, &nvhdcp->b_ksv);
	if (e) {
		nvhdcp_err("Bksv read failure\n");
		goto failure;
	}
	nvhdcp_vdbg("Bksv is 0x%016llx\n", nvhdcp->b_ksv);
	if (verify_ksv(nvhdcp->b_ksv)) {
		nvhdcp_err("Bksv verify failure!\n");
		goto failure;
	}

	nvhdcp_vdbg("read Bksv = 0x%010llx from device\n", nvhdcp->b_ksv);

	set_bksv(hdmi, nvhdcp->b_ksv, (b_caps & BCAPS_REPEATER));

	nvhdcp_vdbg("loaded Bksv into controller\n");

	e = wait_hdcp_ctrl(hdmi, R0_VALID, NULL);
	if (e) {
		nvhdcp_err("R0 read failure!\n");
		goto failure;
	}

	nvhdcp_vdbg("R0 valid\n");

	msleep(100); /* can't read R0' within 100ms of writing Aksv */

	nvhdcp_vdbg("verifying links ...\n");

	e = verify_link(nvhdcp, false);
	if (e) {
		nvhdcp_err("link verification failed err %d\n", e);
		goto failure;
	}

	/* if repeater then get repeater info */
	if (b_caps & BCAPS_REPEATER) {
		e = get_repeater_info(nvhdcp);
		if (e) {
			nvhdcp_err("get repeater info failed\n");
			goto failure;
		}
	}

	tmp = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CTRL);
	tmp |= CRYPT_ENABLED;
	if (b_caps & BCAPS_11) /* HDCP 1.1 ? */
		tmp |= ONEONE_ENABLED;
	tegra_hdmi_writel(hdmi, tmp, HDMI_NV_PDISP_RG_HDCP_CTRL);

	nvhdcp_vdbg("CRYPT enabled\n");

	nvhdcp->state = STATE_LINK_VERIFY;
	nvhdcp_info("link verified!\n");

	/* periodic link verification: drop the lock while sleeping so
	 * ioctl paths (get_s_prime/get_m_prime) can run */
	while (1) {
		if (!nvhdcp_is_plugged(nvhdcp))
			goto lost_hdmi;

		if (nvhdcp->state != STATE_LINK_VERIFY)
			goto failure;

		e = verify_link(nvhdcp, true);
		if (e) {
			nvhdcp_err("link verification failed err %d\n", e);
			goto failure;
		}
		mutex_unlock(&nvhdcp->lock);
		wait_event_interruptible_timeout(wq_worker,
			!nvhdcp_is_plugged(nvhdcp), msecs_to_jiffies(1500));
		mutex_lock(&nvhdcp->lock);

	}

failure:
	nvhdcp->fail_count++;
	if(nvhdcp->fail_count > 5) {
		nvhdcp_err("nvhdcp failure - too many failures, giving up!\n");
	} else {
		nvhdcp_err("nvhdcp failure - renegotiating in 1 second\n");
		if (!nvhdcp_is_plugged(nvhdcp))
			goto lost_hdmi;
		queue_delayed_work(nvhdcp->downstream_wq, &nvhdcp->work,
						msecs_to_jiffies(1000));
	}

lost_hdmi:
	nvhdcp->state = STATE_UNAUTHENTICATED;
	hdcp_ctrl_run(hdmi, 0);

err:
	mutex_unlock(&nvhdcp->lock);
	return;
disable:
	nvhdcp->state = STATE_OFF;
	nvhdcp_set_plugged(nvhdcp, false);
	mutex_unlock(&nvhdcp->lock);
	return;
}
1030 | |||
1031 | static int tegra_nvhdcp_on(struct tegra_nvhdcp *nvhdcp) | ||
1032 | { | ||
1033 | nvhdcp->state = STATE_UNAUTHENTICATED; | ||
1034 | if (nvhdcp_is_plugged(nvhdcp)) { | ||
1035 | nvhdcp->fail_count = 0; | ||
1036 | queue_delayed_work(nvhdcp->downstream_wq, &nvhdcp->work, | ||
1037 | msecs_to_jiffies(100)); | ||
1038 | } | ||
1039 | return 0; | ||
1040 | } | ||
1041 | |||
/* Stop HDCP: mark STATE_OFF and unplugged under the lock, then wake the
 * worker (so its 1.5s verify sleep ends early) and wait for it to drain.
 * Ordering matters: state must change before wake/flush so the worker
 * observes it and exits. */
static int tegra_nvhdcp_off(struct tegra_nvhdcp *nvhdcp)
{
	mutex_lock(&nvhdcp->lock);
	nvhdcp->state = STATE_OFF;
	nvhdcp_set_plugged(nvhdcp, false);
	mutex_unlock(&nvhdcp->lock);
	wake_up_interruptible(&wq_worker);
	flush_workqueue(nvhdcp->downstream_wq);
	return 0;
}
1052 | |||
1053 | void tegra_nvhdcp_set_plug(struct tegra_nvhdcp *nvhdcp, bool hpd) | ||
1054 | { | ||
1055 | nvhdcp_debug("hdmi hotplug detected (hpd = %d)\n", hpd); | ||
1056 | |||
1057 | if (hpd) { | ||
1058 | nvhdcp_set_plugged(nvhdcp, true); | ||
1059 | tegra_nvhdcp_on(nvhdcp); | ||
1060 | } else { | ||
1061 | tegra_nvhdcp_off(nvhdcp); | ||
1062 | } | ||
1063 | } | ||
1064 | |||
1065 | int tegra_nvhdcp_set_policy(struct tegra_nvhdcp *nvhdcp, int pol) | ||
1066 | { | ||
1067 | if (pol == TEGRA_NVHDCP_POLICY_ALWAYS_ON) { | ||
1068 | nvhdcp_info("using \"always on\" policy.\n"); | ||
1069 | if (atomic_xchg(&nvhdcp->policy, pol) != pol) { | ||
1070 | /* policy changed, start working */ | ||
1071 | tegra_nvhdcp_on(nvhdcp); | ||
1072 | } | ||
1073 | } else { | ||
1074 | /* unsupported policy */ | ||
1075 | return -EINVAL; | ||
1076 | } | ||
1077 | |||
1078 | return 0; | ||
1079 | } | ||
1080 | |||
/* Force a fresh authentication cycle: flag STATE_RENEGOTIATE under the
 * lock (knocks the worker out of its verify loop), then requeue it. */
static int tegra_nvhdcp_renegotiate(struct tegra_nvhdcp *nvhdcp)
{
	mutex_lock(&nvhdcp->lock);
	nvhdcp->state = STATE_RENEGOTIATE;
	mutex_unlock(&nvhdcp->lock);
	tegra_nvhdcp_on(nvhdcp);
	return 0;
}
1089 | |||
/* Suspend hook: shut HDCP down. NULL-safe (HDCP may be unconfigured). */
void tegra_nvhdcp_suspend(struct tegra_nvhdcp *nvhdcp)
{
	if (!nvhdcp)
		return;
	tegra_nvhdcp_off(nvhdcp);
}
1095 | |||
/* Resume hook: restart authentication. NULL-safe. */
void tegra_nvhdcp_resume(struct tegra_nvhdcp *nvhdcp)
{
	if (!nvhdcp)
		return;
	tegra_nvhdcp_renegotiate(nvhdcp);
}
1101 | |||
/* ioctl entry point for the nvhdcp misc device.
 * READ_M / READ_S copy a tegra_nvhdcp_packet from userspace, run the
 * corresponding exchange, and copy the (possibly partial) packet back
 * even if the exchange failed, so packet_results reaches the caller.
 * Unknown commands return -ENOTTY. */
static long nvhdcp_dev_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct tegra_nvhdcp *nvhdcp = filp->private_data;
	struct tegra_nvhdcp_packet *pkt;
	int e = -ENOTTY;

	switch (cmd) {
	case TEGRAIO_NVHDCP_ON:
		return tegra_nvhdcp_on(nvhdcp);

	case TEGRAIO_NVHDCP_OFF:
		return tegra_nvhdcp_off(nvhdcp);

	case TEGRAIO_NVHDCP_SET_POLICY:
		return tegra_nvhdcp_set_policy(nvhdcp, arg);

	case TEGRAIO_NVHDCP_READ_M:
		pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
		if (!pkt)
			return -ENOMEM;
		if (copy_from_user(pkt, (void __user *)arg, sizeof(*pkt))) {
			e = -EFAULT;
			goto kfree_pkt;
		}
		e = get_m_prime(nvhdcp, pkt);
		/* copy back regardless of e: pkt carries packet_results */
		if (copy_to_user((void __user *)arg, pkt, sizeof(*pkt))) {
			e = -EFAULT;
			goto kfree_pkt;
		}
		kfree(pkt);
		return e;

	case TEGRAIO_NVHDCP_READ_S:
		pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
		if (!pkt)
			return -ENOMEM;
		if (copy_from_user(pkt, (void __user *)arg, sizeof(*pkt))) {
			e = -EFAULT;
			goto kfree_pkt;
		}
		e = get_s_prime(nvhdcp, pkt);
		if (copy_to_user((void __user *)arg, pkt, sizeof(*pkt))) {
			e = -EFAULT;
			goto kfree_pkt;
		}
		kfree(pkt);
		return e;

	case TEGRAIO_NVHDCP_RENEGOTIATE:
		e = tegra_nvhdcp_renegotiate(nvhdcp);
		break;
	}

	return e;
kfree_pkt:
	kfree(pkt);
	return e;
}
1161 | |||
1162 | static int nvhdcp_dev_open(struct inode *inode, struct file *filp) | ||
1163 | { | ||
1164 | struct miscdevice *miscdev = filp->private_data; | ||
1165 | struct tegra_nvhdcp *nvhdcp = | ||
1166 | container_of(miscdev, struct tegra_nvhdcp, miscdev); | ||
1167 | filp->private_data = nvhdcp; | ||
1168 | return 0; | ||
1169 | } | ||
1170 | |||
/* release(): drop the tegra_nvhdcp reference stored at open() time. */
static int nvhdcp_dev_release(struct inode *inode, struct file *filp)
{
	filp->private_data = NULL;
	return 0;
}
1176 | |||
/* File operations for the /dev/nvhdcpN misc device. */
static const struct file_operations nvhdcp_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.unlocked_ioctl = nvhdcp_dev_ioctl,
	.open		= nvhdcp_dev_open,
	.release	= nvhdcp_dev_release,
};
1184 | |||
1185 | /* we only support one AP right now, so should only call this once. */ | ||
1186 | struct tegra_nvhdcp *tegra_nvhdcp_create(struct tegra_dc_hdmi_data *hdmi, | ||
1187 | int id, int bus) | ||
1188 | { | ||
1189 | static struct tegra_nvhdcp *nvhdcp; /* prevent multiple calls */ | ||
1190 | struct i2c_adapter *adapter; | ||
1191 | int e; | ||
1192 | |||
1193 | if (nvhdcp) | ||
1194 | return ERR_PTR(-EMFILE); | ||
1195 | |||
1196 | nvhdcp = kzalloc(sizeof(*nvhdcp), GFP_KERNEL); | ||
1197 | if (!nvhdcp) | ||
1198 | return ERR_PTR(-ENOMEM); | ||
1199 | |||
1200 | nvhdcp->id = id; | ||
1201 | snprintf(nvhdcp->name, sizeof(nvhdcp->name), "nvhdcp%u", id); | ||
1202 | nvhdcp->hdmi = hdmi; | ||
1203 | mutex_init(&nvhdcp->lock); | ||
1204 | |||
1205 | strlcpy(nvhdcp->info.type, nvhdcp->name, sizeof(nvhdcp->info.type)); | ||
1206 | nvhdcp->bus = bus; | ||
1207 | nvhdcp->info.addr = 0x74 >> 1; | ||
1208 | nvhdcp->info.platform_data = nvhdcp; | ||
1209 | nvhdcp->fail_count = 0; | ||
1210 | |||
1211 | adapter = i2c_get_adapter(bus); | ||
1212 | if (!adapter) { | ||
1213 | nvhdcp_err("can't get adapter for bus %d\n", bus); | ||
1214 | e = -EBUSY; | ||
1215 | goto free_nvhdcp; | ||
1216 | } | ||
1217 | |||
1218 | nvhdcp->client = i2c_new_device(adapter, &nvhdcp->info); | ||
1219 | i2c_put_adapter(adapter); | ||
1220 | |||
1221 | if (!nvhdcp->client) { | ||
1222 | nvhdcp_err("can't create new device\n"); | ||
1223 | e = -EBUSY; | ||
1224 | goto free_nvhdcp; | ||
1225 | } | ||
1226 | |||
1227 | nvhdcp->state = STATE_UNAUTHENTICATED; | ||
1228 | |||
1229 | nvhdcp->downstream_wq = create_singlethread_workqueue(nvhdcp->name); | ||
1230 | INIT_DELAYED_WORK(&nvhdcp->work, nvhdcp_downstream_worker); | ||
1231 | |||
1232 | nvhdcp->miscdev.minor = MISC_DYNAMIC_MINOR; | ||
1233 | nvhdcp->miscdev.name = nvhdcp->name; | ||
1234 | nvhdcp->miscdev.fops = &nvhdcp_fops; | ||
1235 | |||
1236 | e = misc_register(&nvhdcp->miscdev); | ||
1237 | if (e) | ||
1238 | goto free_workqueue; | ||
1239 | |||
1240 | nvhdcp_vdbg("%s(): created misc device %s\n", __func__, nvhdcp->name); | ||
1241 | |||
1242 | return nvhdcp; | ||
1243 | free_workqueue: | ||
1244 | destroy_workqueue(nvhdcp->downstream_wq); | ||
1245 | i2c_release_client(nvhdcp->client); | ||
1246 | free_nvhdcp: | ||
1247 | kfree(nvhdcp); | ||
1248 | nvhdcp_err("unable to create device.\n"); | ||
1249 | return ERR_PTR(e); | ||
1250 | } | ||
1251 | |||
/* Tear down an instance from tegra_nvhdcp_create(): deregister the misc
 * device first (no new ioctls), stop HDCP and drain the worker, then
 * release the I2C client and free.
 * NOTE(review): the static singleton inside tegra_nvhdcp_create() is not
 * reachable from here, so create() cannot be called again after destroy —
 * appears intentional given the "only call this once" contract; confirm. */
void tegra_nvhdcp_destroy(struct tegra_nvhdcp *nvhdcp)
{
	misc_deregister(&nvhdcp->miscdev);
	tegra_nvhdcp_off(nvhdcp);
	destroy_workqueue(nvhdcp->downstream_wq);
	i2c_release_client(nvhdcp->client);
	kfree(nvhdcp);
}
diff --git a/drivers/video/tegra/dc/nvhdcp.h b/drivers/video/tegra/dc/nvhdcp.h new file mode 100644 index 00000000000..90ea0be36d1 --- /dev/null +++ b/drivers/video/tegra/dc/nvhdcp.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/nvhdcp.h | ||
3 | * | ||
4 | * Copyright (c) 2010-2011, NVIDIA Corporation. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #ifndef __DRIVERS_VIDEO_TEGRA_DC_NVHDCP_H | ||
18 | #define __DRIVERS_VIDEO_TEGRA_DC_NVHDCP_H | ||
19 | #include <video/nvhdcp.h> | ||
20 | |||
21 | struct tegra_nvhdcp; | ||
22 | #ifdef CONFIG_TEGRA_NVHDCP | ||
23 | void tegra_nvhdcp_set_plug(struct tegra_nvhdcp *nvhdcp, bool hpd); | ||
24 | int tegra_nvhdcp_set_policy(struct tegra_nvhdcp *nvhdcp, int pol); | ||
25 | void tegra_nvhdcp_suspend(struct tegra_nvhdcp *nvhdcp); | ||
26 | void tegra_nvhdcp_resume(struct tegra_nvhdcp *nvhdcp); | ||
27 | struct tegra_nvhdcp *tegra_nvhdcp_create(struct tegra_dc_hdmi_data *hdmi, | ||
28 | int id, int bus); | ||
29 | void tegra_nvhdcp_destroy(struct tegra_nvhdcp *nvhdcp); | ||
30 | #else | ||
31 | inline void tegra_nvhdcp_set_plug(struct tegra_nvhdcp *nvhdcp, bool hpd) { } | ||
32 | inline int tegra_nvhdcp_set_policy(struct tegra_nvhdcp *nvhdcp, int pol) | ||
33 | { | ||
34 | return 0; | ||
35 | } | ||
36 | inline void tegra_nvhdcp_suspend(struct tegra_nvhdcp *nvhdcp) { } | ||
37 | inline void tegra_nvhdcp_resume(struct tegra_nvhdcp *nvhdcp) { } | ||
38 | inline struct tegra_nvhdcp *tegra_nvhdcp_create(struct tegra_dc_hdmi_data *hdmi, | ||
39 | int id, int bus) | ||
40 | { | ||
41 | return NULL; | ||
42 | } | ||
43 | inline void tegra_nvhdcp_destroy(struct tegra_nvhdcp *nvhdcp) { } | ||
44 | #endif | ||
45 | |||
46 | #endif | ||
diff --git a/drivers/video/tegra/dc/nvsd.c b/drivers/video/tegra/dc/nvsd.c new file mode 100644 index 00000000000..a2f3ece6ae7 --- /dev/null +++ b/drivers/video/tegra/dc/nvsd.c | |||
@@ -0,0 +1,906 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/nvsd.c | ||
3 | * | ||
4 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <mach/dc.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/backlight.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | |||
25 | #include "dc_reg.h" | ||
26 | #include "dc_priv.h" | ||
27 | #include "nvsd.h" | ||
28 | |||
29 | /* Elements for sysfs access */ | ||
30 | #define NVSD_ATTR(__name) static struct kobj_attribute nvsd_attr_##__name = \ | ||
31 | __ATTR(__name, S_IRUGO|S_IWUSR, nvsd_settings_show, nvsd_settings_store) | ||
32 | #define NVSD_ATTRS_ENTRY(__name) (&nvsd_attr_##__name.attr) | ||
33 | #define IS_NVSD_ATTR(__name) (attr == &nvsd_attr_##__name) | ||
34 | |||
35 | static ssize_t nvsd_settings_show(struct kobject *kobj, | ||
36 | struct kobj_attribute *attr, char *buf); | ||
37 | |||
38 | static ssize_t nvsd_settings_store(struct kobject *kobj, | ||
39 | struct kobj_attribute *attr, const char *buf, size_t count); | ||
40 | |||
41 | static ssize_t nvsd_registers_show(struct kobject *kobj, | ||
42 | struct kobj_attribute *attr, char *buf); | ||
43 | |||
44 | NVSD_ATTR(enable); | ||
45 | NVSD_ATTR(aggressiveness); | ||
46 | NVSD_ATTR(phase_in_settings); | ||
47 | NVSD_ATTR(phase_in_adjustments); | ||
48 | NVSD_ATTR(bin_width); | ||
49 | NVSD_ATTR(hw_update_delay); | ||
50 | NVSD_ATTR(use_vid_luma); | ||
51 | NVSD_ATTR(coeff); | ||
52 | NVSD_ATTR(blp_time_constant); | ||
53 | NVSD_ATTR(blp_step); | ||
54 | NVSD_ATTR(fc_time_limit); | ||
55 | NVSD_ATTR(fc_threshold); | ||
56 | NVSD_ATTR(lut); | ||
57 | NVSD_ATTR(bltf); | ||
58 | static struct kobj_attribute nvsd_attr_registers = | ||
59 | __ATTR(registers, S_IRUGO, nvsd_registers_show, NULL); | ||
60 | |||
61 | static struct attribute *nvsd_attrs[] = { | ||
62 | NVSD_ATTRS_ENTRY(enable), | ||
63 | NVSD_ATTRS_ENTRY(aggressiveness), | ||
64 | NVSD_ATTRS_ENTRY(phase_in_settings), | ||
65 | NVSD_ATTRS_ENTRY(phase_in_adjustments), | ||
66 | NVSD_ATTRS_ENTRY(bin_width), | ||
67 | NVSD_ATTRS_ENTRY(hw_update_delay), | ||
68 | NVSD_ATTRS_ENTRY(use_vid_luma), | ||
69 | NVSD_ATTRS_ENTRY(coeff), | ||
70 | NVSD_ATTRS_ENTRY(blp_time_constant), | ||
71 | NVSD_ATTRS_ENTRY(blp_step), | ||
72 | NVSD_ATTRS_ENTRY(fc_time_limit), | ||
73 | NVSD_ATTRS_ENTRY(fc_threshold), | ||
74 | NVSD_ATTRS_ENTRY(lut), | ||
75 | NVSD_ATTRS_ENTRY(bltf), | ||
76 | NVSD_ATTRS_ENTRY(registers), | ||
77 | NULL, | ||
78 | }; | ||
79 | |||
80 | static struct attribute_group nvsd_attr_group = { | ||
81 | .attrs = nvsd_attrs, | ||
82 | }; | ||
83 | |||
84 | static struct kobject *nvsd_kobj; | ||
85 | |||
86 | /* shared brightness variable */ | ||
87 | static atomic_t *sd_brightness = NULL; | ||
88 | /* shared boolean for manual K workaround */ | ||
89 | static atomic_t man_k_until_blank = ATOMIC_INIT(0); | ||
90 | |||
91 | static u8 nvsd_get_bw_idx(struct tegra_dc_sd_settings *settings) | ||
92 | { | ||
93 | u8 bw; | ||
94 | |||
95 | switch (settings->bin_width) { | ||
96 | default: | ||
97 | case -1: | ||
98 | /* A -1 bin-width indicates 'automatic' | ||
99 | based upon aggressiveness. */ | ||
100 | settings->bin_width = -1; | ||
101 | switch (settings->aggressiveness) { | ||
102 | default: | ||
103 | case 0: | ||
104 | case 1: | ||
105 | bw = SD_BIN_WIDTH_ONE; | ||
106 | break; | ||
107 | case 2: | ||
108 | case 3: | ||
109 | case 4: | ||
110 | bw = SD_BIN_WIDTH_TWO; | ||
111 | break; | ||
112 | case 5: | ||
113 | bw = SD_BIN_WIDTH_FOUR; | ||
114 | break; | ||
115 | } | ||
116 | break; | ||
117 | case 1: | ||
118 | bw = SD_BIN_WIDTH_ONE; | ||
119 | break; | ||
120 | case 2: | ||
121 | bw = SD_BIN_WIDTH_TWO; | ||
122 | break; | ||
123 | case 4: | ||
124 | bw = SD_BIN_WIDTH_FOUR; | ||
125 | break; | ||
126 | case 8: | ||
127 | bw = SD_BIN_WIDTH_EIGHT; | ||
128 | break; | ||
129 | } | ||
130 | return bw >> 3; | ||
131 | |||
132 | } | ||
133 | |||
/* Step the manual backlight brightness and pixel-gain (K) values one
 * increment toward their hardware-computed targets, so that large jumps
 * are phased in over several frames instead of applied at once.
 * Returns true while an adjustment is still in progress (caller should
 * keep polling), false once both values match their targets. */
static bool nvsd_phase_in_adjustments(struct tegra_dc *dc,
	struct tegra_dc_sd_settings *settings)
{
	u8 step, cur_sd_brightness;
	u16 target_k, cur_k;
	u32 man_k, val;

	cur_sd_brightness = atomic_read(sd_brightness);

	/* Target K is what the hardware computed; current K is the manual
	 * override register this function is driving toward the target. */
	target_k = tegra_dc_readl(dc, DC_DISP_SD_HW_K_VALUES);
	target_k = SD_HW_K_R(target_k);
	cur_k = tegra_dc_readl(dc, DC_DISP_SD_MAN_K_VALUES);
	cur_k = SD_HW_K_R(cur_k);

	/* read brightness value */
	val = tegra_dc_readl(dc, DC_DISP_SD_BL_CONTROL);
	val = SD_BLC_BRIGHTNESS(val);

	step = settings->phase_adj_step;
	if (cur_sd_brightness != val || target_k != cur_k) {
		if (!step)
			step = ADJ_PHASE_STEP;

		/* Phase in Backlight and Pixel K
		every ADJ_PHASE_STEP frames*/
		/* NOTE(review): the post-decrement-and-mask test gates the
		 * update to one frame out of every ADJ_PHASE_STEP; the
		 * decremented counter is persisted below. */
		if ((step-- & ADJ_PHASE_STEP) == ADJ_PHASE_STEP) {

			/* Move brightness one count toward the HW value. */
			if (val != cur_sd_brightness)
				val > cur_sd_brightness ?
					(cur_sd_brightness++) :
					(cur_sd_brightness--);

			/* Move K one K_STEP toward the HW target. */
			if (target_k != cur_k) {
				if (target_k > cur_k)
					cur_k += K_STEP;
				else
					cur_k -= K_STEP;
			}

			/* Set manual k value */
			man_k = SD_MAN_K_R(cur_k) |
				SD_MAN_K_G(cur_k) | SD_MAN_K_B(cur_k);
			tegra_dc_writel(dc, man_k, DC_DISP_SD_MAN_K_VALUES);
			/* Set manual brightness value */
			atomic_set(sd_brightness, cur_sd_brightness);
		}
		settings->phase_adj_step = step;
		return true;
	} else
		return false;
}
185 | |||
/* Phase in the LUT and backlight transfer-function registers based on
 * the current phase step relative to the total number of phase-in steps.
 * At step 0 the LUT is all zeros and the BLTF is identity (255); at
 * step == num_phase_in_steps both equal the configured tables. */
static void nvsd_phase_in_luts(struct tegra_dc_sd_settings *settings,
	struct tegra_dc *dc)
{
	u32 val;
	u8 bw_idx;
	int i;
	u16 phase_settings_step = settings->phase_settings_step;
	u16 num_phase_in_steps = settings->num_phase_in_steps;

	bw_idx = nvsd_get_bw_idx(settings);

	/* Phase in Final LUT: each entry scaled by step/num_steps. */
	for (i = 0; i < DC_DISP_SD_LUT_NUM; i++) {
		val = SD_LUT_R((settings->lut[bw_idx][i].r *
				phase_settings_step)/num_phase_in_steps) |
			SD_LUT_G((settings->lut[bw_idx][i].g *
				phase_settings_step)/num_phase_in_steps) |
			SD_LUT_B((settings->lut[bw_idx][i].b *
				phase_settings_step)/num_phase_in_steps);

		tegra_dc_writel(dc, val, DC_DISP_SD_LUT(i));
	}
	/* Phase in Final BLTF: ramp down from identity (255) toward the
	 * configured transfer-function points. */
	for (i = 0; i < DC_DISP_SD_BL_TF_NUM; i++) {
		val = SD_BL_TF_POINT_0(255-((255-settings->bltf[bw_idx][i][0])
				* phase_settings_step)/num_phase_in_steps) |
			SD_BL_TF_POINT_1(255-((255-settings->bltf[bw_idx][i][1])
				* phase_settings_step)/num_phase_in_steps) |
			SD_BL_TF_POINT_2(255-((255-settings->bltf[bw_idx][i][2])
				* phase_settings_step)/num_phase_in_steps) |
			SD_BL_TF_POINT_3(255-((255-settings->bltf[bw_idx][i][3])
				* phase_settings_step)/num_phase_in_steps);

		tegra_dc_writel(dc, val, DC_DISP_SD_BL_TF(i));
	}
}
223 | |||
/* Handle the commands that may be invoked for phase_in_settings
 * (ENABLE / DISABLE / AGG_CHG).  Called once per update while
 * settings->cmd is non-zero; each call advances the phase-in by at most
 * one step. */
static void nvsd_cmd_handler(struct tegra_dc_sd_settings *settings,
	struct tegra_dc *dc)
{
	u32 val;
	u8 bw_idx, bw;

	if (settings->cmd & ENABLE) {
		settings->phase_settings_step++;
		/* ENABLE finishes once all phase-in steps are consumed. */
		if (settings->phase_settings_step >=
			settings->num_phase_in_steps)
			settings->cmd &= ~ENABLE;

		nvsd_phase_in_luts(settings, dc);
	}
	if (settings->cmd & DISABLE) {
		settings->phase_settings_step--;
		nvsd_phase_in_luts(settings, dc);
		if (settings->phase_settings_step == 0) {
			/* finish up aggressiveness phase in */
			if (settings->cmd & AGG_CHG)
				settings->aggressiveness = settings->final_agg;
			settings->cmd = NO_CMD;
			settings->enable = 0;
			/* re-init with enable == 0 shuts smartdimmer off */
			nvsd_init(dc, settings);
		}
	}
	if (settings->cmd & AGG_CHG) {
		if (settings->aggressiveness == settings->final_agg)
			settings->cmd &= ~AGG_CHG;
		/* Move one aggressiveness level every STEPS_PER_AGG_CHG
		 * invocations (STEPS_PER_AGG_CHG is assumed to be a power
		 * of two for the mask below — TODO confirm). */
		if ((settings->cur_agg_step++ & (STEPS_PER_AGG_CHG - 1)) == 0) {
			settings->final_agg > settings->aggressiveness ?
				settings->aggressiveness++ :
				settings->aggressiveness--;

			/* Update aggressiveness value in HW */
			val = tegra_dc_readl(dc, DC_DISP_SD_CONTROL);
			val &= ~SD_AGGRESSIVENESS(0x7);
			val |= SD_AGGRESSIVENESS(settings->aggressiveness);

			/* Adjust bin_width for automatic setting */
			if (settings->bin_width == -1) {
				bw_idx = nvsd_get_bw_idx(settings);

				bw = bw_idx << 3;

				val &= ~SD_BIN_WIDTH_MASK;
				val |= bw;
			}
			tegra_dc_writel(dc, val, DC_DISP_SD_CONTROL);

			nvsd_phase_in_luts(settings, dc);
		}
	}
}
279 | |||
280 | static bool nvsd_update_enable(struct tegra_dc_sd_settings *settings, | ||
281 | int enable_val) | ||
282 | { | ||
283 | |||
284 | if (enable_val != 1 && enable_val != 0) | ||
285 | return false; | ||
286 | |||
287 | if (!settings->cmd && settings->enable != enable_val) { | ||
288 | settings->num_phase_in_steps = | ||
289 | STEPS_PER_AGG_LVL*settings->aggressiveness; | ||
290 | settings->phase_settings_step = enable_val ? | ||
291 | 0 : settings->num_phase_in_steps; | ||
292 | } | ||
293 | |||
294 | if (settings->enable != enable_val || settings->cmd & DISABLE) { | ||
295 | settings->cmd &= ~(ENABLE | DISABLE); | ||
296 | if (!settings->enable && enable_val) | ||
297 | settings->cmd |= PHASE_IN; | ||
298 | settings->cmd |= enable_val ? ENABLE : DISABLE; | ||
299 | return true; | ||
300 | } | ||
301 | |||
302 | return false; | ||
303 | } | ||
304 | |||
305 | static bool nvsd_update_agg(struct tegra_dc_sd_settings *settings, int agg_val) | ||
306 | { | ||
307 | int i; | ||
308 | int pri_lvl = SD_AGG_PRI_LVL(agg_val); | ||
309 | int agg_lvl = SD_GET_AGG(agg_val); | ||
310 | struct tegra_dc_sd_agg_priorities *sd_agg_priorities = | ||
311 | &settings->agg_priorities; | ||
312 | |||
313 | if (agg_lvl > 5 || agg_lvl < 0) | ||
314 | return false; | ||
315 | else if (agg_lvl == 0 && pri_lvl == 0) | ||
316 | return false; | ||
317 | |||
318 | if (pri_lvl >= 0 && pri_lvl < 4) | ||
319 | sd_agg_priorities->agg[pri_lvl] = agg_lvl; | ||
320 | |||
321 | for (i = NUM_AGG_PRI_LVLS - 1; i >= 0; i--) { | ||
322 | if (sd_agg_priorities->agg[i]) | ||
323 | break; | ||
324 | } | ||
325 | |||
326 | sd_agg_priorities->pri_lvl = i; | ||
327 | pri_lvl = i; | ||
328 | agg_lvl = sd_agg_priorities->agg[i]; | ||
329 | |||
330 | if (settings->phase_in_settings && settings->enable && | ||
331 | settings->aggressiveness != agg_lvl) { | ||
332 | |||
333 | settings->final_agg = agg_lvl; | ||
334 | settings->cmd |= AGG_CHG; | ||
335 | settings->cur_agg_step = 0; | ||
336 | return true; | ||
337 | } else if (settings->aggressiveness != agg_lvl) { | ||
338 | settings->aggressiveness = agg_lvl; | ||
339 | return true; | ||
340 | } | ||
341 | |||
342 | return false; | ||
343 | } | ||
344 | |||
/* Functional initialization: program the smartdimmer block from
 * @settings.  A NULL @settings or enable == 0 disables smartdimmer and
 * resets the shared brightness to full (255).  Safe to call repeatedly;
 * it is re-invoked from sysfs stores and from nvsd_cmd_handler(). */
void nvsd_init(struct tegra_dc *dc, struct tegra_dc_sd_settings *settings)
{
	u32 i = 0;
	u32 val = 0;
	u32 bw_idx = 0;
	/* TODO: check if HW says SD's available */

	/* If SD's not present or disabled, clear the register and return. */
	if (!settings || settings->enable == 0) {
		/* clear the brightness val, too. */
		if (sd_brightness)
			atomic_set(sd_brightness, 255);

		sd_brightness = NULL;

		if (settings)
			settings->phase_settings_step = 0;
		tegra_dc_writel(dc, 0, DC_DISP_SD_CONTROL);
		return;
	}

	dev_dbg(&dc->ndev->dev, "NVSD Init:\n");

	/* init agg_priorities */
	if (!settings->agg_priorities.agg[0])
		settings->agg_priorities.agg[0] = settings->aggressiveness;

	/* WAR: Settings will not be valid until the next flip.
	 * Thus, set manual K to either HW's current value (if
	 * we're already enabled) or a non-effective value (if
	 * we're about to enable). */
	val = tegra_dc_readl(dc, DC_DISP_SD_CONTROL);

	if (val & SD_ENABLE_NORMAL)
		i = tegra_dc_readl(dc, DC_DISP_SD_HW_K_VALUES);
	else
		i = 0; /* 0 values for RGB = 1.0, i.e. non-affected */

	tegra_dc_writel(dc, i, DC_DISP_SD_MAN_K_VALUES);
	/* Enable manual correction mode here so that changing the
	 * settings won't immediately impact display behavior. */
	val |= SD_CORRECTION_MODE_MAN;
	tegra_dc_writel(dc, val, DC_DISP_SD_CONTROL);

	bw_idx = nvsd_get_bw_idx(settings);

	/* Write LUT (skipped while a phase-in command owns the LUT). */
	if (!settings->cmd) {
		dev_dbg(&dc->ndev->dev, "  LUT:\n");

		for (i = 0; i < DC_DISP_SD_LUT_NUM; i++) {
			val = SD_LUT_R(settings->lut[bw_idx][i].r) |
				SD_LUT_G(settings->lut[bw_idx][i].g) |
				SD_LUT_B(settings->lut[bw_idx][i].b);
			tegra_dc_writel(dc, val, DC_DISP_SD_LUT(i));

			dev_dbg(&dc->ndev->dev, "    %d: 0x%08x\n", i, val);
		}
	}

	/* Write BL TF */
	if (!settings->cmd) {
		dev_dbg(&dc->ndev->dev, "  BL_TF:\n");

		for (i = 0; i < DC_DISP_SD_BL_TF_NUM; i++) {
			val = SD_BL_TF_POINT_0(settings->bltf[bw_idx][i][0]) |
				SD_BL_TF_POINT_1(settings->bltf[bw_idx][i][1]) |
				SD_BL_TF_POINT_2(settings->bltf[bw_idx][i][2]) |
				SD_BL_TF_POINT_3(settings->bltf[bw_idx][i][3]);

			tegra_dc_writel(dc, val, DC_DISP_SD_BL_TF(i));

			dev_dbg(&dc->ndev->dev, "    %d: 0x%08x\n", i, val);
		}
	} else if ((settings->cmd & PHASE_IN)) {
		settings->cmd &= ~PHASE_IN;
		/* Write NO_OP values for BLTF (identity = 0xFF) so the
		 * phase-in starts from an unmodified backlight. */
		for (i = 0; i < DC_DISP_SD_BL_TF_NUM; i++) {
			val = SD_BL_TF_POINT_0(0xFF) |
				SD_BL_TF_POINT_1(0xFF) |
				SD_BL_TF_POINT_2(0xFF) |
				SD_BL_TF_POINT_3(0xFF);

			tegra_dc_writel(dc, val, DC_DISP_SD_BL_TF(i));

			dev_dbg(&dc->ndev->dev, "    %d: 0x%08x\n", i, val);
		}
	}

	/* Set step correctly on init */
	if (!settings->cmd && settings->phase_in_settings) {
		settings->num_phase_in_steps = STEPS_PER_AGG_LVL *
			settings->aggressiveness;
		settings->phase_settings_step = settings->enable ?
			settings->num_phase_in_steps : 0;
	}

	/* Write Coeff */
	val = SD_CSC_COEFF_R(settings->coeff.r) |
		SD_CSC_COEFF_G(settings->coeff.g) |
		SD_CSC_COEFF_B(settings->coeff.b);
	tegra_dc_writel(dc, val, DC_DISP_SD_CSC_COEFF);
	dev_dbg(&dc->ndev->dev, "  COEFF: 0x%08x\n", val);

	/* Write BL Params */
	val = SD_BLP_TIME_CONSTANT(settings->blp.time_constant) |
		SD_BLP_STEP(settings->blp.step);
	tegra_dc_writel(dc, val, DC_DISP_SD_BL_PARAMETERS);
	dev_dbg(&dc->ndev->dev, "  BLP: 0x%08x\n", val);

	/* Write Auto/Manual PWM */
	val = (settings->use_auto_pwm) ? SD_BLC_MODE_AUTO : SD_BLC_MODE_MAN;
	tegra_dc_writel(dc, val, DC_DISP_SD_BL_CONTROL);
	dev_dbg(&dc->ndev->dev, "  BL_CONTROL: 0x%08x\n", val);

	/* Write Flicker Control */
	val = SD_FC_TIME_LIMIT(settings->fc.time_limit) |
		SD_FC_THRESHOLD(settings->fc.threshold);
	tegra_dc_writel(dc, val, DC_DISP_SD_FLICKER_CONTROL);
	dev_dbg(&dc->ndev->dev, "  FLICKER_CONTROL: 0x%08x\n", val);

	/* Manage SD Control */
	val = 0;
	/* Stay in manual correction mode until the next flip. */
	val |= SD_CORRECTION_MODE_MAN;
	/* Enable / One-Shot */
	val |= (settings->enable == 2) ?
		(SD_ENABLE_ONESHOT | SD_ONESHOT_ENABLE) :
		SD_ENABLE_NORMAL;
	/* HW Update Delay */
	val |= SD_HW_UPDATE_DLY(settings->hw_update_delay);
	/* Video Luma */
	val |= (settings->use_vid_luma) ? SD_USE_VID_LUMA : 0;
	/* Aggressiveness */
	val |= SD_AGGRESSIVENESS(settings->aggressiveness);
	/* Bin Width (value derived from bw_idx) */
	val |= bw_idx << 3;
	/* Finally, Write SD Control */
	tegra_dc_writel(dc, val, DC_DISP_SD_CONTROL);
	dev_dbg(&dc->ndev->dev, "  SD_CONTROL: 0x%08x\n", val);

	/* set the brightness pointer */
	sd_brightness = settings->sd_brightness;

	/* note that we're in manual K until the next flip */
	atomic_set(&man_k_until_blank, 1);
}
493 | |||
/* Periodic update, called once per frame/flip.  Returns true when the
 * shared brightness value changed (caller should push it to the
 * backlight), false when nothing needs updating. */
bool nvsd_update_brightness(struct tegra_dc *dc)
{
	u32 val = 0;
	int cur_sd_brightness;
	struct tegra_dc_sd_settings *settings = dc->out->sd_settings;

	if (sd_brightness) {
		/* Drop the manual-correction WAR armed in nvsd_init() on
		 * the first update after a flip, unless phase_in_adjustments
		 * keeps driving manual K itself. */
		if (atomic_read(&man_k_until_blank) &&
					!settings->phase_in_adjustments) {
			val = tegra_dc_readl(dc, DC_DISP_SD_CONTROL);
			val &= ~SD_CORRECTION_MODE_MAN;
			tegra_dc_writel(dc, val, DC_DISP_SD_CONTROL);
			atomic_set(&man_k_until_blank, 0);
		}

		if (settings->cmd)
			nvsd_cmd_handler(settings, dc);

		/* nvsd_cmd_handler may turn off didim */
		if (!settings->enable)
			return true;

		cur_sd_brightness = atomic_read(sd_brightness);

		/* read brightness value */
		val = tegra_dc_readl(dc, DC_DISP_SD_BL_CONTROL);
		val = SD_BLC_BRIGHTNESS(val);

		if (settings->phase_in_adjustments) {
			return nvsd_phase_in_adjustments(dc, settings);
		} else if (val != (u32)cur_sd_brightness) {
			/* set brightness value and note the update */
			atomic_set(sd_brightness, (int)val);
			return true;
		}
	}

	/* No update needed. */
	return false;
}
535 | |||
536 | static ssize_t nvsd_lut_show(struct tegra_dc_sd_settings *sd_settings, | ||
537 | char *buf, ssize_t res) | ||
538 | { | ||
539 | u32 i; | ||
540 | u32 j; | ||
541 | |||
542 | for (i = 0; i < NUM_BIN_WIDTHS; i++) { | ||
543 | res += snprintf(buf + res, PAGE_SIZE - res, | ||
544 | "Bin Width: %d\n", 1 << i); | ||
545 | |||
546 | for (j = 0; j < DC_DISP_SD_LUT_NUM; j++) { | ||
547 | res += snprintf(buf + res, | ||
548 | PAGE_SIZE - res, | ||
549 | "%d: R: %3d / G: %3d / B: %3d\n", | ||
550 | j, | ||
551 | sd_settings->lut[i][j].r, | ||
552 | sd_settings->lut[i][j].g, | ||
553 | sd_settings->lut[i][j].b); | ||
554 | } | ||
555 | } | ||
556 | return res; | ||
557 | } | ||
558 | |||
559 | static ssize_t nvsd_bltf_show(struct tegra_dc_sd_settings *sd_settings, | ||
560 | char *buf, ssize_t res) | ||
561 | { | ||
562 | u32 i; | ||
563 | u32 j; | ||
564 | |||
565 | for (i = 0; i < NUM_BIN_WIDTHS; i++) { | ||
566 | res += snprintf(buf + res, PAGE_SIZE - res, | ||
567 | "Bin Width: %d\n", 1 << i); | ||
568 | |||
569 | for (j = 0; j < DC_DISP_SD_BL_TF_NUM; j++) { | ||
570 | res += snprintf(buf + res, | ||
571 | PAGE_SIZE - res, | ||
572 | "%d: 0: %3d / 1: %3d / 2: %3d / 3: %3d\n", | ||
573 | j, | ||
574 | sd_settings->bltf[i][j][0], | ||
575 | sd_settings->bltf[i][j][1], | ||
576 | sd_settings->bltf[i][j][2], | ||
577 | sd_settings->bltf[i][j][3]); | ||
578 | } | ||
579 | } | ||
580 | return res; | ||
581 | } | ||
582 | |||
/* Sysfs accessors */
/* 'show' handler shared by all smartdimmer attributes: recover the
 * tegra_dc from the kobject's parent device and format the attribute
 * matched by the IS_NVSD_ATTR() macro. */
static ssize_t nvsd_settings_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	struct device *dev = container_of((kobj->parent), struct device, kobj);
	struct nvhost_device *ndev = to_nvhost_device(dev);
	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
	struct tegra_dc_sd_settings *sd_settings = dc->out->sd_settings;
	ssize_t res = 0;

	if (sd_settings) {
		if (IS_NVSD_ATTR(enable))
			res = snprintf(buf, PAGE_SIZE, "%d\n",
				sd_settings->enable);
		else if (IS_NVSD_ATTR(aggressiveness))
			res = snprintf(buf, PAGE_SIZE, "%d\n",
				sd_settings->aggressiveness);
		else if (IS_NVSD_ATTR(phase_in_settings))
			res = snprintf(buf, PAGE_SIZE, "%d\n",
				sd_settings->phase_in_settings);
		else if (IS_NVSD_ATTR(phase_in_adjustments))
			res = snprintf(buf, PAGE_SIZE, "%d\n",
				sd_settings->phase_in_adjustments);
		else if (IS_NVSD_ATTR(bin_width))
			res = snprintf(buf, PAGE_SIZE, "%d\n",
				sd_settings->bin_width);
		else if (IS_NVSD_ATTR(hw_update_delay))
			res = snprintf(buf, PAGE_SIZE, "%d\n",
				sd_settings->hw_update_delay);
		else if (IS_NVSD_ATTR(use_vid_luma))
			res = snprintf(buf, PAGE_SIZE, "%d\n",
				sd_settings->use_vid_luma);
		else if (IS_NVSD_ATTR(coeff))
			res = snprintf(buf, PAGE_SIZE,
				"R: %d / G: %d / B: %d\n",
				sd_settings->coeff.r,
				sd_settings->coeff.g,
				sd_settings->coeff.b);
		else if (IS_NVSD_ATTR(blp_time_constant))
			res = snprintf(buf, PAGE_SIZE, "%d\n",
				sd_settings->blp.time_constant);
		else if (IS_NVSD_ATTR(blp_step))
			res = snprintf(buf, PAGE_SIZE, "%d\n",
				sd_settings->blp.step);
		else if (IS_NVSD_ATTR(fc_time_limit))
			res = snprintf(buf, PAGE_SIZE, "%d\n",
				sd_settings->fc.time_limit);
		else if (IS_NVSD_ATTR(fc_threshold))
			res = snprintf(buf, PAGE_SIZE, "%d\n",
				sd_settings->fc.threshold);
		else if (IS_NVSD_ATTR(lut))
			res = nvsd_lut_show(sd_settings, buf, res);
		else if (IS_NVSD_ATTR(bltf))
			res = nvsd_bltf_show(sd_settings, buf, res);
		else
			res = -EINVAL;
	} else {
		/* This shouldn't be reachable. But just in case... */
		res = -EINVAL;
	}

	return res;
}
646 | |||
647 | #define nvsd_check_and_update(_min, _max, _varname) { \ | ||
648 | int val = simple_strtol(buf, NULL, 10); \ | ||
649 | if (val >= _min && val <= _max) { \ | ||
650 | sd_settings->_varname = val; \ | ||
651 | settings_updated = true; \ | ||
652 | } } | ||
653 | |||
654 | #define nvsd_get_multi(_ele, _num, _act, _min, _max) { \ | ||
655 | char *b, *c, *orig_b; \ | ||
656 | b = orig_b = kstrdup(buf, GFP_KERNEL); \ | ||
657 | for (_act = 0; _act < _num; _act++) { \ | ||
658 | if (!b) \ | ||
659 | break; \ | ||
660 | b = strim(b); \ | ||
661 | c = strsep(&b, " "); \ | ||
662 | if (!strlen(c)) \ | ||
663 | break; \ | ||
664 | _ele[_act] = simple_strtol(c, NULL, 10); \ | ||
665 | if (_ele[_act] < _min || _ele[_act] > _max) \ | ||
666 | break; \ | ||
667 | } \ | ||
668 | if (orig_b) \ | ||
669 | kfree(orig_b); \ | ||
670 | } | ||
671 | |||
672 | static int nvsd_lut_store(struct tegra_dc_sd_settings *sd_settings, | ||
673 | const char *buf) | ||
674 | { | ||
675 | int ele[3 * DC_DISP_SD_LUT_NUM * NUM_BIN_WIDTHS]; | ||
676 | int i = 0; | ||
677 | int j = 0; | ||
678 | int num = 3 * DC_DISP_SD_LUT_NUM * NUM_BIN_WIDTHS; | ||
679 | |||
680 | nvsd_get_multi(ele, num, i, 0, 255); | ||
681 | |||
682 | if (i != num) | ||
683 | return -EINVAL; | ||
684 | |||
685 | for (i = 0; i < NUM_BIN_WIDTHS; i++) { | ||
686 | for (j = 0; j < DC_DISP_SD_LUT_NUM; j++) { | ||
687 | sd_settings->lut[i][j].r = | ||
688 | ele[i * NUM_BIN_WIDTHS + j * 3 + 0]; | ||
689 | sd_settings->lut[i][j].g = | ||
690 | ele[i * NUM_BIN_WIDTHS + j * 3 + 1]; | ||
691 | sd_settings->lut[i][j].b = | ||
692 | ele[i * NUM_BIN_WIDTHS + j * 3 + 2]; | ||
693 | } | ||
694 | } | ||
695 | return 0; | ||
696 | } | ||
697 | |||
698 | static int nvsd_bltf_store(struct tegra_dc_sd_settings *sd_settings, | ||
699 | const char *buf) | ||
700 | { | ||
701 | int ele[4 * DC_DISP_SD_BL_TF_NUM * NUM_BIN_WIDTHS]; | ||
702 | int i = 0, j = 0, num = ARRAY_SIZE(ele); | ||
703 | |||
704 | nvsd_get_multi(ele, num, i, 0, 255); | ||
705 | |||
706 | if (i != num) | ||
707 | return -EINVAL; | ||
708 | |||
709 | for (i = 0; i < NUM_BIN_WIDTHS; i++) { | ||
710 | for (j = 0; j < DC_DISP_SD_BL_TF_NUM; j++) { | ||
711 | size_t base = (i * NUM_BIN_WIDTHS * | ||
712 | DC_DISP_SD_BL_TF_NUM) + (j * 4); | ||
713 | sd_settings->bltf[i][j][0] = ele[base + 0]; | ||
714 | sd_settings->bltf[i][j][1] = ele[base + 1]; | ||
715 | sd_settings->bltf[i][j][2] = ele[base + 2]; | ||
716 | sd_settings->bltf[i][j][3] = ele[base + 3]; | ||
717 | } | ||
718 | } | ||
719 | |||
720 | return 0; | ||
721 | } | ||
722 | |||
/* 'store' handler shared by all writable smartdimmer attributes:
 * validate and apply the written value via the nvsd_check_and_update()
 * macro (which sets the local 'settings_updated' flag), then re-program
 * the hardware with nvsd_init() if anything changed.  Returns @count on
 * success or a negative errno. */
static ssize_t nvsd_settings_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct device *dev = container_of((kobj->parent), struct device, kobj);
	struct nvhost_device *ndev = to_nvhost_device(dev);
	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
	struct tegra_dc_sd_settings *sd_settings = dc->out->sd_settings;
	ssize_t res = count;
	bool settings_updated = false;
	long int result;
	int err;

	if (sd_settings) {
		if (IS_NVSD_ATTR(enable)) {
			if (sd_settings->phase_in_settings) {
				err = strict_strtol(buf, 10, &result);
				if (err)
					return err;

				/* Phase-in path: queue a command; the 1..1
				 * range records the value only when the
				 * request actually queued something. */
				if (nvsd_update_enable(sd_settings, result))
					nvsd_check_and_update(1, 1, enable);

			} else {
				nvsd_check_and_update(0, 1, enable);
			}
		} else if (IS_NVSD_ATTR(aggressiveness)) {
			err = strict_strtol(buf, 10, &result);
			if (err)
				return err;

			/* With phase-in active the change is applied
			 * gradually by nvsd_cmd_handler(), so skip the
			 * immediate re-init below. */
			if (nvsd_update_agg(sd_settings, result)
					&& !sd_settings->phase_in_settings)
				settings_updated = true;

		} else if (IS_NVSD_ATTR(phase_in_settings)) {
			nvsd_check_and_update(0, 1, phase_in_settings);
		} else if (IS_NVSD_ATTR(phase_in_adjustments)) {
			nvsd_check_and_update(0, 1, phase_in_adjustments);
		} else if (IS_NVSD_ATTR(bin_width)) {
			nvsd_check_and_update(0, 8, bin_width);
		} else if (IS_NVSD_ATTR(hw_update_delay)) {
			nvsd_check_and_update(0, 2, hw_update_delay);
		} else if (IS_NVSD_ATTR(use_vid_luma)) {
			nvsd_check_and_update(0, 1, use_vid_luma);
		} else if (IS_NVSD_ATTR(coeff)) {
			int ele[3], i = 0, num = 3;
			nvsd_get_multi(ele, num, i, 0, 15);

			if (i == num) {
				sd_settings->coeff.r = ele[0];
				sd_settings->coeff.g = ele[1];
				sd_settings->coeff.b = ele[2];
				settings_updated = true;
			} else {
				res = -EINVAL;
			}
		} else if (IS_NVSD_ATTR(blp_time_constant)) {
			nvsd_check_and_update(0, 1024, blp.time_constant);
		} else if (IS_NVSD_ATTR(blp_step)) {
			nvsd_check_and_update(0, 255, blp.step);
		} else if (IS_NVSD_ATTR(fc_time_limit)) {
			nvsd_check_and_update(0, 255, fc.time_limit);
		} else if (IS_NVSD_ATTR(fc_threshold)) {
			nvsd_check_and_update(0, 255, fc.threshold);
		} else if (IS_NVSD_ATTR(lut)) {
			if (nvsd_lut_store(sd_settings, buf))
				res = -EINVAL;
			else
				settings_updated = true;
		} else if (IS_NVSD_ATTR(bltf)) {
			if (nvsd_bltf_store(sd_settings, buf))
				res = -EINVAL;
			else
				settings_updated = true;
		} else {
			res = -EINVAL;
		}

		/* Re-init if our settings were updated. */
		if (settings_updated) {
			mutex_lock(&dc->lock);
			if (!dc->enabled) {
				mutex_unlock(&dc->lock);
				return -ENODEV;
			}
			mutex_unlock(&dc->lock);
			/* NOTE(review): the lock is released before
			 * nvsd_init(), so a concurrent disable can race
			 * with the register programming — confirm whether
			 * this is intentional (nvsd_init may not be safe
			 * to call under dc->lock). */

			nvsd_init(dc, sd_settings);

			/* Update backlight state IFF we're disabling! */
			if (!sd_settings->enable && sd_settings->bl_device) {
				/* Do the actual brightness update outside of
				 * the mutex */
				struct platform_device *pdev =
					sd_settings->bl_device;
				struct backlight_device *bl =
					platform_get_drvdata(pdev);

				if (bl)
					backlight_update_status(bl);
			}
		}
	} else {
		/* This shouldn't be reachable. But just in case... */
		res = -EINVAL;
	}

	return res;
}
832 | |||
833 | #define NVSD_PRINT_REG(__name) { \ | ||
834 | u32 val = tegra_dc_readl(dc, __name); \ | ||
835 | res += snprintf(buf + res, PAGE_SIZE - res, #__name ": 0x%08x\n", \ | ||
836 | val); \ | ||
837 | } | ||
838 | |||
839 | #define NVSD_PRINT_REG_ARRAY(__name) { \ | ||
840 | u32 val = 0, i = 0; \ | ||
841 | res += snprintf(buf + res, PAGE_SIZE - res, #__name ":\n"); \ | ||
842 | for (i = 0; i < __name##_NUM; i++) { \ | ||
843 | val = tegra_dc_readl(dc, __name(i)); \ | ||
844 | res += snprintf(buf + res, PAGE_SIZE - res, " %d: 0x%08x\n", \ | ||
845 | i, val); \ | ||
846 | } \ | ||
847 | } | ||
848 | |||
/* 'show' handler for the read-only 'registers' attribute: dump every
 * smartdimmer register via the NVSD_PRINT_REG* macros (which append to
 * 'buf'/'res').  Returns -ENODEV when the display controller is off. */
static ssize_t nvsd_registers_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	struct device *dev = container_of((kobj->parent), struct device, kobj);
	struct nvhost_device *ndev = to_nvhost_device(dev);
	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
	ssize_t res = 0;

	mutex_lock(&dc->lock);
	if (!dc->enabled) {
		mutex_unlock(&dc->lock);
		return -ENODEV;
	}

	mutex_unlock(&dc->lock);
	/* NOTE(review): dc->lock is dropped before the register reads
	 * below, so the controller could be disabled mid-dump — confirm
	 * whether tegra_dc_readl tolerates that. */
	NVSD_PRINT_REG(DC_DISP_SD_CONTROL);
	NVSD_PRINT_REG(DC_DISP_SD_CSC_COEFF);
	NVSD_PRINT_REG_ARRAY(DC_DISP_SD_LUT);
	NVSD_PRINT_REG(DC_DISP_SD_FLICKER_CONTROL);
	NVSD_PRINT_REG(DC_DISP_SD_PIXEL_COUNT);
	NVSD_PRINT_REG_ARRAY(DC_DISP_SD_HISTOGRAM);
	NVSD_PRINT_REG(DC_DISP_SD_BL_PARAMETERS);
	NVSD_PRINT_REG_ARRAY(DC_DISP_SD_BL_TF);
	NVSD_PRINT_REG(DC_DISP_SD_BL_CONTROL);
	NVSD_PRINT_REG(DC_DISP_SD_HW_K_VALUES);
	NVSD_PRINT_REG(DC_DISP_SD_MAN_K_VALUES);

	return res;
}
878 | |||
879 | /* Sysfs initializer */ | ||
880 | int nvsd_create_sysfs(struct device *dev) | ||
881 | { | ||
882 | int retval = 0; | ||
883 | |||
884 | nvsd_kobj = kobject_create_and_add("smartdimmer", &dev->kobj); | ||
885 | |||
886 | if (!nvsd_kobj) | ||
887 | return -ENOMEM; | ||
888 | |||
889 | retval = sysfs_create_group(nvsd_kobj, &nvsd_attr_group); | ||
890 | |||
891 | if (retval) { | ||
892 | kobject_put(nvsd_kobj); | ||
893 | dev_err(dev, "%s: failed to create attributes\n", __func__); | ||
894 | } | ||
895 | |||
896 | return retval; | ||
897 | } | ||
898 | |||
899 | /* Sysfs destructor */ | ||
900 | void __devexit nvsd_remove_sysfs(struct device *dev) | ||
901 | { | ||
902 | if (nvsd_kobj) { | ||
903 | sysfs_remove_group(nvsd_kobj, &nvsd_attr_group); | ||
904 | kobject_put(nvsd_kobj); | ||
905 | } | ||
906 | } | ||
diff --git a/drivers/video/tegra/dc/nvsd.h b/drivers/video/tegra/dc/nvsd.h new file mode 100644 index 00000000000..f7fc4a1ead6 --- /dev/null +++ b/drivers/video/tegra/dc/nvsd.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/nvsd.h | ||
3 | * | ||
4 | * Copyright (c) 2010-2011, NVIDIA Corporation. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
#ifndef __DRIVERS_VIDEO_TEGRA_DC_NVSD_H
#define __DRIVERS_VIDEO_TEGRA_DC_NVSD_H

/* Initialize smartdimmer state on @dc from the board-supplied @settings. */
void nvsd_init(struct tegra_dc *dc, struct tegra_dc_sd_settings *settings);
/* NOTE(review): presumably reports whether the backlight level changed
 * and needs to be pushed out — confirm against nvsd.c. */
bool nvsd_update_brightness(struct tegra_dc *dc);
/* Create/remove the "smartdimmer" sysfs attribute group under @dev. */
int nvsd_create_sysfs(struct device *dev);
void __devexit nvsd_remove_sysfs(struct device *dev);

#endif
diff --git a/drivers/video/tegra/dc/rgb.c b/drivers/video/tegra/dc/rgb.c new file mode 100644 index 00000000000..2112643058f --- /dev/null +++ b/drivers/video/tegra/dc/rgb.c | |||
@@ -0,0 +1,160 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/dc/rgb.c | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * Author: Erik Gilling <konkers@android.com> | ||
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | |||
20 | #include <mach/dc.h> | ||
21 | |||
22 | #include "dc_reg.h" | ||
23 | #include "dc_priv.h" | ||
24 | |||
25 | |||
/*
 * Register/value pairs written when the board supplies explicit out_pins:
 * only enables and data are forced; polarity 1/3 are left to
 * tegra_dc_set_out_pin_polars().  Consumed by tegra_dc_write_table().
 */
static const u32 tegra_dc_rgb_enable_partial_pintable[] = {
	DC_COM_PIN_OUTPUT_ENABLE0,	0x00000000,
	DC_COM_PIN_OUTPUT_ENABLE1,	0x00000000,
	DC_COM_PIN_OUTPUT_ENABLE2,	0x00000000,
	DC_COM_PIN_OUTPUT_ENABLE3,	0x00000000,
	DC_COM_PIN_OUTPUT_POLARITY0,	0x00000000,
	DC_COM_PIN_OUTPUT_POLARITY2,	0x00000000,
	DC_COM_PIN_OUTPUT_DATA0,	0x00000000,
	DC_COM_PIN_OUTPUT_DATA1,	0x00000000,
	DC_COM_PIN_OUTPUT_DATA2,	0x00000000,
	DC_COM_PIN_OUTPUT_DATA3,	0x00000000,
};
38 | |||
/*
 * Full default pin setup used when the board does NOT supply out_pins:
 * all pins output-enabled (0 = enabled), default polarity except
 * POLARITY1 bit 24, data lines driven low.
 */
static const u32 tegra_dc_rgb_enable_pintable[] = {
	DC_COM_PIN_OUTPUT_ENABLE0,	0x00000000,
	DC_COM_PIN_OUTPUT_ENABLE1,	0x00000000,
	DC_COM_PIN_OUTPUT_ENABLE2,	0x00000000,
	DC_COM_PIN_OUTPUT_ENABLE3,	0x00000000,
	DC_COM_PIN_OUTPUT_POLARITY0,	0x00000000,
	DC_COM_PIN_OUTPUT_POLARITY1,	0x01000000,
	DC_COM_PIN_OUTPUT_POLARITY2,	0x00000000,
	DC_COM_PIN_OUTPUT_POLARITY3,	0x00000000,
	DC_COM_PIN_OUTPUT_DATA0,	0x00000000,
	DC_COM_PIN_OUTPUT_DATA1,	0x00000000,
	DC_COM_PIN_OUTPUT_DATA2,	0x00000000,
	DC_COM_PIN_OUTPUT_DATA3,	0x00000000,
};
53 | |||
/*
 * Default output-select programming.  tegra_dc_rgb_enable() copies this
 * table into a mutable scratch array and patches the SELECT5 value based
 * on the board's out_sel_configs before writing it out.
 */
static const u32 tegra_dc_rgb_enable_out_sel_pintable[] = {
	DC_COM_PIN_OUTPUT_SELECT0,	0x00000000,
	DC_COM_PIN_OUTPUT_SELECT1,	0x00000000,
	DC_COM_PIN_OUTPUT_SELECT2,	0x00000000,
#ifdef CONFIG_TEGRA_SILICON_PLATFORM
	DC_COM_PIN_OUTPUT_SELECT3,	0x00000000,
#else
	/* The display panel sub-board used on FPGA platforms (panel 86)
	   is non-standard. It expects the Data Enable signal on the WR
	   pin instead of the DE pin. */
	DC_COM_PIN_OUTPUT_SELECT3,	0x00200000,
#endif
	DC_COM_PIN_OUTPUT_SELECT4,	0x00210222,
	DC_COM_PIN_OUTPUT_SELECT5,	0x00002200,
	DC_COM_PIN_OUTPUT_SELECT6,	0x00020000,
};
70 | |||
/*
 * Pin state written by tegra_dc_rgb_disable(): tristate the output
 * enables (0x55... pattern), park the data lines (0xaa... pattern) and
 * reset every output-select register to its default.
 */
static const u32 tegra_dc_rgb_disable_pintable[] = {
	DC_COM_PIN_OUTPUT_ENABLE0,	0x55555555,
	DC_COM_PIN_OUTPUT_ENABLE1,	0x55150005,
	DC_COM_PIN_OUTPUT_ENABLE2,	0x55555555,
	DC_COM_PIN_OUTPUT_ENABLE3,	0x55555555,
	DC_COM_PIN_OUTPUT_POLARITY0,	0x00000000,
	DC_COM_PIN_OUTPUT_POLARITY1,	0x00000000,
	DC_COM_PIN_OUTPUT_POLARITY2,	0x00000000,
	DC_COM_PIN_OUTPUT_POLARITY3,	0x00000000,
	DC_COM_PIN_OUTPUT_DATA0,	0xaaaaaaaa,
	DC_COM_PIN_OUTPUT_DATA1,	0xaaaaaaaa,
	DC_COM_PIN_OUTPUT_DATA2,	0xaaaaaaaa,
	DC_COM_PIN_OUTPUT_DATA3,	0xaaaaaaaa,
	DC_COM_PIN_OUTPUT_SELECT0,	0x00000000,
	DC_COM_PIN_OUTPUT_SELECT1,	0x00000000,
	DC_COM_PIN_OUTPUT_SELECT2,	0x00000000,
	DC_COM_PIN_OUTPUT_SELECT3,	0x00000000,
	DC_COM_PIN_OUTPUT_SELECT4,	0x00000000,
	DC_COM_PIN_OUTPUT_SELECT5,	0x00000000,
	DC_COM_PIN_OUTPUT_SELECT6,	0x00000000,
};
92 | |||
/*
 * Bring up the RGB (parallel LCD) output on @dc: power up the display
 * partitions, select continuous display mode, program the pin
 * enable/polarity/data tables, then program the output-select registers
 * (with any board-specific LM1 routing overrides applied).
 */
void tegra_dc_rgb_enable(struct tegra_dc *dc)
{
	int i;
	u32 out_sel_pintable[ARRAY_SIZE(tegra_dc_rgb_enable_out_sel_pintable)];

	tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
			PW4_ENABLE | PM0_ENABLE | PM1_ENABLE,
			DC_CMD_DISPLAY_POWER_CONTROL);

	tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);

	/* Boards with an explicit pin list get per-pin polarity setup plus
	 * the partial table; otherwise fall back to the full default table. */
	if (dc->out->out_pins) {
		tegra_dc_set_out_pin_polars(dc, dc->out->out_pins,
					    dc->out->n_out_pins);
		tegra_dc_write_table(dc, tegra_dc_rgb_enable_partial_pintable);
	} else {
		tegra_dc_write_table(dc, tegra_dc_rgb_enable_pintable);
	}

	/* Work on a mutable copy of the const default out-select table. */
	memcpy(out_sel_pintable, tegra_dc_rgb_enable_out_sel_pintable,
		sizeof(tegra_dc_rgb_enable_out_sel_pintable));

	if (dc->out && dc->out->out_sel_configs) {
		u8 *out_sels = dc->out->out_sel_configs;
		for (i = 0; i < dc->out->n_out_sel_configs; i++) {
			switch (out_sels[i]) {
			/* The table holds (reg, value) pairs, so index
			 * 5*2+1 is the value slot of the 6th pair, i.e.
			 * DC_COM_PIN_OUTPUT_SELECT5: patch its LM1 field. */
			case TEGRA_PIN_OUT_CONFIG_SEL_LM1_M1:
				out_sel_pintable[5*2+1] =
					(out_sel_pintable[5*2+1] &
					~PIN5_LM1_LCD_M1_OUTPUT_MASK) |
					PIN5_LM1_LCD_M1_OUTPUT_M1;
				break;
			case TEGRA_PIN_OUT_CONFIG_SEL_LM1_LD21:
				out_sel_pintable[5*2+1] =
					(out_sel_pintable[5*2+1] &
					~PIN5_LM1_LCD_M1_OUTPUT_MASK) |
					PIN5_LM1_LCD_M1_OUTPUT_LD21;
				break;
			case TEGRA_PIN_OUT_CONFIG_SEL_LM1_PM1:
				out_sel_pintable[5*2+1] =
					(out_sel_pintable[5*2+1] &
					~PIN5_LM1_LCD_M1_OUTPUT_MASK) |
					PIN5_LM1_LCD_M1_OUTPUT_PM1;
				break;
			default:
				dev_err(&dc->ndev->dev,
					"Invalid pin config[%d]: %d\n",
					 i, out_sels[i]);
				break;
			}
		}
	}

	tegra_dc_write_table(dc, out_sel_pintable);
}
148 | |||
/*
 * Shut down the RGB output: cut display power partitions and tristate
 * all output pins via the disable pin table.
 */
void tegra_dc_rgb_disable(struct tegra_dc *dc)
{
	tegra_dc_writel(dc, 0x00000000, DC_CMD_DISPLAY_POWER_CONTROL);

	tegra_dc_write_table(dc, tegra_dc_rgb_disable_pintable);
}
155 | |||
/* Output-type ops vector for the RGB/parallel-LCD path. */
struct tegra_dc_out_ops tegra_dc_rgb_ops = {
	.enable = tegra_dc_rgb_enable,
	.disable = tegra_dc_rgb_disable,
};
160 | |||
diff --git a/drivers/video/tegra/fb.c b/drivers/video/tegra/fb.c new file mode 100644 index 00000000000..7bc3ab06c47 --- /dev/null +++ b/drivers/video/tegra/fb.c | |||
@@ -0,0 +1,625 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/fb.c | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * Author: Erik Gilling <konkers@android.com> | ||
6 | * Colin Cross <ccross@android.com> | ||
7 | * Travis Geiselbrecht <travis@palm.com> | ||
8 | * | ||
9 | * Copyright (C) 2010-2011 NVIDIA Corporation | ||
10 | * | ||
11 | * This software is licensed under the terms of the GNU General Public | ||
12 | * License version 2, as published by the Free Software Foundation, and | ||
13 | * may be copied, distributed, and modified under those terms. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/fb.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/string.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/uaccess.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/file.h> | ||
31 | #include <linux/workqueue.h> | ||
32 | |||
33 | #include <asm/atomic.h> | ||
34 | |||
35 | #include <video/tegrafb.h> | ||
36 | |||
37 | #include <mach/dc.h> | ||
38 | #include <mach/fb.h> | ||
39 | #include <linux/nvhost.h> | ||
40 | #include <mach/nvmap.h> | ||
41 | |||
42 | #include "host/dev.h" | ||
43 | #include "nvmap/nvmap.h" | ||
44 | #include "dc/dc_priv.h" | ||
45 | |||
46 | /* Pad pitch to 16-byte boundary. */ | ||
47 | #define TEGRA_LINEAR_PITCH_ALIGNMENT 16 | ||
48 | |||
/* Per-framebuffer driver state; stored in fb_info->par. */
struct tegra_fb_info {
	struct tegra_dc_win	*win;	/* DC window scanning out this fb */
	struct nvhost_device	*ndev;	/* owning nvhost device (for logging) */
	struct fb_info		*info;	/* back-pointer to the fbdev core object */
	bool			valid;	/* true once fb_mem was successfully mapped */

	struct resource		*fb_mem;	/* physical fb memory, may be NULL */

	int			xres;	/* native panel resolution */
	int			yres;
};

/* palette array used by the fbcon */
static u32 pseudo_palette[16];
63 | |||
64 | static int tegra_fb_check_var(struct fb_var_screeninfo *var, | ||
65 | struct fb_info *info) | ||
66 | { | ||
67 | if ((var->yres * var->xres * var->bits_per_pixel / 8 * 2) > | ||
68 | info->screen_size) | ||
69 | return -EINVAL; | ||
70 | |||
71 | /* double yres_virtual to allow double buffering through pan_display */ | ||
72 | var->yres_virtual = var->yres * 2; | ||
73 | |||
74 | return 0; | ||
75 | } | ||
76 | |||
/*
 * fb_ops.fb_set_par: apply the current info->var to the DC window.
 *
 * Two independent halves: if bits_per_pixel is set, program the RGB
 * component layout, window pixel format and stride; if pixclock is set,
 * look up the nearest registered video mode and push it to the DC.
 */
static int tegra_fb_set_par(struct fb_info *info)
{
	struct tegra_fb_info *tegra_fb = info->par;
	struct fb_var_screeninfo *var = &info->var;

	if (var->bits_per_pixel) {
		/* we only support RGB ordering for now */
		switch (var->bits_per_pixel) {
		case 32:
			var->red.offset = 0;
			var->red.length = 8;
			var->green.offset = 8;
			var->green.length = 8;
			var->blue.offset = 16;
			var->blue.length = 8;
			var->transp.offset = 24;
			var->transp.length = 8;
			tegra_fb->win->fmt = TEGRA_WIN_FMT_R8G8B8A8;
			break;
		case 16:
			/* RGB565: no alpha component */
			var->red.offset = 11;
			var->red.length = 5;
			var->green.offset = 5;
			var->green.length = 6;
			var->blue.offset = 0;
			var->blue.length = 5;
			tegra_fb->win->fmt = TEGRA_WIN_FMT_B5G6R5;
			break;

		default:
			return -EINVAL;
		}
		info->fix.line_length = var->xres * var->bits_per_pixel / 8;
		/* Pad the stride to 16-byte boundary. */
		info->fix.line_length = round_up(info->fix.line_length,
						TEGRA_LINEAR_PITCH_ALIGNMENT);
		tegra_fb->win->stride = info->fix.line_length;
		/* packed RGB: no separate chroma planes */
		tegra_fb->win->stride_uv = 0;
		tegra_fb->win->phys_addr_u = 0;
		tegra_fb->win->phys_addr_v = 0;
	}

	if (var->pixclock) {
		bool stereo;
		struct fb_videomode m;

		fb_var_to_videomode(&m, var);

		info->mode = (struct fb_videomode *)
			fb_find_nearest_mode(&m, &info->modelist);
		if (!info->mode) {
			dev_warn(&tegra_fb->ndev->dev, "can't match video mode\n");
			return -EINVAL;
		}

		/*
		 * only enable stereo if the mode supports it and
		 * client requests it
		 */
		stereo = !!(var->vmode & info->mode->vmode &
#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
					FB_VMODE_STEREO_FRAME_PACK);
#else
					FB_VMODE_STEREO_LEFT_RIGHT);
#endif

		tegra_dc_set_fb_mode(tegra_fb->win->dc, info->mode, stereo);

		/* window source and output sizes follow the chosen mode */
		tegra_fb->win->w.full = dfixed_const(info->mode->xres);
		tegra_fb->win->h.full = dfixed_const(info->mode->yres);
		tegra_fb->win->out_w = info->mode->xres;
		tegra_fb->win->out_h = info->mode->yres;
	}
	return 0;
}
152 | |||
153 | static int tegra_fb_setcolreg(unsigned regno, unsigned red, unsigned green, | ||
154 | unsigned blue, unsigned transp, struct fb_info *info) | ||
155 | { | ||
156 | struct fb_var_screeninfo *var = &info->var; | ||
157 | |||
158 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | ||
159 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | ||
160 | u32 v; | ||
161 | |||
162 | if (regno >= 16) | ||
163 | return -EINVAL; | ||
164 | |||
165 | red = (red >> (16 - info->var.red.length)); | ||
166 | green = (green >> (16 - info->var.green.length)); | ||
167 | blue = (blue >> (16 - info->var.blue.length)); | ||
168 | |||
169 | v = (red << var->red.offset) | | ||
170 | (green << var->green.offset) | | ||
171 | (blue << var->blue.offset); | ||
172 | |||
173 | ((u32 *)info->pseudo_palette)[regno] = v; | ||
174 | } | ||
175 | |||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | |||
/*
 * fb_ops.fb_setcmap: install a color map.
 *
 * Small maps (<= 16 entries, e.g. fbcon) go into the pseudo palette via
 * tegra_fb_setcolreg(); larger maps program the DC hardware LUT with the
 * high byte of each 16-bit component.
 */
static int tegra_fb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
	struct tegra_fb_info *tegra_fb = info->par;
	struct tegra_dc *dc = tegra_fb->win->dc;
	int i;
	u16 *red = cmap->red;
	u16 *green = cmap->green;
	u16 *blue = cmap->blue;
	int start = cmap->start;

	/* LUT has 256 entries; reject out-of-range maps */
	if (((unsigned)start > 255) || ((start + cmap->len) > 256))
		return -EINVAL;

	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
		/*
		 * For now we are considering color schemes with
		 * cmap->len <=16 as special case of basic color
		 * scheme to support fbconsole.But for DirectColor
		 * visuals(like the one we actually have, that include
		 * a HW LUT),the way it's intended to work is that the
		 * actual LUT HW is programmed to the intended values,
		 * even for small color maps like those with 16 or fewer
		 * entries. The pseudo_palette is then programmed to the
		 * identity transform.
		 */
		if (cmap->len <= 16) {
			/* Low-color schemes like fbconsole*/
			u16 *transp = cmap->transp;
			u_int vtransp = 0xffff;	/* opaque when no transp array */

			for (i = 0; i < cmap->len; i++) {
				if (transp)
					vtransp = *transp++;
				if (tegra_fb_setcolreg(start++, *red++,
					*green++, *blue++,
					vtransp, info))
						return -EINVAL;
			}
		} else {
			/* High-color schemes*/
			for (i = 0; i < cmap->len; i++) {
				/* keep only the high 8 bits per component */
				dc->fb_lut.r[start+i] = *red++ >> 8;
				dc->fb_lut.g[start+i] = *green++ >> 8;
				dc->fb_lut.b[start+i] = *blue++ >> 8;
			}
			tegra_dc_update_lut(dc, -1, -1);
		}
	}
	return 0;
}
231 | |||
#if defined(CONFIG_FRAMEBUFFER_CONSOLE)
/*
 * Reprogram the DC window from scratch to scan out the fbcon surface at
 * the current pan offsets, then push and wait for the update.  Used as
 * an unblanking flip for the framebuffer console (see tegra_fb_blank).
 */
static void tegra_fb_flip_win(struct tegra_fb_info *tegra_fb)
{
	struct tegra_dc_win *win = tegra_fb->win;
	struct fb_info *info = tegra_fb->info;

	win->x.full = dfixed_const(0);
	win->y.full = dfixed_const(0);
	win->w.full = dfixed_const(tegra_fb->xres);
	win->h.full = dfixed_const(tegra_fb->yres);

	/* TODO: set to output res dc */
	win->out_x = 0;
	win->out_y = 0;
	win->out_w = tegra_fb->xres;
	win->out_h = tegra_fb->yres;
	win->z = 0;
	/* scanout address accounts for the current pan offsets */
	win->phys_addr = info->fix.smem_start +
		(info->var.yoffset * info->fix.line_length) +
		(info->var.xoffset * (info->var.bits_per_pixel / 8));
	win->virt_addr = info->screen_base;

	win->phys_addr_u = 0;
	win->phys_addr_v = 0;
	win->stride = info->fix.line_length;
	win->stride_uv = 0;

	switch (info->var.bits_per_pixel) {
	default:
		WARN_ON(1);
		/* fall through */
	case 32:
		tegra_fb->win->fmt = TEGRA_WIN_FMT_R8G8B8A8;
		break;
	case 16:
		tegra_fb->win->fmt = TEGRA_WIN_FMT_B5G6R5;
		break;
	}
	win->flags = TEGRA_WIN_FLAG_ENABLED;

	tegra_dc_update_windows(&tegra_fb->win, 1);
	tegra_dc_sync_windows(&tegra_fb->win, 1);
}
#endif
276 | |||
/*
 * fb_ops.fb_blank: map fbdev blank levels onto DC power states.
 * UNBLANK re-enables the controller, NORMAL blanks the window only,
 * and the suspend/powerdown levels disable the controller entirely.
 */
static int tegra_fb_blank(int blank, struct fb_info *info)
{
	struct tegra_fb_info *tegra_fb = info->par;

	switch (blank) {
	case FB_BLANK_UNBLANK:
		dev_dbg(&tegra_fb->ndev->dev, "unblank\n");
		tegra_fb->win->flags = TEGRA_WIN_FLAG_ENABLED;
		tegra_dc_enable(tegra_fb->win->dc);
#if defined(CONFIG_FRAMEBUFFER_CONSOLE)
		/*
		 * TODO:
		 * This is a work around to provide an unblanking flip
		 * to dc driver, required to display fb-console after
		 * a blank event,and needs to be replaced by a proper
		 * unblanking mechanism
		 */
		tegra_fb_flip_win(tegra_fb);
#endif
		return 0;

	case FB_BLANK_NORMAL:
		dev_dbg(&tegra_fb->ndev->dev, "blank - normal\n");
		tegra_dc_blank(tegra_fb->win->dc);
		return 0;

	case FB_BLANK_VSYNC_SUSPEND:
	case FB_BLANK_HSYNC_SUSPEND:
	case FB_BLANK_POWERDOWN:
		dev_dbg(&tegra_fb->ndev->dev, "blank - powerdown\n");
		tegra_dc_disable(tegra_fb->win->dc);
		return 0;

	default:
		return -ENOTTY;
	}
}
314 | |||
315 | static int tegra_fb_pan_display(struct fb_var_screeninfo *var, | ||
316 | struct fb_info *info) | ||
317 | { | ||
318 | struct tegra_fb_info *tegra_fb = info->par; | ||
319 | char __iomem *flush_start; | ||
320 | char __iomem *flush_end; | ||
321 | u32 addr; | ||
322 | |||
323 | if (!tegra_fb->win->cur_handle) { | ||
324 | flush_start = info->screen_base + (var->yoffset * info->fix.line_length); | ||
325 | flush_end = flush_start + (var->yres * info->fix.line_length); | ||
326 | |||
327 | info->var.xoffset = var->xoffset; | ||
328 | info->var.yoffset = var->yoffset; | ||
329 | |||
330 | addr = info->fix.smem_start + (var->yoffset * info->fix.line_length) + | ||
331 | (var->xoffset * (var->bits_per_pixel/8)); | ||
332 | |||
333 | tegra_fb->win->phys_addr = addr; | ||
334 | /* TODO: update virt_addr */ | ||
335 | |||
336 | tegra_dc_update_windows(&tegra_fb->win, 1); | ||
337 | tegra_dc_sync_windows(&tegra_fb->win, 1); | ||
338 | } | ||
339 | |||
340 | return 0; | ||
341 | } | ||
342 | |||
/* fb_ops.fb_fillrect: no hardware acceleration, defer to cfb helper. */
static void tegra_fb_fillrect(struct fb_info *info,
			      const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
}
348 | |||
/* fb_ops.fb_copyarea: no hardware acceleration, defer to cfb helper. */
static void tegra_fb_copyarea(struct fb_info *info,
			      const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
}
354 | |||
/* fb_ops.fb_imageblit: no hardware acceleration, defer to cfb helper. */
static void tegra_fb_imageblit(struct fb_info *info,
			       const struct fb_image *image)
{
	cfb_imageblit(info, image);
}
360 | |||
/*
 * fb_ops.fb_ioctl: FBIO_TEGRA_GET_MODEDB copies the registered mode list
 * to the user-supplied array (modedb.modedb points into user space).
 * Stereo modes are emitted twice: once as-is and once with the stereo
 * flags cleared as a 2D fallback.  modedb_len is updated to the number
 * of entries actually written.
 */
static int tegra_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
	struct tegra_fb_modedb modedb;
	struct fb_modelist *modelist;
	int i;

	switch (cmd) {
	case FBIO_TEGRA_GET_MODEDB:
		if (copy_from_user(&modedb, (void __user *)arg, sizeof(modedb)))
			return -EFAULT;

		i = 0;
		list_for_each_entry(modelist, &info->modelist, list) {
			struct fb_var_screeninfo var;

			/* stop once the user's array is full */
			if (i >= modedb.modedb_len)
				break;

			/* fb_videomode_to_var doesn't fill out all the members
			   of fb_var_screeninfo */
			memset(&var, 0x0, sizeof(var));

			fb_videomode_to_var(&var, &modelist->mode);

			if (copy_to_user((void __user *)&modedb.modedb[i],
					 &var, sizeof(var)))
				return -EFAULT;
			i++;

			/* also export a non-stereo variant of stereo modes */
			if (var.vmode & FB_VMODE_STEREO_MASK) {
				if (i >= modedb.modedb_len)
					break;
				var.vmode &= ~FB_VMODE_STEREO_MASK;
				if (copy_to_user(
					(void __user *)&modedb.modedb[i],
					&var, sizeof(var)))
					return -EFAULT;
				i++;
			}
		}
		/* report back how many entries were written */
		modedb.modedb_len = i;

		if (copy_to_user((void __user *)arg, &modedb, sizeof(modedb)))
			return -EFAULT;
		break;

	default:
		return -ENOTTY;
	}

	return 0;
}
413 | |||
/* fbdev entry points; registered via register_framebuffer(). */
static struct fb_ops tegra_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = tegra_fb_check_var,
	.fb_set_par = tegra_fb_set_par,
	.fb_setcmap = tegra_fb_setcmap,
	.fb_blank = tegra_fb_blank,
	.fb_pan_display = tegra_fb_pan_display,
	.fb_fillrect = tegra_fb_fillrect,
	.fb_copyarea = tegra_fb_copyarea,
	.fb_imageblit = tegra_fb_imageblit,
	.fb_ioctl = tegra_fb_ioctl,
};
426 | |||
/*
 * Rebuild the framebuffer's mode list from new monitor specs (e.g. after
 * an HDMI hotplug).  @specs == NULL means the display went away: clear
 * monspecs, drop the current mode and program a zeroed DC mode.
 * Otherwise each mode passing @mode_filter (if given) is added, and
 * FB_EVENT_NEW_MODELIST is broadcast.  Runs under info->lock.
 */
void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
			      struct fb_monspecs *specs,
			      bool (*mode_filter)(const struct tegra_dc *dc,
						  struct fb_videomode *mode))
{
	struct fb_event event;
	int i;

	mutex_lock(&fb_info->info->lock);
	/* throw away the previous mode database before rebuilding */
	fb_destroy_modedb(fb_info->info->monspecs.modedb);

	fb_destroy_modelist(&fb_info->info->modelist);

	if (specs == NULL) {
		struct tegra_dc_mode mode;
		memset(&fb_info->info->monspecs, 0x0,
			sizeof(fb_info->info->monspecs));
		memset(&mode, 0x0, sizeof(mode));

		/*
		 * reset video mode properties to prevent garbage being displayed on 'mode' device.
		 */
		fb_info->info->mode = (struct fb_videomode*) NULL;

		tegra_dc_set_mode(fb_info->win->dc, &mode);
		mutex_unlock(&fb_info->info->lock);
		return;
	}

	memcpy(&fb_info->info->monspecs, specs,
		sizeof(fb_info->info->monspecs));

	for (i = 0; i < specs->modedb_len; i++) {
		if (mode_filter) {
			if (mode_filter(fb_info->win->dc, &specs->modedb[i]))
				fb_add_videomode(&specs->modedb[i],
						 &fb_info->info->modelist);
		} else {
			fb_add_videomode(&specs->modedb[i],
					 &fb_info->info->modelist);
		}
	}

	/* tell fbcon and friends the mode list changed */
	event.info = fb_info->info;
	fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
	mutex_unlock(&fb_info->info->lock);
}
474 | |||
/*
 * Allocate, initialize and register the fbdev device backed by DC
 * window @fb_data->win.
 *
 * Steps: look up the DC window, allocate fb_info, optionally ioremap
 * the carve-out in @fb_mem, fill in fix/var defaults (stride padded to
 * TEGRA_LINEAR_PITCH_ALIGNMENT), preload the window state, register
 * with the fbdev core, and finally mirror the DC's current timing into
 * info->var.  Returns the new tegra_fb_info or an ERR_PTR; unwinds via
 * goto on failure.
 */
struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev,
					struct tegra_dc *dc,
					struct tegra_fb_data *fb_data,
					struct resource *fb_mem)
{
	struct tegra_dc_win *win;
	struct fb_info *info;
	struct tegra_fb_info *tegra_fb;
	void __iomem *fb_base = NULL;
	unsigned long fb_size = 0;
	unsigned long fb_phys = 0;
	int ret = 0;

	win = tegra_dc_get_window(dc, fb_data->win);
	if (!win) {
		dev_err(&ndev->dev, "dc does not have a window at index %d\n",
			fb_data->win);
		return ERR_PTR(-ENOENT);
	}

	info = framebuffer_alloc(sizeof(struct tegra_fb_info), &ndev->dev);
	if (!info) {
		ret = -ENOMEM;
		goto err;
	}

	/* driver-private state lives in info->par */
	tegra_fb = info->par;
	tegra_fb->win = win;
	tegra_fb->ndev = ndev;
	tegra_fb->fb_mem = fb_mem;
	tegra_fb->xres = fb_data->xres;
	tegra_fb->yres = fb_data->yres;

	/* fb_mem is optional; without it screen_base stays NULL */
	if (fb_mem) {
		fb_size = resource_size(fb_mem);
		fb_phys = fb_mem->start;
		fb_base = ioremap_nocache(fb_phys, fb_size);
		if (!fb_base) {
			dev_err(&ndev->dev, "fb can't be mapped\n");
			ret = -EBUSY;
			goto err_free;
		}
		tegra_fb->valid = true;
	}

	info->fbops = &tegra_fb_ops;
	info->pseudo_palette = pseudo_palette;
	info->screen_base = fb_base;
	info->screen_size = fb_size;

	strlcpy(info->fix.id, "tegra_fb", sizeof(info->fix.id));
	info->fix.type		= FB_TYPE_PACKED_PIXELS;
	info->fix.visual	= FB_VISUAL_TRUECOLOR;
	info->fix.xpanstep	= 1;
	info->fix.ypanstep	= 1;
	info->fix.accel		= FB_ACCEL_NONE;
	info->fix.smem_start	= fb_phys;
	info->fix.smem_len	= fb_size;
	info->fix.line_length = fb_data->xres * fb_data->bits_per_pixel / 8;
	/* Pad the stride to 16-byte boundary. */
	info->fix.line_length = round_up(info->fix.line_length,
					TEGRA_LINEAR_PITCH_ALIGNMENT);

	info->var.xres			= fb_data->xres;
	info->var.yres			= fb_data->yres;
	info->var.xres_virtual		= fb_data->xres;
	/* two pages for double buffering via pan_display */
	info->var.yres_virtual		= fb_data->yres * 2;
	info->var.bits_per_pixel	= fb_data->bits_per_pixel;
	info->var.activate		= FB_ACTIVATE_VBL;
	info->var.height		= tegra_dc_get_out_height(dc);
	info->var.width			= tegra_dc_get_out_width(dc);
	/* timing fields are filled in from dc->mode below, if valid */
	info->var.pixclock		= 0;
	info->var.left_margin		= 0;
	info->var.right_margin		= 0;
	info->var.upper_margin		= 0;
	info->var.lower_margin		= 0;
	info->var.hsync_len		= 0;
	info->var.vsync_len		= 0;
	info->var.vmode			= FB_VMODE_NONINTERLACED;

	/* preload the DC window so the fb is scanned out full-screen */
	win->x.full = dfixed_const(0);
	win->y.full = dfixed_const(0);
	win->w.full = dfixed_const(fb_data->xres);
	win->h.full = dfixed_const(fb_data->yres);
	/* TODO: set to output res dc */
	win->out_x = 0;
	win->out_y = 0;
	win->out_w = fb_data->xres;
	win->out_h = fb_data->yres;
	win->z = 0;
	win->phys_addr = fb_phys;
	win->virt_addr = fb_base;
	win->phys_addr_u = 0;
	win->phys_addr_v = 0;
	win->stride = info->fix.line_length;
	win->stride_uv = 0;
	win->flags = TEGRA_WIN_FLAG_ENABLED;

	if (fb_mem)
		tegra_fb_set_par(info);

	if (register_framebuffer(info)) {
		dev_err(&ndev->dev, "failed to register framebuffer\n");
		ret = -ENODEV;
		goto err_iounmap_fb;
	}

	tegra_fb->info = info;

	dev_info(&ndev->dev, "probed\n");

	if (fb_data->flags & TEGRA_FB_FLIP_ON_PROBE) {
		tegra_dc_update_windows(&tegra_fb->win, 1);
		tegra_dc_sync_windows(&tegra_fb->win, 1);
	}

	/* a plausible pixel clock means the DC already has valid timing;
	 * mirror it into info->var for userspace */
	if (dc->mode.pclk > 1000) {
		struct tegra_dc_mode *mode = &dc->mode;

		if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE)
			info->var.pixclock = KHZ2PICOS(mode->rated_pclk / 1000);
		else
			info->var.pixclock = KHZ2PICOS(mode->pclk / 1000);
		info->var.left_margin = mode->h_back_porch;
		info->var.right_margin = mode->h_front_porch;
		info->var.upper_margin = mode->v_back_porch;
		info->var.lower_margin = mode->v_front_porch;
		info->var.hsync_len = mode->h_sync_width;
		info->var.vsync_len = mode->v_sync_width;
	}

	return tegra_fb;

err_iounmap_fb:
	if (fb_base)
		iounmap(fb_base);
err_free:
	framebuffer_release(info);
err:
	return ERR_PTR(ret);
}
616 | |||
617 | void tegra_fb_unregister(struct tegra_fb_info *fb_info) | ||
618 | { | ||
619 | struct fb_info *info = fb_info->info; | ||
620 | |||
621 | unregister_framebuffer(info); | ||
622 | |||
623 | iounmap(info->screen_base); | ||
624 | framebuffer_release(info); | ||
625 | } | ||
diff --git a/drivers/video/tegra/host/Makefile b/drivers/video/tegra/host/Makefile new file mode 100644 index 00000000000..0180885af4d --- /dev/null +++ b/drivers/video/tegra/host/Makefile | |||
@@ -0,0 +1,23 @@ | |||
# Build rules for the nvhost (Tegra graphics host) driver.
GCOV_PROFILE := y
# Core objects linked into nvhost.o
nvhost-objs = \
	nvhost_acm.o \
	nvhost_syncpt.o \
	nvhost_cdma.o \
	nvhost_intr.o \
	nvhost_channel.o \
	nvhost_job.o \
	bus.o \
	dev.o \
	debug.o \
	bus_client.o

# Per-engine client sub-drivers
obj-$(CONFIG_TEGRA_GRHOST) += mpe/
obj-$(CONFIG_TEGRA_GRHOST) += gr3d/
obj-$(CONFIG_TEGRA_GRHOST) += host1x/
obj-$(CONFIG_TEGRA_GRHOST) += t20/
obj-$(CONFIG_TEGRA_GRHOST) += t30/
obj-$(CONFIG_TEGRA_GRHOST) += dsi/
obj-$(CONFIG_TEGRA_GRHOST) += gr2d/
obj-$(CONFIG_TEGRA_GRHOST) += isp/
obj-$(CONFIG_TEGRA_GRHOST) += vi/
obj-$(CONFIG_TEGRA_GRHOST) += nvhost.o
diff --git a/drivers/video/tegra/host/bus.c b/drivers/video/tegra/host/bus.c new file mode 100644 index 00000000000..774aac7bd43 --- /dev/null +++ b/drivers/video/tegra/host/bus.c | |||
@@ -0,0 +1,569 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/bus.c | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * Author: Erik Gilling <konkers@google.com> | ||
6 | * | ||
7 | * Copyright (C) 2010-2012 NVIDIA Corporation | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/pm_runtime.h> | ||
21 | #include <linux/nvhost.h> | ||
22 | |||
23 | #include "dev.h" | ||
24 | |||
/* The single host1x master instance; assigned in nvhost_bus_add_host(). */
struct nvhost_master *nvhost;
26 | |||
27 | struct resource *nvhost_get_resource(struct nvhost_device *dev, | ||
28 | unsigned int type, unsigned int num) | ||
29 | { | ||
30 | int i; | ||
31 | |||
32 | for (i = 0; i < dev->num_resources; i++) { | ||
33 | struct resource *r = &dev->resource[i]; | ||
34 | |||
35 | if (type == resource_type(r) && num-- == 0) | ||
36 | return r; | ||
37 | } | ||
38 | return NULL; | ||
39 | } | ||
40 | EXPORT_SYMBOL_GPL(nvhost_get_resource); | ||
41 | |||
42 | int nvhost_get_irq(struct nvhost_device *dev, unsigned int num) | ||
43 | { | ||
44 | struct resource *r = nvhost_get_resource(dev, IORESOURCE_IRQ, num); | ||
45 | |||
46 | return r ? r->start : -ENXIO; | ||
47 | } | ||
48 | EXPORT_SYMBOL_GPL(nvhost_get_irq); | ||
49 | |||
50 | struct resource *nvhost_get_resource_byname(struct nvhost_device *dev, | ||
51 | unsigned int type, | ||
52 | const char *name) | ||
53 | { | ||
54 | int i; | ||
55 | |||
56 | for (i = 0; i < dev->num_resources; i++) { | ||
57 | struct resource *r = &dev->resource[i]; | ||
58 | |||
59 | if (type == resource_type(r) && !strcmp(r->name, name)) | ||
60 | return r; | ||
61 | } | ||
62 | return NULL; | ||
63 | } | ||
64 | EXPORT_SYMBOL_GPL(nvhost_get_resource_byname); | ||
65 | |||
66 | int nvhost_get_irq_byname(struct nvhost_device *dev, const char *name) | ||
67 | { | ||
68 | struct resource *r = nvhost_get_resource_byname(dev, IORESOURCE_IRQ, | ||
69 | name); | ||
70 | |||
71 | return r ? r->start : -ENXIO; | ||
72 | } | ||
73 | EXPORT_SYMBOL_GPL(nvhost_get_irq_byname); | ||
74 | |||
75 | static int nvhost_drv_probe(struct device *_dev) | ||
76 | { | ||
77 | struct nvhost_driver *drv = to_nvhost_driver(_dev->driver); | ||
78 | struct nvhost_device *dev = to_nvhost_device(_dev); | ||
79 | |||
80 | return drv->probe(dev); | ||
81 | } | ||
82 | |||
83 | static int nvhost_drv_remove(struct device *_dev) | ||
84 | { | ||
85 | struct nvhost_driver *drv = to_nvhost_driver(_dev->driver); | ||
86 | struct nvhost_device *dev = to_nvhost_device(_dev); | ||
87 | |||
88 | return drv->remove(dev); | ||
89 | } | ||
90 | |||
91 | static void nvhost_drv_shutdown(struct device *_dev) | ||
92 | { | ||
93 | struct nvhost_driver *drv = to_nvhost_driver(_dev->driver); | ||
94 | struct nvhost_device *dev = to_nvhost_device(_dev); | ||
95 | |||
96 | drv->shutdown(dev); | ||
97 | } | ||
98 | |||
99 | int nvhost_driver_register(struct nvhost_driver *drv) | ||
100 | { | ||
101 | drv->driver.bus = &nvhost_bus_type; | ||
102 | if (drv->probe) | ||
103 | drv->driver.probe = nvhost_drv_probe; | ||
104 | if (drv->remove) | ||
105 | drv->driver.remove = nvhost_drv_remove; | ||
106 | if (drv->shutdown) | ||
107 | drv->driver.shutdown = nvhost_drv_shutdown; | ||
108 | |||
109 | return driver_register(&drv->driver); | ||
110 | } | ||
111 | EXPORT_SYMBOL(nvhost_driver_register); | ||
112 | |||
/* Unregister a driver previously added with nvhost_driver_register(). */
void nvhost_driver_unregister(struct nvhost_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(nvhost_driver_unregister);
118 | |||
/*
 * Register an nvhost_device on the nvhost bus.
 *
 * Claims the device's MEM/IO resources and adds the device to the
 * driver core.  Returns 0 on success or a negative errno; on failure
 * every resource claimed so far is released again.
 */
int nvhost_device_register(struct nvhost_device *dev)
{
	int i, ret = 0;

	if (!dev)
		return -EINVAL;

	device_initialize(&dev->dev);

	/* If the dev does not have a parent, assign host1x as parent */
	if (!dev->dev.parent && nvhost && nvhost->dev != dev)
		dev->dev.parent = &nvhost->dev->dev;

	dev->dev.bus = &nvhost_bus_type;

	/* "name.id" for multi-instance devices, plain "name" otherwise */
	if (dev->id != -1)
		dev_set_name(&dev->dev, "%s.%d", dev->name, dev->id);
	else
		dev_set_name(&dev->dev, "%s", dev->name);

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *p, *r = &dev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&dev->dev);

		/* Parentless resources go under the global iomem/ioport
		 * trees according to their type. */
		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p && insert_resource(p, r)) {
			pr_err("%s: failed to claim resource %d\n",
				dev_name(&dev->dev), i);
			ret = -EBUSY;
			goto failed;
		}
	}

	ret = device_add(&dev->dev);
	if (ret == 0)
		return ret;

failed:
	/* Unwind only the resources claimed before the failure. */
	while (--i >= 0) {
		struct resource *r = &dev->resource[i];
		unsigned long type = resource_type(r);

		if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
			release_resource(r);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvhost_device_register);
177 | |||
178 | void nvhost_device_unregister(struct nvhost_device *dev) | ||
179 | { | ||
180 | int i; | ||
181 | if (dev) { | ||
182 | device_del(&dev->dev); | ||
183 | |||
184 | for (i = 0; i < dev->num_resources; i++) { | ||
185 | struct resource *r = &dev->resource[i]; | ||
186 | unsigned long type = resource_type(r); | ||
187 | |||
188 | if (type == IORESOURCE_MEM || type == IORESOURCE_IO) | ||
189 | release_resource(r); | ||
190 | } | ||
191 | |||
192 | put_device(&dev->dev); | ||
193 | } | ||
194 | } | ||
195 | EXPORT_SYMBOL_GPL(nvhost_device_unregister); | ||
196 | |||
/*
 * Match devices to drivers by name.
 * NOTE(review): this is a prefix match (strncmp bounded by the driver
 * name length), so a driver named "gr" would also match a device named
 * "gr3d" — presumably intentional to cover instance suffixes; confirm
 * against the drivers registered on this bus.
 */
static int nvhost_bus_match(struct device *_dev, struct device_driver *drv)
{
	struct nvhost_device *dev = to_nvhost_device(_dev);

	return !strncmp(dev->name, drv->name, strlen(drv->name));
}
203 | |||
204 | #ifdef CONFIG_PM_SLEEP | ||
205 | |||
/* Forward suspend to the legacy (non dev_pm_ops) driver callback, if any. */
static int nvhost_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
	struct nvhost_device *pdev = to_nvhost_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->suspend)
		ret = pdrv->suspend(pdev, mesg);

	return ret;
}

/* Forward resume to the legacy (non dev_pm_ops) driver callback, if any. */
static int nvhost_legacy_resume(struct device *dev)
{
	struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
	struct nvhost_device *pdev = to_nvhost_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->resume)
		ret = pdrv->resume(pdev);

	return ret;
}

/* Delegate the PM prepare phase to the bound driver's dev_pm_ops. */
static int nvhost_pm_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (drv && drv->pm && drv->pm->prepare)
		ret = drv->pm->prepare(dev);

	return ret;
}

/* Delegate the PM complete phase to the bound driver's dev_pm_ops. */
static void nvhost_pm_complete(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv && drv->pm && drv->pm->complete)
		drv->pm->complete(dev);
}
248 | |||
249 | #else /* !CONFIG_PM_SLEEP */ | ||
250 | |||
251 | #define nvhost_pm_prepare NULL | ||
252 | #define nvhost_pm_complete NULL | ||
253 | |||
254 | #endif /* !CONFIG_PM_SLEEP */ | ||
255 | |||
256 | #ifdef CONFIG_SUSPEND | ||
257 | |||
/*
 * System suspend: prefer the driver's dev_pm_ops callback, falling back
 * to the legacy suspend entry point when no dev_pm_ops is provided.
 */
int __weak nvhost_pm_suspend(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->suspend)
			ret = drv->pm->suspend(dev);
	} else {
		ret = nvhost_legacy_suspend(dev, PMSG_SUSPEND);
	}

	return ret;
}

/* Late (IRQs off) suspend phase; dev_pm_ops only, no legacy fallback. */
int __weak nvhost_pm_suspend_noirq(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->suspend_noirq)
			ret = drv->pm->suspend_noirq(dev);
	}

	return ret;
}

/*
 * System resume: prefer the driver's dev_pm_ops callback, falling back
 * to the legacy resume entry point when no dev_pm_ops is provided.
 */
int __weak nvhost_pm_resume(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->resume)
			ret = drv->pm->resume(dev);
	} else {
		ret = nvhost_legacy_resume(dev);
	}

	return ret;
}

/* Early (IRQs off) resume phase; dev_pm_ops only, no legacy fallback. */
int __weak nvhost_pm_resume_noirq(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->resume_noirq)
			ret = drv->pm->resume_noirq(dev);
	}

	return ret;
}
325 | |||
326 | #else /* !CONFIG_SUSPEND */ | ||
327 | |||
328 | #define nvhost_pm_suspend NULL | ||
329 | #define nvhost_pm_resume NULL | ||
330 | #define nvhost_pm_suspend_noirq NULL | ||
331 | #define nvhost_pm_resume_noirq NULL | ||
332 | |||
333 | #endif /* !CONFIG_SUSPEND */ | ||
334 | |||
335 | #ifdef CONFIG_HIBERNATION | ||
336 | |||
/* Hibernation freeze: dev_pm_ops if present, else legacy suspend. */
static int nvhost_pm_freeze(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->freeze)
			ret = drv->pm->freeze(dev);
	} else {
		ret = nvhost_legacy_suspend(dev, PMSG_FREEZE);
	}

	return ret;
}

/* Freeze phase with IRQs disabled; dev_pm_ops only. */
static int nvhost_pm_freeze_noirq(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->freeze_noirq)
			ret = drv->pm->freeze_noirq(dev);
	}

	return ret;
}

/* Hibernation thaw: dev_pm_ops if present, else legacy resume. */
static int nvhost_pm_thaw(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->thaw)
			ret = drv->pm->thaw(dev);
	} else {
		ret = nvhost_legacy_resume(dev);
	}

	return ret;
}

/* Thaw phase with IRQs disabled; dev_pm_ops only. */
static int nvhost_pm_thaw_noirq(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->thaw_noirq)
			ret = drv->pm->thaw_noirq(dev);
	}

	return ret;
}

/* Hibernation poweroff: dev_pm_ops if present, else legacy suspend. */
static int nvhost_pm_poweroff(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->poweroff)
			ret = drv->pm->poweroff(dev);
	} else {
		ret = nvhost_legacy_suspend(dev, PMSG_HIBERNATE);
	}

	return ret;
}

/* Poweroff phase with IRQs disabled; dev_pm_ops only. */
static int nvhost_pm_poweroff_noirq(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->poweroff_noirq)
			ret = drv->pm->poweroff_noirq(dev);
	}

	return ret;
}

/* Restore from hibernation image: dev_pm_ops if present, else legacy resume. */
static int nvhost_pm_restore(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->restore)
			ret = drv->pm->restore(dev);
	} else {
		ret = nvhost_legacy_resume(dev);
	}

	return ret;
}

/* Restore phase with IRQs disabled; dev_pm_ops only. */
static int nvhost_pm_restore_noirq(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->restore_noirq)
			ret = drv->pm->restore_noirq(dev);
	}

	return ret;
}
472 | |||
473 | #else /* !CONFIG_HIBERNATION */ | ||
474 | |||
475 | #define nvhost_pm_freeze NULL | ||
476 | #define nvhost_pm_thaw NULL | ||
477 | #define nvhost_pm_poweroff NULL | ||
478 | #define nvhost_pm_restore NULL | ||
479 | #define nvhost_pm_freeze_noirq NULL | ||
480 | #define nvhost_pm_thaw_noirq NULL | ||
481 | #define nvhost_pm_poweroff_noirq NULL | ||
482 | #define nvhost_pm_restore_noirq NULL | ||
483 | |||
484 | #endif /* !CONFIG_HIBERNATION */ | ||
485 | |||
486 | #ifdef CONFIG_PM_RUNTIME | ||
487 | |||
488 | int __weak nvhost_pm_runtime_suspend(struct device *dev) | ||
489 | { | ||
490 | return pm_generic_runtime_suspend(dev); | ||
491 | }; | ||
492 | |||
493 | int __weak nvhost_pm_runtime_resume(struct device *dev) | ||
494 | { | ||
495 | return pm_generic_runtime_resume(dev); | ||
496 | }; | ||
497 | |||
498 | int __weak nvhost_pm_runtime_idle(struct device *dev) | ||
499 | { | ||
500 | return pm_generic_runtime_idle(dev); | ||
501 | }; | ||
502 | |||
503 | #else /* !CONFIG_PM_RUNTIME */ | ||
504 | |||
505 | #define nvhost_pm_runtime_suspend NULL | ||
506 | #define nvhost_pm_runtime_resume NULL | ||
507 | #define nvhost_pm_runtime_idle NULL | ||
508 | |||
509 | #endif /* !CONFIG_PM_RUNTIME */ | ||
510 | |||
/* Bus-level PM operations; entries are NULL when the matching
 * CONFIG_PM_SLEEP/CONFIG_SUSPEND/CONFIG_HIBERNATION/CONFIG_PM_RUNTIME
 * option is disabled (see the #else stubs above). */
static const struct dev_pm_ops nvhost_dev_pm_ops = {
	.prepare = nvhost_pm_prepare,
	.complete = nvhost_pm_complete,
	.suspend = nvhost_pm_suspend,
	.resume = nvhost_pm_resume,
	.freeze = nvhost_pm_freeze,
	.thaw = nvhost_pm_thaw,
	.poweroff = nvhost_pm_poweroff,
	.restore = nvhost_pm_restore,
	.suspend_noirq = nvhost_pm_suspend_noirq,
	.resume_noirq = nvhost_pm_resume_noirq,
	.freeze_noirq = nvhost_pm_freeze_noirq,
	.thaw_noirq = nvhost_pm_thaw_noirq,
	.poweroff_noirq = nvhost_pm_poweroff_noirq,
	.restore_noirq = nvhost_pm_restore_noirq,
	.runtime_suspend = nvhost_pm_runtime_suspend,
	.runtime_resume = nvhost_pm_runtime_resume,
	.runtime_idle = nvhost_pm_runtime_idle,
};
530 | |||
/* The "nvhost" virtual bus that host1x client devices register on. */
struct bus_type nvhost_bus_type = {
	.name = "nvhost",
	.match = nvhost_bus_match,
	.pm = &nvhost_dev_pm_ops,
};
EXPORT_SYMBOL(nvhost_bus_type);
537 | |||
538 | static int set_parent(struct device *dev, void *data) | ||
539 | { | ||
540 | struct nvhost_device *ndev = to_nvhost_device(dev); | ||
541 | struct nvhost_master *host = data; | ||
542 | if (!dev->parent && ndev != host->dev) | ||
543 | dev->parent = &host->dev->dev; | ||
544 | return 0; | ||
545 | } | ||
546 | |||
/*
 * Record the host1x master in the file-scope 'nvhost' pointer and
 * reparent all previously registered bus devices under it.
 * Always returns 0.
 */
int nvhost_bus_add_host(struct nvhost_master *host)
{
	nvhost = host;

	/* Assign host1x as parent to all devices in nvhost bus */
	bus_for_each_dev(&nvhost_bus_type, NULL, host, set_parent);

	return 0;
}
556 | |||
557 | |||
558 | int nvhost_bus_init(void) | ||
559 | { | ||
560 | int err; | ||
561 | |||
562 | pr_info("host1x bus init\n"); | ||
563 | |||
564 | err = bus_register(&nvhost_bus_type); | ||
565 | |||
566 | return err; | ||
567 | } | ||
568 | postcore_initcall(nvhost_bus_init); | ||
569 | |||
diff --git a/drivers/video/tegra/host/bus_client.c b/drivers/video/tegra/host/bus_client.c new file mode 100644 index 00000000000..940f04a40e8 --- /dev/null +++ b/drivers/video/tegra/host/bus_client.c | |||
@@ -0,0 +1,606 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/bus_client.c | ||
3 | * | ||
4 | * Tegra Graphics Host Client Module | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <linux/slab.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/spinlock.h> | ||
24 | #include <linux/fs.h> | ||
25 | #include <linux/cdev.h> | ||
26 | #include <linux/uaccess.h> | ||
27 | #include <linux/file.h> | ||
28 | #include <linux/clk.h> | ||
29 | #include <linux/hrtimer.h> | ||
30 | |||
31 | #include <trace/events/nvhost.h> | ||
32 | |||
33 | #include <linux/io.h> | ||
34 | #include <linux/string.h> | ||
35 | |||
36 | #include <linux/nvhost.h> | ||
37 | #include <linux/nvhost_ioctl.h> | ||
38 | |||
39 | #include <mach/nvmap.h> | ||
40 | #include <mach/gpufuse.h> | ||
41 | #include <mach/hardware.h> | ||
42 | #include <mach/iomap.h> | ||
43 | |||
44 | #include "debug.h" | ||
45 | #include "bus_client.h" | ||
46 | #include "dev.h" | ||
47 | |||
48 | void nvhost_read_module_regs(struct nvhost_device *ndev, | ||
49 | u32 offset, int count, u32 *values) | ||
50 | { | ||
51 | void __iomem *p = ndev->aperture + offset; | ||
52 | |||
53 | nvhost_module_busy(ndev); | ||
54 | while (count--) { | ||
55 | *(values++) = readl(p); | ||
56 | p += 4; | ||
57 | } | ||
58 | rmb(); | ||
59 | nvhost_module_idle(ndev); | ||
60 | } | ||
61 | |||
62 | void nvhost_write_module_regs(struct nvhost_device *ndev, | ||
63 | u32 offset, int count, const u32 *values) | ||
64 | { | ||
65 | void __iomem *p = ndev->aperture + offset; | ||
66 | |||
67 | nvhost_module_busy(ndev); | ||
68 | while (count--) { | ||
69 | writel(*(values++), p); | ||
70 | p += 4; | ||
71 | } | ||
72 | wmb(); | ||
73 | nvhost_module_idle(ndev); | ||
74 | } | ||
75 | |||
/* Per-open-file state for a channel device node. */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;	/* channel this fd is bound to */
	struct nvhost_hwctx *hwctx;	/* hw context, if the engine has one */
	struct nvhost_submit_hdr_ext hdr;	/* current submit being streamed in */
	int num_relocshifts;	/* reloc shifts still expected (V2 submits) */
	struct nvhost_job *job;	/* job being assembled from writes */
	struct nvmap_client *nvmap;	/* memory client set via SET_NVMAP_FD */
	u32 timeout;	/* job timeout handed to each submit */
	u32 priority;	/* submit priority; defaults to MEDIUM on open */
	int clientid;	/* unique id from the host's clientid counter */
};
87 | |||
88 | /* | ||
89 | * Write cmdbuf to ftrace output. Checks if cmdbuf contents should be output | ||
90 | * and mmaps the cmdbuf contents if required. | ||
91 | */ | ||
92 | static void trace_write_cmdbufs(struct nvhost_job *job) | ||
93 | { | ||
94 | struct nvmap_handle_ref handle; | ||
95 | void *mem = NULL; | ||
96 | int i = 0; | ||
97 | |||
98 | for (i = 0; i < job->num_gathers; i++) { | ||
99 | struct nvhost_channel_gather *gather = &job->gathers[i]; | ||
100 | if (nvhost_debug_trace_cmdbuf) { | ||
101 | handle.handle = nvmap_id_to_handle(gather->mem_id); | ||
102 | mem = nvmap_mmap(&handle); | ||
103 | if (IS_ERR_OR_NULL(mem)) | ||
104 | mem = NULL; | ||
105 | }; | ||
106 | |||
107 | if (mem) { | ||
108 | u32 i; | ||
109 | /* | ||
110 | * Write in batches of 128 as there seems to be a limit | ||
111 | * of how much you can output to ftrace at once. | ||
112 | */ | ||
113 | for (i = 0; i < gather->words; i += TRACE_MAX_LENGTH) { | ||
114 | trace_nvhost_channel_write_cmdbuf_data( | ||
115 | job->ch->dev->name, | ||
116 | gather->mem_id, | ||
117 | min(gather->words - i, | ||
118 | TRACE_MAX_LENGTH), | ||
119 | gather->offset + i * sizeof(u32), | ||
120 | mem); | ||
121 | } | ||
122 | nvmap_munmap(&handle, mem); | ||
123 | } | ||
124 | } | ||
125 | } | ||
126 | |||
/*
 * Release hook for the channel device node; also used directly as the
 * error-unwind path of nvhost_channelopen(), so every teardown step
 * must tolerate partially initialized state.
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(priv->ch->dev->name);

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, priv);
	/* drop the channel ref taken in nvhost_channelopen() */
	nvhost_putchannel(priv->ch, priv->hwctx);

	if (priv->hwctx)
		priv->ch->ctxhandler->put(priv->hwctx);

	if (priv->job)
		nvhost_job_put(priv->job);

	nvmap_client_put(priv->nvmap);
	kfree(priv);
	return 0;
}
148 | |||
/*
 * Open hook for the channel device node: take a channel reference,
 * allocate the per-file context, hardware context (if the engine uses
 * one) and an initial job.  On failure after priv is installed, the
 * release hook performs the unwind.
 */
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv;
	struct nvhost_channel *ch;

	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
	ch = nvhost_getchannel(ch);
	if (!ch)
		return -ENOMEM;
	trace_nvhost_channel_open(ch->dev->name);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvhost_putchannel(ch, NULL);
		return -ENOMEM;
	}
	filp->private_data = priv;
	priv->ch = ch;
	nvhost_module_add_client(ch->dev, priv);

	if (ch->ctxhandler && ch->ctxhandler->alloc) {
		priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
		if (!priv->hwctx)
			goto fail;
	}
	priv->priority = NVHOST_PRIORITY_MEDIUM;
	/* unique id per open, used to attribute submits to clients */
	priv->clientid = atomic_add_return(1,
			&nvhost_get_host(ch->dev)->clientid);

	priv->job = nvhost_job_alloc(ch, priv->hwctx, &priv->hdr,
			NULL, priv->priority, priv->clientid);
	if (!priv->job)
		goto fail;

	return 0;
fail:
	/* the release hook cleans up everything set up so far */
	nvhost_channelrelease(inode, filp);
	return -ENOMEM;
}
188 | |||
/*
 * Validate the submit header just received from userspace and size the
 * job to hold the announced cmdbufs/relocs/waitchks.  Requires a prior
 * SET_NVMAP_FD ioctl.  Returns 0 or a negative errno.
 */
static int set_submit(struct nvhost_channel_userctx *ctx)
{
	struct device *device = &ctx->ch->dev->dev;

	/* submit should have at least 1 cmdbuf */
	if (!ctx->hdr.num_cmdbufs)
		return -EIO;

	if (!ctx->nvmap) {
		dev_err(device, "no nvmap context set\n");
		return -EFAULT;
	}

	ctx->job = nvhost_job_realloc(ctx->job,
			ctx->hwctx,
			&ctx->hdr,
			ctx->nvmap,
			ctx->priority,
			ctx->clientid);
	if (!ctx->job)
		return -ENOMEM;
	ctx->job->timeout = ctx->timeout;

	/* V2+ submits stream a separate reloc-shift entry per reloc */
	if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
		ctx->num_relocshifts = ctx->hdr.num_relocs;

	return 0;
}
217 | |||
218 | static void reset_submit(struct nvhost_channel_userctx *ctx) | ||
219 | { | ||
220 | ctx->hdr.num_cmdbufs = 0; | ||
221 | ctx->hdr.num_relocs = 0; | ||
222 | ctx->num_relocshifts = 0; | ||
223 | ctx->hdr.num_waitchks = 0; | ||
224 | } | ||
225 | |||
/*
 * write() handler implementing the streaming submit protocol: userspace
 * first writes a submit header, then the announced number of cmdbuf,
 * reloc, waitchk and (V2) reloc-shift records in that order.  The
 * remaining counts in priv->hdr act as the parser state.  Returns the
 * number of bytes consumed, or a negative errno (resetting the submit).
 */
static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
				size_t count, loff_t *offp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	size_t remaining = count;
	int err = 0;
	struct nvhost_job *job = priv->job;
	struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
	const char *chname = priv->ch->dev->name;

	if (!job)
		return -EIO;

	while (remaining) {
		size_t consumed;
		/* all counts exhausted: the next record is a new header */
		if (!hdr->num_relocs &&
				!priv->num_relocshifts &&
				!hdr->num_cmdbufs &&
				!hdr->num_waitchks) {
			consumed = sizeof(struct nvhost_submit_hdr);
			if (remaining < consumed)
				break;
			if (copy_from_user(hdr, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
			err = set_submit(priv);
			if (err)
				break;
			trace_nvhost_channel_write_submit(chname,
				count, hdr->num_cmdbufs, hdr->num_relocs,
				hdr->syncpt_id, hdr->syncpt_incrs);
		} else if (hdr->num_cmdbufs) {
			struct nvhost_cmdbuf cmdbuf;
			consumed = sizeof(cmdbuf);
			if (remaining < consumed)
				break;
			if (copy_from_user(&cmdbuf, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_cmdbuf(chname,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			nvhost_job_add_gather(job,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			hdr->num_cmdbufs--;
		} else if (hdr->num_relocs) {
			consumed = sizeof(struct nvhost_reloc);
			if (remaining < consumed)
				break;
			if (copy_from_user(&job->pinarray[job->num_pins],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_reloc(chname);
			job->num_pins++;
			hdr->num_relocs--;
		} else if (hdr->num_waitchks) {
			/* waitchks are copied in bulk, as many as fit */
			int numwaitchks =
				(remaining / sizeof(struct nvhost_waitchk));
			if (!numwaitchks)
				break;
			numwaitchks = min_t(int,
				numwaitchks, hdr->num_waitchks);
			consumed = numwaitchks * sizeof(struct nvhost_waitchk);
			if (copy_from_user(&job->waitchk[job->num_waitchk],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_waitchks(
				chname, numwaitchks,
				hdr->waitchk_mask);
			job->num_waitchk += numwaitchks;
			hdr->num_waitchks -= numwaitchks;
		} else if (priv->num_relocshifts) {
			/* shifts are stored into the pins already copied */
			int next_shift =
				job->num_pins - priv->num_relocshifts;
			consumed = sizeof(struct nvhost_reloc_shift);
			if (remaining < consumed)
				break;
			if (copy_from_user(
					&job->pinarray[next_shift].reloc_shift,
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			priv->num_relocshifts--;
		} else {
			err = -EFAULT;
			break;
		}
		remaining -= consumed;
		buf += consumed;
	}

	if (err < 0) {
		dev_err(&priv->ch->dev->dev, "channel write error\n");
		reset_submit(priv);
		return err;
	}

	return count - remaining;
}
332 | |||
/*
 * FLUSH/NULL_KICKOFF ioctl: pin the fully streamed job and submit it to
 * the channel.  Fails with -EFAULT if the streaming protocol left any
 * record counts outstanding.  On success args->value holds the syncpt
 * threshold marking job completion.
 */
static int nvhost_ioctl_channel_flush(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_get_param_args *args,
	int null_kickoff)
{
	struct device *device = &ctx->ch->dev->dev;
	int err;

	trace_nvhost_ioctl_channel_flush(ctx->ch->dev->name);

	if (!ctx->job ||
	    ctx->hdr.num_relocs ||
	    ctx->hdr.num_cmdbufs ||
	    ctx->hdr.num_waitchks) {
		reset_submit(ctx);
		dev_err(device, "channel submit out of sync\n");
		return -EFAULT;
	}

	err = nvhost_job_pin(ctx->job);
	if (err) {
		dev_warn(device, "nvhost_job_pin failed: %d\n", err);
		return err;
	}

	/* debugfs knobs can force null kickoff / timeouts for this pid */
	if (nvhost_debug_null_kickoff_pid == current->tgid)
		null_kickoff = 1;
	ctx->job->null_kickoff = null_kickoff;

	if ((nvhost_debug_force_timeout_pid == current->tgid) &&
	    (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
		ctx->timeout = nvhost_debug_force_timeout_val;
	}

	trace_write_cmdbufs(ctx->job);

	/* context switch if needed, and submit user's gathers to the channel */
	err = nvhost_channel_submit(ctx->job);
	args->value = ctx->job->syncpt_end;
	if (err)
		nvhost_job_unpin(ctx->job);

	return err;
}
377 | |||
378 | static int nvhost_ioctl_channel_read_3d_reg( | ||
379 | struct nvhost_channel_userctx *ctx, | ||
380 | struct nvhost_read_3d_reg_args *args) | ||
381 | { | ||
382 | BUG_ON(!channel_op(ctx->ch).read3dreg); | ||
383 | return channel_op(ctx->ch).read3dreg(ctx->ch, ctx->hwctx, | ||
384 | args->offset, &args->value); | ||
385 | } | ||
386 | |||
387 | static long nvhost_channelctl(struct file *filp, | ||
388 | unsigned int cmd, unsigned long arg) | ||
389 | { | ||
390 | struct nvhost_channel_userctx *priv = filp->private_data; | ||
391 | u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE]; | ||
392 | int err = 0; | ||
393 | |||
394 | if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) || | ||
395 | (_IOC_NR(cmd) == 0) || | ||
396 | (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST)) | ||
397 | return -EFAULT; | ||
398 | |||
399 | BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE); | ||
400 | |||
401 | if (_IOC_DIR(cmd) & _IOC_WRITE) { | ||
402 | if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd))) | ||
403 | return -EFAULT; | ||
404 | } | ||
405 | |||
406 | switch (cmd) { | ||
407 | case NVHOST_IOCTL_CHANNEL_FLUSH: | ||
408 | err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0); | ||
409 | break; | ||
410 | case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF: | ||
411 | err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1); | ||
412 | break; | ||
413 | case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT: | ||
414 | { | ||
415 | struct nvhost_submit_hdr_ext *hdr; | ||
416 | |||
417 | if (priv->hdr.num_relocs || | ||
418 | priv->num_relocshifts || | ||
419 | priv->hdr.num_cmdbufs || | ||
420 | priv->hdr.num_waitchks) { | ||
421 | reset_submit(priv); | ||
422 | dev_err(&priv->ch->dev->dev, | ||
423 | "channel submit out of sync\n"); | ||
424 | err = -EIO; | ||
425 | break; | ||
426 | } | ||
427 | |||
428 | hdr = (struct nvhost_submit_hdr_ext *)buf; | ||
429 | if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) { | ||
430 | dev_err(&priv->ch->dev->dev, | ||
431 | "submit version %d > max supported %d\n", | ||
432 | hdr->submit_version, | ||
433 | NVHOST_SUBMIT_VERSION_MAX_SUPPORTED); | ||
434 | err = -EINVAL; | ||
435 | break; | ||
436 | } | ||
437 | memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext)); | ||
438 | err = set_submit(priv); | ||
439 | trace_nvhost_ioctl_channel_submit(priv->ch->dev->name, | ||
440 | priv->hdr.submit_version, | ||
441 | priv->hdr.num_cmdbufs, priv->hdr.num_relocs, | ||
442 | priv->hdr.num_waitchks, | ||
443 | priv->hdr.syncpt_id, priv->hdr.syncpt_incrs); | ||
444 | break; | ||
445 | } | ||
446 | case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS: | ||
447 | /* host syncpt ID is used by the RM (and never be given out) */ | ||
448 | BUG_ON(priv->ch->dev->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST)); | ||
449 | ((struct nvhost_get_param_args *)buf)->value = | ||
450 | priv->ch->dev->syncpts; | ||
451 | break; | ||
452 | case NVHOST_IOCTL_CHANNEL_GET_WAITBASES: | ||
453 | ((struct nvhost_get_param_args *)buf)->value = | ||
454 | priv->ch->dev->waitbases; | ||
455 | break; | ||
456 | case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES: | ||
457 | ((struct nvhost_get_param_args *)buf)->value = | ||
458 | priv->ch->dev->modulemutexes; | ||
459 | break; | ||
460 | case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD: | ||
461 | { | ||
462 | int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd; | ||
463 | struct nvmap_client *new_client = nvmap_client_get_file(fd); | ||
464 | |||
465 | if (IS_ERR(new_client)) { | ||
466 | err = PTR_ERR(new_client); | ||
467 | break; | ||
468 | } | ||
469 | |||
470 | if (priv->nvmap) | ||
471 | nvmap_client_put(priv->nvmap); | ||
472 | |||
473 | priv->nvmap = new_client; | ||
474 | break; | ||
475 | } | ||
476 | case NVHOST_IOCTL_CHANNEL_READ_3D_REG: | ||
477 | err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf); | ||
478 | break; | ||
479 | case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE: | ||
480 | { | ||
481 | unsigned long rate; | ||
482 | struct nvhost_clk_rate_args *arg = | ||
483 | (struct nvhost_clk_rate_args *)buf; | ||
484 | |||
485 | err = nvhost_module_get_rate(priv->ch->dev, &rate, 0); | ||
486 | if (err == 0) | ||
487 | arg->rate = rate; | ||
488 | break; | ||
489 | } | ||
490 | case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE: | ||
491 | { | ||
492 | struct nvhost_clk_rate_args *arg = | ||
493 | (struct nvhost_clk_rate_args *)buf; | ||
494 | unsigned long rate = (unsigned long)arg->rate; | ||
495 | |||
496 | err = nvhost_module_set_rate(priv->ch->dev, priv, rate, 0); | ||
497 | break; | ||
498 | } | ||
499 | case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT: | ||
500 | priv->timeout = | ||
501 | (u32)((struct nvhost_set_timeout_args *)buf)->timeout; | ||
502 | dev_dbg(&priv->ch->dev->dev, | ||
503 | "%s: setting buffer timeout (%d ms) for userctx 0x%p\n", | ||
504 | __func__, priv->timeout, priv); | ||
505 | break; | ||
506 | case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT: | ||
507 | ((struct nvhost_get_param_args *)buf)->value = | ||
508 | priv->hwctx->has_timedout; | ||
509 | break; | ||
510 | case NVHOST_IOCTL_CHANNEL_SET_PRIORITY: | ||
511 | priv->priority = | ||
512 | (u32)((struct nvhost_set_priority_args *)buf)->priority; | ||
513 | break; | ||
514 | default: | ||
515 | err = -ENOTTY; | ||
516 | break; | ||
517 | } | ||
518 | |||
519 | if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) | ||
520 | err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)); | ||
521 | |||
522 | return err; | ||
523 | } | ||
524 | |||
/* File operations for the per-channel device nodes (IFACE_NAME "-<name>"). */
static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
	.write = nvhost_channelwrite,
	.unlocked_ioctl = nvhost_channelctl
};
532 | |||
533 | int nvhost_client_user_init(struct nvhost_device *dev) | ||
534 | { | ||
535 | int err, devno; | ||
536 | |||
537 | struct nvhost_channel *ch = dev->channel; | ||
538 | |||
539 | cdev_init(&ch->cdev, &nvhost_channelops); | ||
540 | ch->cdev.owner = THIS_MODULE; | ||
541 | |||
542 | devno = MKDEV(nvhost_major, nvhost_minor + dev->index); | ||
543 | err = cdev_add(&ch->cdev, devno, 1); | ||
544 | if (err < 0) { | ||
545 | dev_err(&dev->dev, | ||
546 | "failed to add chan %i cdev\n", dev->index); | ||
547 | goto fail; | ||
548 | } | ||
549 | ch->node = device_create(nvhost_get_host(dev)->nvhost_class, NULL, devno, NULL, | ||
550 | IFACE_NAME "-%s", dev->name); | ||
551 | if (IS_ERR(ch->node)) { | ||
552 | err = PTR_ERR(ch->node); | ||
553 | dev_err(&dev->dev, | ||
554 | "failed to create %s channel device\n", dev->name); | ||
555 | goto fail; | ||
556 | } | ||
557 | |||
558 | return 0; | ||
559 | fail: | ||
560 | return err; | ||
561 | } | ||
562 | |||
563 | int nvhost_client_device_init(struct nvhost_device *dev) | ||
564 | { | ||
565 | int err; | ||
566 | struct nvhost_master *nvhost_master = nvhost_get_host(dev); | ||
567 | struct nvhost_channel *ch = &nvhost_master->channels[dev->index]; | ||
568 | |||
569 | /* store the pointer to this device for channel */ | ||
570 | ch->dev = dev; | ||
571 | |||
572 | err = nvhost_channel_init(ch, nvhost_master, dev->index); | ||
573 | if (err) | ||
574 | goto fail; | ||
575 | |||
576 | err = nvhost_client_user_init(dev); | ||
577 | if (err) | ||
578 | goto fail; | ||
579 | |||
580 | err = nvhost_module_init(dev); | ||
581 | if (err) | ||
582 | goto fail; | ||
583 | |||
584 | dev_info(&dev->dev, "initialized\n"); | ||
585 | |||
586 | return 0; | ||
587 | |||
588 | fail: | ||
589 | /* Add clean-up */ | ||
590 | return err; | ||
591 | } | ||
592 | |||
593 | int nvhost_client_device_suspend(struct nvhost_device *dev) | ||
594 | { | ||
595 | int ret = 0; | ||
596 | |||
597 | dev_info(&dev->dev, "suspending\n"); | ||
598 | |||
599 | ret = nvhost_channel_suspend(dev->channel); | ||
600 | if (ret) | ||
601 | return ret; | ||
602 | |||
603 | dev_info(&dev->dev, "suspend status: %d\n", ret); | ||
604 | |||
605 | return ret; | ||
606 | } | ||
diff --git a/drivers/video/tegra/host/bus_client.h b/drivers/video/tegra/host/bus_client.h new file mode 100644 index 00000000000..4e47071fd14 --- /dev/null +++ b/drivers/video/tegra/host/bus_client.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/bus_client.h | ||
3 | * | ||
4 | * Tegra Graphics Host client | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
#ifndef __NVHOST_BUS_CLIENT_H
#define __NVHOST_BUS_CLIENT_H

#include <linux/types.h>
struct nvhost_device;

/* Register access helpers for a client module (implemented in bus_client.c). */
void nvhost_read_module_regs(struct nvhost_device *ndev,
			u32 offset, int count, u32 *values);

void nvhost_write_module_regs(struct nvhost_device *ndev,
			u32 offset, int count, const u32 *values);

/* Create the channel character-device node for this client. */
int nvhost_client_user_init(struct nvhost_device *dev);

/* Initialize channel, user interface and power module for this client. */
int nvhost_client_device_init(struct nvhost_device *dev);

/* Suspend the client's channel; returns 0 or a negative errno. */
int nvhost_client_device_suspend(struct nvhost_device *dev);

#endif
diff --git a/drivers/video/tegra/host/chip_support.h b/drivers/video/tegra/host/chip_support.h new file mode 100644 index 00000000000..6727e7a69fb --- /dev/null +++ b/drivers/video/tegra/host/chip_support.h | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/chip_support.h | ||
3 | * | ||
4 | * Tegra Graphics Host Chip Support | ||
5 | * | ||
6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
#ifndef _NVHOST_CHIP_SUPPORT_H_
#define _NVHOST_CHIP_SUPPORT_H_

#include <linux/types.h>
struct output;
struct nvhost_waitchk;
struct nvhost_userctx_timeout;
struct nvhost_master;
struct nvhost_channel;
struct nvmap_handle;
struct nvmap_client;
struct nvhost_hwctx;
struct nvhost_cdma;
struct nvhost_intr;
struct push_buffer;
struct nvhost_syncpt;
struct nvhost_master;
struct dentry;
struct nvhost_job;

/*
 * Per-chip operation table.  Each Tegra generation (t20, t30, ...) fills
 * in these function pointers with its own implementations; common code
 * dispatches through them.
 */
struct nvhost_chip_support {
	/* Channel setup and work submission. */
	struct {
		int (*init)(struct nvhost_channel *,
			    struct nvhost_master *,
			    int chid);
		int (*submit)(struct nvhost_job *job);
		int (*read3dreg)(struct nvhost_channel *channel,
				struct nvhost_hwctx *hwctx,
				u32 offset,
				u32 *value);
	} channel;

	/* Command DMA control, including timeout/teardown recovery. */
	struct {
		void (*start)(struct nvhost_cdma *);
		void (*stop)(struct nvhost_cdma *);
		void (*kick)(struct  nvhost_cdma *);
		int (*timeout_init)(struct nvhost_cdma *,
				    u32 syncpt_id);
		void (*timeout_destroy)(struct nvhost_cdma *);
		void (*timeout_teardown_begin)(struct nvhost_cdma *);
		void (*timeout_teardown_end)(struct nvhost_cdma *,
					     u32 getptr);
		void (*timeout_cpu_incr)(struct nvhost_cdma *,
					 u32 getptr,
					 u32 syncpt_incrs,
					 u32 syncval,
					 u32 nr_slots);
		void (*timeout_pb_incr)(struct nvhost_cdma *,
					u32 getptr,
					u32 syncpt_incrs,
					u32 nr_slots,
					bool exec_ctxsave);
	} cdma;

	/* Push buffer (the ring the CDMA consumes). */
	struct {
		void (*reset)(struct push_buffer *);
		int (*init)(struct push_buffer *);
		void (*destroy)(struct push_buffer *);
		void (*push_to)(struct push_buffer *,
				struct nvmap_client *,
				struct nvmap_handle *,
				u32 op1, u32 op2);
		void (*pop_from)(struct push_buffer *,
				 unsigned int slots);
		u32 (*space)(struct push_buffer *);
		u32 (*putptr)(struct push_buffer *);
	} push_buffer;

	/* debugfs dump hooks. */
	struct {
		void (*debug_init)(struct dentry *de);
		void (*show_channel_cdma)(struct nvhost_master *,
					  struct nvhost_channel *,
					  struct output *,
					  int chid);
		void (*show_channel_fifo)(struct nvhost_master *,
					  struct nvhost_channel *,
					  struct output *,
					  int chid);
		void (*show_mlocks)(struct nvhost_master *m,
				    struct output *o);

	} debug;

	/* Sync point and module-mutex (mlock) management. */
	struct {
		void (*reset)(struct nvhost_syncpt *, u32 id);
		void (*reset_wait_base)(struct nvhost_syncpt *, u32 id);
		void (*read_wait_base)(struct nvhost_syncpt *, u32 id);
		u32 (*update_min)(struct nvhost_syncpt *, u32 id);
		void (*cpu_incr)(struct nvhost_syncpt *, u32 id);
		int (*wait_check)(struct nvhost_syncpt *sp,
				  struct nvmap_client *nvmap,
				  u32 waitchk_mask,
				  struct nvhost_waitchk *wait,
				  int num_waitchk);
		void (*debug)(struct nvhost_syncpt *);
		const char * (*name)(struct nvhost_syncpt *, u32 id);
		int (*mutex_try_lock)(struct nvhost_syncpt *,
				      unsigned int idx);
		void (*mutex_unlock)(struct nvhost_syncpt *,
				     unsigned int idx);
	} syncpt;

	/* Host1x interrupt management. */
	struct {
		void (*init_host_sync)(struct nvhost_intr *);
		void (*set_host_clocks_per_usec)(
			struct nvhost_intr *, u32 clocks);
		void (*set_syncpt_threshold)(
			struct nvhost_intr *, u32 id, u32 thresh);
		void (*enable_syncpt_intr)(struct nvhost_intr *, u32 id);
		void (*disable_all_syncpt_intrs)(struct nvhost_intr *);
		int  (*request_host_general_irq)(struct nvhost_intr *);
		void (*free_host_general_irq)(struct nvhost_intr *);
		int (*request_syncpt_irq)(struct nvhost_intr_syncpt *syncpt);
	} intr;

	/* Device lookup. */
	struct {
		struct nvhost_device *(*get_nvhost_device)(struct nvhost_master *host,
				char *name);
	} nvhost_dev;
};

#endif /* _NVHOST_CHIP_SUPPORT_H_ */
diff --git a/drivers/video/tegra/host/debug.c b/drivers/video/tegra/host/debug.c new file mode 100644 index 00000000000..91436c903fc --- /dev/null +++ b/drivers/video/tegra/host/debug.c | |||
@@ -0,0 +1,167 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/debug.c | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * Author: Erik Gilling <konkers@android.com> | ||
6 | * | ||
7 | * Copyright (C) 2011 NVIDIA Corporation | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/debugfs.h> | ||
21 | #include <linux/seq_file.h> | ||
22 | |||
23 | #include <linux/io.h> | ||
24 | |||
25 | #include "dev.h" | ||
26 | #include "debug.h" | ||
27 | |||
/* Tunables exposed through debugfs (see nvhost_debug_init() below). */
pid_t nvhost_debug_null_kickoff_pid;
unsigned int nvhost_debug_trace_cmdbuf;

/* Timeout-injection knobs — presumably consulted by the submit path
 * to force a timeout for a given pid/channel; confirm against callers. */
pid_t nvhost_debug_force_timeout_pid;
u32 nvhost_debug_force_timeout_val;
u32 nvhost_debug_force_timeout_channel;
34 | |||
35 | void nvhost_debug_output(struct output *o, const char* fmt, ...) | ||
36 | { | ||
37 | va_list args; | ||
38 | int len; | ||
39 | |||
40 | va_start(args, fmt); | ||
41 | len = vsnprintf(o->buf, sizeof(o->buf), fmt, args); | ||
42 | va_end(args); | ||
43 | o->fn(o->ctx, o->buf, len); | ||
44 | } | ||
45 | |||
46 | static int show_channels(struct device *dev, void *data) | ||
47 | { | ||
48 | struct nvhost_channel *ch; | ||
49 | struct nvhost_device *nvdev = to_nvhost_device(dev); | ||
50 | struct output *o = data; | ||
51 | struct nvhost_master *m; | ||
52 | |||
53 | if (nvdev == NULL) | ||
54 | return 0; | ||
55 | |||
56 | m = nvhost_get_host(nvdev); | ||
57 | ch = nvdev->channel; | ||
58 | if (ch) { | ||
59 | mutex_lock(&ch->reflock); | ||
60 | if (ch->refcount) { | ||
61 | mutex_lock(&ch->cdma.lock); | ||
62 | m->op.debug.show_channel_fifo(m, ch, o, nvdev->index); | ||
63 | m->op.debug.show_channel_cdma(m, ch, o, nvdev->index); | ||
64 | mutex_unlock(&ch->cdma.lock); | ||
65 | } | ||
66 | mutex_unlock(&ch->reflock); | ||
67 | } | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static void show_syncpts(struct nvhost_master *m, struct output *o) | ||
73 | { | ||
74 | int i; | ||
75 | BUG_ON(!m->op.syncpt.name); | ||
76 | nvhost_debug_output(o, "---- syncpts ----\n"); | ||
77 | for (i = 0; i < m->syncpt.nb_pts; i++) { | ||
78 | u32 max = nvhost_syncpt_read_max(&m->syncpt, i); | ||
79 | u32 min = nvhost_syncpt_update_min(&m->syncpt, i); | ||
80 | if (!min && !max) | ||
81 | continue; | ||
82 | nvhost_debug_output(o, "id %d (%s) min %d max %d\n", | ||
83 | i, m->op.syncpt.name(&m->syncpt, i), | ||
84 | min, max); | ||
85 | } | ||
86 | |||
87 | for (i = 0; i < m->syncpt.nb_bases; i++) { | ||
88 | u32 base_val; | ||
89 | base_val = nvhost_syncpt_read_wait_base(&m->syncpt, i); | ||
90 | if (base_val) | ||
91 | nvhost_debug_output(o, "waitbase id %d val %d\n", | ||
92 | i, base_val); | ||
93 | } | ||
94 | |||
95 | nvhost_debug_output(o, "\n"); | ||
96 | } | ||
97 | |||
/* Dump mlocks, sync points and all channels to the given output sink. */
static void show_all(struct nvhost_master *m, struct output *o)
{
	/* keep host1x powered/clocked while reading hardware state */
	nvhost_module_busy(m->dev);

	m->op.debug.show_mlocks(m, o);
	show_syncpts(m, o);
	nvhost_debug_output(o, "---- channels ----\n");
	bus_for_each_dev(&nvhost_bus_type, NULL, o, show_channels);

	nvhost_module_idle(m->dev);
}
109 | |||
110 | #ifdef CONFIG_DEBUG_FS | ||
/* seq_file backend for the "status" debugfs entry: dump everything. */
static int nvhost_debug_show(struct seq_file *s, void *unused)
{
	struct output o = {
		.fn = write_to_seqfile,
		.ctx = s
	};
	show_all(s->private, &o);
	return 0;
}
120 | |||
/* open() for the "status" entry; i_private is the nvhost_master. */
static int nvhost_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, nvhost_debug_show, inode->i_private);
}
125 | |||
/* File operations for the single-shot "status" debugfs file. */
static const struct file_operations nvhost_debug_fops = {
	.open		= nvhost_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
132 | |||
133 | void nvhost_debug_init(struct nvhost_master *master) | ||
134 | { | ||
135 | struct dentry *de = debugfs_create_dir("tegra_host", NULL); | ||
136 | |||
137 | debugfs_create_file("status", S_IRUGO, de, | ||
138 | master, &nvhost_debug_fops); | ||
139 | |||
140 | debugfs_create_u32("null_kickoff_pid", S_IRUGO|S_IWUSR, de, | ||
141 | &nvhost_debug_null_kickoff_pid); | ||
142 | debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR, de, | ||
143 | &nvhost_debug_trace_cmdbuf); | ||
144 | |||
145 | if (master->op.debug.debug_init) | ||
146 | master->op.debug.debug_init(de); | ||
147 | |||
148 | debugfs_create_u32("force_timeout_pid", S_IRUGO|S_IWUSR, de, | ||
149 | &nvhost_debug_force_timeout_pid); | ||
150 | debugfs_create_u32("force_timeout_val", S_IRUGO|S_IWUSR, de, | ||
151 | &nvhost_debug_force_timeout_val); | ||
152 | debugfs_create_u32("force_timeout_channel", S_IRUGO|S_IWUSR, de, | ||
153 | &nvhost_debug_force_timeout_channel); | ||
154 | } | ||
155 | #else | ||
/* CONFIG_DEBUG_FS disabled: keep the symbol so callers need no #ifdefs. */
void nvhost_debug_init(struct nvhost_master *master)
{
}
159 | #endif | ||
160 | |||
161 | void nvhost_debug_dump(struct nvhost_master *master) | ||
162 | { | ||
163 | struct output o = { | ||
164 | .fn = write_to_printk | ||
165 | }; | ||
166 | show_all(master, &o); | ||
167 | } | ||
diff --git a/drivers/video/tegra/host/debug.h b/drivers/video/tegra/host/debug.h new file mode 100644 index 00000000000..3dc156ab474 --- /dev/null +++ b/drivers/video/tegra/host/debug.h | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/debug.h | ||
3 | * | ||
4 | * Tegra Graphics Host Debug | ||
5 | * | ||
6 | * Copyright (c) 2011-2012 NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
#ifndef __NVHOST_DEBUG_H
#define __NVHOST_DEBUG_H

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/*
 * Output sink abstraction: nvhost_debug_output() formats into buf and
 * calls fn, so the same dump code can target a seq_file or printk.
 */
struct output {
	void (*fn)(void *ctx, const char* str, size_t len);
	void *ctx;
	char buf[256];
};

static inline void write_to_seqfile(void *ctx, const char* str, size_t len)
{
	seq_write((struct seq_file *)ctx, str, len);
}

/*
 * NOTE(review): ignores len and relies on str being NUL-terminated —
 * holds because vsnprintf() in nvhost_debug_output() terminates buf.
 */
static inline void write_to_printk(void *ctx, const char* str, size_t len)
{
	printk(KERN_INFO "%s", str);
}

void nvhost_debug_output(struct output *o, const char* fmt, ...);

extern pid_t nvhost_debug_null_kickoff_pid;
extern pid_t nvhost_debug_force_timeout_pid;
extern u32 nvhost_debug_force_timeout_val;
extern u32 nvhost_debug_force_timeout_channel;
extern unsigned int nvhost_debug_trace_cmdbuf;

#endif /*__NVHOST_DEBUG_H */
diff --git a/drivers/video/tegra/host/dev.c b/drivers/video/tegra/host/dev.c new file mode 100644 index 00000000000..8f0c0393401 --- /dev/null +++ b/drivers/video/tegra/host/dev.c | |||
@@ -0,0 +1,635 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/dev.c | ||
3 | * | ||
4 | * Tegra Graphics Host Driver Entrypoint | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <linux/slab.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/spinlock.h> | ||
24 | #include <linux/fs.h> | ||
25 | #include <linux/cdev.h> | ||
26 | #include <linux/uaccess.h> | ||
27 | #include <linux/file.h> | ||
28 | #include <linux/clk.h> | ||
29 | #include <linux/hrtimer.h> | ||
30 | |||
31 | #include "dev.h" | ||
32 | #define CREATE_TRACE_POINTS | ||
33 | #include <trace/events/nvhost.h> | ||
34 | |||
35 | #include <linux/io.h> | ||
36 | |||
37 | #include <linux/nvhost.h> | ||
38 | #include <linux/nvhost_ioctl.h> | ||
39 | #include <mach/nvmap.h> | ||
40 | #include <mach/gpufuse.h> | ||
41 | #include <mach/hardware.h> | ||
42 | #include <mach/iomap.h> | ||
43 | |||
44 | #include "debug.h" | ||
45 | #include "nvhost_job.h" | ||
46 | #include "t20/t20.h" | ||
47 | #include "t30/t30.h" | ||
48 | #include "bus_client.h" | ||
49 | |||
50 | #define DRIVER_NAME "host1x" | ||
51 | |||
/* chrdev numbers shared with bus_client.c's per-channel nodes */
int nvhost_major;
int nvhost_minor;

/* number of host1x register apertures — TODO confirm where this is set */
static unsigned int register_sets;
56 | |||
/*
 * Per-open state for the ctrl node.  mod_locks[i] != 0 means this file
 * holds module lock i (index 0 doubles as a module-busy reference, see
 * nvhost_ioctl_ctrl_module_mutex()).
 */
struct nvhost_ctrl_userctx {
	struct nvhost_master *dev;
	u32 *mod_locks;
};
61 | |||
62 | static int nvhost_ctrlrelease(struct inode *inode, struct file *filp) | ||
63 | { | ||
64 | struct nvhost_ctrl_userctx *priv = filp->private_data; | ||
65 | int i; | ||
66 | |||
67 | trace_nvhost_ctrlrelease(priv->dev->dev->name); | ||
68 | |||
69 | filp->private_data = NULL; | ||
70 | if (priv->mod_locks[0]) | ||
71 | nvhost_module_idle(priv->dev->dev); | ||
72 | for (i = 1; i < priv->dev->syncpt.nb_mlocks; i++) | ||
73 | if (priv->mod_locks[i]) | ||
74 | nvhost_mutex_unlock(&priv->dev->syncpt, i); | ||
75 | kfree(priv->mod_locks); | ||
76 | kfree(priv); | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | static int nvhost_ctrlopen(struct inode *inode, struct file *filp) | ||
81 | { | ||
82 | struct nvhost_master *host = container_of(inode->i_cdev, struct nvhost_master, cdev); | ||
83 | struct nvhost_ctrl_userctx *priv; | ||
84 | u32 *mod_locks; | ||
85 | |||
86 | trace_nvhost_ctrlopen(host->dev->name); | ||
87 | |||
88 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
89 | mod_locks = kzalloc(sizeof(u32) * host->syncpt.nb_mlocks, GFP_KERNEL); | ||
90 | |||
91 | if (!(priv && mod_locks)) { | ||
92 | kfree(priv); | ||
93 | kfree(mod_locks); | ||
94 | return -ENOMEM; | ||
95 | } | ||
96 | |||
97 | priv->dev = host; | ||
98 | priv->mod_locks = mod_locks; | ||
99 | filp->private_data = priv; | ||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | static int nvhost_ioctl_ctrl_syncpt_read(struct nvhost_ctrl_userctx *ctx, | ||
104 | struct nvhost_ctrl_syncpt_read_args *args) | ||
105 | { | ||
106 | if (args->id >= ctx->dev->syncpt.nb_pts) | ||
107 | return -EINVAL; | ||
108 | args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id); | ||
109 | trace_nvhost_ioctl_ctrl_syncpt_read(args->id, args->value); | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static int nvhost_ioctl_ctrl_syncpt_incr(struct nvhost_ctrl_userctx *ctx, | ||
114 | struct nvhost_ctrl_syncpt_incr_args *args) | ||
115 | { | ||
116 | if (args->id >= ctx->dev->syncpt.nb_pts) | ||
117 | return -EINVAL; | ||
118 | trace_nvhost_ioctl_ctrl_syncpt_incr(args->id); | ||
119 | nvhost_syncpt_incr(&ctx->dev->syncpt, args->id); | ||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | static int nvhost_ioctl_ctrl_syncpt_waitex(struct nvhost_ctrl_userctx *ctx, | ||
124 | struct nvhost_ctrl_syncpt_waitex_args *args) | ||
125 | { | ||
126 | u32 timeout; | ||
127 | int err; | ||
128 | if (args->id >= ctx->dev->syncpt.nb_pts) | ||
129 | return -EINVAL; | ||
130 | if (args->timeout == NVHOST_NO_TIMEOUT) | ||
131 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
132 | else | ||
133 | timeout = (u32)msecs_to_jiffies(args->timeout); | ||
134 | |||
135 | err = nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id, | ||
136 | args->thresh, timeout, &args->value); | ||
137 | trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh, | ||
138 | args->timeout, args->value, err); | ||
139 | |||
140 | return err; | ||
141 | } | ||
142 | |||
/*
 * NVHOST_IOCTL_CTRL_MODULE_MUTEX: acquire (lock=1) or release (lock=0)
 * a module lock on behalf of this file.  Lock 0 is special: it maps to
 * a module busy/idle reference instead of a hardware mutex.  mod_locks[]
 * tracks what this context holds so it never double-locks or unlocks a
 * mutex it doesn't own, and so release() can clean up.
 */
static int nvhost_ioctl_ctrl_module_mutex(struct nvhost_ctrl_userctx *ctx,
	struct nvhost_ctrl_module_mutex_args *args)
{
	int err = 0;
	if (args->id >= ctx->dev->syncpt.nb_mlocks ||
	    args->lock > 1)
		return -EINVAL;

	trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
	if (args->lock && !ctx->mod_locks[args->id]) {
		if (args->id == 0)
			nvhost_module_busy(ctx->dev->dev);
		else
			err = nvhost_mutex_try_lock(&ctx->dev->syncpt,
					args->id);
		/* record ownership only if the lock was actually taken */
		if (!err)
			ctx->mod_locks[args->id] = 1;
	} else if (!args->lock && ctx->mod_locks[args->id]) {
		if (args->id == 0)
			nvhost_module_idle(ctx->dev->dev);
		else
			nvhost_mutex_unlock(&ctx->dev->syncpt, args->id);
		ctx->mod_locks[args->id] = 0;
	}
	return err;
}
169 | |||
170 | static struct nvhost_device *get_ndev_by_moduleid(struct nvhost_master *host, | ||
171 | u32 id) | ||
172 | { | ||
173 | int i; | ||
174 | |||
175 | for (i = 0; i < host->nb_channels; i++) { | ||
176 | struct nvhost_device *ndev = host->channels[i].dev; | ||
177 | |||
178 | /* display and dsi do not use channel for register programming. | ||
179 | * so their channels do not have device instance. | ||
180 | * hence skip such channels from here. */ | ||
181 | if (ndev == NULL) | ||
182 | continue; | ||
183 | |||
184 | if (id == ndev->moduleid) | ||
185 | return ndev; | ||
186 | } | ||
187 | return NULL; | ||
188 | } | ||
189 | |||
/*
 * NVHOST_IOCTL_CTRL_MODULE_REGRDWR: read or write num_offsets blocks of
 * registers (block_size bytes each, u32-aligned) on the module identified
 * by args->id, staging data through a 64-word bounce buffer.
 *
 * NOTE(review): 'offsets' and 'values' are dereferenced with
 * get_user()/copy_*_user() but are not __user-annotated in the args
 * struct usage here — verify against the uapi definition.
 */
static int nvhost_ioctl_ctrl_module_regrdwr(struct nvhost_ctrl_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 *offsets = args->offsets;
	u32 *values = args->values;
	u32 vals[64];
	struct nvhost_device *ndev;

	trace_nvhost_ioctl_ctrl_module_regrdwr(args->id,
			args->num_offsets, args->write);
	/* Check that there is something to read and that block size is
	 * u32 aligned */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = get_ndev_by_moduleid(ctx->dev, args->id);
	if (!ndev)
		return -EINVAL;

	while (num_offsets--) {
		int remaining = args->block_size >> 2;
		u32 offs;
		if (get_user(offs, offsets))
			return -EFAULT;
		offsets++;
		while (remaining) {
			int batch = min(remaining, 64);
			if (args->write) {
				if (copy_from_user(vals, values,
							batch*sizeof(u32)))
					return -EFAULT;
				nvhost_write_module_regs(ndev,
						offs, batch, vals);
			} else {
				nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (copy_to_user(values, vals,
							batch*sizeof(u32)))
					return -EFAULT;
			}
			remaining -= batch;
			/* NOTE(review): 'offs' advances by the word count,
			 * while block_size is in bytes — looks like this
			 * should be offs += batch * sizeof(u32) unless
			 * nvhost_*_module_regs takes a word offset; confirm
			 * against its implementation in bus_client.c. */
			offs += batch;
			values += batch;
		}
	}

	return 0;
}
239 | |||
/* NVHOST_IOCTL_CTRL_GET_VERSION: report the highest supported submit ABI. */
static int nvhost_ioctl_ctrl_get_version(struct nvhost_ctrl_userctx *ctx,
	struct nvhost_get_param_args *args)
{
	args->value = NVHOST_SUBMIT_VERSION_MAX_SUPPORTED;
	return 0;
}
246 | |||
247 | static long nvhost_ctrlctl(struct file *filp, | ||
248 | unsigned int cmd, unsigned long arg) | ||
249 | { | ||
250 | struct nvhost_ctrl_userctx *priv = filp->private_data; | ||
251 | u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE]; | ||
252 | int err = 0; | ||
253 | |||
254 | if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) || | ||
255 | (_IOC_NR(cmd) == 0) || | ||
256 | (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST)) | ||
257 | return -EFAULT; | ||
258 | |||
259 | BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE); | ||
260 | |||
261 | if (_IOC_DIR(cmd) & _IOC_WRITE) { | ||
262 | if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd))) | ||
263 | return -EFAULT; | ||
264 | } | ||
265 | |||
266 | switch (cmd) { | ||
267 | case NVHOST_IOCTL_CTRL_SYNCPT_READ: | ||
268 | err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf); | ||
269 | break; | ||
270 | case NVHOST_IOCTL_CTRL_SYNCPT_INCR: | ||
271 | err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf); | ||
272 | break; | ||
273 | case NVHOST_IOCTL_CTRL_SYNCPT_WAIT: | ||
274 | err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf); | ||
275 | break; | ||
276 | case NVHOST_IOCTL_CTRL_MODULE_MUTEX: | ||
277 | err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf); | ||
278 | break; | ||
279 | case NVHOST_IOCTL_CTRL_MODULE_REGRDWR: | ||
280 | err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf); | ||
281 | break; | ||
282 | case NVHOST_IOCTL_CTRL_SYNCPT_WAITEX: | ||
283 | err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf); | ||
284 | break; | ||
285 | case NVHOST_IOCTL_CTRL_GET_VERSION: | ||
286 | err = nvhost_ioctl_ctrl_get_version(priv, (void *)buf); | ||
287 | break; | ||
288 | default: | ||
289 | err = -ENOTTY; | ||
290 | break; | ||
291 | } | ||
292 | |||
293 | if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) | ||
294 | err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)); | ||
295 | |||
296 | return err; | ||
297 | } | ||
298 | |||
/* File operations for the IFACE_NAME "-ctrl" device node. */
static const struct file_operations nvhost_ctrlops = {
	.owner = THIS_MODULE,
	.release = nvhost_ctrlrelease,
	.open = nvhost_ctrlopen,
	.unlocked_ioctl = nvhost_ctrlctl
};
305 | |||
/* Power-on hook: restore sync point state and restart interrupt handling. */
static void power_on_host(struct nvhost_device *dev)
{
	struct nvhost_master *host = nvhost_get_drvdata(dev);
	nvhost_syncpt_reset(&host->syncpt);
	nvhost_intr_start(&host->intr, clk_get_rate(dev->clk[0]));
}
312 | |||
313 | static int power_off_host(struct nvhost_device *dev) | ||
314 | { | ||
315 | struct nvhost_master *host = nvhost_get_drvdata(dev); | ||
316 | nvhost_syncpt_save(&host->syncpt); | ||
317 | nvhost_intr_stop(&host->intr); | ||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | static int __devinit nvhost_user_init(struct nvhost_master *host) | ||
322 | { | ||
323 | int err, devno; | ||
324 | |||
325 | host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME); | ||
326 | if (IS_ERR(host->nvhost_class)) { | ||
327 | err = PTR_ERR(host->nvhost_class); | ||
328 | dev_err(&host->dev->dev, "failed to create class\n"); | ||
329 | goto fail; | ||
330 | } | ||
331 | |||
332 | err = alloc_chrdev_region(&devno, nvhost_minor, | ||
333 | host->nb_channels + 1, IFACE_NAME); | ||
334 | nvhost_major = MAJOR(devno); | ||
335 | if (err < 0) { | ||
336 | dev_err(&host->dev->dev, "failed to reserve chrdev region\n"); | ||
337 | goto fail; | ||
338 | } | ||
339 | |||
340 | cdev_init(&host->cdev, &nvhost_ctrlops); | ||
341 | host->cdev.owner = THIS_MODULE; | ||
342 | devno = MKDEV(nvhost_major, nvhost_minor + host->nb_channels); | ||
343 | err = cdev_add(&host->cdev, devno, 1); | ||
344 | if (err < 0) | ||
345 | goto fail; | ||
346 | host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL, | ||
347 | IFACE_NAME "-ctrl"); | ||
348 | if (IS_ERR(host->ctrl)) { | ||
349 | err = PTR_ERR(host->ctrl); | ||
350 | dev_err(&host->dev->dev, "failed to create ctrl device\n"); | ||
351 | goto fail; | ||
352 | } | ||
353 | |||
354 | return 0; | ||
355 | fail: | ||
356 | return err; | ||
357 | } | ||
358 | |||
/* Look up a client nvhost device (e.g. "gr2d", "dsi") by name through
 * the chip-specific device-management op table. BUGs if the current
 * chip support never installed the lookup op. */
struct nvhost_device *nvhost_get_device(char *name)
{
	BUG_ON(!host_device_op(nvhost).get_nvhost_device);
	return host_device_op(nvhost).get_nvhost_device(nvhost, name);
}
364 | |||
365 | static void nvhost_remove_chip_support(struct nvhost_master *host) | ||
366 | { | ||
367 | kfree(host->channels); | ||
368 | host->channels = 0; | ||
369 | |||
370 | kfree(host->syncpt.min_val); | ||
371 | host->syncpt.min_val = 0; | ||
372 | |||
373 | kfree(host->syncpt.max_val); | ||
374 | host->syncpt.max_val = 0; | ||
375 | |||
376 | kfree(host->syncpt.base_val); | ||
377 | host->syncpt.base_val = 0; | ||
378 | |||
379 | kfree(host->intr.syncpt); | ||
380 | host->intr.syncpt = 0; | ||
381 | |||
382 | kfree(host->syncpt.lock_counts); | ||
383 | host->syncpt.lock_counts = 0; | ||
384 | } | ||
385 | |||
386 | static int __devinit nvhost_init_chip_support(struct nvhost_master *host) | ||
387 | { | ||
388 | int err; | ||
389 | switch (tegra_get_chipid()) { | ||
390 | case TEGRA_CHIPID_TEGRA2: | ||
391 | err = nvhost_init_t20_support(host); | ||
392 | break; | ||
393 | |||
394 | case TEGRA_CHIPID_TEGRA3: | ||
395 | err = nvhost_init_t30_support(host); | ||
396 | break; | ||
397 | default: | ||
398 | return -ENODEV; | ||
399 | } | ||
400 | |||
401 | if (err) | ||
402 | return err; | ||
403 | |||
404 | /* allocate items sized in chip specific support init */ | ||
405 | host->channels = kzalloc(sizeof(struct nvhost_channel) * | ||
406 | host->nb_channels, GFP_KERNEL); | ||
407 | |||
408 | host->syncpt.min_val = kzalloc(sizeof(atomic_t) * | ||
409 | host->syncpt.nb_pts, GFP_KERNEL); | ||
410 | |||
411 | host->syncpt.max_val = kzalloc(sizeof(atomic_t) * | ||
412 | host->syncpt.nb_pts, GFP_KERNEL); | ||
413 | |||
414 | host->syncpt.base_val = kzalloc(sizeof(u32) * | ||
415 | host->syncpt.nb_bases, GFP_KERNEL); | ||
416 | |||
417 | host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) * | ||
418 | host->syncpt.nb_pts, GFP_KERNEL); | ||
419 | |||
420 | host->syncpt.lock_counts = kzalloc(sizeof(atomic_t) * | ||
421 | host->syncpt.nb_mlocks, GFP_KERNEL); | ||
422 | |||
423 | if (!(host->channels && host->syncpt.min_val && | ||
424 | host->syncpt.max_val && host->syncpt.base_val && | ||
425 | host->intr.syncpt && host->syncpt.lock_counts)) { | ||
426 | /* frees happen in the support removal phase */ | ||
427 | return -ENOMEM; | ||
428 | } | ||
429 | |||
430 | return 0; | ||
431 | } | ||
432 | |||
/* MMIO apertures and IRQs claimed by the host1x master device:
 * host1x itself, both display controllers, VI, ISP and MPE, plus the
 * sync-point-threshold IRQ range and the host1x general IRQ. */
static struct resource nvhost_resources[] = {
	{
		.start = TEGRA_HOST1X_BASE,
		.end = TEGRA_HOST1X_BASE + TEGRA_HOST1X_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = TEGRA_DISPLAY_BASE,
		.end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = TEGRA_DISPLAY2_BASE,
		.end = TEGRA_DISPLAY2_BASE + TEGRA_DISPLAY2_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = TEGRA_VI_BASE,
		.end = TEGRA_VI_BASE + TEGRA_VI_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = TEGRA_ISP_BASE,
		.end = TEGRA_ISP_BASE + TEGRA_ISP_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = TEGRA_MPE_BASE,
		.end = TEGRA_MPE_BASE + TEGRA_MPE_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		/* one IRQ line per sync-point threshold */
		.start = INT_SYNCPT_THRESH_BASE,
		.end = INT_SYNCPT_THRESH_BASE + INT_SYNCPT_THRESH_NR - 1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = INT_HOST1X_MPCORE_GENERAL,
		.end = INT_HOST1X_MPCORE_GENERAL,
		.flags = IORESOURCE_IRQ,
	},
};

/* The host1x bus-master device; client devices (gr2d, gr3d, dsi, ...)
 * are children of this. Requests the "host1x" clock at rate UINT_MAX
 * (presumably "maximum available" -- confirm against nvhost_acm) and
 * declares no power-gating partitions. */
struct nvhost_device tegra_grhost_device = {
	.name = DRIVER_NAME,
	.id = -1,
	.resource = nvhost_resources,
	.num_resources = ARRAY_SIZE(nvhost_resources),
	.finalize_poweron = power_on_host,
	.prepare_poweroff = power_off_host,
	.clocks = {{"host1x", UINT_MAX}, {} },
	NVHOST_MODULE_NO_POWERGATE_IDS,
};
486 | |||
487 | static int __devinit nvhost_probe(struct nvhost_device *dev) | ||
488 | { | ||
489 | struct nvhost_master *host; | ||
490 | struct resource *regs, *intr0, *intr1; | ||
491 | int i, err; | ||
492 | |||
493 | regs = nvhost_get_resource(dev, IORESOURCE_MEM, 0); | ||
494 | intr0 = nvhost_get_resource(dev, IORESOURCE_IRQ, 0); | ||
495 | intr1 = nvhost_get_resource(dev, IORESOURCE_IRQ, 1); | ||
496 | |||
497 | if (!regs || !intr0 || !intr1) { | ||
498 | dev_err(&dev->dev, "missing required platform resources\n"); | ||
499 | return -ENXIO; | ||
500 | } | ||
501 | |||
502 | host = kzalloc(sizeof(*host), GFP_KERNEL); | ||
503 | if (!host) | ||
504 | return -ENOMEM; | ||
505 | |||
506 | host->nvmap = nvmap_create_client(nvmap_dev, "nvhost"); | ||
507 | if (!host->nvmap) { | ||
508 | dev_err(&dev->dev, "unable to create nvmap client\n"); | ||
509 | err = -EIO; | ||
510 | goto fail; | ||
511 | } | ||
512 | |||
513 | host->reg_mem = request_mem_region(regs->start, | ||
514 | resource_size(regs), dev->name); | ||
515 | if (!host->reg_mem) { | ||
516 | dev_err(&dev->dev, "failed to get host register memory\n"); | ||
517 | err = -ENXIO; | ||
518 | goto fail; | ||
519 | } | ||
520 | |||
521 | host->aperture = ioremap(regs->start, resource_size(regs)); | ||
522 | if (!host->aperture) { | ||
523 | dev_err(&dev->dev, "failed to remap host registers\n"); | ||
524 | err = -ENXIO; | ||
525 | goto fail; | ||
526 | } | ||
527 | |||
528 | err = nvhost_init_chip_support(host); | ||
529 | if (err) { | ||
530 | dev_err(&dev->dev, "failed to init chip support\n"); | ||
531 | goto fail; | ||
532 | } | ||
533 | |||
534 | /* Register host1x device as bus master */ | ||
535 | host->dev = dev; | ||
536 | |||
537 | /* Give pointer to host1x via driver */ | ||
538 | nvhost_set_drvdata(dev, host); | ||
539 | |||
540 | nvhost_bus_add_host(host); | ||
541 | |||
542 | err = nvhost_intr_init(&host->intr, intr1->start, intr0->start); | ||
543 | if (err) | ||
544 | goto fail; | ||
545 | |||
546 | err = nvhost_user_init(host); | ||
547 | if (err) | ||
548 | goto fail; | ||
549 | |||
550 | err = nvhost_module_init(&tegra_grhost_device); | ||
551 | if (err) | ||
552 | goto fail; | ||
553 | |||
554 | for (i = 0; i < host->dev->num_clks; i++) | ||
555 | clk_enable(host->dev->clk[i]); | ||
556 | nvhost_syncpt_reset(&host->syncpt); | ||
557 | for (i = 0; i < host->dev->num_clks; i++) | ||
558 | clk_disable(host->dev->clk[0]); | ||
559 | |||
560 | nvhost_debug_init(host); | ||
561 | |||
562 | dev_info(&dev->dev, "initialized\n"); | ||
563 | return 0; | ||
564 | |||
565 | fail: | ||
566 | nvhost_remove_chip_support(host); | ||
567 | if (host->nvmap) | ||
568 | nvmap_client_put(host->nvmap); | ||
569 | kfree(host); | ||
570 | return err; | ||
571 | } | ||
572 | |||
/* Tear down interrupt handling and the chip-support arrays.
 * NOTE(review): this does not free the nvhost_master, unmap the
 * aperture, release the mem region or undo nvhost_user_init() --
 * presumably the master device is never unbound in practice; confirm
 * before allowing driver unbind. */
static int __exit nvhost_remove(struct nvhost_device *dev)
{
	struct nvhost_master *host = nvhost_get_drvdata(dev);
	nvhost_intr_deinit(&host->intr);
	nvhost_remove_chip_support(host);
	return 0;
}

/* System-suspend callback: delegates to nvhost_module_suspend().
 * NOTE(review): the 'true' argument's meaning is not visible here --
 * presumably "system suspend" vs. runtime idle; confirm against
 * nvhost_acm. */
static int nvhost_suspend(struct nvhost_device *dev, pm_message_t state)
{
	struct nvhost_master *host = nvhost_get_drvdata(dev);
	int ret = 0;

	dev_info(&dev->dev, "suspending\n");
	ret = nvhost_module_suspend(host->dev, true);
	dev_info(&dev->dev, "suspend status: %d\n", ret);

	return ret;
}

/* Resume callback: nothing to restore eagerly; hardware state is
 * brought back lazily via the power_on_host() poweron hook. */
static int nvhost_resume(struct nvhost_device *dev)
{
	dev_info(&dev->dev, "resuming\n");
	return 0;
}
598 | |||
/* Driver for the host1x master device registered below. */
static struct nvhost_driver nvhost_driver = {
	.probe = nvhost_probe,
	.remove = __exit_p(nvhost_remove),
	.suspend = nvhost_suspend,
	.resume = nvhost_resume,
	.driver = {
		.owner = THIS_MODULE,
		.name = DRIVER_NAME
	}
};

/* Module entry: snapshot the number of GPU register sets, then
 * register the host1x master driver. */
static int __init nvhost_mod_init(void)
{
	register_sets = tegra_gpu_register_sets();
	return nvhost_driver_register(&nvhost_driver);
}

static void __exit nvhost_mod_exit(void)
{
	nvhost_driver_unregister(&nvhost_driver);
}

/* host1x master device needs nvmap to be instantiated first.
 * nvmap is instantiated via fs_initcall.
 * Hence instantiate host1x master device using rootfs_initcall
 * which is one level after fs_initcall. */
rootfs_initcall(nvhost_mod_init);
module_exit(nvhost_mod_exit);

/* read-only (0444) module parameter: no set method (NULL), value is
 * filled in from tegra_gpu_register_sets() at init time. */
module_param_call(register_sets, NULL, param_get_uint, &register_sets, 0444);
MODULE_PARM_DESC(register_sets, "Number of register sets");

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("Graphics host driver for Tegra products");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform-nvhost");
diff --git a/drivers/video/tegra/host/dev.h b/drivers/video/tegra/host/dev.h new file mode 100644 index 00000000000..74d7e16fc27 --- /dev/null +++ b/drivers/video/tegra/host/dev.h | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/dev.h | ||
3 | * | ||
4 | * Tegra Graphics Host Driver Entrypoint | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #ifndef __NVHOST_DEV_H | ||
22 | #define __NVHOST_DEV_H | ||
23 | |||
24 | #include "nvhost_acm.h" | ||
25 | #include "nvhost_syncpt.h" | ||
26 | #include "nvhost_intr.h" | ||
27 | #include "nvhost_channel.h" | ||
28 | #include "chip_support.h" | ||
29 | |||
30 | #define TRACE_MAX_LENGTH 128U | ||
31 | #define IFACE_NAME "nvhost" | ||
32 | |||
33 | extern int nvhost_major; | ||
34 | extern int nvhost_minor; | ||
35 | |||
36 | struct nvhost_hwctx; | ||
37 | |||
/* Top-level state for the host1x master device: register mappings,
 * the user-space control node, sync-point / interrupt bookkeeping and
 * the channel array sized by chip-specific init. */
struct nvhost_master {
	void __iomem *aperture;		/* mapped host1x register space */
	void __iomem *sync_aperture;	/* presumably the syncpt register
					 * window inside the aperture --
					 * set by chip support; confirm */
	struct resource *reg_mem;	/* claimed MMIO region */
	struct class *nvhost_class;	/* class for the /dev nodes */
	struct cdev cdev;		/* "nvhost-ctrl" char device */
	struct device *ctrl;		/* ctrl device node */
	struct nvhost_syncpt syncpt;	/* sync-point state */
	struct nvmap_client *nvmap;	/* memory-manager client */
	struct nvhost_intr intr;	/* interrupt dispatch state */
	struct nvhost_device *dev;	/* the bus-master device */
	struct nvhost_channel *channels; /* nb_channels entries */
	u32 nb_channels;		/* set by chip-specific init */

	struct nvhost_chip_support op;	/* chip-specific function tables */

	atomic_t clientid;		/* id source for client contexts */
};
56 | |||
57 | extern struct nvhost_master *nvhost; | ||
58 | |||
59 | void nvhost_debug_init(struct nvhost_master *master); | ||
60 | void nvhost_debug_dump(struct nvhost_master *master); | ||
61 | |||
62 | #define host_device_op(host) (host->op.nvhost_dev) | ||
63 | |||
64 | struct nvhost_device *nvhost_get_device(char *name); | ||
65 | |||
66 | extern pid_t nvhost_debug_null_kickoff_pid; | ||
67 | |||
68 | #endif | ||
diff --git a/drivers/video/tegra/host/dsi/Makefile b/drivers/video/tegra/host/dsi/Makefile new file mode 100644 index 00000000000..eb94d3ec492 --- /dev/null +++ b/drivers/video/tegra/host/dsi/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | GCOV_PROFILE := y | ||
2 | EXTRA_CFLAGS += -Idrivers/video/tegra/host | ||
3 | |||
4 | nvhost-dsi-objs = \ | ||
5 | dsi.o | ||
6 | |||
7 | obj-$(CONFIG_TEGRA_GRHOST) += nvhost-dsi.o | ||
diff --git a/drivers/video/tegra/host/dsi/dsi.c b/drivers/video/tegra/host/dsi/dsi.c new file mode 100644 index 00000000000..0e49f591574 --- /dev/null +++ b/drivers/video/tegra/host/dsi/dsi.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/dsi/dsi.c | ||
3 | * | ||
4 | * Tegra Graphics DSI | ||
5 | * | ||
6 | * Copyright (c) 2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include "dev.h" | ||
22 | #include "bus_client.h" | ||
23 | |||
/* Probe: standard nvhost client-device initialization (channel,
 * device node, clocks) via the shared bus_client helper. */
static int dsi_probe(struct nvhost_device *dev)
{
	return nvhost_client_device_init(dev);
}

static int __exit dsi_remove(struct nvhost_device *dev)
{
	/* Add clean-up */
	return 0;
}

/* Suspend: delegate to the common client idle/suspend path. */
static int dsi_suspend(struct nvhost_device *dev, pm_message_t state)
{
	return nvhost_client_device_suspend(dev);
}

/* Resume: nothing to restore eagerly; state comes back on next use. */
static int dsi_resume(struct nvhost_device *dev)
{
	dev_info(&dev->dev, "resuming\n");
	return 0;
}

/* The "dsi" device obtained from the chip-specific device table. */
struct nvhost_device *dsi_device;

static struct nvhost_driver dsi_driver = {
	.probe = dsi_probe,
	.remove = __exit_p(dsi_remove),
#ifdef CONFIG_PM
	.suspend = dsi_suspend,
	.resume = dsi_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "dsi",
	}
};
60 | |||
/* Module init: look up the chip-provided "dsi" device, register it on
 * the nvhost bus, then register this driver.
 * NOTE(review): if nvhost_driver_register() fails, dsi_device stays
 * registered -- confirm whether unwinding matters for this module. */
static int __init dsi_init(void)
{
	int err;

	dsi_device = nvhost_get_device("dsi");
	if (!dsi_device)
		return -ENXIO;

	err = nvhost_device_register(dsi_device);
	if (err)
		return err;

	return nvhost_driver_register(&dsi_driver);
}

static void __exit dsi_exit(void)
{
	nvhost_driver_unregister(&dsi_driver);
}
80 | |||
81 | module_init(dsi_init); | ||
82 | module_exit(dsi_exit); | ||
diff --git a/drivers/video/tegra/host/gr2d/Makefile b/drivers/video/tegra/host/gr2d/Makefile new file mode 100644 index 00000000000..a79a2101677 --- /dev/null +++ b/drivers/video/tegra/host/gr2d/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | GCOV_PROFILE := y | ||
2 | EXTRA_CFLAGS += -Idrivers/video/tegra/host | ||
3 | |||
4 | nvhost-gr2d-objs = \ | ||
5 | gr2d.o | ||
6 | |||
7 | obj-$(CONFIG_TEGRA_GRHOST) += nvhost-gr2d.o | ||
diff --git a/drivers/video/tegra/host/gr2d/gr2d.c b/drivers/video/tegra/host/gr2d/gr2d.c new file mode 100644 index 00000000000..f88eb72e0a4 --- /dev/null +++ b/drivers/video/tegra/host/gr2d/gr2d.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/gr2d/gr2d.c | ||
3 | * | ||
4 | * Tegra Graphics 2D | ||
5 | * | ||
6 | * Copyright (c) 2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include "dev.h" | ||
22 | #include "bus_client.h" | ||
23 | |||
/* Probe: standard nvhost client-device initialization (channel,
 * device node, clocks) via the shared bus_client helper. */
static int __devinit gr2d_probe(struct nvhost_device *dev)
{
	return nvhost_client_device_init(dev);
}

static int __exit gr2d_remove(struct nvhost_device *dev)
{
	/* Add clean-up */
	return 0;
}

/* Suspend: delegate to the common client idle/suspend path. */
static int gr2d_suspend(struct nvhost_device *dev, pm_message_t state)
{
	return nvhost_client_device_suspend(dev);
}

/* Resume: nothing to restore eagerly; state comes back on next use. */
static int gr2d_resume(struct nvhost_device *dev)
{
	dev_info(&dev->dev, "resuming\n");
	return 0;
}

/* The "gr2d" device obtained from the chip-specific device table. */
struct nvhost_device *gr2d_device;

static struct nvhost_driver gr2d_driver = {
	.probe = gr2d_probe,
	.remove = __exit_p(gr2d_remove),
#ifdef CONFIG_PM
	.suspend = gr2d_suspend,
	.resume = gr2d_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "gr2d",
	}
};
60 | |||
/* Module init: look up the chip-provided "gr2d" device, register it on
 * the nvhost bus, then register this driver.
 * NOTE(review): if nvhost_driver_register() fails, gr2d_device stays
 * registered -- confirm whether unwinding matters for this module. */
static int __init gr2d_init(void)
{
	int err;

	gr2d_device = nvhost_get_device("gr2d");
	if (!gr2d_device)
		return -ENXIO;

	err = nvhost_device_register(gr2d_device);
	if (err)
		return err;

	return nvhost_driver_register(&gr2d_driver);
}

static void __exit gr2d_exit(void)
{
	nvhost_driver_unregister(&gr2d_driver);
}
80 | |||
81 | module_init(gr2d_init); | ||
82 | module_exit(gr2d_exit); | ||
diff --git a/drivers/video/tegra/host/gr3d/Makefile b/drivers/video/tegra/host/gr3d/Makefile new file mode 100644 index 00000000000..dfbd078ab42 --- /dev/null +++ b/drivers/video/tegra/host/gr3d/Makefile | |||
@@ -0,0 +1,10 @@ | |||
1 | GCOV_PROFILE := y | ||
2 | EXTRA_CFLAGS += -Idrivers/video/tegra/host | ||
3 | |||
4 | nvhost-gr3d-objs = \ | ||
5 | gr3d.o \ | ||
6 | gr3d_t20.o \ | ||
7 | gr3d_t30.o \ | ||
8 | scale3d.o | ||
9 | |||
10 | obj-$(CONFIG_TEGRA_GRHOST) += nvhost-gr3d.o | ||
diff --git a/drivers/video/tegra/host/gr3d/gr3d.c b/drivers/video/tegra/host/gr3d/gr3d.c new file mode 100644 index 00000000000..f387d54e585 --- /dev/null +++ b/drivers/video/tegra/host/gr3d/gr3d.c | |||
@@ -0,0 +1,211 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/gr3d/gr3d.c | ||
3 | * | ||
4 | * Tegra Graphics Host 3D | ||
5 | * | ||
6 | * Copyright (c) 2012 NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <mach/nvmap.h> | ||
22 | #include <linux/slab.h> | ||
23 | |||
24 | #include "t20/t20.h" | ||
25 | #include "host1x/host1x_channel.h" | ||
26 | #include "host1x/host1x_hardware.h" | ||
27 | #include "host1x/host1x_syncpt.h" | ||
28 | #include "nvhost_hwctx.h" | ||
29 | #include "dev.h" | ||
30 | #include "gr3d.h" | ||
31 | #include "bus_client.h" | ||
32 | |||
33 | #ifndef TEGRA_POWERGATE_3D1 | ||
34 | #define TEGRA_POWERGATE_3D1 -1 | ||
35 | #endif | ||
36 | |||
/* Emit the restore-gather prologue (4 words at ptr): switch to the
 * host1x class, advance the wait base by restore_incrs, switch back to
 * the 3D class and clear PSEQ_QUAD_ID. */
void nvhost_3dctx_restore_begin(struct host1x_hwctx_handler *p, u32 *ptr)
{
	/* set class to host */
	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
	/* increment sync point base */
	ptr[1] = nvhost_class_host_incr_syncpt_base(p->waitbase,
			p->restore_incrs);
	/* set class to 3D */
	ptr[2] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
	/* program PSEQ_QUAD_ID */
	ptr[3] = nvhost_opcode_imm(AR3D_PSEQ_QUAD_ID, 0);
}

/* Emit one word at ptr announcing a direct restore of 'count'
 * consecutive registers starting at start_reg; the register values
 * follow in the gather. */
void nvhost_3dctx_restore_direct(u32 *ptr, u32 start_reg, u32 count)
{
	ptr[0] = nvhost_opcode_incr(start_reg, count);
}

/* Emit two words at ptr for an indirect restore: set the offset
 * register, then announce 'count' non-incrementing writes to the data
 * register; the data values follow in the gather. */
void nvhost_3dctx_restore_indirect(u32 *ptr, u32 offset_reg, u32 offset,
			u32 data_reg, u32 count)
{
	ptr[0] = nvhost_opcode_imm(offset_reg, offset);
	ptr[1] = nvhost_opcode_nonincr(data_reg, count);
}

/* Emit the restore-gather epilogue (1 word at ptr). */
void nvhost_3dctx_restore_end(struct host1x_hwctx_handler *p, u32 *ptr)
{
	/* syncpt increment to track restore gather. */
	ptr[0] = nvhost_opcode_imm_incr_syncpt(
			NV_SYNCPT_OP_DONE, p->syncpt);
}
69 | |||
70 | /*** ctx3d ***/ | ||
71 | |||
72 | struct host1x_hwctx *nvhost_3dctx_alloc_common(struct host1x_hwctx_handler *p, | ||
73 | struct nvhost_channel *ch, bool map_restore) | ||
74 | { | ||
75 | struct nvmap_client *nvmap = nvhost_get_host(ch->dev)->nvmap; | ||
76 | struct host1x_hwctx *ctx; | ||
77 | |||
78 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | ||
79 | if (!ctx) | ||
80 | return NULL; | ||
81 | ctx->restore = nvmap_alloc(nvmap, p->restore_size * 4, 32, | ||
82 | map_restore ? NVMAP_HANDLE_WRITE_COMBINE | ||
83 | : NVMAP_HANDLE_UNCACHEABLE, 0); | ||
84 | if (IS_ERR_OR_NULL(ctx->restore)) | ||
85 | goto fail; | ||
86 | |||
87 | if (map_restore) { | ||
88 | ctx->restore_virt = nvmap_mmap(ctx->restore); | ||
89 | if (!ctx->restore_virt) | ||
90 | goto fail; | ||
91 | } else | ||
92 | ctx->restore_virt = NULL; | ||
93 | |||
94 | kref_init(&ctx->hwctx.ref); | ||
95 | ctx->hwctx.h = &p->h; | ||
96 | ctx->hwctx.channel = ch; | ||
97 | ctx->hwctx.valid = false; | ||
98 | ctx->save_incrs = p->save_incrs; | ||
99 | ctx->save_thresh = p->save_thresh; | ||
100 | ctx->save_slots = p->save_slots; | ||
101 | ctx->restore_phys = nvmap_pin(nvmap, ctx->restore); | ||
102 | if (IS_ERR_VALUE(ctx->restore_phys)) | ||
103 | goto fail; | ||
104 | |||
105 | ctx->restore_size = p->restore_size; | ||
106 | ctx->restore_incrs = p->restore_incrs; | ||
107 | return ctx; | ||
108 | |||
109 | fail: | ||
110 | if (map_restore && ctx->restore_virt) { | ||
111 | nvmap_munmap(ctx->restore, ctx->restore_virt); | ||
112 | ctx->restore_virt = NULL; | ||
113 | } | ||
114 | nvmap_free(nvmap, ctx->restore); | ||
115 | ctx->restore = NULL; | ||
116 | kfree(ctx); | ||
117 | return NULL; | ||
118 | } | ||
119 | |||
/* Take a reference on a 3D hardware context. */
void nvhost_3dctx_get(struct nvhost_hwctx *ctx)
{
	kref_get(&ctx->ref);
}

/* kref release callback: unmap (if mapped), unpin and free the restore
 * buffer, then free the context itself. */
void nvhost_3dctx_free(struct kref *ref)
{
	struct nvhost_hwctx *nctx = container_of(ref, struct nvhost_hwctx, ref);
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct nvmap_client *nvmap =
		nvhost_get_host(nctx->channel->dev)->nvmap;

	if (ctx->restore_virt) {
		nvmap_munmap(ctx->restore, ctx->restore_virt);
		ctx->restore_virt = NULL;
	}
	nvmap_unpin(nvmap, ctx->restore);
	ctx->restore_phys = 0;
	nvmap_free(nvmap, ctx->restore);
	ctx->restore = NULL;
	kfree(ctx);
}

/* Drop a reference; nvhost_3dctx_free() runs on the last put. */
void nvhost_3dctx_put(struct nvhost_hwctx *ctx)
{
	kref_put(&ctx->ref, nvhost_3dctx_free);
}

/* prepare_poweroff hook: save the 3D context state before the unit is
 * power-gated, tracked via the 3D sync point. */
int nvhost_gr3d_prepare_power_off(struct nvhost_device *dev)
{
	return host1x_save_context(dev, NVSYNCPT_3D);
}
152 | |||
/* Probe: standard nvhost client-device initialization (channel,
 * device node, clocks) via the shared bus_client helper. */
static int __devinit gr3d_probe(struct nvhost_device *dev)
{
	return nvhost_client_device_init(dev);
}

static int __exit gr3d_remove(struct nvhost_device *dev)
{
	/* Add clean-up */
	return 0;
}

/* Suspend: delegate to the common client idle/suspend path. */
static int gr3d_suspend(struct nvhost_device *dev, pm_message_t state)
{
	return nvhost_client_device_suspend(dev);
}

/* Resume: nothing to restore eagerly; context comes back on next use. */
static int gr3d_resume(struct nvhost_device *dev)
{
	dev_info(&dev->dev, "resuming\n");
	return 0;
}

/* The "gr3d" device obtained from the chip-specific device table. */
struct nvhost_device *gr3d_device;

static struct nvhost_driver gr3d_driver = {
	.probe = gr3d_probe,
	.remove = __exit_p(gr3d_remove),
#ifdef CONFIG_PM
	.suspend = gr3d_suspend,
	.resume = gr3d_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "gr3d",
	}
};
189 | |||
/* Module init: look up the chip-provided "gr3d" device, register it on
 * the nvhost bus, then register this driver.
 * NOTE(review): if nvhost_driver_register() fails, gr3d_device stays
 * registered -- confirm whether unwinding matters for this module. */
static int __init gr3d_init(void)
{
	int err;

	gr3d_device = nvhost_get_device("gr3d");
	if (!gr3d_device)
		return -ENXIO;

	err = nvhost_device_register(gr3d_device);
	if (err)
		return err;

	return nvhost_driver_register(&gr3d_driver);
}

static void __exit gr3d_exit(void)
{
	nvhost_driver_unregister(&gr3d_driver);
}
209 | |||
210 | module_init(gr3d_init); | ||
211 | module_exit(gr3d_exit); | ||
diff --git a/drivers/video/tegra/host/gr3d/gr3d.h b/drivers/video/tegra/host/gr3d/gr3d.h new file mode 100644 index 00000000000..3855b237b70 --- /dev/null +++ b/drivers/video/tegra/host/gr3d/gr3d.h | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/gr3d/gr3d.h | ||
3 | * | ||
4 | * Tegra Graphics Host 3D | ||
5 | * | ||
6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #ifndef __NVHOST_GR3D_GR3D_H | ||
22 | #define __NVHOST_GR3D_GR3D_H | ||
23 | |||
24 | #include "host1x/host1x_hwctx.h" | ||
25 | #include <linux/types.h> | ||
26 | |||
27 | /* Registers of 3D unit */ | ||
28 | |||
29 | #define AR3D_PSEQ_QUAD_ID 0x545 | ||
30 | #define AR3D_DW_MEMORY_OUTPUT_ADDRESS 0x904 | ||
31 | #define AR3D_DW_MEMORY_OUTPUT_DATA 0x905 | ||
32 | #define AR3D_GSHIM_WRITE_MASK 0xb00 | ||
33 | #define AR3D_GSHIM_READ_SELECT 0xb01 | ||
34 | #define AR3D_GLOBAL_MEMORY_OUTPUT_READS 0xe40 | ||
35 | |||
36 | struct nvhost_hwctx; | ||
37 | struct nvhost_channel; | ||
38 | struct kref; | ||
39 | |||
40 | /* Functions used commonly by all 3D context switch modules */ | ||
41 | void nvhost_3dctx_restore_begin(struct host1x_hwctx_handler *h, u32 *ptr); | ||
42 | void nvhost_3dctx_restore_direct(u32 *ptr, u32 start_reg, u32 count); | ||
43 | void nvhost_3dctx_restore_indirect(u32 *ptr, u32 offset_reg, | ||
44 | u32 offset, u32 data_reg, u32 count); | ||
45 | void nvhost_3dctx_restore_end(struct host1x_hwctx_handler *h, u32 *ptr); | ||
46 | struct host1x_hwctx *nvhost_3dctx_alloc_common( | ||
47 | struct host1x_hwctx_handler *p, | ||
48 | struct nvhost_channel *ch, bool map_restore); | ||
49 | void nvhost_3dctx_get(struct nvhost_hwctx *ctx); | ||
50 | void nvhost_3dctx_free(struct kref *ref); | ||
51 | void nvhost_3dctx_put(struct nvhost_hwctx *ctx); | ||
52 | int nvhost_gr3d_prepare_power_off(struct nvhost_device *dev); | ||
53 | |||
54 | #endif | ||
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t20.c b/drivers/video/tegra/host/gr3d/gr3d_t20.c new file mode 100644 index 00000000000..3604142aaf2 --- /dev/null +++ b/drivers/video/tegra/host/gr3d/gr3d_t20.c | |||
@@ -0,0 +1,395 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/gr3d/gr3d_t20.c | ||
3 | * | ||
4 | * Tegra Graphics Host 3D for Tegra2 | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include "nvhost_hwctx.h" | ||
22 | #include "dev.h" | ||
23 | #include "host1x/host1x_channel.h" | ||
24 | #include "host1x/host1x_hardware.h" | ||
25 | #include "host1x/host1x_syncpt.h" | ||
26 | #include "gr3d.h" | ||
27 | |||
28 | #include <linux/slab.h> | ||
29 | |||
/*
 * Register ranges captured on context save for Tegra2.  Offsets are 3D
 * unit register offsets.  DIRECT ranges are read as 'count' consecutive
 * registers; INDIRECT ranges go through an offset/data register pair
 * located at offset/offset+1 (INDIRECT_4X: data register at offset+2).
 */
static const struct hwctx_reginfo ctxsave_regs_3d_global[] = {
	HWCTX_REGINFO(0xe00, 4, DIRECT),
	HWCTX_REGINFO(0xe05, 30, DIRECT),
	HWCTX_REGINFO(0xe25, 2, DIRECT),
	HWCTX_REGINFO(0xe28, 2, DIRECT),
	HWCTX_REGINFO(0x001, 2, DIRECT),
	HWCTX_REGINFO(0x00c, 10, DIRECT),
	HWCTX_REGINFO(0x100, 34, DIRECT),
	HWCTX_REGINFO(0x124, 2, DIRECT),
	HWCTX_REGINFO(0x200, 5, DIRECT),
	HWCTX_REGINFO(0x205, 1024, INDIRECT),
	HWCTX_REGINFO(0x207, 1024, INDIRECT),
	HWCTX_REGINFO(0x209, 1, DIRECT),
	HWCTX_REGINFO(0x300, 64, DIRECT),
	HWCTX_REGINFO(0x343, 25, DIRECT),
	HWCTX_REGINFO(0x363, 2, DIRECT),
	HWCTX_REGINFO(0x400, 16, DIRECT),
	HWCTX_REGINFO(0x411, 1, DIRECT),
	HWCTX_REGINFO(0x500, 4, DIRECT),
	HWCTX_REGINFO(0x520, 32, DIRECT),
	HWCTX_REGINFO(0x540, 64, INDIRECT),
	HWCTX_REGINFO(0x600, 16, INDIRECT_4X),
	HWCTX_REGINFO(0x603, 128, INDIRECT),
	HWCTX_REGINFO(0x608, 4, DIRECT),
	HWCTX_REGINFO(0x60e, 1, DIRECT),
	HWCTX_REGINFO(0x700, 64, INDIRECT),
	HWCTX_REGINFO(0x710, 50, DIRECT),
	HWCTX_REGINFO(0x800, 16, INDIRECT_4X),
	HWCTX_REGINFO(0x803, 512, INDIRECT),
	HWCTX_REGINFO(0x805, 64, INDIRECT),
	HWCTX_REGINFO(0x820, 32, DIRECT),
	HWCTX_REGINFO(0x900, 64, INDIRECT),
	HWCTX_REGINFO(0x902, 2, DIRECT),
	HWCTX_REGINFO(0xa02, 10, DIRECT),
	HWCTX_REGINFO(0xe04, 1, DIRECT),
	HWCTX_REGINFO(0xe2a, 1, DIRECT),
};
67 | |||
/* the same context save command sequence is used for all contexts. */
/* sizes below are in 32-bit words */
#define SAVE_BEGIN_V0_SIZE 5
#define SAVE_DIRECT_V0_SIZE 3
#define SAVE_INDIRECT_V0_SIZE 5
#define SAVE_END_V0_SIZE 5
/* total syncpt increments performed by one save sequence */
#define SAVE_INCRS 3
#define SAVE_THRESH_OFFSET 1
/* word counts of the opcode parts of the restore stream */
#define RESTORE_BEGIN_SIZE 4
#define RESTORE_DIRECT_SIZE 1
#define RESTORE_INDIRECT_SIZE 2
#define RESTORE_END_SIZE 1

/*
 * Bookkeeping shared by the two setup_save() passes: 'ptr' is NULL on
 * the sizing pass and points into the save buffer on the emit pass;
 * the counters accumulate the save/restore stream sizes in words.
 */
struct save_info {
	u32 *ptr;
	unsigned int save_count;
	unsigned int restore_count;
	unsigned int save_incrs;
	unsigned int restore_incrs;
};
87 | |||
88 | static u32 *setup_restore_regs_v0(u32 *ptr, | ||
89 | const struct hwctx_reginfo *regs, | ||
90 | unsigned int nr_regs) | ||
91 | { | ||
92 | const struct hwctx_reginfo *rend = regs + nr_regs; | ||
93 | |||
94 | for ( ; regs != rend; ++regs) { | ||
95 | u32 offset = regs->offset; | ||
96 | u32 count = regs->count; | ||
97 | u32 indoff = offset + 1; | ||
98 | switch (regs->type) { | ||
99 | case HWCTX_REGINFO_DIRECT: | ||
100 | nvhost_3dctx_restore_direct(ptr, offset, count); | ||
101 | ptr += RESTORE_DIRECT_SIZE; | ||
102 | break; | ||
103 | case HWCTX_REGINFO_INDIRECT_4X: | ||
104 | ++indoff; | ||
105 | /* fall through */ | ||
106 | case HWCTX_REGINFO_INDIRECT: | ||
107 | nvhost_3dctx_restore_indirect(ptr, | ||
108 | offset, 0, indoff, count); | ||
109 | ptr += RESTORE_INDIRECT_SIZE; | ||
110 | break; | ||
111 | } | ||
112 | ptr += count; | ||
113 | } | ||
114 | return ptr; | ||
115 | } | ||
116 | |||
117 | static void setup_restore_v0(struct host1x_hwctx_handler *h, u32 *ptr) | ||
118 | { | ||
119 | nvhost_3dctx_restore_begin(h, ptr); | ||
120 | ptr += RESTORE_BEGIN_SIZE; | ||
121 | |||
122 | ptr = setup_restore_regs_v0(ptr, | ||
123 | ctxsave_regs_3d_global, | ||
124 | ARRAY_SIZE(ctxsave_regs_3d_global)); | ||
125 | |||
126 | nvhost_3dctx_restore_end(h, ptr); | ||
127 | |||
128 | wmb(); | ||
129 | } | ||
130 | |||
131 | /*** v0 saver ***/ | ||
132 | |||
/*
 * Push the v0 context-save sequence onto the channel's CDMA.  The save
 * commands were pre-built by setup_save(); this only pushes a gather
 * opcode pointing at that buffer.
 */
static void save_push_v0(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);

	/* the CTXSAVE markers tag this gather as a context save for cdma */
	nvhost_cdma_push_gather(cdma,
			(void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE,
			(void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE,
			nvhost_opcode_gather(p->save_size),
			p->save_phys);
}
144 | |||
/*
 * Emit the first SAVE_BEGIN_V0_SIZE words of the v0 save stream:
 * wait for the 3D unit to go idle, then signal the CPU-side context
 * read thread (ctx3d_save_service) to start draining registers.
 */
static void __init save_begin_v0(struct host1x_hwctx_handler *h, u32 *ptr)
{
	/* 3d: when done, increment syncpt to base+1 */
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
	ptr[1] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
			h->syncpt); /* incr 1 */
	/* host: wait for syncpt base+1 */
	ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
	ptr[3] = nvhost_class_host_wait_syncpt_base(h->syncpt,
			h->waitbase, 1);
	/* host: signal context read thread to start reading */
	ptr[4] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE,
			h->syncpt); /* incr 2 */
}
160 | |||
/*
 * Emit SAVE_DIRECT_V0_SIZE words that read 'count' consecutive 3D
 * registers starting at start_reg through the host indirect-read
 * interface; the values appear in the channel output FIFO, where
 * save_regs_v0() drains them.
 */
static void __init save_direct_v0(u32 *ptr, u32 start_reg, u32 count)
{
	ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
	/* 'true' selects auto-increment of the indirect offset per read */
	ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
			start_reg, true);
	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
}
168 | |||
/*
 * Emit SAVE_INDIRECT_V0_SIZE words that program the 3D unit's offset
 * register, then read 'count' words from data_reg via the host
 * indirect interface (the module auto-increments its own offset, so
 * the host side reads with auto-increment disabled).
 */
static void __init save_indirect_v0(u32 *ptr, u32 offset_reg, u32 offset,
			u32 data_reg, u32 count)
{
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
			offset_reg, 1);
	ptr[1] = offset;
	ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			NV_CLASS_HOST_INDOFF, 1);
	ptr[3] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
			data_reg, false);
	ptr[4] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
}
181 | |||
182 | static void __init save_end_v0(struct host1x_hwctx_handler *h, u32 *ptr) | ||
183 | { | ||
184 | /* Wait for context read service to finish (cpu incr 3) */ | ||
185 | ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1); | ||
186 | ptr[1] = nvhost_class_host_wait_syncpt_base(h->syncpt, | ||
187 | h->waitbase, h->save_incrs); | ||
188 | /* Advance syncpoint base */ | ||
189 | ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1); | ||
190 | ptr[3] = nvhost_class_host_incr_syncpt_base(NVWAITBASE_3D, | ||
191 | h->save_incrs); | ||
192 | /* set class back to the unit */ | ||
193 | ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0); | ||
194 | } | ||
195 | |||
/*
 * CPU-side counterpart of the save stream: drain the register values
 * that the hardware pushed into the channel FIFO into the context's
 * restore buffer at ptr.  The RESTORE_*_SIZE skips step over the
 * restore opcodes already written by setup_restore_regs_v0(); only the
 * data words are filled in here.  Returns the advanced pointer.
 */
static u32 *save_regs_v0(u32 *ptr, unsigned int *pending,
			void __iomem *chan_regs,
			const struct hwctx_reginfo *regs,
			unsigned int nr_regs)
{
	const struct hwctx_reginfo *rend = regs + nr_regs;
	int drain_result = 0;

	for ( ; regs != rend; ++regs) {
		u32 count = regs->count;
		switch (regs->type) {
		case HWCTX_REGINFO_DIRECT:
			ptr += RESTORE_DIRECT_SIZE;
			break;
		case HWCTX_REGINFO_INDIRECT:
		case HWCTX_REGINFO_INDIRECT_4X:
			ptr += RESTORE_INDIRECT_SIZE;
			break;
		}
		drain_result = host1x_drain_read_fifo(chan_regs,
				ptr, count, pending);
		/* a failed drain means FIFO and save stream are out of
		 * sync and the saved context would be corrupt */
		BUG_ON(drain_result < 0);
		ptr += count;
	}
	return ptr;
}
222 | |||
223 | /*** save ***/ | ||
224 | |||
/*
 * Walk the register table once, either only accounting sizes (when
 * info->ptr is NULL, the sizing pass) or also emitting the save
 * commands (the emit pass).  For each range this accumulates the save
 * opcode size, the matching restore opcode size, and 'count' words of
 * space that will receive the register readback data.
 */
static void __init setup_save_regs(struct save_info *info,
			const struct hwctx_reginfo *regs,
			unsigned int nr_regs)
{
	const struct hwctx_reginfo *rend = regs + nr_regs;
	u32 *ptr = info->ptr;
	unsigned int save_count = info->save_count;
	unsigned int restore_count = info->restore_count;

	for ( ; regs != rend; ++regs) {
		u32 offset = regs->offset;
		u32 count = regs->count;
		u32 indoff = offset + 1;
		switch (regs->type) {
		case HWCTX_REGINFO_DIRECT:
			if (ptr) {
				save_direct_v0(ptr, offset, count);
				ptr += SAVE_DIRECT_V0_SIZE;
			}
			save_count += SAVE_DIRECT_V0_SIZE;
			restore_count += RESTORE_DIRECT_SIZE;
			break;
		case HWCTX_REGINFO_INDIRECT_4X:
			/* 4X ranges read through offset+2 */
			++indoff;
			/* fall through */
		case HWCTX_REGINFO_INDIRECT:
			if (ptr) {
				save_indirect_v0(ptr, offset, 0,
						indoff, count);
				ptr += SAVE_INDIRECT_V0_SIZE;
			}
			save_count += SAVE_INDIRECT_V0_SIZE;
			restore_count += RESTORE_INDIRECT_SIZE;
			break;
		}
		if (ptr) {
			/* SAVE cases only: reserve room for incoming data */
			u32 k = 0;
			/*
			 * Create a signature pattern for indirect data (which
			 * will be overwritten by true incoming data) for
			 * better deducing where we are in a long command
			 * sequence, when given only a FIFO snapshot for debug
			 * purposes.
			 */
			for (k = 0; k < count; k++)
				*(ptr + k) = 0xd000d000 | (offset << 16) | k;
			ptr += count;
		}
		save_count += count;
		restore_count += count;
	}

	info->ptr = ptr;
	info->save_count = save_count;
	info->restore_count = restore_count;
}
282 | |||
/*
 * Build (or, when ptr is NULL, just size) the complete v0 save command
 * stream and record the resulting sizes and syncpt increment counts in
 * the handler.  Called twice from nvhost_gr3d_t20_ctxhandler_init():
 * first with NULL to learn save_size, then with the mapped buffer.
 */
static void __init setup_save(struct host1x_hwctx_handler *h, u32 *ptr)
{
	/* initial counts include the fixed begin sequences */
	struct save_info info = {
		ptr,
		SAVE_BEGIN_V0_SIZE,
		RESTORE_BEGIN_SIZE,
		SAVE_INCRS,
		1
	};

	if (info.ptr) {
		save_begin_v0(h, info.ptr);
		info.ptr += SAVE_BEGIN_V0_SIZE;
	}

	/* save regs */
	setup_save_regs(&info,
			ctxsave_regs_3d_global,
			ARRAY_SIZE(ctxsave_regs_3d_global));

	if (info.ptr) {
		save_end_v0(h, info.ptr);
		info.ptr += SAVE_END_V0_SIZE;
	}

	/* order buffer writes before any hardware fetch */
	wmb();

	h->save_size = info.save_count + SAVE_END_V0_SIZE;
	h->restore_size = info.restore_count + RESTORE_END_SIZE;
	h->save_incrs = info.save_incrs;
	h->save_thresh = h->save_incrs - SAVE_THRESH_OFFSET;
	h->restore_incrs = info.restore_incrs;
}
316 | |||
317 | |||
318 | |||
319 | /*** ctx3d ***/ | ||
320 | |||
321 | static struct nvhost_hwctx *ctx3d_alloc_v0(struct nvhost_hwctx_handler *h, | ||
322 | struct nvhost_channel *ch) | ||
323 | { | ||
324 | struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h); | ||
325 | struct host1x_hwctx *ctx = | ||
326 | nvhost_3dctx_alloc_common(p, ch, true); | ||
327 | if (ctx) { | ||
328 | setup_restore_v0(p, ctx->restore_virt); | ||
329 | return &ctx->hwctx; | ||
330 | } else | ||
331 | return NULL; | ||
332 | } | ||
333 | |||
/*
 * Context-read service for a v0 save: drains the register values that
 * the save stream made the hardware push into the channel FIFO into
 * this context's restore buffer, then performs the final CPU syncpt
 * increment that the command stream built by save_end_v0() waits on.
 */
static void ctx3d_save_service(struct nvhost_hwctx *nctx)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);

	/* data starts right after the restore-begin opcodes */
	u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE;
	unsigned int pending = 0;

	ptr = save_regs_v0(ptr, &pending, nctx->channel->aperture,
			ctxsave_regs_3d_global,
			ARRAY_SIZE(ctxsave_regs_3d_global));

	wmb();
	nvhost_syncpt_cpu_incr(&nvhost_get_host(nctx->channel->dev)->syncpt,
			host1x_hwctx_handler(ctx)->syncpt);
}
349 | |||
350 | struct nvhost_hwctx_handler * __init nvhost_gr3d_t20_ctxhandler_init( | ||
351 | u32 syncpt, u32 waitbase, | ||
352 | struct nvhost_channel *ch) | ||
353 | { | ||
354 | struct nvmap_client *nvmap; | ||
355 | u32 *save_ptr; | ||
356 | struct host1x_hwctx_handler *p; | ||
357 | |||
358 | p = kmalloc(sizeof(*p), GFP_KERNEL); | ||
359 | if (!p) | ||
360 | return NULL; | ||
361 | nvmap = nvhost_get_host(ch->dev)->nvmap; | ||
362 | |||
363 | p->syncpt = syncpt; | ||
364 | p->waitbase = waitbase; | ||
365 | |||
366 | setup_save(p, NULL); | ||
367 | |||
368 | p->save_buf = nvmap_alloc(nvmap, p->save_size * sizeof(u32), 32, | ||
369 | NVMAP_HANDLE_WRITE_COMBINE, 0); | ||
370 | if (IS_ERR(p->save_buf)) { | ||
371 | p->save_buf = NULL; | ||
372 | return NULL; | ||
373 | } | ||
374 | |||
375 | p->save_slots = 1; | ||
376 | |||
377 | save_ptr = nvmap_mmap(p->save_buf); | ||
378 | if (!save_ptr) { | ||
379 | nvmap_free(nvmap, p->save_buf); | ||
380 | p->save_buf = NULL; | ||
381 | return NULL; | ||
382 | } | ||
383 | |||
384 | p->save_phys = nvmap_pin(nvmap, p->save_buf); | ||
385 | |||
386 | setup_save(p, save_ptr); | ||
387 | |||
388 | p->h.alloc = ctx3d_alloc_v0; | ||
389 | p->h.save_push = save_push_v0; | ||
390 | p->h.save_service = ctx3d_save_service; | ||
391 | p->h.get = nvhost_3dctx_get; | ||
392 | p->h.put = nvhost_3dctx_put; | ||
393 | |||
394 | return &p->h; | ||
395 | } | ||
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t20.h b/drivers/video/tegra/host/gr3d/gr3d_t20.h new file mode 100644 index 00000000000..5fe6d50d0c3 --- /dev/null +++ b/drivers/video/tegra/host/gr3d/gr3d_t20.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/gr3d/gr3d_t20.h | ||
3 | * | ||
4 | * Tegra Graphics Host 3D for Tegra2 | ||
5 | * | ||
6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #ifndef __NVHOST_GR3D_GR3D_T20_H | ||
22 | #define __NVHOST_GR3D_GR3D_T20_H | ||
23 | |||
struct nvhost_hwctx_handler;

/*
 * Create the Tegra2 (v0) 3D context-switch handler bound to the given
 * syncpt/waitbase and channel.  Returns NULL on failure.
 */
struct nvhost_hwctx_handler *nvhost_gr3d_t20_ctxhandler_init(
		u32 syncpt, u32 waitbase,
		struct nvhost_channel *ch);

#endif
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t30.c b/drivers/video/tegra/host/gr3d/gr3d_t30.c new file mode 100644 index 00000000000..e7329e50e3d --- /dev/null +++ b/drivers/video/tegra/host/gr3d/gr3d_t30.c | |||
@@ -0,0 +1,435 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/gr3d/gr3d_t30.c | ||
3 | * | ||
4 | * Tegra Graphics Host 3D for Tegra3 | ||
5 | * | ||
6 | * Copyright (c) 2011-2012 NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include "nvhost_hwctx.h" | ||
22 | #include "dev.h" | ||
23 | #include "host1x/host1x_hardware.h" | ||
24 | #include "host1x/host1x_syncpt.h" | ||
25 | #include "gr3d.h" | ||
26 | |||
27 | #include <mach/gpufuse.h> | ||
28 | #include <mach/hardware.h> | ||
29 | #include <linux/slab.h> | ||
30 | |||
/* 99 > 2, which makes kernel panic if register set is incorrect */
/* number of GSHIM register sets; set for real in ctxhandler_init() from
 * tegra_gpu_register_sets(), sanity-checked by BUG_ON in setup_save() */
static int register_sets = 99;

/*
 * Register ranges common to both register sets, captured on context
 * save for Tegra3.  Same DIRECT/INDIRECT/INDIRECT_4X semantics as the
 * Tegra2 table in gr3d_t20.c.
 */
static const struct hwctx_reginfo ctxsave_regs_3d_global[] = {
	HWCTX_REGINFO(0xe00, 4, DIRECT),
	HWCTX_REGINFO(0xe05, 30, DIRECT),
	HWCTX_REGINFO(0xe25, 2, DIRECT),
	HWCTX_REGINFO(0xe28, 2, DIRECT),
	HWCTX_REGINFO(0xe30, 16, DIRECT),
	HWCTX_REGINFO(0x001, 2, DIRECT),
	HWCTX_REGINFO(0x00c, 10, DIRECT),
	HWCTX_REGINFO(0x100, 34, DIRECT),
	HWCTX_REGINFO(0x124, 2, DIRECT),
	HWCTX_REGINFO(0x200, 5, DIRECT),
	HWCTX_REGINFO(0x205, 1024, INDIRECT),
	HWCTX_REGINFO(0x207, 1024, INDIRECT),
	HWCTX_REGINFO(0x209, 1, DIRECT),
	HWCTX_REGINFO(0x300, 64, DIRECT),
	HWCTX_REGINFO(0x343, 25, DIRECT),
	HWCTX_REGINFO(0x363, 2, DIRECT),
	HWCTX_REGINFO(0x400, 16, DIRECT),
	HWCTX_REGINFO(0x411, 1, DIRECT),
	HWCTX_REGINFO(0x412, 1, DIRECT),
	HWCTX_REGINFO(0x500, 4, DIRECT),
	HWCTX_REGINFO(0x520, 32, DIRECT),
	HWCTX_REGINFO(0x540, 64, INDIRECT),
	HWCTX_REGINFO(0x600, 16, INDIRECT_4X),
	HWCTX_REGINFO(0x603, 128, INDIRECT),
	HWCTX_REGINFO(0x608, 4, DIRECT),
	HWCTX_REGINFO(0x60e, 1, DIRECT),
	HWCTX_REGINFO(0x700, 64, INDIRECT),
	HWCTX_REGINFO(0x710, 50, DIRECT),
	HWCTX_REGINFO(0x750, 16, DIRECT),
	HWCTX_REGINFO(0x800, 16, INDIRECT_4X),
	HWCTX_REGINFO(0x803, 512, INDIRECT),
	HWCTX_REGINFO(0x805, 64, INDIRECT),
	HWCTX_REGINFO(0x820, 32, DIRECT),
	HWCTX_REGINFO(0x900, 64, INDIRECT),
	HWCTX_REGINFO(0x902, 2, DIRECT),
	HWCTX_REGINFO(0x90a, 1, DIRECT),
	HWCTX_REGINFO(0xa02, 10, DIRECT),
	HWCTX_REGINFO(0xb04, 1, DIRECT),
	HWCTX_REGINFO(0xb06, 13, DIRECT),
	HWCTX_REGINFO(0xe42, 2, DIRECT), /* HW bug workaround */
};
76 | |||
/* register ranges that are distinct per register set; saved once per set */
static const struct hwctx_reginfo ctxsave_regs_3d_perset[] = {
	HWCTX_REGINFO(0xe04, 1, DIRECT),
	HWCTX_REGINFO(0xe2a, 1, DIRECT),
	HWCTX_REGINFO(0x413, 1, DIRECT),
	HWCTX_REGINFO(0x90b, 1, DIRECT),
	HWCTX_REGINFO(0xe41, 1, DIRECT),
};

/* word offset into the restore buffer where set-1 restore data begins;
 * recorded by setup_save() and used by save_push_v1() */
static unsigned int restore_set1_offset;

/* v1 save command sizes include the restore words that the save stream
 * itself emits into the restore buffer via DW_MEMORY_OUTPUT_DATA */
#define SAVE_BEGIN_V1_SIZE (1 + RESTORE_BEGIN_SIZE)
#define SAVE_DIRECT_V1_SIZE (4 + RESTORE_DIRECT_SIZE)
#define SAVE_INDIRECT_V1_SIZE (6 + RESTORE_INDIRECT_SIZE)
#define SAVE_END_V1_SIZE (9 + RESTORE_END_SIZE)
#define SAVE_INCRS 3
#define SAVE_THRESH_OFFSET 0
#define RESTORE_BEGIN_SIZE 4
#define RESTORE_DIRECT_SIZE 1
#define RESTORE_INDIRECT_SIZE 2
#define RESTORE_END_SIZE 1

/*
 * Bookkeeping shared by the two setup_save() passes: 'ptr' is NULL on
 * the sizing pass and points into the save buffer on the emit pass.
 */
struct save_info {
	u32 *ptr;
	unsigned int save_count;
	unsigned int restore_count;
	unsigned int save_incrs;
	unsigned int restore_incrs;
};
105 | |||
106 | /*** v1 saver ***/ | ||
107 | |||
108 | static void save_push_v1(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma) | ||
109 | { | ||
110 | struct host1x_hwctx *ctx = to_host1x_hwctx(nctx); | ||
111 | struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx); | ||
112 | |||
113 | /* wait for 3d idle */ | ||
114 | nvhost_cdma_push(cdma, | ||
115 | nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0), | ||
116 | nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE, | ||
117 | p->syncpt)); | ||
118 | nvhost_cdma_push(cdma, | ||
119 | nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, | ||
120 | NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1), | ||
121 | nvhost_class_host_wait_syncpt_base(p->syncpt, | ||
122 | p->waitbase, 1)); | ||
123 | /* back to 3d */ | ||
124 | nvhost_cdma_push(cdma, | ||
125 | nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0), | ||
126 | NVHOST_OPCODE_NOOP); | ||
127 | /* set register set 0 and 1 register read memory output addresses, | ||
128 | and send their reads to memory */ | ||
129 | if (register_sets == 2) { | ||
130 | nvhost_cdma_push(cdma, | ||
131 | nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 2), | ||
132 | nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, | ||
133 | 1)); | ||
134 | nvhost_cdma_push(cdma, | ||
135 | nvhost_opcode_nonincr(0x904, 1), | ||
136 | ctx->restore_phys + restore_set1_offset * 4); | ||
137 | } | ||
138 | nvhost_cdma_push(cdma, | ||
139 | nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 1), | ||
140 | nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1)); | ||
141 | nvhost_cdma_push(cdma, | ||
142 | nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1), | ||
143 | ctx->restore_phys); | ||
144 | /* gather the save buffer */ | ||
145 | nvhost_cdma_push_gather(cdma, | ||
146 | (void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE, | ||
147 | (void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE, | ||
148 | nvhost_opcode_gather(p->save_size), | ||
149 | p->save_phys); | ||
150 | } | ||
151 | |||
/*
 * Emit the start of the v1 save stream.  Unlike v0, the v1 saver makes
 * the 3D unit itself write the restore buffer: the restore-begin words
 * are streamed to memory through AR3D_DW_MEMORY_OUTPUT_DATA.
 */
static void __init save_begin_v1(struct host1x_hwctx_handler *p, u32 *ptr)
{
	ptr[0] = nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_DATA,
			RESTORE_BEGIN_SIZE);
	nvhost_3dctx_restore_begin(p, ptr + 1);
	/* NOTE(review): this trailing advance of the local copy of ptr has
	 * no effect; callers advance by SAVE_BEGIN_V1_SIZE themselves */
	ptr += RESTORE_BEGIN_SIZE;
}
159 | |||
/*
 * Emit SAVE_DIRECT_V1_SIZE words: first the restore-direct command is
 * streamed into the restore buffer via DW_MEMORY_OUTPUT_DATA, then the
 * host indirect read of the registers themselves is issued.  Note the
 * mid-function 'ptr += RESTORE_DIRECT_SIZE' shifts the index base, so
 * ptr[1..3] below address the words after the embedded restore command.
 */
static void __init save_direct_v1(u32 *ptr, u32 start_reg, u32 count)
{
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
			AR3D_DW_MEMORY_OUTPUT_DATA, 1);
	nvhost_3dctx_restore_direct(ptr + 1, start_reg, count);
	ptr += RESTORE_DIRECT_SIZE;
	ptr[1] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			NV_CLASS_HOST_INDOFF, 1);
	ptr[2] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
			start_reg, true);
	/* TODO could do this in the setclass if count < 6 */
	ptr[3] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
}
173 | |||
/*
 * Emit SAVE_INDIRECT_V1_SIZE words: stream the restore-indirect command
 * into the restore buffer, then program the unit's offset register and
 * issue the host indirect read of the data register.  As in
 * save_direct_v1(), the mid-function ptr advance shifts the index base
 * past the embedded restore words.
 */
static void __init save_indirect_v1(u32 *ptr, u32 offset_reg, u32 offset,
			u32 data_reg, u32 count)
{
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
	ptr[1] = nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_DATA,
			RESTORE_INDIRECT_SIZE);
	nvhost_3dctx_restore_indirect(ptr + 2, offset_reg, offset, data_reg,
			count);
	ptr += RESTORE_INDIRECT_SIZE;
	ptr[2] = nvhost_opcode_imm(offset_reg, offset);
	ptr[3] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			NV_CLASS_HOST_INDOFF, 1);
	ptr[4] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
			data_reg, false);
	ptr[5] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
}
190 | |||
/*
 * Emit the final SAVE_END_V1_SIZE words: close the restore buffer,
 * restore the GSHIM write mask, flush via an OP_DONE syncpt increment,
 * advance the wait base, stop routing reads to memory, and release any
 * waiters.  The ptr advance after restore_end shifts the index base,
 * so ptr[1..8] address the words following the embedded restore word.
 */
static void __init save_end_v1(struct host1x_hwctx_handler *p, u32 *ptr)
{
	/* write end of restore buffer */
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
			AR3D_DW_MEMORY_OUTPUT_DATA, 1);
	nvhost_3dctx_restore_end(p, ptr + 1);
	ptr += RESTORE_END_SIZE;
	/* reset to dual reg if necessary */
	ptr[1] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
			(1 << register_sets) - 1);
	/* op_done syncpt incr to flush FDC */
	ptr[2] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE, p->syncpt);
	/* host wait for that syncpt incr, and advance the wait base */
	ptr[3] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			NV_CLASS_HOST_WAIT_SYNCPT_BASE,
			nvhost_mask2(
				NV_CLASS_HOST_WAIT_SYNCPT_BASE,
				NV_CLASS_HOST_INCR_SYNCPT_BASE));
	ptr[4] = nvhost_class_host_wait_syncpt_base(p->syncpt,
			p->waitbase, p->save_incrs - 1);
	ptr[5] = nvhost_class_host_incr_syncpt_base(p->waitbase,
			p->save_incrs);
	/* set class back to 3d */
	ptr[6] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
	/* send reg reads back to host */
	ptr[7] = nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 0);
	/* final syncpt increment to release waiters */
	/* NOTE(review): imm to method 0 appears to be the raw form of an
	 * immediate INCR_SYNCPT (cond 0) — confirm against opcode helpers */
	ptr[8] = nvhost_opcode_imm(0, p->syncpt);
}
220 | |||
221 | /*** save ***/ | ||
222 | |||
223 | |||
224 | |||
/*
 * Walk a register table once, either only accounting sizes (info->ptr
 * is NULL on the sizing pass) or also emitting the v1 save commands.
 * NOTE(review): near-duplicate of setup_save_regs() in gr3d_t20.c with
 * v1 command sizes — candidate for sharing in gr3d.c.
 */
static void __init setup_save_regs(struct save_info *info,
			const struct hwctx_reginfo *regs,
			unsigned int nr_regs)
{
	const struct hwctx_reginfo *rend = regs + nr_regs;
	u32 *ptr = info->ptr;
	unsigned int save_count = info->save_count;
	unsigned int restore_count = info->restore_count;

	for ( ; regs != rend; ++regs) {
		u32 offset = regs->offset;
		u32 count = regs->count;
		u32 indoff = offset + 1;
		switch (regs->type) {
		case HWCTX_REGINFO_DIRECT:
			if (ptr) {
				save_direct_v1(ptr, offset, count);
				ptr += SAVE_DIRECT_V1_SIZE;
			}
			save_count += SAVE_DIRECT_V1_SIZE;
			restore_count += RESTORE_DIRECT_SIZE;
			break;
		case HWCTX_REGINFO_INDIRECT_4X:
			/* 4X ranges read through offset+2 */
			++indoff;
			/* fall through */
		case HWCTX_REGINFO_INDIRECT:
			if (ptr) {
				save_indirect_v1(ptr, offset, 0,
						indoff, count);
				ptr += SAVE_INDIRECT_V1_SIZE;
			}
			save_count += SAVE_INDIRECT_V1_SIZE;
			restore_count += RESTORE_INDIRECT_SIZE;
			break;
		}
		if (ptr) {
			/* SAVE cases only: reserve room for incoming data */
			u32 k = 0;
			/*
			 * Create a signature pattern for indirect data (which
			 * will be overwritten by true incoming data) for
			 * better deducing where we are in a long command
			 * sequence, when given only a FIFO snapshot for debug
			 * purposes.
			 */
			for (k = 0; k < count; k++)
				*(ptr + k) = 0xd000d000 | (offset << 16) | k;
			ptr += count;
		}
		save_count += count;
		restore_count += count;
	}

	info->ptr = ptr;
	info->save_count = save_count;
	info->restore_count = restore_count;
}
282 | |||
283 | static void __init switch_gpu(struct save_info *info, | ||
284 | unsigned int save_src_set, | ||
285 | u32 save_dest_sets, | ||
286 | u32 restore_dest_sets) | ||
287 | { | ||
288 | if (info->ptr) { | ||
289 | info->ptr[0] = nvhost_opcode_setclass( | ||
290 | NV_GRAPHICS_3D_CLASS_ID, | ||
291 | AR3D_DW_MEMORY_OUTPUT_DATA, 1); | ||
292 | info->ptr[1] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, | ||
293 | restore_dest_sets); | ||
294 | info->ptr[2] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, | ||
295 | save_dest_sets); | ||
296 | info->ptr[3] = nvhost_opcode_imm(AR3D_GSHIM_READ_SELECT, | ||
297 | save_src_set); | ||
298 | info->ptr += 4; | ||
299 | } | ||
300 | info->save_count += 4; | ||
301 | info->restore_count += 1; | ||
302 | } | ||
303 | |||
/*
 * Build (or, when ptr is NULL, just size) the complete v1 save command
 * stream: global registers saved once, per-set registers saved for
 * each register set on dual-set GPUs, with GSHIM switches between.
 * Records the resulting sizes/increments in the handler.  Called twice
 * from nvhost_gr3d_t30_ctxhandler_init() (size pass, then emit pass).
 */
static void __init setup_save(struct host1x_hwctx_handler *p, u32 *ptr)
{
	struct save_info info = {
		ptr,
		SAVE_BEGIN_V1_SIZE,
		RESTORE_BEGIN_SIZE,
		SAVE_INCRS,
		1
	};
	int save_end_size = SAVE_END_V1_SIZE;

	/* register_sets must have been set from tegra_gpu_register_sets() */
	BUG_ON(register_sets > 2);

	if (info.ptr) {
		save_begin_v1(p, info.ptr);
		info.ptr += SAVE_BEGIN_V1_SIZE;
	}

	/* read from set0, write cmds through set0, restore to set0 and 1 */
	if (register_sets == 2)
		switch_gpu(&info, 0, 1, 3);

	/* save regs that are common to both sets */
	setup_save_regs(&info,
			ctxsave_regs_3d_global,
			ARRAY_SIZE(ctxsave_regs_3d_global));

	/* read from set 0, write cmds through set0, restore to set0 */
	if (register_sets == 2)
		switch_gpu(&info, 0, 1, 1);

	/* save set 0 specific regs */
	setup_save_regs(&info,
			ctxsave_regs_3d_perset,
			ARRAY_SIZE(ctxsave_regs_3d_perset));

	if (register_sets == 2) {
		/* read from set1, write cmds through set1, restore to set1 */
		switch_gpu(&info, 1, 2, 2);
		/* note offset at which set 1 restore starts */
		restore_set1_offset = info.restore_count;
		/* save set 1 specific regs */
		setup_save_regs(&info,
				ctxsave_regs_3d_perset,
				ARRAY_SIZE(ctxsave_regs_3d_perset));
	}

	/* read from set0, write cmds through set1, restore to set0 and 1 */
	if (register_sets == 2)
		switch_gpu(&info, 0, 2, 3);

	if (info.ptr) {
		save_end_v1(p, info.ptr);
		info.ptr += SAVE_END_V1_SIZE;
	}

	/* order buffer writes before any hardware fetch */
	wmb();

	p->save_size = info.save_count + save_end_size;
	p->restore_size = info.restore_count + RESTORE_END_SIZE;
	p->save_incrs = info.save_incrs;
	p->save_thresh = p->save_incrs - SAVE_THRESH_OFFSET;
	p->restore_incrs = info.restore_incrs;
}
368 | |||
369 | |||
370 | /*** ctx3d ***/ | ||
371 | |||
372 | static struct nvhost_hwctx *ctx3d_alloc_v1(struct nvhost_hwctx_handler *h, | ||
373 | struct nvhost_channel *ch) | ||
374 | { | ||
375 | struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h); | ||
376 | struct host1x_hwctx *ctx = nvhost_3dctx_alloc_common(p, ch, false); | ||
377 | |||
378 | if (ctx) | ||
379 | return &ctx->hwctx; | ||
380 | else | ||
381 | return NULL; | ||
382 | } | ||
383 | |||
384 | struct nvhost_hwctx_handler *__init nvhost_gr3d_t30_ctxhandler_init( | ||
385 | u32 syncpt, u32 waitbase, | ||
386 | struct nvhost_channel *ch) | ||
387 | { | ||
388 | struct nvmap_client *nvmap; | ||
389 | u32 *save_ptr; | ||
390 | struct host1x_hwctx_handler *p; | ||
391 | |||
392 | p = kmalloc(sizeof(*p), GFP_KERNEL); | ||
393 | if (!p) | ||
394 | return NULL; | ||
395 | |||
396 | nvmap = nvhost_get_host(ch->dev)->nvmap; | ||
397 | |||
398 | register_sets = tegra_gpu_register_sets(); | ||
399 | BUG_ON(register_sets == 0 || register_sets > 2); | ||
400 | |||
401 | p->syncpt = syncpt; | ||
402 | p->waitbase = waitbase; | ||
403 | |||
404 | setup_save(p, NULL); | ||
405 | |||
406 | p->save_buf = nvmap_alloc(nvmap, p->save_size * 4, 32, | ||
407 | NVMAP_HANDLE_WRITE_COMBINE, 0); | ||
408 | if (IS_ERR(p->save_buf)) { | ||
409 | p->save_buf = NULL; | ||
410 | return NULL; | ||
411 | } | ||
412 | |||
413 | p->save_slots = 6; | ||
414 | if (register_sets == 2) | ||
415 | p->save_slots += 2; | ||
416 | |||
417 | save_ptr = nvmap_mmap(p->save_buf); | ||
418 | if (!save_ptr) { | ||
419 | nvmap_free(nvmap, p->save_buf); | ||
420 | p->save_buf = NULL; | ||
421 | return NULL; | ||
422 | } | ||
423 | |||
424 | p->save_phys = nvmap_pin(nvmap, p->save_buf); | ||
425 | |||
426 | setup_save(p, save_ptr); | ||
427 | |||
428 | p->h.alloc = ctx3d_alloc_v1; | ||
429 | p->h.save_push = save_push_v1; | ||
430 | p->h.save_service = NULL; | ||
431 | p->h.get = nvhost_3dctx_get; | ||
432 | p->h.put = nvhost_3dctx_put; | ||
433 | |||
434 | return &p->h; | ||
435 | } | ||
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t30.h b/drivers/video/tegra/host/gr3d/gr3d_t30.h new file mode 100644 index 00000000000..d1b787e14b4 --- /dev/null +++ b/drivers/video/tegra/host/gr3d/gr3d_t30.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/gr3d/gr3d_t30.h | ||
3 | * | ||
4 | * Tegra Graphics Host 3D for Tegra3 | ||
5 | * | ||
6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #ifndef __NVHOST_GR3D_GR3D_T30_H | ||
22 | #define __NVHOST_GR3D_GR3D_T30_H | ||
23 | |||
24 | struct nvhost_hwctx_handler; | ||
25 | |||
26 | struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init( | ||
27 | u32 syncpt, u32 waitbase, | ||
28 | struct nvhost_channel *ch); | ||
29 | |||
30 | #endif | ||
diff --git a/drivers/video/tegra/host/gr3d/scale3d.c b/drivers/video/tegra/host/gr3d/scale3d.c new file mode 100644 index 00000000000..8a267a127ea --- /dev/null +++ b/drivers/video/tegra/host/gr3d/scale3d.c | |||
@@ -0,0 +1,661 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/t20/scale3d.c | ||
3 | * | ||
4 | * Tegra Graphics Host 3D clock scaling | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | * 3d clock scaling | ||
23 | * | ||
24 | * module3d_notify_busy() is called upon submit, module3d_notify_idle() is | ||
25 | * called when all outstanding submits are completed. Idle times are measured | ||
26 | * over a fixed time period (scale3d.p_period). If the 3d module idle time | ||
27 | * percentage goes over the limit (set in scale3d.p_idle_max), 3d clocks are | ||
28 | * scaled down. If the percentage goes under the minimum limit (set in | ||
29 | * scale3d.p_idle_min), 3d clocks are scaled up. An additional test is made | ||
30 | * over the time frame given in scale3d.p_fast_response for clocking up | ||
31 | * quickly in response to load peaks. | ||
32 | * | ||
33 | * 3d.emc clock is scaled proportionately to 3d clock, with a quadratic- | ||
34 | * bezier-like factor added to pull 3d.emc rate a bit lower. | ||
35 | */ | ||
36 | |||
37 | #include <linux/debugfs.h> | ||
38 | #include <linux/types.h> | ||
39 | #include <linux/clk.h> | ||
40 | #include <mach/clk.h> | ||
41 | #include <mach/hardware.h> | ||
42 | #include "scale3d.h" | ||
43 | #include "dev.h" | ||
44 | |||
45 | static int scale3d_is_enabled(void); | ||
46 | static void scale3d_enable(int enable); | ||
47 | |||
48 | #define POW2(x) ((x) * (x)) | ||
49 | |||
50 | /* | ||
51 | * debugfs parameters to control 3d clock scaling test | ||
52 | * | ||
53 | * period - time period for clock rate evaluation | ||
54 | * fast_response - time period for evaluation of 'busy' spikes | ||
55 | * idle_min - if less than [idle_min] percent idle over [fast_response] | ||
56 | * microseconds, clock up. | ||
57 | * idle_max - if over [idle_max] percent idle over [period] microseconds, | ||
58 | * clock down. | ||
59 | * max_scale - limits rate changes to no less than (100 - max_scale)% or | ||
60 | * (100 + 2 * max_scale)% of current clock rate | ||
61 | * verbosity - set above 5 for debug printouts | ||
62 | */ | ||
63 | |||
64 | struct scale3d_info_rec { | ||
65 | struct mutex lock; /* lock for timestamps etc */ | ||
66 | int enable; | ||
67 | int init; | ||
68 | ktime_t idle_frame; | ||
69 | ktime_t fast_frame; | ||
70 | ktime_t last_idle; | ||
71 | ktime_t last_short_term_idle; | ||
72 | int is_idle; | ||
73 | ktime_t last_tweak; | ||
74 | ktime_t last_down; | ||
75 | int fast_up_count; | ||
76 | int slow_down_count; | ||
77 | int is_scaled; | ||
78 | int fast_responses; | ||
79 | unsigned long idle_total; | ||
80 | unsigned long idle_short_term_total; | ||
81 | unsigned long max_rate_3d; | ||
82 | long emc_slope; | ||
83 | long emc_offset; | ||
84 | long emc_dip_slope; | ||
85 | long emc_dip_offset; | ||
86 | long emc_xmid; | ||
87 | unsigned long min_rate_3d; | ||
88 | struct work_struct work; | ||
89 | struct delayed_work idle_timer; | ||
90 | unsigned int scale; | ||
91 | unsigned int p_period; | ||
92 | unsigned int period; | ||
93 | unsigned int p_idle_min; | ||
94 | unsigned int idle_min; | ||
95 | unsigned int p_idle_max; | ||
96 | unsigned int idle_max; | ||
97 | unsigned int p_fast_response; | ||
98 | unsigned int fast_response; | ||
99 | unsigned int p_adjust; | ||
100 | unsigned int p_scale_emc; | ||
101 | unsigned int p_emc_dip; | ||
102 | unsigned int p_verbosity; | ||
103 | struct clk *clk_3d; | ||
104 | struct clk *clk_3d2; | ||
105 | struct clk *clk_3d_emc; | ||
106 | }; | ||
107 | |||
108 | static struct scale3d_info_rec scale3d; | ||
109 | |||
/*
 * Scale the 3d clock to @percent of its current rate, and track the
 * 3d.emc rate along the linear-plus-dip model set up in
 * nvhost_scale3d_init().  No-op while the module clocks are gated, or
 * when already pinned at the maximum rate.
 */
static void scale3d_clocks(unsigned long percent)
{
	unsigned long hz, curr;

	/* don't touch rates while the 3d clock is disabled */
	if (!tegra_is_clk_enabled(scale3d.clk_3d))
		return;

	/* Tegra3 has a second 3d unit; its clock must be enabled too */
	if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3)
		if (!tegra_is_clk_enabled(scale3d.clk_3d2))
			return;

	curr = clk_get_rate(scale3d.clk_3d);
	hz = percent * (curr / 100);

	/* skip only when both target and current rate sit at the max */
	if (!(hz >= scale3d.max_rate_3d && curr == scale3d.max_rate_3d)) {
		if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3)
			clk_set_rate(scale3d.clk_3d2, 0);
		clk_set_rate(scale3d.clk_3d, hz);

		if (scale3d.p_scale_emc) {
			/* re-read: clk_set_rate may have rounded the rate */
			long after = (long) clk_get_rate(scale3d.clk_3d);
			hz = after * scale3d.emc_slope + scale3d.emc_offset;
			if (scale3d.p_emc_dip)
				/* quadratic dip centered at emc_xmid
				 * (rates in kHz via /1000) */
				hz -=
					(scale3d.emc_dip_slope *
					POW2(after / 1000 - scale3d.emc_xmid) +
					scale3d.emc_dip_offset);
			clk_set_rate(scale3d.clk_3d_emc, hz);
		}
	}
}
141 | |||
142 | static void scale3d_clocks_handler(struct work_struct *work) | ||
143 | { | ||
144 | unsigned int scale; | ||
145 | |||
146 | mutex_lock(&scale3d.lock); | ||
147 | scale = scale3d.scale; | ||
148 | mutex_unlock(&scale3d.lock); | ||
149 | |||
150 | if (scale != 0) | ||
151 | scale3d_clocks(scale); | ||
152 | } | ||
153 | |||
/*
 * Power-down hook: stop any pending scaling work before the module is
 * suspended.  The rate-change work is flushed synchronously; the idle
 * timer is only cancelled (it re-checks scale3d.enable when it runs).
 */
void nvhost_scale3d_suspend(struct nvhost_device *dev)
{
	if (!scale3d.enable)
		return;

	cancel_work_sync(&scale3d.work);
	cancel_delayed_work(&scale3d.idle_timer);
}
162 | |||
163 | /* set 3d clocks to max */ | ||
164 | static void reset_3d_clocks(void) | ||
165 | { | ||
166 | if (clk_get_rate(scale3d.clk_3d) != scale3d.max_rate_3d) { | ||
167 | clk_set_rate(scale3d.clk_3d, scale3d.max_rate_3d); | ||
168 | if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) | ||
169 | clk_set_rate(scale3d.clk_3d2, scale3d.max_rate_3d); | ||
170 | if (scale3d.p_scale_emc) | ||
171 | clk_set_rate(scale3d.clk_3d_emc, | ||
172 | clk_round_rate(scale3d.clk_3d_emc, UINT_MAX)); | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static int scale3d_is_enabled(void) | ||
177 | { | ||
178 | int enable; | ||
179 | |||
180 | if (!scale3d.enable) | ||
181 | return 0; | ||
182 | |||
183 | mutex_lock(&scale3d.lock); | ||
184 | enable = scale3d.enable; | ||
185 | mutex_unlock(&scale3d.lock); | ||
186 | |||
187 | return enable; | ||
188 | } | ||
189 | |||
190 | static void scale3d_enable(int enable) | ||
191 | { | ||
192 | int disable = 0; | ||
193 | |||
194 | mutex_lock(&scale3d.lock); | ||
195 | |||
196 | if (enable) { | ||
197 | if (scale3d.max_rate_3d != scale3d.min_rate_3d) | ||
198 | scale3d.enable = 1; | ||
199 | } else { | ||
200 | scale3d.enable = 0; | ||
201 | disable = 1; | ||
202 | } | ||
203 | |||
204 | mutex_unlock(&scale3d.lock); | ||
205 | |||
206 | if (disable) | ||
207 | reset_3d_clocks(); | ||
208 | } | ||
209 | |||
210 | static void reset_scaling_counters(ktime_t time) | ||
211 | { | ||
212 | scale3d.idle_total = 0; | ||
213 | scale3d.idle_short_term_total = 0; | ||
214 | scale3d.last_idle = time; | ||
215 | scale3d.last_short_term_idle = time; | ||
216 | scale3d.idle_frame = time; | ||
217 | } | ||
218 | |||
/* scaling_adjust - use scale up / scale down hint counts to adjust scaling
 * parameters.
 *
 * hint_ratio is 100 x the ratio of scale up to scale down hints. Three cases
 * are distinguished:
 *
 * hint_ratio < HINT_RATIO_MIN - set parameters to maximize scaling effect
 * hint_ratio > HINT_RATIO_MAX - set parameters to minimize scaling effect
 * hint_ratio between limits - scale parameters linearly
 *
 * the parameters adjusted are
 *
 * * fast_response time
 * * period - time for scaling down estimate
 * * idle_min percentage
 * * idle_max percentage
 */
#define SCALING_ADJUST_PERIOD 1000000
#define HINT_RATIO_MAX 400
#define HINT_RATIO_MIN 100
#define HINT_RATIO_MID ((HINT_RATIO_MAX + HINT_RATIO_MIN) / 2)
#define HINT_RATIO_DIFF (HINT_RATIO_MAX - HINT_RATIO_MIN)

static void scaling_adjust(ktime_t time)
{
	long hint_ratio;
	long fast_response_adjustment;
	long period_adjustment;
	int idle_min_adjustment;
	int idle_max_adjustment;
	unsigned long dt;

	/* re-tune at most once per SCALING_ADJUST_PERIOD microseconds */
	dt = (unsigned long) ktime_us_delta(time, scale3d.last_tweak);
	if (dt < SCALING_ADJUST_PERIOD)
		return;

	/* +1 on both counts avoids division by zero */
	hint_ratio = (100 * (scale3d.fast_up_count + 1)) /
		(scale3d.slow_down_count + 1);

	if (hint_ratio > HINT_RATIO_MAX) {
		/* mostly up-hints: minimize scaling effect */
		fast_response_adjustment = -((int) scale3d.p_fast_response) / 4;
		period_adjustment = scale3d.p_period / 2;
		idle_min_adjustment = scale3d.p_idle_min;
		idle_max_adjustment = scale3d.p_idle_max;
	} else if (hint_ratio < HINT_RATIO_MIN) {
		/* mostly down-hints: maximize scaling effect */
		fast_response_adjustment = scale3d.p_fast_response / 2;
		period_adjustment = -((int) scale3d.p_period) / 4;
		idle_min_adjustment = -((int) scale3d.p_idle_min) / 2;
		idle_max_adjustment = -((int) scale3d.p_idle_max) / 2;
	} else {
		/* in between: interpolate linearly around the midpoint;
		 * factor is made asymmetric below vs. above the midpoint */
		int diff;
		int factor;

		diff = HINT_RATIO_MID - hint_ratio;
		if (diff < 0)
			factor = -diff * 2;
		else {
			factor = -diff;
			diff *= 2;
		}

		fast_response_adjustment = diff *
			(scale3d.p_fast_response / (HINT_RATIO_DIFF * 2));
		period_adjustment =
			diff * (scale3d.p_period / HINT_RATIO_DIFF);
		idle_min_adjustment =
			(factor * (int) scale3d.p_idle_min) / HINT_RATIO_DIFF;
		idle_max_adjustment =
			(factor * (int) scale3d.p_idle_max) / HINT_RATIO_DIFF;
	}

	/* apply adjustments on top of the debugfs-settable baselines */
	scale3d.fast_response =
		scale3d.p_fast_response + fast_response_adjustment;
	scale3d.period = scale3d.p_period + period_adjustment;
	scale3d.idle_min = scale3d.p_idle_min + idle_min_adjustment;
	scale3d.idle_max = scale3d.p_idle_max + idle_max_adjustment;

	if (scale3d.p_verbosity >= 10)
		pr_info("scale3d stats: + %d - %d (/ %d) f %u p %u min %u max %u\n",
			scale3d.fast_up_count, scale3d.slow_down_count,
			scale3d.fast_responses, scale3d.fast_response,
			scale3d.period, scale3d.idle_min, scale3d.idle_max);

	/* restart hint collection for the next adjustment window */
	scale3d.fast_up_count = 0;
	scale3d.slow_down_count = 0;
	scale3d.fast_responses = 0;
	scale3d.last_down = time;
	scale3d.last_tweak = time;
}

#undef SCALING_ADJUST_PERIOD
#undef HINT_RATIO_MAX
#undef HINT_RATIO_MIN
#undef HINT_RATIO_MID
#undef HINT_RATIO_DIFF
314 | |||
/*
 * Evaluate idle statistics and decide whether to clock up or down.
 * Called with scale3d.lock held (see nvhost_scale3d_notify_idle/busy).
 */
static void scaling_state_check(ktime_t time)
{
	unsigned long dt;

	/* adjustment: set scale parameters (fast_response, period) +/- 25%
	 * based on ratio of scale up to scale down hints
	 */
	if (scale3d.p_adjust)
		scaling_adjust(time);
	else {
		scale3d.fast_response = scale3d.p_fast_response;
		scale3d.period = scale3d.p_period;
		scale3d.idle_min = scale3d.p_idle_min;
		scale3d.idle_max = scale3d.p_idle_max;
	}

	/* check for load peaks */
	dt = (unsigned long) ktime_us_delta(time, scale3d.fast_frame);
	if (dt > scale3d.fast_response) {
		unsigned long idleness =
			(scale3d.idle_short_term_total * 100) / dt;
		scale3d.fast_responses++;
		scale3d.fast_frame = time;
		/* if too busy, scale up */
		if (idleness < scale3d.idle_min) {
			scale3d.is_scaled = 0;
			scale3d.fast_up_count++;
			if (scale3d.p_verbosity >= 5)
				pr_info("scale3d: %ld%% busy\n",
					100 - idleness);

			/* clocking up is done synchronously; the long-term
			 * window restarts as well */
			reset_3d_clocks();
			reset_scaling_counters(time);
			return;
		}
		/* restart the short-term window only */
		scale3d.idle_short_term_total = 0;
		scale3d.last_short_term_idle = time;
	}

	/* long-term window: consider clocking down */
	dt = (unsigned long) ktime_us_delta(time, scale3d.idle_frame);
	if (dt > scale3d.period) {
		unsigned long idleness = (scale3d.idle_total * 100) / dt;

		if (scale3d.p_verbosity >= 5)
			pr_info("scale3d: idle %lu, ~%lu%%\n",
				scale3d.idle_total, idleness);

		if (idleness > scale3d.idle_max) {
			if (!scale3d.is_scaled) {
				scale3d.is_scaled = 1;
				scale3d.last_down = time;
			}
			scale3d.slow_down_count++;
			/* if idle time is high, clock down */
			scale3d.scale = 100 - (idleness - scale3d.idle_min);
			/* actual rate change happens in the work item */
			schedule_work(&scale3d.work);
		}

		reset_scaling_counters(time);
	}
}
376 | |||
/*
 * Called when all outstanding 3d submits have completed: accumulate idle
 * time and re-evaluate the clock-scaling state.  Also arms a delayed
 * re-check so a long idle stretch still triggers a clock-down.
 */
void nvhost_scale3d_notify_idle(struct nvhost_device *dev)
{
	ktime_t t;
	unsigned long dt;

	/* unlocked fast path when scaling is disabled */
	if (!scale3d.enable)
		return;

	mutex_lock(&scale3d.lock);

	t = ktime_get();

	if (scale3d.is_idle) {
		/* already idle: extend both accumulated idle intervals */
		dt = ktime_us_delta(t, scale3d.last_idle);
		scale3d.idle_total += dt;
		dt = ktime_us_delta(t, scale3d.last_short_term_idle);
		scale3d.idle_short_term_total += dt;
	} else
		scale3d.is_idle = 1;

	scale3d.last_idle = t;
	scale3d.last_short_term_idle = t;

	scaling_state_check(scale3d.last_idle);

	/* delay idle_max % of 2 * fast_response time (given in microseconds) */
	schedule_delayed_work(&scale3d.idle_timer,
		msecs_to_jiffies((scale3d.idle_max * scale3d.fast_response)
			/ 50000));

	mutex_unlock(&scale3d.lock);
}
409 | |||
/*
 * Called on submit: close out any open idle interval, cancel the pending
 * idle re-check, and re-evaluate the clock-scaling state.
 */
void nvhost_scale3d_notify_busy(struct nvhost_device *dev)
{
	unsigned long idle;
	unsigned long short_term_idle;
	ktime_t t;

	/* unlocked fast path when scaling is disabled */
	if (!scale3d.enable)
		return;

	mutex_lock(&scale3d.lock);

	/* a submit supersedes the delayed idle evaluation */
	cancel_delayed_work(&scale3d.idle_timer);

	t = ktime_get();

	if (scale3d.is_idle) {
		/* close the idle interval that started at last_idle */
		idle = (unsigned long)
			ktime_us_delta(t, scale3d.last_idle);
		scale3d.idle_total += idle;
		short_term_idle =
			ktime_us_delta(t, scale3d.last_short_term_idle);
		scale3d.idle_short_term_total += short_term_idle;
		scale3d.is_idle = 0;
	}

	scaling_state_check(t);

	mutex_unlock(&scale3d.lock);
}
439 | |||
440 | static void scale3d_idle_handler(struct work_struct *work) | ||
441 | { | ||
442 | int notify_idle = 0; | ||
443 | |||
444 | if (!scale3d.enable) | ||
445 | return; | ||
446 | |||
447 | mutex_lock(&scale3d.lock); | ||
448 | |||
449 | if (scale3d.is_idle && tegra_is_clk_enabled(scale3d.clk_3d)) { | ||
450 | unsigned long curr = clk_get_rate(scale3d.clk_3d); | ||
451 | if (curr > scale3d.min_rate_3d) | ||
452 | notify_idle = 1; | ||
453 | } | ||
454 | |||
455 | mutex_unlock(&scale3d.lock); | ||
456 | |||
457 | if (notify_idle) | ||
458 | nvhost_scale3d_notify_idle(NULL); | ||
459 | } | ||
460 | |||
461 | void nvhost_scale3d_reset() | ||
462 | { | ||
463 | ktime_t t; | ||
464 | |||
465 | if (!scale3d.enable) | ||
466 | return; | ||
467 | |||
468 | t = ktime_get(); | ||
469 | mutex_lock(&scale3d.lock); | ||
470 | reset_scaling_counters(t); | ||
471 | mutex_unlock(&scale3d.lock); | ||
472 | } | ||
473 | |||
474 | /* | ||
475 | * debugfs parameters to control 3d clock scaling | ||
476 | */ | ||
477 | |||
478 | void nvhost_scale3d_debug_init(struct dentry *de) | ||
479 | { | ||
480 | struct dentry *d, *f; | ||
481 | |||
482 | d = debugfs_create_dir("scaling", de); | ||
483 | if (!d) { | ||
484 | pr_err("scale3d: can\'t create debugfs directory\n"); | ||
485 | return; | ||
486 | } | ||
487 | |||
488 | #define CREATE_SCALE3D_FILE(fname) \ | ||
489 | do {\ | ||
490 | f = debugfs_create_u32(#fname, S_IRUGO | S_IWUSR, d,\ | ||
491 | &scale3d.p_##fname);\ | ||
492 | if (NULL == f) {\ | ||
493 | pr_err("scale3d: can\'t create file " #fname "\n");\ | ||
494 | return;\ | ||
495 | } \ | ||
496 | } while (0) | ||
497 | |||
498 | CREATE_SCALE3D_FILE(fast_response); | ||
499 | CREATE_SCALE3D_FILE(idle_min); | ||
500 | CREATE_SCALE3D_FILE(idle_max); | ||
501 | CREATE_SCALE3D_FILE(period); | ||
502 | CREATE_SCALE3D_FILE(adjust); | ||
503 | CREATE_SCALE3D_FILE(scale_emc); | ||
504 | CREATE_SCALE3D_FILE(emc_dip); | ||
505 | CREATE_SCALE3D_FILE(verbosity); | ||
506 | #undef CREATE_SCALE3D_FILE | ||
507 | } | ||
508 | |||
509 | static ssize_t enable_3d_scaling_show(struct device *device, | ||
510 | struct device_attribute *attr, char *buf) | ||
511 | { | ||
512 | ssize_t res; | ||
513 | |||
514 | res = snprintf(buf, PAGE_SIZE, "%d\n", scale3d_is_enabled()); | ||
515 | |||
516 | return res; | ||
517 | } | ||
518 | |||
519 | static ssize_t enable_3d_scaling_store(struct device *dev, | ||
520 | struct device_attribute *attr, const char *buf, size_t count) | ||
521 | { | ||
522 | unsigned long val = 0; | ||
523 | |||
524 | if (strict_strtoul(buf, 10, &val) < 0) | ||
525 | return -EINVAL; | ||
526 | |||
527 | scale3d_enable(val); | ||
528 | |||
529 | return count; | ||
530 | } | ||
531 | |||
/* sysfs attribute "enable_3d_scaling" (root-writable, world-readable) */
static DEVICE_ATTR(enable_3d_scaling, S_IRUGO | S_IWUSR,
	enable_3d_scaling_show, enable_3d_scaling_store);
534 | |||
/*
 * One-time initialization of the 3d clock-scaling state: resolve clocks
 * from the nvhost device, derive the emc-tracking model, set parameter
 * defaults and create the sysfs control file.  Subsequent calls only
 * reset the load counters.
 */
void nvhost_scale3d_init(struct nvhost_device *d)
{
	if (!scale3d.init) {
		int error;
		unsigned long max_emc, min_emc;
		long correction;
		mutex_init(&scale3d.lock);

		/* clock slot layout differs per chip: Tegra3 carries the
		 * second 3d unit's clock in slot 1, emc in slot 2 */
		scale3d.clk_3d = d->clk[0];
		if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) {
			scale3d.clk_3d2 = d->clk[1];
			scale3d.clk_3d_emc = d->clk[2];
		} else
			scale3d.clk_3d_emc = d->clk[1];

		scale3d.max_rate_3d = clk_round_rate(scale3d.clk_3d, UINT_MAX);
		scale3d.min_rate_3d = clk_round_rate(scale3d.clk_3d, 0);

		/* a fixed-rate clock leaves nothing to scale */
		if (scale3d.max_rate_3d == scale3d.min_rate_3d) {
			pr_warn("scale3d: 3d max rate = min rate (%lu), "
				"disabling\n", scale3d.max_rate_3d);
			scale3d.enable = 0;
			return;
		}

		/* emc scaling:
		 *
		 * Remc = S * R3d + O - (Sd * (R3d - Rm)^2 + Od)
		 *
		 * Remc - 3d.emc rate
		 * R3d - 3d.cbus rate
		 * Rm - 3d.cbus 'middle' rate = (max + min)/2
		 * S - emc_slope
		 * O - emc_offset
		 * Sd - emc_dip_slope
		 * Od - emc_dip_offset
		 *
		 * this superposes a quadratic dip centered around the middle 3d
		 * frequency over a linear correlation of 3d.emc to 3d clock
		 * rates.
		 *
		 * S, O are chosen so that the maximum 3d rate produces the
		 * maximum 3d.emc rate exactly, and the minimum 3d rate produces
		 * at least the minimum 3d.emc rate.
		 *
		 * Sd and Od are chosen to produce the largest dip that will
		 * keep 3d.emc frequencies monotonously decreasing with 3d
		 * frequencies. To achieve this, the first derivative of Remc
		 * with respect to R3d should be zero for the minimal 3d rate:
		 *
		 *   R'emc = S - 2 * Sd * (R3d - Rm)
		 *   R'emc(R3d-min) = 0
		 *   S = 2 * Sd * (R3d-min - Rm)
		 *     = 2 * Sd * (R3d-min - R3d-max) / 2
		 *   Sd = S / (R3d-min - R3d-max)
		 *
		 *   +---------------------------------------------------+
		 *   | Sd = -(emc-max - emc-min) / (R3d-min - R3d-max)^2 |
		 *   +---------------------------------------------------+
		 *
		 *   dip = Sd * (R3d - Rm)^2 + Od
		 *
		 * requiring dip(R3d-min) = 0 and dip(R3d-max) = 0 gives
		 *
		 *   Sd * (R3d-min - Rm)^2 + Od = 0
		 *   Od = -Sd * ((R3d-min - R3d-max) / 2)^2
		 *      = -Sd * ((R3d-min - R3d-max)^2) / 4
		 *
		 *   +------------------------------+
		 *   | Od = (emc-max - emc-min) / 4 |
		 *   +------------------------------+
		 */

		max_emc = clk_round_rate(scale3d.clk_3d_emc, UINT_MAX);
		min_emc = clk_round_rate(scale3d.clk_3d_emc, 0);

		scale3d.emc_slope = (max_emc - min_emc) /
			(scale3d.max_rate_3d - scale3d.min_rate_3d);
		scale3d.emc_offset = max_emc -
			scale3d.emc_slope * scale3d.max_rate_3d;
		/* guarantee max 3d rate maps to max emc rate */
		scale3d.emc_offset += max_emc -
			(scale3d.emc_slope * scale3d.max_rate_3d +
			scale3d.emc_offset);

		scale3d.emc_dip_offset = (max_emc - min_emc) / 4;
		scale3d.emc_dip_slope =
			-4 * (scale3d.emc_dip_offset /
			(POW2(scale3d.max_rate_3d - scale3d.min_rate_3d)));
		scale3d.emc_xmid =
			(scale3d.max_rate_3d + scale3d.min_rate_3d) / 2;
		/* shift the dip so it is exactly zero at the max rate */
		correction =
			scale3d.emc_dip_offset +
			scale3d.emc_dip_slope *
			POW2(scale3d.max_rate_3d - scale3d.emc_xmid);
		scale3d.emc_dip_offset -= correction;

		INIT_WORK(&scale3d.work, scale3d_clocks_handler);
		INIT_DELAYED_WORK(&scale3d.idle_timer, scale3d_idle_handler);

		/* set scaling parameter defaults */
		scale3d.enable = 1;
		scale3d.period = scale3d.p_period = 100000;
		scale3d.idle_min = scale3d.p_idle_min = 10;
		scale3d.idle_max = scale3d.p_idle_max = 15;
		scale3d.fast_response = scale3d.p_fast_response = 7000;
		scale3d.p_scale_emc = 1;
		scale3d.p_emc_dip = 1;
		scale3d.p_verbosity = 0;
		scale3d.p_adjust = 1;

		error = device_create_file(&d->dev,
				&dev_attr_enable_3d_scaling);
		if (error)
			dev_err(&d->dev, "failed to create sysfs attributes");

		scale3d.init = 1;
	}

	/* on every call (including resume) restart the load counters */
	nvhost_scale3d_reset();
}
656 | |||
657 | void nvhost_scale3d_deinit(struct nvhost_device *dev) | ||
658 | { | ||
659 | device_remove_file(&dev->dev, &dev_attr_enable_3d_scaling); | ||
660 | scale3d.init = 0; | ||
661 | } | ||
diff --git a/drivers/video/tegra/host/gr3d/scale3d.h b/drivers/video/tegra/host/gr3d/scale3d.h new file mode 100644 index 00000000000..f8aae1d591a --- /dev/null +++ b/drivers/video/tegra/host/gr3d/scale3d.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/t30/scale3d.h | ||
3 | * | ||
4 | * Tegra Graphics Host 3D Clock Scaling | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
#ifndef NVHOST_T30_SCALE3D_H
#define NVHOST_T30_SCALE3D_H

/* forward declarations — callers only pass pointers */
struct nvhost_device;
struct device;
struct dentry;

/* Initialization and de-initialization for module */
void nvhost_scale3d_init(struct nvhost_device *);
void nvhost_scale3d_deinit(struct nvhost_device *);

/* Suspend is called when powering down module */
void nvhost_scale3d_suspend(struct nvhost_device *);

/* reset 3d module load counters, called on resume */
void nvhost_scale3d_reset(void);

/*
 * call when performing submit to notify scaling mechanism that 3d module is
 * in use
 */
void nvhost_scale3d_notify_busy(struct nvhost_device *);
/* call when all outstanding submits have completed (module idle) */
void nvhost_scale3d_notify_idle(struct nvhost_device *);

/* create the debugfs tunables under directory @de */
void nvhost_scale3d_debug_init(struct dentry *de);

#endif
diff --git a/drivers/video/tegra/host/host1x/Makefile b/drivers/video/tegra/host/host1x/Makefile new file mode 100644 index 00000000000..c3214ffe147 --- /dev/null +++ b/drivers/video/tegra/host/host1x/Makefile | |||
@@ -0,0 +1,12 @@ | |||
# Enable gcov profiling for objects built in this directory
GCOV_PROFILE := y

# nvhost-private headers live one level up
EXTRA_CFLAGS += -Idrivers/video/tegra/host

# Objects composing the host1x backend (syncpt, channel, interrupt,
# command DMA and debug support)
nvhost-host1x-objs = \
	host1x_syncpt.o \
	host1x_channel.o \
	host1x_intr.o \
	host1x_cdma.o \
	host1x_debug.o

obj-$(CONFIG_TEGRA_GRHOST) += nvhost-host1x.o
diff --git a/drivers/video/tegra/host/host1x/host1x_cdma.c b/drivers/video/tegra/host/host1x/host1x_cdma.c new file mode 100644 index 00000000000..cdd6026718b --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_cdma.c | |||
@@ -0,0 +1,665 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/host1x/host1x_cdma.c | ||
3 | * | ||
4 | * Tegra Graphics Host Command DMA | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <linux/slab.h> | ||
22 | #include "nvhost_cdma.h" | ||
23 | #include "dev.h" | ||
24 | |||
25 | #include "host1x_hardware.h" | ||
26 | #include "host1x_syncpt.h" | ||
27 | #include "host1x_cdma.h" | ||
28 | #include "host1x_hwctx.h" | ||
29 | |||
/*
 * Compose a CHANNEL_DMACTRL register value from its three control bits:
 * @stop:     DMASTOP - stop command DMA fetches
 * @get_rst:  DMAGETRST - hold the DMA GET pointer in reset
 * @init_get: DMAINITGET - load DMA GET from DMA PUT when GET reset drops
 */
static inline u32 host1x_channel_dmactrl(int stop, int get_rst, int init_get)
{
	return HOST1X_CREATE(CHANNEL_DMACTRL, DMASTOP, stop)
		| HOST1X_CREATE(CHANNEL_DMACTRL, DMAGETRST, get_rst)
		| HOST1X_CREATE(CHANNEL_DMACTRL, DMAINITGET, init_get);
}
36 | |||
37 | static void cdma_timeout_handler(struct work_struct *work); | ||
38 | |||
39 | /* | ||
40 | * push_buffer | ||
41 | * | ||
42 | * The push buffer is a circular array of words to be fetched by command DMA. | ||
43 | * Note that it works slightly differently to the sync queue; fence == cur | ||
44 | * means that the push buffer is full, not empty. | ||
45 | */ | ||
46 | |||
47 | |||
48 | /** | ||
49 | * Reset to empty push buffer | ||
50 | */ | ||
51 | static void push_buffer_reset(struct push_buffer *pb) | ||
52 | { | ||
53 | pb->fence = PUSH_BUFFER_SIZE - 8; | ||
54 | pb->cur = 0; | ||
55 | } | ||
56 | |||
57 | /** | ||
58 | * Init push buffer resources | ||
59 | */ | ||
60 | static int push_buffer_init(struct push_buffer *pb) | ||
61 | { | ||
62 | struct nvhost_cdma *cdma = pb_to_cdma(pb); | ||
63 | struct nvmap_client *nvmap = cdma_to_nvmap(cdma); | ||
64 | pb->mem = NULL; | ||
65 | pb->mapped = NULL; | ||
66 | pb->phys = 0; | ||
67 | pb->nvmap = NULL; | ||
68 | |||
69 | BUG_ON(!cdma_pb_op(cdma).reset); | ||
70 | cdma_pb_op(cdma).reset(pb); | ||
71 | |||
72 | /* allocate and map pushbuffer memory */ | ||
73 | pb->mem = nvmap_alloc(nvmap, PUSH_BUFFER_SIZE + 4, 32, | ||
74 | NVMAP_HANDLE_WRITE_COMBINE, 0); | ||
75 | if (IS_ERR_OR_NULL(pb->mem)) { | ||
76 | pb->mem = NULL; | ||
77 | goto fail; | ||
78 | } | ||
79 | pb->mapped = nvmap_mmap(pb->mem); | ||
80 | if (pb->mapped == NULL) | ||
81 | goto fail; | ||
82 | |||
83 | /* pin pushbuffer and get physical address */ | ||
84 | pb->phys = nvmap_pin(nvmap, pb->mem); | ||
85 | if (pb->phys >= 0xfffff000) { | ||
86 | pb->phys = 0; | ||
87 | goto fail; | ||
88 | } | ||
89 | |||
90 | /* memory for storing nvmap client and handles for each opcode pair */ | ||
91 | pb->nvmap = kzalloc(NVHOST_GATHER_QUEUE_SIZE * | ||
92 | sizeof(struct nvmap_client_handle), | ||
93 | GFP_KERNEL); | ||
94 | if (!pb->nvmap) | ||
95 | goto fail; | ||
96 | |||
97 | /* put the restart at the end of pushbuffer memory */ | ||
98 | *(pb->mapped + (PUSH_BUFFER_SIZE >> 2)) = | ||
99 | nvhost_opcode_restart(pb->phys); | ||
100 | |||
101 | return 0; | ||
102 | |||
103 | fail: | ||
104 | cdma_pb_op(cdma).destroy(pb); | ||
105 | return -ENOMEM; | ||
106 | } | ||
107 | |||
108 | /** | ||
109 | * Clean up push buffer resources | ||
110 | */ | ||
111 | static void push_buffer_destroy(struct push_buffer *pb) | ||
112 | { | ||
113 | struct nvhost_cdma *cdma = pb_to_cdma(pb); | ||
114 | struct nvmap_client *nvmap = cdma_to_nvmap(cdma); | ||
115 | if (pb->mapped) | ||
116 | nvmap_munmap(pb->mem, pb->mapped); | ||
117 | |||
118 | if (pb->phys != 0) | ||
119 | nvmap_unpin(nvmap, pb->mem); | ||
120 | |||
121 | if (pb->mem) | ||
122 | nvmap_free(nvmap, pb->mem); | ||
123 | |||
124 | kfree(pb->nvmap); | ||
125 | |||
126 | pb->mem = NULL; | ||
127 | pb->mapped = NULL; | ||
128 | pb->phys = 0; | ||
129 | pb->nvmap = 0; | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * Push two words to the push buffer | ||
134 | * Caller must ensure push buffer is not full | ||
135 | */ | ||
136 | static void push_buffer_push_to(struct push_buffer *pb, | ||
137 | struct nvmap_client *client, | ||
138 | struct nvmap_handle *handle, u32 op1, u32 op2) | ||
139 | { | ||
140 | u32 cur = pb->cur; | ||
141 | u32 *p = (u32 *)((u32)pb->mapped + cur); | ||
142 | u32 cur_nvmap = (cur/8) & (NVHOST_GATHER_QUEUE_SIZE - 1); | ||
143 | BUG_ON(cur == pb->fence); | ||
144 | *(p++) = op1; | ||
145 | *(p++) = op2; | ||
146 | pb->nvmap[cur_nvmap].client = client; | ||
147 | pb->nvmap[cur_nvmap].handle = handle; | ||
148 | pb->cur = (cur + 8) & (PUSH_BUFFER_SIZE - 1); | ||
149 | } | ||
150 | |||
151 | /** | ||
152 | * Pop a number of two word slots from the push buffer | ||
153 | * Caller must ensure push buffer is not empty | ||
154 | */ | ||
155 | static void push_buffer_pop_from(struct push_buffer *pb, | ||
156 | unsigned int slots) | ||
157 | { | ||
158 | /* Clear the nvmap references for old items from pb */ | ||
159 | unsigned int i; | ||
160 | u32 fence_nvmap = pb->fence/8; | ||
161 | for (i = 0; i < slots; i++) { | ||
162 | int cur_fence_nvmap = (fence_nvmap+i) | ||
163 | & (NVHOST_GATHER_QUEUE_SIZE - 1); | ||
164 | struct nvmap_client_handle *h = | ||
165 | &pb->nvmap[cur_fence_nvmap]; | ||
166 | h->client = NULL; | ||
167 | h->handle = NULL; | ||
168 | } | ||
169 | /* Advance the next write position */ | ||
170 | pb->fence = (pb->fence + slots * 8) & (PUSH_BUFFER_SIZE - 1); | ||
171 | } | ||
172 | |||
173 | /** | ||
174 | * Return the number of two word slots free in the push buffer | ||
175 | */ | ||
176 | static u32 push_buffer_space(struct push_buffer *pb) | ||
177 | { | ||
178 | return ((pb->fence - pb->cur) & (PUSH_BUFFER_SIZE - 1)) / 8; | ||
179 | } | ||
180 | |||
/**
 * Bus address command DMA should fetch up to (the DMAPUT value):
 * the pinned buffer base plus the current write offset.
 */
static u32 push_buffer_putptr(struct push_buffer *pb)
{
	return pb->phys + pb->cur;
}
185 | |||
186 | /* | ||
187 | * The syncpt incr buffer is filled with methods to increment syncpts, which | ||
188 | * is later GATHER-ed into the mainline PB. It's used when a timed out context | ||
189 | * is interleaved with other work, so needs to inline the syncpt increments | ||
190 | * to maintain the count (but otherwise does no work). | ||
191 | */ | ||
192 | |||
193 | /** | ||
194 | * Init timeout and syncpt incr buffer resources | ||
195 | */ | ||
196 | static int cdma_timeout_init(struct nvhost_cdma *cdma, | ||
197 | u32 syncpt_id) | ||
198 | { | ||
199 | struct nvhost_master *dev = cdma_to_dev(cdma); | ||
200 | struct nvmap_client *nvmap = cdma_to_nvmap(cdma); | ||
201 | struct syncpt_buffer *sb = &cdma->syncpt_buffer; | ||
202 | struct nvhost_channel *ch = cdma_to_channel(cdma); | ||
203 | u32 i = 0; | ||
204 | |||
205 | if (syncpt_id == NVSYNCPT_INVALID) | ||
206 | return -EINVAL; | ||
207 | |||
208 | /* allocate and map syncpt incr memory */ | ||
209 | sb->mem = nvmap_alloc(nvmap, | ||
210 | (SYNCPT_INCR_BUFFER_SIZE_WORDS * sizeof(u32)), 32, | ||
211 | NVMAP_HANDLE_WRITE_COMBINE, 0); | ||
212 | if (IS_ERR_OR_NULL(sb->mem)) { | ||
213 | sb->mem = NULL; | ||
214 | goto fail; | ||
215 | } | ||
216 | sb->mapped = nvmap_mmap(sb->mem); | ||
217 | if (sb->mapped == NULL) | ||
218 | goto fail; | ||
219 | |||
220 | /* pin syncpt buffer and get physical address */ | ||
221 | sb->phys = nvmap_pin(nvmap, sb->mem); | ||
222 | if (sb->phys >= 0xfffff000) { | ||
223 | sb->phys = 0; | ||
224 | goto fail; | ||
225 | } | ||
226 | |||
227 | dev_dbg(&dev->dev->dev, "%s: SYNCPT_INCR buffer at 0x%x\n", | ||
228 | __func__, sb->phys); | ||
229 | |||
230 | sb->words_per_incr = (syncpt_id == NVSYNCPT_3D) ? 5 : 3; | ||
231 | sb->incr_per_buffer = (SYNCPT_INCR_BUFFER_SIZE_WORDS / | ||
232 | sb->words_per_incr); | ||
233 | |||
234 | /* init buffer with SETCL and INCR_SYNCPT methods */ | ||
235 | while (i < sb->incr_per_buffer) { | ||
236 | sb->mapped[i++] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, | ||
237 | 0, 0); | ||
238 | sb->mapped[i++] = nvhost_opcode_imm_incr_syncpt( | ||
239 | NV_SYNCPT_IMMEDIATE, | ||
240 | syncpt_id); | ||
241 | if (syncpt_id == NVSYNCPT_3D) { | ||
242 | /* also contains base increments */ | ||
243 | sb->mapped[i++] = nvhost_opcode_nonincr( | ||
244 | NV_CLASS_HOST_INCR_SYNCPT_BASE, | ||
245 | 1); | ||
246 | sb->mapped[i++] = nvhost_class_host_incr_syncpt_base( | ||
247 | NVWAITBASE_3D, 1); | ||
248 | } | ||
249 | sb->mapped[i++] = nvhost_opcode_setclass(ch->dev->class, | ||
250 | 0, 0); | ||
251 | } | ||
252 | wmb(); | ||
253 | |||
254 | INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler); | ||
255 | cdma->timeout.initialized = true; | ||
256 | |||
257 | return 0; | ||
258 | fail: | ||
259 | cdma_op(cdma).timeout_destroy(cdma); | ||
260 | return -ENOMEM; | ||
261 | } | ||
262 | |||
263 | /** | ||
264 | * Clean up timeout syncpt buffer resources | ||
265 | */ | ||
266 | static void cdma_timeout_destroy(struct nvhost_cdma *cdma) | ||
267 | { | ||
268 | struct nvmap_client *nvmap = cdma_to_nvmap(cdma); | ||
269 | struct syncpt_buffer *sb = &cdma->syncpt_buffer; | ||
270 | |||
271 | if (sb->mapped) | ||
272 | nvmap_munmap(sb->mem, sb->mapped); | ||
273 | |||
274 | if (sb->phys != 0) | ||
275 | nvmap_unpin(nvmap, sb->mem); | ||
276 | |||
277 | if (sb->mem) | ||
278 | nvmap_free(nvmap, sb->mem); | ||
279 | |||
280 | sb->mem = NULL; | ||
281 | sb->mapped = NULL; | ||
282 | sb->phys = 0; | ||
283 | |||
284 | if (cdma->timeout.initialized) | ||
285 | cancel_delayed_work(&cdma->timeout.wq); | ||
286 | cdma->timeout.initialized = false; | ||
287 | } | ||
288 | |||
289 | /** | ||
290 | * Increment timedout buffer's syncpt via CPU. | ||
291 | */ | ||
292 | static void cdma_timeout_cpu_incr(struct nvhost_cdma *cdma, u32 getptr, | ||
293 | u32 syncpt_incrs, u32 syncval, u32 nr_slots) | ||
294 | { | ||
295 | struct nvhost_master *dev = cdma_to_dev(cdma); | ||
296 | struct push_buffer *pb = &cdma->push_buffer; | ||
297 | u32 i, getidx; | ||
298 | |||
299 | for (i = 0; i < syncpt_incrs; i++) | ||
300 | nvhost_syncpt_cpu_incr(&dev->syncpt, cdma->timeout.syncpt_id); | ||
301 | |||
302 | /* after CPU incr, ensure shadow is up to date */ | ||
303 | nvhost_syncpt_update_min(&dev->syncpt, cdma->timeout.syncpt_id); | ||
304 | |||
305 | /* update WAITBASE_3D by same number of incrs */ | ||
306 | if (cdma->timeout.syncpt_id == NVSYNCPT_3D) { | ||
307 | void __iomem *p; | ||
308 | p = dev->sync_aperture + HOST1X_SYNC_SYNCPT_BASE_0 + | ||
309 | (NVWAITBASE_3D * sizeof(u32)); | ||
310 | writel(syncval, p); | ||
311 | } | ||
312 | |||
313 | /* NOP all the PB slots */ | ||
314 | getidx = getptr - pb->phys; | ||
315 | while (nr_slots--) { | ||
316 | u32 *p = (u32 *)((u32)pb->mapped + getidx); | ||
317 | *(p++) = NVHOST_OPCODE_NOOP; | ||
318 | *(p++) = NVHOST_OPCODE_NOOP; | ||
319 | dev_dbg(&dev->dev->dev, "%s: NOP at 0x%x\n", | ||
320 | __func__, pb->phys + getidx); | ||
321 | getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1); | ||
322 | } | ||
323 | wmb(); | ||
324 | } | ||
325 | |||
326 | /** | ||
327 | * This routine is called at the point we transition back into a timed | ||
328 | * ctx. The syncpts are incremented via pushbuffer with a flag indicating | ||
329 | * whether there's a CTXSAVE that should be still executed (for the | ||
330 | * preceding HW ctx). | ||
331 | */ | ||
332 | static void cdma_timeout_pb_incr(struct nvhost_cdma *cdma, u32 getptr, | ||
333 | u32 syncpt_incrs, u32 nr_slots, | ||
334 | bool exec_ctxsave) | ||
335 | { | ||
336 | struct nvhost_master *dev = cdma_to_dev(cdma); | ||
337 | struct syncpt_buffer *sb = &cdma->syncpt_buffer; | ||
338 | struct push_buffer *pb = &cdma->push_buffer; | ||
339 | struct host1x_hwctx *hwctx = to_host1x_hwctx(cdma->timeout.ctx); | ||
340 | u32 getidx, *p; | ||
341 | |||
342 | /* should have enough slots to incr to desired count */ | ||
343 | BUG_ON(syncpt_incrs > (nr_slots * sb->incr_per_buffer)); | ||
344 | |||
345 | getidx = getptr - pb->phys; | ||
346 | if (exec_ctxsave) { | ||
347 | /* don't disrupt the CTXSAVE of a good/non-timed out ctx */ | ||
348 | nr_slots -= hwctx->save_slots; | ||
349 | syncpt_incrs -= hwctx->save_incrs; | ||
350 | |||
351 | getidx += (hwctx->save_slots * 8); | ||
352 | getidx &= (PUSH_BUFFER_SIZE - 1); | ||
353 | |||
354 | dev_dbg(&dev->dev->dev, | ||
355 | "%s: exec CTXSAVE of prev ctx (slots %d, incrs %d)\n", | ||
356 | __func__, nr_slots, syncpt_incrs); | ||
357 | } | ||
358 | |||
359 | while (syncpt_incrs) { | ||
360 | u32 incrs, count; | ||
361 | |||
362 | /* GATHER count are incrs * number of DWORDs per incr */ | ||
363 | incrs = min(syncpt_incrs, sb->incr_per_buffer); | ||
364 | count = incrs * sb->words_per_incr; | ||
365 | |||
366 | p = (u32 *)((u32)pb->mapped + getidx); | ||
367 | *(p++) = nvhost_opcode_gather(count); | ||
368 | *(p++) = sb->phys; | ||
369 | |||
370 | dev_dbg(&dev->dev->dev, | ||
371 | "%s: GATHER at 0x%x, from 0x%x, dcount = %d\n", | ||
372 | __func__, | ||
373 | pb->phys + getidx, sb->phys, | ||
374 | (incrs * sb->words_per_incr)); | ||
375 | |||
376 | syncpt_incrs -= incrs; | ||
377 | getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1); | ||
378 | nr_slots--; | ||
379 | } | ||
380 | |||
381 | /* NOP remaining slots */ | ||
382 | while (nr_slots--) { | ||
383 | p = (u32 *)((u32)pb->mapped + getidx); | ||
384 | *(p++) = NVHOST_OPCODE_NOOP; | ||
385 | *(p++) = NVHOST_OPCODE_NOOP; | ||
386 | dev_dbg(&dev->dev->dev, "%s: NOP at 0x%x\n", | ||
387 | __func__, pb->phys + getidx); | ||
388 | getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1); | ||
389 | } | ||
390 | wmb(); | ||
391 | } | ||
392 | |||
393 | /** | ||
394 | * Start channel DMA | ||
395 | */ | ||
396 | static void cdma_start(struct nvhost_cdma *cdma) | ||
397 | { | ||
398 | void __iomem *chan_regs = cdma_to_channel(cdma)->aperture; | ||
399 | |||
400 | if (cdma->running) | ||
401 | return; | ||
402 | |||
403 | BUG_ON(!cdma_pb_op(cdma).putptr); | ||
404 | cdma->last_put = cdma_pb_op(cdma).putptr(&cdma->push_buffer); | ||
405 | |||
406 | writel(host1x_channel_dmactrl(true, false, false), | ||
407 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
408 | |||
409 | /* set base, put, end pointer (all of memory) */ | ||
410 | writel(0, chan_regs + HOST1X_CHANNEL_DMASTART); | ||
411 | writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT); | ||
412 | writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND); | ||
413 | |||
414 | /* reset GET */ | ||
415 | writel(host1x_channel_dmactrl(true, true, true), | ||
416 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
417 | |||
418 | /* start the command DMA */ | ||
419 | writel(host1x_channel_dmactrl(false, false, false), | ||
420 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
421 | |||
422 | cdma->running = true; | ||
423 | } | ||
424 | |||
425 | /** | ||
426 | * Similar to cdma_start(), but rather than starting from an idle | ||
427 | * state (where DMA GET is set to DMA PUT), on a timeout we restore | ||
428 | * DMA GET from an explicit value (so DMA may again be pending). | ||
429 | */ | ||
430 | static void cdma_timeout_restart(struct nvhost_cdma *cdma, u32 getptr) | ||
431 | { | ||
432 | struct nvhost_master *dev = cdma_to_dev(cdma); | ||
433 | void __iomem *chan_regs = cdma_to_channel(cdma)->aperture; | ||
434 | |||
435 | if (cdma->running) | ||
436 | return; | ||
437 | |||
438 | BUG_ON(!cdma_pb_op(cdma).putptr); | ||
439 | cdma->last_put = cdma_pb_op(cdma).putptr(&cdma->push_buffer); | ||
440 | |||
441 | writel(host1x_channel_dmactrl(true, false, false), | ||
442 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
443 | |||
444 | /* set base, end pointer (all of memory) */ | ||
445 | writel(0, chan_regs + HOST1X_CHANNEL_DMASTART); | ||
446 | writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND); | ||
447 | |||
448 | /* set GET, by loading the value in PUT (then reset GET) */ | ||
449 | writel(getptr, chan_regs + HOST1X_CHANNEL_DMAPUT); | ||
450 | writel(host1x_channel_dmactrl(true, true, true), | ||
451 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
452 | |||
453 | dev_dbg(&dev->dev->dev, | ||
454 | "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", | ||
455 | __func__, | ||
456 | readl(chan_regs + HOST1X_CHANNEL_DMAGET), | ||
457 | readl(chan_regs + HOST1X_CHANNEL_DMAPUT), | ||
458 | cdma->last_put); | ||
459 | |||
460 | /* deassert GET reset and set PUT */ | ||
461 | writel(host1x_channel_dmactrl(true, false, false), | ||
462 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
463 | writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT); | ||
464 | |||
465 | /* start the command DMA */ | ||
466 | writel(host1x_channel_dmactrl(false, false, false), | ||
467 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
468 | |||
469 | cdma->running = true; | ||
470 | } | ||
471 | |||
472 | /** | ||
473 | * Kick channel DMA into action by writing its PUT offset (if it has changed) | ||
474 | */ | ||
475 | static void cdma_kick(struct nvhost_cdma *cdma) | ||
476 | { | ||
477 | u32 put; | ||
478 | BUG_ON(!cdma_pb_op(cdma).putptr); | ||
479 | |||
480 | put = cdma_pb_op(cdma).putptr(&cdma->push_buffer); | ||
481 | |||
482 | if (put != cdma->last_put) { | ||
483 | void __iomem *chan_regs = cdma_to_channel(cdma)->aperture; | ||
484 | wmb(); | ||
485 | writel(put, chan_regs + HOST1X_CHANNEL_DMAPUT); | ||
486 | cdma->last_put = put; | ||
487 | } | ||
488 | } | ||
489 | |||
/**
 * Stop channel DMA once all queued work has finished: waits (holding
 * cdma->lock) for the sync queue to drain, then asserts DMASTOP.
 */
static void cdma_stop(struct nvhost_cdma *cdma)
{
	void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;

	mutex_lock(&cdma->lock);
	if (cdma->running) {
		nvhost_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
		writel(host1x_channel_dmactrl(true, false, false),
			chan_regs + HOST1X_CHANNEL_DMACTRL);
		cdma->running = false;
	}
	mutex_unlock(&cdma->lock);
}
503 | |||
504 | /** | ||
505 | * Retrieve the op pair at a slot offset from a DMA address | ||
506 | */ | ||
507 | void cdma_peek(struct nvhost_cdma *cdma, | ||
508 | u32 dmaget, int slot, u32 *out) | ||
509 | { | ||
510 | u32 offset = dmaget - cdma->push_buffer.phys; | ||
511 | u32 *p = cdma->push_buffer.mapped; | ||
512 | |||
513 | offset = ((offset + slot * 8) & (PUSH_BUFFER_SIZE - 1)) >> 2; | ||
514 | out[0] = p[offset]; | ||
515 | out[1] = p[offset + 1]; | ||
516 | } | ||
517 | |||
518 | /** | ||
519 | * Stops both channel's command processor and CDMA immediately. | ||
520 | * Also, tears down the channel and resets corresponding module. | ||
521 | */ | ||
522 | void cdma_timeout_teardown_begin(struct nvhost_cdma *cdma) | ||
523 | { | ||
524 | struct nvhost_master *dev = cdma_to_dev(cdma); | ||
525 | struct nvhost_channel *ch = cdma_to_channel(cdma); | ||
526 | u32 cmdproc_stop; | ||
527 | |||
528 | BUG_ON(cdma->torndown); | ||
529 | |||
530 | dev_dbg(&dev->dev->dev, | ||
531 | "begin channel teardown (channel id %d)\n", ch->chid); | ||
532 | |||
533 | cmdproc_stop = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP); | ||
534 | cmdproc_stop |= BIT(ch->chid); | ||
535 | writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP); | ||
536 | |||
537 | dev_dbg(&dev->dev->dev, | ||
538 | "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", | ||
539 | __func__, | ||
540 | readl(ch->aperture + HOST1X_CHANNEL_DMAGET), | ||
541 | readl(ch->aperture + HOST1X_CHANNEL_DMAPUT), | ||
542 | cdma->last_put); | ||
543 | |||
544 | writel(host1x_channel_dmactrl(true, false, false), | ||
545 | ch->aperture + HOST1X_CHANNEL_DMACTRL); | ||
546 | |||
547 | writel(BIT(ch->chid), dev->sync_aperture + HOST1X_SYNC_CH_TEARDOWN); | ||
548 | nvhost_module_reset(ch->dev); | ||
549 | |||
550 | cdma->running = false; | ||
551 | cdma->torndown = true; | ||
552 | } | ||
553 | |||
/**
 * Finish a teardown started by cdma_timeout_teardown_begin(): re-enable
 * the channel's command processor and restart command DMA with GET
 * restored to @getptr (skipping past the bad submit).
 */
void cdma_timeout_teardown_end(struct nvhost_cdma *cdma, u32 getptr)
{
	struct nvhost_master *dev = cdma_to_dev(cdma);
	struct nvhost_channel *ch = cdma_to_channel(cdma);
	u32 cmdproc_stop;

	BUG_ON(!cdma->torndown || cdma->running);

	dev_dbg(&dev->dev->dev,
		"end channel teardown (id %d, DMAGET restart = 0x%x)\n",
		ch->chid, getptr);

	/* clear this channel's bit in the shared cmdproc stop mask */
	cmdproc_stop = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
	cmdproc_stop &= ~(BIT(ch->chid));
	writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);

	cdma->torndown = false;
	cdma_timeout_restart(cdma, getptr);
}
573 | |||
574 | /** | ||
575 | * If this timeout fires, it indicates the current sync_queue entry has | ||
576 | * exceeded its TTL and the userctx should be timed out and remaining | ||
577 | * submits already issued cleaned up (future submits return an error). | ||
578 | */ | ||
579 | static void cdma_timeout_handler(struct work_struct *work) | ||
580 | { | ||
581 | struct nvhost_cdma *cdma; | ||
582 | struct nvhost_master *dev; | ||
583 | struct nvhost_syncpt *sp; | ||
584 | struct nvhost_channel *ch; | ||
585 | |||
586 | u32 syncpt_val; | ||
587 | |||
588 | u32 prev_cmdproc, cmdproc_stop; | ||
589 | |||
590 | cdma = container_of(to_delayed_work(work), struct nvhost_cdma, | ||
591 | timeout.wq); | ||
592 | dev = cdma_to_dev(cdma); | ||
593 | sp = &dev->syncpt; | ||
594 | ch = cdma_to_channel(cdma); | ||
595 | |||
596 | mutex_lock(&cdma->lock); | ||
597 | |||
598 | if (!cdma->timeout.clientid) { | ||
599 | dev_dbg(&dev->dev->dev, | ||
600 | "cdma_timeout: expired, but has no clientid\n"); | ||
601 | mutex_unlock(&cdma->lock); | ||
602 | return; | ||
603 | } | ||
604 | |||
605 | /* stop processing to get a clean snapshot */ | ||
606 | prev_cmdproc = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP); | ||
607 | cmdproc_stop = prev_cmdproc | BIT(ch->chid); | ||
608 | writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP); | ||
609 | |||
610 | dev_dbg(&dev->dev->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n", | ||
611 | prev_cmdproc, cmdproc_stop); | ||
612 | |||
613 | syncpt_val = nvhost_syncpt_update_min(&dev->syncpt, | ||
614 | cdma->timeout.syncpt_id); | ||
615 | |||
616 | /* has buffer actually completed? */ | ||
617 | if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) { | ||
618 | dev_dbg(&dev->dev->dev, | ||
619 | "cdma_timeout: expired, but buffer had completed\n"); | ||
620 | /* restore */ | ||
621 | cmdproc_stop = prev_cmdproc & ~(BIT(ch->chid)); | ||
622 | writel(cmdproc_stop, | ||
623 | dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP); | ||
624 | mutex_unlock(&cdma->lock); | ||
625 | return; | ||
626 | } | ||
627 | |||
628 | dev_warn(&dev->dev->dev, | ||
629 | "%s: timeout: %d (%s) ctx 0x%p, HW thresh %d, done %d\n", | ||
630 | __func__, | ||
631 | cdma->timeout.syncpt_id, | ||
632 | syncpt_op(sp).name(sp, cdma->timeout.syncpt_id), | ||
633 | cdma->timeout.ctx, | ||
634 | syncpt_val, cdma->timeout.syncpt_val); | ||
635 | |||
636 | /* stop HW, resetting channel/module */ | ||
637 | cdma_op(cdma).timeout_teardown_begin(cdma); | ||
638 | |||
639 | nvhost_cdma_update_sync_queue(cdma, sp, &dev->dev->dev); | ||
640 | mutex_unlock(&cdma->lock); | ||
641 | } | ||
642 | |||
643 | int host1x_init_cdma_support(struct nvhost_master *host) | ||
644 | { | ||
645 | host->op.cdma.start = cdma_start; | ||
646 | host->op.cdma.stop = cdma_stop; | ||
647 | host->op.cdma.kick = cdma_kick; | ||
648 | |||
649 | host->op.cdma.timeout_init = cdma_timeout_init; | ||
650 | host->op.cdma.timeout_destroy = cdma_timeout_destroy; | ||
651 | host->op.cdma.timeout_teardown_begin = cdma_timeout_teardown_begin; | ||
652 | host->op.cdma.timeout_teardown_end = cdma_timeout_teardown_end; | ||
653 | host->op.cdma.timeout_cpu_incr = cdma_timeout_cpu_incr; | ||
654 | host->op.cdma.timeout_pb_incr = cdma_timeout_pb_incr; | ||
655 | |||
656 | host->op.push_buffer.reset = push_buffer_reset; | ||
657 | host->op.push_buffer.init = push_buffer_init; | ||
658 | host->op.push_buffer.destroy = push_buffer_destroy; | ||
659 | host->op.push_buffer.push_to = push_buffer_push_to; | ||
660 | host->op.push_buffer.pop_from = push_buffer_pop_from; | ||
661 | host->op.push_buffer.space = push_buffer_space; | ||
662 | host->op.push_buffer.putptr = push_buffer_putptr; | ||
663 | |||
664 | return 0; | ||
665 | } | ||
diff --git a/drivers/video/tegra/host/host1x/host1x_cdma.h b/drivers/video/tegra/host/host1x/host1x_cdma.h new file mode 100644 index 00000000000..60909236a7c --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_cdma.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/host1x/host1x_cdma.h | ||
3 | * | ||
4 | * Tegra Graphics Host Channel | ||
5 | * | ||
6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
#ifndef __NVHOST_HOST1X_HOST1X_CDMA_H
#define __NVHOST_HOST1X_HOST1X_CDMA_H

/* Size of the sync queue. If it is too small, we won't be able to queue up
 * many command buffers. If it is too large, we waste memory. */
#define NVHOST_SYNC_QUEUE_SIZE 512

/* Number of gathers we allow to be queued up per channel. Must be a
 * power of two. Currently sized such that pushbuffer is 4KB (512*8B). */
#define NVHOST_GATHER_QUEUE_SIZE 512

/* 8 bytes per slot. (This number does not include the final RESTART.) */
#define PUSH_BUFFER_SIZE (NVHOST_GATHER_QUEUE_SIZE * 8)

/* 4K page containing GATHERed methods to increment channel syncpts
 * and replaces the original timed out contexts GATHER slots */
#define SYNCPT_INCR_BUFFER_SIZE_WORDS (4096 / sizeof(u32))

/* install the host1x cdma/push_buffer op implementations on the master */
int host1x_init_cdma_support(struct nvhost_master *);

#endif
diff --git a/drivers/video/tegra/host/host1x/host1x_channel.c b/drivers/video/tegra/host/host1x/host1x_channel.c new file mode 100644 index 00000000000..b16a34f416a --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_channel.c | |||
@@ -0,0 +1,627 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/host1x/channel_host1x.c | ||
3 | * | ||
4 | * Tegra Graphics Host Channel | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include "nvhost_channel.h" | ||
22 | #include "dev.h" | ||
23 | #include "nvhost_hwctx.h" | ||
24 | #include <trace/events/nvhost.h> | ||
25 | #include <linux/slab.h> | ||
26 | |||
27 | #include "host1x_syncpt.h" | ||
28 | #include "host1x_channel.h" | ||
29 | #include "host1x_hardware.h" | ||
30 | #include "host1x_hwctx.h" | ||
31 | #include "nvhost_intr.h" | ||
32 | |||
33 | #define NV_FIFO_READ_TIMEOUT 200000 | ||
34 | |||
35 | static void sync_waitbases(struct nvhost_channel *ch, u32 syncpt_val) | ||
36 | { | ||
37 | unsigned long waitbase; | ||
38 | unsigned long int waitbase_mask = ch->dev->waitbases; | ||
39 | if (ch->dev->waitbasesync) { | ||
40 | waitbase = find_first_bit(&waitbase_mask, BITS_PER_LONG); | ||
41 | nvhost_cdma_push(&ch->cdma, | ||
42 | nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, | ||
43 | NV_CLASS_HOST_LOAD_SYNCPT_BASE, | ||
44 | 1), | ||
45 | nvhost_class_host_load_syncpt_base(waitbase, | ||
46 | syncpt_val)); | ||
47 | } | ||
48 | } | ||
49 | |||
50 | static void *pre_submit_ctxsave(struct nvhost_job *job, | ||
51 | struct nvhost_hwctx *cur_ctx) | ||
52 | { | ||
53 | struct nvhost_channel *ch = job->ch; | ||
54 | void *ctxsave_waiter = NULL; | ||
55 | |||
56 | /* Is a save needed? */ | ||
57 | if (!cur_ctx || ch->cur_ctx == job->hwctx) | ||
58 | return NULL; | ||
59 | |||
60 | if (cur_ctx->has_timedout) { | ||
61 | dev_dbg(&ch->dev->dev, | ||
62 | "%s: skip save of timed out context (0x%p)\n", | ||
63 | __func__, ch->cur_ctx); | ||
64 | |||
65 | return NULL; | ||
66 | } | ||
67 | |||
68 | /* Allocate save waiter if needed */ | ||
69 | if (ch->ctxhandler->save_service) { | ||
70 | ctxsave_waiter = nvhost_intr_alloc_waiter(); | ||
71 | if (!ctxsave_waiter) | ||
72 | return ERR_PTR(-ENOMEM); | ||
73 | } | ||
74 | |||
75 | return ctxsave_waiter; | ||
76 | } | ||
77 | |||
/*
 * Push a context save of @cur_ctx to the channel before @job's gathers,
 * adjusting the job's syncpoint increment budget accordingly, and (when
 * a waiter was pre-allocated) schedule the CTXSAVE interrupt that runs
 * the handler's save service.
 *
 * Silently does nothing when no save is required (no current context,
 * same context, or a timed-out context).
 */
static void submit_ctxsave(struct nvhost_job *job, void *ctxsave_waiter,
			struct nvhost_hwctx *cur_ctx)
{
	struct nvhost_master *host = nvhost_get_host(job->ch->dev);
	struct nvhost_channel *ch = job->ch;
	u32 syncval;
	int err;
	u32 save_thresh = 0;

	/* Is a save needed? */
	if (!cur_ctx || cur_ctx == job->hwctx || cur_ctx->has_timedout)
		return;

	/* Retrieve save threshold if we have a waiter */
	if (ctxsave_waiter)
		save_thresh =
			nvhost_syncpt_read_max(&host->syncpt, job->syncpt_id)
			+ to_host1x_hwctx(cur_ctx)->save_thresh;

	/* Adjust the syncpoint max; incr_max has the side effect of
	 * reserving the increments (syncval itself is unused here). */
	syncval = nvhost_syncpt_incr_max(&host->syncpt,
			job->syncpt_id,
			to_host1x_hwctx(cur_ctx)->save_incrs);
	job->syncpt_incrs += to_host1x_hwctx(cur_ctx)->save_incrs;

	/* Send the save to channel */
	cur_ctx->valid = true;
	ch->ctxhandler->save_push(cur_ctx, &ch->cdma);
	nvhost_job_get_hwctx(job, cur_ctx);

	/* Notify save service; the intr layer takes ownership of the
	 * waiter even when add_action reports an error. */
	if (ctxsave_waiter) {
		err = nvhost_intr_add_action(&host->intr,
			job->syncpt_id,
			save_thresh,
			NVHOST_INTR_ACTION_CTXSAVE, cur_ctx,
			ctxsave_waiter,
			NULL);
		ctxsave_waiter = NULL;
		WARN(err, "Failed to set ctx save interrupt");
	}

	trace_nvhost_channel_context_save(ch->dev->name, cur_ctx);
}
122 | |||
123 | static void submit_ctxrestore(struct nvhost_job *job) | ||
124 | { | ||
125 | struct nvhost_master *host = nvhost_get_host(job->ch->dev); | ||
126 | struct nvhost_channel *ch = job->ch; | ||
127 | u32 syncval; | ||
128 | struct host1x_hwctx *ctx = | ||
129 | job->hwctx ? to_host1x_hwctx(job->hwctx) : NULL; | ||
130 | |||
131 | /* First check if we have a valid context to restore */ | ||
132 | if(ch->cur_ctx == job->hwctx || !job->hwctx || !job->hwctx->valid) | ||
133 | return; | ||
134 | |||
135 | /* Increment syncpt max */ | ||
136 | job->syncpt_incrs += ctx->restore_incrs; | ||
137 | syncval = nvhost_syncpt_incr_max(&host->syncpt, | ||
138 | job->syncpt_id, | ||
139 | ctx->restore_incrs); | ||
140 | |||
141 | /* Send restore buffer to channel */ | ||
142 | nvhost_cdma_push_gather(&ch->cdma, | ||
143 | host->nvmap, | ||
144 | nvmap_ref_to_handle(ctx->restore), | ||
145 | nvhost_opcode_gather(ctx->restore_size), | ||
146 | ctx->restore_phys); | ||
147 | |||
148 | trace_nvhost_channel_context_restore(ch->dev->name, &ctx->hwctx); | ||
149 | } | ||
150 | |||
/*
 * Push replacement work for a "null kickoff": the job's real gathers are
 * not submitted, but every user-visible syncpoint increment (and, for 3D,
 * the waitbase increment) still happens so that waiters make progress.
 *
 * NOTE(review): only called from this file; could be static.
 * NOTE(review): the 3D branch dereferences job->hwctx->h without a NULL
 * check -- assumes 3D-class jobs always carry a context; confirm against
 * callers.
 */
void submit_nullkickoff(struct nvhost_job *job, int user_syncpt_incrs)
{
	struct nvhost_channel *ch = job->ch;
	int incr;
	u32 op_incr;

	/* push increments that correspond to nulled out commands;
	 * two increments fit per op pair, plus one odd leftover */
	op_incr = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
			job->syncpt_id);
	for (incr = 0; incr < (user_syncpt_incrs >> 1); incr++)
		nvhost_cdma_push(&ch->cdma, op_incr, op_incr);
	if (user_syncpt_incrs & 1)
		nvhost_cdma_push(&ch->cdma, op_incr, NVHOST_OPCODE_NOOP);

	/* for 3d, waitbase needs to be incremented after each submit */
	if (ch->dev->class == NV_GRAPHICS_3D_CLASS_ID) {
		u32 waitbase = to_host1x_hwctx_handler(job->hwctx->h)->waitbase;
		nvhost_cdma_push(&ch->cdma,
			nvhost_opcode_setclass(
				NV_HOST1X_CLASS_ID,
				NV_CLASS_HOST_INCR_SYNCPT_BASE,
				1),
			nvhost_class_host_incr_syncpt_base(
				waitbase,
				user_syncpt_incrs));
	}
}
178 | |||
179 | void submit_gathers(struct nvhost_job *job) | ||
180 | { | ||
181 | /* push user gathers */ | ||
182 | int i = 0; | ||
183 | for ( ; i < job->num_gathers; i++) { | ||
184 | u32 op1 = nvhost_opcode_gather(job->gathers[i].words); | ||
185 | u32 op2 = job->gathers[i].mem; | ||
186 | nvhost_cdma_push_gather(&job->ch->cdma, | ||
187 | job->nvmap, job->unpins[i/2], | ||
188 | op1, op2); | ||
189 | } | ||
190 | } | ||
191 | |||
/*
 * Submit a job to a host1x client channel.
 *
 * Performs, in order: power-up of the client module, waiter allocation,
 * stale-wait removal, context save/restore submission, pushing of the
 * job's gathers (or null-kickoff replacements), waitbase sync, and
 * scheduling of the submit-complete interrupt.  job->syncpt_end is
 * updated with the final syncpoint threshold for this submit.
 *
 * Returns 0 on success or a negative errno; bails out early with
 * -ETIMEDOUT when the job's context has already timed out.
 */
int host1x_channel_submit(struct nvhost_job *job)
{
	struct nvhost_channel *ch = job->ch;
	struct nvhost_syncpt *sp = &nvhost_get_host(job->ch->dev)->syncpt;
	u32 user_syncpt_incrs = job->syncpt_incrs;
	u32 prev_max = 0;
	u32 syncval;
	int err;
	void *completed_waiter = NULL, *ctxsave_waiter = NULL;

	/* Bail out on timed out contexts */
	if (job->hwctx && job->hwctx->has_timedout)
		return -ETIMEDOUT;

	/* Turn on the client module and host1x */
	nvhost_module_busy(ch->dev);
	if (ch->dev->busy)
		ch->dev->busy(ch->dev);

	/* before error checks, return current max */
	prev_max = job->syncpt_end =
		nvhost_syncpt_read_max(sp, job->syncpt_id);

	/* get submit lock */
	err = mutex_lock_interruptible(&ch->submitlock);
	if (err) {
		nvhost_module_idle(ch->dev);
		goto error;
	}

	/* Do the needed allocations */
	ctxsave_waiter = pre_submit_ctxsave(job, ch->cur_ctx);
	if (IS_ERR(ctxsave_waiter)) {
		err = PTR_ERR(ctxsave_waiter);
		nvhost_module_idle(ch->dev);
		mutex_unlock(&ch->submitlock);
		goto error;
	}

	completed_waiter = nvhost_intr_alloc_waiter();
	if (!completed_waiter) {
		nvhost_module_idle(ch->dev);
		mutex_unlock(&ch->submitlock);
		err = -ENOMEM;
		goto error;
	}

	/* remove stale waits */
	if (job->num_waitchk) {
		err = nvhost_syncpt_wait_check(sp,
				job->nvmap,
				job->waitchk_mask,
				job->waitchk,
				job->num_waitchk);
		if (err) {
			dev_warn(&ch->dev->dev,
				"nvhost_syncpt_wait_check failed: %d\n", err);
			mutex_unlock(&ch->submitlock);
			nvhost_module_idle(ch->dev);
			goto error;
		}
	}

	/* begin a CDMA submit */
	err = nvhost_cdma_begin(&ch->cdma, job);
	if (err) {
		mutex_unlock(&ch->submitlock);
		nvhost_module_idle(ch->dev);
		goto error;
	}

	/* save of the old context and restore of the new must be pushed
	 * before the job's own gathers */
	submit_ctxsave(job, ctxsave_waiter, ch->cur_ctx);
	submit_ctxrestore(job);
	ch->cur_ctx = job->hwctx;

	syncval = nvhost_syncpt_incr_max(sp,
			job->syncpt_id, user_syncpt_incrs);

	job->syncpt_end = syncval;

	/* add a setclass for modules that require it */
	if (ch->dev->class)
		nvhost_cdma_push(&ch->cdma,
			nvhost_opcode_setclass(ch->dev->class, 0, 0),
			NVHOST_OPCODE_NOOP);

	if (job->null_kickoff)
		submit_nullkickoff(job, user_syncpt_incrs);
	else
		submit_gathers(job);

	sync_waitbases(ch, job->syncpt_end);

	/* end CDMA submit & stash pinned hMems into sync queue */
	nvhost_cdma_end(&ch->cdma, job);

	trace_nvhost_channel_submitted(ch->dev->name,
			prev_max, syncval);

	/* schedule a submit complete interrupt; the intr layer owns the
	 * waiter from here on, even when add_action fails */
	err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr,
			job->syncpt_id, syncval,
			NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ch,
			completed_waiter,
			NULL);
	completed_waiter = NULL;
	WARN(err, "Failed to set submit complete interrupt");

	mutex_unlock(&ch->submitlock);

	return 0;

error:
	/* kfree(NULL) is a no-op, so unallocated waiters are safe here */
	kfree(ctxsave_waiter);
	kfree(completed_waiter);
	return err;
}
309 | |||
310 | int host1x_channel_read_3d_reg( | ||
311 | struct nvhost_channel *channel, | ||
312 | struct nvhost_hwctx *hwctx, | ||
313 | u32 offset, | ||
314 | u32 *value) | ||
315 | { | ||
316 | struct host1x_hwctx *hwctx_to_save = NULL; | ||
317 | struct nvhost_hwctx_handler *h = hwctx->h; | ||
318 | struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h); | ||
319 | bool need_restore = false; | ||
320 | u32 syncpt_incrs = 4; | ||
321 | unsigned int pending = 0; | ||
322 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); | ||
323 | void *ref; | ||
324 | void *ctx_waiter, *read_waiter, *completed_waiter; | ||
325 | struct nvhost_job *job; | ||
326 | u32 syncval; | ||
327 | int err; | ||
328 | |||
329 | if (hwctx && hwctx->has_timedout) | ||
330 | return -ETIMEDOUT; | ||
331 | |||
332 | ctx_waiter = nvhost_intr_alloc_waiter(); | ||
333 | read_waiter = nvhost_intr_alloc_waiter(); | ||
334 | completed_waiter = nvhost_intr_alloc_waiter(); | ||
335 | if (!ctx_waiter || !read_waiter || !completed_waiter) { | ||
336 | err = -ENOMEM; | ||
337 | goto done; | ||
338 | } | ||
339 | |||
340 | job = nvhost_job_alloc(channel, hwctx, | ||
341 | NULL, | ||
342 | nvhost_get_host(channel->dev)->nvmap, 0, 0); | ||
343 | if (!job) { | ||
344 | err = -ENOMEM; | ||
345 | goto done; | ||
346 | } | ||
347 | |||
348 | /* keep module powered */ | ||
349 | nvhost_module_busy(channel->dev); | ||
350 | |||
351 | /* get submit lock */ | ||
352 | err = mutex_lock_interruptible(&channel->submitlock); | ||
353 | if (err) { | ||
354 | nvhost_module_idle(channel->dev); | ||
355 | return err; | ||
356 | } | ||
357 | |||
358 | /* context switch */ | ||
359 | if (channel->cur_ctx != hwctx) { | ||
360 | hwctx_to_save = channel->cur_ctx ? | ||
361 | to_host1x_hwctx(channel->cur_ctx) : NULL; | ||
362 | if (hwctx_to_save) { | ||
363 | syncpt_incrs += hwctx_to_save->save_incrs; | ||
364 | hwctx_to_save->hwctx.valid = true; | ||
365 | channel->ctxhandler->get(&hwctx_to_save->hwctx); | ||
366 | } | ||
367 | channel->cur_ctx = hwctx; | ||
368 | if (channel->cur_ctx && channel->cur_ctx->valid) { | ||
369 | need_restore = true; | ||
370 | syncpt_incrs += to_host1x_hwctx(channel->cur_ctx) | ||
371 | ->restore_incrs; | ||
372 | } | ||
373 | } | ||
374 | |||
375 | syncval = nvhost_syncpt_incr_max(&nvhost_get_host(channel->dev)->syncpt, | ||
376 | p->syncpt, syncpt_incrs); | ||
377 | |||
378 | job->syncpt_id = p->syncpt; | ||
379 | job->syncpt_incrs = syncpt_incrs; | ||
380 | job->syncpt_end = syncval; | ||
381 | |||
382 | /* begin a CDMA submit */ | ||
383 | nvhost_cdma_begin(&channel->cdma, job); | ||
384 | |||
385 | /* push save buffer (pre-gather setup depends on unit) */ | ||
386 | if (hwctx_to_save) | ||
387 | h->save_push(&hwctx_to_save->hwctx, &channel->cdma); | ||
388 | |||
389 | /* gather restore buffer */ | ||
390 | if (need_restore) | ||
391 | nvhost_cdma_push(&channel->cdma, | ||
392 | nvhost_opcode_gather(to_host1x_hwctx(channel->cur_ctx) | ||
393 | ->restore_size), | ||
394 | to_host1x_hwctx(channel->cur_ctx)->restore_phys); | ||
395 | |||
396 | /* Switch to 3D - wait for it to complete what it was doing */ | ||
397 | nvhost_cdma_push(&channel->cdma, | ||
398 | nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0), | ||
399 | nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE, | ||
400 | p->syncpt)); | ||
401 | nvhost_cdma_push(&channel->cdma, | ||
402 | nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, | ||
403 | NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1), | ||
404 | nvhost_class_host_wait_syncpt_base(p->syncpt, | ||
405 | p->waitbase, 1)); | ||
406 | /* Tell 3D to send register value to FIFO */ | ||
407 | nvhost_cdma_push(&channel->cdma, | ||
408 | nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1), | ||
409 | nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D, | ||
410 | offset, false)); | ||
411 | nvhost_cdma_push(&channel->cdma, | ||
412 | nvhost_opcode_imm(NV_CLASS_HOST_INDDATA, 0), | ||
413 | NVHOST_OPCODE_NOOP); | ||
414 | /* Increment syncpt to indicate that FIFO can be read */ | ||
415 | nvhost_cdma_push(&channel->cdma, | ||
416 | nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE, | ||
417 | p->syncpt), | ||
418 | NVHOST_OPCODE_NOOP); | ||
419 | /* Wait for value to be read from FIFO */ | ||
420 | nvhost_cdma_push(&channel->cdma, | ||
421 | nvhost_opcode_nonincr(NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1), | ||
422 | nvhost_class_host_wait_syncpt_base(p->syncpt, | ||
423 | p->waitbase, 3)); | ||
424 | /* Indicate submit complete */ | ||
425 | nvhost_cdma_push(&channel->cdma, | ||
426 | nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1), | ||
427 | nvhost_class_host_incr_syncpt_base(p->waitbase, 4)); | ||
428 | nvhost_cdma_push(&channel->cdma, | ||
429 | NVHOST_OPCODE_NOOP, | ||
430 | nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE, | ||
431 | p->syncpt)); | ||
432 | |||
433 | /* end CDMA submit */ | ||
434 | nvhost_cdma_end(&channel->cdma, job); | ||
435 | nvhost_job_put(job); | ||
436 | job = NULL; | ||
437 | |||
438 | /* | ||
439 | * schedule a context save interrupt (to drain the host FIFO | ||
440 | * if necessary, and to release the restore buffer) | ||
441 | */ | ||
442 | if (hwctx_to_save) { | ||
443 | err = nvhost_intr_add_action( | ||
444 | &nvhost_get_host(channel->dev)->intr, | ||
445 | p->syncpt, | ||
446 | syncval - syncpt_incrs | ||
447 | + hwctx_to_save->save_incrs | ||
448 | - 1, | ||
449 | NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save, | ||
450 | ctx_waiter, | ||
451 | NULL); | ||
452 | ctx_waiter = NULL; | ||
453 | WARN(err, "Failed to set context save interrupt"); | ||
454 | } | ||
455 | |||
456 | /* Wait for FIFO to be ready */ | ||
457 | err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr, | ||
458 | p->syncpt, syncval - 2, | ||
459 | NVHOST_INTR_ACTION_WAKEUP, &wq, | ||
460 | read_waiter, | ||
461 | &ref); | ||
462 | read_waiter = NULL; | ||
463 | WARN(err, "Failed to set wakeup interrupt"); | ||
464 | wait_event(wq, | ||
465 | nvhost_syncpt_is_expired(&nvhost_get_host(channel->dev)->syncpt, | ||
466 | p->syncpt, syncval - 2)); | ||
467 | nvhost_intr_put_ref(&nvhost_get_host(channel->dev)->intr, ref); | ||
468 | |||
469 | /* Read the register value from FIFO */ | ||
470 | err = host1x_drain_read_fifo(channel->aperture, | ||
471 | value, 1, &pending); | ||
472 | |||
473 | /* Indicate we've read the value */ | ||
474 | nvhost_syncpt_cpu_incr(&nvhost_get_host(channel->dev)->syncpt, | ||
475 | p->syncpt); | ||
476 | |||
477 | /* Schedule a submit complete interrupt */ | ||
478 | err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr, | ||
479 | p->syncpt, syncval, | ||
480 | NVHOST_INTR_ACTION_SUBMIT_COMPLETE, channel, | ||
481 | completed_waiter, NULL); | ||
482 | completed_waiter = NULL; | ||
483 | WARN(err, "Failed to set submit complete interrupt"); | ||
484 | |||
485 | mutex_unlock(&channel->submitlock); | ||
486 | |||
487 | done: | ||
488 | kfree(ctx_waiter); | ||
489 | kfree(read_waiter); | ||
490 | kfree(completed_waiter); | ||
491 | return err; | ||
492 | } | ||
493 | |||
494 | |||
/*
 * Drain up to @count words from the channel's output FIFO into @ptr.
 *
 * @chan_regs: base of the channel's register aperture
 * @ptr:       destination buffer (at least @count u32s)
 * @count:     number of words to read
 * @pending:   in/out - FIFO entries already known to be available;
 *             updated with the entries left over on return
 *
 * Busy-waits (cpu_relax) for entries until the timeout expires.
 * Returns 0 on success, -EIO on timeout.
 *
 * NOTE(review): NV_FIFO_READ_TIMEOUT is added to jiffies directly and is
 * therefore in jiffies units -- confirm the intended wall-clock duration.
 */
int host1x_drain_read_fifo(void __iomem *chan_regs,
		u32 *ptr, unsigned int count, unsigned int *pending)
{
	unsigned int entries = *pending;
	unsigned long timeout = jiffies + NV_FIFO_READ_TIMEOUT;
	while (count) {
		unsigned int num;

		while (!entries && time_before(jiffies, timeout)) {
			/* query host for number of entries in fifo */
			entries = HOST1X_VAL(CHANNEL_FIFOSTAT, OUTFENTRIES,
				readl(chan_regs + HOST1X_CHANNEL_FIFOSTAT));
			if (!entries)
				cpu_relax();
		}

		/* timeout -> return error */
		if (!entries)
			return -EIO;

		num = min(entries, count);
		entries -= num;
		count -= num;

		/* pop the FIFO four words at a time while possible; each
		 * readl of INDDATA consumes one entry */
		while (num & ~0x3) {
			u32 arr[4];
			arr[0] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
			arr[1] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
			arr[2] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
			arr[3] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
			memcpy(ptr, arr, 4*sizeof(u32));
			ptr += 4;
			num -= 4;
		}
		/* remaining 0-3 words */
		while (num--)
			*ptr++ = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
	}
	*pending = entries;

	return 0;
}
536 | |||
537 | int host1x_save_context(struct nvhost_device *dev, u32 syncpt_id) | ||
538 | { | ||
539 | struct nvhost_channel *ch = dev->channel; | ||
540 | struct nvhost_hwctx *hwctx_to_save; | ||
541 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); | ||
542 | u32 syncpt_incrs, syncpt_val; | ||
543 | int err = 0; | ||
544 | void *ref; | ||
545 | void *ctx_waiter = NULL, *wakeup_waiter = NULL; | ||
546 | struct nvhost_job *job; | ||
547 | |||
548 | ctx_waiter = nvhost_intr_alloc_waiter(); | ||
549 | wakeup_waiter = nvhost_intr_alloc_waiter(); | ||
550 | if (!ctx_waiter || !wakeup_waiter) { | ||
551 | err = -ENOMEM; | ||
552 | goto done; | ||
553 | } | ||
554 | |||
555 | if (dev->busy) | ||
556 | dev->busy(dev); | ||
557 | |||
558 | mutex_lock(&ch->submitlock); | ||
559 | hwctx_to_save = ch->cur_ctx; | ||
560 | if (!hwctx_to_save) { | ||
561 | mutex_unlock(&ch->submitlock); | ||
562 | goto done; | ||
563 | } | ||
564 | |||
565 | job = nvhost_job_alloc(ch, hwctx_to_save, | ||
566 | NULL, | ||
567 | nvhost_get_host(ch->dev)->nvmap, 0, 0); | ||
568 | if (IS_ERR_OR_NULL(job)) { | ||
569 | err = PTR_ERR(job); | ||
570 | mutex_unlock(&ch->submitlock); | ||
571 | goto done; | ||
572 | } | ||
573 | |||
574 | hwctx_to_save->valid = true; | ||
575 | ch->ctxhandler->get(hwctx_to_save); | ||
576 | ch->cur_ctx = NULL; | ||
577 | |||
578 | syncpt_incrs = to_host1x_hwctx(hwctx_to_save)->save_incrs; | ||
579 | syncpt_val = nvhost_syncpt_incr_max(&nvhost_get_host(ch->dev)->syncpt, | ||
580 | syncpt_id, syncpt_incrs); | ||
581 | |||
582 | job->syncpt_id = syncpt_id; | ||
583 | job->syncpt_incrs = syncpt_incrs; | ||
584 | job->syncpt_end = syncpt_val; | ||
585 | |||
586 | err = nvhost_cdma_begin(&ch->cdma, job); | ||
587 | if (err) { | ||
588 | mutex_unlock(&ch->submitlock); | ||
589 | goto done; | ||
590 | } | ||
591 | |||
592 | ch->ctxhandler->save_push(hwctx_to_save, &ch->cdma); | ||
593 | nvhost_cdma_end(&ch->cdma, job); | ||
594 | nvhost_job_put(job); | ||
595 | job = NULL; | ||
596 | |||
597 | err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr, syncpt_id, | ||
598 | syncpt_val - syncpt_incrs + | ||
599 | to_host1x_hwctx(hwctx_to_save)->save_thresh, | ||
600 | NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save, | ||
601 | ctx_waiter, | ||
602 | NULL); | ||
603 | ctx_waiter = NULL; | ||
604 | WARN(err, "Failed to set context save interrupt"); | ||
605 | |||
606 | err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr, | ||
607 | syncpt_id, syncpt_val, | ||
608 | NVHOST_INTR_ACTION_WAKEUP, &wq, | ||
609 | wakeup_waiter, | ||
610 | &ref); | ||
611 | wakeup_waiter = NULL; | ||
612 | WARN(err, "Failed to set wakeup interrupt"); | ||
613 | wait_event(wq, | ||
614 | nvhost_syncpt_is_expired(&nvhost_get_host(ch->dev)->syncpt, | ||
615 | syncpt_id, syncpt_val)); | ||
616 | |||
617 | nvhost_intr_put_ref(&nvhost_get_host(ch->dev)->intr, ref); | ||
618 | |||
619 | nvhost_cdma_update(&ch->cdma); | ||
620 | |||
621 | mutex_unlock(&ch->submitlock); | ||
622 | |||
623 | done: | ||
624 | kfree(ctx_waiter); | ||
625 | kfree(wakeup_waiter); | ||
626 | return err; | ||
627 | } | ||
diff --git a/drivers/video/tegra/host/host1x/host1x_channel.h b/drivers/video/tegra/host/host1x/host1x_channel.h new file mode 100644 index 00000000000..4113dbcada2 --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_channel.h | |||
@@ -0,0 +1,45 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/host1x/host1x_channel.h | ||
3 | * | ||
4 | * Tegra Graphics Host Channel | ||
5 | * | ||
6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #ifndef __NVHOST_HOST1X_CHANNEL_H | ||
22 | #define __NVHOST_HOST1X_CHANNEL_H | ||
23 | |||
24 | struct nvhost_job; | ||
25 | struct nvhost_channel; | ||
26 | struct nvhost_hwctx; | ||
27 | struct nvhost_device; | ||
28 | |||
29 | /* Submit job to a host1x client */ | ||
30 | int host1x_channel_submit(struct nvhost_job *job); | ||
31 | |||
32 | /* Read 3d register via FIFO */ | ||
33 | int host1x_channel_read_3d_reg( | ||
34 | struct nvhost_channel *channel, | ||
35 | struct nvhost_hwctx *hwctx, | ||
36 | u32 offset, | ||
37 | u32 *value); | ||
38 | |||
39 | /* Reads words from FIFO */ | ||
40 | int host1x_drain_read_fifo(void __iomem *chan_regs, | ||
41 | u32 *ptr, unsigned int count, unsigned int *pending); | ||
42 | |||
43 | int host1x_save_context(struct nvhost_device *dev, u32 syncpt_id); | ||
44 | |||
45 | #endif | ||
diff --git a/drivers/video/tegra/host/host1x/host1x_debug.c b/drivers/video/tegra/host/host1x/host1x_debug.c new file mode 100644 index 00000000000..1a1d764bbd6 --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_debug.c | |||
@@ -0,0 +1,404 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/host1x/host1x_debug.c | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * Author: Erik Gilling <konkers@android.com> | ||
6 | * | ||
7 | * Copyright (C) 2011 NVIDIA Corporation | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/debugfs.h> | ||
21 | #include <linux/seq_file.h> | ||
22 | #include <linux/mm.h> | ||
23 | |||
24 | #include <linux/io.h> | ||
25 | |||
26 | #include "dev.h" | ||
27 | #include "debug.h" | ||
28 | #include "nvhost_cdma.h" | ||
29 | #include "../../nvmap/nvmap.h" | ||
30 | |||
31 | #include "host1x_hardware.h" | ||
32 | #include "host1x_cdma.h" | ||
33 | |||
34 | #define NVHOST_DEBUG_MAX_PAGE_OFFSET 102400 | ||
35 | |||
36 | enum { | ||
37 | NVHOST_DBG_STATE_CMD = 0, | ||
38 | NVHOST_DBG_STATE_DATA = 1, | ||
39 | NVHOST_DBG_STATE_GATHER = 2 | ||
40 | }; | ||
41 | |||
42 | static int show_channel_command(struct output *o, u32 addr, u32 val, int *count) | ||
43 | { | ||
44 | unsigned mask; | ||
45 | unsigned subop; | ||
46 | |||
47 | switch (val >> 28) { | ||
48 | case 0x0: | ||
49 | mask = val & 0x3f; | ||
50 | if (mask) { | ||
51 | nvhost_debug_output(o, | ||
52 | "SETCL(class=%03x, offset=%03x, mask=%02x, [", | ||
53 | val >> 6 & 0x3ff, val >> 16 & 0xfff, mask); | ||
54 | *count = hweight8(mask); | ||
55 | return NVHOST_DBG_STATE_DATA; | ||
56 | } else { | ||
57 | nvhost_debug_output(o, "SETCL(class=%03x)\n", | ||
58 | val >> 6 & 0x3ff); | ||
59 | return NVHOST_DBG_STATE_CMD; | ||
60 | } | ||
61 | |||
62 | case 0x1: | ||
63 | nvhost_debug_output(o, "INCR(offset=%03x, [", | ||
64 | val >> 16 & 0xfff); | ||
65 | *count = val & 0xffff; | ||
66 | return NVHOST_DBG_STATE_DATA; | ||
67 | |||
68 | case 0x2: | ||
69 | nvhost_debug_output(o, "NONINCR(offset=%03x, [", | ||
70 | val >> 16 & 0xfff); | ||
71 | *count = val & 0xffff; | ||
72 | return NVHOST_DBG_STATE_DATA; | ||
73 | |||
74 | case 0x3: | ||
75 | mask = val & 0xffff; | ||
76 | nvhost_debug_output(o, "MASK(offset=%03x, mask=%03x, [", | ||
77 | val >> 16 & 0xfff, mask); | ||
78 | *count = hweight16(mask); | ||
79 | return NVHOST_DBG_STATE_DATA; | ||
80 | |||
81 | case 0x4: | ||
82 | nvhost_debug_output(o, "IMM(offset=%03x, data=%03x)\n", | ||
83 | val >> 16 & 0xfff, val & 0xffff); | ||
84 | return NVHOST_DBG_STATE_CMD; | ||
85 | |||
86 | case 0x5: | ||
87 | nvhost_debug_output(o, "RESTART(offset=%08x)\n", val << 4); | ||
88 | return NVHOST_DBG_STATE_CMD; | ||
89 | |||
90 | case 0x6: | ||
91 | nvhost_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[", | ||
92 | val >> 16 & 0xfff, val >> 15 & 0x1, val >> 14 & 0x1, | ||
93 | val & 0x3fff); | ||
94 | *count = val & 0x3fff; /* TODO: insert */ | ||
95 | return NVHOST_DBG_STATE_GATHER; | ||
96 | |||
97 | case 0xe: | ||
98 | subop = val >> 24 & 0xf; | ||
99 | if (subop == 0) | ||
100 | nvhost_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n", | ||
101 | val & 0xff); | ||
102 | else if (subop == 1) | ||
103 | nvhost_debug_output(o, "RELEASE_MLOCK(index=%d)\n", | ||
104 | val & 0xff); | ||
105 | else | ||
106 | nvhost_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val); | ||
107 | return NVHOST_DBG_STATE_CMD; | ||
108 | |||
109 | default: | ||
110 | return NVHOST_DBG_STATE_CMD; | ||
111 | } | ||
112 | } | ||
113 | |||
114 | static void show_channel_gather(struct output *o, u32 addr, | ||
115 | phys_addr_t phys_addr, u32 words, struct nvhost_cdma *cdma); | ||
116 | |||
/*
 * Feed one pushbuffer word into the command-stream decoder/printer.
 *
 * @state and @count carry decoder state between calls: in CMD state the
 * word is decoded as an opcode; in DATA state it is printed as payload
 * (output truncated after 64 words per command); in GATHER state it is
 * the gather address, whose buffer is then dumped via show_channel_gather.
 *
 * NOTE(review): start_count/dont_print are function-static, so
 * interleaved dumps share truncation state -- tolerable for a debug
 * dump path, but this function is not reentrant.
 */
static void show_channel_word(struct output *o, int *state, int *count,
		u32 addr, u32 val, struct nvhost_cdma *cdma)
{
	static int start_count, dont_print;

	switch (*state) {
	case NVHOST_DBG_STATE_CMD:
		if (addr)
			nvhost_debug_output(o, "%08x: %08x:", addr, val);
		else
			nvhost_debug_output(o, "%08x:", val);

		*state = show_channel_command(o, addr, val, count);
		dont_print = 0;
		start_count = *count;
		/* zero-payload command: close the bracket immediately */
		if (*state == NVHOST_DBG_STATE_DATA && *count == 0) {
			*state = NVHOST_DBG_STATE_CMD;
			nvhost_debug_output(o, "])\n");
		}
		break;

	case NVHOST_DBG_STATE_DATA:
		(*count)--;
		if (start_count - *count < 64)
			nvhost_debug_output(o, "%08x%s",
				val, *count > 0 ? ", " : "])\n");
		else if (!dont_print && (*count > 0)) {
			nvhost_debug_output(o, "[truncated; %d more words]\n",
				*count);
			dont_print = 1;
		}
		if (*count == 0)
			*state = NVHOST_DBG_STATE_CMD;
		break;

	case NVHOST_DBG_STATE_GATHER:
		*state = NVHOST_DBG_STATE_CMD;
		nvhost_debug_output(o, "%08x]):\n", val);
		if (cdma) {
			show_channel_gather(o, addr, val,
					*count, cdma);
		}
		break;
	}
}
162 | |||
/*
 * Dump the contents of a gather buffer referenced from the pushbuffer.
 *
 * Looks up the nvmap handle recorded for the pushbuffer slot at @addr,
 * maps and pins it to learn its device address, and decodes @words words
 * starting at @phys_addr.  Prints a bracketed diagnostic instead when the
 * handle is a context-save marker, already deallocated, unmappable,
 * unpinnable, or pinned at an unexpected address.
 *
 * Compiled to a no-op when CONFIG_TEGRA_NVMAP is not set.
 */
static void show_channel_gather(struct output *o, u32 addr,
		phys_addr_t phys_addr,
		u32 words, struct nvhost_cdma *cdma)
{
#if defined(CONFIG_TEGRA_NVMAP)
	/* Map dmaget cursor to corresponding nvmap_handle;
	 * one handle is stored per 8-byte pushbuffer slot */
	struct push_buffer *pb = &cdma->push_buffer;
	u32 cur = addr - pb->phys;
	struct nvmap_client_handle *nvmap = &pb->nvmap[cur/8];
	struct nvmap_handle_ref ref;
	u32 *map_addr, offset;
	phys_addr_t pin_addr;
	int state, count, i;

	if ((u32)nvmap->handle == NVHOST_CDMA_PUSH_GATHER_CTXSAVE) {
		nvhost_debug_output(o, "[context save]\n");
		return;
	}

	if (!nvmap->handle || !nvmap->client
			|| atomic_read(&nvmap->handle->ref) < 1) {
		nvhost_debug_output(o, "[already deallocated]\n");
		return;
	}

	/* Create a fake nvmap_handle_ref - nvmap requires it
	 * but accesses only the first field - nvmap_handle */
	ref.handle = nvmap->handle;

	map_addr = nvmap_mmap(&ref);
	if (!map_addr) {
		nvhost_debug_output(o, "[could not mmap]\n");
		return;
	}

	/* Get base address from nvmap */
	pin_addr = nvmap_pin(nvmap->client, &ref);
	if (IS_ERR_VALUE(pin_addr)) {
		nvhost_debug_output(o, "[couldn't pin]\n");
		nvmap_munmap(&ref, map_addr);
		return;
	}

	offset = phys_addr - pin_addr;
	/*
	 * Sometimes we're given different hardware address to the same
	 * page - in these cases the offset will get an invalid number and
	 * we just have to bail out.
	 */
	if (offset > NVHOST_DEBUG_MAX_PAGE_OFFSET) {
		nvhost_debug_output(o, "[address mismatch]\n");
	} else {
		/* GATHER buffer starts always with commands */
		state = NVHOST_DBG_STATE_CMD;
		for (i = 0; i < words; i++)
			show_channel_word(o, &state, &count,
					phys_addr + i * 4,
					*(map_addr + offset/4 + i),
					cdma);
	}
	nvmap_unpin(nvmap->client, &ref);
	nvmap_munmap(&ref, map_addr);
#endif
}
227 | |||
228 | static void show_channel_pair(struct output *o, u32 addr, | ||
229 | u32 w0, u32 w1, struct nvhost_cdma *cdma) | ||
230 | { | ||
231 | int state = NVHOST_DBG_STATE_CMD; | ||
232 | int count; | ||
233 | |||
234 | show_channel_word(o, &state, &count, addr, w0, cdma); | ||
235 | show_channel_word(o, &state, &count, addr+4, w1, cdma); | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * Retrieve the op pair at a slot offset from a DMA address | ||
240 | */ | ||
241 | static void cdma_peek(struct nvhost_cdma *cdma, | ||
242 | u32 dmaget, int slot, u32 *out) | ||
243 | { | ||
244 | u32 offset = dmaget - cdma->push_buffer.phys; | ||
245 | u32 *p = cdma->push_buffer.mapped; | ||
246 | |||
247 | offset = ((offset + slot * 8) & (PUSH_BUFFER_SIZE - 1)) >> 2; | ||
248 | out[0] = p[offset]; | ||
249 | out[1] = p[offset + 1]; | ||
250 | } | ||
251 | |||
252 | u32 previous_oppair(struct nvhost_cdma *cdma, u32 cur) | ||
253 | { | ||
254 | u32 pb = cdma->push_buffer.phys; | ||
255 | u32 prev = cur-8; | ||
256 | if (prev < pb) | ||
257 | prev += PUSH_BUFFER_SIZE; | ||
258 | return prev; | ||
259 | } | ||
260 | |||
/*
 * Dump the CDMA state of one channel: DMA pointers, command-buffer
 * read/status registers, and a decode of the op pair just before DMAGET.
 */
static void t20_debug_show_channel_cdma(struct nvhost_master *m,
	struct nvhost_channel *ch, struct output *o, int chid)
{
	struct nvhost_channel *channel = ch;
	struct nvhost_cdma *cdma = &channel->cdma;
	u32 dmaput, dmaget, dmactrl;
	u32 cbstat, cbread;
	u32 val, base, baseval;
	u32 pbw[2];

	dmaput = readl(channel->aperture + HOST1X_CHANNEL_DMAPUT);
	dmaget = readl(channel->aperture + HOST1X_CHANNEL_DMAGET);
	dmactrl = readl(channel->aperture + HOST1X_CHANNEL_DMACTRL);
	cbread = readl(m->sync_aperture + HOST1X_SYNC_CBREAD_x(chid));
	cbstat = readl(m->sync_aperture + HOST1X_SYNC_CBSTAT_x(chid));

	nvhost_debug_output(o, "%d-%s (%d): ", chid,
			channel->dev->name,
			channel->dev->refcount);

	/* stopped DMA or unmapped push buffer means nothing to decode */
	if (HOST1X_VAL(CHANNEL_DMACTRL, DMASTOP, dmactrl)
		|| !channel->cdma.push_buffer.mapped) {
		nvhost_debug_output(o, "inactive\n\n");
		return;
	}

	/* CBSTAT is class (hi 16 bits) : method offset (lo 16 bits) */
	switch (cbstat) {
	case 0x00010008:
		/* host1x class (0x1), WAIT_SYNCPT (0x8) */
		nvhost_debug_output(o, "waiting on syncpt %d val %d\n",
			cbread >> 24, cbread & 0xffffff);
		break;

	case 0x00010009:
		/* host1x class (0x1), WAIT_SYNCPT_BASE (0x9):
		 * effective threshold is wait base value plus offset */
		base = (cbread >> 16) & 0xff;
		val = readl(m->sync_aperture +
				HOST1X_SYNC_SYNCPT_BASE_x(base));
		baseval = HOST1X_VAL(SYNC_SYNCPT_BASE_0, BASE, val);
		val = cbread & 0xffff;
		nvhost_debug_output(o, "waiting on syncpt %d val %d "
			"(base %d = %d; offset = %d)\n",
			cbread >> 24, baseval + val,
			base, baseval, val);
		break;

	default:
		nvhost_debug_output(o,
			"active class %02x, offset %04x, val %08x\n",
			HOST1X_VAL(SYNC_CBSTAT_0, CBCLASS0, cbstat),
			HOST1X_VAL(SYNC_CBSTAT_0, CBOFFSET0, cbstat),
			cbread);
		break;
	}

	nvhost_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
		dmaput, dmaget, dmactrl);
	nvhost_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);

	/* decode the most recently consumed op pair (one slot before GET) */
	cdma_peek(cdma, dmaget, -1, pbw);
	show_channel_pair(o, previous_oppair(cdma, dmaget),
			pbw[0], pbw[1], &channel->cdma);
	nvhost_debug_output(o, "\n");
}
323 | |||
/*
 * Dump the contents of a channel's command FIFO by single-stepping the
 * CFPEEK debug port from the FIFO read pointer up to the write pointer.
 */
void t20_debug_show_channel_fifo(struct nvhost_master *m,
	struct nvhost_channel *ch, struct output *o, int chid)
{
	u32 val, rd_ptr, wr_ptr, start, end;
	struct nvhost_channel *channel = ch;
	int state, count;

	nvhost_debug_output(o, "%d: fifo:\n", chid);

	val = readl(channel->aperture + HOST1X_CHANNEL_FIFOSTAT);
	nvhost_debug_output(o, "FIFOSTAT %08x\n", val);
	if (HOST1X_VAL(CHANNEL_FIFOSTAT, CFEMPTY, val)) {
		nvhost_debug_output(o, "[empty]\n");
		return;
	}

	/* reset, then enable peeking for this channel */
	writel(0x0, m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);
	writel(HOST1X_CREATE(SYNC_CFPEEK_CTRL, ENA, 1)
			| HOST1X_CREATE(SYNC_CFPEEK_CTRL, CHANNR, chid),
		m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);

	val = readl(m->sync_aperture + HOST1X_SYNC_CFPEEK_PTRS);
	rd_ptr = HOST1X_VAL(SYNC_CFPEEK_PTRS, CF_RD_PTR, val);
	wr_ptr = HOST1X_VAL(SYNC_CFPEEK_PTRS, CF_WR_PTR, val);

	/* FIFO base/limit within the shared command FIFO RAM */
	val = readl(m->sync_aperture + HOST1X_SYNC_CFx_SETUP(chid));
	start = HOST1X_VAL(SYNC_CF0_SETUP, BASE, val);
	end = HOST1X_VAL(SYNC_CF0_SETUP, LIMIT, val);

	state = NVHOST_DBG_STATE_CMD;

	do {
		/* re-arm CFPEEK with the current read address each step */
		writel(0x0, m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);
		writel(HOST1X_CREATE(SYNC_CFPEEK_CTRL, ENA, 1)
				| HOST1X_CREATE(SYNC_CFPEEK_CTRL, CHANNR, chid)
				| HOST1X_CREATE(SYNC_CFPEEK_CTRL, ADDR, rd_ptr),
			m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);
		val = readl(m->sync_aperture + HOST1X_SYNC_CFPEEK_READ);

		/* addr 0: FIFO words have no meaningful DMA address */
		show_channel_word(o, &state, &count, 0, val, NULL);

		if (rd_ptr == end)
			rd_ptr = start;
		else
			rd_ptr++;
	} while (rd_ptr != wr_ptr);

	/* close an unterminated data run in the decode output */
	if (state == NVHOST_DBG_STATE_DATA)
		nvhost_debug_output(o, ", ...])\n");
	nvhost_debug_output(o, "\n");

	writel(0x0, m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);
}
377 | |||
378 | static void t20_debug_show_mlocks(struct nvhost_master *m, struct output *o) | ||
379 | { | ||
380 | u32 __iomem *mlo_regs = m->sync_aperture + HOST1X_SYNC_MLOCK_OWNER_0; | ||
381 | int i; | ||
382 | |||
383 | nvhost_debug_output(o, "---- mlocks ----\n"); | ||
384 | for (i = 0; i < NV_HOST1X_NB_MLOCKS; i++) { | ||
385 | u32 owner = readl(mlo_regs + i); | ||
386 | if (HOST1X_VAL(SYNC_MLOCK_OWNER_0, CH_OWNS, owner)) | ||
387 | nvhost_debug_output(o, "%d: locked by channel %d\n", | ||
388 | i, HOST1X_VAL(SYNC_MLOCK_OWNER_0, CHID, owner)); | ||
389 | else if (HOST1X_VAL(SYNC_MLOCK_OWNER_0, CPU_OWNS, owner)) | ||
390 | nvhost_debug_output(o, "%d: locked by cpu\n", i); | ||
391 | else | ||
392 | nvhost_debug_output(o, "%d: unlocked\n", i); | ||
393 | } | ||
394 | nvhost_debug_output(o, "\n"); | ||
395 | } | ||
396 | |||
/*
 * Install the Tegra2 (t20) debug dump callbacks into the host's op table.
 * Always succeeds; returns 0.
 */
int nvhost_init_t20_debug_support(struct nvhost_master *host)
{
	host->op.debug.show_channel_cdma = t20_debug_show_channel_cdma;
	host->op.debug.show_channel_fifo = t20_debug_show_channel_fifo;
	host->op.debug.show_mlocks = t20_debug_show_mlocks;

	return 0;
}
diff --git a/drivers/video/tegra/host/host1x/host1x_hardware.h b/drivers/video/tegra/host/host1x/host1x_hardware.h new file mode 100644 index 00000000000..d13d5752364 --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_hardware.h | |||
@@ -0,0 +1,274 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/host1x/host1x_hardware.h | ||
3 | * | ||
4 | * Tegra Graphics Host Register Offsets | ||
5 | * | ||
6 | * Copyright (c) 2010-2012 NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
#ifndef __NVHOST_HOST1X_HOST1X_HARDWARE_H
#define __NVHOST_HOST1X_HOST1X_HARDWARE_H

#include <linux/types.h>
#include <linux/bitops.h>

/* class ids */
enum {
	NV_HOST1X_CLASS_ID = 0x1,
	NV_VIDEO_ENCODE_MPEG_CLASS_ID = 0x20,
	NV_GRAPHICS_3D_CLASS_ID = 0x60
};


/* channel registers */
#define NV_HOST1X_CHANNELS 8
#define NV_HOST1X_CHANNEL0_BASE 0
/* per-channel aperture stride, in bytes */
#define NV_HOST1X_CHANNEL_MAP_SIZE_BYTES 16384
#define NV_HOST1X_SYNC_MLOCK_NUM 16
/*
 * Field accessors built from the *_SHIFT/*_MASK pairs below.
 * HOST1X_VAL extracts a field from a register value read from hardware;
 * HOST1X_CREATE positions a field value for writing.
 *
 * Fixed: the macro arguments are now parenthesized (the originals expanded
 * e.g. "a | b >> SHIFT" with the wrong precedence), and the stray trailing
 * backslash after HOST1X_CREATE -- which silently continued the definition
 * onto the following source line -- is removed.
 */
#define HOST1X_VAL(reg, field, regdata) \
	(((regdata) >> HOST1X_##reg##_##field##_SHIFT) \
	& HOST1X_##reg##_##field##_MASK)
#define HOST1X_CREATE(reg, field, data) \
	(((data) & HOST1X_##reg##_##field##_MASK) \
	<< HOST1X_##reg##_##field##_SHIFT)
47 | |||
/* per-channel register offsets (relative to the channel aperture) */
#define HOST1X_CHANNEL_FIFOSTAT 0x00
#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_SHIFT 10
#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_MASK 0x1
#define HOST1X_CHANNEL_FIFOSTAT_OUTFENTRIES_SHIFT 24
#define HOST1X_CHANNEL_FIFOSTAT_OUTFENTRIES_MASK 0x1f
#define HOST1X_CHANNEL_INDDATA 0x0c
#define HOST1X_CHANNEL_DMASTART 0x14
#define HOST1X_CHANNEL_DMAPUT 0x18
#define HOST1X_CHANNEL_DMAGET 0x1c
#define HOST1X_CHANNEL_DMAEND 0x20
#define HOST1X_CHANNEL_DMACTRL 0x24
#define HOST1X_CHANNEL_DMACTRL_DMASTOP_SHIFT 0
#define HOST1X_CHANNEL_DMACTRL_DMASTOP_MASK 0x1
#define HOST1X_CHANNEL_DMACTRL_DMAGETRST_SHIFT 1
#define HOST1X_CHANNEL_DMACTRL_DMAGETRST_MASK 0x1
#define HOST1X_CHANNEL_DMACTRL_DMAINITGET_SHIFT 2
#define HOST1X_CHANNEL_DMACTRL_DMAINITGET_MASK 0x1

/* offset of the sync register block inside the host1x aperture */
#define HOST1X_CHANNEL_SYNC_REG_BASE 0x3000

/* sync register offsets (relative to the sync aperture) */
#define HOST1X_SYNC_INTMASK 0x4
#define HOST1X_SYNC_INTC0MASK 0x8
#define HOST1X_SYNC_HINTSTATUS 0x20
#define HOST1X_SYNC_HINTMASK 0x24
#define HOST1X_SYNC_HINTSTATUS_EXT 0x28
#define HOST1X_SYNC_HINTSTATUS_EXT_IP_READ_INT_SHIFT 30
#define HOST1X_SYNC_HINTSTATUS_EXT_IP_READ_INT_MASK 0x1
#define HOST1X_SYNC_HINTSTATUS_EXT_IP_WRITE_INT_SHIFT 31
#define HOST1X_SYNC_HINTSTATUS_EXT_IP_WRITE_INT_MASK 0x1
#define HOST1X_SYNC_HINTMASK_EXT 0x2c
#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS 0x40
#define HOST1X_SYNC_SYNCPT_THRESH_CPU1_INT_STATUS 0x48
#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE 0x60
#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0 0x68
#define HOST1X_SYNC_CF0_SETUP 0x80
#define HOST1X_SYNC_CF0_SETUP_BASE_SHIFT 0
#define HOST1X_SYNC_CF0_SETUP_BASE_MASK 0x1ff
#define HOST1X_SYNC_CF0_SETUP_LIMIT_SHIFT 16
#define HOST1X_SYNC_CF0_SETUP_LIMIT_MASK 0x1ff
#define HOST1X_SYNC_CFx_SETUP(x) (HOST1X_SYNC_CF0_SETUP + (4 * (x)))

#define HOST1X_SYNC_CMDPROC_STOP 0xac
#define HOST1X_SYNC_CH_TEARDOWN 0xb0
#define HOST1X_SYNC_USEC_CLK 0x1a4
#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG 0x1a8
#define HOST1X_SYNC_IP_BUSY_TIMEOUT 0x1bc
#define HOST1X_SYNC_IP_READ_TIMEOUT_ADDR 0x1c0
#define HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR 0x1c4
#define HOST1X_SYNC_MLOCK_0 0x2c0
#define HOST1X_SYNC_MLOCK_OWNER_0 0x340
#define HOST1X_SYNC_MLOCK_OWNER_0_CHID_SHIFT 8
#define HOST1X_SYNC_MLOCK_OWNER_0_CHID_MASK 0xf
#define HOST1X_SYNC_MLOCK_OWNER_0_CPU_OWNS_SHIFT 1
#define HOST1X_SYNC_MLOCK_OWNER_0_CPU_OWNS_MASK 0x1
#define HOST1X_SYNC_MLOCK_OWNER_0_CH_OWNS_SHIFT 0
#define HOST1X_SYNC_MLOCK_OWNER_0_CH_OWNS_MASK 0x1
#define HOST1X_SYNC_SYNCPT_0 0x400
#define HOST1X_SYNC_SYNCPT_INT_THRESH_0 0x500

#define HOST1X_SYNC_SYNCPT_BASE_0 0x600
#define HOST1X_SYNC_SYNCPT_BASE_0_BASE_SHIFT 0
#define HOST1X_SYNC_SYNCPT_BASE_0_BASE_MASK 0xffff
#define HOST1X_SYNC_SYNCPT_BASE_x(x) (HOST1X_SYNC_SYNCPT_BASE_0 + (4 * (x)))

#define HOST1X_SYNC_SYNCPT_CPU_INCR 0x700

#define HOST1X_SYNC_CBREAD_0 0x720
#define HOST1X_SYNC_CBREAD_x(x) (HOST1X_SYNC_CBREAD_0 + (4 * (x)))
#define HOST1X_SYNC_CFPEEK_CTRL 0x74c
#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_SHIFT 0
#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_MASK 0x1ff
#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_SHIFT 16
#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_MASK 0x7
#define HOST1X_SYNC_CFPEEK_CTRL_ENA_SHIFT 31
#define HOST1X_SYNC_CFPEEK_CTRL_ENA_MASK 0x1
#define HOST1X_SYNC_CFPEEK_READ 0x750
#define HOST1X_SYNC_CFPEEK_PTRS 0x754
#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_SHIFT 0
#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_MASK 0x1ff
#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_SHIFT 16
#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_MASK 0x1ff
#define HOST1X_SYNC_CBSTAT_0 0x758
#define HOST1X_SYNC_CBSTAT_0_CBOFFSET0_SHIFT 0
#define HOST1X_SYNC_CBSTAT_0_CBOFFSET0_MASK 0xffff
#define HOST1X_SYNC_CBSTAT_0_CBCLASS0_SHIFT 16
#define HOST1X_SYNC_CBSTAT_0_CBCLASS0_MASK 0xffff
#define HOST1X_SYNC_CBSTAT_x(x) (HOST1X_SYNC_CBSTAT_0 + (4 * (x)))

/* sync registers */
#define NV_HOST1X_SYNCPT_NB_PTS 32
#define NV_HOST1X_SYNCPT_NB_BASES 8
#define NV_HOST1X_NB_MLOCKS 16

/* host class methods */
enum {
	NV_CLASS_HOST_INCR_SYNCPT = 0x0,
	NV_CLASS_HOST_WAIT_SYNCPT = 0x8,
	NV_CLASS_HOST_WAIT_SYNCPT_BASE = 0x9,
	NV_CLASS_HOST_LOAD_SYNCPT_BASE = 0xb,
	NV_CLASS_HOST_INCR_SYNCPT_BASE = 0xc,
	NV_CLASS_HOST_INDOFF = 0x2d,
	NV_CLASS_HOST_INDDATA = 0x2e
};
/* sync point conditionals */
enum {
	NV_SYNCPT_IMMEDIATE = 0x0,
	NV_SYNCPT_OP_DONE = 0x1,
	NV_SYNCPT_RD_DONE = 0x2,
	NV_SYNCPT_REG_WR_SAFE = 0x3,
};
158 | |||
/* WAIT_SYNCPT argument word: syncpt index in bits 31:24, threshold in 23:0 */
static inline u32 nvhost_class_host_wait_syncpt(
	unsigned indx, unsigned threshold)
{
	return (indx << 24) | (threshold & 0xffffff);
}

/* LOAD_SYNCPT_BASE argument word: base index in 31:24, value in 23:0 */
static inline u32 nvhost_class_host_load_syncpt_base(
	unsigned indx, unsigned threshold)
{
	return (indx << 24) | (threshold & 0xffffff);
}

/* WAIT_SYNCPT_BASE argument word: syncpt in 31:24, base in 23:16,
 * offset in the low bits (offset is not masked here -- callers must
 * keep it within 16 bits) */
static inline u32 nvhost_class_host_wait_syncpt_base(
	unsigned indx, unsigned base_indx, unsigned offset)
{
	return (indx << 24) | (base_indx << 16) | offset;
}

/* INCR_SYNCPT_BASE argument word: base index in 31:24, increment below */
static inline u32 nvhost_class_host_incr_syncpt_base(
	unsigned base_indx, unsigned offset)
{
	return (base_indx << 24) | offset;
}

/* INCR_SYNCPT argument word: condition (NV_SYNCPT_*) in 15:8, index in 7:0 */
static inline u32 nvhost_class_host_incr_syncpt(
	unsigned cond, unsigned indx)
{
	return (cond << 8) | indx;
}
188 | |||
/* module ids used in the INDOFF indirect-access word */
enum {
	NV_HOST_MODULE_HOST1X = 0,
	NV_HOST_MODULE_MPE = 1,
	NV_HOST_MODULE_GR3D = 6
};

/* Build an INDOFF word for an indirect register write.
 * Bit 27 enables address auto-increment; module id sits at bits 18+,
 * register offset is byte-scaled (<< 2).  The meaning of the top nibble
 * (0xf << 28) is not visible here -- presumably write byte enables;
 * TODO confirm against the Tegra TRM. */
static inline u32 nvhost_class_host_indoff_reg_write(
	unsigned mod_id, unsigned offset, bool auto_inc)
{
	u32 v = (0xf << 28) | (mod_id << 18) | (offset << 2);
	if (auto_inc)
		v |= BIT(27);
	return v;
}

/* Build an INDOFF word for an indirect register read (low bit set). */
static inline u32 nvhost_class_host_indoff_reg_read(
	unsigned mod_id, unsigned offset, bool auto_inc)
{
	u32 v = (mod_id << 18) | (offset << 2) | 1;
	if (auto_inc)
		v |= BIT(27);
	return v;
}
212 | |||
213 | |||
/* cdma opcodes: the opcode number occupies the top 4 bits of each word */

/* opcode 0: switch to @class_id, with method @offset and write @mask */
static inline u32 nvhost_opcode_setclass(
	unsigned class_id, unsigned offset, unsigned mask)
{
	return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
}

/* opcode 1: write @count data words to consecutive methods from @offset */
static inline u32 nvhost_opcode_incr(unsigned offset, unsigned count)
{
	return (1 << 28) | (offset << 16) | count;
}

/* opcode 2: write @count data words, all to method @offset */
static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count)
{
	return (2 << 28) | (offset << 16) | count;
}

/* opcode 3: write data words to the methods selected by @mask from @offset */
static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask)
{
	return (3 << 28) | (offset << 16) | mask;
}

/* opcode 4: write the 16-bit immediate @value to method @offset */
static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value)
{
	return (4 << 28) | (offset << 16) | value;
}

/* immediate INCR_SYNCPT with the given condition and syncpt index */
static inline u32 nvhost_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
{
	return nvhost_opcode_imm(NV_CLASS_HOST_INCR_SYNCPT,
		nvhost_class_host_incr_syncpt(cond, indx));
}

/* opcode 5: restart DMA at @address (16-byte aligned, stored >> 4) */
static inline u32 nvhost_opcode_restart(unsigned address)
{
	return (5 << 28) | (address >> 4);
}

/* opcode 6: gather @count words (plain form, no offset/flags) */
static inline u32 nvhost_opcode_gather(unsigned count)
{
	return (6 << 28) | count;
}

/* opcode 6 + BIT(15): gather @count words, non-incrementing to @offset */
static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count)
{
	return (6 << 28) | (offset << 16) | BIT(15) | count;
}

/* opcode 6 + BIT(15)|BIT(14): gather @count words, incrementing from @offset */
static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count)
{
	return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
}

/* a zero-word non-incrementing write: does nothing */
#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0)

/* mask with bit 0 and bit (y - x) set; for use with nvhost_opcode_mask()
 * to address two methods x and y (y > x) in one op */
static inline u32 nvhost_mask2(unsigned x, unsigned y)
{
	return 1 | (1 << (y - x));
}
273 | |||
274 | #endif | ||
diff --git a/drivers/video/tegra/host/host1x/host1x_hwctx.h b/drivers/video/tegra/host/host1x/host1x_hwctx.h new file mode 100644 index 00000000000..7587642d0e1 --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_hwctx.h | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/host1x/host1x_hwctx.h | ||
3 | * | ||
4 | * Tegra Graphics Host HOST1X Hardware Context Interface | ||
5 | * | ||
6 | * Copyright (c) 2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
#ifndef __NVHOST_HOST1X_HWCTX_H
#define __NVHOST_HOST1X_HWCTX_H

#include <linux/kref.h>

struct nvhost_hwctx_handler;
struct nvhost_channel;

/* container_of downcasts from the generic nvhost context/handler types
 * embedded in the host1x-specific structs below */
#define to_host1x_hwctx_handler(handler) \
	container_of((handler), struct host1x_hwctx_handler, h)
#define to_host1x_hwctx(h) container_of((h), struct host1x_hwctx, hwctx)
#define host1x_hwctx_handler(_hwctx) to_host1x_hwctx_handler((_hwctx)->hwctx.h)
35 | |||
/* Per-context host1x hardware context state, wrapping the generic
 * nvhost_hwctx.  The save_* fields mirror those in host1x_hwctx_handler;
 * the restore_* fields describe this context's restore buffer. */
struct host1x_hwctx {
	struct nvhost_hwctx hwctx;	/* embedded generic context */

	u32 save_incrs;
	u32 save_thresh;
	u32 save_slots;

	struct nvmap_handle_ref *restore;	/* restore buffer handle */
	u32 *restore_virt;		/* CPU mapping of the restore buffer */
	phys_addr_t restore_phys;	/* DMA address of the restore buffer */
	u32 restore_size;
	u32 restore_incrs;
};
49 | |||
/* Per-unit context handler, wrapping the generic nvhost_hwctx_handler
 * plus the shared save buffer and the syncpt/waitbase used for
 * context switching. */
struct host1x_hwctx_handler {
	struct nvhost_hwctx_handler h;	/* embedded generic handler */

	u32 syncpt;		/* syncpt id used for ctx save/restore */
	u32 waitbase;		/* wait base paired with the syncpt */
	u32 restore_size;
	u32 restore_incrs;
	struct nvmap_handle_ref *save_buf;	/* shared save buffer */
	u32 save_incrs;
	u32 save_thresh;
	u32 save_slots;
	phys_addr_t save_phys;
	u32 save_size;
};
64 | |||
65 | #endif | ||
diff --git a/drivers/video/tegra/host/host1x/host1x_intr.c b/drivers/video/tegra/host/host1x/host1x_intr.c new file mode 100644 index 00000000000..47e984e2943 --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_intr.c | |||
@@ -0,0 +1,218 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/host1x/host1x_intr.c | ||
3 | * | ||
4 | * Tegra Graphics Host Interrupt Management | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/irq.h> | ||
23 | |||
24 | #include "nvhost_intr.h" | ||
25 | #include "dev.h" | ||
26 | #include "host1x_hardware.h" | ||
27 | |||
28 | |||
29 | /*** HW host sync management ***/ | ||
30 | |||
/* One-time sync-block setup: disable the IP busy timeout and maximize the
 * context-switch auto-ack timeout. */
static void t20_intr_init_host_sync(struct nvhost_intr *intr)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	/* disable the ip_busy_timeout. this prevents write drops, etc.
	 * there's no real way to recover from a hung client anyway.
	 */
	writel(0, sync_regs + HOST1X_SYNC_IP_BUSY_TIMEOUT);

	/* increase the auto-ack timeout to the maximum value. 2d will hang
	 * otherwise on ap20.
	 */
	writel(0xff, sync_regs + HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
}
45 | |||
46 | static void t20_intr_set_host_clocks_per_usec(struct nvhost_intr *intr, u32 cpm) | ||
47 | { | ||
48 | struct nvhost_master *dev = intr_to_dev(intr); | ||
49 | void __iomem *sync_regs = dev->sync_aperture; | ||
50 | /* write microsecond clock register */ | ||
51 | writel(cpm, sync_regs + HOST1X_SYNC_USEC_CLK); | ||
52 | } | ||
53 | |||
54 | static void t20_intr_set_syncpt_threshold(struct nvhost_intr *intr, | ||
55 | u32 id, u32 thresh) | ||
56 | { | ||
57 | struct nvhost_master *dev = intr_to_dev(intr); | ||
58 | void __iomem *sync_regs = dev->sync_aperture; | ||
59 | thresh &= 0xffff; | ||
60 | writel(thresh, sync_regs + (HOST1X_SYNC_SYNCPT_INT_THRESH_0 + id * 4)); | ||
61 | } | ||
62 | |||
/* Enable the threshold interrupt for syncpt @id on CPU0 (write-one-to-set). */
static void t20_intr_enable_syncpt_intr(struct nvhost_intr *intr, u32 id)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	writel(BIT(id), sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0);
}
69 | |||
/* Mask all syncpt threshold interrupts and acknowledge any pending
 * status, for both CPUs. */
static void t20_intr_disable_all_syncpt_intrs(struct nvhost_intr *intr)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	/* disable interrupts for both cpu's */
	writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);

	/* clear status for both cpu's (write-one-to-clear) */
	writel(0xffffffffu, sync_regs +
		HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
	writel(0xffffffffu, sync_regs +
		HOST1X_SYNC_SYNCPT_THRESH_CPU1_INT_STATUS);
}
83 | |||
/**
 * Sync point threshold interrupt service function
 * Handles sync point threshold triggers, in interrupt context
 *
 * Masks and acknowledges this syncpt's interrupt, then defers the real
 * work to the threaded handler (nvhost_syncpt_thresh_fn, registered in
 * t20_request_syncpt_irq) by returning IRQ_WAKE_THREAD.
 */
irqreturn_t t20_intr_syncpt_thresh_isr(int irq, void *dev_id)
{
	struct nvhost_intr_syncpt *syncpt = dev_id;
	unsigned int id = syncpt->id;
	struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);

	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

	/* disable further interrupts from this syncpt, then ack it */
	writel(BIT(id),
		sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
	writel(BIT(id),
		sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);

	return IRQ_WAKE_THREAD;
}
103 | |||
/**
 * Host general interrupt service function
 * Handles read / write failures
 *
 * Logs the faulting address for IP read/write timeouts, then acks all
 * pending status bits (write-one-to-clear on both status registers).
 */
static irqreturn_t t20_intr_host1x_isr(int irq, void *dev_id)
{
	struct nvhost_intr *intr = dev_id;
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
	u32 stat;
	u32 ext_stat;
	u32 addr;

	stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS);
	ext_stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);

	if (HOST1X_VAL(SYNC_HINTSTATUS_EXT, IP_READ_INT, ext_stat)) {
		addr = readl(sync_regs + HOST1X_SYNC_IP_READ_TIMEOUT_ADDR);
		pr_err("Host read timeout at address %x\n", addr);
	}

	if (HOST1X_VAL(SYNC_HINTSTATUS_EXT, IP_WRITE_INT, ext_stat)) {
		addr = readl(sync_regs + HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR);
		pr_err("Host write timeout at address %x\n", addr);
	}

	/* ack everything we saw */
	writel(ext_stat, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
	writel(stat, sync_regs + HOST1X_SYNC_HINTSTATUS);

	return IRQ_HANDLED;
}
134 | static int t20_intr_request_host_general_irq(struct nvhost_intr *intr) | ||
135 | { | ||
136 | void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture; | ||
137 | int err; | ||
138 | |||
139 | if (intr->host_general_irq_requested) | ||
140 | return 0; | ||
141 | |||
142 | /* master disable for general (not syncpt) host interrupts */ | ||
143 | writel(0, sync_regs + HOST1X_SYNC_INTMASK); | ||
144 | |||
145 | /* clear status & extstatus */ | ||
146 | writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT); | ||
147 | writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS); | ||
148 | |||
149 | err = request_irq(intr->host_general_irq, t20_intr_host1x_isr, 0, | ||
150 | "host_status", intr); | ||
151 | if (err) | ||
152 | return err; | ||
153 | |||
154 | /* enable extra interrupt sources IP_READ_INT and IP_WRITE_INT */ | ||
155 | writel(BIT(30) | BIT(31), sync_regs + HOST1X_SYNC_HINTMASK_EXT); | ||
156 | |||
157 | /* enable extra interrupt sources */ | ||
158 | writel(BIT(31), sync_regs + HOST1X_SYNC_HINTMASK); | ||
159 | |||
160 | /* enable host module interrupt to CPU0 */ | ||
161 | writel(BIT(0), sync_regs + HOST1X_SYNC_INTC0MASK); | ||
162 | |||
163 | /* master enable for general (not syncpt) host interrupts */ | ||
164 | writel(BIT(0), sync_regs + HOST1X_SYNC_INTMASK); | ||
165 | |||
166 | intr->host_general_irq_requested = true; | ||
167 | |||
168 | return err; | ||
169 | } | ||
170 | |||
171 | static void t20_intr_free_host_general_irq(struct nvhost_intr *intr) | ||
172 | { | ||
173 | if (intr->host_general_irq_requested) { | ||
174 | void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture; | ||
175 | |||
176 | /* master disable for general (not syncpt) host interrupts */ | ||
177 | writel(0, sync_regs + HOST1X_SYNC_INTMASK); | ||
178 | |||
179 | free_irq(intr->host_general_irq, intr); | ||
180 | intr->host_general_irq_requested = false; | ||
181 | } | ||
182 | } | ||
183 | |||
/* Request the threaded IRQ for one syncpt: hard half acks/masks
 * (t20_intr_syncpt_thresh_isr), threaded half does the completion work
 * (nvhost_syncpt_thresh_fn).  Idempotent; returns 0 or request errno. */
static int t20_request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
	int err;
	if (syncpt->irq_requested)
		return 0;

	err = request_threaded_irq(syncpt->irq,
				t20_intr_syncpt_thresh_isr,
				nvhost_syncpt_thresh_fn,
				0, syncpt->thresh_irq_name, syncpt);
	if (err)
		return err;

	syncpt->irq_requested = 1;
	return 0;
}
200 | |||
/*
 * Install the Tegra2 (t20) interrupt-management callbacks into the host's
 * op table.  Always succeeds; returns 0.
 */
int nvhost_init_t20_intr_support(struct nvhost_master *host)
{
	host->op.intr.init_host_sync = t20_intr_init_host_sync;
	host->op.intr.set_host_clocks_per_usec =
			t20_intr_set_host_clocks_per_usec;
	host->op.intr.set_syncpt_threshold = t20_intr_set_syncpt_threshold;
	host->op.intr.enable_syncpt_intr = t20_intr_enable_syncpt_intr;
	host->op.intr.disable_all_syncpt_intrs =
			t20_intr_disable_all_syncpt_intrs;
	host->op.intr.request_host_general_irq =
			t20_intr_request_host_general_irq;
	host->op.intr.free_host_general_irq =
			t20_intr_free_host_general_irq;
	host->op.intr.request_syncpt_irq =
			t20_request_syncpt_irq;

	return 0;
}
diff --git a/drivers/video/tegra/host/host1x/host1x_syncpt.c b/drivers/video/tegra/host/host1x/host1x_syncpt.c new file mode 100644 index 00000000000..b0fd9970aaa --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_syncpt.c | |||
@@ -0,0 +1,248 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/host1x/host1x_syncpt.c | ||
3 | * | ||
4 | * Tegra Graphics Host Syncpoints for HOST1X | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <linux/nvhost_ioctl.h> | ||
22 | #include "nvhost_syncpt.h" | ||
23 | #include "dev.h" | ||
24 | #include "host1x_syncpt.h" | ||
25 | #include "host1x_hardware.h" | ||
26 | |||
27 | /** | ||
28 | * Write the current syncpoint value back to hw. | ||
29 | */ | ||
30 | static void t20_syncpt_reset(struct nvhost_syncpt *sp, u32 id) | ||
31 | { | ||
32 | struct nvhost_master *dev = syncpt_to_dev(sp); | ||
33 | int min = nvhost_syncpt_read_min(sp, id); | ||
34 | writel(min, dev->sync_aperture + (HOST1X_SYNC_SYNCPT_0 + id * 4)); | ||
35 | } | ||
36 | |||
37 | /** | ||
38 | * Write the current waitbase value back to hw. | ||
39 | */ | ||
40 | static void t20_syncpt_reset_wait_base(struct nvhost_syncpt *sp, u32 id) | ||
41 | { | ||
42 | struct nvhost_master *dev = syncpt_to_dev(sp); | ||
43 | writel(sp->base_val[id], | ||
44 | dev->sync_aperture + (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4)); | ||
45 | } | ||
46 | |||
47 | /** | ||
48 | * Read waitbase value from hw. | ||
49 | */ | ||
50 | static void t20_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id) | ||
51 | { | ||
52 | struct nvhost_master *dev = syncpt_to_dev(sp); | ||
53 | sp->base_val[id] = readl(dev->sync_aperture + | ||
54 | (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4)); | ||
55 | } | ||
56 | |||
/**
 * Updates the last value read from hardware.
 * (was nvhost_syncpt_update_min)
 *
 * Reads the live syncpoint value from hw and publishes it into the
 * cached minimum with a cmpxchg loop, retrying if another CPU updated
 * the cache concurrently. Returns the live hardware value.
 */
static u32 t20_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	void __iomem *sync_regs = dev->sync_aperture;
	u32 old, live;

	/* loop until the cached min advances atomically to the hw value */
	do {
		old = nvhost_syncpt_read_min(sp, id);
		live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
	} while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);

	/* hw value running past the cached max indicates an inconsistency */
	if (!nvhost_syncpt_check_max(sp, id, live))
		dev_err(&syncpt_to_dev(sp)->dev->dev,
				"%s failed: id=%u\n",
				__func__,
				id);

	return live;
}
80 | |||
/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
static void t20_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	BUG_ON(!nvhost_module_powered(dev->dev));
	/* a host-managed syncpt whose min caught up with max has no
	 * outstanding increments; one more would run past max */
	if (!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id)) {
		dev_err(&syncpt_to_dev(sp)->dev->dev,
			"Trying to increment syncpoint id %d beyond max\n",
			id);
		nvhost_debug_dump(syncpt_to_dev(sp));
		return;
	}
	writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
	/* make sure the write reaches hw before anything that follows */
	wmb();
}
99 | |||
/* check for old WAITs to be removed (avoiding a wrap)
 *
 * Refreshes the cached min of every syncpoint in @waitchk_mask, then
 * walks the @num_waitchk wait-checks: any WAIT whose threshold is
 * already satisfied is NULLed by patching the command stream to wait
 * on the reserved NVSYNCPT_GRAPHICS_HOST syncpt with threshold 0.
 * Returns 0, or the nvmap_patch_word() error that stopped the scan.
 */
static int t20_syncpt_wait_check(struct nvhost_syncpt *sp,
				struct nvmap_client *nvmap,
				u32 waitchk_mask,
				struct nvhost_waitchk *wait,
				int num_waitchk)
{
	u32 idx;
	int err = 0;

	/* get current syncpt values */
	for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
		if (BIT(idx) & waitchk_mask)
			nvhost_syncpt_update_min(sp, idx);
	}

	BUG_ON(!wait && !num_waitchk);

	/* compare syncpt vs wait threshold */
	while (num_waitchk) {
		u32 override;

		BUG_ON(wait->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);
		if (nvhost_syncpt_is_expired(sp,
					wait->syncpt_id, wait->thresh)) {
			/*
			 * NULL an already satisfied WAIT_SYNCPT host method,
			 * by patching its args in the command stream. The
			 * method data is changed to reference a reserved
			 * (never given out or incr) NVSYNCPT_GRAPHICS_HOST
			 * syncpt with a matching threshold value of 0, so
			 * is guaranteed to be popped by the host HW.
			 */
			dev_dbg(&syncpt_to_dev(sp)->dev->dev,
			    "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
			    wait->syncpt_id,
			    syncpt_op(sp).name(sp, wait->syncpt_id),
			    wait->thresh,
			    nvhost_syncpt_read_min(sp, wait->syncpt_id));

			/* patch the wait */
			override = nvhost_class_host_wait_syncpt(
					NVSYNCPT_GRAPHICS_HOST, 0);
			err = nvmap_patch_word(nvmap,
					(struct nvmap_handle *)wait->mem,
					wait->offset, override);
			if (err)
				break;
		}

		wait++;
		num_waitchk--;
	}
	return err;
}
155 | |||
156 | |||
/* Human-readable names for the 32 host1x syncpoints, indexed by id.
 * Unused ids (1-7) map to the empty string. */
static const char *s_syncpt_names[32] = {
	"gfx_host",
	"", "", "", "", "", "", "",
	"disp0_a", "disp1_a", "avp_0",
	"csi_vi_0", "csi_vi_1",
	"vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4",
	"2d_0", "2d_1",
	"disp0_b", "disp1_b",
	"3d",
	"mpe",
	"disp0_c", "disp1_c",
	"vblank0", "vblank1",
	"mpe_ebm_eof", "mpe_wr_safe",
	"2d_tinyblt",
	"dsi"
};
173 | |||
/* Return the name of syncpoint @id; BUGs on an out-of-range id. */
static const char *t20_syncpt_name(struct nvhost_syncpt *s, u32 id)
{
	BUG_ON(id >= ARRAY_SIZE(s_syncpt_names));
	return s_syncpt_names[id];
}
179 | |||
/* Log min/max of every active syncpoint and every non-zero waitbase. */
static void t20_syncpt_debug(struct nvhost_syncpt *sp)
{
	u32 i;
	for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
		u32 max = nvhost_syncpt_read_max(sp, i);
		u32 min = nvhost_syncpt_update_min(sp, i);
		/* skip syncpoints that have never been used */
		if (!max && !min)
			continue;
		dev_info(&syncpt_to_dev(sp)->dev->dev,
			"id %d (%s) min %d max %d\n",
			i, syncpt_op(sp).name(sp, i),
			min, max);

	}

	for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++) {
		u32 base_val;
		/* refresh the shadow copy before printing it */
		t20_syncpt_read_wait_base(sp, i);
		base_val = sp->base_val[i];
		if (base_val)
			dev_info(&syncpt_to_dev(sp)->dev->dev,
				"waitbase id %d val %d\n",
				i, base_val);

	}
}
206 | |||
/*
 * Try to take module mutex (mlock) @idx. Reading the mlock register
 * acquires the lock when the read returns 0. Returns 0 on success,
 * non-zero if the lock is already held by someone else.
 */
static int syncpt_mutex_try_lock(struct nvhost_syncpt *sp,
		unsigned int idx)
{
	void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;
	/* mlock registers returns 0 when the lock is acquired.
	 * writing 0 clears the lock. */
	return !!readl(sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
}
215 | |||
/* Release module mutex (mlock) @idx by writing 0 to its register. */
static void syncpt_mutex_unlock(struct nvhost_syncpt *sp,
		unsigned int idx)
{
	void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;

	writel(0, sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
}
223 | |||
/*
 * Locate the sync register aperture within channel 0 and fill in the
 * host1x syncpoint operations and hw limits on @host. Returns 0.
 */
int host1x_init_syncpt_support(struct nvhost_master *host)
{

	host->sync_aperture = host->aperture +
		(NV_HOST1X_CHANNEL0_BASE +
			HOST1X_CHANNEL_SYNC_REG_BASE);

	host->op.syncpt.reset = t20_syncpt_reset;
	host->op.syncpt.reset_wait_base = t20_syncpt_reset_wait_base;
	host->op.syncpt.read_wait_base = t20_syncpt_read_wait_base;
	host->op.syncpt.update_min = t20_syncpt_update_min;
	host->op.syncpt.cpu_incr = t20_syncpt_cpu_incr;
	host->op.syncpt.wait_check = t20_syncpt_wait_check;
	host->op.syncpt.debug = t20_syncpt_debug;
	host->op.syncpt.name = t20_syncpt_name;
	host->op.syncpt.mutex_try_lock = syncpt_mutex_try_lock;
	host->op.syncpt.mutex_unlock = syncpt_mutex_unlock;

	/* hw unit counts and the set of client-managed syncpoints */
	host->syncpt.nb_pts = NV_HOST1X_SYNCPT_NB_PTS;
	host->syncpt.nb_bases = NV_HOST1X_SYNCPT_NB_BASES;
	host->syncpt.client_managed = NVSYNCPTS_CLIENT_MANAGED;
	host->syncpt.nb_mlocks = NV_HOST1X_SYNC_MLOCK_NUM;

	return 0;
}
diff --git a/drivers/video/tegra/host/host1x/host1x_syncpt.h b/drivers/video/tegra/host/host1x/host1x_syncpt.h new file mode 100644 index 00000000000..0d263dc92ed --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_syncpt.h | |||
@@ -0,0 +1,77 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/host1x/host1x_syncpt.h | ||
3 | * | ||
4 | * Tegra Graphics Host Syncpoints for HOST1X | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
#ifndef __NVHOST_HOST1X_HOST1X_SYNCPT_H
#define __NVHOST_HOST1X_HOST1X_SYNCPT_H

/* Fixed host1x syncpoint id assignments for the client units. */
#define NVSYNCPT_DISP0_A (8)
#define NVSYNCPT_DISP1_A (9)
#define NVSYNCPT_AVP_0 (10)
#define NVSYNCPT_CSI_VI_0 (11)
#define NVSYNCPT_CSI_VI_1 (12)
#define NVSYNCPT_VI_ISP_0 (13)
#define NVSYNCPT_VI_ISP_1 (14)
#define NVSYNCPT_VI_ISP_2 (15)
#define NVSYNCPT_VI_ISP_3 (16)
#define NVSYNCPT_VI_ISP_4 (17)
#define NVSYNCPT_2D_0 (18)
#define NVSYNCPT_2D_1 (19)
#define NVSYNCPT_DISP0_B (20)
#define NVSYNCPT_DISP1_B (21)
#define NVSYNCPT_3D (22)
#define NVSYNCPT_MPE (23)
#define NVSYNCPT_DISP0_C (24)
#define NVSYNCPT_DISP1_C (25)
#define NVSYNCPT_VBLANK0 (26)
#define NVSYNCPT_VBLANK1 (27)
#define NVSYNCPT_MPE_EBM_EOF (28)
#define NVSYNCPT_MPE_WR_SAFE (29)
#define NVSYNCPT_DSI (31)


/*#define NVSYNCPT_2D_CHANNEL2_0 (20) */
/*#define NVSYNCPT_2D_CHANNEL2_1 (21) */
/*#define NVSYNCPT_2D_TINYBLT_WAR (30)*/
/*#define NVSYNCPT_2D_TINYBLT_RESTORE_CLASS_ID (30)*/

/* sync points that are wholly managed by the client */
#define NVSYNCPTS_CLIENT_MANAGED ( \
	BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) | \
	BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) | \
	BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) | \
	BIT(NVSYNCPT_DSI) | \
	BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1) | \
	BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) | \
	BIT(NVSYNCPT_VI_ISP_1) | BIT(NVSYNCPT_VI_ISP_2) | \
	BIT(NVSYNCPT_VI_ISP_3) | BIT(NVSYNCPT_VI_ISP_4) | \
	BIT(NVSYNCPT_MPE_EBM_EOF) | BIT(NVSYNCPT_MPE_WR_SAFE) | \
	BIT(NVSYNCPT_2D_1) | BIT(NVSYNCPT_AVP_0))


/* wait base register indices used by the 2D/3D/MPE units */
#define NVWAITBASE_2D_0 (1)
#define NVWAITBASE_2D_1 (2)
#define NVWAITBASE_3D (3)
#define NVWAITBASE_MPE (4)

struct nvhost_master;
int host1x_init_syncpt(struct nvhost_master *host);
int host1x_init_syncpt_support(struct nvhost_master *host);

#endif
diff --git a/drivers/video/tegra/host/isp/Makefile b/drivers/video/tegra/host/isp/Makefile new file mode 100644 index 00000000000..7bcdc33c83d --- /dev/null +++ b/drivers/video/tegra/host/isp/Makefile | |||
@@ -0,0 +1,7 @@ | |||
# Build the nvhost ISP client driver as nvhost-isp.
GCOV_PROFILE := y
EXTRA_CFLAGS += -Idrivers/video/tegra/host

nvhost-isp-objs = \
	isp.o

obj-$(CONFIG_TEGRA_GRHOST) += nvhost-isp.o
diff --git a/drivers/video/tegra/host/isp/isp.c b/drivers/video/tegra/host/isp/isp.c new file mode 100644 index 00000000000..f39dc644b27 --- /dev/null +++ b/drivers/video/tegra/host/isp/isp.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/isp/isp.c | ||
3 | * | ||
4 | * Tegra Graphics ISP | ||
5 | * | ||
6 | * Copyright (c) 2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include "dev.h" | ||
22 | #include "bus_client.h" | ||
23 | |||
/* Probe: initialize the ISP as a generic nvhost client device. */
static int __devinit isp_probe(struct nvhost_device *dev)
{
	return nvhost_client_device_init(dev);
}
28 | |||
/* Remove: no per-device teardown implemented yet. */
static int __exit isp_remove(struct nvhost_device *dev)
{
	/* Add clean-up */
	return 0;
}
34 | |||
/* Suspend via the common nvhost client suspend path. */
static int isp_suspend(struct nvhost_device *dev, pm_message_t state)
{
	return nvhost_client_device_suspend(dev);
}
39 | |||
/* Resume: only logs; no hw state is restored here. */
static int isp_resume(struct nvhost_device *dev)
{
	dev_info(&dev->dev, "resuming\n");
	return 0;
}
45 | |||
/* Device handle looked up from the nvhost bus at module init. */
struct nvhost_device *isp_device;

static struct nvhost_driver isp_driver = {
	.probe = isp_probe,
	.remove = __exit_p(isp_remove),
#ifdef CONFIG_PM
	.suspend = isp_suspend,
	.resume = isp_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "isp",
	}
};
60 | |||
/*
 * Module init: look up the board-declared "isp" device, register it
 * on the nvhost bus, then register the driver.
 * NOTE(review): if nvhost_driver_register() fails, the device is left
 * registered — consider unregistering it on that path.
 */
static int __init isp_init(void)
{
	int err;

	isp_device = nvhost_get_device("isp");
	if (!isp_device)
		return -ENXIO;

	err = nvhost_device_register(isp_device);
	if (err)
		return err;

	return nvhost_driver_register(&isp_driver);
}
75 | |||
/* Module exit: unregister the driver (the device stays registered). */
static void __exit isp_exit(void)
{
	nvhost_driver_unregister(&isp_driver);
}

module_init(isp_init);
module_exit(isp_exit);
diff --git a/drivers/video/tegra/host/mpe/Makefile b/drivers/video/tegra/host/mpe/Makefile new file mode 100644 index 00000000000..efd77bb88fe --- /dev/null +++ b/drivers/video/tegra/host/mpe/Makefile | |||
@@ -0,0 +1,7 @@ | |||
# Build the nvhost MPE (video encoder) client driver as nvhost-mpe.
GCOV_PROFILE := y
EXTRA_CFLAGS += -Idrivers/video/tegra/host

nvhost-mpe-objs = \
	mpe.o

obj-$(CONFIG_TEGRA_GRHOST) += nvhost-mpe.o
diff --git a/drivers/video/tegra/host/mpe/mpe.c b/drivers/video/tegra/host/mpe/mpe.c new file mode 100644 index 00000000000..28002aa637a --- /dev/null +++ b/drivers/video/tegra/host/mpe/mpe.c | |||
@@ -0,0 +1,638 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/mpe/mpe.c | ||
3 | * | ||
4 | * Tegra Graphics Host MPE | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include "nvhost_hwctx.h" | ||
22 | #include "dev.h" | ||
23 | #include "host1x/host1x_hardware.h" | ||
24 | #include "host1x/host1x_channel.h" | ||
25 | #include "host1x/host1x_syncpt.h" | ||
26 | #include "host1x/host1x_hwctx.h" | ||
27 | #include "t20/t20.h" | ||
28 | #include <linux/slab.h> | ||
29 | #include "bus_client.h" | ||
30 | |||
/* How each entry of ctxsave_regs_mpe is handled during save/restore. */
enum {
	HWCTX_REGINFO_NORMAL = 0,	/* copied straight to the restore buffer */
	HWCTX_REGINFO_STASH,		/* also recorded in mpe_save_info::in */
	HWCTX_REGINFO_CALCULATE,	/* value rewritten by calculate_mpe() */
	HWCTX_REGINFO_WRITEBACK		/* value produced from mpe_save_info::out */
};
37 | |||
/*
 * MPE context registers in save order: (offset, count, type).
 * The type selects handling in save_regs(); see the enum above.
 */
const struct hwctx_reginfo ctxsave_regs_mpe[] = {
	HWCTX_REGINFO(0x124, 1, STASH),
	HWCTX_REGINFO(0x123, 1, STASH),
	HWCTX_REGINFO(0x103, 1, STASH),
	HWCTX_REGINFO(0x074, 1, STASH),
	HWCTX_REGINFO(0x021, 1, NORMAL),
	HWCTX_REGINFO(0x020, 1, STASH),
	HWCTX_REGINFO(0x024, 2, NORMAL),
	HWCTX_REGINFO(0x0e6, 1, NORMAL),
	HWCTX_REGINFO(0x3fc, 1, NORMAL),
	HWCTX_REGINFO(0x3d0, 1, NORMAL),
	HWCTX_REGINFO(0x3d4, 1, NORMAL),
	HWCTX_REGINFO(0x013, 1, NORMAL),
	HWCTX_REGINFO(0x022, 1, NORMAL),
	HWCTX_REGINFO(0x030, 4, NORMAL),
	HWCTX_REGINFO(0x023, 1, NORMAL),
	HWCTX_REGINFO(0x070, 1, NORMAL),
	HWCTX_REGINFO(0x0a0, 9, NORMAL),
	HWCTX_REGINFO(0x071, 1, NORMAL),
	HWCTX_REGINFO(0x100, 4, NORMAL),
	HWCTX_REGINFO(0x104, 2, NORMAL),
	HWCTX_REGINFO(0x108, 9, NORMAL),
	HWCTX_REGINFO(0x112, 2, NORMAL),
	HWCTX_REGINFO(0x114, 1, STASH),
	HWCTX_REGINFO(0x014, 1, NORMAL),
	HWCTX_REGINFO(0x072, 1, NORMAL),
	HWCTX_REGINFO(0x200, 1, NORMAL),
	HWCTX_REGINFO(0x0d1, 1, NORMAL),
	HWCTX_REGINFO(0x0d0, 1, NORMAL),
	HWCTX_REGINFO(0x0c0, 1, NORMAL),
	HWCTX_REGINFO(0x0c3, 2, NORMAL),
	HWCTX_REGINFO(0x0d2, 1, NORMAL),
	HWCTX_REGINFO(0x0d8, 1, NORMAL),
	HWCTX_REGINFO(0x0e0, 2, NORMAL),
	HWCTX_REGINFO(0x07f, 2, NORMAL),
	HWCTX_REGINFO(0x084, 8, NORMAL),
	HWCTX_REGINFO(0x0d3, 1, NORMAL),
	HWCTX_REGINFO(0x040, 13, NORMAL),
	HWCTX_REGINFO(0x050, 6, NORMAL),
	HWCTX_REGINFO(0x058, 1, NORMAL),
	HWCTX_REGINFO(0x057, 1, NORMAL),
	HWCTX_REGINFO(0x111, 1, NORMAL),
	HWCTX_REGINFO(0x130, 3, NORMAL),
	HWCTX_REGINFO(0x201, 1, NORMAL),
	HWCTX_REGINFO(0x068, 2, NORMAL),
	HWCTX_REGINFO(0x08c, 1, NORMAL),
	HWCTX_REGINFO(0x0cf, 1, NORMAL),
	HWCTX_REGINFO(0x082, 2, NORMAL),
	HWCTX_REGINFO(0x075, 1, NORMAL),
	HWCTX_REGINFO(0x0e8, 1, NORMAL),
	HWCTX_REGINFO(0x056, 1, NORMAL),
	HWCTX_REGINFO(0x057, 1, NORMAL),
	HWCTX_REGINFO(0x073, 1, CALCULATE),
	HWCTX_REGINFO(0x074, 1, NORMAL),
	HWCTX_REGINFO(0x075, 1, NORMAL),
	HWCTX_REGINFO(0x076, 1, STASH),
	HWCTX_REGINFO(0x11a, 9, NORMAL),
	HWCTX_REGINFO(0x123, 1, NORMAL),
	HWCTX_REGINFO(0x124, 1, NORMAL),
	HWCTX_REGINFO(0x12a, 5, NORMAL),
	HWCTX_REGINFO(0x12f, 1, STASH),
	HWCTX_REGINFO(0x125, 2, NORMAL),
	HWCTX_REGINFO(0x034, 1, NORMAL),
	HWCTX_REGINFO(0x133, 2, NORMAL),
	HWCTX_REGINFO(0x127, 1, NORMAL),
	HWCTX_REGINFO(0x106, 1, WRITEBACK),
	HWCTX_REGINFO(0x107, 1, WRITEBACK)
};
106 | |||
/* number of STASH entries in ctxsave_regs_mpe */
#define NR_STASHES 8
/* number of WRITEBACK entries in ctxsave_regs_mpe */
#define NR_WRITEBACKS 2

/* MPE rate-control RAM access registers; size in words */
#define RC_RAM_LOAD_CMD 0x115
#define RC_RAM_LOAD_DATA 0x116
#define RC_RAM_READ_CMD 0x128
#define RC_RAM_READ_DATA 0x129
#define RC_RAM_SIZE 692

/* MPE IRFR RAM access registers; size in words */
#define IRFR_RAM_LOAD_CMD 0xc5
#define IRFR_RAM_LOAD_DATA 0xc6
#define IRFR_RAM_READ_CMD 0xcd
#define IRFR_RAM_READ_DATA 0xce
#define IRFR_RAM_SIZE 408

/* CPU-side scratch used while post-processing a context save */
struct mpe_save_info {
	u32 in[NR_STASHES];	/* STASHed register values, in save order */
	u32 out[NR_WRITEBACKS];	/* values computed by calculate_mpe() */
	unsigned in_pos;	/* next free slot in @in */
	unsigned out_pos;	/* next slot of @out to consume */
	u32 h264_mode;
};
129 | |||
130 | |||
131 | /*** restore ***/ | ||
132 | |||
/* size in words of the restore buffer; computed by setup_save() */
static unsigned int restore_size;

/* Open the restore stream: bump the waitbase, switch class to MPE. */
static void restore_begin(struct host1x_hwctx_handler *h, u32 *ptr)
{
	/* set class to host */
	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
	/* increment sync point base */
	ptr[1] = nvhost_class_host_incr_syncpt_base(h->waitbase, 1);
	/* set class to MPE */
	ptr[2] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
}
#define RESTORE_BEGIN_SIZE 3
146 | |||
/* Emit the two-word preamble that reloads a hw RAM of @words words. */
static void restore_ram(u32 *ptr, unsigned words,
			unsigned cmd_reg, unsigned data_reg)
{
	ptr[0] = nvhost_opcode_imm(cmd_reg, words);
	ptr[1] = nvhost_opcode_nonincr(data_reg, words);
}
#define RESTORE_RAM_SIZE 2
154 | |||
/* Close the restore stream with a tracking syncpt increment. */
static void restore_end(struct host1x_hwctx_handler *h, u32 *ptr)
{
	/* syncpt increment to track restore gather. */
	ptr[0] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
			h->syncpt);
}
#define RESTORE_END_SIZE 1
162 | |||
/*
 * Emit one incr-write header per register run and skip @count data
 * slots after each; the slots are filled with real values at context
 * save time by save_regs(). Returns the advanced pointer.
 */
static u32 *setup_restore_regs(u32 *ptr,
			const struct hwctx_reginfo *regs,
			unsigned int nr_regs)
{
	const struct hwctx_reginfo *rend = regs + nr_regs;

	for ( ; regs != rend; ++regs) {
		u32 offset = regs->offset;
		u32 count = regs->count;
		*ptr++ = nvhost_opcode_incr(offset, count);
		ptr += count;	/* data filled in later */
	}
	return ptr;
}
177 | |||
/* Emit a RAM reload preamble and skip its @words data slots. */
static u32 *setup_restore_ram(u32 *ptr, unsigned words,
			unsigned cmd_reg, unsigned data_reg)
{
	restore_ram(ptr, words, cmd_reg, data_reg);
	return ptr + (RESTORE_RAM_SIZE + words);
}
184 | |||
/*
 * Build the restore command stream skeleton into @ptr: begin sequence,
 * register write headers, both RAM reload preambles, end sequence.
 * Data slots are filled later from an actual context save.
 */
static void setup_restore(struct host1x_hwctx_handler *h, u32 *ptr)
{
	restore_begin(h, ptr);
	ptr += RESTORE_BEGIN_SIZE;

	ptr = setup_restore_regs(ptr, ctxsave_regs_mpe,
				ARRAY_SIZE(ctxsave_regs_mpe));

	ptr = setup_restore_ram(ptr, RC_RAM_SIZE,
				RC_RAM_LOAD_CMD, RC_RAM_LOAD_DATA);

	ptr = setup_restore_ram(ptr, IRFR_RAM_SIZE,
				IRFR_RAM_LOAD_CMD, IRFR_RAM_LOAD_DATA);

	restore_end(h, ptr);

	/* make the buffer contents visible before hand-off */
	wmb();
}
203 | |||
204 | |||
205 | /*** save ***/ | ||
/* Tracks emit position and word counts while building save buffers. */
struct save_info {
	u32 *ptr;			/* next emit position, or NULL to only count */
	unsigned int save_count;	/* words in the save stream so far */
	unsigned int restore_count;	/* words the restore stream will need */
};
211 | |||
/* Open the save stream: sync MPE with host, wake the reader thread. */
static void __init save_begin(struct host1x_hwctx_handler *h, u32 *ptr)
{
	/* MPE: when done, increment syncpt to base+1 */
	ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
	ptr[1] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE, h->syncpt);
	/* host: wait for syncpt base+1 */
	ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
	ptr[3] = nvhost_class_host_wait_syncpt_base(h->syncpt, h->waitbase, 1);
	/* host: signal context read thread to start reading */
	ptr[4] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE, h->syncpt);
}
#define SAVE_BEGIN_SIZE 5
225 | |||
/* Emit an indirect read of @count MPE regs starting at @start_reg. */
static void __init save_direct(u32 *ptr, u32 start_reg, u32 count)
{
	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					NV_CLASS_HOST_INDOFF, 1);
	ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_MPE,
						start_reg, true);
	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
}
#define SAVE_DIRECT_SIZE 3
235 | |||
/* Program @cmd_reg with @count to start a hw RAM readout. */
static void __init save_set_ram_cmd(u32 *ptr, u32 cmd_reg, u32 count)
{
	ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
					cmd_reg, 1);
	ptr[1] = count;
}
#define SAVE_SET_RAM_CMD_SIZE 2
243 | |||
/* Read one word of hw RAM via @data_reg, then poke junk back into it
 * to work around a register-memory caching problem. */
static void __init save_read_ram_data_nasty(u32 *ptr, u32 data_reg)
{
	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					NV_CLASS_HOST_INDOFF, 1);
	ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_MPE,
						data_reg, false);
	ptr[2] = nvhost_opcode_imm(NV_CLASS_HOST_INDDATA, 0);
	/* write junk data to avoid 'cached problem with register memory' */
	ptr[3] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
					data_reg, 1);
	ptr[4] = 0x99;
}
#define SAVE_READ_RAM_DATA_NASTY_SIZE 5
257 | |||
/* Close the save stream: wait for the reader, advance the waitbase. */
static void __init save_end(struct host1x_hwctx_handler *h, u32 *ptr)
{
	/* Wait for context read service to finish (cpu incr 3) */
	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
	ptr[1] = nvhost_class_host_wait_syncpt_base(h->syncpt, h->waitbase, 3);
	/* Advance syncpoint base */
	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
	ptr[3] = nvhost_class_host_incr_syncpt_base(h->waitbase, 3);
	/* set class back to the unit */
	ptr[4] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
}
#define SAVE_END_SIZE 5
271 | |||
/*
 * Emit save-side commands for @regs (when info->ptr is non-NULL) and
 * account both buffer sizes. WRITEBACK regs are not read from hw —
 * their values are produced by the CPU at context-read time — but
 * still occupy restore space. The zeroed gap after each save_direct()
 * is where the drained fifo data lands.
 */
static void __init setup_save_regs(struct save_info *info,
			const struct hwctx_reginfo *regs,
			unsigned int nr_regs)
{
	const struct hwctx_reginfo *rend = regs + nr_regs;
	u32 *ptr = info->ptr;
	unsigned int save_count = info->save_count;
	unsigned int restore_count = info->restore_count;

	for ( ; regs != rend; ++regs) {
		u32 offset = regs->offset;
		u32 count = regs->count;
		if (regs->type != HWCTX_REGINFO_WRITEBACK) {
			if (ptr) {
				save_direct(ptr, offset, count);
				ptr += SAVE_DIRECT_SIZE;
				memset(ptr, 0, count * 4);
				ptr += count;
			}
			save_count += (SAVE_DIRECT_SIZE + count);
		}
		/* restore always needs 1 header word + the data */
		restore_count += (1 + count);
	}

	info->ptr = ptr;
	info->save_count = save_count;
	info->restore_count = restore_count;
}
300 | |||
/*
 * Emit the word-by-word readout of a @words-sized hw RAM (when
 * info->ptr is non-NULL) and account both buffer sizes.
 */
static void __init setup_save_ram_nasty(struct save_info *info, unsigned words,
			unsigned cmd_reg, unsigned data_reg)
{
	u32 *ptr = info->ptr;
	unsigned int save_count = info->save_count;
	unsigned int restore_count = info->restore_count;
	unsigned i;

	if (ptr) {
		save_set_ram_cmd(ptr, cmd_reg, words);
		ptr += SAVE_SET_RAM_CMD_SIZE;
		/* each RAM word takes a full nasty read sequence */
		for (i = words; i; --i) {
			save_read_ram_data_nasty(ptr, data_reg);
			ptr += SAVE_READ_RAM_DATA_NASTY_SIZE;
		}
	}

	save_count += SAVE_SET_RAM_CMD_SIZE;
	save_count += words * SAVE_READ_RAM_DATA_NASTY_SIZE;
	restore_count += (RESTORE_RAM_SIZE + words);

	info->ptr = ptr;
	info->save_count = save_count;
	info->restore_count = restore_count;
}
326 | |||
/*
 * Build the complete context-save command stream into @ptr (pass NULL
 * to only compute sizes) and record the resulting save/restore buffer
 * sizes in h->save_size and the file-scope restore_size.
 */
static void __init setup_save(struct host1x_hwctx_handler *h, u32 *ptr)
{
	struct save_info info = {
		ptr,
		SAVE_BEGIN_SIZE,
		RESTORE_BEGIN_SIZE
	};

	if (info.ptr) {
		save_begin(h, info.ptr);
		info.ptr += SAVE_BEGIN_SIZE;
	}

	setup_save_regs(&info, ctxsave_regs_mpe,
			ARRAY_SIZE(ctxsave_regs_mpe));

	setup_save_ram_nasty(&info, RC_RAM_SIZE,
			RC_RAM_READ_CMD, RC_RAM_READ_DATA);

	setup_save_ram_nasty(&info, IRFR_RAM_SIZE,
			IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);

	if (info.ptr) {
		save_end(h, info.ptr);
		info.ptr += SAVE_END_SIZE;
	}

	/* make the buffer contents visible before hand-off */
	wmb();

	h->save_size = info.save_count + SAVE_END_SIZE;
	restore_size = info.restore_count + RESTORE_END_SIZE;
}
359 | |||
360 | |||
361 | static u32 calculate_mpe(u32 word, struct mpe_save_info *msi) | ||
362 | { | ||
363 | u32 buffer_full_read = msi->in[0] & 0x01ffffff; | ||
364 | u32 byte_len = msi->in[1]; | ||
365 | u32 drain = (msi->in[2] >> 2) & 0x007fffff; | ||
366 | u32 rep_frame = msi->in[3] & 0x0000ffff; | ||
367 | u32 h264_mode = (msi->in[4] >> 11) & 1; | ||
368 | int new_buffer_full; | ||
369 | |||
370 | if (h264_mode) | ||
371 | byte_len >>= 3; | ||
372 | new_buffer_full = buffer_full_read + byte_len - (drain * 4); | ||
373 | msi->out[0] = max(0, new_buffer_full); | ||
374 | msi->out[1] = rep_frame; | ||
375 | if (rep_frame == 0) | ||
376 | word &= 0xffff0000; | ||
377 | return word; | ||
378 | } | ||
379 | |||
/*
 * Drain the context-save read fifo into the restore buffer for @regs.
 * The leading restore header word of each run (emitted by
 * setup_restore_regs()) is skipped. NORMAL values are copied as-is;
 * STASH values are also recorded into @msi->in; CALCULATE values are
 * rewritten by calculate_mpe(); WRITEBACK slots take the values
 * previously computed into @msi->out. Returns the advanced pointer.
 */
static u32 *save_regs(u32 *ptr, unsigned int *pending,
		struct nvhost_channel *channel,
		const struct hwctx_reginfo *regs,
		unsigned int nr_regs,
		struct mpe_save_info *msi)
{
	const struct hwctx_reginfo *rend = regs + nr_regs;

	for ( ; regs != rend; ++regs) {
		u32 count = regs->count;
		++ptr; /* restore incr */
		if (regs->type == HWCTX_REGINFO_NORMAL) {
			host1x_drain_read_fifo(channel->aperture,
					ptr, count, pending);
			ptr += count;
		} else {
			u32 word;
			if (regs->type == HWCTX_REGINFO_WRITEBACK) {
				BUG_ON(msi->out_pos >= NR_WRITEBACKS);
				word = msi->out[msi->out_pos++];
			} else {
				host1x_drain_read_fifo(channel->aperture,
						&word, 1, pending);
				if (regs->type == HWCTX_REGINFO_STASH) {
					BUG_ON(msi->in_pos >= NR_STASHES);
					msi->in[msi->in_pos++] = word;
				} else {
					word = calculate_mpe(word, msi);
				}
			}
			*ptr++ = word;
		}
	}
	return ptr;
}
415 | |||
/* Drain @words of hw RAM content into the restore buffer, right after
 * the reload preamble emitted by setup_restore_ram(). */
static u32 *save_ram(u32 *ptr, unsigned int *pending,
		struct nvhost_channel *channel,
		unsigned words, unsigned cmd_reg, unsigned data_reg)
{
	int err = 0;
	ptr += RESTORE_RAM_SIZE;	/* skip the preamble words */
	err = host1x_drain_read_fifo(channel->aperture, ptr, words, pending);
	WARN_ON(err);
	return ptr + words;
}
426 | |||
427 | |||
428 | /*** ctxmpe ***/ | ||
429 | |||
/*
 * Allocate a per-channel MPE hardware context: a write-combined nvmap
 * buffer for the restore command stream, mapped into the kernel and
 * pinned for DMA, plus host-side bookkeeping in struct host1x_hwctx.
 * Returns the embedded nvhost_hwctx, or NULL on allocation failure
 * (all partially acquired resources are released).
 */
static struct nvhost_hwctx *ctxmpe_alloc(struct nvhost_hwctx_handler *h,
		struct nvhost_channel *ch)
{
	struct nvmap_client *nvmap = nvhost_get_host(ch->dev)->nvmap;
	struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
	struct host1x_hwctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	ctx->restore = nvmap_alloc(nvmap, restore_size * 4, 32,
				NVMAP_HANDLE_WRITE_COMBINE, 0);
	if (IS_ERR_OR_NULL(ctx->restore)) {
		kfree(ctx);
		return NULL;
	}

	ctx->restore_virt = nvmap_mmap(ctx->restore);
	if (!ctx->restore_virt) {
		nvmap_free(nvmap, ctx->restore);
		kfree(ctx);
		return NULL;
	}

	kref_init(&ctx->hwctx.ref);
	ctx->hwctx.h = &p->h;
	ctx->hwctx.channel = ch;
	ctx->hwctx.valid = false;	/* no saved hardware state yet */
	ctx->save_incrs = 3;
	ctx->save_thresh = 2;
	ctx->restore_phys = nvmap_pin(nvmap, ctx->restore);
	ctx->restore_size = restore_size;
	ctx->restore_incrs = 1;

	/* pre-build the static part of the restore command stream */
	setup_restore(p, ctx->restore_virt);

	return &ctx->hwctx;
}
468 | |||
/* Take a reference on a hardware context. */
static void ctxmpe_get(struct nvhost_hwctx *ctx)
{
	kref_get(&ctx->ref);
}
473 | |||
/*
 * kref release callback: unmap, unpin and free the restore buffer,
 * then free the context itself.
 */
static void ctxmpe_free(struct kref *ref)
{
	struct nvhost_hwctx *nctx = container_of(ref, struct nvhost_hwctx, ref);
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct nvmap_client *nvmap =
		nvhost_get_host(nctx->channel->dev)->nvmap;

	if (ctx->restore_virt)
		nvmap_munmap(ctx->restore, ctx->restore_virt);
	nvmap_unpin(nvmap, ctx->restore);
	nvmap_free(nvmap, ctx->restore);
	kfree(ctx);
}
487 | |||
/* Drop a reference on a hardware context; frees it on last put. */
static void ctxmpe_put(struct nvhost_hwctx *ctx)
{
	kref_put(&ctx->ref, ctxmpe_free);
}
492 | |||
/*
 * Push the precomputed context-save command stream (built by setup_save)
 * onto the channel's command DMA as a single GATHER.
 */
static void ctxmpe_save_push(struct nvhost_hwctx *nctx,
		struct nvhost_cdma *cdma)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *h = host1x_hwctx_handler(ctx);
	nvhost_cdma_push(cdma,
			nvhost_opcode_gather(h->save_size),
			h->save_phys);
}
502 | |||
/*
 * Service routine run after the save gather has executed: drain the
 * register values and RAM contents the hardware pushed into the read
 * FIFO, assemble them into the restore buffer, then bump the syncpoint
 * to signal that the context save is complete.
 */
static void ctxmpe_save_service(struct nvhost_hwctx *nctx)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *h = host1x_hwctx_handler(ctx);

	u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE;
	unsigned int pending = 0;
	struct mpe_save_info msi;

	msi.in_pos = 0;
	msi.out_pos = 0;

	ptr = save_regs(ptr, &pending, nctx->channel,
			ctxsave_regs_mpe, ARRAY_SIZE(ctxsave_regs_mpe), &msi);

	ptr = save_ram(ptr, &pending, nctx->channel,
		RC_RAM_SIZE, RC_RAM_READ_CMD, RC_RAM_READ_DATA);

	ptr = save_ram(ptr, &pending, nctx->channel,
		IRFR_RAM_SIZE, IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);

	/* ensure buffer writes land before signalling completion */
	wmb();
	nvhost_syncpt_cpu_incr(&nvhost_get_host(nctx->channel->dev)->syncpt,
			h->syncpt);
}
528 | |||
529 | struct nvhost_hwctx_handler * __init nvhost_mpe_ctxhandler_init( | ||
530 | u32 syncpt, u32 waitbase, | ||
531 | struct nvhost_channel *ch) | ||
532 | { | ||
533 | struct nvmap_client *nvmap; | ||
534 | u32 *save_ptr; | ||
535 | struct host1x_hwctx_handler *p; | ||
536 | |||
537 | p = kmalloc(sizeof(*p), GFP_KERNEL); | ||
538 | if (!p) | ||
539 | return NULL; | ||
540 | |||
541 | nvmap = nvhost_get_host(ch->dev)->nvmap; | ||
542 | |||
543 | p->syncpt = syncpt; | ||
544 | p->waitbase = waitbase; | ||
545 | |||
546 | setup_save(p, NULL); | ||
547 | |||
548 | p->save_buf = nvmap_alloc(nvmap, p->save_size * 4, 32, | ||
549 | NVMAP_HANDLE_WRITE_COMBINE, 0); | ||
550 | if (IS_ERR(p->save_buf)) { | ||
551 | p->save_buf = NULL; | ||
552 | return NULL; | ||
553 | } | ||
554 | |||
555 | save_ptr = nvmap_mmap(p->save_buf); | ||
556 | if (!save_ptr) { | ||
557 | nvmap_free(nvmap, p->save_buf); | ||
558 | p->save_buf = NULL; | ||
559 | return NULL; | ||
560 | } | ||
561 | |||
562 | p->save_phys = nvmap_pin(nvmap, p->save_buf); | ||
563 | |||
564 | setup_save(p, save_ptr); | ||
565 | |||
566 | p->h.alloc = ctxmpe_alloc; | ||
567 | p->h.save_push = ctxmpe_save_push; | ||
568 | p->h.save_service = ctxmpe_save_service; | ||
569 | p->h.get = ctxmpe_get; | ||
570 | p->h.put = ctxmpe_put; | ||
571 | |||
572 | return &p->h; | ||
573 | } | ||
574 | |||
/* Save the MPE hardware context before the unit is power gated. */
int nvhost_mpe_prepare_power_off(struct nvhost_device *dev)
{
	return host1x_save_context(dev, NVSYNCPT_MPE);
}
579 | |||
/* Probe: register the MPE unit as an nvhost client device. */
static int __devinit mpe_probe(struct nvhost_device *dev)
{
	return nvhost_client_device_init(dev);
}
584 | |||
/* Remove: currently a stub; device teardown is still TODO. */
static int __exit mpe_remove(struct nvhost_device *dev)
{
	/* Add clean-up */
	return 0;
}
590 | |||
/* Suspend: defer to the generic nvhost client suspend path. */
static int mpe_suspend(struct nvhost_device *dev, pm_message_t state)
{
	return nvhost_client_device_suspend(dev);
}
595 | |||
/* Resume: nothing to restore here; context restore happens lazily. */
static int mpe_resume(struct nvhost_device *dev)
{
	dev_info(&dev->dev, "resuming\n");
	return 0;
}
601 | |||
/* Device instance looked up by name in mpe_init(). */
struct nvhost_device *mpe_device;

/* nvhost driver binding for the "mpe" device. */
static struct nvhost_driver mpe_driver = {
	.probe = mpe_probe,
	.remove = __exit_p(mpe_remove),
#ifdef CONFIG_PM
	.suspend = mpe_suspend,
	.resume = mpe_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "mpe",
	}
};
616 | |||
/*
 * Module init: look up the platform-declared "mpe" device, register it
 * with the nvhost core, then register the driver that binds to it.
 */
static int __init mpe_init(void)
{
	int err;

	mpe_device = nvhost_get_device("mpe");
	if (!mpe_device)
		return -ENXIO;	/* no MPE on this platform */

	err = nvhost_device_register(mpe_device);
	if (err)
		return err;

	return nvhost_driver_register(&mpe_driver);
}
631 | |||
/* Module exit: unregister the driver (device is left registered). */
static void __exit mpe_exit(void)
{
	nvhost_driver_unregister(&mpe_driver);
}
636 | |||
637 | module_init(mpe_init); | ||
638 | module_exit(mpe_exit); | ||
diff --git a/drivers/video/tegra/host/mpe/mpe.h b/drivers/video/tegra/host/mpe/mpe.h new file mode 100644 index 00000000000..1bc2a8a04c1 --- /dev/null +++ b/drivers/video/tegra/host/mpe/mpe.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/mpe/mpe.h | ||
3 | * | ||
4 | * Tegra Graphics Host MPE | ||
5 | * | ||
6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
#ifndef __NVHOST_MPE_MPE_H
#define __NVHOST_MPE_MPE_H

struct nvhost_hwctx_handler;
struct nvhost_device;

/* Allocate and initialize the MPE hardware-context handler for @ch. */
struct nvhost_hwctx_handler *nvhost_mpe_ctxhandler_init(
	u32 syncpt, u32 waitbase,
	struct nvhost_channel *ch);
/* Save the MPE hardware context before the unit is power gated. */
int nvhost_mpe_prepare_power_off(struct nvhost_device *dev);

#endif
diff --git a/drivers/video/tegra/host/nvhost_acm.c b/drivers/video/tegra/host/nvhost_acm.c new file mode 100644 index 00000000000..318f209651a --- /dev/null +++ b/drivers/video/tegra/host/nvhost_acm.c | |||
@@ -0,0 +1,467 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/nvhost_acm.c | ||
3 | * | ||
4 | * Tegra Graphics Host Automatic Clock Management | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include "nvhost_acm.h" | ||
22 | #include "dev.h" | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/string.h> | ||
25 | #include <linux/sched.h> | ||
26 | #include <linux/err.h> | ||
27 | #include <linux/device.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/platform_device.h> | ||
30 | #include <mach/powergate.h> | ||
31 | #include <mach/clk.h> | ||
32 | #include <mach/hardware.h> | ||
33 | |||
34 | #define ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT (2 * HZ) | ||
35 | #define POWERGATE_DELAY 10 | ||
36 | #define MAX_DEVID_LENGTH 16 | ||
37 | |||
38 | DEFINE_MUTEX(client_list_lock); | ||
39 | |||
40 | struct nvhost_module_client { | ||
41 | struct list_head node; | ||
42 | unsigned long rate[NVHOST_MODULE_MAX_CLOCKS]; | ||
43 | void *priv; | ||
44 | }; | ||
45 | |||
/* Power gate partition @id if valid (-1 = none) and currently powered. */
static void do_powergate_locked(int id)
{
	if (id != -1 && tegra_powergate_is_powered(id))
		tegra_powergate_partition(id);
}
51 | |||
/* Un-power-gate partition @id if valid (-1 = none). */
static void do_unpowergate_locked(int id)
{
	if (id != -1)
		tegra_unpowergate_partition(id);
}
57 | |||
/*
 * Hard-reset a module: for each valid powergate partition, disable its
 * memory-controller client, assert the peripheral reset, and flush MC
 * traffic; wait POWERGATE_DELAY us; then undo the sequence in reverse.
 * The exact ordering of the MC-disable / reset-assert / flush steps is
 * hardware-mandated -- do not reorder.
 */
void nvhost_module_reset(struct nvhost_device *dev)
{
	dev_dbg(&dev->dev,
		"%s: asserting %s module reset (id %d, id2 %d)\n",
		__func__, dev->name,
		dev->powergate_ids[0], dev->powergate_ids[1]);

	mutex_lock(&dev->lock);

	/* assert module and mc client reset */
	if (dev->powergate_ids[0] != -1) {
		tegra_powergate_mc_disable(dev->powergate_ids[0]);
		tegra_periph_reset_assert(dev->clk[0]);
		tegra_powergate_mc_flush(dev->powergate_ids[0]);
	}
	if (dev->powergate_ids[1] != -1) {
		tegra_powergate_mc_disable(dev->powergate_ids[1]);
		tegra_periph_reset_assert(dev->clk[1]);
		tegra_powergate_mc_flush(dev->powergate_ids[1]);
	}

	/* let the reset settle before deasserting */
	udelay(POWERGATE_DELAY);

	/* deassert reset */
	if (dev->powergate_ids[0] != -1) {
		tegra_powergate_mc_flush_done(dev->powergate_ids[0]);
		tegra_periph_reset_deassert(dev->clk[0]);
		tegra_powergate_mc_enable(dev->powergate_ids[0]);
	}
	if (dev->powergate_ids[1] != -1) {
		tegra_powergate_mc_flush_done(dev->powergate_ids[1]);
		tegra_periph_reset_deassert(dev->clk[1]);
		tegra_powergate_mc_enable(dev->powergate_ids[1]);
	}

	mutex_unlock(&dev->lock);

	dev_dbg(&dev->dev, "%s: module %s out of reset\n",
		__func__, dev->name);
}
98 | |||
/*
 * Transition @dev to CLOCKGATED.  From RUNNING: disable all module
 * clocks and drop the busy reference held on the parent device.  From
 * POWERGATED (only possible when the module can power gate): un-gate
 * the partitions so clocks can later be re-enabled.
 * Caller must hold dev->lock.
 */
static void to_state_clockgated_locked(struct nvhost_device *dev)
{
	if (dev->powerstate == NVHOST_POWER_STATE_RUNNING) {
		int i;
		for (i = 0; i < dev->num_clks; i++)
			clk_disable(dev->clk[i]);
		if (dev->dev.parent)
			nvhost_module_idle(to_nvhost_device(dev->dev.parent));
	} else if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED
		&& dev->can_powergate) {
		do_unpowergate_locked(dev->powergate_ids[0]);
		do_unpowergate_locked(dev->powergate_ids[1]);
	}
	dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
}
114 | |||
/*
 * Transition @dev to RUNNING.  A POWERGATED module goes through
 * CLOCKGATED first (un-gating the partitions), then the parent is
 * marked busy and all module clocks are enabled.  finalize_poweron()
 * runs only when coming up from POWERGATED, after clocks are on.
 * Caller must hold dev->lock.
 */
static void to_state_running_locked(struct nvhost_device *dev)
{
	int prev_state = dev->powerstate;
	if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED)
		to_state_clockgated_locked(dev);
	if (dev->powerstate == NVHOST_POWER_STATE_CLOCKGATED) {
		int i;

		/* keep the parent (host1x) powered while we run */
		if (dev->dev.parent)
			nvhost_module_busy(to_nvhost_device(dev->dev.parent));

		for (i = 0; i < dev->num_clks; i++) {
			int err = clk_enable(dev->clk[i]);
			BUG_ON(err);
		}

		if (prev_state == NVHOST_POWER_STATE_POWERGATED
			&& dev->finalize_poweron)
			dev->finalize_poweron(dev);
	}
	dev->powerstate = NVHOST_POWER_STATE_RUNNING;
}
137 | |||
138 | /* This gets called from powergate_handler() and from module suspend. | ||
139 | * Module suspend is done for all modules, runtime power gating only | ||
140 | * for modules with can_powergate set. | ||
141 | */ | ||
/* This gets called from powergate_handler() and from module suspend.
 * Module suspend is done for all modules, runtime power gating only
 * for modules with can_powergate set.
 *
 * The module is briefly brought to RUNNING if prepare_poweroff() needs
 * to touch hardware (e.g. to save context); a failure there aborts the
 * transition.  Returns 0 on success, the prepare_poweroff() error
 * otherwise.  Caller must hold dev->lock.
 */
static int to_state_powergated_locked(struct nvhost_device *dev)
{
	int err = 0;

	if (dev->prepare_poweroff
		&& dev->powerstate != NVHOST_POWER_STATE_POWERGATED) {
		/* Clock needs to be on in prepare_poweroff */
		to_state_running_locked(dev);
		err = dev->prepare_poweroff(dev);
		if (err)
			return err;
	}

	if (dev->powerstate == NVHOST_POWER_STATE_RUNNING)
		to_state_clockgated_locked(dev);

	if (dev->can_powergate) {
		do_powergate_locked(dev->powergate_ids[0]);
		do_powergate_locked(dev->powergate_ids[1]);
	}

	dev->powerstate = NVHOST_POWER_STATE_POWERGATED;
	return 0;
}
166 | |||
/* Queue deferred power gating after powergate_delay ms (if supported). */
static void schedule_powergating_locked(struct nvhost_device *dev)
{
	if (dev->can_powergate)
		schedule_delayed_work(&dev->powerstate_down,
				msecs_to_jiffies(dev->powergate_delay));
}
173 | |||
/* Queue deferred clock gating after clockgate_delay ms. */
static void schedule_clockgating_locked(struct nvhost_device *dev)
{
	schedule_delayed_work(&dev->powerstate_down,
			msecs_to_jiffies(dev->clockgate_delay));
}
179 | |||
/*
 * Take a busy reference on @dev: cancels any pending gating work and,
 * on the 0->1 transition while unpowered, brings the module to RUNNING.
 * Paired with nvhost_module_idle()/nvhost_module_idle_mult().
 */
void nvhost_module_busy(struct nvhost_device *dev)
{
	/* optional per-device hook, called outside dev->lock */
	if (dev->busy)
		dev->busy(dev);

	mutex_lock(&dev->lock);
	cancel_delayed_work(&dev->powerstate_down);

	dev->refcount++;
	if (dev->refcount > 0 && !nvhost_module_powered(dev))
		to_state_running_locked(dev);
	mutex_unlock(&dev->lock);
}
193 | |||
/*
 * Delayed-work handler stepping an idle module down one power state:
 * RUNNING -> CLOCKGATED (then schedule power gating), or
 * CLOCKGATED -> POWERGATED (rescheduled if the transition fails).
 * Does nothing if the module became busy again in the meantime.
 */
static void powerstate_down_handler(struct work_struct *work)
{
	struct nvhost_device *dev;

	dev = container_of(to_delayed_work(work),
			struct nvhost_device,
			powerstate_down);

	mutex_lock(&dev->lock);
	if (dev->refcount == 0) {
		switch (dev->powerstate) {
		case NVHOST_POWER_STATE_RUNNING:
			to_state_clockgated_locked(dev);
			schedule_powergating_locked(dev);
			break;
		case NVHOST_POWER_STATE_CLOCKGATED:
			if (to_state_powergated_locked(dev))
				schedule_powergating_locked(dev);
			break;
		default:
			break;
		}
	}
	mutex_unlock(&dev->lock);
}
219 | |||
220 | |||
/*
 * Drop @refs busy references on @dev.  When the count hits zero,
 * schedule deferred clock gating, wake anyone blocked in
 * nvhost_module_suspend() on idle_wq, and run the optional idle hook.
 */
void nvhost_module_idle_mult(struct nvhost_device *dev, int refs)
{
	bool kick = false;

	mutex_lock(&dev->lock);
	dev->refcount -= refs;
	if (dev->refcount == 0) {
		if (nvhost_module_powered(dev))
			schedule_clockgating_locked(dev);
		kick = true;
	}
	mutex_unlock(&dev->lock);

	/* notify waiters outside the lock */
	if (kick) {
		wake_up(&dev->idle_wq);

		if (dev->idle)
			dev->idle(dev);
	}
}
241 | |||
/*
 * Read the current rate of clock @index into *rate.  The module is
 * temporarily marked busy because the rate is only meaningful while
 * the clock tree is enabled.  Returns 0, or -EINVAL for a bad index.
 */
int nvhost_module_get_rate(struct nvhost_device *dev, unsigned long *rate,
		int index)
{
	struct clk *c;

	c = dev->clk[index];
	if (IS_ERR_OR_NULL(c))
		return -EINVAL;

	/* Need to enable client to get correct rate */
	nvhost_module_busy(dev);
	*rate = clk_get_rate(c);
	nvhost_module_idle(dev);
	return 0;

}
258 | |||
/*
 * Set clock @index to the maximum rate requested by any registered
 * client, falling back to the rounded default rate when no client has
 * a request.  NOTE(review): callers appear to hold client_list_lock
 * while walking client_list -- confirm all call sites do.
 */
static int nvhost_module_update_rate(struct nvhost_device *dev, int index)
{
	unsigned long rate = 0;
	struct nvhost_module_client *m;

	if (!dev->clk[index])
		return -EINVAL;

	list_for_each_entry(m, &dev->client_list, node) {
		rate = max(m->rate[index], rate);
	}
	if (!rate)
		rate = clk_round_rate(dev->clk[index],
				dev->clocks[index].default_rate);

	return clk_set_rate(dev->clk[index], rate);
}
276 | |||
/*
 * Record a rate request of @rate for client @priv (rounded per clock),
 * then re-resolve every clock to the max across all clients.  The
 * @index parameter is unused for storing: the request is applied to
 * all clocks of the module.  Returns the first clk_set_rate() error.
 */
int nvhost_module_set_rate(struct nvhost_device *dev, void *priv,
		unsigned long rate, int index)
{
	struct nvhost_module_client *m;
	int i, ret = 0;

	mutex_lock(&client_list_lock);
	list_for_each_entry(m, &dev->client_list, node) {
		if (m->priv == priv) {
			for (i = 0; i < dev->num_clks; i++)
				m->rate[i] = clk_round_rate(dev->clk[i], rate);
			break;
		}
	}

	for (i = 0; i < dev->num_clks; i++) {
		ret = nvhost_module_update_rate(dev, i);
		if (ret < 0)
			break;
	}
	mutex_unlock(&client_list_lock);
	return ret;

}
301 | |||
/*
 * Register @priv as a clock-rate client of @dev, with initial requests
 * equal to each clock's rounded default rate.  Returns 0 or -ENOMEM.
 * The entry is freed by nvhost_module_remove_client().
 */
int nvhost_module_add_client(struct nvhost_device *dev, void *priv)
{
	int i;
	unsigned long rate;
	struct nvhost_module_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	INIT_LIST_HEAD(&client->node);
	client->priv = priv;

	for (i = 0; i < dev->num_clks; i++) {
		rate = clk_round_rate(dev->clk[i],
				dev->clocks[i].default_rate);
		client->rate[i] = rate;
	}
	mutex_lock(&client_list_lock);
	list_add_tail(&client->node, &dev->client_list);
	mutex_unlock(&client_list_lock);
	return 0;
}
325 | |||
326 | void nvhost_module_remove_client(struct nvhost_device *dev, void *priv) | ||
327 | { | ||
328 | int i; | ||
329 | struct nvhost_module_client *m; | ||
330 | |||
331 | mutex_lock(&client_list_lock); | ||
332 | list_for_each_entry(m, &dev->client_list, node) { | ||
333 | if (priv == m->priv) { | ||
334 | list_del(&m->node); | ||
335 | break; | ||
336 | } | ||
337 | } | ||
338 | if (m) { | ||
339 | kfree(m); | ||
340 | for (i = 0; i < dev->num_clks; i++) | ||
341 | nvhost_module_update_rate(dev, i); | ||
342 | } | ||
343 | mutex_unlock(&client_list_lock); | ||
344 | } | ||
345 | |||
346 | int nvhost_module_init(struct nvhost_device *dev) | ||
347 | { | ||
348 | int i = 0; | ||
349 | |||
350 | /* initialize clocks to known state */ | ||
351 | INIT_LIST_HEAD(&dev->client_list); | ||
352 | while (dev->clocks[i].name && i < NVHOST_MODULE_MAX_CLOCKS) { | ||
353 | char devname[MAX_DEVID_LENGTH]; | ||
354 | long rate = dev->clocks[i].default_rate; | ||
355 | struct clk *c; | ||
356 | |||
357 | snprintf(devname, MAX_DEVID_LENGTH, "tegra_%s", dev->name); | ||
358 | c = clk_get_sys(devname, dev->clocks[i].name); | ||
359 | BUG_ON(IS_ERR_OR_NULL(c)); | ||
360 | |||
361 | rate = clk_round_rate(c, rate); | ||
362 | clk_enable(c); | ||
363 | clk_set_rate(c, rate); | ||
364 | clk_disable(c); | ||
365 | dev->clk[i] = c; | ||
366 | i++; | ||
367 | } | ||
368 | dev->num_clks = i; | ||
369 | |||
370 | mutex_init(&dev->lock); | ||
371 | init_waitqueue_head(&dev->idle_wq); | ||
372 | INIT_DELAYED_WORK(&dev->powerstate_down, powerstate_down_handler); | ||
373 | |||
374 | /* power gate units that we can power gate */ | ||
375 | if (dev->can_powergate) { | ||
376 | do_powergate_locked(dev->powergate_ids[0]); | ||
377 | do_powergate_locked(dev->powergate_ids[1]); | ||
378 | dev->powerstate = NVHOST_POWER_STATE_POWERGATED; | ||
379 | } else { | ||
380 | do_unpowergate_locked(dev->powergate_ids[0]); | ||
381 | do_unpowergate_locked(dev->powergate_ids[1]); | ||
382 | dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED; | ||
383 | } | ||
384 | |||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | static int is_module_idle(struct nvhost_device *dev) | ||
389 | { | ||
390 | int count; | ||
391 | mutex_lock(&dev->lock); | ||
392 | count = dev->refcount; | ||
393 | mutex_unlock(&dev->lock); | ||
394 | return (count == 0); | ||
395 | } | ||
396 | |||
/*
 * Diagnostic dump used when suspend finds the host not idle: print the
 * busy refcount of every channel device and any syncpoint module locks
 * still held.
 */
static void debug_not_idle(struct nvhost_master *host)
{
	int i;
	bool lock_released = true;

	for (i = 0; i < host->nb_channels; i++) {
		struct nvhost_device *dev = host->channels[i].dev;
		mutex_lock(&dev->lock);
		if (dev->name)
			dev_warn(&host->dev->dev,
				"tegra_grhost: %s: refcnt %d\n", dev->name,
				dev->refcount);
		mutex_unlock(&dev->lock);
	}

	for (i = 0; i < host->syncpt.nb_mlocks; i++) {
		int c = atomic_read(&host->syncpt.lock_counts[i]);
		if (c) {
			dev_warn(&host->dev->dev,
				"tegra_grhost: lock id %d: refcnt %d\n",
				i, c);
			lock_released = false;
		}
	}
	if (lock_released)
		dev_dbg(&host->dev->dev, "tegra_grhost: all locks released\n");
}
424 | |||
/*
 * Suspend @dev: wait (up to ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT) for its
 * busy refcount to drop to zero, then force it to POWERGATED and call
 * the optional per-device suspend hook.  @system_suspend selects the
 * system-sleep path (extra diagnostics); nvhost_module_deinit() calls
 * this with false.  Returns 0, or -EBUSY if the module never idled.
 */
int nvhost_module_suspend(struct nvhost_device *dev, bool system_suspend)
{
	int ret;
	struct nvhost_master *host = nvhost_get_host(dev);

	if (system_suspend && !is_module_idle(dev))
		debug_not_idle(host);

	ret = wait_event_timeout(dev->idle_wq, is_module_idle(dev),
			ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT);
	if (ret == 0) {
		dev_info(&dev->dev, "%s prevented suspend\n",
				dev->name);
		return -EBUSY;
	}

	if (system_suspend)
		dev_dbg(&dev->dev, "tegra_grhost: entered idle\n");

	mutex_lock(&dev->lock);
	cancel_delayed_work(&dev->powerstate_down);
	to_state_powergated_locked(dev);
	mutex_unlock(&dev->lock);

	if (dev->suspend)
		dev->suspend(dev);

	return 0;
}
454 | |||
/*
 * Tear down ACM state for @dev: run the optional deinit hook, force the
 * module idle and power gated via nvhost_module_suspend(), release all
 * clock references, and mark the power state DEINIT.
 */
void nvhost_module_deinit(struct nvhost_device *dev)
{
	int i;

	if (dev->deinit)
		dev->deinit(dev);

	nvhost_module_suspend(dev, false);
	for (i = 0; i < dev->num_clks; i++)
		clk_put(dev->clk[i]);
	dev->powerstate = NVHOST_POWER_STATE_DEINIT;
}
467 | |||
diff --git a/drivers/video/tegra/host/nvhost_acm.h b/drivers/video/tegra/host/nvhost_acm.h new file mode 100644 index 00000000000..a12c0c3fa32 --- /dev/null +++ b/drivers/video/tegra/host/nvhost_acm.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/nvhost_acm.h | ||
3 | * | ||
4 | * Tegra Graphics Host Automatic Clock Management | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
#ifndef __NVHOST_ACM_H
#define __NVHOST_ACM_H

#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/nvhost.h>

/* Sets clocks and powergating state for a module */
int nvhost_module_init(struct nvhost_device *ndev);
void nvhost_module_deinit(struct nvhost_device *dev);
int nvhost_module_suspend(struct nvhost_device *dev, bool system_suspend);

/* Hard-reset the module's partitions and peripheral clocks. */
void nvhost_module_reset(struct nvhost_device *dev);
/* Busy/idle refcounting drives automatic clock and power gating. */
void nvhost_module_busy(struct nvhost_device *dev);
void nvhost_module_idle_mult(struct nvhost_device *dev, int refs);
/* Per-client clock rate requests; resolved to the max across clients. */
int nvhost_module_add_client(struct nvhost_device *dev,
		void *priv);
void nvhost_module_remove_client(struct nvhost_device *dev,
		void *priv);
int nvhost_module_get_rate(struct nvhost_device *dev,
		unsigned long *rate,
		int index);
int nvhost_module_set_rate(struct nvhost_device *dev, void *priv,
		unsigned long rate, int index);


/* True while the module's clocks are on (state RUNNING). */
static inline bool nvhost_module_powered(struct nvhost_device *dev)
{
	return dev->powerstate == NVHOST_POWER_STATE_RUNNING;
}

/* Drop a single busy reference; see nvhost_module_idle_mult(). */
static inline void nvhost_module_idle(struct nvhost_device *dev)
{
	nvhost_module_idle_mult(dev, 1);
}


#endif
diff --git a/drivers/video/tegra/host/nvhost_cdma.c b/drivers/video/tegra/host/nvhost_cdma.c new file mode 100644 index 00000000000..775d761e65c --- /dev/null +++ b/drivers/video/tegra/host/nvhost_cdma.c | |||
@@ -0,0 +1,508 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/nvhost_cdma.c | ||
3 | * | ||
4 | * Tegra Graphics Host Command DMA | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include "nvhost_cdma.h" | ||
22 | #include "dev.h" | ||
23 | #include <asm/cacheflush.h> | ||
24 | |||
25 | #include <linux/slab.h> | ||
26 | #include <linux/kfifo.h> | ||
27 | #include <trace/events/nvhost.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | |||
30 | /* | ||
31 | * TODO: | ||
32 | * stats | ||
33 | * - for figuring out what to optimize further | ||
34 | * resizable push buffer | ||
35 | * - some channels hardly need any, some channels (3d) could use more | ||
36 | */ | ||
37 | |||
/**
 * Add an entry to the sync queue.
 * Records where the job's opcodes begin in the push buffer (first_get)
 * and how many pb slots it occupies, takes a reference on the job, and
 * appends it to the channel's sync queue for completion processing.
 * Must be called with the cdma lock held (the queue is not otherwise
 * protected).
 */
static void add_to_sync_queue(struct nvhost_cdma *cdma,
			      struct nvhost_job *job,
			      u32 nr_slots,
			      u32 first_get)
{
	BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);

	job->first_get = first_get;
	job->num_slots = nr_slots;
	/* reference dropped in update_cdma_locked() when the job retires */
	nvhost_job_get(job);
	list_add_tail(&job->list, &cdma->sync_queue);
}
53 | |||
54 | /** | ||
55 | * Return the status of the cdma's sync queue or push buffer for the given event | ||
56 | * - sq empty: returns 1 for empty, 0 for not empty (as in "1 empty queue" :-) | ||
57 | * - pb space: returns the number of free slots in the channel's push buffer | ||
58 | * Must be called with the cdma lock held. | ||
59 | */ | ||
60 | static unsigned int cdma_status_locked(struct nvhost_cdma *cdma, | ||
61 | enum cdma_event event) | ||
62 | { | ||
63 | switch (event) { | ||
64 | case CDMA_EVENT_SYNC_QUEUE_EMPTY: | ||
65 | return list_empty(&cdma->sync_queue) ? 1 : 0; | ||
66 | case CDMA_EVENT_PUSH_BUFFER_SPACE: { | ||
67 | struct push_buffer *pb = &cdma->push_buffer; | ||
68 | BUG_ON(!cdma_pb_op(cdma).space); | ||
69 | return cdma_pb_op(cdma).space(pb); | ||
70 | } | ||
71 | default: | ||
72 | return 0; | ||
73 | } | ||
74 | } | ||
75 | |||
/**
 * Sleep (if necessary) until the requested event happens
 *  - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
 *    - Returns 1
 *  - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
 *    - Return the amount of space (> 0)
 * Must be called with the cdma lock held.  The lock is dropped while
 * blocking on cdma->sem and re-taken before the status is re-checked,
 * so other cdma state may change across iterations.
 */
unsigned int nvhost_cdma_wait_locked(struct nvhost_cdma *cdma,
				enum cdma_event event)
{
	for (;;) {
		unsigned int space = cdma_status_locked(cdma, event);
		if (space)
			return space;

		trace_nvhost_wait_cdma(cdma_to_channel(cdma)->dev->name,
				event);

		/* only one waiter at a time is supported: the event slot
		 * must be free before we claim it */
		BUG_ON(cdma->event != CDMA_EVENT_NONE);
		cdma->event = event;

		mutex_unlock(&cdma->lock);
		down(&cdma->sem);	/* signalled by update_cdma_locked() */
		mutex_lock(&cdma->lock);
	}
	return 0;	/* not reached; keeps the compiler's return check happy */
}
104 | |||
/**
 * Start the timeout timer for a buffer submission that has not yet
 * completed.  No-op if a timer is already running for this channel:
 * only the oldest pending submit is tracked at a time.
 * Must be called with the cdma lock held.
 */
static void cdma_start_timer_locked(struct nvhost_cdma *cdma,
		struct nvhost_job *job)
{
	BUG_ON(!job);
	if (cdma->timeout.clientid) {
		/* timer already started */
		return;
	}

	/* remember which client/context/syncpt the timer is covering */
	cdma->timeout.ctx = job->hwctx;
	cdma->timeout.clientid = job->clientid;
	cdma->timeout.syncpt_id = job->syncpt_id;
	cdma->timeout.syncpt_val = job->syncpt_end;
	cdma->timeout.start_ktime = ktime_get();

	schedule_delayed_work(&cdma->timeout.wq,
			msecs_to_jiffies(job->timeout));
}
127 | |||
/**
 * Stop the timeout timer when a buffer submission completes.
 * Clears the tracked context/client so a later submit can start a
 * fresh timer.  Must be called with the cdma lock held.
 */
static void stop_cdma_timer_locked(struct nvhost_cdma *cdma)
{
	cancel_delayed_work(&cdma->timeout.wq);
	cdma->timeout.ctx = NULL;
	cdma->timeout.clientid = 0;	/* 0 == no timer pending */
}
138 | |||
/**
 * For all sync queue entries that have already finished according to the
 * current sync point registers:
 *  - unpin & unref their mems
 *  - pop their push buffer slots
 *  - remove them from the sync queue
 * This is normally called from the host code's worker thread, but can be
 * called manually if necessary.
 * Must be called with the cdma lock held.
 */
static void update_cdma_locked(struct nvhost_cdma *cdma)
{
	bool signal = false;
	struct nvhost_master *dev = cdma_to_dev(cdma);
	struct nvhost_syncpt *sp = &dev->syncpt;
	struct nvhost_job *job, *n;

	BUG_ON(!cdma->running);

	/*
	 * Walk the sync queue, reading the sync point registers as necessary,
	 * to consume as many sync queue entries as possible without blocking
	 */
	list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
		BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);

		/* Check whether this syncpt has completed, and bail if not */
		if (!nvhost_syncpt_is_expired(sp,
				job->syncpt_id, job->syncpt_end)) {
			/* Start timer on next pending syncpt */
			if (job->timeout)
				cdma_start_timer_locked(cdma, job);
			break;	/* queue is ordered; nothing later is done */
		}

		/* Cancel timeout, when a buffer completes */
		if (cdma->timeout.clientid)
			stop_cdma_timer_locked(cdma);

		/* Unpin the memory */
		nvhost_job_unpin(job);

		/* Pop push buffer slots */
		if (job->num_slots) {
			struct push_buffer *pb = &cdma->push_buffer;
			BUG_ON(!cdma_pb_op(cdma).pop_from);
			cdma_pb_op(cdma).pop_from(pb, job->num_slots);
			/* a producer may be blocked waiting for pb space */
			if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
				signal = true;
		}

		/* drop the reference taken in add_to_sync_queue() */
		list_del(&job->list);
		nvhost_job_put(job);
	}

	if (list_empty(&cdma->sync_queue) &&
				cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
		signal = true;

	/* Wake up CdmaWait() if the requested event happened */
	if (signal) {
		cdma->event = CDMA_EVENT_NONE;
		up(&cdma->sem);
	}
}
204 | |||
/**
 * Timeout-recovery path, run after the channel has been torn down:
 * make the timed-out client's pending work appear complete (its syncpt
 * increments are performed by the CPU, or patched into the pushbuffer
 * for interleaved contexts) and restart DMA at the right place for the
 * remaining work.
 * NOTE(review): appears to require the cdma lock held by the caller,
 * like the other sync-queue walkers — confirm against the teardown
 * caller.
 */
void nvhost_cdma_update_sync_queue(struct nvhost_cdma *cdma,
		struct nvhost_syncpt *syncpt, struct device *dev)
{
	u32 get_restart;
	u32 syncpt_incrs;
	bool exec_ctxsave;
	struct nvhost_job *job = NULL;
	u32 syncpt_val;

	syncpt_val = nvhost_syncpt_update_min(syncpt, cdma->timeout.syncpt_id);

	dev_dbg(dev,
		"%s: starting cleanup (thresh %d)\n",
		__func__, syncpt_val);

	/*
	 * Move the sync_queue read pointer to the first entry that hasn't
	 * completed based on the current HW syncpt value. It's likely there
	 * won't be any (i.e. we're still at the head), but covers the case
	 * where a syncpt incr happens just prior/during the teardown.
	 */

	dev_dbg(dev,
		"%s: skip completed buffers still in sync_queue\n",
		__func__);

	list_for_each_entry(job, &cdma->sync_queue, list) {
		if (syncpt_val < job->syncpt_end)
			break;	/* 'job' now points at the first live entry */

		nvhost_job_dump(dev, job);
	}

	/*
	 * Walk the sync_queue, first incrementing with the CPU syncpts that
	 * are partially executed (the first buffer) or fully skipped while
	 * still in the current context (slots are also NOP-ed).
	 *
	 * At the point contexts are interleaved, syncpt increments must be
	 * done inline with the pushbuffer from a GATHER buffer to maintain
	 * the order (slots are modified to be a GATHER of syncpt incrs).
	 *
	 * Note: save in get_restart the location where the timed out buffer
	 * started in the PB, so we can start the refetch from there (with the
	 * modified NOP-ed PB slots). This lets things appear to have completed
	 * properly for this buffer and resources are freed.
	 */

	dev_dbg(dev,
		"%s: perform CPU incr on pending same ctx buffers\n",
		__func__);

	get_restart = cdma->last_put;
	/* NOTE(review): if the skip loop above ran to completion without
	 * breaking, 'job' is the list-head container, not a real entry;
	 * this read relies on at least one incomplete job remaining in a
	 * non-empty queue — verify against the teardown callers. */
	if (!list_empty(&cdma->sync_queue))
		get_restart = job->first_get;

	/* do CPU increments as long as this context continues */
	list_for_each_entry_from(job, &cdma->sync_queue, list) {
		/* different context, gets us out of this loop */
		if (job->clientid != cdma->timeout.clientid)
			break;

		/* won't need a timeout when replayed */
		job->timeout = 0;

		syncpt_incrs = job->syncpt_end - syncpt_val;
		dev_dbg(dev,
			"%s: CPU incr (%d)\n", __func__, syncpt_incrs);

		nvhost_job_dump(dev, job);

		/* safe to use CPU to incr syncpts */
		cdma_op(cdma).timeout_cpu_incr(cdma,
				job->first_get,
				syncpt_incrs,
				job->syncpt_end,
				job->num_slots);

		syncpt_val += syncpt_incrs;
	}

	dev_dbg(dev,
		"%s: GPU incr blocked interleaved ctx buffers\n",
		__func__);

	exec_ctxsave = false;

	/* setup GPU increments */
	list_for_each_entry_from(job, &cdma->sync_queue, list) {
		/* same context, increment in the pushbuffer */
		if (job->clientid == cdma->timeout.clientid) {
			/* won't need a timeout when replayed */
			job->timeout = 0;

			/* update buffer's syncpts in the pushbuffer */
			cdma_op(cdma).timeout_pb_incr(cdma,
					job->first_get,
					job->syncpt_incrs,
					job->num_slots,
					exec_ctxsave);

			exec_ctxsave = false;
		} else {
			dev_dbg(dev,
				"%s: switch to a different userctx\n",
				__func__);
			/*
			 * If previous context was the timed out context
			 * then clear its CTXSAVE in this slot.
			 */
			exec_ctxsave = true;
		}

		nvhost_job_dump(dev, job);
	}

	dev_dbg(dev,
		"%s: finished sync_queue modification\n", __func__);

	/* roll back DMAGET and start up channel again */
	cdma_op(cdma).timeout_teardown_end(cdma, get_restart);

	/* mark the offending context so later submits from it are refused */
	if (cdma->timeout.ctx)
		cdma->timeout.ctx->has_timedout = true;
}
330 | |||
331 | /** | ||
332 | * Create a cdma | ||
333 | */ | ||
334 | int nvhost_cdma_init(struct nvhost_cdma *cdma) | ||
335 | { | ||
336 | int err; | ||
337 | struct push_buffer *pb = &cdma->push_buffer; | ||
338 | BUG_ON(!cdma_pb_op(cdma).init); | ||
339 | mutex_init(&cdma->lock); | ||
340 | sema_init(&cdma->sem, 0); | ||
341 | |||
342 | INIT_LIST_HEAD(&cdma->sync_queue); | ||
343 | |||
344 | cdma->event = CDMA_EVENT_NONE; | ||
345 | cdma->running = false; | ||
346 | cdma->torndown = false; | ||
347 | |||
348 | err = cdma_pb_op(cdma).init(pb); | ||
349 | if (err) | ||
350 | return err; | ||
351 | return 0; | ||
352 | } | ||
353 | |||
/**
 * Destroy a cdma
 * Tears down the push buffer and timeout state.  The cdma must already
 * be stopped (not running) when this is called.
 */
void nvhost_cdma_deinit(struct nvhost_cdma *cdma)
{
	struct push_buffer *pb = &cdma->push_buffer;

	BUG_ON(!cdma_pb_op(cdma).destroy);
	BUG_ON(cdma->running);
	cdma_pb_op(cdma).destroy(pb);
	cdma_op(cdma).timeout_destroy(cdma);
}
366 | |||
/**
 * Begin a cdma submit
 * Takes the cdma lock; on success the lock remains held and is released
 * by nvhost_cdma_end().  Lazily initializes timeout state on the first
 * timed submit, and starts command DMA if it is not yet running.
 * Returns 0 on success (lock held) or a negative error (lock released).
 */
int nvhost_cdma_begin(struct nvhost_cdma *cdma, struct nvhost_job *job)
{
	mutex_lock(&cdma->lock);

	if (job->timeout) {
		/* init state on first submit with timeout value */
		if (!cdma->timeout.initialized) {
			int err;
			BUG_ON(!cdma_op(cdma).timeout_init);
			err = cdma_op(cdma).timeout_init(cdma,
					job->syncpt_id);
			if (err) {
				mutex_unlock(&cdma->lock);
				return err;
			}
		}
	}
	if (!cdma->running) {
		BUG_ON(!cdma_op(cdma).start);
		cdma_op(cdma).start(cdma);
	}
	/* reset per-submit slot accounting; first_get marks where
	 * this submit's opcodes begin in the push buffer */
	cdma->slots_free = 0;
	cdma->slots_used = 0;
	cdma->first_get = cdma_pb_op(cdma).putptr(&cdma->push_buffer);
	return 0;
}
396 | |||
/**
 * Push two words into a push buffer slot
 * Blocks as necessary if the push buffer is full.
 * Convenience wrapper around nvhost_cdma_push_gather() with no nvmap
 * client/handle attached to the opcode pair.
 */
void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2)
{
	nvhost_cdma_push_gather(cdma, NULL, NULL, op1, op2);
}
405 | |||
/**
 * Push two words into a push buffer slot, recording the nvmap
 * client/handle pair (both may be NULL) referenced by the words.
 * Blocks as necessary if the push buffer is full.
 * Caller must hold the cdma lock (taken by nvhost_cdma_begin()).
 */
void nvhost_cdma_push_gather(struct nvhost_cdma *cdma,
		struct nvmap_client *client,
		struct nvmap_handle *handle, u32 op1, u32 op2)
{
	u32 slots_free = cdma->slots_free;
	struct push_buffer *pb = &cdma->push_buffer;
	BUG_ON(!cdma_pb_op(cdma).push_to);
	BUG_ON(!cdma_op(cdma).kick);
	if (slots_free == 0) {
		/* start DMA on what is queued so far, then sleep until
		 * completed work frees up push buffer space */
		cdma_op(cdma).kick(cdma);
		slots_free = nvhost_cdma_wait_locked(cdma,
				CDMA_EVENT_PUSH_BUFFER_SPACE);
	}
	cdma->slots_free = slots_free - 1;
	cdma->slots_used++;
	cdma_pb_op(cdma).push_to(pb, client, handle, op1, op2);
}
427 | |||
/**
 * End a cdma submit
 * Kick off DMA, add job to the sync queue, and a number of slots to be freed
 * from the pushbuffer. The handles for a submit must all be pinned at the same
 * time, but they can be unpinned in smaller chunks.
 * Releases the cdma lock taken by nvhost_cdma_begin().
 */
void nvhost_cdma_end(struct nvhost_cdma *cdma,
		struct nvhost_job *job)
{
	/* sampled before the job is queued, to detect the transition */
	bool was_idle = list_empty(&cdma->sync_queue);

	BUG_ON(!cdma_op(cdma).kick);
	cdma_op(cdma).kick(cdma);

	BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);

	add_to_sync_queue(cdma,
			job,
			cdma->slots_used,
			cdma->first_get);

	/* start timer on idle -> active transitions */
	if (job->timeout && was_idle)
		cdma_start_timer_locked(cdma, job);

	mutex_unlock(&cdma->lock);
}
455 | |||
/**
 * Update cdma state according to current sync point values
 * Locked wrapper around update_cdma_locked().
 */
void nvhost_cdma_update(struct nvhost_cdma *cdma)
{
	mutex_lock(&cdma->lock);
	update_cdma_locked(cdma);
	mutex_unlock(&cdma->lock);
}
465 | |||
466 | /** | ||
467 | * Wait for push buffer to be empty. | ||
468 | * @cdma pointer to channel cdma | ||
469 | * @timeout timeout in ms | ||
470 | * Returns -ETIME if timeout was reached, zero if push buffer is empty. | ||
471 | */ | ||
472 | int nvhost_cdma_flush(struct nvhost_cdma *cdma, int timeout) | ||
473 | { | ||
474 | unsigned int space, err = 0; | ||
475 | unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout); | ||
476 | |||
477 | /* | ||
478 | * Wait for at most timeout ms. Recalculate timeout at each iteration | ||
479 | * to better keep within given timeout. | ||
480 | */ | ||
481 | while(!err && time_before(jiffies, end_jiffies)) { | ||
482 | int timeout_jiffies = end_jiffies - jiffies; | ||
483 | |||
484 | mutex_lock(&cdma->lock); | ||
485 | space = cdma_status_locked(cdma, | ||
486 | CDMA_EVENT_SYNC_QUEUE_EMPTY); | ||
487 | if (space) { | ||
488 | mutex_unlock(&cdma->lock); | ||
489 | return 0; | ||
490 | } | ||
491 | |||
492 | /* | ||
493 | * Wait for sync queue to become empty. If there is already | ||
494 | * an event pending, we need to poll. | ||
495 | */ | ||
496 | if (cdma->event != CDMA_EVENT_NONE) { | ||
497 | mutex_unlock(&cdma->lock); | ||
498 | schedule(); | ||
499 | } else { | ||
500 | cdma->event = CDMA_EVENT_SYNC_QUEUE_EMPTY; | ||
501 | |||
502 | mutex_unlock(&cdma->lock); | ||
503 | err = down_timeout(&cdma->sem, | ||
504 | jiffies_to_msecs(timeout_jiffies)); | ||
505 | } | ||
506 | } | ||
507 | return err; | ||
508 | } | ||
diff --git a/drivers/video/tegra/host/nvhost_cdma.h b/drivers/video/tegra/host/nvhost_cdma.h new file mode 100644 index 00000000000..9cb9b827725 --- /dev/null +++ b/drivers/video/tegra/host/nvhost_cdma.h | |||
@@ -0,0 +1,133 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/nvhost_cdma.h | ||
3 | * | ||
4 | * Tegra Graphics Host Command DMA | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #ifndef __NVHOST_CDMA_H | ||
22 | #define __NVHOST_CDMA_H | ||
23 | |||
24 | #include <linux/sched.h> | ||
25 | #include <linux/semaphore.h> | ||
26 | |||
27 | #include <linux/nvhost.h> | ||
28 | #include <mach/nvmap.h> | ||
29 | #include <linux/list.h> | ||
30 | |||
31 | #include "nvhost_acm.h" | ||
32 | |||
33 | struct nvhost_syncpt; | ||
34 | struct nvhost_userctx_timeout; | ||
35 | struct nvhost_job; | ||
36 | |||
/*
 * cdma
 *
 * This is in charge of a host command DMA channel.
 * Sends ops to a push buffer, and takes responsibility for unpinning
 * (& possibly freeing) of memory after those ops have completed.
 * Producer:
 *	begin
 *	push - send ops to the push buffer
 *	end - start command DMA and enqueue handles to be unpinned
 * Consumer:
 *	update - call to update sync queue and push buffer, unpin memory
 */

/* nvmap client/handle pair recorded for a pushed opcode pair */
struct nvmap_client_handle {
	struct nvmap_client *client;
	struct nvmap_handle *handle;
};

struct push_buffer {
	struct nvmap_handle_ref *mem;	/* handle to pushbuffer memory */
	u32 *mapped;			/* mapped pushbuffer memory */
	u32 phys;			/* physical address of pushbuffer */
	u32 fence;			/* index we've written */
	u32 cur;			/* index to write to */
	struct nvmap_client_handle *nvmap;
					/* nvmap handle for each opcode pair */
};

struct syncpt_buffer {
	struct nvmap_handle_ref *mem;	/* handle to pushbuffer memory */
	u32 *mapped;		/* mapped gather buffer (at channel offset) */
	u32 phys;		/* physical address (at channel offset) */
	u32 incr_per_buffer;	/* max # of incrs per GATHER */
	u32 words_per_incr;	/* # of DWORDS in buffer to incr a syncpt */
};

/* state for detecting and recovering from a timed-out submit */
struct buffer_timeout {
	struct delayed_work wq;		/* work queue */
	bool initialized;		/* timer one-time setup flag */
	u32 syncpt_id;			/* buffer completion syncpt id */
	u32 syncpt_val;			/* syncpt value when completed */
	ktime_t start_ktime;		/* starting time */
	/* context timeout information */
	struct nvhost_hwctx *ctx;
	int clientid;
};

/* events a waiter may block on in nvhost_cdma_wait_locked() */
enum cdma_event {
	CDMA_EVENT_NONE,		/* not waiting for any event */
	CDMA_EVENT_SYNC_QUEUE_EMPTY,	/* wait for empty sync queue */
	CDMA_EVENT_PUSH_BUFFER_SPACE	/* wait for space in push buffer */
};

struct nvhost_cdma {
	struct mutex lock;		/* controls access to shared state */
	struct semaphore sem;		/* signalled when event occurs */
	enum cdma_event event;		/* event that sem is waiting for */
	unsigned int slots_used;	/* pb slots used in current submit */
	unsigned int slots_free;	/* pb slots free in current submit */
	unsigned int first_get;		/* DMAGET value, where submit begins */
	unsigned int last_put;		/* last value written to DMAPUT */
	struct push_buffer push_buffer;	/* channel's push buffer */
	struct syncpt_buffer syncpt_buffer; /* syncpt incr buffer */
	struct list_head sync_queue;	/* job queue */
	struct buffer_timeout timeout;	/* channel's timeout state/wq */
	bool running;
	bool torndown;
};

/* navigate from a cdma (or push buffer) to enclosing objects and the
 * chip-specific operation tables */
#define cdma_to_channel(cdma) container_of(cdma, struct nvhost_channel, cdma)
#define cdma_to_dev(cdma) nvhost_get_host(cdma_to_channel(cdma)->dev)
#define cdma_op(cdma) (cdma_to_dev(cdma)->op.cdma)
#define cdma_to_nvmap(cdma) ((cdma_to_dev(cdma))->nvmap)
#define pb_to_cdma(pb) container_of(pb, struct nvhost_cdma, push_buffer)
#define cdma_pb_op(cdma) (cdma_to_dev(cdma)->op.push_buffer)

int nvhost_cdma_init(struct nvhost_cdma *cdma);
void nvhost_cdma_deinit(struct nvhost_cdma *cdma);
void nvhost_cdma_stop(struct nvhost_cdma *cdma);
int nvhost_cdma_begin(struct nvhost_cdma *cdma, struct nvhost_job *job);
void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2);
#define NVHOST_CDMA_PUSH_GATHER_CTXSAVE 0xffffffff
void nvhost_cdma_push_gather(struct nvhost_cdma *cdma,
		struct nvmap_client *client,
		struct nvmap_handle *handle, u32 op1, u32 op2);
void nvhost_cdma_end(struct nvhost_cdma *cdma,
		struct nvhost_job *job);
void nvhost_cdma_update(struct nvhost_cdma *cdma);
int nvhost_cdma_flush(struct nvhost_cdma *cdma, int timeout);
void nvhost_cdma_peek(struct nvhost_cdma *cdma,
		u32 dmaget, int slot, u32 *out);
unsigned int nvhost_cdma_wait_locked(struct nvhost_cdma *cdma,
		enum cdma_event event);
void nvhost_cdma_update_sync_queue(struct nvhost_cdma *cdma,
		struct nvhost_syncpt *syncpt, struct device *dev);
133 | #endif | ||
diff --git a/drivers/video/tegra/host/nvhost_channel.c b/drivers/video/tegra/host/nvhost_channel.c new file mode 100644 index 00000000000..a7c03308134 --- /dev/null +++ b/drivers/video/tegra/host/nvhost_channel.c | |||
@@ -0,0 +1,158 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/nvhost_channel.c | ||
3 | * | ||
4 | * Tegra Graphics Host Channel | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include "nvhost_channel.h" | ||
22 | #include "dev.h" | ||
23 | #include "nvhost_job.h" | ||
24 | #include <trace/events/nvhost.h> | ||
25 | #include <linux/nvhost_ioctl.h> | ||
26 | #include <linux/slab.h> | ||
27 | |||
28 | #include <linux/platform_device.h> | ||
29 | |||
30 | #define NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT 50 | ||
31 | |||
/**
 * Initialize a channel: run the chip-specific channel init, link the
 * nvhost_device to the channel, then claim and map the device's
 * register aperture (skipped when moduleid is NVHOST_MODULE_NONE).
 * Returns 0 on success, negative error code on failure.
 */
int nvhost_channel_init(struct nvhost_channel *ch,
		struct nvhost_master *dev, int index)
{
	int err;
	struct nvhost_device *ndev;
	struct resource *r = NULL;
	void __iomem *regs = NULL;
	struct resource *reg_mem = NULL;

	/* Link nvhost_device to nvhost_channel */
	err = host_channel_op(dev).init(ch, dev, index);
	if (err < 0) {
		dev_err(&dev->dev->dev, "failed to init channel %d\n",
			index);
		return err;
	}
	ndev = ch->dev;
	ndev->channel = ch;

	/* Map IO memory related to nvhost_device */
	if (ndev->moduleid != NVHOST_MODULE_NONE) {
		/* First one is host1x - skip that */
		r = nvhost_get_resource(dev->dev,
				IORESOURCE_MEM, ndev->moduleid + 1);
		if (!r)
			goto fail;

		reg_mem = request_mem_region(r->start,
				resource_size(r), ndev->name);
		if (!reg_mem)
			goto fail;

		regs = ioremap(r->start, resource_size(r));
		if (!regs)
			goto fail;

		ndev->reg_mem = reg_mem;
		ndev->aperture = regs;
	}
	return 0;

fail:
	/* regs is always NULL on this path (ioremap failure is the last
	 * goto), so the iounmap below is defensive only */
	if (reg_mem)
		release_mem_region(r->start, resource_size(r));
	if (regs)
		iounmap(regs);
	dev_err(&ndev->dev, "failed to get register memory\n");
	return -ENXIO;

}
82 | |||
/**
 * Submit a job through the channel's chip-specific submit op.
 * Low priority submits wait until sync queue is empty. Ignores result
 * from nvhost_cdma_flush, as we submit either when push buffer is
 * empty or when we reach the timeout.
 */
int nvhost_channel_submit(struct nvhost_job *job)
{
	if (job->priority < NVHOST_PRIORITY_MEDIUM)
		(void)nvhost_cdma_flush(&job->ch->cdma,
				NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT);

	return channel_op(job->ch).submit(job);
}
94 | |||
/**
 * Take a reference on a channel.  On the first open, run the device's
 * init hook and set up the channel's cdma; fail with -EBUSY (returned
 * as NULL) if the device is exclusive and already open.
 * Returns the channel on success, NULL on failure.
 */
struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch)
{
	int err = 0;
	mutex_lock(&ch->reflock);
	if (ch->refcount == 0) {
		if (ch->dev->init)
			ch->dev->init(ch->dev);
		err = nvhost_cdma_init(&ch->cdma);
	} else if (ch->dev->exclusive) {
		err = -EBUSY;
	}
	if (!err)
		ch->refcount++;

	mutex_unlock(&ch->reflock);

	/* Keep alive modules that needs to be when a channel is open */
	if (!err && ch->dev->keepalive)
		nvhost_module_busy(ch->dev);

	return err ? NULL : ch;
}
117 | |||
/**
 * Drop a reference on a channel.  If @ctx is the channel's current
 * context, clear it first.  On the last put, stop the channel's cdma,
 * tear it down and suspend the module.
 */
void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx)
{
	BUG_ON(!channel_cdma_op(ch).stop);

	if (ctx) {
		mutex_lock(&ch->submitlock);
		if (ch->cur_ctx == ctx)
			ch->cur_ctx = NULL;
		mutex_unlock(&ch->submitlock);
	}

	/* Allow keep-alive'd module to be turned off */
	if (ch->dev->keepalive)
		nvhost_module_idle(ch->dev);

	mutex_lock(&ch->reflock);
	if (ch->refcount == 1) {
		/* last reference: quiesce and tear down the channel */
		channel_cdma_op(ch).stop(&ch->cdma);
		nvhost_cdma_deinit(&ch->cdma);
		nvhost_module_suspend(ch->dev, false);
	}
	ch->refcount--;
	mutex_unlock(&ch->reflock);
}
142 | |||
143 | int nvhost_channel_suspend(struct nvhost_channel *ch) | ||
144 | { | ||
145 | int ret = 0; | ||
146 | |||
147 | mutex_lock(&ch->reflock); | ||
148 | BUG_ON(!channel_cdma_op(ch).stop); | ||
149 | |||
150 | if (ch->refcount) { | ||
151 | ret = nvhost_module_suspend(ch->dev, false); | ||
152 | if (!ret) | ||
153 | channel_cdma_op(ch).stop(&ch->cdma); | ||
154 | } | ||
155 | mutex_unlock(&ch->reflock); | ||
156 | |||
157 | return ret; | ||
158 | } | ||
diff --git a/drivers/video/tegra/host/nvhost_channel.h b/drivers/video/tegra/host/nvhost_channel.h new file mode 100644 index 00000000000..7b946c8ee85 --- /dev/null +++ b/drivers/video/tegra/host/nvhost_channel.h | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/nvhost_channel.h | ||
3 | * | ||
4 | * Tegra Graphics Host Channel | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #ifndef __NVHOST_CHANNEL_H | ||
22 | #define __NVHOST_CHANNEL_H | ||
23 | |||
24 | #include "nvhost_cdma.h" | ||
25 | #include "nvhost_acm.h" | ||
26 | #include "nvhost_hwctx.h" | ||
27 | #include "nvhost_job.h" | ||
28 | |||
29 | #include <linux/cdev.h> | ||
30 | #include <linux/io.h> | ||
31 | |||
32 | #define NVHOST_MAX_WAIT_CHECKS 256 | ||
33 | #define NVHOST_MAX_GATHERS 512 | ||
34 | #define NVHOST_MAX_HANDLES 1280 | ||
35 | #define NVHOST_MAX_POWERGATE_IDS 2 | ||
36 | |||
37 | struct nvhost_master; | ||
38 | struct nvhost_waitchk; | ||
39 | struct nvhost_device; | ||
40 | |||
/* a single gathered command buffer within a submit */
struct nvhost_channel_gather {
	u32 words;		/* gather length in words */
	phys_addr_t mem;	/* physical address of the buffer */
	u32 mem_id;		/* presumably an nvmap handle id — confirm */
	int offset;		/* byte offset into the backing memory */
};

struct nvhost_channel {
	int refcount;		/* open count, guarded by reflock */
	int chid;		/* channel index */
	u32 syncpt_id;
	struct mutex reflock;	/* protects refcount / init / teardown */
	struct mutex submitlock; /* protects cur_ctx across submits */
	void __iomem *aperture;	/* mapped register space (see channel_init) */
	struct nvhost_hwctx *cur_ctx;
	struct device *node;
	struct nvhost_device *dev;
	struct cdev cdev;
	struct nvhost_hwctx_handler *ctxhandler;
	struct nvhost_cdma cdma;
};

int nvhost_channel_init(
	struct nvhost_channel *ch,
	struct nvhost_master *dev, int index);

int nvhost_channel_submit(struct nvhost_job *job);

struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch);
void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx);
int nvhost_channel_suspend(struct nvhost_channel *ch);

/* reach the chip-specific operation tables from a channel or host */
#define channel_cdma_op(ch) (nvhost_get_host(ch->dev)->op.cdma)
#define channel_op(ch) (nvhost_get_host(ch->dev)->op.channel)
#define host_channel_op(host) (host->op.channel)

int nvhost_channel_drain_read_fifo(void __iomem *chan_regs,
			u32 *ptr, unsigned int count, unsigned int *pending);

int nvhost_channel_read_3d_reg(
	struct nvhost_channel *channel,
	struct nvhost_hwctx *hwctx,
	u32 offset,
	u32 *value);
85 | |||
86 | #endif | ||
diff --git a/drivers/video/tegra/host/nvhost_hwctx.h b/drivers/video/tegra/host/nvhost_hwctx.h new file mode 100644 index 00000000000..02a3976f01c --- /dev/null +++ b/drivers/video/tegra/host/nvhost_hwctx.h | |||
@@ -0,0 +1,67 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/nvhost_hwctx.h | ||
3 | * | ||
4 | * Tegra Graphics Host Hardware Context Interface | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #ifndef __NVHOST_HWCTX_H | ||
22 | #define __NVHOST_HWCTX_H | ||
23 | |||
24 | #include <linux/string.h> | ||
25 | #include <linux/kref.h> | ||
26 | |||
27 | #include <linux/nvhost.h> | ||
28 | #include <mach/nvmap.h> | ||
29 | |||
30 | struct nvhost_channel; | ||
31 | struct nvhost_cdma; | ||
32 | |||
/* A saved hardware (graphics) context, refcounted via 'ref'. */
struct nvhost_hwctx {
	struct kref ref;			/* lifetime; released via h->put */
	struct nvhost_hwctx_handler *h;		/* ops that manage this context */
	struct nvhost_channel *channel;		/* channel the context belongs to */
	bool valid;				/* context holds a usable save */
	bool has_timedout;			/* channel timed out while owning it */
};
40 | |||
/* Ops vector for allocating and saving/restoring hardware contexts. */
struct nvhost_hwctx_handler {
	/* allocate a new context for channel 'ch' */
	struct nvhost_hwctx * (*alloc) (struct nvhost_hwctx_handler *h,
			struct nvhost_channel *ch);
	void (*get) (struct nvhost_hwctx *ctx);		/* take a reference */
	void (*put) (struct nvhost_hwctx *ctx);		/* drop a reference */
	/* push context-save commands into the channel's cdma */
	void (*save_push) (struct nvhost_hwctx *ctx,
			struct nvhost_cdma *cdma);
	/* post-save service work (run from interrupt path, see action_ctxsave) */
	void (*save_service) (struct nvhost_hwctx *ctx);
	void *priv;					/* handler-private data */
};
51 | |||
52 | |||
/* Descriptor for a range of registers captured in a context save. */
struct hwctx_reginfo {
	unsigned int offset:12;	/* register offset */
	unsigned int count:16;	/* number of registers */
	unsigned int type:2;	/* one of HWCTX_REGINFO_* below */
};

/* How the register range is addressed during save/restore. */
enum {
	HWCTX_REGINFO_DIRECT = 0,
	HWCTX_REGINFO_INDIRECT,
	HWCTX_REGINFO_INDIRECT_4X
};

/* Convenience initializer: HWCTX_REGINFO(0x10, 4, DIRECT) */
#define HWCTX_REGINFO(offset, count, type) {offset, count, HWCTX_REGINFO_##type}
66 | |||
67 | #endif | ||
diff --git a/drivers/video/tegra/host/nvhost_intr.c b/drivers/video/tegra/host/nvhost_intr.c new file mode 100644 index 00000000000..7c4bdc7bafb --- /dev/null +++ b/drivers/video/tegra/host/nvhost_intr.c | |||
@@ -0,0 +1,428 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/nvhost_intr.c | ||
3 | * | ||
4 | * Tegra Graphics Host Interrupt Management | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include "nvhost_intr.h" | ||
22 | #include "dev.h" | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <trace/events/nvhost.h> | ||
27 | |||
28 | |||
29 | |||
30 | |||
31 | |||
32 | /*** Wait list management ***/ | ||
33 | |||
/* One pending action waiting for a sync point to reach a threshold. */
struct nvhost_waitlist {
	struct list_head list;		/* on syncpt wait_head or a completed list */
	struct kref refcount;		/* waiter may be shared with a canceller */
	u32 thresh;			/* sync point value being waited for */
	enum nvhost_intr_action action;	/* what to do on completion */
	atomic_t state;			/* enum waitlist_state, see below */
	void *data;			/* action-specific payload */
	int count;			/* folded submit-completes (see
					 * remove_completed_waiters()) */
};

/*
 * Waiter lifecycle.  Normal path: PENDING -> REMOVED (taken off the wait
 * queue by remove_completed_waiters()) -> HANDLED (after its handler ran).
 * Cancellation path: PENDING -> CANCELLED (nvhost_intr_put_ref()) ->
 * HANDLED.  Transitions use atomic_inc_return()/atomic_cmpxchg(), so the
 * enum order below is part of the protocol — do not reorder.
 */
enum waitlist_state {
	WLS_PENDING,
	WLS_REMOVED,
	WLS_CANCELLED,
	WLS_HANDLED
};
50 | |||
51 | static void waiter_release(struct kref *kref) | ||
52 | { | ||
53 | kfree(container_of(kref, struct nvhost_waitlist, refcount)); | ||
54 | } | ||
55 | |||
56 | /** | ||
57 | * add a waiter to a waiter queue, sorted by threshold | ||
58 | * returns true if it was added at the head of the queue | ||
59 | */ | ||
60 | static bool add_waiter_to_queue(struct nvhost_waitlist *waiter, | ||
61 | struct list_head *queue) | ||
62 | { | ||
63 | struct nvhost_waitlist *pos; | ||
64 | u32 thresh = waiter->thresh; | ||
65 | |||
66 | list_for_each_entry_reverse(pos, queue, list) | ||
67 | if ((s32)(pos->thresh - thresh) <= 0) { | ||
68 | list_add(&waiter->list, &pos->list); | ||
69 | return false; | ||
70 | } | ||
71 | |||
72 | list_add(&waiter->list, queue); | ||
73 | return true; | ||
74 | } | ||
75 | |||
/**
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 *
 * A waiter is complete once the sync point value @sync has reached its
 * threshold (wrap-safe signed comparison).  Caller holds the syncpt lock.
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
			struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
	struct list_head *dest;
	struct nvhost_waitlist *waiter, *next, *prev;

	list_for_each_entry_safe(waiter, next, head, list) {
		/* queue is sorted: stop at the first still-pending waiter */
		if ((s32)(waiter->thresh - sync) > 0)
			break;

		dest = completed + waiter->action;

		/* consolidate submit cleanups */
		if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
			&& !list_empty(dest)) {
			prev = list_entry(dest->prev,
					struct nvhost_waitlist, list);
			if (prev->data == waiter->data) {
				/* same channel: fold into the previous
				 * waiter and drop this one (dest = NULL) */
				prev->count++;
				dest = NULL;
			}
		}

		/* PENDING->REMOVED or CANCELLED->HANDLED */
		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
			/* cancelled or consolidated: free here */
			list_del(&waiter->list);
			kref_put(&waiter->refcount, waiter_release);
		} else {
			/* hand off to run_handlers() via the completed list */
			list_move_tail(&waiter->list, dest);
		}
	}
}
112 | |||
113 | void reset_threshold_interrupt(struct nvhost_intr *intr, | ||
114 | struct list_head *head, | ||
115 | unsigned int id) | ||
116 | { | ||
117 | u32 thresh = list_first_entry(head, | ||
118 | struct nvhost_waitlist, list)->thresh; | ||
119 | BUG_ON(!(intr_op(intr).set_syncpt_threshold && | ||
120 | intr_op(intr).enable_syncpt_intr)); | ||
121 | |||
122 | intr_op(intr).set_syncpt_threshold(intr, id, thresh); | ||
123 | intr_op(intr).enable_syncpt_intr(intr, id); | ||
124 | } | ||
125 | |||
126 | |||
/* NVHOST_INTR_ACTION_SUBMIT_COMPLETE handler: 'data' is the channel. */
static void action_submit_complete(struct nvhost_waitlist *waiter)
{
	struct nvhost_channel *channel = waiter->data;
	/* count > 1 when consecutive submits on the same channel were
	 * folded together by remove_completed_waiters() */
	int nr_completed = waiter->count;

	/* Add nr_completed to trace */
	trace_nvhost_channel_submit_complete(channel->dev->name,
			nr_completed, waiter->thresh);

	nvhost_cdma_update(&channel->cdma);
	nvhost_module_idle_mult(channel->dev, nr_completed);
}
139 | |||
140 | static void action_ctxsave(struct nvhost_waitlist *waiter) | ||
141 | { | ||
142 | struct nvhost_hwctx *hwctx = waiter->data; | ||
143 | struct nvhost_channel *channel = hwctx->channel; | ||
144 | |||
145 | if (channel->ctxhandler->save_service) | ||
146 | channel->ctxhandler->save_service(hwctx); | ||
147 | } | ||
148 | |||
149 | static void action_wakeup(struct nvhost_waitlist *waiter) | ||
150 | { | ||
151 | wait_queue_head_t *wq = waiter->data; | ||
152 | |||
153 | wake_up(wq); | ||
154 | } | ||
155 | |||
156 | static void action_wakeup_interruptible(struct nvhost_waitlist *waiter) | ||
157 | { | ||
158 | wait_queue_head_t *wq = waiter->data; | ||
159 | |||
160 | wake_up_interruptible(wq); | ||
161 | } | ||
162 | |||
typedef void (*action_handler)(struct nvhost_waitlist *waiter);

/*
 * Dispatch table for completed waiters.  Indexed by enum
 * nvhost_intr_action, so entries here must stay in the same order as
 * the NVHOST_INTR_ACTION_* values.
 */
static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
	action_submit_complete,
	action_ctxsave,
	action_wakeup,
	action_wakeup_interruptible,
};
171 | |||
/*
 * Invoke the handler for every waiter collected by
 * remove_completed_waiters(), then drop each waiter's list reference.
 * Runs without the syncpt lock; the waiters are private to us here.
 */
static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
	struct list_head *head = completed;
	int i;

	for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
		action_handler handler = action_handlers[i];
		struct nvhost_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next, head, list) {
			list_del(&waiter->list);
			handler(waiter);
			/* only REMOVED waiters should reach this point */
			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) != WLS_REMOVED);
			kref_put(&waiter->refcount, waiter_release);
		}
	}
}
189 | |||
/**
 * Remove & handle all waiters that have completed for the given syncpt
 *
 * Takes the syncpt lock only to pull completed waiters off the queue and
 * re-arm the threshold interrupt; the handlers themselves run unlocked.
 * Returns nonzero when the wait queue is now empty.
 */
static int process_wait_list(struct nvhost_intr *intr,
			     struct nvhost_intr_syncpt *syncpt,
			     u32 threshold)
{
	struct list_head completed[NVHOST_INTR_ACTION_COUNT];
	unsigned int i;
	int empty;

	for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
		INIT_LIST_HEAD(completed + i);

	spin_lock(&syncpt->lock);

	remove_completed_waiters(&syncpt->wait_head, threshold, completed);

	empty = list_empty(&syncpt->wait_head);
	if (!empty)
		reset_threshold_interrupt(intr, &syncpt->wait_head,
					  syncpt->id);

	spin_unlock(&syncpt->lock);

	run_handlers(completed);

	return empty;
}
219 | |||
220 | /*** host syncpt interrupt service functions ***/ | ||
221 | /** | ||
222 | * Sync point threshold interrupt service thread function | ||
223 | * Handles sync point threshold triggers, in thread context | ||
224 | */ | ||
225 | irqreturn_t nvhost_syncpt_thresh_fn(int irq, void *dev_id) | ||
226 | { | ||
227 | struct nvhost_intr_syncpt *syncpt = dev_id; | ||
228 | unsigned int id = syncpt->id; | ||
229 | struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt); | ||
230 | struct nvhost_master *dev = intr_to_dev(intr); | ||
231 | |||
232 | (void)process_wait_list(intr, syncpt, | ||
233 | nvhost_syncpt_update_min(&dev->syncpt, id)); | ||
234 | |||
235 | return IRQ_HANDLED; | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * free a syncpt's irq. syncpt interrupt should be disabled first. | ||
240 | */ | ||
241 | static void free_syncpt_irq(struct nvhost_intr_syncpt *syncpt) | ||
242 | { | ||
243 | if (syncpt->irq_requested) { | ||
244 | free_irq(syncpt->irq, syncpt); | ||
245 | syncpt->irq_requested = 0; | ||
246 | } | ||
247 | } | ||
248 | |||
249 | |||
250 | /*** host general interrupt service functions ***/ | ||
251 | |||
252 | |||
253 | /*** Main API ***/ | ||
254 | |||
/*
 * Schedule @action for when sync point @id reaches @thresh.  Takes
 * ownership of @_waiter (allocated by nvhost_intr_alloc_waiter()); if
 * @ref is non-NULL an extra reference is taken so the caller can later
 * cancel via nvhost_intr_put_ref().  Non-blocking except for a possible
 * one-time irq request.  Contract documented in nvhost_intr.h.
 */
int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
			enum nvhost_intr_action action, void *data,
			void *_waiter,
			void **ref)
{
	struct nvhost_waitlist *waiter = _waiter;
	struct nvhost_intr_syncpt *syncpt;
	int queue_was_empty;
	int err;

	BUG_ON(waiter == NULL);

	BUG_ON(!(intr_op(intr).set_syncpt_threshold &&
		 intr_op(intr).enable_syncpt_intr));

	/* initialize a new waiter */
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	if (ref)
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	BUG_ON(id >= intr_to_dev(intr)->syncpt.nb_pts);
	syncpt = intr->syncpt + id;

	spin_lock(&syncpt->lock);

	/* lazily request irq for this sync point */
	if (!syncpt->irq_requested) {
		/* drop the spinlock: request_syncpt_irq may sleep, and
		 * intr->mutex serializes the request against other callers */
		spin_unlock(&syncpt->lock);

		mutex_lock(&intr->mutex);
		BUG_ON(!(intr_op(intr).request_syncpt_irq));
		err = intr_op(intr).request_syncpt_irq(syncpt);
		mutex_unlock(&intr->mutex);

		if (err) {
			/* waiter ownership was transferred to us: free it */
			kfree(waiter);
			return err;
		}

		spin_lock(&syncpt->lock);
	}

	queue_was_empty = list_empty(&syncpt->wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
		/* added at head of list - new threshold value */
		intr_op(intr).set_syncpt_threshold(intr, id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			intr_op(intr).enable_syncpt_intr(intr, id);
	}

	spin_unlock(&syncpt->lock);

	if (ref)
		*ref = waiter;
	return 0;
}
320 | |||
321 | void *nvhost_intr_alloc_waiter() | ||
322 | { | ||
323 | return kzalloc(sizeof(struct nvhost_waitlist), | ||
324 | GFP_KERNEL|__GFP_REPEAT); | ||
325 | } | ||
326 | |||
/*
 * Cancel (or release) a waiter previously registered with a non-NULL
 * 'ref'.  cmpxchg attempts PENDING->CANCELLED; if it instead observes
 * WLS_REMOVED, the interrupt path has pulled the waiter off the queue
 * and its handler is about to run, so yield until the state settles
 * (to CANCELLED or HANDLED).  Finally drop the caller's reference.
 */
void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
{
	struct nvhost_waitlist *waiter = ref;

	while (atomic_cmpxchg(&waiter->state,
				WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
		schedule();

	kref_put(&waiter->refcount, waiter_release);
}
337 | |||
338 | |||
339 | /*** Init & shutdown ***/ | ||
340 | |||
341 | int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync) | ||
342 | { | ||
343 | unsigned int id; | ||
344 | struct nvhost_intr_syncpt *syncpt; | ||
345 | struct nvhost_master *host = | ||
346 | container_of(intr, struct nvhost_master, intr); | ||
347 | u32 nb_pts = host->syncpt.nb_pts; | ||
348 | |||
349 | mutex_init(&intr->mutex); | ||
350 | intr->host_general_irq = irq_gen; | ||
351 | intr->host_general_irq_requested = false; | ||
352 | |||
353 | for (id = 0, syncpt = intr->syncpt; | ||
354 | id < nb_pts; | ||
355 | ++id, ++syncpt) { | ||
356 | syncpt->intr = &host->intr; | ||
357 | syncpt->id = id; | ||
358 | syncpt->irq = irq_sync + id; | ||
359 | syncpt->irq_requested = 0; | ||
360 | spin_lock_init(&syncpt->lock); | ||
361 | INIT_LIST_HEAD(&syncpt->wait_head); | ||
362 | snprintf(syncpt->thresh_irq_name, | ||
363 | sizeof(syncpt->thresh_irq_name), | ||
364 | "host_sp_%02d", id); | ||
365 | } | ||
366 | |||
367 | return 0; | ||
368 | } | ||
369 | |||
/* Tear down interrupt handling; currently just stops all irqs. */
void nvhost_intr_deinit(struct nvhost_intr *intr)
{
	nvhost_intr_stop(intr);
}
374 | |||
375 | void nvhost_intr_start(struct nvhost_intr *intr, u32 hz) | ||
376 | { | ||
377 | BUG_ON(!(intr_op(intr).init_host_sync && | ||
378 | intr_op(intr).set_host_clocks_per_usec && | ||
379 | intr_op(intr).request_host_general_irq)); | ||
380 | |||
381 | mutex_lock(&intr->mutex); | ||
382 | |||
383 | intr_op(intr).init_host_sync(intr); | ||
384 | intr_op(intr).set_host_clocks_per_usec(intr, | ||
385 | (hz + 1000000 - 1)/1000000); | ||
386 | |||
387 | intr_op(intr).request_host_general_irq(intr); | ||
388 | |||
389 | mutex_unlock(&intr->mutex); | ||
390 | } | ||
391 | |||
/*
 * Stop interrupt handling: disable all syncpt interrupts, reap any
 * cancelled waiters, and free the irqs.  All outstanding waiters must
 * already be completed or cancelled — a still-pending waiter is a bug
 * (BUG_ON below).
 */
void nvhost_intr_stop(struct nvhost_intr *intr)
{
	unsigned int id;
	struct nvhost_intr_syncpt *syncpt;
	u32 nb_pts = intr_to_dev(intr)->syncpt.nb_pts;

	BUG_ON(!(intr_op(intr).disable_all_syncpt_intrs &&
		 intr_op(intr).free_host_general_irq));

	mutex_lock(&intr->mutex);

	intr_op(intr).disable_all_syncpt_intrs(intr);

	for (id = 0, syncpt = intr->syncpt;
	     id < nb_pts;
	     ++id, ++syncpt) {
		struct nvhost_waitlist *waiter, *next;
		/* free waiters that were cancelled but never handled */
		list_for_each_entry_safe(waiter, next, &syncpt->wait_head, list) {
			if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED)
				== WLS_CANCELLED) {
				list_del(&waiter->list);
				kref_put(&waiter->refcount, waiter_release);
			}
		}

		if (!list_empty(&syncpt->wait_head)) {  /* output diagnostics */
			printk(KERN_DEBUG "%s id=%d\n", __func__, id);
			BUG_ON(1);
		}

		free_syncpt_irq(syncpt);
	}

	intr_op(intr).free_host_general_irq(intr);

	mutex_unlock(&intr->mutex);
}
diff --git a/drivers/video/tegra/host/nvhost_intr.h b/drivers/video/tegra/host/nvhost_intr.h new file mode 100644 index 00000000000..26ab04ebd4a --- /dev/null +++ b/drivers/video/tegra/host/nvhost_intr.h | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/nvhost_intr.h | ||
3 | * | ||
4 | * Tegra Graphics Host Interrupt Management | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #ifndef __NVHOST_INTR_H | ||
22 | #define __NVHOST_INTR_H | ||
23 | |||
24 | #include <linux/kthread.h> | ||
25 | #include <linux/semaphore.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | |||
28 | struct nvhost_channel; | ||
29 | |||
/*
 * Actions taken when a sync point reaches a waiter's threshold.  The
 * values are used as indices into a handler table, so keep the order
 * stable.
 */
enum nvhost_intr_action {
	/**
	 * Perform cleanup after a submit has completed.
	 * 'data' points to a channel
	 */
	NVHOST_INTR_ACTION_SUBMIT_COMPLETE = 0,

	/**
	 * Save a HW context.
	 * 'data' points to a context
	 */
	NVHOST_INTR_ACTION_CTXSAVE,

	/**
	 * Wake up a task.
	 * 'data' points to a wait_queue_head_t
	 */
	NVHOST_INTR_ACTION_WAKEUP,

	/**
	 * Wake up a interruptible task.
	 * 'data' points to a wait_queue_head_t
	 */
	NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,

	NVHOST_INTR_ACTION_COUNT
};
57 | |||
58 | struct nvhost_intr; | ||
59 | |||
/* Per-sync-point interrupt state. */
struct nvhost_intr_syncpt {
	struct nvhost_intr *intr;	/* back pointer to owning intr */
	u8 id;				/* sync point id */
	u8 irq_requested;		/* irq has been requested (lazy) */
	u16 irq;			/* irq number for this sync point */
	spinlock_t lock;		/* protects wait_head */
	struct list_head wait_head;	/* waiters sorted by threshold */
	char thresh_irq_name[12];	/* "host_sp_NN", sized to fit */
};

/* Top-level interrupt state for the host1x unit. */
struct nvhost_intr {
	struct nvhost_intr_syncpt *syncpt;	/* array, one per sync point */
	struct mutex mutex;			/* serializes irq (de)setup */
	int host_general_irq;			/* general host irq number */
	bool host_general_irq_requested;
};
#define intr_to_dev(x) container_of(x, struct nvhost_master, intr)
#define intr_op(intr) (intr_to_dev(intr)->op.intr)
#define intr_syncpt_to_intr(is) (is->intr)
79 | |||
80 | /** | ||
81 | * Schedule an action to be taken when a sync point reaches the given threshold. | ||
82 | * | ||
83 | * @id the sync point | ||
84 | * @thresh the threshold | ||
85 | * @action the action to take | ||
86 | * @data a pointer to extra data depending on action, see above | ||
87 | * @waiter waiter allocated with nvhost_intr_alloc_waiter - assumes ownership | ||
88 | * @ref must be passed if cancellation is possible, else NULL | ||
89 | * | ||
90 | * This is a non-blocking api. | ||
91 | */ | ||
92 | int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh, | ||
93 | enum nvhost_intr_action action, void *data, | ||
94 | void *waiter, | ||
95 | void **ref); | ||
96 | |||
97 | /** | ||
98 | * Allocate a waiter. | ||
99 | */ | ||
100 | void *nvhost_intr_alloc_waiter(void); | ||
101 | |||
102 | /** | ||
103 | * Unreference an action submitted to nvhost_intr_add_action(). | ||
104 | * You must call this if you passed non-NULL as ref. | ||
105 | * @ref the ref returned from nvhost_intr_add_action() | ||
106 | */ | ||
107 | void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref); | ||
108 | |||
109 | int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync); | ||
110 | void nvhost_intr_deinit(struct nvhost_intr *intr); | ||
111 | void nvhost_intr_start(struct nvhost_intr *intr, u32 hz); | ||
112 | void nvhost_intr_stop(struct nvhost_intr *intr); | ||
113 | |||
114 | irqreturn_t nvhost_syncpt_thresh_fn(int irq, void *dev_id); | ||
115 | #endif | ||
diff --git a/drivers/video/tegra/host/nvhost_job.c b/drivers/video/tegra/host/nvhost_job.c new file mode 100644 index 00000000000..df7a62d689b --- /dev/null +++ b/drivers/video/tegra/host/nvhost_job.c | |||
@@ -0,0 +1,339 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/nvhost_job.c | ||
3 | * | ||
4 | * Tegra Graphics Host Job | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <linux/slab.h> | ||
22 | #include <linux/kref.h> | ||
23 | #include <linux/err.h> | ||
24 | #include <linux/vmalloc.h> | ||
25 | #include <mach/nvmap.h> | ||
26 | #include "nvhost_channel.h" | ||
27 | #include "nvhost_job.h" | ||
28 | #include "dev.h" | ||
29 | |||
30 | /* Magic to use to fill freed handle slots */ | ||
31 | #define BAD_MAGIC 0xdeadbeef | ||
32 | |||
33 | static int job_size(struct nvhost_submit_hdr_ext *hdr) | ||
34 | { | ||
35 | int num_pins = hdr ? (hdr->num_relocs + hdr->num_cmdbufs)*2 : 0; | ||
36 | int num_waitchks = hdr ? hdr->num_waitchks : 0; | ||
37 | |||
38 | return sizeof(struct nvhost_job) | ||
39 | + num_pins * sizeof(struct nvmap_pinarray_elem) | ||
40 | + num_pins * sizeof(struct nvmap_handle *) | ||
41 | + num_waitchks * sizeof(struct nvhost_waitchk); | ||
42 | } | ||
43 | |||
/* Bytes needed to hold @num_cmdbufs gather descriptors. */
static int gather_size(int num_cmdbufs)
{
	return num_cmdbufs * sizeof(struct nvhost_channel_gather);
}
48 | |||
49 | static void free_gathers(struct nvhost_job *job) | ||
50 | { | ||
51 | if (job->gathers) { | ||
52 | nvmap_munmap(job->gather_mem, job->gathers); | ||
53 | job->gathers = NULL; | ||
54 | } | ||
55 | if (job->gather_mem) { | ||
56 | nvmap_free(job->nvmap, job->gather_mem); | ||
57 | job->gather_mem = NULL; | ||
58 | } | ||
59 | } | ||
60 | |||
61 | static int alloc_gathers(struct nvhost_job *job, | ||
62 | int num_cmdbufs) | ||
63 | { | ||
64 | int err = 0; | ||
65 | |||
66 | job->gather_mem = NULL; | ||
67 | job->gathers = NULL; | ||
68 | job->gather_mem_size = 0; | ||
69 | |||
70 | if (num_cmdbufs) { | ||
71 | /* Allocate memory */ | ||
72 | job->gather_mem = nvmap_alloc(job->nvmap, | ||
73 | gather_size(num_cmdbufs), | ||
74 | 32, NVMAP_HANDLE_CACHEABLE, 0); | ||
75 | if (IS_ERR_OR_NULL(job->gather_mem)) { | ||
76 | err = PTR_ERR(job->gather_mem); | ||
77 | job->gather_mem = NULL; | ||
78 | goto error; | ||
79 | } | ||
80 | job->gather_mem_size = gather_size(num_cmdbufs); | ||
81 | |||
82 | /* Map memory to kernel */ | ||
83 | job->gathers = nvmap_mmap(job->gather_mem); | ||
84 | if (IS_ERR_OR_NULL(job->gathers)) { | ||
85 | err = PTR_ERR(job->gathers); | ||
86 | job->gathers = NULL; | ||
87 | goto error; | ||
88 | } | ||
89 | } | ||
90 | |||
91 | return 0; | ||
92 | |||
93 | error: | ||
94 | free_gathers(job); | ||
95 | return err; | ||
96 | } | ||
97 | |||
/*
 * Give @newjob a gather buffer, reusing @oldjob's when it is big enough
 * and belongs to the same nvmap client; otherwise allocate fresh.  On
 * the reuse path ownership of the buffer moves from oldjob to newjob
 * (oldjob's pointers are cleared so it won't free it).
 */
static int realloc_gathers(struct nvhost_job *oldjob,
		struct nvhost_job *newjob,
		int num_cmdbufs)
{
	int err = 0;

	/* Check if we can reuse gather buffer */
	if (oldjob->gather_mem_size < gather_size(num_cmdbufs)
			|| oldjob->nvmap != newjob->nvmap) {
		free_gathers(oldjob);
		err = alloc_gathers(newjob, num_cmdbufs);
	} else {
		/* transfer ownership of the existing buffer */
		newjob->gather_mem = oldjob->gather_mem;
		newjob->gathers = oldjob->gathers;
		newjob->gather_mem_size = oldjob->gather_mem_size;

		oldjob->gather_mem = NULL;
		oldjob->gathers = NULL;
		oldjob->gather_mem_size = 0;
	}
	return err;
}
120 | |||
/*
 * Reset a job's bookkeeping fields and carve the memory that
 * nvhost_job_alloc()/realloc() over-allocated (see job_size()) into the
 * pinarray, unpins and waitchk sub-arrays that trail the struct.
 */
static void init_fields(struct nvhost_job *job,
		struct nvhost_submit_hdr_ext *hdr,
		int priority, int clientid)
{
	int num_pins = hdr ? (hdr->num_relocs + hdr->num_cmdbufs)*2 : 0;
	int num_waitchks = hdr ? hdr->num_waitchks : 0;
	void *mem = job;

	/* First init state to zero */
	job->num_gathers = 0;
	job->num_pins = 0;
	job->num_unpins = 0;
	job->num_waitchk = 0;
	job->waitchk_mask = 0;
	job->syncpt_id = 0;
	job->syncpt_incrs = 0;
	job->syncpt_end = 0;
	job->priority = priority;
	job->clientid = clientid;
	job->null_kickoff = false;
	job->first_get = 0;
	job->num_slots = 0;

	/* Redistribute memory to the structs */
	/* layout: [nvhost_job][pinarray][unpins][waitchk] — must match
	 * the sizing in job_size() */
	mem += sizeof(struct nvhost_job);
	if (num_pins) {
		job->pinarray = mem;
		mem += num_pins * sizeof(struct nvmap_pinarray_elem);
		job->unpins = mem;
		mem += num_pins * sizeof(struct nvmap_handle *);
	} else {
		job->pinarray = NULL;
		job->unpins = NULL;
	}

	job->waitchk = num_waitchks ? mem : NULL;

	/* Copy information from header */
	if (hdr) {
		job->waitchk_mask = hdr->waitchk_mask;
		job->syncpt_id = hdr->syncpt_id;
		job->syncpt_incrs = hdr->syncpt_incrs;
	}
}
165 | |||
/*
 * Allocate a refcounted job for channel @ch.  Takes a reference on
 * @hwctx (if any) and on @nvmap (if any).  Returns NULL on failure;
 * partially built jobs are released through nvhost_job_put(), which
 * drops whatever references were already taken.
 */
struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
		struct nvhost_hwctx *hwctx,
		struct nvhost_submit_hdr_ext *hdr,
		struct nvmap_client *nvmap,
		int priority,
		int clientid)
{
	struct nvhost_job *job = NULL;
	int num_cmdbufs = hdr ? hdr->num_cmdbufs : 0;
	int err = 0;

	/* vzalloc: job + trailing arrays sized by job_size() */
	job = vzalloc(job_size(hdr));
	if (!job)
		goto error;

	kref_init(&job->ref);
	job->ch = ch;
	job->hwctx = hwctx;
	if (hwctx)
		hwctx->h->get(hwctx);
	job->nvmap = nvmap ? nvmap_client_get(nvmap) : NULL;

	err = alloc_gathers(job, num_cmdbufs);
	if (err)
		goto error;

	init_fields(job, hdr, priority, clientid);

	return job;

error:
	if (job)
		nvhost_job_put(job);
	return NULL;
}
201 | |||
/*
 * Build a new job on @oldjob's channel, reusing oldjob's gather buffer
 * when possible (see realloc_gathers()).  Consumes one reference on
 * @oldjob on every path — success and failure alike — so the caller's
 * pointer is invalid after this returns.  Returns NULL on failure.
 */
struct nvhost_job *nvhost_job_realloc(
		struct nvhost_job *oldjob,
		struct nvhost_hwctx *hwctx,
		struct nvhost_submit_hdr_ext *hdr,
		struct nvmap_client *nvmap,
		int priority, int clientid)
{
	struct nvhost_job *newjob = NULL;
	int num_cmdbufs = hdr ? hdr->num_cmdbufs : 0;
	int err = 0;

	newjob = vzalloc(job_size(hdr));
	if (!newjob)
		goto error;
	kref_init(&newjob->ref);
	newjob->ch = oldjob->ch;
	newjob->hwctx = hwctx;
	if (hwctx)
		newjob->hwctx->h->get(newjob->hwctx);
	newjob->timeout = oldjob->timeout;
	newjob->nvmap = nvmap ? nvmap_client_get(nvmap) : NULL;

	err = realloc_gathers(oldjob, newjob, num_cmdbufs);
	if (err)
		goto error;

	nvhost_job_put(oldjob);

	init_fields(newjob, hdr, priority, clientid);

	return newjob;

error:
	if (newjob)
		nvhost_job_put(newjob);
	if (oldjob)
		nvhost_job_put(oldjob);
	return NULL;
}
241 | |||
/* Take an extra reference on @job; paired with nvhost_job_put(). */
void nvhost_job_get(struct nvhost_job *job)
{
	kref_get(&job->ref);
}
246 | |||
/*
 * kref release callback: drop every resource the job holds.  The gather
 * buffer must be unmapped/freed before the nvmap client reference is
 * dropped, since nvmap_free() needs job->nvmap.
 */
static void job_free(struct kref *ref)
{
	struct nvhost_job *job = container_of(ref, struct nvhost_job, ref);

	if (job->hwctxref)
		job->hwctxref->h->put(job->hwctxref);
	if (job->hwctx)
		job->hwctx->h->put(job->hwctx);
	if (job->gathers)
		nvmap_munmap(job->gather_mem, job->gathers);
	if (job->gather_mem)
		nvmap_free(job->nvmap, job->gather_mem);
	if (job->nvmap)
		nvmap_client_put(job->nvmap);
	vfree(job);
}
263 | |||
264 | /* Acquire reference to a hardware context. Used for keeping saved contexts in | ||
265 | * memory. */ | ||
266 | void nvhost_job_get_hwctx(struct nvhost_job *job, struct nvhost_hwctx *hwctx) | ||
267 | { | ||
268 | BUG_ON(job->hwctxref); | ||
269 | |||
270 | job->hwctxref = hwctx; | ||
271 | hwctx->h->get(hwctx); | ||
272 | } | ||
273 | |||
/* Drop a reference on @job; frees it via job_free() on the last put. */
void nvhost_job_put(struct nvhost_job *job)
{
	kref_put(&job->ref, job_free);
}
278 | |||
/*
 * Append a gather to the job.  The gather's physical address is not
 * known yet: a pinarray entry is queued that tells nvmap_pin_array()
 * (via nvhost_job_pin()) to pin @mem_id and patch the resulting address
 * directly into cur_gather->mem inside the mapped gather buffer.
 */
void nvhost_job_add_gather(struct nvhost_job *job,
		u32 mem_id, u32 words, u32 offset)
{
	struct nvmap_pinarray_elem *pin;
	struct nvhost_channel_gather *cur_gather =
		&job->gathers[job->num_gathers];

	pin = &job->pinarray[job->num_pins++];
	pin->patch_mem = (u32)nvmap_ref_to_handle(job->gather_mem);
	/* byte offset of cur_gather->mem within the gather buffer */
	pin->patch_offset = (void *)&(cur_gather->mem) - (void *)job->gathers;
	pin->pin_mem = nvmap_convert_handle_u2k(mem_id);
	pin->pin_offset = offset;
	cur_gather->words = words;
	cur_gather->mem_id = mem_id;
	cur_gather->offset = offset;
	job->num_gathers += 1;
}
296 | |||
297 | int nvhost_job_pin(struct nvhost_job *job) | ||
298 | { | ||
299 | int err = 0; | ||
300 | |||
301 | /* pin mem handles and patch physical addresses */ | ||
302 | job->num_unpins = nvmap_pin_array(job->nvmap, | ||
303 | nvmap_ref_to_handle(job->gather_mem), | ||
304 | job->pinarray, job->num_pins, | ||
305 | job->unpins); | ||
306 | if (job->num_unpins < 0) | ||
307 | err = job->num_unpins; | ||
308 | |||
309 | return err; | ||
310 | } | ||
311 | |||
312 | void nvhost_job_unpin(struct nvhost_job *job) | ||
313 | { | ||
314 | nvmap_unpin_handles(job->nvmap, job->unpins, | ||
315 | job->num_unpins); | ||
316 | memset(job->unpins, BAD_MAGIC, | ||
317 | job->num_unpins * sizeof(struct nvmap_handle *)); | ||
318 | } | ||
319 | |||
/**
 * Debug routine used to dump job entries
 *
 * Output goes through dev_dbg(), so it is visible only when dynamic
 * debug (or DEBUG) is enabled for this device.
 */
void nvhost_job_dump(struct device *dev, struct nvhost_job *job)
{
	dev_dbg(dev, " SYNCPT_ID %d\n",
		job->syncpt_id);
	dev_dbg(dev, " SYNCPT_VAL %d\n",
		job->syncpt_end);
	dev_dbg(dev, " FIRST_GET 0x%x\n",
		job->first_get);
	dev_dbg(dev, " TIMEOUT %d\n",
		job->timeout);
	dev_dbg(dev, " CTX 0x%p\n",
		job->hwctx);
	dev_dbg(dev, " NUM_SLOTS %d\n",
		job->num_slots);
	dev_dbg(dev, " NUM_HANDLES %d\n",
		job->num_unpins);
}
diff --git a/drivers/video/tegra/host/nvhost_job.h b/drivers/video/tegra/host/nvhost_job.h new file mode 100644 index 00000000000..ad9d1af60da --- /dev/null +++ b/drivers/video/tegra/host/nvhost_job.h | |||
@@ -0,0 +1,150 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/nvhost_job.h | ||
3 | * | ||
4 | * Tegra Graphics Host Interrupt Management | ||
5 | * | ||
6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #ifndef __NVHOST_JOB_H | ||
22 | #define __NVHOST_JOB_H | ||
23 | |||
24 | #include <linux/nvhost_ioctl.h> | ||
25 | |||
26 | struct nvhost_channel; | ||
27 | struct nvhost_hwctx; | ||
28 | struct nvmap_client; | ||
29 | struct nvhost_waitchk; | ||
30 | struct nvmap_handle; | ||
31 | |||
32 | /* | ||
33 | * Each submit is tracked as a nvhost_job. | ||
34 | */ | ||
struct nvhost_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct nvhost_channel *ch;

	/* Hardware context valid for this client */
	struct nvhost_hwctx *hwctx;
	/* Id of the submitting client -- presumably used to tell
	 * submissions from different clients apart; confirm at callers */
	int clientid;

	/* Nvmap to be used for pinning & unpinning memory */
	struct nvmap_client *nvmap;

	/* Gathers and their memory */
	struct nvmap_handle_ref *gather_mem;
	struct nvhost_channel_gather *gathers;
	int num_gathers;
	/* Allocated capacity backing the gather area (units set by
	 * nvhost_job_alloc; not visible here) */
	int gather_mem_size;

	/* Wait checks to be processed at submit time */
	struct nvhost_waitchk *waitchk;
	int num_waitchk;
	/* Bit mask of syncpt ids referenced by the wait checks */
	u32 waitchk_mask;

	/* Array of handles to be pinned & unpinned */
	struct nvmap_pinarray_elem *pinarray;
	int num_pins;
	struct nvmap_handle **unpins;
	int num_unpins;

	/* Sync point id, number of increments and end related to the submit */
	u32 syncpt_id;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Priority of this submit. */
	int priority;

	/* Maximum time to wait for this job */
	int timeout;

	/* Null kickoff prevents submit from being sent to hardware */
	bool null_kickoff;

	/* Index and number of slots used in the push buffer */
	int first_get;
	int num_slots;

	/* Context to be freed */
	struct nvhost_hwctx *hwctxref;
};
90 | |||
/*
 * Allocate memory for a job. Just enough memory will be allocated to
 * accommodate the submit announced in the submit header.
 */
95 | struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch, | ||
96 | struct nvhost_hwctx *hwctx, | ||
97 | struct nvhost_submit_hdr_ext *hdr, | ||
98 | struct nvmap_client *nvmap, | ||
99 | int priority, int clientid); | ||
100 | |||
/*
 * Allocate memory for a job. Just enough memory will be allocated to
 * accommodate the submit announced in the submit header. Gather memory
 * from oldjob will be reused, and nvhost_job_put() will be called on it.
 */
106 | struct nvhost_job *nvhost_job_realloc(struct nvhost_job *oldjob, | ||
107 | struct nvhost_hwctx *hwctx, | ||
108 | struct nvhost_submit_hdr_ext *hdr, | ||
109 | struct nvmap_client *nvmap, | ||
110 | int priority, int clientid); | ||
111 | |||
112 | /* | ||
113 | * Add a gather to a job. | ||
114 | */ | ||
115 | void nvhost_job_add_gather(struct nvhost_job *job, | ||
116 | u32 mem_id, u32 words, u32 offset); | ||
117 | |||
118 | /* | ||
119 | * Increment reference going to nvhost_job. | ||
120 | */ | ||
121 | void nvhost_job_get(struct nvhost_job *job); | ||
122 | |||
123 | /* | ||
124 | * Increment reference for a hardware context. | ||
125 | */ | ||
126 | void nvhost_job_get_hwctx(struct nvhost_job *job, struct nvhost_hwctx *hwctx); | ||
127 | |||
128 | /* | ||
129 | * Decrement reference job, free if goes to zero. | ||
130 | */ | ||
131 | void nvhost_job_put(struct nvhost_job *job); | ||
132 | |||
133 | /* | ||
134 | * Pin memory related to job. This handles relocation of addresses to the | ||
135 | * host1x address space. Handles both the gather memory and any other memory | ||
136 | * referred to from the gather buffers. | ||
137 | */ | ||
138 | int nvhost_job_pin(struct nvhost_job *job); | ||
139 | |||
140 | /* | ||
141 | * Unpin memory related to job. | ||
142 | */ | ||
143 | void nvhost_job_unpin(struct nvhost_job *job); | ||
144 | |||
145 | /* | ||
146 | * Dump contents of job to debug output. | ||
147 | */ | ||
148 | void nvhost_job_dump(struct device *dev, struct nvhost_job *job); | ||
149 | |||
150 | #endif | ||
diff --git a/drivers/video/tegra/host/nvhost_syncpt.c b/drivers/video/tegra/host/nvhost_syncpt.c new file mode 100644 index 00000000000..eb5176ea1bf --- /dev/null +++ b/drivers/video/tegra/host/nvhost_syncpt.c | |||
@@ -0,0 +1,319 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/nvhost_syncpt.c | ||
3 | * | ||
4 | * Tegra Graphics Host Syncpoints | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <linux/nvhost_ioctl.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include "nvhost_syncpt.h" | ||
24 | #include "dev.h" | ||
25 | |||
26 | #define MAX_STUCK_CHECK_COUNT 15 | ||
27 | |||
28 | /** | ||
29 | * Resets syncpoint and waitbase values to sw shadows | ||
30 | */ | ||
31 | void nvhost_syncpt_reset(struct nvhost_syncpt *sp) | ||
32 | { | ||
33 | u32 i; | ||
34 | BUG_ON(!(syncpt_op(sp).reset && syncpt_op(sp).reset_wait_base)); | ||
35 | |||
36 | for (i = 0; i < sp->nb_pts; i++) | ||
37 | syncpt_op(sp).reset(sp, i); | ||
38 | for (i = 0; i < sp->nb_bases; i++) | ||
39 | syncpt_op(sp).reset_wait_base(sp, i); | ||
40 | wmb(); | ||
41 | } | ||
42 | |||
/**
 * Updates sw shadow state for client managed registers.
 *
 * Client managed syncpoints have their min shadow refreshed from
 * hardware (presumably because they can advance outside driver
 * control -- confirm against the op implementations).  Host managed
 * points must already satisfy min == max here, i.e. all submitted
 * work retired, otherwise state would be lost: hence the BUG_ON.
 * Wait base shadows are always re-read.
 */
void nvhost_syncpt_save(struct nvhost_syncpt *sp)
{
	u32 i;
	BUG_ON(!(syncpt_op(sp).update_min && syncpt_op(sp).read_wait_base));

	for (i = 0; i < sp->nb_pts; i++) {
		if (client_managed(i))
			syncpt_op(sp).update_min(sp, i);
		else
			BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
	}

	for (i = 0; i < sp->nb_bases; i++)
		syncpt_op(sp).read_wait_base(sp, i);
}
61 | |||
/**
 * Updates the last value read from hardware.
 *
 * Refreshes and returns the min shadow for syncpoint @id.  Unlike
 * nvhost_syncpt_read(), this takes no module busy reference -- the
 * caller is responsible for the host being powered.
 */
u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
	BUG_ON(!syncpt_op(sp).update_min);

	return syncpt_op(sp).update_min(sp, id);
}
71 | |||
/**
 * Get the current syncpoint value.
 *
 * Holds a module busy reference across the register access so the
 * host stays powered, refreshes the min shadow from hardware, and
 * returns the refreshed value.
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
	u32 val;
	BUG_ON(!syncpt_op(sp).update_min);
	nvhost_module_busy(syncpt_to_dev(sp)->dev);
	val = syncpt_op(sp).update_min(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return val;
}
84 | |||
/**
 * Get the current syncpoint base.
 *
 * Refreshes the base_val shadow for @id from hardware (under a module
 * busy reference) and returns the shadowed value.
 */
u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
	u32 val;
	BUG_ON(!syncpt_op(sp).read_wait_base);
	nvhost_module_busy(syncpt_to_dev(sp)->dev);
	syncpt_op(sp).read_wait_base(sp, id);
	val = sp->base_val[id];
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return val;
}
98 | |||
/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
	BUG_ON(!syncpt_op(sp).cpu_incr);
	syncpt_op(sp).cpu_incr(sp, id);
}
108 | |||
/**
 * Increment syncpoint value from cpu, updating cache.
 *
 * For client managed syncpoints the max shadow is bumped first so the
 * min value can never overtake max.  The hardware increment itself is
 * done under a module busy reference.
 */
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
	if (client_managed(id))
		nvhost_syncpt_incr_max(sp, id, 1);
	nvhost_module_busy(syncpt_to_dev(sp)->dev);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
}
120 | |||
/**
 * Main entrypoint for syncpoint value waits.
 *
 * Blocks until syncpoint @id reaches @thresh or @timeout (jiffies)
 * expires.  If @value is non-NULL it receives the syncpoint value
 * observed when the wait completed (left 0 on the error paths).
 *
 * Returns 0 on success; -EAGAIN on timeout, or immediately when
 * timeout == 0 and the threshold has not been reached; -ENOMEM if the
 * interrupt waiter cannot be allocated; or the negative value from
 * wait_event_interruptible_timeout() if interrupted by a signal.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout, u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *waiter;
	int err = 0, check_count = 0, low_timeout = 0;
	u32 val;

	if (value)
		*value = 0;

	/* first check cache */
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = nvhost_syncpt_read_min(sp, id);
		return 0;
	}

	/* keep host alive */
	nvhost_module_busy(syncpt_to_dev(sp)->dev);

	/* try to read from register */
	val = syncpt_op(sp).update_min(sp, id);
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = val;
		goto done;
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	waiter = nvhost_intr_alloc_waiter();
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
				waiter,
				&ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* Caller-specified timeout may be impractically low */
	if (timeout < SYNCPT_CHECK_PERIOD)
		low_timeout = timeout;

	/* wait for the syncpoint, or timeout, or signal.  Sleep in
	 * SYNCPT_CHECK_PERIOD slices so a stuck wait gets reported
	 * periodically instead of hanging silently. */
	while (timeout) {
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
						nvhost_syncpt_is_expired(sp, id, thresh),
						check);
		if (remain > 0) {
			/* threshold reached before the slice expired */
			if (value)
				*value = nvhost_syncpt_read_min(sp, id);
			err = 0;
			break;
		}
		if (remain < 0) {
			/* interrupted by a signal */
			err = remain;
			break;
		}
		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= check;
		if (timeout) {
			/* still waiting: warn, dump state, and after
			 * MAX_STUCK_CHECK_COUNT slices give up with BUG() */
			dev_warn(&syncpt_to_dev(sp)->dev->dev,
				"%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
				current->comm, id, syncpt_op(sp).name(sp, id),
				thresh, timeout);
			syncpt_op(sp).debug(sp);
			if (check_count > MAX_STUCK_CHECK_COUNT) {
				if (low_timeout) {
					dev_warn(&syncpt_to_dev(sp)->dev->dev,
						"is timeout %d too low?\n",
						low_timeout);
				}
				nvhost_debug_dump(syncpt_to_dev(sp));
				BUG();
			}
			check_count++;
		}
	}
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return err;
}
220 | |||
/**
 * Returns true if syncpoint is expired, false if we may need to wait
 */
bool nvhost_syncpt_is_expired(
	struct nvhost_syncpt *sp,
	u32 id,
	u32 thresh)
{
	u32 current_val;
	u32 future_val;
	/* pair with the writers' smp_wmb() before reading the shadows */
	smp_rmb();
	current_val = (u32)atomic_read(&sp->min_val[id]);
	future_val = (u32)atomic_read(&sp->max_val[id]);

	/* Note the use of unsigned arithmetic here (mod 1<<32).
	 *
	 * c = current_val = min_val = the current value of the syncpoint.
	 * t = thresh = the value we are checking
	 * f = future_val = max_val = the value c will reach when all
	 *     outstanding increments have completed.
	 *
	 * Note that c always chases f until it reaches f.
	 *
	 * Dtf = (f - t)
	 * Dtc = (c - t)
	 *
	 * Consider all cases:
	 *
	 *  A) .....c..t..f.....	Dtf < Dtc	need to wait
	 *  B) .....c.....f..t..	Dtf > Dtc	expired
	 *  C) ..t..c.....f.....	Dtf > Dtc	expired (Dct very large)
	 *
	 * Any case where f==c: always expired (for any t).	Dtf == Dcf
	 * Any case where t==c: always expired (for any f).	Dtf >= Dtc (because Dtc==0)
	 * Any case where t==f!=c: always wait.			Dtf < Dtc (because Dtf==0,
	 *							Dtc!=0)
	 *
	 * Other cases:
	 *
	 *  A) .....t..f..c.....	Dtf < Dtc	need to wait
	 *  A) .....f..c..t.....	Dtf < Dtc	need to wait
	 *  A) .....f..t..c.....	Dtf > Dtc	expired
	 *
	 * So:
	 *	Dtf >= Dtc implies EXPIRED	(return true)
	 *	Dtf <  Dtc implies WAIT		(return false)
	 *
	 * Note: If t is expired then we *cannot* wait on it. We would wait
	 * forever (hang the system).
	 *
	 * Note: do NOT get clever and remove the -thresh from both sides. It
	 * is NOT the same.
	 *
	 * If future value is zero, we have a client managed sync point. In
	 * that case we do a direct comparison.
	 */
	if (!client_managed(id))
		return future_val - thresh >= current_val - thresh;
	else
		return (s32)(current_val - thresh) >= 0;
}
282 | |||
/* Dump syncpoint state via the chip-specific debug op. */
void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
{
	syncpt_op(sp).debug(sp);
}
287 | |||
/**
 * Try to acquire hardware module mutex @idx.
 *
 * A module busy reference is taken and kept for as long as the lock is
 * held; it is dropped here on failure, or in nvhost_mutex_unlock() on
 * release.  Returns 0 on success, -EBUSY if the hardware mutex is
 * already owned.
 */
int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx)
{
	struct nvhost_master *host = syncpt_to_dev(sp);
	u32 reg;

	nvhost_module_busy(host->dev);
	reg = syncpt_op(sp).mutex_try_lock(sp, idx);
	if (reg) {
		/* mutex already taken; drop the busy reference again */
		nvhost_module_idle(host->dev);
		return -EBUSY;
	}
	atomic_inc(&sp->lock_counts[idx]);
	return 0;
}
302 | |||
/**
 * Release hardware module mutex @idx and drop the busy reference taken
 * in nvhost_mutex_try_lock().
 */
void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx)
{
	syncpt_op(sp).mutex_unlock(sp, idx);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	atomic_dec(&sp->lock_counts[idx]);
}
309 | |||
/* check for old WAITs to be removed (avoiding a wrap); thin
 * dispatch to the chip-specific wait_check op -- see the contract in
 * nvhost_syncpt.h */
int nvhost_syncpt_wait_check(struct nvhost_syncpt *sp,
			     struct nvmap_client *nvmap,
			     u32 waitchk_mask,
			     struct nvhost_waitchk *wait,
			     int num_waitchk)
{
	return syncpt_op(sp).wait_check(sp, nvmap,
			waitchk_mask, wait, num_waitchk);
}
diff --git a/drivers/video/tegra/host/nvhost_syncpt.h b/drivers/video/tegra/host/nvhost_syncpt.h new file mode 100644 index 00000000000..5b339178d1e --- /dev/null +++ b/drivers/video/tegra/host/nvhost_syncpt.h | |||
@@ -0,0 +1,155 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/nvhost_syncpt.h | ||
3 | * | ||
4 | * Tegra Graphics Host Syncpoints | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #ifndef __NVHOST_SYNCPT_H | ||
22 | #define __NVHOST_SYNCPT_H | ||
23 | |||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/sched.h> | ||
26 | #include <linux/nvhost.h> | ||
27 | #include <mach/nvmap.h> | ||
28 | #include <linux/atomic.h> | ||
29 | |||
30 | struct nvhost_syncpt; | ||
31 | struct nvhost_waitchk; | ||
32 | |||
33 | /* host managed and invalid syncpt id */ | ||
34 | #define NVSYNCPT_GRAPHICS_HOST (0) | ||
35 | #define NVSYNCPT_INVALID (-1) | ||
36 | |||
struct nvhost_syncpt {
	atomic_t *min_val;	/* per-id shadow of the current hw value */
	atomic_t *max_val;	/* per-id value hw will reach once all
				 * outstanding increments complete */
	u32 *base_val;		/* shadow of the wait base registers */
	u32 nb_pts;		/* number of syncpoints */
	u32 nb_bases;		/* number of wait bases */
	u32 client_managed;	/* bit mask of client managed syncpt ids */
	atomic_t *lock_counts;	/* per-mlock hold counters */
	u32 nb_mlocks;		/* number of module locks (entries in
				 * lock_counts -- TODO confirm) */
};
47 | |||
int nvhost_syncpt_init(struct nvhost_syncpt *);
/* NOTE: client_managed() relies on a variable named "sp" being in
 * scope at the call site -- use with care. */
#define client_managed(id) (BIT(id) & sp->client_managed)
#define syncpt_to_dev(sp) container_of(sp, struct nvhost_master, syncpt)
#define syncpt_op(sp) (syncpt_to_dev(sp)->op.syncpt)
/* interval (jiffies) between "stuck waiting" checks/warnings */
#define SYNCPT_CHECK_PERIOD (2*HZ)
53 | |||
54 | |||
/**
 * Updates the value sent to hardware.
 * Atomically adds @incrs to the max shadow and returns the new max.
 */
static inline u32 nvhost_syncpt_incr_max(struct nvhost_syncpt *sp,
					u32 id, u32 incrs)
{
	return (u32)atomic_add_return(incrs, &sp->max_val[id]);
}
63 | |||
/**
 * Sets the value sent to hardware (a direct assignment of the max
 * shadow, as opposed to nvhost_syncpt_incr_max()).  The smp_wmb()
 * publishes the store before readers' smp_rmb().
 */
static inline u32 nvhost_syncpt_set_max(struct nvhost_syncpt *sp,
					u32 id, u32 val)
{
	atomic_set(&sp->max_val[id], val);
	smp_wmb();
	return val;
}
74 | |||
/* Read the cached max shadow for @id (no hardware access). */
static inline u32 nvhost_syncpt_read_max(struct nvhost_syncpt *sp, u32 id)
{
	smp_rmb();
	return (u32)atomic_read(&sp->max_val[id]);
}

/* Read the cached min shadow for @id (no hardware access). */
static inline u32 nvhost_syncpt_read_min(struct nvhost_syncpt *sp, u32 id)
{
	smp_rmb();
	return (u32)atomic_read(&sp->min_val[id]);
}
86 | |||
/**
 * Returns true if @real does not exceed the tracked max shadow
 * (wrap-safe signed-difference comparison).  Client managed
 * syncpoints have no driver-tracked max, so any value passes.
 */
static inline bool nvhost_syncpt_check_max(struct nvhost_syncpt *sp,
					u32 id, u32 real)
{
	u32 max;
	if (client_managed(id))
		return true;
	max = nvhost_syncpt_read_max(sp, id);
	return (s32)(max - real) >= 0;
}
96 | |||
/**
 * Returns true if syncpoint min == max, i.e. no increments are
 * outstanding for @id.
 */
static inline bool nvhost_syncpt_min_eq_max(struct nvhost_syncpt *sp, u32 id)
{
	int min, max;
	smp_rmb();
	min = atomic_read(&sp->min_val[id]);
	max = atomic_read(&sp->max_val[id]);
	return (min == max);
}
108 | |||
109 | void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id); | ||
110 | |||
111 | u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id); | ||
112 | bool nvhost_syncpt_is_expired(struct nvhost_syncpt *sp, u32 id, u32 thresh); | ||
113 | |||
114 | void nvhost_syncpt_save(struct nvhost_syncpt *sp); | ||
115 | |||
116 | void nvhost_syncpt_reset(struct nvhost_syncpt *sp); | ||
117 | |||
118 | u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id); | ||
119 | u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id); | ||
120 | |||
121 | void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id); | ||
122 | |||
123 | int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id, u32 thresh, | ||
124 | u32 timeout, u32 *value); | ||
125 | |||
/* Convenience wrapper: wait for @id to reach @thresh with no result
 * pointer, blocking up to MAX_SCHEDULE_TIMEOUT. */
static inline int nvhost_syncpt_wait(struct nvhost_syncpt *sp, u32 id, u32 thresh)
{
	return nvhost_syncpt_wait_timeout(sp, id, thresh,
					  MAX_SCHEDULE_TIMEOUT, NULL);
}
131 | |||
132 | /* | ||
133 | * Check driver supplied waitchk structs for syncpt thresholds | ||
134 | * that have already been satisfied and NULL the comparison (to | ||
135 | * avoid a wrap condition in the HW). | ||
136 | * | ||
137 | * @param: sp - global shadowed syncpt struct | ||
138 | * @param: nvmap - needed to access command buffer | ||
139 | * @param: mask - bit mask of syncpt IDs referenced in WAITs | ||
140 | * @param: wait - start of filled in array of waitchk structs | ||
141 | * @param: waitend - end ptr (one beyond last valid waitchk) | ||
142 | */ | ||
143 | int nvhost_syncpt_wait_check(struct nvhost_syncpt *sp, | ||
144 | struct nvmap_client *nvmap, | ||
145 | u32 mask, | ||
146 | struct nvhost_waitchk *wait, | ||
147 | int num_waitchk); | ||
148 | |||
149 | void nvhost_syncpt_debug(struct nvhost_syncpt *sp); | ||
150 | |||
151 | int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx); | ||
152 | |||
153 | void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx); | ||
154 | |||
155 | #endif | ||
diff --git a/drivers/video/tegra/host/t20/Makefile b/drivers/video/tegra/host/t20/Makefile new file mode 100644 index 00000000000..c2ade9bf925 --- /dev/null +++ b/drivers/video/tegra/host/t20/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | GCOV_PROFILE := y | ||
2 | |||
3 | EXTRA_CFLAGS += -Idrivers/video/tegra/host | ||
4 | |||
5 | nvhost-t20-objs = \ | ||
6 | t20.o | ||
7 | |||
8 | obj-$(CONFIG_TEGRA_GRHOST) += nvhost-t20.o | ||
diff --git a/drivers/video/tegra/host/t20/t20.c b/drivers/video/tegra/host/t20/t20.c new file mode 100644 index 00000000000..24ddedc842e --- /dev/null +++ b/drivers/video/tegra/host/t20/t20.c | |||
@@ -0,0 +1,233 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/t20/t20.c | ||
3 | * | ||
4 | * Tegra Graphics Init for T20 Architecture Chips | ||
5 | * | ||
6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <linux/slab.h> | ||
22 | #include <mach/powergate.h> | ||
23 | #include "dev.h" | ||
24 | #include "t20.h" | ||
25 | #include "host1x/host1x_channel.h" | ||
26 | #include "host1x/host1x_syncpt.h" | ||
27 | #include "host1x/host1x_hardware.h" | ||
28 | #include "host1x/host1x_cdma.h" | ||
29 | #include "gr3d/gr3d.h" | ||
30 | #include "gr3d/gr3d_t20.h" | ||
31 | #include "mpe/mpe.h" | ||
32 | #include "nvhost_hwctx.h" | ||
33 | |||
34 | #define NVMODMUTEX_2D_FULL (1) | ||
35 | #define NVMODMUTEX_2D_SIMPLE (2) | ||
36 | #define NVMODMUTEX_2D_SB_A (3) | ||
37 | #define NVMODMUTEX_2D_SB_B (4) | ||
38 | #define NVMODMUTEX_3D (5) | ||
39 | #define NVMODMUTEX_DISPLAYA (6) | ||
40 | #define NVMODMUTEX_DISPLAYB (7) | ||
41 | #define NVMODMUTEX_VI (8) | ||
42 | #define NVMODMUTEX_DSI (9) | ||
43 | |||
44 | #define NVHOST_NUMCHANNELS (NV_HOST1X_CHANNELS - 1) | ||
45 | |||
/*
 * Static per-channel device descriptors for the T20 host1x.
 * Array index corresponds to the channel number; looked up by name in
 * t20_get_nvhost_device().
 */
struct nvhost_device t20_devices[] = {
{
	/* channel 0 */
	.name = "display",
	.id = -1,
	.index = 0,
	.syncpts = BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) |
		   BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) |
		   BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) |
		   BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
	.modulemutexes = BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_NONE,
},
{
	/* channel 1 */
	.name = "gr3d",
	.id = -1,
	.index = 1,
	.syncpts = BIT(NVSYNCPT_3D),
	.waitbases = BIT(NVWAITBASE_3D),
	.modulemutexes = BIT(NVMODMUTEX_3D),
	.class = NV_GRAPHICS_3D_CLASS_ID,
	.prepare_poweroff = nvhost_gr3d_prepare_power_off,
	.alloc_hwctx_handler = nvhost_gr3d_t20_ctxhandler_init,
	.clocks = {{"gr3d", UINT_MAX}, {"emc", UINT_MAX}, {} },
	.powergate_ids = {TEGRA_POWERGATE_3D, -1},
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_NONE,
},
{
	/* channel 2 */
	.name = "gr2d",
	.id = -1,
	.index = 2,
	.syncpts = BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
	.waitbases = BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
	.modulemutexes = BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
			 BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
	.clocks = { {"gr2d", UINT_MAX},
		    {"epp", UINT_MAX},
		    {"emc", UINT_MAX} },
	NVHOST_MODULE_NO_POWERGATE_IDS,
	/* 2d is clock gated immediately (delay 0), unlike the others */
	.clockgate_delay = 0,
	.moduleid = NVHOST_MODULE_NONE,
},
{
	/* channel 3 */
	.name = "isp",
	.id = -1,
	.index = 3,
	.syncpts = 0,
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_ISP,
},
{
	/* channel 4 */
	.name = "vi",
	.id = -1,
	.index = 4,
	.syncpts = BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) |
		   BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
		   BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
		   BIT(NVSYNCPT_VI_ISP_4),
	.modulemutexes = BIT(NVMODMUTEX_VI),
	.exclusive = true,
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_VI,
},
{
	/* channel 5 */
	.name = "mpe",
	.id = -1,
	.index = 5,
	.syncpts = BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
		   BIT(NVSYNCPT_MPE_WR_SAFE),
	.waitbases = BIT(NVWAITBASE_MPE),
	.class = NV_VIDEO_ENCODE_MPEG_CLASS_ID,
	.waitbasesync = true,
	.keepalive = true,
	.prepare_poweroff = nvhost_mpe_prepare_power_off,
	.alloc_hwctx_handler = nvhost_mpe_ctxhandler_init,
	.clocks = { {"mpe", UINT_MAX},
		    {"emc", UINT_MAX} },
	.powergate_ids = {TEGRA_POWERGATE_MPE, -1},
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_MPE,
},
{
	/* channel 6 */
	.name = "dsi",
	.id = -1,
	.index = 6,
	.syncpts = BIT(NVSYNCPT_DSI),
	.modulemutexes = BIT(NVMODMUTEX_DSI),
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_NONE,
} };
148 | |||
149 | |||
150 | static inline void __iomem *t20_channel_aperture(void __iomem *p, int ndx) | ||
151 | { | ||
152 | p += NV_HOST1X_CHANNEL0_BASE; | ||
153 | p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES; | ||
154 | return p; | ||
155 | } | ||
156 | |||
/*
 * Allocate the hardware context handler for a channel, using the first
 * syncpoint and wait base listed in the channel's device descriptor.
 * Channels without an alloc_hwctx_handler hook simply get none.
 * Returns 0 on success, -ENOMEM if the handler allocation fails.
 */
static inline int t20_nvhost_hwctx_handler_init(struct nvhost_channel *ch)
{
	int err = 0;
	unsigned long syncpts = ch->dev->syncpts;
	unsigned long waitbases = ch->dev->waitbases;
	u32 syncpt = find_first_bit(&syncpts, BITS_PER_LONG);
	u32 waitbase = find_first_bit(&waitbases, BITS_PER_LONG);

	if (ch->dev->alloc_hwctx_handler) {
		ch->ctxhandler = ch->dev->alloc_hwctx_handler(syncpt,
				waitbase, ch);
		if (!ch->ctxhandler)
			err = -ENOMEM;
	}

	return err;
}
174 | |||
/*
 * Per-channel init hook: record the channel id, set up its locks and
 * MMIO aperture, then create the hardware context handler.
 */
static int t20_channel_init(struct nvhost_channel *ch,
			    struct nvhost_master *dev, int index)
{
	ch->chid = index;
	mutex_init(&ch->reflock);
	mutex_init(&ch->submitlock);

	ch->aperture = t20_channel_aperture(dev->aperture, index);

	return t20_nvhost_hwctx_handler_init(ch);
}
186 | |||
/*
 * Install the T20 channel count and channel ops (submit/read3dreg are
 * the common host1x implementations).  Always returns 0.
 */
int nvhost_init_t20_channel_support(struct nvhost_master *host)
{
	host->nb_channels = NVHOST_NUMCHANNELS;

	host->op.channel.init = t20_channel_init;
	host->op.channel.submit = host1x_channel_submit;
	host->op.channel.read3dreg = host1x_channel_read_3d_reg;

	return 0;
}
197 | |||
198 | struct nvhost_device *t20_get_nvhost_device(struct nvhost_master *host, | ||
199 | char *name) | ||
200 | { | ||
201 | int i; | ||
202 | |||
203 | for (i = 0; i < host->nb_channels; i++) { | ||
204 | if (strcmp(t20_devices[i].name, name) == 0) | ||
205 | return &t20_devices[i]; | ||
206 | } | ||
207 | |||
208 | return NULL; | ||
209 | } | ||
210 | |||
/*
 * Top-level T20 chip init: wires up channel, cdma, debug, syncpt and
 * intr support plus the device lookup hook.  Returns the first
 * sub-init error, or 0 on success.
 */
int nvhost_init_t20_support(struct nvhost_master *host)
{
	int err;

	/* don't worry about cleaning up on failure... "remove" does it. */
	err = nvhost_init_t20_channel_support(host);
	if (err)
		return err;
	err = host1x_init_cdma_support(host);
	if (err)
		return err;
	err = nvhost_init_t20_debug_support(host);
	if (err)
		return err;
	err = host1x_init_syncpt_support(host);
	if (err)
		return err;
	err = nvhost_init_t20_intr_support(host);
	if (err)
		return err;
	host->op.nvhost_dev.get_nvhost_device = t20_get_nvhost_device;
	return 0;
}
diff --git a/drivers/video/tegra/host/t20/t20.h b/drivers/video/tegra/host/t20/t20.h new file mode 100644 index 00000000000..93555a55b58 --- /dev/null +++ b/drivers/video/tegra/host/t20/t20.h | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/t20/t20.h | ||
3 | * | ||
4 | * Tegra Graphics Chip support for T20 | ||
5 | * | ||
6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
#ifndef _NVHOST_T20_H_
#define _NVHOST_T20_H_

/* forward declarations; the full definitions live elsewhere in nvhost */
struct nvhost_master;
struct nvhost_module;

/* Install the T20 channel count and channel operations on the host. */
int nvhost_init_t20_channel_support(struct nvhost_master *);
/* Install the T20 debug support. */
int nvhost_init_t20_debug_support(struct nvhost_master *);
/* Install the T20 sync point support. */
int nvhost_init_t20_syncpt_support(struct nvhost_master *);
/* Install the T20 interrupt support. */
int nvhost_init_t20_intr_support(struct nvhost_master *);
/* One-stop T20 chip bring-up; calls the individual init routines. */
int nvhost_init_t20_support(struct nvhost_master *host);
/* Save the hardware context of @mod, tracked via sync point @syncpt_id. */
int nvhost_t20_save_context(struct nvhost_module *mod, u32 syncpt_id);

#endif /* _NVHOST_T20_H_ */
diff --git a/drivers/video/tegra/host/t30/Makefile b/drivers/video/tegra/host/t30/Makefile new file mode 100644 index 00000000000..b343eb4fc7c --- /dev/null +++ b/drivers/video/tegra/host/t30/Makefile | |||
@@ -0,0 +1,8 @@ | |||
# Kbuild fragment for the Tegra30 (T30) graphics-host chip support.
GCOV_PROFILE := y

EXTRA_CFLAGS += -Idrivers/video/tegra/host

# Objects composing the nvhost-t30 module.
nvhost-t30-objs = \
	t30.o

obj-$(CONFIG_TEGRA_GRHOST) += nvhost-t30.o
diff --git a/drivers/video/tegra/host/t30/t30.c b/drivers/video/tegra/host/t30/t30.c new file mode 100644 index 00000000000..8a8b1f4d924 --- /dev/null +++ b/drivers/video/tegra/host/t30/t30.c | |||
@@ -0,0 +1,251 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/t30/t30.c | ||
3 | * | ||
4 | * Tegra Graphics Init for T30 Architecture Chips | ||
5 | * | ||
6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <linux/mutex.h> | ||
22 | #include <mach/powergate.h> | ||
23 | #include <mach/iomap.h> | ||
24 | #include "dev.h" | ||
25 | #include "t20/t20.h" | ||
26 | #include "t30.h" | ||
27 | #include "gr3d/gr3d.h" | ||
28 | #include "mpe/mpe.h" | ||
29 | #include "gr3d/gr3d_t30.h" | ||
30 | #include "gr3d/scale3d.h" | ||
31 | #include "host1x/host1x_hardware.h" | ||
32 | #include "host1x/host1x_cdma.h" | ||
33 | #include "host1x/host1x_syncpt.h" | ||
34 | #include "chip_support.h" | ||
35 | |||
36 | #define NVMODMUTEX_2D_FULL (1) | ||
37 | #define NVMODMUTEX_2D_SIMPLE (2) | ||
38 | #define NVMODMUTEX_2D_SB_A (3) | ||
39 | #define NVMODMUTEX_2D_SB_B (4) | ||
40 | #define NVMODMUTEX_3D (5) | ||
41 | #define NVMODMUTEX_DISPLAYA (6) | ||
42 | #define NVMODMUTEX_DISPLAYB (7) | ||
43 | #define NVMODMUTEX_VI (8) | ||
44 | #define NVMODMUTEX_DSI (9) | ||
45 | |||
46 | #define NVHOST_CHANNEL_BASE 0 | ||
47 | |||
/*
 * Static table of the nvhost client devices behind the seven T30 host1x
 * channels, in channel-index order.  Each entry lists the sync points,
 * wait bases and module mutexes the channel owns, plus its clock and
 * power-gating policy.  t30_get_nvhost_device() resolves entries from
 * this table by name.
 *
 * NOTE(review): NVHOST_MODULE_NO_POWERGATE_IDS and
 * NVHOST_DEFAULT_CLOCKGATE_DELAY are used positionally and presumably
 * expand to designated initializers — see the nvhost_device definition.
 */
struct nvhost_device t30_devices[] = {
{
	/* channel 0 */
	.name = "display",
	.id = -1,
	.index = 0,
	.syncpts = BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) |
		BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) |
		BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) |
		BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
	.modulemutexes = BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_NONE,
},
{
	/* channel 1 */
	.name = "gr3d",
	.id = -1,
	.index = 1,
	.syncpts = BIT(NVSYNCPT_3D),
	.waitbases = BIT(NVWAITBASE_3D),
	.modulemutexes = BIT(NVMODMUTEX_3D),
	.class = NV_GRAPHICS_3D_CLASS_ID,
	.prepare_poweroff = nvhost_gr3d_prepare_power_off,
	.busy = nvhost_scale3d_notify_busy,
	.idle = nvhost_scale3d_notify_idle,
	.init = nvhost_scale3d_init,
	.deinit = nvhost_scale3d_deinit,
	.suspend = nvhost_scale3d_suspend,
	.alloc_hwctx_handler = nvhost_gr3d_t30_ctxhandler_init,
	.clocks = { {"gr3d", UINT_MAX},
			{"gr3d2", UINT_MAX},
			{"emc", UINT_MAX} },
	.powergate_ids = { TEGRA_POWERGATE_3D,
			TEGRA_POWERGATE_3D1 },
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.can_powergate = false,
	.powergate_delay = 250,
	.moduleid = NVHOST_MODULE_NONE,
},
{
	/* channel 2 */
	.name = "gr2d",
	.id = -1,
	.index = 2,
	.syncpts = BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
	.waitbases = BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
	.modulemutexes = BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
		BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
	.clocks = { {"gr2d", 0},
			{"epp", 0},
			{"emc", 300000000} },
	NVHOST_MODULE_NO_POWERGATE_IDS,
	.clockgate_delay = 0,
	.moduleid = NVHOST_MODULE_NONE,
},
{
	/* channel 3 */
	.name = "isp",
	.id = -1,
	.index = 3,
	.syncpts = 0,
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_ISP,
},
{
	/* channel 4 */
	.name = "vi",
	.id = -1,
	.index = 4,
	.syncpts = BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) |
		BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
		BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
		BIT(NVSYNCPT_VI_ISP_4),
	.modulemutexes = BIT(NVMODMUTEX_VI),
	.exclusive = true,
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_VI,
},
{
	/* channel 5 */
	.name = "mpe",
	.id = -1,
	.index = 5,
	.syncpts = BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
		BIT(NVSYNCPT_MPE_WR_SAFE),
	.waitbases = BIT(NVWAITBASE_MPE),
	.class = NV_VIDEO_ENCODE_MPEG_CLASS_ID,
	.waitbasesync = true,
	.keepalive = true,
	.prepare_poweroff = nvhost_mpe_prepare_power_off,
	.alloc_hwctx_handler = nvhost_mpe_ctxhandler_init,
	.clocks = { {"mpe", UINT_MAX},
			{"emc", UINT_MAX} },
	.powergate_ids = {TEGRA_POWERGATE_MPE, -1},
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.can_powergate = true,
	.powergate_delay = 100,
	.moduleid = NVHOST_MODULE_MPE,
},
{
	/* channel 6 */
	.name = "dsi",
	.id = -1,
	.index = 6,
	.syncpts = BIT(NVSYNCPT_DSI),
	.modulemutexes = BIT(NVMODMUTEX_DSI),
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_NONE,
} };
162 | |||
163 | static inline int t30_nvhost_hwctx_handler_init(struct nvhost_channel *ch) | ||
164 | { | ||
165 | int err = 0; | ||
166 | unsigned long syncpts = ch->dev->syncpts; | ||
167 | unsigned long waitbases = ch->dev->waitbases; | ||
168 | u32 syncpt = find_first_bit(&syncpts, BITS_PER_LONG); | ||
169 | u32 waitbase = find_first_bit(&waitbases, BITS_PER_LONG); | ||
170 | |||
171 | if (ch->dev->alloc_hwctx_handler) { | ||
172 | ch->ctxhandler = ch->dev->alloc_hwctx_handler(syncpt, | ||
173 | waitbase, ch); | ||
174 | if (!ch->ctxhandler) | ||
175 | err = -ENOMEM; | ||
176 | } | ||
177 | |||
178 | return err; | ||
179 | } | ||
180 | |||
181 | static inline void __iomem *t30_channel_aperture(void __iomem *p, int ndx) | ||
182 | { | ||
183 | ndx += NVHOST_CHANNEL_BASE; | ||
184 | p += NV_HOST1X_CHANNEL0_BASE; | ||
185 | p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES; | ||
186 | return p; | ||
187 | } | ||
188 | |||
189 | static int t30_channel_init(struct nvhost_channel *ch, | ||
190 | struct nvhost_master *dev, int index) | ||
191 | { | ||
192 | ch->chid = index; | ||
193 | mutex_init(&ch->reflock); | ||
194 | mutex_init(&ch->submitlock); | ||
195 | |||
196 | ch->aperture = t30_channel_aperture(dev->aperture, index); | ||
197 | |||
198 | return t30_nvhost_hwctx_handler_init(ch); | ||
199 | } | ||
200 | |||
201 | int nvhost_init_t30_channel_support(struct nvhost_master *host) | ||
202 | { | ||
203 | int result = nvhost_init_t20_channel_support(host); | ||
204 | host->op.channel.init = t30_channel_init; | ||
205 | |||
206 | return result; | ||
207 | } | ||
208 | int nvhost_init_t30_debug_support(struct nvhost_master *host) | ||
209 | { | ||
210 | nvhost_init_t20_debug_support(host); | ||
211 | host->op.debug.debug_init = nvhost_scale3d_debug_init; | ||
212 | |||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | struct nvhost_device *t30_get_nvhost_device(struct nvhost_master *host, | ||
217 | char *name) | ||
218 | { | ||
219 | int i; | ||
220 | |||
221 | for (i = 0; i < host->nb_channels; i++) { | ||
222 | if (strcmp(t30_devices[i].name, name) == 0) | ||
223 | return &t30_devices[i]; | ||
224 | } | ||
225 | |||
226 | return NULL; | ||
227 | } | ||
228 | |||
229 | int nvhost_init_t30_support(struct nvhost_master *host) | ||
230 | { | ||
231 | int err; | ||
232 | |||
233 | /* don't worry about cleaning up on failure... "remove" does it. */ | ||
234 | err = nvhost_init_t30_channel_support(host); | ||
235 | if (err) | ||
236 | return err; | ||
237 | err = host1x_init_cdma_support(host); | ||
238 | if (err) | ||
239 | return err; | ||
240 | err = nvhost_init_t30_debug_support(host); | ||
241 | if (err) | ||
242 | return err; | ||
243 | err = host1x_init_syncpt_support(host); | ||
244 | if (err) | ||
245 | return err; | ||
246 | err = nvhost_init_t20_intr_support(host); | ||
247 | if (err) | ||
248 | return err; | ||
249 | host->op.nvhost_dev.get_nvhost_device = t30_get_nvhost_device; | ||
250 | return 0; | ||
251 | } | ||
diff --git a/drivers/video/tegra/host/t30/t30.h b/drivers/video/tegra/host/t30/t30.h new file mode 100644 index 00000000000..0446dbd19b3 --- /dev/null +++ b/drivers/video/tegra/host/t30/t30.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/t30/t30.h | ||
3 | * | ||
4 | * Tegra Graphics Chip support for Tegra3 | ||
5 | * | ||
6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
#ifndef _NVHOST_T30_H_
#define _NVHOST_T30_H_

/* forward declaration; the full definition lives elsewhere in nvhost */
struct nvhost_master;

/* Install the T30 channel support (T20 support + T30 channel init). */
int nvhost_init_t30_channel_support(struct nvhost_master *);
/* Install the T30 debug support. */
int nvhost_init_t30_debug_support(struct nvhost_master *);
/* One-stop T30 chip bring-up; calls the individual init routines. */
int nvhost_init_t30_support(struct nvhost_master *host);

#endif /* _NVHOST_T30_H_ */
diff --git a/drivers/video/tegra/host/vi/Makefile b/drivers/video/tegra/host/vi/Makefile new file mode 100644 index 00000000000..8c130e49814 --- /dev/null +++ b/drivers/video/tegra/host/vi/Makefile | |||
@@ -0,0 +1,7 @@ | |||
# Kbuild fragment for the Tegra host VI (video input) client driver.
GCOV_PROFILE := y
EXTRA_CFLAGS += -Idrivers/video/tegra/host

# Objects composing the nvhost-vi module.
nvhost-vi-objs = \
	vi.o

obj-$(CONFIG_TEGRA_GRHOST) += nvhost-vi.o
diff --git a/drivers/video/tegra/host/vi/vi.c b/drivers/video/tegra/host/vi/vi.c new file mode 100644 index 00000000000..71d517152ad --- /dev/null +++ b/drivers/video/tegra/host/vi/vi.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/host/vi/vi.c | ||
3 | * | ||
4 | * Tegra Graphics Host VI | ||
5 | * | ||
6 | * Copyright (c) 2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include "dev.h" | ||
22 | #include "bus_client.h" | ||
23 | |||
24 | static int __devinit vi_probe(struct nvhost_device *dev) | ||
25 | { | ||
26 | return nvhost_client_device_init(dev); | ||
27 | } | ||
28 | |||
/* Driver remove hook: currently a stub, nothing is torn down yet. */
static int __exit vi_remove(struct nvhost_device *dev)
{
	/* Add clean-up */
	return 0;
}
34 | |||
35 | static int vi_suspend(struct nvhost_device *dev, pm_message_t state) | ||
36 | { | ||
37 | return nvhost_client_device_suspend(dev); | ||
38 | } | ||
39 | |||
/* PM resume hook: no state to restore, just log the event. */
static int vi_resume(struct nvhost_device *dev)
{
	dev_info(&dev->dev, "resuming\n");
	return 0;
}
45 | |||
/* The "vi" nvhost device, looked up from the chip device table in vi_init(). */
struct nvhost_device *vi_device;

/* nvhost driver glue for the VI client; PM hooks only when CONFIG_PM. */
static struct nvhost_driver vi_driver = {
	.probe = vi_probe,
	.remove = __exit_p(vi_remove),
#ifdef CONFIG_PM
	.suspend = vi_suspend,
	.resume = vi_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "vi",
	}
};
60 | |||
61 | static int __init vi_init(void) | ||
62 | { | ||
63 | int err; | ||
64 | |||
65 | vi_device = nvhost_get_device("vi"); | ||
66 | if (!vi_device) | ||
67 | return -ENXIO; | ||
68 | |||
69 | err = nvhost_device_register(vi_device); | ||
70 | if (err) | ||
71 | return err; | ||
72 | |||
73 | return nvhost_driver_register(&vi_driver); | ||
74 | } | ||
75 | |||
/* Module exit: unregister the driver. */
static void __exit vi_exit(void)
{
	/* NOTE(review): vi_init() also registers vi_device; it is not
	 * unregistered here — confirm whether that is intentional. */
	nvhost_driver_unregister(&vi_driver);
}

module_init(vi_init);
module_exit(vi_exit);
diff --git a/drivers/video/tegra/nvmap/Makefile b/drivers/video/tegra/nvmap/Makefile new file mode 100644 index 00000000000..95d7f68836a --- /dev/null +++ b/drivers/video/tegra/nvmap/Makefile | |||
@@ -0,0 +1,7 @@ | |||
# Kbuild fragment for the nvmap GPU memory manager.
GCOV_PROFILE := y
obj-y += nvmap.o
obj-y += nvmap_dev.o
obj-y += nvmap_handle.o
obj-y += nvmap_heap.o
obj-y += nvmap_ioctl.o
# MRU recycling of unpinned IOVMM areas is an optional feature.
obj-${CONFIG_NVMAP_RECLAIM_UNPINNED_VM} += nvmap_mru.o
diff --git a/drivers/video/tegra/nvmap/nvmap.c b/drivers/video/tegra/nvmap/nvmap.c new file mode 100644 index 00000000000..b4b6241618d --- /dev/null +++ b/drivers/video/tegra/nvmap/nvmap.c | |||
@@ -0,0 +1,871 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/nvmap/nvmap.c | ||
3 | * | ||
4 | * Memory manager for Tegra GPU | ||
5 | * | ||
6 | * Copyright (c) 2009-2011, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/err.h> | ||
24 | #include <linux/highmem.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/rbtree.h> | ||
27 | #include <linux/vmalloc.h> | ||
28 | #include <linux/wait.h> | ||
29 | #include <linux/slab.h> | ||
30 | |||
31 | #include <asm/pgtable.h> | ||
32 | #include <asm/tlbflush.h> | ||
33 | |||
34 | #include <mach/iovmm.h> | ||
35 | #include <mach/nvmap.h> | ||
36 | |||
37 | #include "nvmap.h" | ||
38 | #include "nvmap_mru.h" | ||
39 | |||
40 | /* private nvmap_handle flag for pinning duplicate detection */ | ||
41 | #define NVMAP_HANDLE_VISITED (0x1ul << 31) | ||
42 | |||
/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
static void map_iovmm_area(struct nvmap_handle *h)
{
	tegra_iovmm_addr_t va;
	unsigned long i;

	BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
	BUG_ON(h->size & ~PAGE_MASK);	/* size must be page-aligned */
	WARN_ON(!h->pgalloc.dirty);	/* only dirty handles need mapping */

	/* insert one pfn per page across the handle's whole IOVM range */
	for (va = h->pgalloc.area->iovm_start, i = 0;
	     va < (h->pgalloc.area->iovm_start + h->size);
	     i++, va += PAGE_SIZE) {
		BUG_ON(!pfn_valid(page_to_pfn(h->pgalloc.pages[i])));
		tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va,
				page_to_pfn(h->pgalloc.pages[i]));
	}
	/* the mapping now matches the backing pages */
	h->pgalloc.dirty = false;
}
62 | |||
/* must be called inside nvmap_pin_lock, to ensure that an entire stream
 * of pins will complete without racing with a second stream. handle should
 * have nvmap_handle_get (or nvmap_validate_get) called before calling
 * this function. */
static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
{
	struct tegra_iovmm_area *area;
	BUG_ON(!h->alloc);

	nvmap_mru_lock(client->share);
	/* the first pinner is responsible for making the handle resident */
	if (atomic_inc_return(&h->pin) == 1) {
		if (h->heap_pgalloc && !h->pgalloc.contig) {
			/* discontiguous page allocs need an IOVMM area */
			area = nvmap_handle_iovmm_locked(client, h);
			if (!area) {
				/* no race here, inside the pin mutex */
				atomic_dec(&h->pin);
				nvmap_mru_unlock(client->share);
				return -ENOMEM;
			}
			/* a freshly-assigned area must be (re)mapped by the
			 * caller later (see map_iovmm_area) */
			if (area != h->pgalloc.area)
				h->pgalloc.dirty = true;
			h->pgalloc.area = area;
		}
	}
	nvmap_mru_unlock(client->share);
	return 0;
}
90 | |||
/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
static int handle_unpin(struct nvmap_client *client,
		struct nvmap_handle *h, int free_vm)
{
	int ret = 0;
	nvmap_mru_lock(client->share);

	if (atomic_read(&h->pin) == 0) {
		nvmap_err(client, "%s unpinning unpinned handle %p\n",
			current->group_leader->comm, h);
		nvmap_mru_unlock(client->share);
		return 0;
	}

	BUG_ON(!h->alloc);

	/* on the last unpin, release or recycle the IOVMM area */
	if (!atomic_dec_return(&h->pin)) {
		if (h->heap_pgalloc && h->pgalloc.area) {
			/* if a secure handle is clean (i.e., mapped into
			 * IOVMM, it needs to be zapped on unpin. */
			if (h->secure && !h->pgalloc.dirty) {
				tegra_iovmm_zap_vm(h->pgalloc.area);
				h->pgalloc.dirty = true;
			}
			if (free_vm) {
				tegra_iovmm_free_vm(h->pgalloc.area);
				h->pgalloc.area = NULL;
			} else
				/* keep the mapping cached on the MRU list */
				nvmap_mru_insert_locked(client->share, h);
			/* non-zero return tells callers IOVMM space may have
			 * become available, so pin waiters should be woken */
			ret = 1;
		}
	}

	nvmap_mru_unlock(client->share);
	/* drop the reference taken by the matching pin/validate */
	nvmap_handle_put(h);
	return ret;
}
129 | |||
/*
 * Pin each of the @count handles in @h, in order.  On failure all
 * already-pinned handles are unpinned again; if the IOVMM then has
 * enough free space the whole array is re-pinned (and must succeed).
 * Must be called under client->share->pin_lock.
 */
static int pin_array_locked(struct nvmap_client *client,
		struct nvmap_handle **h, int count)
{
	int pinned;
	int i;
	int err = 0;

	for (pinned = 0; pinned < count; pinned++) {
		err = pin_locked(client, h[pinned]);
		if (err)
			break;
	}

	if (err) {
		/* unpin pinned handles */
		for (i = 0; i < pinned; i++) {
			/* inc ref counter, because
			 * handle_unpin decrements it */
			nvmap_handle_get(h[i]);
			/* unpin handles and free vm */
			handle_unpin(client, h[i], true);
		}
	}

	if (err && tegra_iovmm_get_max_free(client->share->iovmm) >=
			client->iovm_limit) {
		/* First attempt to pin in empty iovmm
		 * may still fail because of fragmentation caused by
		 * placing handles in MRU areas. After such failure
		 * all MRU gets cleaned and iovm space is freed.
		 *
		 * We have to do pinning again here since there might be
		 * no more incoming pin_wait wakeup calls from unpin
		 * operations */
		for (pinned = 0; pinned < count; pinned++) {
			err = pin_locked(client, h[pinned]);
			if (err)
				break;
		}
		if (err) {
			pr_err("Pinning in empty iovmm failed!!!\n");
			BUG_ON(1);
		}
	}
	return err;
}
176 | |||
/*
 * Pin @count handles, sleeping interruptibly on the share's pin_wait
 * queue (woken by unpin operations) until the whole array pins cleanly.
 * Called with client->share->pin_lock held (see nvmap_pin_ids).
 * Returns 0 on success, -EINTR if the wait was interrupted.
 */
static int wait_pin_array_locked(struct nvmap_client *client,
		struct nvmap_handle **h, int count)
{
	int ret = 0;

	ret = pin_array_locked(client, h, count);

	if (ret) {
		/* retry the whole array each time an unpin wakes us */
		ret = wait_event_interruptible(client->share->pin_wait,
				!pin_array_locked(client, h, count));
	}
	return ret ? -EINTR : 0;
}
190 | |||
/*
 * Unpin a handle by id on behalf of a client that holds no handle_ref
 * for it (used only for super clients, see nvmap_unpin_ids).  Loudly
 * warns, since this indicates a reference-counting problem somewhere.
 * Returns the handle_unpin() result (non-zero when IOVMM space was
 * freed), or 0 when the id does not validate.
 */
static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
{
	struct nvmap_handle *h;
	int w;

	h = nvmap_validate_get(client, id);
	if (unlikely(!h)) {
		nvmap_err(client, "%s attempting to unpin invalid handle %p\n",
			current->group_leader->comm, (void *)id);
		return 0;
	}

	nvmap_err(client, "%s unpinning unreferenced handle %p\n",
		current->group_leader->comm, h);
	WARN_ON(1);

	w = handle_unpin(client, h, false);
	/* drop the reference taken by nvmap_validate_get above */
	nvmap_handle_put(h);
	return w;
}
211 | |||
/*
 * Unpin each of the @nr handle ids in @ids, decrementing each ref's
 * pin count.  Zero ids are skipped.  Super clients may unpin ids they
 * hold no reference to (with a warning).  Wakes pin waiters when any
 * unpin released IOVMM space.
 */
void nvmap_unpin_ids(struct nvmap_client *client,
		unsigned int nr, const unsigned long *ids)
{
	unsigned int i;
	int do_wake = 0;

	for (i = 0; i < nr; i++) {
		struct nvmap_handle_ref *ref;

		if (!ids[i])
			continue;

		nvmap_ref_lock(client);
		ref = _nvmap_validate_id_locked(client, ids[i]);
		if (ref) {
			struct nvmap_handle *h = ref->handle;
			/* never drop the per-ref pin count below zero */
			int e = atomic_add_unless(&ref->pin, -1, 0);

			nvmap_ref_unlock(client);

			if (!e) {
				nvmap_err(client, "%s unpinning unpinned "
					"handle %08lx\n",
					current->group_leader->comm, ids[i]);
			} else {
				do_wake |= handle_unpin(client, h, false);
			}
		} else {
			nvmap_ref_unlock(client);
			if (client->super)
				do_wake |= handle_unpin_noref(client, ids[i]);
			else
				nvmap_err(client, "%s unpinning invalid "
					"handle %08lx\n",
					current->group_leader->comm, ids[i]);
		}
	}

	if (do_wake)
		wake_up(&client->share->pin_wait);
}
253 | |||
/* pins a list of handle_ref objects; same conditions apply as to
 * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
int nvmap_pin_ids(struct nvmap_client *client,
		unsigned int nr, const unsigned long *ids)
{
	int ret = 0;
	unsigned int i;
	/* valid ids double as nvmap_handle pointers (see the cast below
	 * and the "casting is safe" note in nvmap_reloc_pin_array) */
	struct nvmap_handle **h = (struct nvmap_handle **)ids;
	struct nvmap_handle_ref *ref;

	/* to optimize for the common case (client provided valid handle
	 * references and the pin succeeds), increment the handle_ref pin
	 * count during validation. in error cases, the tree will need to
	 * be re-walked, since the handle_ref is discarded so that an
	 * allocation isn't required. if a handle_ref is not found,
	 * locally validate that the caller has permission to pin the handle;
	 * handle_refs are not created in this case, so it is possible that
	 * if the caller crashes after pinning a global handle, the handle
	 * will be permanently leaked. */
	nvmap_ref_lock(client);
	for (i = 0; i < nr && !ret; i++) {
		ref = _nvmap_validate_id_locked(client, ids[i]);
		if (ref) {
			atomic_inc(&ref->pin);
			nvmap_handle_get(h[i]);
		} else {
			struct nvmap_handle *verify;
			nvmap_ref_unlock(client);
			verify = nvmap_validate_get(client, ids[i]);
			if (verify)
				nvmap_warn(client, "%s pinning unreferenced "
					   "handle %p\n",
					   current->group_leader->comm, h[i]);
			else
				ret = -EPERM;
			nvmap_ref_lock(client);
		}
	}
	nvmap_ref_unlock(client);

	/* only the first i entries were validated/get'ted; limit all
	 * later loops (including the rollback) to those */
	nr = i;

	if (ret)
		goto out;

	ret = mutex_lock_interruptible(&client->share->pin_lock);
	if (WARN_ON(ret))
		goto out;

	ret = wait_pin_array_locked(client, h, nr);

	mutex_unlock(&client->share->pin_lock);

	if (ret) {
		ret = -EINTR;
	} else {
		/* (re)map any handles whose IOVMM area was newly assigned */
		for (i = 0; i < nr; i++) {
			if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
				map_iovmm_area(h[i]);
		}
	}

out:
	if (ret) {
		/* roll back the per-ref pin counts and handle references
		 * taken during validation above */
		nvmap_ref_lock(client);
		for (i = 0; i < nr; i++) {
			ref = _nvmap_validate_id_locked(client, ids[i]);
			if (!ref) {
				nvmap_warn(client, "%s freed handle %p "
					"during pinning\n",
					current->group_leader->comm,
					(void *)ids[i]);
				continue;
			}
			atomic_dec(&ref->pin);
		}
		nvmap_ref_unlock(client);

		for (i = 0; i < nr; i++)
			nvmap_handle_put(h[i]);
	}

	return ret;
}
338 | |||
339 | static phys_addr_t handle_phys(struct nvmap_handle *h) | ||
340 | { | ||
341 | phys_addr_t addr; | ||
342 | |||
343 | if (h->heap_pgalloc && h->pgalloc.contig) { | ||
344 | addr = page_to_phys(h->pgalloc.pages[0]); | ||
345 | } else if (h->heap_pgalloc) { | ||
346 | BUG_ON(!h->pgalloc.area); | ||
347 | addr = h->pgalloc.area->iovm_start; | ||
348 | } else { | ||
349 | addr = h->carveout->base; | ||
350 | } | ||
351 | |||
352 | return addr; | ||
353 | } | ||
354 | |||
/* stores the physical address (+offset) of each handle relocation entry
 * into its output location. see nvmap_pin_array for more details.
 *
 * each entry in arr (i.e., each relocation request) specifies two handles:
 * the handle to pin (pin), and the handle where the address of pin should be
 * written (patch). in pseudocode, this loop basically looks like:
 *
 * for (i = 0; i < nr; i++) {
 *     (pin, pin_offset, patch, patch_offset) = arr[i];
 *     patch[patch_offset] = address_of(pin) + pin_offset;
 * }
 */
static int nvmap_reloc_pin_array(struct nvmap_client *client,
		const struct nvmap_pinarray_elem *arr,
		int nr, struct nvmap_handle *gather)
{
	struct nvmap_handle *last_patch = NULL;
	unsigned int last_pfn = 0;
	pte_t **pte;
	void *addr;
	int i;

	/* scratch PTE: a kernel VA window used to map each patch page */
	pte = nvmap_alloc_pte(client->dev, &addr);
	if (IS_ERR(pte))
		return PTR_ERR(pte);

	for (i = 0; i < nr; i++) {
		struct nvmap_handle *patch;
		struct nvmap_handle *pin;
		phys_addr_t reloc_addr;
		phys_addr_t phys;
		unsigned int pfn;

		/* all of the handles are validated and get'ted prior to
		 * calling this function, so casting is safe here */
		pin = (struct nvmap_handle *)arr[i].pin_mem;

		/* reuse the previous patch handle when possible, since
		 * consecutive entries commonly patch the same buffer */
		if (arr[i].patch_mem == (unsigned long)last_patch) {
			patch = last_patch;
		} else if (arr[i].patch_mem == (unsigned long)gather) {
			patch = gather;
		} else {
			if (last_patch)
				nvmap_handle_put(last_patch);

			patch = nvmap_get_handle_id(client, arr[i].patch_mem);
			if (!patch) {
				nvmap_free_pte(client->dev, pte);
				return -EPERM;
			}
			last_patch = patch;
		}

		/* physical address of the word to be patched */
		if (patch->heap_pgalloc) {
			unsigned int page = arr[i].patch_offset >> PAGE_SHIFT;
			phys = page_to_phys(patch->pgalloc.pages[page]);
			phys += (arr[i].patch_offset & ~PAGE_MASK);
		} else {
			phys = patch->carveout->base + arr[i].patch_offset;
		}

		/* remap the scratch window only when the page changes */
		pfn = __phys_to_pfn(phys);
		if (pfn != last_pfn) {
			pgprot_t prot = nvmap_pgprot(patch, pgprot_kernel);
			phys_addr_t kaddr = (phys_addr_t)addr;
			set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, prot));
			flush_tlb_kernel_page(kaddr);
			last_pfn = pfn;
		}

		/* write the (optionally shifted) target address in place */
		reloc_addr = handle_phys(pin) + arr[i].pin_offset;
		reloc_addr >>= arr[i].reloc_shift;
		__raw_writel(reloc_addr, addr + (phys & ~PAGE_MASK));
	}

	nvmap_free_pte(client->dev, pte);

	if (last_patch)
		nvmap_handle_put(last_patch);

	/* ensure all patched words are visible before the buffer is used */
	wmb();

	return 0;
}
439 | |||
440 | static int nvmap_validate_get_pin_array(struct nvmap_client *client, | ||
441 | const struct nvmap_pinarray_elem *arr, | ||
442 | int nr, struct nvmap_handle **h) | ||
443 | { | ||
444 | int i; | ||
445 | int ret = 0; | ||
446 | int count = 0; | ||
447 | |||
448 | nvmap_ref_lock(client); | ||
449 | |||
450 | for (i = 0; i < nr; i++) { | ||
451 | struct nvmap_handle_ref *ref; | ||
452 | |||
453 | if (need_resched()) { | ||
454 | nvmap_ref_unlock(client); | ||
455 | schedule(); | ||
456 | nvmap_ref_lock(client); | ||
457 | } | ||
458 | |||
459 | ref = _nvmap_validate_id_locked(client, arr[i].pin_mem); | ||
460 | |||
461 | if (!ref) | ||
462 | nvmap_warn(client, "falied to validate id\n"); | ||
463 | else if (!ref->handle) | ||
464 | nvmap_warn(client, "id had no associated handle\n"); | ||
465 | else if (!ref->handle->alloc) | ||
466 | nvmap_warn(client, "handle had no allocation\n"); | ||
467 | |||
468 | if (!ref || !ref->handle || !ref->handle->alloc) { | ||
469 | ret = -EPERM; | ||
470 | break; | ||
471 | } | ||
472 | |||
473 | /* a handle may be referenced multiple times in arr, but | ||
474 | * it will only be pinned once; this ensures that the | ||
475 | * minimum number of sync-queue slots in the host driver | ||
476 | * are dedicated to storing unpin lists, which allows | ||
477 | * for greater parallelism between the CPU and graphics | ||
478 | * processor */ | ||
479 | if (ref->handle->flags & NVMAP_HANDLE_VISITED) | ||
480 | continue; | ||
481 | |||
482 | ref->handle->flags |= NVMAP_HANDLE_VISITED; | ||
483 | |||
484 | h[count] = nvmap_handle_get(ref->handle); | ||
485 | BUG_ON(!h[count]); | ||
486 | count++; | ||
487 | } | ||
488 | |||
489 | nvmap_ref_unlock(client); | ||
490 | |||
491 | if (ret) { | ||
492 | for (i = 0; i < count; i++) { | ||
493 | h[i]->flags &= ~NVMAP_HANDLE_VISITED; | ||
494 | nvmap_handle_put(h[i]); | ||
495 | } | ||
496 | } | ||
497 | |||
498 | return ret ?: count; | ||
499 | } | ||
500 | |||
501 | /* a typical mechanism host1x clients use for using the Tegra graphics | ||
502 | * processor is to build a command buffer which contains relocatable | ||
503 | * memory handle commands, and rely on the kernel to convert these in-place | ||
504 | * to addresses which are understood by the GPU hardware. | ||
505 | * | ||
506 | * this is implemented by having clients provide a sideband array | ||
507 | * of relocatable handles (+ offsets) and the location in the command | ||
508 | * buffer handle to patch with the GPU address when the client submits | ||
509 | * its command buffer to the host1x driver. | ||
510 | * | ||
511 | * the host driver also uses this relocation mechanism internally to | ||
512 | * relocate the client's (unpinned) command buffers into host-addressable | ||
513 | * memory. | ||
514 | * | ||
515 | * @client: nvmap_client which should be used for validation; should be | ||
516 | * owned by the process which is submitting command buffers | ||
517 | * @gather: special handle for relocated command buffer outputs used | ||
518 | * internally by the host driver. if this handle is encountered | ||
519 | * as an output handle in the relocation array, it is assumed | ||
520 | * to be a known-good output and is not validated. | ||
521 | * @arr: array of ((relocatable handle, offset), (output handle, offset)) | ||
522 | * tuples. | ||
523 | * @nr: number of entries in arr | ||
524 | * @unique_arr: list of nvmap_handle objects which were pinned by | ||
525 | * nvmap_pin_array. must be unpinned by the caller after the | ||
526 | * command buffers referenced in gather have completed. | ||
527 | */ | ||
/* see the block comment above for the full contract and parameters */
int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
		const struct nvmap_pinarray_elem *arr, int nr,
		struct nvmap_handle **unique_arr)
{
	int count = 0;
	int ret = 0;
	int i;

	/* pin_lock serializes pin/unpin transitions for the whole share;
	 * interruptible so a signalled submitter can bail out */
	if (mutex_lock_interruptible(&client->share->pin_lock)) {
		nvmap_warn(client, "%s interrupted when acquiring pin lock\n",
			   current->group_leader->comm);
		return -EINTR;
	}

	/* takes one reference on every unique handle in arr and marks
	 * each handle VISITED for deduplication */
	count = nvmap_validate_get_pin_array(client, arr, nr, unique_arr);
	if (count < 0) {
		mutex_unlock(&client->share->pin_lock);
		nvmap_warn(client, "failed to validate pin array\n");
		return count;
	}

	/* the VISITED markers were only needed during deduplication */
	for (i = 0; i < count; i++)
		unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;

	ret = wait_pin_array_locked(client, unique_arr, count);

	mutex_unlock(&client->share->pin_lock);

	if (!ret)
		ret = nvmap_reloc_pin_array(client, arr, nr, gather);

	if (WARN_ON(ret)) {
		/* pinning or relocation failed: drop the references taken
		 * during validation */
		for (i = 0; i < count; i++)
			nvmap_handle_put(unique_arr[i]);
		return ret;
	} else {
		/* re-establish IOVMM mappings invalidated while unpinned */
		for (i = 0; i < count; i++) {
			if (unique_arr[i]->heap_pgalloc &&
			    unique_arr[i]->pgalloc.dirty)
				map_iovmm_area(unique_arr[i]);
		}
	}

	return count;
}
573 | |||
/* pin a single handle reference and return its physical address.
 * on failure a negative errno is returned (in a phys_addr_t). */
phys_addr_t nvmap_pin(struct nvmap_client *client,
		      struct nvmap_handle_ref *ref)
{
	struct nvmap_handle *h;
	phys_addr_t phys;
	int ret = 0;

	h = nvmap_handle_get(ref->handle);
	if (WARN_ON(!h))
		return -EINVAL;

	/* record the pin on the ref before attempting it; undone below
	 * on failure */
	atomic_inc(&ref->pin);

	if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
		ret = -EINTR;
	} else {
		ret = wait_pin_array_locked(client, &h, 1);
		mutex_unlock(&client->share->pin_lock);
	}

	if (ret) {
		/* roll back the pin count and the reference taken above */
		atomic_dec(&ref->pin);
		nvmap_handle_put(h);
	} else {
		/* re-map the IOVMM area if it was invalidated while
		 * the handle was unpinned */
		if (h->heap_pgalloc && h->pgalloc.dirty)
			map_iovmm_area(h);
		phys = handle_phys(h);
	}

	/* phys is only read when ret == 0, where it has been assigned */
	return ret ?: phys;
}
605 | |||
606 | phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id) | ||
607 | { | ||
608 | struct nvmap_handle *h; | ||
609 | phys_addr_t phys; | ||
610 | |||
611 | h = nvmap_get_handle_id(c, id); | ||
612 | if (!h) | ||
613 | return -EPERM; | ||
614 | mutex_lock(&h->lock); | ||
615 | phys = handle_phys(h); | ||
616 | mutex_unlock(&h->lock); | ||
617 | nvmap_handle_put(h); | ||
618 | |||
619 | return phys; | ||
620 | } | ||
621 | |||
622 | void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref) | ||
623 | { | ||
624 | if (!ref) | ||
625 | return; | ||
626 | |||
627 | atomic_dec(&ref->pin); | ||
628 | if (handle_unpin(client, ref->handle, false)) | ||
629 | wake_up(&client->share->pin_wait); | ||
630 | } | ||
631 | |||
632 | void nvmap_unpin_handles(struct nvmap_client *client, | ||
633 | struct nvmap_handle **h, int nr) | ||
634 | { | ||
635 | int i; | ||
636 | int do_wake = 0; | ||
637 | |||
638 | for (i = 0; i < nr; i++) { | ||
639 | if (WARN_ON(!h[i])) | ||
640 | continue; | ||
641 | do_wake |= handle_unpin(client, h[i], false); | ||
642 | } | ||
643 | |||
644 | if (do_wake) | ||
645 | wake_up(&client->share->pin_wait); | ||
646 | } | ||
647 | |||
648 | void *nvmap_mmap(struct nvmap_handle_ref *ref) | ||
649 | { | ||
650 | struct nvmap_handle *h; | ||
651 | pgprot_t prot; | ||
652 | unsigned long adj_size; | ||
653 | unsigned long offs; | ||
654 | struct vm_struct *v; | ||
655 | void *p; | ||
656 | |||
657 | h = nvmap_handle_get(ref->handle); | ||
658 | if (!h) | ||
659 | return NULL; | ||
660 | |||
661 | prot = nvmap_pgprot(h, pgprot_kernel); | ||
662 | |||
663 | if (h->heap_pgalloc) | ||
664 | return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT, | ||
665 | -1, prot); | ||
666 | |||
667 | /* carveout - explicitly map the pfns into a vmalloc area */ | ||
668 | |||
669 | nvmap_usecount_inc(h); | ||
670 | |||
671 | adj_size = h->carveout->base & ~PAGE_MASK; | ||
672 | adj_size += h->size; | ||
673 | adj_size = PAGE_ALIGN(adj_size); | ||
674 | |||
675 | v = alloc_vm_area(adj_size); | ||
676 | if (!v) { | ||
677 | nvmap_usecount_dec(h); | ||
678 | nvmap_handle_put(h); | ||
679 | return NULL; | ||
680 | } | ||
681 | |||
682 | p = v->addr + (h->carveout->base & ~PAGE_MASK); | ||
683 | |||
684 | for (offs = 0; offs < adj_size; offs += PAGE_SIZE) { | ||
685 | unsigned long addr = (unsigned long) v->addr + offs; | ||
686 | unsigned int pfn; | ||
687 | pgd_t *pgd; | ||
688 | pud_t *pud; | ||
689 | pmd_t *pmd; | ||
690 | pte_t *pte; | ||
691 | |||
692 | pfn = __phys_to_pfn(h->carveout->base + offs); | ||
693 | pgd = pgd_offset_k(addr); | ||
694 | pud = pud_alloc(&init_mm, pgd, addr); | ||
695 | if (!pud) | ||
696 | break; | ||
697 | pmd = pmd_alloc(&init_mm, pud, addr); | ||
698 | if (!pmd) | ||
699 | break; | ||
700 | pte = pte_alloc_kernel(pmd, addr); | ||
701 | if (!pte) | ||
702 | break; | ||
703 | set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); | ||
704 | flush_tlb_kernel_page(addr); | ||
705 | } | ||
706 | |||
707 | if (offs != adj_size) { | ||
708 | free_vm_area(v); | ||
709 | nvmap_usecount_dec(h); | ||
710 | nvmap_handle_put(h); | ||
711 | return NULL; | ||
712 | } | ||
713 | |||
714 | /* leave the handle ref count incremented by 1, so that | ||
715 | * the handle will not be freed while the kernel mapping exists. | ||
716 | * nvmap_handle_put will be called by unmapping this address */ | ||
717 | return p; | ||
718 | } | ||
719 | |||
720 | void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr) | ||
721 | { | ||
722 | struct nvmap_handle *h; | ||
723 | |||
724 | if (!ref) | ||
725 | return; | ||
726 | |||
727 | h = ref->handle; | ||
728 | |||
729 | if (h->heap_pgalloc) { | ||
730 | vm_unmap_ram(addr, h->size >> PAGE_SHIFT); | ||
731 | } else { | ||
732 | struct vm_struct *vm; | ||
733 | addr -= (h->carveout->base & ~PAGE_MASK); | ||
734 | vm = remove_vm_area(addr); | ||
735 | BUG_ON(!vm); | ||
736 | kfree(vm); | ||
737 | nvmap_usecount_dec(h); | ||
738 | } | ||
739 | nvmap_handle_put(h); | ||
740 | } | ||
741 | |||
742 | struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size, | ||
743 | size_t align, unsigned int flags, | ||
744 | unsigned int heap_mask) | ||
745 | { | ||
746 | const unsigned int default_heap = (NVMAP_HEAP_SYSMEM | | ||
747 | NVMAP_HEAP_CARVEOUT_GENERIC); | ||
748 | struct nvmap_handle_ref *r = NULL; | ||
749 | int err; | ||
750 | |||
751 | if (heap_mask == 0) | ||
752 | heap_mask = default_heap; | ||
753 | |||
754 | r = nvmap_create_handle(client, size); | ||
755 | if (IS_ERR(r)) | ||
756 | return r; | ||
757 | |||
758 | err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r), | ||
759 | heap_mask, align, flags); | ||
760 | |||
761 | if (err) { | ||
762 | nvmap_free_handle_id(client, nvmap_ref_to_id(r)); | ||
763 | return ERR_PTR(err); | ||
764 | } | ||
765 | |||
766 | return r; | ||
767 | } | ||
768 | |||
/* allocates IOVMM-backed memory whose mapping begins at the specified
 * iovm_start address, and pins it before returning so the address
 * remains valid for the caller (released via nvmap_free_iovm).
 * returns an ERR_PTR on failure. */
struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
	size_t size, size_t align, unsigned int flags, unsigned int iovm_start)
{
	int err;
	struct nvmap_handle *h;
	struct nvmap_handle_ref *r;
	const unsigned int default_heap = NVMAP_HEAP_IOVMM;

	/* size need to be more than one page.
	 * otherwise heap preference would change to system heap.
	 */
	if (size <= PAGE_SIZE)
		size = PAGE_SIZE << 1;
	r = nvmap_create_handle(client, size);
	if (IS_ERR_OR_NULL(r))
		return r;

	h = r->handle;
	/* record the requested iova before allocating, so the IOVMM
	 * allocator can honor it (see nvmap_pgalloc.iovm_addr) */
	h->pgalloc.iovm_addr = iovm_start;
	err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
			default_heap, align, flags);
	if (err)
		goto fail;

	/* pin under the share-wide pin lock */
	err = mutex_lock_interruptible(&client->share->pin_lock);
	if (WARN_ON(err))
		goto fail;
	err = pin_locked(client, h);
	mutex_unlock(&client->share->pin_lock);
	if (err)
		goto fail;
	return r;

fail:
	/* frees the handle (and any allocation it received) */
	nvmap_free_handle_id(client, nvmap_ref_to_id(r));
	return ERR_PTR(err);
}
807 | |||
/* release a handle obtained from nvmap_alloc_iovm: drop the pin taken
 * at allocation time, then free the handle itself. */
void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
	unsigned long id = nvmap_ref_to_id(r);

	nvmap_unpin_ids(client, 1, &id);
	nvmap_free_handle_id(client, id);
}
815 | |||
/* free a handle reference created by nvmap_alloc; NULL is a no-op. */
void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
	if (r)
		nvmap_free_handle_id(client, nvmap_ref_to_id(r));
}
823 | |||
/*
 * create a mapping to the user's buffer and write it
 * (uses similar logic from nvmap_reloc_pin_array to map the cmdbuf)
 *
 * writes patch_value into handle 'patch' at byte offset patch_offset.
 * returns 0 on success, -EFAULT if the offset lies outside the handle,
 * or an errno if a scratch PTE cannot be allocated.
 */
int nvmap_patch_word(struct nvmap_client *client,
		     struct nvmap_handle *patch,
		     u32 patch_offset, u32 patch_value)
{
	phys_addr_t phys;
	unsigned long kaddr;
	unsigned int pfn;
	void *addr;
	pte_t **pte;
	pgprot_t prot;

	if (patch_offset >= patch->size) {
		nvmap_warn(client, "read/write outside of handle\n");
		return -EFAULT;
	}

	/* scratch PTE + kernel vaddr used to temporarily map the page */
	pte = nvmap_alloc_pte(client->dev, &addr);
	if (IS_ERR(pte))
		return PTR_ERR(pte);

	/* derive physaddr of cmdbuf WAIT to patch */
	if (patch->heap_pgalloc) {
		unsigned int page = patch_offset >> PAGE_SHIFT;
		phys = page_to_phys(patch->pgalloc.pages[page]);
		phys += (patch_offset & ~PAGE_MASK);
	} else {
		phys = patch->carveout->base + patch_offset;
	}

	pfn = __phys_to_pfn(phys);
	prot = nvmap_pgprot(patch, pgprot_kernel);
	kaddr = (unsigned long)addr;

	/* write PTE, so addr points to cmdbuf PFN */
	set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, prot));
	flush_tlb_kernel_page(kaddr);

	/* write patch_value to addr + page offset */
	__raw_writel(patch_value, addr + (phys & ~PAGE_MASK));

	nvmap_free_pte(client->dev, pte);
	/* make sure the patched word is visible before returning */
	wmb();
	return 0;
}
diff --git a/drivers/video/tegra/nvmap/nvmap.h b/drivers/video/tegra/nvmap/nvmap.h new file mode 100644 index 00000000000..44a0d86b603 --- /dev/null +++ b/drivers/video/tegra/nvmap/nvmap.h | |||
@@ -0,0 +1,271 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/nvmap/nvmap.h | ||
3 | * | ||
4 | * GPU memory management driver for Tegra | ||
5 | * | ||
6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
23 | #ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H | ||
24 | #define __VIDEO_TEGRA_NVMAP_NVMAP_H | ||
25 | |||
26 | #include <linux/list.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/mutex.h> | ||
29 | #include <linux/rbtree.h> | ||
30 | #include <linux/sched.h> | ||
31 | #include <linux/wait.h> | ||
32 | #include <linux/atomic.h> | ||
33 | #include <mach/nvmap.h> | ||
34 | #include "nvmap_heap.h" | ||
35 | |||
36 | struct nvmap_device; | ||
37 | struct page; | ||
38 | struct tegra_iovmm_area; | ||
39 | |||
40 | #if defined(CONFIG_TEGRA_NVMAP) | ||
41 | #define nvmap_err(_client, _fmt, ...) \ | ||
42 | dev_err(nvmap_client_to_device(_client), \ | ||
43 | "%s: "_fmt, __func__, ##__VA_ARGS__) | ||
44 | |||
45 | #define nvmap_warn(_client, _fmt, ...) \ | ||
46 | dev_warn(nvmap_client_to_device(_client), \ | ||
47 | "%s: "_fmt, __func__, ##__VA_ARGS__) | ||
48 | |||
49 | #define nvmap_debug(_client, _fmt, ...) \ | ||
50 | dev_dbg(nvmap_client_to_device(_client), \ | ||
51 | "%s: "_fmt, __func__, ##__VA_ARGS__) | ||
52 | |||
53 | #define nvmap_ref_to_id(_ref) ((unsigned long)(_ref)->handle) | ||
54 | |||
/* describes a handle allocated from shared system memory (either
 * IOVMM-mapped or high-order page allocations) */
struct nvmap_pgalloc {
	struct page **pages;		/* backing pages */
	struct tegra_iovmm_area *area;	/* IOVMM mapping of the pages */
	struct list_head mru_list;	/* MRU entry for IOVMM reclamation */
	bool contig;			/* contiguous system memory */
	bool dirty;			/* area is invalid and needs mapping */
	u32 iovm_addr;	/* non-zero if the client needs a specific iova mapping */
};
65 | |||
struct nvmap_handle {
	struct rb_node node;	/* entry on global handle tree */
	atomic_t ref;		/* reference count (i.e., # of duplications) */
	atomic_t pin;		/* pin count */
	unsigned int usecount;	/* how often is used */
	unsigned long flags;	/* cacheability flags; also carries the
				 * NVMAP_HANDLE_VISITED marker during
				 * array pinning */
	size_t size;		/* padded (as-allocated) size */
	size_t orig_size;	/* original (as-requested) size */
	size_t align;		/* requested alignment */
	struct nvmap_client *owner;	/* creating client, presumably --
					 * confirm against create path */
	struct nvmap_device *dev;
	union {
		/* pgalloc is valid when heap_pgalloc is true,
		 * carveout otherwise */
		struct nvmap_pgalloc pgalloc;
		struct nvmap_heap_block *carveout;
	};
	bool global;		/* handle may be duplicated by other clients */
	bool secure;		/* zap IOVMM area on unpin */
	bool heap_pgalloc;	/* handle is page allocated (sysmem / iovmm) */
	bool alloc;		/* handle has memory allocated */
	unsigned int userflags;	/* flags passed from userspace */
	struct mutex lock;	/* serializes handle state (e.g. taken
				 * around handle_phys reads) */
};
88 | |||
89 | #define NVMAP_DEFAULT_PAGE_POOL_SIZE 8192 | ||
90 | #define NVMAP_UC_POOL NVMAP_HANDLE_UNCACHEABLE | ||
91 | #define NVMAP_WC_POOL NVMAP_HANDLE_WRITE_COMBINE | ||
92 | #define NVMAP_IWB_POOL NVMAP_HANDLE_INNER_CACHEABLE | ||
93 | #define NVMAP_WB_POOL NVMAP_HANDLE_CACHEABLE | ||
94 | #define NVMAP_NUM_POOLS (NVMAP_HANDLE_CACHEABLE + 1) | ||
95 | |||
/* pool of pre-allocated pages for one cacheability type */
struct nvmap_page_pool {
	struct mutex lock;		/* presumably guards the arrays
					 * and counts below -- confirm */
	int npages;			/* pages currently in page_array */
	struct page **page_array;	/* the pooled pages */
	struct page **shrink_array;	/* scratch array, apparently used
					 * when shrinking -- confirm */
	int max_pages;			/* capacity of the pool */
	int flags;			/* which NVMAP_*_POOL this is */
};
104 | |||
105 | int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags); | ||
106 | |||
/* state shared by all clients of one nvmap device (client->share) */
struct nvmap_share {
	struct tegra_iovmm_client *iovmm;
	wait_queue_head_t pin_wait;	/* woken when handles become
					 * unpinned */
	struct mutex pin_lock;		/* serializes pin/unpin state
					 * transitions */
	union {
		struct nvmap_page_pool pools[NVMAP_NUM_POOLS];
		/* named aliases for the entries of pools[] */
		struct {
			struct nvmap_page_pool uc_pool;
			struct nvmap_page_pool wc_pool;
			struct nvmap_page_pool iwb_pool;
			struct nvmap_page_pool wb_pool;
		};
	};
#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
	struct mutex mru_lock;		/* presumably guards mru_lists --
					 * confirm */
	struct list_head *mru_lists;
	int nr_mru;
#endif
};
126 | |||
/* per-client accounting record for one carveout; updated by
 * nvmap_carveout_commit_add/subtract */
struct nvmap_carveout_commit {
	size_t commit;		/* committed amount (bytes, presumably) */
	struct list_head list;
};
131 | |||
/* per-user context for talking to an nvmap device */
struct nvmap_client {
	const char *name;
	struct nvmap_device *dev;
	struct nvmap_share *share;	/* device-wide shared state */
	struct rb_root handle_refs;	/* handle references held by
					 * this client */
	atomic_t iovm_commit;
	size_t iovm_limit;
	struct mutex ref_lock;	/* taken via nvmap_ref_lock/unlock */
	bool super;
	atomic_t count;		/* client reference count */
	struct task_struct *task;
	struct list_head list;
	/* trailing variable-length array (pre-C99 [0] idiom):
	 * one commit record per carveout node */
	struct nvmap_carveout_commit carveout_commit[0];
};
146 | |||
/* per-VMA private data for user-space nvmap mappings */
struct nvmap_vma_priv {
	struct nvmap_handle *handle;	/* handle backing this VMA */
	size_t offs;	/* offset into the handle -- TODO confirm */
	atomic_t count;	/* number of processes cloning the VMA */
};
152 | |||
/* acquire the client's handle-reference lock (ref_lock) */
static inline void nvmap_ref_lock(struct nvmap_client *priv)
{
	mutex_lock(&priv->ref_lock);
}

/* release the lock taken by nvmap_ref_lock() */
static inline void nvmap_ref_unlock(struct nvmap_client *priv)
{
	mutex_unlock(&priv->ref_lock);
}
162 | #endif /* CONFIG_TEGRA_NVMAP */ | ||
163 | |||
164 | struct device *nvmap_client_to_device(struct nvmap_client *client); | ||
165 | |||
166 | pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr); | ||
167 | |||
168 | pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr); | ||
169 | |||
170 | void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte); | ||
171 | |||
172 | void nvmap_usecount_inc(struct nvmap_handle *h); | ||
173 | void nvmap_usecount_dec(struct nvmap_handle *h); | ||
174 | |||
175 | struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev, | ||
176 | struct nvmap_handle *handle, | ||
177 | unsigned long type); | ||
178 | |||
179 | unsigned long nvmap_carveout_usage(struct nvmap_client *c, | ||
180 | struct nvmap_heap_block *b); | ||
181 | |||
182 | struct nvmap_carveout_node; | ||
183 | void nvmap_carveout_commit_add(struct nvmap_client *client, | ||
184 | struct nvmap_carveout_node *node, size_t len); | ||
185 | |||
186 | void nvmap_carveout_commit_subtract(struct nvmap_client *client, | ||
187 | struct nvmap_carveout_node *node, | ||
188 | size_t len); | ||
189 | |||
190 | struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev); | ||
191 | |||
192 | struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client, | ||
193 | unsigned long handle); | ||
194 | |||
195 | struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *priv, | ||
196 | unsigned long id); | ||
197 | |||
198 | struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client, | ||
199 | unsigned long id); | ||
200 | |||
201 | struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client, | ||
202 | size_t size); | ||
203 | |||
204 | struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client, | ||
205 | unsigned long id); | ||
206 | |||
207 | int nvmap_alloc_handle_id(struct nvmap_client *client, | ||
208 | unsigned long id, unsigned int heap_mask, | ||
209 | size_t align, unsigned int flags); | ||
210 | |||
211 | void nvmap_free_handle_id(struct nvmap_client *c, unsigned long id); | ||
212 | |||
213 | int nvmap_pin_ids(struct nvmap_client *client, | ||
214 | unsigned int nr, const unsigned long *ids); | ||
215 | |||
216 | void nvmap_unpin_ids(struct nvmap_client *priv, | ||
217 | unsigned int nr, const unsigned long *ids); | ||
218 | |||
219 | void _nvmap_handle_free(struct nvmap_handle *h); | ||
220 | |||
221 | int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h); | ||
222 | |||
223 | void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h); | ||
224 | |||
225 | #if defined(CONFIG_TEGRA_NVMAP) | ||
/* take a reference on h and return it.  if the pre-increment count was
 * already <= 0 the handle is being (or has been) freed: log an error,
 * and return NULL when the post-increment count is still non-positive. */
static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
	if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
		pr_err("%s: %s getting a freed handle\n",
			__func__, current->group_leader->comm);
		if (atomic_read(&h->ref) <= 0)
			return NULL;
	}
	return h;
}
236 | |||
/* drop a reference on h; frees the handle when the last reference is
 * released, and WARNs (without freeing) if the count underflows. */
static inline void nvmap_handle_put(struct nvmap_handle *h)
{
	int cnt = atomic_dec_return(&h->ref);

	if (WARN_ON(cnt < 0)) {
		pr_err("%s: %s put to negative references\n",
			__func__, current->comm);
	} else if (cnt == 0)
		_nvmap_handle_free(h);
}
247 | |||
248 | static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot) | ||
249 | { | ||
250 | if (h->flags == NVMAP_HANDLE_UNCACHEABLE) | ||
251 | return pgprot_noncached(prot); | ||
252 | else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE) | ||
253 | return pgprot_writecombine(prot); | ||
254 | else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) | ||
255 | return pgprot_inner_writeback(prot); | ||
256 | return prot; | ||
257 | } | ||
258 | #else /* CONFIG_TEGRA_NVMAP */ | ||
259 | struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h); | ||
260 | void nvmap_handle_put(struct nvmap_handle *h); | ||
261 | pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot); | ||
262 | #endif /* !CONFIG_TEGRA_NVMAP */ | ||
263 | |||
264 | int is_nvmap_vma(struct vm_area_struct *vma); | ||
265 | |||
266 | struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client, | ||
267 | size_t size, size_t align, unsigned int flags, unsigned int iova_start); | ||
268 | |||
269 | void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r); | ||
270 | |||
271 | #endif | ||
diff --git a/drivers/video/tegra/nvmap/nvmap_common.h b/drivers/video/tegra/nvmap/nvmap_common.h new file mode 100644 index 00000000000..6da010720bb --- /dev/null +++ b/drivers/video/tegra/nvmap/nvmap_common.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/nvmap/nvmap_common.h | ||
3 | * | ||
4 | * GPU memory management driver for Tegra | ||
5 | * | ||
6 | * Copyright (c) 2011, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
/* ARMv7 whole-cache maintenance entry points; the void * argument is
 * ignored and exists so they can be called via on_each_cpu() */
extern void v7_flush_kern_cache_all(void *);
extern void v7_clean_kern_cache_all(void *);

/* size above which set/way whole-cache maintenance is used instead of
 * per-line operations -- tuning value, confirm against callers */
#define FLUSH_CLEAN_BY_SET_WAY_THRESHOLD (8 * PAGE_SIZE)

/* flush the inner cache on every online CPU (waits for completion) */
static inline void inner_flush_cache_all(void)
{
	on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
}

/* clean the inner cache on every online CPU (waits for completion) */
static inline void inner_clean_cache_all(void)
{
	on_each_cpu(v7_clean_kern_cache_all, NULL, 1);
}

extern void __flush_dcache_page(struct address_space *, struct page *);
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c new file mode 100644 index 00000000000..f84f38c93aa --- /dev/null +++ b/drivers/video/tegra/nvmap/nvmap_dev.c | |||
@@ -0,0 +1,1436 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/nvmap/nvmap_dev.c | ||
3 | * | ||
4 | * User-space interface to nvmap | ||
5 | * | ||
6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/backing-dev.h> | ||
24 | #include <linux/bitmap.h> | ||
25 | #include <linux/debugfs.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/miscdevice.h> | ||
29 | #include <linux/mm.h> | ||
30 | #include <linux/oom.h> | ||
31 | #include <linux/platform_device.h> | ||
32 | #include <linux/seq_file.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/spinlock.h> | ||
35 | #include <linux/uaccess.h> | ||
36 | #include <linux/vmalloc.h> | ||
37 | |||
38 | #include <asm/cacheflush.h> | ||
39 | #include <asm/tlbflush.h> | ||
40 | |||
41 | #include <mach/iovmm.h> | ||
42 | #include <mach/nvmap.h> | ||
43 | |||
44 | #include "nvmap.h" | ||
45 | #include "nvmap_ioctl.h" | ||
46 | #include "nvmap_mru.h" | ||
47 | #include "nvmap_common.h" | ||
48 | |||
49 | #define NVMAP_NUM_PTES 64 | ||
50 | #define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */ | ||
51 | |||
52 | #ifdef CONFIG_NVMAP_CARVEOUT_KILLER | ||
53 | static bool carveout_killer = true; | ||
54 | #else | ||
55 | static bool carveout_killer; | ||
56 | #endif | ||
57 | module_param(carveout_killer, bool, 0640); | ||
58 | |||
59 | struct nvmap_carveout_node { | ||
60 | unsigned int heap_bit; | ||
61 | struct nvmap_heap *carveout; | ||
62 | int index; | ||
63 | struct list_head clients; | ||
64 | spinlock_t clients_lock; | ||
65 | }; | ||
66 | |||
67 | struct nvmap_device { | ||
68 | struct vm_struct *vm_rgn; | ||
69 | pte_t *ptes[NVMAP_NUM_PTES]; | ||
70 | unsigned long ptebits[NVMAP_NUM_PTES / BITS_PER_LONG]; | ||
71 | unsigned int lastpte; | ||
72 | spinlock_t ptelock; | ||
73 | |||
74 | struct rb_root handles; | ||
75 | spinlock_t handle_lock; | ||
76 | wait_queue_head_t pte_wait; | ||
77 | struct miscdevice dev_super; | ||
78 | struct miscdevice dev_user; | ||
79 | struct nvmap_carveout_node *heaps; | ||
80 | int nr_carveouts; | ||
81 | struct nvmap_share iovmm_master; | ||
82 | struct list_head clients; | ||
83 | spinlock_t clients_lock; | ||
84 | }; | ||
85 | |||
86 | struct nvmap_device *nvmap_dev; | ||
87 | |||
88 | static struct backing_dev_info nvmap_bdi = { | ||
89 | .ra_pages = 0, | ||
90 | .capabilities = (BDI_CAP_NO_ACCT_AND_WRITEBACK | | ||
91 | BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP), | ||
92 | }; | ||
93 | |||
94 | static int nvmap_open(struct inode *inode, struct file *filp); | ||
95 | static int nvmap_release(struct inode *inode, struct file *filp); | ||
96 | static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); | ||
97 | static int nvmap_map(struct file *filp, struct vm_area_struct *vma); | ||
98 | static void nvmap_vma_open(struct vm_area_struct *vma); | ||
99 | static void nvmap_vma_close(struct vm_area_struct *vma); | ||
100 | static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | ||
101 | |||
102 | static const struct file_operations nvmap_user_fops = { | ||
103 | .owner = THIS_MODULE, | ||
104 | .open = nvmap_open, | ||
105 | .release = nvmap_release, | ||
106 | .unlocked_ioctl = nvmap_ioctl, | ||
107 | .mmap = nvmap_map, | ||
108 | }; | ||
109 | |||
110 | static const struct file_operations nvmap_super_fops = { | ||
111 | .owner = THIS_MODULE, | ||
112 | .open = nvmap_open, | ||
113 | .release = nvmap_release, | ||
114 | .unlocked_ioctl = nvmap_ioctl, | ||
115 | .mmap = nvmap_map, | ||
116 | }; | ||
117 | |||
118 | static struct vm_operations_struct nvmap_vma_ops = { | ||
119 | .open = nvmap_vma_open, | ||
120 | .close = nvmap_vma_close, | ||
121 | .fault = nvmap_vma_fault, | ||
122 | }; | ||
123 | |||
/* Returns nonzero if @vma was created by nvmap's mmap handler, identified
 * by its vm_ops pointer. */
int is_nvmap_vma(struct vm_area_struct *vma)
{
	return vma->vm_ops == &nvmap_vma_ops;
}
128 | |||
129 | struct device *nvmap_client_to_device(struct nvmap_client *client) | ||
130 | { | ||
131 | if (client->super) | ||
132 | return client->dev->dev_super.this_device; | ||
133 | else | ||
134 | return client->dev->dev_user.this_device; | ||
135 | } | ||
136 | |||
/* All clients currently share one IOVMM arena owned by the device
 * (see the TODO in nvmap_create_client). */
struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev)
{
	return &dev->iovmm_master;
}
141 | |||
142 | /* allocates a PTE for the caller's use; returns the PTE pointer or | ||
143 | * a negative errno. may be called from IRQs */ | ||
pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
{
	unsigned long flags;
	unsigned long bit;

	spin_lock_irqsave(&dev->ptelock, flags);
	/* search from the last allocated slot to spread use across the
	 * PTE array; wrap around to the start if nothing is free above */
	bit = find_next_zero_bit(dev->ptebits, NVMAP_NUM_PTES, dev->lastpte);
	if (bit == NVMAP_NUM_PTES) {
		bit = find_first_zero_bit(dev->ptebits, dev->lastpte);
		/* hitting lastpte again means the whole bitmap is full */
		if (bit == dev->lastpte)
			bit = NVMAP_NUM_PTES;
	}

	if (bit == NVMAP_NUM_PTES) {
		spin_unlock_irqrestore(&dev->ptelock, flags);
		return ERR_PTR(-ENOMEM);
	}

	dev->lastpte = bit;
	set_bit(bit, dev->ptebits);
	spin_unlock_irqrestore(&dev->ptelock, flags);

	/* each PTE slot maps one page of the device's reserved VM region */
	*vaddr = dev->vm_rgn->addr + bit * PAGE_SIZE;
	return &(dev->ptes[bit]);
}
169 | |||
170 | /* allocates a PTE for the caller's use; returns the PTE pointer or | ||
171 | * a negative errno. must be called from sleepable contexts */ | ||
pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr)
{
	int ret;
	pte_t **pte;
	/* sleep until a PTE slot frees up (nvmap_free_pte wakes pte_wait) */
	ret = wait_event_interruptible(dev->pte_wait,
			!IS_ERR(pte = nvmap_alloc_pte_irq(dev, vaddr)));

	if (ret == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	return pte;
}
184 | |||
185 | /* frees a PTE */ | ||
void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte)
{
	unsigned long addr;
	unsigned int bit = pte - dev->ptes;	/* recover slot index */
	unsigned long flags;

	if (WARN_ON(bit >= NVMAP_NUM_PTES))
		return;

	/* clear the kernel mapping for this slot before releasing it */
	addr = (unsigned long)dev->vm_rgn->addr + bit * PAGE_SIZE;
	set_pte_at(&init_mm, addr, *pte, 0);

	spin_lock_irqsave(&dev->ptelock, flags);
	clear_bit(bit, dev->ptebits);
	spin_unlock_irqrestore(&dev->ptelock, flags);
	/* waiters in nvmap_alloc_pte may now retry */
	wake_up(&dev->pte_wait);
}
203 | |||
204 | /* verifies that the handle ref value "ref" is a valid handle ref for the | ||
205 | * file. caller must hold the file's ref_lock prior to calling this function */ | ||
206 | struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *c, | ||
207 | unsigned long id) | ||
208 | { | ||
209 | struct rb_node *n = c->handle_refs.rb_node; | ||
210 | |||
211 | while (n) { | ||
212 | struct nvmap_handle_ref *ref; | ||
213 | ref = rb_entry(n, struct nvmap_handle_ref, node); | ||
214 | if ((unsigned long)ref->handle == id) | ||
215 | return ref; | ||
216 | else if (id > (unsigned long)ref->handle) | ||
217 | n = n->rb_right; | ||
218 | else | ||
219 | n = n->rb_left; | ||
220 | } | ||
221 | |||
222 | return NULL; | ||
223 | } | ||
224 | |||
225 | struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client, | ||
226 | unsigned long id) | ||
227 | { | ||
228 | struct nvmap_handle_ref *ref; | ||
229 | struct nvmap_handle *h = NULL; | ||
230 | |||
231 | nvmap_ref_lock(client); | ||
232 | ref = _nvmap_validate_id_locked(client, id); | ||
233 | if (ref) | ||
234 | h = ref->handle; | ||
235 | if (h) | ||
236 | h = nvmap_handle_get(h); | ||
237 | nvmap_ref_unlock(client); | ||
238 | return h; | ||
239 | } | ||
240 | |||
241 | unsigned long nvmap_carveout_usage(struct nvmap_client *c, | ||
242 | struct nvmap_heap_block *b) | ||
243 | { | ||
244 | struct nvmap_heap *h = nvmap_block_to_heap(b); | ||
245 | struct nvmap_carveout_node *n; | ||
246 | int i; | ||
247 | |||
248 | for (i = 0; i < c->dev->nr_carveouts; i++) { | ||
249 | n = &c->dev->heaps[i]; | ||
250 | if (n->carveout == h) | ||
251 | return n->heap_bit; | ||
252 | } | ||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * This routine is used to flush the carveout memory from cache. | ||
258 | * Why cache flush is needed for carveout? Consider the case, where a piece of | ||
259 | * carveout is allocated as cached and released. After this, if the same memory is | ||
260 | * allocated for uncached request and the memory is not flushed out from cache. | ||
261 | * In this case, the client might pass this to H/W engine and it could start modify | ||
262 | * the memory. As this was cached earlier, it might have some portion of it in cache. | ||
263 | * During cpu request to read/write other memory, the cached portion of this memory | ||
264 | * might get flushed back to main memory and would cause corruptions, if it happens | ||
265 | * after H/W writes data to memory. | ||
266 | * | ||
267 | * But flushing out the memory blindly on each carveout allocation is redundant. | ||
268 | * | ||
269 | * In order to optimize the carveout buffer cache flushes, the following | ||
270 | * strategy is used. | ||
271 | * | ||
272 | * The whole Carveout is flushed out from cache during its initialization. | ||
273 | * During allocation, carveout buffers are not flused from cache. | ||
274 | * During deallocation, carveout buffers are flushed, if they were allocated as cached. | ||
275 | * if they were allocated as uncached/writecombined, no cache flush is needed. | ||
276 | * Just draining store buffers is enough. | ||
277 | */ | ||
int nvmap_flush_heap_block(struct nvmap_client *client,
	struct nvmap_heap_block *block, size_t len, unsigned int prot)
{
	pte_t **pte;
	void *addr;
	phys_addr_t kaddr;
	phys_addr_t phys = block->base;
	phys_addr_t end = block->base + len;

	/* uncached/write-combined buffers were never in the cache; only the
	 * trailing wmb() (store-buffer drain) is needed */
	if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
		goto out;

	/* for large buffers a full set/way flush is cheaper than walking
	 * the range page by page */
	if (len >= FLUSH_CLEAN_BY_SET_WAY_THRESHOLD) {
		inner_flush_cache_all();
		if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
			outer_flush_range(block->base, block->base + len);
		goto out;
	}

	/* borrow a scratch PTE so each physical page can be mapped into
	 * kernel VA and flushed by virtual address */
	pte = nvmap_alloc_pte((client ? client->dev : nvmap_dev), &addr);
	if (IS_ERR(pte))
		return PTR_ERR(pte);

	kaddr = (phys_addr_t)addr;

	while (phys < end) {
		phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
		unsigned long pfn = __phys_to_pfn(phys);
		void *base = (void *)kaddr + (phys & ~PAGE_MASK);

		next = min(next, end);
		set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, pgprot_kernel));
		flush_tlb_kernel_page(kaddr);
		__cpuc_flush_dcache_area(base, next - phys);
		phys = next;
	}

	/* inner-only buffers never reach the outer (L2) cache */
	if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
		outer_flush_range(block->base, block->base + len);

	nvmap_free_pte((client ? client->dev : nvmap_dev), pte);
out:
	wmb();
	return 0;
}
323 | |||
/* Account @len bytes of carveout usage to @client for @node's heap,
 * linking the client onto the heap's client list on first use. */
void nvmap_carveout_commit_add(struct nvmap_client *client,
			       struct nvmap_carveout_node *node,
			       size_t len)
{
	unsigned long flags;

	nvmap_ref_lock(client);
	spin_lock_irqsave(&node->clients_lock, flags);
	/* a client off the list must have a zero commit — anything else
	 * means the accounting is corrupt */
	BUG_ON(list_empty(&client->carveout_commit[node->index].list) &&
	       client->carveout_commit[node->index].commit != 0);

	client->carveout_commit[node->index].commit += len;
	/* if this client isn't already on the list of nodes for this heap,
	   add it */
	if (list_empty(&client->carveout_commit[node->index].list)) {
		list_add(&client->carveout_commit[node->index].list,
			 &node->clients);
	}
	spin_unlock_irqrestore(&node->clients_lock, flags);
	nvmap_ref_unlock(client);
}
345 | |||
/* Remove @len bytes from @client's carveout usage for @node's heap,
 * unlinking the client from the heap's list when its commit reaches 0. */
void nvmap_carveout_commit_subtract(struct nvmap_client *client,
				    struct nvmap_carveout_node *node,
				    size_t len)
{
	unsigned long flags;

	if (!client)
		return;

	spin_lock_irqsave(&node->clients_lock, flags);
	BUG_ON(client->carveout_commit[node->index].commit < len);
	client->carveout_commit[node->index].commit -= len;
	/* if no more allocation in this carveout for this node, delete it */
	if (!client->carveout_commit[node->index].commit)
		list_del_init(&client->carveout_commit[node->index].list);
	spin_unlock_irqrestore(&node->clients_lock, flags);
}
363 | |||
/* Recover the owning nvmap_client from one of its carveout_commit array
 * entries: back up node->index entries to the start of the array, then
 * subtract the array's offset within struct nvmap_client (a manual
 * container_of over a flexible array element). */
static struct nvmap_client *get_client_from_carveout_commit(
	struct nvmap_carveout_node *node, struct nvmap_carveout_commit *commit)
{
	struct nvmap_carveout_commit *first_commit = commit - node->index;
	return (void *)first_commit - offsetof(struct nvmap_client,
					       carveout_commit);
}
371 | |||
372 | static DECLARE_WAIT_QUEUE_HEAD(wait_reclaim); | ||
373 | static int wait_count; | ||
374 | bool nvmap_shrink_carveout(struct nvmap_carveout_node *node) | ||
375 | { | ||
376 | struct nvmap_carveout_commit *commit; | ||
377 | size_t selected_size = 0; | ||
378 | int selected_oom_adj = OOM_ADJUST_MIN; | ||
379 | struct task_struct *selected_task = NULL; | ||
380 | unsigned long flags; | ||
381 | bool wait = false; | ||
382 | int current_oom_adj = OOM_ADJUST_MIN; | ||
383 | |||
384 | task_lock(current); | ||
385 | if (current->signal) | ||
386 | current_oom_adj = current->signal->oom_adj; | ||
387 | task_unlock(current); | ||
388 | |||
389 | spin_lock_irqsave(&node->clients_lock, flags); | ||
390 | /* find the task with the smallest oom_adj (lowest priority) | ||
391 | * and largest carveout allocation -- ignore kernel allocations, | ||
392 | * there's no way to handle them */ | ||
393 | list_for_each_entry(commit, &node->clients, list) { | ||
394 | struct nvmap_client *client = | ||
395 | get_client_from_carveout_commit(node, commit); | ||
396 | size_t size = commit->commit; | ||
397 | struct task_struct *task = client->task; | ||
398 | struct signal_struct *sig; | ||
399 | |||
400 | if (!task) | ||
401 | continue; | ||
402 | |||
403 | task_lock(task); | ||
404 | sig = task->signal; | ||
405 | if (!task->mm || !sig) | ||
406 | goto end; | ||
407 | /* don't try to kill current */ | ||
408 | if (task == current->group_leader) | ||
409 | goto end; | ||
410 | /* don't try to kill higher priority tasks */ | ||
411 | if (sig->oom_adj < current_oom_adj) | ||
412 | goto end; | ||
413 | if (sig->oom_adj < selected_oom_adj) | ||
414 | goto end; | ||
415 | if (sig->oom_adj == selected_oom_adj && | ||
416 | size <= selected_size) | ||
417 | goto end; | ||
418 | selected_oom_adj = sig->oom_adj; | ||
419 | selected_size = size; | ||
420 | selected_task = task; | ||
421 | end: | ||
422 | task_unlock(task); | ||
423 | } | ||
424 | if (selected_task) { | ||
425 | wait = true; | ||
426 | if (fatal_signal_pending(selected_task)) { | ||
427 | pr_warning("carveout_killer: process %d dying " | ||
428 | "slowly\n", selected_task->pid); | ||
429 | goto out; | ||
430 | } | ||
431 | pr_info("carveout_killer: killing process %d with oom_adj %d " | ||
432 | "to reclaim %d (for process with oom_adj %d)\n", | ||
433 | selected_task->pid, selected_oom_adj, | ||
434 | selected_size, current_oom_adj); | ||
435 | force_sig(SIGKILL, selected_task); | ||
436 | } | ||
437 | out: | ||
438 | spin_unlock_irqrestore(&node->clients_lock, flags); | ||
439 | return wait; | ||
440 | } | ||
441 | |||
442 | static | ||
443 | struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client, | ||
444 | struct nvmap_handle *handle, | ||
445 | unsigned long type) | ||
446 | { | ||
447 | struct nvmap_carveout_node *co_heap; | ||
448 | struct nvmap_device *dev = client->dev; | ||
449 | int i; | ||
450 | |||
451 | for (i = 0; i < dev->nr_carveouts; i++) { | ||
452 | struct nvmap_heap_block *block; | ||
453 | co_heap = &dev->heaps[i]; | ||
454 | |||
455 | if (!(co_heap->heap_bit & type)) | ||
456 | continue; | ||
457 | |||
458 | block = nvmap_heap_alloc(co_heap->carveout, handle); | ||
459 | if (block) | ||
460 | return block; | ||
461 | } | ||
462 | return NULL; | ||
463 | } | ||
464 | |||
/* True once wait_count has advanced past the snapshot @count, i.e. some
 * client has been destroyed (and its carveout memory freed) since the
 * caller sampled wait_count. */
static bool nvmap_carveout_freed(int count)
{
	smp_rmb();
	return count != wait_count;
}
470 | |||
471 | struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client, | ||
472 | struct nvmap_handle *handle, | ||
473 | unsigned long type) | ||
474 | { | ||
475 | struct nvmap_heap_block *block; | ||
476 | struct nvmap_carveout_node *co_heap; | ||
477 | struct nvmap_device *dev = client->dev; | ||
478 | int i; | ||
479 | unsigned long end = jiffies + | ||
480 | msecs_to_jiffies(NVMAP_CARVEOUT_KILLER_RETRY_TIME); | ||
481 | int count = 0; | ||
482 | |||
483 | do { | ||
484 | block = do_nvmap_carveout_alloc(client, handle, type); | ||
485 | if (!carveout_killer) | ||
486 | return block; | ||
487 | |||
488 | if (block) | ||
489 | return block; | ||
490 | |||
491 | if (!count++) { | ||
492 | char task_comm[TASK_COMM_LEN]; | ||
493 | if (client->task) | ||
494 | get_task_comm(task_comm, client->task); | ||
495 | else | ||
496 | task_comm[0] = 0; | ||
497 | pr_info("%s: failed to allocate %u bytes for " | ||
498 | "process %s, firing carveout " | ||
499 | "killer!\n", __func__, handle->size, task_comm); | ||
500 | |||
501 | } else { | ||
502 | pr_info("%s: still can't allocate %u bytes, " | ||
503 | "attempt %d!\n", __func__, handle->size, count); | ||
504 | } | ||
505 | |||
506 | /* shrink carveouts that matter and try again */ | ||
507 | for (i = 0; i < dev->nr_carveouts; i++) { | ||
508 | int count; | ||
509 | co_heap = &dev->heaps[i]; | ||
510 | |||
511 | if (!(co_heap->heap_bit & type)) | ||
512 | continue; | ||
513 | |||
514 | count = wait_count; | ||
515 | /* indicates we didn't find anything to kill, | ||
516 | might as well stop trying */ | ||
517 | if (!nvmap_shrink_carveout(co_heap)) | ||
518 | return NULL; | ||
519 | |||
520 | if (time_is_after_jiffies(end)) | ||
521 | wait_event_interruptible_timeout(wait_reclaim, | ||
522 | nvmap_carveout_freed(count), | ||
523 | end - jiffies); | ||
524 | } | ||
525 | } while (time_is_after_jiffies(end)); | ||
526 | |||
527 | if (time_is_before_jiffies(end)) | ||
528 | pr_info("carveout_killer: timeout expired without " | ||
529 | "allocation succeeding.\n"); | ||
530 | |||
531 | return NULL; | ||
532 | } | ||
533 | |||
534 | /* remove a handle from the device's tree of all handles; called | ||
535 | * when freeing handles. */ | ||
int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
{
	spin_lock(&dev->handle_lock);

	/* re-test inside the spinlock if the handle really has no clients;
	 * only remove the handle if it is unreferenced */
	if (atomic_add_return(0, &h->ref) > 0) {
		spin_unlock(&dev->handle_lock);
		return -EBUSY;
	}
	smp_rmb();
	BUG_ON(atomic_read(&h->ref) < 0);
	BUG_ON(atomic_read(&h->pin) != 0);

	rb_erase(&h->node, &dev->handles);

	spin_unlock(&dev->handle_lock);
	return 0;
}
555 | |||
556 | /* adds a newly-created handle to the device master tree */ | ||
557 | void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h) | ||
558 | { | ||
559 | struct rb_node **p; | ||
560 | struct rb_node *parent = NULL; | ||
561 | |||
562 | spin_lock(&dev->handle_lock); | ||
563 | p = &dev->handles.rb_node; | ||
564 | while (*p) { | ||
565 | struct nvmap_handle *b; | ||
566 | |||
567 | parent = *p; | ||
568 | b = rb_entry(parent, struct nvmap_handle, node); | ||
569 | if (h > b) | ||
570 | p = &parent->rb_right; | ||
571 | else | ||
572 | p = &parent->rb_left; | ||
573 | } | ||
574 | rb_link_node(&h->node, parent, p); | ||
575 | rb_insert_color(&h->node, &dev->handles); | ||
576 | spin_unlock(&dev->handle_lock); | ||
577 | } | ||
578 | |||
579 | /* validates that a handle is in the device master tree, and that the | ||
580 | * client has permission to access it */ | ||
struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
					unsigned long id)
{
	struct nvmap_handle *h = NULL;
	struct rb_node *n;

	spin_lock(&client->dev->handle_lock);

	n = client->dev->handles.rb_node;

	/* binary search: handles are keyed by their pointer value */
	while (n) {
		h = rb_entry(n, struct nvmap_handle, node);
		if ((unsigned long)h == id) {
			/* access allowed for super clients, globally-shared
			 * handles, or the handle's owner; otherwise NULL */
			if (client->super || h->global || (h->owner == client))
				h = nvmap_handle_get(h);
			else
				h = NULL;
			spin_unlock(&client->dev->handle_lock);
			return h;
		}
		if (id > (unsigned long)h)
			n = n->rb_right;
		else
			n = n->rb_left;
	}
	spin_unlock(&client->dev->handle_lock);
	return NULL;
}
609 | |||
/* Allocate and initialise a new client (refcount 1) and link it onto the
 * device's client list. @name is kept by reference, not copied. Returns
 * NULL on allocation failure. */
struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
					 const char *name)
{
	struct nvmap_client *client;
	struct task_struct *task;
	int i;

	if (WARN_ON(!dev))
		return NULL;

	/* one trailing carveout_commit entry per carveout heap */
	client = kzalloc(sizeof(*client) + (sizeof(struct nvmap_carveout_commit)
			 * dev->nr_carveouts), GFP_KERNEL);
	if (!client)
		return NULL;

	client->name = name;
	client->super = true;	/* nvmap_open downgrades for user fops */
	client->dev = dev;
	/* TODO: allocate unique IOVMM client for each nvmap client */
	client->share = &dev->iovmm_master;
	client->handle_refs = RB_ROOT;

	atomic_set(&client->iovm_commit, 0);

	client->iovm_limit = nvmap_mru_vm_size(client->share->iovmm);

	for (i = 0; i < dev->nr_carveouts; i++) {
		INIT_LIST_HEAD(&client->carveout_commit[i].list);
		client->carveout_commit[i].commit = 0;
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);
	client->task = task;

	mutex_init(&client->ref_lock);
	atomic_set(&client->count, 1);

	spin_lock(&dev->clients_lock);
	list_add(&client->list, &dev->clients);
	spin_unlock(&dev->clients_lock);
	return client;
}
662 | |||
/* Tear down a client whose refcount reached zero: drop every handle ref
 * (unpinning first), notify carveout-killer waiters, unlink from carveout
 * and device lists, release the task ref, and free the client. */
static void destroy_client(struct nvmap_client *client)
{
	struct rb_node *n;
	int i;

	if (!client)
		return;


	while ((n = rb_first(&client->handle_refs))) {
		struct nvmap_handle_ref *ref;
		int pins, dupes;

		ref = rb_entry(n, struct nvmap_handle_ref, node);
		rb_erase(&ref->node, &client->handle_refs);

		smp_rmb();
		pins = atomic_read(&ref->pin);

		if (ref->handle->owner == client)
			ref->handle->owner = NULL;

		/* release every pin this client still held */
		while (pins--)
			nvmap_unpin_handles(client, &ref->handle, 1);

		/* drop one handle ref per duplicate of this ref */
		dupes = atomic_read(&ref->dupes);
		while (dupes--)
			nvmap_handle_put(ref->handle);

		kfree(ref);
	}

	if (carveout_killer) {
		/* memory was freed above: wake allocators blocked in
		 * nvmap_carveout_alloc so they can retry */
		wait_count++;
		smp_wmb();
		wake_up_all(&wait_reclaim);
	}

	for (i = 0; i < client->dev->nr_carveouts; i++)
		list_del(&client->carveout_commit[i].list);

	if (client->task)
		put_task_struct(client->task);

	spin_lock(&client->dev->clients_lock);
	list_del(&client->list);
	spin_unlock(&client->dev->clients_lock);
	kfree(client);
}
712 | |||
713 | struct nvmap_client *nvmap_client_get(struct nvmap_client *client) | ||
714 | { | ||
715 | if (WARN_ON(!client)) | ||
716 | return NULL; | ||
717 | |||
718 | if (WARN_ON(!atomic_add_unless(&client->count, 1, 0))) | ||
719 | return NULL; | ||
720 | |||
721 | return client; | ||
722 | } | ||
723 | |||
724 | struct nvmap_client *nvmap_client_get_file(int fd) | ||
725 | { | ||
726 | struct nvmap_client *client = ERR_PTR(-EFAULT); | ||
727 | struct file *f = fget(fd); | ||
728 | if (!f) | ||
729 | return ERR_PTR(-EINVAL); | ||
730 | |||
731 | if ((f->f_op == &nvmap_user_fops) || (f->f_op == &nvmap_super_fops)) { | ||
732 | client = f->private_data; | ||
733 | atomic_inc(&client->count); | ||
734 | } | ||
735 | |||
736 | fput(f); | ||
737 | return client; | ||
738 | } | ||
739 | |||
740 | void nvmap_client_put(struct nvmap_client *client) | ||
741 | { | ||
742 | if (!client) | ||
743 | return; | ||
744 | |||
745 | if (!atomic_dec_return(&client->count)) | ||
746 | destroy_client(client); | ||
747 | } | ||
748 | |||
/* open() for both the user and super misc devices: create a per-fd
 * client and stash it in filp->private_data. */
static int nvmap_open(struct inode *inode, struct file *filp)
{
	struct miscdevice *miscdev = filp->private_data;
	struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
	struct nvmap_client *priv;
	int ret;

	ret = nonseekable_open(inode, filp);
	if (unlikely(ret))
		return ret;

	BUG_ON(dev != nvmap_dev);
	priv = nvmap_create_client(dev, "user");
	if (!priv)
		return -ENOMEM;

	/* privilege is decided by which device node was opened */
	priv->super = (filp->f_op == &nvmap_super_fops);

	filp->f_mapping->backing_dev_info = &nvmap_bdi;

	filp->private_data = priv;
	return 0;
}
772 | |||
/* release() — drop the per-fd client reference taken in nvmap_open. */
static int nvmap_release(struct inode *inode, struct file *filp)
{
	nvmap_client_put(filp->private_data);
	return 0;
}
778 | |||
static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv;

	/* after NVMAP_IOC_MMAP, the handle that is mapped by this VMA
	 * will be stored in vm_private_data and faulted in. until the
	 * ioctl is made, the VMA is mapped no-access */
	vma->vm_private_data = NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->offs = 0;
	priv->handle = NULL;
	atomic_set(&priv->count, 1);	/* dropped in nvmap_vma_close */

	vma->vm_flags |= VM_SHARED;
	vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_MIXEDMAP | VM_RESERVED);
	vma->vm_ops = &nvmap_vma_ops;
	vma->vm_private_data = priv;

	return 0;
}
803 | |||
804 | static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
805 | { | ||
806 | int err = 0; | ||
807 | void __user *uarg = (void __user *)arg; | ||
808 | |||
809 | if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC) | ||
810 | return -ENOTTY; | ||
811 | |||
812 | if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR) | ||
813 | return -ENOTTY; | ||
814 | |||
815 | if (_IOC_DIR(cmd) & _IOC_READ) | ||
816 | err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd)); | ||
817 | if (_IOC_DIR(cmd) & _IOC_WRITE) | ||
818 | err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd)); | ||
819 | |||
820 | if (err) | ||
821 | return -EFAULT; | ||
822 | |||
823 | switch (cmd) { | ||
824 | case NVMAP_IOC_CLAIM: | ||
825 | nvmap_warn(filp->private_data, "preserved handles not" | ||
826 | "supported\n"); | ||
827 | err = -ENODEV; | ||
828 | break; | ||
829 | case NVMAP_IOC_CREATE: | ||
830 | case NVMAP_IOC_FROM_ID: | ||
831 | err = nvmap_ioctl_create(filp, cmd, uarg); | ||
832 | break; | ||
833 | |||
834 | case NVMAP_IOC_GET_ID: | ||
835 | err = nvmap_ioctl_getid(filp, uarg); | ||
836 | break; | ||
837 | |||
838 | case NVMAP_IOC_PARAM: | ||
839 | err = nvmap_ioctl_get_param(filp, uarg); | ||
840 | break; | ||
841 | |||
842 | case NVMAP_IOC_UNPIN_MULT: | ||
843 | case NVMAP_IOC_PIN_MULT: | ||
844 | err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT, uarg); | ||
845 | break; | ||
846 | |||
847 | case NVMAP_IOC_ALLOC: | ||
848 | err = nvmap_ioctl_alloc(filp, uarg); | ||
849 | break; | ||
850 | |||
851 | case NVMAP_IOC_FREE: | ||
852 | err = nvmap_ioctl_free(filp, arg); | ||
853 | break; | ||
854 | |||
855 | case NVMAP_IOC_MMAP: | ||
856 | err = nvmap_map_into_caller_ptr(filp, uarg); | ||
857 | break; | ||
858 | |||
859 | case NVMAP_IOC_WRITE: | ||
860 | case NVMAP_IOC_READ: | ||
861 | err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg); | ||
862 | break; | ||
863 | |||
864 | case NVMAP_IOC_CACHE: | ||
865 | err = nvmap_ioctl_cache_maint(filp, uarg); | ||
866 | break; | ||
867 | |||
868 | default: | ||
869 | return -ENOTTY; | ||
870 | } | ||
871 | return err; | ||
872 | } | ||
873 | |||
874 | /* to ensure that the backing store for the VMA isn't freed while a fork'd | ||
875 | * reference still exists, nvmap_vma_open increments the reference count on | ||
876 | * the handle, and nvmap_vma_close decrements it. alternatively, we could | ||
877 | * disallow copying of the vma, or behave like pmem and zap the pages. FIXME. | ||
878 | */ | ||
static void nvmap_vma_open(struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv;

	priv = vma->vm_private_data;

	BUG_ON(!priv);

	/* forked/split VMA shares the same priv; bump its refcount */
	atomic_inc(&priv->count);
}
889 | |||
static void nvmap_vma_close(struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv = vma->vm_private_data;

	if (priv) {
		if (priv->handle) {
			/* balance the usecount taken when the handle was
			 * mapped into this VMA */
			nvmap_usecount_dec(priv->handle);
			BUG_ON(priv->handle->usecount < 0);
		}
		/* last VMA sharing this priv: drop the handle ref and free */
		if (!atomic_dec_return(&priv->count)) {
			if (priv->handle)
				nvmap_handle_put(priv->handle);
			kfree(priv);
		}
	}
	vma->vm_private_data = NULL;
}
907 | |||
static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct nvmap_vma_priv *priv;
	unsigned long offs;

	offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
	priv = vma->vm_private_data;
	/* no handle attached yet (NVMAP_IOC_MMAP not issued) or handle
	 * has no backing storage: fault cannot be satisfied */
	if (!priv || !priv->handle || !priv->handle->alloc)
		return VM_FAULT_SIGBUS;

	offs += priv->offs;
	/* if the VMA was split for some reason, vm_pgoff will be the VMA's
	 * offset from the original VMA */
	offs += (vma->vm_pgoff << PAGE_SHIFT);

	if (offs >= priv->handle->size)
		return VM_FAULT_SIGBUS;

	if (!priv->handle->heap_pgalloc) {
		/* carveout-backed: insert the physical pfn directly */
		unsigned long pfn;
		BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
		pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
		return VM_FAULT_NOPAGE;
	} else {
		/* page-allocator-backed: hand the struct page to the VM */
		struct page *page;
		offs >>= PAGE_SHIFT;
		page = priv->handle->pgalloc.pages[offs];
		if (page)
			get_page(page);
		vmf->page = page;
		return (page) ? 0 : VM_FAULT_SIGBUS;
	}
}
942 | |||
943 | static ssize_t attr_show_usage(struct device *dev, | ||
944 | struct device_attribute *attr, char *buf) | ||
945 | { | ||
946 | struct nvmap_carveout_node *node = nvmap_heap_device_to_arg(dev); | ||
947 | |||
948 | return sprintf(buf, "%08x\n", node->heap_bit); | ||
949 | } | ||
950 | |||
/* Read-only sysfs attribute exposing each carveout's usage bitmask. */
static struct device_attribute heap_attr_show_usage =
	__ATTR(usage, S_IRUGO, attr_show_usage, NULL);

static struct attribute *heap_extra_attrs[] = {
	&heap_attr_show_usage.attr,
	NULL,
};

/* Extra attribute group registered on every carveout heap device. */
static struct attribute_group heap_extra_attr_group = {
	.attrs = heap_extra_attrs,
};
962 | |||
963 | static void client_stringify(struct nvmap_client *client, struct seq_file *s) | ||
964 | { | ||
965 | char task_comm[TASK_COMM_LEN]; | ||
966 | if (!client->task) { | ||
967 | seq_printf(s, "%-18s %18s %8u", client->name, "kernel", 0); | ||
968 | return; | ||
969 | } | ||
970 | get_task_comm(task_comm, client->task); | ||
971 | seq_printf(s, "%-18s %18s %8u", client->name, task_comm, | ||
972 | client->task->pid); | ||
973 | } | ||
974 | |||
975 | static void allocations_stringify(struct nvmap_client *client, | ||
976 | struct seq_file *s) | ||
977 | { | ||
978 | unsigned long base = 0; | ||
979 | struct rb_node *n = rb_first(&client->handle_refs); | ||
980 | |||
981 | for (; n != NULL; n = rb_next(n)) { | ||
982 | struct nvmap_handle_ref *ref = | ||
983 | rb_entry(n, struct nvmap_handle_ref, node); | ||
984 | struct nvmap_handle *handle = ref->handle; | ||
985 | if (handle->alloc && !handle->heap_pgalloc) { | ||
986 | seq_printf(s, "%-18s %-18s %8lx %10u %8x\n", "", "", | ||
987 | (unsigned long)(handle->carveout->base), | ||
988 | handle->size, handle->userflags); | ||
989 | } else if (handle->alloc && handle->heap_pgalloc) { | ||
990 | seq_printf(s, "%-18s %-18s %8lx %10u %8x\n", "", "", | ||
991 | base, handle->size, handle->userflags); | ||
992 | } | ||
993 | } | ||
994 | } | ||
995 | |||
996 | static int nvmap_debug_allocations_show(struct seq_file *s, void *unused) | ||
997 | { | ||
998 | struct nvmap_carveout_node *node = s->private; | ||
999 | struct nvmap_carveout_commit *commit; | ||
1000 | unsigned long flags; | ||
1001 | unsigned int total = 0; | ||
1002 | |||
1003 | spin_lock_irqsave(&node->clients_lock, flags); | ||
1004 | seq_printf(s, "%-18s %18s %8s %10s %8s\n", "CLIENT", "PROCESS", "PID", | ||
1005 | "SIZE", "FLAGS"); | ||
1006 | seq_printf(s, "%-18s %18s %8s %10s\n", "", "", | ||
1007 | "BASE", "SIZE"); | ||
1008 | list_for_each_entry(commit, &node->clients, list) { | ||
1009 | struct nvmap_client *client = | ||
1010 | get_client_from_carveout_commit(node, commit); | ||
1011 | client_stringify(client, s); | ||
1012 | seq_printf(s, " %10u\n", commit->commit); | ||
1013 | allocations_stringify(client, s); | ||
1014 | seq_printf(s, "\n"); | ||
1015 | total += commit->commit; | ||
1016 | } | ||
1017 | seq_printf(s, "%-18s %-18s %8u %10u\n", "total", "", 0, total); | ||
1018 | spin_unlock_irqrestore(&node->clients_lock, flags); | ||
1019 | |||
1020 | return 0; | ||
1021 | } | ||
1022 | |||
/* Bind the carveout node stashed in i_private to the seq_file handler. */
static int nvmap_debug_allocations_open(struct inode *inode, struct file *file)
{
	return single_open(file, nvmap_debug_allocations_show,
			   inode->i_private);
}

static const struct file_operations debug_allocations_fops = {
	.open = nvmap_debug_allocations_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1035 | |||
1036 | static int nvmap_debug_clients_show(struct seq_file *s, void *unused) | ||
1037 | { | ||
1038 | struct nvmap_carveout_node *node = s->private; | ||
1039 | struct nvmap_carveout_commit *commit; | ||
1040 | unsigned long flags; | ||
1041 | unsigned int total = 0; | ||
1042 | |||
1043 | spin_lock_irqsave(&node->clients_lock, flags); | ||
1044 | seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID", | ||
1045 | "SIZE"); | ||
1046 | list_for_each_entry(commit, &node->clients, list) { | ||
1047 | struct nvmap_client *client = | ||
1048 | get_client_from_carveout_commit(node, commit); | ||
1049 | client_stringify(client, s); | ||
1050 | seq_printf(s, " %10u\n", commit->commit); | ||
1051 | total += commit->commit; | ||
1052 | } | ||
1053 | seq_printf(s, "%-18s %18s %8u %10u\n", "total", "", 0, total); | ||
1054 | spin_unlock_irqrestore(&node->clients_lock, flags); | ||
1055 | |||
1056 | return 0; | ||
1057 | } | ||
1058 | |||
/* Bind the carveout node stashed in i_private to the seq_file handler. */
static int nvmap_debug_clients_open(struct inode *inode, struct file *file)
{
	return single_open(file, nvmap_debug_clients_show, inode->i_private);
}

static const struct file_operations debug_clients_fops = {
	.open = nvmap_debug_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1070 | |||
1071 | static int nvmap_debug_iovmm_clients_show(struct seq_file *s, void *unused) | ||
1072 | { | ||
1073 | unsigned long flags; | ||
1074 | unsigned int total = 0; | ||
1075 | struct nvmap_client *client; | ||
1076 | struct nvmap_device *dev = s->private; | ||
1077 | |||
1078 | spin_lock_irqsave(&dev->clients_lock, flags); | ||
1079 | seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID", | ||
1080 | "SIZE"); | ||
1081 | list_for_each_entry(client, &dev->clients, list) { | ||
1082 | client_stringify(client, s); | ||
1083 | seq_printf(s, " %10u\n", atomic_read(&client->iovm_commit)); | ||
1084 | total += atomic_read(&client->iovm_commit); | ||
1085 | } | ||
1086 | seq_printf(s, "%-18s %18s %8u %10u\n", "total", "", 0, total); | ||
1087 | spin_unlock_irqrestore(&dev->clients_lock, flags); | ||
1088 | |||
1089 | return 0; | ||
1090 | } | ||
1091 | |||
/* Bind the nvmap device stashed in i_private to the seq_file handler. */
static int nvmap_debug_iovmm_clients_open(struct inode *inode,
					  struct file *file)
{
	return single_open(file, nvmap_debug_iovmm_clients_show,
			   inode->i_private);
}

static const struct file_operations debug_iovmm_clients_fops = {
	.open = nvmap_debug_iovmm_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1105 | |||
1106 | static int nvmap_debug_iovmm_allocations_show(struct seq_file *s, void *unused) | ||
1107 | { | ||
1108 | unsigned long flags; | ||
1109 | unsigned int total = 0; | ||
1110 | struct nvmap_client *client; | ||
1111 | struct nvmap_device *dev = s->private; | ||
1112 | |||
1113 | spin_lock_irqsave(&dev->clients_lock, flags); | ||
1114 | seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID", | ||
1115 | "SIZE"); | ||
1116 | seq_printf(s, "%-18s %18s %8s %10s\n", "", "", | ||
1117 | "BASE", "SIZE"); | ||
1118 | list_for_each_entry(client, &dev->clients, list) { | ||
1119 | client_stringify(client, s); | ||
1120 | seq_printf(s, " %10u\n", atomic_read(&client->iovm_commit)); | ||
1121 | allocations_stringify(client, s); | ||
1122 | seq_printf(s, "\n"); | ||
1123 | total += atomic_read(&client->iovm_commit); | ||
1124 | } | ||
1125 | seq_printf(s, "%-18s %-18s %8u %10u\n", "total", "", 0, total); | ||
1126 | spin_unlock_irqrestore(&dev->clients_lock, flags); | ||
1127 | |||
1128 | return 0; | ||
1129 | } | ||
1130 | |||
/* Bind the nvmap device stashed in i_private to the seq_file handler. */
static int nvmap_debug_iovmm_allocations_open(struct inode *inode,
					      struct file *file)
{
	return single_open(file, nvmap_debug_iovmm_allocations_show,
			   inode->i_private);
}

static const struct file_operations debug_iovmm_allocations_fops = {
	.open = nvmap_debug_iovmm_allocations_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1144 | |||
1145 | static int nvmap_probe(struct platform_device *pdev) | ||
1146 | { | ||
1147 | struct nvmap_platform_data *plat = pdev->dev.platform_data; | ||
1148 | struct nvmap_device *dev; | ||
1149 | struct dentry *nvmap_debug_root; | ||
1150 | unsigned int i; | ||
1151 | int e; | ||
1152 | |||
1153 | if (!plat) { | ||
1154 | dev_err(&pdev->dev, "no platform data?\n"); | ||
1155 | return -ENODEV; | ||
1156 | } | ||
1157 | |||
1158 | if (WARN_ON(nvmap_dev != NULL)) { | ||
1159 | dev_err(&pdev->dev, "only one nvmap device may be present\n"); | ||
1160 | return -ENODEV; | ||
1161 | } | ||
1162 | |||
1163 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
1164 | if (!dev) { | ||
1165 | dev_err(&pdev->dev, "out of memory for device\n"); | ||
1166 | return -ENOMEM; | ||
1167 | } | ||
1168 | |||
1169 | dev->dev_user.minor = MISC_DYNAMIC_MINOR; | ||
1170 | dev->dev_user.name = "nvmap"; | ||
1171 | dev->dev_user.fops = &nvmap_user_fops; | ||
1172 | dev->dev_user.parent = &pdev->dev; | ||
1173 | |||
1174 | dev->dev_super.minor = MISC_DYNAMIC_MINOR; | ||
1175 | dev->dev_super.name = "knvmap"; | ||
1176 | dev->dev_super.fops = &nvmap_super_fops; | ||
1177 | dev->dev_super.parent = &pdev->dev; | ||
1178 | |||
1179 | dev->handles = RB_ROOT; | ||
1180 | |||
1181 | init_waitqueue_head(&dev->pte_wait); | ||
1182 | |||
1183 | init_waitqueue_head(&dev->iovmm_master.pin_wait); | ||
1184 | mutex_init(&dev->iovmm_master.pin_lock); | ||
1185 | for (i = 0; i < NVMAP_NUM_POOLS; i++) | ||
1186 | nvmap_page_pool_init(&dev->iovmm_master.pools[i], i); | ||
1187 | |||
1188 | dev->iovmm_master.iovmm = | ||
1189 | tegra_iovmm_alloc_client(dev_name(&pdev->dev), NULL, | ||
1190 | &(dev->dev_user)); | ||
1191 | #ifdef CONFIG_TEGRA_IOVMM | ||
1192 | if (!dev->iovmm_master.iovmm) { | ||
1193 | e = PTR_ERR(dev->iovmm_master.iovmm); | ||
1194 | dev_err(&pdev->dev, "couldn't create iovmm client\n"); | ||
1195 | goto fail; | ||
1196 | } | ||
1197 | #endif | ||
1198 | dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE); | ||
1199 | if (!dev->vm_rgn) { | ||
1200 | e = -ENOMEM; | ||
1201 | dev_err(&pdev->dev, "couldn't allocate remapping region\n"); | ||
1202 | goto fail; | ||
1203 | } | ||
1204 | e = nvmap_mru_init(&dev->iovmm_master); | ||
1205 | if (e) { | ||
1206 | dev_err(&pdev->dev, "couldn't initialize MRU lists\n"); | ||
1207 | goto fail; | ||
1208 | } | ||
1209 | |||
1210 | spin_lock_init(&dev->ptelock); | ||
1211 | spin_lock_init(&dev->handle_lock); | ||
1212 | INIT_LIST_HEAD(&dev->clients); | ||
1213 | spin_lock_init(&dev->clients_lock); | ||
1214 | |||
1215 | for (i = 0; i < NVMAP_NUM_PTES; i++) { | ||
1216 | unsigned long addr; | ||
1217 | pgd_t *pgd; | ||
1218 | pud_t *pud; | ||
1219 | pmd_t *pmd; | ||
1220 | |||
1221 | addr = (unsigned long)dev->vm_rgn->addr + (i * PAGE_SIZE); | ||
1222 | pgd = pgd_offset_k(addr); | ||
1223 | pud = pud_alloc(&init_mm, pgd, addr); | ||
1224 | if (!pud) { | ||
1225 | e = -ENOMEM; | ||
1226 | dev_err(&pdev->dev, "couldn't allocate page tables\n"); | ||
1227 | goto fail; | ||
1228 | } | ||
1229 | pmd = pmd_alloc(&init_mm, pud, addr); | ||
1230 | if (!pmd) { | ||
1231 | e = -ENOMEM; | ||
1232 | dev_err(&pdev->dev, "couldn't allocate page tables\n"); | ||
1233 | goto fail; | ||
1234 | } | ||
1235 | dev->ptes[i] = pte_alloc_kernel(pmd, addr); | ||
1236 | if (!dev->ptes[i]) { | ||
1237 | e = -ENOMEM; | ||
1238 | dev_err(&pdev->dev, "couldn't allocate page tables\n"); | ||
1239 | goto fail; | ||
1240 | } | ||
1241 | } | ||
1242 | |||
1243 | e = misc_register(&dev->dev_user); | ||
1244 | if (e) { | ||
1245 | dev_err(&pdev->dev, "unable to register miscdevice %s\n", | ||
1246 | dev->dev_user.name); | ||
1247 | goto fail; | ||
1248 | } | ||
1249 | |||
1250 | e = misc_register(&dev->dev_super); | ||
1251 | if (e) { | ||
1252 | dev_err(&pdev->dev, "unable to register miscdevice %s\n", | ||
1253 | dev->dev_super.name); | ||
1254 | goto fail; | ||
1255 | } | ||
1256 | |||
1257 | dev->nr_carveouts = 0; | ||
1258 | dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) * | ||
1259 | plat->nr_carveouts, GFP_KERNEL); | ||
1260 | if (!dev->heaps) { | ||
1261 | e = -ENOMEM; | ||
1262 | dev_err(&pdev->dev, "couldn't allocate carveout memory\n"); | ||
1263 | goto fail; | ||
1264 | } | ||
1265 | |||
1266 | nvmap_debug_root = debugfs_create_dir("nvmap", NULL); | ||
1267 | if (IS_ERR_OR_NULL(nvmap_debug_root)) | ||
1268 | dev_err(&pdev->dev, "couldn't create debug files\n"); | ||
1269 | |||
1270 | for (i = 0; i < plat->nr_carveouts; i++) { | ||
1271 | struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts]; | ||
1272 | const struct nvmap_platform_carveout *co = &plat->carveouts[i]; | ||
1273 | if (!co->size) | ||
1274 | continue; | ||
1275 | node->carveout = nvmap_heap_create(dev->dev_user.this_device, | ||
1276 | co->name, co->base, co->size, | ||
1277 | co->buddy_size, node); | ||
1278 | if (!node->carveout) { | ||
1279 | e = -ENOMEM; | ||
1280 | dev_err(&pdev->dev, "couldn't create %s\n", co->name); | ||
1281 | goto fail_heaps; | ||
1282 | } | ||
1283 | node->index = dev->nr_carveouts; | ||
1284 | dev->nr_carveouts++; | ||
1285 | spin_lock_init(&node->clients_lock); | ||
1286 | INIT_LIST_HEAD(&node->clients); | ||
1287 | node->heap_bit = co->usage_mask; | ||
1288 | if (nvmap_heap_create_group(node->carveout, | ||
1289 | &heap_extra_attr_group)) | ||
1290 | dev_warn(&pdev->dev, "couldn't add extra attributes\n"); | ||
1291 | |||
1292 | dev_info(&pdev->dev, "created carveout %s (%uKiB)\n", | ||
1293 | co->name, co->size / 1024); | ||
1294 | |||
1295 | if (!IS_ERR_OR_NULL(nvmap_debug_root)) { | ||
1296 | struct dentry *heap_root = | ||
1297 | debugfs_create_dir(co->name, nvmap_debug_root); | ||
1298 | if (!IS_ERR_OR_NULL(heap_root)) { | ||
1299 | debugfs_create_file("clients", 0664, heap_root, | ||
1300 | node, &debug_clients_fops); | ||
1301 | debugfs_create_file("allocations", 0664, | ||
1302 | heap_root, node, &debug_allocations_fops); | ||
1303 | } | ||
1304 | } | ||
1305 | } | ||
1306 | if (!IS_ERR_OR_NULL(nvmap_debug_root)) { | ||
1307 | struct dentry *iovmm_root = | ||
1308 | debugfs_create_dir("iovmm", nvmap_debug_root); | ||
1309 | if (!IS_ERR_OR_NULL(iovmm_root)) { | ||
1310 | debugfs_create_file("clients", 0664, iovmm_root, | ||
1311 | dev, &debug_iovmm_clients_fops); | ||
1312 | debugfs_create_file("allocations", 0664, iovmm_root, | ||
1313 | dev, &debug_iovmm_allocations_fops); | ||
1314 | for (i = 0; i < NVMAP_NUM_POOLS; i++) { | ||
1315 | char name[40]; | ||
1316 | char *memtype_string[] = {"uc", "wc", | ||
1317 | "iwb", "wb"}; | ||
1318 | sprintf(name, "%s_page_pool_available_pages", | ||
1319 | memtype_string[i]); | ||
1320 | debugfs_create_u32(name, S_IRUGO|S_IWUSR, | ||
1321 | iovmm_root, | ||
1322 | &dev->iovmm_master.pools[i].npages); | ||
1323 | } | ||
1324 | } | ||
1325 | } | ||
1326 | |||
1327 | platform_set_drvdata(pdev, dev); | ||
1328 | nvmap_dev = dev; | ||
1329 | |||
1330 | return 0; | ||
1331 | fail_heaps: | ||
1332 | for (i = 0; i < dev->nr_carveouts; i++) { | ||
1333 | struct nvmap_carveout_node *node = &dev->heaps[i]; | ||
1334 | nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group); | ||
1335 | nvmap_heap_destroy(node->carveout); | ||
1336 | } | ||
1337 | fail: | ||
1338 | kfree(dev->heaps); | ||
1339 | nvmap_mru_destroy(&dev->iovmm_master); | ||
1340 | if (dev->dev_super.minor != MISC_DYNAMIC_MINOR) | ||
1341 | misc_deregister(&dev->dev_super); | ||
1342 | if (dev->dev_user.minor != MISC_DYNAMIC_MINOR) | ||
1343 | misc_deregister(&dev->dev_user); | ||
1344 | if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm)) | ||
1345 | tegra_iovmm_free_client(dev->iovmm_master.iovmm); | ||
1346 | if (dev->vm_rgn) | ||
1347 | free_vm_area(dev->vm_rgn); | ||
1348 | kfree(dev); | ||
1349 | nvmap_dev = NULL; | ||
1350 | return e; | ||
1351 | } | ||
1352 | |||
/* Tear down the nvmap device: unregister the misc devices, free any
 * handles still in the global rb-tree, release the IOVMM client, the MRU
 * lists, the carveout heaps and the PTE remapping region. */
static int nvmap_remove(struct platform_device *pdev)
{
	struct nvmap_device *dev = platform_get_drvdata(pdev);
	struct rb_node *n;
	struct nvmap_handle *h;
	int i;

	misc_deregister(&dev->dev_super);
	misc_deregister(&dev->dev_user);

	/* reclaim handles their owners never freed */
	while ((n = rb_first(&dev->handles))) {
		h = rb_entry(n, struct nvmap_handle, node);
		rb_erase(&h->node, &dev->handles);
		kfree(h);
	}

	if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
		tegra_iovmm_free_client(dev->iovmm_master.iovmm);

	nvmap_mru_destroy(&dev->iovmm_master);

	/* mirror of the fail_heaps unwind in nvmap_probe() */
	for (i = 0; i < dev->nr_carveouts; i++) {
		struct nvmap_carveout_node *node = &dev->heaps[i];
		nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
		nvmap_heap_destroy(node->carveout);
	}
	kfree(dev->heaps);

	free_vm_area(dev->vm_rgn);
	kfree(dev);
	nvmap_dev = NULL;
	return 0;
}
1386 | |||
/* Suspend/resume hooks: nvmap holds no hardware state, so both are no-ops. */
static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
{
	return 0;
}

static int nvmap_resume(struct platform_device *pdev)
{
	return 0;
}
1396 | |||
/* Platform driver glue; binds to the "tegra-nvmap" platform device. */
static struct platform_driver nvmap_driver = {
	.probe		= nvmap_probe,
	.remove		= nvmap_remove,
	.suspend	= nvmap_suspend,
	.resume		= nvmap_resume,

	.driver = {
		.name	= "tegra-nvmap",
		.owner	= THIS_MODULE,
	},
};
1408 | |||
1409 | static int __init nvmap_init_driver(void) | ||
1410 | { | ||
1411 | int e; | ||
1412 | |||
1413 | nvmap_dev = NULL; | ||
1414 | |||
1415 | e = nvmap_heap_init(); | ||
1416 | if (e) | ||
1417 | goto fail; | ||
1418 | |||
1419 | e = platform_driver_register(&nvmap_driver); | ||
1420 | if (e) { | ||
1421 | nvmap_heap_deinit(); | ||
1422 | goto fail; | ||
1423 | } | ||
1424 | |||
1425 | fail: | ||
1426 | return e; | ||
1427 | } | ||
1428 | fs_initcall(nvmap_init_driver); | ||
1429 | |||
/* Module exit: unregister the driver and release the heap allocator. */
static void __exit nvmap_exit_driver(void)
{
	platform_driver_unregister(&nvmap_driver);
	nvmap_heap_deinit();
	nvmap_dev = NULL;
}
module_exit(nvmap_exit_driver);
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c new file mode 100644 index 00000000000..539b7ce9801 --- /dev/null +++ b/drivers/video/tegra/nvmap/nvmap_handle.c | |||
@@ -0,0 +1,1020 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/nvmap/nvmap_handle.c | ||
3 | * | ||
4 | * Handle allocation and freeing routines for nvmap | ||
5 | * | ||
6 | * Copyright (c) 2009-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
23 | #define pr_fmt(fmt) "%s: " fmt, __func__ | ||
24 | |||
25 | #include <linux/err.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/list.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/rbtree.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/vmalloc.h> | ||
32 | #include <linux/fs.h> | ||
33 | |||
34 | #include <asm/cacheflush.h> | ||
35 | #include <asm/outercache.h> | ||
36 | #include <asm/pgtable.h> | ||
37 | |||
38 | #include <mach/iovmm.h> | ||
39 | #include <mach/nvmap.h> | ||
40 | |||
41 | #include <linux/vmstat.h> | ||
42 | #include <linux/swap.h> | ||
43 | #include <linux/shrinker.h> | ||
44 | #include <linux/moduleparam.h> | ||
45 | |||
46 | #include "nvmap.h" | ||
47 | #include "nvmap_mru.h" | ||
48 | #include "nvmap_common.h" | ||
49 | |||
/* Compile-time switch for carveout-conversion tracing; when 0, PR_INFO()
 * compiles away entirely. */
#define PRINT_CARVEOUT_CONVERSION 0
#if PRINT_CARVEOUT_CONVERSION
#define PR_INFO pr_info
#else
#define PR_INFO(...)
#endif

/* Heaps treated as secure: the IRAM carveout, IOVMM, and the VPR carveout. */
#define NVMAP_SECURE_HEAPS (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM | \
			    NVMAP_HEAP_CARVEOUT_VPR)
/* GFP mask for handle page allocations; the HIGHMEM_ONLY config keeps
 * nvmap pages out of lowmem. */
#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define GFP_NVMAP (__GFP_HIGHMEM | __GFP_NOWARN)
#else
#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
#endif
/* handles may be arbitrarily large (16+MiB), and any handle allocated from
 * the kernel (i.e., not a carveout handle) includes its array of pages. to
 * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
 * the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN (PAGE_SIZE * 2)
/* Builds the shrink_page_pools debug module parameter (see below). */
#define NVMAP_TEST_PAGE_POOL_SHRINKER 1
/* Runtime on/off switch for the page pools (module parameter). */
static bool enable_pp = 1;
/* Per-memtype pool sizes, exported as module parameters. */
static int pool_size[NVMAP_NUM_POOLS];

/* Names for the NVMAP_HANDLE_* cache-type indices: uncached,
 * write-combined, inner-write-back, write-back. */
static char *s_memtype_str[] = {
	"uc",
	"wc",
	"iwb",
	"wb",
};
79 | |||
/* Serialize all accesses to a page pool's arrays and counters. */
static inline void nvmap_page_pool_lock(struct nvmap_page_pool *pool)
{
	mutex_lock(&pool->lock);
}

static inline void nvmap_page_pool_unlock(struct nvmap_page_pool *pool)
{
	mutex_unlock(&pool->lock);
}
89 | |||
90 | static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool) | ||
91 | { | ||
92 | struct page *page = NULL; | ||
93 | |||
94 | if (pool->npages > 0) | ||
95 | page = pool->page_array[--pool->npages]; | ||
96 | return page; | ||
97 | } | ||
98 | |||
99 | static struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool) | ||
100 | { | ||
101 | struct page *page = NULL; | ||
102 | |||
103 | if (pool) { | ||
104 | nvmap_page_pool_lock(pool); | ||
105 | page = nvmap_page_pool_alloc_locked(pool); | ||
106 | nvmap_page_pool_unlock(pool); | ||
107 | } | ||
108 | return page; | ||
109 | } | ||
110 | |||
111 | static bool nvmap_page_pool_release_locked(struct nvmap_page_pool *pool, | ||
112 | struct page *page) | ||
113 | { | ||
114 | int ret = false; | ||
115 | |||
116 | if (enable_pp && pool->npages < pool->max_pages) { | ||
117 | pool->page_array[pool->npages++] = page; | ||
118 | ret = true; | ||
119 | } | ||
120 | return ret; | ||
121 | } | ||
122 | |||
123 | static bool nvmap_page_pool_release(struct nvmap_page_pool *pool, | ||
124 | struct page *page) | ||
125 | { | ||
126 | int ret = false; | ||
127 | |||
128 | if (pool) { | ||
129 | nvmap_page_pool_lock(pool); | ||
130 | ret = nvmap_page_pool_release_locked(pool, page); | ||
131 | nvmap_page_pool_unlock(pool); | ||
132 | } | ||
133 | return ret; | ||
134 | } | ||
135 | |||
/* Number of pages currently cached in @pool.  NOTE(review): callers read
 * this without the pool lock, so it is only a racy snapshot. */
static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
{
	return pool->npages;
}
140 | |||
141 | static int nvmap_page_pool_free(struct nvmap_page_pool *pool, int nr_free) | ||
142 | { | ||
143 | int i = nr_free; | ||
144 | int idx = 0; | ||
145 | struct page *page; | ||
146 | |||
147 | if (!nr_free) | ||
148 | return nr_free; | ||
149 | nvmap_page_pool_lock(pool); | ||
150 | while (i) { | ||
151 | page = nvmap_page_pool_alloc_locked(pool); | ||
152 | if (!page) | ||
153 | break; | ||
154 | pool->shrink_array[idx++] = page; | ||
155 | i--; | ||
156 | } | ||
157 | |||
158 | if (idx) | ||
159 | set_pages_array_wb(pool->shrink_array, idx); | ||
160 | while (idx--) | ||
161 | __free_page(pool->shrink_array[idx]); | ||
162 | nvmap_page_pool_unlock(pool); | ||
163 | return i; | ||
164 | } | ||
165 | |||
166 | static int nvmap_page_pool_get_unused_pages(void) | ||
167 | { | ||
168 | unsigned int i; | ||
169 | int total = 0; | ||
170 | struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev); | ||
171 | |||
172 | for (i = 0; i < NVMAP_NUM_POOLS; i++) | ||
173 | total += nvmap_page_pool_get_available_count(&share->pools[i]); | ||
174 | |||
175 | return total; | ||
176 | } | ||
177 | |||
/* Resize @pool to cache at most @size pages.  Excess pages are released
 * first (with the lock dropped, hence the repeat loop to re-check), then
 * both backing arrays are reallocated and the cached pages copied over.
 * On vmalloc failure the pool keeps its old size and arrays. */
static void nvmap_page_pool_resize(struct nvmap_page_pool *pool, int size)
{
	int available_pages;
	int pages_to_release = 0;
	struct page **page_array = NULL;
	struct page **shrink_array = NULL;

	if (size == pool->max_pages)
		return;
repeat:
	/* first pass: pages_to_release == 0, so this is a no-op */
	nvmap_page_pool_free(pool, pages_to_release);
	nvmap_page_pool_lock(pool);
	available_pages = nvmap_page_pool_get_available_count(pool);
	if (available_pages > size) {
		/* must free without the lock held; retry afterwards */
		nvmap_page_pool_unlock(pool);
		pages_to_release = available_pages - size;
		goto repeat;
	}

	if (size == 0) {
		vfree(pool->page_array);
		vfree(pool->shrink_array);
		pool->page_array = pool->shrink_array = NULL;
		goto out;
	}

	page_array = vmalloc(sizeof(struct page *) * size);
	shrink_array = vmalloc(sizeof(struct page *) * size);
	if (!page_array || !shrink_array)
		goto fail;

	/* carry the still-cached pages over to the new array
	 * (npages <= size is guaranteed by the loop above) */
	memcpy(page_array, pool->page_array,
		pool->npages * sizeof(struct page *));
	vfree(pool->page_array);
	vfree(pool->shrink_array);
	pool->page_array = page_array;
	pool->shrink_array = shrink_array;
out:
	pr_debug("%s pool resized to %d from %d pages",
		s_memtype_str[pool->flags], size, pool->max_pages);
	pool->max_pages = size;
	goto exit;
fail:
	/* vfree(NULL) is a no-op, so partial allocation is safe here */
	vfree(page_array);
	vfree(shrink_array);
	pr_err("failed");
exit:
	nvmap_page_pool_unlock(pool);
}
227 | |||
/* Shrinker callback.  With nr_to_scan == 0 it only reports how many pool
 * pages exist; otherwise it releases up to nr_to_scan pages, rotating the
 * starting pool via start_pool so no single memtype is always drained
 * first.  Returns the remaining pooled-page count. */
static int nvmap_page_pool_shrink(struct shrinker *shrinker,
				  struct shrink_control *sc)
{
	unsigned int i;
	unsigned int pool_offset;
	struct nvmap_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;
	static atomic_t start_pool = ATOMIC_INIT(-1);
	struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev);

	if (!shrink_pages)
		goto out;

	pr_debug("sh_pages=%d", shrink_pages);

	for (i = 0; i < NVMAP_NUM_POOLS && shrink_pages; i++) {
		pool_offset = atomic_add_return(1, &start_pool) %
			NVMAP_NUM_POOLS;
		pool = &share->pools[pool_offset];
		/* nvmap_page_pool_free returns how many it could NOT free */
		shrink_pages = nvmap_page_pool_free(pool, shrink_pages);
	}
out:
	return nvmap_page_pool_get_unused_pages();
}
252 | |||
/* NOTE(review): seeks = 1 (below the usual DEFAULT_SEEKS) appears intended
 * to make the VM reclaim pool pages relatively eagerly — confirm. */
static struct shrinker nvmap_page_pool_shrinker = {
	.shrink = nvmap_page_pool_shrink,
	.seeks = 1,
};
257 | |||
258 | static void shrink_page_pools(int *total_pages, int *available_pages) | ||
259 | { | ||
260 | struct shrink_control sc; | ||
261 | |||
262 | sc.gfp_mask = GFP_KERNEL; | ||
263 | sc.nr_to_scan = 0; | ||
264 | *total_pages = nvmap_page_pool_shrink(NULL, &sc); | ||
265 | sc.nr_to_scan = *total_pages * 2; | ||
266 | *available_pages = nvmap_page_pool_shrink(NULL, &sc); | ||
267 | } | ||
268 | |||
#if NVMAP_TEST_PAGE_POOL_SHRINKER
/* Debug knob: writing "1" to the shrink_page_pools module parameter empties
 * the page pools and logs how long the shrink took. */
static bool shrink_pp;
static int shrink_set(const char *arg, const struct kernel_param *kp)
{
	int err;
	int cpu = smp_processor_id();
	unsigned long long t1, t2;
	int total_pages, available_pages;

	/* Fix: propagate parse errors instead of silently ignoring them */
	err = param_set_bool(arg, kp);
	if (err)
		return err;

	if (shrink_pp) {
		t1 = cpu_clock(cpu);
		shrink_page_pools(&total_pages, &available_pages);
		t2 = cpu_clock(cpu);
		pr_info("shrink page pools: time=%lldns, "
			"total_pages_released=%d, free_pages_available=%d",
			t2-t1, total_pages, available_pages);
	}
	return 0;
}

static int shrink_get(char *buff, const struct kernel_param *kp)
{
	return param_get_bool(buff, kp);
}

static struct kernel_param_ops shrink_ops = {
	.get = shrink_get,
	.set = shrink_set,
};

module_param_cb(shrink_page_pools, &shrink_ops, &shrink_pp, 0644);
#endif
302 | |||
303 | static int enable_pp_set(const char *arg, const struct kernel_param *kp) | ||
304 | { | ||
305 | int total_pages, available_pages; | ||
306 | |||
307 | param_set_bool(arg, kp); | ||
308 | |||
309 | if (!enable_pp) { | ||
310 | shrink_page_pools(&total_pages, &available_pages); | ||
311 | pr_info("disabled page pools and released pages, " | ||
312 | "total_pages_released=%d, free_pages_available=%d", | ||
313 | total_pages, available_pages); | ||
314 | } | ||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | static int enable_pp_get(char *buff, const struct kernel_param *kp) | ||
319 | { | ||
320 | return param_get_int(buff, kp); | ||
321 | } | ||
322 | |||
323 | static struct kernel_param_ops enable_pp_ops = { | ||
324 | .get = enable_pp_get, | ||
325 | .set = enable_pp_set, | ||
326 | }; | ||
327 | |||
328 | module_param_cb(enable_page_pools, &enable_pp_ops, &enable_pp, 0644); | ||
329 | |||
330 | #define POOL_SIZE_SET(m, i) \ | ||
331 | static int pool_size_##m##_set(const char *arg, const struct kernel_param *kp) \ | ||
332 | { \ | ||
333 | struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev); \ | ||
334 | param_set_int(arg, kp); \ | ||
335 | nvmap_page_pool_resize(&share->pools[i], pool_size[i]); \ | ||
336 | return 0; \ | ||
337 | } | ||
338 | |||
339 | #define POOL_SIZE_GET(m) \ | ||
340 | static int pool_size_##m##_get(char *buff, const struct kernel_param *kp) \ | ||
341 | { \ | ||
342 | return param_get_int(buff, kp); \ | ||
343 | } | ||
344 | |||
345 | #define POOL_SIZE_OPS(m) \ | ||
346 | static struct kernel_param_ops pool_size_##m##_ops = { \ | ||
347 | .get = pool_size_##m##_get, \ | ||
348 | .set = pool_size_##m##_set, \ | ||
349 | }; | ||
350 | |||
351 | #define POOL_SIZE_MOUDLE_PARAM_CB(m, i) \ | ||
352 | module_param_cb(m##_pool_size, &pool_size_##m##_ops, &pool_size[i], 0644) | ||
353 | |||
/* Instantiate the per-cache-type pool size module parameters:
 * uc_pool_size, wc_pool_size, iwb_pool_size and wb_pool_size. */
POOL_SIZE_SET(uc, NVMAP_HANDLE_UNCACHEABLE);
POOL_SIZE_GET(uc);
POOL_SIZE_OPS(uc);
POOL_SIZE_MOUDLE_PARAM_CB(uc, NVMAP_HANDLE_UNCACHEABLE);

POOL_SIZE_SET(wc, NVMAP_HANDLE_WRITE_COMBINE);
POOL_SIZE_GET(wc);
POOL_SIZE_OPS(wc);
POOL_SIZE_MOUDLE_PARAM_CB(wc, NVMAP_HANDLE_WRITE_COMBINE);

POOL_SIZE_SET(iwb, NVMAP_HANDLE_INNER_CACHEABLE);
POOL_SIZE_GET(iwb);
POOL_SIZE_OPS(iwb);
POOL_SIZE_MOUDLE_PARAM_CB(iwb, NVMAP_HANDLE_INNER_CACHEABLE);

POOL_SIZE_SET(wb, NVMAP_HANDLE_CACHEABLE);
POOL_SIZE_GET(wb);
POOL_SIZE_OPS(wb);
POOL_SIZE_MOUDLE_PARAM_CB(wb, NVMAP_HANDLE_CACHEABLE);
373 | |||
374 | int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags) | ||
375 | { | ||
376 | struct page *page; | ||
377 | int i; | ||
378 | static int reg = 1; | ||
379 | struct sysinfo info; | ||
380 | typedef int (*set_pages_array) (struct page **pages, int addrinarray); | ||
381 | set_pages_array s_cpa[] = { | ||
382 | set_pages_array_uc, | ||
383 | set_pages_array_wc, | ||
384 | set_pages_array_iwb, | ||
385 | set_pages_array_wb | ||
386 | }; | ||
387 | |||
388 | BUG_ON(flags >= NVMAP_NUM_POOLS); | ||
389 | memset(pool, 0x0, sizeof(*pool)); | ||
390 | mutex_init(&pool->lock); | ||
391 | pool->flags = flags; | ||
392 | |||
393 | /* No default pool for cached memory. */ | ||
394 | if (flags == NVMAP_HANDLE_CACHEABLE) | ||
395 | return 0; | ||
396 | |||
397 | si_meminfo(&info); | ||
398 | if (!pool_size[flags]) { | ||
399 | /* Use 3/8th of total ram for page pools. | ||
400 | * 1/8th for uc, 1/8th for wc and 1/8th for iwb. | ||
401 | */ | ||
402 | pool->max_pages = info.totalram >> 3; | ||
403 | } | ||
404 | if (pool->max_pages <= 0 || pool->max_pages >= info.totalram) | ||
405 | pool->max_pages = NVMAP_DEFAULT_PAGE_POOL_SIZE; | ||
406 | pool_size[flags] = pool->max_pages; | ||
407 | pr_info("nvmap %s page pool size=%d pages", | ||
408 | s_memtype_str[flags], pool->max_pages); | ||
409 | pool->page_array = vmalloc(sizeof(void *) * pool->max_pages); | ||
410 | pool->shrink_array = vmalloc(sizeof(struct page *) * pool->max_pages); | ||
411 | if (!pool->page_array || !pool->shrink_array) | ||
412 | goto fail; | ||
413 | |||
414 | if (reg) { | ||
415 | reg = 0; | ||
416 | register_shrinker(&nvmap_page_pool_shrinker); | ||
417 | } | ||
418 | |||
419 | nvmap_page_pool_lock(pool); | ||
420 | for (i = 0; i < pool->max_pages; i++) { | ||
421 | page = alloc_page(GFP_NVMAP); | ||
422 | if (!page) | ||
423 | goto do_cpa; | ||
424 | if (!nvmap_page_pool_release_locked(pool, page)) { | ||
425 | __free_page(page); | ||
426 | goto do_cpa; | ||
427 | } | ||
428 | } | ||
429 | do_cpa: | ||
430 | (*s_cpa[flags])(pool->page_array, pool->npages); | ||
431 | nvmap_page_pool_unlock(pool); | ||
432 | return 0; | ||
433 | fail: | ||
434 | pool->max_pages = 0; | ||
435 | vfree(pool->shrink_array); | ||
436 | vfree(pool->page_array); | ||
437 | return -ENOMEM; | ||
438 | } | ||
439 | |||
440 | static inline void *altalloc(size_t len) | ||
441 | { | ||
442 | if (len >= PAGELIST_VMALLOC_MIN) | ||
443 | return vmalloc(len); | ||
444 | else | ||
445 | return kmalloc(len, GFP_KERNEL); | ||
446 | } | ||
447 | |||
448 | static inline void altfree(void *ptr, size_t len) | ||
449 | { | ||
450 | if (!ptr) | ||
451 | return; | ||
452 | |||
453 | if (len >= PAGELIST_VMALLOC_MIN) | ||
454 | vfree(ptr); | ||
455 | else | ||
456 | kfree(ptr); | ||
457 | } | ||
458 | |||
/*
 * Final teardown of a handle: remove it from the device, release its
 * backing memory (carveout block or individual pages), then free the
 * handle struct itself.
 *
 * For page-backed handles, pages are first offered back to the per-type
 * page pool; pages the pool does not absorb have their kernel mapping
 * restored to write-back before being returned to the system.
 */
void _nvmap_handle_free(struct nvmap_handle *h)
{
	struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
	unsigned int i, nr_page, page_index = 0;
	struct nvmap_page_pool *pool = NULL;

	/* Someone else still holds the handle in the device tree. */
	if (nvmap_handle_remove(h->dev, h) != 0)
		return;

	if (!h->alloc)
		goto out;

	if (!h->heap_pgalloc) {
		/* Carveout-backed: free the contiguous heap block. */
		nvmap_usecount_inc(h);
		nvmap_heap_free(h->carveout);
		goto out;
	}

	nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);

	BUG_ON(h->size & ~PAGE_MASK);
	BUG_ON(!h->pgalloc.pages);

	nvmap_mru_remove(share, h);

	if (h->flags < NVMAP_NUM_POOLS)
		pool = &share->pools[h->flags];

	/* Hand pages to the pool until it rejects one (full/disabled);
	 * pages the pool keeps retain their current cache attribute. */
	while (page_index < nr_page) {
		if (!nvmap_page_pool_release(pool,
		    h->pgalloc.pages[page_index]))
			break;
		page_index++;
	}

	if (page_index == nr_page)
		goto skip_attr_restore;

	/* Restore page attributes. */
	if (h->flags == NVMAP_HANDLE_WRITE_COMBINE ||
	    h->flags == NVMAP_HANDLE_UNCACHEABLE ||
	    h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
		set_pages_array_wb(&h->pgalloc.pages[page_index],
				nr_page - page_index);

skip_attr_restore:
	if (h->pgalloc.area)
		tegra_iovmm_free_vm(h->pgalloc.area);

	/* Free the leftover pages the pool did not take. */
	for (i = page_index; i < nr_page; i++)
		__free_page(h->pgalloc.pages[i]);

	altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));

out:
	kfree(h);
}
516 | |||
517 | static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size) | ||
518 | { | ||
519 | struct page *page, *p, *e; | ||
520 | unsigned int order; | ||
521 | |||
522 | size = PAGE_ALIGN(size); | ||
523 | order = get_order(size); | ||
524 | page = alloc_pages(gfp, order); | ||
525 | |||
526 | if (!page) | ||
527 | return NULL; | ||
528 | |||
529 | split_page(page, order); | ||
530 | e = page + (1 << order); | ||
531 | for (p = page + (size >> PAGE_SHIFT); p < e; p++) | ||
532 | __free_page(p); | ||
533 | |||
534 | return page; | ||
535 | } | ||
536 | |||
/*
 * Allocate the backing pages for handle @h (PAGE_ALIGN(h->size) bytes).
 *
 * If @contiguous, a single physically contiguous run is allocated;
 * otherwise pages are gathered one at a time, preferring the per-type
 * page pool, and (unless CONFIG_NVMAP_RECLAIM_UNPINNED_VM) an IOVMM
 * region is reserved for them.
 *
 * Returns 0 with h->pgalloc populated, or -ENOMEM after releasing any
 * partially acquired pages.
 */
static int handle_page_alloc(struct nvmap_client *client,
			     struct nvmap_handle *h, bool contiguous)
{
	size_t size = PAGE_ALIGN(h->size);
	struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
	unsigned int nr_page = size >> PAGE_SHIFT;
	pgprot_t prot;
	unsigned int i = 0, page_index = 0;
	struct page **pages;
	struct nvmap_page_pool *pool = NULL;

	pages = altalloc(nr_page * sizeof(*pages));
	if (!pages)
		return -ENOMEM;

	prot = nvmap_pgprot(h, pgprot_kernel);

	h->pgalloc.area = NULL;
	if (contiguous) {
		struct page *page;
		page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
		if (!page)
			goto fail;

		for (i = 0; i < nr_page; i++)
			pages[i] = nth_page(page, i);

	} else {
		if (h->flags < NVMAP_NUM_POOLS)
			pool = &share->pools[h->flags];

		/* Get pages from pool, if available.  Pool pages already
		 * carry the right cache attribute, so page_index tracks
		 * how many can skip the attribute change below. */
		for (i = 0; i < nr_page; i++) {
			pages[i] = nvmap_page_pool_alloc(pool);
			if (!pages[i])
				break;
			page_index++;
		}

		/* Fall back to the page allocator for the remainder. */
		for (; i < nr_page; i++) {
			pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP,
				PAGE_SIZE);
			if (!pages[i])
				goto fail;
		}

#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
		h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
					NULL, size, h->align, prot,
					h->pgalloc.iovm_addr);
		if (!h->pgalloc.area)
			goto fail;

		h->pgalloc.dirty = true;
#endif
	}

	/* Every page came from the pool with attributes already set. */
	if (nr_page == page_index)
		goto skip_attr_change;

	/* Update the pages mapping in kernel page table. */
	if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
		set_pages_array_wc(&pages[page_index],
				nr_page - page_index);
	else if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
		set_pages_array_uc(&pages[page_index],
				nr_page - page_index);
	else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
		set_pages_array_iwb(&pages[page_index],
				nr_page - page_index);

skip_attr_change:
	h->size = size;
	h->pgalloc.pages = pages;
	h->pgalloc.contig = contiguous;
	INIT_LIST_HEAD(&h->pgalloc.mru_list);
	return 0;

fail:
	/* Restore write-back mapping on each acquired page before
	 * handing it back to the system. */
	while (i--) {
		set_pages_array_wb(&pages[i], 1);
		__free_page(pages[i]);
	}
	altfree(pages, nr_page * sizeof(*pages));
	/* NOTE(review): wmb() presumably orders the attribute restores
	 * against subsequent reuse of the pages — confirm necessity. */
	wmb();
	return -ENOMEM;
}
624 | |||
/*
 * Try to satisfy handle @h's allocation from the single heap bit @type.
 * On success sets h->alloc (and h->heap_pgalloc for page-backed heaps);
 * on failure leaves h->alloc false so the caller can try the next heap.
 *
 * With CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM, generic-carveout requests
 * are redirected to IOVMM (or sysmem for single-page allocations when
 * allowed), leaving only IRAM/VPR as true carveouts.
 */
static void alloc_handle(struct nvmap_client *client,
			 struct nvmap_handle *h, unsigned int type)
{
	/* Exactly one heap bit must be set. */
	BUG_ON(type & (type - 1));

#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
#define __NVMAP_HEAP_CARVEOUT (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_CARVEOUT_VPR)
#define __NVMAP_HEAP_IOVMM (NVMAP_HEAP_IOVMM | NVMAP_HEAP_CARVEOUT_GENERIC)
	if (type & NVMAP_HEAP_CARVEOUT_GENERIC) {
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
		if (h->size <= PAGE_SIZE) {
			PR_INFO("###CARVEOUT CONVERTED TO SYSMEM "
				"0x%x bytes %s(%d)###\n",
				h->size, current->comm, current->pid);
			goto sysheap;
		}
#endif
		PR_INFO("###CARVEOUT CONVERTED TO IOVM "
			"0x%x bytes %s(%d)###\n",
			h->size, current->comm, current->pid);
	}
#else
#define __NVMAP_HEAP_CARVEOUT NVMAP_HEAP_CARVEOUT_MASK
#define __NVMAP_HEAP_IOVMM NVMAP_HEAP_IOVMM
#endif

	if (type & __NVMAP_HEAP_CARVEOUT) {
		struct nvmap_heap_block *b;
#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
		PR_INFO("###IRAM REQUEST RETAINED "
			"0x%x bytes %s(%d)###\n",
			h->size, current->comm, current->pid);
#endif
		/* Protect handle from relocation */
		nvmap_usecount_inc(h);

		b = nvmap_carveout_alloc(client, h, type);
		if (b) {
			h->heap_pgalloc = false;
			h->alloc = true;
			nvmap_carveout_commit_add(client,
				nvmap_heap_to_arg(nvmap_block_to_heap(b)),
				h->size);
		}
		nvmap_usecount_dec(h);

	} else if (type & __NVMAP_HEAP_IOVMM) {
		size_t reserved = PAGE_ALIGN(h->size);
		int commit = 0;
		int ret;

		/* increment the committed IOVM space prior to allocation
		 * to avoid race conditions with other threads simultaneously
		 * allocating. */
		commit = atomic_add_return(reserved,
					    &client->iovm_commit);

		if (commit < client->iovm_limit)
			ret = handle_page_alloc(client, h, false);
		else
			ret = -ENOMEM;

		if (!ret) {
			h->heap_pgalloc = true;
			h->alloc = true;
		} else {
			/* Roll back the optimistic commit on failure. */
			atomic_sub(reserved, &client->iovm_commit);
		}

	} else if (type & NVMAP_HEAP_SYSMEM) {
#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM) && \
	defined(CONFIG_NVMAP_ALLOW_SYSMEM)
sysheap:
#endif
		/* Sysmem allocations are always physically contiguous. */
		if (handle_page_alloc(client, h, true) == 0) {
			BUG_ON(!h->pgalloc.contig);
			h->heap_pgalloc = true;
			h->alloc = true;
		}
	}
}
706 | |||
/* small allocations will try to allocate from generic OS memory before
 * any of the limited heaps, to increase the effective memory for graphics
 * allocations, and to reduce fragmentation of the graphics heaps with
 * sub-page splinters */
static const unsigned int heap_policy_small[] = {
	NVMAP_HEAP_CARVEOUT_VPR,
	NVMAP_HEAP_CARVEOUT_IRAM,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
	NVMAP_HEAP_SYSMEM,
#endif
	NVMAP_HEAP_CARVEOUT_MASK,
	NVMAP_HEAP_IOVMM,
	0,	/* terminator */
};
721 | |||
/* large allocations prefer IOVMM over sysmem, inverting the small-policy
 * ordering; tried in array order by nvmap_alloc_handle_id(). */
static const unsigned int heap_policy_large[] = {
	NVMAP_HEAP_CARVEOUT_VPR,
	NVMAP_HEAP_CARVEOUT_IRAM,
	NVMAP_HEAP_IOVMM,
	NVMAP_HEAP_CARVEOUT_MASK,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
	NVMAP_HEAP_SYSMEM,
#endif
	0,	/* terminator */
};
732 | |||
/* Do not override single page policy if there is not much space to
   avoid invoking system oom killer. */
#define NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD 50000000	/* bytes (~48 MiB) */
736 | |||
/*
 * Allocate backing memory for the handle identified by @id, walking the
 * size-dependent heap policy list and trying each heap permitted by
 * @heap_mask (highest-priority carveout first within a policy entry).
 *
 * @flags carries the NVMAP_HANDLE_* cache and secure bits; @align is the
 * minimum alignment (clamped up to L1_CACHE_BYTES).
 *
 * Returns 0 if the handle ends up allocated (including if it was already
 * allocated), -EINVAL for a bad id or empty effective heap mask, else
 * -ENOMEM.
 */
int nvmap_alloc_handle_id(struct nvmap_client *client,
			  unsigned long id, unsigned int heap_mask,
			  size_t align, unsigned int flags)
{
	struct nvmap_handle *h = NULL;
	const unsigned int *alloc_policy;
	int nr_page;
	int err = -ENOMEM;

	h = nvmap_get_handle_id(client, id);

	if (!h)
		return -EINVAL;

	/* Already backed: treat as success. */
	if (h->alloc)
		goto out;

	h->userflags = flags;
	nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	h->secure = !!(flags & NVMAP_HANDLE_SECURE);
	h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
	h->align = max_t(size_t, align, L1_CACHE_BYTES);

#ifndef CONFIG_TEGRA_IOVMM
	/* No IOVMM support: serve IOVMM requests from generic carveout.
	 * NOTE(review): the &= drops any other requested heap bits when
	 * IOVMM is among them — verify this narrowing is intended. */
	if (heap_mask & NVMAP_HEAP_IOVMM) {
		heap_mask &= NVMAP_HEAP_IOVMM;
		heap_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
	}
#endif
#ifndef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
	/* Allow single pages allocations in system memory to save
	 * carveout space and avoid extra iovm mappings */
	if (nr_page == 1) {
		if (heap_mask & NVMAP_HEAP_IOVMM)
			heap_mask |= NVMAP_HEAP_SYSMEM;
		else if (heap_mask & NVMAP_HEAP_CARVEOUT_GENERIC) {
			/* Calculate size of free physical pages
			 * managed by kernel */
			unsigned long freeMem =
				(global_page_state(NR_FREE_PAGES) +
				global_page_state(NR_FILE_PAGES) -
				total_swapcache_pages) << PAGE_SHIFT;

			if (freeMem > NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD)
				heap_mask |= NVMAP_HEAP_SYSMEM;
		}
	}
#endif

	/* This restriction is deprecated as alignments greater than
	   PAGE_SIZE are now correctly handled, but it is retained for
	   AP20 compatibility. */
	if (h->align > PAGE_SIZE)
		heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;
#endif
	/* secure allocations can only be served from secure heaps */
	if (h->secure)
		heap_mask &= NVMAP_SECURE_HEAPS;

	if (!heap_mask) {
		err = -EINVAL;
		goto out;
	}

	alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;

	/* Outer loop: policy entries in priority order; each entry may
	 * cover several heap bits. */
	while (!h->alloc && *alloc_policy) {
		unsigned int heap_type;

		heap_type = *alloc_policy++;
		heap_type &= heap_mask;

		if (!heap_type)
			continue;

		/* Don't retry these bits under a later policy entry. */
		heap_mask &= ~heap_type;

		while (heap_type && !h->alloc) {
			unsigned int heap;

			/* iterate possible heaps MSB-to-LSB, since higher-
			 * priority carveouts will have higher usage masks */
			heap = 1 << __fls(heap_type);
			alloc_handle(client, h, heap);
			heap_type &= ~heap;
		}
	}

out:
	err = (h->alloc) ? 0 : err;
	nvmap_handle_put(h);
	return err;
}
831 | |||
/*
 * Drop the client's reference to handle @id.  Only when the last
 * duplicate is released does the client-side bookkeeping (rb-tree node,
 * IOVM/carveout commit accounting, pin counts) get torn down; the final
 * nvmap_handle_put() may then free the handle itself.
 */
void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
{
	struct nvmap_handle_ref *ref;
	struct nvmap_handle *h;
	int pins;

	nvmap_ref_lock(client);

	ref = _nvmap_validate_id_locked(client, id);
	if (!ref) {
		nvmap_ref_unlock(client);
		return;
	}

	BUG_ON(!ref->handle);
	h = ref->handle;

	/* Other duplicates remain: just drop one dupe count. */
	if (atomic_dec_return(&ref->dupes)) {
		nvmap_ref_unlock(client);
		goto out;
	}

	smp_rmb();
	pins = atomic_read(&ref->pin);
	rb_erase(&ref->node, &client->handle_refs);

	/* Undo the per-client commit accounting taken at create/dup. */
	if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
		atomic_sub(h->size, &client->iovm_commit);

	if (h->alloc && !h->heap_pgalloc) {
		mutex_lock(&h->lock);
		nvmap_carveout_commit_subtract(client,
			nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
			h->size);
		mutex_unlock(&h->lock);
	}

	nvmap_ref_unlock(client);

	/* Freeing while pinned is a caller bug; log it and unpin so the
	 * handle can actually be released. */
	if (pins)
		nvmap_err(client, "%s freeing pinned handle %p\n",
			  current->group_leader->comm, h);

	while (pins--)
		nvmap_unpin_handles(client, &ref->handle, 1);

	if (h->owner == client)
		h->owner = NULL;

	kfree(ref);

out:
	BUG_ON(!atomic_read(&h->ref));
	nvmap_handle_put(h);
}
887 | |||
888 | static void add_handle_ref(struct nvmap_client *client, | ||
889 | struct nvmap_handle_ref *ref) | ||
890 | { | ||
891 | struct rb_node **p, *parent = NULL; | ||
892 | |||
893 | nvmap_ref_lock(client); | ||
894 | p = &client->handle_refs.rb_node; | ||
895 | while (*p) { | ||
896 | struct nvmap_handle_ref *node; | ||
897 | parent = *p; | ||
898 | node = rb_entry(parent, struct nvmap_handle_ref, node); | ||
899 | if (ref->handle > node->handle) | ||
900 | p = &parent->rb_right; | ||
901 | else | ||
902 | p = &parent->rb_left; | ||
903 | } | ||
904 | rb_link_node(&ref->node, parent, p); | ||
905 | rb_insert_color(&ref->node, &client->handle_refs); | ||
906 | nvmap_ref_unlock(client); | ||
907 | } | ||
908 | |||
909 | struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client, | ||
910 | size_t size) | ||
911 | { | ||
912 | struct nvmap_handle *h; | ||
913 | struct nvmap_handle_ref *ref = NULL; | ||
914 | |||
915 | if (!client) | ||
916 | return ERR_PTR(-EINVAL); | ||
917 | |||
918 | if (!size) | ||
919 | return ERR_PTR(-EINVAL); | ||
920 | |||
921 | h = kzalloc(sizeof(*h), GFP_KERNEL); | ||
922 | if (!h) | ||
923 | return ERR_PTR(-ENOMEM); | ||
924 | |||
925 | ref = kzalloc(sizeof(*ref), GFP_KERNEL); | ||
926 | if (!ref) { | ||
927 | kfree(h); | ||
928 | return ERR_PTR(-ENOMEM); | ||
929 | } | ||
930 | |||
931 | atomic_set(&h->ref, 1); | ||
932 | atomic_set(&h->pin, 0); | ||
933 | h->owner = client; | ||
934 | h->dev = client->dev; | ||
935 | BUG_ON(!h->owner); | ||
936 | h->size = h->orig_size = size; | ||
937 | h->flags = NVMAP_HANDLE_WRITE_COMBINE; | ||
938 | mutex_init(&h->lock); | ||
939 | |||
940 | nvmap_handle_add(client->dev, h); | ||
941 | |||
942 | atomic_set(&ref->dupes, 1); | ||
943 | ref->handle = h; | ||
944 | atomic_set(&ref->pin, 0); | ||
945 | add_handle_ref(client, ref); | ||
946 | return ref; | ||
947 | } | ||
948 | |||
/*
 * Take a reference in @client to an existing, allocated handle @id.
 * If the client already references it, only the dupe count is bumped;
 * otherwise a new ref node is created and the client's IOVM/carveout
 * commit accounting is charged.  Returns the ref or an ERR_PTR.
 */
struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
						   unsigned long id)
{
	struct nvmap_handle_ref *ref = NULL;
	struct nvmap_handle *h = NULL;

	BUG_ON(!client || client->dev != nvmap_dev);
	/* on success, the reference count for the handle should be
	 * incremented, so the success paths will not call nvmap_handle_put */
	h = nvmap_validate_get(client, id);

	if (!h) {
		nvmap_debug(client, "%s duplicate handle failed\n",
			    current->group_leader->comm);
		return ERR_PTR(-EPERM);
	}

	/* Only fully allocated handles may be duplicated. */
	if (!h->alloc) {
		nvmap_err(client, "%s duplicating unallocated handle\n",
			  current->group_leader->comm);
		nvmap_handle_put(h);
		return ERR_PTR(-EINVAL);
	}

	nvmap_ref_lock(client);
	ref = _nvmap_validate_id_locked(client, (unsigned long)h);

	if (ref) {
		/* handle already duplicated in client; just increment
		 * the reference count rather than re-duplicating it */
		atomic_inc(&ref->dupes);
		nvmap_ref_unlock(client);
		return ref;
	}

	nvmap_ref_unlock(client);

	/* verify that adding this handle to the process' access list
	 * won't exceed the IOVM limit */
	if (h->heap_pgalloc && !h->pgalloc.contig) {
		int oc;
		oc = atomic_add_return(h->size, &client->iovm_commit);
		if (oc > client->iovm_limit && !client->super) {
			/* Roll back the optimistic commit. */
			atomic_sub(h->size, &client->iovm_commit);
			nvmap_handle_put(h);
			nvmap_err(client, "duplicating %p in %s over-commits"
				  " IOVMM space\n", (void *)id,
				  current->group_leader->comm);
			return ERR_PTR(-ENOMEM);
		}
	}

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref) {
		nvmap_handle_put(h);
		return ERR_PTR(-ENOMEM);
	}

	/* Carveout-backed handles charge the carveout commit instead. */
	if (!h->heap_pgalloc) {
		mutex_lock(&h->lock);
		nvmap_carveout_commit_add(client,
			nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
			h->size);
		mutex_unlock(&h->lock);
	}

	atomic_set(&ref->dupes, 1);
	ref->handle = h;
	atomic_set(&ref->pin, 0);
	add_handle_ref(client, ref);
	return ref;
}
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.c b/drivers/video/tegra/nvmap/nvmap_heap.c new file mode 100644 index 00000000000..7474f31534f --- /dev/null +++ b/drivers/video/tegra/nvmap/nvmap_heap.c | |||
@@ -0,0 +1,1113 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/nvmap/nvmap_heap.c | ||
3 | * | ||
4 | * GPU heap allocator. | ||
5 | * | ||
6 | * Copyright (c) 2011, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/device.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/list.h> | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/mutex.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/err.h> | ||
30 | |||
31 | #include <mach/nvmap.h> | ||
32 | #include "nvmap.h" | ||
33 | #include "nvmap_heap.h" | ||
34 | #include "nvmap_common.h" | ||
35 | |||
36 | #include <asm/tlbflush.h> | ||
37 | #include <asm/cacheflush.h> | ||
38 | |||
39 | /* | ||
40 | * "carveouts" are platform-defined regions of physically contiguous memory | ||
41 | * which are not managed by the OS. a platform may specify multiple carveouts, | ||
42 | * for either small special-purpose memory regions (like IRAM on Tegra SoCs) | ||
43 | * or reserved regions of main system memory. | ||
44 | * | ||
45 | * the carveout allocator returns allocations which are physically contiguous. | ||
46 | * to reduce external fragmentation, the allocation algorithm implemented in | ||
47 | * this file employs 3 strategies for keeping allocations of similar size | ||
48 | * grouped together inside the larger heap: the "small", "normal" and "huge" | ||
49 | * strategies. the size thresholds (in bytes) for determining which strategy | ||
50 | * to employ should be provided by the platform for each heap. it is possible | ||
51 | * for a platform to define a heap where only the "normal" strategy is used. | ||
52 | * | ||
53 | * o "normal" allocations use an address-order first-fit allocator (called | ||
54 | * BOTTOM_UP in the code below). each allocation is rounded up to be | ||
55 | * an integer multiple of the "small" allocation size. | ||
56 | * | ||
57 | * o "huge" allocations use an address-order last-fit allocator (called | ||
58 | * TOP_DOWN in the code below). like "normal" allocations, each allocation | ||
59 | * is rounded up to be an integer multiple of the "small" allocation size. | ||
60 | * | ||
61 | * o "small" allocations are treated differently: the heap manager maintains | ||
62 | * a pool of "small"-sized blocks internally from which allocations less | ||
63 | * than 1/2 of the "small" size are buddy-allocated. if a "small" allocation | ||
64 | * is requested and none of the buddy sub-heaps is able to service it, | ||
65 | * the heap manager will try to allocate a new buddy-heap. | ||
66 | * | ||
67 | * this allocator is intended to keep "splinters" colocated in the carveout, | ||
68 | * and to ensure that the minimum free block size in the carveout (i.e., the | ||
69 | * "small" threshold) is still a meaningful size. | ||
70 | * | ||
71 | */ | ||
72 | |||
#define MAX_BUDDY_NR	128	/* maximum buddies in a buddy allocator */

/* Scan direction for the first/last-fit strategies described above:
 * BOTTOM_UP for "normal" allocations, TOP_DOWN for "huge" ones. */
enum direction {
	TOP_DOWN,
	BOTTOM_UP
};

enum block_type {
	BLOCK_FIRST_FIT,	/* block was allocated directly from the heap */
	BLOCK_BUDDY,		/* block was allocated from a buddy sub-heap */
	BLOCK_EMPTY,
};
85 | |||
/* Aggregated usage statistics for a heap, filled by heap_stat(). */
struct heap_stat {
	size_t free;			/* total free size */
	size_t free_largest;		/* largest free block */
	size_t free_count;		/* number of free blocks */
	size_t total;			/* total size */
	size_t largest;			/* largest unique block */
	size_t count;			/* total number of blocks */
	/* fast compaction attempt counter */
	unsigned int compaction_count_fast;
	/* full compaction attempt counter */
	unsigned int compaction_count_full;
};

struct buddy_heap;

/* A block handed out by a buddy sub-heap. */
struct buddy_block {
	struct nvmap_heap_block block;
	struct buddy_heap *heap;
};

/* A first-fit block carved directly from the heap's address range. */
struct list_block {
	struct nvmap_heap_block block;
	struct list_head all_list;	/* membership in heap->all_list */
	unsigned int mem_prot;
	unsigned long orig_addr;	/* base before alignment adjustment */
	size_t size;
	size_t align;
	struct nvmap_heap *heap;
	struct list_head free_list;	/* membership in heap->free_list */
};

/* Blocks of either type are allocated from one kmem cache (block_cache),
 * so the cache object must be big enough for both. */
struct combo_block {
	union {
		struct list_block lb;
		struct buddy_block bb;
	};
};

/* Per-buddy allocation bitmap entry. */
struct buddy_bits {
	unsigned int alloc:1;
	unsigned int order:7;	/* log2(MAX_BUDDY_NR); */
};

/* A sub-heap carved from one list_block, buddy-allocated in units of
 * (1 << parent->min_buddy_shift) bytes. */
struct buddy_heap {
	struct list_block *heap_base;
	unsigned int nr_buddies;
	struct list_head buddy_list;	/* membership in parent->buddy_list */
	struct buddy_bits bitmap[MAX_BUDDY_NR];
};

/* One carveout heap; all lists are protected by ->lock. */
struct nvmap_heap {
	struct list_head all_list;
	struct list_head free_list;
	struct mutex lock;
	struct list_head buddy_list;
	unsigned int min_buddy_shift;
	unsigned int buddy_heap_size;
	unsigned int small_alloc;
	const char *name;
	void *arg;
	struct device dev;	/* sysfs node exposing the stats below */
};

/* Slab caches for buddy_heap and combo_block objects. */
static struct kmem_cache *buddy_heap_cache;
static struct kmem_cache *block_cache;
151 | |||
/* The nvmap_heap that a buddy sub-heap was carved from. */
static inline struct nvmap_heap *parent_of(struct buddy_heap *heap)
{
	return heap->heap_base->heap;
}
156 | |||
157 | static inline unsigned int order_of(size_t len, size_t min_shift) | ||
158 | { | ||
159 | len = 2 * DIV_ROUND_UP(len, (1 << min_shift)) - 1; | ||
160 | return fls(len)-1; | ||
161 | } | ||
162 | |||
/* returns the free size in bytes of the buddy heap; must be called while
 * holding the parent heap's lock. */
static void buddy_stat(struct buddy_heap *heap, struct heap_stat *stat)
{
	unsigned int index;
	unsigned int shift = parent_of(heap)->min_buddy_shift;

	/* Walk the bitmap block by block; each entry's order gives the
	 * number of buddy units the block spans, hence the stride. */
	for (index = 0; index < heap->nr_buddies;
	     index += (1 << heap->bitmap[index].order)) {
		size_t curr = 1 << (heap->bitmap[index].order + shift);

		stat->largest = max(stat->largest, curr);
		stat->total += curr;
		stat->count++;

		if (!heap->bitmap[index].alloc) {
			stat->free += curr;
			stat->free_largest = max(stat->free_largest, curr);
			stat->free_count++;
		}
	}
}
185 | |||
/* returns the free size of the heap (including any free blocks in any
 * buddy-heap suballocators; must be called while holding the parent
 * heap's lock. */
/* NOTE(review): despite the comment above, this function takes
 * heap->lock itself; callers must NOT already hold it.  It fills @stat
 * and returns the lowest block address seen (-1ul if the heap is empty). */
static unsigned long heap_stat(struct nvmap_heap *heap, struct heap_stat *stat)
{
	struct buddy_heap *bh;
	struct list_block *l = NULL;
	unsigned long base = -1ul;

	memset(stat, 0, sizeof(*stat));
	mutex_lock(&heap->lock);
	list_for_each_entry(l, &heap->all_list, all_list) {
		stat->total += l->size;
		stat->largest = max(l->size, stat->largest);
		stat->count++;
		base = min(base, l->orig_addr);
	}

	list_for_each_entry(bh, &heap->buddy_list, buddy_list) {
		buddy_stat(bh, stat);
		/* the total counts are double-counted for buddy heaps
		 * since the blocks allocated for buddy heaps exist in the
		 * all_list; subtract out the doubly-added stats */
		stat->total -= bh->heap_base->size;
		stat->count--;
	}

	list_for_each_entry(l, &heap->free_list, free_list) {
		stat->free += l->size;
		stat->free_count++;
		stat->free_largest = max(l->size, stat->free_largest);
	}
	mutex_unlock(&heap->lock);

	return base;
}
222 | |||
223 | static ssize_t heap_name_show(struct device *dev, | ||
224 | struct device_attribute *attr, char *buf); | ||
225 | |||
226 | static ssize_t heap_stat_show(struct device *dev, | ||
227 | struct device_attribute *attr, char *buf); | ||
228 | |||
/* Read-only sysfs attributes under each heap's device node; all stat
 * attributes share the heap_stat_show() handler, which dispatches on the
 * attribute pointer. */
static struct device_attribute heap_stat_total_max =
	__ATTR(total_max, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_total_count =
	__ATTR(total_count, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_total_size =
	__ATTR(total_size, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_free_max =
	__ATTR(free_max, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_free_count =
	__ATTR(free_count, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_free_size =
	__ATTR(free_size, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_base =
	__ATTR(base, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_attr_name =
	__ATTR(name, S_IRUGO, heap_name_show, NULL);

static struct attribute *heap_stat_attrs[] = {
	&heap_stat_total_max.attr,
	&heap_stat_total_count.attr,
	&heap_stat_total_size.attr,
	&heap_stat_free_max.attr,
	&heap_stat_free_count.attr,
	&heap_stat_free_size.attr,
	&heap_stat_base.attr,
	&heap_attr_name.attr,
	NULL,
};

/* Registered as a group so all attributes are created/removed together. */
static struct attribute_group heap_stat_attr_group = {
	.attrs = heap_stat_attrs,
};
268 | |||
269 | static ssize_t heap_name_show(struct device *dev, | ||
270 | struct device_attribute *attr, char *buf) | ||
271 | { | ||
272 | |||
273 | struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev); | ||
274 | return sprintf(buf, "%s\n", heap->name); | ||
275 | } | ||
276 | |||
277 | static ssize_t heap_stat_show(struct device *dev, | ||
278 | struct device_attribute *attr, char *buf) | ||
279 | { | ||
280 | struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev); | ||
281 | struct heap_stat stat; | ||
282 | unsigned long base; | ||
283 | |||
284 | base = heap_stat(heap, &stat); | ||
285 | |||
286 | if (attr == &heap_stat_total_max) | ||
287 | return sprintf(buf, "%u\n", stat.largest); | ||
288 | else if (attr == &heap_stat_total_count) | ||
289 | return sprintf(buf, "%u\n", stat.count); | ||
290 | else if (attr == &heap_stat_total_size) | ||
291 | return sprintf(buf, "%u\n", stat.total); | ||
292 | else if (attr == &heap_stat_free_max) | ||
293 | return sprintf(buf, "%u\n", stat.free_largest); | ||
294 | else if (attr == &heap_stat_free_count) | ||
295 | return sprintf(buf, "%u\n", stat.free_count); | ||
296 | else if (attr == &heap_stat_free_size) | ||
297 | return sprintf(buf, "%u\n", stat.free); | ||
298 | else if (attr == &heap_stat_base) | ||
299 | return sprintf(buf, "%08lx\n", base); | ||
300 | else | ||
301 | return -EINVAL; | ||
302 | } | ||
#ifndef CONFIG_NVMAP_CARVEOUT_COMPACTOR
/* buddy_alloc: allocate at least @size bytes from buddy sub-heap @heap,
 * aligned to @align, with memory protection @mem_prot.
 *
 * Best-fit scan over the order bitmap, then split the chosen buddy down
 * to the requested order.  Returns NULL if the sub-heap's protection
 * differs from @mem_prot or no suitable buddy exists. */
static struct nvmap_heap_block *buddy_alloc(struct buddy_heap *heap,
					    size_t size, size_t align,
					    unsigned int mem_prot)
{
	unsigned int index = 0;
	unsigned int min_shift = parent_of(heap)->min_buddy_shift;
	unsigned int order = order_of(size, min_shift);
	unsigned int align_mask;
	unsigned int best = heap->nr_buddies;
	struct buddy_block *b;

	if (heap->heap_base->mem_prot != mem_prot)
		return NULL;

	/* alignment can never be finer than the minimum buddy size */
	align = max(align, (size_t)(1 << min_shift));
	align_mask = (align >> min_shift) - 1;

	/* walk run-by-run: each bitmap entry at the start of a run
	 * records that run's order, so step by (1 << order) */
	for (index = 0; index < heap->nr_buddies;
	     index += (1 << heap->bitmap[index].order)) {

		/* skip allocated, misaligned, or too-small runs */
		if (heap->bitmap[index].alloc || (index & align_mask) ||
		    (heap->bitmap[index].order < order))
			continue;

		/* best-fit: remember the smallest sufficient order */
		if (best == heap->nr_buddies ||
		    heap->bitmap[index].order < heap->bitmap[best].order)
			best = index;

		/* exact fit -- no better candidate possible */
		if (heap->bitmap[best].order == order)
			break;
	}

	if (best == heap->nr_buddies)
		return NULL;

	b = kmem_cache_zalloc(block_cache, GFP_KERNEL);
	if (!b)
		return NULL;

	/* split the chosen buddy down to the requested order; each
	 * split marks the upper half (the XOR buddy) free at the new,
	 * smaller order */
	while (heap->bitmap[best].order != order) {
		unsigned int buddy;
		heap->bitmap[best].order--;
		buddy = best ^ (1 << heap->bitmap[best].order);
		heap->bitmap[buddy].order = heap->bitmap[best].order;
		heap->bitmap[buddy].alloc = 0;
	}
	heap->bitmap[best].alloc = 1;
	b->block.base = heap->heap_base->block.base + (best << min_shift);
	b->heap = heap;
	b->block.type = BLOCK_BUDDY;
	return &b->block;
}
#endif
357 | |||
/* do_buddy_free: free buddy block @block and iteratively merge it with
 * its XOR buddy while that buddy is also free and of equal order.
 * Returns the owning buddy_heap if the entire sub-heap has coalesced
 * back into one free block (so the caller can release the sub-heap),
 * otherwise NULL. */
static struct buddy_heap *do_buddy_free(struct nvmap_heap_block *block)
{
	struct buddy_block *b = container_of(block, struct buddy_block, block);
	struct buddy_heap *h = b->heap;
	unsigned int min_shift = parent_of(h)->min_buddy_shift;
	unsigned int index;

	index = (block->base - h->heap_base->block.base) >> min_shift;
	h->bitmap[index].alloc = 0;

	for (;;) {
		/* a block's buddy at order N differs only in bit N */
		unsigned int buddy = index ^ (1 << h->bitmap[index].order);
		if (buddy >= h->nr_buddies || h->bitmap[buddy].alloc ||
		    h->bitmap[buddy].order != h->bitmap[index].order)
			break;

		/* merge: promote both halves one order; the lower index
		 * becomes the head of the merged run */
		h->bitmap[buddy].order++;
		h->bitmap[index].order++;
		index = min(buddy, index);
	}

	kmem_cache_free(block_cache, b);
	/* sub-heap fully merged back into a single top-order block? */
	if ((1 << h->bitmap[0].order) == h->nr_buddies)
		return h;

	return NULL;
}
385 | |||
386 | |||
387 | /* | ||
388 | * base_max limits position of allocated chunk in memory. | ||
389 | * if base_max is 0 then there is no such limitation. | ||
390 | */ | ||
391 | static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap, | ||
392 | size_t len, size_t align, | ||
393 | unsigned int mem_prot, | ||
394 | unsigned long base_max) | ||
395 | { | ||
396 | struct list_block *b = NULL; | ||
397 | struct list_block *i = NULL; | ||
398 | struct list_block *rem = NULL; | ||
399 | unsigned long fix_base; | ||
400 | enum direction dir; | ||
401 | |||
402 | /* since pages are only mappable with one cache attribute, | ||
403 | * and most allocations from carveout heaps are DMA coherent | ||
404 | * (i.e., non-cacheable), round cacheable allocations up to | ||
405 | * a page boundary to ensure that the physical pages will | ||
406 | * only be mapped one way. */ | ||
407 | if (mem_prot == NVMAP_HANDLE_CACHEABLE || | ||
408 | mem_prot == NVMAP_HANDLE_INNER_CACHEABLE) { | ||
409 | align = max_t(size_t, align, PAGE_SIZE); | ||
410 | len = PAGE_ALIGN(len); | ||
411 | } | ||
412 | |||
413 | #ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR | ||
414 | dir = BOTTOM_UP; | ||
415 | #else | ||
416 | dir = (len <= heap->small_alloc) ? BOTTOM_UP : TOP_DOWN; | ||
417 | #endif | ||
418 | |||
419 | if (dir == BOTTOM_UP) { | ||
420 | list_for_each_entry(i, &heap->free_list, free_list) { | ||
421 | size_t fix_size; | ||
422 | fix_base = ALIGN(i->block.base, align); | ||
423 | fix_size = i->size - (fix_base - i->block.base); | ||
424 | |||
425 | /* needed for compaction. relocated chunk | ||
426 | * should never go up */ | ||
427 | if (base_max && fix_base > base_max) | ||
428 | break; | ||
429 | |||
430 | if (fix_size >= len) { | ||
431 | b = i; | ||
432 | break; | ||
433 | } | ||
434 | } | ||
435 | } else { | ||
436 | list_for_each_entry_reverse(i, &heap->free_list, free_list) { | ||
437 | if (i->size >= len) { | ||
438 | fix_base = i->block.base + i->size - len; | ||
439 | fix_base &= ~(align-1); | ||
440 | if (fix_base >= i->block.base) { | ||
441 | b = i; | ||
442 | break; | ||
443 | } | ||
444 | } | ||
445 | } | ||
446 | } | ||
447 | |||
448 | if (!b) | ||
449 | return NULL; | ||
450 | |||
451 | if (dir == BOTTOM_UP) | ||
452 | b->block.type = BLOCK_FIRST_FIT; | ||
453 | |||
454 | /* split free block */ | ||
455 | if (b->block.base != fix_base) { | ||
456 | /* insert a new free block before allocated */ | ||
457 | rem = kmem_cache_zalloc(block_cache, GFP_KERNEL); | ||
458 | if (!rem) { | ||
459 | b->orig_addr = b->block.base; | ||
460 | b->block.base = fix_base; | ||
461 | b->size -= (b->block.base - b->orig_addr); | ||
462 | goto out; | ||
463 | } | ||
464 | |||
465 | rem->block.type = BLOCK_EMPTY; | ||
466 | rem->block.base = b->block.base; | ||
467 | rem->orig_addr = rem->block.base; | ||
468 | rem->size = fix_base - rem->block.base; | ||
469 | b->block.base = fix_base; | ||
470 | b->orig_addr = fix_base; | ||
471 | b->size -= rem->size; | ||
472 | list_add_tail(&rem->all_list, &b->all_list); | ||
473 | list_add_tail(&rem->free_list, &b->free_list); | ||
474 | } | ||
475 | |||
476 | b->orig_addr = b->block.base; | ||
477 | |||
478 | if (b->size > len) { | ||
479 | /* insert a new free block after allocated */ | ||
480 | rem = kmem_cache_zalloc(block_cache, GFP_KERNEL); | ||
481 | if (!rem) | ||
482 | goto out; | ||
483 | |||
484 | rem->block.type = BLOCK_EMPTY; | ||
485 | rem->block.base = b->block.base + len; | ||
486 | rem->size = b->size - len; | ||
487 | BUG_ON(rem->size > b->size); | ||
488 | rem->orig_addr = rem->block.base; | ||
489 | b->size = len; | ||
490 | list_add(&rem->all_list, &b->all_list); | ||
491 | list_add(&rem->free_list, &b->free_list); | ||
492 | } | ||
493 | |||
494 | out: | ||
495 | list_del(&b->free_list); | ||
496 | b->heap = heap; | ||
497 | b->mem_prot = mem_prot; | ||
498 | b->align = align; | ||
499 | return &b->block; | ||
500 | } | ||
501 | |||
#ifdef DEBUG_FREE_LIST
/* Dump the heap's free list for debugging, marking @token with an
 * arrow; used to trace the list surgery in do_heap_free(). */
static void freelist_debug(struct nvmap_heap *heap, const char *title,
			   struct list_block *token)
{
	int i;
	struct list_block *n;

	dev_debug(&heap->dev, "%s\n", title);
	i = 0;
	list_for_each_entry(n, &heap->free_list, free_list) {
		dev_debug(&heap->dev, "\t%d [%p..%p]%s\n", i, (void *)n->orig_addr,
			  (void *)(n->orig_addr + n->size),
			  (n == token) ? "<--" : "");
		i++;
	}
}
#else
/* compiles away entirely unless DEBUG_FREE_LIST is defined */
#define freelist_debug(_heap, _title, _token) do { } while (0)
#endif
521 | |||
/* do_heap_free: return first-fit block @block to its heap's free list
 * (kept sorted by base address) and coalesce it with physically
 * adjacent free neighbours.  Returns the resulting (possibly merged)
 * free block.  Caller must hold heap->lock. */
static struct list_block *do_heap_free(struct nvmap_heap_block *block)
{
	struct list_block *b = container_of(block, struct list_block, block);
	struct list_block *n = NULL;
	struct nvmap_heap *heap = b->heap;

	/* restore leading alignment padding that do_heap_alloc folded
	 * into the block (base was moved up from orig_addr) */
	BUG_ON(b->block.base > b->orig_addr);
	b->size += (b->block.base - b->orig_addr);
	b->block.base = b->orig_addr;

	freelist_debug(heap, "free list before", b);

	/* Find position of first free block to the right of freed one */
	list_for_each_entry(n, &heap->free_list, free_list) {
		if (n->block.base > b->block.base)
			break;
	}

	/* Add freed block before found free one */
	list_add_tail(&b->free_list, &n->free_list);
	BUG_ON(list_empty(&b->all_list));

	freelist_debug(heap, "free list pre-merge", b);

	/* merge freed block with next if they connect
	 * freed block becomes bigger, next one is destroyed */
	if (!list_is_last(&b->free_list, &heap->free_list)) {
		n = list_first_entry(&b->free_list, struct list_block, free_list);
		if (n->block.base == b->block.base + b->size) {
			list_del(&n->all_list);
			list_del(&n->free_list);
			BUG_ON(b->orig_addr >= n->orig_addr);
			b->size += n->size;
			kmem_cache_free(block_cache, n);
		}
	}

	/* merge freed block with prev if they connect
	 * previous free block becomes bigger, freed one is destroyed */
	if (b->free_list.prev != &heap->free_list) {
		n = list_entry(b->free_list.prev, struct list_block, free_list);
		if (n->block.base + n->size == b->block.base) {
			list_del(&b->all_list);
			list_del(&b->free_list);
			BUG_ON(n->orig_addr >= b->orig_addr);
			n->size += b->size;
			kmem_cache_free(block_cache, b);
			b = n;
		}
	}

	freelist_debug(heap, "free list after", b);
	b->block.type = BLOCK_EMPTY;
	return b;
}
577 | |||
#ifndef CONFIG_NVMAP_CARVEOUT_COMPACTOR

/* do_buddy_alloc: service a small allocation from one of @h's buddy
 * sub-heaps, creating a new sub-heap (carved out of the first-fit
 * allocator) when none of the existing ones can satisfy the request.
 * Caller must hold h->lock. */
static struct nvmap_heap_block *do_buddy_alloc(struct nvmap_heap *h,
					       size_t len, size_t align,
					       unsigned int mem_prot)
{
	struct buddy_heap *bh;
	struct nvmap_heap_block *b = NULL;

	list_for_each_entry(bh, &h->buddy_list, buddy_list) {
		b = buddy_alloc(bh, len, align, mem_prot);
		if (b)
			return b;
	}

	/* no buddy heaps could service this allocation: try to create a new
	 * buddy heap instead */
	bh = kmem_cache_zalloc(buddy_heap_cache, GFP_KERNEL);
	if (!bh)
		return NULL;

	/* back the new sub-heap with one buddy_heap_size block, aligned
	 * to its own size, from the first-fit allocator */
	b = do_heap_alloc(h, h->buddy_heap_size,
			  h->buddy_heap_size, mem_prot, 0);
	if (!b) {
		kmem_cache_free(buddy_heap_cache, bh);
		return NULL;
	}

	bh->heap_base = container_of(b, struct list_block, block);
	bh->nr_buddies = h->buddy_heap_size >> h->min_buddy_shift;
	/* start as one free run spanning the whole sub-heap */
	bh->bitmap[0].alloc = 0;
	bh->bitmap[0].order = order_of(h->buddy_heap_size, h->min_buddy_shift);
	list_add_tail(&bh->buddy_list, &h->buddy_list);
	return buddy_alloc(bh, len, align, mem_prot);
}

#endif
615 | |||
616 | #ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR | ||
617 | |||
/* do_heap_copy_listblock: copy @len bytes of physical carveout memory
 * from @src_base to @dst_base, one page at a time, through two scratch
 * kernel PTE mappings (write-combined).  Used by the compactor to move
 * a relocated block's contents.  Returns 0 on success, -1 on failure. */
static int do_heap_copy_listblock(struct nvmap_device *dev,
		 unsigned long dst_base, unsigned long src_base, size_t len)
{
	pte_t **pte_src = NULL;
	pte_t **pte_dst = NULL;
	void *addr_src = NULL;
	void *addr_dst = NULL;
	unsigned long kaddr_src;
	unsigned long kaddr_dst;
	unsigned long phys_src = src_base;
	unsigned long phys_dst = dst_base;
	unsigned long pfn_src;
	unsigned long pfn_dst;
	int error = 0;

	pgprot_t prot = pgprot_writecombine(pgprot_kernel);

	int page;

	pte_src = nvmap_alloc_pte(dev, &addr_src);
	if (IS_ERR(pte_src)) {
		pr_err("Error when allocating pte_src\n");
		pte_src = NULL;
		error = -1;
		goto fail;
	}

	pte_dst = nvmap_alloc_pte(dev, &addr_dst);
	if (IS_ERR(pte_dst)) {
		pr_err("Error while allocating pte_dst\n");
		pte_dst = NULL;
		error = -1;
		goto fail;
	}

	kaddr_src = (unsigned long)addr_src;
	kaddr_dst = (unsigned long)addr_dst;

	/* compaction only ever moves blocks to lower addresses, and all
	 * three parameters must be page aligned */
	BUG_ON(phys_dst > phys_src);
	BUG_ON((phys_src & PAGE_MASK) != phys_src);
	BUG_ON((phys_dst & PAGE_MASK) != phys_dst);
	BUG_ON((len & PAGE_MASK) != len);

	for (page = 0; page < (len >> PAGE_SHIFT) ; page++) {

		pfn_src = __phys_to_pfn(phys_src) + page;
		pfn_dst = __phys_to_pfn(phys_dst) + page;

		/* retarget the scratch PTEs at the current page pair,
		 * flushing the TLB entry for each remap */
		set_pte_at(&init_mm, kaddr_src, *pte_src,
			   pfn_pte(pfn_src, prot));
		flush_tlb_kernel_page(kaddr_src);

		set_pte_at(&init_mm, kaddr_dst, *pte_dst,
			   pfn_pte(pfn_dst, prot));
		flush_tlb_kernel_page(kaddr_dst);

		memcpy(addr_dst, addr_src, PAGE_SIZE);
	}

fail:
	if (pte_src)
		nvmap_free_pte(dev, pte_src);
	if (pte_dst)
		nvmap_free_pte(dev, pte_dst);
	return error;
}
684 | |||
685 | |||
/* do_heap_relocate_listblock: try to move @block to a lower address in
 * its heap to reduce fragmentation, then copy its contents over.
 *
 * @fast selects the strategy: allocate-then-free (cheap; failure is
 * harmless) versus free-then-allocate (best packing; re-allocation is
 * expected to always succeed, enforced by BUG_ON).
 *
 * The move is skipped (returns NULL) when the handle is pinned or
 * mapped; both are checked under handle->lock + share->pin_lock so the
 * pin path cannot race in.  Returns the new block on success. */
static struct nvmap_heap_block *do_heap_relocate_listblock(
		struct list_block *block, bool fast)
{
	struct nvmap_heap_block *heap_block = &block->block;
	struct nvmap_heap_block *heap_block_new = NULL;
	struct nvmap_heap *heap = block->heap;
	struct nvmap_handle *handle = heap_block->handle;
	unsigned long src_base = heap_block->base;
	unsigned long dst_base;
	size_t src_size = block->size;
	size_t src_align = block->align;
	unsigned int src_prot = block->mem_prot;
	int error = 0;
	struct nvmap_share *share;

	if (!handle) {
		pr_err("INVALID HANDLE!\n");
		return NULL;
	}

	mutex_lock(&handle->lock);

	/* NOTE(review): share is dereferenced unchecked below --
	 * presumably nvmap_get_share_from_dev cannot return NULL for a
	 * valid handle->dev; confirm */
	share = nvmap_get_share_from_dev(handle->dev);

	/* TODO: It is possible to use only handle lock and no share
	 * pin_lock, but then we'll need to lock every handle during
	 * each pinning operation. Need to estimate performance impact
	 * if we decide to simplify locking this way. */
	mutex_lock(&share->pin_lock);

	/* abort if block is pinned */
	if (atomic_read(&handle->pin))
		goto fail;
	/* abort if block is mapped */
	if (handle->usecount)
		goto fail;

	if (fast) {
		/* Fast compaction path - first allocate, then free. */
		heap_block_new = do_heap_alloc(heap, src_size, src_align,
				src_prot, src_base);
		if (heap_block_new)
			do_heap_free(heap_block);
		else
			goto fail;
	} else {
		/* Full compaction path, first free, then allocate
		 * It is slower but provide best compaction results */
		do_heap_free(heap_block);
		heap_block_new = do_heap_alloc(heap, src_size, src_align,
				src_prot, src_base);
		/* Allocation should always succeed*/
		BUG_ON(!heap_block_new);
	}

	/* update handle */
	handle->carveout = heap_block_new;
	heap_block_new->handle = handle;

	/* copy source data to new block location */
	dst_base = heap_block_new->base;

	/* new allocation should always go lower addresses */
	BUG_ON(dst_base >= src_base);

	error = do_heap_copy_listblock(handle->dev,
				dst_base, src_base, src_size);
	BUG_ON(error);

fail:
	mutex_unlock(&share->pin_lock);
	mutex_unlock(&handle->lock);
	return heap_block_new;
}
760 | |||
/* nvmap_heap_compact: walk all blocks in address order and relocate
 * allocated neighbours of each free block downwards so free space
 * grows.  In @fast mode the walk stops as soon as a free block of
 * @requested_size exists and only cheap relocations are attempted;
 * otherwise the heap is packed as far as possible.  Caller must hold
 * heap->lock. */
static void nvmap_heap_compact(struct nvmap_heap *heap,
		size_t requested_size, bool fast)
{
	struct list_block *block_current = NULL;
	struct list_block *block_prev = NULL;
	struct list_block *block_next = NULL;

	struct list_head *ptr, *ptr_prev, *ptr_next;
	int relocation_count = 0;

	ptr = heap->all_list.next;

	/* walk through all blocks */
	while (ptr != &heap->all_list) {
		block_current = list_entry(ptr, struct list_block, all_list);

		/* snapshot neighbours: relocation may merge/destroy
		 * the current free block, invalidating ptr itself */
		ptr_prev = ptr->prev;
		ptr_next = ptr->next;

		/* only free (EMPTY) blocks drive compaction */
		if (block_current->block.type != BLOCK_EMPTY) {
			ptr = ptr_next;
			continue;
		}

		if (fast && block_current->size >= requested_size)
			break;

		/* relocate prev block */
		if (ptr_prev != &heap->all_list) {

			block_prev = list_entry(ptr_prev,
					struct list_block, all_list);

			BUG_ON(block_prev->block.type != BLOCK_FIRST_FIT);

			if (do_heap_relocate_listblock(block_prev, true)) {

				/* After relocation current free block can be
				 * destroyed when it is merged with previous
				 * free block. Updated pointer to new free
				 * block can be obtained from the next block */
				relocation_count++;
				ptr = ptr_next->prev;
				continue;
			}
		}

		if (ptr_next != &heap->all_list) {

			block_next = list_entry(ptr_next,
					struct list_block, all_list);

			BUG_ON(block_next->block.type != BLOCK_FIRST_FIT);

			if (do_heap_relocate_listblock(block_next, fast)) {
				/* re-derive current position from the
				 * surviving prev node */
				ptr = ptr_prev->next;
				relocation_count++;
				continue;
			}
		}
		ptr = ptr_next;
	}
	pr_err("Relocated %d chunks\n", relocation_count);
}
825 | #endif | ||
826 | |||
/* Take a usage reference on handle @h.  For carveout (non-pgalloc)
 * allocations the count is bumped under h->lock so the compactor --
 * which tests usecount while holding the same lock -- cannot relocate
 * a block that is in use.
 * NOTE(review): the else path (and nvmap_usecount_dec) update the
 * count without the lock; presumably acceptable for pgalloc /
 * unallocated handles -- confirm against the callers. */
void nvmap_usecount_inc(struct nvmap_handle *h)
{
	if (h->alloc && !h->heap_pgalloc) {
		mutex_lock(&h->lock);
		h->usecount++;
		mutex_unlock(&h->lock);
	} else {
		h->usecount++;
	}
}
837 | |||
838 | |||
/* Drop a usage reference taken by nvmap_usecount_inc().
 * NOTE(review): unlike the inc path this never takes h->lock --
 * confirm the unlocked decrement cannot race with the compactor's
 * usecount check. */
void nvmap_usecount_dec(struct nvmap_handle *h)
{
	h->usecount--;
}
843 | |||
/* nvmap_heap_alloc: allocates a block of memory of len bytes, aligned to
 * align bytes. */
/* Size, alignment and protection come from @handle; on success the
 * block and handle are linked to each other (b->handle / carveout). */
struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *h,
					  struct nvmap_handle *handle)
{
	struct nvmap_heap_block *b;
	size_t len = handle->size;
	size_t align = handle->align;
	unsigned int prot = handle->flags;

	mutex_lock(&h->lock);

#ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR
	/* Align to page size */
	align = ALIGN(align, PAGE_SIZE);
	len = ALIGN(len, PAGE_SIZE);
	b = do_heap_alloc(h, len, align, prot, 0);
	/* on failure escalate: fast compaction first, then a full
	 * compaction pass, retrying the allocation after each */
	if (!b) {
		pr_err("Compaction triggered!\n");
		nvmap_heap_compact(h, len, true);
		b = do_heap_alloc(h, len, align, prot, 0);
		if (!b) {
			pr_err("Full compaction triggered!\n");
			nvmap_heap_compact(h, len, false);
			b = do_heap_alloc(h, len, align, prot, 0);
		}
	}
#else
	/* small requests go to the buddy sub-allocator; everything else
	 * to the first-fit list allocator */
	if (len <= h->buddy_heap_size / 2) {
		b = do_buddy_alloc(h, len, align, prot);
	} else {
		if (h->buddy_heap_size)
			len = ALIGN(len, h->buddy_heap_size);
		align = max(align, (size_t)L1_CACHE_BYTES);
		b = do_heap_alloc(h, len, align, prot, 0);
	}
#endif

	if (b) {
		b->handle = handle;
		handle->carveout = b;
	}
	mutex_unlock(&h->lock);
	return b;
}
889 | |||
890 | struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b) | ||
891 | { | ||
892 | if (b->type == BLOCK_BUDDY) { | ||
893 | struct buddy_block *bb; | ||
894 | bb = container_of(b, struct buddy_block, block); | ||
895 | return parent_of(bb->heap); | ||
896 | } else { | ||
897 | struct list_block *lb; | ||
898 | lb = container_of(b, struct list_block, block); | ||
899 | return lb->heap; | ||
900 | } | ||
901 | } | ||
902 | |||
/* nvmap_heap_free: frees block b*/
/* Buddy blocks are returned to their sub-heap; list blocks are cache
 * flushed and returned to the first-fit free list.  If freeing a buddy
 * block empties its sub-heap, the sub-heap's backing list block is
 * freed recursively (after dropping the lock, since the recursive call
 * retakes it). */
void nvmap_heap_free(struct nvmap_heap_block *b)
{
	struct buddy_heap *bh = NULL;
	struct nvmap_heap *h = nvmap_block_to_heap(b);
	struct list_block *lb;

	mutex_lock(&h->lock);
	if (b->type == BLOCK_BUDDY)
		bh = do_buddy_free(b);
	else {
		lb = container_of(b, struct list_block, block);
		nvmap_flush_heap_block(NULL, b, lb->size, lb->mem_prot);
		do_heap_free(b);
	}

	if (bh) {
		/* sub-heap became fully free: release its backing block */
		list_del(&bh->buddy_list);
		mutex_unlock(&h->lock);
		nvmap_heap_free(&bh->heap_base->block);
		kmem_cache_free(buddy_heap_cache, bh);
	} else
		mutex_unlock(&h->lock);
}
927 | |||
928 | |||
/* Release callback for the heap's struct device.  The heap itself is
 * freed explicitly in nvmap_heap_destroy(), so nothing to do here; an
 * empty release satisfies the driver core's requirement that every
 * registered device provide one. */
static void heap_release(struct device *heap)
{
}
932 | |||
933 | /* nvmap_heap_create: create a heap object of len bytes, starting from | ||
934 | * address base. | ||
935 | * | ||
936 | * if buddy_size is >= NVMAP_HEAP_MIN_BUDDY_SIZE, then allocations <= 1/2 | ||
937 | * of the buddy heap size will use a buddy sub-allocator, where each buddy | ||
938 | * heap is buddy_size bytes (should be a power of 2). all other allocations | ||
939 | * will be rounded up to be a multiple of buddy_size bytes. | ||
940 | */ | ||
941 | struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name, | ||
942 | phys_addr_t base, size_t len, | ||
943 | size_t buddy_size, void *arg) | ||
944 | { | ||
945 | struct nvmap_heap *h = NULL; | ||
946 | struct list_block *l = NULL; | ||
947 | |||
948 | if (WARN_ON(buddy_size && buddy_size < NVMAP_HEAP_MIN_BUDDY_SIZE)) { | ||
949 | dev_warn(parent, "%s: buddy_size %u too small\n", __func__, | ||
950 | buddy_size); | ||
951 | buddy_size = 0; | ||
952 | } else if (WARN_ON(buddy_size >= len)) { | ||
953 | dev_warn(parent, "%s: buddy_size %u too large\n", __func__, | ||
954 | buddy_size); | ||
955 | buddy_size = 0; | ||
956 | } else if (WARN_ON(buddy_size & (buddy_size - 1))) { | ||
957 | dev_warn(parent, "%s: buddy_size %u not a power of 2\n", | ||
958 | __func__, buddy_size); | ||
959 | buddy_size = 1 << (ilog2(buddy_size) + 1); | ||
960 | } | ||
961 | |||
962 | if (WARN_ON(buddy_size && (base & (buddy_size - 1)))) { | ||
963 | unsigned long orig = base; | ||
964 | dev_warn(parent, "%s: base address %p not aligned to " | ||
965 | "buddy_size %u\n", __func__, (void *)base, buddy_size); | ||
966 | base = ALIGN(base, buddy_size); | ||
967 | len -= (base - orig); | ||
968 | } | ||
969 | |||
970 | if (WARN_ON(buddy_size && (len & (buddy_size - 1)))) { | ||
971 | dev_warn(parent, "%s: length %u not aligned to " | ||
972 | "buddy_size %u\n", __func__, len, buddy_size); | ||
973 | len &= ~(buddy_size - 1); | ||
974 | } | ||
975 | |||
976 | h = kzalloc(sizeof(*h), GFP_KERNEL); | ||
977 | if (!h) { | ||
978 | dev_err(parent, "%s: out of memory\n", __func__); | ||
979 | goto fail_alloc; | ||
980 | } | ||
981 | |||
982 | l = kmem_cache_zalloc(block_cache, GFP_KERNEL); | ||
983 | if (!l) { | ||
984 | dev_err(parent, "%s: out of memory\n", __func__); | ||
985 | goto fail_alloc; | ||
986 | } | ||
987 | |||
988 | dev_set_name(&h->dev, "heap-%s", name); | ||
989 | h->name = name; | ||
990 | h->arg = arg; | ||
991 | h->dev.parent = parent; | ||
992 | h->dev.driver = NULL; | ||
993 | h->dev.release = heap_release; | ||
994 | if (device_register(&h->dev)) { | ||
995 | dev_err(parent, "%s: failed to register %s\n", __func__, | ||
996 | dev_name(&h->dev)); | ||
997 | goto fail_alloc; | ||
998 | } | ||
999 | if (sysfs_create_group(&h->dev.kobj, &heap_stat_attr_group)) { | ||
1000 | dev_err(&h->dev, "%s: failed to create attributes\n", __func__); | ||
1001 | goto fail_register; | ||
1002 | } | ||
1003 | h->small_alloc = max(2 * buddy_size, len / 256); | ||
1004 | h->buddy_heap_size = buddy_size; | ||
1005 | if (buddy_size) | ||
1006 | h->min_buddy_shift = ilog2(buddy_size / MAX_BUDDY_NR); | ||
1007 | INIT_LIST_HEAD(&h->free_list); | ||
1008 | INIT_LIST_HEAD(&h->buddy_list); | ||
1009 | INIT_LIST_HEAD(&h->all_list); | ||
1010 | mutex_init(&h->lock); | ||
1011 | l->block.base = base; | ||
1012 | l->block.type = BLOCK_EMPTY; | ||
1013 | l->size = len; | ||
1014 | l->orig_addr = base; | ||
1015 | list_add_tail(&l->free_list, &h->free_list); | ||
1016 | list_add_tail(&l->all_list, &h->all_list); | ||
1017 | |||
1018 | inner_flush_cache_all(); | ||
1019 | outer_flush_range(base, base + len); | ||
1020 | wmb(); | ||
1021 | return h; | ||
1022 | |||
1023 | fail_register: | ||
1024 | device_unregister(&h->dev); | ||
1025 | fail_alloc: | ||
1026 | if (l) | ||
1027 | kmem_cache_free(block_cache, l); | ||
1028 | kfree(h); | ||
1029 | return NULL; | ||
1030 | } | ||
1031 | |||
1032 | void *nvmap_heap_device_to_arg(struct device *dev) | ||
1033 | { | ||
1034 | struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev); | ||
1035 | return heap->arg; | ||
1036 | } | ||
1037 | |||
/* Return the client-supplied opaque argument passed to
 * nvmap_heap_create(). */
void *nvmap_heap_to_arg(struct nvmap_heap *heap)
{
	return heap->arg;
}
1042 | |||
1043 | /* nvmap_heap_destroy: frees all resources in heap */ | ||
1044 | void nvmap_heap_destroy(struct nvmap_heap *heap) | ||
1045 | { | ||
1046 | WARN_ON(!list_empty(&heap->buddy_list)); | ||
1047 | |||
1048 | sysfs_remove_group(&heap->dev.kobj, &heap_stat_attr_group); | ||
1049 | device_unregister(&heap->dev); | ||
1050 | |||
1051 | while (!list_empty(&heap->buddy_list)) { | ||
1052 | struct buddy_heap *b; | ||
1053 | b = list_first_entry(&heap->buddy_list, struct buddy_heap, | ||
1054 | buddy_list); | ||
1055 | list_del(&heap->buddy_list); | ||
1056 | nvmap_heap_free(&b->heap_base->block); | ||
1057 | kmem_cache_free(buddy_heap_cache, b); | ||
1058 | } | ||
1059 | |||
1060 | WARN_ON(!list_is_singular(&heap->all_list)); | ||
1061 | while (!list_empty(&heap->all_list)) { | ||
1062 | struct list_block *l; | ||
1063 | l = list_first_entry(&heap->all_list, struct list_block, | ||
1064 | all_list); | ||
1065 | list_del(&l->all_list); | ||
1066 | kmem_cache_free(block_cache, l); | ||
1067 | } | ||
1068 | |||
1069 | kfree(heap); | ||
1070 | } | ||
1071 | |||
/* nvmap_heap_create_group: adds the attribute_group grp to the heap kobject.
 * Returns 0 on success or the negative errno from sysfs_create_group. */
int nvmap_heap_create_group(struct nvmap_heap *heap,
			    const struct attribute_group *grp)
{
	return sysfs_create_group(&heap->dev.kobj, grp);
}
1078 | |||
/* nvmap_heap_remove_group: removes the attribute_group grp previously
 * added with nvmap_heap_create_group */
void nvmap_heap_remove_group(struct nvmap_heap *heap,
			     const struct attribute_group *grp)
{
	sysfs_remove_group(&heap->dev.kobj, grp);
}
1085 | |||
1086 | int nvmap_heap_init(void) | ||
1087 | { | ||
1088 | BUG_ON(buddy_heap_cache != NULL); | ||
1089 | buddy_heap_cache = KMEM_CACHE(buddy_heap, 0); | ||
1090 | if (!buddy_heap_cache) { | ||
1091 | pr_err("%s: unable to create buddy heap cache\n", __func__); | ||
1092 | return -ENOMEM; | ||
1093 | } | ||
1094 | |||
1095 | block_cache = KMEM_CACHE(combo_block, 0); | ||
1096 | if (!block_cache) { | ||
1097 | kmem_cache_destroy(buddy_heap_cache); | ||
1098 | pr_err("%s: unable to create block cache\n", __func__); | ||
1099 | return -ENOMEM; | ||
1100 | } | ||
1101 | return 0; | ||
1102 | } | ||
1103 | |||
1104 | void nvmap_heap_deinit(void) | ||
1105 | { | ||
1106 | if (buddy_heap_cache) | ||
1107 | kmem_cache_destroy(buddy_heap_cache); | ||
1108 | if (block_cache) | ||
1109 | kmem_cache_destroy(block_cache); | ||
1110 | |||
1111 | block_cache = NULL; | ||
1112 | buddy_heap_cache = NULL; | ||
1113 | } | ||
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.h b/drivers/video/tegra/nvmap/nvmap_heap.h new file mode 100644 index 00000000000..158a1fa3d33 --- /dev/null +++ b/drivers/video/tegra/nvmap/nvmap_heap.h | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/nvmap/nvmap_heap.h | ||
3 | * | ||
4 | * GPU heap allocator. | ||
5 | * | ||
6 | * Copyright (c) 2010-2011, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
23 | #ifndef __NVMAP_HEAP_H | ||
24 | #define __NVMAP_HEAP_H | ||
25 | |||
26 | struct device; | ||
27 | struct nvmap_heap; | ||
28 | struct attribute_group; | ||
29 | |||
/* a single allocation handed out by an nvmap_heap: base is the physical
 * address of the block, type identifies the allocator variant that
 * produced it, and handle points back at the owning nvmap_handle */
struct nvmap_heap_block {
	phys_addr_t base;
	unsigned int type;
	struct nvmap_handle *handle;
};
35 | |||
36 | #define NVMAP_HEAP_MIN_BUDDY_SIZE 8192 | ||
37 | |||
38 | struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name, | ||
39 | phys_addr_t base, size_t len, | ||
40 | unsigned int buddy_size, void *arg); | ||
41 | |||
42 | void nvmap_heap_destroy(struct nvmap_heap *heap); | ||
43 | |||
44 | void *nvmap_heap_device_to_arg(struct device *dev); | ||
45 | |||
46 | void *nvmap_heap_to_arg(struct nvmap_heap *heap); | ||
47 | |||
48 | struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *heap, | ||
49 | struct nvmap_handle *handle); | ||
50 | |||
51 | struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b); | ||
52 | |||
53 | void nvmap_heap_free(struct nvmap_heap_block *block); | ||
54 | |||
55 | int nvmap_heap_create_group(struct nvmap_heap *heap, | ||
56 | const struct attribute_group *grp); | ||
57 | |||
58 | void nvmap_heap_remove_group(struct nvmap_heap *heap, | ||
59 | const struct attribute_group *grp); | ||
60 | |||
61 | int __init nvmap_heap_init(void); | ||
62 | |||
63 | void nvmap_heap_deinit(void); | ||
64 | |||
65 | int nvmap_flush_heap_block(struct nvmap_client *client, | ||
66 | struct nvmap_heap_block *block, size_t len, unsigned int prot); | ||
67 | |||
68 | #endif | ||
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.c b/drivers/video/tegra/nvmap/nvmap_ioctl.c new file mode 100644 index 00000000000..58bc71d5046 --- /dev/null +++ b/drivers/video/tegra/nvmap/nvmap_ioctl.c | |||
@@ -0,0 +1,749 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/nvmap/nvmap_ioctl.c | ||
3 | * | ||
4 | * User-space interface to nvmap | ||
5 | * | ||
6 | * Copyright (c) 2011, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/dma-mapping.h> | ||
24 | #include <linux/fs.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/uaccess.h> | ||
28 | |||
29 | #include <asm/cacheflush.h> | ||
30 | #include <asm/outercache.h> | ||
31 | #include <asm/tlbflush.h> | ||
32 | |||
33 | #include <mach/iovmm.h> | ||
34 | #include <mach/nvmap.h> | ||
35 | |||
36 | #include "nvmap_ioctl.h" | ||
37 | #include "nvmap.h" | ||
38 | #include "nvmap_common.h" | ||
39 | |||
40 | static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h, | ||
41 | int is_read, unsigned long h_offs, | ||
42 | unsigned long sys_addr, unsigned long h_stride, | ||
43 | unsigned long sys_stride, unsigned long elem_size, | ||
44 | unsigned long count); | ||
45 | |||
46 | static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h, | ||
47 | unsigned long start, unsigned long end, unsigned int op); | ||
48 | |||
49 | |||
50 | int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg) | ||
51 | { | ||
52 | struct nvmap_pin_handle op; | ||
53 | struct nvmap_handle *h; | ||
54 | unsigned long on_stack[16]; | ||
55 | unsigned long *refs; | ||
56 | unsigned long __user *output; | ||
57 | unsigned int i; | ||
58 | int err = 0; | ||
59 | |||
60 | if (copy_from_user(&op, arg, sizeof(op))) | ||
61 | return -EFAULT; | ||
62 | |||
63 | if (!op.count) | ||
64 | return -EINVAL; | ||
65 | |||
66 | if (op.count > 1) { | ||
67 | size_t bytes = op.count * sizeof(unsigned long *); | ||
68 | |||
69 | if (op.count > ARRAY_SIZE(on_stack)) | ||
70 | refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL); | ||
71 | else | ||
72 | refs = on_stack; | ||
73 | |||
74 | if (!refs) | ||
75 | return -ENOMEM; | ||
76 | |||
77 | if (copy_from_user(refs, (void *)op.handles, bytes)) { | ||
78 | err = -EFAULT; | ||
79 | goto out; | ||
80 | } | ||
81 | } else { | ||
82 | refs = on_stack; | ||
83 | on_stack[0] = (unsigned long)op.handles; | ||
84 | } | ||
85 | |||
86 | if (is_pin) | ||
87 | err = nvmap_pin_ids(filp->private_data, op.count, refs); | ||
88 | else | ||
89 | nvmap_unpin_ids(filp->private_data, op.count, refs); | ||
90 | |||
91 | /* skip the output stage on unpin */ | ||
92 | if (err || !is_pin) | ||
93 | goto out; | ||
94 | |||
95 | /* it is guaranteed that if nvmap_pin_ids returns 0 that | ||
96 | * all of the handle_ref objects are valid, so dereferencing | ||
97 | * directly here is safe */ | ||
98 | if (op.count > 1) | ||
99 | output = (unsigned long __user *)op.addr; | ||
100 | else { | ||
101 | struct nvmap_pin_handle __user *tmp = arg; | ||
102 | output = (unsigned long __user *)&(tmp->addr); | ||
103 | } | ||
104 | |||
105 | if (!output) | ||
106 | goto out; | ||
107 | |||
108 | for (i = 0; i < op.count && !err; i++) { | ||
109 | unsigned long addr; | ||
110 | |||
111 | h = (struct nvmap_handle *)refs[i]; | ||
112 | |||
113 | if (h->heap_pgalloc && h->pgalloc.contig) | ||
114 | addr = page_to_phys(h->pgalloc.pages[0]); | ||
115 | else if (h->heap_pgalloc) | ||
116 | addr = h->pgalloc.area->iovm_start; | ||
117 | else | ||
118 | addr = h->carveout->base; | ||
119 | |||
120 | err = put_user(addr, &output[i]); | ||
121 | } | ||
122 | |||
123 | if (err) | ||
124 | nvmap_unpin_ids(filp->private_data, op.count, refs); | ||
125 | |||
126 | out: | ||
127 | if (refs != on_stack) | ||
128 | kfree(refs); | ||
129 | |||
130 | return err; | ||
131 | } | ||
132 | |||
133 | int nvmap_ioctl_getid(struct file *filp, void __user *arg) | ||
134 | { | ||
135 | struct nvmap_client *client = filp->private_data; | ||
136 | struct nvmap_create_handle op; | ||
137 | struct nvmap_handle *h = NULL; | ||
138 | |||
139 | if (copy_from_user(&op, arg, sizeof(op))) | ||
140 | return -EFAULT; | ||
141 | |||
142 | if (!op.handle) | ||
143 | return -EINVAL; | ||
144 | |||
145 | h = nvmap_get_handle_id(client, op.handle); | ||
146 | |||
147 | if (!h) | ||
148 | return -EPERM; | ||
149 | |||
150 | op.id = (__u32)h; | ||
151 | if (client == h->owner) | ||
152 | h->global = true; | ||
153 | |||
154 | nvmap_handle_put(h); | ||
155 | |||
156 | return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0; | ||
157 | } | ||
158 | |||
159 | int nvmap_ioctl_alloc(struct file *filp, void __user *arg) | ||
160 | { | ||
161 | struct nvmap_alloc_handle op; | ||
162 | struct nvmap_client *client = filp->private_data; | ||
163 | |||
164 | if (copy_from_user(&op, arg, sizeof(op))) | ||
165 | return -EFAULT; | ||
166 | |||
167 | if (!op.handle) | ||
168 | return -EINVAL; | ||
169 | |||
170 | if (op.align & (op.align - 1)) | ||
171 | return -EINVAL; | ||
172 | |||
173 | /* user-space handles are aligned to page boundaries, to prevent | ||
174 | * data leakage. */ | ||
175 | op.align = max_t(size_t, op.align, PAGE_SIZE); | ||
176 | |||
177 | return nvmap_alloc_handle_id(client, op.handle, op.heap_mask, | ||
178 | op.align, op.flags); | ||
179 | } | ||
180 | |||
181 | int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg) | ||
182 | { | ||
183 | struct nvmap_create_handle op; | ||
184 | struct nvmap_handle_ref *ref = NULL; | ||
185 | struct nvmap_client *client = filp->private_data; | ||
186 | int err = 0; | ||
187 | |||
188 | if (copy_from_user(&op, arg, sizeof(op))) | ||
189 | return -EFAULT; | ||
190 | |||
191 | if (!client) | ||
192 | return -ENODEV; | ||
193 | |||
194 | if (cmd == NVMAP_IOC_CREATE) { | ||
195 | ref = nvmap_create_handle(client, PAGE_ALIGN(op.size)); | ||
196 | if (!IS_ERR(ref)) | ||
197 | ref->handle->orig_size = op.size; | ||
198 | } else if (cmd == NVMAP_IOC_FROM_ID) { | ||
199 | ref = nvmap_duplicate_handle_id(client, op.id); | ||
200 | } else { | ||
201 | return -EINVAL; | ||
202 | } | ||
203 | |||
204 | if (IS_ERR(ref)) | ||
205 | return PTR_ERR(ref); | ||
206 | |||
207 | op.handle = nvmap_ref_to_id(ref); | ||
208 | if (copy_to_user(arg, &op, sizeof(op))) { | ||
209 | err = -EFAULT; | ||
210 | nvmap_free_handle_id(client, op.handle); | ||
211 | } | ||
212 | |||
213 | return err; | ||
214 | } | ||
215 | |||
/* NVMAP_IOC_MMAP: bind a handle to a VMA previously created by an
 * mmap() on this device.  Validates that the VMA belongs to this
 * driver, exactly covers the requested range, and is not already bound
 * to a different handle.  May also upgrade the handle's cache
 * attributes when the caller asks to map uc/wc memory cacheable.  On
 * success the handle reference taken here is kept alive by the VMA. */
int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_map_caller op;
	struct nvmap_vma_priv *vpriv;
	struct vm_area_struct *vma;
	struct nvmap_handle *h = NULL;
	unsigned int cache_flags;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle)
		return -EINVAL;

	h = nvmap_get_handle_id(client, op.handle);

	if (!h)
		return -EPERM;

	down_read(&current->mm->mmap_sem);

	vma = find_vma(current->mm, op.addr);
	if (!vma || !vma->vm_private_data) {
		err = -ENOMEM;
		goto out;
	}

	/* offset into the handle must be page-aligned */
	if (op.offset & ~PAGE_MASK) {
		err = -EFAULT;
		goto out;
	}

	/* requested window must lie inside the handle */
	if ((op.offset + op.length) > h->size) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	vpriv = vma->vm_private_data;
	BUG_ON(!vpriv);

	/* the VMA must exactly match the requested mapping operation, and the
	 * VMA that is targetted must have been created by this driver
	 */
	if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
	    (vma->vm_end-vma->vm_start != op.length)) {
		err = -EPERM;
		goto out;
	}

	/* verify that each mmap() system call creates a unique VMA */

	if (vpriv->handle && (h == vpriv->handle)) {
		/* NOTE(review): re-binding the same handle exits with
		 * err == 0, so the reference taken by
		 * nvmap_get_handle_id above is never dropped -- confirm
		 * this path cannot be hit repeatedly */
		goto out;
	} else if (vpriv->handle) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	/* lock the backing store against relocation while mapped */
	nvmap_usecount_inc(h);

	/* carveout memory must start on a page boundary to be mappable */
	if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
		nvmap_usecount_dec(h);
		err = -EFAULT;
		goto out;
	}

	vpriv->handle = h;
	vpriv->offs = op.offset;

	/* a cacheable mapping of an uncached/write-combined handle
	 * requires upgrading the handle's attributes (whole pages only) */
	cache_flags = op.flags & NVMAP_HANDLE_CACHE_FLAG;
	if ((cache_flags == NVMAP_HANDLE_INNER_CACHEABLE ||
	     cache_flags == NVMAP_HANDLE_CACHEABLE) &&
	    (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
	     h->flags == NVMAP_HANDLE_WRITE_COMBINE)) {
		if (h->size & ~PAGE_MASK) {
			pr_err("\n%s:attempt to convert a buffer from uc/wc to"
				" wb, whose size is not a multiple of page size."
				" request ignored.\n", __func__);
		} else {
			unsigned int nr_page = h->size >> PAGE_SHIFT;
			wmb();
			/* override allocation time cache coherency attributes. */
			h->flags &= ~NVMAP_HANDLE_CACHE_FLAG;
			h->flags |= cache_flags;

			/* Update page attributes, if the memory is allocated
			 * from system heap pages.
			 */
			if (cache_flags == NVMAP_HANDLE_INNER_CACHEABLE &&
				h->heap_pgalloc)
				set_pages_array_iwb(h->pgalloc.pages, nr_page);
			else if (h->heap_pgalloc)
				set_pages_array_wb(h->pgalloc.pages, nr_page);
		}
	}
	vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);

out:
	up_read(&current->mm->mmap_sem);

	/* on failure, release the reference taken by nvmap_get_handle_id;
	 * on success it is owned by the VMA */
	if (err)
		nvmap_handle_put(h);
	return err;
}
322 | |||
/* NVMAP_IOC_PARAM: query a per-handle attribute (size, alignment,
 * physical base, or heap) and copy the result back to user-space */
int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
{
	struct nvmap_handle_param op;
	struct nvmap_client *client = filp->private_data;
	struct nvmap_handle *h;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	h = nvmap_get_handle_id(client, op.handle);
	if (!h)
		return -EINVAL;

	switch (op.param) {
	case NVMAP_HANDLE_PARAM_SIZE:
		op.result = h->orig_size;
		break;
	case NVMAP_HANDLE_PARAM_ALIGNMENT:
		/* h->lock protects the carveout pointer while we read it */
		mutex_lock(&h->lock);
		if (!h->alloc)
			op.result = 0;
		else if (h->heap_pgalloc)
			op.result = PAGE_SIZE;
		else if (h->carveout->base)
			/* base & -base isolates the lowest set bit, i.e.
			 * the largest power-of-two the base is aligned to */
			op.result = (h->carveout->base & -h->carveout->base);
		else
			op.result = SZ_4M;
		mutex_unlock(&h->lock);
		break;
	case NVMAP_HANDLE_PARAM_BASE:
		/* atomic_add_return(0, ...) is an atomic read of the pin
		 * count; an unpinned handle has no stable base address */
		if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
			op.result = -1ul;
		else if (!h->heap_pgalloc) {
			mutex_lock(&h->lock);
			op.result = h->carveout->base;
			mutex_unlock(&h->lock);
		} else if (h->pgalloc.contig)
			op.result = page_to_phys(h->pgalloc.pages[0]);
		else if (h->pgalloc.area)
			op.result = h->pgalloc.area->iovm_start;
		else
			op.result = -1ul;
		break;
	case NVMAP_HANDLE_PARAM_HEAP:
		if (!h->alloc)
			op.result = 0;
		else if (!h->heap_pgalloc) {
			mutex_lock(&h->lock);
			op.result = nvmap_carveout_usage(client, h->carveout);
			mutex_unlock(&h->lock);
		} else if (h->pgalloc.contig)
			op.result = NVMAP_HEAP_SYSMEM;
		else
			op.result = NVMAP_HEAP_IOVMM;
		break;
	default:
		err = -EINVAL;
		break;
	}

	if (!err && copy_to_user(arg, &op, sizeof(op)))
		err = -EFAULT;

	nvmap_handle_put(h);
	return err;
}
390 | |||
/* NVMAP_IOC_READ / NVMAP_IOC_WRITE: copy (possibly strided) data
 * between a handle and a user buffer.  The number of bytes actually
 * copied is written back to the user's count field; returns 0, -EINTR
 * on a short copy, or a negative error. */
int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user* arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_rw_handle __user *uarg = arg;
	struct nvmap_rw_handle op;
	struct nvmap_handle *h;
	ssize_t copied;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle || !op.addr || !op.count || !op.elem_size)
		return -EINVAL;

	h = nvmap_get_handle_id(client, op.handle);
	if (!h)
		return -EPERM;

	/* hold the backing store against relocation while copying */
	nvmap_usecount_inc(h);

	copied = rw_handle(client, h, is_read, op.offset,
			   (unsigned long)op.addr, op.hmem_stride,
			   op.user_stride, op.elem_size, op.count);

	if (copied < 0) {
		err = copied;
		copied = 0;
	} else if (copied < (op.count * op.elem_size))
		/* partial copy: report -EINTR but still publish the count */
		err = -EINTR;

	/* NOTE(review): the __put_user result is ignored; a fault here
	 * leaves the user's count field stale -- confirm acceptable */
	__put_user(copied, &uarg->count);

	nvmap_usecount_dec(h);

	nvmap_handle_put(h);

	return err;
}
430 | |||
431 | int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg) | ||
432 | { | ||
433 | struct nvmap_client *client = filp->private_data; | ||
434 | struct nvmap_cache_op op; | ||
435 | struct vm_area_struct *vma; | ||
436 | struct nvmap_vma_priv *vpriv; | ||
437 | unsigned long start; | ||
438 | unsigned long end; | ||
439 | int err = 0; | ||
440 | |||
441 | if (copy_from_user(&op, arg, sizeof(op))) | ||
442 | return -EFAULT; | ||
443 | |||
444 | if (!op.handle || !op.addr || op.op < NVMAP_CACHE_OP_WB || | ||
445 | op.op > NVMAP_CACHE_OP_WB_INV) | ||
446 | return -EINVAL; | ||
447 | |||
448 | down_read(¤t->mm->mmap_sem); | ||
449 | |||
450 | vma = find_vma(current->active_mm, (unsigned long)op.addr); | ||
451 | if (!vma || !is_nvmap_vma(vma) || | ||
452 | (unsigned long)op.addr + op.len > vma->vm_end) { | ||
453 | err = -EADDRNOTAVAIL; | ||
454 | goto out; | ||
455 | } | ||
456 | |||
457 | vpriv = (struct nvmap_vma_priv *)vma->vm_private_data; | ||
458 | |||
459 | if ((unsigned long)vpriv->handle != op.handle) { | ||
460 | err = -EFAULT; | ||
461 | goto out; | ||
462 | } | ||
463 | |||
464 | start = (unsigned long)op.addr - vma->vm_start; | ||
465 | end = start + op.len; | ||
466 | |||
467 | err = cache_maint(client, vpriv->handle, start, end, op.op); | ||
468 | out: | ||
469 | up_read(¤t->mm->mmap_sem); | ||
470 | return err; | ||
471 | } | ||
472 | |||
473 | int nvmap_ioctl_free(struct file *filp, unsigned long arg) | ||
474 | { | ||
475 | struct nvmap_client *client = filp->private_data; | ||
476 | |||
477 | if (!arg) | ||
478 | return 0; | ||
479 | |||
480 | nvmap_free_handle_id(client, arg); | ||
481 | return 0; | ||
482 | } | ||
483 | |||
484 | static void inner_cache_maint(unsigned int op, void *vaddr, size_t size) | ||
485 | { | ||
486 | if (op == NVMAP_CACHE_OP_WB_INV) | ||
487 | dmac_flush_range(vaddr, vaddr + size); | ||
488 | else if (op == NVMAP_CACHE_OP_INV) | ||
489 | dmac_map_area(vaddr, size, DMA_FROM_DEVICE); | ||
490 | else | ||
491 | dmac_map_area(vaddr, size, DMA_TO_DEVICE); | ||
492 | } | ||
493 | |||
494 | static void outer_cache_maint(unsigned int op, unsigned long paddr, size_t size) | ||
495 | { | ||
496 | if (op == NVMAP_CACHE_OP_WB_INV) | ||
497 | outer_flush_range(paddr, paddr + size); | ||
498 | else if (op == NVMAP_CACHE_OP_INV) | ||
499 | outer_inv_range(paddr, paddr + size); | ||
500 | else | ||
501 | outer_clean_range(paddr, paddr + size); | ||
502 | } | ||
503 | |||
/* Cache maintenance for a page-allocated handle, one page at a time.
 * [start, end) are byte offsets within the handle.  For inner
 * maintenance each page is temporarily mapped at the caller-provided
 * scratch kernel address (kaddr/pte) so the dmac_* helpers can operate
 * on a virtual address; outer maintenance uses physical addresses
 * directly and needs no mapping. */
static void heap_page_cache_maint(struct nvmap_client *client,
	struct nvmap_handle *h, unsigned long start, unsigned long end,
	unsigned int op, bool inner, bool outer, pte_t **pte,
	unsigned long kaddr, pgprot_t prot)
{
	struct page *page;
	unsigned long paddr;
	unsigned long next;
	unsigned long off;
	size_t size;

	while (start < end) {
		page = h->pgalloc.pages[start >> PAGE_SHIFT];
		/* step to the next page boundary, clamped to end */
		next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
		off = start & ~PAGE_MASK;
		size = next - start;
		paddr = page_to_phys(page) + off;

		if (inner) {
			void *vaddr = (void *)kaddr + off;
			BUG_ON(!pte);
			BUG_ON(!kaddr);
			/* retarget the scratch mapping at this page */
			set_pte_at(&init_mm, kaddr, *pte,
				   pfn_pte(__phys_to_pfn(paddr), prot));
			flush_tlb_kernel_page(kaddr);
			inner_cache_maint(op, vaddr, size);
		}

		if (outer)
			outer_cache_maint(op, paddr, size);
		start = next;
	}
}
537 | |||
/* For large clean/flush requests it is cheaper to maintain the entire
 * inner cache by set/way than to walk the range line by line.  Returns
 * true if the request was fully handled here.  Invalidate-only
 * requests never take this path (invalidating the whole cache would
 * discard other users' dirty lines). */
static bool fast_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
	unsigned long start, unsigned long end, unsigned int op)
{
	int ret = false;

	if ((op == NVMAP_CACHE_OP_INV) ||
		((end - start) < FLUSH_CLEAN_BY_SET_WAY_THRESHOLD))
		goto out;

	if (op == NVMAP_CACHE_OP_WB_INV)
		inner_flush_cache_all();
	else if (op == NVMAP_CACHE_OP_WB)
		inner_clean_cache_all();

	/* the inner cache is now fully maintained; the outer cache still
	 * needs a ranged operation unless the handle is inner-only */
	if (h->heap_pgalloc && (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)) {
		heap_page_cache_maint(client, h, start, end, op,
				false, true, NULL, 0, 0);
	} else if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
		/* carveout: offsets translate directly to physical */
		start += h->carveout->base;
		end += h->carveout->base;
		outer_cache_maint(op, start, end - start);
	}
	ret = true;
out:
	return ret;
}
564 | |||
565 | static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h, | ||
566 | unsigned long start, unsigned long end, unsigned int op) | ||
567 | { | ||
568 | pgprot_t prot; | ||
569 | pte_t **pte = NULL; | ||
570 | unsigned long kaddr; | ||
571 | unsigned long loop; | ||
572 | int err = 0; | ||
573 | |||
574 | h = nvmap_handle_get(h); | ||
575 | if (!h) | ||
576 | return -EFAULT; | ||
577 | |||
578 | if (!h->alloc) { | ||
579 | err = -EFAULT; | ||
580 | goto out; | ||
581 | } | ||
582 | |||
583 | wmb(); | ||
584 | if (h->flags == NVMAP_HANDLE_UNCACHEABLE || | ||
585 | h->flags == NVMAP_HANDLE_WRITE_COMBINE || start == end) | ||
586 | goto out; | ||
587 | |||
588 | if (fast_cache_maint(client, h, start, end, op)) | ||
589 | goto out; | ||
590 | |||
591 | prot = nvmap_pgprot(h, pgprot_kernel); | ||
592 | pte = nvmap_alloc_pte(client->dev, (void **)&kaddr); | ||
593 | if (IS_ERR(pte)) { | ||
594 | err = PTR_ERR(pte); | ||
595 | pte = NULL; | ||
596 | goto out; | ||
597 | } | ||
598 | |||
599 | if (h->heap_pgalloc) { | ||
600 | heap_page_cache_maint(client, h, start, end, op, true, | ||
601 | (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ? false : true, | ||
602 | pte, kaddr, prot); | ||
603 | goto out; | ||
604 | } | ||
605 | |||
606 | if (start > h->size || end > h->size) { | ||
607 | nvmap_warn(client, "cache maintenance outside handle\n"); | ||
608 | return -EINVAL; | ||
609 | } | ||
610 | |||
611 | /* lock carveout from relocation by mapcount */ | ||
612 | nvmap_usecount_inc(h); | ||
613 | |||
614 | start += h->carveout->base; | ||
615 | end += h->carveout->base; | ||
616 | |||
617 | loop = start; | ||
618 | |||
619 | while (loop < end) { | ||
620 | unsigned long next = (loop + PAGE_SIZE) & PAGE_MASK; | ||
621 | void *base = (void *)kaddr + (loop & ~PAGE_MASK); | ||
622 | next = min(next, end); | ||
623 | |||
624 | set_pte_at(&init_mm, kaddr, *pte, | ||
625 | pfn_pte(__phys_to_pfn(loop), prot)); | ||
626 | flush_tlb_kernel_page(kaddr); | ||
627 | |||
628 | inner_cache_maint(op, base, next - loop); | ||
629 | loop = next; | ||
630 | } | ||
631 | |||
632 | if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) | ||
633 | outer_cache_maint(op, start, end - start); | ||
634 | |||
635 | /* unlock carveout */ | ||
636 | nvmap_usecount_dec(h); | ||
637 | |||
638 | out: | ||
639 | if (pte) | ||
640 | nvmap_free_pte(client->dev, pte); | ||
641 | nvmap_handle_put(h); | ||
642 | return err; | ||
643 | } | ||
644 | |||
/* Copy [start, start+bytes) of handle h to/from user address rw_addr,
 * mapping one physical page at a time through the scratch pte at
 * kaddr.  Returns 0 on success or -EFAULT on a faulting copy. */
static int rw_handle_page(struct nvmap_handle *h, int is_read,
			  phys_addr_t start, unsigned long rw_addr,
			  unsigned long bytes, unsigned long kaddr, pte_t *pte)
{
	pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
	unsigned long end = start + bytes;
	int err = 0;

	while (!err && start < end) {
		struct page *page = NULL;
		phys_addr_t phys;
		size_t count;
		void *src;

		if (!h->heap_pgalloc) {
			/* carveout handles are physically contiguous */
			phys = h->carveout->base + start;
		} else {
			page = h->pgalloc.pages[start >> PAGE_SHIFT];
			BUG_ON(!page);
			/* hold the page while it is mapped and copied */
			get_page(page);
			phys = page_to_phys(page) + (start & ~PAGE_MASK);
		}

		/* point the scratch kernel mapping at this page */
		set_pte_at(&init_mm, kaddr, pte,
			   pfn_pte(__phys_to_pfn(phys), prot));
		flush_tlb_kernel_page(kaddr);

		src = (void *)kaddr + (phys & ~PAGE_MASK);
		/* phys is reused here as the bytes remaining in the page */
		phys = PAGE_SIZE - (phys & ~PAGE_MASK);
		count = min_t(size_t, end - start, phys);

		if (is_read)
			err = copy_to_user((void *)rw_addr, src, count);
		else
			err = copy_from_user(src, (void *)rw_addr, count);

		/* copy_*_user returns bytes not copied; any nonzero
		 * residue is reported as -EFAULT */
		if (err)
			err = -EFAULT;

		rw_addr += count;
		start += count;

		if (page)
			put_page(page);
	}

	return err;
}
693 | |||
/* Strided copy between user memory and a handle: copies count elements
 * of elem_size bytes, advancing by h_stride through the handle (from
 * h_offs) and by sys_stride through user memory (from sys_addr).
 * Performs the cache maintenance needed around each element.  Returns
 * the number of bytes copied, or a negative error if nothing was. */
static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
			 int is_read, unsigned long h_offs,
			 unsigned long sys_addr, unsigned long h_stride,
			 unsigned long sys_stride, unsigned long elem_size,
			 unsigned long count)
{
	ssize_t copied = 0;
	pte_t **pte;
	void *addr;
	int ret = 0;

	if (!elem_size)
		return -EINVAL;

	if (!h->alloc)
		return -EFAULT;

	/* both sides densely packed: collapse into one large element so
	 * the loop below runs once */
	if (elem_size == h_stride && elem_size == sys_stride) {
		elem_size *= count;
		h_stride = elem_size;
		sys_stride = elem_size;
		count = 1;
	}

	pte = nvmap_alloc_pte(client->dev, &addr);
	if (IS_ERR(pte))
		return PTR_ERR(pte);

	while (count--) {
		if (h_offs + elem_size > h->size) {
			nvmap_warn(client, "read/write outside of handle\n");
			ret = -EFAULT;
			break;
		}
		/* invalidate before reading so device-written data is seen */
		if (is_read)
			cache_maint(client, h, h_offs,
				h_offs + elem_size, NVMAP_CACHE_OP_INV);

		ret = rw_handle_page(h, is_read, h_offs, sys_addr,
				     elem_size, (unsigned long)addr, *pte);

		if (ret)
			break;

		/* write back after writing so the device sees the data */
		if (!is_read)
			cache_maint(client, h, h_offs,
				h_offs + elem_size, NVMAP_CACHE_OP_WB);

		copied += elem_size;
		sys_addr += sys_stride;
		h_offs += h_stride;
	}

	nvmap_free_pte(client->dev, pte);
	/* report partial progress if any elements completed */
	return ret ?: copied;
}
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.h b/drivers/video/tegra/nvmap/nvmap_ioctl.h new file mode 100644 index 00000000000..c802cd4dd7a --- /dev/null +++ b/drivers/video/tegra/nvmap/nvmap_ioctl.h | |||
@@ -0,0 +1,159 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/nvmap/nvmap_ioctl.h | ||
3 | * | ||
4 | * ioctl declarations for nvmap | ||
5 | * | ||
6 | * Copyright (c) 2010, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
23 | #ifndef __VIDEO_TEGRA_NVMAP_IOCTL_H | ||
24 | #define __VIDEO_TEGRA_NVMAP_IOCTL_H | ||
25 | |||
26 | #include <linux/ioctl.h> | ||
27 | #include <linux/file.h> | ||
28 | |||
29 | #include <mach/nvmap.h> | ||
30 | |||
/* queryable per-handle attributes for NVMAP_IOC_PARAM */
enum {
	NVMAP_HANDLE_PARAM_SIZE = 1,
	NVMAP_HANDLE_PARAM_ALIGNMENT,
	NVMAP_HANDLE_PARAM_BASE,
	NVMAP_HANDLE_PARAM_HEAP,
};
37 | |||
/* cache maintenance operations for NVMAP_IOC_CACHE */
enum {
	NVMAP_CACHE_OP_WB = 0,	/* write back (clean) */
	NVMAP_CACHE_OP_INV,	/* invalidate */
	NVMAP_CACHE_OP_WB_INV,	/* clean + invalidate (flush) */
};
43 | |||
44 | |||
45 | struct nvmap_create_handle { | ||
46 | union { | ||
47 | __u32 key; /* ClaimPreservedHandle */ | ||
48 | __u32 id; /* FromId */ | ||
49 | __u32 size; /* CreateHandle */ | ||
50 | }; | ||
51 | __u32 handle; | ||
52 | }; | ||
53 | |||
54 | struct nvmap_alloc_handle { | ||
55 | __u32 handle; | ||
56 | __u32 heap_mask; | ||
57 | __u32 flags; | ||
58 | __u32 align; | ||
59 | }; | ||
60 | |||
61 | struct nvmap_map_caller { | ||
62 | __u32 handle; /* hmem */ | ||
63 | __u32 offset; /* offset into hmem; should be page-aligned */ | ||
64 | __u32 length; /* number of bytes to map */ | ||
65 | __u32 flags; | ||
66 | unsigned long addr; /* user pointer */ | ||
67 | }; | ||
68 | |||
69 | struct nvmap_rw_handle { | ||
70 | unsigned long addr; /* user pointer */ | ||
71 | __u32 handle; /* hmem */ | ||
72 | __u32 offset; /* offset into hmem */ | ||
73 | __u32 elem_size; /* individual atom size */ | ||
74 | __u32 hmem_stride; /* delta in bytes between atoms in hmem */ | ||
75 | __u32 user_stride; /* delta in bytes between atoms in user */ | ||
76 | __u32 count; /* number of atoms to copy */ | ||
77 | }; | ||
78 | |||
/* argument for NVMAP_IOC_PIN_MULT / NVMAP_IOC_UNPIN_MULT; when count is
 * 1, handles carries the handle value itself and addr receives the
 * result directly instead of pointing at user arrays */
struct nvmap_pin_handle {
	unsigned long handles;	/* array of handles to pin/unpin */
	unsigned long addr;	/* array of addresses to return */
	__u32 count;		/* number of entries in handles */
};
84 | |||
85 | struct nvmap_handle_param { | ||
86 | __u32 handle; | ||
87 | __u32 param; | ||
88 | unsigned long result; | ||
89 | }; | ||
90 | |||
/* argument for NVMAP_IOC_CACHE */
struct nvmap_cache_op {
	unsigned long addr;	/* user virtual address of the mapped range */
	__u32 handle;		/* handle backing the mapping */
	__u32 len;		/* length of the range in bytes */
	__s32 op;		/* one of NVMAP_CACHE_OP_* */
};
97 | |||
98 | #define NVMAP_IOC_MAGIC 'N' | ||
99 | |||
100 | /* Creates a new memory handle. On input, the argument is the size of the new | ||
101 | * handle; on return, the argument is the name of the new handle | ||
102 | */ | ||
103 | #define NVMAP_IOC_CREATE _IOWR(NVMAP_IOC_MAGIC, 0, struct nvmap_create_handle) | ||
104 | #define NVMAP_IOC_CLAIM _IOWR(NVMAP_IOC_MAGIC, 1, struct nvmap_create_handle) | ||
105 | #define NVMAP_IOC_FROM_ID _IOWR(NVMAP_IOC_MAGIC, 2, struct nvmap_create_handle) | ||
106 | |||
107 | /* Actually allocates memory for the specified handle */ | ||
108 | #define NVMAP_IOC_ALLOC _IOW(NVMAP_IOC_MAGIC, 3, struct nvmap_alloc_handle) | ||
109 | |||
110 | /* Frees a memory handle, unpinning any pinned pages and unmapping any mappings | ||
111 | */ | ||
112 | #define NVMAP_IOC_FREE _IO(NVMAP_IOC_MAGIC, 4) | ||
113 | |||
114 | /* Maps the region of the specified handle into a user-provided virtual address | ||
115 | * that was previously created via an mmap syscall on this fd */ | ||
116 | #define NVMAP_IOC_MMAP _IOWR(NVMAP_IOC_MAGIC, 5, struct nvmap_map_caller) | ||
117 | |||
118 | /* Reads/writes data (possibly strided) from a user-provided buffer into the | ||
119 | * hmem at the specified offset */ | ||
120 | #define NVMAP_IOC_WRITE _IOW(NVMAP_IOC_MAGIC, 6, struct nvmap_rw_handle) | ||
121 | #define NVMAP_IOC_READ _IOW(NVMAP_IOC_MAGIC, 7, struct nvmap_rw_handle) | ||
122 | |||
123 | #define NVMAP_IOC_PARAM _IOWR(NVMAP_IOC_MAGIC, 8, struct nvmap_handle_param) | ||
124 | |||
125 | /* Pins a list of memory handles into IO-addressable memory (either IOVMM | ||
126 | * space or physical memory, depending on the allocation), and returns the | ||
127 | * address. Handles may be pinned recursively. */ | ||
128 | #define NVMAP_IOC_PIN_MULT _IOWR(NVMAP_IOC_MAGIC, 10, struct nvmap_pin_handle) | ||
129 | #define NVMAP_IOC_UNPIN_MULT _IOW(NVMAP_IOC_MAGIC, 11, struct nvmap_pin_handle) | ||
130 | |||
131 | #define NVMAP_IOC_CACHE _IOW(NVMAP_IOC_MAGIC, 12, struct nvmap_cache_op) | ||
132 | |||
133 | /* Returns a global ID usable to allow a remote process to create a handle | ||
134 | * reference to the same handle */ | ||
135 | #define NVMAP_IOC_GET_ID _IOWR(NVMAP_IOC_MAGIC, 13, struct nvmap_create_handle) | ||
136 | |||
137 | #define NVMAP_IOC_MAXNR (_IOC_NR(NVMAP_IOC_GET_ID)) | ||
138 | |||
139 | int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg); | ||
140 | |||
141 | int nvmap_ioctl_get_param(struct file *filp, void __user* arg); | ||
142 | |||
143 | int nvmap_ioctl_getid(struct file *filp, void __user *arg); | ||
144 | |||
145 | int nvmap_ioctl_alloc(struct file *filp, void __user *arg); | ||
146 | |||
147 | int nvmap_ioctl_free(struct file *filp, unsigned long arg); | ||
148 | |||
149 | int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg); | ||
150 | |||
151 | int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg); | ||
152 | |||
153 | int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg); | ||
154 | |||
155 | int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user* arg); | ||
156 | |||
157 | |||
158 | |||
159 | #endif | ||
diff --git a/drivers/video/tegra/nvmap/nvmap_mru.c b/drivers/video/tegra/nvmap/nvmap_mru.c new file mode 100644 index 00000000000..f54d44923eb --- /dev/null +++ b/drivers/video/tegra/nvmap/nvmap_mru.c | |||
@@ -0,0 +1,187 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/nvmap/nvmap_mru.c | ||
3 | * | ||
4 | * IOVMM virtualization support for nvmap | ||
5 | * | ||
6 | * Copyright (c) 2009-2011, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/list.h> | ||
24 | #include <linux/slab.h> | ||
25 | |||
26 | #include <asm/pgtable.h> | ||
27 | |||
28 | #include <mach/iovmm.h> | ||
29 | |||
30 | #include "nvmap.h" | ||
31 | #include "nvmap_mru.h" | ||
32 | |||
33 | /* if IOVMM reclamation is enabled (CONFIG_NVMAP_RECLAIM_UNPINNED_VM), | ||
34 | * unpinned handles are placed onto a most-recently-used eviction list; | ||
35 | * multiple lists are maintained, segmented by size (sizes were chosen to | ||
36 | * roughly correspond with common sizes for graphics surfaces). | ||
37 | * | ||
38 | * if a handle is located on the MRU list, then the code below may | ||
39 | * steal its IOVMM area at any time to satisfy a pin operation if no | ||
40 | * free IOVMM space is available | ||
41 | */ | ||
42 | |||
43 | static const size_t mru_cutoff[] = { | ||
44 | 262144, 393216, 786432, 1048576, 1572864 | ||
45 | }; | ||
46 | |||
47 | static inline struct list_head *mru_list(struct nvmap_share *share, size_t size) | ||
48 | { | ||
49 | unsigned int i; | ||
50 | |||
51 | BUG_ON(!share->mru_lists); | ||
52 | for (i = 0; i < ARRAY_SIZE(mru_cutoff); i++) | ||
53 | if (size <= mru_cutoff[i]) | ||
54 | break; | ||
55 | |||
56 | return &share->mru_lists[i]; | ||
57 | } | ||
58 | |||
59 | size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm) | ||
60 | { | ||
61 | size_t vm_size = tegra_iovmm_get_vm_size(iovmm); | ||
62 | return (vm_size >> 2) * 3; | ||
63 | } | ||
64 | |||
65 | /* nvmap_mru_vma_lock should be acquired by the caller before calling this */ | ||
66 | void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h) | ||
67 | { | ||
68 | size_t len = h->pgalloc.area->iovm_length; | ||
69 | list_add(&h->pgalloc.mru_list, mru_list(share, len)); | ||
70 | } | ||
71 | |||
72 | void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h) | ||
73 | { | ||
74 | nvmap_mru_lock(s); | ||
75 | if (!list_empty(&h->pgalloc.mru_list)) | ||
76 | list_del(&h->pgalloc.mru_list); | ||
77 | nvmap_mru_unlock(s); | ||
78 | INIT_LIST_HEAD(&h->pgalloc.mru_list); | ||
79 | } | ||
80 | |||
/* returns a tegra_iovmm_area for a handle. if the handle already has
 * an iovmm_area allocated, the handle is simply removed from its MRU list
 * and the existing iovmm_area is returned.
 *
 * if no existing allocation exists, try to allocate a new IOVMM area.
 *
 * if a new area can not be allocated, try to re-use the most-recently-unpinned
 * handle's allocation.
 *
 * and if that fails, iteratively evict handles from the MRU lists and free
 * their allocations, until the new allocation succeeds.
 *
 * NOTE(review): the "_locked" suffix suggests the caller already holds
 * the MRU lock for the whole call -- confirm against callers.
 */
struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
						   struct nvmap_handle *h)
{
	struct list_head *mru;
	struct nvmap_handle *evict = NULL;
	struct tegra_iovmm_area *vm = NULL;
	unsigned int i, idx;
	pgprot_t prot;

	BUG_ON(!h || !c || !c->share);

	/* page protection derived from the handle's flags */
	prot = nvmap_pgprot(h, pgprot_kernel);

	/* fast path: handle still owns an IOVMM area from a previous pin;
	 * take it off its MRU list so it can no longer be stolen, and
	 * return it directly. */
	if (h->pgalloc.area) {
		BUG_ON(list_empty(&h->pgalloc.mru_list));
		list_del(&h->pgalloc.mru_list);
		INIT_LIST_HEAD(&h->pgalloc.mru_list);
		return h->pgalloc.area;
	}

	/* first try a fresh allocation from free IOVMM space */
	vm = tegra_iovmm_create_vm(c->share->iovmm, NULL,
					h->size, h->align, prot,
					h->pgalloc.iovm_addr);

	if (vm) {
		INIT_LIST_HEAD(&h->pgalloc.mru_list);
		return vm;
	}
	/* if client is looking for specific iovm address, return from here. */
	if ((vm == NULL) && (h->pgalloc.iovm_addr != 0))
		return NULL;
	/* attempt to re-use the most recently unpinned IOVMM area in the
	 * same size bin as the current handle. If that fails, iteratively
	 * evict handles (starting from the current bin) until an allocation
	 * succeeds or no more areas can be evicted */
	mru = mru_list(c->share, h->size);
	if (!list_empty(mru))
		evict = list_first_entry(mru, struct nvmap_handle,
					 pgalloc.mru_list);

	/* direct steal is only valid if the victim's area is big enough */
	if (evict && evict->pgalloc.area->iovm_length >= h->size) {
		list_del(&evict->pgalloc.mru_list);
		vm = evict->pgalloc.area;
		evict->pgalloc.area = NULL;
		INIT_LIST_HEAD(&evict->pgalloc.mru_list);
		return vm;
	}

	/* walk every size bin starting from this handle's own, freeing
	 * victims' areas one at a time and retrying the allocation after
	 * each free until it succeeds or all bins are empty */
	idx = mru - c->share->mru_lists;

	for (i = 0; i < c->share->nr_mru && !vm; i++, idx++) {
		if (idx >= c->share->nr_mru)
			idx = 0;
		mru = &c->share->mru_lists[idx];
		while (!list_empty(mru) && !vm) {
			evict = list_first_entry(mru, struct nvmap_handle,
						 pgalloc.mru_list);

			/* only unpinned handles may sit on an MRU list */
			BUG_ON(atomic_read(&evict->pin) != 0);
			BUG_ON(!evict->pgalloc.area);
			list_del(&evict->pgalloc.mru_list);
			INIT_LIST_HEAD(&evict->pgalloc.mru_list);
			tegra_iovmm_free_vm(evict->pgalloc.area);
			evict->pgalloc.area = NULL;
			vm = tegra_iovmm_create_vm(c->share->iovmm,
					NULL, h->size, h->align,
					prot, h->pgalloc.iovm_addr);
		}
	}
	/* NULL if every eviction still could not satisfy the request */
	return vm;
}
164 | |||
165 | int nvmap_mru_init(struct nvmap_share *share) | ||
166 | { | ||
167 | int i; | ||
168 | mutex_init(&share->mru_lock); | ||
169 | share->nr_mru = ARRAY_SIZE(mru_cutoff) + 1; | ||
170 | |||
171 | share->mru_lists = kzalloc(sizeof(struct list_head) * share->nr_mru, | ||
172 | GFP_KERNEL); | ||
173 | |||
174 | if (!share->mru_lists) | ||
175 | return -ENOMEM; | ||
176 | |||
177 | for (i = 0; i < share->nr_mru; i++) | ||
178 | INIT_LIST_HEAD(&share->mru_lists[i]); | ||
179 | |||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | void nvmap_mru_destroy(struct nvmap_share *share) | ||
184 | { | ||
185 | kfree(share->mru_lists); | ||
186 | share->mru_lists = NULL; | ||
187 | } | ||
diff --git a/drivers/video/tegra/nvmap/nvmap_mru.h b/drivers/video/tegra/nvmap/nvmap_mru.h new file mode 100644 index 00000000000..6c94630bc3e --- /dev/null +++ b/drivers/video/tegra/nvmap/nvmap_mru.h | |||
@@ -0,0 +1,84 @@ | |||
1 | /* | ||
2 | * drivers/video/tegra/nvmap/nvmap_mru.h | ||
3 | * | ||
4 | * IOVMM virtualization support for nvmap | ||
5 | * | ||
6 | * Copyright (c) 2009-2010, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
23 | |||
24 | #ifndef __VIDEO_TEGRA_NVMAP_MRU_H | ||
25 | #define __VIDEO_TEGRA_NVMAP_MRU_H | ||
26 | |||
27 | #include <linux/spinlock.h> | ||
28 | |||
29 | #include "nvmap.h" | ||
30 | |||
31 | struct tegra_iovmm_area; | ||
32 | struct tegra_iovmm_client; | ||
33 | |||
34 | #ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM | ||
35 | |||
/* Serialize access to the share's MRU eviction lists (reclamation
 * build only). */
static inline void nvmap_mru_lock(struct nvmap_share *share)
{
	mutex_lock(&share->mru_lock);
}
40 | |||
/* Release the MRU list lock taken by nvmap_mru_lock(). */
static inline void nvmap_mru_unlock(struct nvmap_share *share)
{
	mutex_unlock(&share->mru_lock);
}
45 | |||
46 | int nvmap_mru_init(struct nvmap_share *share); | ||
47 | |||
48 | void nvmap_mru_destroy(struct nvmap_share *share); | ||
49 | |||
50 | size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm); | ||
51 | |||
52 | void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h); | ||
53 | |||
54 | void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h); | ||
55 | |||
56 | struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c, | ||
57 | struct nvmap_handle *h); | ||
58 | |||
59 | #else | ||
60 | |||
61 | #define nvmap_mru_lock(_s) do { } while (0) | ||
62 | #define nvmap_mru_unlock(_s) do { } while (0) | ||
63 | #define nvmap_mru_init(_s) 0 | ||
64 | #define nvmap_mru_destroy(_s) do { } while (0) | ||
65 | #define nvmap_mru_vm_size(_a) tegra_iovmm_get_vm_size(_a) | ||
66 | |||
/* No-op when IOVMM reclamation is disabled: unpinned handles keep
 * their IOVMM areas and are never placed on an eviction list. */
static inline void nvmap_mru_insert_locked(struct nvmap_share *share,
					   struct nvmap_handle *h)
{ }
70 | |||
/* No-op when IOVMM reclamation is disabled: handles are never on an
 * MRU list, so there is nothing to remove. */
static inline void nvmap_mru_remove(struct nvmap_share *s,
				    struct nvmap_handle *h)
{ }
74 | |||
/* Without reclamation a handle's IOVMM area is allocated once and never
 * stolen, so it must already exist by the time it is (re-)pinned. */
static inline struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
						struct nvmap_handle *h)
{
	BUG_ON(!h->pgalloc.area);
	return h->pgalloc.area;
}
81 | |||
82 | #endif | ||
83 | |||
84 | #endif | ||