diff options
Diffstat (limited to 'drivers/gpu')
134 files changed, 8475 insertions, 1529 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 5b1cb7e73ee8..8e7fa4dbaed8 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
@@ -192,6 +192,8 @@ source "drivers/gpu/drm/tilcdc/Kconfig" | |||
192 | 192 | ||
193 | source "drivers/gpu/drm/qxl/Kconfig" | 193 | source "drivers/gpu/drm/qxl/Kconfig" |
194 | 194 | ||
195 | source "drivers/gpu/drm/bochs/Kconfig" | ||
196 | |||
195 | source "drivers/gpu/drm/msm/Kconfig" | 197 | source "drivers/gpu/drm/msm/Kconfig" |
196 | 198 | ||
197 | source "drivers/gpu/drm/tegra/Kconfig" | 199 | source "drivers/gpu/drm/tegra/Kconfig" |
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index d1a5c7277678..292a79d64146 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile | |||
@@ -58,6 +58,7 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ | |||
58 | obj-$(CONFIG_DRM_OMAP) += omapdrm/ | 58 | obj-$(CONFIG_DRM_OMAP) += omapdrm/ |
59 | obj-$(CONFIG_DRM_TILCDC) += tilcdc/ | 59 | obj-$(CONFIG_DRM_TILCDC) += tilcdc/ |
60 | obj-$(CONFIG_DRM_QXL) += qxl/ | 60 | obj-$(CONFIG_DRM_QXL) += qxl/ |
61 | obj-$(CONFIG_DRM_BOCHS) += bochs/ | ||
61 | obj-$(CONFIG_DRM_MSM) += msm/ | 62 | obj-$(CONFIG_DRM_MSM) += msm/ |
62 | obj-$(CONFIG_DRM_TEGRA) += tegra/ | 63 | obj-$(CONFIG_DRM_TEGRA) += tegra/ |
63 | obj-y += i2c/ | 64 | obj-y += i2c/ |
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig new file mode 100644 index 000000000000..c8fcf12019f0 --- /dev/null +++ b/drivers/gpu/drm/bochs/Kconfig | |||
@@ -0,0 +1,11 @@ | |||
1 | config DRM_BOCHS | ||
2 | tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" | ||
3 | depends on DRM && PCI | ||
4 | select DRM_KMS_HELPER | ||
5 | select FB_SYS_FILLRECT | ||
6 | select FB_SYS_COPYAREA | ||
7 | select FB_SYS_IMAGEBLIT | ||
8 | select DRM_TTM | ||
9 | help | ||
10 | Choose this option for qemu. | ||
11 | If M is selected the module will be called bochs-drm. | ||
diff --git a/drivers/gpu/drm/bochs/Makefile b/drivers/gpu/drm/bochs/Makefile new file mode 100644 index 000000000000..844a55614920 --- /dev/null +++ b/drivers/gpu/drm/bochs/Makefile | |||
@@ -0,0 +1,4 @@ | |||
1 | ccflags-y := -Iinclude/drm | ||
2 | bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_fbdev.o bochs_hw.o | ||
3 | |||
4 | obj-$(CONFIG_DRM_BOCHS) += bochs-drm.o | ||
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h new file mode 100644 index 000000000000..741965c001a6 --- /dev/null +++ b/drivers/gpu/drm/bochs/bochs.h | |||
@@ -0,0 +1,164 @@ | |||
1 | #include <linux/io.h> | ||
2 | #include <linux/fb.h> | ||
3 | |||
4 | #include <drm/drmP.h> | ||
5 | #include <drm/drm_crtc.h> | ||
6 | #include <drm/drm_crtc_helper.h> | ||
7 | #include <drm/drm_fb_helper.h> | ||
8 | |||
9 | #include <ttm/ttm_bo_driver.h> | ||
10 | #include <ttm/ttm_page_alloc.h> | ||
11 | |||
12 | /* ---------------------------------------------------------------------- */ | ||
13 | |||
14 | #define VBE_DISPI_IOPORT_INDEX 0x01CE | ||
15 | #define VBE_DISPI_IOPORT_DATA 0x01CF | ||
16 | |||
17 | #define VBE_DISPI_INDEX_ID 0x0 | ||
18 | #define VBE_DISPI_INDEX_XRES 0x1 | ||
19 | #define VBE_DISPI_INDEX_YRES 0x2 | ||
20 | #define VBE_DISPI_INDEX_BPP 0x3 | ||
21 | #define VBE_DISPI_INDEX_ENABLE 0x4 | ||
22 | #define VBE_DISPI_INDEX_BANK 0x5 | ||
23 | #define VBE_DISPI_INDEX_VIRT_WIDTH 0x6 | ||
24 | #define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7 | ||
25 | #define VBE_DISPI_INDEX_X_OFFSET 0x8 | ||
26 | #define VBE_DISPI_INDEX_Y_OFFSET 0x9 | ||
27 | #define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa | ||
28 | |||
29 | #define VBE_DISPI_ID0 0xB0C0 | ||
30 | #define VBE_DISPI_ID1 0xB0C1 | ||
31 | #define VBE_DISPI_ID2 0xB0C2 | ||
32 | #define VBE_DISPI_ID3 0xB0C3 | ||
33 | #define VBE_DISPI_ID4 0xB0C4 | ||
34 | #define VBE_DISPI_ID5 0xB0C5 | ||
35 | |||
36 | #define VBE_DISPI_DISABLED 0x00 | ||
37 | #define VBE_DISPI_ENABLED 0x01 | ||
38 | #define VBE_DISPI_GETCAPS 0x02 | ||
39 | #define VBE_DISPI_8BIT_DAC 0x20 | ||
40 | #define VBE_DISPI_LFB_ENABLED 0x40 | ||
41 | #define VBE_DISPI_NOCLEARMEM 0x80 | ||
42 | |||
43 | /* ---------------------------------------------------------------------- */ | ||
44 | |||
45 | enum bochs_types { | ||
46 | BOCHS_QEMU_STDVGA, | ||
47 | BOCHS_UNKNOWN, | ||
48 | }; | ||
49 | |||
50 | struct bochs_framebuffer { | ||
51 | struct drm_framebuffer base; | ||
52 | struct drm_gem_object *obj; | ||
53 | }; | ||
54 | |||
55 | struct bochs_device { | ||
56 | /* hw */ | ||
57 | void __iomem *mmio; | ||
58 | int ioports; | ||
59 | void __iomem *fb_map; | ||
60 | unsigned long fb_base; | ||
61 | unsigned long fb_size; | ||
62 | |||
63 | /* mode */ | ||
64 | u16 xres; | ||
65 | u16 yres; | ||
66 | u16 yres_virtual; | ||
67 | u32 stride; | ||
68 | u32 bpp; | ||
69 | |||
70 | /* drm */ | ||
71 | struct drm_device *dev; | ||
72 | struct drm_crtc crtc; | ||
73 | struct drm_encoder encoder; | ||
74 | struct drm_connector connector; | ||
75 | bool mode_config_initialized; | ||
76 | |||
77 | /* ttm */ | ||
78 | struct { | ||
79 | struct drm_global_reference mem_global_ref; | ||
80 | struct ttm_bo_global_ref bo_global_ref; | ||
81 | struct ttm_bo_device bdev; | ||
82 | bool initialized; | ||
83 | } ttm; | ||
84 | |||
85 | /* fbdev */ | ||
86 | struct { | ||
87 | struct bochs_framebuffer gfb; | ||
88 | struct drm_fb_helper helper; | ||
89 | int size; | ||
90 | int x1, y1, x2, y2; /* dirty rect */ | ||
91 | spinlock_t dirty_lock; | ||
92 | bool initialized; | ||
93 | } fb; | ||
94 | }; | ||
95 | |||
96 | #define to_bochs_framebuffer(x) container_of(x, struct bochs_framebuffer, base) | ||
97 | |||
98 | struct bochs_bo { | ||
99 | struct ttm_buffer_object bo; | ||
100 | struct ttm_placement placement; | ||
101 | struct ttm_bo_kmap_obj kmap; | ||
102 | struct drm_gem_object gem; | ||
103 | u32 placements[3]; | ||
104 | int pin_count; | ||
105 | }; | ||
106 | |||
107 | static inline struct bochs_bo *bochs_bo(struct ttm_buffer_object *bo) | ||
108 | { | ||
109 | return container_of(bo, struct bochs_bo, bo); | ||
110 | } | ||
111 | |||
112 | static inline struct bochs_bo *gem_to_bochs_bo(struct drm_gem_object *gem) | ||
113 | { | ||
114 | return container_of(gem, struct bochs_bo, gem); | ||
115 | } | ||
116 | |||
117 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) | ||
118 | |||
119 | static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo) | ||
120 | { | ||
121 | return drm_vma_node_offset_addr(&bo->bo.vma_node); | ||
122 | } | ||
123 | |||
124 | /* ---------------------------------------------------------------------- */ | ||
125 | |||
126 | /* bochs_hw.c */ | ||
127 | int bochs_hw_init(struct drm_device *dev, uint32_t flags); | ||
128 | void bochs_hw_fini(struct drm_device *dev); | ||
129 | |||
130 | void bochs_hw_setmode(struct bochs_device *bochs, | ||
131 | struct drm_display_mode *mode); | ||
132 | void bochs_hw_setbase(struct bochs_device *bochs, | ||
133 | int x, int y, u64 addr); | ||
134 | |||
135 | /* bochs_mm.c */ | ||
136 | int bochs_mm_init(struct bochs_device *bochs); | ||
137 | void bochs_mm_fini(struct bochs_device *bochs); | ||
138 | int bochs_mmap(struct file *filp, struct vm_area_struct *vma); | ||
139 | |||
140 | int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel, | ||
141 | struct drm_gem_object **obj); | ||
142 | int bochs_gem_init_object(struct drm_gem_object *obj); | ||
143 | void bochs_gem_free_object(struct drm_gem_object *obj); | ||
144 | int bochs_dumb_create(struct drm_file *file, struct drm_device *dev, | ||
145 | struct drm_mode_create_dumb *args); | ||
146 | int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, | ||
147 | uint32_t handle, uint64_t *offset); | ||
148 | |||
149 | int bochs_framebuffer_init(struct drm_device *dev, | ||
150 | struct bochs_framebuffer *gfb, | ||
151 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
152 | struct drm_gem_object *obj); | ||
153 | int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr); | ||
154 | int bochs_bo_unpin(struct bochs_bo *bo); | ||
155 | |||
156 | extern const struct drm_mode_config_funcs bochs_mode_funcs; | ||
157 | |||
158 | /* bochs_kms.c */ | ||
159 | int bochs_kms_init(struct bochs_device *bochs); | ||
160 | void bochs_kms_fini(struct bochs_device *bochs); | ||
161 | |||
162 | /* bochs_fbdev.c */ | ||
163 | int bochs_fbdev_init(struct bochs_device *bochs); | ||
164 | void bochs_fbdev_fini(struct bochs_device *bochs); | ||
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c new file mode 100644 index 000000000000..395bba261c9a --- /dev/null +++ b/drivers/gpu/drm/bochs/bochs_drv.c | |||
@@ -0,0 +1,178 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | */ | ||
7 | |||
8 | #include <linux/mm.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/slab.h> | ||
11 | |||
12 | #include "bochs.h" | ||
13 | |||
14 | static bool enable_fbdev = true; | ||
15 | module_param_named(fbdev, enable_fbdev, bool, 0444); | ||
16 | MODULE_PARM_DESC(fbdev, "register fbdev device"); | ||
17 | |||
18 | /* ---------------------------------------------------------------------- */ | ||
19 | /* drm interface */ | ||
20 | |||
21 | static int bochs_unload(struct drm_device *dev) | ||
22 | { | ||
23 | struct bochs_device *bochs = dev->dev_private; | ||
24 | |||
25 | bochs_fbdev_fini(bochs); | ||
26 | bochs_kms_fini(bochs); | ||
27 | bochs_mm_fini(bochs); | ||
28 | bochs_hw_fini(dev); | ||
29 | kfree(bochs); | ||
30 | dev->dev_private = NULL; | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | static int bochs_load(struct drm_device *dev, unsigned long flags) | ||
35 | { | ||
36 | struct bochs_device *bochs; | ||
37 | int ret; | ||
38 | |||
39 | bochs = kzalloc(sizeof(*bochs), GFP_KERNEL); | ||
40 | if (bochs == NULL) | ||
41 | return -ENOMEM; | ||
42 | dev->dev_private = bochs; | ||
43 | bochs->dev = dev; | ||
44 | |||
45 | ret = bochs_hw_init(dev, flags); | ||
46 | if (ret) | ||
47 | goto err; | ||
48 | |||
49 | ret = bochs_mm_init(bochs); | ||
50 | if (ret) | ||
51 | goto err; | ||
52 | |||
53 | ret = bochs_kms_init(bochs); | ||
54 | if (ret) | ||
55 | goto err; | ||
56 | |||
57 | if (enable_fbdev) | ||
58 | bochs_fbdev_init(bochs); | ||
59 | |||
60 | return 0; | ||
61 | |||
62 | err: | ||
63 | bochs_unload(dev); | ||
64 | return ret; | ||
65 | } | ||
66 | |||
67 | static const struct file_operations bochs_fops = { | ||
68 | .owner = THIS_MODULE, | ||
69 | .open = drm_open, | ||
70 | .release = drm_release, | ||
71 | .unlocked_ioctl = drm_ioctl, | ||
72 | #ifdef CONFIG_COMPAT | ||
73 | .compat_ioctl = drm_compat_ioctl, | ||
74 | #endif | ||
75 | .poll = drm_poll, | ||
76 | .read = drm_read, | ||
77 | .llseek = no_llseek, | ||
78 | .mmap = bochs_mmap, | ||
79 | }; | ||
80 | |||
81 | static struct drm_driver bochs_driver = { | ||
82 | .driver_features = DRIVER_GEM | DRIVER_MODESET, | ||
83 | .load = bochs_load, | ||
84 | .unload = bochs_unload, | ||
85 | .fops = &bochs_fops, | ||
86 | .name = "bochs-drm", | ||
87 | .desc = "bochs dispi vga interface (qemu stdvga)", | ||
88 | .date = "20130925", | ||
89 | .major = 1, | ||
90 | .minor = 0, | ||
91 | .gem_free_object = bochs_gem_free_object, | ||
92 | .dumb_create = bochs_dumb_create, | ||
93 | .dumb_map_offset = bochs_dumb_mmap_offset, | ||
94 | .dumb_destroy = drm_gem_dumb_destroy, | ||
95 | }; | ||
96 | |||
97 | /* ---------------------------------------------------------------------- */ | ||
98 | /* pci interface */ | ||
99 | |||
100 | static int bochs_kick_out_firmware_fb(struct pci_dev *pdev) | ||
101 | { | ||
102 | struct apertures_struct *ap; | ||
103 | |||
104 | ap = alloc_apertures(1); | ||
105 | if (!ap) | ||
106 | return -ENOMEM; | ||
107 | |||
108 | ap->ranges[0].base = pci_resource_start(pdev, 0); | ||
109 | ap->ranges[0].size = pci_resource_len(pdev, 0); | ||
110 | remove_conflicting_framebuffers(ap, "bochsdrmfb", false); | ||
111 | kfree(ap); | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static int bochs_pci_probe(struct pci_dev *pdev, | ||
117 | const struct pci_device_id *ent) | ||
118 | { | ||
119 | int ret; | ||
120 | |||
121 | ret = bochs_kick_out_firmware_fb(pdev); | ||
122 | if (ret) | ||
123 | return ret; | ||
124 | |||
125 | return drm_get_pci_dev(pdev, ent, &bochs_driver); | ||
126 | } | ||
127 | |||
128 | static void bochs_pci_remove(struct pci_dev *pdev) | ||
129 | { | ||
130 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
131 | |||
132 | drm_put_dev(dev); | ||
133 | } | ||
134 | |||
135 | static DEFINE_PCI_DEVICE_TABLE(bochs_pci_tbl) = { | ||
136 | { | ||
137 | .vendor = 0x1234, | ||
138 | .device = 0x1111, | ||
139 | .subvendor = 0x1af4, | ||
140 | .subdevice = 0x1100, | ||
141 | .driver_data = BOCHS_QEMU_STDVGA, | ||
142 | }, | ||
143 | { | ||
144 | .vendor = 0x1234, | ||
145 | .device = 0x1111, | ||
146 | .subvendor = PCI_ANY_ID, | ||
147 | .subdevice = PCI_ANY_ID, | ||
148 | .driver_data = BOCHS_UNKNOWN, | ||
149 | }, | ||
150 | { /* end of list */ } | ||
151 | }; | ||
152 | |||
153 | static struct pci_driver bochs_pci_driver = { | ||
154 | .name = "bochs-drm", | ||
155 | .id_table = bochs_pci_tbl, | ||
156 | .probe = bochs_pci_probe, | ||
157 | .remove = bochs_pci_remove, | ||
158 | }; | ||
159 | |||
160 | /* ---------------------------------------------------------------------- */ | ||
161 | /* module init/exit */ | ||
162 | |||
163 | static int __init bochs_init(void) | ||
164 | { | ||
165 | return drm_pci_init(&bochs_driver, &bochs_pci_driver); | ||
166 | } | ||
167 | |||
168 | static void __exit bochs_exit(void) | ||
169 | { | ||
170 | drm_pci_exit(&bochs_driver, &bochs_pci_driver); | ||
171 | } | ||
172 | |||
173 | module_init(bochs_init); | ||
174 | module_exit(bochs_exit); | ||
175 | |||
176 | MODULE_DEVICE_TABLE(pci, bochs_pci_tbl); | ||
177 | MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>"); | ||
178 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c new file mode 100644 index 000000000000..4da5206b7cc9 --- /dev/null +++ b/drivers/gpu/drm/bochs/bochs_fbdev.c | |||
@@ -0,0 +1,215 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | */ | ||
7 | |||
8 | #include "bochs.h" | ||
9 | |||
10 | /* ---------------------------------------------------------------------- */ | ||
11 | |||
12 | static struct fb_ops bochsfb_ops = { | ||
13 | .owner = THIS_MODULE, | ||
14 | .fb_check_var = drm_fb_helper_check_var, | ||
15 | .fb_set_par = drm_fb_helper_set_par, | ||
16 | .fb_fillrect = sys_fillrect, | ||
17 | .fb_copyarea = sys_copyarea, | ||
18 | .fb_imageblit = sys_imageblit, | ||
19 | .fb_pan_display = drm_fb_helper_pan_display, | ||
20 | .fb_blank = drm_fb_helper_blank, | ||
21 | .fb_setcmap = drm_fb_helper_setcmap, | ||
22 | }; | ||
23 | |||
24 | static int bochsfb_create_object(struct bochs_device *bochs, | ||
25 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
26 | struct drm_gem_object **gobj_p) | ||
27 | { | ||
28 | struct drm_device *dev = bochs->dev; | ||
29 | struct drm_gem_object *gobj; | ||
30 | u32 size; | ||
31 | int ret = 0; | ||
32 | |||
33 | size = mode_cmd->pitches[0] * mode_cmd->height; | ||
34 | ret = bochs_gem_create(dev, size, true, &gobj); | ||
35 | if (ret) | ||
36 | return ret; | ||
37 | |||
38 | *gobj_p = gobj; | ||
39 | return ret; | ||
40 | } | ||
41 | |||
42 | static int bochsfb_create(struct drm_fb_helper *helper, | ||
43 | struct drm_fb_helper_surface_size *sizes) | ||
44 | { | ||
45 | struct bochs_device *bochs = | ||
46 | container_of(helper, struct bochs_device, fb.helper); | ||
47 | struct drm_device *dev = bochs->dev; | ||
48 | struct fb_info *info; | ||
49 | struct drm_framebuffer *fb; | ||
50 | struct drm_mode_fb_cmd2 mode_cmd; | ||
51 | struct device *device = &dev->pdev->dev; | ||
52 | struct drm_gem_object *gobj = NULL; | ||
53 | struct bochs_bo *bo = NULL; | ||
54 | int size, ret; | ||
55 | |||
56 | if (sizes->surface_bpp != 32) | ||
57 | return -EINVAL; | ||
58 | |||
59 | mode_cmd.width = sizes->surface_width; | ||
60 | mode_cmd.height = sizes->surface_height; | ||
61 | mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8); | ||
62 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, | ||
63 | sizes->surface_depth); | ||
64 | size = mode_cmd.pitches[0] * mode_cmd.height; | ||
65 | |||
66 | /* alloc, pin & map bo */ | ||
67 | ret = bochsfb_create_object(bochs, &mode_cmd, &gobj); | ||
68 | if (ret) { | ||
69 | DRM_ERROR("failed to create fbcon backing object %d\n", ret); | ||
70 | return ret; | ||
71 | } | ||
72 | |||
73 | bo = gem_to_bochs_bo(gobj); | ||
74 | |||
75 | ret = ttm_bo_reserve(&bo->bo, true, false, false, 0); | ||
76 | if (ret) | ||
77 | return ret; | ||
78 | |||
79 | ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL); | ||
80 | if (ret) { | ||
81 | DRM_ERROR("failed to pin fbcon\n"); | ||
82 | ttm_bo_unreserve(&bo->bo); | ||
83 | return ret; | ||
84 | } | ||
85 | |||
86 | ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, | ||
87 | &bo->kmap); | ||
88 | if (ret) { | ||
89 | DRM_ERROR("failed to kmap fbcon\n"); | ||
90 | ttm_bo_unreserve(&bo->bo); | ||
91 | return ret; | ||
92 | } | ||
93 | |||
94 | ttm_bo_unreserve(&bo->bo); | ||
95 | |||
96 | /* init fb device */ | ||
97 | info = framebuffer_alloc(0, device); | ||
98 | if (info == NULL) | ||
99 | return -ENOMEM; | ||
100 | |||
101 | info->par = &bochs->fb.helper; | ||
102 | |||
103 | ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj); | ||
104 | if (ret) | ||
105 | return ret; | ||
106 | |||
107 | bochs->fb.size = size; | ||
108 | |||
109 | /* setup helper */ | ||
110 | fb = &bochs->fb.gfb.base; | ||
111 | bochs->fb.helper.fb = fb; | ||
112 | bochs->fb.helper.fbdev = info; | ||
113 | |||
114 | strcpy(info->fix.id, "bochsdrmfb"); | ||
115 | |||
116 | info->flags = FBINFO_DEFAULT; | ||
117 | info->fbops = &bochsfb_ops; | ||
118 | |||
119 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | ||
120 | drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width, | ||
121 | sizes->fb_height); | ||
122 | |||
123 | info->screen_base = bo->kmap.virtual; | ||
124 | info->screen_size = size; | ||
125 | |||
126 | #if 0 | ||
127 | /* FIXME: get this right for mmap(/dev/fb0) */ | ||
128 | info->fix.smem_start = bochs_bo_mmap_offset(bo); | ||
129 | info->fix.smem_len = size; | ||
130 | #endif | ||
131 | |||
132 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | ||
133 | if (ret) { | ||
134 | DRM_ERROR("%s: can't allocate color map\n", info->fix.id); | ||
135 | return -ENOMEM; | ||
136 | } | ||
137 | |||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | static int bochs_fbdev_destroy(struct bochs_device *bochs) | ||
142 | { | ||
143 | struct bochs_framebuffer *gfb = &bochs->fb.gfb; | ||
144 | struct fb_info *info; | ||
145 | |||
146 | DRM_DEBUG_DRIVER("\n"); | ||
147 | |||
148 | if (bochs->fb.helper.fbdev) { | ||
149 | info = bochs->fb.helper.fbdev; | ||
150 | |||
151 | unregister_framebuffer(info); | ||
152 | if (info->cmap.len) | ||
153 | fb_dealloc_cmap(&info->cmap); | ||
154 | framebuffer_release(info); | ||
155 | } | ||
156 | |||
157 | if (gfb->obj) { | ||
158 | drm_gem_object_unreference_unlocked(gfb->obj); | ||
159 | gfb->obj = NULL; | ||
160 | } | ||
161 | |||
162 | drm_fb_helper_fini(&bochs->fb.helper); | ||
163 | drm_framebuffer_unregister_private(&gfb->base); | ||
164 | drm_framebuffer_cleanup(&gfb->base); | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | void bochs_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | ||
170 | u16 blue, int regno) | ||
171 | { | ||
172 | } | ||
173 | |||
174 | void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
175 | u16 *blue, int regno) | ||
176 | { | ||
177 | *red = regno; | ||
178 | *green = regno; | ||
179 | *blue = regno; | ||
180 | } | ||
181 | |||
182 | static struct drm_fb_helper_funcs bochs_fb_helper_funcs = { | ||
183 | .gamma_set = bochs_fb_gamma_set, | ||
184 | .gamma_get = bochs_fb_gamma_get, | ||
185 | .fb_probe = bochsfb_create, | ||
186 | }; | ||
187 | |||
188 | int bochs_fbdev_init(struct bochs_device *bochs) | ||
189 | { | ||
190 | int ret; | ||
191 | |||
192 | bochs->fb.helper.funcs = &bochs_fb_helper_funcs; | ||
193 | spin_lock_init(&bochs->fb.dirty_lock); | ||
194 | |||
195 | ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, | ||
196 | 1, 1); | ||
197 | if (ret) | ||
198 | return ret; | ||
199 | |||
200 | drm_fb_helper_single_add_all_connectors(&bochs->fb.helper); | ||
201 | drm_helper_disable_unused_functions(bochs->dev); | ||
202 | drm_fb_helper_initial_config(&bochs->fb.helper, 32); | ||
203 | |||
204 | bochs->fb.initialized = true; | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | void bochs_fbdev_fini(struct bochs_device *bochs) | ||
209 | { | ||
210 | if (!bochs->fb.initialized) | ||
211 | return; | ||
212 | |||
213 | bochs_fbdev_destroy(bochs); | ||
214 | bochs->fb.initialized = false; | ||
215 | } | ||
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c new file mode 100644 index 000000000000..dbe619e6aab4 --- /dev/null +++ b/drivers/gpu/drm/bochs/bochs_hw.c | |||
@@ -0,0 +1,177 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | */ | ||
7 | |||
8 | #include "bochs.h" | ||
9 | |||
10 | /* ---------------------------------------------------------------------- */ | ||
11 | |||
12 | static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val) | ||
13 | { | ||
14 | if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df)) | ||
15 | return; | ||
16 | |||
17 | if (bochs->mmio) { | ||
18 | int offset = ioport - 0x3c0 + 0x400; | ||
19 | writeb(val, bochs->mmio + offset); | ||
20 | } else { | ||
21 | outb(val, ioport); | ||
22 | } | ||
23 | } | ||
24 | |||
25 | static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg) | ||
26 | { | ||
27 | u16 ret = 0; | ||
28 | |||
29 | if (bochs->mmio) { | ||
30 | int offset = 0x500 + (reg << 1); | ||
31 | ret = readw(bochs->mmio + offset); | ||
32 | } else { | ||
33 | outw(reg, VBE_DISPI_IOPORT_INDEX); | ||
34 | ret = inw(VBE_DISPI_IOPORT_DATA); | ||
35 | } | ||
36 | return ret; | ||
37 | } | ||
38 | |||
39 | static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val) | ||
40 | { | ||
41 | if (bochs->mmio) { | ||
42 | int offset = 0x500 + (reg << 1); | ||
43 | writew(val, bochs->mmio + offset); | ||
44 | } else { | ||
45 | outw(reg, VBE_DISPI_IOPORT_INDEX); | ||
46 | outw(val, VBE_DISPI_IOPORT_DATA); | ||
47 | } | ||
48 | } | ||
49 | |||
50 | int bochs_hw_init(struct drm_device *dev, uint32_t flags) | ||
51 | { | ||
52 | struct bochs_device *bochs = dev->dev_private; | ||
53 | struct pci_dev *pdev = dev->pdev; | ||
54 | unsigned long addr, size, mem, ioaddr, iosize; | ||
55 | u16 id; | ||
56 | |||
57 | if (/* (ent->driver_data == BOCHS_QEMU_STDVGA) && */ | ||
58 | (pdev->resource[2].flags & IORESOURCE_MEM)) { | ||
59 | /* mmio bar with vga and bochs registers present */ | ||
60 | if (pci_request_region(pdev, 2, "bochs-drm") != 0) { | ||
61 | DRM_ERROR("Cannot request mmio region\n"); | ||
62 | return -EBUSY; | ||
63 | } | ||
64 | ioaddr = pci_resource_start(pdev, 2); | ||
65 | iosize = pci_resource_len(pdev, 2); | ||
66 | bochs->mmio = ioremap(ioaddr, iosize); | ||
67 | if (bochs->mmio == NULL) { | ||
68 | DRM_ERROR("Cannot map mmio region\n"); | ||
69 | return -ENOMEM; | ||
70 | } | ||
71 | } else { | ||
72 | ioaddr = VBE_DISPI_IOPORT_INDEX; | ||
73 | iosize = 2; | ||
74 | if (!request_region(ioaddr, iosize, "bochs-drm")) { | ||
75 | DRM_ERROR("Cannot request ioports\n"); | ||
76 | return -EBUSY; | ||
77 | } | ||
78 | bochs->ioports = 1; | ||
79 | } | ||
80 | |||
81 | id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID); | ||
82 | mem = bochs_dispi_read(bochs, VBE_DISPI_INDEX_VIDEO_MEMORY_64K) | ||
83 | * 64 * 1024; | ||
84 | if ((id & 0xfff0) != VBE_DISPI_ID0) { | ||
85 | DRM_ERROR("ID mismatch\n"); | ||
86 | return -ENODEV; | ||
87 | } | ||
88 | |||
89 | if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0) | ||
90 | return -ENODEV; | ||
91 | addr = pci_resource_start(pdev, 0); | ||
92 | size = pci_resource_len(pdev, 0); | ||
93 | if (addr == 0) | ||
94 | return -ENODEV; | ||
95 | if (size != mem) { | ||
96 | DRM_ERROR("Size mismatch: pci=%ld, bochs=%ld\n", | ||
97 | size, mem); | ||
98 | size = min(size, mem); | ||
99 | } | ||
100 | |||
101 | if (pci_request_region(pdev, 0, "bochs-drm") != 0) { | ||
102 | DRM_ERROR("Cannot request framebuffer\n"); | ||
103 | return -EBUSY; | ||
104 | } | ||
105 | |||
106 | bochs->fb_map = ioremap(addr, size); | ||
107 | if (bochs->fb_map == NULL) { | ||
108 | DRM_ERROR("Cannot map framebuffer\n"); | ||
109 | return -ENOMEM; | ||
110 | } | ||
111 | bochs->fb_base = addr; | ||
112 | bochs->fb_size = size; | ||
113 | |||
114 | DRM_INFO("Found bochs VGA, ID 0x%x.\n", id); | ||
115 | DRM_INFO("Framebuffer size %ld kB @ 0x%lx, %s @ 0x%lx.\n", | ||
116 | size / 1024, addr, | ||
117 | bochs->ioports ? "ioports" : "mmio", | ||
118 | ioaddr); | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | void bochs_hw_fini(struct drm_device *dev) | ||
123 | { | ||
124 | struct bochs_device *bochs = dev->dev_private; | ||
125 | |||
126 | if (bochs->mmio) | ||
127 | iounmap(bochs->mmio); | ||
128 | if (bochs->ioports) | ||
129 | release_region(VBE_DISPI_IOPORT_INDEX, 2); | ||
130 | if (bochs->fb_map) | ||
131 | iounmap(bochs->fb_map); | ||
132 | pci_release_regions(dev->pdev); | ||
133 | } | ||
134 | |||
135 | void bochs_hw_setmode(struct bochs_device *bochs, | ||
136 | struct drm_display_mode *mode) | ||
137 | { | ||
138 | bochs->xres = mode->hdisplay; | ||
139 | bochs->yres = mode->vdisplay; | ||
140 | bochs->bpp = 32; | ||
141 | bochs->stride = mode->hdisplay * (bochs->bpp / 8); | ||
142 | bochs->yres_virtual = bochs->fb_size / bochs->stride; | ||
143 | |||
144 | DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n", | ||
145 | bochs->xres, bochs->yres, bochs->bpp, | ||
146 | bochs->yres_virtual); | ||
147 | |||
148 | bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */ | ||
149 | |||
150 | bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp); | ||
151 | bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres); | ||
152 | bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES, bochs->yres); | ||
153 | bochs_dispi_write(bochs, VBE_DISPI_INDEX_BANK, 0); | ||
154 | bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, bochs->xres); | ||
155 | bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_HEIGHT, | ||
156 | bochs->yres_virtual); | ||
157 | bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, 0); | ||
158 | bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, 0); | ||
159 | |||
160 | bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, | ||
161 | VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED); | ||
162 | } | ||
163 | |||
164 | void bochs_hw_setbase(struct bochs_device *bochs, | ||
165 | int x, int y, u64 addr) | ||
166 | { | ||
167 | unsigned long offset = (unsigned long)addr + | ||
168 | y * bochs->stride + | ||
169 | x * (bochs->bpp / 8); | ||
170 | int vy = offset / bochs->stride; | ||
171 | int vx = (offset % bochs->stride) * 8 / bochs->bpp; | ||
172 | |||
173 | DRM_DEBUG_DRIVER("x %d, y %d, addr %llx -> offset %lx, vx %d, vy %d\n", | ||
174 | x, y, addr, offset, vx, vy); | ||
175 | bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx); | ||
176 | bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy); | ||
177 | } | ||
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c new file mode 100644 index 000000000000..62ec7d4b3816 --- /dev/null +++ b/drivers/gpu/drm/bochs/bochs_kms.c | |||
@@ -0,0 +1,294 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | */ | ||
7 | |||
8 | #include "bochs.h" | ||
9 | |||
10 | static int defx = 1024; | ||
11 | static int defy = 768; | ||
12 | |||
13 | module_param(defx, int, 0444); | ||
14 | module_param(defy, int, 0444); | ||
15 | MODULE_PARM_DESC(defx, "default x resolution"); | ||
16 | MODULE_PARM_DESC(defy, "default y resolution"); | ||
17 | |||
18 | /* ---------------------------------------------------------------------- */ | ||
19 | |||
20 | static void bochs_crtc_load_lut(struct drm_crtc *crtc) | ||
21 | { | ||
22 | } | ||
23 | |||
24 | static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
25 | { | ||
26 | switch (mode) { | ||
27 | case DRM_MODE_DPMS_ON: | ||
28 | case DRM_MODE_DPMS_STANDBY: | ||
29 | case DRM_MODE_DPMS_SUSPEND: | ||
30 | case DRM_MODE_DPMS_OFF: | ||
31 | default: | ||
32 | return; | ||
33 | } | ||
34 | } | ||
35 | |||
36 | static bool bochs_crtc_mode_fixup(struct drm_crtc *crtc, | ||
37 | const struct drm_display_mode *mode, | ||
38 | struct drm_display_mode *adjusted_mode) | ||
39 | { | ||
40 | return true; | ||
41 | } | ||
42 | |||
43 | static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | ||
44 | struct drm_framebuffer *old_fb) | ||
45 | { | ||
46 | struct bochs_device *bochs = | ||
47 | container_of(crtc, struct bochs_device, crtc); | ||
48 | struct bochs_framebuffer *bochs_fb; | ||
49 | struct bochs_bo *bo; | ||
50 | u64 gpu_addr = 0; | ||
51 | int ret; | ||
52 | |||
53 | if (old_fb) { | ||
54 | bochs_fb = to_bochs_framebuffer(old_fb); | ||
55 | bo = gem_to_bochs_bo(bochs_fb->obj); | ||
56 | ret = ttm_bo_reserve(&bo->bo, true, false, false, 0); | ||
57 | if (ret) { | ||
58 | DRM_ERROR("failed to reserve old_fb bo\n"); | ||
59 | } else { | ||
60 | bochs_bo_unpin(bo); | ||
61 | ttm_bo_unreserve(&bo->bo); | ||
62 | } | ||
63 | } | ||
64 | |||
65 | if (WARN_ON(crtc->fb == NULL)) | ||
66 | return -EINVAL; | ||
67 | |||
68 | bochs_fb = to_bochs_framebuffer(crtc->fb); | ||
69 | bo = gem_to_bochs_bo(bochs_fb->obj); | ||
70 | ret = ttm_bo_reserve(&bo->bo, true, false, false, 0); | ||
71 | if (ret) | ||
72 | return ret; | ||
73 | |||
74 | ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr); | ||
75 | if (ret) { | ||
76 | ttm_bo_unreserve(&bo->bo); | ||
77 | return ret; | ||
78 | } | ||
79 | |||
80 | ttm_bo_unreserve(&bo->bo); | ||
81 | bochs_hw_setbase(bochs, x, y, gpu_addr); | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | static int bochs_crtc_mode_set(struct drm_crtc *crtc, | ||
86 | struct drm_display_mode *mode, | ||
87 | struct drm_display_mode *adjusted_mode, | ||
88 | int x, int y, struct drm_framebuffer *old_fb) | ||
89 | { | ||
90 | struct bochs_device *bochs = | ||
91 | container_of(crtc, struct bochs_device, crtc); | ||
92 | |||
93 | bochs_hw_setmode(bochs, mode); | ||
94 | bochs_crtc_mode_set_base(crtc, x, y, old_fb); | ||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | static void bochs_crtc_prepare(struct drm_crtc *crtc) | ||
99 | { | ||
100 | } | ||
101 | |||
102 | static void bochs_crtc_commit(struct drm_crtc *crtc) | ||
103 | { | ||
104 | } | ||
105 | |||
106 | static void bochs_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
107 | u16 *blue, uint32_t start, uint32_t size) | ||
108 | { | ||
109 | } | ||
110 | |||
111 | /* These provide the minimum set of functions required to handle a CRTC */ | ||
112 | static const struct drm_crtc_funcs bochs_crtc_funcs = { | ||
113 | .gamma_set = bochs_crtc_gamma_set, | ||
114 | .set_config = drm_crtc_helper_set_config, | ||
115 | .destroy = drm_crtc_cleanup, | ||
116 | }; | ||
117 | |||
118 | static const struct drm_crtc_helper_funcs bochs_helper_funcs = { | ||
119 | .dpms = bochs_crtc_dpms, | ||
120 | .mode_fixup = bochs_crtc_mode_fixup, | ||
121 | .mode_set = bochs_crtc_mode_set, | ||
122 | .mode_set_base = bochs_crtc_mode_set_base, | ||
123 | .prepare = bochs_crtc_prepare, | ||
124 | .commit = bochs_crtc_commit, | ||
125 | .load_lut = bochs_crtc_load_lut, | ||
126 | }; | ||
127 | |||
128 | static void bochs_crtc_init(struct drm_device *dev) | ||
129 | { | ||
130 | struct bochs_device *bochs = dev->dev_private; | ||
131 | struct drm_crtc *crtc = &bochs->crtc; | ||
132 | |||
133 | drm_crtc_init(dev, crtc, &bochs_crtc_funcs); | ||
134 | drm_mode_crtc_set_gamma_size(crtc, 256); | ||
135 | drm_crtc_helper_add(crtc, &bochs_helper_funcs); | ||
136 | } | ||
137 | |||
138 | static bool bochs_encoder_mode_fixup(struct drm_encoder *encoder, | ||
139 | const struct drm_display_mode *mode, | ||
140 | struct drm_display_mode *adjusted_mode) | ||
141 | { | ||
142 | return true; | ||
143 | } | ||
144 | |||
145 | static void bochs_encoder_mode_set(struct drm_encoder *encoder, | ||
146 | struct drm_display_mode *mode, | ||
147 | struct drm_display_mode *adjusted_mode) | ||
148 | { | ||
149 | } | ||
150 | |||
151 | static void bochs_encoder_dpms(struct drm_encoder *encoder, int state) | ||
152 | { | ||
153 | } | ||
154 | |||
155 | static void bochs_encoder_prepare(struct drm_encoder *encoder) | ||
156 | { | ||
157 | } | ||
158 | |||
159 | static void bochs_encoder_commit(struct drm_encoder *encoder) | ||
160 | { | ||
161 | } | ||
162 | |||
163 | static const struct drm_encoder_helper_funcs bochs_encoder_helper_funcs = { | ||
164 | .dpms = bochs_encoder_dpms, | ||
165 | .mode_fixup = bochs_encoder_mode_fixup, | ||
166 | .mode_set = bochs_encoder_mode_set, | ||
167 | .prepare = bochs_encoder_prepare, | ||
168 | .commit = bochs_encoder_commit, | ||
169 | }; | ||
170 | |||
171 | static const struct drm_encoder_funcs bochs_encoder_encoder_funcs = { | ||
172 | .destroy = drm_encoder_cleanup, | ||
173 | }; | ||
174 | |||
175 | static void bochs_encoder_init(struct drm_device *dev) | ||
176 | { | ||
177 | struct bochs_device *bochs = dev->dev_private; | ||
178 | struct drm_encoder *encoder = &bochs->encoder; | ||
179 | |||
180 | encoder->possible_crtcs = 0x1; | ||
181 | drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs, | ||
182 | DRM_MODE_ENCODER_DAC); | ||
183 | drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs); | ||
184 | } | ||
185 | |||
186 | |||
187 | int bochs_connector_get_modes(struct drm_connector *connector) | ||
188 | { | ||
189 | int count; | ||
190 | |||
191 | count = drm_add_modes_noedid(connector, 8192, 8192); | ||
192 | drm_set_preferred_mode(connector, defx, defy); | ||
193 | return count; | ||
194 | } | ||
195 | |||
196 | static int bochs_connector_mode_valid(struct drm_connector *connector, | ||
197 | struct drm_display_mode *mode) | ||
198 | { | ||
199 | struct bochs_device *bochs = | ||
200 | container_of(connector, struct bochs_device, connector); | ||
201 | unsigned long size = mode->hdisplay * mode->vdisplay * 4; | ||
202 | |||
203 | /* | ||
204 | * Make sure we can fit two framebuffers into video memory. | ||
205 | * This allows up to 1600x1200 with 16 MB (default size). | ||
206 | * If you want more try this: | ||
207 | * 'qemu -vga std -global VGA.vgamem_mb=32 $otherargs' | ||
208 | */ | ||
209 | if (size * 2 > bochs->fb_size) | ||
210 | return MODE_BAD; | ||
211 | |||
212 | return MODE_OK; | ||
213 | } | ||
214 | |||
215 | static struct drm_encoder * | ||
216 | bochs_connector_best_encoder(struct drm_connector *connector) | ||
217 | { | ||
218 | int enc_id = connector->encoder_ids[0]; | ||
219 | struct drm_mode_object *obj; | ||
220 | struct drm_encoder *encoder; | ||
221 | |||
222 | /* pick the encoder ids */ | ||
223 | if (enc_id) { | ||
224 | obj = drm_mode_object_find(connector->dev, enc_id, | ||
225 | DRM_MODE_OBJECT_ENCODER); | ||
226 | if (!obj) | ||
227 | return NULL; | ||
228 | encoder = obj_to_encoder(obj); | ||
229 | return encoder; | ||
230 | } | ||
231 | return NULL; | ||
232 | } | ||
233 | |||
234 | static enum drm_connector_status bochs_connector_detect(struct drm_connector | ||
235 | *connector, bool force) | ||
236 | { | ||
237 | return connector_status_connected; | ||
238 | } | ||
239 | |||
240 | struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = { | ||
241 | .get_modes = bochs_connector_get_modes, | ||
242 | .mode_valid = bochs_connector_mode_valid, | ||
243 | .best_encoder = bochs_connector_best_encoder, | ||
244 | }; | ||
245 | |||
246 | struct drm_connector_funcs bochs_connector_connector_funcs = { | ||
247 | .dpms = drm_helper_connector_dpms, | ||
248 | .detect = bochs_connector_detect, | ||
249 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
250 | .destroy = drm_connector_cleanup, | ||
251 | }; | ||
252 | |||
253 | static void bochs_connector_init(struct drm_device *dev) | ||
254 | { | ||
255 | struct bochs_device *bochs = dev->dev_private; | ||
256 | struct drm_connector *connector = &bochs->connector; | ||
257 | |||
258 | drm_connector_init(dev, connector, &bochs_connector_connector_funcs, | ||
259 | DRM_MODE_CONNECTOR_VIRTUAL); | ||
260 | drm_connector_helper_add(connector, | ||
261 | &bochs_connector_connector_helper_funcs); | ||
262 | } | ||
263 | |||
264 | |||
265 | int bochs_kms_init(struct bochs_device *bochs) | ||
266 | { | ||
267 | drm_mode_config_init(bochs->dev); | ||
268 | bochs->mode_config_initialized = true; | ||
269 | |||
270 | bochs->dev->mode_config.max_width = 8192; | ||
271 | bochs->dev->mode_config.max_height = 8192; | ||
272 | |||
273 | bochs->dev->mode_config.fb_base = bochs->fb_base; | ||
274 | bochs->dev->mode_config.preferred_depth = 24; | ||
275 | bochs->dev->mode_config.prefer_shadow = 0; | ||
276 | |||
277 | bochs->dev->mode_config.funcs = (void *)&bochs_mode_funcs; | ||
278 | |||
279 | bochs_crtc_init(bochs->dev); | ||
280 | bochs_encoder_init(bochs->dev); | ||
281 | bochs_connector_init(bochs->dev); | ||
282 | drm_mode_connector_attach_encoder(&bochs->connector, | ||
283 | &bochs->encoder); | ||
284 | |||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | void bochs_kms_fini(struct bochs_device *bochs) | ||
289 | { | ||
290 | if (bochs->mode_config_initialized) { | ||
291 | drm_mode_config_cleanup(bochs->dev); | ||
292 | bochs->mode_config_initialized = false; | ||
293 | } | ||
294 | } | ||
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c new file mode 100644 index 000000000000..ce6858765b37 --- /dev/null +++ b/drivers/gpu/drm/bochs/bochs_mm.c | |||
@@ -0,0 +1,546 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | */ | ||
7 | |||
8 | #include "bochs.h" | ||
9 | |||
10 | static void bochs_ttm_placement(struct bochs_bo *bo, int domain); | ||
11 | |||
12 | /* ---------------------------------------------------------------------- */ | ||
13 | |||
14 | static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd) | ||
15 | { | ||
16 | return container_of(bd, struct bochs_device, ttm.bdev); | ||
17 | } | ||
18 | |||
19 | static int bochs_ttm_mem_global_init(struct drm_global_reference *ref) | ||
20 | { | ||
21 | return ttm_mem_global_init(ref->object); | ||
22 | } | ||
23 | |||
24 | static void bochs_ttm_mem_global_release(struct drm_global_reference *ref) | ||
25 | { | ||
26 | ttm_mem_global_release(ref->object); | ||
27 | } | ||
28 | |||
29 | static int bochs_ttm_global_init(struct bochs_device *bochs) | ||
30 | { | ||
31 | struct drm_global_reference *global_ref; | ||
32 | int r; | ||
33 | |||
34 | global_ref = &bochs->ttm.mem_global_ref; | ||
35 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
36 | global_ref->size = sizeof(struct ttm_mem_global); | ||
37 | global_ref->init = &bochs_ttm_mem_global_init; | ||
38 | global_ref->release = &bochs_ttm_mem_global_release; | ||
39 | r = drm_global_item_ref(global_ref); | ||
40 | if (r != 0) { | ||
41 | DRM_ERROR("Failed setting up TTM memory accounting " | ||
42 | "subsystem.\n"); | ||
43 | return r; | ||
44 | } | ||
45 | |||
46 | bochs->ttm.bo_global_ref.mem_glob = | ||
47 | bochs->ttm.mem_global_ref.object; | ||
48 | global_ref = &bochs->ttm.bo_global_ref.ref; | ||
49 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
50 | global_ref->size = sizeof(struct ttm_bo_global); | ||
51 | global_ref->init = &ttm_bo_global_init; | ||
52 | global_ref->release = &ttm_bo_global_release; | ||
53 | r = drm_global_item_ref(global_ref); | ||
54 | if (r != 0) { | ||
55 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | ||
56 | drm_global_item_unref(&bochs->ttm.mem_global_ref); | ||
57 | return r; | ||
58 | } | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | static void bochs_ttm_global_release(struct bochs_device *bochs) | ||
64 | { | ||
65 | if (bochs->ttm.mem_global_ref.release == NULL) | ||
66 | return; | ||
67 | |||
68 | drm_global_item_unref(&bochs->ttm.bo_global_ref.ref); | ||
69 | drm_global_item_unref(&bochs->ttm.mem_global_ref); | ||
70 | bochs->ttm.mem_global_ref.release = NULL; | ||
71 | } | ||
72 | |||
73 | |||
74 | static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo) | ||
75 | { | ||
76 | struct bochs_bo *bo; | ||
77 | |||
78 | bo = container_of(tbo, struct bochs_bo, bo); | ||
79 | drm_gem_object_release(&bo->gem); | ||
80 | kfree(bo); | ||
81 | } | ||
82 | |||
83 | static bool bochs_ttm_bo_is_bochs_bo(struct ttm_buffer_object *bo) | ||
84 | { | ||
85 | if (bo->destroy == &bochs_bo_ttm_destroy) | ||
86 | return true; | ||
87 | return false; | ||
88 | } | ||
89 | |||
90 | static int bochs_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | ||
91 | struct ttm_mem_type_manager *man) | ||
92 | { | ||
93 | switch (type) { | ||
94 | case TTM_PL_SYSTEM: | ||
95 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; | ||
96 | man->available_caching = TTM_PL_MASK_CACHING; | ||
97 | man->default_caching = TTM_PL_FLAG_CACHED; | ||
98 | break; | ||
99 | case TTM_PL_VRAM: | ||
100 | man->func = &ttm_bo_manager_func; | ||
101 | man->flags = TTM_MEMTYPE_FLAG_FIXED | | ||
102 | TTM_MEMTYPE_FLAG_MAPPABLE; | ||
103 | man->available_caching = TTM_PL_FLAG_UNCACHED | | ||
104 | TTM_PL_FLAG_WC; | ||
105 | man->default_caching = TTM_PL_FLAG_WC; | ||
106 | break; | ||
107 | default: | ||
108 | DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); | ||
109 | return -EINVAL; | ||
110 | } | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static void | ||
115 | bochs_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) | ||
116 | { | ||
117 | struct bochs_bo *bochsbo = bochs_bo(bo); | ||
118 | |||
119 | if (!bochs_ttm_bo_is_bochs_bo(bo)) | ||
120 | return; | ||
121 | |||
122 | bochs_ttm_placement(bochsbo, TTM_PL_FLAG_SYSTEM); | ||
123 | *pl = bochsbo->placement; | ||
124 | } | ||
125 | |||
126 | static int bochs_bo_verify_access(struct ttm_buffer_object *bo, | ||
127 | struct file *filp) | ||
128 | { | ||
129 | struct bochs_bo *bochsbo = bochs_bo(bo); | ||
130 | |||
131 | return drm_vma_node_verify_access(&bochsbo->gem.vma_node, filp); | ||
132 | } | ||
133 | |||
134 | static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev, | ||
135 | struct ttm_mem_reg *mem) | ||
136 | { | ||
137 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | ||
138 | struct bochs_device *bochs = bochs_bdev(bdev); | ||
139 | |||
140 | mem->bus.addr = NULL; | ||
141 | mem->bus.offset = 0; | ||
142 | mem->bus.size = mem->num_pages << PAGE_SHIFT; | ||
143 | mem->bus.base = 0; | ||
144 | mem->bus.is_iomem = false; | ||
145 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) | ||
146 | return -EINVAL; | ||
147 | switch (mem->mem_type) { | ||
148 | case TTM_PL_SYSTEM: | ||
149 | /* system memory */ | ||
150 | return 0; | ||
151 | case TTM_PL_VRAM: | ||
152 | mem->bus.offset = mem->start << PAGE_SHIFT; | ||
153 | mem->bus.base = bochs->fb_base; | ||
154 | mem->bus.is_iomem = true; | ||
155 | break; | ||
156 | default: | ||
157 | return -EINVAL; | ||
158 | break; | ||
159 | } | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev, | ||
164 | struct ttm_mem_reg *mem) | ||
165 | { | ||
166 | } | ||
167 | |||
168 | static int bochs_bo_move(struct ttm_buffer_object *bo, | ||
169 | bool evict, bool interruptible, | ||
170 | bool no_wait_gpu, | ||
171 | struct ttm_mem_reg *new_mem) | ||
172 | { | ||
173 | return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); | ||
174 | } | ||
175 | |||
176 | |||
177 | static void bochs_ttm_backend_destroy(struct ttm_tt *tt) | ||
178 | { | ||
179 | ttm_tt_fini(tt); | ||
180 | kfree(tt); | ||
181 | } | ||
182 | |||
183 | static struct ttm_backend_func bochs_tt_backend_func = { | ||
184 | .destroy = &bochs_ttm_backend_destroy, | ||
185 | }; | ||
186 | |||
187 | static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev, | ||
188 | unsigned long size, | ||
189 | uint32_t page_flags, | ||
190 | struct page *dummy_read_page) | ||
191 | { | ||
192 | struct ttm_tt *tt; | ||
193 | |||
194 | tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL); | ||
195 | if (tt == NULL) | ||
196 | return NULL; | ||
197 | tt->func = &bochs_tt_backend_func; | ||
198 | if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) { | ||
199 | kfree(tt); | ||
200 | return NULL; | ||
201 | } | ||
202 | return tt; | ||
203 | } | ||
204 | |||
205 | struct ttm_bo_driver bochs_bo_driver = { | ||
206 | .ttm_tt_create = bochs_ttm_tt_create, | ||
207 | .ttm_tt_populate = ttm_pool_populate, | ||
208 | .ttm_tt_unpopulate = ttm_pool_unpopulate, | ||
209 | .init_mem_type = bochs_bo_init_mem_type, | ||
210 | .evict_flags = bochs_bo_evict_flags, | ||
211 | .move = bochs_bo_move, | ||
212 | .verify_access = bochs_bo_verify_access, | ||
213 | .io_mem_reserve = &bochs_ttm_io_mem_reserve, | ||
214 | .io_mem_free = &bochs_ttm_io_mem_free, | ||
215 | }; | ||
216 | |||
217 | int bochs_mm_init(struct bochs_device *bochs) | ||
218 | { | ||
219 | struct ttm_bo_device *bdev = &bochs->ttm.bdev; | ||
220 | int ret; | ||
221 | |||
222 | ret = bochs_ttm_global_init(bochs); | ||
223 | if (ret) | ||
224 | return ret; | ||
225 | |||
226 | ret = ttm_bo_device_init(&bochs->ttm.bdev, | ||
227 | bochs->ttm.bo_global_ref.ref.object, | ||
228 | &bochs_bo_driver, DRM_FILE_PAGE_OFFSET, | ||
229 | true); | ||
230 | if (ret) { | ||
231 | DRM_ERROR("Error initialising bo driver; %d\n", ret); | ||
232 | return ret; | ||
233 | } | ||
234 | |||
235 | ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, | ||
236 | bochs->fb_size >> PAGE_SHIFT); | ||
237 | if (ret) { | ||
238 | DRM_ERROR("Failed ttm VRAM init: %d\n", ret); | ||
239 | return ret; | ||
240 | } | ||
241 | |||
242 | bochs->ttm.initialized = true; | ||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | void bochs_mm_fini(struct bochs_device *bochs) | ||
247 | { | ||
248 | if (!bochs->ttm.initialized) | ||
249 | return; | ||
250 | |||
251 | ttm_bo_device_release(&bochs->ttm.bdev); | ||
252 | bochs_ttm_global_release(bochs); | ||
253 | bochs->ttm.initialized = false; | ||
254 | } | ||
255 | |||
256 | static void bochs_ttm_placement(struct bochs_bo *bo, int domain) | ||
257 | { | ||
258 | u32 c = 0; | ||
259 | bo->placement.fpfn = 0; | ||
260 | bo->placement.lpfn = 0; | ||
261 | bo->placement.placement = bo->placements; | ||
262 | bo->placement.busy_placement = bo->placements; | ||
263 | if (domain & TTM_PL_FLAG_VRAM) { | ||
264 | bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | ||
265 | | TTM_PL_FLAG_VRAM; | ||
266 | } | ||
267 | if (domain & TTM_PL_FLAG_SYSTEM) { | ||
268 | bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | ||
269 | } | ||
270 | if (!c) { | ||
271 | bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | ||
272 | } | ||
273 | bo->placement.num_placement = c; | ||
274 | bo->placement.num_busy_placement = c; | ||
275 | } | ||
276 | |||
277 | static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo) | ||
278 | { | ||
279 | return bo->bo.offset; | ||
280 | } | ||
281 | |||
282 | int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr) | ||
283 | { | ||
284 | int i, ret; | ||
285 | |||
286 | if (bo->pin_count) { | ||
287 | bo->pin_count++; | ||
288 | if (gpu_addr) | ||
289 | *gpu_addr = bochs_bo_gpu_offset(bo); | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | bochs_ttm_placement(bo, pl_flag); | ||
294 | for (i = 0; i < bo->placement.num_placement; i++) | ||
295 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; | ||
296 | ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); | ||
297 | if (ret) | ||
298 | return ret; | ||
299 | |||
300 | bo->pin_count = 1; | ||
301 | if (gpu_addr) | ||
302 | *gpu_addr = bochs_bo_gpu_offset(bo); | ||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | int bochs_bo_unpin(struct bochs_bo *bo) | ||
307 | { | ||
308 | int i, ret; | ||
309 | |||
310 | if (!bo->pin_count) { | ||
311 | DRM_ERROR("unpin bad %p\n", bo); | ||
312 | return 0; | ||
313 | } | ||
314 | bo->pin_count--; | ||
315 | |||
316 | if (bo->pin_count) | ||
317 | return 0; | ||
318 | |||
319 | for (i = 0; i < bo->placement.num_placement; i++) | ||
320 | bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; | ||
321 | ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); | ||
322 | if (ret) | ||
323 | return ret; | ||
324 | |||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | int bochs_mmap(struct file *filp, struct vm_area_struct *vma) | ||
329 | { | ||
330 | struct drm_file *file_priv; | ||
331 | struct bochs_device *bochs; | ||
332 | |||
333 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) | ||
334 | return drm_mmap(filp, vma); | ||
335 | |||
336 | file_priv = filp->private_data; | ||
337 | bochs = file_priv->minor->dev->dev_private; | ||
338 | return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev); | ||
339 | } | ||
340 | |||
341 | /* ---------------------------------------------------------------------- */ | ||
342 | |||
343 | static int bochs_bo_create(struct drm_device *dev, int size, int align, | ||
344 | uint32_t flags, struct bochs_bo **pbochsbo) | ||
345 | { | ||
346 | struct bochs_device *bochs = dev->dev_private; | ||
347 | struct bochs_bo *bochsbo; | ||
348 | size_t acc_size; | ||
349 | int ret; | ||
350 | |||
351 | bochsbo = kzalloc(sizeof(struct bochs_bo), GFP_KERNEL); | ||
352 | if (!bochsbo) | ||
353 | return -ENOMEM; | ||
354 | |||
355 | ret = drm_gem_object_init(dev, &bochsbo->gem, size); | ||
356 | if (ret) { | ||
357 | kfree(bochsbo); | ||
358 | return ret; | ||
359 | } | ||
360 | |||
361 | bochsbo->bo.bdev = &bochs->ttm.bdev; | ||
362 | bochsbo->bo.bdev->dev_mapping = dev->dev_mapping; | ||
363 | |||
364 | bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | ||
365 | |||
366 | acc_size = ttm_bo_dma_acc_size(&bochs->ttm.bdev, size, | ||
367 | sizeof(struct bochs_bo)); | ||
368 | |||
369 | ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size, | ||
370 | ttm_bo_type_device, &bochsbo->placement, | ||
371 | align >> PAGE_SHIFT, false, NULL, acc_size, | ||
372 | NULL, bochs_bo_ttm_destroy); | ||
373 | if (ret) | ||
374 | return ret; | ||
375 | |||
376 | *pbochsbo = bochsbo; | ||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel, | ||
381 | struct drm_gem_object **obj) | ||
382 | { | ||
383 | struct bochs_bo *bochsbo; | ||
384 | int ret; | ||
385 | |||
386 | *obj = NULL; | ||
387 | |||
388 | size = ALIGN(size, PAGE_SIZE); | ||
389 | if (size == 0) | ||
390 | return -EINVAL; | ||
391 | |||
392 | ret = bochs_bo_create(dev, size, 0, 0, &bochsbo); | ||
393 | if (ret) { | ||
394 | if (ret != -ERESTARTSYS) | ||
395 | DRM_ERROR("failed to allocate GEM object\n"); | ||
396 | return ret; | ||
397 | } | ||
398 | *obj = &bochsbo->gem; | ||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | int bochs_dumb_create(struct drm_file *file, struct drm_device *dev, | ||
403 | struct drm_mode_create_dumb *args) | ||
404 | { | ||
405 | struct drm_gem_object *gobj; | ||
406 | u32 handle; | ||
407 | int ret; | ||
408 | |||
409 | args->pitch = args->width * ((args->bpp + 7) / 8); | ||
410 | args->size = args->pitch * args->height; | ||
411 | |||
412 | ret = bochs_gem_create(dev, args->size, false, | ||
413 | &gobj); | ||
414 | if (ret) | ||
415 | return ret; | ||
416 | |||
417 | ret = drm_gem_handle_create(file, gobj, &handle); | ||
418 | drm_gem_object_unreference_unlocked(gobj); | ||
419 | if (ret) | ||
420 | return ret; | ||
421 | |||
422 | args->handle = handle; | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | static void bochs_bo_unref(struct bochs_bo **bo) | ||
427 | { | ||
428 | struct ttm_buffer_object *tbo; | ||
429 | |||
430 | if ((*bo) == NULL) | ||
431 | return; | ||
432 | |||
433 | tbo = &((*bo)->bo); | ||
434 | ttm_bo_unref(&tbo); | ||
435 | if (tbo == NULL) | ||
436 | *bo = NULL; | ||
437 | |||
438 | } | ||
439 | |||
440 | void bochs_gem_free_object(struct drm_gem_object *obj) | ||
441 | { | ||
442 | struct bochs_bo *bochs_bo = gem_to_bochs_bo(obj); | ||
443 | |||
444 | if (!bochs_bo) | ||
445 | return; | ||
446 | bochs_bo_unref(&bochs_bo); | ||
447 | } | ||
448 | |||
449 | int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, | ||
450 | uint32_t handle, uint64_t *offset) | ||
451 | { | ||
452 | struct drm_gem_object *obj; | ||
453 | int ret; | ||
454 | struct bochs_bo *bo; | ||
455 | |||
456 | mutex_lock(&dev->struct_mutex); | ||
457 | obj = drm_gem_object_lookup(dev, file, handle); | ||
458 | if (obj == NULL) { | ||
459 | ret = -ENOENT; | ||
460 | goto out_unlock; | ||
461 | } | ||
462 | |||
463 | bo = gem_to_bochs_bo(obj); | ||
464 | *offset = bochs_bo_mmap_offset(bo); | ||
465 | |||
466 | drm_gem_object_unreference(obj); | ||
467 | ret = 0; | ||
468 | out_unlock: | ||
469 | mutex_unlock(&dev->struct_mutex); | ||
470 | return ret; | ||
471 | |||
472 | } | ||
473 | |||
474 | /* ---------------------------------------------------------------------- */ | ||
475 | |||
476 | static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb) | ||
477 | { | ||
478 | struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb); | ||
479 | if (bochs_fb->obj) | ||
480 | drm_gem_object_unreference_unlocked(bochs_fb->obj); | ||
481 | drm_framebuffer_cleanup(fb); | ||
482 | kfree(fb); | ||
483 | } | ||
484 | |||
485 | static const struct drm_framebuffer_funcs bochs_fb_funcs = { | ||
486 | .destroy = bochs_user_framebuffer_destroy, | ||
487 | }; | ||
488 | |||
489 | int bochs_framebuffer_init(struct drm_device *dev, | ||
490 | struct bochs_framebuffer *gfb, | ||
491 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
492 | struct drm_gem_object *obj) | ||
493 | { | ||
494 | int ret; | ||
495 | |||
496 | drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd); | ||
497 | gfb->obj = obj; | ||
498 | ret = drm_framebuffer_init(dev, &gfb->base, &bochs_fb_funcs); | ||
499 | if (ret) { | ||
500 | DRM_ERROR("drm_framebuffer_init failed: %d\n", ret); | ||
501 | return ret; | ||
502 | } | ||
503 | return 0; | ||
504 | } | ||
505 | |||
506 | static struct drm_framebuffer * | ||
507 | bochs_user_framebuffer_create(struct drm_device *dev, | ||
508 | struct drm_file *filp, | ||
509 | struct drm_mode_fb_cmd2 *mode_cmd) | ||
510 | { | ||
511 | struct drm_gem_object *obj; | ||
512 | struct bochs_framebuffer *bochs_fb; | ||
513 | int ret; | ||
514 | |||
515 | DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n", | ||
516 | mode_cmd->width, mode_cmd->height, | ||
517 | (mode_cmd->pixel_format) & 0xff, | ||
518 | (mode_cmd->pixel_format >> 8) & 0xff, | ||
519 | (mode_cmd->pixel_format >> 16) & 0xff, | ||
520 | (mode_cmd->pixel_format >> 24) & 0xff); | ||
521 | |||
522 | if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888) | ||
523 | return ERR_PTR(-ENOENT); | ||
524 | |||
525 | obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]); | ||
526 | if (obj == NULL) | ||
527 | return ERR_PTR(-ENOENT); | ||
528 | |||
529 | bochs_fb = kzalloc(sizeof(*bochs_fb), GFP_KERNEL); | ||
530 | if (!bochs_fb) { | ||
531 | drm_gem_object_unreference_unlocked(obj); | ||
532 | return ERR_PTR(-ENOMEM); | ||
533 | } | ||
534 | |||
535 | ret = bochs_framebuffer_init(dev, bochs_fb, mode_cmd, obj); | ||
536 | if (ret) { | ||
537 | drm_gem_object_unreference_unlocked(obj); | ||
538 | kfree(bochs_fb); | ||
539 | return ERR_PTR(ret); | ||
540 | } | ||
541 | return &bochs_fb->base; | ||
542 | } | ||
543 | |||
544 | const struct drm_mode_config_funcs bochs_mode_funcs = { | ||
545 | .fb_create = bochs_user_framebuffer_create, | ||
546 | }; | ||
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index b27e95666fab..e63a7533f849 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c | |||
@@ -233,6 +233,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper, | |||
233 | info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base; | 233 | info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base; |
234 | info->apertures->ranges[0].size = cdev->mc.vram_size; | 234 | info->apertures->ranges[0].size = cdev->mc.vram_size; |
235 | 235 | ||
236 | info->fix.smem_start = cdev->dev->mode_config.fb_base; | ||
237 | info->fix.smem_len = cdev->mc.vram_size; | ||
238 | |||
236 | info->screen_base = sysram; | 239 | info->screen_base = sysram; |
237 | info->screen_size = size; | 240 | info->screen_size = size; |
238 | 241 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 7e074a4d2848..9d096a0c5f8d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -14,6 +14,8 @@ | |||
14 | #include <drm/drmP.h> | 14 | #include <drm/drmP.h> |
15 | #include <drm/drm_crtc_helper.h> | 15 | #include <drm/drm_crtc_helper.h> |
16 | 16 | ||
17 | #include <linux/anon_inodes.h> | ||
18 | |||
17 | #include <drm/exynos_drm.h> | 19 | #include <drm/exynos_drm.h> |
18 | 20 | ||
19 | #include "exynos_drm_drv.h" | 21 | #include "exynos_drm_drv.h" |
@@ -152,9 +154,14 @@ static int exynos_drm_unload(struct drm_device *dev) | |||
152 | return 0; | 154 | return 0; |
153 | } | 155 | } |
154 | 156 | ||
157 | static const struct file_operations exynos_drm_gem_fops = { | ||
158 | .mmap = exynos_drm_gem_mmap_buffer, | ||
159 | }; | ||
160 | |||
155 | static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) | 161 | static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) |
156 | { | 162 | { |
157 | struct drm_exynos_file_private *file_priv; | 163 | struct drm_exynos_file_private *file_priv; |
164 | struct file *anon_filp; | ||
158 | int ret; | 165 | int ret; |
159 | 166 | ||
160 | file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); | 167 | file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); |
@@ -169,6 +176,16 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) | |||
169 | file->driver_priv = NULL; | 176 | file->driver_priv = NULL; |
170 | } | 177 | } |
171 | 178 | ||
179 | anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops, | ||
180 | NULL, 0); | ||
181 | if (IS_ERR(anon_filp)) { | ||
182 | kfree(file_priv); | ||
183 | return PTR_ERR(anon_filp); | ||
184 | } | ||
185 | |||
186 | anon_filp->f_mode = FMODE_READ | FMODE_WRITE; | ||
187 | file_priv->anon_filp = anon_filp; | ||
188 | |||
172 | return ret; | 189 | return ret; |
173 | } | 190 | } |
174 | 191 | ||
@@ -181,6 +198,7 @@ static void exynos_drm_preclose(struct drm_device *dev, | |||
181 | static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) | 198 | static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) |
182 | { | 199 | { |
183 | struct exynos_drm_private *private = dev->dev_private; | 200 | struct exynos_drm_private *private = dev->dev_private; |
201 | struct drm_exynos_file_private *file_priv; | ||
184 | struct drm_pending_vblank_event *v, *vt; | 202 | struct drm_pending_vblank_event *v, *vt; |
185 | struct drm_pending_event *e, *et; | 203 | struct drm_pending_event *e, *et; |
186 | unsigned long flags; | 204 | unsigned long flags; |
@@ -206,6 +224,9 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) | |||
206 | } | 224 | } |
207 | spin_unlock_irqrestore(&dev->event_lock, flags); | 225 | spin_unlock_irqrestore(&dev->event_lock, flags); |
208 | 226 | ||
227 | file_priv = file->driver_priv; | ||
228 | if (file_priv->anon_filp) | ||
229 | fput(file_priv->anon_filp); | ||
209 | 230 | ||
210 | kfree(file->driver_priv); | 231 | kfree(file->driver_priv); |
211 | file->driver_priv = NULL; | 232 | file->driver_priv = NULL; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index eaa19668bf00..0eaf5a27e120 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h | |||
@@ -226,6 +226,7 @@ struct exynos_drm_ipp_private { | |||
226 | struct drm_exynos_file_private { | 226 | struct drm_exynos_file_private { |
227 | struct exynos_drm_g2d_private *g2d_priv; | 227 | struct exynos_drm_g2d_private *g2d_priv; |
228 | struct exynos_drm_ipp_private *ipp_priv; | 228 | struct exynos_drm_ipp_private *ipp_priv; |
229 | struct file *anon_filp; | ||
229 | }; | 230 | }; |
230 | 231 | ||
231 | /* | 232 | /* |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 267aca91b70d..a20440ce32e6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
@@ -347,7 +347,7 @@ static void fimd_wait_for_vblank(struct device *dev) | |||
347 | */ | 347 | */ |
348 | if (!wait_event_timeout(ctx->wait_vsync_queue, | 348 | if (!wait_event_timeout(ctx->wait_vsync_queue, |
349 | !atomic_read(&ctx->wait_vsync_event), | 349 | !atomic_read(&ctx->wait_vsync_event), |
350 | DRM_HZ/20)) | 350 | HZ/20)) |
351 | DRM_DEBUG_KMS("vblank wait timed out.\n"); | 351 | DRM_DEBUG_KMS("vblank wait timed out.\n"); |
352 | } | 352 | } |
353 | 353 | ||
@@ -706,7 +706,7 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id) | |||
706 | /* set wait vsync event to zero and wake up queue. */ | 706 | /* set wait vsync event to zero and wake up queue. */ |
707 | if (atomic_read(&ctx->wait_vsync_event)) { | 707 | if (atomic_read(&ctx->wait_vsync_event)) { |
708 | atomic_set(&ctx->wait_vsync_event, 0); | 708 | atomic_set(&ctx->wait_vsync_event, 0); |
709 | DRM_WAKEUP(&ctx->wait_vsync_queue); | 709 | wake_up(&ctx->wait_vsync_queue); |
710 | } | 710 | } |
711 | out: | 711 | out: |
712 | return IRQ_HANDLED; | 712 | return IRQ_HANDLED; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 1ade191d84f4..49b8c9b22902 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
@@ -338,46 +338,22 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, | |||
338 | &args->offset); | 338 | &args->offset); |
339 | } | 339 | } |
340 | 340 | ||
341 | static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev, | 341 | int exynos_drm_gem_mmap_buffer(struct file *filp, |
342 | struct file *filp) | ||
343 | { | ||
344 | struct drm_file *file_priv; | ||
345 | |||
346 | /* find current process's drm_file from filelist. */ | ||
347 | list_for_each_entry(file_priv, &drm_dev->filelist, lhead) | ||
348 | if (file_priv->filp == filp) | ||
349 | return file_priv; | ||
350 | |||
351 | WARN_ON(1); | ||
352 | |||
353 | return ERR_PTR(-EFAULT); | ||
354 | } | ||
355 | |||
356 | static int exynos_drm_gem_mmap_buffer(struct file *filp, | ||
357 | struct vm_area_struct *vma) | 342 | struct vm_area_struct *vma) |
358 | { | 343 | { |
359 | struct drm_gem_object *obj = filp->private_data; | 344 | struct drm_gem_object *obj = filp->private_data; |
360 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | 345 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); |
361 | struct drm_device *drm_dev = obj->dev; | 346 | struct drm_device *drm_dev = obj->dev; |
362 | struct exynos_drm_gem_buf *buffer; | 347 | struct exynos_drm_gem_buf *buffer; |
363 | struct drm_file *file_priv; | ||
364 | unsigned long vm_size; | 348 | unsigned long vm_size; |
365 | int ret; | 349 | int ret; |
366 | 350 | ||
351 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); | ||
352 | |||
367 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; | 353 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; |
368 | vma->vm_private_data = obj; | 354 | vma->vm_private_data = obj; |
369 | vma->vm_ops = drm_dev->driver->gem_vm_ops; | 355 | vma->vm_ops = drm_dev->driver->gem_vm_ops; |
370 | 356 | ||
371 | /* restore it to driver's fops. */ | ||
372 | filp->f_op = fops_get(drm_dev->driver->fops); | ||
373 | |||
374 | file_priv = exynos_drm_find_drm_file(drm_dev, filp); | ||
375 | if (IS_ERR(file_priv)) | ||
376 | return PTR_ERR(file_priv); | ||
377 | |||
378 | /* restore it to drm_file. */ | ||
379 | filp->private_data = file_priv; | ||
380 | |||
381 | update_vm_cache_attr(exynos_gem_obj, vma); | 357 | update_vm_cache_attr(exynos_gem_obj, vma); |
382 | 358 | ||
383 | vm_size = vma->vm_end - vma->vm_start; | 359 | vm_size = vma->vm_end - vma->vm_start; |
@@ -411,15 +387,13 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, | |||
411 | return 0; | 387 | return 0; |
412 | } | 388 | } |
413 | 389 | ||
414 | static const struct file_operations exynos_drm_gem_fops = { | ||
415 | .mmap = exynos_drm_gem_mmap_buffer, | ||
416 | }; | ||
417 | |||
418 | int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, | 390 | int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, |
419 | struct drm_file *file_priv) | 391 | struct drm_file *file_priv) |
420 | { | 392 | { |
393 | struct drm_exynos_file_private *exynos_file_priv; | ||
421 | struct drm_exynos_gem_mmap *args = data; | 394 | struct drm_exynos_gem_mmap *args = data; |
422 | struct drm_gem_object *obj; | 395 | struct drm_gem_object *obj; |
396 | struct file *anon_filp; | ||
423 | unsigned long addr; | 397 | unsigned long addr; |
424 | 398 | ||
425 | if (!(dev->driver->driver_features & DRIVER_GEM)) { | 399 | if (!(dev->driver->driver_features & DRIVER_GEM)) { |
@@ -427,47 +401,25 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
427 | return -ENODEV; | 401 | return -ENODEV; |
428 | } | 402 | } |
429 | 403 | ||
404 | mutex_lock(&dev->struct_mutex); | ||
405 | |||
430 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 406 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
431 | if (!obj) { | 407 | if (!obj) { |
432 | DRM_ERROR("failed to lookup gem object.\n"); | 408 | DRM_ERROR("failed to lookup gem object.\n"); |
409 | mutex_unlock(&dev->struct_mutex); | ||
433 | return -EINVAL; | 410 | return -EINVAL; |
434 | } | 411 | } |
435 | 412 | ||
436 | /* | 413 | exynos_file_priv = file_priv->driver_priv; |
437 | * We have to use gem object and its fops for specific mmaper, | 414 | anon_filp = exynos_file_priv->anon_filp; |
438 | * but vm_mmap() can deliver only filp. So we have to change | 415 | anon_filp->private_data = obj; |
439 | * filp->f_op and filp->private_data temporarily, then restore | ||
440 | * again. So it is important to keep lock until restoration the | ||
441 | * settings to prevent others from misuse of filp->f_op or | ||
442 | * filp->private_data. | ||
443 | */ | ||
444 | mutex_lock(&dev->struct_mutex); | ||
445 | |||
446 | /* | ||
447 | * Set specific mmper's fops. And it will be restored by | ||
448 | * exynos_drm_gem_mmap_buffer to dev->driver->fops. | ||
449 | * This is used to call specific mapper temporarily. | ||
450 | */ | ||
451 | file_priv->filp->f_op = &exynos_drm_gem_fops; | ||
452 | |||
453 | /* | ||
454 | * Set gem object to private_data so that specific mmaper | ||
455 | * can get the gem object. And it will be restored by | ||
456 | * exynos_drm_gem_mmap_buffer to drm_file. | ||
457 | */ | ||
458 | file_priv->filp->private_data = obj; | ||
459 | 416 | ||
460 | addr = vm_mmap(file_priv->filp, 0, args->size, | 417 | addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE, |
461 | PROT_READ | PROT_WRITE, MAP_SHARED, 0); | 418 | MAP_SHARED, 0); |
462 | 419 | ||
463 | drm_gem_object_unreference(obj); | 420 | drm_gem_object_unreference(obj); |
464 | 421 | ||
465 | if (IS_ERR_VALUE(addr)) { | 422 | if (IS_ERR_VALUE(addr)) { |
466 | /* check filp->f_op, filp->private_data are restored */ | ||
467 | if (file_priv->filp->f_op == &exynos_drm_gem_fops) { | ||
468 | file_priv->filp->f_op = fops_get(dev->driver->fops); | ||
469 | file_priv->filp->private_data = file_priv; | ||
470 | } | ||
471 | mutex_unlock(&dev->struct_mutex); | 423 | mutex_unlock(&dev->struct_mutex); |
472 | return (int)addr; | 424 | return (int)addr; |
473 | } | 425 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 702ec3abe85c..fde860c7eba3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h | |||
@@ -122,6 +122,9 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, | |||
122 | int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, | 122 | int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, |
123 | struct drm_file *file_priv); | 123 | struct drm_file *file_priv); |
124 | 124 | ||
125 | int exynos_drm_gem_mmap_buffer(struct file *filp, | ||
126 | struct vm_area_struct *vma); | ||
127 | |||
125 | /* map user space allocated by malloc to pages. */ | 128 | /* map user space allocated by malloc to pages. */ |
126 | int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, | 129 | int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, |
127 | struct drm_file *file_priv); | 130 | struct drm_file *file_priv); |
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index f39ab7554fc9..bb103fb4519e 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig | |||
@@ -2,8 +2,7 @@ | |||
2 | config DRM_MSM | 2 | config DRM_MSM |
3 | tristate "MSM DRM" | 3 | tristate "MSM DRM" |
4 | depends on DRM | 4 | depends on DRM |
5 | depends on ARCH_MSM | 5 | depends on (ARCH_MSM && ARCH_MSM8960) || (ARM && COMPILE_TEST) |
6 | depends on ARCH_MSM8960 | ||
7 | select DRM_KMS_HELPER | 6 | select DRM_KMS_HELPER |
8 | select SHMEM | 7 | select SHMEM |
9 | select TMPFS | 8 | select TMPFS |
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index e5fa12b0d21e..4f977a593bea 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile | |||
@@ -12,18 +12,27 @@ msm-y := \ | |||
12 | hdmi/hdmi_i2c.o \ | 12 | hdmi/hdmi_i2c.o \ |
13 | hdmi/hdmi_phy_8960.o \ | 13 | hdmi/hdmi_phy_8960.o \ |
14 | hdmi/hdmi_phy_8x60.o \ | 14 | hdmi/hdmi_phy_8x60.o \ |
15 | mdp4/mdp4_crtc.o \ | 15 | hdmi/hdmi_phy_8x74.o \ |
16 | mdp4/mdp4_dtv_encoder.o \ | 16 | mdp/mdp_format.o \ |
17 | mdp4/mdp4_format.o \ | 17 | mdp/mdp_kms.o \ |
18 | mdp4/mdp4_irq.o \ | 18 | mdp/mdp4/mdp4_crtc.o \ |
19 | mdp4/mdp4_kms.o \ | 19 | mdp/mdp4/mdp4_dtv_encoder.o \ |
20 | mdp4/mdp4_plane.o \ | 20 | mdp/mdp4/mdp4_irq.o \ |
21 | mdp/mdp4/mdp4_kms.o \ | ||
22 | mdp/mdp4/mdp4_plane.o \ | ||
23 | mdp/mdp5/mdp5_crtc.o \ | ||
24 | mdp/mdp5/mdp5_encoder.o \ | ||
25 | mdp/mdp5/mdp5_irq.o \ | ||
26 | mdp/mdp5/mdp5_kms.o \ | ||
27 | mdp/mdp5/mdp5_plane.o \ | ||
28 | mdp/mdp5/mdp5_smp.o \ | ||
21 | msm_drv.o \ | 29 | msm_drv.o \ |
22 | msm_fb.o \ | 30 | msm_fb.o \ |
23 | msm_gem.o \ | 31 | msm_gem.o \ |
24 | msm_gem_prime.o \ | 32 | msm_gem_prime.o \ |
25 | msm_gem_submit.o \ | 33 | msm_gem_submit.o \ |
26 | msm_gpu.o \ | 34 | msm_gpu.o \ |
35 | msm_iommu.o \ | ||
27 | msm_ringbuffer.o | 36 | msm_ringbuffer.o |
28 | 37 | ||
29 | msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o | 38 | msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o |
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES index e036f6c1db94..9c4255b98021 100644 --- a/drivers/gpu/drm/msm/NOTES +++ b/drivers/gpu/drm/msm/NOTES | |||
@@ -4,7 +4,7 @@ In the current snapdragon SoC's, we have (at least) 3 different | |||
4 | display controller blocks at play: | 4 | display controller blocks at play: |
5 | + MDP3 - ?? seems to be what is on geeksphone peak device | 5 | + MDP3 - ?? seems to be what is on geeksphone peak device |
6 | + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410) | 6 | + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410) |
7 | + MDSS - snapdragon 800 | 7 | + MDP5 - snapdragon 800 |
8 | 8 | ||
9 | (I don't have a completely clear picture on which display controller | 9 | (I don't have a completely clear picture on which display controller |
10 | maps to which part #) | 10 | maps to which part #) |
@@ -46,6 +46,24 @@ and treat the MDP4 block's irq as "the" irq. Even though the connectors | |||
46 | may have their own irqs which they install themselves. For this reason | 46 | may have their own irqs which they install themselves. For this reason |
47 | the display controller is the "master" device. | 47 | the display controller is the "master" device. |
48 | 48 | ||
49 | For MDP5, the mapping is: | ||
50 | |||
51 | plane -> PIPE{RGBn,VIGn} \ | ||
52 | crtc -> LM (layer mixer) |-> MDP "device" | ||
53 | encoder -> INTF / | ||
54 | connector -> HDMI/DSI/eDP/etc --> other device(s) | ||
55 | |||
56 | Unlike MDP4, it appears we can get by with a single encoder, rather | ||
57 | than needing a different implementation for DTV, DSI, etc. (Ie. the | ||
58 | register interface is same, just different bases.) | ||
59 | |||
60 | Also unlike MDP4, with MDP5 all the IRQs for other blocks (HDMI, DSI, | ||
61 | etc) are routed through MDP. | ||
62 | |||
63 | And finally, MDP5 has this "Shared Memory Pool" (called "SMP"), from | ||
64 | which blocks need to be allocated to the active pipes based on fetch | ||
65 | stride. | ||
66 | |||
49 | Each connector probably ends up being a separate device, just for the | 67 | Each connector probably ends up being a separate device, just for the |
50 | logistics of finding/mapping io region, irq, etc. Idealy we would | 68 | logistics of finding/mapping io region, irq, etc. Idealy we would |
51 | have a better way than just stashing the platform device in a global | 69 | have a better way than just stashing the platform device in a global |
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h index 9588098741b5..85d615e7d62f 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h | |||
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/ | |||
8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | ||
10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) |
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
13 | - /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) |
14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) | 14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) |
15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33) | 15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) |
16 | - /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) |
17 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) | ||
17 | 18 | ||
18 | Copyright (C) 2013 by the following authors: | 19 | Copyright (C) 2013 by the following authors: |
19 | - Rob Clark <robdclark@gmail.com> (robclark) | 20 | - Rob Clark <robdclark@gmail.com> (robclark) |
@@ -202,6 +203,12 @@ enum a2xx_rb_copy_sample_select { | |||
202 | SAMPLE_0123 = 6, | 203 | SAMPLE_0123 = 6, |
203 | }; | 204 | }; |
204 | 205 | ||
206 | enum adreno_mmu_clnt_beh { | ||
207 | BEH_NEVR = 0, | ||
208 | BEH_TRAN_RNG = 1, | ||
209 | BEH_TRAN_FLT = 2, | ||
210 | }; | ||
211 | |||
205 | enum sq_tex_clamp { | 212 | enum sq_tex_clamp { |
206 | SQ_TEX_WRAP = 0, | 213 | SQ_TEX_WRAP = 0, |
207 | SQ_TEX_MIRROR = 1, | 214 | SQ_TEX_MIRROR = 1, |
@@ -238,6 +245,92 @@ enum sq_tex_filter { | |||
238 | 245 | ||
239 | #define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1 | 246 | #define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1 |
240 | 247 | ||
248 | #define REG_A2XX_MH_MMU_CONFIG 0x00000040 | ||
249 | #define A2XX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001 | ||
250 | #define A2XX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002 | ||
251 | #define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030 | ||
252 | #define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4 | ||
253 | static inline uint32_t A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
254 | { | ||
255 | return ((val) << A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK; | ||
256 | } | ||
257 | #define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0 | ||
258 | #define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6 | ||
259 | static inline uint32_t A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
260 | { | ||
261 | return ((val) << A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK; | ||
262 | } | ||
263 | #define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300 | ||
264 | #define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8 | ||
265 | static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
266 | { | ||
267 | return ((val) << A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK; | ||
268 | } | ||
269 | #define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00 | ||
270 | #define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10 | ||
271 | static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
272 | { | ||
273 | return ((val) << A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK; | ||
274 | } | ||
275 | #define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000 | ||
276 | #define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12 | ||
277 | static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
278 | { | ||
279 | return ((val) << A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK; | ||
280 | } | ||
281 | #define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000 | ||
282 | #define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14 | ||
283 | static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
284 | { | ||
285 | return ((val) << A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK; | ||
286 | } | ||
287 | #define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000 | ||
288 | #define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16 | ||
289 | static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
290 | { | ||
291 | return ((val) << A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK; | ||
292 | } | ||
293 | #define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000 | ||
294 | #define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18 | ||
295 | static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
296 | { | ||
297 | return ((val) << A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK; | ||
298 | } | ||
299 | #define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000 | ||
300 | #define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20 | ||
301 | static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
302 | { | ||
303 | return ((val) << A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK; | ||
304 | } | ||
305 | #define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000 | ||
306 | #define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22 | ||
307 | static inline uint32_t A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
308 | { | ||
309 | return ((val) << A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK; | ||
310 | } | ||
311 | #define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000 | ||
312 | #define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24 | ||
313 | static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
314 | { | ||
315 | return ((val) << A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK; | ||
316 | } | ||
317 | |||
318 | #define REG_A2XX_MH_MMU_VA_RANGE 0x00000041 | ||
319 | |||
320 | #define REG_A2XX_MH_MMU_PT_BASE 0x00000042 | ||
321 | |||
322 | #define REG_A2XX_MH_MMU_PAGE_FAULT 0x00000043 | ||
323 | |||
324 | #define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044 | ||
325 | |||
326 | #define REG_A2XX_MH_MMU_INVALIDATE 0x00000045 | ||
327 | |||
328 | #define REG_A2XX_MH_MMU_MPU_BASE 0x00000046 | ||
329 | |||
330 | #define REG_A2XX_MH_MMU_MPU_END 0x00000047 | ||
331 | |||
332 | #define REG_A2XX_NQWAIT_UNTIL 0x00000394 | ||
333 | |||
241 | #define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395 | 334 | #define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395 |
242 | 335 | ||
243 | #define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397 | 336 | #define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397 |
@@ -276,20 +369,6 @@ enum sq_tex_filter { | |||
276 | 369 | ||
277 | #define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447 | 370 | #define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447 |
278 | 371 | ||
279 | #define REG_A2XX_CP_ST_BASE 0x0000044d | ||
280 | |||
281 | #define REG_A2XX_CP_ST_BUFSZ 0x0000044e | ||
282 | |||
283 | #define REG_A2XX_CP_IB1_BASE 0x00000458 | ||
284 | |||
285 | #define REG_A2XX_CP_IB1_BUFSZ 0x00000459 | ||
286 | |||
287 | #define REG_A2XX_CP_IB2_BASE 0x0000045a | ||
288 | |||
289 | #define REG_A2XX_CP_IB2_BUFSZ 0x0000045b | ||
290 | |||
291 | #define REG_A2XX_CP_STAT 0x0000047f | ||
292 | |||
293 | #define REG_A2XX_RBBM_STATUS 0x000005d0 | 372 | #define REG_A2XX_RBBM_STATUS 0x000005d0 |
294 | #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f | 373 | #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f |
295 | #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0 | 374 | #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0 |
@@ -808,6 +887,12 @@ static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val) | |||
808 | 887 | ||
809 | #define REG_A2XX_SQ_VS_PROGRAM 0x000021f7 | 888 | #define REG_A2XX_SQ_VS_PROGRAM 0x000021f7 |
810 | 889 | ||
890 | #define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9 | ||
891 | |||
892 | #define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc | ||
893 | |||
894 | #define REG_A2XX_VGT_IMMED_DATA 0x000021fd | ||
895 | |||
811 | #define REG_A2XX_RB_DEPTHCONTROL 0x00002200 | 896 | #define REG_A2XX_RB_DEPTHCONTROL 0x00002200 |
812 | #define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001 | 897 | #define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001 |
813 | #define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002 | 898 | #define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002 |
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h index d4afdf657559..a7be56163d23 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h | |||
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/ | |||
8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | ||
10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) |
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
13 | - /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) |
14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) | 14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) |
15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33) | 15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) |
16 | - /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) |
17 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) | ||
17 | 18 | ||
18 | Copyright (C) 2013 by the following authors: | 19 | Copyright (C) 2013 by the following authors: |
19 | - Rob Clark <robdclark@gmail.com> (robclark) | 20 | - Rob Clark <robdclark@gmail.com> (robclark) |
@@ -292,6 +293,8 @@ enum a3xx_tex_type { | |||
292 | #define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000 | 293 | #define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000 |
293 | #define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000 | 294 | #define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000 |
294 | 295 | ||
296 | #define REG_A3XX_RBBM_NQWAIT_UNTIL 0x00000040 | ||
297 | |||
295 | #define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033 | 298 | #define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033 |
296 | 299 | ||
297 | #define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050 | 300 | #define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050 |
@@ -304,6 +307,8 @@ enum a3xx_tex_type { | |||
304 | 307 | ||
305 | #define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a | 308 | #define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a |
306 | 309 | ||
310 | #define REG_A3XX_RBBM_INT_SET_CMD 0x00000060 | ||
311 | |||
307 | #define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061 | 312 | #define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061 |
308 | 313 | ||
309 | #define REG_A3XX_RBBM_INT_0_MASK 0x00000063 | 314 | #define REG_A3XX_RBBM_INT_0_MASK 0x00000063 |
@@ -937,13 +942,13 @@ static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val) | |||
937 | return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK; | 942 | return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK; |
938 | } | 943 | } |
939 | 944 | ||
940 | #define REG_A3XX_UNKNOWN_20E8 0x000020e8 | 945 | #define REG_A3XX_RB_CLEAR_COLOR_DW0 0x000020e8 |
941 | 946 | ||
942 | #define REG_A3XX_UNKNOWN_20E9 0x000020e9 | 947 | #define REG_A3XX_RB_CLEAR_COLOR_DW1 0x000020e9 |
943 | 948 | ||
944 | #define REG_A3XX_UNKNOWN_20EA 0x000020ea | 949 | #define REG_A3XX_RB_CLEAR_COLOR_DW2 0x000020ea |
945 | 950 | ||
946 | #define REG_A3XX_UNKNOWN_20EB 0x000020eb | 951 | #define REG_A3XX_RB_CLEAR_COLOR_DW3 0x000020eb |
947 | 952 | ||
948 | #define REG_A3XX_RB_COPY_CONTROL 0x000020ec | 953 | #define REG_A3XX_RB_COPY_CONTROL 0x000020ec |
949 | #define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003 | 954 | #define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003 |
@@ -1026,7 +1031,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val) | |||
1026 | #define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080 | 1031 | #define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080 |
1027 | #define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000 | 1032 | #define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000 |
1028 | 1033 | ||
1029 | #define REG_A3XX_UNKNOWN_2101 0x00002101 | 1034 | #define REG_A3XX_RB_DEPTH_CLEAR 0x00002101 |
1030 | 1035 | ||
1031 | #define REG_A3XX_RB_DEPTH_INFO 0x00002102 | 1036 | #define REG_A3XX_RB_DEPTH_INFO 0x00002102 |
1032 | #define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001 | 1037 | #define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001 |
@@ -1103,11 +1108,11 @@ static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v | |||
1103 | return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK; | 1108 | return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK; |
1104 | } | 1109 | } |
1105 | 1110 | ||
1106 | #define REG_A3XX_UNKNOWN_2105 0x00002105 | 1111 | #define REG_A3XX_RB_STENCIL_CLEAR 0x00002105 |
1107 | 1112 | ||
1108 | #define REG_A3XX_UNKNOWN_2106 0x00002106 | 1113 | #define REG_A3XX_RB_STENCIL_BUF_INFO 0x00002106 |
1109 | 1114 | ||
1110 | #define REG_A3XX_UNKNOWN_2107 0x00002107 | 1115 | #define REG_A3XX_RB_STENCIL_BUF_PITCH 0x00002107 |
1111 | 1116 | ||
1112 | #define REG_A3XX_RB_STENCILREFMASK 0x00002108 | 1117 | #define REG_A3XX_RB_STENCILREFMASK 0x00002108 |
1113 | #define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff | 1118 | #define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff |
@@ -1149,20 +1154,31 @@ static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val) | |||
1149 | return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; | 1154 | return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; |
1150 | } | 1155 | } |
1151 | 1156 | ||
1152 | #define REG_A3XX_PA_SC_WINDOW_OFFSET 0x0000210e | 1157 | #define REG_A3XX_RB_LRZ_VSC_CONTROL 0x0000210c |
1153 | #define A3XX_PA_SC_WINDOW_OFFSET_X__MASK 0x0000ffff | 1158 | #define A3XX_RB_LRZ_VSC_CONTROL_BINNING_ENABLE 0x00000002 |
1154 | #define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0 | 1159 | |
1155 | static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t val) | 1160 | #define REG_A3XX_RB_WINDOW_OFFSET 0x0000210e |
1161 | #define A3XX_RB_WINDOW_OFFSET_X__MASK 0x0000ffff | ||
1162 | #define A3XX_RB_WINDOW_OFFSET_X__SHIFT 0 | ||
1163 | static inline uint32_t A3XX_RB_WINDOW_OFFSET_X(uint32_t val) | ||
1156 | { | 1164 | { |
1157 | return ((val) << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_X__MASK; | 1165 | return ((val) << A3XX_RB_WINDOW_OFFSET_X__SHIFT) & A3XX_RB_WINDOW_OFFSET_X__MASK; |
1158 | } | 1166 | } |
1159 | #define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK 0xffff0000 | 1167 | #define A3XX_RB_WINDOW_OFFSET_Y__MASK 0xffff0000 |
1160 | #define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16 | 1168 | #define A3XX_RB_WINDOW_OFFSET_Y__SHIFT 16 |
1161 | static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t val) | 1169 | static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val) |
1162 | { | 1170 | { |
1163 | return ((val) << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK; | 1171 | return ((val) << A3XX_RB_WINDOW_OFFSET_Y__SHIFT) & A3XX_RB_WINDOW_OFFSET_Y__MASK; |
1164 | } | 1172 | } |
1165 | 1173 | ||
1174 | #define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110 | ||
1175 | |||
1176 | #define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111 | ||
1177 | |||
1178 | #define REG_A3XX_RB_Z_CLAMP_MIN 0x00002114 | ||
1179 | |||
1180 | #define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115 | ||
1181 | |||
1166 | #define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4 | 1182 | #define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4 |
1167 | 1183 | ||
1168 | #define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea | 1184 | #define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea |
@@ -1309,6 +1325,8 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val) | |||
1309 | 1325 | ||
1310 | #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215 | 1326 | #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215 |
1311 | 1327 | ||
1328 | #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216 | ||
1329 | |||
1312 | #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217 | 1330 | #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217 |
1313 | 1331 | ||
1314 | #define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a | 1332 | #define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a |
@@ -1491,12 +1509,13 @@ static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0 | |||
1491 | 1509 | ||
1492 | #define REG_A3XX_SP_SP_CTRL_REG 0x000022c0 | 1510 | #define REG_A3XX_SP_SP_CTRL_REG 0x000022c0 |
1493 | #define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000 | 1511 | #define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000 |
1494 | #define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x000c0000 | 1512 | #define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x00040000 |
1495 | #define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18 | 1513 | #define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18 |
1496 | static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val) | 1514 | static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val) |
1497 | { | 1515 | { |
1498 | return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK; | 1516 | return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK; |
1499 | } | 1517 | } |
1518 | #define A3XX_SP_SP_CTRL_REG_BINNING 0x00080000 | ||
1500 | #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000 | 1519 | #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000 |
1501 | #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20 | 1520 | #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20 |
1502 | static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val) | 1521 | static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val) |
@@ -1669,7 +1688,7 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) | |||
1669 | 1688 | ||
1670 | #define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5 | 1689 | #define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5 |
1671 | 1690 | ||
1672 | #define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG 0x000022d6 | 1691 | #define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6 |
1673 | 1692 | ||
1674 | #define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7 | 1693 | #define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7 |
1675 | 1694 | ||
@@ -1772,7 +1791,7 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) | |||
1772 | 1791 | ||
1773 | #define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3 | 1792 | #define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3 |
1774 | 1793 | ||
1775 | #define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG 0x000022e4 | 1794 | #define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4 |
1776 | 1795 | ||
1777 | #define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5 | 1796 | #define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5 |
1778 | 1797 | ||
@@ -1943,6 +1962,9 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00 | |||
1943 | 1962 | ||
1944 | static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; } | 1963 | static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; } |
1945 | 1964 | ||
1965 | #define REG_A3XX_VSC_BIN_CONTROL 0x00000c3c | ||
1966 | #define A3XX_VSC_BIN_CONTROL_BINNING_ENABLE 0x00000001 | ||
1967 | |||
1946 | #define REG_A3XX_UNKNOWN_0C3D 0x00000c3d | 1968 | #define REG_A3XX_UNKNOWN_0C3D 0x00000c3d |
1947 | 1969 | ||
1948 | #define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48 | 1970 | #define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48 |
@@ -1953,7 +1975,7 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x000 | |||
1953 | 1975 | ||
1954 | #define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b | 1976 | #define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b |
1955 | 1977 | ||
1956 | #define REG_A3XX_UNKNOWN_0C81 0x00000c81 | 1978 | #define REG_A3XX_GRAS_TSE_DEBUG_ECO 0x00000c81 |
1957 | 1979 | ||
1958 | #define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88 | 1980 | #define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88 |
1959 | 1981 | ||
@@ -1975,22 +1997,24 @@ static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x000 | |||
1975 | 1997 | ||
1976 | #define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0 | 1998 | #define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0 |
1977 | 1999 | ||
2000 | #define REG_A3XX_RB_DEBUG_ECO_CONTROLS_ADDR 0x00000cc1 | ||
2001 | |||
1978 | #define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6 | 2002 | #define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6 |
1979 | 2003 | ||
1980 | #define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7 | 2004 | #define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7 |
1981 | 2005 | ||
1982 | #define REG_A3XX_RB_WINDOW_SIZE 0x00000ce0 | 2006 | #define REG_A3XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0 |
1983 | #define A3XX_RB_WINDOW_SIZE_WIDTH__MASK 0x00003fff | 2007 | #define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff |
1984 | #define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT 0 | 2008 | #define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0 |
1985 | static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val) | 2009 | static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val) |
1986 | { | 2010 | { |
1987 | return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK; | 2011 | return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK; |
1988 | } | 2012 | } |
1989 | #define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK 0x0fffc000 | 2013 | #define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x0fffc000 |
1990 | #define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT 14 | 2014 | #define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 14 |
1991 | static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val) | 2015 | static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val) |
1992 | { | 2016 | { |
1993 | return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK; | 2017 | return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK; |
1994 | } | 2018 | } |
1995 | 2019 | ||
1996 | #define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00 | 2020 | #define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00 |
@@ -2088,6 +2112,14 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op | |||
2088 | 2112 | ||
2089 | #define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09 | 2113 | #define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09 |
2090 | 2114 | ||
2115 | #define REG_A3XX_VGT_CL_INITIATOR 0x000021f0 | ||
2116 | |||
2117 | #define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9 | ||
2118 | |||
2119 | #define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc | ||
2120 | |||
2121 | #define REG_A3XX_VGT_IMMED_DATA 0x000021fd | ||
2122 | |||
2091 | #define REG_A3XX_TEX_SAMP_0 0x00000000 | 2123 | #define REG_A3XX_TEX_SAMP_0 0x00000000 |
2092 | #define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002 | 2124 | #define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002 |
2093 | #define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c | 2125 | #define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c |
@@ -2123,6 +2155,18 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val) | |||
2123 | #define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000 | 2155 | #define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000 |
2124 | 2156 | ||
2125 | #define REG_A3XX_TEX_SAMP_1 0x00000001 | 2157 | #define REG_A3XX_TEX_SAMP_1 0x00000001 |
2158 | #define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000 | ||
2159 | #define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12 | ||
2160 | static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val) | ||
2161 | { | ||
2162 | return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK; | ||
2163 | } | ||
2164 | #define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000 | ||
2165 | #define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22 | ||
2166 | static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val) | ||
2167 | { | ||
2168 | return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK; | ||
2169 | } | ||
2126 | 2170 | ||
2127 | #define REG_A3XX_TEX_CONST_0 0x00000000 | 2171 | #define REG_A3XX_TEX_CONST_0 0x00000000 |
2128 | #define A3XX_TEX_CONST_0_TILED 0x00000001 | 2172 | #define A3XX_TEX_CONST_0_TILED 0x00000001 |
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 035bd13dc8bd..461df93e825e 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c | |||
@@ -15,6 +15,10 @@ | |||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * this program. If not, see <http://www.gnu.org/licenses/>. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #ifdef CONFIG_MSM_OCMEM | ||
19 | # include <mach/ocmem.h> | ||
20 | #endif | ||
21 | |||
18 | #include "a3xx_gpu.h" | 22 | #include "a3xx_gpu.h" |
19 | 23 | ||
20 | #define A3XX_INT0_MASK \ | 24 | #define A3XX_INT0_MASK \ |
@@ -63,6 +67,7 @@ static void a3xx_me_init(struct msm_gpu *gpu) | |||
63 | static int a3xx_hw_init(struct msm_gpu *gpu) | 67 | static int a3xx_hw_init(struct msm_gpu *gpu) |
64 | { | 68 | { |
65 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); | 69 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
70 | struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu); | ||
66 | uint32_t *ptr, len; | 71 | uint32_t *ptr, len; |
67 | int i, ret; | 72 | int i, ret; |
68 | 73 | ||
@@ -105,6 +110,21 @@ static int a3xx_hw_init(struct msm_gpu *gpu) | |||
105 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff); | 110 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff); |
106 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); | 111 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); |
107 | 112 | ||
113 | } else if (adreno_is_a330v2(adreno_gpu)) { | ||
114 | /* | ||
115 | * Most of the VBIF registers on 8974v2 have the correct | ||
116 | * values at power on, so we won't modify those if we don't | ||
117 | * need to | ||
118 | */ | ||
119 | /* Enable 1k sort: */ | ||
120 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f); | ||
121 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); | ||
122 | /* Enable WR-REQ: */ | ||
123 | gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f); | ||
124 | gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303); | ||
125 | /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */ | ||
126 | gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003); | ||
127 | |||
108 | } else if (adreno_is_a330(adreno_gpu)) { | 128 | } else if (adreno_is_a330(adreno_gpu)) { |
109 | /* Set up 16 deep read/write request queues: */ | 129 | /* Set up 16 deep read/write request queues: */ |
110 | gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818); | 130 | gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818); |
@@ -121,10 +141,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu) | |||
121 | /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */ | 141 | /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */ |
122 | gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001); | 142 | gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001); |
123 | /* Set up AOOO: */ | 143 | /* Set up AOOO: */ |
124 | gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff); | 144 | gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f); |
125 | gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff); | 145 | gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f); |
126 | /* Enable 1K sort: */ | 146 | /* Enable 1K sort: */ |
127 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff); | 147 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f); |
128 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); | 148 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); |
129 | /* Disable VBIF clock gating. This is to enable AXI running | 149 | /* Disable VBIF clock gating. This is to enable AXI running |
130 | * higher frequency than GPU: | 150 | * higher frequency than GPU: |
@@ -162,14 +182,23 @@ static int a3xx_hw_init(struct msm_gpu *gpu) | |||
162 | gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001); | 182 | gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001); |
163 | 183 | ||
164 | /* Enable Clock gating: */ | 184 | /* Enable Clock gating: */ |
165 | gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff); | 185 | if (adreno_is_a320(adreno_gpu)) |
166 | 186 | gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff); | |
167 | /* Set the OCMEM base address for A330 */ | 187 | else if (adreno_is_a330v2(adreno_gpu)) |
168 | //TODO: | 188 | gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa); |
169 | // if (adreno_is_a330(adreno_gpu)) { | 189 | else if (adreno_is_a330(adreno_gpu)) |
170 | // gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR, | 190 | gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff); |
171 | // (unsigned int)(a3xx_gpu->ocmem_base >> 14)); | 191 | |
172 | // } | 192 | if (adreno_is_a330v2(adreno_gpu)) |
193 | gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455); | ||
194 | else if (adreno_is_a330(adreno_gpu)) | ||
195 | gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000); | ||
196 | |||
197 | /* Set the OCMEM base address for A330, etc */ | ||
198 | if (a3xx_gpu->ocmem_hdl) { | ||
199 | gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR, | ||
200 | (unsigned int)(a3xx_gpu->ocmem_base >> 14)); | ||
201 | } | ||
173 | 202 | ||
174 | /* Turn on performance counters: */ | 203 | /* Turn on performance counters: */ |
175 | gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01); | 204 | gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01); |
@@ -219,7 +248,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu) | |||
219 | /* Load PM4: */ | 248 | /* Load PM4: */ |
220 | ptr = (uint32_t *)(adreno_gpu->pm4->data); | 249 | ptr = (uint32_t *)(adreno_gpu->pm4->data); |
221 | len = adreno_gpu->pm4->size / 4; | 250 | len = adreno_gpu->pm4->size / 4; |
222 | DBG("loading PM4 ucode version: %u", ptr[0]); | 251 | DBG("loading PM4 ucode version: %x", ptr[1]); |
223 | 252 | ||
224 | gpu_write(gpu, REG_AXXX_CP_DEBUG, | 253 | gpu_write(gpu, REG_AXXX_CP_DEBUG, |
225 | AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE | | 254 | AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE | |
@@ -231,19 +260,26 @@ static int a3xx_hw_init(struct msm_gpu *gpu) | |||
231 | /* Load PFP: */ | 260 | /* Load PFP: */ |
232 | ptr = (uint32_t *)(adreno_gpu->pfp->data); | 261 | ptr = (uint32_t *)(adreno_gpu->pfp->data); |
233 | len = adreno_gpu->pfp->size / 4; | 262 | len = adreno_gpu->pfp->size / 4; |
234 | DBG("loading PFP ucode version: %u", ptr[0]); | 263 | DBG("loading PFP ucode version: %x", ptr[5]); |
235 | 264 | ||
236 | gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0); | 265 | gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0); |
237 | for (i = 1; i < len; i++) | 266 | for (i = 1; i < len; i++) |
238 | gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]); | 267 | gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]); |
239 | 268 | ||
240 | /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */ | 269 | /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */ |
241 | if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) | 270 | if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) { |
242 | gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, | 271 | gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, |
243 | AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) | | 272 | AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) | |
244 | AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) | | 273 | AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) | |
245 | AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14)); | 274 | AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14)); |
246 | 275 | } else if (adreno_is_a330(adreno_gpu)) { | |
276 | /* NOTE: this (value take from downstream android driver) | ||
277 | * includes some bits outside of the known bitfields. But | ||
278 | * A330 has this "MERCIU queue" thing too, which might | ||
279 | * explain a new bitfield or reshuffling: | ||
280 | */ | ||
281 | gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008); | ||
282 | } | ||
247 | 283 | ||
248 | /* clear ME_HALT to start micro engine */ | 284 | /* clear ME_HALT to start micro engine */ |
249 | gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0); | 285 | gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0); |
@@ -253,6 +289,14 @@ static int a3xx_hw_init(struct msm_gpu *gpu) | |||
253 | return 0; | 289 | return 0; |
254 | } | 290 | } |
255 | 291 | ||
292 | static void a3xx_recover(struct msm_gpu *gpu) | ||
293 | { | ||
294 | gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1); | ||
295 | gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD); | ||
296 | gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0); | ||
297 | adreno_recover(gpu); | ||
298 | } | ||
299 | |||
256 | static void a3xx_destroy(struct msm_gpu *gpu) | 300 | static void a3xx_destroy(struct msm_gpu *gpu) |
257 | { | 301 | { |
258 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); | 302 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
@@ -261,6 +305,12 @@ static void a3xx_destroy(struct msm_gpu *gpu) | |||
261 | DBG("%s", gpu->name); | 305 | DBG("%s", gpu->name); |
262 | 306 | ||
263 | adreno_gpu_cleanup(adreno_gpu); | 307 | adreno_gpu_cleanup(adreno_gpu); |
308 | |||
309 | #ifdef CONFIG_MSM_OCMEM | ||
310 | if (a3xx_gpu->ocmem_base) | ||
311 | ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl); | ||
312 | #endif | ||
313 | |||
264 | put_device(&a3xx_gpu->pdev->dev); | 314 | put_device(&a3xx_gpu->pdev->dev); |
265 | kfree(a3xx_gpu); | 315 | kfree(a3xx_gpu); |
266 | } | 316 | } |
@@ -371,7 +421,7 @@ static const struct adreno_gpu_funcs funcs = { | |||
371 | .hw_init = a3xx_hw_init, | 421 | .hw_init = a3xx_hw_init, |
372 | .pm_suspend = msm_gpu_pm_suspend, | 422 | .pm_suspend = msm_gpu_pm_suspend, |
373 | .pm_resume = msm_gpu_pm_resume, | 423 | .pm_resume = msm_gpu_pm_resume, |
374 | .recover = adreno_recover, | 424 | .recover = a3xx_recover, |
375 | .last_fence = adreno_last_fence, | 425 | .last_fence = adreno_last_fence, |
376 | .submit = adreno_submit, | 426 | .submit = adreno_submit, |
377 | .flush = adreno_flush, | 427 | .flush = adreno_flush, |
@@ -387,6 +437,7 @@ static const struct adreno_gpu_funcs funcs = { | |||
387 | struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) | 437 | struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) |
388 | { | 438 | { |
389 | struct a3xx_gpu *a3xx_gpu = NULL; | 439 | struct a3xx_gpu *a3xx_gpu = NULL; |
440 | struct adreno_gpu *adreno_gpu; | ||
390 | struct msm_gpu *gpu; | 441 | struct msm_gpu *gpu; |
391 | struct platform_device *pdev = a3xx_pdev; | 442 | struct platform_device *pdev = a3xx_pdev; |
392 | struct adreno_platform_config *config; | 443 | struct adreno_platform_config *config; |
@@ -406,7 +457,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) | |||
406 | goto fail; | 457 | goto fail; |
407 | } | 458 | } |
408 | 459 | ||
409 | gpu = &a3xx_gpu->base.base; | 460 | adreno_gpu = &a3xx_gpu->base; |
461 | gpu = &adreno_gpu->base; | ||
410 | 462 | ||
411 | get_device(&pdev->dev); | 463 | get_device(&pdev->dev); |
412 | a3xx_gpu->pdev = pdev; | 464 | a3xx_gpu->pdev = pdev; |
@@ -414,16 +466,46 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) | |||
414 | gpu->fast_rate = config->fast_rate; | 466 | gpu->fast_rate = config->fast_rate; |
415 | gpu->slow_rate = config->slow_rate; | 467 | gpu->slow_rate = config->slow_rate; |
416 | gpu->bus_freq = config->bus_freq; | 468 | gpu->bus_freq = config->bus_freq; |
469 | #ifdef CONFIG_MSM_BUS_SCALING | ||
470 | gpu->bus_scale_table = config->bus_scale_table; | ||
471 | #endif | ||
417 | 472 | ||
418 | DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u", | 473 | DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u", |
419 | gpu->fast_rate, gpu->slow_rate, gpu->bus_freq); | 474 | gpu->fast_rate, gpu->slow_rate, gpu->bus_freq); |
420 | 475 | ||
421 | ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base, | 476 | ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, config->rev); |
422 | &funcs, config->rev); | ||
423 | if (ret) | 477 | if (ret) |
424 | goto fail; | 478 | goto fail; |
425 | 479 | ||
426 | return &a3xx_gpu->base.base; | 480 | /* if needed, allocate gmem: */ |
481 | if (adreno_is_a330(adreno_gpu)) { | ||
482 | #ifdef CONFIG_MSM_OCMEM | ||
483 | /* TODO this is different/missing upstream: */ | ||
484 | struct ocmem_buf *ocmem_hdl = | ||
485 | ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem); | ||
486 | |||
487 | a3xx_gpu->ocmem_hdl = ocmem_hdl; | ||
488 | a3xx_gpu->ocmem_base = ocmem_hdl->addr; | ||
489 | adreno_gpu->gmem = ocmem_hdl->len; | ||
490 | DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024, | ||
491 | a3xx_gpu->ocmem_base); | ||
492 | #endif | ||
493 | } | ||
494 | |||
495 | if (!gpu->mmu) { | ||
496 | /* TODO we think it is possible to configure the GPU to | ||
497 | * restrict access to VRAM carveout. But the required | ||
498 | * registers are unknown. For now just bail out and | ||
499 | * limp along with just modesetting. If it turns out | ||
500 | * to not be possible to restrict access, then we must | ||
501 | * implement a cmdstream validator. | ||
502 | */ | ||
503 | dev_err(dev->dev, "No memory protection without IOMMU\n"); | ||
504 | ret = -ENXIO; | ||
505 | goto fail; | ||
506 | } | ||
507 | |||
508 | return gpu; | ||
427 | 509 | ||
428 | fail: | 510 | fail: |
429 | if (a3xx_gpu) | 511 | if (a3xx_gpu) |
@@ -436,19 +518,59 @@ fail: | |||
436 | * The a3xx device: | 518 | * The a3xx device: |
437 | */ | 519 | */ |
438 | 520 | ||
521 | #if defined(CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF) | ||
522 | # include <mach/kgsl.h> | ||
523 | #endif | ||
524 | |||
439 | static int a3xx_probe(struct platform_device *pdev) | 525 | static int a3xx_probe(struct platform_device *pdev) |
440 | { | 526 | { |
441 | static struct adreno_platform_config config = {}; | 527 | static struct adreno_platform_config config = {}; |
442 | #ifdef CONFIG_OF | 528 | #ifdef CONFIG_OF |
443 | /* TODO */ | 529 | struct device_node *child, *node = pdev->dev.of_node; |
530 | u32 val; | ||
531 | int ret; | ||
532 | |||
533 | ret = of_property_read_u32(node, "qcom,chipid", &val); | ||
534 | if (ret) { | ||
535 | dev_err(&pdev->dev, "could not find chipid: %d\n", ret); | ||
536 | return ret; | ||
537 | } | ||
538 | |||
539 | config.rev = ADRENO_REV((val >> 24) & 0xff, | ||
540 | (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff); | ||
541 | |||
542 | /* find clock rates: */ | ||
543 | config.fast_rate = 0; | ||
544 | config.slow_rate = ~0; | ||
545 | for_each_child_of_node(node, child) { | ||
546 | if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) { | ||
547 | struct device_node *pwrlvl; | ||
548 | for_each_child_of_node(child, pwrlvl) { | ||
549 | ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val); | ||
550 | if (ret) { | ||
551 | dev_err(&pdev->dev, "could not find gpu-freq: %d\n", ret); | ||
552 | return ret; | ||
553 | } | ||
554 | config.fast_rate = max(config.fast_rate, val); | ||
555 | config.slow_rate = min(config.slow_rate, val); | ||
556 | } | ||
557 | } | ||
558 | } | ||
559 | |||
560 | if (!config.fast_rate) { | ||
561 | dev_err(&pdev->dev, "could not find clk rates\n"); | ||
562 | return -ENXIO; | ||
563 | } | ||
564 | |||
444 | #else | 565 | #else |
566 | struct kgsl_device_platform_data *pdata = pdev->dev.platform_data; | ||
445 | uint32_t version = socinfo_get_version(); | 567 | uint32_t version = socinfo_get_version(); |
446 | if (cpu_is_apq8064ab()) { | 568 | if (cpu_is_apq8064ab()) { |
447 | config.fast_rate = 450000000; | 569 | config.fast_rate = 450000000; |
448 | config.slow_rate = 27000000; | 570 | config.slow_rate = 27000000; |
449 | config.bus_freq = 4; | 571 | config.bus_freq = 4; |
450 | config.rev = ADRENO_REV(3, 2, 1, 0); | 572 | config.rev = ADRENO_REV(3, 2, 1, 0); |
451 | } else if (cpu_is_apq8064() || cpu_is_msm8960ab()) { | 573 | } else if (cpu_is_apq8064()) { |
452 | config.fast_rate = 400000000; | 574 | config.fast_rate = 400000000; |
453 | config.slow_rate = 27000000; | 575 | config.slow_rate = 27000000; |
454 | config.bus_freq = 4; | 576 | config.bus_freq = 4; |
@@ -461,6 +583,16 @@ static int a3xx_probe(struct platform_device *pdev) | |||
461 | else | 583 | else |
462 | config.rev = ADRENO_REV(3, 2, 0, 0); | 584 | config.rev = ADRENO_REV(3, 2, 0, 0); |
463 | 585 | ||
586 | } else if (cpu_is_msm8960ab()) { | ||
587 | config.fast_rate = 400000000; | ||
588 | config.slow_rate = 320000000; | ||
589 | config.bus_freq = 4; | ||
590 | |||
591 | if (SOCINFO_VERSION_MINOR(version) == 0) | ||
592 | config.rev = ADRENO_REV(3, 2, 1, 0); | ||
593 | else | ||
594 | config.rev = ADRENO_REV(3, 2, 1, 1); | ||
595 | |||
464 | } else if (cpu_is_msm8930()) { | 596 | } else if (cpu_is_msm8930()) { |
465 | config.fast_rate = 400000000; | 597 | config.fast_rate = 400000000; |
466 | config.slow_rate = 27000000; | 598 | config.slow_rate = 27000000; |
@@ -473,6 +605,9 @@ static int a3xx_probe(struct platform_device *pdev) | |||
473 | config.rev = ADRENO_REV(3, 0, 5, 0); | 605 | config.rev = ADRENO_REV(3, 0, 5, 0); |
474 | 606 | ||
475 | } | 607 | } |
608 | # ifdef CONFIG_MSM_BUS_SCALING | ||
609 | config.bus_scale_table = pdata->bus_scale_table; | ||
610 | # endif | ||
476 | #endif | 611 | #endif |
477 | pdev->dev.platform_data = &config; | 612 | pdev->dev.platform_data = &config; |
478 | a3xx_pdev = pdev; | 613 | a3xx_pdev = pdev; |
@@ -485,10 +620,19 @@ static int a3xx_remove(struct platform_device *pdev) | |||
485 | return 0; | 620 | return 0; |
486 | } | 621 | } |
487 | 622 | ||
623 | static const struct of_device_id dt_match[] = { | ||
624 | { .compatible = "qcom,kgsl-3d0" }, | ||
625 | {} | ||
626 | }; | ||
627 | MODULE_DEVICE_TABLE(of, dt_match); | ||
628 | |||
488 | static struct platform_driver a3xx_driver = { | 629 | static struct platform_driver a3xx_driver = { |
489 | .probe = a3xx_probe, | 630 | .probe = a3xx_probe, |
490 | .remove = a3xx_remove, | 631 | .remove = a3xx_remove, |
491 | .driver.name = "kgsl-3d0", | 632 | .driver = { |
633 | .name = "kgsl-3d0", | ||
634 | .of_match_table = dt_match, | ||
635 | }, | ||
492 | }; | 636 | }; |
493 | 637 | ||
494 | void __init a3xx_register(void) | 638 | void __init a3xx_register(void) |
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h index 32c398c2d00a..bb9a8ca0507b 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h | |||
@@ -24,6 +24,10 @@ | |||
24 | struct a3xx_gpu { | 24 | struct a3xx_gpu { |
25 | struct adreno_gpu base; | 25 | struct adreno_gpu base; |
26 | struct platform_device *pdev; | 26 | struct platform_device *pdev; |
27 | |||
28 | /* if OCMEM is used for GMEM: */ | ||
29 | uint32_t ocmem_base; | ||
30 | void *ocmem_hdl; | ||
27 | }; | 31 | }; |
28 | #define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base) | 32 | #define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base) |
29 | 33 | ||
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h index 33dcc606c7c5..d6e6ce2d1abd 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h | |||
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/ | |||
8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | ||
10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) |
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
13 | - /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) |
14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) | 14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) |
15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33) | 15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) |
16 | - /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) |
17 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) | ||
17 | 18 | ||
18 | Copyright (C) 2013 by the following authors: | 19 | Copyright (C) 2013 by the following authors: |
19 | - Rob Clark <robdclark@gmail.com> (robclark) | 20 | - Rob Clark <robdclark@gmail.com> (robclark) |
@@ -115,96 +116,6 @@ enum adreno_rb_depth_format { | |||
115 | DEPTHX_24_8 = 1, | 116 | DEPTHX_24_8 = 1, |
116 | }; | 117 | }; |
117 | 118 | ||
118 | enum adreno_mmu_clnt_beh { | ||
119 | BEH_NEVR = 0, | ||
120 | BEH_TRAN_RNG = 1, | ||
121 | BEH_TRAN_FLT = 2, | ||
122 | }; | ||
123 | |||
124 | #define REG_AXXX_MH_MMU_CONFIG 0x00000040 | ||
125 | #define AXXX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001 | ||
126 | #define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002 | ||
127 | #define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030 | ||
128 | #define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4 | ||
129 | static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
130 | { | ||
131 | return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK; | ||
132 | } | ||
133 | #define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0 | ||
134 | #define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6 | ||
135 | static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
136 | { | ||
137 | return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK; | ||
138 | } | ||
139 | #define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300 | ||
140 | #define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8 | ||
141 | static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
142 | { | ||
143 | return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK; | ||
144 | } | ||
145 | #define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00 | ||
146 | #define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10 | ||
147 | static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
148 | { | ||
149 | return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK; | ||
150 | } | ||
151 | #define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000 | ||
152 | #define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12 | ||
153 | static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
154 | { | ||
155 | return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK; | ||
156 | } | ||
157 | #define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000 | ||
158 | #define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14 | ||
159 | static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
160 | { | ||
161 | return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK; | ||
162 | } | ||
163 | #define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000 | ||
164 | #define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16 | ||
165 | static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
166 | { | ||
167 | return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK; | ||
168 | } | ||
169 | #define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000 | ||
170 | #define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18 | ||
171 | static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
172 | { | ||
173 | return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK; | ||
174 | } | ||
175 | #define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000 | ||
176 | #define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20 | ||
177 | static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
178 | { | ||
179 | return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK; | ||
180 | } | ||
181 | #define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000 | ||
182 | #define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22 | ||
183 | static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
184 | { | ||
185 | return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK; | ||
186 | } | ||
187 | #define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000 | ||
188 | #define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24 | ||
189 | static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
190 | { | ||
191 | return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK; | ||
192 | } | ||
193 | |||
194 | #define REG_AXXX_MH_MMU_VA_RANGE 0x00000041 | ||
195 | |||
196 | #define REG_AXXX_MH_MMU_PT_BASE 0x00000042 | ||
197 | |||
198 | #define REG_AXXX_MH_MMU_PAGE_FAULT 0x00000043 | ||
199 | |||
200 | #define REG_AXXX_MH_MMU_TRAN_ERROR 0x00000044 | ||
201 | |||
202 | #define REG_AXXX_MH_MMU_INVALIDATE 0x00000045 | ||
203 | |||
204 | #define REG_AXXX_MH_MMU_MPU_BASE 0x00000046 | ||
205 | |||
206 | #define REG_AXXX_MH_MMU_MPU_END 0x00000047 | ||
207 | |||
208 | #define REG_AXXX_CP_RB_BASE 0x000001c0 | 119 | #define REG_AXXX_CP_RB_BASE 0x000001c0 |
209 | 120 | ||
210 | #define REG_AXXX_CP_RB_CNTL 0x000001c1 | 121 | #define REG_AXXX_CP_RB_CNTL 0x000001c1 |
@@ -275,6 +186,18 @@ static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val) | |||
275 | } | 186 | } |
276 | 187 | ||
277 | #define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6 | 188 | #define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6 |
189 | #define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK 0x001f0000 | ||
190 | #define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT 16 | ||
191 | static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_MEQ_END(uint32_t val) | ||
192 | { | ||
193 | return ((val) << AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK; | ||
194 | } | ||
195 | #define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK 0x1f000000 | ||
196 | #define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT 24 | ||
197 | static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_ROQ_END(uint32_t val) | ||
198 | { | ||
199 | return ((val) << AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK; | ||
200 | } | ||
278 | 201 | ||
279 | #define REG_AXXX_CP_CSQ_AVAIL 0x000001d7 | 202 | #define REG_AXXX_CP_CSQ_AVAIL 0x000001d7 |
280 | #define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f | 203 | #define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f |
@@ -402,6 +325,36 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val) | |||
402 | return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK; | 325 | return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK; |
403 | } | 326 | } |
404 | 327 | ||
328 | #define REG_AXXX_CP_NON_PREFETCH_CNTRS 0x00000440 | ||
329 | |||
330 | #define REG_AXXX_CP_STQ_ST_STAT 0x00000443 | ||
331 | |||
332 | #define REG_AXXX_CP_ST_BASE 0x0000044d | ||
333 | |||
334 | #define REG_AXXX_CP_ST_BUFSZ 0x0000044e | ||
335 | |||
336 | #define REG_AXXX_CP_MEQ_STAT 0x0000044f | ||
337 | |||
338 | #define REG_AXXX_CP_MIU_TAG_STAT 0x00000452 | ||
339 | |||
340 | #define REG_AXXX_CP_BIN_MASK_LO 0x00000454 | ||
341 | |||
342 | #define REG_AXXX_CP_BIN_MASK_HI 0x00000455 | ||
343 | |||
344 | #define REG_AXXX_CP_BIN_SELECT_LO 0x00000456 | ||
345 | |||
346 | #define REG_AXXX_CP_BIN_SELECT_HI 0x00000457 | ||
347 | |||
348 | #define REG_AXXX_CP_IB1_BASE 0x00000458 | ||
349 | |||
350 | #define REG_AXXX_CP_IB1_BUFSZ 0x00000459 | ||
351 | |||
352 | #define REG_AXXX_CP_IB2_BASE 0x0000045a | ||
353 | |||
354 | #define REG_AXXX_CP_IB2_BUFSZ 0x0000045b | ||
355 | |||
356 | #define REG_AXXX_CP_STAT 0x0000047f | ||
357 | |||
405 | #define REG_AXXX_CP_SCRATCH_REG0 0x00000578 | 358 | #define REG_AXXX_CP_SCRATCH_REG0 0x00000578 |
406 | 359 | ||
407 | #define REG_AXXX_CP_SCRATCH_REG1 0x00000579 | 360 | #define REG_AXXX_CP_SCRATCH_REG1 0x00000579 |
@@ -418,6 +371,26 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val) | |||
418 | 371 | ||
419 | #define REG_AXXX_CP_SCRATCH_REG7 0x0000057f | 372 | #define REG_AXXX_CP_SCRATCH_REG7 0x0000057f |
420 | 373 | ||
374 | #define REG_AXXX_CP_ME_VS_EVENT_SRC 0x00000600 | ||
375 | |||
376 | #define REG_AXXX_CP_ME_VS_EVENT_ADDR 0x00000601 | ||
377 | |||
378 | #define REG_AXXX_CP_ME_VS_EVENT_DATA 0x00000602 | ||
379 | |||
380 | #define REG_AXXX_CP_ME_VS_EVENT_ADDR_SWM 0x00000603 | ||
381 | |||
382 | #define REG_AXXX_CP_ME_VS_EVENT_DATA_SWM 0x00000604 | ||
383 | |||
384 | #define REG_AXXX_CP_ME_PS_EVENT_SRC 0x00000605 | ||
385 | |||
386 | #define REG_AXXX_CP_ME_PS_EVENT_ADDR 0x00000606 | ||
387 | |||
388 | #define REG_AXXX_CP_ME_PS_EVENT_DATA 0x00000607 | ||
389 | |||
390 | #define REG_AXXX_CP_ME_PS_EVENT_ADDR_SWM 0x00000608 | ||
391 | |||
392 | #define REG_AXXX_CP_ME_PS_EVENT_DATA_SWM 0x00000609 | ||
393 | |||
421 | #define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a | 394 | #define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a |
422 | 395 | ||
423 | #define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b | 396 | #define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b |
@@ -428,5 +401,11 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val) | |||
428 | 401 | ||
429 | #define REG_AXXX_CP_ME_NRT_DATA 0x0000060e | 402 | #define REG_AXXX_CP_ME_NRT_DATA 0x0000060e |
430 | 403 | ||
404 | #define REG_AXXX_CP_ME_VS_FETCH_DONE_SRC 0x00000612 | ||
405 | |||
406 | #define REG_AXXX_CP_ME_VS_FETCH_DONE_ADDR 0x00000613 | ||
407 | |||
408 | #define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA 0x00000614 | ||
409 | |||
431 | 410 | ||
432 | #endif /* ADRENO_COMMON_XML */ | 411 | #endif /* ADRENO_COMMON_XML */ |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index a0b9d8a95b16..d321099abdd4 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include "adreno_gpu.h" | 18 | #include "adreno_gpu.h" |
19 | #include "msm_gem.h" | 19 | #include "msm_gem.h" |
20 | #include "msm_mmu.h" | ||
20 | 21 | ||
21 | struct adreno_info { | 22 | struct adreno_info { |
22 | struct adreno_rev rev; | 23 | struct adreno_rev rev; |
@@ -44,7 +45,7 @@ static const struct adreno_info gpulist[] = { | |||
44 | .pfpfw = "a300_pfp.fw", | 45 | .pfpfw = "a300_pfp.fw", |
45 | .gmem = SZ_512K, | 46 | .gmem = SZ_512K, |
46 | }, { | 47 | }, { |
47 | .rev = ADRENO_REV(3, 3, 0, 0), | 48 | .rev = ADRENO_REV(3, 3, 0, ANY_ID), |
48 | .revn = 330, | 49 | .revn = 330, |
49 | .name = "A330", | 50 | .name = "A330", |
50 | .pm4fw = "a330_pm4.fw", | 51 | .pm4fw = "a330_pm4.fw", |
@@ -53,6 +54,11 @@ static const struct adreno_info gpulist[] = { | |||
53 | }, | 54 | }, |
54 | }; | 55 | }; |
55 | 56 | ||
57 | MODULE_FIRMWARE("a300_pm4.fw"); | ||
58 | MODULE_FIRMWARE("a300_pfp.fw"); | ||
59 | MODULE_FIRMWARE("a330_pm4.fw"); | ||
60 | MODULE_FIRMWARE("a330_pfp.fw"); | ||
61 | |||
56 | #define RB_SIZE SZ_32K | 62 | #define RB_SIZE SZ_32K |
57 | #define RB_BLKSIZE 16 | 63 | #define RB_BLKSIZE 16 |
58 | 64 | ||
@@ -65,7 +71,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) | |||
65 | *value = adreno_gpu->info->revn; | 71 | *value = adreno_gpu->info->revn; |
66 | return 0; | 72 | return 0; |
67 | case MSM_PARAM_GMEM_SIZE: | 73 | case MSM_PARAM_GMEM_SIZE: |
68 | *value = adreno_gpu->info->gmem; | 74 | *value = adreno_gpu->gmem; |
69 | return 0; | 75 | return 0; |
70 | default: | 76 | default: |
71 | DBG("%s: invalid param: %u", gpu->name, param); | 77 | DBG("%s: invalid param: %u", gpu->name, param); |
@@ -86,7 +92,7 @@ int adreno_hw_init(struct msm_gpu *gpu) | |||
86 | gpu_write(gpu, REG_AXXX_CP_RB_CNTL, | 92 | gpu_write(gpu, REG_AXXX_CP_RB_CNTL, |
87 | /* size is log2(quad-words): */ | 93 | /* size is log2(quad-words): */ |
88 | AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) | | 94 | AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) | |
89 | AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE)); | 95 | AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8))); |
90 | 96 | ||
91 | /* Setup ringbuffer address: */ | 97 | /* Setup ringbuffer address: */ |
92 | gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova); | 98 | gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova); |
@@ -286,6 +292,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
286 | struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, | 292 | struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, |
287 | struct adreno_rev rev) | 293 | struct adreno_rev rev) |
288 | { | 294 | { |
295 | struct msm_mmu *mmu; | ||
289 | int i, ret; | 296 | int i, ret; |
290 | 297 | ||
291 | /* identify gpu: */ | 298 | /* identify gpu: */ |
@@ -311,6 +318,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
311 | rev.core, rev.major, rev.minor, rev.patchid); | 318 | rev.core, rev.major, rev.minor, rev.patchid); |
312 | 319 | ||
313 | gpu->funcs = funcs; | 320 | gpu->funcs = funcs; |
321 | gpu->gmem = gpu->info->gmem; | ||
314 | gpu->rev = rev; | 322 | gpu->rev = rev; |
315 | 323 | ||
316 | ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev); | 324 | ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev); |
@@ -333,10 +341,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
333 | if (ret) | 341 | if (ret) |
334 | return ret; | 342 | return ret; |
335 | 343 | ||
336 | ret = msm_iommu_attach(drm, gpu->base.iommu, | 344 | mmu = gpu->base.mmu; |
337 | iommu_ports, ARRAY_SIZE(iommu_ports)); | 345 | if (mmu) { |
338 | if (ret) | 346 | ret = mmu->funcs->attach(mmu, iommu_ports, |
339 | return ret; | 347 | ARRAY_SIZE(iommu_ports)); |
348 | if (ret) | ||
349 | return ret; | ||
350 | } | ||
340 | 351 | ||
341 | gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs), | 352 | gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs), |
342 | MSM_BO_UNCACHED); | 353 | MSM_BO_UNCACHED); |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index f73abfba7c22..ca11ea4da165 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h | |||
@@ -51,6 +51,7 @@ struct adreno_gpu { | |||
51 | struct msm_gpu base; | 51 | struct msm_gpu base; |
52 | struct adreno_rev rev; | 52 | struct adreno_rev rev; |
53 | const struct adreno_info *info; | 53 | const struct adreno_info *info; |
54 | uint32_t gmem; /* actual gmem size */ | ||
54 | uint32_t revn; /* numeric revision name */ | 55 | uint32_t revn; /* numeric revision name */ |
55 | const struct adreno_gpu_funcs *funcs; | 56 | const struct adreno_gpu_funcs *funcs; |
56 | 57 | ||
@@ -70,6 +71,9 @@ struct adreno_gpu { | |||
70 | struct adreno_platform_config { | 71 | struct adreno_platform_config { |
71 | struct adreno_rev rev; | 72 | struct adreno_rev rev; |
72 | uint32_t fast_rate, slow_rate, bus_freq; | 73 | uint32_t fast_rate, slow_rate, bus_freq; |
74 | #ifdef CONFIG_MSM_BUS_SCALING | ||
75 | struct msm_bus_scale_pdata *bus_scale_table; | ||
76 | #endif | ||
73 | }; | 77 | }; |
74 | 78 | ||
75 | #define ADRENO_IDLE_TIMEOUT (20 * 1000) | 79 | #define ADRENO_IDLE_TIMEOUT (20 * 1000) |
@@ -94,6 +98,11 @@ static inline bool adreno_is_a330(struct adreno_gpu *gpu) | |||
94 | return gpu->revn == 330; | 98 | return gpu->revn == 330; |
95 | } | 99 | } |
96 | 100 | ||
101 | static inline bool adreno_is_a330v2(struct adreno_gpu *gpu) | ||
102 | { | ||
103 | return adreno_is_a330(gpu) && (gpu->rev.patchid > 0); | ||
104 | } | ||
105 | |||
97 | int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); | 106 | int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); |
98 | int adreno_hw_init(struct msm_gpu *gpu); | 107 | int adreno_hw_init(struct msm_gpu *gpu); |
99 | uint32_t adreno_last_fence(struct msm_gpu *gpu); | 108 | uint32_t adreno_last_fence(struct msm_gpu *gpu); |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h index 259ad709b0cc..ae992c71703f 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | |||
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/ | |||
8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | ||
10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) |
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
13 | - /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) |
14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) | 14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) |
15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33) | 15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) |
16 | - /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) |
17 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) | ||
17 | 18 | ||
18 | Copyright (C) 2013 by the following authors: | 19 | Copyright (C) 2013 by the following authors: |
19 | - Rob Clark <robdclark@gmail.com> (robclark) | 20 | - Rob Clark <robdclark@gmail.com> (robclark) |
@@ -66,13 +67,15 @@ enum vgt_event_type { | |||
66 | 67 | ||
67 | enum pc_di_primtype { | 68 | enum pc_di_primtype { |
68 | DI_PT_NONE = 0, | 69 | DI_PT_NONE = 0, |
69 | DI_PT_POINTLIST = 1, | 70 | DI_PT_POINTLIST_A2XX = 1, |
70 | DI_PT_LINELIST = 2, | 71 | DI_PT_LINELIST = 2, |
71 | DI_PT_LINESTRIP = 3, | 72 | DI_PT_LINESTRIP = 3, |
72 | DI_PT_TRILIST = 4, | 73 | DI_PT_TRILIST = 4, |
73 | DI_PT_TRIFAN = 5, | 74 | DI_PT_TRIFAN = 5, |
74 | DI_PT_TRISTRIP = 6, | 75 | DI_PT_TRISTRIP = 6, |
76 | DI_PT_LINELOOP = 7, | ||
75 | DI_PT_RECTLIST = 8, | 77 | DI_PT_RECTLIST = 8, |
78 | DI_PT_POINTLIST_A3XX = 9, | ||
76 | DI_PT_QUADLIST = 13, | 79 | DI_PT_QUADLIST = 13, |
77 | DI_PT_QUADSTRIP = 14, | 80 | DI_PT_QUADSTRIP = 14, |
78 | DI_PT_POLYGON = 15, | 81 | DI_PT_POLYGON = 15, |
@@ -119,7 +122,7 @@ enum adreno_pm4_type3_packets { | |||
119 | CP_WAIT_FOR_IDLE = 38, | 122 | CP_WAIT_FOR_IDLE = 38, |
120 | CP_WAIT_REG_MEM = 60, | 123 | CP_WAIT_REG_MEM = 60, |
121 | CP_WAIT_REG_EQ = 82, | 124 | CP_WAIT_REG_EQ = 82, |
122 | CP_WAT_REG_GTE = 83, | 125 | CP_WAIT_REG_GTE = 83, |
123 | CP_WAIT_UNTIL_READ = 92, | 126 | CP_WAIT_UNTIL_READ = 92, |
124 | CP_WAIT_IB_PFD_COMPLETE = 93, | 127 | CP_WAIT_IB_PFD_COMPLETE = 93, |
125 | CP_REG_RMW = 33, | 128 | CP_REG_RMW = 33, |
@@ -151,7 +154,6 @@ enum adreno_pm4_type3_packets { | |||
151 | CP_CONTEXT_UPDATE = 94, | 154 | CP_CONTEXT_UPDATE = 94, |
152 | CP_INTERRUPT = 64, | 155 | CP_INTERRUPT = 64, |
153 | CP_IM_STORE = 44, | 156 | CP_IM_STORE = 44, |
154 | CP_SET_BIN_BASE_OFFSET = 75, | ||
155 | CP_SET_DRAW_INIT_FLAGS = 75, | 157 | CP_SET_DRAW_INIT_FLAGS = 75, |
156 | CP_SET_PROTECTED_MODE = 95, | 158 | CP_SET_PROTECTED_MODE = 95, |
157 | CP_LOAD_STATE = 48, | 159 | CP_LOAD_STATE = 48, |
@@ -159,6 +161,16 @@ enum adreno_pm4_type3_packets { | |||
159 | CP_COND_INDIRECT_BUFFER_PFD = 50, | 161 | CP_COND_INDIRECT_BUFFER_PFD = 50, |
160 | CP_INDIRECT_BUFFER_PFE = 63, | 162 | CP_INDIRECT_BUFFER_PFE = 63, |
161 | CP_SET_BIN = 76, | 163 | CP_SET_BIN = 76, |
164 | CP_TEST_TWO_MEMS = 113, | ||
165 | CP_WAIT_FOR_ME = 19, | ||
166 | IN_IB_PREFETCH_END = 23, | ||
167 | IN_SUBBLK_PREFETCH = 31, | ||
168 | IN_INSTR_PREFETCH = 32, | ||
169 | IN_INSTR_MATCH = 71, | ||
170 | IN_CONST_PREFETCH = 73, | ||
171 | IN_INCR_UPDT_STATE = 85, | ||
172 | IN_INCR_UPDT_CONST = 86, | ||
173 | IN_INCR_UPDT_INSTR = 87, | ||
162 | }; | 174 | }; |
163 | 175 | ||
164 | enum adreno_state_block { | 176 | enum adreno_state_block { |
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 6d4c62bf70dc..87be647e3825 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h | |||
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/ | |||
8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | ||
10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) |
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) |
14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
14 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) |
15 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) |
16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) |
17 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) |
18 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) | 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) |
19 | 21 | ||
20 | Copyright (C) 2013 by the following authors: | 22 | Copyright (C) 2013 by the following authors: |
21 | - Rob Clark <robdclark@gmail.com> (robclark) | 23 | - Rob Clark <robdclark@gmail.com> (robclark) |
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h index d1df38bf5747..747a6ef4211f 100644 --- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h +++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | |||
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/ | |||
8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | ||
10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) |
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) |
14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
14 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) |
15 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) |
16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) |
17 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) |
18 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) | 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) |
19 | 21 | ||
20 | Copyright (C) 2013 by the following authors: | 22 | Copyright (C) 2013 by the following authors: |
21 | - Rob Clark <robdclark@gmail.com> (robclark) | 23 | - Rob Clark <robdclark@gmail.com> (robclark) |
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h index 0030a111302d..48e03acf19bf 100644 --- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h +++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h | |||
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/ | |||
8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | ||
10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) |
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) |
14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
14 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) |
15 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) |
16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) |
17 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) |
18 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) | 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) |
19 | 21 | ||
20 | Copyright (C) 2013 by the following authors: | 22 | Copyright (C) 2013 by the following authors: |
21 | - Rob Clark <robdclark@gmail.com> (robclark) | 23 | - Rob Clark <robdclark@gmail.com> (robclark) |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 50d11df35b21..6f1588aa9071 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c | |||
@@ -41,7 +41,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on) | |||
41 | power_on ? "Enable" : "Disable", ctrl); | 41 | power_on ? "Enable" : "Disable", ctrl); |
42 | } | 42 | } |
43 | 43 | ||
44 | static irqreturn_t hdmi_irq(int irq, void *dev_id) | 44 | irqreturn_t hdmi_irq(int irq, void *dev_id) |
45 | { | 45 | { |
46 | struct hdmi *hdmi = dev_id; | 46 | struct hdmi *hdmi = dev_id; |
47 | 47 | ||
@@ -71,13 +71,13 @@ void hdmi_destroy(struct kref *kref) | |||
71 | } | 71 | } |
72 | 72 | ||
73 | /* initialize connector */ | 73 | /* initialize connector */ |
74 | int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) | 74 | struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) |
75 | { | 75 | { |
76 | struct hdmi *hdmi = NULL; | 76 | struct hdmi *hdmi = NULL; |
77 | struct msm_drm_private *priv = dev->dev_private; | 77 | struct msm_drm_private *priv = dev->dev_private; |
78 | struct platform_device *pdev = hdmi_pdev; | 78 | struct platform_device *pdev = hdmi_pdev; |
79 | struct hdmi_platform_config *config; | 79 | struct hdmi_platform_config *config; |
80 | int ret; | 80 | int i, ret; |
81 | 81 | ||
82 | if (!pdev) { | 82 | if (!pdev) { |
83 | dev_err(dev->dev, "no hdmi device\n"); | 83 | dev_err(dev->dev, "no hdmi device\n"); |
@@ -99,6 +99,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) | |||
99 | 99 | ||
100 | hdmi->dev = dev; | 100 | hdmi->dev = dev; |
101 | hdmi->pdev = pdev; | 101 | hdmi->pdev = pdev; |
102 | hdmi->config = config; | ||
102 | hdmi->encoder = encoder; | 103 | hdmi->encoder = encoder; |
103 | 104 | ||
104 | /* not sure about which phy maps to which msm.. probably I miss some */ | 105 | /* not sure about which phy maps to which msm.. probably I miss some */ |
@@ -114,44 +115,70 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) | |||
114 | goto fail; | 115 | goto fail; |
115 | } | 116 | } |
116 | 117 | ||
117 | hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI"); | 118 | hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI"); |
118 | if (IS_ERR(hdmi->mmio)) { | 119 | if (IS_ERR(hdmi->mmio)) { |
119 | ret = PTR_ERR(hdmi->mmio); | 120 | ret = PTR_ERR(hdmi->mmio); |
120 | goto fail; | 121 | goto fail; |
121 | } | 122 | } |
122 | 123 | ||
123 | hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs"); | 124 | BUG_ON(config->hpd_reg_cnt > ARRAY_SIZE(hdmi->hpd_regs)); |
124 | if (IS_ERR(hdmi->mvs)) | 125 | for (i = 0; i < config->hpd_reg_cnt; i++) { |
125 | hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs"); | 126 | struct regulator *reg; |
126 | if (IS_ERR(hdmi->mvs)) { | 127 | |
127 | ret = PTR_ERR(hdmi->mvs); | 128 | reg = devm_regulator_get(&pdev->dev, config->hpd_reg_names[i]); |
128 | dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret); | 129 | if (IS_ERR(reg)) { |
129 | goto fail; | 130 | ret = PTR_ERR(reg); |
131 | dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n", | ||
132 | config->hpd_reg_names[i], ret); | ||
133 | goto fail; | ||
134 | } | ||
135 | |||
136 | hdmi->hpd_regs[i] = reg; | ||
130 | } | 137 | } |
131 | 138 | ||
132 | hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0"); | 139 | BUG_ON(config->pwr_reg_cnt > ARRAY_SIZE(hdmi->pwr_regs)); |
133 | if (IS_ERR(hdmi->mpp0)) | 140 | for (i = 0; i < config->pwr_reg_cnt; i++) { |
134 | hdmi->mpp0 = NULL; | 141 | struct regulator *reg; |
135 | 142 | ||
136 | hdmi->clk = devm_clk_get(&pdev->dev, "core_clk"); | 143 | reg = devm_regulator_get(&pdev->dev, config->pwr_reg_names[i]); |
137 | if (IS_ERR(hdmi->clk)) { | 144 | if (IS_ERR(reg)) { |
138 | ret = PTR_ERR(hdmi->clk); | 145 | ret = PTR_ERR(reg); |
139 | dev_err(dev->dev, "failed to get 'clk': %d\n", ret); | 146 | dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n", |
140 | goto fail; | 147 | config->pwr_reg_names[i], ret); |
148 | goto fail; | ||
149 | } | ||
150 | |||
151 | hdmi->pwr_regs[i] = reg; | ||
141 | } | 152 | } |
142 | 153 | ||
143 | hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk"); | 154 | BUG_ON(config->hpd_clk_cnt > ARRAY_SIZE(hdmi->hpd_clks)); |
144 | if (IS_ERR(hdmi->m_pclk)) { | 155 | for (i = 0; i < config->hpd_clk_cnt; i++) { |
145 | ret = PTR_ERR(hdmi->m_pclk); | 156 | struct clk *clk; |
146 | dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret); | 157 | |
147 | goto fail; | 158 | clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]); |
159 | if (IS_ERR(clk)) { | ||
160 | ret = PTR_ERR(clk); | ||
161 | dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n", | ||
162 | config->hpd_clk_names[i], ret); | ||
163 | goto fail; | ||
164 | } | ||
165 | |||
166 | hdmi->hpd_clks[i] = clk; | ||
148 | } | 167 | } |
149 | 168 | ||
150 | hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk"); | 169 | BUG_ON(config->pwr_clk_cnt > ARRAY_SIZE(hdmi->pwr_clks)); |
151 | if (IS_ERR(hdmi->s_pclk)) { | 170 | for (i = 0; i < config->pwr_clk_cnt; i++) { |
152 | ret = PTR_ERR(hdmi->s_pclk); | 171 | struct clk *clk; |
153 | dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret); | 172 | |
154 | goto fail; | 173 | clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]); |
174 | if (IS_ERR(clk)) { | ||
175 | ret = PTR_ERR(clk); | ||
176 | dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n", | ||
177 | config->pwr_clk_names[i], ret); | ||
178 | goto fail; | ||
179 | } | ||
180 | |||
181 | hdmi->pwr_clks[i] = clk; | ||
155 | } | 182 | } |
156 | 183 | ||
157 | hdmi->i2c = hdmi_i2c_init(hdmi); | 184 | hdmi->i2c = hdmi_i2c_init(hdmi); |
@@ -178,20 +205,22 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) | |||
178 | goto fail; | 205 | goto fail; |
179 | } | 206 | } |
180 | 207 | ||
181 | hdmi->irq = platform_get_irq(pdev, 0); | 208 | if (!config->shared_irq) { |
182 | if (hdmi->irq < 0) { | 209 | hdmi->irq = platform_get_irq(pdev, 0); |
183 | ret = hdmi->irq; | 210 | if (hdmi->irq < 0) { |
184 | dev_err(dev->dev, "failed to get irq: %d\n", ret); | 211 | ret = hdmi->irq; |
185 | goto fail; | 212 | dev_err(dev->dev, "failed to get irq: %d\n", ret); |
186 | } | 213 | goto fail; |
214 | } | ||
187 | 215 | ||
188 | ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq, | 216 | ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq, |
189 | NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, | 217 | NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, |
190 | "hdmi_isr", hdmi); | 218 | "hdmi_isr", hdmi); |
191 | if (ret < 0) { | 219 | if (ret < 0) { |
192 | dev_err(dev->dev, "failed to request IRQ%u: %d\n", | 220 | dev_err(dev->dev, "failed to request IRQ%u: %d\n", |
193 | hdmi->irq, ret); | 221 | hdmi->irq, ret); |
194 | goto fail; | 222 | goto fail; |
223 | } | ||
195 | } | 224 | } |
196 | 225 | ||
197 | encoder->bridge = hdmi->bridge; | 226 | encoder->bridge = hdmi->bridge; |
@@ -199,7 +228,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) | |||
199 | priv->bridges[priv->num_bridges++] = hdmi->bridge; | 228 | priv->bridges[priv->num_bridges++] = hdmi->bridge; |
200 | priv->connectors[priv->num_connectors++] = hdmi->connector; | 229 | priv->connectors[priv->num_connectors++] = hdmi->connector; |
201 | 230 | ||
202 | return 0; | 231 | return hdmi; |
203 | 232 | ||
204 | fail: | 233 | fail: |
205 | if (hdmi) { | 234 | if (hdmi) { |
@@ -211,37 +240,100 @@ fail: | |||
211 | hdmi_destroy(&hdmi->refcount); | 240 | hdmi_destroy(&hdmi->refcount); |
212 | } | 241 | } |
213 | 242 | ||
214 | return ret; | 243 | return ERR_PTR(ret); |
215 | } | 244 | } |
216 | 245 | ||
217 | /* | 246 | /* |
218 | * The hdmi device: | 247 | * The hdmi device: |
219 | */ | 248 | */ |
220 | 249 | ||
250 | #include <linux/of_gpio.h> | ||
251 | |||
221 | static int hdmi_dev_probe(struct platform_device *pdev) | 252 | static int hdmi_dev_probe(struct platform_device *pdev) |
222 | { | 253 | { |
223 | static struct hdmi_platform_config config = {}; | 254 | static struct hdmi_platform_config config = {}; |
224 | #ifdef CONFIG_OF | 255 | #ifdef CONFIG_OF |
225 | /* TODO */ | 256 | struct device_node *of_node = pdev->dev.of_node; |
257 | |||
258 | int get_gpio(const char *name) | ||
259 | { | ||
260 | int gpio = of_get_named_gpio(of_node, name, 0); | ||
261 | if (gpio < 0) { | ||
262 | dev_err(&pdev->dev, "failed to get gpio: %s (%d)\n", | ||
263 | name, gpio); | ||
264 | gpio = -1; | ||
265 | } | ||
266 | return gpio; | ||
267 | } | ||
268 | |||
269 | /* TODO actually use DT.. */ | ||
270 | static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"}; | ||
271 | static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"}; | ||
272 | static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"}; | ||
273 | static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"}; | ||
274 | |||
275 | config.phy_init = hdmi_phy_8x74_init; | ||
276 | config.mmio_name = "core_physical"; | ||
277 | config.hpd_reg_names = hpd_reg_names; | ||
278 | config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); | ||
279 | config.pwr_reg_names = pwr_reg_names; | ||
280 | config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names); | ||
281 | config.hpd_clk_names = hpd_clk_names; | ||
282 | config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); | ||
283 | config.pwr_clk_names = pwr_clk_names; | ||
284 | config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names); | ||
285 | config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk"); | ||
286 | config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data"); | ||
287 | config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd"); | ||
288 | config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en"); | ||
289 | config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel"); | ||
290 | config.shared_irq = true; | ||
291 | |||
226 | #else | 292 | #else |
293 | static const char *hpd_clk_names[] = { | ||
294 | "core_clk", "master_iface_clk", "slave_iface_clk", | ||
295 | }; | ||
227 | if (cpu_is_apq8064()) { | 296 | if (cpu_is_apq8064()) { |
297 | static const char *hpd_reg_names[] = {"8921_hdmi_mvs"}; | ||
228 | config.phy_init = hdmi_phy_8960_init; | 298 | config.phy_init = hdmi_phy_8960_init; |
299 | config.mmio_name = "hdmi_msm_hdmi_addr"; | ||
300 | config.hpd_reg_names = hpd_reg_names; | ||
301 | config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); | ||
302 | config.hpd_clk_names = hpd_clk_names; | ||
303 | config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); | ||
229 | config.ddc_clk_gpio = 70; | 304 | config.ddc_clk_gpio = 70; |
230 | config.ddc_data_gpio = 71; | 305 | config.ddc_data_gpio = 71; |
231 | config.hpd_gpio = 72; | 306 | config.hpd_gpio = 72; |
232 | config.pmic_gpio = 13 + NR_GPIO_IRQS; | 307 | config.mux_en_gpio = -1; |
233 | } else if (cpu_is_msm8960()) { | 308 | config.mux_sel_gpio = 13 + NR_GPIO_IRQS; |
309 | } else if (cpu_is_msm8960() || cpu_is_msm8960ab()) { | ||
310 | static const char *hpd_reg_names[] = {"8921_hdmi_mvs"}; | ||
234 | config.phy_init = hdmi_phy_8960_init; | 311 | config.phy_init = hdmi_phy_8960_init; |
312 | config.mmio_name = "hdmi_msm_hdmi_addr"; | ||
313 | config.hpd_reg_names = hpd_reg_names; | ||
314 | config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); | ||
315 | config.hpd_clk_names = hpd_clk_names; | ||
316 | config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); | ||
235 | config.ddc_clk_gpio = 100; | 317 | config.ddc_clk_gpio = 100; |
236 | config.ddc_data_gpio = 101; | 318 | config.ddc_data_gpio = 101; |
237 | config.hpd_gpio = 102; | 319 | config.hpd_gpio = 102; |
238 | config.pmic_gpio = -1; | 320 | config.mux_en_gpio = -1; |
321 | config.mux_sel_gpio = -1; | ||
239 | } else if (cpu_is_msm8x60()) { | 322 | } else if (cpu_is_msm8x60()) { |
323 | static const char *hpd_reg_names[] = { | ||
324 | "8901_hdmi_mvs", "8901_mpp0" | ||
325 | }; | ||
240 | config.phy_init = hdmi_phy_8x60_init; | 326 | config.phy_init = hdmi_phy_8x60_init; |
327 | config.mmio_name = "hdmi_msm_hdmi_addr"; | ||
328 | config.hpd_reg_names = hpd_reg_names; | ||
329 | config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); | ||
330 | config.hpd_clk_names = hpd_clk_names; | ||
331 | config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); | ||
241 | config.ddc_clk_gpio = 170; | 332 | config.ddc_clk_gpio = 170; |
242 | config.ddc_data_gpio = 171; | 333 | config.ddc_data_gpio = 171; |
243 | config.hpd_gpio = 172; | 334 | config.hpd_gpio = 172; |
244 | config.pmic_gpio = -1; | 335 | config.mux_en_gpio = -1; |
336 | config.mux_sel_gpio = -1; | ||
245 | } | 337 | } |
246 | #endif | 338 | #endif |
247 | pdev->dev.platform_data = &config; | 339 | pdev->dev.platform_data = &config; |
@@ -255,10 +347,19 @@ static int hdmi_dev_remove(struct platform_device *pdev) | |||
255 | return 0; | 347 | return 0; |
256 | } | 348 | } |
257 | 349 | ||
350 | static const struct of_device_id dt_match[] = { | ||
351 | { .compatible = "qcom,hdmi-tx" }, | ||
352 | {} | ||
353 | }; | ||
354 | MODULE_DEVICE_TABLE(of, dt_match); | ||
355 | |||
258 | static struct platform_driver hdmi_driver = { | 356 | static struct platform_driver hdmi_driver = { |
259 | .probe = hdmi_dev_probe, | 357 | .probe = hdmi_dev_probe, |
260 | .remove = hdmi_dev_remove, | 358 | .remove = hdmi_dev_remove, |
261 | .driver.name = "hdmi_msm", | 359 | .driver = { |
360 | .name = "hdmi_msm", | ||
361 | .of_match_table = dt_match, | ||
362 | }, | ||
262 | }; | 363 | }; |
263 | 364 | ||
264 | void __init hdmi_register(void) | 365 | void __init hdmi_register(void) |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 2c2ec566394c..41b29add70b1 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h | |||
@@ -28,6 +28,7 @@ | |||
28 | 28 | ||
29 | 29 | ||
30 | struct hdmi_phy; | 30 | struct hdmi_phy; |
31 | struct hdmi_platform_config; | ||
31 | 32 | ||
32 | struct hdmi { | 33 | struct hdmi { |
33 | struct kref refcount; | 34 | struct kref refcount; |
@@ -35,14 +36,14 @@ struct hdmi { | |||
35 | struct drm_device *dev; | 36 | struct drm_device *dev; |
36 | struct platform_device *pdev; | 37 | struct platform_device *pdev; |
37 | 38 | ||
38 | void __iomem *mmio; | 39 | const struct hdmi_platform_config *config; |
39 | 40 | ||
40 | struct regulator *mvs; /* HDMI_5V */ | 41 | void __iomem *mmio; |
41 | struct regulator *mpp0; /* External 5V */ | ||
42 | 42 | ||
43 | struct clk *clk; | 43 | struct regulator *hpd_regs[2]; |
44 | struct clk *m_pclk; | 44 | struct regulator *pwr_regs[2]; |
45 | struct clk *s_pclk; | 45 | struct clk *hpd_clks[3]; |
46 | struct clk *pwr_clks[2]; | ||
46 | 47 | ||
47 | struct hdmi_phy *phy; | 48 | struct hdmi_phy *phy; |
48 | struct i2c_adapter *i2c; | 49 | struct i2c_adapter *i2c; |
@@ -60,7 +61,29 @@ struct hdmi { | |||
60 | /* platform config data (ie. from DT, or pdata) */ | 61 | /* platform config data (ie. from DT, or pdata) */ |
61 | struct hdmi_platform_config { | 62 | struct hdmi_platform_config { |
62 | struct hdmi_phy *(*phy_init)(struct hdmi *hdmi); | 63 | struct hdmi_phy *(*phy_init)(struct hdmi *hdmi); |
63 | int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio; | 64 | const char *mmio_name; |
65 | |||
66 | /* regulators that need to be on for hpd: */ | ||
67 | const char **hpd_reg_names; | ||
68 | int hpd_reg_cnt; | ||
69 | |||
70 | /* regulators that need to be on for screen pwr: */ | ||
71 | const char **pwr_reg_names; | ||
72 | int pwr_reg_cnt; | ||
73 | |||
74 | /* clks that need to be on for hpd: */ | ||
75 | const char **hpd_clk_names; | ||
76 | int hpd_clk_cnt; | ||
77 | |||
78 | /* clks that need to be on for screen pwr (ie pixel clk): */ | ||
79 | const char **pwr_clk_names; | ||
80 | int pwr_clk_cnt; | ||
81 | |||
82 | /* gpio's: */ | ||
83 | int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio; | ||
84 | |||
85 | /* older devices had their own irq, mdp5+ it is shared w/ mdp: */ | ||
86 | bool shared_irq; | ||
64 | }; | 87 | }; |
65 | 88 | ||
66 | void hdmi_set_mode(struct hdmi *hdmi, bool power_on); | 89 | void hdmi_set_mode(struct hdmi *hdmi, bool power_on); |
@@ -106,6 +129,7 @@ struct hdmi_phy { | |||
106 | 129 | ||
107 | struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi); | 130 | struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi); |
108 | struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi); | 131 | struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi); |
132 | struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi); | ||
109 | 133 | ||
110 | /* | 134 | /* |
111 | * hdmi bridge: | 135 | * hdmi bridge: |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h index 4e939f82918c..e2636582cfd7 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h | |||
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/ | |||
8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | ||
10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) |
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) |
14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
14 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) |
15 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) |
16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) |
17 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) |
18 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) | 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) |
19 | 21 | ||
20 | Copyright (C) 2013 by the following authors: | 22 | Copyright (C) 2013 by the following authors: |
21 | - Rob Clark <robdclark@gmail.com> (robclark) | 23 | - Rob Clark <robdclark@gmail.com> (robclark) |
@@ -212,6 +214,20 @@ static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state | |||
212 | #define REG_HDMI_HDCP_RESET 0x00000130 | 214 | #define REG_HDMI_HDCP_RESET 0x00000130 |
213 | #define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001 | 215 | #define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001 |
214 | 216 | ||
217 | #define REG_HDMI_VENSPEC_INFO0 0x0000016c | ||
218 | |||
219 | #define REG_HDMI_VENSPEC_INFO1 0x00000170 | ||
220 | |||
221 | #define REG_HDMI_VENSPEC_INFO2 0x00000174 | ||
222 | |||
223 | #define REG_HDMI_VENSPEC_INFO3 0x00000178 | ||
224 | |||
225 | #define REG_HDMI_VENSPEC_INFO4 0x0000017c | ||
226 | |||
227 | #define REG_HDMI_VENSPEC_INFO5 0x00000180 | ||
228 | |||
229 | #define REG_HDMI_VENSPEC_INFO6 0x00000184 | ||
230 | |||
215 | #define REG_HDMI_AUDIO_CFG 0x000001d0 | 231 | #define REG_HDMI_AUDIO_CFG 0x000001d0 |
216 | #define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001 | 232 | #define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001 |
217 | #define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0 | 233 | #define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0 |
@@ -235,6 +251,9 @@ static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val) | |||
235 | return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK; | 251 | return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK; |
236 | } | 252 | } |
237 | 253 | ||
254 | #define REG_HDMI_DDC_ARBITRATION 0x00000210 | ||
255 | #define HDMI_DDC_ARBITRATION_HW_ARBITRATION 0x00000010 | ||
256 | |||
238 | #define REG_HDMI_DDC_INT_CTRL 0x00000214 | 257 | #define REG_HDMI_DDC_INT_CTRL 0x00000214 |
239 | #define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001 | 258 | #define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001 |
240 | #define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002 | 259 | #define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002 |
@@ -340,6 +359,20 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val) | |||
340 | return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK; | 359 | return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK; |
341 | } | 360 | } |
342 | 361 | ||
362 | #define REG_HDMI_CEC_STATUS 0x00000298 | ||
363 | |||
364 | #define REG_HDMI_CEC_INT 0x0000029c | ||
365 | |||
366 | #define REG_HDMI_CEC_ADDR 0x000002a0 | ||
367 | |||
368 | #define REG_HDMI_CEC_TIME 0x000002a4 | ||
369 | |||
370 | #define REG_HDMI_CEC_REFTIMER 0x000002a8 | ||
371 | |||
372 | #define REG_HDMI_CEC_RD_DATA 0x000002ac | ||
373 | |||
374 | #define REG_HDMI_CEC_RD_FILTER 0x000002b0 | ||
375 | |||
343 | #define REG_HDMI_ACTIVE_HSYNC 0x000002b4 | 376 | #define REG_HDMI_ACTIVE_HSYNC 0x000002b4 |
344 | #define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff | 377 | #define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff |
345 | #define HDMI_ACTIVE_HSYNC_START__SHIFT 0 | 378 | #define HDMI_ACTIVE_HSYNC_START__SHIFT 0 |
@@ -410,17 +443,33 @@ static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val) | |||
410 | #define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000 | 443 | #define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000 |
411 | #define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000 | 444 | #define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000 |
412 | 445 | ||
446 | #define REG_HDMI_AUD_INT 0x000002cc | ||
447 | #define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001 | ||
448 | #define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002 | ||
449 | #define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004 | ||
450 | #define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008 | ||
451 | |||
413 | #define REG_HDMI_PHY_CTRL 0x000002d4 | 452 | #define REG_HDMI_PHY_CTRL 0x000002d4 |
414 | #define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001 | 453 | #define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001 |
415 | #define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002 | 454 | #define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002 |
416 | #define HDMI_PHY_CTRL_SW_RESET 0x00000004 | 455 | #define HDMI_PHY_CTRL_SW_RESET 0x00000004 |
417 | #define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008 | 456 | #define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008 |
418 | 457 | ||
419 | #define REG_HDMI_AUD_INT 0x000002cc | 458 | #define REG_HDMI_CEC_WR_RANGE 0x000002dc |
420 | #define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001 | 459 | |
421 | #define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002 | 460 | #define REG_HDMI_CEC_RD_RANGE 0x000002e0 |
422 | #define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004 | 461 | |
423 | #define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008 | 462 | #define REG_HDMI_VERSION 0x000002e4 |
463 | |||
464 | #define REG_HDMI_CEC_COMPL_CTL 0x00000360 | ||
465 | |||
466 | #define REG_HDMI_CEC_RD_START_RANGE 0x00000364 | ||
467 | |||
468 | #define REG_HDMI_CEC_RD_TOTAL_RANGE 0x00000368 | ||
469 | |||
470 | #define REG_HDMI_CEC_RD_ERR_RESP_LO 0x0000036c | ||
471 | |||
472 | #define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370 | ||
424 | 473 | ||
425 | #define REG_HDMI_8x60_PHY_REG0 0x00000300 | 474 | #define REG_HDMI_8x60_PHY_REG0 0x00000300 |
426 | #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c | 475 | #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c |
@@ -504,5 +553,23 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) | |||
504 | 553 | ||
505 | #define REG_HDMI_8960_PHY_REG12 0x00000430 | 554 | #define REG_HDMI_8960_PHY_REG12 0x00000430 |
506 | 555 | ||
556 | #define REG_HDMI_8x74_ANA_CFG0 0x00000000 | ||
557 | |||
558 | #define REG_HDMI_8x74_ANA_CFG1 0x00000004 | ||
559 | |||
560 | #define REG_HDMI_8x74_PD_CTRL0 0x00000010 | ||
561 | |||
562 | #define REG_HDMI_8x74_PD_CTRL1 0x00000014 | ||
563 | |||
564 | #define REG_HDMI_8x74_BIST_CFG0 0x00000034 | ||
565 | |||
566 | #define REG_HDMI_8x74_BIST_PATN0 0x0000003c | ||
567 | |||
568 | #define REG_HDMI_8x74_BIST_PATN1 0x00000040 | ||
569 | |||
570 | #define REG_HDMI_8x74_BIST_PATN2 0x00000044 | ||
571 | |||
572 | #define REG_HDMI_8x74_BIST_PATN3 0x00000048 | ||
573 | |||
507 | 574 | ||
508 | #endif /* HDMI_XML */ | 575 | #endif /* HDMI_XML */ |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 5a8ee3473cf5..7d10e55403c6 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | |||
@@ -21,6 +21,7 @@ struct hdmi_bridge { | |||
21 | struct drm_bridge base; | 21 | struct drm_bridge base; |
22 | 22 | ||
23 | struct hdmi *hdmi; | 23 | struct hdmi *hdmi; |
24 | bool power_on; | ||
24 | 25 | ||
25 | unsigned long int pixclock; | 26 | unsigned long int pixclock; |
26 | }; | 27 | }; |
@@ -34,6 +35,65 @@ static void hdmi_bridge_destroy(struct drm_bridge *bridge) | |||
34 | kfree(hdmi_bridge); | 35 | kfree(hdmi_bridge); |
35 | } | 36 | } |
36 | 37 | ||
38 | static void power_on(struct drm_bridge *bridge) | ||
39 | { | ||
40 | struct drm_device *dev = bridge->dev; | ||
41 | struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); | ||
42 | struct hdmi *hdmi = hdmi_bridge->hdmi; | ||
43 | const struct hdmi_platform_config *config = hdmi->config; | ||
44 | int i, ret; | ||
45 | |||
46 | for (i = 0; i < config->pwr_reg_cnt; i++) { | ||
47 | ret = regulator_enable(hdmi->pwr_regs[i]); | ||
48 | if (ret) { | ||
49 | dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n", | ||
50 | config->pwr_reg_names[i], ret); | ||
51 | } | ||
52 | } | ||
53 | |||
54 | if (config->pwr_clk_cnt > 0) { | ||
55 | DBG("pixclock: %lu", hdmi_bridge->pixclock); | ||
56 | ret = clk_set_rate(hdmi->pwr_clks[0], hdmi_bridge->pixclock); | ||
57 | if (ret) { | ||
58 | dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n", | ||
59 | config->pwr_clk_names[0], ret); | ||
60 | } | ||
61 | } | ||
62 | |||
63 | for (i = 0; i < config->pwr_clk_cnt; i++) { | ||
64 | ret = clk_prepare_enable(hdmi->pwr_clks[i]); | ||
65 | if (ret) { | ||
66 | dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n", | ||
67 | config->pwr_clk_names[i], ret); | ||
68 | } | ||
69 | } | ||
70 | } | ||
71 | |||
72 | static void power_off(struct drm_bridge *bridge) | ||
73 | { | ||
74 | struct drm_device *dev = bridge->dev; | ||
75 | struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); | ||
76 | struct hdmi *hdmi = hdmi_bridge->hdmi; | ||
77 | const struct hdmi_platform_config *config = hdmi->config; | ||
78 | int i, ret; | ||
79 | |||
80 | /* TODO do we need to wait for final vblank somewhere before | ||
81 | * cutting the clocks? | ||
82 | */ | ||
83 | mdelay(16 + 4); | ||
84 | |||
85 | for (i = 0; i < config->pwr_clk_cnt; i++) | ||
86 | clk_disable_unprepare(hdmi->pwr_clks[i]); | ||
87 | |||
88 | for (i = 0; i < config->pwr_reg_cnt; i++) { | ||
89 | ret = regulator_disable(hdmi->pwr_regs[i]); | ||
90 | if (ret) { | ||
91 | dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n", | ||
92 | config->pwr_reg_names[i], ret); | ||
93 | } | ||
94 | } | ||
95 | } | ||
96 | |||
37 | static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) | 97 | static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) |
38 | { | 98 | { |
39 | struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); | 99 | struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); |
@@ -41,6 +101,12 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) | |||
41 | struct hdmi_phy *phy = hdmi->phy; | 101 | struct hdmi_phy *phy = hdmi->phy; |
42 | 102 | ||
43 | DBG("power up"); | 103 | DBG("power up"); |
104 | |||
105 | if (!hdmi_bridge->power_on) { | ||
106 | power_on(bridge); | ||
107 | hdmi_bridge->power_on = true; | ||
108 | } | ||
109 | |||
44 | phy->funcs->powerup(phy, hdmi_bridge->pixclock); | 110 | phy->funcs->powerup(phy, hdmi_bridge->pixclock); |
45 | hdmi_set_mode(hdmi, true); | 111 | hdmi_set_mode(hdmi, true); |
46 | } | 112 | } |
@@ -62,6 +128,11 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge) | |||
62 | DBG("power down"); | 128 | DBG("power down"); |
63 | hdmi_set_mode(hdmi, false); | 129 | hdmi_set_mode(hdmi, false); |
64 | phy->funcs->powerdown(phy); | 130 | phy->funcs->powerdown(phy); |
131 | |||
132 | if (hdmi_bridge->power_on) { | ||
133 | power_off(bridge); | ||
134 | hdmi_bridge->power_on = false; | ||
135 | } | ||
65 | } | 136 | } |
66 | 137 | ||
67 | static void hdmi_bridge_mode_set(struct drm_bridge *bridge, | 138 | static void hdmi_bridge_mode_set(struct drm_bridge *bridge, |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index 823eee521a31..7dedfdd12075 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c | |||
@@ -17,19 +17,20 @@ | |||
17 | 17 | ||
18 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
19 | 19 | ||
20 | #include "msm_kms.h" | ||
20 | #include "hdmi.h" | 21 | #include "hdmi.h" |
21 | 22 | ||
22 | struct hdmi_connector { | 23 | struct hdmi_connector { |
23 | struct drm_connector base; | 24 | struct drm_connector base; |
24 | struct hdmi *hdmi; | 25 | struct hdmi *hdmi; |
26 | struct work_struct hpd_work; | ||
25 | }; | 27 | }; |
26 | #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base) | 28 | #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base) |
27 | 29 | ||
28 | static int gpio_config(struct hdmi *hdmi, bool on) | 30 | static int gpio_config(struct hdmi *hdmi, bool on) |
29 | { | 31 | { |
30 | struct drm_device *dev = hdmi->dev; | 32 | struct drm_device *dev = hdmi->dev; |
31 | struct hdmi_platform_config *config = | 33 | const struct hdmi_platform_config *config = hdmi->config; |
32 | hdmi->pdev->dev.platform_data; | ||
33 | int ret; | 34 | int ret; |
34 | 35 | ||
35 | if (on) { | 36 | if (on) { |
@@ -39,26 +40,43 @@ static int gpio_config(struct hdmi *hdmi, bool on) | |||
39 | "HDMI_DDC_CLK", config->ddc_clk_gpio, ret); | 40 | "HDMI_DDC_CLK", config->ddc_clk_gpio, ret); |
40 | goto error1; | 41 | goto error1; |
41 | } | 42 | } |
43 | gpio_set_value_cansleep(config->ddc_clk_gpio, 1); | ||
44 | |||
42 | ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA"); | 45 | ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA"); |
43 | if (ret) { | 46 | if (ret) { |
44 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", | 47 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", |
45 | "HDMI_DDC_DATA", config->ddc_data_gpio, ret); | 48 | "HDMI_DDC_DATA", config->ddc_data_gpio, ret); |
46 | goto error2; | 49 | goto error2; |
47 | } | 50 | } |
51 | gpio_set_value_cansleep(config->ddc_data_gpio, 1); | ||
52 | |||
48 | ret = gpio_request(config->hpd_gpio, "HDMI_HPD"); | 53 | ret = gpio_request(config->hpd_gpio, "HDMI_HPD"); |
49 | if (ret) { | 54 | if (ret) { |
50 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", | 55 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", |
51 | "HDMI_HPD", config->hpd_gpio, ret); | 56 | "HDMI_HPD", config->hpd_gpio, ret); |
52 | goto error3; | 57 | goto error3; |
53 | } | 58 | } |
54 | if (config->pmic_gpio != -1) { | 59 | gpio_direction_input(config->hpd_gpio); |
55 | ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL"); | 60 | gpio_set_value_cansleep(config->hpd_gpio, 1); |
61 | |||
62 | if (config->mux_en_gpio != -1) { | ||
63 | ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN"); | ||
56 | if (ret) { | 64 | if (ret) { |
57 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", | 65 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", |
58 | "PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret); | 66 | "HDMI_MUX_SEL", config->mux_en_gpio, ret); |
59 | goto error4; | 67 | goto error4; |
60 | } | 68 | } |
61 | gpio_set_value_cansleep(config->pmic_gpio, 0); | 69 | gpio_set_value_cansleep(config->mux_en_gpio, 1); |
70 | } | ||
71 | |||
72 | if (config->mux_sel_gpio != -1) { | ||
73 | ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL"); | ||
74 | if (ret) { | ||
75 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", | ||
76 | "HDMI_MUX_SEL", config->mux_sel_gpio, ret); | ||
77 | goto error5; | ||
78 | } | ||
79 | gpio_set_value_cansleep(config->mux_sel_gpio, 0); | ||
62 | } | 80 | } |
63 | DBG("gpio on"); | 81 | DBG("gpio on"); |
64 | } else { | 82 | } else { |
@@ -66,15 +84,23 @@ static int gpio_config(struct hdmi *hdmi, bool on) | |||
66 | gpio_free(config->ddc_data_gpio); | 84 | gpio_free(config->ddc_data_gpio); |
67 | gpio_free(config->hpd_gpio); | 85 | gpio_free(config->hpd_gpio); |
68 | 86 | ||
69 | if (config->pmic_gpio != -1) { | 87 | if (config->mux_en_gpio != -1) { |
70 | gpio_set_value_cansleep(config->pmic_gpio, 1); | 88 | gpio_set_value_cansleep(config->mux_en_gpio, 0); |
71 | gpio_free(config->pmic_gpio); | 89 | gpio_free(config->mux_en_gpio); |
90 | } | ||
91 | |||
92 | if (config->mux_sel_gpio != -1) { | ||
93 | gpio_set_value_cansleep(config->mux_sel_gpio, 1); | ||
94 | gpio_free(config->mux_sel_gpio); | ||
72 | } | 95 | } |
73 | DBG("gpio off"); | 96 | DBG("gpio off"); |
74 | } | 97 | } |
75 | 98 | ||
76 | return 0; | 99 | return 0; |
77 | 100 | ||
101 | error5: | ||
102 | if (config->mux_en_gpio != -1) | ||
103 | gpio_free(config->mux_en_gpio); | ||
78 | error4: | 104 | error4: |
79 | gpio_free(config->hpd_gpio); | 105 | gpio_free(config->hpd_gpio); |
80 | error3: | 106 | error3: |
@@ -88,10 +114,11 @@ error1: | |||
88 | static int hpd_enable(struct hdmi_connector *hdmi_connector) | 114 | static int hpd_enable(struct hdmi_connector *hdmi_connector) |
89 | { | 115 | { |
90 | struct hdmi *hdmi = hdmi_connector->hdmi; | 116 | struct hdmi *hdmi = hdmi_connector->hdmi; |
117 | const struct hdmi_platform_config *config = hdmi->config; | ||
91 | struct drm_device *dev = hdmi_connector->base.dev; | 118 | struct drm_device *dev = hdmi_connector->base.dev; |
92 | struct hdmi_phy *phy = hdmi->phy; | 119 | struct hdmi_phy *phy = hdmi->phy; |
93 | uint32_t hpd_ctrl; | 120 | uint32_t hpd_ctrl; |
94 | int ret; | 121 | int i, ret; |
95 | 122 | ||
96 | ret = gpio_config(hdmi, true); | 123 | ret = gpio_config(hdmi, true); |
97 | if (ret) { | 124 | if (ret) { |
@@ -99,31 +126,22 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) | |||
99 | goto fail; | 126 | goto fail; |
100 | } | 127 | } |
101 | 128 | ||
102 | ret = clk_prepare_enable(hdmi->clk); | 129 | for (i = 0; i < config->hpd_clk_cnt; i++) { |
103 | if (ret) { | 130 | ret = clk_prepare_enable(hdmi->hpd_clks[i]); |
104 | dev_err(dev->dev, "failed to enable 'clk': %d\n", ret); | 131 | if (ret) { |
105 | goto fail; | 132 | dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n", |
106 | } | 133 | config->hpd_clk_names[i], ret); |
107 | 134 | goto fail; | |
108 | ret = clk_prepare_enable(hdmi->m_pclk); | 135 | } |
109 | if (ret) { | ||
110 | dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret); | ||
111 | goto fail; | ||
112 | } | ||
113 | |||
114 | ret = clk_prepare_enable(hdmi->s_pclk); | ||
115 | if (ret) { | ||
116 | dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret); | ||
117 | goto fail; | ||
118 | } | 136 | } |
119 | 137 | ||
120 | if (hdmi->mpp0) | 138 | for (i = 0; i < config->hpd_reg_cnt; i++) { |
121 | ret = regulator_enable(hdmi->mpp0); | 139 | ret = regulator_enable(hdmi->hpd_regs[i]); |
122 | if (!ret) | 140 | if (ret) { |
123 | ret = regulator_enable(hdmi->mvs); | 141 | dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n", |
124 | if (ret) { | 142 | config->hpd_reg_names[i], ret); |
125 | dev_err(dev->dev, "failed to enable regulators: %d\n", ret); | 143 | goto fail; |
126 | goto fail; | 144 | } |
127 | } | 145 | } |
128 | 146 | ||
129 | hdmi_set_mode(hdmi, false); | 147 | hdmi_set_mode(hdmi, false); |
@@ -156,26 +174,26 @@ fail: | |||
156 | static int hdp_disable(struct hdmi_connector *hdmi_connector) | 174 | static int hdp_disable(struct hdmi_connector *hdmi_connector) |
157 | { | 175 | { |
158 | struct hdmi *hdmi = hdmi_connector->hdmi; | 176 | struct hdmi *hdmi = hdmi_connector->hdmi; |
177 | const struct hdmi_platform_config *config = hdmi->config; | ||
159 | struct drm_device *dev = hdmi_connector->base.dev; | 178 | struct drm_device *dev = hdmi_connector->base.dev; |
160 | int ret = 0; | 179 | int i, ret = 0; |
161 | 180 | ||
162 | /* Disable HPD interrupt */ | 181 | /* Disable HPD interrupt */ |
163 | hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0); | 182 | hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0); |
164 | 183 | ||
165 | hdmi_set_mode(hdmi, false); | 184 | hdmi_set_mode(hdmi, false); |
166 | 185 | ||
167 | if (hdmi->mpp0) | 186 | for (i = 0; i < config->hpd_reg_cnt; i++) { |
168 | ret = regulator_disable(hdmi->mpp0); | 187 | ret = regulator_disable(hdmi->hpd_regs[i]); |
169 | if (!ret) | 188 | if (ret) { |
170 | ret = regulator_disable(hdmi->mvs); | 189 | dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n", |
171 | if (ret) { | 190 | config->hpd_reg_names[i], ret); |
172 | dev_err(dev->dev, "failed to enable regulators: %d\n", ret); | 191 | goto fail; |
173 | goto fail; | 192 | } |
174 | } | 193 | } |
175 | 194 | ||
176 | clk_disable_unprepare(hdmi->clk); | 195 | for (i = 0; i < config->hpd_clk_cnt; i++) |
177 | clk_disable_unprepare(hdmi->m_pclk); | 196 | clk_disable_unprepare(hdmi->hpd_clks[i]); |
178 | clk_disable_unprepare(hdmi->s_pclk); | ||
179 | 197 | ||
180 | ret = gpio_config(hdmi, false); | 198 | ret = gpio_config(hdmi, false); |
181 | if (ret) { | 199 | if (ret) { |
@@ -189,9 +207,19 @@ fail: | |||
189 | return ret; | 207 | return ret; |
190 | } | 208 | } |
191 | 209 | ||
210 | static void | ||
211 | hotplug_work(struct work_struct *work) | ||
212 | { | ||
213 | struct hdmi_connector *hdmi_connector = | ||
214 | container_of(work, struct hdmi_connector, hpd_work); | ||
215 | struct drm_connector *connector = &hdmi_connector->base; | ||
216 | drm_helper_hpd_irq_event(connector->dev); | ||
217 | } | ||
218 | |||
192 | void hdmi_connector_irq(struct drm_connector *connector) | 219 | void hdmi_connector_irq(struct drm_connector *connector) |
193 | { | 220 | { |
194 | struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); | 221 | struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); |
222 | struct msm_drm_private *priv = connector->dev->dev_private; | ||
195 | struct hdmi *hdmi = hdmi_connector->hdmi; | 223 | struct hdmi *hdmi = hdmi_connector->hdmi; |
196 | uint32_t hpd_int_status, hpd_int_ctrl; | 224 | uint32_t hpd_int_status, hpd_int_ctrl; |
197 | 225 | ||
@@ -209,13 +237,13 @@ void hdmi_connector_irq(struct drm_connector *connector) | |||
209 | hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, | 237 | hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, |
210 | hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK); | 238 | hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK); |
211 | 239 | ||
212 | drm_helper_hpd_irq_event(connector->dev); | ||
213 | |||
214 | /* detect disconnect if we are connected or visa versa: */ | 240 | /* detect disconnect if we are connected or visa versa: */ |
215 | hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN; | 241 | hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN; |
216 | if (!detected) | 242 | if (!detected) |
217 | hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT; | 243 | hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT; |
218 | hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl); | 244 | hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl); |
245 | |||
246 | queue_work(priv->wq, &hdmi_connector->hpd_work); | ||
219 | } | 247 | } |
220 | } | 248 | } |
221 | 249 | ||
@@ -224,6 +252,7 @@ static enum drm_connector_status hdmi_connector_detect( | |||
224 | { | 252 | { |
225 | struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); | 253 | struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); |
226 | struct hdmi *hdmi = hdmi_connector->hdmi; | 254 | struct hdmi *hdmi = hdmi_connector->hdmi; |
255 | const struct hdmi_platform_config *config = hdmi->config; | ||
227 | uint32_t hpd_int_status; | 256 | uint32_t hpd_int_status; |
228 | int retry = 20; | 257 | int retry = 20; |
229 | 258 | ||
@@ -233,6 +262,14 @@ static enum drm_connector_status hdmi_connector_detect( | |||
233 | * let that trick us into thinking the monitor is gone: | 262 | * let that trick us into thinking the monitor is gone: |
234 | */ | 263 | */ |
235 | while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) { | 264 | while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) { |
265 | /* hdmi debounce logic seems to get stuck sometimes, | ||
266 | * read directly the gpio to get a second opinion: | ||
267 | */ | ||
268 | if (gpio_get_value(config->hpd_gpio)) { | ||
269 | DBG("gpio tells us we are connected!"); | ||
270 | hpd_int_status |= HDMI_HPD_INT_STATUS_CABLE_DETECTED; | ||
271 | break; | ||
272 | } | ||
236 | mdelay(10); | 273 | mdelay(10); |
237 | hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); | 274 | hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); |
238 | DBG("status=%08x", hpd_int_status); | 275 | DBG("status=%08x", hpd_int_status); |
@@ -285,6 +322,8 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector, | |||
285 | struct drm_display_mode *mode) | 322 | struct drm_display_mode *mode) |
286 | { | 323 | { |
287 | struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); | 324 | struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); |
325 | struct hdmi *hdmi = hdmi_connector->hdmi; | ||
326 | const struct hdmi_platform_config *config = hdmi->config; | ||
288 | struct msm_drm_private *priv = connector->dev->dev_private; | 327 | struct msm_drm_private *priv = connector->dev->dev_private; |
289 | struct msm_kms *kms = priv->kms; | 328 | struct msm_kms *kms = priv->kms; |
290 | long actual, requested; | 329 | long actual, requested; |
@@ -293,6 +332,13 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector, | |||
293 | actual = kms->funcs->round_pixclk(kms, | 332 | actual = kms->funcs->round_pixclk(kms, |
294 | requested, hdmi_connector->hdmi->encoder); | 333 | requested, hdmi_connector->hdmi->encoder); |
295 | 334 | ||
335 | /* for mdp5/apq8074, we manage our own pixel clk (as opposed to | ||
336 | * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder | ||
337 | * instead): | ||
338 | */ | ||
339 | if (config->pwr_clk_cnt > 0) | ||
340 | actual = clk_round_rate(hdmi->pwr_clks[0], actual); | ||
341 | |||
296 | DBG("requested=%ld, actual=%ld", requested, actual); | 342 | DBG("requested=%ld, actual=%ld", requested, actual); |
297 | 343 | ||
298 | if (actual != requested) | 344 | if (actual != requested) |
@@ -335,6 +381,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi) | |||
335 | } | 381 | } |
336 | 382 | ||
337 | hdmi_connector->hdmi = hdmi_reference(hdmi); | 383 | hdmi_connector->hdmi = hdmi_reference(hdmi); |
384 | INIT_WORK(&hdmi_connector->hpd_work, hotplug_work); | ||
338 | 385 | ||
339 | connector = &hdmi_connector->base; | 386 | connector = &hdmi_connector->base; |
340 | 387 | ||
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c new file mode 100644 index 000000000000..59fa6cdacb2a --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c | |||
@@ -0,0 +1,157 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #include "hdmi.h" | ||
19 | |||
20 | struct hdmi_phy_8x74 { | ||
21 | struct hdmi_phy base; | ||
22 | struct hdmi *hdmi; | ||
23 | void __iomem *mmio; | ||
24 | }; | ||
25 | #define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base) | ||
26 | |||
27 | |||
28 | static void phy_write(struct hdmi_phy_8x74 *phy, u32 reg, u32 data) | ||
29 | { | ||
30 | msm_writel(data, phy->mmio + reg); | ||
31 | } | ||
32 | |||
33 | //static u32 phy_read(struct hdmi_phy_8x74 *phy, u32 reg) | ||
34 | //{ | ||
35 | // return msm_readl(phy->mmio + reg); | ||
36 | //} | ||
37 | |||
38 | static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy) | ||
39 | { | ||
40 | struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); | ||
41 | kfree(phy_8x74); | ||
42 | } | ||
43 | |||
44 | static void hdmi_phy_8x74_reset(struct hdmi_phy *phy) | ||
45 | { | ||
46 | struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); | ||
47 | struct hdmi *hdmi = phy_8x74->hdmi; | ||
48 | unsigned int val; | ||
49 | |||
50 | /* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */ | ||
51 | |||
52 | val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL); | ||
53 | |||
54 | if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { | ||
55 | /* pull low */ | ||
56 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
57 | val & ~HDMI_PHY_CTRL_SW_RESET); | ||
58 | } else { | ||
59 | /* pull high */ | ||
60 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
61 | val | HDMI_PHY_CTRL_SW_RESET); | ||
62 | } | ||
63 | |||
64 | if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { | ||
65 | /* pull low */ | ||
66 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
67 | val & ~HDMI_PHY_CTRL_SW_RESET_PLL); | ||
68 | } else { | ||
69 | /* pull high */ | ||
70 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
71 | val | HDMI_PHY_CTRL_SW_RESET_PLL); | ||
72 | } | ||
73 | |||
74 | msleep(100); | ||
75 | |||
76 | if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { | ||
77 | /* pull high */ | ||
78 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
79 | val | HDMI_PHY_CTRL_SW_RESET); | ||
80 | } else { | ||
81 | /* pull low */ | ||
82 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
83 | val & ~HDMI_PHY_CTRL_SW_RESET); | ||
84 | } | ||
85 | |||
86 | if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { | ||
87 | /* pull high */ | ||
88 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
89 | val | HDMI_PHY_CTRL_SW_RESET_PLL); | ||
90 | } else { | ||
91 | /* pull low */ | ||
92 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
93 | val & ~HDMI_PHY_CTRL_SW_RESET_PLL); | ||
94 | } | ||
95 | } | ||
96 | |||
97 | static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy, | ||
98 | unsigned long int pixclock) | ||
99 | { | ||
100 | struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); | ||
101 | |||
102 | phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG0, 0x1b); | ||
103 | phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG1, 0xf2); | ||
104 | phy_write(phy_8x74, REG_HDMI_8x74_BIST_CFG0, 0x0); | ||
105 | phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN0, 0x0); | ||
106 | phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN1, 0x0); | ||
107 | phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN2, 0x0); | ||
108 | phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN3, 0x0); | ||
109 | phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL1, 0x20); | ||
110 | } | ||
111 | |||
112 | static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy) | ||
113 | { | ||
114 | struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); | ||
115 | phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL0, 0x7f); | ||
116 | } | ||
117 | |||
118 | static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = { | ||
119 | .destroy = hdmi_phy_8x74_destroy, | ||
120 | .reset = hdmi_phy_8x74_reset, | ||
121 | .powerup = hdmi_phy_8x74_powerup, | ||
122 | .powerdown = hdmi_phy_8x74_powerdown, | ||
123 | }; | ||
124 | |||
125 | struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi) | ||
126 | { | ||
127 | struct hdmi_phy_8x74 *phy_8x74; | ||
128 | struct hdmi_phy *phy = NULL; | ||
129 | int ret; | ||
130 | |||
131 | phy_8x74 = kzalloc(sizeof(*phy_8x74), GFP_KERNEL); | ||
132 | if (!phy_8x74) { | ||
133 | ret = -ENOMEM; | ||
134 | goto fail; | ||
135 | } | ||
136 | |||
137 | phy = &phy_8x74->base; | ||
138 | |||
139 | phy->funcs = &hdmi_phy_8x74_funcs; | ||
140 | |||
141 | phy_8x74->hdmi = hdmi; | ||
142 | |||
143 | /* for 8x74, the phy mmio is mapped separately: */ | ||
144 | phy_8x74->mmio = msm_ioremap(hdmi->pdev, | ||
145 | "phy_physical", "HDMI_8x74"); | ||
146 | if (IS_ERR(phy_8x74->mmio)) { | ||
147 | ret = PTR_ERR(phy_8x74->mmio); | ||
148 | goto fail; | ||
149 | } | ||
150 | |||
151 | return phy; | ||
152 | |||
153 | fail: | ||
154 | if (phy) | ||
155 | hdmi_phy_8x74_destroy(phy); | ||
156 | return ERR_PTR(ret); | ||
157 | } | ||
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h index dbde4f6339b9..d591567173c4 100644 --- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h +++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h | |||
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/ | |||
8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | ||
10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) |
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) |
14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
14 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) |
15 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) |
16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) |
17 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) |
18 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) | 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) |
19 | 21 | ||
20 | Copyright (C) 2013 by the following authors: | 22 | Copyright (C) 2013 by the following authors: |
21 | - Rob Clark <robdclark@gmail.com> (robclark) | 23 | - Rob Clark <robdclark@gmail.com> (robclark) |
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h index 9908ffe1c3ad..416a26e1e58d 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | |||
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/ | |||
8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | ||
10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) |
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) |
14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
14 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) |
15 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) |
16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) |
17 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) |
18 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) | 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) |
19 | 21 | ||
20 | Copyright (C) 2013 by the following authors: | 22 | Copyright (C) 2013 by the following authors: |
21 | - Rob Clark <robdclark@gmail.com> (robclark) | 23 | - Rob Clark <robdclark@gmail.com> (robclark) |
@@ -42,27 +44,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
42 | */ | 44 | */ |
43 | 45 | ||
44 | 46 | ||
45 | enum mdp4_bpc { | ||
46 | BPC1 = 0, | ||
47 | BPC5 = 1, | ||
48 | BPC6 = 2, | ||
49 | BPC8 = 3, | ||
50 | }; | ||
51 | |||
52 | enum mdp4_bpc_alpha { | ||
53 | BPC1A = 0, | ||
54 | BPC4A = 1, | ||
55 | BPC6A = 2, | ||
56 | BPC8A = 3, | ||
57 | }; | ||
58 | |||
59 | enum mdp4_alpha_type { | ||
60 | FG_CONST = 0, | ||
61 | BG_CONST = 1, | ||
62 | FG_PIXEL = 2, | ||
63 | BG_PIXEL = 3, | ||
64 | }; | ||
65 | |||
66 | enum mdp4_pipe { | 47 | enum mdp4_pipe { |
67 | VG1 = 0, | 48 | VG1 = 0, |
68 | VG2 = 1, | 49 | VG2 = 1, |
@@ -79,15 +60,6 @@ enum mdp4_mixer { | |||
79 | MIXER2 = 2, | 60 | MIXER2 = 2, |
80 | }; | 61 | }; |
81 | 62 | ||
82 | enum mdp4_mixer_stage_id { | ||
83 | STAGE_UNUSED = 0, | ||
84 | STAGE_BASE = 1, | ||
85 | STAGE0 = 2, | ||
86 | STAGE1 = 3, | ||
87 | STAGE2 = 4, | ||
88 | STAGE3 = 5, | ||
89 | }; | ||
90 | |||
91 | enum mdp4_intf { | 63 | enum mdp4_intf { |
92 | INTF_LCDC_DTV = 0, | 64 | INTF_LCDC_DTV = 0, |
93 | INTF_DSI_VIDEO = 1, | 65 | INTF_DSI_VIDEO = 1, |
@@ -194,56 +166,56 @@ static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val) | |||
194 | #define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0 | 166 | #define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0 |
195 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007 | 167 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007 |
196 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0 | 168 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0 |
197 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val) | 169 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val) |
198 | { | 170 | { |
199 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK; | 171 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK; |
200 | } | 172 | } |
201 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008 | 173 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008 |
202 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070 | 174 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070 |
203 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4 | 175 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4 |
204 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val) | 176 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val) |
205 | { | 177 | { |
206 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK; | 178 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK; |
207 | } | 179 | } |
208 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080 | 180 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080 |
209 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700 | 181 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700 |
210 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8 | 182 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8 |
211 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val) | 183 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val) |
212 | { | 184 | { |
213 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK; | 185 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK; |
214 | } | 186 | } |
215 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800 | 187 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800 |
216 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000 | 188 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000 |
217 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12 | 189 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12 |
218 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val) | 190 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val) |
219 | { | 191 | { |
220 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK; | 192 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK; |
221 | } | 193 | } |
222 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000 | 194 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000 |
223 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000 | 195 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000 |
224 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16 | 196 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16 |
225 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val) | 197 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val) |
226 | { | 198 | { |
227 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK; | 199 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK; |
228 | } | 200 | } |
229 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000 | 201 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000 |
230 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000 | 202 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000 |
231 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20 | 203 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20 |
232 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val) | 204 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val) |
233 | { | 205 | { |
234 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK; | 206 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK; |
235 | } | 207 | } |
236 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000 | 208 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000 |
237 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000 | 209 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000 |
238 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24 | 210 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24 |
239 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val) | 211 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val) |
240 | { | 212 | { |
241 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK; | 213 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK; |
242 | } | 214 | } |
243 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000 | 215 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000 |
244 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000 | 216 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000 |
245 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28 | 217 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28 |
246 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val) | 218 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val) |
247 | { | 219 | { |
248 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK; | 220 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK; |
249 | } | 221 | } |
@@ -254,56 +226,56 @@ static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id va | |||
254 | #define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100 | 226 | #define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100 |
255 | #define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007 | 227 | #define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007 |
256 | #define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0 | 228 | #define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0 |
257 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val) | 229 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val) |
258 | { | 230 | { |
259 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK; | 231 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK; |
260 | } | 232 | } |
261 | #define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008 | 233 | #define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008 |
262 | #define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070 | 234 | #define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070 |
263 | #define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4 | 235 | #define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4 |
264 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val) | 236 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val) |
265 | { | 237 | { |
266 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK; | 238 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK; |
267 | } | 239 | } |
268 | #define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080 | 240 | #define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080 |
269 | #define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700 | 241 | #define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700 |
270 | #define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8 | 242 | #define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8 |
271 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val) | 243 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val) |
272 | { | 244 | { |
273 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK; | 245 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK; |
274 | } | 246 | } |
275 | #define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800 | 247 | #define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800 |
276 | #define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000 | 248 | #define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000 |
277 | #define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12 | 249 | #define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12 |
278 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val) | 250 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val) |
279 | { | 251 | { |
280 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK; | 252 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK; |
281 | } | 253 | } |
282 | #define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000 | 254 | #define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000 |
283 | #define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000 | 255 | #define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000 |
284 | #define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16 | 256 | #define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16 |
285 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val) | 257 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val) |
286 | { | 258 | { |
287 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK; | 259 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK; |
288 | } | 260 | } |
289 | #define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000 | 261 | #define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000 |
290 | #define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000 | 262 | #define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000 |
291 | #define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20 | 263 | #define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20 |
292 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val) | 264 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val) |
293 | { | 265 | { |
294 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK; | 266 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK; |
295 | } | 267 | } |
296 | #define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000 | 268 | #define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000 |
297 | #define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000 | 269 | #define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000 |
298 | #define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24 | 270 | #define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24 |
299 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val) | 271 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val) |
300 | { | 272 | { |
301 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK; | 273 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK; |
302 | } | 274 | } |
303 | #define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000 | 275 | #define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000 |
304 | #define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000 | 276 | #define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000 |
305 | #define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28 | 277 | #define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28 |
306 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val) | 278 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val) |
307 | { | 279 | { |
308 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK; | 280 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK; |
309 | } | 281 | } |
@@ -369,7 +341,7 @@ static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x | |||
369 | static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } | 341 | static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } |
370 | #define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003 | 342 | #define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003 |
371 | #define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0 | 343 | #define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0 |
372 | static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val) | 344 | static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val) |
373 | { | 345 | { |
374 | return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK; | 346 | return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK; |
375 | } | 347 | } |
@@ -377,7 +349,7 @@ static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val) | |||
377 | #define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008 | 349 | #define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008 |
378 | #define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030 | 350 | #define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030 |
379 | #define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4 | 351 | #define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4 |
380 | static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp4_alpha_type val) | 352 | static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val) |
381 | { | 353 | { |
382 | return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK; | 354 | return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK; |
383 | } | 355 | } |
@@ -472,19 +444,19 @@ static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __of | |||
472 | static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } | 444 | static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } |
473 | #define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003 | 445 | #define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003 |
474 | #define MDP4_DMA_CONFIG_G_BPC__SHIFT 0 | 446 | #define MDP4_DMA_CONFIG_G_BPC__SHIFT 0 |
475 | static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp4_bpc val) | 447 | static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val) |
476 | { | 448 | { |
477 | return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK; | 449 | return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK; |
478 | } | 450 | } |
479 | #define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c | 451 | #define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c |
480 | #define MDP4_DMA_CONFIG_B_BPC__SHIFT 2 | 452 | #define MDP4_DMA_CONFIG_B_BPC__SHIFT 2 |
481 | static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp4_bpc val) | 453 | static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val) |
482 | { | 454 | { |
483 | return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK; | 455 | return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK; |
484 | } | 456 | } |
485 | #define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030 | 457 | #define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030 |
486 | #define MDP4_DMA_CONFIG_R_BPC__SHIFT 4 | 458 | #define MDP4_DMA_CONFIG_R_BPC__SHIFT 4 |
487 | static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp4_bpc val) | 459 | static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val) |
488 | { | 460 | { |
489 | return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK; | 461 | return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK; |
490 | } | 462 | } |
@@ -710,25 +682,25 @@ static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val) | |||
710 | static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; } | 682 | static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; } |
711 | #define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 | 683 | #define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 |
712 | #define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 | 684 | #define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 |
713 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp4_bpc val) | 685 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val) |
714 | { | 686 | { |
715 | return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK; | 687 | return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK; |
716 | } | 688 | } |
717 | #define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c | 689 | #define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c |
718 | #define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 | 690 | #define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 |
719 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp4_bpc val) | 691 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val) |
720 | { | 692 | { |
721 | return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK; | 693 | return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK; |
722 | } | 694 | } |
723 | #define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 | 695 | #define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 |
724 | #define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 | 696 | #define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 |
725 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp4_bpc val) | 697 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val) |
726 | { | 698 | { |
727 | return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK; | 699 | return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK; |
728 | } | 700 | } |
729 | #define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 | 701 | #define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 |
730 | #define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 | 702 | #define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 |
731 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp4_bpc_alpha val) | 703 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val) |
732 | { | 704 | { |
733 | return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK; | 705 | return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK; |
734 | } | 706 | } |
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 019d530187ff..1964f4f0d452 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | |||
@@ -66,15 +66,15 @@ struct mdp4_crtc { | |||
66 | /* for unref'ing cursor bo's after scanout completes: */ | 66 | /* for unref'ing cursor bo's after scanout completes: */ |
67 | struct drm_flip_work unref_cursor_work; | 67 | struct drm_flip_work unref_cursor_work; |
68 | 68 | ||
69 | struct mdp4_irq vblank; | 69 | struct mdp_irq vblank; |
70 | struct mdp4_irq err; | 70 | struct mdp_irq err; |
71 | }; | 71 | }; |
72 | #define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base) | 72 | #define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base) |
73 | 73 | ||
74 | static struct mdp4_kms *get_kms(struct drm_crtc *crtc) | 74 | static struct mdp4_kms *get_kms(struct drm_crtc *crtc) |
75 | { | 75 | { |
76 | struct msm_drm_private *priv = crtc->dev->dev_private; | 76 | struct msm_drm_private *priv = crtc->dev->dev_private; |
77 | return to_mdp4_kms(priv->kms); | 77 | return to_mdp4_kms(to_mdp_kms(priv->kms)); |
78 | } | 78 | } |
79 | 79 | ||
80 | static void update_fb(struct drm_crtc *crtc, bool async, | 80 | static void update_fb(struct drm_crtc *crtc, bool async, |
@@ -93,7 +93,7 @@ static void update_fb(struct drm_crtc *crtc, bool async, | |||
93 | 93 | ||
94 | if (!async) { | 94 | if (!async) { |
95 | /* enable vblank to pick up the old_fb */ | 95 | /* enable vblank to pick up the old_fb */ |
96 | mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank); | 96 | mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); |
97 | } | 97 | } |
98 | } | 98 | } |
99 | 99 | ||
@@ -145,7 +145,7 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending) | |||
145 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 145 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
146 | 146 | ||
147 | atomic_or(pending, &mdp4_crtc->pending); | 147 | atomic_or(pending, &mdp4_crtc->pending); |
148 | mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank); | 148 | mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); |
149 | } | 149 | } |
150 | 150 | ||
151 | static void pageflip_cb(struct msm_fence_cb *cb) | 151 | static void pageflip_cb(struct msm_fence_cb *cb) |
@@ -210,9 +210,9 @@ static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
210 | if (enabled != mdp4_crtc->enabled) { | 210 | if (enabled != mdp4_crtc->enabled) { |
211 | if (enabled) { | 211 | if (enabled) { |
212 | mdp4_enable(mdp4_kms); | 212 | mdp4_enable(mdp4_kms); |
213 | mdp4_irq_register(mdp4_kms, &mdp4_crtc->err); | 213 | mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err); |
214 | } else { | 214 | } else { |
215 | mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err); | 215 | mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err); |
216 | mdp4_disable(mdp4_kms); | 216 | mdp4_disable(mdp4_kms); |
217 | } | 217 | } |
218 | mdp4_crtc->enabled = enabled; | 218 | mdp4_crtc->enabled = enabled; |
@@ -232,7 +232,7 @@ static void blend_setup(struct drm_crtc *crtc) | |||
232 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | 232 | struct mdp4_kms *mdp4_kms = get_kms(crtc); |
233 | int i, ovlp = mdp4_crtc->ovlp; | 233 | int i, ovlp = mdp4_crtc->ovlp; |
234 | uint32_t mixer_cfg = 0; | 234 | uint32_t mixer_cfg = 0; |
235 | static const enum mdp4_mixer_stage_id stages[] = { | 235 | static const enum mdp_mixer_stage_id stages[] = { |
236 | STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3, | 236 | STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3, |
237 | }; | 237 | }; |
238 | /* statically (for now) map planes to mixer stage (z-order): */ | 238 | /* statically (for now) map planes to mixer stage (z-order): */ |
@@ -262,8 +262,8 @@ static void blend_setup(struct drm_crtc *crtc) | |||
262 | enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); | 262 | enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); |
263 | int idx = idxs[pipe_id]; | 263 | int idx = idxs[pipe_id]; |
264 | if (idx > 0) { | 264 | if (idx > 0) { |
265 | const struct mdp4_format *format = | 265 | const struct mdp_format *format = |
266 | to_mdp4_format(msm_framebuffer_format(plane->fb)); | 266 | to_mdp_format(msm_framebuffer_format(plane->fb)); |
267 | alpha[idx-1] = format->alpha_enable; | 267 | alpha[idx-1] = format->alpha_enable; |
268 | } | 268 | } |
269 | mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]); | 269 | mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]); |
@@ -571,14 +571,14 @@ static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { | |||
571 | .load_lut = mdp4_crtc_load_lut, | 571 | .load_lut = mdp4_crtc_load_lut, |
572 | }; | 572 | }; |
573 | 573 | ||
574 | static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus) | 574 | static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) |
575 | { | 575 | { |
576 | struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank); | 576 | struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank); |
577 | struct drm_crtc *crtc = &mdp4_crtc->base; | 577 | struct drm_crtc *crtc = &mdp4_crtc->base; |
578 | struct msm_drm_private *priv = crtc->dev->dev_private; | 578 | struct msm_drm_private *priv = crtc->dev->dev_private; |
579 | unsigned pending; | 579 | unsigned pending; |
580 | 580 | ||
581 | mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank); | 581 | mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank); |
582 | 582 | ||
583 | pending = atomic_xchg(&mdp4_crtc->pending, 0); | 583 | pending = atomic_xchg(&mdp4_crtc->pending, 0); |
584 | 584 | ||
@@ -593,7 +593,7 @@ static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus) | |||
593 | } | 593 | } |
594 | } | 594 | } |
595 | 595 | ||
596 | static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus) | 596 | static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) |
597 | { | 597 | { |
598 | struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err); | 598 | struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err); |
599 | struct drm_crtc *crtc = &mdp4_crtc->base; | 599 | struct drm_crtc *crtc = &mdp4_crtc->base; |
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c index 5e0dcae70ab5..067ed03b35fe 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c | |||
@@ -15,8 +15,6 @@ | |||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * this program. If not, see <http://www.gnu.org/licenses/>. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <mach/clk.h> | ||
19 | |||
20 | #include "mdp4_kms.h" | 18 | #include "mdp4_kms.h" |
21 | 19 | ||
22 | #include "drm_crtc.h" | 20 | #include "drm_crtc.h" |
@@ -37,7 +35,7 @@ struct mdp4_dtv_encoder { | |||
37 | static struct mdp4_kms *get_kms(struct drm_encoder *encoder) | 35 | static struct mdp4_kms *get_kms(struct drm_encoder *encoder) |
38 | { | 36 | { |
39 | struct msm_drm_private *priv = encoder->dev->dev_private; | 37 | struct msm_drm_private *priv = encoder->dev->dev_private; |
40 | return to_mdp4_kms(priv->kms); | 38 | return to_mdp4_kms(to_mdp_kms(priv->kms)); |
41 | } | 39 | } |
42 | 40 | ||
43 | #ifdef CONFIG_MSM_BUS_SCALING | 41 | #ifdef CONFIG_MSM_BUS_SCALING |
@@ -139,7 +137,7 @@ static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
139 | * the settings changes for the new modeset (like new | 137 | * the settings changes for the new modeset (like new |
140 | * scanout buffer) don't latch properly.. | 138 | * scanout buffer) don't latch properly.. |
141 | */ | 139 | */ |
142 | mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC); | 140 | mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC); |
143 | 141 | ||
144 | clk_disable_unprepare(mdp4_dtv_encoder->src_clk); | 142 | clk_disable_unprepare(mdp4_dtv_encoder->src_clk); |
145 | clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); | 143 | clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); |
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c new file mode 100644 index 000000000000..c740ccd1cc67 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | |||
@@ -0,0 +1,93 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | |||
19 | #include "msm_drv.h" | ||
20 | #include "mdp4_kms.h" | ||
21 | |||
22 | void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask) | ||
23 | { | ||
24 | mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask); | ||
25 | } | ||
26 | |||
27 | static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) | ||
28 | { | ||
29 | DRM_ERROR("errors: %08x\n", irqstatus); | ||
30 | } | ||
31 | |||
32 | void mdp4_irq_preinstall(struct msm_kms *kms) | ||
33 | { | ||
34 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); | ||
35 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); | ||
36 | } | ||
37 | |||
38 | int mdp4_irq_postinstall(struct msm_kms *kms) | ||
39 | { | ||
40 | struct mdp_kms *mdp_kms = to_mdp_kms(kms); | ||
41 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms); | ||
42 | struct mdp_irq *error_handler = &mdp4_kms->error_handler; | ||
43 | |||
44 | error_handler->irq = mdp4_irq_error_handler; | ||
45 | error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN | | ||
46 | MDP4_IRQ_EXTERNAL_INTF_UDERRUN; | ||
47 | |||
48 | mdp_irq_register(mdp_kms, error_handler); | ||
49 | |||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | void mdp4_irq_uninstall(struct msm_kms *kms) | ||
54 | { | ||
55 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); | ||
56 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); | ||
57 | } | ||
58 | |||
59 | irqreturn_t mdp4_irq(struct msm_kms *kms) | ||
60 | { | ||
61 | struct mdp_kms *mdp_kms = to_mdp_kms(kms); | ||
62 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms); | ||
63 | struct drm_device *dev = mdp4_kms->dev; | ||
64 | struct msm_drm_private *priv = dev->dev_private; | ||
65 | unsigned int id; | ||
66 | uint32_t status; | ||
67 | |||
68 | status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS); | ||
69 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status); | ||
70 | |||
71 | VERB("status=%08x", status); | ||
72 | |||
73 | for (id = 0; id < priv->num_crtcs; id++) | ||
74 | if (status & mdp4_crtc_vblank(priv->crtcs[id])) | ||
75 | drm_handle_vblank(dev, id); | ||
76 | |||
77 | mdp_dispatch_irqs(mdp_kms, status); | ||
78 | |||
79 | return IRQ_HANDLED; | ||
80 | } | ||
81 | |||
82 | int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) | ||
83 | { | ||
84 | mdp_update_vblank_mask(to_mdp_kms(kms), | ||
85 | mdp4_crtc_vblank(crtc), true); | ||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) | ||
90 | { | ||
91 | mdp_update_vblank_mask(to_mdp_kms(kms), | ||
92 | mdp4_crtc_vblank(crtc), false); | ||
93 | } | ||
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 8972ac35a43d..272e707c9487 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | |||
@@ -17,13 +17,14 @@ | |||
17 | 17 | ||
18 | 18 | ||
19 | #include "msm_drv.h" | 19 | #include "msm_drv.h" |
20 | #include "msm_mmu.h" | ||
20 | #include "mdp4_kms.h" | 21 | #include "mdp4_kms.h" |
21 | 22 | ||
22 | static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); | 23 | static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); |
23 | 24 | ||
24 | static int mdp4_hw_init(struct msm_kms *kms) | 25 | static int mdp4_hw_init(struct msm_kms *kms) |
25 | { | 26 | { |
26 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | 27 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
27 | struct drm_device *dev = mdp4_kms->dev; | 28 | struct drm_device *dev = mdp4_kms->dev; |
28 | uint32_t version, major, minor, dmap_cfg, vg_cfg; | 29 | uint32_t version, major, minor, dmap_cfg, vg_cfg; |
29 | unsigned long clk; | 30 | unsigned long clk; |
@@ -31,12 +32,14 @@ static int mdp4_hw_init(struct msm_kms *kms) | |||
31 | 32 | ||
32 | pm_runtime_get_sync(dev->dev); | 33 | pm_runtime_get_sync(dev->dev); |
33 | 34 | ||
35 | mdp4_enable(mdp4_kms); | ||
34 | version = mdp4_read(mdp4_kms, REG_MDP4_VERSION); | 36 | version = mdp4_read(mdp4_kms, REG_MDP4_VERSION); |
37 | mdp4_disable(mdp4_kms); | ||
35 | 38 | ||
36 | major = FIELD(version, MDP4_VERSION_MAJOR); | 39 | major = FIELD(version, MDP4_VERSION_MAJOR); |
37 | minor = FIELD(version, MDP4_VERSION_MINOR); | 40 | minor = FIELD(version, MDP4_VERSION_MINOR); |
38 | 41 | ||
39 | DBG("found MDP version v%d.%d", major, minor); | 42 | DBG("found MDP4 version v%d.%d", major, minor); |
40 | 43 | ||
41 | if (major != 4) { | 44 | if (major != 4) { |
42 | dev_err(dev->dev, "unexpected MDP version: v%d.%d\n", | 45 | dev_err(dev->dev, "unexpected MDP version: v%d.%d\n", |
@@ -130,7 +133,7 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, | |||
130 | 133 | ||
131 | static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) | 134 | static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) |
132 | { | 135 | { |
133 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | 136 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
134 | struct msm_drm_private *priv = mdp4_kms->dev->dev_private; | 137 | struct msm_drm_private *priv = mdp4_kms->dev->dev_private; |
135 | unsigned i; | 138 | unsigned i; |
136 | 139 | ||
@@ -140,11 +143,12 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) | |||
140 | 143 | ||
141 | static void mdp4_destroy(struct msm_kms *kms) | 144 | static void mdp4_destroy(struct msm_kms *kms) |
142 | { | 145 | { |
143 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | 146 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
144 | kfree(mdp4_kms); | 147 | kfree(mdp4_kms); |
145 | } | 148 | } |
146 | 149 | ||
147 | static const struct msm_kms_funcs kms_funcs = { | 150 | static const struct mdp_kms_funcs kms_funcs = { |
151 | .base = { | ||
148 | .hw_init = mdp4_hw_init, | 152 | .hw_init = mdp4_hw_init, |
149 | .irq_preinstall = mdp4_irq_preinstall, | 153 | .irq_preinstall = mdp4_irq_preinstall, |
150 | .irq_postinstall = mdp4_irq_postinstall, | 154 | .irq_postinstall = mdp4_irq_postinstall, |
@@ -152,10 +156,12 @@ static const struct msm_kms_funcs kms_funcs = { | |||
152 | .irq = mdp4_irq, | 156 | .irq = mdp4_irq, |
153 | .enable_vblank = mdp4_enable_vblank, | 157 | .enable_vblank = mdp4_enable_vblank, |
154 | .disable_vblank = mdp4_disable_vblank, | 158 | .disable_vblank = mdp4_disable_vblank, |
155 | .get_format = mdp4_get_format, | 159 | .get_format = mdp_get_format, |
156 | .round_pixclk = mdp4_round_pixclk, | 160 | .round_pixclk = mdp4_round_pixclk, |
157 | .preclose = mdp4_preclose, | 161 | .preclose = mdp4_preclose, |
158 | .destroy = mdp4_destroy, | 162 | .destroy = mdp4_destroy, |
163 | }, | ||
164 | .set_irqmask = mdp4_set_irqmask, | ||
159 | }; | 165 | }; |
160 | 166 | ||
161 | int mdp4_disable(struct mdp4_kms *mdp4_kms) | 167 | int mdp4_disable(struct mdp4_kms *mdp4_kms) |
@@ -189,6 +195,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms) | |||
189 | struct drm_plane *plane; | 195 | struct drm_plane *plane; |
190 | struct drm_crtc *crtc; | 196 | struct drm_crtc *crtc; |
191 | struct drm_encoder *encoder; | 197 | struct drm_encoder *encoder; |
198 | struct hdmi *hdmi; | ||
192 | int ret; | 199 | int ret; |
193 | 200 | ||
194 | /* | 201 | /* |
@@ -238,9 +245,10 @@ static int modeset_init(struct mdp4_kms *mdp4_kms) | |||
238 | encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */ | 245 | encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */ |
239 | priv->encoders[priv->num_encoders++] = encoder; | 246 | priv->encoders[priv->num_encoders++] = encoder; |
240 | 247 | ||
241 | ret = hdmi_init(dev, encoder); | 248 | hdmi = hdmi_init(dev, encoder); |
242 | if (ret) { | 249 | if (IS_ERR(hdmi)) { |
243 | dev_err(dev->dev, "failed to initialize HDMI\n"); | 250 | ret = PTR_ERR(hdmi); |
251 | dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret); | ||
244 | goto fail; | 252 | goto fail; |
245 | } | 253 | } |
246 | 254 | ||
@@ -260,6 +268,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) | |||
260 | struct mdp4_platform_config *config = mdp4_get_config(pdev); | 268 | struct mdp4_platform_config *config = mdp4_get_config(pdev); |
261 | struct mdp4_kms *mdp4_kms; | 269 | struct mdp4_kms *mdp4_kms; |
262 | struct msm_kms *kms = NULL; | 270 | struct msm_kms *kms = NULL; |
271 | struct msm_mmu *mmu; | ||
263 | int ret; | 272 | int ret; |
264 | 273 | ||
265 | mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); | 274 | mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); |
@@ -269,8 +278,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) | |||
269 | goto fail; | 278 | goto fail; |
270 | } | 279 | } |
271 | 280 | ||
272 | kms = &mdp4_kms->base; | 281 | mdp_kms_init(&mdp4_kms->base, &kms_funcs); |
273 | kms->funcs = &kms_funcs; | 282 | |
283 | kms = &mdp4_kms->base.base; | ||
274 | 284 | ||
275 | mdp4_kms->dev = dev; | 285 | mdp4_kms->dev = dev; |
276 | 286 | ||
@@ -322,27 +332,34 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) | |||
322 | clk_set_rate(mdp4_kms->clk, config->max_clk); | 332 | clk_set_rate(mdp4_kms->clk, config->max_clk); |
323 | clk_set_rate(mdp4_kms->lut_clk, config->max_clk); | 333 | clk_set_rate(mdp4_kms->lut_clk, config->max_clk); |
324 | 334 | ||
325 | if (!config->iommu) { | ||
326 | dev_err(dev->dev, "no iommu\n"); | ||
327 | ret = -ENXIO; | ||
328 | goto fail; | ||
329 | } | ||
330 | |||
331 | /* make sure things are off before attaching iommu (bootloader could | 335 | /* make sure things are off before attaching iommu (bootloader could |
332 | * have left things on, in which case we'll start getting faults if | 336 | * have left things on, in which case we'll start getting faults if |
333 | * we don't disable): | 337 | * we don't disable): |
334 | */ | 338 | */ |
339 | mdp4_enable(mdp4_kms); | ||
335 | mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); | 340 | mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); |
336 | mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); | 341 | mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); |
337 | mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0); | 342 | mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0); |
343 | mdp4_disable(mdp4_kms); | ||
338 | mdelay(16); | 344 | mdelay(16); |
339 | 345 | ||
340 | ret = msm_iommu_attach(dev, config->iommu, | 346 | if (config->iommu) { |
341 | iommu_ports, ARRAY_SIZE(iommu_ports)); | 347 | mmu = msm_iommu_new(dev, config->iommu); |
342 | if (ret) | 348 | if (IS_ERR(mmu)) { |
343 | goto fail; | 349 | ret = PTR_ERR(mmu); |
350 | goto fail; | ||
351 | } | ||
352 | ret = mmu->funcs->attach(mmu, iommu_ports, | ||
353 | ARRAY_SIZE(iommu_ports)); | ||
354 | if (ret) | ||
355 | goto fail; | ||
356 | } else { | ||
357 | dev_info(dev->dev, "no iommu, fallback to phys " | ||
358 | "contig buffers for scanout\n"); | ||
359 | mmu = NULL; | ||
360 | } | ||
344 | 361 | ||
345 | mdp4_kms->id = msm_register_iommu(dev, config->iommu); | 362 | mdp4_kms->id = msm_register_mmu(dev, mmu); |
346 | if (mdp4_kms->id < 0) { | 363 | if (mdp4_kms->id < 0) { |
347 | ret = mdp4_kms->id; | 364 | ret = mdp4_kms->id; |
348 | dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret); | 365 | dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret); |
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index eb015c834087..66a4d31aec80 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | |||
@@ -18,29 +18,13 @@ | |||
18 | #ifndef __MDP4_KMS_H__ | 18 | #ifndef __MDP4_KMS_H__ |
19 | #define __MDP4_KMS_H__ | 19 | #define __MDP4_KMS_H__ |
20 | 20 | ||
21 | #include <linux/clk.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/regulator/consumer.h> | ||
24 | |||
25 | #include "msm_drv.h" | 21 | #include "msm_drv.h" |
22 | #include "msm_kms.h" | ||
23 | #include "mdp/mdp_kms.h" | ||
26 | #include "mdp4.xml.h" | 24 | #include "mdp4.xml.h" |
27 | 25 | ||
28 | |||
29 | /* For transiently registering for different MDP4 irqs that various parts | ||
30 | * of the KMS code need during setup/configuration. We these are not | ||
31 | * necessarily the same as what drm_vblank_get/put() are requesting, and | ||
32 | * the hysteresis in drm_vblank_put() is not necessarily desirable for | ||
33 | * internal housekeeping related irq usage. | ||
34 | */ | ||
35 | struct mdp4_irq { | ||
36 | struct list_head node; | ||
37 | uint32_t irqmask; | ||
38 | bool registered; | ||
39 | void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus); | ||
40 | }; | ||
41 | |||
42 | struct mdp4_kms { | 26 | struct mdp4_kms { |
43 | struct msm_kms base; | 27 | struct mdp_kms base; |
44 | 28 | ||
45 | struct drm_device *dev; | 29 | struct drm_device *dev; |
46 | 30 | ||
@@ -59,11 +43,7 @@ struct mdp4_kms { | |||
59 | struct clk *pclk; | 43 | struct clk *pclk; |
60 | struct clk *lut_clk; | 44 | struct clk *lut_clk; |
61 | 45 | ||
62 | /* irq handling: */ | 46 | struct mdp_irq error_handler; |
63 | bool in_irq; | ||
64 | struct list_head irq_list; /* list of mdp4_irq */ | ||
65 | uint32_t vblank_mask; /* irq bits set for userspace vblank */ | ||
66 | struct mdp4_irq error_handler; | ||
67 | }; | 47 | }; |
68 | #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) | 48 | #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) |
69 | 49 | ||
@@ -73,16 +53,6 @@ struct mdp4_platform_config { | |||
73 | uint32_t max_clk; | 53 | uint32_t max_clk; |
74 | }; | 54 | }; |
75 | 55 | ||
76 | struct mdp4_format { | ||
77 | struct msm_format base; | ||
78 | enum mdp4_bpc bpc_r, bpc_g, bpc_b; | ||
79 | enum mdp4_bpc_alpha bpc_a; | ||
80 | uint8_t unpack[4]; | ||
81 | bool alpha_enable, unpack_tight; | ||
82 | uint8_t cpp, unpack_count; | ||
83 | }; | ||
84 | #define to_mdp4_format(x) container_of(x, struct mdp4_format, base) | ||
85 | |||
86 | static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data) | 56 | static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data) |
87 | { | 57 | { |
88 | msm_writel(data, mdp4_kms->mmio + reg); | 58 | msm_writel(data, mdp4_kms->mmio + reg); |
@@ -134,7 +104,7 @@ static inline uint32_t dma2err(enum mdp4_dma dma) | |||
134 | } | 104 | } |
135 | 105 | ||
136 | static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe, | 106 | static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe, |
137 | enum mdp4_mixer_stage_id stage) | 107 | enum mdp_mixer_stage_id stage) |
138 | { | 108 | { |
139 | uint32_t mixer_cfg = 0; | 109 | uint32_t mixer_cfg = 0; |
140 | 110 | ||
@@ -178,19 +148,23 @@ static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe, | |||
178 | int mdp4_disable(struct mdp4_kms *mdp4_kms); | 148 | int mdp4_disable(struct mdp4_kms *mdp4_kms); |
179 | int mdp4_enable(struct mdp4_kms *mdp4_kms); | 149 | int mdp4_enable(struct mdp4_kms *mdp4_kms); |
180 | 150 | ||
151 | void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask); | ||
181 | void mdp4_irq_preinstall(struct msm_kms *kms); | 152 | void mdp4_irq_preinstall(struct msm_kms *kms); |
182 | int mdp4_irq_postinstall(struct msm_kms *kms); | 153 | int mdp4_irq_postinstall(struct msm_kms *kms); |
183 | void mdp4_irq_uninstall(struct msm_kms *kms); | 154 | void mdp4_irq_uninstall(struct msm_kms *kms); |
184 | irqreturn_t mdp4_irq(struct msm_kms *kms); | 155 | irqreturn_t mdp4_irq(struct msm_kms *kms); |
185 | void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask); | ||
186 | void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq); | ||
187 | void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq); | ||
188 | int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); | 156 | int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); |
189 | void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); | 157 | void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); |
190 | 158 | ||
191 | uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *formats, | 159 | static inline |
192 | uint32_t max_formats); | 160 | uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats, |
193 | const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format); | 161 | uint32_t max_formats) |
162 | { | ||
163 | /* TODO when we have YUV, we need to filter supported formats | ||
164 | * based on pipe_id.. | ||
165 | */ | ||
166 | return mdp_get_formats(pixel_formats, max_formats); | ||
167 | } | ||
194 | 168 | ||
195 | void mdp4_plane_install_properties(struct drm_plane *plane, | 169 | void mdp4_plane_install_properties(struct drm_plane *plane, |
196 | struct drm_mode_object *obj); | 170 | struct drm_mode_object *obj); |
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 0f0af243f6fc..2406027200ec 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | |||
@@ -34,7 +34,7 @@ struct mdp4_plane { | |||
34 | static struct mdp4_kms *get_kms(struct drm_plane *plane) | 34 | static struct mdp4_kms *get_kms(struct drm_plane *plane) |
35 | { | 35 | { |
36 | struct msm_drm_private *priv = plane->dev->dev_private; | 36 | struct msm_drm_private *priv = plane->dev->dev_private; |
37 | return to_mdp4_kms(priv->kms); | 37 | return to_mdp4_kms(to_mdp_kms(priv->kms)); |
38 | } | 38 | } |
39 | 39 | ||
40 | static int mdp4_plane_update(struct drm_plane *plane, | 40 | static int mdp4_plane_update(struct drm_plane *plane, |
@@ -132,7 +132,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane, | |||
132 | struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); | 132 | struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); |
133 | struct mdp4_kms *mdp4_kms = get_kms(plane); | 133 | struct mdp4_kms *mdp4_kms = get_kms(plane); |
134 | enum mdp4_pipe pipe = mdp4_plane->pipe; | 134 | enum mdp4_pipe pipe = mdp4_plane->pipe; |
135 | const struct mdp4_format *format; | 135 | const struct mdp_format *format; |
136 | uint32_t op_mode = 0; | 136 | uint32_t op_mode = 0; |
137 | uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; | 137 | uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; |
138 | uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; | 138 | uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; |
@@ -175,7 +175,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane, | |||
175 | 175 | ||
176 | mdp4_plane_set_scanout(plane, fb); | 176 | mdp4_plane_set_scanout(plane, fb); |
177 | 177 | ||
178 | format = to_mdp4_format(msm_framebuffer_format(fb)); | 178 | format = to_mdp_format(msm_framebuffer_format(fb)); |
179 | 179 | ||
180 | mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), | 180 | mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), |
181 | MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | | 181 | MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h new file mode 100644 index 000000000000..0aa51517f826 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | |||
@@ -0,0 +1,1036 @@ | |||
1 | #ifndef MDP5_XML | ||
2 | #define MDP5_XML | ||
3 | |||
4 | /* Autogenerated file, DO NOT EDIT manually! | ||
5 | |||
6 | This file was generated by the rules-ng-ng headergen tool in this git repository: | ||
7 | http://github.com/freedreno/envytools/ | ||
8 | git clone https://github.com/freedreno/envytools.git | ||
9 | |||
10 | The rules-ng-ng source files this header was generated from are: | ||
11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) | ||
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | ||
13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) | ||
14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | ||
17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | ||
18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | ||
19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | ||
20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) | ||
21 | |||
22 | Copyright (C) 2013 by the following authors: | ||
23 | - Rob Clark <robdclark@gmail.com> (robclark) | ||
24 | |||
25 | Permission is hereby granted, free of charge, to any person obtaining | ||
26 | a copy of this software and associated documentation files (the | ||
27 | "Software"), to deal in the Software without restriction, including | ||
28 | without limitation the rights to use, copy, modify, merge, publish, | ||
29 | distribute, sublicense, and/or sell copies of the Software, and to | ||
30 | permit persons to whom the Software is furnished to do so, subject to | ||
31 | the following conditions: | ||
32 | |||
33 | The above copyright notice and this permission notice (including the | ||
34 | next paragraph) shall be included in all copies or substantial | ||
35 | portions of the Software. | ||
36 | |||
37 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
38 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
39 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
40 | IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
41 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
42 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
43 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
44 | */ | ||
45 | |||
46 | |||
47 | enum mdp5_intf { | ||
48 | INTF_DSI = 1, | ||
49 | INTF_HDMI = 3, | ||
50 | INTF_LCDC = 5, | ||
51 | INTF_eDP = 9, | ||
52 | }; | ||
53 | |||
54 | enum mdp5_intfnum { | ||
55 | NO_INTF = 0, | ||
56 | INTF0 = 1, | ||
57 | INTF1 = 2, | ||
58 | INTF2 = 3, | ||
59 | INTF3 = 4, | ||
60 | }; | ||
61 | |||
62 | enum mdp5_pipe { | ||
63 | SSPP_VIG0 = 0, | ||
64 | SSPP_VIG1 = 1, | ||
65 | SSPP_VIG2 = 2, | ||
66 | SSPP_RGB0 = 3, | ||
67 | SSPP_RGB1 = 4, | ||
68 | SSPP_RGB2 = 5, | ||
69 | SSPP_DMA0 = 6, | ||
70 | SSPP_DMA1 = 7, | ||
71 | }; | ||
72 | |||
73 | enum mdp5_ctl_mode { | ||
74 | MODE_NONE = 0, | ||
75 | MODE_ROT0 = 1, | ||
76 | MODE_ROT1 = 2, | ||
77 | MODE_WB0 = 3, | ||
78 | MODE_WB1 = 4, | ||
79 | MODE_WFD = 5, | ||
80 | }; | ||
81 | |||
82 | enum mdp5_pack_3d { | ||
83 | PACK_3D_FRAME_INT = 0, | ||
84 | PACK_3D_H_ROW_INT = 1, | ||
85 | PACK_3D_V_ROW_INT = 2, | ||
86 | PACK_3D_COL_INT = 3, | ||
87 | }; | ||
88 | |||
89 | enum mdp5_chroma_samp_type { | ||
90 | CHROMA_RGB = 0, | ||
91 | CHROMA_H2V1 = 1, | ||
92 | CHROMA_H1V2 = 2, | ||
93 | CHROMA_420 = 3, | ||
94 | }; | ||
95 | |||
96 | enum mdp5_scale_filter { | ||
97 | SCALE_FILTER_NEAREST = 0, | ||
98 | SCALE_FILTER_BIL = 1, | ||
99 | SCALE_FILTER_PCMN = 2, | ||
100 | SCALE_FILTER_CA = 3, | ||
101 | }; | ||
102 | |||
103 | enum mdp5_pipe_bwc { | ||
104 | BWC_LOSSLESS = 0, | ||
105 | BWC_Q_HIGH = 1, | ||
106 | BWC_Q_MED = 2, | ||
107 | }; | ||
108 | |||
109 | enum mdp5_client_id { | ||
110 | CID_UNUSED = 0, | ||
111 | CID_VIG0_Y = 1, | ||
112 | CID_VIG0_CR = 2, | ||
113 | CID_VIG0_CB = 3, | ||
114 | CID_VIG1_Y = 4, | ||
115 | CID_VIG1_CR = 5, | ||
116 | CID_VIG1_CB = 6, | ||
117 | CID_VIG2_Y = 7, | ||
118 | CID_VIG2_CR = 8, | ||
119 | CID_VIG2_CB = 9, | ||
120 | CID_DMA0_Y = 10, | ||
121 | CID_DMA0_CR = 11, | ||
122 | CID_DMA0_CB = 12, | ||
123 | CID_DMA1_Y = 13, | ||
124 | CID_DMA1_CR = 14, | ||
125 | CID_DMA1_CB = 15, | ||
126 | CID_RGB0 = 16, | ||
127 | CID_RGB1 = 17, | ||
128 | CID_RGB2 = 18, | ||
129 | CID_MAX = 19, | ||
130 | }; | ||
131 | |||
132 | enum mdp5_igc_type { | ||
133 | IGC_VIG = 0, | ||
134 | IGC_RGB = 1, | ||
135 | IGC_DMA = 2, | ||
136 | IGC_DSPP = 3, | ||
137 | }; | ||
138 | |||
139 | #define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001 | ||
140 | #define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002 | ||
141 | #define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004 | ||
142 | #define MDP5_IRQ_INTF3_WB_ROT_COMP 0x00000008 | ||
143 | #define MDP5_IRQ_INTF0_WB_WFD 0x00000010 | ||
144 | #define MDP5_IRQ_INTF1_WB_WFD 0x00000020 | ||
145 | #define MDP5_IRQ_INTF2_WB_WFD 0x00000040 | ||
146 | #define MDP5_IRQ_INTF3_WB_WFD 0x00000080 | ||
147 | #define MDP5_IRQ_INTF0_PING_PONG_COMP 0x00000100 | ||
148 | #define MDP5_IRQ_INTF1_PING_PONG_COMP 0x00000200 | ||
149 | #define MDP5_IRQ_INTF2_PING_PONG_COMP 0x00000400 | ||
150 | #define MDP5_IRQ_INTF3_PING_PONG_COMP 0x00000800 | ||
151 | #define MDP5_IRQ_INTF0_PING_PONG_RD_PTR 0x00001000 | ||
152 | #define MDP5_IRQ_INTF1_PING_PONG_RD_PTR 0x00002000 | ||
153 | #define MDP5_IRQ_INTF2_PING_PONG_RD_PTR 0x00004000 | ||
154 | #define MDP5_IRQ_INTF3_PING_PONG_RD_PTR 0x00008000 | ||
155 | #define MDP5_IRQ_INTF0_PING_PONG_WR_PTR 0x00010000 | ||
156 | #define MDP5_IRQ_INTF1_PING_PONG_WR_PTR 0x00020000 | ||
157 | #define MDP5_IRQ_INTF2_PING_PONG_WR_PTR 0x00040000 | ||
158 | #define MDP5_IRQ_INTF3_PING_PONG_WR_PTR 0x00080000 | ||
159 | #define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF 0x00100000 | ||
160 | #define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF 0x00200000 | ||
161 | #define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF 0x00400000 | ||
162 | #define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF 0x00800000 | ||
163 | #define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000 | ||
164 | #define MDP5_IRQ_INTF0_VSYNC 0x02000000 | ||
165 | #define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000 | ||
166 | #define MDP5_IRQ_INTF1_VSYNC 0x08000000 | ||
167 | #define MDP5_IRQ_INTF2_UNDER_RUN 0x10000000 | ||
168 | #define MDP5_IRQ_INTF2_VSYNC 0x20000000 | ||
169 | #define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000 | ||
170 | #define MDP5_IRQ_INTF3_VSYNC 0x80000000 | ||
171 | #define REG_MDP5_HW_VERSION 0x00000000 | ||
172 | |||
173 | #define REG_MDP5_HW_INTR_STATUS 0x00000010 | ||
174 | #define MDP5_HW_INTR_STATUS_INTR_MDP 0x00000001 | ||
175 | #define MDP5_HW_INTR_STATUS_INTR_DSI0 0x00000010 | ||
176 | #define MDP5_HW_INTR_STATUS_INTR_DSI1 0x00000020 | ||
177 | #define MDP5_HW_INTR_STATUS_INTR_HDMI 0x00000100 | ||
178 | #define MDP5_HW_INTR_STATUS_INTR_EDP 0x00001000 | ||
179 | |||
180 | #define REG_MDP5_MDP_VERSION 0x00000100 | ||
181 | #define MDP5_MDP_VERSION_MINOR__MASK 0x00ff0000 | ||
182 | #define MDP5_MDP_VERSION_MINOR__SHIFT 16 | ||
183 | static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val) | ||
184 | { | ||
185 | return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK; | ||
186 | } | ||
187 | #define MDP5_MDP_VERSION_MAJOR__MASK 0xf0000000 | ||
188 | #define MDP5_MDP_VERSION_MAJOR__SHIFT 28 | ||
189 | static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val) | ||
190 | { | ||
191 | return ((val) << MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK; | ||
192 | } | ||
193 | |||
194 | #define REG_MDP5_DISP_INTF_SEL 0x00000104 | ||
195 | #define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff | ||
196 | #define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0 | ||
197 | static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val) | ||
198 | { | ||
199 | return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK; | ||
200 | } | ||
201 | #define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00 | ||
202 | #define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8 | ||
203 | static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val) | ||
204 | { | ||
205 | return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK; | ||
206 | } | ||
207 | #define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000 | ||
208 | #define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16 | ||
209 | static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val) | ||
210 | { | ||
211 | return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK; | ||
212 | } | ||
213 | #define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000 | ||
214 | #define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24 | ||
215 | static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val) | ||
216 | { | ||
217 | return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK; | ||
218 | } | ||
219 | |||
220 | #define REG_MDP5_INTR_EN 0x00000110 | ||
221 | |||
222 | #define REG_MDP5_INTR_STATUS 0x00000114 | ||
223 | |||
224 | #define REG_MDP5_INTR_CLEAR 0x00000118 | ||
225 | |||
226 | #define REG_MDP5_HIST_INTR_EN 0x0000011c | ||
227 | |||
228 | #define REG_MDP5_HIST_INTR_STATUS 0x00000120 | ||
229 | |||
230 | #define REG_MDP5_HIST_INTR_CLEAR 0x00000124 | ||
231 | |||
232 | static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; } | ||
233 | |||
234 | static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; } | ||
235 | #define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff | ||
236 | #define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0 | ||
237 | static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val) | ||
238 | { | ||
239 | return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; | ||
240 | } | ||
241 | #define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00 | ||
242 | #define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8 | ||
243 | static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val) | ||
244 | { | ||
245 | return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; | ||
246 | } | ||
247 | #define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000 | ||
248 | #define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16 | ||
249 | static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val) | ||
250 | { | ||
251 | return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; | ||
252 | } | ||
253 | |||
254 | static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; } | ||
255 | |||
256 | static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; } | ||
257 | #define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff | ||
258 | #define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0 | ||
259 | static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val) | ||
260 | { | ||
261 | return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK; | ||
262 | } | ||
263 | #define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00 | ||
264 | #define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8 | ||
265 | static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val) | ||
266 | { | ||
267 | return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK; | ||
268 | } | ||
269 | #define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000 | ||
270 | #define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16 | ||
271 | static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val) | ||
272 | { | ||
273 | return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK; | ||
274 | } | ||
275 | |||
276 | static inline uint32_t __offset_IGC(enum mdp5_igc_type idx) | ||
277 | { | ||
278 | switch (idx) { | ||
279 | case IGC_VIG: return 0x00000300; | ||
280 | case IGC_RGB: return 0x00000310; | ||
281 | case IGC_DMA: return 0x00000320; | ||
282 | case IGC_DSPP: return 0x00000400; | ||
283 | default: return INVALID_IDX(idx); | ||
284 | } | ||
285 | } | ||
286 | static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); } | ||
287 | |||
288 | static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } | ||
289 | |||
290 | static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } | ||
291 | #define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff | ||
292 | #define MDP5_IGC_LUT_REG_VAL__SHIFT 0 | ||
293 | static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val) | ||
294 | { | ||
295 | return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK; | ||
296 | } | ||
297 | #define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000 | ||
298 | #define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000 | ||
299 | #define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 | ||
300 | #define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 | ||
301 | |||
302 | static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000600 + 0x100*i0; } | ||
303 | |||
304 | static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; } | ||
305 | |||
306 | static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; } | ||
307 | #define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007 | ||
308 | #define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0 | ||
309 | static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val) | ||
310 | { | ||
311 | return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK; | ||
312 | } | ||
313 | #define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038 | ||
314 | #define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3 | ||
315 | static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val) | ||
316 | { | ||
317 | return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK; | ||
318 | } | ||
319 | #define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0 | ||
320 | #define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6 | ||
321 | static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val) | ||
322 | { | ||
323 | return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK; | ||
324 | } | ||
325 | #define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00 | ||
326 | #define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9 | ||
327 | static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val) | ||
328 | { | ||
329 | return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK; | ||
330 | } | ||
331 | #define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000 | ||
332 | #define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12 | ||
333 | static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val) | ||
334 | { | ||
335 | return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK; | ||
336 | } | ||
337 | #define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000 | ||
338 | #define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15 | ||
339 | static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val) | ||
340 | { | ||
341 | return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK; | ||
342 | } | ||
343 | #define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000 | ||
344 | #define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18 | ||
345 | static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val) | ||
346 | { | ||
347 | return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK; | ||
348 | } | ||
349 | #define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000 | ||
350 | #define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21 | ||
351 | static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val) | ||
352 | { | ||
353 | return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK; | ||
354 | } | ||
355 | #define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000 | ||
356 | #define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000 | ||
357 | |||
358 | static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000614 + 0x100*i0; } | ||
359 | #define MDP5_CTL_OP_MODE__MASK 0x0000000f | ||
360 | #define MDP5_CTL_OP_MODE__SHIFT 0 | ||
361 | static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val) | ||
362 | { | ||
363 | return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK; | ||
364 | } | ||
365 | #define MDP5_CTL_OP_INTF_NUM__MASK 0x00000070 | ||
366 | #define MDP5_CTL_OP_INTF_NUM__SHIFT 4 | ||
367 | static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val) | ||
368 | { | ||
369 | return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK; | ||
370 | } | ||
371 | #define MDP5_CTL_OP_CMD_MODE 0x00020000 | ||
372 | #define MDP5_CTL_OP_PACK_3D_ENABLE 0x00080000 | ||
373 | #define MDP5_CTL_OP_PACK_3D__MASK 0x00300000 | ||
374 | #define MDP5_CTL_OP_PACK_3D__SHIFT 20 | ||
375 | static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val) | ||
376 | { | ||
377 | return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK; | ||
378 | } | ||
379 | |||
380 | static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x100*i0; } | ||
381 | #define MDP5_CTL_FLUSH_VIG0 0x00000001 | ||
382 | #define MDP5_CTL_FLUSH_VIG1 0x00000002 | ||
383 | #define MDP5_CTL_FLUSH_VIG2 0x00000004 | ||
384 | #define MDP5_CTL_FLUSH_RGB0 0x00000008 | ||
385 | #define MDP5_CTL_FLUSH_RGB1 0x00000010 | ||
386 | #define MDP5_CTL_FLUSH_RGB2 0x00000020 | ||
387 | #define MDP5_CTL_FLUSH_LM0 0x00000040 | ||
388 | #define MDP5_CTL_FLUSH_LM1 0x00000080 | ||
389 | #define MDP5_CTL_FLUSH_LM2 0x00000100 | ||
390 | #define MDP5_CTL_FLUSH_DMA0 0x00000800 | ||
391 | #define MDP5_CTL_FLUSH_DMA1 0x00001000 | ||
392 | #define MDP5_CTL_FLUSH_DSPP0 0x00002000 | ||
393 | #define MDP5_CTL_FLUSH_DSPP1 0x00004000 | ||
394 | #define MDP5_CTL_FLUSH_DSPP2 0x00008000 | ||
395 | #define MDP5_CTL_FLUSH_CTL 0x00020000 | ||
396 | |||
397 | static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000061c + 0x100*i0; } | ||
398 | |||
399 | static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000620 + 0x100*i0; } | ||
400 | |||
401 | static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; } | ||
402 | |||
403 | static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000014c4 + 0x400*i0; } | ||
404 | |||
405 | static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000014f0 + 0x400*i0; } | ||
406 | |||
407 | static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00001500 + 0x400*i0; } | ||
408 | |||
409 | static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; } | ||
410 | #define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 | ||
411 | #define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 | ||
412 | static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val) | ||
413 | { | ||
414 | return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK; | ||
415 | } | ||
416 | #define MDP5_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff | ||
417 | #define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT 0 | ||
418 | static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val) | ||
419 | { | ||
420 | return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK; | ||
421 | } | ||
422 | |||
423 | static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00001204 + 0x400*i0; } | ||
424 | #define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000 | ||
425 | #define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16 | ||
426 | static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val) | ||
427 | { | ||
428 | return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK; | ||
429 | } | ||
430 | #define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK 0x0000ffff | ||
431 | #define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT 0 | ||
432 | static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val) | ||
433 | { | ||
434 | return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK; | ||
435 | } | ||
436 | |||
437 | static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00001208 + 0x400*i0; } | ||
438 | #define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000 | ||
439 | #define MDP5_PIPE_SRC_XY_Y__SHIFT 16 | ||
440 | static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val) | ||
441 | { | ||
442 | return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK; | ||
443 | } | ||
444 | #define MDP5_PIPE_SRC_XY_X__MASK 0x0000ffff | ||
445 | #define MDP5_PIPE_SRC_XY_X__SHIFT 0 | ||
446 | static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val) | ||
447 | { | ||
448 | return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK; | ||
449 | } | ||
450 | |||
451 | static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000120c + 0x400*i0; } | ||
452 | #define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000 | ||
453 | #define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16 | ||
454 | static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val) | ||
455 | { | ||
456 | return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK; | ||
457 | } | ||
458 | #define MDP5_PIPE_OUT_SIZE_WIDTH__MASK 0x0000ffff | ||
459 | #define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT 0 | ||
460 | static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val) | ||
461 | { | ||
462 | return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK; | ||
463 | } | ||
464 | |||
465 | static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00001210 + 0x400*i0; } | ||
466 | #define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000 | ||
467 | #define MDP5_PIPE_OUT_XY_Y__SHIFT 16 | ||
468 | static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val) | ||
469 | { | ||
470 | return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK; | ||
471 | } | ||
472 | #define MDP5_PIPE_OUT_XY_X__MASK 0x0000ffff | ||
473 | #define MDP5_PIPE_OUT_XY_X__SHIFT 0 | ||
474 | static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val) | ||
475 | { | ||
476 | return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK; | ||
477 | } | ||
478 | |||
479 | static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00001214 + 0x400*i0; } | ||
480 | |||
481 | static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00001218 + 0x400*i0; } | ||
482 | |||
483 | static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000121c + 0x400*i0; } | ||
484 | |||
485 | static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00001220 + 0x400*i0; } | ||
486 | |||
487 | static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00001224 + 0x400*i0; } | ||
488 | #define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff | ||
489 | #define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0 | ||
490 | static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val) | ||
491 | { | ||
492 | return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK; | ||
493 | } | ||
494 | #define MDP5_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000 | ||
495 | #define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT 16 | ||
496 | static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val) | ||
497 | { | ||
498 | return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK; | ||
499 | } | ||
500 | |||
501 | static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00001228 + 0x400*i0; } | ||
502 | #define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff | ||
503 | #define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0 | ||
504 | static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val) | ||
505 | { | ||
506 | return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK; | ||
507 | } | ||
508 | #define MDP5_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000 | ||
509 | #define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT 16 | ||
510 | static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val) | ||
511 | { | ||
512 | return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK; | ||
513 | } | ||
514 | |||
515 | static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000122c + 0x400*i0; } | ||
516 | |||
517 | static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00001230 + 0x400*i0; } | ||
518 | #define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 | ||
519 | #define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 | ||
520 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val) | ||
521 | { | ||
522 | return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK; | ||
523 | } | ||
524 | #define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c | ||
525 | #define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 | ||
526 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val) | ||
527 | { | ||
528 | return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK; | ||
529 | } | ||
530 | #define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 | ||
531 | #define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 | ||
532 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val) | ||
533 | { | ||
534 | return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK; | ||
535 | } | ||
536 | #define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 | ||
537 | #define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 | ||
538 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val) | ||
539 | { | ||
540 | return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK; | ||
541 | } | ||
542 | #define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100 | ||
543 | #define MDP5_PIPE_SRC_FORMAT_CPP__MASK 0x00000600 | ||
544 | #define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT 9 | ||
545 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val) | ||
546 | { | ||
547 | return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK; | ||
548 | } | ||
549 | #define MDP5_PIPE_SRC_FORMAT_ROT90 0x00000800 | ||
550 | #define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00003000 | ||
551 | #define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 12 | ||
552 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) | ||
553 | { | ||
554 | return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK; | ||
555 | } | ||
556 | #define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 | ||
557 | #define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 | ||
558 | #define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00780000 | ||
559 | #define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19 | ||
560 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(uint32_t val) | ||
561 | { | ||
562 | return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK; | ||
563 | } | ||
564 | #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000 | ||
565 | #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23 | ||
566 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_type val) | ||
567 | { | ||
568 | return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; | ||
569 | } | ||
570 | |||
571 | static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00001234 + 0x400*i0; } | ||
572 | #define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff | ||
573 | #define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 | ||
574 | static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val) | ||
575 | { | ||
576 | return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK; | ||
577 | } | ||
578 | #define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00 | ||
579 | #define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT 8 | ||
580 | static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val) | ||
581 | { | ||
582 | return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK; | ||
583 | } | ||
584 | #define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000 | ||
585 | #define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT 16 | ||
586 | static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val) | ||
587 | { | ||
588 | return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK; | ||
589 | } | ||
590 | #define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000 | ||
591 | #define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT 24 | ||
592 | static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val) | ||
593 | { | ||
594 | return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK; | ||
595 | } | ||
596 | |||
/* Per-pipe SRC_OP_MODE: bandwidth compression (BWC), H/V flip, inverse gamma
 * (IGC) and deinterlace controls. Generated accessors — do not hand-edit.
 */
static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00001238 + 0x400*i0; }
#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001
#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006
#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1
static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
{
	return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK;
}
#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR 0x00002000
#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD 0x00004000
#define MDP5_PIPE_SRC_OP_MODE_IGC_EN 0x00010000
#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0 0x00020000
#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000
#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
612 | |||
/* Miscellaneous per-pipe registers: constant color, fetch config, VC1 range,
 * request-priority FIFO watermarks, scanout address status and decimation.
 * Generated accessors — do not hand-edit.
 */
static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000123c + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00001248 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000124c + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00001250 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00001254 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00001258 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00001270 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000012a4 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000012a8 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000012ac + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000012b0 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000012b4 + 0x400*i0; }
#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
{
	return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK;
}
#define MDP5_PIPE_DECIMATION_HORZ__MASK 0x0000ff00
#define MDP5_PIPE_DECIMATION_HORZ__SHIFT 8
static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
{
	return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
}
648 | |||
/* Per-pipe SCALE_CONFIG: enable bits plus min/CR/max filter selection for the
 * X and Y scalers. Generated accessors — do not hand-edit.
 */
static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00001404 + 0x400*i0; }
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT 8
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
{
	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK 0x00000c00
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT 10
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
{
	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK 0x00003000
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT 12
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
{
	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK 0x0000c000
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT 14
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
{
	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK 0x00030000
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT 16
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
{
	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK 0x000c0000
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT 18
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
{
	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
}
688 | |||
/* Per-pipe scaler phase registers (step and initial phase, X and Y). */
static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00001410 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00001414 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00001420 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00001424 + 0x400*i0; }
696 | |||
/* Layer mixer (LM) register block, stride 0x400 per mixer instance: blend
 * stages (stride 0x30 per stage i1), output size, border color, cursor and
 * gamma-correction LUT. Generated accessors — do not hand-edit.
 */
static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00003200 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00003200 + 0x400*i0; }
#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010

static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00003204 + 0x400*i0; }
#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
{
	return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK;
}
#define MDP5_LM_OUT_SIZE_WIDTH__MASK 0x0000ffff
#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT 0
static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
{
	return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
}

static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00003208 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00003210 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
{
	return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK;
}
#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA 0x00000004
#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA 0x00000008
#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA 0x00000010
#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN 0x00000020
#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK 0x00000300
#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT 8
static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
{
	return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK;
}
#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA 0x00000400
#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA 0x00000800
#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000

static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003224 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003228 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000322c + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003230 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003234 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003238 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000323c + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003240 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003244 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003248 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000032e0 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000032e4 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000032e8 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000032dc + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000032ec + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000032f0 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000032f4 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000032f8 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000032fc + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00003300 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00003304 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00003308 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000330c + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00003310 + 0x400*i0; }
794 | |||
/* Destination surface post-processor (DSPP) register block, stride 0x400 per
 * instance: IGC/PCC/dither/histogram/picture-adjust/gamut controls.
 * Generated accessors — do not hand-edit.
 */
static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00004600 + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00004600 + 0x400*i0; }
#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
{
	return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK;
}
#define MDP5_DSPP_OP_MODE_PCC_EN 0x00000010
#define MDP5_DSPP_OP_MODE_DITHER_EN 0x00000100
#define MDP5_DSPP_OP_MODE_HIST_EN 0x00010000
#define MDP5_DSPP_OP_MODE_AUTO_CLEAR 0x00020000
#define MDP5_DSPP_OP_MODE_HIST_LUT_EN 0x00080000
#define MDP5_DSPP_OP_MODE_PA_EN 0x00100000
#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000

static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00004630 + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00004750 + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00004810 + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00004830 + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00004834 + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00004838 + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000048dc + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000048b0 + 0x400*i0; }
829 | |||
/* Display interface (INTF) register block, stride 0x200 per instance: timing
 * engine (h/v sync, active/display start-end), polarity, test pattern
 * generator (TPG) and frame/line counters. Generated accessors — do not
 * hand-edit.
 */
static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00012500 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00012500 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00012504 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00012508 + 0x200*i0; }
#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff
#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0
static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
{
	return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK;
}
#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK 0xffff0000
#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT 16
static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
{
	return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
}

static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0001250c + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00012510 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00012514 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00012518 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0001251c + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00012520 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00012524 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00012528 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0001252c + 0x200*i0; }
#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff
#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
{
	return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK;
}
#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000

static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00012530 + 0x200*i0; }
#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff
#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
{
	return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
}

static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00012534 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00012538 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0001253c + 0x200*i0; }
#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff
#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0
static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
{
	return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK;
}
#define MDP5_INTF_DISPLAY_HCTL_END__MASK 0xffff0000
#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT 16
static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
{
	return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
}

static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00012540 + 0x200*i0; }
#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff
#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
{
	return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK;
}
#define MDP5_INTF_ACTIVE_HCTL_END__MASK 0x7fff0000
#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT 16
static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
{
	return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK;
}
#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000

static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00012544 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00012548 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0001254c + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00012550 + 0x200*i0; }
#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001
#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002
#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004

static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00012554 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00012558 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0001255c + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00012584 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00012590 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000125a8 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000125ac + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000125b0 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000125f0 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000125f4 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000125f8 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00012600 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00012604 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00012608 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0001260c + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00012610 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00012614 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00012618 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0001261c + 0x200*i0; }
964 | |||
/* Assertive display (AD) register block, stride 0x200 per instance: ambient
 * light / backlight adaptive brightness engine controls. Generated
 * accessors — do not hand-edit.
 */
static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00013100 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00013100 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00013104 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00013108 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0001310c + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00013110 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00013114 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00013118 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0001311c + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00013120 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00013124 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00013128 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0001312c + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00013130 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00013134 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00013138 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0001317c + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000131c8 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000131cc + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000131d0 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000131d4 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000131d8 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000131dc + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000131e0 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000131e8 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000131ec + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000131f0 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000131f4 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000131f8 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00013200 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00013244 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00013248 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0001324c + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00013254 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00013258 + 0x200*i0; }
1034 | |||
1035 | |||
1036 | #endif /* MDP5_XML */ | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c new file mode 100644 index 000000000000..71a3b2345eb3 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
@@ -0,0 +1,569 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #include "mdp5_kms.h" | ||
19 | |||
20 | #include <drm/drm_mode.h> | ||
21 | #include "drm_crtc.h" | ||
22 | #include "drm_crtc_helper.h" | ||
23 | #include "drm_flip_work.h" | ||
24 | |||
/*
 * Per-CRTC state for the MDP5 display controller.
 *
 * NOTE(review): ->plane appears to be the primary plane while ->planes[]
 * tracks every plane currently attached to this CRTC — confirm against the
 * init/attach code outside this view.
 */
struct mdp5_crtc {
	struct drm_crtc base;
	char name[8];			/* e.g. for DBG() messages */
	struct drm_plane *plane;
	struct drm_plane *planes[8];
	int id;				/* CTL index, used for FLUSH register */
	bool enabled;			/* tracks DPMS on/off state */

	/* which mixer/encoder we route output to: */
	int mixer;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;
	struct msm_fence_cb pageflip_cb;

/* bits for the 'pending' atomic, consumed when the vblank irq fires: */
#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* the fb that we logically (from PoV of KMS API) hold a ref
	 * to. Which we may not yet be scanning out (we may still
	 * be scanning out previous in case of page_flip while waiting
	 * for gpu rendering to complete:
	 */
	struct drm_framebuffer *fb;

	/* the fb that we currently hold a scanout ref to: */
	struct drm_framebuffer *scanout_fb;

	/* for unref'ing framebuffers after scanout completes: */
	struct drm_flip_work unref_fb_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
61 | |||
62 | static struct mdp5_kms *get_kms(struct drm_crtc *crtc) | ||
63 | { | ||
64 | struct msm_drm_private *priv = crtc->dev->dev_private; | ||
65 | return to_mdp5_kms(to_mdp_kms(priv->kms)); | ||
66 | } | ||
67 | |||
68 | static void request_pending(struct drm_crtc *crtc, uint32_t pending) | ||
69 | { | ||
70 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
71 | |||
72 | atomic_or(pending, &mdp5_crtc->pending); | ||
73 | mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank); | ||
74 | } | ||
75 | |||
76 | static void crtc_flush(struct drm_crtc *crtc) | ||
77 | { | ||
78 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
79 | struct mdp5_kms *mdp5_kms = get_kms(crtc); | ||
80 | int id = mdp5_crtc->id; | ||
81 | uint32_t i, flush = 0; | ||
82 | |||
83 | for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) { | ||
84 | struct drm_plane *plane = mdp5_crtc->planes[i]; | ||
85 | if (plane) { | ||
86 | enum mdp5_pipe pipe = mdp5_plane_pipe(plane); | ||
87 | flush |= pipe2flush(pipe); | ||
88 | } | ||
89 | } | ||
90 | flush |= mixer2flush(mdp5_crtc->id); | ||
91 | flush |= MDP5_CTL_FLUSH_CTL; | ||
92 | |||
93 | DBG("%s: flush=%08x", mdp5_crtc->name, flush); | ||
94 | |||
95 | mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush); | ||
96 | } | ||
97 | |||
98 | static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb) | ||
99 | { | ||
100 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
101 | struct drm_framebuffer *old_fb = mdp5_crtc->fb; | ||
102 | |||
103 | /* grab reference to incoming scanout fb: */ | ||
104 | drm_framebuffer_reference(new_fb); | ||
105 | mdp5_crtc->base.fb = new_fb; | ||
106 | mdp5_crtc->fb = new_fb; | ||
107 | |||
108 | if (old_fb) | ||
109 | drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb); | ||
110 | } | ||
111 | |||
/* unlike update_fb(), take a ref to the new scanout fb *before* updating
 * plane, then call this. Needed to ensure we don't unref the buffer that
 * is actually still being scanned out.
 *
 * Note that this whole thing goes away with atomic.. since we can defer
 * calling into driver until rendering is done.
 */
static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	/* flush updates, to make sure hw is updated to new scanout fb,
	 * so that we can safely queue unref to current fb (ie. next
	 * vblank we know hw is done w/ previous scanout_fb).
	 */
	/* NOTE: crtc_flush() must precede queuing the unref below — do not
	 * reorder, or we could free a buffer hw is still scanning out.
	 */
	crtc_flush(crtc);

	if (mdp5_crtc->scanout_fb)
		drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
				mdp5_crtc->scanout_fb);

	mdp5_crtc->scanout_fb = fb;

	/* enable vblank to complete flip: */
	request_pending(crtc, PENDING_FLIP);
}
138 | |||
/* if file!=NULL, this is preclose potential cancel-flip path */
/* Complete a pending page flip: deliver the vblank event (under event_lock)
 * and let each attached plane drop its "flip pending" state.
 */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags, i;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp5_crtc->event = NULL;
			drm_send_vblank_event(dev, mdp5_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/* planes are notified outside event_lock; they take no locks here */
	for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
		struct drm_plane *plane = mdp5_crtc->planes[i];
		if (plane)
			mdp5_plane_complete_flip(plane);
	}
}
167 | |||
168 | static void pageflip_cb(struct msm_fence_cb *cb) | ||
169 | { | ||
170 | struct mdp5_crtc *mdp5_crtc = | ||
171 | container_of(cb, struct mdp5_crtc, pageflip_cb); | ||
172 | struct drm_crtc *crtc = &mdp5_crtc->base; | ||
173 | struct drm_framebuffer *fb = mdp5_crtc->fb; | ||
174 | |||
175 | if (!fb) | ||
176 | return; | ||
177 | |||
178 | drm_framebuffer_reference(fb); | ||
179 | mdp5_plane_set_scanout(mdp5_crtc->plane, fb); | ||
180 | update_scanout(crtc, fb); | ||
181 | } | ||
182 | |||
/* flip-work handler: drop a deferred framebuffer reference.  Runs from
 * the workqueue (see drm_flip_work_commit() in the vblank irq), so it
 * may sleep and take mode_config.mutex.
 */
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
			container_of(work, struct mdp5_crtc, unref_fb_work);
	struct drm_device *dev = mdp5_crtc->base.dev;

	/* fb unref is done under mode_config.mutex, matching the locking
	 * used elsewhere for fb lifetime in this driver:
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_framebuffer_unreference(val);
	mutex_unlock(&dev->mode_config.mutex);
}
193 | |||
/* .destroy: tear down the crtc and its private plane.  Order matters:
 * destroy the plane first, then cleanup the drm core state, then the
 * flip-work (which must be idle by now), then free.
 */
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	mdp5_crtc->plane->funcs->destroy(mdp5_crtc->plane);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);

	kfree(mdp5_crtc);
}
205 | |||
206 | static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
207 | { | ||
208 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
209 | struct mdp5_kms *mdp5_kms = get_kms(crtc); | ||
210 | bool enabled = (mode == DRM_MODE_DPMS_ON); | ||
211 | |||
212 | DBG("%s: mode=%d", mdp5_crtc->name, mode); | ||
213 | |||
214 | if (enabled != mdp5_crtc->enabled) { | ||
215 | if (enabled) { | ||
216 | mdp5_enable(mdp5_kms); | ||
217 | mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); | ||
218 | } else { | ||
219 | mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); | ||
220 | mdp5_disable(mdp5_kms); | ||
221 | } | ||
222 | mdp5_crtc->enabled = enabled; | ||
223 | } | ||
224 | } | ||
225 | |||
/* .mode_fixup: no adjustment needed, accept any requested mode as-is */
static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}
232 | |||
/* program the layer-mixer and CTL for a fixed single-RGB-pipe setup:
 * stage0 FG at constant full alpha over the border color.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	int id = mdp5_crtc->id;

	/*
	 * Hard-coded setup for now until I figure out how the
	 * layer-mixer works
	 */

	/* LM[id]: FG uses constant alpha (0xff), BG weighted by inverted
	 * FG pixel alpha -- ie. simple src-over of the one pipe:
	 */
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
			MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
			MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
			MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);

	/* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but
	 * we want to be setting CTL[m].LAYER[n].  Not sure what the
	 * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is
	 * used when chaining up mixers for high resolution displays?
	 */

	/* CTL[id]: stage the RGB0 pipe at STAGE0, clear the rest, and
	 * enable border color fill:
	 */
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
			MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
			MDP5_CTL_LAYER_REG_BORDER_COLOR);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
}
269 | |||
270 | static int mdp5_crtc_mode_set(struct drm_crtc *crtc, | ||
271 | struct drm_display_mode *mode, | ||
272 | struct drm_display_mode *adjusted_mode, | ||
273 | int x, int y, | ||
274 | struct drm_framebuffer *old_fb) | ||
275 | { | ||
276 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
277 | struct mdp5_kms *mdp5_kms = get_kms(crtc); | ||
278 | int ret; | ||
279 | |||
280 | mode = adjusted_mode; | ||
281 | |||
282 | DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", | ||
283 | mdp5_crtc->name, mode->base.id, mode->name, | ||
284 | mode->vrefresh, mode->clock, | ||
285 | mode->hdisplay, mode->hsync_start, | ||
286 | mode->hsync_end, mode->htotal, | ||
287 | mode->vdisplay, mode->vsync_start, | ||
288 | mode->vsync_end, mode->vtotal, | ||
289 | mode->type, mode->flags); | ||
290 | |||
291 | /* grab extra ref for update_scanout() */ | ||
292 | drm_framebuffer_reference(crtc->fb); | ||
293 | |||
294 | ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->fb, | ||
295 | 0, 0, mode->hdisplay, mode->vdisplay, | ||
296 | x << 16, y << 16, | ||
297 | mode->hdisplay << 16, mode->vdisplay << 16); | ||
298 | if (ret) { | ||
299 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", | ||
300 | mdp5_crtc->name, ret); | ||
301 | return ret; | ||
302 | } | ||
303 | |||
304 | mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id), | ||
305 | MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) | | ||
306 | MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); | ||
307 | |||
308 | update_fb(crtc, crtc->fb); | ||
309 | update_scanout(crtc, crtc->fb); | ||
310 | |||
311 | return 0; | ||
312 | } | ||
313 | |||
/* .prepare: called before mode_set; power down the crtc but keep the
 * mdp clocks running for the duration of the modeset.
 */
static void mdp5_crtc_prepare(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	DBG("%s", mdp5_crtc->name);
	/* make sure we hold a ref to mdp clks while setting up mode: */
	mdp5_enable(get_kms(crtc));
	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
322 | |||
/* .commit: called after mode_set; power the crtc back up, flush the
 * programmed state to hw, and release the clock ref from prepare().
 */
static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	crtc_flush(crtc);
	/* drop the ref to mdp clk's that we got in prepare: */
	mdp5_disable(get_kms(crtc));
}
330 | |||
331 | static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | ||
332 | struct drm_framebuffer *old_fb) | ||
333 | { | ||
334 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
335 | struct drm_plane *plane = mdp5_crtc->plane; | ||
336 | struct drm_display_mode *mode = &crtc->mode; | ||
337 | int ret; | ||
338 | |||
339 | /* grab extra ref for update_scanout() */ | ||
340 | drm_framebuffer_reference(crtc->fb); | ||
341 | |||
342 | ret = mdp5_plane_mode_set(plane, crtc, crtc->fb, | ||
343 | 0, 0, mode->hdisplay, mode->vdisplay, | ||
344 | x << 16, y << 16, | ||
345 | mode->hdisplay << 16, mode->vdisplay << 16); | ||
346 | |||
347 | update_fb(crtc, crtc->fb); | ||
348 | update_scanout(crtc, crtc->fb); | ||
349 | |||
350 | return ret; | ||
351 | } | ||
352 | |||
/* .load_lut: intentionally empty -- gamma LUT programming is not
 * implemented for mdp5.
 */
static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
{
}
356 | |||
357 | static int mdp5_crtc_page_flip(struct drm_crtc *crtc, | ||
358 | struct drm_framebuffer *new_fb, | ||
359 | struct drm_pending_vblank_event *event, | ||
360 | uint32_t page_flip_flags) | ||
361 | { | ||
362 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
363 | struct drm_device *dev = crtc->dev; | ||
364 | struct drm_gem_object *obj; | ||
365 | unsigned long flags; | ||
366 | |||
367 | if (mdp5_crtc->event) { | ||
368 | dev_err(dev->dev, "already pending flip!\n"); | ||
369 | return -EBUSY; | ||
370 | } | ||
371 | |||
372 | obj = msm_framebuffer_bo(new_fb, 0); | ||
373 | |||
374 | spin_lock_irqsave(&dev->event_lock, flags); | ||
375 | mdp5_crtc->event = event; | ||
376 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
377 | |||
378 | update_fb(crtc, new_fb); | ||
379 | |||
380 | return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb); | ||
381 | } | ||
382 | |||
/* .set_property: no crtc properties supported yet */
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}
389 | |||
/* crtc funcs: core drm_crtc entry points */
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = mdp5_crtc_page_flip,
	.set_property = mdp5_crtc_set_property,
};
396 | |||
/* crtc helper funcs: modeset-helper callbacks */
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.dpms = mdp5_crtc_dpms,
	.mode_fixup = mdp5_crtc_mode_fixup,
	.mode_set = mdp5_crtc_mode_set,
	.prepare = mdp5_crtc_prepare,
	.commit = mdp5_crtc_commit,
	.mode_set_base = mdp5_crtc_mode_set_base,
	.load_lut = mdp5_crtc_load_lut,
};
406 | |||
/* vblank irq handler: one-shot -- unregisters itself, then completes a
 * pending flip (sends the userspace event and commits deferred fb unrefs).
 */
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	/* atomically consume the pending flags set by request_pending(): */
	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
		drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
	}
}
423 | |||
/* error irq handler (eg. underflow): log and re-flush the crtc state */
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
	crtc_flush(crtc);
}
431 | |||
/* return the vblank irq mask bit(s) for this crtc's interface */
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}
437 | |||
/* called from preclose: cancel any flip still pending for this file */
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);
	complete_flip(crtc, file);
}
443 | |||
/* set interface for routing crtc->encoder: */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
		enum mdp5_intf intf_id)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	static const enum mdp5_intfnum intfnum[] = {
			INTF0, INTF1, INTF2, INTF3,
	};
	uint32_t intf_sel;

	/* now that we know what irq's we want: */
	mdp5_crtc->err.irqmask = intf2err(intf);
	mdp5_crtc->vblank.irqmask = intf2vblank(intf);

	/* when called from modeset_init(), skip the rest until later: */
	if (!mdp5_kms)
		return;

	/* read-modify-write: only touch the field for our interface,
	 * other interfaces may already be routed:
	 */
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
		break;
	default:
		BUG();
		break;
	}

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
			MDP5_CTL_OP_MODE(MODE_NONE) |
			MDP5_CTL_OP_INTF_NUM(intfnum[intf]));

	crtc_flush(crtc);
}
498 | |||
/* record (or clear, plane==NULL) a plane attachment at pipe_id and
 * re-program blending; flush unless this is the crtc's own private
 * plane (which flushes via its own paths).
 */
static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
		struct drm_plane *plane)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));

	if (mdp5_crtc->planes[pipe_id] == plane)
		return;

	mdp5_crtc->planes[pipe_id] = plane;
	blend_setup(crtc);
	if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
		crtc_flush(crtc);
}
514 | |||
/* attach an overlay plane to this crtc */
void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
{
	set_attach(crtc, mdp5_plane_pipe(plane), plane);
}
519 | |||
/* detach a previously attached overlay plane from this crtc */
void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
{
	set_attach(crtc, mdp5_plane_pipe(plane), NULL);
}
524 | |||
/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;
	int ret;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc) {
		ret = -ENOMEM;
		goto fail;
	}

	crtc = &mdp5_crtc->base;

	mdp5_crtc->plane = plane;
	mdp5_crtc->id = id;

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;

	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
			pipe2name(mdp5_plane_pipe(plane)), id);

	/* NOTE(review): if drm_flip_work_init() fails here, the fail path
	 * below calls mdp5_crtc_destroy(), which runs drm_flip_work_cleanup()
	 * on a flip-work that was never successfully initialized -- confirm
	 * cleanup is safe on a failed init, or kfree directly on this path.
	 */
	ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
			"unref fb", unref_fb_worker);
	if (ret)
		goto fail;

	INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);

	drm_crtc_init(dev, crtc, &mdp5_crtc_funcs);
	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

	mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);

	return crtc;

fail:
	/* mdp5_crtc_destroy() also destroys the plane we were handed */
	if (crtc)
		mdp5_crtc_destroy(crtc);

	return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c new file mode 100644 index 000000000000..edec7bfaa952 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | |||
@@ -0,0 +1,258 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #include "mdp5_kms.h" | ||
19 | |||
20 | #include "drm_crtc.h" | ||
21 | #include "drm_crtc_helper.h" | ||
22 | |||
/* per-encoder state: which mdp interface it drives, current dpms
 * state, and the bus-scaling client handle (0 when unused).
 */
struct mdp5_encoder {
	struct drm_encoder base;
	int intf;                 /* interface index (0..3) */
	enum mdp5_intf intf_id;   /* interface type (HDMI/DSI/..) */
	bool enabled;             /* current dpms on/off state */
	uint32_t bsc;             /* bus-scale client id, 0 if none */
};
#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
31 | |||
/* fetch the mdp5_kms back-pointer from the encoder's drm device */
static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
{
	struct msm_drm_private *priv = encoder->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
37 | |||
/* Optional memory-bus scaling support (downstream MSM bus API).  When
 * CONFIG_MSM_BUS_SCALING is not set, the bs_* helpers compile to no-ops.
 */
#ifdef CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val)		\
	{						\
		.src = MSM_BUS_MASTER_MDP_PORT0,	\
		.dst = MSM_BUS_SLAVE_EBI_CH0,		\
		.ab = (ab_val),				\
		.ib = (ib_val),				\
	}

/* two usecases: [0] idle (no bandwidth), [1] active (max bandwidth) */
static struct msm_bus_vectors mdp_bus_vectors[] = {
	MDP_BUS_VECTOR_ENTRY(0, 0),
	MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
};
static struct msm_bus_paths mdp_bus_usecases[] = { {
		.num_paths = 1,
		.vectors = &mdp_bus_vectors[0],
}, {
		.num_paths = 1,
		.vectors = &mdp_bus_vectors[1],
} };
static struct msm_bus_scale_pdata mdp_bus_scale_table = {
	.usecase = mdp_bus_usecases,
	.num_usecases = ARRAY_SIZE(mdp_bus_usecases),
	.name = "mdss_mdp",
};

/* register a bus-scale client; mdp5_encoder->bsc stays 0 on failure */
static void bs_init(struct mdp5_encoder *mdp5_encoder)
{
	mdp5_encoder->bsc = msm_bus_scale_register_client(
			&mdp_bus_scale_table);
	DBG("bus scale client: %08x", mdp5_encoder->bsc);
}

static void bs_fini(struct mdp5_encoder *mdp5_encoder)
{
	if (mdp5_encoder->bsc) {
		msm_bus_scale_unregister_client(mdp5_encoder->bsc);
		mdp5_encoder->bsc = 0;
	}
}

/* request bus bandwidth usecase idx (but see HACK below) */
static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx)
{
	if (mdp5_encoder->bsc) {
		DBG("set bus scaling: %d", idx);
		/* HACK: scaling down, and then immediately back up
		 * seems to leave things broken (underflow).. so
		 * never disable:
		 */
		idx = 1;
		msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx);
	}
}
#else
static void bs_init(struct mdp5_encoder *mdp5_encoder) {}
static void bs_fini(struct mdp5_encoder *mdp5_encoder) {}
static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {}
#endif
99 | |||
/* .destroy: release bus-scale client, drm core state, then free */
static void mdp5_encoder_destroy(struct drm_encoder *encoder)
{
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	bs_fini(mdp5_encoder);
	drm_encoder_cleanup(encoder);
	kfree(mdp5_encoder);
}
107 | |||
/* encoder funcs: core drm_encoder entry points */
static const struct drm_encoder_funcs mdp5_encoder_funcs = {
	.destroy = mdp5_encoder_destroy,
};
111 | |||
112 | static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode) | ||
113 | { | ||
114 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); | ||
115 | struct mdp5_kms *mdp5_kms = get_kms(encoder); | ||
116 | int intf = mdp5_encoder->intf; | ||
117 | bool enabled = (mode == DRM_MODE_DPMS_ON); | ||
118 | |||
119 | DBG("mode=%d", mode); | ||
120 | |||
121 | if (enabled == mdp5_encoder->enabled) | ||
122 | return; | ||
123 | |||
124 | if (enabled) { | ||
125 | bs_set(mdp5_encoder, 1); | ||
126 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); | ||
127 | } else { | ||
128 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0); | ||
129 | bs_set(mdp5_encoder, 0); | ||
130 | } | ||
131 | |||
132 | mdp5_encoder->enabled = enabled; | ||
133 | } | ||
134 | |||
/* .mode_fixup: no adjustment needed, accept the mode as-is */
static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}
141 | |||
/* .mode_set: program the interface timing registers from the adjusted
 * display mode.  Horizontal values are in pixels, vertical values are
 * converted to line-start offsets in units of htotal pixels.
 */
static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
		struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	struct mdp5_kms *mdp5_kms = get_kms(encoder);
	int intf = mdp5_encoder->intf;
	uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
	uint32_t display_v_start, display_v_end;
	uint32_t hsync_start_x, hsync_end_x;
	uint32_t format;

	mode = adjusted_mode;

	DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	/* sync polarities: registers take active-low flags */
	ctrl_pol = 0;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW;
	/* probably need to get DATA_EN polarity from panel.. */

	dtv_hsync_skew = 0;  /* get this from panel? */
	format = 0x213f;     /* magic panel-format value -- get this from panel? */

	/* active region in pixels, measured from start of hsync: */
	hsync_start_x = (mode->htotal - mode->hsync_start);
	hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;

	/* vertical timings expressed in pixel-clock ticks (lines * htotal): */
	vsync_period = mode->vtotal * mode->htotal;
	vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
	display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
	display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;

	mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
			MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
			MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
	mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf),
			MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) |
			MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x));
	mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol);
	/* zero ACTIVE_* regs: presumably disables the separate "active
	 * window" feature so the full display area is used -- confirm
	 * against MDP5 hw docs:
	 */
	mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf),
			MDP5_INTF_ACTIVE_HCTL_START(0) |
			MDP5_INTF_ACTIVE_HCTL_END(0));
	mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3);  /* frame+line? */
}
205 | |||
/* .prepare: power down the encoder before a modeset */
static void mdp5_encoder_prepare(struct drm_encoder *encoder)
{
	mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
}
210 | |||
/* .commit: route the attached crtc to our interface, then power up */
static void mdp5_encoder_commit(struct drm_encoder *encoder)
{
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
			mdp5_encoder->intf_id);
	mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
}
218 | |||
/* encoder helper funcs: modeset-helper callbacks */
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
	.dpms = mdp5_encoder_dpms,
	.mode_fixup = mdp5_encoder_mode_fixup,
	.mode_set = mdp5_encoder_mode_set,
	.prepare = mdp5_encoder_prepare,
	.commit = mdp5_encoder_commit,
};
226 | |||
/* initialize encoder */
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
		enum mdp5_intf intf_id)
{
	struct drm_encoder *encoder = NULL;
	struct mdp5_encoder *mdp5_encoder;
	int ret;

	mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
	if (!mdp5_encoder) {
		ret = -ENOMEM;
		goto fail;
	}

	mdp5_encoder->intf = intf;
	mdp5_encoder->intf_id = intf_id;
	encoder = &mdp5_encoder->base;

	/* NOTE(review): encoder type is hard-coded TMDS regardless of
	 * intf_id -- presumably fine while only HDMI is wired up; revisit
	 * when DSI/other interface types are supported.
	 */
	drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
			DRM_MODE_ENCODER_TMDS);
	drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);

	bs_init(mdp5_encoder);

	return encoder;

fail:
	if (encoder)
		mdp5_encoder_destroy(encoder);

	return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c new file mode 100644 index 000000000000..353d494a497f --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | |||
@@ -0,0 +1,111 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | |||
19 | #include "msm_drv.h" | ||
20 | #include "mdp5_kms.h" | ||
21 | |||
/* write the combined irq enable mask to hw (called by shared mdp irq code) */
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
{
	mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
}
26 | |||
/* catch-all handler for error irqs (underrun etc.): just log */
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
	DRM_ERROR("errors: %08x\n", irqstatus);
}
31 | |||
/* .irq_preinstall: clear any stale interrupt status before enabling */
void mdp5_irq_preinstall(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
}
37 | |||
/* .irq_postinstall: register the always-on underrun error handler for
 * all four interfaces.  Always returns 0.
 */
int mdp5_irq_postinstall(struct msm_kms *kms)
{
	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
	struct mdp_irq *error_handler = &mdp5_kms->error_handler;

	error_handler->irq = mdp5_irq_error_handler;
	error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN |
			MDP5_IRQ_INTF1_UNDER_RUN |
			MDP5_IRQ_INTF2_UNDER_RUN |
			MDP5_IRQ_INTF3_UNDER_RUN;

	mdp_irq_register(mdp_kms, error_handler);

	return 0;
}
54 | |||
/* .irq_uninstall: mask off all mdp interrupts */
void mdp5_irq_uninstall(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
}
60 | |||
/* handle the MDP sub-block interrupt: ack status, forward vblanks to
 * drm core, and dispatch to registered mdp_irq handlers.
 */
static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int id;
	uint32_t status;

	/* read-and-ack in one go so we don't lose edges: */
	status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS);
	mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);

	VERB("status=%08x", status);

	for (id = 0; id < priv->num_crtcs; id++)
		if (status & mdp5_crtc_vblank(priv->crtcs[id]))
			drm_handle_vblank(dev, id);

	mdp_dispatch_irqs(mdp_kms, status);
}
80 | |||
/* top-level irq handler: demux the hw interrupt status to the MDP and
 * HDMI sub-blocks.
 */
irqreturn_t mdp5_irq(struct msm_kms *kms)
{
	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
	uint32_t intr;

	intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);

	VERB("intr=%08x", intr);

	if (intr & MDP5_HW_INTR_STATUS_INTR_MDP)
		mdp5_irq_mdp(mdp_kms);

	if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
		hdmi_irq(0, mdp5_kms->hdmi);

	return IRQ_HANDLED;
}
99 | |||
/* .enable_vblank: add this crtc's bits to the vblank irq mask */
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	mdp_update_vblank_mask(to_mdp_kms(kms),
			mdp5_crtc_vblank(crtc), true);
	return 0;
}
106 | |||
/* .disable_vblank: remove this crtc's bits from the vblank irq mask */
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	mdp_update_vblank_mask(to_mdp_kms(kms),
			mdp5_crtc_vblank(crtc), false);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c new file mode 100644 index 000000000000..ee8446c1b5f6 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | |||
@@ -0,0 +1,350 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | |||
19 | #include "msm_drv.h" | ||
20 | #include "msm_mmu.h" | ||
21 | #include "mdp5_kms.h" | ||
22 | |||
23 | static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev); | ||
24 | |||
/* One-time hardware init: probe the MDP version register (clocks must
 * be on to touch it), reject unsupported revisions, and put the intf
 * routing / ctl paths into a known-disabled state.
 *
 * Returns 0 on success, -ENXIO for an unrecognized MDP revision.
 */
static int mdp5_hw_init(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct drm_device *dev = mdp5_kms->dev;
	uint32_t version, major, minor;
	int ret = 0;

	pm_runtime_get_sync(dev->dev);

	/* clocks only need to be on for the version read itself: */
	mdp5_enable(mdp5_kms);
	version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
	mdp5_disable(mdp5_kms);

	major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
	minor = FIELD(version, MDP5_MDP_VERSION_MINOR);

	DBG("found MDP5 version v%d.%d", major, minor);

	/* only v1.0 and v1.2 are known/supported: */
	if ((major != 1) || ((minor != 0) && (minor != 2))) {
		dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
				major, minor);
		ret = -ENXIO;
		goto out;
	}

	/* minor rev is consulted later (e.g. smp allocation rounding): */
	mdp5_kms->rev = minor;

	/* Magic unknown register writes:
	 *
	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
	 *    W MDP5:0x4b0 0xccccc0c0   (mdss_mdp.c:839)
	 *    W MDP5:0x4b8 0xccccc000   (mdss_mdp.c:839)
	 *
	 * Downstream fbdev driver gets these register offsets/values
	 * from DT.. not really sure what these registers are or if
	 * different values for different boards/SoC's, etc.  I guess
	 * they are the golden registers.
	 *
	 * Not setting these does not seem to cause any problem.  But
	 * we may be getting lucky with the bootloader initializing
	 * them for us.  OTOH, if we can always count on the bootloader
	 * setting the golden registers, then perhaps we don't need to
	 * care.
	 */

	/* disarm all intf routing and ctl paths until modeset sets
	 * them up for real:
	 */
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(2), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0);

out:
	pm_runtime_put_sync(dev->dev);

	return ret;
}
87 | |||
/* kms round_pixclk hook: mdp5 places no extra constraint on the pixel
 * clock here, so the requested rate is returned unchanged.
 */
static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	/* pass-through; any rounding happens in the clock framework */
	return rate;
}
93 | |||
/* drm file preclose hook: cancel any page-flip events still pending
 * for the closing file on every crtc, so we don't later try to post
 * an event to a freed drm_file.
 */
static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
	unsigned i;

	for (i = 0; i < priv->num_crtcs; i++)
		mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
}
103 | |||
/* kms destroy hook: release the mdp5_kms container itself.  Planes,
 * crtcs, etc. are cleaned up by the drm core / their own destroy
 * callbacks, not here.
 */
static void mdp5_destroy(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	kfree(mdp5_kms);
}
109 | |||
/* vtable wiring the mdp5 implementations into the generic msm_kms
 * interface (.base) plus the mdp-common irq-mask hook:
 */
static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp5_hw_init,
		.irq_preinstall  = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall   = mdp5_irq_uninstall,
		.irq             = mdp5_irq,
		.enable_vblank   = mdp5_enable_vblank,
		.disable_vblank  = mdp5_disable_vblank,
		.get_format      = mdp_get_format,
		.round_pixclk    = mdp5_round_pixclk,
		.preclose        = mdp5_preclose,
		.destroy         = mdp5_destroy,
	},
	.set_irqmask = mdp5_set_irqmask,
};
126 | |||
/* Drop the core clocks enabled by mdp5_enable().  Always returns 0
 * (clk_disable_unprepare() has no failure path).
 *
 * NOTE(review): disables in the same order as enable rather than the
 * conventional reverse order -- harmless for independent clocks, but
 * worth confirming none of these are parented on each other.
 */
int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}
138 | |||
139 | int mdp5_enable(struct mdp5_kms *mdp5_kms) | ||
140 | { | ||
141 | DBG(""); | ||
142 | |||
143 | clk_prepare_enable(mdp5_kms->ahb_clk); | ||
144 | clk_prepare_enable(mdp5_kms->axi_clk); | ||
145 | clk_prepare_enable(mdp5_kms->core_clk); | ||
146 | clk_prepare_enable(mdp5_kms->lut_clk); | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | |||
/* Build the modeset objects: one private plane + crtc per RGB pipe,
 * one encoder on INTF 3 driving HDMI, and the hdmi bridge/connector.
 *
 * Returns 0 on success or a negative errno.  Partially-constructed
 * objects are left for drm core / mdp5_destroy() to tear down.
 */
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
	/* RGB pipes used as the primary plane of each crtc: */
	static const enum mdp5_pipe crtcs[] = {
			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2,
	};
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	int i, ret;

	/* construct CRTCs: */
	for (i = 0; i < ARRAY_SIZE(crtcs); i++) {
		struct drm_plane *plane;
		struct drm_crtc *crtc;

		/* 'true' => private (primary) plane, not an overlay: */
		plane = mdp5_plane_init(dev, crtcs[i], true);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
					pipe2name(crtcs[i]), ret);
			goto fail;
		}

		crtc = mdp5_crtc_init(dev, plane, i);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
					pipe2name(crtcs[i]), ret);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* Construct encoder for HDMI: */
	encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
	if (IS_ERR(encoder)) {
		dev_err(dev->dev, "failed to construct encoder\n");
		ret = PTR_ERR(encoder);
		goto fail;
	}

	/* NOTE: the vsync and error irq's are actually associated with
	 * the INTF/encoder.. the easiest way to deal with this (ie. what
	 * we do now) is assume a fixed relationship between crtc's and
	 * encoders.  I'm not sure if there is ever a need to more freely
	 * assign crtcs to encoders, but if there is then we need to take
	 * care of error and vblank irq's that the crtc has registered,
	 * and also update user-requested vblank_mask.
	 */
	encoder->possible_crtcs = BIT(0);
	mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);

	priv->encoders[priv->num_encoders++] = encoder;

	/* Construct bridge/connector for HDMI: */
	mdp5_kms->hdmi = hdmi_init(dev, encoder);
	if (IS_ERR(mdp5_kms->hdmi)) {
		ret = PTR_ERR(mdp5_kms->hdmi);
		dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	return ret;
}
218 | |||
/* iommu context names the scanout mmu is attached to: */
static const char *iommu_ports[] = {
		"mdp_0",
};
222 | |||
223 | static int get_clk(struct platform_device *pdev, struct clk **clkp, | ||
224 | const char *name) | ||
225 | { | ||
226 | struct device *dev = &pdev->dev; | ||
227 | struct clk *clk = devm_clk_get(dev, name); | ||
228 | if (IS_ERR(clk)) { | ||
229 | dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); | ||
230 | return PTR_ERR(clk); | ||
231 | } | ||
232 | *clkp = clk; | ||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | struct msm_kms *mdp5_kms_init(struct drm_device *dev) | ||
237 | { | ||
238 | struct platform_device *pdev = dev->platformdev; | ||
239 | struct mdp5_platform_config *config = mdp5_get_config(pdev); | ||
240 | struct mdp5_kms *mdp5_kms; | ||
241 | struct msm_kms *kms = NULL; | ||
242 | struct msm_mmu *mmu; | ||
243 | int ret; | ||
244 | |||
245 | mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL); | ||
246 | if (!mdp5_kms) { | ||
247 | dev_err(dev->dev, "failed to allocate kms\n"); | ||
248 | ret = -ENOMEM; | ||
249 | goto fail; | ||
250 | } | ||
251 | |||
252 | mdp_kms_init(&mdp5_kms->base, &kms_funcs); | ||
253 | |||
254 | kms = &mdp5_kms->base.base; | ||
255 | |||
256 | mdp5_kms->dev = dev; | ||
257 | mdp5_kms->smp_blk_cnt = config->smp_blk_cnt; | ||
258 | |||
259 | mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); | ||
260 | if (IS_ERR(mdp5_kms->mmio)) { | ||
261 | ret = PTR_ERR(mdp5_kms->mmio); | ||
262 | goto fail; | ||
263 | } | ||
264 | |||
265 | mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF"); | ||
266 | if (IS_ERR(mdp5_kms->vbif)) { | ||
267 | ret = PTR_ERR(mdp5_kms->vbif); | ||
268 | goto fail; | ||
269 | } | ||
270 | |||
271 | mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd"); | ||
272 | if (IS_ERR(mdp5_kms->vdd)) { | ||
273 | ret = PTR_ERR(mdp5_kms->vdd); | ||
274 | goto fail; | ||
275 | } | ||
276 | |||
277 | ret = regulator_enable(mdp5_kms->vdd); | ||
278 | if (ret) { | ||
279 | dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret); | ||
280 | goto fail; | ||
281 | } | ||
282 | |||
283 | ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk") || | ||
284 | get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk") || | ||
285 | get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src") || | ||
286 | get_clk(pdev, &mdp5_kms->core_clk, "core_clk") || | ||
287 | get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk") || | ||
288 | get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk"); | ||
289 | if (ret) | ||
290 | goto fail; | ||
291 | |||
292 | ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk); | ||
293 | |||
294 | /* make sure things are off before attaching iommu (bootloader could | ||
295 | * have left things on, in which case we'll start getting faults if | ||
296 | * we don't disable): | ||
297 | */ | ||
298 | mdp5_enable(mdp5_kms); | ||
299 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0); | ||
300 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0); | ||
301 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0); | ||
302 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0); | ||
303 | mdp5_disable(mdp5_kms); | ||
304 | mdelay(16); | ||
305 | |||
306 | if (config->iommu) { | ||
307 | mmu = msm_iommu_new(dev, config->iommu); | ||
308 | if (IS_ERR(mmu)) { | ||
309 | ret = PTR_ERR(mmu); | ||
310 | goto fail; | ||
311 | } | ||
312 | ret = mmu->funcs->attach(mmu, iommu_ports, | ||
313 | ARRAY_SIZE(iommu_ports)); | ||
314 | if (ret) | ||
315 | goto fail; | ||
316 | } else { | ||
317 | dev_info(dev->dev, "no iommu, fallback to phys " | ||
318 | "contig buffers for scanout\n"); | ||
319 | mmu = NULL; | ||
320 | } | ||
321 | |||
322 | mdp5_kms->id = msm_register_mmu(dev, mmu); | ||
323 | if (mdp5_kms->id < 0) { | ||
324 | ret = mdp5_kms->id; | ||
325 | dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret); | ||
326 | goto fail; | ||
327 | } | ||
328 | |||
329 | ret = modeset_init(mdp5_kms); | ||
330 | if (ret) { | ||
331 | dev_err(dev->dev, "modeset_init failed: %d\n", ret); | ||
332 | goto fail; | ||
333 | } | ||
334 | |||
335 | return kms; | ||
336 | |||
337 | fail: | ||
338 | if (kms) | ||
339 | mdp5_destroy(kms); | ||
340 | return ERR_PTR(ret); | ||
341 | } | ||
342 | |||
/* Fetch platform config.  Currently just a zero-initialized static
 * (no DT parsing yet), so iommu/max_clk/smp_blk_cnt are all 0/NULL
 * until the TODO below is implemented.
 */
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
{
	static struct mdp5_platform_config config = {};
#ifdef CONFIG_OF
	/* TODO */
#endif
	return &config;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h new file mode 100644 index 000000000000..c8b1a2522c25 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | |||
@@ -0,0 +1,213 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __MDP5_KMS_H__ | ||
19 | #define __MDP5_KMS_H__ | ||
20 | |||
21 | #include "msm_drv.h" | ||
22 | #include "msm_kms.h" | ||
23 | #include "mdp/mdp_kms.h" | ||
24 | #include "mdp5.xml.h" | ||
25 | #include "mdp5_smp.h" | ||
26 | |||
/* Per-device state for the MDP5 display controller, embedding the
 * generic mdp_kms base (which in turn embeds msm_kms).
 */
struct mdp5_kms {
	struct mdp_kms base;

	struct drm_device *dev;

	/* MDP hw minor revision (from MDP5_MDP_VERSION); affects e.g.
	 * smp allocation rounding for v1.0: */
	int rev;

	/* mapper-id used to request GEM buffer mapped for scanout: */
	int id;

	/* for tracking smp allocation amongst pipes: */
	mdp5_smp_state_t smp_state;
	struct mdp5_client_smp_state smp_client_state[CID_MAX];
	int smp_blk_cnt;

	/* io/register spaces: */
	void __iomem *mmio, *vbif;

	struct regulator *vdd;

	/* core clocks; enabled/disabled together by mdp5_enable()/
	 * mdp5_disable() (src_clk/vsync_clk are managed separately): */
	struct clk *axi_clk;
	struct clk *ahb_clk;
	struct clk *src_clk;
	struct clk *core_clk;
	struct clk *lut_clk;
	struct clk *vsync_clk;

	struct hdmi *hdmi;

	struct mdp_irq error_handler;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
59 | |||
/* platform config data (ie. from DT, or pdata) */
struct mdp5_platform_config {
	struct iommu_domain *iommu;	/* NULL => phys-contig scanout fallback */
	uint32_t max_clk;		/* rate programmed on core_clk_src */
	int smp_blk_cnt;		/* total shared-memory-pool blocks */
};
66 | |||
/* Write @data to MDP5 register at byte offset @reg in the mdp mmio
 * region. */
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
	msm_writel(data, mdp5_kms->mmio + reg);
}
71 | |||
/* Read the MDP5 register at byte offset @reg in the mdp mmio region. */
static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
{
	return msm_readl(mdp5_kms->mmio + reg);
}
76 | |||
/* Map a pipe id to a printable name (for log/debug messages).
 * NOTE: indexes a designated-initializer table directly, so callers
 * must only pass valid SSPP_* values.
 */
static inline const char *pipe2name(enum mdp5_pipe pipe)
{
	static const char *names[] = {
#define NAME(n) [SSPP_ ## n] = #n
		NAME(VIG0), NAME(VIG1), NAME(VIG2),
		NAME(RGB0), NAME(RGB1), NAME(RGB2),
		NAME(DMA0), NAME(DMA1),
#undef NAME
	};
	return names[pipe];
}
88 | |||
/* Translate a pipe id to its bit in the CTL FLUSH register, or 0 for
 * pipes with no flush bit. */
static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	default:        return 0;
	}
}
103 | |||
104 | static inline int pipe2nclients(enum mdp5_pipe pipe) | ||
105 | { | ||
106 | switch (pipe) { | ||
107 | case SSPP_RGB0: | ||
108 | case SSPP_RGB1: | ||
109 | case SSPP_RGB2: | ||
110 | return 1; | ||
111 | default: | ||
112 | return 3; | ||
113 | } | ||
114 | } | ||
115 | |||
/* Map (pipe, component-plane index) to the smp client id used for
 * fetch.  VIG/DMA pipes have per-component (Y/Cb/Cr) client ids laid
 * out consecutively from *_Y; RGB pipes have a single client.  WARNs
 * if @plane exceeds what pipe2nclients() allows for the pipe.
 */
static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
{
	WARN_ON(plane >= pipe2nclients(pipe));
	switch (pipe) {
	case SSPP_VIG0: return CID_VIG0_Y + plane;
	case SSPP_VIG1: return CID_VIG1_Y + plane;
	case SSPP_VIG2: return CID_VIG2_Y + plane;
	case SSPP_RGB0: return CID_RGB0;
	case SSPP_RGB1: return CID_RGB1;
	case SSPP_RGB2: return CID_RGB2;
	case SSPP_DMA0: return CID_DMA0_Y + plane;
	case SSPP_DMA1: return CID_DMA1_Y + plane;
	default:        return CID_UNUSED;
	}
}
131 | |||
/* Translate a layer-mixer index to its CTL FLUSH bit (0 if out of
 * range). */
static inline uint32_t mixer2flush(int lm)
{
	switch (lm) {
	case 0:  return MDP5_CTL_FLUSH_LM0;
	case 1:  return MDP5_CTL_FLUSH_LM1;
	case 2:  return MDP5_CTL_FLUSH_LM2;
	default: return 0;
	}
}
141 | |||
/* Translate an interface index to its underrun-error irq bit (0 if
 * out of range). */
static inline uint32_t intf2err(int intf)
{
	switch (intf) {
	case 0:  return MDP5_IRQ_INTF0_UNDER_RUN;
	case 1:  return MDP5_IRQ_INTF1_UNDER_RUN;
	case 2:  return MDP5_IRQ_INTF2_UNDER_RUN;
	case 3:  return MDP5_IRQ_INTF3_UNDER_RUN;
	default: return 0;
	}
}
152 | |||
/* Translate an interface index to its vsync irq bit (0 if out of
 * range). */
static inline uint32_t intf2vblank(int intf)
{
	switch (intf) {
	case 0:  return MDP5_IRQ_INTF0_VSYNC;
	case 1:  return MDP5_IRQ_INTF1_VSYNC;
	case 2:  return MDP5_IRQ_INTF2_VSYNC;
	case 3:  return MDP5_IRQ_INTF3_VSYNC;
	default: return 0;
	}
}
163 | |||
164 | int mdp5_disable(struct mdp5_kms *mdp5_kms); | ||
165 | int mdp5_enable(struct mdp5_kms *mdp5_kms); | ||
166 | |||
167 | void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask); | ||
168 | void mdp5_irq_preinstall(struct msm_kms *kms); | ||
169 | int mdp5_irq_postinstall(struct msm_kms *kms); | ||
170 | void mdp5_irq_uninstall(struct msm_kms *kms); | ||
171 | irqreturn_t mdp5_irq(struct msm_kms *kms); | ||
172 | int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); | ||
173 | void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); | ||
174 | |||
/* Fill @pixel_formats with supported fourcc codes for @pipe (up to
 * @max_formats).  Currently delegates to the common mdp list for all
 * pipes; return value is whatever mdp_get_formats() reports
 * (presumably the number of formats written -- confirm in mdp_kms).
 */
static inline
uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
		uint32_t max_formats)
{
	/* TODO when we have YUV, we need to filter supported formats
	 * based on pipe id..
	 */
	return mdp_get_formats(pixel_formats, max_formats);
}
184 | |||
185 | void mdp5_plane_install_properties(struct drm_plane *plane, | ||
186 | struct drm_mode_object *obj); | ||
187 | void mdp5_plane_set_scanout(struct drm_plane *plane, | ||
188 | struct drm_framebuffer *fb); | ||
189 | int mdp5_plane_mode_set(struct drm_plane *plane, | ||
190 | struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
191 | int crtc_x, int crtc_y, | ||
192 | unsigned int crtc_w, unsigned int crtc_h, | ||
193 | uint32_t src_x, uint32_t src_y, | ||
194 | uint32_t src_w, uint32_t src_h); | ||
195 | void mdp5_plane_complete_flip(struct drm_plane *plane); | ||
196 | enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); | ||
197 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, | ||
198 | enum mdp5_pipe pipe, bool private_plane); | ||
199 | |||
200 | uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); | ||
201 | |||
202 | void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); | ||
203 | void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf, | ||
204 | enum mdp5_intf intf_id); | ||
205 | void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane); | ||
206 | void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane); | ||
207 | struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, | ||
208 | struct drm_plane *plane, int id); | ||
209 | |||
210 | struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf, | ||
211 | enum mdp5_intf intf_id); | ||
212 | |||
213 | #endif /* __MDP5_KMS_H__ */ | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c new file mode 100644 index 000000000000..0ac8bb5e7e85 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | |||
@@ -0,0 +1,389 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #include "mdp5_kms.h" | ||
19 | |||
20 | |||
/* Per-plane state: wraps drm_plane and pins the plane to one hw pipe. */
struct mdp5_plane {
	struct drm_plane base;
	const char *name;		/* pipe name, for debug messages */

	enum mdp5_pipe pipe;		/* fixed hw pipe backing this plane */

	/* fourcc codes advertised to userspace: */
	uint32_t nformats;
	uint32_t formats[32];

	bool enabled;
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
33 | |||
34 | static struct mdp5_kms *get_kms(struct drm_plane *plane) | ||
35 | { | ||
36 | struct msm_drm_private *priv = plane->dev->dev_private; | ||
37 | return to_mdp5_kms(to_mdp_kms(priv->kms)); | ||
38 | } | ||
39 | |||
40 | static int mdp5_plane_update(struct drm_plane *plane, | ||
41 | struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
42 | int crtc_x, int crtc_y, | ||
43 | unsigned int crtc_w, unsigned int crtc_h, | ||
44 | uint32_t src_x, uint32_t src_y, | ||
45 | uint32_t src_w, uint32_t src_h) | ||
46 | { | ||
47 | struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); | ||
48 | |||
49 | mdp5_plane->enabled = true; | ||
50 | |||
51 | if (plane->fb) | ||
52 | drm_framebuffer_unreference(plane->fb); | ||
53 | |||
54 | drm_framebuffer_reference(fb); | ||
55 | |||
56 | return mdp5_plane_mode_set(plane, crtc, fb, | ||
57 | crtc_x, crtc_y, crtc_w, crtc_h, | ||
58 | src_x, src_y, src_w, src_h); | ||
59 | } | ||
60 | |||
/* drm disable_plane hook: release the plane's smp blocks and detach
 * it from its crtc.  Always returns 0.
 */
static int mdp5_plane_disable(struct drm_plane *plane)
{
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	enum mdp5_pipe pipe = mdp5_plane->pipe;
	int i;

	DBG("%s: disable", mdp5_plane->name);

	/* update our SMP request to zero (release all our blks): */
	for (i = 0; i < pipe2nclients(pipe); i++)
		mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);

	/* TODO detaching now will cause us not to get the last
	 * vblank and mdp5_smp_commit().. so other planes will
	 * still see smp blocks previously allocated to us as
	 * in-use..
	 */
	if (plane->crtc)
		mdp5_crtc_detach(plane->crtc, plane);

	return 0;
}
84 | |||
/* drm destroy hook: shut the pipe down (releasing its smp blocks),
 * unhook from drm core, then free the plane. */
static void mdp5_plane_destroy(struct drm_plane *plane)
{
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);

	mdp5_plane_disable(plane);
	drm_plane_cleanup(plane);

	kfree(mdp5_plane);
}
94 | |||
/* helper to install properties which are common to planes and crtcs */
/* (stub -- no properties are exposed yet) */
void mdp5_plane_install_properties(struct drm_plane *plane,
		struct drm_mode_object *obj)
{
	// XXX
}
101 | |||
/* drm set_property hook: no properties supported yet, so any request
 * is rejected with -EINVAL. */
int mdp5_plane_set_property(struct drm_plane *plane,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}
108 | |||
/* drm plane vtable: */
static const struct drm_plane_funcs mdp5_plane_funcs = {
		.update_plane = mdp5_plane_update,
		.disable_plane = mdp5_plane_disable,
		.destroy = mdp5_plane_destroy,
		.set_property = mdp5_plane_set_property,
};
115 | |||
/* Point the pipe's fetch engine at @fb: program per-plane strides and
 * iova addresses (zero for unused planes) and record fb on the plane.
 * Assumes the fb's GEM bos are already pinned for scanout via
 * msm_gem_get_iova() (return value unchecked -- presumed pinned by
 * the caller; confirm against msm_gem).
 */
void mdp5_plane_set_scanout(struct drm_plane *plane,
		struct drm_framebuffer *fb)
{
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	enum mdp5_pipe pipe = mdp5_plane->pipe;
	uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
	uint32_t iova[4];
	int i;

	for (i = 0; i < nplanes; i++) {
		struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
		msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
	}
	/* zero the unused address slots: */
	for (; i < 4; i++)
		iova[i] = 0;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
			MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
			MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
			MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
			MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);

	plane->fb = fb;
}
148 | |||
/* NOTE: looks like if horizontal decimation is used (if we supported that)
 * then the width used to calculate SMP block requirements is the post-
 * decimated width.  Ie. SMP buffering sits downstream of decimation (which
 * presumably happens during the dma from scanout buffer).
 */
/* Compute and request the number of shared-memory-pool blocks each of
 * the fb's @nplanes components needs to buffer @nlines of fetch at
 * @width pixels, and reserve them via mdp5_smp_request().
 *
 * Returns the total block count on success, or a negative errno when
 * a request fails (blocks already granted for earlier components are
 * NOT rolled back here -- the caller's error path must release them).
 */
static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
		uint32_t nplanes, uint32_t width)
{
	struct drm_device *dev = plane->dev;
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	enum mdp5_pipe pipe = mdp5_plane->pipe;
	int i, hsub, nlines, nblks, ret;

	hsub = drm_format_horz_chroma_subsampling(format);

	/* different if BWC (compressed framebuffer?) enabled: */
	nlines = 2;

	for (i = 0, nblks = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(format, i);
		/* chroma planes (i > 0) are horizontally subsampled: */
		fetch_stride = width * cpp / (i ? hsub : 1);

		n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);

		/* for hw rev v1.00 */
		if (mdp5_kms->rev == 0)
			n = roundup_pow_of_two(n);

		DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
		ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
		if (ret) {
			dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}

		nblks += n;
	}

	/* in success case, return total # of blocks allocated: */
	return nblks;
}
194 | |||
195 | static void set_fifo_thresholds(struct drm_plane *plane, int nblks) | ||
196 | { | ||
197 | struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); | ||
198 | struct mdp5_kms *mdp5_kms = get_kms(plane); | ||
199 | enum mdp5_pipe pipe = mdp5_plane->pipe; | ||
200 | uint32_t val; | ||
201 | |||
202 | /* 1/4 of SMP pool that is being fetched */ | ||
203 | val = (nblks * SMP_ENTRIES_PER_BLK) / 4; | ||
204 | |||
205 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1); | ||
206 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2); | ||
207 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3); | ||
208 | |||
209 | } | ||
210 | |||
211 | int mdp5_plane_mode_set(struct drm_plane *plane, | ||
212 | struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
213 | int crtc_x, int crtc_y, | ||
214 | unsigned int crtc_w, unsigned int crtc_h, | ||
215 | uint32_t src_x, uint32_t src_y, | ||
216 | uint32_t src_w, uint32_t src_h) | ||
217 | { | ||
218 | struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); | ||
219 | struct mdp5_kms *mdp5_kms = get_kms(plane); | ||
220 | enum mdp5_pipe pipe = mdp5_plane->pipe; | ||
221 | const struct mdp_format *format; | ||
222 | uint32_t nplanes, config = 0; | ||
223 | uint32_t phasex_step = 0, phasey_step = 0; | ||
224 | uint32_t hdecm = 0, vdecm = 0; | ||
225 | int i, nblks; | ||
226 | |||
227 | nplanes = drm_format_num_planes(fb->pixel_format); | ||
228 | |||
229 | /* bad formats should already be rejected: */ | ||
230 | if (WARN_ON(nplanes > pipe2nclients(pipe))) | ||
231 | return -EINVAL; | ||
232 | |||
233 | /* src values are in Q16 fixed point, convert to integer: */ | ||
234 | src_x = src_x >> 16; | ||
235 | src_y = src_y >> 16; | ||
236 | src_w = src_w >> 16; | ||
237 | src_h = src_h >> 16; | ||
238 | |||
239 | DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name, | ||
240 | fb->base.id, src_x, src_y, src_w, src_h, | ||
241 | crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); | ||
242 | |||
243 | /* | ||
244 | * Calculate and request required # of smp blocks: | ||
245 | */ | ||
246 | nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w); | ||
247 | if (nblks < 0) | ||
248 | return nblks; | ||
249 | |||
250 | /* | ||
251 | * Currently we update the hw for allocations/requests immediately, | ||
252 | * but once atomic modeset/pageflip is in place, the allocation | ||
253 | * would move into atomic->check_plane_state(), while updating the | ||
254 | * hw would remain here: | ||
255 | */ | ||
256 | for (i = 0; i < pipe2nclients(pipe); i++) | ||
257 | mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i)); | ||
258 | |||
259 | if (src_w != crtc_w) { | ||
260 | config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN; | ||
261 | /* TODO calc phasex_step, hdecm */ | ||
262 | } | ||
263 | |||
264 | if (src_h != crtc_h) { | ||
265 | config |= MDP5_PIPE_SCALE_CONFIG_SCALEY_EN; | ||
266 | /* TODO calc phasey_step, vdecm */ | ||
267 | } | ||
268 | |||
269 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), | ||
270 | MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) | | ||
271 | MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h)); | ||
272 | |||
273 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe), | ||
274 | MDP5_PIPE_SRC_SIZE_WIDTH(src_w) | | ||
275 | MDP5_PIPE_SRC_SIZE_HEIGHT(src_h)); | ||
276 | |||
277 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe), | ||
278 | MDP5_PIPE_SRC_XY_X(src_x) | | ||
279 | MDP5_PIPE_SRC_XY_Y(src_y)); | ||
280 | |||
281 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe), | ||
282 | MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) | | ||
283 | MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h)); | ||
284 | |||
285 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe), | ||
286 | MDP5_PIPE_OUT_XY_X(crtc_x) | | ||
287 | MDP5_PIPE_OUT_XY_Y(crtc_y)); | ||
288 | |||
289 | mdp5_plane_set_scanout(plane, fb); | ||
290 | |||
291 | format = to_mdp_format(msm_framebuffer_format(fb)); | ||
292 | |||
293 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe), | ||
294 | MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | | ||
295 | MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | | ||
296 | MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) | | ||
297 | MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) | | ||
298 | COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) | | ||
299 | MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | | ||
300 | MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | | ||
301 | COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | | ||
302 | MDP5_PIPE_SRC_FORMAT_NUM_PLANES(nplanes - 1) | | ||
303 | MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(CHROMA_RGB)); | ||
304 | |||
305 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe), | ||
306 | MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | | ||
307 | MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) | | ||
308 | MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | | ||
309 | MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); | ||
310 | |||
311 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe), | ||
312 | MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS)); | ||
313 | |||
314 | /* not using secure mode: */ | ||
315 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); | ||
316 | |||
317 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step); | ||
318 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), phasey_step); | ||
319 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), | ||
320 | MDP5_PIPE_DECIMATION_VERT(vdecm) | | ||
321 | MDP5_PIPE_DECIMATION_HORZ(hdecm)); | ||
322 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), | ||
323 | MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(SCALE_FILTER_NEAREST) | | ||
324 | MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(SCALE_FILTER_NEAREST) | | ||
325 | MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(SCALE_FILTER_NEAREST) | | ||
326 | MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(SCALE_FILTER_NEAREST) | | ||
327 | MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) | | ||
328 | MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST)); | ||
329 | |||
330 | set_fifo_thresholds(plane, nblks); | ||
331 | |||
332 | /* TODO detach from old crtc (if we had more than one) */ | ||
333 | mdp5_crtc_attach(crtc, plane); | ||
334 | |||
335 | return 0; | ||
336 | } | ||
337 | |||
338 | void mdp5_plane_complete_flip(struct drm_plane *plane) | ||
339 | { | ||
340 | struct mdp5_kms *mdp5_kms = get_kms(plane); | ||
341 | enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe; | ||
342 | int i; | ||
343 | |||
344 | for (i = 0; i < pipe2nclients(pipe); i++) | ||
345 | mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i)); | ||
346 | } | ||
347 | |||
348 | enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) | ||
349 | { | ||
350 | struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); | ||
351 | return mdp5_plane->pipe; | ||
352 | } | ||
353 | |||
354 | /* initialize plane */ | ||
355 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, | ||
356 | enum mdp5_pipe pipe, bool private_plane) | ||
357 | { | ||
358 | struct drm_plane *plane = NULL; | ||
359 | struct mdp5_plane *mdp5_plane; | ||
360 | int ret; | ||
361 | |||
362 | mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL); | ||
363 | if (!mdp5_plane) { | ||
364 | ret = -ENOMEM; | ||
365 | goto fail; | ||
366 | } | ||
367 | |||
368 | plane = &mdp5_plane->base; | ||
369 | |||
370 | mdp5_plane->pipe = pipe; | ||
371 | mdp5_plane->name = pipe2name(pipe); | ||
372 | |||
373 | mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats, | ||
374 | ARRAY_SIZE(mdp5_plane->formats)); | ||
375 | |||
376 | drm_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, | ||
377 | mdp5_plane->formats, mdp5_plane->nformats, | ||
378 | private_plane); | ||
379 | |||
380 | mdp5_plane_install_properties(plane, &plane->base); | ||
381 | |||
382 | return plane; | ||
383 | |||
384 | fail: | ||
385 | if (plane) | ||
386 | mdp5_plane_destroy(plane); | ||
387 | |||
388 | return ERR_PTR(ret); | ||
389 | } | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c new file mode 100644 index 000000000000..2d0236b963a6 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | |||
@@ -0,0 +1,173 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | |||
19 | #include "mdp5_kms.h" | ||
20 | #include "mdp5_smp.h" | ||
21 | |||
22 | |||
23 | /* SMP - Shared Memory Pool | ||
24 | * | ||
25 | * These are shared between all the clients, where each plane in a | ||
26 | * scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on | ||
27 | * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR. | ||
28 | * | ||
29 | * Based on the size of the attached scanout buffer, a certain # of | ||
30 | * blocks must be allocated to that client out of the shared pool. | ||
31 | * | ||
32 | * For each block, it can be either free, or pending/in-use by a | ||
33 | * client. The updates happen in three steps: | ||
34 | * | ||
35 | * 1) mdp5_smp_request(): | ||
36 | * When plane scanout is setup, calculate required number of | ||
37 | * blocks needed per client, and request. Blocks not inuse or | ||
38 | * pending by any other client are added to client's pending | ||
39 | * set. | ||
40 | * | ||
41 | * 2) mdp5_smp_configure(): | ||
42 | * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers | ||
43 | * are configured for the union(pending, inuse) | ||
44 | * | ||
45 | * 3) mdp5_smp_commit(): | ||
46 | * After next vblank, copy pending -> inuse. Optionally update | ||
47 | * MDP5_SMP_ALLOC registers if there are newly unused blocks | ||
48 | * | ||
49 | * On the next vblank after changes have been committed to hw, the | ||
50 | * client's pending blocks become it's in-use blocks (and no-longer | ||
51 | * in-use blocks become available to other clients). | ||
52 | * | ||
53 | * btw, hurray for confusing overloaded acronyms! :-/ | ||
54 | * | ||
55 | * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1 | ||
56 | * should happen at (or before)? atomic->check(). And we'd need | ||
57 | * an API to discard previous requests if update is aborted or | ||
58 | * (test-only). | ||
59 | * | ||
60 | * TODO would perhaps be nice to have debugfs to dump out kernel | ||
61 | * inuse and pending state of all clients.. | ||
62 | */ | ||
63 | |||
64 | static DEFINE_SPINLOCK(smp_lock); | ||
65 | |||
66 | |||
67 | /* step #1: update # of blocks pending for the client: */ | ||
68 | int mdp5_smp_request(struct mdp5_kms *mdp5_kms, | ||
69 | enum mdp5_client_id cid, int nblks) | ||
70 | { | ||
71 | struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid]; | ||
72 | int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt; | ||
73 | unsigned long flags; | ||
74 | |||
75 | spin_lock_irqsave(&smp_lock, flags); | ||
76 | |||
77 | avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt); | ||
78 | if (nblks > avail) { | ||
79 | ret = -ENOSPC; | ||
80 | goto fail; | ||
81 | } | ||
82 | |||
83 | cur_nblks = bitmap_weight(ps->pending, cnt); | ||
84 | if (nblks > cur_nblks) { | ||
85 | /* grow the existing pending reservation: */ | ||
86 | for (i = cur_nblks; i < nblks; i++) { | ||
87 | int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt); | ||
88 | set_bit(blk, ps->pending); | ||
89 | set_bit(blk, mdp5_kms->smp_state); | ||
90 | } | ||
91 | } else { | ||
92 | /* shrink the existing pending reservation: */ | ||
93 | for (i = cur_nblks; i > nblks; i--) { | ||
94 | int blk = find_first_bit(ps->pending, cnt); | ||
95 | clear_bit(blk, ps->pending); | ||
96 | /* don't clear in global smp_state until _commit() */ | ||
97 | } | ||
98 | } | ||
99 | |||
100 | fail: | ||
101 | spin_unlock_irqrestore(&smp_lock, flags); | ||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | static void update_smp_state(struct mdp5_kms *mdp5_kms, | ||
106 | enum mdp5_client_id cid, mdp5_smp_state_t *assigned) | ||
107 | { | ||
108 | int cnt = mdp5_kms->smp_blk_cnt; | ||
109 | uint32_t blk, val; | ||
110 | |||
111 | for_each_set_bit(blk, *assigned, cnt) { | ||
112 | int idx = blk / 3; | ||
113 | int fld = blk % 3; | ||
114 | |||
115 | val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx)); | ||
116 | |||
117 | switch (fld) { | ||
118 | case 0: | ||
119 | val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; | ||
120 | val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid); | ||
121 | break; | ||
122 | case 1: | ||
123 | val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; | ||
124 | val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid); | ||
125 | break; | ||
126 | case 2: | ||
127 | val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; | ||
128 | val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid); | ||
129 | break; | ||
130 | } | ||
131 | |||
132 | mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val); | ||
133 | mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val); | ||
134 | } | ||
135 | } | ||
136 | |||
137 | /* step #2: configure hw for union(pending, inuse): */ | ||
138 | void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid) | ||
139 | { | ||
140 | struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid]; | ||
141 | int cnt = mdp5_kms->smp_blk_cnt; | ||
142 | mdp5_smp_state_t assigned; | ||
143 | |||
144 | bitmap_or(assigned, ps->inuse, ps->pending, cnt); | ||
145 | update_smp_state(mdp5_kms, cid, &assigned); | ||
146 | } | ||
147 | |||
148 | /* step #3: after vblank, copy pending -> inuse: */ | ||
149 | void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid) | ||
150 | { | ||
151 | struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid]; | ||
152 | int cnt = mdp5_kms->smp_blk_cnt; | ||
153 | mdp5_smp_state_t released; | ||
154 | |||
155 | /* | ||
156 | * Figure out if there are any blocks we where previously | ||
157 | * using, which can be released and made available to other | ||
158 | * clients: | ||
159 | */ | ||
160 | if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) { | ||
161 | unsigned long flags; | ||
162 | |||
163 | spin_lock_irqsave(&smp_lock, flags); | ||
164 | /* clear released blocks: */ | ||
165 | bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state, | ||
166 | released, cnt); | ||
167 | spin_unlock_irqrestore(&smp_lock, flags); | ||
168 | |||
169 | update_smp_state(mdp5_kms, CID_UNUSED, &released); | ||
170 | } | ||
171 | |||
172 | bitmap_copy(ps->inuse, ps->pending, cnt); | ||
173 | } | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h new file mode 100644 index 000000000000..0ab739e1a1dd --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
#ifndef __MDP5_SMP_H__
#define __MDP5_SMP_H__

#include "msm_drv.h"

/* Shared Memory Pool geometry (see mdp5_smp.c for the request/
 * configure/commit allocation state machine):
 */
#define MAX_SMP_BLOCKS 22
#define SMP_BLK_SIZE 4096
#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)

/* one bit per SMP block: */
typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);

/* Per-client allocation state (a client is one plane/component of a
 * scanout buffer, eg. VIG0_Y):
 */
struct mdp5_client_smp_state {
	mdp5_smp_state_t inuse;		/* blocks allocated as of last commit */
	mdp5_smp_state_t pending;	/* blocks requested for the next frame */
};

struct mdp5_kms;

/* step #1: reserve nblks blocks for the client (at plane setup) */
int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
/* step #2: program ALLOC regs for union(pending, inuse), before FLUSH */
void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
/* step #3: after vblank, pending becomes inuse; unused blocks released */
void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);


#endif /* __MDP5_SMP_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h new file mode 100644 index 000000000000..a9629b85b983 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h | |||
@@ -0,0 +1,78 @@ | |||
1 | #ifndef MDP_COMMON_XML | ||
2 | #define MDP_COMMON_XML | ||
3 | |||
4 | /* Autogenerated file, DO NOT EDIT manually! | ||
5 | |||
6 | This file was generated by the rules-ng-ng headergen tool in this git repository: | ||
7 | http://github.com/freedreno/envytools/ | ||
8 | git clone https://github.com/freedreno/envytools.git | ||
9 | |||
10 | The rules-ng-ng source files this header was generated from are: | ||
11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) | ||
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | ||
13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) | ||
14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | ||
17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | ||
18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | ||
19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | ||
20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) | ||
21 | |||
22 | Copyright (C) 2013 by the following authors: | ||
23 | - Rob Clark <robdclark@gmail.com> (robclark) | ||
24 | |||
25 | Permission is hereby granted, free of charge, to any person obtaining | ||
26 | a copy of this software and associated documentation files (the | ||
27 | "Software"), to deal in the Software without restriction, including | ||
28 | without limitation the rights to use, copy, modify, merge, publish, | ||
29 | distribute, sublicense, and/or sell copies of the Software, and to | ||
30 | permit persons to whom the Software is furnished to do so, subject to | ||
31 | the following conditions: | ||
32 | |||
33 | The above copyright notice and this permission notice (including the | ||
34 | next paragraph) shall be included in all copies or substantial | ||
35 | portions of the Software. | ||
36 | |||
37 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
38 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
39 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
40 | IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
41 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
42 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
43 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
44 | */ | ||
45 | |||
46 | |||
47 | enum mdp_mixer_stage_id { | ||
48 | STAGE_UNUSED = 0, | ||
49 | STAGE_BASE = 1, | ||
50 | STAGE0 = 2, | ||
51 | STAGE1 = 3, | ||
52 | STAGE2 = 4, | ||
53 | STAGE3 = 5, | ||
54 | }; | ||
55 | |||
56 | enum mdp_alpha_type { | ||
57 | FG_CONST = 0, | ||
58 | BG_CONST = 1, | ||
59 | FG_PIXEL = 2, | ||
60 | BG_PIXEL = 3, | ||
61 | }; | ||
62 | |||
63 | enum mdp_bpc { | ||
64 | BPC1 = 0, | ||
65 | BPC5 = 1, | ||
66 | BPC6 = 2, | ||
67 | BPC8 = 3, | ||
68 | }; | ||
69 | |||
70 | enum mdp_bpc_alpha { | ||
71 | BPC1A = 0, | ||
72 | BPC4A = 1, | ||
73 | BPC6A = 2, | ||
74 | BPC8A = 3, | ||
75 | }; | ||
76 | |||
77 | |||
78 | #endif /* MDP_COMMON_XML */ | ||
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c index 17330b0927b2..e0a6ffbe6ab4 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_format.c +++ b/drivers/gpu/drm/msm/mdp/mdp_format.c | |||
@@ -17,7 +17,7 @@ | |||
17 | 17 | ||
18 | 18 | ||
19 | #include "msm_drv.h" | 19 | #include "msm_drv.h" |
20 | #include "mdp4_kms.h" | 20 | #include "mdp_kms.h" |
21 | 21 | ||
22 | #define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \ | 22 | #define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \ |
23 | .base = { .pixel_format = DRM_FORMAT_ ## name }, \ | 23 | .base = { .pixel_format = DRM_FORMAT_ ## name }, \ |
@@ -34,7 +34,7 @@ | |||
34 | 34 | ||
35 | #define BPC0A 0 | 35 | #define BPC0A 0 |
36 | 36 | ||
37 | static const struct mdp4_format formats[] = { | 37 | static const struct mdp_format formats[] = { |
38 | /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */ | 38 | /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */ |
39 | FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4), | 39 | FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4), |
40 | FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4), | 40 | FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4), |
@@ -44,12 +44,11 @@ static const struct mdp4_format formats[] = { | |||
44 | FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3), | 44 | FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3), |
45 | }; | 45 | }; |
46 | 46 | ||
47 | uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats, | 47 | uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats) |
48 | uint32_t max_formats) | ||
49 | { | 48 | { |
50 | uint32_t i; | 49 | uint32_t i; |
51 | for (i = 0; i < ARRAY_SIZE(formats); i++) { | 50 | for (i = 0; i < ARRAY_SIZE(formats); i++) { |
52 | const struct mdp4_format *f = &formats[i]; | 51 | const struct mdp_format *f = &formats[i]; |
53 | 52 | ||
54 | if (i == max_formats) | 53 | if (i == max_formats) |
55 | break; | 54 | break; |
@@ -60,11 +59,11 @@ uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats, | |||
60 | return i; | 59 | return i; |
61 | } | 60 | } |
62 | 61 | ||
63 | const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format) | 62 | const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format) |
64 | { | 63 | { |
65 | int i; | 64 | int i; |
66 | for (i = 0; i < ARRAY_SIZE(formats); i++) { | 65 | for (i = 0; i < ARRAY_SIZE(formats); i++) { |
67 | const struct mdp4_format *f = &formats[i]; | 66 | const struct mdp_format *f = &formats[i]; |
68 | if (f->base.pixel_format == format) | 67 | if (f->base.pixel_format == format) |
69 | return &f->base; | 68 | return &f->base; |
70 | } | 69 | } |
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c new file mode 100644 index 000000000000..3be48f7c36be --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | |||
19 | #include "msm_drv.h" | ||
20 | #include "mdp_kms.h" | ||
21 | |||
22 | |||
23 | struct mdp_irq_wait { | ||
24 | struct mdp_irq irq; | ||
25 | int count; | ||
26 | }; | ||
27 | |||
28 | static DECLARE_WAIT_QUEUE_HEAD(wait_event); | ||
29 | |||
30 | static DEFINE_SPINLOCK(list_lock); | ||
31 | |||
32 | static void update_irq(struct mdp_kms *mdp_kms) | ||
33 | { | ||
34 | struct mdp_irq *irq; | ||
35 | uint32_t irqmask = mdp_kms->vblank_mask; | ||
36 | |||
37 | BUG_ON(!spin_is_locked(&list_lock)); | ||
38 | |||
39 | list_for_each_entry(irq, &mdp_kms->irq_list, node) | ||
40 | irqmask |= irq->irqmask; | ||
41 | |||
42 | mdp_kms->funcs->set_irqmask(mdp_kms, irqmask); | ||
43 | } | ||
44 | |||
45 | static void update_irq_unlocked(struct mdp_kms *mdp_kms) | ||
46 | { | ||
47 | unsigned long flags; | ||
48 | spin_lock_irqsave(&list_lock, flags); | ||
49 | update_irq(mdp_kms); | ||
50 | spin_unlock_irqrestore(&list_lock, flags); | ||
51 | } | ||
52 | |||
/* Called from the hw-specific irq handler: dispatch the bits in
 * 'status' to every registered mdp_irq whose mask matches.
 */
void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
{
	struct mdp_irq *handler, *n;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	/* in_irq makes register/unregister defer the hw mask update to
	 * the update_irq() call below, after iteration is done: */
	mdp_kms->in_irq = true;
	list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
		if (handler->irqmask & status) {
			/* drop the lock around the callback so the handler
			 * may itself (un)register irqs.  NOTE(review): the
			 * list may change while unlocked, so 'n' could be
			 * stale when the lock is re-taken -- presumably
			 * handlers only remove themselves; confirm.
			 */
			spin_unlock_irqrestore(&list_lock, flags);
			handler->irq(handler, handler->irqmask & status);
			spin_lock_irqsave(&list_lock, flags);
		}
	}
	mdp_kms->in_irq = false;
	update_irq(mdp_kms);
	spin_unlock_irqrestore(&list_lock, flags);

}
72 | |||
73 | void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable) | ||
74 | { | ||
75 | unsigned long flags; | ||
76 | |||
77 | spin_lock_irqsave(&list_lock, flags); | ||
78 | if (enable) | ||
79 | mdp_kms->vblank_mask |= mask; | ||
80 | else | ||
81 | mdp_kms->vblank_mask &= ~mask; | ||
82 | update_irq(mdp_kms); | ||
83 | spin_unlock_irqrestore(&list_lock, flags); | ||
84 | } | ||
85 | |||
86 | static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus) | ||
87 | { | ||
88 | struct mdp_irq_wait *wait = | ||
89 | container_of(irq, struct mdp_irq_wait, irq); | ||
90 | wait->count--; | ||
91 | wake_up_all(&wait_event); | ||
92 | } | ||
93 | |||
94 | void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask) | ||
95 | { | ||
96 | struct mdp_irq_wait wait = { | ||
97 | .irq = { | ||
98 | .irq = wait_irq, | ||
99 | .irqmask = irqmask, | ||
100 | }, | ||
101 | .count = 1, | ||
102 | }; | ||
103 | mdp_irq_register(mdp_kms, &wait.irq); | ||
104 | wait_event(wait_event, (wait.count <= 0)); | ||
105 | mdp_irq_unregister(mdp_kms, &wait.irq); | ||
106 | } | ||
107 | |||
108 | void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq) | ||
109 | { | ||
110 | unsigned long flags; | ||
111 | bool needs_update = false; | ||
112 | |||
113 | spin_lock_irqsave(&list_lock, flags); | ||
114 | |||
115 | if (!irq->registered) { | ||
116 | irq->registered = true; | ||
117 | list_add(&irq->node, &mdp_kms->irq_list); | ||
118 | needs_update = !mdp_kms->in_irq; | ||
119 | } | ||
120 | |||
121 | spin_unlock_irqrestore(&list_lock, flags); | ||
122 | |||
123 | if (needs_update) | ||
124 | update_irq_unlocked(mdp_kms); | ||
125 | } | ||
126 | |||
127 | void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq) | ||
128 | { | ||
129 | unsigned long flags; | ||
130 | bool needs_update = false; | ||
131 | |||
132 | spin_lock_irqsave(&list_lock, flags); | ||
133 | |||
134 | if (irq->registered) { | ||
135 | irq->registered = false; | ||
136 | list_del(&irq->node); | ||
137 | needs_update = !mdp_kms->in_irq; | ||
138 | } | ||
139 | |||
140 | spin_unlock_irqrestore(&list_lock, flags); | ||
141 | |||
142 | if (needs_update) | ||
143 | update_irq_unlocked(mdp_kms); | ||
144 | } | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h new file mode 100644 index 000000000000..99557b5ad4fd --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __MDP_KMS_H__ | ||
19 | #define __MDP_KMS_H__ | ||
20 | |||
21 | #include <linux/clk.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/regulator/consumer.h> | ||
24 | |||
25 | #include "msm_drv.h" | ||
26 | #include "msm_kms.h" | ||
27 | #include "mdp_common.xml.h" | ||
28 | |||
29 | struct mdp_kms; | ||
30 | |||
/* Common base for the MDP4/MDP5 kms structs, extending msm_kms with
 * the one hook the shared irq helpers in mdp_kms.c need:
 */
struct mdp_kms_funcs {
	struct msm_kms_funcs base;
	/* program the hw interrupt-enable mask: */
	void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
};

struct mdp_kms {
	struct msm_kms base;

	const struct mdp_kms_funcs *funcs;

	/* irq handling: */
	bool in_irq;			/* true while mdp_dispatch_irqs() runs */
	struct list_head irq_list;    /* list of mdp_irq */
	uint32_t vblank_mask;         /* irq bits set for userspace vblank */
};
#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
47 | |||
48 | static inline void mdp_kms_init(struct mdp_kms *mdp_kms, | ||
49 | const struct mdp_kms_funcs *funcs) | ||
50 | { | ||
51 | mdp_kms->funcs = funcs; | ||
52 | INIT_LIST_HEAD(&mdp_kms->irq_list); | ||
53 | msm_kms_init(&mdp_kms->base, &funcs->base); | ||
54 | } | ||
55 | |||
56 | /* | ||
57 | * irq helpers: | ||
58 | */ | ||
59 | |||
60 | /* For transiently registering for different MDP irqs that various parts | ||
61 | * of the KMS code need during setup/configuration. These are not | ||
62 | * necessarily the same as what drm_vblank_get/put() are requesting, and | ||
63 | * the hysteresis in drm_vblank_put() is not necessarily desirable for | ||
64 | * internal housekeeping related irq usage. | ||
65 | */ | ||
66 | struct mdp_irq { | ||
67 | struct list_head node; | ||
68 | uint32_t irqmask; | ||
69 | bool registered; | ||
70 | void (*irq)(struct mdp_irq *irq, uint32_t irqstatus); | ||
71 | }; | ||
72 | |||
73 | void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status); | ||
74 | void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable); | ||
75 | void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask); | ||
76 | void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq); | ||
77 | void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq); | ||
78 | |||
79 | |||
80 | /* | ||
81 | * pixel format helpers: | ||
82 | */ | ||
83 | |||
84 | struct mdp_format { | ||
85 | struct msm_format base; | ||
86 | enum mdp_bpc bpc_r, bpc_g, bpc_b; | ||
87 | enum mdp_bpc_alpha bpc_a; | ||
88 | uint8_t unpack[4]; | ||
89 | bool alpha_enable, unpack_tight; | ||
90 | uint8_t cpp, unpack_count; | ||
91 | }; | ||
92 | #define to_mdp_format(x) container_of(x, struct mdp_format, base) | ||
93 | |||
94 | uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats); | ||
95 | const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); | ||
96 | |||
97 | #endif /* __MDP_KMS_H__ */ | ||
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c deleted file mode 100644 index 5c6b7fca4edd..000000000000 --- a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c +++ /dev/null | |||
@@ -1,203 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | |||
19 | #include "msm_drv.h" | ||
20 | #include "mdp4_kms.h" | ||
21 | |||
22 | |||
23 | struct mdp4_irq_wait { | ||
24 | struct mdp4_irq irq; | ||
25 | int count; | ||
26 | }; | ||
27 | |||
28 | static DECLARE_WAIT_QUEUE_HEAD(wait_event); | ||
29 | |||
30 | static DEFINE_SPINLOCK(list_lock); | ||
31 | |||
32 | static void update_irq(struct mdp4_kms *mdp4_kms) | ||
33 | { | ||
34 | struct mdp4_irq *irq; | ||
35 | uint32_t irqmask = mdp4_kms->vblank_mask; | ||
36 | |||
37 | BUG_ON(!spin_is_locked(&list_lock)); | ||
38 | |||
39 | list_for_each_entry(irq, &mdp4_kms->irq_list, node) | ||
40 | irqmask |= irq->irqmask; | ||
41 | |||
42 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask); | ||
43 | } | ||
44 | |||
45 | static void update_irq_unlocked(struct mdp4_kms *mdp4_kms) | ||
46 | { | ||
47 | unsigned long flags; | ||
48 | spin_lock_irqsave(&list_lock, flags); | ||
49 | update_irq(mdp4_kms); | ||
50 | spin_unlock_irqrestore(&list_lock, flags); | ||
51 | } | ||
52 | |||
53 | static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus) | ||
54 | { | ||
55 | DRM_ERROR("errors: %08x\n", irqstatus); | ||
56 | } | ||
57 | |||
58 | void mdp4_irq_preinstall(struct msm_kms *kms) | ||
59 | { | ||
60 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | ||
61 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); | ||
62 | } | ||
63 | |||
64 | int mdp4_irq_postinstall(struct msm_kms *kms) | ||
65 | { | ||
66 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | ||
67 | struct mdp4_irq *error_handler = &mdp4_kms->error_handler; | ||
68 | |||
69 | INIT_LIST_HEAD(&mdp4_kms->irq_list); | ||
70 | |||
71 | error_handler->irq = mdp4_irq_error_handler; | ||
72 | error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN | | ||
73 | MDP4_IRQ_EXTERNAL_INTF_UDERRUN; | ||
74 | |||
75 | mdp4_irq_register(mdp4_kms, error_handler); | ||
76 | |||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | void mdp4_irq_uninstall(struct msm_kms *kms) | ||
81 | { | ||
82 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | ||
83 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); | ||
84 | } | ||
85 | |||
86 | irqreturn_t mdp4_irq(struct msm_kms *kms) | ||
87 | { | ||
88 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | ||
89 | struct drm_device *dev = mdp4_kms->dev; | ||
90 | struct msm_drm_private *priv = dev->dev_private; | ||
91 | struct mdp4_irq *handler, *n; | ||
92 | unsigned long flags; | ||
93 | unsigned int id; | ||
94 | uint32_t status; | ||
95 | |||
96 | status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS); | ||
97 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status); | ||
98 | |||
99 | VERB("status=%08x", status); | ||
100 | |||
101 | for (id = 0; id < priv->num_crtcs; id++) | ||
102 | if (status & mdp4_crtc_vblank(priv->crtcs[id])) | ||
103 | drm_handle_vblank(dev, id); | ||
104 | |||
105 | spin_lock_irqsave(&list_lock, flags); | ||
106 | mdp4_kms->in_irq = true; | ||
107 | list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) { | ||
108 | if (handler->irqmask & status) { | ||
109 | spin_unlock_irqrestore(&list_lock, flags); | ||
110 | handler->irq(handler, handler->irqmask & status); | ||
111 | spin_lock_irqsave(&list_lock, flags); | ||
112 | } | ||
113 | } | ||
114 | mdp4_kms->in_irq = false; | ||
115 | update_irq(mdp4_kms); | ||
116 | spin_unlock_irqrestore(&list_lock, flags); | ||
117 | |||
118 | return IRQ_HANDLED; | ||
119 | } | ||
120 | |||
121 | int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) | ||
122 | { | ||
123 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | ||
124 | unsigned long flags; | ||
125 | |||
126 | spin_lock_irqsave(&list_lock, flags); | ||
127 | mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc); | ||
128 | update_irq(mdp4_kms); | ||
129 | spin_unlock_irqrestore(&list_lock, flags); | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) | ||
135 | { | ||
136 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | ||
137 | unsigned long flags; | ||
138 | |||
139 | spin_lock_irqsave(&list_lock, flags); | ||
140 | mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc); | ||
141 | update_irq(mdp4_kms); | ||
142 | spin_unlock_irqrestore(&list_lock, flags); | ||
143 | } | ||
144 | |||
145 | static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus) | ||
146 | { | ||
147 | struct mdp4_irq_wait *wait = | ||
148 | container_of(irq, struct mdp4_irq_wait, irq); | ||
149 | wait->count--; | ||
150 | wake_up_all(&wait_event); | ||
151 | } | ||
152 | |||
153 | void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask) | ||
154 | { | ||
155 | struct mdp4_irq_wait wait = { | ||
156 | .irq = { | ||
157 | .irq = wait_irq, | ||
158 | .irqmask = irqmask, | ||
159 | }, | ||
160 | .count = 1, | ||
161 | }; | ||
162 | mdp4_irq_register(mdp4_kms, &wait.irq); | ||
163 | wait_event(wait_event, (wait.count <= 0)); | ||
164 | mdp4_irq_unregister(mdp4_kms, &wait.irq); | ||
165 | } | ||
166 | |||
167 | void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq) | ||
168 | { | ||
169 | unsigned long flags; | ||
170 | bool needs_update = false; | ||
171 | |||
172 | spin_lock_irqsave(&list_lock, flags); | ||
173 | |||
174 | if (!irq->registered) { | ||
175 | irq->registered = true; | ||
176 | list_add(&irq->node, &mdp4_kms->irq_list); | ||
177 | needs_update = !mdp4_kms->in_irq; | ||
178 | } | ||
179 | |||
180 | spin_unlock_irqrestore(&list_lock, flags); | ||
181 | |||
182 | if (needs_update) | ||
183 | update_irq_unlocked(mdp4_kms); | ||
184 | } | ||
185 | |||
186 | void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq) | ||
187 | { | ||
188 | unsigned long flags; | ||
189 | bool needs_update = false; | ||
190 | |||
191 | spin_lock_irqsave(&list_lock, flags); | ||
192 | |||
193 | if (irq->registered) { | ||
194 | irq->registered = false; | ||
195 | list_del(&irq->node); | ||
196 | needs_update = !mdp4_kms->in_irq; | ||
197 | } | ||
198 | |||
199 | spin_unlock_irqrestore(&list_lock, flags); | ||
200 | |||
201 | if (needs_update) | ||
202 | update_irq_unlocked(mdp4_kms); | ||
203 | } | ||
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 28b57eb6f9a1..63ed79fe8a05 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include "msm_drv.h" | 18 | #include "msm_drv.h" |
19 | #include "msm_gpu.h" | 19 | #include "msm_gpu.h" |
20 | #include "msm_kms.h" | ||
20 | 21 | ||
21 | static void msm_fb_output_poll_changed(struct drm_device *dev) | 22 | static void msm_fb_output_poll_changed(struct drm_device *dev) |
22 | { | 23 | { |
@@ -30,50 +31,19 @@ static const struct drm_mode_config_funcs mode_config_funcs = { | |||
30 | .output_poll_changed = msm_fb_output_poll_changed, | 31 | .output_poll_changed = msm_fb_output_poll_changed, |
31 | }; | 32 | }; |
32 | 33 | ||
33 | static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev, | 34 | int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu) |
34 | unsigned long iova, int flags, void *arg) | ||
35 | { | ||
36 | DBG("*** fault: iova=%08lx, flags=%d", iova, flags); | ||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu) | ||
41 | { | 35 | { |
42 | struct msm_drm_private *priv = dev->dev_private; | 36 | struct msm_drm_private *priv = dev->dev_private; |
43 | int idx = priv->num_iommus++; | 37 | int idx = priv->num_mmus++; |
44 | 38 | ||
45 | if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus))) | 39 | if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus))) |
46 | return -EINVAL; | 40 | return -EINVAL; |
47 | 41 | ||
48 | priv->iommus[idx] = iommu; | 42 | priv->mmus[idx] = mmu; |
49 | |||
50 | iommu_set_fault_handler(iommu, msm_fault_handler, dev); | ||
51 | |||
52 | /* need to iommu_attach_device() somewhere?? on resume?? */ | ||
53 | 43 | ||
54 | return idx; | 44 | return idx; |
55 | } | 45 | } |
56 | 46 | ||
57 | int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu, | ||
58 | const char **names, int cnt) | ||
59 | { | ||
60 | int i, ret; | ||
61 | |||
62 | for (i = 0; i < cnt; i++) { | ||
63 | /* TODO maybe some day msm iommu won't require this hack: */ | ||
64 | struct device *msm_iommu_get_ctx(const char *ctx_name); | ||
65 | struct device *ctx = msm_iommu_get_ctx(names[i]); | ||
66 | if (!ctx) | ||
67 | continue; | ||
68 | ret = iommu_attach_device(iommu, ctx); | ||
69 | if (ret) { | ||
70 | dev_warn(dev->dev, "could not attach iommu to %s", names[i]); | ||
71 | return ret; | ||
72 | } | ||
73 | } | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING | 47 | #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING |
78 | static bool reglog = false; | 48 | static bool reglog = false; |
79 | MODULE_PARM_DESC(reglog, "Enable register read/write logging"); | 49 | MODULE_PARM_DESC(reglog, "Enable register read/write logging"); |
@@ -82,6 +52,10 @@ module_param(reglog, bool, 0600); | |||
82 | #define reglog 0 | 52 | #define reglog 0 |
83 | #endif | 53 | #endif |
84 | 54 | ||
55 | static char *vram; | ||
56 | MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU"); | ||
57 | module_param(vram, charp, 0); | ||
58 | |||
85 | void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, | 59 | void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, |
86 | const char *dbgname) | 60 | const char *dbgname) |
87 | { | 61 | { |
@@ -161,6 +135,14 @@ static int msm_unload(struct drm_device *dev) | |||
161 | mutex_unlock(&dev->struct_mutex); | 135 | mutex_unlock(&dev->struct_mutex); |
162 | } | 136 | } |
163 | 137 | ||
138 | if (priv->vram.paddr) { | ||
139 | DEFINE_DMA_ATTRS(attrs); | ||
140 | dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); | ||
141 | drm_mm_takedown(&priv->vram.mm); | ||
142 | dma_free_attrs(dev->dev, priv->vram.size, NULL, | ||
143 | priv->vram.paddr, &attrs); | ||
144 | } | ||
145 | |||
164 | dev->dev_private = NULL; | 146 | dev->dev_private = NULL; |
165 | 147 | ||
166 | kfree(priv); | 148 | kfree(priv); |
@@ -168,6 +150,24 @@ static int msm_unload(struct drm_device *dev) | |||
168 | return 0; | 150 | return 0; |
169 | } | 151 | } |
170 | 152 | ||
153 | static int get_mdp_ver(struct platform_device *pdev) | ||
154 | { | ||
155 | #ifdef CONFIG_OF | ||
156 | const static struct of_device_id match_types[] = { { | ||
157 | .compatible = "qcom,mdss_mdp", | ||
158 | .data = (void *)5, | ||
159 | }, { | ||
160 | /* end node */ | ||
161 | } }; | ||
162 | struct device *dev = &pdev->dev; | ||
163 | const struct of_device_id *match; | ||
164 | match = of_match_node(match_types, dev->of_node); | ||
165 | if (match) | ||
166 | return (int)match->data; | ||
167 | #endif | ||
168 | return 4; | ||
169 | } | ||
170 | |||
171 | static int msm_load(struct drm_device *dev, unsigned long flags) | 171 | static int msm_load(struct drm_device *dev, unsigned long flags) |
172 | { | 172 | { |
173 | struct platform_device *pdev = dev->platformdev; | 173 | struct platform_device *pdev = dev->platformdev; |
@@ -191,7 +191,53 @@ static int msm_load(struct drm_device *dev, unsigned long flags) | |||
191 | 191 | ||
192 | drm_mode_config_init(dev); | 192 | drm_mode_config_init(dev); |
193 | 193 | ||
194 | kms = mdp4_kms_init(dev); | 194 | /* if we have no IOMMU, then we need to use carveout allocator. |
195 | * Grab the entire CMA chunk carved out in early startup in | ||
196 | * mach-msm: | ||
197 | */ | ||
198 | if (!iommu_present(&platform_bus_type)) { | ||
199 | DEFINE_DMA_ATTRS(attrs); | ||
200 | unsigned long size; | ||
201 | void *p; | ||
202 | |||
203 | DBG("using %s VRAM carveout", vram); | ||
204 | size = memparse(vram, NULL); | ||
205 | priv->vram.size = size; | ||
206 | |||
207 | drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); | ||
208 | |||
209 | dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); | ||
210 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | ||
211 | |||
212 | /* note that for no-kernel-mapping, the vaddr returned | ||
213 | * is bogus, but non-null if allocation succeeded: | ||
214 | */ | ||
215 | p = dma_alloc_attrs(dev->dev, size, | ||
216 | &priv->vram.paddr, 0, &attrs); | ||
217 | if (!p) { | ||
218 | dev_err(dev->dev, "failed to allocate VRAM\n"); | ||
219 | priv->vram.paddr = 0; | ||
220 | ret = -ENOMEM; | ||
221 | goto fail; | ||
222 | } | ||
223 | |||
224 | dev_info(dev->dev, "VRAM: %08x->%08x\n", | ||
225 | (uint32_t)priv->vram.paddr, | ||
226 | (uint32_t)(priv->vram.paddr + size)); | ||
227 | } | ||
228 | |||
229 | switch (get_mdp_ver(pdev)) { | ||
230 | case 4: | ||
231 | kms = mdp4_kms_init(dev); | ||
232 | break; | ||
233 | case 5: | ||
234 | kms = mdp5_kms_init(dev); | ||
235 | break; | ||
236 | default: | ||
237 | kms = ERR_PTR(-ENODEV); | ||
238 | break; | ||
239 | } | ||
240 | |||
195 | if (IS_ERR(kms)) { | 241 | if (IS_ERR(kms)) { |
196 | /* | 242 | /* |
197 | * NOTE: once we have GPU support, having no kms should not | 243 | * NOTE: once we have GPU support, having no kms should not |
@@ -778,6 +824,7 @@ static const struct dev_pm_ops msm_pm_ops = { | |||
778 | 824 | ||
779 | static int msm_pdev_probe(struct platform_device *pdev) | 825 | static int msm_pdev_probe(struct platform_device *pdev) |
780 | { | 826 | { |
827 | pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
781 | return drm_platform_init(&msm_driver, pdev); | 828 | return drm_platform_init(&msm_driver, pdev); |
782 | } | 829 | } |
783 | 830 | ||
@@ -793,12 +840,19 @@ static const struct platform_device_id msm_id[] = { | |||
793 | { } | 840 | { } |
794 | }; | 841 | }; |
795 | 842 | ||
843 | static const struct of_device_id dt_match[] = { | ||
844 | { .compatible = "qcom,mdss_mdp" }, | ||
845 | {} | ||
846 | }; | ||
847 | MODULE_DEVICE_TABLE(of, dt_match); | ||
848 | |||
796 | static struct platform_driver msm_platform_driver = { | 849 | static struct platform_driver msm_platform_driver = { |
797 | .probe = msm_pdev_probe, | 850 | .probe = msm_pdev_probe, |
798 | .remove = msm_pdev_remove, | 851 | .remove = msm_pdev_remove, |
799 | .driver = { | 852 | .driver = { |
800 | .owner = THIS_MODULE, | 853 | .owner = THIS_MODULE, |
801 | .name = "msm", | 854 | .name = "msm", |
855 | .of_match_table = dt_match, | ||
802 | .pm = &msm_pm_ops, | 856 | .pm = &msm_pm_ops, |
803 | }, | 857 | }, |
804 | .id_table = msm_id, | 858 | .id_table = msm_id, |
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index d39f0862b19e..3d63269c5b29 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h | |||
@@ -31,6 +31,15 @@ | |||
31 | #include <linux/types.h> | 31 | #include <linux/types.h> |
32 | #include <asm/sizes.h> | 32 | #include <asm/sizes.h> |
33 | 33 | ||
34 | |||
35 | #if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_MSM) | ||
36 | /* stubs we need for compile-test: */ | ||
37 | static inline struct device *msm_iommu_get_ctx(const char *ctx_name) | ||
38 | { | ||
39 | return NULL; | ||
40 | } | ||
41 | #endif | ||
42 | |||
34 | #ifndef CONFIG_OF | 43 | #ifndef CONFIG_OF |
35 | #include <mach/board.h> | 44 | #include <mach/board.h> |
36 | #include <mach/socinfo.h> | 45 | #include <mach/socinfo.h> |
@@ -44,6 +53,7 @@ | |||
44 | 53 | ||
45 | struct msm_kms; | 54 | struct msm_kms; |
46 | struct msm_gpu; | 55 | struct msm_gpu; |
56 | struct msm_mmu; | ||
47 | 57 | ||
48 | #define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */ | 58 | #define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */ |
49 | 59 | ||
@@ -76,9 +86,9 @@ struct msm_drm_private { | |||
76 | /* callbacks deferred until bo is inactive: */ | 86 | /* callbacks deferred until bo is inactive: */ |
77 | struct list_head fence_cbs; | 87 | struct list_head fence_cbs; |
78 | 88 | ||
79 | /* registered IOMMU domains: */ | 89 | /* registered MMUs: */ |
80 | unsigned int num_iommus; | 90 | unsigned int num_mmus; |
81 | struct iommu_domain *iommus[NUM_DOMAINS]; | 91 | struct msm_mmu *mmus[NUM_DOMAINS]; |
82 | 92 | ||
83 | unsigned int num_planes; | 93 | unsigned int num_planes; |
84 | struct drm_plane *planes[8]; | 94 | struct drm_plane *planes[8]; |
@@ -94,6 +104,16 @@ struct msm_drm_private { | |||
94 | 104 | ||
95 | unsigned int num_connectors; | 105 | unsigned int num_connectors; |
96 | struct drm_connector *connectors[8]; | 106 | struct drm_connector *connectors[8]; |
107 | |||
108 | /* VRAM carveout, used when no IOMMU: */ | ||
109 | struct { | ||
110 | unsigned long size; | ||
111 | dma_addr_t paddr; | ||
112 | /* NOTE: mm managed at the page level, size is in # of pages | ||
113 | * and position mm_node->start is in # of pages: | ||
114 | */ | ||
115 | struct drm_mm mm; | ||
116 | } vram; | ||
97 | }; | 117 | }; |
98 | 118 | ||
99 | struct msm_format { | 119 | struct msm_format { |
@@ -114,39 +134,7 @@ void __msm_fence_worker(struct work_struct *work); | |||
114 | (_cb)->func = _func; \ | 134 | (_cb)->func = _func; \ |
115 | } while (0) | 135 | } while (0) |
116 | 136 | ||
117 | /* As there are different display controller blocks depending on the | 137 | int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); |
118 | * snapdragon version, the kms support is split out and the appropriate | ||
119 | * implementation is loaded at runtime. The kms module is responsible | ||
120 | * for constructing the appropriate planes/crtcs/encoders/connectors. | ||
121 | */ | ||
122 | struct msm_kms_funcs { | ||
123 | /* hw initialization: */ | ||
124 | int (*hw_init)(struct msm_kms *kms); | ||
125 | /* irq handling: */ | ||
126 | void (*irq_preinstall)(struct msm_kms *kms); | ||
127 | int (*irq_postinstall)(struct msm_kms *kms); | ||
128 | void (*irq_uninstall)(struct msm_kms *kms); | ||
129 | irqreturn_t (*irq)(struct msm_kms *kms); | ||
130 | int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); | ||
131 | void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); | ||
132 | /* misc: */ | ||
133 | const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format); | ||
134 | long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, | ||
135 | struct drm_encoder *encoder); | ||
136 | /* cleanup: */ | ||
137 | void (*preclose)(struct msm_kms *kms, struct drm_file *file); | ||
138 | void (*destroy)(struct msm_kms *kms); | ||
139 | }; | ||
140 | |||
141 | struct msm_kms { | ||
142 | const struct msm_kms_funcs *funcs; | ||
143 | }; | ||
144 | |||
145 | struct msm_kms *mdp4_kms_init(struct drm_device *dev); | ||
146 | |||
147 | int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu); | ||
148 | int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu, | ||
149 | const char **names, int cnt); | ||
150 | 138 | ||
151 | int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, | 139 | int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, |
152 | struct timespec *timeout); | 140 | struct timespec *timeout); |
@@ -202,7 +190,9 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, | |||
202 | 190 | ||
203 | struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); | 191 | struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); |
204 | 192 | ||
205 | int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder); | 193 | struct hdmi; |
194 | struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder); | ||
195 | irqreturn_t hdmi_irq(int irq, void *dev_id); | ||
206 | void __init hdmi_register(void); | 196 | void __init hdmi_register(void); |
207 | void __exit hdmi_unregister(void); | 197 | void __exit hdmi_unregister(void); |
208 | 198 | ||
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 0286c0eeb10c..81bafdf19ab3 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c | |||
@@ -16,6 +16,7 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "msm_drv.h" | 18 | #include "msm_drv.h" |
19 | #include "msm_kms.h" | ||
19 | 20 | ||
20 | #include "drm_crtc.h" | 21 | #include "drm_crtc.h" |
21 | #include "drm_crtc_helper.h" | 22 | #include "drm_crtc_helper.h" |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index e587d251c590..d8d60c969ac7 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -22,7 +22,45 @@ | |||
22 | #include "msm_drv.h" | 22 | #include "msm_drv.h" |
23 | #include "msm_gem.h" | 23 | #include "msm_gem.h" |
24 | #include "msm_gpu.h" | 24 | #include "msm_gpu.h" |
25 | #include "msm_mmu.h" | ||
25 | 26 | ||
27 | static dma_addr_t physaddr(struct drm_gem_object *obj) | ||
28 | { | ||
29 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | ||
30 | struct msm_drm_private *priv = obj->dev->dev_private; | ||
31 | return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) + | ||
32 | priv->vram.paddr; | ||
33 | } | ||
34 | |||
35 | /* allocate pages from VRAM carveout, used when no IOMMU: */ | ||
36 | static struct page **get_pages_vram(struct drm_gem_object *obj, | ||
37 | int npages) | ||
38 | { | ||
39 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | ||
40 | struct msm_drm_private *priv = obj->dev->dev_private; | ||
41 | dma_addr_t paddr; | ||
42 | struct page **p; | ||
43 | int ret, i; | ||
44 | |||
45 | p = drm_malloc_ab(npages, sizeof(struct page *)); | ||
46 | if (!p) | ||
47 | return ERR_PTR(-ENOMEM); | ||
48 | |||
49 | ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, | ||
50 | npages, 0, DRM_MM_SEARCH_DEFAULT); | ||
51 | if (ret) { | ||
52 | drm_free_large(p); | ||
53 | return ERR_PTR(ret); | ||
54 | } | ||
55 | |||
56 | paddr = physaddr(obj); | ||
57 | for (i = 0; i < npages; i++) { | ||
58 | p[i] = phys_to_page(paddr); | ||
59 | paddr += PAGE_SIZE; | ||
60 | } | ||
61 | |||
62 | return p; | ||
63 | } | ||
26 | 64 | ||
27 | /* called with dev->struct_mutex held */ | 65 | /* called with dev->struct_mutex held */ |
28 | static struct page **get_pages(struct drm_gem_object *obj) | 66 | static struct page **get_pages(struct drm_gem_object *obj) |
@@ -31,9 +69,14 @@ static struct page **get_pages(struct drm_gem_object *obj) | |||
31 | 69 | ||
32 | if (!msm_obj->pages) { | 70 | if (!msm_obj->pages) { |
33 | struct drm_device *dev = obj->dev; | 71 | struct drm_device *dev = obj->dev; |
34 | struct page **p = drm_gem_get_pages(obj, 0); | 72 | struct page **p; |
35 | int npages = obj->size >> PAGE_SHIFT; | 73 | int npages = obj->size >> PAGE_SHIFT; |
36 | 74 | ||
75 | if (iommu_present(&platform_bus_type)) | ||
76 | p = drm_gem_get_pages(obj, 0); | ||
77 | else | ||
78 | p = get_pages_vram(obj, npages); | ||
79 | |||
37 | if (IS_ERR(p)) { | 80 | if (IS_ERR(p)) { |
38 | dev_err(dev->dev, "could not get pages: %ld\n", | 81 | dev_err(dev->dev, "could not get pages: %ld\n", |
39 | PTR_ERR(p)); | 82 | PTR_ERR(p)); |
@@ -73,7 +116,11 @@ static void put_pages(struct drm_gem_object *obj) | |||
73 | sg_free_table(msm_obj->sgt); | 116 | sg_free_table(msm_obj->sgt); |
74 | kfree(msm_obj->sgt); | 117 | kfree(msm_obj->sgt); |
75 | 118 | ||
76 | drm_gem_put_pages(obj, msm_obj->pages, true, false); | 119 | if (iommu_present(&platform_bus_type)) |
120 | drm_gem_put_pages(obj, msm_obj->pages, true, false); | ||
121 | else | ||
122 | drm_mm_remove_node(msm_obj->vram_node); | ||
123 | |||
77 | msm_obj->pages = NULL; | 124 | msm_obj->pages = NULL; |
78 | } | 125 | } |
79 | } | 126 | } |
@@ -138,7 +185,6 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
138 | int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 185 | int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
139 | { | 186 | { |
140 | struct drm_gem_object *obj = vma->vm_private_data; | 187 | struct drm_gem_object *obj = vma->vm_private_data; |
141 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | ||
142 | struct drm_device *dev = obj->dev; | 188 | struct drm_device *dev = obj->dev; |
143 | struct page **pages; | 189 | struct page **pages; |
144 | unsigned long pfn; | 190 | unsigned long pfn; |
@@ -163,7 +209,7 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
163 | pgoff = ((unsigned long)vmf->virtual_address - | 209 | pgoff = ((unsigned long)vmf->virtual_address - |
164 | vma->vm_start) >> PAGE_SHIFT; | 210 | vma->vm_start) >> PAGE_SHIFT; |
165 | 211 | ||
166 | pfn = page_to_pfn(msm_obj->pages[pgoff]); | 212 | pfn = page_to_pfn(pages[pgoff]); |
167 | 213 | ||
168 | VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, | 214 | VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, |
169 | pfn, pfn << PAGE_SHIFT); | 215 | pfn, pfn << PAGE_SHIFT); |
@@ -219,67 +265,6 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) | |||
219 | return offset; | 265 | return offset; |
220 | } | 266 | } |
221 | 267 | ||
222 | /* helpers for dealing w/ iommu: */ | ||
223 | static int map_range(struct iommu_domain *domain, unsigned int iova, | ||
224 | struct sg_table *sgt, unsigned int len, int prot) | ||
225 | { | ||
226 | struct scatterlist *sg; | ||
227 | unsigned int da = iova; | ||
228 | unsigned int i, j; | ||
229 | int ret; | ||
230 | |||
231 | if (!domain || !sgt) | ||
232 | return -EINVAL; | ||
233 | |||
234 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
235 | u32 pa = sg_phys(sg) - sg->offset; | ||
236 | size_t bytes = sg->length + sg->offset; | ||
237 | |||
238 | VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes); | ||
239 | |||
240 | ret = iommu_map(domain, da, pa, bytes, prot); | ||
241 | if (ret) | ||
242 | goto fail; | ||
243 | |||
244 | da += bytes; | ||
245 | } | ||
246 | |||
247 | return 0; | ||
248 | |||
249 | fail: | ||
250 | da = iova; | ||
251 | |||
252 | for_each_sg(sgt->sgl, sg, i, j) { | ||
253 | size_t bytes = sg->length + sg->offset; | ||
254 | iommu_unmap(domain, da, bytes); | ||
255 | da += bytes; | ||
256 | } | ||
257 | return ret; | ||
258 | } | ||
259 | |||
260 | static void unmap_range(struct iommu_domain *domain, unsigned int iova, | ||
261 | struct sg_table *sgt, unsigned int len) | ||
262 | { | ||
263 | struct scatterlist *sg; | ||
264 | unsigned int da = iova; | ||
265 | int i; | ||
266 | |||
267 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
268 | size_t bytes = sg->length + sg->offset; | ||
269 | size_t unmapped; | ||
270 | |||
271 | unmapped = iommu_unmap(domain, da, bytes); | ||
272 | if (unmapped < bytes) | ||
273 | break; | ||
274 | |||
275 | VERB("unmap[%d]: %08x(%x)", i, iova, bytes); | ||
276 | |||
277 | BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); | ||
278 | |||
279 | da += bytes; | ||
280 | } | ||
281 | } | ||
282 | |||
283 | /* should be called under struct_mutex.. although it can be called | 268 | /* should be called under struct_mutex.. although it can be called |
284 | * from atomic context without struct_mutex to acquire an extra | 269 | * from atomic context without struct_mutex to acquire an extra |
285 | * iova ref if you know one is already held. | 270 | * iova ref if you know one is already held. |
@@ -295,15 +280,20 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, | |||
295 | 280 | ||
296 | if (!msm_obj->domain[id].iova) { | 281 | if (!msm_obj->domain[id].iova) { |
297 | struct msm_drm_private *priv = obj->dev->dev_private; | 282 | struct msm_drm_private *priv = obj->dev->dev_private; |
298 | uint32_t offset = (uint32_t)mmap_offset(obj); | 283 | struct msm_mmu *mmu = priv->mmus[id]; |
299 | struct page **pages; | 284 | struct page **pages = get_pages(obj); |
300 | pages = get_pages(obj); | 285 | |
301 | if (IS_ERR(pages)) | 286 | if (IS_ERR(pages)) |
302 | return PTR_ERR(pages); | 287 | return PTR_ERR(pages); |
303 | // XXX ideally we would not map buffers writable when not needed... | 288 | |
304 | ret = map_range(priv->iommus[id], offset, msm_obj->sgt, | 289 | if (iommu_present(&platform_bus_type)) { |
305 | obj->size, IOMMU_READ | IOMMU_WRITE); | 290 | uint32_t offset = (uint32_t)mmap_offset(obj); |
306 | msm_obj->domain[id].iova = offset; | 291 | ret = mmu->funcs->map(mmu, offset, msm_obj->sgt, |
292 | obj->size, IOMMU_READ | IOMMU_WRITE); | ||
293 | msm_obj->domain[id].iova = offset; | ||
294 | } else { | ||
295 | msm_obj->domain[id].iova = physaddr(obj); | ||
296 | } | ||
307 | } | 297 | } |
308 | 298 | ||
309 | if (!ret) | 299 | if (!ret) |
@@ -514,6 +504,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) | |||
514 | void msm_gem_free_object(struct drm_gem_object *obj) | 504 | void msm_gem_free_object(struct drm_gem_object *obj) |
515 | { | 505 | { |
516 | struct drm_device *dev = obj->dev; | 506 | struct drm_device *dev = obj->dev; |
507 | struct msm_drm_private *priv = obj->dev->dev_private; | ||
517 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 508 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
518 | int id; | 509 | int id; |
519 | 510 | ||
@@ -525,11 +516,10 @@ void msm_gem_free_object(struct drm_gem_object *obj) | |||
525 | list_del(&msm_obj->mm_list); | 516 | list_del(&msm_obj->mm_list); |
526 | 517 | ||
527 | for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { | 518 | for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { |
528 | if (msm_obj->domain[id].iova) { | 519 | struct msm_mmu *mmu = priv->mmus[id]; |
529 | struct msm_drm_private *priv = obj->dev->dev_private; | 520 | if (mmu && msm_obj->domain[id].iova) { |
530 | uint32_t offset = (uint32_t)mmap_offset(obj); | 521 | uint32_t offset = (uint32_t)mmap_offset(obj); |
531 | unmap_range(priv->iommus[id], offset, | 522 | mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); |
532 | msm_obj->sgt, obj->size); | ||
533 | } | 523 | } |
534 | } | 524 | } |
535 | 525 | ||
@@ -591,6 +581,7 @@ static int msm_gem_new_impl(struct drm_device *dev, | |||
591 | { | 581 | { |
592 | struct msm_drm_private *priv = dev->dev_private; | 582 | struct msm_drm_private *priv = dev->dev_private; |
593 | struct msm_gem_object *msm_obj; | 583 | struct msm_gem_object *msm_obj; |
584 | unsigned sz; | ||
594 | 585 | ||
595 | switch (flags & MSM_BO_CACHE_MASK) { | 586 | switch (flags & MSM_BO_CACHE_MASK) { |
596 | case MSM_BO_UNCACHED: | 587 | case MSM_BO_UNCACHED: |
@@ -603,10 +594,17 @@ static int msm_gem_new_impl(struct drm_device *dev, | |||
603 | return -EINVAL; | 594 | return -EINVAL; |
604 | } | 595 | } |
605 | 596 | ||
606 | msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); | 597 | sz = sizeof(*msm_obj); |
598 | if (!iommu_present(&platform_bus_type)) | ||
599 | sz += sizeof(struct drm_mm_node); | ||
600 | |||
601 | msm_obj = kzalloc(sz, GFP_KERNEL); | ||
607 | if (!msm_obj) | 602 | if (!msm_obj) |
608 | return -ENOMEM; | 603 | return -ENOMEM; |
609 | 604 | ||
605 | if (!iommu_present(&platform_bus_type)) | ||
606 | msm_obj->vram_node = (void *)&msm_obj[1]; | ||
607 | |||
610 | msm_obj->flags = flags; | 608 | msm_obj->flags = flags; |
611 | 609 | ||
612 | msm_obj->resv = &msm_obj->_resv; | 610 | msm_obj->resv = &msm_obj->_resv; |
@@ -623,7 +621,7 @@ static int msm_gem_new_impl(struct drm_device *dev, | |||
623 | struct drm_gem_object *msm_gem_new(struct drm_device *dev, | 621 | struct drm_gem_object *msm_gem_new(struct drm_device *dev, |
624 | uint32_t size, uint32_t flags) | 622 | uint32_t size, uint32_t flags) |
625 | { | 623 | { |
626 | struct drm_gem_object *obj; | 624 | struct drm_gem_object *obj = NULL; |
627 | int ret; | 625 | int ret; |
628 | 626 | ||
629 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 627 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
@@ -634,9 +632,13 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, | |||
634 | if (ret) | 632 | if (ret) |
635 | goto fail; | 633 | goto fail; |
636 | 634 | ||
637 | ret = drm_gem_object_init(dev, obj, size); | 635 | if (iommu_present(&platform_bus_type)) { |
638 | if (ret) | 636 | ret = drm_gem_object_init(dev, obj, size); |
639 | goto fail; | 637 | if (ret) |
638 | goto fail; | ||
639 | } else { | ||
640 | drm_gem_private_object_init(dev, obj, size); | ||
641 | } | ||
640 | 642 | ||
641 | return obj; | 643 | return obj; |
642 | 644 | ||
@@ -654,6 +656,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, | |||
654 | struct drm_gem_object *obj; | 656 | struct drm_gem_object *obj; |
655 | int ret, npages; | 657 | int ret, npages; |
656 | 658 | ||
659 | /* if we don't have IOMMU, don't bother pretending we can import: */ | ||
660 | if (!iommu_present(&platform_bus_type)) { | ||
661 | dev_err(dev->dev, "cannot import without IOMMU\n"); | ||
662 | return ERR_PTR(-EINVAL); | ||
663 | } | ||
664 | |||
657 | size = PAGE_ALIGN(size); | 665 | size = PAGE_ALIGN(size); |
658 | 666 | ||
659 | ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); | 667 | ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); |
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index f4f23a578d9d..3246bb46c4f2 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h | |||
@@ -57,6 +57,11 @@ struct msm_gem_object { | |||
57 | /* normally (resv == &_resv) except for imported bo's */ | 57 | /* normally (resv == &_resv) except for imported bo's */ |
58 | struct reservation_object *resv; | 58 | struct reservation_object *resv; |
59 | struct reservation_object _resv; | 59 | struct reservation_object _resv; |
60 | |||
61 | /* For physically contiguous buffers. Used when we don't have | ||
62 | * an IOMMU. | ||
63 | */ | ||
64 | struct drm_mm_node *vram_node; | ||
60 | }; | 65 | }; |
61 | #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) | 66 | #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) |
62 | 67 | ||
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 4583d61556f5..4ebce8be489d 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include "msm_gpu.h" | 18 | #include "msm_gpu.h" |
19 | #include "msm_gem.h" | 19 | #include "msm_gem.h" |
20 | #include "msm_mmu.h" | ||
20 | 21 | ||
21 | 22 | ||
22 | /* | 23 | /* |
@@ -25,20 +26,10 @@ | |||
25 | 26 | ||
26 | #ifdef CONFIG_MSM_BUS_SCALING | 27 | #ifdef CONFIG_MSM_BUS_SCALING |
27 | #include <mach/board.h> | 28 | #include <mach/board.h> |
28 | #include <mach/kgsl.h> | 29 | static void bs_init(struct msm_gpu *gpu) |
29 | static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) | ||
30 | { | 30 | { |
31 | struct drm_device *dev = gpu->dev; | 31 | if (gpu->bus_scale_table) { |
32 | struct kgsl_device_platform_data *pdata; | 32 | gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table); |
33 | |||
34 | if (!pdev) { | ||
35 | dev_err(dev->dev, "could not find dtv pdata\n"); | ||
36 | return; | ||
37 | } | ||
38 | |||
39 | pdata = pdev->dev.platform_data; | ||
40 | if (pdata->bus_scale_table) { | ||
41 | gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table); | ||
42 | DBG("bus scale client: %08x", gpu->bsc); | 33 | DBG("bus scale client: %08x", gpu->bsc); |
43 | } | 34 | } |
44 | } | 35 | } |
@@ -59,7 +50,7 @@ static void bs_set(struct msm_gpu *gpu, int idx) | |||
59 | } | 50 | } |
60 | } | 51 | } |
61 | #else | 52 | #else |
62 | static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {} | 53 | static void bs_init(struct msm_gpu *gpu) {} |
63 | static void bs_fini(struct msm_gpu *gpu) {} | 54 | static void bs_fini(struct msm_gpu *gpu) {} |
64 | static void bs_set(struct msm_gpu *gpu, int idx) {} | 55 | static void bs_set(struct msm_gpu *gpu, int idx) {} |
65 | #endif | 56 | #endif |
@@ -363,6 +354,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
363 | struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, | 354 | struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, |
364 | const char *name, const char *ioname, const char *irqname, int ringsz) | 355 | const char *name, const char *ioname, const char *irqname, int ringsz) |
365 | { | 356 | { |
357 | struct iommu_domain *iommu; | ||
366 | int i, ret; | 358 | int i, ret; |
367 | 359 | ||
368 | gpu->dev = drm; | 360 | gpu->dev = drm; |
@@ -428,13 +420,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
428 | * and have separate page tables per context. For now, to keep things | 420 | * and have separate page tables per context. For now, to keep things |
429 | * simple and to get something working, just use a single address space: | 421 | * simple and to get something working, just use a single address space: |
430 | */ | 422 | */ |
431 | gpu->iommu = iommu_domain_alloc(&platform_bus_type); | 423 | iommu = iommu_domain_alloc(&platform_bus_type); |
432 | if (!gpu->iommu) { | 424 | if (iommu) { |
433 | dev_err(drm->dev, "failed to allocate IOMMU\n"); | 425 | dev_info(drm->dev, "%s: using IOMMU\n", name); |
434 | ret = -ENOMEM; | 426 | gpu->mmu = msm_iommu_new(drm, iommu); |
435 | goto fail; | 427 | } else { |
428 | dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); | ||
436 | } | 429 | } |
437 | gpu->id = msm_register_iommu(drm, gpu->iommu); | 430 | gpu->id = msm_register_mmu(drm, gpu->mmu); |
438 | 431 | ||
439 | /* Create ringbuffer: */ | 432 | /* Create ringbuffer: */ |
440 | gpu->rb = msm_ringbuffer_new(gpu, ringsz); | 433 | gpu->rb = msm_ringbuffer_new(gpu, ringsz); |
@@ -452,7 +445,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
452 | goto fail; | 445 | goto fail; |
453 | } | 446 | } |
454 | 447 | ||
455 | bs_init(gpu, pdev); | 448 | bs_init(gpu); |
456 | 449 | ||
457 | return 0; | 450 | return 0; |
458 | 451 | ||
@@ -474,6 +467,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) | |||
474 | msm_ringbuffer_destroy(gpu->rb); | 467 | msm_ringbuffer_destroy(gpu->rb); |
475 | } | 468 | } |
476 | 469 | ||
477 | if (gpu->iommu) | 470 | if (gpu->mmu) |
478 | iommu_domain_free(gpu->iommu); | 471 | gpu->mmu->funcs->destroy(gpu->mmu); |
479 | } | 472 | } |
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 8cd829e520bb..458db8c64c28 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h | |||
@@ -78,14 +78,18 @@ struct msm_gpu { | |||
78 | void __iomem *mmio; | 78 | void __iomem *mmio; |
79 | int irq; | 79 | int irq; |
80 | 80 | ||
81 | struct iommu_domain *iommu; | 81 | struct msm_mmu *mmu; |
82 | int id; | 82 | int id; |
83 | 83 | ||
84 | /* Power Control: */ | 84 | /* Power Control: */ |
85 | struct regulator *gpu_reg, *gpu_cx; | 85 | struct regulator *gpu_reg, *gpu_cx; |
86 | struct clk *ebi1_clk, *grp_clks[5]; | 86 | struct clk *ebi1_clk, *grp_clks[5]; |
87 | uint32_t fast_rate, slow_rate, bus_freq; | 87 | uint32_t fast_rate, slow_rate, bus_freq; |
88 | |||
89 | #ifdef CONFIG_MSM_BUS_SCALING | ||
90 | struct msm_bus_scale_pdata *bus_scale_table; | ||
88 | uint32_t bsc; | 91 | uint32_t bsc; |
92 | #endif | ||
89 | 93 | ||
90 | /* Hang Detction: */ | 94 | /* Hang Detction: */ |
91 | #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */ | 95 | #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */ |
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c new file mode 100644 index 000000000000..92b745986231 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_iommu.c | |||
@@ -0,0 +1,148 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #include "msm_drv.h" | ||
19 | #include "msm_mmu.h" | ||
20 | |||
21 | struct msm_iommu { | ||
22 | struct msm_mmu base; | ||
23 | struct iommu_domain *domain; | ||
24 | }; | ||
25 | #define to_msm_iommu(x) container_of(x, struct msm_iommu, base) | ||
26 | |||
27 | static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev, | ||
28 | unsigned long iova, int flags, void *arg) | ||
29 | { | ||
30 | DBG("*** fault: iova=%08lx, flags=%d", iova, flags); | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt) | ||
35 | { | ||
36 | struct drm_device *dev = mmu->dev; | ||
37 | struct msm_iommu *iommu = to_msm_iommu(mmu); | ||
38 | int i, ret; | ||
39 | |||
40 | for (i = 0; i < cnt; i++) { | ||
41 | struct device *msm_iommu_get_ctx(const char *ctx_name); | ||
42 | struct device *ctx = msm_iommu_get_ctx(names[i]); | ||
43 | if (IS_ERR_OR_NULL(ctx)) | ||
44 | continue; | ||
45 | ret = iommu_attach_device(iommu->domain, ctx); | ||
46 | if (ret) { | ||
47 | dev_warn(dev->dev, "could not attach iommu to %s", names[i]); | ||
48 | return ret; | ||
49 | } | ||
50 | } | ||
51 | |||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, | ||
56 | struct sg_table *sgt, unsigned len, int prot) | ||
57 | { | ||
58 | struct msm_iommu *iommu = to_msm_iommu(mmu); | ||
59 | struct iommu_domain *domain = iommu->domain; | ||
60 | struct scatterlist *sg; | ||
61 | unsigned int da = iova; | ||
62 | unsigned int i, j; | ||
63 | int ret; | ||
64 | |||
65 | if (!domain || !sgt) | ||
66 | return -EINVAL; | ||
67 | |||
68 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
69 | u32 pa = sg_phys(sg) - sg->offset; | ||
70 | size_t bytes = sg->length + sg->offset; | ||
71 | |||
72 | VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes); | ||
73 | |||
74 | ret = iommu_map(domain, da, pa, bytes, prot); | ||
75 | if (ret) | ||
76 | goto fail; | ||
77 | |||
78 | da += bytes; | ||
79 | } | ||
80 | |||
81 | return 0; | ||
82 | |||
83 | fail: | ||
84 | da = iova; | ||
85 | |||
86 | for_each_sg(sgt->sgl, sg, i, j) { | ||
87 | size_t bytes = sg->length + sg->offset; | ||
88 | iommu_unmap(domain, da, bytes); | ||
89 | da += bytes; | ||
90 | } | ||
91 | return ret; | ||
92 | } | ||
93 | |||
94 | static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, | ||
95 | struct sg_table *sgt, unsigned len) | ||
96 | { | ||
97 | struct msm_iommu *iommu = to_msm_iommu(mmu); | ||
98 | struct iommu_domain *domain = iommu->domain; | ||
99 | struct scatterlist *sg; | ||
100 | unsigned int da = iova; | ||
101 | int i; | ||
102 | |||
103 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
104 | size_t bytes = sg->length + sg->offset; | ||
105 | size_t unmapped; | ||
106 | |||
107 | unmapped = iommu_unmap(domain, da, bytes); | ||
108 | if (unmapped < bytes) | ||
109 | return unmapped; | ||
110 | |||
111 | VERB("unmap[%d]: %08x(%x)", i, iova, bytes); | ||
112 | |||
113 | BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); | ||
114 | |||
115 | da += bytes; | ||
116 | } | ||
117 | |||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | static void msm_iommu_destroy(struct msm_mmu *mmu) | ||
122 | { | ||
123 | struct msm_iommu *iommu = to_msm_iommu(mmu); | ||
124 | iommu_domain_free(iommu->domain); | ||
125 | kfree(iommu); | ||
126 | } | ||
127 | |||
128 | static const struct msm_mmu_funcs funcs = { | ||
129 | .attach = msm_iommu_attach, | ||
130 | .map = msm_iommu_map, | ||
131 | .unmap = msm_iommu_unmap, | ||
132 | .destroy = msm_iommu_destroy, | ||
133 | }; | ||
134 | |||
135 | struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain) | ||
136 | { | ||
137 | struct msm_iommu *iommu; | ||
138 | |||
139 | iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); | ||
140 | if (!iommu) | ||
141 | return ERR_PTR(-ENOMEM); | ||
142 | |||
143 | iommu->domain = domain; | ||
144 | msm_mmu_init(&iommu->base, dev, &funcs); | ||
145 | iommu_set_fault_handler(domain, msm_fault_handler, dev); | ||
146 | |||
147 | return &iommu->base; | ||
148 | } | ||
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h new file mode 100644 index 000000000000..06437745bc2c --- /dev/null +++ b/drivers/gpu/drm/msm/msm_kms.h | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __MSM_KMS_H__ | ||
19 | #define __MSM_KMS_H__ | ||
20 | |||
21 | #include <linux/clk.h> | ||
22 | #include <linux/regulator/consumer.h> | ||
23 | |||
24 | #include "msm_drv.h" | ||
25 | |||
26 | /* As there are different display controller blocks depending on the | ||
27 | * snapdragon version, the kms support is split out and the appropriate | ||
28 | * implementation is loaded at runtime. The kms module is responsible | ||
29 | * for constructing the appropriate planes/crtcs/encoders/connectors. | ||
30 | */ | ||
31 | struct msm_kms_funcs { | ||
32 | /* hw initialization: */ | ||
33 | int (*hw_init)(struct msm_kms *kms); | ||
34 | /* irq handling: */ | ||
35 | void (*irq_preinstall)(struct msm_kms *kms); | ||
36 | int (*irq_postinstall)(struct msm_kms *kms); | ||
37 | void (*irq_uninstall)(struct msm_kms *kms); | ||
38 | irqreturn_t (*irq)(struct msm_kms *kms); | ||
39 | int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); | ||
40 | void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); | ||
41 | /* misc: */ | ||
42 | const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format); | ||
43 | long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, | ||
44 | struct drm_encoder *encoder); | ||
45 | /* cleanup: */ | ||
46 | void (*preclose)(struct msm_kms *kms, struct drm_file *file); | ||
47 | void (*destroy)(struct msm_kms *kms); | ||
48 | }; | ||
49 | |||
50 | struct msm_kms { | ||
51 | const struct msm_kms_funcs *funcs; | ||
52 | |||
53 | /* irq handling: */ | ||
54 | bool in_irq; | ||
55 | struct list_head irq_list; /* list of mdp4_irq */ | ||
56 | uint32_t vblank_mask; /* irq bits set for userspace vblank */ | ||
57 | }; | ||
58 | |||
59 | static inline void msm_kms_init(struct msm_kms *kms, | ||
60 | const struct msm_kms_funcs *funcs) | ||
61 | { | ||
62 | kms->funcs = funcs; | ||
63 | } | ||
64 | |||
65 | struct msm_kms *mdp4_kms_init(struct drm_device *dev); | ||
66 | struct msm_kms *mdp5_kms_init(struct drm_device *dev); | ||
67 | |||
68 | #endif /* __MSM_KMS_H__ */ | ||
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h new file mode 100644 index 000000000000..030324482b4a --- /dev/null +++ b/drivers/gpu/drm/msm/msm_mmu.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Red Hat | ||
3 | * Author: Rob Clark <robdclark@gmail.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __MSM_MMU_H__ | ||
19 | #define __MSM_MMU_H__ | ||
20 | |||
21 | #include <linux/iommu.h> | ||
22 | |||
23 | struct msm_mmu_funcs { | ||
24 | int (*attach)(struct msm_mmu *mmu, const char **names, int cnt); | ||
25 | int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, | ||
26 | unsigned len, int prot); | ||
27 | int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, | ||
28 | unsigned len); | ||
29 | void (*destroy)(struct msm_mmu *mmu); | ||
30 | }; | ||
31 | |||
32 | struct msm_mmu { | ||
33 | const struct msm_mmu_funcs *funcs; | ||
34 | struct drm_device *dev; | ||
35 | }; | ||
36 | |||
37 | static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev, | ||
38 | const struct msm_mmu_funcs *funcs) | ||
39 | { | ||
40 | mmu->dev = dev; | ||
41 | mmu->funcs = funcs; | ||
42 | } | ||
43 | |||
44 | struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain); | ||
45 | struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu); | ||
46 | |||
47 | #endif /* __MSM_MMU_H__ */ | ||
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c index f685035dbe39..b5162c3b6111 100644 --- a/drivers/gpu/drm/radeon/atombios_i2c.c +++ b/drivers/gpu/drm/radeon/atombios_i2c.c | |||
@@ -27,8 +27,6 @@ | |||
27 | #include "radeon.h" | 27 | #include "radeon.h" |
28 | #include "atom.h" | 28 | #include "atom.h" |
29 | 29 | ||
30 | extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); | ||
31 | |||
32 | #define TARGET_HW_I2C_CLOCK 50 | 30 | #define TARGET_HW_I2C_CLOCK 50 |
33 | 31 | ||
34 | /* these are a limitation of ProcessI2cChannelTransaction not the hw */ | 32 | /* these are a limitation of ProcessI2cChannelTransaction not the hw */ |
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 9b6950d9b3c0..0fbd36f3d4e9 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c | |||
@@ -49,6 +49,7 @@ struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps); | |||
49 | struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev); | 49 | struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev); |
50 | struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev); | 50 | struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev); |
51 | 51 | ||
52 | extern int ni_mc_load_microcode(struct radeon_device *rdev); | ||
52 | 53 | ||
53 | //********* BARTS **************// | 54 | //********* BARTS **************// |
54 | static const u32 barts_cgcg_cgls_default[] = | 55 | static const u32 barts_cgcg_cgls_default[] = |
@@ -2510,21 +2511,6 @@ int btc_dpm_enable(struct radeon_device *rdev) | |||
2510 | if (eg_pi->ls_clock_gating) | 2511 | if (eg_pi->ls_clock_gating) |
2511 | btc_ls_clock_gating_enable(rdev, true); | 2512 | btc_ls_clock_gating_enable(rdev, true); |
2512 | 2513 | ||
2513 | if (rdev->irq.installed && | ||
2514 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | ||
2515 | PPSMC_Result result; | ||
2516 | |||
2517 | ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | ||
2518 | if (ret) | ||
2519 | return ret; | ||
2520 | rdev->irq.dpm_thermal = true; | ||
2521 | radeon_irq_set(rdev); | ||
2522 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); | ||
2523 | |||
2524 | if (result != PPSMC_Result_OK) | ||
2525 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); | ||
2526 | } | ||
2527 | |||
2528 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 2514 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
2529 | 2515 | ||
2530 | btc_init_stutter_mode(rdev); | 2516 | btc_init_stutter_mode(rdev); |
@@ -2576,7 +2562,11 @@ void btc_dpm_disable(struct radeon_device *rdev) | |||
2576 | void btc_dpm_setup_asic(struct radeon_device *rdev) | 2562 | void btc_dpm_setup_asic(struct radeon_device *rdev) |
2577 | { | 2563 | { |
2578 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | 2564 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
2565 | int r; | ||
2579 | 2566 | ||
2567 | r = ni_mc_load_microcode(rdev); | ||
2568 | if (r) | ||
2569 | DRM_ERROR("Failed to load MC firmware!\n"); | ||
2580 | rv770_get_memory_type(rdev); | 2570 | rv770_get_memory_type(rdev); |
2581 | rv740_read_clock_registers(rdev); | 2571 | rv740_read_clock_registers(rdev); |
2582 | btc_read_arb_registers(rdev); | 2572 | btc_read_arb_registers(rdev); |
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 1ed479976358..8d49104ca6c2 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c | |||
@@ -171,8 +171,7 @@ extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, | |||
171 | struct atom_voltage_table *voltage_table); | 171 | struct atom_voltage_table *voltage_table); |
172 | extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev); | 172 | extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev); |
173 | extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); | 173 | extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); |
174 | extern void cik_update_cg(struct radeon_device *rdev, | 174 | extern int ci_mc_load_microcode(struct radeon_device *rdev); |
175 | u32 block, bool enable); | ||
176 | 175 | ||
177 | static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, | 176 | static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, |
178 | struct atom_voltage_table_entry *voltage_table, | 177 | struct atom_voltage_table_entry *voltage_table, |
@@ -4503,8 +4502,8 @@ static void ci_get_memory_type(struct radeon_device *rdev) | |||
4503 | 4502 | ||
4504 | } | 4503 | } |
4505 | 4504 | ||
4506 | void ci_update_current_ps(struct radeon_device *rdev, | 4505 | static void ci_update_current_ps(struct radeon_device *rdev, |
4507 | struct radeon_ps *rps) | 4506 | struct radeon_ps *rps) |
4508 | { | 4507 | { |
4509 | struct ci_ps *new_ps = ci_get_ps(rps); | 4508 | struct ci_ps *new_ps = ci_get_ps(rps); |
4510 | struct ci_power_info *pi = ci_get_pi(rdev); | 4509 | struct ci_power_info *pi = ci_get_pi(rdev); |
@@ -4514,8 +4513,8 @@ void ci_update_current_ps(struct radeon_device *rdev, | |||
4514 | pi->current_rps.ps_priv = &pi->current_ps; | 4513 | pi->current_rps.ps_priv = &pi->current_ps; |
4515 | } | 4514 | } |
4516 | 4515 | ||
4517 | void ci_update_requested_ps(struct radeon_device *rdev, | 4516 | static void ci_update_requested_ps(struct radeon_device *rdev, |
4518 | struct radeon_ps *rps) | 4517 | struct radeon_ps *rps) |
4519 | { | 4518 | { |
4520 | struct ci_ps *new_ps = ci_get_ps(rps); | 4519 | struct ci_ps *new_ps = ci_get_ps(rps); |
4521 | struct ci_power_info *pi = ci_get_pi(rdev); | 4520 | struct ci_power_info *pi = ci_get_pi(rdev); |
@@ -4549,6 +4548,11 @@ void ci_dpm_post_set_power_state(struct radeon_device *rdev) | |||
4549 | 4548 | ||
4550 | void ci_dpm_setup_asic(struct radeon_device *rdev) | 4549 | void ci_dpm_setup_asic(struct radeon_device *rdev) |
4551 | { | 4550 | { |
4551 | int r; | ||
4552 | |||
4553 | r = ci_mc_load_microcode(rdev); | ||
4554 | if (r) | ||
4555 | DRM_ERROR("Failed to load MC firmware!\n"); | ||
4552 | ci_read_clock_registers(rdev); | 4556 | ci_read_clock_registers(rdev); |
4553 | ci_get_memory_type(rdev); | 4557 | ci_get_memory_type(rdev); |
4554 | ci_enable_acpi_power_management(rdev); | 4558 | ci_enable_acpi_power_management(rdev); |
@@ -4561,13 +4565,6 @@ int ci_dpm_enable(struct radeon_device *rdev) | |||
4561 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; | 4565 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; |
4562 | int ret; | 4566 | int ret; |
4563 | 4567 | ||
4564 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
4565 | RADEON_CG_BLOCK_MC | | ||
4566 | RADEON_CG_BLOCK_SDMA | | ||
4567 | RADEON_CG_BLOCK_BIF | | ||
4568 | RADEON_CG_BLOCK_UVD | | ||
4569 | RADEON_CG_BLOCK_HDP), false); | ||
4570 | |||
4571 | if (ci_is_smc_running(rdev)) | 4568 | if (ci_is_smc_running(rdev)) |
4572 | return -EINVAL; | 4569 | return -EINVAL; |
4573 | if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { | 4570 | if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { |
@@ -4665,6 +4662,18 @@ int ci_dpm_enable(struct radeon_device *rdev) | |||
4665 | DRM_ERROR("ci_enable_power_containment failed\n"); | 4662 | DRM_ERROR("ci_enable_power_containment failed\n"); |
4666 | return ret; | 4663 | return ret; |
4667 | } | 4664 | } |
4665 | |||
4666 | ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | ||
4667 | |||
4668 | ci_update_current_ps(rdev, boot_ps); | ||
4669 | |||
4670 | return 0; | ||
4671 | } | ||
4672 | |||
4673 | int ci_dpm_late_enable(struct radeon_device *rdev) | ||
4674 | { | ||
4675 | int ret; | ||
4676 | |||
4668 | if (rdev->irq.installed && | 4677 | if (rdev->irq.installed && |
4669 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 4678 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
4670 | #if 0 | 4679 | #if 0 |
@@ -4685,19 +4694,8 @@ int ci_dpm_enable(struct radeon_device *rdev) | |||
4685 | #endif | 4694 | #endif |
4686 | } | 4695 | } |
4687 | 4696 | ||
4688 | ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | ||
4689 | |||
4690 | ci_dpm_powergate_uvd(rdev, true); | 4697 | ci_dpm_powergate_uvd(rdev, true); |
4691 | 4698 | ||
4692 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
4693 | RADEON_CG_BLOCK_MC | | ||
4694 | RADEON_CG_BLOCK_SDMA | | ||
4695 | RADEON_CG_BLOCK_BIF | | ||
4696 | RADEON_CG_BLOCK_UVD | | ||
4697 | RADEON_CG_BLOCK_HDP), true); | ||
4698 | |||
4699 | ci_update_current_ps(rdev, boot_ps); | ||
4700 | |||
4701 | return 0; | 4699 | return 0; |
4702 | } | 4700 | } |
4703 | 4701 | ||
@@ -4706,12 +4704,6 @@ void ci_dpm_disable(struct radeon_device *rdev) | |||
4706 | struct ci_power_info *pi = ci_get_pi(rdev); | 4704 | struct ci_power_info *pi = ci_get_pi(rdev); |
4707 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; | 4705 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; |
4708 | 4706 | ||
4709 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
4710 | RADEON_CG_BLOCK_MC | | ||
4711 | RADEON_CG_BLOCK_SDMA | | ||
4712 | RADEON_CG_BLOCK_UVD | | ||
4713 | RADEON_CG_BLOCK_HDP), false); | ||
4714 | |||
4715 | ci_dpm_powergate_uvd(rdev, false); | 4707 | ci_dpm_powergate_uvd(rdev, false); |
4716 | 4708 | ||
4717 | if (!ci_is_smc_running(rdev)) | 4709 | if (!ci_is_smc_running(rdev)) |
@@ -4742,13 +4734,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev) | |||
4742 | struct radeon_ps *old_ps = &pi->current_rps; | 4734 | struct radeon_ps *old_ps = &pi->current_rps; |
4743 | int ret; | 4735 | int ret; |
4744 | 4736 | ||
4745 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
4746 | RADEON_CG_BLOCK_MC | | ||
4747 | RADEON_CG_BLOCK_SDMA | | ||
4748 | RADEON_CG_BLOCK_BIF | | ||
4749 | RADEON_CG_BLOCK_UVD | | ||
4750 | RADEON_CG_BLOCK_HDP), false); | ||
4751 | |||
4752 | ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps); | 4737 | ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps); |
4753 | if (pi->pcie_performance_request) | 4738 | if (pi->pcie_performance_request) |
4754 | ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps); | 4739 | ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps); |
@@ -4804,13 +4789,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev) | |||
4804 | if (pi->pcie_performance_request) | 4789 | if (pi->pcie_performance_request) |
4805 | ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); | 4790 | ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); |
4806 | 4791 | ||
4807 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
4808 | RADEON_CG_BLOCK_MC | | ||
4809 | RADEON_CG_BLOCK_SDMA | | ||
4810 | RADEON_CG_BLOCK_BIF | | ||
4811 | RADEON_CG_BLOCK_UVD | | ||
4812 | RADEON_CG_BLOCK_HDP), true); | ||
4813 | |||
4814 | return 0; | 4792 | return 0; |
4815 | } | 4793 | } |
4816 | 4794 | ||
@@ -5023,8 +5001,8 @@ static int ci_parse_power_table(struct radeon_device *rdev) | |||
5023 | return 0; | 5001 | return 0; |
5024 | } | 5002 | } |
5025 | 5003 | ||
5026 | int ci_get_vbios_boot_values(struct radeon_device *rdev, | 5004 | static int ci_get_vbios_boot_values(struct radeon_device *rdev, |
5027 | struct ci_vbios_boot_state *boot_state) | 5005 | struct ci_vbios_boot_state *boot_state) |
5028 | { | 5006 | { |
5029 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 5007 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
5030 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); | 5008 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); |
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c index 9c745dd22438..8debc9d47362 100644 --- a/drivers/gpu/drm/radeon/ci_smc.c +++ b/drivers/gpu/drm/radeon/ci_smc.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "cikd.h" | 28 | #include "cikd.h" |
29 | #include "ppsmc.h" | 29 | #include "ppsmc.h" |
30 | #include "radeon_ucode.h" | 30 | #include "radeon_ucode.h" |
31 | #include "ci_dpm.h" | ||
31 | 32 | ||
32 | static int ci_set_smc_sram_address(struct radeon_device *rdev, | 33 | static int ci_set_smc_sram_address(struct radeon_device *rdev, |
33 | u32 smc_address, u32 limit) | 34 | u32 smc_address, u32 limit) |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index b43a3a3c9067..9e50dd5d0e42 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -1697,7 +1697,7 @@ static void cik_srbm_select(struct radeon_device *rdev, | |||
1697 | * Load the GDDR MC ucode into the hw (CIK). | 1697 | * Load the GDDR MC ucode into the hw (CIK). |
1698 | * Returns 0 on success, error on failure. | 1698 | * Returns 0 on success, error on failure. |
1699 | */ | 1699 | */ |
1700 | static int ci_mc_load_microcode(struct radeon_device *rdev) | 1700 | int ci_mc_load_microcode(struct radeon_device *rdev) |
1701 | { | 1701 | { |
1702 | const __be32 *fw_data; | 1702 | const __be32 *fw_data; |
1703 | u32 running, blackout = 0; | 1703 | u32 running, blackout = 0; |
@@ -4015,15 +4015,43 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev) | |||
4015 | return 0; | 4015 | return 0; |
4016 | } | 4016 | } |
4017 | 4017 | ||
4018 | u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, | 4018 | u32 cik_gfx_get_rptr(struct radeon_device *rdev, |
4019 | struct radeon_ring *ring) | 4019 | struct radeon_ring *ring) |
4020 | { | 4020 | { |
4021 | u32 rptr; | 4021 | u32 rptr; |
4022 | 4022 | ||
4023 | if (rdev->wb.enabled) | ||
4024 | rptr = rdev->wb.wb[ring->rptr_offs/4]; | ||
4025 | else | ||
4026 | rptr = RREG32(CP_RB0_RPTR); | ||
4023 | 4027 | ||
4028 | return rptr; | ||
4029 | } | ||
4030 | |||
4031 | u32 cik_gfx_get_wptr(struct radeon_device *rdev, | ||
4032 | struct radeon_ring *ring) | ||
4033 | { | ||
4034 | u32 wptr; | ||
4035 | |||
4036 | wptr = RREG32(CP_RB0_WPTR); | ||
4037 | |||
4038 | return wptr; | ||
4039 | } | ||
4040 | |||
4041 | void cik_gfx_set_wptr(struct radeon_device *rdev, | ||
4042 | struct radeon_ring *ring) | ||
4043 | { | ||
4044 | WREG32(CP_RB0_WPTR, ring->wptr); | ||
4045 | (void)RREG32(CP_RB0_WPTR); | ||
4046 | } | ||
4047 | |||
4048 | u32 cik_compute_get_rptr(struct radeon_device *rdev, | ||
4049 | struct radeon_ring *ring) | ||
4050 | { | ||
4051 | u32 rptr; | ||
4024 | 4052 | ||
4025 | if (rdev->wb.enabled) { | 4053 | if (rdev->wb.enabled) { |
4026 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); | 4054 | rptr = rdev->wb.wb[ring->rptr_offs/4]; |
4027 | } else { | 4055 | } else { |
4028 | mutex_lock(&rdev->srbm_mutex); | 4056 | mutex_lock(&rdev->srbm_mutex); |
4029 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); | 4057 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); |
@@ -4035,13 +4063,14 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, | |||
4035 | return rptr; | 4063 | return rptr; |
4036 | } | 4064 | } |
4037 | 4065 | ||
4038 | u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, | 4066 | u32 cik_compute_get_wptr(struct radeon_device *rdev, |
4039 | struct radeon_ring *ring) | 4067 | struct radeon_ring *ring) |
4040 | { | 4068 | { |
4041 | u32 wptr; | 4069 | u32 wptr; |
4042 | 4070 | ||
4043 | if (rdev->wb.enabled) { | 4071 | if (rdev->wb.enabled) { |
4044 | wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); | 4072 | /* XXX check if swapping is necessary on BE */ |
4073 | wptr = rdev->wb.wb[ring->wptr_offs/4]; | ||
4045 | } else { | 4074 | } else { |
4046 | mutex_lock(&rdev->srbm_mutex); | 4075 | mutex_lock(&rdev->srbm_mutex); |
4047 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); | 4076 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); |
@@ -4053,10 +4082,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, | |||
4053 | return wptr; | 4082 | return wptr; |
4054 | } | 4083 | } |
4055 | 4084 | ||
4056 | void cik_compute_ring_set_wptr(struct radeon_device *rdev, | 4085 | void cik_compute_set_wptr(struct radeon_device *rdev, |
4057 | struct radeon_ring *ring) | 4086 | struct radeon_ring *ring) |
4058 | { | 4087 | { |
4059 | rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr); | 4088 | /* XXX check if swapping is necessary on BE */ |
4089 | rdev->wb.wb[ring->wptr_offs/4] = ring->wptr; | ||
4060 | WDOORBELL32(ring->doorbell_index, ring->wptr); | 4090 | WDOORBELL32(ring->doorbell_index, ring->wptr); |
4061 | } | 4091 | } |
4062 | 4092 | ||
@@ -4850,6 +4880,160 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
4850 | cik_print_gpu_status_regs(rdev); | 4880 | cik_print_gpu_status_regs(rdev); |
4851 | } | 4881 | } |
4852 | 4882 | ||
4883 | struct kv_reset_save_regs { | ||
4884 | u32 gmcon_reng_execute; | ||
4885 | u32 gmcon_misc; | ||
4886 | u32 gmcon_misc3; | ||
4887 | }; | ||
4888 | |||
4889 | static void kv_save_regs_for_reset(struct radeon_device *rdev, | ||
4890 | struct kv_reset_save_regs *save) | ||
4891 | { | ||
4892 | save->gmcon_reng_execute = RREG32(GMCON_RENG_EXECUTE); | ||
4893 | save->gmcon_misc = RREG32(GMCON_MISC); | ||
4894 | save->gmcon_misc3 = RREG32(GMCON_MISC3); | ||
4895 | |||
4896 | WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute & ~RENG_EXECUTE_ON_PWR_UP); | ||
4897 | WREG32(GMCON_MISC, save->gmcon_misc & ~(RENG_EXECUTE_ON_REG_UPDATE | | ||
4898 | STCTRL_STUTTER_EN)); | ||
4899 | } | ||
4900 | |||
4901 | static void kv_restore_regs_for_reset(struct radeon_device *rdev, | ||
4902 | struct kv_reset_save_regs *save) | ||
4903 | { | ||
4904 | int i; | ||
4905 | |||
4906 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
4907 | WREG32(GMCON_PGFSM_CONFIG, 0x200010ff); | ||
4908 | |||
4909 | for (i = 0; i < 5; i++) | ||
4910 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
4911 | |||
4912 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
4913 | WREG32(GMCON_PGFSM_CONFIG, 0x300010ff); | ||
4914 | |||
4915 | for (i = 0; i < 5; i++) | ||
4916 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
4917 | |||
4918 | WREG32(GMCON_PGFSM_WRITE, 0x210000); | ||
4919 | WREG32(GMCON_PGFSM_CONFIG, 0xa00010ff); | ||
4920 | |||
4921 | for (i = 0; i < 5; i++) | ||
4922 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
4923 | |||
4924 | WREG32(GMCON_PGFSM_WRITE, 0x21003); | ||
4925 | WREG32(GMCON_PGFSM_CONFIG, 0xb00010ff); | ||
4926 | |||
4927 | for (i = 0; i < 5; i++) | ||
4928 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
4929 | |||
4930 | WREG32(GMCON_PGFSM_WRITE, 0x2b00); | ||
4931 | WREG32(GMCON_PGFSM_CONFIG, 0xc00010ff); | ||
4932 | |||
4933 | for (i = 0; i < 5; i++) | ||
4934 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
4935 | |||
4936 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
4937 | WREG32(GMCON_PGFSM_CONFIG, 0xd00010ff); | ||
4938 | |||
4939 | for (i = 0; i < 5; i++) | ||
4940 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
4941 | |||
4942 | WREG32(GMCON_PGFSM_WRITE, 0x420000); | ||
4943 | WREG32(GMCON_PGFSM_CONFIG, 0x100010ff); | ||
4944 | |||
4945 | for (i = 0; i < 5; i++) | ||
4946 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
4947 | |||
4948 | WREG32(GMCON_PGFSM_WRITE, 0x120202); | ||
4949 | WREG32(GMCON_PGFSM_CONFIG, 0x500010ff); | ||
4950 | |||
4951 | for (i = 0; i < 5; i++) | ||
4952 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
4953 | |||
4954 | WREG32(GMCON_PGFSM_WRITE, 0x3e3e36); | ||
4955 | WREG32(GMCON_PGFSM_CONFIG, 0x600010ff); | ||
4956 | |||
4957 | for (i = 0; i < 5; i++) | ||
4958 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
4959 | |||
4960 | WREG32(GMCON_PGFSM_WRITE, 0x373f3e); | ||
4961 | WREG32(GMCON_PGFSM_CONFIG, 0x700010ff); | ||
4962 | |||
4963 | for (i = 0; i < 5; i++) | ||
4964 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
4965 | |||
4966 | WREG32(GMCON_PGFSM_WRITE, 0x3e1332); | ||
4967 | WREG32(GMCON_PGFSM_CONFIG, 0xe00010ff); | ||
4968 | |||
4969 | WREG32(GMCON_MISC3, save->gmcon_misc3); | ||
4970 | WREG32(GMCON_MISC, save->gmcon_misc); | ||
4971 | WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute); | ||
4972 | } | ||
4973 | |||
4974 | static void cik_gpu_pci_config_reset(struct radeon_device *rdev) | ||
4975 | { | ||
4976 | struct evergreen_mc_save save; | ||
4977 | struct kv_reset_save_regs kv_save = { 0 }; | ||
4978 | u32 tmp, i; | ||
4979 | |||
4980 | dev_info(rdev->dev, "GPU pci config reset\n"); | ||
4981 | |||
4982 | /* disable dpm? */ | ||
4983 | |||
4984 | /* disable cg/pg */ | ||
4985 | cik_fini_pg(rdev); | ||
4986 | cik_fini_cg(rdev); | ||
4987 | |||
4988 | /* Disable GFX parsing/prefetching */ | ||
4989 | WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); | ||
4990 | |||
4991 | /* Disable MEC parsing/prefetching */ | ||
4992 | WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT); | ||
4993 | |||
4994 | /* sdma0 */ | ||
4995 | tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET); | ||
4996 | tmp |= SDMA_HALT; | ||
4997 | WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp); | ||
4998 | /* sdma1 */ | ||
4999 | tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET); | ||
5000 | tmp |= SDMA_HALT; | ||
5001 | WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp); | ||
5002 | /* XXX other engines? */ | ||
5003 | |||
5004 | /* halt the rlc, disable cp internal ints */ | ||
5005 | cik_rlc_stop(rdev); | ||
5006 | |||
5007 | udelay(50); | ||
5008 | |||
5009 | /* disable mem access */ | ||
5010 | evergreen_mc_stop(rdev, &save); | ||
5011 | if (evergreen_mc_wait_for_idle(rdev)) { | ||
5012 | dev_warn(rdev->dev, "Wait for MC idle timed out !\n"); | ||
5013 | } | ||
5014 | |||
5015 | if (rdev->flags & RADEON_IS_IGP) | ||
5016 | kv_save_regs_for_reset(rdev, &kv_save); | ||
5017 | |||
5018 | /* disable BM */ | ||
5019 | pci_clear_master(rdev->pdev); | ||
5020 | /* reset */ | ||
5021 | radeon_pci_config_reset(rdev); | ||
5022 | |||
5023 | udelay(100); | ||
5024 | |||
5025 | /* wait for asic to come out of reset */ | ||
5026 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
5027 | if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) | ||
5028 | break; | ||
5029 | udelay(1); | ||
5030 | } | ||
5031 | |||
5032 | /* does asic init need to be run first??? */ | ||
5033 | if (rdev->flags & RADEON_IS_IGP) | ||
5034 | kv_restore_regs_for_reset(rdev, &kv_save); | ||
5035 | } | ||
5036 | |||
4853 | /** | 5037 | /** |
4854 | * cik_asic_reset - soft reset GPU | 5038 | * cik_asic_reset - soft reset GPU |
4855 | * | 5039 | * |
@@ -4868,10 +5052,17 @@ int cik_asic_reset(struct radeon_device *rdev) | |||
4868 | if (reset_mask) | 5052 | if (reset_mask) |
4869 | r600_set_bios_scratch_engine_hung(rdev, true); | 5053 | r600_set_bios_scratch_engine_hung(rdev, true); |
4870 | 5054 | ||
5055 | /* try soft reset */ | ||
4871 | cik_gpu_soft_reset(rdev, reset_mask); | 5056 | cik_gpu_soft_reset(rdev, reset_mask); |
4872 | 5057 | ||
4873 | reset_mask = cik_gpu_check_soft_reset(rdev); | 5058 | reset_mask = cik_gpu_check_soft_reset(rdev); |
4874 | 5059 | ||
5060 | /* try pci config reset */ | ||
5061 | if (reset_mask && radeon_hard_reset) | ||
5062 | cik_gpu_pci_config_reset(rdev); | ||
5063 | |||
5064 | reset_mask = cik_gpu_check_soft_reset(rdev); | ||
5065 | |||
4875 | if (!reset_mask) | 5066 | if (!reset_mask) |
4876 | r600_set_bios_scratch_engine_hung(rdev, false); | 5067 | r600_set_bios_scratch_engine_hung(rdev, false); |
4877 | 5068 | ||
@@ -7501,26 +7692,7 @@ static int cik_startup(struct radeon_device *rdev) | |||
7501 | 7692 | ||
7502 | cik_mc_program(rdev); | 7693 | cik_mc_program(rdev); |
7503 | 7694 | ||
7504 | if (rdev->flags & RADEON_IS_IGP) { | 7695 | if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) { |
7505 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | ||
7506 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { | ||
7507 | r = cik_init_microcode(rdev); | ||
7508 | if (r) { | ||
7509 | DRM_ERROR("Failed to load firmware!\n"); | ||
7510 | return r; | ||
7511 | } | ||
7512 | } | ||
7513 | } else { | ||
7514 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | ||
7515 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw || | ||
7516 | !rdev->mc_fw) { | ||
7517 | r = cik_init_microcode(rdev); | ||
7518 | if (r) { | ||
7519 | DRM_ERROR("Failed to load firmware!\n"); | ||
7520 | return r; | ||
7521 | } | ||
7522 | } | ||
7523 | |||
7524 | r = ci_mc_load_microcode(rdev); | 7696 | r = ci_mc_load_microcode(rdev); |
7525 | if (r) { | 7697 | if (r) { |
7526 | DRM_ERROR("Failed to load MC firmware!\n"); | 7698 | DRM_ERROR("Failed to load MC firmware!\n"); |
@@ -7625,7 +7797,6 @@ static int cik_startup(struct radeon_device *rdev) | |||
7625 | 7797 | ||
7626 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 7798 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
7627 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 7799 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
7628 | CP_RB0_RPTR, CP_RB0_WPTR, | ||
7629 | PACKET3(PACKET3_NOP, 0x3FFF)); | 7800 | PACKET3(PACKET3_NOP, 0x3FFF)); |
7630 | if (r) | 7801 | if (r) |
7631 | return r; | 7802 | return r; |
@@ -7634,7 +7805,6 @@ static int cik_startup(struct radeon_device *rdev) | |||
7634 | /* type-2 packets are deprecated on MEC, use type-3 instead */ | 7805 | /* type-2 packets are deprecated on MEC, use type-3 instead */ |
7635 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; | 7806 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; |
7636 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, | 7807 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, |
7637 | CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR, | ||
7638 | PACKET3(PACKET3_NOP, 0x3FFF)); | 7808 | PACKET3(PACKET3_NOP, 0x3FFF)); |
7639 | if (r) | 7809 | if (r) |
7640 | return r; | 7810 | return r; |
@@ -7646,7 +7816,6 @@ static int cik_startup(struct radeon_device *rdev) | |||
7646 | /* type-2 packets are deprecated on MEC, use type-3 instead */ | 7816 | /* type-2 packets are deprecated on MEC, use type-3 instead */ |
7647 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; | 7817 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; |
7648 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, | 7818 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, |
7649 | CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR, | ||
7650 | PACKET3(PACKET3_NOP, 0x3FFF)); | 7819 | PACKET3(PACKET3_NOP, 0x3FFF)); |
7651 | if (r) | 7820 | if (r) |
7652 | return r; | 7821 | return r; |
@@ -7658,16 +7827,12 @@ static int cik_startup(struct radeon_device *rdev) | |||
7658 | 7827 | ||
7659 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; | 7828 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
7660 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, | 7829 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
7661 | SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET, | ||
7662 | SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET, | ||
7663 | SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); | 7830 | SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); |
7664 | if (r) | 7831 | if (r) |
7665 | return r; | 7832 | return r; |
7666 | 7833 | ||
7667 | ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; | 7834 | ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; |
7668 | r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, | 7835 | r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, |
7669 | SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET, | ||
7670 | SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET, | ||
7671 | SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); | 7836 | SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); |
7672 | if (r) | 7837 | if (r) |
7673 | return r; | 7838 | return r; |
@@ -7683,7 +7848,6 @@ static int cik_startup(struct radeon_device *rdev) | |||
7683 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 7848 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
7684 | if (ring->ring_size) { | 7849 | if (ring->ring_size) { |
7685 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, | 7850 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
7686 | UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, | ||
7687 | RADEON_CP_PACKET2); | 7851 | RADEON_CP_PACKET2); |
7688 | if (!r) | 7852 | if (!r) |
7689 | r = uvd_v1_0_init(rdev); | 7853 | r = uvd_v1_0_init(rdev); |
@@ -7729,6 +7893,8 @@ int cik_resume(struct radeon_device *rdev) | |||
7729 | /* init golden registers */ | 7893 | /* init golden registers */ |
7730 | cik_init_golden_registers(rdev); | 7894 | cik_init_golden_registers(rdev); |
7731 | 7895 | ||
7896 | radeon_pm_resume(rdev); | ||
7897 | |||
7732 | rdev->accel_working = true; | 7898 | rdev->accel_working = true; |
7733 | r = cik_startup(rdev); | 7899 | r = cik_startup(rdev); |
7734 | if (r) { | 7900 | if (r) { |
@@ -7752,6 +7918,7 @@ int cik_resume(struct radeon_device *rdev) | |||
7752 | */ | 7918 | */ |
7753 | int cik_suspend(struct radeon_device *rdev) | 7919 | int cik_suspend(struct radeon_device *rdev) |
7754 | { | 7920 | { |
7921 | radeon_pm_suspend(rdev); | ||
7755 | dce6_audio_fini(rdev); | 7922 | dce6_audio_fini(rdev); |
7756 | radeon_vm_manager_fini(rdev); | 7923 | radeon_vm_manager_fini(rdev); |
7757 | cik_cp_enable(rdev, false); | 7924 | cik_cp_enable(rdev, false); |
@@ -7833,6 +8000,30 @@ int cik_init(struct radeon_device *rdev) | |||
7833 | if (r) | 8000 | if (r) |
7834 | return r; | 8001 | return r; |
7835 | 8002 | ||
8003 | if (rdev->flags & RADEON_IS_IGP) { | ||
8004 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | ||
8005 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { | ||
8006 | r = cik_init_microcode(rdev); | ||
8007 | if (r) { | ||
8008 | DRM_ERROR("Failed to load firmware!\n"); | ||
8009 | return r; | ||
8010 | } | ||
8011 | } | ||
8012 | } else { | ||
8013 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | ||
8014 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw || | ||
8015 | !rdev->mc_fw) { | ||
8016 | r = cik_init_microcode(rdev); | ||
8017 | if (r) { | ||
8018 | DRM_ERROR("Failed to load firmware!\n"); | ||
8019 | return r; | ||
8020 | } | ||
8021 | } | ||
8022 | } | ||
8023 | |||
8024 | /* Initialize power management */ | ||
8025 | radeon_pm_init(rdev); | ||
8026 | |||
7836 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 8027 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
7837 | ring->ring_obj = NULL; | 8028 | ring->ring_obj = NULL; |
7838 | r600_ring_init(rdev, ring, 1024 * 1024); | 8029 | r600_ring_init(rdev, ring, 1024 * 1024); |
@@ -7913,6 +8104,7 @@ int cik_init(struct radeon_device *rdev) | |||
7913 | */ | 8104 | */ |
7914 | void cik_fini(struct radeon_device *rdev) | 8105 | void cik_fini(struct radeon_device *rdev) |
7915 | { | 8106 | { |
8107 | radeon_pm_fini(rdev); | ||
7916 | cik_cp_fini(rdev); | 8108 | cik_cp_fini(rdev); |
7917 | cik_sdma_fini(rdev); | 8109 | cik_sdma_fini(rdev); |
7918 | cik_fini_pg(rdev); | 8110 | cik_fini_pg(rdev); |
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 0300727a4f70..f0f9e1089409 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
@@ -52,6 +52,75 @@ u32 cik_gpu_check_soft_reset(struct radeon_device *rdev); | |||
52 | */ | 52 | */ |
53 | 53 | ||
54 | /** | 54 | /** |
55 | * cik_sdma_get_rptr - get the current read pointer | ||
56 | * | ||
57 | * @rdev: radeon_device pointer | ||
58 | * @ring: radeon ring pointer | ||
59 | * | ||
60 | * Get the current rptr from the hardware (CIK+). | ||
61 | */ | ||
62 | uint32_t cik_sdma_get_rptr(struct radeon_device *rdev, | ||
63 | struct radeon_ring *ring) | ||
64 | { | ||
65 | u32 rptr, reg; | ||
66 | |||
67 | if (rdev->wb.enabled) { | ||
68 | rptr = rdev->wb.wb[ring->rptr_offs/4]; | ||
69 | } else { | ||
70 | if (ring->idx == R600_RING_TYPE_DMA_INDEX) | ||
71 | reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET; | ||
72 | else | ||
73 | reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET; | ||
74 | |||
75 | rptr = RREG32(reg); | ||
76 | } | ||
77 | |||
78 | return (rptr & 0x3fffc) >> 2; | ||
79 | } | ||
80 | |||
81 | /** | ||
82 | * cik_sdma_get_wptr - get the current write pointer | ||
83 | * | ||
84 | * @rdev: radeon_device pointer | ||
85 | * @ring: radeon ring pointer | ||
86 | * | ||
87 | * Get the current wptr from the hardware (CIK+). | ||
88 | */ | ||
89 | uint32_t cik_sdma_get_wptr(struct radeon_device *rdev, | ||
90 | struct radeon_ring *ring) | ||
91 | { | ||
92 | u32 reg; | ||
93 | |||
94 | if (ring->idx == R600_RING_TYPE_DMA_INDEX) | ||
95 | reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET; | ||
96 | else | ||
97 | reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET; | ||
98 | |||
99 | return (RREG32(reg) & 0x3fffc) >> 2; | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * cik_sdma_set_wptr - commit the write pointer | ||
104 | * | ||
105 | * @rdev: radeon_device pointer | ||
106 | * @ring: radeon ring pointer | ||
107 | * | ||
108 | * Write the wptr back to the hardware (CIK+). | ||
109 | */ | ||
110 | void cik_sdma_set_wptr(struct radeon_device *rdev, | ||
111 | struct radeon_ring *ring) | ||
112 | { | ||
113 | u32 reg; | ||
114 | |||
115 | if (ring->idx == R600_RING_TYPE_DMA_INDEX) | ||
116 | reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET; | ||
117 | else | ||
118 | reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET; | ||
119 | |||
120 | WREG32(reg, (ring->wptr << 2) & 0x3fffc); | ||
121 | } | ||
122 | |||
123 | /** | ||
55 | * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine | 124 | * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine |
56 | * | 125 | * |
57 | * @rdev: radeon_device pointer | 126 | * @rdev: radeon_device pointer |
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 5964af5e5b2d..98bae9d7b74d 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h | |||
@@ -724,6 +724,17 @@ | |||
724 | 724 | ||
725 | #define ATC_MISC_CG 0x3350 | 725 | #define ATC_MISC_CG 0x3350 |
726 | 726 | ||
727 | #define GMCON_RENG_EXECUTE 0x3508 | ||
728 | #define RENG_EXECUTE_ON_PWR_UP (1 << 0) | ||
729 | #define GMCON_MISC 0x350c | ||
730 | #define RENG_EXECUTE_ON_REG_UPDATE (1 << 11) | ||
731 | #define STCTRL_STUTTER_EN (1 << 16) | ||
732 | |||
733 | #define GMCON_PGFSM_CONFIG 0x3538 | ||
734 | #define GMCON_PGFSM_WRITE 0x353c | ||
735 | #define GMCON_PGFSM_READ 0x3540 | ||
736 | #define GMCON_MISC3 0x3544 | ||
737 | |||
727 | #define MC_SEQ_CNTL_3 0x3600 | 738 | #define MC_SEQ_CNTL_3 0x3600 |
728 | # define CAC_EN (1 << 31) | 739 | # define CAC_EN (1 << 31) |
729 | #define MC_SEQ_G5PDX_CTRL 0x3604 | 740 | #define MC_SEQ_G5PDX_CTRL 0x3604 |
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 920e1e4a52c5..cf783fc0ef21 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c | |||
@@ -1905,21 +1905,6 @@ int cypress_dpm_enable(struct radeon_device *rdev) | |||
1905 | if (pi->mg_clock_gating) | 1905 | if (pi->mg_clock_gating) |
1906 | cypress_mg_clock_gating_enable(rdev, true); | 1906 | cypress_mg_clock_gating_enable(rdev, true); |
1907 | 1907 | ||
1908 | if (rdev->irq.installed && | ||
1909 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | ||
1910 | PPSMC_Result result; | ||
1911 | |||
1912 | ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | ||
1913 | if (ret) | ||
1914 | return ret; | ||
1915 | rdev->irq.dpm_thermal = true; | ||
1916 | radeon_irq_set(rdev); | ||
1917 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); | ||
1918 | |||
1919 | if (result != PPSMC_Result_OK) | ||
1920 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); | ||
1921 | } | ||
1922 | |||
1923 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 1908 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
1924 | 1909 | ||
1925 | return 0; | 1910 | return 0; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 9702e55e924e..4116d0279596 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -146,6 +146,7 @@ extern u32 si_get_csb_size(struct radeon_device *rdev); | |||
146 | extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); | 146 | extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); |
147 | extern u32 cik_get_csb_size(struct radeon_device *rdev); | 147 | extern u32 cik_get_csb_size(struct radeon_device *rdev); |
148 | extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); | 148 | extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); |
149 | extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev); | ||
149 | 150 | ||
150 | static const u32 evergreen_golden_registers[] = | 151 | static const u32 evergreen_golden_registers[] = |
151 | { | 152 | { |
@@ -3867,6 +3868,48 @@ static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
3867 | evergreen_print_gpu_status_regs(rdev); | 3868 | evergreen_print_gpu_status_regs(rdev); |
3868 | } | 3869 | } |
3869 | 3870 | ||
3871 | void evergreen_gpu_pci_config_reset(struct radeon_device *rdev) | ||
3872 | { | ||
3873 | struct evergreen_mc_save save; | ||
3874 | u32 tmp, i; | ||
3875 | |||
3876 | dev_info(rdev->dev, "GPU pci config reset\n"); | ||
3877 | |||
3878 | /* disable dpm? */ | ||
3879 | |||
3880 | /* Disable CP parsing/prefetching */ | ||
3881 | WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); | ||
3882 | udelay(50); | ||
3883 | /* Disable DMA */ | ||
3884 | tmp = RREG32(DMA_RB_CNTL); | ||
3885 | tmp &= ~DMA_RB_ENABLE; | ||
3886 | WREG32(DMA_RB_CNTL, tmp); | ||
3887 | /* XXX other engines? */ | ||
3888 | |||
3889 | /* halt the rlc */ | ||
3890 | r600_rlc_stop(rdev); | ||
3891 | |||
3892 | udelay(50); | ||
3893 | |||
3894 | /* set mclk/sclk to bypass */ | ||
3895 | rv770_set_clk_bypass_mode(rdev); | ||
3896 | /* disable BM */ | ||
3897 | pci_clear_master(rdev->pdev); | ||
3898 | /* disable mem access */ | ||
3899 | evergreen_mc_stop(rdev, &save); | ||
3900 | if (evergreen_mc_wait_for_idle(rdev)) { | ||
3901 | dev_warn(rdev->dev, "Wait for MC idle timed out !\n"); | ||
3902 | } | ||
3903 | /* reset */ | ||
3904 | radeon_pci_config_reset(rdev); | ||
3905 | /* wait for asic to come out of reset */ | ||
3906 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
3907 | if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) | ||
3908 | break; | ||
3909 | udelay(1); | ||
3910 | } | ||
3911 | } | ||
3912 | |||
3870 | int evergreen_asic_reset(struct radeon_device *rdev) | 3913 | int evergreen_asic_reset(struct radeon_device *rdev) |
3871 | { | 3914 | { |
3872 | u32 reset_mask; | 3915 | u32 reset_mask; |
@@ -3876,10 +3919,17 @@ int evergreen_asic_reset(struct radeon_device *rdev) | |||
3876 | if (reset_mask) | 3919 | if (reset_mask) |
3877 | r600_set_bios_scratch_engine_hung(rdev, true); | 3920 | r600_set_bios_scratch_engine_hung(rdev, true); |
3878 | 3921 | ||
3922 | /* try soft reset */ | ||
3879 | evergreen_gpu_soft_reset(rdev, reset_mask); | 3923 | evergreen_gpu_soft_reset(rdev, reset_mask); |
3880 | 3924 | ||
3881 | reset_mask = evergreen_gpu_check_soft_reset(rdev); | 3925 | reset_mask = evergreen_gpu_check_soft_reset(rdev); |
3882 | 3926 | ||
3927 | /* try pci config reset */ | ||
3928 | if (reset_mask && radeon_hard_reset) | ||
3929 | evergreen_gpu_pci_config_reset(rdev); | ||
3930 | |||
3931 | reset_mask = evergreen_gpu_check_soft_reset(rdev); | ||
3932 | |||
3883 | if (!reset_mask) | 3933 | if (!reset_mask) |
3884 | r600_set_bios_scratch_engine_hung(rdev, false); | 3934 | r600_set_bios_scratch_engine_hung(rdev, false); |
3885 | 3935 | ||
@@ -5109,27 +5159,12 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
5109 | 5159 | ||
5110 | evergreen_mc_program(rdev); | 5160 | evergreen_mc_program(rdev); |
5111 | 5161 | ||
5112 | if (ASIC_IS_DCE5(rdev)) { | 5162 | if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) { |
5113 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | ||
5114 | r = ni_init_microcode(rdev); | ||
5115 | if (r) { | ||
5116 | DRM_ERROR("Failed to load firmware!\n"); | ||
5117 | return r; | ||
5118 | } | ||
5119 | } | ||
5120 | r = ni_mc_load_microcode(rdev); | 5163 | r = ni_mc_load_microcode(rdev); |
5121 | if (r) { | 5164 | if (r) { |
5122 | DRM_ERROR("Failed to load MC firmware!\n"); | 5165 | DRM_ERROR("Failed to load MC firmware!\n"); |
5123 | return r; | 5166 | return r; |
5124 | } | 5167 | } |
5125 | } else { | ||
5126 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
5127 | r = r600_init_microcode(rdev); | ||
5128 | if (r) { | ||
5129 | DRM_ERROR("Failed to load firmware!\n"); | ||
5130 | return r; | ||
5131 | } | ||
5132 | } | ||
5133 | } | 5168 | } |
5134 | 5169 | ||
5135 | if (rdev->flags & RADEON_IS_AGP) { | 5170 | if (rdev->flags & RADEON_IS_AGP) { |
@@ -5199,14 +5234,12 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
5199 | 5234 | ||
5200 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 5235 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
5201 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 5236 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
5202 | R600_CP_RB_RPTR, R600_CP_RB_WPTR, | ||
5203 | RADEON_CP_PACKET2); | 5237 | RADEON_CP_PACKET2); |
5204 | if (r) | 5238 | if (r) |
5205 | return r; | 5239 | return r; |
5206 | 5240 | ||
5207 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; | 5241 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
5208 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, | 5242 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
5209 | DMA_RB_RPTR, DMA_RB_WPTR, | ||
5210 | DMA_PACKET(DMA_PACKET_NOP, 0, 0)); | 5243 | DMA_PACKET(DMA_PACKET_NOP, 0, 0)); |
5211 | if (r) | 5244 | if (r) |
5212 | return r; | 5245 | return r; |
@@ -5224,7 +5257,6 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
5224 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 5257 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
5225 | if (ring->ring_size) { | 5258 | if (ring->ring_size) { |
5226 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, | 5259 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
5227 | UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, | ||
5228 | RADEON_CP_PACKET2); | 5260 | RADEON_CP_PACKET2); |
5229 | if (!r) | 5261 | if (!r) |
5230 | r = uvd_v1_0_init(rdev); | 5262 | r = uvd_v1_0_init(rdev); |
@@ -5267,6 +5299,8 @@ int evergreen_resume(struct radeon_device *rdev) | |||
5267 | /* init golden registers */ | 5299 | /* init golden registers */ |
5268 | evergreen_init_golden_registers(rdev); | 5300 | evergreen_init_golden_registers(rdev); |
5269 | 5301 | ||
5302 | radeon_pm_resume(rdev); | ||
5303 | |||
5270 | rdev->accel_working = true; | 5304 | rdev->accel_working = true; |
5271 | r = evergreen_startup(rdev); | 5305 | r = evergreen_startup(rdev); |
5272 | if (r) { | 5306 | if (r) { |
@@ -5281,6 +5315,7 @@ int evergreen_resume(struct radeon_device *rdev) | |||
5281 | 5315 | ||
5282 | int evergreen_suspend(struct radeon_device *rdev) | 5316 | int evergreen_suspend(struct radeon_device *rdev) |
5283 | { | 5317 | { |
5318 | radeon_pm_suspend(rdev); | ||
5284 | r600_audio_fini(rdev); | 5319 | r600_audio_fini(rdev); |
5285 | uvd_v1_0_fini(rdev); | 5320 | uvd_v1_0_fini(rdev); |
5286 | radeon_uvd_suspend(rdev); | 5321 | radeon_uvd_suspend(rdev); |
@@ -5357,6 +5392,27 @@ int evergreen_init(struct radeon_device *rdev) | |||
5357 | if (r) | 5392 | if (r) |
5358 | return r; | 5393 | return r; |
5359 | 5394 | ||
5395 | if (ASIC_IS_DCE5(rdev)) { | ||
5396 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | ||
5397 | r = ni_init_microcode(rdev); | ||
5398 | if (r) { | ||
5399 | DRM_ERROR("Failed to load firmware!\n"); | ||
5400 | return r; | ||
5401 | } | ||
5402 | } | ||
5403 | } else { | ||
5404 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
5405 | r = r600_init_microcode(rdev); | ||
5406 | if (r) { | ||
5407 | DRM_ERROR("Failed to load firmware!\n"); | ||
5408 | return r; | ||
5409 | } | ||
5410 | } | ||
5411 | } | ||
5412 | |||
5413 | /* Initialize power management */ | ||
5414 | radeon_pm_init(rdev); | ||
5415 | |||
5360 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; | 5416 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
5361 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); | 5417 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
5362 | 5418 | ||
@@ -5409,6 +5465,7 @@ int evergreen_init(struct radeon_device *rdev) | |||
5409 | 5465 | ||
5410 | void evergreen_fini(struct radeon_device *rdev) | 5466 | void evergreen_fini(struct radeon_device *rdev) |
5411 | { | 5467 | { |
5468 | radeon_pm_fini(rdev); | ||
5412 | r600_audio_fini(rdev); | 5469 | r600_audio_fini(rdev); |
5413 | r700_cp_fini(rdev); | 5470 | r700_cp_fini(rdev); |
5414 | r600_dma_fini(rdev); | 5471 | r600_dma_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index eb8ac315f92f..c7cac07f139b 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -967,7 +967,10 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p) | |||
967 | if (track->cb_dirty) { | 967 | if (track->cb_dirty) { |
968 | tmp = track->cb_target_mask; | 968 | tmp = track->cb_target_mask; |
969 | for (i = 0; i < 8; i++) { | 969 | for (i = 0; i < 8; i++) { |
970 | if ((tmp >> (i * 4)) & 0xF) { | 970 | u32 format = G_028C70_FORMAT(track->cb_color_info[i]); |
971 | |||
972 | if (format != V_028C70_COLOR_INVALID && | ||
973 | (tmp >> (i * 4)) & 0xF) { | ||
971 | /* at least one component is enabled */ | 974 | /* at least one component is enabled */ |
972 | if (track->cb_color_bo[i] == NULL) { | 975 | if (track->cb_color_bo[i] == NULL) { |
973 | dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", | 976 | dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", |
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index 8a4e641f0e3c..a0f63ff5a5e9 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h | |||
@@ -33,6 +33,7 @@ | |||
33 | #define EVERGREEN_PIF_PHY0_DATA 0xc | 33 | #define EVERGREEN_PIF_PHY0_DATA 0xc |
34 | #define EVERGREEN_PIF_PHY1_INDEX 0x10 | 34 | #define EVERGREEN_PIF_PHY1_INDEX 0x10 |
35 | #define EVERGREEN_PIF_PHY1_DATA 0x14 | 35 | #define EVERGREEN_PIF_PHY1_DATA 0x14 |
36 | #define EVERGREEN_MM_INDEX_HI 0x18 | ||
36 | 37 | ||
37 | #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310 | 38 | #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310 |
38 | #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324 | 39 | #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324 |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 17f990798992..f9c7963b3ee6 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -82,12 +82,16 @@ | |||
82 | #define CG_SPLL_FUNC_CNTL_2 0x604 | 82 | #define CG_SPLL_FUNC_CNTL_2 0x604 |
83 | #define SCLK_MUX_SEL(x) ((x) << 0) | 83 | #define SCLK_MUX_SEL(x) ((x) << 0) |
84 | #define SCLK_MUX_SEL_MASK (0x1ff << 0) | 84 | #define SCLK_MUX_SEL_MASK (0x1ff << 0) |
85 | #define SCLK_MUX_UPDATE (1 << 26) | ||
85 | #define CG_SPLL_FUNC_CNTL_3 0x608 | 86 | #define CG_SPLL_FUNC_CNTL_3 0x608 |
86 | #define SPLL_FB_DIV(x) ((x) << 0) | 87 | #define SPLL_FB_DIV(x) ((x) << 0) |
87 | #define SPLL_FB_DIV_MASK (0x3ffffff << 0) | 88 | #define SPLL_FB_DIV_MASK (0x3ffffff << 0) |
88 | #define SPLL_DITHEN (1 << 28) | 89 | #define SPLL_DITHEN (1 << 28) |
90 | #define CG_SPLL_STATUS 0x60c | ||
91 | #define SPLL_CHG_STATUS (1 << 1) | ||
89 | 92 | ||
90 | #define MPLL_CNTL_MODE 0x61c | 93 | #define MPLL_CNTL_MODE 0x61c |
94 | # define MPLL_MCLK_SEL (1 << 11) | ||
91 | # define SS_SSEN (1 << 24) | 95 | # define SS_SSEN (1 << 24) |
92 | # define SS_DSMODE_EN (1 << 25) | 96 | # define SS_DSMODE_EN (1 << 25) |
93 | 97 | ||
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index b41905573cd2..b6e01d5d2cce 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c | |||
@@ -1126,11 +1126,6 @@ int kv_dpm_enable(struct radeon_device *rdev) | |||
1126 | struct kv_power_info *pi = kv_get_pi(rdev); | 1126 | struct kv_power_info *pi = kv_get_pi(rdev); |
1127 | int ret; | 1127 | int ret; |
1128 | 1128 | ||
1129 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
1130 | RADEON_CG_BLOCK_SDMA | | ||
1131 | RADEON_CG_BLOCK_BIF | | ||
1132 | RADEON_CG_BLOCK_HDP), false); | ||
1133 | |||
1134 | ret = kv_process_firmware_header(rdev); | 1129 | ret = kv_process_firmware_header(rdev); |
1135 | if (ret) { | 1130 | if (ret) { |
1136 | DRM_ERROR("kv_process_firmware_header failed\n"); | 1131 | DRM_ERROR("kv_process_firmware_header failed\n"); |
@@ -1215,6 +1210,21 @@ int kv_dpm_enable(struct radeon_device *rdev) | |||
1215 | 1210 | ||
1216 | kv_reset_acp_boot_level(rdev); | 1211 | kv_reset_acp_boot_level(rdev); |
1217 | 1212 | ||
1213 | ret = kv_smc_bapm_enable(rdev, false); | ||
1214 | if (ret) { | ||
1215 | DRM_ERROR("kv_smc_bapm_enable failed\n"); | ||
1216 | return ret; | ||
1217 | } | ||
1218 | |||
1219 | kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
1220 | |||
1221 | return ret; | ||
1222 | } | ||
1223 | |||
1224 | int kv_dpm_late_enable(struct radeon_device *rdev) | ||
1225 | { | ||
1226 | int ret; | ||
1227 | |||
1218 | if (rdev->irq.installed && | 1228 | if (rdev->irq.installed && |
1219 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 1229 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
1220 | ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | 1230 | ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
@@ -1226,35 +1236,17 @@ int kv_dpm_enable(struct radeon_device *rdev) | |||
1226 | radeon_irq_set(rdev); | 1236 | radeon_irq_set(rdev); |
1227 | } | 1237 | } |
1228 | 1238 | ||
1229 | ret = kv_smc_bapm_enable(rdev, false); | ||
1230 | if (ret) { | ||
1231 | DRM_ERROR("kv_smc_bapm_enable failed\n"); | ||
1232 | return ret; | ||
1233 | } | ||
1234 | |||
1235 | /* powerdown unused blocks for now */ | 1239 | /* powerdown unused blocks for now */ |
1236 | kv_dpm_powergate_acp(rdev, true); | 1240 | kv_dpm_powergate_acp(rdev, true); |
1237 | kv_dpm_powergate_samu(rdev, true); | 1241 | kv_dpm_powergate_samu(rdev, true); |
1238 | kv_dpm_powergate_vce(rdev, true); | 1242 | kv_dpm_powergate_vce(rdev, true); |
1239 | kv_dpm_powergate_uvd(rdev, true); | 1243 | kv_dpm_powergate_uvd(rdev, true); |
1240 | 1244 | ||
1241 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
1242 | RADEON_CG_BLOCK_SDMA | | ||
1243 | RADEON_CG_BLOCK_BIF | | ||
1244 | RADEON_CG_BLOCK_HDP), true); | ||
1245 | |||
1246 | kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
1247 | |||
1248 | return ret; | 1245 | return ret; |
1249 | } | 1246 | } |
1250 | 1247 | ||
1251 | void kv_dpm_disable(struct radeon_device *rdev) | 1248 | void kv_dpm_disable(struct radeon_device *rdev) |
1252 | { | 1249 | { |
1253 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
1254 | RADEON_CG_BLOCK_SDMA | | ||
1255 | RADEON_CG_BLOCK_BIF | | ||
1256 | RADEON_CG_BLOCK_HDP), false); | ||
1257 | |||
1258 | kv_smc_bapm_enable(rdev, false); | 1250 | kv_smc_bapm_enable(rdev, false); |
1259 | 1251 | ||
1260 | /* powerup blocks */ | 1252 | /* powerup blocks */ |
@@ -1779,11 +1771,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) | |||
1779 | /*struct radeon_ps *old_ps = &pi->current_rps;*/ | 1771 | /*struct radeon_ps *old_ps = &pi->current_rps;*/ |
1780 | int ret; | 1772 | int ret; |
1781 | 1773 | ||
1782 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
1783 | RADEON_CG_BLOCK_SDMA | | ||
1784 | RADEON_CG_BLOCK_BIF | | ||
1785 | RADEON_CG_BLOCK_HDP), false); | ||
1786 | |||
1787 | if (pi->bapm_enable) { | 1774 | if (pi->bapm_enable) { |
1788 | ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power); | 1775 | ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power); |
1789 | if (ret) { | 1776 | if (ret) { |
@@ -1849,11 +1836,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) | |||
1849 | } | 1836 | } |
1850 | } | 1837 | } |
1851 | 1838 | ||
1852 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
1853 | RADEON_CG_BLOCK_SDMA | | ||
1854 | RADEON_CG_BLOCK_BIF | | ||
1855 | RADEON_CG_BLOCK_HDP), true); | ||
1856 | |||
1857 | return 0; | 1839 | return 0; |
1858 | } | 1840 | } |
1859 | 1841 | ||
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 11aab2ab54ce..9f11a55962b5 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -174,6 +174,7 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev); | |||
174 | extern void evergreen_program_aspm(struct radeon_device *rdev); | 174 | extern void evergreen_program_aspm(struct radeon_device *rdev); |
175 | extern void sumo_rlc_fini(struct radeon_device *rdev); | 175 | extern void sumo_rlc_fini(struct radeon_device *rdev); |
176 | extern int sumo_rlc_init(struct radeon_device *rdev); | 176 | extern int sumo_rlc_init(struct radeon_device *rdev); |
177 | extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev); | ||
177 | 178 | ||
178 | /* Firmware Names */ | 179 | /* Firmware Names */ |
179 | MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); | 180 | MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); |
@@ -1386,6 +1387,55 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable) | |||
1386 | } | 1387 | } |
1387 | } | 1388 | } |
1388 | 1389 | ||
1390 | u32 cayman_gfx_get_rptr(struct radeon_device *rdev, | ||
1391 | struct radeon_ring *ring) | ||
1392 | { | ||
1393 | u32 rptr; | ||
1394 | |||
1395 | if (rdev->wb.enabled) | ||
1396 | rptr = rdev->wb.wb[ring->rptr_offs/4]; | ||
1397 | else { | ||
1398 | if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) | ||
1399 | rptr = RREG32(CP_RB0_RPTR); | ||
1400 | else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) | ||
1401 | rptr = RREG32(CP_RB1_RPTR); | ||
1402 | else | ||
1403 | rptr = RREG32(CP_RB2_RPTR); | ||
1404 | } | ||
1405 | |||
1406 | return rptr; | ||
1407 | } | ||
1408 | |||
1409 | u32 cayman_gfx_get_wptr(struct radeon_device *rdev, | ||
1410 | struct radeon_ring *ring) | ||
1411 | { | ||
1412 | u32 wptr; | ||
1413 | |||
1414 | if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) | ||
1415 | wptr = RREG32(CP_RB0_WPTR); | ||
1416 | else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) | ||
1417 | wptr = RREG32(CP_RB1_WPTR); | ||
1418 | else | ||
1419 | wptr = RREG32(CP_RB2_WPTR); | ||
1420 | |||
1421 | return wptr; | ||
1422 | } | ||
1423 | |||
1424 | void cayman_gfx_set_wptr(struct radeon_device *rdev, | ||
1425 | struct radeon_ring *ring) | ||
1426 | { | ||
1427 | if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) { | ||
1428 | WREG32(CP_RB0_WPTR, ring->wptr); | ||
1429 | (void)RREG32(CP_RB0_WPTR); | ||
1430 | } else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) { | ||
1431 | WREG32(CP_RB1_WPTR, ring->wptr); | ||
1432 | (void)RREG32(CP_RB1_WPTR); | ||
1433 | } else { | ||
1434 | WREG32(CP_RB2_WPTR, ring->wptr); | ||
1435 | (void)RREG32(CP_RB2_WPTR); | ||
1436 | } | ||
1437 | } | ||
1438 | |||
1389 | static int cayman_cp_load_microcode(struct radeon_device *rdev) | 1439 | static int cayman_cp_load_microcode(struct radeon_device *rdev) |
1390 | { | 1440 | { |
1391 | const __be32 *fw_data; | 1441 | const __be32 *fw_data; |
@@ -1514,6 +1564,16 @@ static int cayman_cp_resume(struct radeon_device *rdev) | |||
1514 | CP_RB1_BASE, | 1564 | CP_RB1_BASE, |
1515 | CP_RB2_BASE | 1565 | CP_RB2_BASE |
1516 | }; | 1566 | }; |
1567 | static const unsigned cp_rb_rptr[] = { | ||
1568 | CP_RB0_RPTR, | ||
1569 | CP_RB1_RPTR, | ||
1570 | CP_RB2_RPTR | ||
1571 | }; | ||
1572 | static const unsigned cp_rb_wptr[] = { | ||
1573 | CP_RB0_WPTR, | ||
1574 | CP_RB1_WPTR, | ||
1575 | CP_RB2_WPTR | ||
1576 | }; | ||
1517 | struct radeon_ring *ring; | 1577 | struct radeon_ring *ring; |
1518 | int i, r; | 1578 | int i, r; |
1519 | 1579 | ||
@@ -1572,8 +1632,8 @@ static int cayman_cp_resume(struct radeon_device *rdev) | |||
1572 | WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA); | 1632 | WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA); |
1573 | 1633 | ||
1574 | ring->rptr = ring->wptr = 0; | 1634 | ring->rptr = ring->wptr = 0; |
1575 | WREG32(ring->rptr_reg, ring->rptr); | 1635 | WREG32(cp_rb_rptr[i], ring->rptr); |
1576 | WREG32(ring->wptr_reg, ring->wptr); | 1636 | WREG32(cp_rb_wptr[i], ring->wptr); |
1577 | 1637 | ||
1578 | mdelay(1); | 1638 | mdelay(1); |
1579 | WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA); | 1639 | WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA); |
@@ -1819,8 +1879,10 @@ int cayman_asic_reset(struct radeon_device *rdev) | |||
1819 | 1879 | ||
1820 | reset_mask = cayman_gpu_check_soft_reset(rdev); | 1880 | reset_mask = cayman_gpu_check_soft_reset(rdev); |
1821 | 1881 | ||
1822 | if (!reset_mask) | 1882 | if (reset_mask) |
1823 | r600_set_bios_scratch_engine_hung(rdev, false); | 1883 | evergreen_gpu_pci_config_reset(rdev); |
1884 | |||
1885 | r600_set_bios_scratch_engine_hung(rdev, false); | ||
1824 | 1886 | ||
1825 | return 0; | 1887 | return 0; |
1826 | } | 1888 | } |
@@ -1866,23 +1928,7 @@ static int cayman_startup(struct radeon_device *rdev) | |||
1866 | 1928 | ||
1867 | evergreen_mc_program(rdev); | 1929 | evergreen_mc_program(rdev); |
1868 | 1930 | ||
1869 | if (rdev->flags & RADEON_IS_IGP) { | 1931 | if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) { |
1870 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
1871 | r = ni_init_microcode(rdev); | ||
1872 | if (r) { | ||
1873 | DRM_ERROR("Failed to load firmware!\n"); | ||
1874 | return r; | ||
1875 | } | ||
1876 | } | ||
1877 | } else { | ||
1878 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | ||
1879 | r = ni_init_microcode(rdev); | ||
1880 | if (r) { | ||
1881 | DRM_ERROR("Failed to load firmware!\n"); | ||
1882 | return r; | ||
1883 | } | ||
1884 | } | ||
1885 | |||
1886 | r = ni_mc_load_microcode(rdev); | 1932 | r = ni_mc_load_microcode(rdev); |
1887 | if (r) { | 1933 | if (r) { |
1888 | DRM_ERROR("Failed to load MC firmware!\n"); | 1934 | DRM_ERROR("Failed to load MC firmware!\n"); |
@@ -1969,23 +2015,18 @@ static int cayman_startup(struct radeon_device *rdev) | |||
1969 | evergreen_irq_set(rdev); | 2015 | evergreen_irq_set(rdev); |
1970 | 2016 | ||
1971 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 2017 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
1972 | CP_RB0_RPTR, CP_RB0_WPTR, | ||
1973 | RADEON_CP_PACKET2); | 2018 | RADEON_CP_PACKET2); |
1974 | if (r) | 2019 | if (r) |
1975 | return r; | 2020 | return r; |
1976 | 2021 | ||
1977 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; | 2022 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
1978 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, | 2023 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
1979 | DMA_RB_RPTR + DMA0_REGISTER_OFFSET, | ||
1980 | DMA_RB_WPTR + DMA0_REGISTER_OFFSET, | ||
1981 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); | 2024 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
1982 | if (r) | 2025 | if (r) |
1983 | return r; | 2026 | return r; |
1984 | 2027 | ||
1985 | ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; | 2028 | ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; |
1986 | r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, | 2029 | r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, |
1987 | DMA_RB_RPTR + DMA1_REGISTER_OFFSET, | ||
1988 | DMA_RB_WPTR + DMA1_REGISTER_OFFSET, | ||
1989 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); | 2030 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
1990 | if (r) | 2031 | if (r) |
1991 | return r; | 2032 | return r; |
@@ -2004,7 +2045,6 @@ static int cayman_startup(struct radeon_device *rdev) | |||
2004 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 2045 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
2005 | if (ring->ring_size) { | 2046 | if (ring->ring_size) { |
2006 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, | 2047 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
2007 | UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, | ||
2008 | RADEON_CP_PACKET2); | 2048 | RADEON_CP_PACKET2); |
2009 | if (!r) | 2049 | if (!r) |
2010 | r = uvd_v1_0_init(rdev); | 2050 | r = uvd_v1_0_init(rdev); |
@@ -2051,6 +2091,8 @@ int cayman_resume(struct radeon_device *rdev) | |||
2051 | /* init golden registers */ | 2091 | /* init golden registers */ |
2052 | ni_init_golden_registers(rdev); | 2092 | ni_init_golden_registers(rdev); |
2053 | 2093 | ||
2094 | radeon_pm_resume(rdev); | ||
2095 | |||
2054 | rdev->accel_working = true; | 2096 | rdev->accel_working = true; |
2055 | r = cayman_startup(rdev); | 2097 | r = cayman_startup(rdev); |
2056 | if (r) { | 2098 | if (r) { |
@@ -2063,6 +2105,7 @@ int cayman_resume(struct radeon_device *rdev) | |||
2063 | 2105 | ||
2064 | int cayman_suspend(struct radeon_device *rdev) | 2106 | int cayman_suspend(struct radeon_device *rdev) |
2065 | { | 2107 | { |
2108 | radeon_pm_suspend(rdev); | ||
2066 | if (ASIC_IS_DCE6(rdev)) | 2109 | if (ASIC_IS_DCE6(rdev)) |
2067 | dce6_audio_fini(rdev); | 2110 | dce6_audio_fini(rdev); |
2068 | else | 2111 | else |
@@ -2133,6 +2176,27 @@ int cayman_init(struct radeon_device *rdev) | |||
2133 | if (r) | 2176 | if (r) |
2134 | return r; | 2177 | return r; |
2135 | 2178 | ||
2179 | if (rdev->flags & RADEON_IS_IGP) { | ||
2180 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
2181 | r = ni_init_microcode(rdev); | ||
2182 | if (r) { | ||
2183 | DRM_ERROR("Failed to load firmware!\n"); | ||
2184 | return r; | ||
2185 | } | ||
2186 | } | ||
2187 | } else { | ||
2188 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | ||
2189 | r = ni_init_microcode(rdev); | ||
2190 | if (r) { | ||
2191 | DRM_ERROR("Failed to load firmware!\n"); | ||
2192 | return r; | ||
2193 | } | ||
2194 | } | ||
2195 | } | ||
2196 | |||
2197 | /* Initialize power management */ | ||
2198 | radeon_pm_init(rdev); | ||
2199 | |||
2136 | ring->ring_obj = NULL; | 2200 | ring->ring_obj = NULL; |
2137 | r600_ring_init(rdev, ring, 1024 * 1024); | 2201 | r600_ring_init(rdev, ring, 1024 * 1024); |
2138 | 2202 | ||
@@ -2192,6 +2256,7 @@ int cayman_init(struct radeon_device *rdev) | |||
2192 | 2256 | ||
2193 | void cayman_fini(struct radeon_device *rdev) | 2257 | void cayman_fini(struct radeon_device *rdev) |
2194 | { | 2258 | { |
2259 | radeon_pm_fini(rdev); | ||
2195 | cayman_cp_fini(rdev); | 2260 | cayman_cp_fini(rdev); |
2196 | cayman_dma_fini(rdev); | 2261 | cayman_dma_fini(rdev); |
2197 | r600_irq_fini(rdev); | 2262 | r600_irq_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index bdeb65ed3658..51424ab79432 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c | |||
@@ -43,6 +43,75 @@ u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev); | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | /** | 45 | /** |
46 | * cayman_dma_get_rptr - get the current read pointer | ||
47 | * | ||
48 | * @rdev: radeon_device pointer | ||
49 | * @ring: radeon ring pointer | ||
50 | * | ||
51 | * Get the current rptr from the hardware (cayman+). | ||
52 | */ | ||
53 | uint32_t cayman_dma_get_rptr(struct radeon_device *rdev, | ||
54 | struct radeon_ring *ring) | ||
55 | { | ||
56 | u32 rptr, reg; | ||
57 | |||
58 | if (rdev->wb.enabled) { | ||
59 | rptr = rdev->wb.wb[ring->rptr_offs/4]; | ||
60 | } else { | ||
61 | if (ring->idx == R600_RING_TYPE_DMA_INDEX) | ||
62 | reg = DMA_RB_RPTR + DMA0_REGISTER_OFFSET; | ||
63 | else | ||
64 | reg = DMA_RB_RPTR + DMA1_REGISTER_OFFSET; | ||
65 | |||
66 | rptr = RREG32(reg); | ||
67 | } | ||
68 | |||
69 | return (rptr & 0x3fffc) >> 2; | ||
70 | } | ||
71 | |||
72 | /** | ||
73 | * cayman_dma_get_wptr - get the current write pointer | ||
74 | * | ||
75 | * @rdev: radeon_device pointer | ||
76 | * @ring: radeon ring pointer | ||
77 | * | ||
78 | * Get the current wptr from the hardware (cayman+). | ||
79 | */ | ||
80 | uint32_t cayman_dma_get_wptr(struct radeon_device *rdev, | ||
81 | struct radeon_ring *ring) | ||
82 | { | ||
83 | u32 reg; | ||
84 | |||
85 | if (ring->idx == R600_RING_TYPE_DMA_INDEX) | ||
86 | reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET; | ||
87 | else | ||
88 | reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET; | ||
89 | |||
90 | return (RREG32(reg) & 0x3fffc) >> 2; | ||
91 | } | ||
92 | |||
93 | /** | ||
94 | * cayman_dma_set_wptr - commit the write pointer | ||
95 | * | ||
96 | * @rdev: radeon_device pointer | ||
97 | * @ring: radeon ring pointer | ||
98 | * | ||
99 | * Write the wptr back to the hardware (cayman+). | ||
100 | */ | ||
101 | void cayman_dma_set_wptr(struct radeon_device *rdev, | ||
102 | struct radeon_ring *ring) | ||
103 | { | ||
104 | u32 reg; | ||
105 | |||
106 | if (ring->idx == R600_RING_TYPE_DMA_INDEX) | ||
107 | reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET; | ||
108 | else | ||
109 | reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET; | ||
110 | |||
111 | WREG32(reg, (ring->wptr << 2) & 0x3fffc); | ||
112 | } | ||
113 | |||
114 | /** | ||
46 | * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine | 115 | * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine |
47 | * | 116 | * |
48 | * @rdev: radeon_device pointer | 117 | * @rdev: radeon_device pointer |
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 49c4d48f54d6..c351226ecb31 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c | |||
@@ -720,6 +720,8 @@ static const u32 cayman_sysls_enable[] = | |||
720 | struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev); | 720 | struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev); |
721 | struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev); | 721 | struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev); |
722 | 722 | ||
723 | extern int ni_mc_load_microcode(struct radeon_device *rdev); | ||
724 | |||
723 | struct ni_power_info *ni_get_pi(struct radeon_device *rdev) | 725 | struct ni_power_info *ni_get_pi(struct radeon_device *rdev) |
724 | { | 726 | { |
725 | struct ni_power_info *pi = rdev->pm.dpm.priv; | 727 | struct ni_power_info *pi = rdev->pm.dpm.priv; |
@@ -3565,7 +3567,11 @@ void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, | |||
3565 | void ni_dpm_setup_asic(struct radeon_device *rdev) | 3567 | void ni_dpm_setup_asic(struct radeon_device *rdev) |
3566 | { | 3568 | { |
3567 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | 3569 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
3570 | int r; | ||
3568 | 3571 | ||
3572 | r = ni_mc_load_microcode(rdev); | ||
3573 | if (r) | ||
3574 | DRM_ERROR("Failed to load MC firmware!\n"); | ||
3569 | ni_read_clock_registers(rdev); | 3575 | ni_read_clock_registers(rdev); |
3570 | btc_read_arb_registers(rdev); | 3576 | btc_read_arb_registers(rdev); |
3571 | rv770_get_memory_type(rdev); | 3577 | rv770_get_memory_type(rdev); |
@@ -3710,21 +3716,6 @@ int ni_dpm_enable(struct radeon_device *rdev) | |||
3710 | if (eg_pi->ls_clock_gating) | 3716 | if (eg_pi->ls_clock_gating) |
3711 | ni_ls_clockgating_enable(rdev, true); | 3717 | ni_ls_clockgating_enable(rdev, true); |
3712 | 3718 | ||
3713 | if (rdev->irq.installed && | ||
3714 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | ||
3715 | PPSMC_Result result; | ||
3716 | |||
3717 | ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000); | ||
3718 | if (ret) | ||
3719 | return ret; | ||
3720 | rdev->irq.dpm_thermal = true; | ||
3721 | radeon_irq_set(rdev); | ||
3722 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); | ||
3723 | |||
3724 | if (result != PPSMC_Result_OK) | ||
3725 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); | ||
3726 | } | ||
3727 | |||
3728 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 3719 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
3729 | 3720 | ||
3730 | ni_update_current_ps(rdev, boot_ps); | 3721 | ni_update_current_ps(rdev, boot_ps); |
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h index da43ab328833..2d532996c697 100644 --- a/drivers/gpu/drm/radeon/pptable.h +++ b/drivers/gpu/drm/radeon/pptable.h | |||
@@ -23,7 +23,7 @@ | |||
23 | #ifndef _PPTABLE_H | 23 | #ifndef _PPTABLE_H |
24 | #define _PPTABLE_H | 24 | #define _PPTABLE_H |
25 | 25 | ||
26 | #pragma pack(push, 1) | 26 | #pragma pack(1) |
27 | 27 | ||
28 | typedef struct _ATOM_PPLIB_THERMALCONTROLLER | 28 | typedef struct _ATOM_PPLIB_THERMALCONTROLLER |
29 | 29 | ||
@@ -677,6 +677,6 @@ typedef struct _ATOM_PPLIB_PPM_Table | |||
677 | ULONG ulTjmax; | 677 | ULONG ulTjmax; |
678 | } ATOM_PPLIB_PPM_Table; | 678 | } ATOM_PPLIB_PPM_Table; |
679 | 679 | ||
680 | #pragma pack(pop) | 680 | #pragma pack() |
681 | 681 | ||
682 | #endif | 682 | #endif |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 10abc4d5a6cc..ef024ce3f7cc 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -1050,6 +1050,36 @@ static int r100_cp_init_microcode(struct radeon_device *rdev) | |||
1050 | return err; | 1050 | return err; |
1051 | } | 1051 | } |
1052 | 1052 | ||
1053 | u32 r100_gfx_get_rptr(struct radeon_device *rdev, | ||
1054 | struct radeon_ring *ring) | ||
1055 | { | ||
1056 | u32 rptr; | ||
1057 | |||
1058 | if (rdev->wb.enabled) | ||
1059 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); | ||
1060 | else | ||
1061 | rptr = RREG32(RADEON_CP_RB_RPTR); | ||
1062 | |||
1063 | return rptr; | ||
1064 | } | ||
1065 | |||
1066 | u32 r100_gfx_get_wptr(struct radeon_device *rdev, | ||
1067 | struct radeon_ring *ring) | ||
1068 | { | ||
1069 | u32 wptr; | ||
1070 | |||
1071 | wptr = RREG32(RADEON_CP_RB_WPTR); | ||
1072 | |||
1073 | return wptr; | ||
1074 | } | ||
1075 | |||
1076 | void r100_gfx_set_wptr(struct radeon_device *rdev, | ||
1077 | struct radeon_ring *ring) | ||
1078 | { | ||
1079 | WREG32(RADEON_CP_RB_WPTR, ring->wptr); | ||
1080 | (void)RREG32(RADEON_CP_RB_WPTR); | ||
1081 | } | ||
1082 | |||
1053 | static void r100_cp_load_microcode(struct radeon_device *rdev) | 1083 | static void r100_cp_load_microcode(struct radeon_device *rdev) |
1054 | { | 1084 | { |
1055 | const __be32 *fw_data; | 1085 | const __be32 *fw_data; |
@@ -1102,7 +1132,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1102 | ring_size = (1 << (rb_bufsz + 1)) * 4; | 1132 | ring_size = (1 << (rb_bufsz + 1)) * 4; |
1103 | r100_cp_load_microcode(rdev); | 1133 | r100_cp_load_microcode(rdev); |
1104 | r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, | 1134 | r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, |
1105 | RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR, | ||
1106 | RADEON_CP_PACKET2); | 1135 | RADEON_CP_PACKET2); |
1107 | if (r) { | 1136 | if (r) { |
1108 | return r; | 1137 | return r; |
@@ -3913,6 +3942,8 @@ int r100_resume(struct radeon_device *rdev) | |||
3913 | /* Initialize surface registers */ | 3942 | /* Initialize surface registers */ |
3914 | radeon_surface_init(rdev); | 3943 | radeon_surface_init(rdev); |
3915 | 3944 | ||
3945 | radeon_pm_resume(rdev); | ||
3946 | |||
3916 | rdev->accel_working = true; | 3947 | rdev->accel_working = true; |
3917 | r = r100_startup(rdev); | 3948 | r = r100_startup(rdev); |
3918 | if (r) { | 3949 | if (r) { |
@@ -3923,6 +3954,7 @@ int r100_resume(struct radeon_device *rdev) | |||
3923 | 3954 | ||
3924 | int r100_suspend(struct radeon_device *rdev) | 3955 | int r100_suspend(struct radeon_device *rdev) |
3925 | { | 3956 | { |
3957 | radeon_pm_suspend(rdev); | ||
3926 | r100_cp_disable(rdev); | 3958 | r100_cp_disable(rdev); |
3927 | radeon_wb_disable(rdev); | 3959 | radeon_wb_disable(rdev); |
3928 | r100_irq_disable(rdev); | 3960 | r100_irq_disable(rdev); |
@@ -3933,6 +3965,7 @@ int r100_suspend(struct radeon_device *rdev) | |||
3933 | 3965 | ||
3934 | void r100_fini(struct radeon_device *rdev) | 3966 | void r100_fini(struct radeon_device *rdev) |
3935 | { | 3967 | { |
3968 | radeon_pm_fini(rdev); | ||
3936 | r100_cp_fini(rdev); | 3969 | r100_cp_fini(rdev); |
3937 | radeon_wb_fini(rdev); | 3970 | radeon_wb_fini(rdev); |
3938 | radeon_ib_pool_fini(rdev); | 3971 | radeon_ib_pool_fini(rdev); |
@@ -4039,6 +4072,9 @@ int r100_init(struct radeon_device *rdev) | |||
4039 | } | 4072 | } |
4040 | r100_set_safe_registers(rdev); | 4073 | r100_set_safe_registers(rdev); |
4041 | 4074 | ||
4075 | /* Initialize power management */ | ||
4076 | radeon_pm_init(rdev); | ||
4077 | |||
4042 | rdev->accel_working = true; | 4078 | rdev->accel_working = true; |
4043 | r = r100_startup(rdev); | 4079 | r = r100_startup(rdev); |
4044 | if (r) { | 4080 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index d8dd269b9159..7c63ef840e86 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -1430,6 +1430,8 @@ int r300_resume(struct radeon_device *rdev) | |||
1430 | /* Initialize surface registers */ | 1430 | /* Initialize surface registers */ |
1431 | radeon_surface_init(rdev); | 1431 | radeon_surface_init(rdev); |
1432 | 1432 | ||
1433 | radeon_pm_resume(rdev); | ||
1434 | |||
1433 | rdev->accel_working = true; | 1435 | rdev->accel_working = true; |
1434 | r = r300_startup(rdev); | 1436 | r = r300_startup(rdev); |
1435 | if (r) { | 1437 | if (r) { |
@@ -1440,6 +1442,7 @@ int r300_resume(struct radeon_device *rdev) | |||
1440 | 1442 | ||
1441 | int r300_suspend(struct radeon_device *rdev) | 1443 | int r300_suspend(struct radeon_device *rdev) |
1442 | { | 1444 | { |
1445 | radeon_pm_suspend(rdev); | ||
1443 | r100_cp_disable(rdev); | 1446 | r100_cp_disable(rdev); |
1444 | radeon_wb_disable(rdev); | 1447 | radeon_wb_disable(rdev); |
1445 | r100_irq_disable(rdev); | 1448 | r100_irq_disable(rdev); |
@@ -1452,6 +1455,7 @@ int r300_suspend(struct radeon_device *rdev) | |||
1452 | 1455 | ||
1453 | void r300_fini(struct radeon_device *rdev) | 1456 | void r300_fini(struct radeon_device *rdev) |
1454 | { | 1457 | { |
1458 | radeon_pm_fini(rdev); | ||
1455 | r100_cp_fini(rdev); | 1459 | r100_cp_fini(rdev); |
1456 | radeon_wb_fini(rdev); | 1460 | radeon_wb_fini(rdev); |
1457 | radeon_ib_pool_fini(rdev); | 1461 | radeon_ib_pool_fini(rdev); |
@@ -1538,6 +1542,9 @@ int r300_init(struct radeon_device *rdev) | |||
1538 | } | 1542 | } |
1539 | r300_set_reg_safe(rdev); | 1543 | r300_set_reg_safe(rdev); |
1540 | 1544 | ||
1545 | /* Initialize power management */ | ||
1546 | radeon_pm_init(rdev); | ||
1547 | |||
1541 | rdev->accel_working = true; | 1548 | rdev->accel_working = true; |
1542 | r = r300_startup(rdev); | 1549 | r = r300_startup(rdev); |
1543 | if (r) { | 1550 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 6edf2b3a52b4..3768aab2710b 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -325,6 +325,8 @@ int r420_resume(struct radeon_device *rdev) | |||
325 | /* Initialize surface registers */ | 325 | /* Initialize surface registers */ |
326 | radeon_surface_init(rdev); | 326 | radeon_surface_init(rdev); |
327 | 327 | ||
328 | radeon_pm_resume(rdev); | ||
329 | |||
328 | rdev->accel_working = true; | 330 | rdev->accel_working = true; |
329 | r = r420_startup(rdev); | 331 | r = r420_startup(rdev); |
330 | if (r) { | 332 | if (r) { |
@@ -335,6 +337,7 @@ int r420_resume(struct radeon_device *rdev) | |||
335 | 337 | ||
336 | int r420_suspend(struct radeon_device *rdev) | 338 | int r420_suspend(struct radeon_device *rdev) |
337 | { | 339 | { |
340 | radeon_pm_suspend(rdev); | ||
338 | r420_cp_errata_fini(rdev); | 341 | r420_cp_errata_fini(rdev); |
339 | r100_cp_disable(rdev); | 342 | r100_cp_disable(rdev); |
340 | radeon_wb_disable(rdev); | 343 | radeon_wb_disable(rdev); |
@@ -348,6 +351,7 @@ int r420_suspend(struct radeon_device *rdev) | |||
348 | 351 | ||
349 | void r420_fini(struct radeon_device *rdev) | 352 | void r420_fini(struct radeon_device *rdev) |
350 | { | 353 | { |
354 | radeon_pm_fini(rdev); | ||
351 | r100_cp_fini(rdev); | 355 | r100_cp_fini(rdev); |
352 | radeon_wb_fini(rdev); | 356 | radeon_wb_fini(rdev); |
353 | radeon_ib_pool_fini(rdev); | 357 | radeon_ib_pool_fini(rdev); |
@@ -444,6 +448,9 @@ int r420_init(struct radeon_device *rdev) | |||
444 | } | 448 | } |
445 | r420_set_reg_safe(rdev); | 449 | r420_set_reg_safe(rdev); |
446 | 450 | ||
451 | /* Initialize power management */ | ||
452 | radeon_pm_init(rdev); | ||
453 | |||
447 | rdev->accel_working = true; | 454 | rdev->accel_working = true; |
448 | r = r420_startup(rdev); | 455 | r = r420_startup(rdev); |
449 | if (r) { | 456 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index e1aece73b370..e209eb75024f 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -240,6 +240,8 @@ int r520_resume(struct radeon_device *rdev) | |||
240 | /* Initialize surface registers */ | 240 | /* Initialize surface registers */ |
241 | radeon_surface_init(rdev); | 241 | radeon_surface_init(rdev); |
242 | 242 | ||
243 | radeon_pm_resume(rdev); | ||
244 | |||
243 | rdev->accel_working = true; | 245 | rdev->accel_working = true; |
244 | r = r520_startup(rdev); | 246 | r = r520_startup(rdev); |
245 | if (r) { | 247 | if (r) { |
@@ -312,6 +314,9 @@ int r520_init(struct radeon_device *rdev) | |||
312 | return r; | 314 | return r; |
313 | rv515_set_safe_registers(rdev); | 315 | rv515_set_safe_registers(rdev); |
314 | 316 | ||
317 | /* Initialize power management */ | ||
318 | radeon_pm_init(rdev); | ||
319 | |||
315 | rdev->accel_working = true; | 320 | rdev->accel_working = true; |
316 | r = r520_startup(rdev); | 321 | r = r520_startup(rdev); |
317 | if (r) { | 322 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 9ad06732a78b..ad99bae2e85c 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -105,6 +105,7 @@ void r600_fini(struct radeon_device *rdev); | |||
105 | void r600_irq_disable(struct radeon_device *rdev); | 105 | void r600_irq_disable(struct radeon_device *rdev); |
106 | static void r600_pcie_gen2_enable(struct radeon_device *rdev); | 106 | static void r600_pcie_gen2_enable(struct radeon_device *rdev); |
107 | extern int evergreen_rlc_resume(struct radeon_device *rdev); | 107 | extern int evergreen_rlc_resume(struct radeon_device *rdev); |
108 | extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev); | ||
108 | 109 | ||
109 | /** | 110 | /** |
110 | * r600_get_xclk - get the xclk | 111 | * r600_get_xclk - get the xclk |
@@ -1644,6 +1645,67 @@ static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
1644 | r600_print_gpu_status_regs(rdev); | 1645 | r600_print_gpu_status_regs(rdev); |
1645 | } | 1646 | } |
1646 | 1647 | ||
1648 | static void r600_gpu_pci_config_reset(struct radeon_device *rdev) | ||
1649 | { | ||
1650 | struct rv515_mc_save save; | ||
1651 | u32 tmp, i; | ||
1652 | |||
1653 | dev_info(rdev->dev, "GPU pci config reset\n"); | ||
1654 | |||
1655 | /* disable dpm? */ | ||
1656 | |||
1657 | /* Disable CP parsing/prefetching */ | ||
1658 | if (rdev->family >= CHIP_RV770) | ||
1659 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1)); | ||
1660 | else | ||
1661 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); | ||
1662 | |||
1663 | /* disable the RLC */ | ||
1664 | WREG32(RLC_CNTL, 0); | ||
1665 | |||
1666 | /* Disable DMA */ | ||
1667 | tmp = RREG32(DMA_RB_CNTL); | ||
1668 | tmp &= ~DMA_RB_ENABLE; | ||
1669 | WREG32(DMA_RB_CNTL, tmp); | ||
1670 | |||
1671 | mdelay(50); | ||
1672 | |||
1673 | /* set mclk/sclk to bypass */ | ||
1674 | if (rdev->family >= CHIP_RV770) | ||
1675 | rv770_set_clk_bypass_mode(rdev); | ||
1676 | /* disable BM */ | ||
1677 | pci_clear_master(rdev->pdev); | ||
1678 | /* disable mem access */ | ||
1679 | rv515_mc_stop(rdev, &save); | ||
1680 | if (r600_mc_wait_for_idle(rdev)) { | ||
1681 | dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); | ||
1682 | } | ||
1683 | |||
1684 | /* BIF reset workaround. Not sure if this is needed on 6xx */ | ||
1685 | tmp = RREG32(BUS_CNTL); | ||
1686 | tmp |= VGA_COHE_SPEC_TIMER_DIS; | ||
1687 | WREG32(BUS_CNTL, tmp); | ||
1688 | |||
1689 | tmp = RREG32(BIF_SCRATCH0); | ||
1690 | |||
1691 | /* reset */ | ||
1692 | radeon_pci_config_reset(rdev); | ||
1693 | mdelay(1); | ||
1694 | |||
1695 | /* BIF reset workaround. Not sure if this is needed on 6xx */ | ||
1696 | tmp = SOFT_RESET_BIF; | ||
1697 | WREG32(SRBM_SOFT_RESET, tmp); | ||
1698 | mdelay(1); | ||
1699 | WREG32(SRBM_SOFT_RESET, 0); | ||
1700 | |||
1701 | /* wait for asic to come out of reset */ | ||
1702 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
1703 | if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) | ||
1704 | break; | ||
1705 | udelay(1); | ||
1706 | } | ||
1707 | } | ||
1708 | |||
1647 | int r600_asic_reset(struct radeon_device *rdev) | 1709 | int r600_asic_reset(struct radeon_device *rdev) |
1648 | { | 1710 | { |
1649 | u32 reset_mask; | 1711 | u32 reset_mask; |
@@ -1653,10 +1715,17 @@ int r600_asic_reset(struct radeon_device *rdev) | |||
1653 | if (reset_mask) | 1715 | if (reset_mask) |
1654 | r600_set_bios_scratch_engine_hung(rdev, true); | 1716 | r600_set_bios_scratch_engine_hung(rdev, true); |
1655 | 1717 | ||
1718 | /* try soft reset */ | ||
1656 | r600_gpu_soft_reset(rdev, reset_mask); | 1719 | r600_gpu_soft_reset(rdev, reset_mask); |
1657 | 1720 | ||
1658 | reset_mask = r600_gpu_check_soft_reset(rdev); | 1721 | reset_mask = r600_gpu_check_soft_reset(rdev); |
1659 | 1722 | ||
1723 | /* try pci config reset */ | ||
1724 | if (reset_mask && radeon_hard_reset) | ||
1725 | r600_gpu_pci_config_reset(rdev); | ||
1726 | |||
1727 | reset_mask = r600_gpu_check_soft_reset(rdev); | ||
1728 | |||
1660 | if (!reset_mask) | 1729 | if (!reset_mask) |
1661 | r600_set_bios_scratch_engine_hung(rdev, false); | 1730 | r600_set_bios_scratch_engine_hung(rdev, false); |
1662 | 1731 | ||
@@ -2382,6 +2451,36 @@ out: | |||
2382 | return err; | 2451 | return err; |
2383 | } | 2452 | } |
2384 | 2453 | ||
2454 | u32 r600_gfx_get_rptr(struct radeon_device *rdev, | ||
2455 | struct radeon_ring *ring) | ||
2456 | { | ||
2457 | u32 rptr; | ||
2458 | |||
2459 | if (rdev->wb.enabled) | ||
2460 | rptr = rdev->wb.wb[ring->rptr_offs/4]; | ||
2461 | else | ||
2462 | rptr = RREG32(R600_CP_RB_RPTR); | ||
2463 | |||
2464 | return rptr; | ||
2465 | } | ||
2466 | |||
2467 | u32 r600_gfx_get_wptr(struct radeon_device *rdev, | ||
2468 | struct radeon_ring *ring) | ||
2469 | { | ||
2470 | u32 wptr; | ||
2471 | |||
2472 | wptr = RREG32(R600_CP_RB_WPTR); | ||
2473 | |||
2474 | return wptr; | ||
2475 | } | ||
2476 | |||
2477 | void r600_gfx_set_wptr(struct radeon_device *rdev, | ||
2478 | struct radeon_ring *ring) | ||
2479 | { | ||
2480 | WREG32(R600_CP_RB_WPTR, ring->wptr); | ||
2481 | (void)RREG32(R600_CP_RB_WPTR); | ||
2482 | } | ||
2483 | |||
2385 | static int r600_cp_load_microcode(struct radeon_device *rdev) | 2484 | static int r600_cp_load_microcode(struct radeon_device *rdev) |
2386 | { | 2485 | { |
2387 | const __be32 *fw_data; | 2486 | const __be32 *fw_data; |
@@ -2775,14 +2874,6 @@ static int r600_startup(struct radeon_device *rdev) | |||
2775 | 2874 | ||
2776 | r600_mc_program(rdev); | 2875 | r600_mc_program(rdev); |
2777 | 2876 | ||
2778 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
2779 | r = r600_init_microcode(rdev); | ||
2780 | if (r) { | ||
2781 | DRM_ERROR("Failed to load firmware!\n"); | ||
2782 | return r; | ||
2783 | } | ||
2784 | } | ||
2785 | |||
2786 | if (rdev->flags & RADEON_IS_AGP) { | 2877 | if (rdev->flags & RADEON_IS_AGP) { |
2787 | r600_agp_enable(rdev); | 2878 | r600_agp_enable(rdev); |
2788 | } else { | 2879 | } else { |
@@ -2826,14 +2917,12 @@ static int r600_startup(struct radeon_device *rdev) | |||
2826 | 2917 | ||
2827 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 2918 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
2828 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 2919 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
2829 | R600_CP_RB_RPTR, R600_CP_RB_WPTR, | ||
2830 | RADEON_CP_PACKET2); | 2920 | RADEON_CP_PACKET2); |
2831 | if (r) | 2921 | if (r) |
2832 | return r; | 2922 | return r; |
2833 | 2923 | ||
2834 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; | 2924 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
2835 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, | 2925 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
2836 | DMA_RB_RPTR, DMA_RB_WPTR, | ||
2837 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); | 2926 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
2838 | if (r) | 2927 | if (r) |
2839 | return r; | 2928 | return r; |
@@ -2889,6 +2978,8 @@ int r600_resume(struct radeon_device *rdev) | |||
2889 | /* post card */ | 2978 | /* post card */ |
2890 | atom_asic_init(rdev->mode_info.atom_context); | 2979 | atom_asic_init(rdev->mode_info.atom_context); |
2891 | 2980 | ||
2981 | radeon_pm_resume(rdev); | ||
2982 | |||
2892 | rdev->accel_working = true; | 2983 | rdev->accel_working = true; |
2893 | r = r600_startup(rdev); | 2984 | r = r600_startup(rdev); |
2894 | if (r) { | 2985 | if (r) { |
@@ -2902,6 +2993,7 @@ int r600_resume(struct radeon_device *rdev) | |||
2902 | 2993 | ||
2903 | int r600_suspend(struct radeon_device *rdev) | 2994 | int r600_suspend(struct radeon_device *rdev) |
2904 | { | 2995 | { |
2996 | radeon_pm_suspend(rdev); | ||
2905 | r600_audio_fini(rdev); | 2997 | r600_audio_fini(rdev); |
2906 | r600_cp_stop(rdev); | 2998 | r600_cp_stop(rdev); |
2907 | r600_dma_stop(rdev); | 2999 | r600_dma_stop(rdev); |
@@ -2970,6 +3062,17 @@ int r600_init(struct radeon_device *rdev) | |||
2970 | if (r) | 3062 | if (r) |
2971 | return r; | 3063 | return r; |
2972 | 3064 | ||
3065 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
3066 | r = r600_init_microcode(rdev); | ||
3067 | if (r) { | ||
3068 | DRM_ERROR("Failed to load firmware!\n"); | ||
3069 | return r; | ||
3070 | } | ||
3071 | } | ||
3072 | |||
3073 | /* Initialize power management */ | ||
3074 | radeon_pm_init(rdev); | ||
3075 | |||
2973 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; | 3076 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
2974 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); | 3077 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
2975 | 3078 | ||
@@ -3002,6 +3105,7 @@ int r600_init(struct radeon_device *rdev) | |||
3002 | 3105 | ||
3003 | void r600_fini(struct radeon_device *rdev) | 3106 | void r600_fini(struct radeon_device *rdev) |
3004 | { | 3107 | { |
3108 | radeon_pm_fini(rdev); | ||
3005 | r600_audio_fini(rdev); | 3109 | r600_audio_fini(rdev); |
3006 | r600_cp_fini(rdev); | 3110 | r600_cp_fini(rdev); |
3007 | r600_dma_fini(rdev); | 3111 | r600_dma_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index d824f7fed47d..7b399dc5fd54 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -749,7 +749,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
749 | } | 749 | } |
750 | 750 | ||
751 | for (i = 0; i < 8; i++) { | 751 | for (i = 0; i < 8; i++) { |
752 | if ((tmp >> (i * 4)) & 0xF) { | 752 | u32 format = G_0280A0_FORMAT(track->cb_color_info[i]); |
753 | |||
754 | if (format != V_0280A0_COLOR_INVALID && | ||
755 | (tmp >> (i * 4)) & 0xF) { | ||
753 | /* at least one component is enabled */ | 756 | /* at least one component is enabled */ |
754 | if (track->cb_color_bo[i] == NULL) { | 757 | if (track->cb_color_bo[i] == NULL) { |
755 | dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", | 758 | dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", |
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 7844d15c139f..3452c8410bd7 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c | |||
@@ -51,7 +51,14 @@ u32 r600_gpu_check_soft_reset(struct radeon_device *rdev); | |||
51 | uint32_t r600_dma_get_rptr(struct radeon_device *rdev, | 51 | uint32_t r600_dma_get_rptr(struct radeon_device *rdev, |
52 | struct radeon_ring *ring) | 52 | struct radeon_ring *ring) |
53 | { | 53 | { |
54 | return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2; | 54 | u32 rptr; |
55 | |||
56 | if (rdev->wb.enabled) | ||
57 | rptr = rdev->wb.wb[ring->rptr_offs/4]; | ||
58 | else | ||
59 | rptr = RREG32(DMA_RB_RPTR); | ||
60 | |||
61 | return (rptr & 0x3fffc) >> 2; | ||
55 | } | 62 | } |
56 | 63 | ||
57 | /** | 64 | /** |
@@ -65,7 +72,7 @@ uint32_t r600_dma_get_rptr(struct radeon_device *rdev, | |||
65 | uint32_t r600_dma_get_wptr(struct radeon_device *rdev, | 72 | uint32_t r600_dma_get_wptr(struct radeon_device *rdev, |
66 | struct radeon_ring *ring) | 73 | struct radeon_ring *ring) |
67 | { | 74 | { |
68 | return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2; | 75 | return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2; |
69 | } | 76 | } |
70 | 77 | ||
71 | /** | 78 | /** |
@@ -79,7 +86,7 @@ uint32_t r600_dma_get_wptr(struct radeon_device *rdev, | |||
79 | void r600_dma_set_wptr(struct radeon_device *rdev, | 86 | void r600_dma_set_wptr(struct radeon_device *rdev, |
80 | struct radeon_ring *ring) | 87 | struct radeon_ring *ring) |
81 | { | 88 | { |
82 | WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc); | 89 | WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc); |
83 | } | 90 | } |
84 | 91 | ||
85 | /** | 92 | /** |
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 5513d8f06252..e4cc9b314ce9 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c | |||
@@ -729,8 +729,8 @@ bool r600_is_uvd_state(u32 class, u32 class2) | |||
729 | return false; | 729 | return false; |
730 | } | 730 | } |
731 | 731 | ||
732 | int r600_set_thermal_temperature_range(struct radeon_device *rdev, | 732 | static int r600_set_thermal_temperature_range(struct radeon_device *rdev, |
733 | int min_temp, int max_temp) | 733 | int min_temp, int max_temp) |
734 | { | 734 | { |
735 | int low_temp = 0 * 1000; | 735 | int low_temp = 0 * 1000; |
736 | int high_temp = 255 * 1000; | 736 | int high_temp = 255 * 1000; |
@@ -777,6 +777,22 @@ bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor) | |||
777 | } | 777 | } |
778 | } | 778 | } |
779 | 779 | ||
780 | int r600_dpm_late_enable(struct radeon_device *rdev) | ||
781 | { | ||
782 | int ret; | ||
783 | |||
784 | if (rdev->irq.installed && | ||
785 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | ||
786 | ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | ||
787 | if (ret) | ||
788 | return ret; | ||
789 | rdev->irq.dpm_thermal = true; | ||
790 | radeon_irq_set(rdev); | ||
791 | } | ||
792 | |||
793 | return 0; | ||
794 | } | ||
795 | |||
780 | union power_info { | 796 | union power_info { |
781 | struct _ATOM_POWERPLAY_INFO info; | 797 | struct _ATOM_POWERPLAY_INFO info; |
782 | struct _ATOM_POWERPLAY_INFO_V2 info_2; | 798 | struct _ATOM_POWERPLAY_INFO_V2 info_2; |
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h index 1000bf9719f2..07eab2b04e81 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.h +++ b/drivers/gpu/drm/radeon/r600_dpm.h | |||
@@ -213,8 +213,6 @@ void r600_wait_for_power_level(struct radeon_device *rdev, | |||
213 | void r600_start_dpm(struct radeon_device *rdev); | 213 | void r600_start_dpm(struct radeon_device *rdev); |
214 | void r600_stop_dpm(struct radeon_device *rdev); | 214 | void r600_stop_dpm(struct radeon_device *rdev); |
215 | 215 | ||
216 | int r600_set_thermal_temperature_range(struct radeon_device *rdev, | ||
217 | int min_temp, int max_temp); | ||
218 | bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor); | 216 | bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor); |
219 | 217 | ||
220 | int r600_parse_extended_power_table(struct radeon_device *rdev); | 218 | int r600_parse_extended_power_table(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index b7d3ecba43e3..3016fc14f502 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -250,7 +250,7 @@ static void r600_hdmi_audio_workaround(struct drm_encoder *encoder) | |||
250 | value, ~HDMI0_AUDIO_TEST_EN); | 250 | value, ~HDMI0_AUDIO_TEST_EN); |
251 | } | 251 | } |
252 | 252 | ||
253 | void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | 253 | static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) |
254 | { | 254 | { |
255 | struct drm_device *dev = encoder->dev; | 255 | struct drm_device *dev = encoder->dev; |
256 | struct radeon_device *rdev = dev->dev_private; | 256 | struct radeon_device *rdev = dev->dev_private; |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index ebe38724a976..3fca4b9c65ad 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -701,11 +701,18 @@ | |||
701 | #define RLC_UCODE_DATA 0x3f30 | 701 | #define RLC_UCODE_DATA 0x3f30 |
702 | 702 | ||
703 | #define SRBM_SOFT_RESET 0xe60 | 703 | #define SRBM_SOFT_RESET 0xe60 |
704 | # define SOFT_RESET_BIF (1 << 1) | ||
704 | # define SOFT_RESET_DMA (1 << 12) | 705 | # define SOFT_RESET_DMA (1 << 12) |
705 | # define SOFT_RESET_RLC (1 << 13) | 706 | # define SOFT_RESET_RLC (1 << 13) |
706 | # define SOFT_RESET_UVD (1 << 18) | 707 | # define SOFT_RESET_UVD (1 << 18) |
707 | # define RV770_SOFT_RESET_DMA (1 << 20) | 708 | # define RV770_SOFT_RESET_DMA (1 << 20) |
708 | 709 | ||
710 | #define BIF_SCRATCH0 0x5438 | ||
711 | |||
712 | #define BUS_CNTL 0x5420 | ||
713 | # define BIOS_ROM_DIS (1 << 1) | ||
714 | # define VGA_COHE_SPEC_TIMER_DIS (1 << 9) | ||
715 | |||
709 | #define CP_INT_CNTL 0xc124 | 716 | #define CP_INT_CNTL 0xc124 |
710 | # define CNTX_BUSY_INT_ENABLE (1 << 19) | 717 | # define CNTX_BUSY_INT_ENABLE (1 << 19) |
711 | # define CNTX_EMPTY_INT_ENABLE (1 << 20) | 718 | # define CNTX_EMPTY_INT_ENABLE (1 << 20) |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b1f990d0eaa1..746c0c6c269b 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -99,6 +99,7 @@ extern int radeon_fastfb; | |||
99 | extern int radeon_dpm; | 99 | extern int radeon_dpm; |
100 | extern int radeon_aspm; | 100 | extern int radeon_aspm; |
101 | extern int radeon_runtime_pm; | 101 | extern int radeon_runtime_pm; |
102 | extern int radeon_hard_reset; | ||
102 | 103 | ||
103 | /* | 104 | /* |
104 | * Copy from radeon_drv.h so we don't have to include both and have conflicting | 105 | * Copy from radeon_drv.h so we don't have to include both and have conflicting |
@@ -139,6 +140,9 @@ extern int radeon_runtime_pm; | |||
139 | #define RADEON_VA_RESERVED_SIZE (8 << 20) | 140 | #define RADEON_VA_RESERVED_SIZE (8 << 20) |
140 | #define RADEON_IB_VM_MAX_SIZE (64 << 10) | 141 | #define RADEON_IB_VM_MAX_SIZE (64 << 10) |
141 | 142 | ||
143 | /* hard reset data */ | ||
144 | #define RADEON_ASIC_RESET_DATA 0x39d5e86b | ||
145 | |||
142 | /* reset flags */ | 146 | /* reset flags */ |
143 | #define RADEON_RESET_GFX (1 << 0) | 147 | #define RADEON_RESET_GFX (1 << 0) |
144 | #define RADEON_RESET_COMPUTE (1 << 1) | 148 | #define RADEON_RESET_COMPUTE (1 << 1) |
@@ -252,6 +256,7 @@ struct radeon_clock { | |||
252 | * Power management | 256 | * Power management |
253 | */ | 257 | */ |
254 | int radeon_pm_init(struct radeon_device *rdev); | 258 | int radeon_pm_init(struct radeon_device *rdev); |
259 | int radeon_pm_late_init(struct radeon_device *rdev); | ||
255 | void radeon_pm_fini(struct radeon_device *rdev); | 260 | void radeon_pm_fini(struct radeon_device *rdev); |
256 | void radeon_pm_compute_clocks(struct radeon_device *rdev); | 261 | void radeon_pm_compute_clocks(struct radeon_device *rdev); |
257 | void radeon_pm_suspend(struct radeon_device *rdev); | 262 | void radeon_pm_suspend(struct radeon_device *rdev); |
@@ -413,6 +418,11 @@ struct radeon_mman { | |||
413 | struct ttm_bo_device bdev; | 418 | struct ttm_bo_device bdev; |
414 | bool mem_global_referenced; | 419 | bool mem_global_referenced; |
415 | bool initialized; | 420 | bool initialized; |
421 | |||
422 | #if defined(CONFIG_DEBUG_FS) | ||
423 | struct dentry *vram; | ||
424 | struct dentry *gtt; | ||
425 | #endif | ||
416 | }; | 426 | }; |
417 | 427 | ||
418 | /* bo virtual address in a specific vm */ | 428 | /* bo virtual address in a specific vm */ |
@@ -779,13 +789,11 @@ struct radeon_ring { | |||
779 | volatile uint32_t *ring; | 789 | volatile uint32_t *ring; |
780 | unsigned rptr; | 790 | unsigned rptr; |
781 | unsigned rptr_offs; | 791 | unsigned rptr_offs; |
782 | unsigned rptr_reg; | ||
783 | unsigned rptr_save_reg; | 792 | unsigned rptr_save_reg; |
784 | u64 next_rptr_gpu_addr; | 793 | u64 next_rptr_gpu_addr; |
785 | volatile u32 *next_rptr_cpu_addr; | 794 | volatile u32 *next_rptr_cpu_addr; |
786 | unsigned wptr; | 795 | unsigned wptr; |
787 | unsigned wptr_old; | 796 | unsigned wptr_old; |
788 | unsigned wptr_reg; | ||
789 | unsigned ring_size; | 797 | unsigned ring_size; |
790 | unsigned ring_free_dw; | 798 | unsigned ring_free_dw; |
791 | int count_dw; | 799 | int count_dw; |
@@ -949,7 +957,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring | |||
949 | int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, | 957 | int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, |
950 | unsigned size, uint32_t *data); | 958 | unsigned size, uint32_t *data); |
951 | int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size, | 959 | int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size, |
952 | unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop); | 960 | unsigned rptr_offs, u32 nop); |
953 | void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); | 961 | void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); |
954 | 962 | ||
955 | 963 | ||
@@ -1775,6 +1783,7 @@ struct radeon_asic { | |||
1775 | int (*init)(struct radeon_device *rdev); | 1783 | int (*init)(struct radeon_device *rdev); |
1776 | void (*setup_asic)(struct radeon_device *rdev); | 1784 | void (*setup_asic)(struct radeon_device *rdev); |
1777 | int (*enable)(struct radeon_device *rdev); | 1785 | int (*enable)(struct radeon_device *rdev); |
1786 | int (*late_enable)(struct radeon_device *rdev); | ||
1778 | void (*disable)(struct radeon_device *rdev); | 1787 | void (*disable)(struct radeon_device *rdev); |
1779 | int (*pre_set_power_state)(struct radeon_device *rdev); | 1788 | int (*pre_set_power_state)(struct radeon_device *rdev); |
1780 | int (*set_power_state)(struct radeon_device *rdev); | 1789 | int (*set_power_state)(struct radeon_device *rdev); |
@@ -2650,6 +2659,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); | |||
2650 | #define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev)) | 2659 | #define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev)) |
2651 | #define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev)) | 2660 | #define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev)) |
2652 | #define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev)) | 2661 | #define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev)) |
2662 | #define radeon_dpm_late_enable(rdev) rdev->asic->dpm.late_enable((rdev)) | ||
2653 | #define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev)) | 2663 | #define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev)) |
2654 | #define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev)) | 2664 | #define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev)) |
2655 | #define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev)) | 2665 | #define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev)) |
@@ -2668,6 +2678,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); | |||
2668 | /* Common functions */ | 2678 | /* Common functions */ |
2669 | /* AGP */ | 2679 | /* AGP */ |
2670 | extern int radeon_gpu_reset(struct radeon_device *rdev); | 2680 | extern int radeon_gpu_reset(struct radeon_device *rdev); |
2681 | extern void radeon_pci_config_reset(struct radeon_device *rdev); | ||
2671 | extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung); | 2682 | extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung); |
2672 | extern void radeon_agp_disable(struct radeon_device *rdev); | 2683 | extern void radeon_agp_disable(struct radeon_device *rdev); |
2673 | extern int radeon_modeset_init(struct radeon_device *rdev); | 2684 | extern int radeon_modeset_init(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e354ce94cdd1..f55879dd11c6 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -182,9 +182,9 @@ static struct radeon_asic_ring r100_gfx_ring = { | |||
182 | .ring_test = &r100_ring_test, | 182 | .ring_test = &r100_ring_test, |
183 | .ib_test = &r100_ib_test, | 183 | .ib_test = &r100_ib_test, |
184 | .is_lockup = &r100_gpu_is_lockup, | 184 | .is_lockup = &r100_gpu_is_lockup, |
185 | .get_rptr = &radeon_ring_generic_get_rptr, | 185 | .get_rptr = &r100_gfx_get_rptr, |
186 | .get_wptr = &radeon_ring_generic_get_wptr, | 186 | .get_wptr = &r100_gfx_get_wptr, |
187 | .set_wptr = &radeon_ring_generic_set_wptr, | 187 | .set_wptr = &r100_gfx_set_wptr, |
188 | }; | 188 | }; |
189 | 189 | ||
190 | static struct radeon_asic r100_asic = { | 190 | static struct radeon_asic r100_asic = { |
@@ -330,9 +330,9 @@ static struct radeon_asic_ring r300_gfx_ring = { | |||
330 | .ring_test = &r100_ring_test, | 330 | .ring_test = &r100_ring_test, |
331 | .ib_test = &r100_ib_test, | 331 | .ib_test = &r100_ib_test, |
332 | .is_lockup = &r100_gpu_is_lockup, | 332 | .is_lockup = &r100_gpu_is_lockup, |
333 | .get_rptr = &radeon_ring_generic_get_rptr, | 333 | .get_rptr = &r100_gfx_get_rptr, |
334 | .get_wptr = &radeon_ring_generic_get_wptr, | 334 | .get_wptr = &r100_gfx_get_wptr, |
335 | .set_wptr = &radeon_ring_generic_set_wptr, | 335 | .set_wptr = &r100_gfx_set_wptr, |
336 | }; | 336 | }; |
337 | 337 | ||
338 | static struct radeon_asic r300_asic = { | 338 | static struct radeon_asic r300_asic = { |
@@ -883,9 +883,9 @@ static struct radeon_asic_ring r600_gfx_ring = { | |||
883 | .ring_test = &r600_ring_test, | 883 | .ring_test = &r600_ring_test, |
884 | .ib_test = &r600_ib_test, | 884 | .ib_test = &r600_ib_test, |
885 | .is_lockup = &r600_gfx_is_lockup, | 885 | .is_lockup = &r600_gfx_is_lockup, |
886 | .get_rptr = &radeon_ring_generic_get_rptr, | 886 | .get_rptr = &r600_gfx_get_rptr, |
887 | .get_wptr = &radeon_ring_generic_get_wptr, | 887 | .get_wptr = &r600_gfx_get_wptr, |
888 | .set_wptr = &radeon_ring_generic_set_wptr, | 888 | .set_wptr = &r600_gfx_set_wptr, |
889 | }; | 889 | }; |
890 | 890 | ||
891 | static struct radeon_asic_ring r600_dma_ring = { | 891 | static struct radeon_asic_ring r600_dma_ring = { |
@@ -1045,6 +1045,7 @@ static struct radeon_asic rv6xx_asic = { | |||
1045 | .init = &rv6xx_dpm_init, | 1045 | .init = &rv6xx_dpm_init, |
1046 | .setup_asic = &rv6xx_setup_asic, | 1046 | .setup_asic = &rv6xx_setup_asic, |
1047 | .enable = &rv6xx_dpm_enable, | 1047 | .enable = &rv6xx_dpm_enable, |
1048 | .late_enable = &r600_dpm_late_enable, | ||
1048 | .disable = &rv6xx_dpm_disable, | 1049 | .disable = &rv6xx_dpm_disable, |
1049 | .pre_set_power_state = &r600_dpm_pre_set_power_state, | 1050 | .pre_set_power_state = &r600_dpm_pre_set_power_state, |
1050 | .set_power_state = &rv6xx_dpm_set_power_state, | 1051 | .set_power_state = &rv6xx_dpm_set_power_state, |
@@ -1135,6 +1136,7 @@ static struct radeon_asic rs780_asic = { | |||
1135 | .init = &rs780_dpm_init, | 1136 | .init = &rs780_dpm_init, |
1136 | .setup_asic = &rs780_dpm_setup_asic, | 1137 | .setup_asic = &rs780_dpm_setup_asic, |
1137 | .enable = &rs780_dpm_enable, | 1138 | .enable = &rs780_dpm_enable, |
1139 | .late_enable = &r600_dpm_late_enable, | ||
1138 | .disable = &rs780_dpm_disable, | 1140 | .disable = &rs780_dpm_disable, |
1139 | .pre_set_power_state = &r600_dpm_pre_set_power_state, | 1141 | .pre_set_power_state = &r600_dpm_pre_set_power_state, |
1140 | .set_power_state = &rs780_dpm_set_power_state, | 1142 | .set_power_state = &rs780_dpm_set_power_state, |
@@ -1239,6 +1241,7 @@ static struct radeon_asic rv770_asic = { | |||
1239 | .init = &rv770_dpm_init, | 1241 | .init = &rv770_dpm_init, |
1240 | .setup_asic = &rv770_dpm_setup_asic, | 1242 | .setup_asic = &rv770_dpm_setup_asic, |
1241 | .enable = &rv770_dpm_enable, | 1243 | .enable = &rv770_dpm_enable, |
1244 | .late_enable = &rv770_dpm_late_enable, | ||
1242 | .disable = &rv770_dpm_disable, | 1245 | .disable = &rv770_dpm_disable, |
1243 | .pre_set_power_state = &r600_dpm_pre_set_power_state, | 1246 | .pre_set_power_state = &r600_dpm_pre_set_power_state, |
1244 | .set_power_state = &rv770_dpm_set_power_state, | 1247 | .set_power_state = &rv770_dpm_set_power_state, |
@@ -1267,9 +1270,9 @@ static struct radeon_asic_ring evergreen_gfx_ring = { | |||
1267 | .ring_test = &r600_ring_test, | 1270 | .ring_test = &r600_ring_test, |
1268 | .ib_test = &r600_ib_test, | 1271 | .ib_test = &r600_ib_test, |
1269 | .is_lockup = &evergreen_gfx_is_lockup, | 1272 | .is_lockup = &evergreen_gfx_is_lockup, |
1270 | .get_rptr = &radeon_ring_generic_get_rptr, | 1273 | .get_rptr = &r600_gfx_get_rptr, |
1271 | .get_wptr = &radeon_ring_generic_get_wptr, | 1274 | .get_wptr = &r600_gfx_get_wptr, |
1272 | .set_wptr = &radeon_ring_generic_set_wptr, | 1275 | .set_wptr = &r600_gfx_set_wptr, |
1273 | }; | 1276 | }; |
1274 | 1277 | ||
1275 | static struct radeon_asic_ring evergreen_dma_ring = { | 1278 | static struct radeon_asic_ring evergreen_dma_ring = { |
@@ -1357,6 +1360,7 @@ static struct radeon_asic evergreen_asic = { | |||
1357 | .init = &cypress_dpm_init, | 1360 | .init = &cypress_dpm_init, |
1358 | .setup_asic = &cypress_dpm_setup_asic, | 1361 | .setup_asic = &cypress_dpm_setup_asic, |
1359 | .enable = &cypress_dpm_enable, | 1362 | .enable = &cypress_dpm_enable, |
1363 | .late_enable = &rv770_dpm_late_enable, | ||
1360 | .disable = &cypress_dpm_disable, | 1364 | .disable = &cypress_dpm_disable, |
1361 | .pre_set_power_state = &r600_dpm_pre_set_power_state, | 1365 | .pre_set_power_state = &r600_dpm_pre_set_power_state, |
1362 | .set_power_state = &cypress_dpm_set_power_state, | 1366 | .set_power_state = &cypress_dpm_set_power_state, |
@@ -1449,6 +1453,7 @@ static struct radeon_asic sumo_asic = { | |||
1449 | .init = &sumo_dpm_init, | 1453 | .init = &sumo_dpm_init, |
1450 | .setup_asic = &sumo_dpm_setup_asic, | 1454 | .setup_asic = &sumo_dpm_setup_asic, |
1451 | .enable = &sumo_dpm_enable, | 1455 | .enable = &sumo_dpm_enable, |
1456 | .late_enable = &sumo_dpm_late_enable, | ||
1452 | .disable = &sumo_dpm_disable, | 1457 | .disable = &sumo_dpm_disable, |
1453 | .pre_set_power_state = &sumo_dpm_pre_set_power_state, | 1458 | .pre_set_power_state = &sumo_dpm_pre_set_power_state, |
1454 | .set_power_state = &sumo_dpm_set_power_state, | 1459 | .set_power_state = &sumo_dpm_set_power_state, |
@@ -1540,6 +1545,7 @@ static struct radeon_asic btc_asic = { | |||
1540 | .init = &btc_dpm_init, | 1545 | .init = &btc_dpm_init, |
1541 | .setup_asic = &btc_dpm_setup_asic, | 1546 | .setup_asic = &btc_dpm_setup_asic, |
1542 | .enable = &btc_dpm_enable, | 1547 | .enable = &btc_dpm_enable, |
1548 | .late_enable = &rv770_dpm_late_enable, | ||
1543 | .disable = &btc_dpm_disable, | 1549 | .disable = &btc_dpm_disable, |
1544 | .pre_set_power_state = &btc_dpm_pre_set_power_state, | 1550 | .pre_set_power_state = &btc_dpm_pre_set_power_state, |
1545 | .set_power_state = &btc_dpm_set_power_state, | 1551 | .set_power_state = &btc_dpm_set_power_state, |
@@ -1570,9 +1576,9 @@ static struct radeon_asic_ring cayman_gfx_ring = { | |||
1570 | .ib_test = &r600_ib_test, | 1576 | .ib_test = &r600_ib_test, |
1571 | .is_lockup = &cayman_gfx_is_lockup, | 1577 | .is_lockup = &cayman_gfx_is_lockup, |
1572 | .vm_flush = &cayman_vm_flush, | 1578 | .vm_flush = &cayman_vm_flush, |
1573 | .get_rptr = &radeon_ring_generic_get_rptr, | 1579 | .get_rptr = &cayman_gfx_get_rptr, |
1574 | .get_wptr = &radeon_ring_generic_get_wptr, | 1580 | .get_wptr = &cayman_gfx_get_wptr, |
1575 | .set_wptr = &radeon_ring_generic_set_wptr, | 1581 | .set_wptr = &cayman_gfx_set_wptr, |
1576 | }; | 1582 | }; |
1577 | 1583 | ||
1578 | static struct radeon_asic_ring cayman_dma_ring = { | 1584 | static struct radeon_asic_ring cayman_dma_ring = { |
@@ -1585,9 +1591,9 @@ static struct radeon_asic_ring cayman_dma_ring = { | |||
1585 | .ib_test = &r600_dma_ib_test, | 1591 | .ib_test = &r600_dma_ib_test, |
1586 | .is_lockup = &cayman_dma_is_lockup, | 1592 | .is_lockup = &cayman_dma_is_lockup, |
1587 | .vm_flush = &cayman_dma_vm_flush, | 1593 | .vm_flush = &cayman_dma_vm_flush, |
1588 | .get_rptr = &r600_dma_get_rptr, | 1594 | .get_rptr = &cayman_dma_get_rptr, |
1589 | .get_wptr = &r600_dma_get_wptr, | 1595 | .get_wptr = &cayman_dma_get_wptr, |
1590 | .set_wptr = &r600_dma_set_wptr | 1596 | .set_wptr = &cayman_dma_set_wptr |
1591 | }; | 1597 | }; |
1592 | 1598 | ||
1593 | static struct radeon_asic_ring cayman_uvd_ring = { | 1599 | static struct radeon_asic_ring cayman_uvd_ring = { |
@@ -1683,6 +1689,7 @@ static struct radeon_asic cayman_asic = { | |||
1683 | .init = &ni_dpm_init, | 1689 | .init = &ni_dpm_init, |
1684 | .setup_asic = &ni_dpm_setup_asic, | 1690 | .setup_asic = &ni_dpm_setup_asic, |
1685 | .enable = &ni_dpm_enable, | 1691 | .enable = &ni_dpm_enable, |
1692 | .late_enable = &rv770_dpm_late_enable, | ||
1686 | .disable = &ni_dpm_disable, | 1693 | .disable = &ni_dpm_disable, |
1687 | .pre_set_power_state = &ni_dpm_pre_set_power_state, | 1694 | .pre_set_power_state = &ni_dpm_pre_set_power_state, |
1688 | .set_power_state = &ni_dpm_set_power_state, | 1695 | .set_power_state = &ni_dpm_set_power_state, |
@@ -1783,6 +1790,7 @@ static struct radeon_asic trinity_asic = { | |||
1783 | .init = &trinity_dpm_init, | 1790 | .init = &trinity_dpm_init, |
1784 | .setup_asic = &trinity_dpm_setup_asic, | 1791 | .setup_asic = &trinity_dpm_setup_asic, |
1785 | .enable = &trinity_dpm_enable, | 1792 | .enable = &trinity_dpm_enable, |
1793 | .late_enable = &trinity_dpm_late_enable, | ||
1786 | .disable = &trinity_dpm_disable, | 1794 | .disable = &trinity_dpm_disable, |
1787 | .pre_set_power_state = &trinity_dpm_pre_set_power_state, | 1795 | .pre_set_power_state = &trinity_dpm_pre_set_power_state, |
1788 | .set_power_state = &trinity_dpm_set_power_state, | 1796 | .set_power_state = &trinity_dpm_set_power_state, |
@@ -1813,9 +1821,9 @@ static struct radeon_asic_ring si_gfx_ring = { | |||
1813 | .ib_test = &r600_ib_test, | 1821 | .ib_test = &r600_ib_test, |
1814 | .is_lockup = &si_gfx_is_lockup, | 1822 | .is_lockup = &si_gfx_is_lockup, |
1815 | .vm_flush = &si_vm_flush, | 1823 | .vm_flush = &si_vm_flush, |
1816 | .get_rptr = &radeon_ring_generic_get_rptr, | 1824 | .get_rptr = &cayman_gfx_get_rptr, |
1817 | .get_wptr = &radeon_ring_generic_get_wptr, | 1825 | .get_wptr = &cayman_gfx_get_wptr, |
1818 | .set_wptr = &radeon_ring_generic_set_wptr, | 1826 | .set_wptr = &cayman_gfx_set_wptr, |
1819 | }; | 1827 | }; |
1820 | 1828 | ||
1821 | static struct radeon_asic_ring si_dma_ring = { | 1829 | static struct radeon_asic_ring si_dma_ring = { |
@@ -1828,9 +1836,9 @@ static struct radeon_asic_ring si_dma_ring = { | |||
1828 | .ib_test = &r600_dma_ib_test, | 1836 | .ib_test = &r600_dma_ib_test, |
1829 | .is_lockup = &si_dma_is_lockup, | 1837 | .is_lockup = &si_dma_is_lockup, |
1830 | .vm_flush = &si_dma_vm_flush, | 1838 | .vm_flush = &si_dma_vm_flush, |
1831 | .get_rptr = &r600_dma_get_rptr, | 1839 | .get_rptr = &cayman_dma_get_rptr, |
1832 | .get_wptr = &r600_dma_get_wptr, | 1840 | .get_wptr = &cayman_dma_get_wptr, |
1833 | .set_wptr = &r600_dma_set_wptr, | 1841 | .set_wptr = &cayman_dma_set_wptr, |
1834 | }; | 1842 | }; |
1835 | 1843 | ||
1836 | static struct radeon_asic si_asic = { | 1844 | static struct radeon_asic si_asic = { |
@@ -1913,6 +1921,7 @@ static struct radeon_asic si_asic = { | |||
1913 | .init = &si_dpm_init, | 1921 | .init = &si_dpm_init, |
1914 | .setup_asic = &si_dpm_setup_asic, | 1922 | .setup_asic = &si_dpm_setup_asic, |
1915 | .enable = &si_dpm_enable, | 1923 | .enable = &si_dpm_enable, |
1924 | .late_enable = &si_dpm_late_enable, | ||
1916 | .disable = &si_dpm_disable, | 1925 | .disable = &si_dpm_disable, |
1917 | .pre_set_power_state = &si_dpm_pre_set_power_state, | 1926 | .pre_set_power_state = &si_dpm_pre_set_power_state, |
1918 | .set_power_state = &si_dpm_set_power_state, | 1927 | .set_power_state = &si_dpm_set_power_state, |
@@ -1943,9 +1952,9 @@ static struct radeon_asic_ring ci_gfx_ring = { | |||
1943 | .ib_test = &cik_ib_test, | 1952 | .ib_test = &cik_ib_test, |
1944 | .is_lockup = &cik_gfx_is_lockup, | 1953 | .is_lockup = &cik_gfx_is_lockup, |
1945 | .vm_flush = &cik_vm_flush, | 1954 | .vm_flush = &cik_vm_flush, |
1946 | .get_rptr = &radeon_ring_generic_get_rptr, | 1955 | .get_rptr = &cik_gfx_get_rptr, |
1947 | .get_wptr = &radeon_ring_generic_get_wptr, | 1956 | .get_wptr = &cik_gfx_get_wptr, |
1948 | .set_wptr = &radeon_ring_generic_set_wptr, | 1957 | .set_wptr = &cik_gfx_set_wptr, |
1949 | }; | 1958 | }; |
1950 | 1959 | ||
1951 | static struct radeon_asic_ring ci_cp_ring = { | 1960 | static struct radeon_asic_ring ci_cp_ring = { |
@@ -1958,9 +1967,9 @@ static struct radeon_asic_ring ci_cp_ring = { | |||
1958 | .ib_test = &cik_ib_test, | 1967 | .ib_test = &cik_ib_test, |
1959 | .is_lockup = &cik_gfx_is_lockup, | 1968 | .is_lockup = &cik_gfx_is_lockup, |
1960 | .vm_flush = &cik_vm_flush, | 1969 | .vm_flush = &cik_vm_flush, |
1961 | .get_rptr = &cik_compute_ring_get_rptr, | 1970 | .get_rptr = &cik_compute_get_rptr, |
1962 | .get_wptr = &cik_compute_ring_get_wptr, | 1971 | .get_wptr = &cik_compute_get_wptr, |
1963 | .set_wptr = &cik_compute_ring_set_wptr, | 1972 | .set_wptr = &cik_compute_set_wptr, |
1964 | }; | 1973 | }; |
1965 | 1974 | ||
1966 | static struct radeon_asic_ring ci_dma_ring = { | 1975 | static struct radeon_asic_ring ci_dma_ring = { |
@@ -1973,9 +1982,9 @@ static struct radeon_asic_ring ci_dma_ring = { | |||
1973 | .ib_test = &cik_sdma_ib_test, | 1982 | .ib_test = &cik_sdma_ib_test, |
1974 | .is_lockup = &cik_sdma_is_lockup, | 1983 | .is_lockup = &cik_sdma_is_lockup, |
1975 | .vm_flush = &cik_dma_vm_flush, | 1984 | .vm_flush = &cik_dma_vm_flush, |
1976 | .get_rptr = &r600_dma_get_rptr, | 1985 | .get_rptr = &cik_sdma_get_rptr, |
1977 | .get_wptr = &r600_dma_get_wptr, | 1986 | .get_wptr = &cik_sdma_get_wptr, |
1978 | .set_wptr = &r600_dma_set_wptr, | 1987 | .set_wptr = &cik_sdma_set_wptr, |
1979 | }; | 1988 | }; |
1980 | 1989 | ||
1981 | static struct radeon_asic ci_asic = { | 1990 | static struct radeon_asic ci_asic = { |
@@ -2058,6 +2067,7 @@ static struct radeon_asic ci_asic = { | |||
2058 | .init = &ci_dpm_init, | 2067 | .init = &ci_dpm_init, |
2059 | .setup_asic = &ci_dpm_setup_asic, | 2068 | .setup_asic = &ci_dpm_setup_asic, |
2060 | .enable = &ci_dpm_enable, | 2069 | .enable = &ci_dpm_enable, |
2070 | .late_enable = &ci_dpm_late_enable, | ||
2061 | .disable = &ci_dpm_disable, | 2071 | .disable = &ci_dpm_disable, |
2062 | .pre_set_power_state = &ci_dpm_pre_set_power_state, | 2072 | .pre_set_power_state = &ci_dpm_pre_set_power_state, |
2063 | .set_power_state = &ci_dpm_set_power_state, | 2073 | .set_power_state = &ci_dpm_set_power_state, |
@@ -2159,6 +2169,7 @@ static struct radeon_asic kv_asic = { | |||
2159 | .init = &kv_dpm_init, | 2169 | .init = &kv_dpm_init, |
2160 | .setup_asic = &kv_dpm_setup_asic, | 2170 | .setup_asic = &kv_dpm_setup_asic, |
2161 | .enable = &kv_dpm_enable, | 2171 | .enable = &kv_dpm_enable, |
2172 | .late_enable = &kv_dpm_late_enable, | ||
2162 | .disable = &kv_dpm_disable, | 2173 | .disable = &kv_dpm_disable, |
2163 | .pre_set_power_state = &kv_dpm_pre_set_power_state, | 2174 | .pre_set_power_state = &kv_dpm_pre_set_power_state, |
2164 | .set_power_state = &kv_dpm_set_power_state, | 2175 | .set_power_state = &kv_dpm_set_power_state, |
@@ -2449,7 +2460,7 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
2449 | rdev->cg_flags = | 2460 | rdev->cg_flags = |
2450 | RADEON_CG_SUPPORT_GFX_MGCG | | 2461 | RADEON_CG_SUPPORT_GFX_MGCG | |
2451 | RADEON_CG_SUPPORT_GFX_MGLS | | 2462 | RADEON_CG_SUPPORT_GFX_MGLS | |
2452 | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ | 2463 | RADEON_CG_SUPPORT_GFX_CGCG | |
2453 | RADEON_CG_SUPPORT_GFX_CGLS | | 2464 | RADEON_CG_SUPPORT_GFX_CGLS | |
2454 | RADEON_CG_SUPPORT_GFX_CGTS | | 2465 | RADEON_CG_SUPPORT_GFX_CGTS | |
2455 | RADEON_CG_SUPPORT_GFX_CGTS_LS | | 2466 | RADEON_CG_SUPPORT_GFX_CGTS_LS | |
@@ -2468,7 +2479,7 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
2468 | rdev->cg_flags = | 2479 | rdev->cg_flags = |
2469 | RADEON_CG_SUPPORT_GFX_MGCG | | 2480 | RADEON_CG_SUPPORT_GFX_MGCG | |
2470 | RADEON_CG_SUPPORT_GFX_MGLS | | 2481 | RADEON_CG_SUPPORT_GFX_MGLS | |
2471 | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ | 2482 | RADEON_CG_SUPPORT_GFX_CGCG | |
2472 | RADEON_CG_SUPPORT_GFX_CGLS | | 2483 | RADEON_CG_SUPPORT_GFX_CGLS | |
2473 | RADEON_CG_SUPPORT_GFX_CGTS | | 2484 | RADEON_CG_SUPPORT_GFX_CGTS | |
2474 | RADEON_CG_SUPPORT_GFX_CP_LS | | 2485 | RADEON_CG_SUPPORT_GFX_CP_LS | |
@@ -2493,7 +2504,7 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
2493 | rdev->cg_flags = | 2504 | rdev->cg_flags = |
2494 | RADEON_CG_SUPPORT_GFX_MGCG | | 2505 | RADEON_CG_SUPPORT_GFX_MGCG | |
2495 | RADEON_CG_SUPPORT_GFX_MGLS | | 2506 | RADEON_CG_SUPPORT_GFX_MGLS | |
2496 | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ | 2507 | RADEON_CG_SUPPORT_GFX_CGCG | |
2497 | RADEON_CG_SUPPORT_GFX_CGLS | | 2508 | RADEON_CG_SUPPORT_GFX_CGLS | |
2498 | RADEON_CG_SUPPORT_GFX_CGTS | | 2509 | RADEON_CG_SUPPORT_GFX_CGTS | |
2499 | RADEON_CG_SUPPORT_GFX_CGTS_LS | | 2510 | RADEON_CG_SUPPORT_GFX_CGTS_LS | |
@@ -2521,7 +2532,7 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
2521 | rdev->cg_flags = | 2532 | rdev->cg_flags = |
2522 | RADEON_CG_SUPPORT_GFX_MGCG | | 2533 | RADEON_CG_SUPPORT_GFX_MGCG | |
2523 | RADEON_CG_SUPPORT_GFX_MGLS | | 2534 | RADEON_CG_SUPPORT_GFX_MGLS | |
2524 | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ | 2535 | RADEON_CG_SUPPORT_GFX_CGCG | |
2525 | RADEON_CG_SUPPORT_GFX_CGLS | | 2536 | RADEON_CG_SUPPORT_GFX_CGLS | |
2526 | RADEON_CG_SUPPORT_GFX_CGTS | | 2537 | RADEON_CG_SUPPORT_GFX_CGTS | |
2527 | RADEON_CG_SUPPORT_GFX_CGTS_LS | | 2538 | RADEON_CG_SUPPORT_GFX_CGTS_LS | |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index c9fd97b58076..b3bc433eed4c 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -47,13 +47,6 @@ u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder); | |||
47 | void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level); | 47 | void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level); |
48 | u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder); | 48 | u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder); |
49 | 49 | ||
50 | u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev, | ||
51 | struct radeon_ring *ring); | ||
52 | u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev, | ||
53 | struct radeon_ring *ring); | ||
54 | void radeon_ring_generic_set_wptr(struct radeon_device *rdev, | ||
55 | struct radeon_ring *ring); | ||
56 | |||
57 | /* | 50 | /* |
58 | * r100,rv100,rs100,rv200,rs200 | 51 | * r100,rv100,rs100,rv200,rs200 |
59 | */ | 52 | */ |
@@ -148,6 +141,13 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); | |||
148 | extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc); | 141 | extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc); |
149 | extern int r100_mc_wait_for_idle(struct radeon_device *rdev); | 142 | extern int r100_mc_wait_for_idle(struct radeon_device *rdev); |
150 | 143 | ||
144 | u32 r100_gfx_get_rptr(struct radeon_device *rdev, | ||
145 | struct radeon_ring *ring); | ||
146 | u32 r100_gfx_get_wptr(struct radeon_device *rdev, | ||
147 | struct radeon_ring *ring); | ||
148 | void r100_gfx_set_wptr(struct radeon_device *rdev, | ||
149 | struct radeon_ring *ring); | ||
150 | |||
151 | /* | 151 | /* |
152 | * r200,rv250,rs300,rv280 | 152 | * r200,rv250,rs300,rv280 |
153 | */ | 153 | */ |
@@ -368,6 +368,12 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev); | |||
368 | int r600_pcie_gart_init(struct radeon_device *rdev); | 368 | int r600_pcie_gart_init(struct radeon_device *rdev); |
369 | void r600_scratch_init(struct radeon_device *rdev); | 369 | void r600_scratch_init(struct radeon_device *rdev); |
370 | int r600_init_microcode(struct radeon_device *rdev); | 370 | int r600_init_microcode(struct radeon_device *rdev); |
371 | u32 r600_gfx_get_rptr(struct radeon_device *rdev, | ||
372 | struct radeon_ring *ring); | ||
373 | u32 r600_gfx_get_wptr(struct radeon_device *rdev, | ||
374 | struct radeon_ring *ring); | ||
375 | void r600_gfx_set_wptr(struct radeon_device *rdev, | ||
376 | struct radeon_ring *ring); | ||
371 | /* r600 irq */ | 377 | /* r600 irq */ |
372 | int r600_irq_process(struct radeon_device *rdev); | 378 | int r600_irq_process(struct radeon_device *rdev); |
373 | int r600_irq_init(struct radeon_device *rdev); | 379 | int r600_irq_init(struct radeon_device *rdev); |
@@ -392,6 +398,7 @@ int rv6xx_get_temp(struct radeon_device *rdev); | |||
392 | int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); | 398 | int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); |
393 | int r600_dpm_pre_set_power_state(struct radeon_device *rdev); | 399 | int r600_dpm_pre_set_power_state(struct radeon_device *rdev); |
394 | void r600_dpm_post_set_power_state(struct radeon_device *rdev); | 400 | void r600_dpm_post_set_power_state(struct radeon_device *rdev); |
401 | int r600_dpm_late_enable(struct radeon_device *rdev); | ||
395 | /* r600 dma */ | 402 | /* r600 dma */ |
396 | uint32_t r600_dma_get_rptr(struct radeon_device *rdev, | 403 | uint32_t r600_dma_get_rptr(struct radeon_device *rdev, |
397 | struct radeon_ring *ring); | 404 | struct radeon_ring *ring); |
@@ -454,6 +461,7 @@ int rv770_get_temp(struct radeon_device *rdev); | |||
454 | /* rv7xx pm */ | 461 | /* rv7xx pm */ |
455 | int rv770_dpm_init(struct radeon_device *rdev); | 462 | int rv770_dpm_init(struct radeon_device *rdev); |
456 | int rv770_dpm_enable(struct radeon_device *rdev); | 463 | int rv770_dpm_enable(struct radeon_device *rdev); |
464 | int rv770_dpm_late_enable(struct radeon_device *rdev); | ||
457 | void rv770_dpm_disable(struct radeon_device *rdev); | 465 | void rv770_dpm_disable(struct radeon_device *rdev); |
458 | int rv770_dpm_set_power_state(struct radeon_device *rdev); | 466 | int rv770_dpm_set_power_state(struct radeon_device *rdev); |
459 | void rv770_dpm_setup_asic(struct radeon_device *rdev); | 467 | void rv770_dpm_setup_asic(struct radeon_device *rdev); |
@@ -545,6 +553,7 @@ u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low); | |||
545 | bool btc_dpm_vblank_too_short(struct radeon_device *rdev); | 553 | bool btc_dpm_vblank_too_short(struct radeon_device *rdev); |
546 | int sumo_dpm_init(struct radeon_device *rdev); | 554 | int sumo_dpm_init(struct radeon_device *rdev); |
547 | int sumo_dpm_enable(struct radeon_device *rdev); | 555 | int sumo_dpm_enable(struct radeon_device *rdev); |
556 | int sumo_dpm_late_enable(struct radeon_device *rdev); | ||
548 | void sumo_dpm_disable(struct radeon_device *rdev); | 557 | void sumo_dpm_disable(struct radeon_device *rdev); |
549 | int sumo_dpm_pre_set_power_state(struct radeon_device *rdev); | 558 | int sumo_dpm_pre_set_power_state(struct radeon_device *rdev); |
550 | int sumo_dpm_set_power_state(struct radeon_device *rdev); | 559 | int sumo_dpm_set_power_state(struct radeon_device *rdev); |
@@ -591,6 +600,19 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev, | |||
591 | 600 | ||
592 | void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); | 601 | void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
593 | 602 | ||
603 | u32 cayman_gfx_get_rptr(struct radeon_device *rdev, | ||
604 | struct radeon_ring *ring); | ||
605 | u32 cayman_gfx_get_wptr(struct radeon_device *rdev, | ||
606 | struct radeon_ring *ring); | ||
607 | void cayman_gfx_set_wptr(struct radeon_device *rdev, | ||
608 | struct radeon_ring *ring); | ||
609 | uint32_t cayman_dma_get_rptr(struct radeon_device *rdev, | ||
610 | struct radeon_ring *ring); | ||
611 | uint32_t cayman_dma_get_wptr(struct radeon_device *rdev, | ||
612 | struct radeon_ring *ring); | ||
613 | void cayman_dma_set_wptr(struct radeon_device *rdev, | ||
614 | struct radeon_ring *ring); | ||
615 | |||
594 | int ni_dpm_init(struct radeon_device *rdev); | 616 | int ni_dpm_init(struct radeon_device *rdev); |
595 | void ni_dpm_setup_asic(struct radeon_device *rdev); | 617 | void ni_dpm_setup_asic(struct radeon_device *rdev); |
596 | int ni_dpm_enable(struct radeon_device *rdev); | 618 | int ni_dpm_enable(struct radeon_device *rdev); |
@@ -610,6 +632,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev, | |||
610 | bool ni_dpm_vblank_too_short(struct radeon_device *rdev); | 632 | bool ni_dpm_vblank_too_short(struct radeon_device *rdev); |
611 | int trinity_dpm_init(struct radeon_device *rdev); | 633 | int trinity_dpm_init(struct radeon_device *rdev); |
612 | int trinity_dpm_enable(struct radeon_device *rdev); | 634 | int trinity_dpm_enable(struct radeon_device *rdev); |
635 | int trinity_dpm_late_enable(struct radeon_device *rdev); | ||
613 | void trinity_dpm_disable(struct radeon_device *rdev); | 636 | void trinity_dpm_disable(struct radeon_device *rdev); |
614 | int trinity_dpm_pre_set_power_state(struct radeon_device *rdev); | 637 | int trinity_dpm_pre_set_power_state(struct radeon_device *rdev); |
615 | int trinity_dpm_set_power_state(struct radeon_device *rdev); | 638 | int trinity_dpm_set_power_state(struct radeon_device *rdev); |
@@ -669,6 +692,7 @@ int si_get_temp(struct radeon_device *rdev); | |||
669 | int si_dpm_init(struct radeon_device *rdev); | 692 | int si_dpm_init(struct radeon_device *rdev); |
670 | void si_dpm_setup_asic(struct radeon_device *rdev); | 693 | void si_dpm_setup_asic(struct radeon_device *rdev); |
671 | int si_dpm_enable(struct radeon_device *rdev); | 694 | int si_dpm_enable(struct radeon_device *rdev); |
695 | int si_dpm_late_enable(struct radeon_device *rdev); | ||
672 | void si_dpm_disable(struct radeon_device *rdev); | 696 | void si_dpm_disable(struct radeon_device *rdev); |
673 | int si_dpm_pre_set_power_state(struct radeon_device *rdev); | 697 | int si_dpm_pre_set_power_state(struct radeon_device *rdev); |
674 | int si_dpm_set_power_state(struct radeon_device *rdev); | 698 | int si_dpm_set_power_state(struct radeon_device *rdev); |
@@ -739,17 +763,30 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev, | |||
739 | uint32_t incr, uint32_t flags); | 763 | uint32_t incr, uint32_t flags); |
740 | void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); | 764 | void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
741 | int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); | 765 | int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); |
742 | u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, | 766 | u32 cik_gfx_get_rptr(struct radeon_device *rdev, |
743 | struct radeon_ring *ring); | 767 | struct radeon_ring *ring); |
744 | u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, | 768 | u32 cik_gfx_get_wptr(struct radeon_device *rdev, |
745 | struct radeon_ring *ring); | 769 | struct radeon_ring *ring); |
746 | void cik_compute_ring_set_wptr(struct radeon_device *rdev, | 770 | void cik_gfx_set_wptr(struct radeon_device *rdev, |
747 | struct radeon_ring *ring); | 771 | struct radeon_ring *ring); |
772 | u32 cik_compute_get_rptr(struct radeon_device *rdev, | ||
773 | struct radeon_ring *ring); | ||
774 | u32 cik_compute_get_wptr(struct radeon_device *rdev, | ||
775 | struct radeon_ring *ring); | ||
776 | void cik_compute_set_wptr(struct radeon_device *rdev, | ||
777 | struct radeon_ring *ring); | ||
778 | u32 cik_sdma_get_rptr(struct radeon_device *rdev, | ||
779 | struct radeon_ring *ring); | ||
780 | u32 cik_sdma_get_wptr(struct radeon_device *rdev, | ||
781 | struct radeon_ring *ring); | ||
782 | void cik_sdma_set_wptr(struct radeon_device *rdev, | ||
783 | struct radeon_ring *ring); | ||
748 | int ci_get_temp(struct radeon_device *rdev); | 784 | int ci_get_temp(struct radeon_device *rdev); |
749 | int kv_get_temp(struct radeon_device *rdev); | 785 | int kv_get_temp(struct radeon_device *rdev); |
750 | 786 | ||
751 | int ci_dpm_init(struct radeon_device *rdev); | 787 | int ci_dpm_init(struct radeon_device *rdev); |
752 | int ci_dpm_enable(struct radeon_device *rdev); | 788 | int ci_dpm_enable(struct radeon_device *rdev); |
789 | int ci_dpm_late_enable(struct radeon_device *rdev); | ||
753 | void ci_dpm_disable(struct radeon_device *rdev); | 790 | void ci_dpm_disable(struct radeon_device *rdev); |
754 | int ci_dpm_pre_set_power_state(struct radeon_device *rdev); | 791 | int ci_dpm_pre_set_power_state(struct radeon_device *rdev); |
755 | int ci_dpm_set_power_state(struct radeon_device *rdev); | 792 | int ci_dpm_set_power_state(struct radeon_device *rdev); |
@@ -770,6 +807,7 @@ void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); | |||
770 | 807 | ||
771 | int kv_dpm_init(struct radeon_device *rdev); | 808 | int kv_dpm_init(struct radeon_device *rdev); |
772 | int kv_dpm_enable(struct radeon_device *rdev); | 809 | int kv_dpm_enable(struct radeon_device *rdev); |
810 | int kv_dpm_late_enable(struct radeon_device *rdev); | ||
773 | void kv_dpm_disable(struct radeon_device *rdev); | 811 | void kv_dpm_disable(struct radeon_device *rdev); |
774 | int kv_dpm_pre_set_power_state(struct radeon_device *rdev); | 812 | int kv_dpm_pre_set_power_state(struct radeon_device *rdev); |
775 | int kv_dpm_set_power_state(struct radeon_device *rdev); | 813 | int kv_dpm_set_power_state(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 5c39bf7c3d88..80a56ad40c52 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -30,27 +30,10 @@ | |||
30 | #include "atom.h" | 30 | #include "atom.h" |
31 | #include "atom-bits.h" | 31 | #include "atom-bits.h" |
32 | 32 | ||
33 | /* from radeon_encoder.c */ | ||
34 | extern uint32_t | ||
35 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, | ||
36 | uint8_t dac); | ||
37 | extern void radeon_link_encoder_connector(struct drm_device *dev); | ||
38 | extern void | 33 | extern void |
39 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, | 34 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, |
40 | uint32_t supported_device, u16 caps); | 35 | uint32_t supported_device, u16 caps); |
41 | 36 | ||
42 | /* from radeon_connector.c */ | ||
43 | extern void | ||
44 | radeon_add_atom_connector(struct drm_device *dev, | ||
45 | uint32_t connector_id, | ||
46 | uint32_t supported_device, | ||
47 | int connector_type, | ||
48 | struct radeon_i2c_bus_rec *i2c_bus, | ||
49 | uint32_t igp_lane_info, | ||
50 | uint16_t connector_object_id, | ||
51 | struct radeon_hpd *hpd, | ||
52 | struct radeon_router *router); | ||
53 | |||
54 | /* from radeon_legacy_encoder.c */ | 37 | /* from radeon_legacy_encoder.c */ |
55 | extern void | 38 | extern void |
56 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, | 39 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 68ce36056019..6651177110f0 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -37,22 +37,6 @@ | |||
37 | #include <asm/pci-bridge.h> | 37 | #include <asm/pci-bridge.h> |
38 | #endif /* CONFIG_PPC_PMAC */ | 38 | #endif /* CONFIG_PPC_PMAC */ |
39 | 39 | ||
40 | /* from radeon_encoder.c */ | ||
41 | extern uint32_t | ||
42 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, | ||
43 | uint8_t dac); | ||
44 | extern void radeon_link_encoder_connector(struct drm_device *dev); | ||
45 | |||
46 | /* from radeon_connector.c */ | ||
47 | extern void | ||
48 | radeon_add_legacy_connector(struct drm_device *dev, | ||
49 | uint32_t connector_id, | ||
50 | uint32_t supported_device, | ||
51 | int connector_type, | ||
52 | struct radeon_i2c_bus_rec *i2c_bus, | ||
53 | uint16_t connector_object_id, | ||
54 | struct radeon_hpd *hpd); | ||
55 | |||
56 | /* from radeon_legacy_encoder.c */ | 40 | /* from radeon_legacy_encoder.c */ |
57 | extern void | 41 | extern void |
58 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, | 42 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 20a768ac89a8..82d4f865546e 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -33,15 +33,6 @@ | |||
33 | 33 | ||
34 | #include <linux/pm_runtime.h> | 34 | #include <linux/pm_runtime.h> |
35 | 35 | ||
36 | extern void | ||
37 | radeon_combios_connected_scratch_regs(struct drm_connector *connector, | ||
38 | struct drm_encoder *encoder, | ||
39 | bool connected); | ||
40 | extern void | ||
41 | radeon_atombios_connected_scratch_regs(struct drm_connector *connector, | ||
42 | struct drm_encoder *encoder, | ||
43 | bool connected); | ||
44 | |||
45 | void radeon_connector_hotplug(struct drm_connector *connector) | 36 | void radeon_connector_hotplug(struct drm_connector *connector) |
46 | { | 37 | { |
47 | struct drm_device *dev = connector->dev; | 38 | struct drm_device *dev = connector->dev; |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 39b033b441d2..b012cbbc3ed5 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -144,6 +144,11 @@ void radeon_program_register_sequence(struct radeon_device *rdev, | |||
144 | } | 144 | } |
145 | } | 145 | } |
146 | 146 | ||
147 | void radeon_pci_config_reset(struct radeon_device *rdev) | ||
148 | { | ||
149 | pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA); | ||
150 | } | ||
151 | |||
147 | /** | 152 | /** |
148 | * radeon_surface_init - Clear GPU surface registers. | 153 | * radeon_surface_init - Clear GPU surface registers. |
149 | * | 154 | * |
@@ -249,7 +254,7 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) | |||
249 | * Init doorbell driver information (CIK) | 254 | * Init doorbell driver information (CIK) |
250 | * Returns 0 on success, error on failure. | 255 | * Returns 0 on success, error on failure. |
251 | */ | 256 | */ |
252 | int radeon_doorbell_init(struct radeon_device *rdev) | 257 | static int radeon_doorbell_init(struct radeon_device *rdev) |
253 | { | 258 | { |
254 | /* doorbell bar mapping */ | 259 | /* doorbell bar mapping */ |
255 | rdev->doorbell.base = pci_resource_start(rdev->pdev, 2); | 260 | rdev->doorbell.base = pci_resource_start(rdev->pdev, 2); |
@@ -278,7 +283,7 @@ int radeon_doorbell_init(struct radeon_device *rdev) | |||
278 | * | 283 | * |
279 | * Tear down doorbell driver information (CIK) | 284 | * Tear down doorbell driver information (CIK) |
280 | */ | 285 | */ |
281 | void radeon_doorbell_fini(struct radeon_device *rdev) | 286 | static void radeon_doorbell_fini(struct radeon_device *rdev) |
282 | { | 287 | { |
283 | iounmap(rdev->doorbell.ptr); | 288 | iounmap(rdev->doorbell.ptr); |
284 | rdev->doorbell.ptr = NULL; | 289 | rdev->doorbell.ptr = NULL; |
@@ -1330,6 +1335,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1330 | if (r) | 1335 | if (r) |
1331 | return r; | 1336 | return r; |
1332 | } | 1337 | } |
1338 | |||
1333 | if ((radeon_testing & 1)) { | 1339 | if ((radeon_testing & 1)) { |
1334 | if (rdev->accel_working) | 1340 | if (rdev->accel_working) |
1335 | radeon_test_moves(rdev); | 1341 | radeon_test_moves(rdev); |
@@ -1455,7 +1461,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) | |||
1455 | 1461 | ||
1456 | radeon_save_bios_scratch_regs(rdev); | 1462 | radeon_save_bios_scratch_regs(rdev); |
1457 | 1463 | ||
1458 | radeon_pm_suspend(rdev); | ||
1459 | radeon_suspend(rdev); | 1464 | radeon_suspend(rdev); |
1460 | radeon_hpd_fini(rdev); | 1465 | radeon_hpd_fini(rdev); |
1461 | /* evict remaining vram memory */ | 1466 | /* evict remaining vram memory */ |
@@ -1516,14 +1521,22 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) | |||
1516 | if (r) | 1521 | if (r) |
1517 | DRM_ERROR("ib ring test failed (%d).\n", r); | 1522 | DRM_ERROR("ib ring test failed (%d).\n", r); |
1518 | 1523 | ||
1519 | radeon_pm_resume(rdev); | 1524 | if (rdev->pm.dpm_enabled) { |
1525 | /* do dpm late init */ | ||
1526 | r = radeon_pm_late_init(rdev); | ||
1527 | if (r) { | ||
1528 | rdev->pm.dpm_enabled = false; | ||
1529 | DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); | ||
1530 | } | ||
1531 | } | ||
1532 | |||
1520 | radeon_restore_bios_scratch_regs(rdev); | 1533 | radeon_restore_bios_scratch_regs(rdev); |
1521 | 1534 | ||
1522 | if (fbcon) { | 1535 | if (fbcon) { |
1523 | radeon_fbdev_set_suspend(rdev, 0); | 1536 | radeon_fbdev_set_suspend(rdev, 0); |
1524 | console_unlock(); | 1537 | console_unlock(); |
1525 | } | 1538 | } |
1526 | 1539 | ||
1527 | /* init dig PHYs, disp eng pll */ | 1540 | /* init dig PHYs, disp eng pll */ |
1528 | if (rdev->is_atom_bios) { | 1541 | if (rdev->is_atom_bios) { |
1529 | radeon_atom_encoder_init(rdev); | 1542 | radeon_atom_encoder_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 7b253815a323..7ea647b84733 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -1464,12 +1464,22 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
1464 | /* setup afmt */ | 1464 | /* setup afmt */ |
1465 | radeon_afmt_init(rdev); | 1465 | radeon_afmt_init(rdev); |
1466 | 1466 | ||
1467 | /* Initialize power management */ | ||
1468 | radeon_pm_init(rdev); | ||
1469 | |||
1470 | radeon_fbdev_init(rdev); | 1467 | radeon_fbdev_init(rdev); |
1471 | drm_kms_helper_poll_init(rdev->ddev); | 1468 | drm_kms_helper_poll_init(rdev->ddev); |
1472 | 1469 | ||
1470 | if (rdev->pm.dpm_enabled) { | ||
1471 | /* do dpm late init */ | ||
1472 | ret = radeon_pm_late_init(rdev); | ||
1473 | if (ret) { | ||
1474 | rdev->pm.dpm_enabled = false; | ||
1475 | DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); | ||
1476 | } | ||
1477 | /* set the dpm state for PX since there won't be | ||
1478 | * a modeset to call this. | ||
1479 | */ | ||
1480 | radeon_pm_compute_clocks(rdev); | ||
1481 | } | ||
1482 | |||
1473 | return 0; | 1483 | return 0; |
1474 | } | 1484 | } |
1475 | 1485 | ||
@@ -1477,7 +1487,6 @@ void radeon_modeset_fini(struct radeon_device *rdev) | |||
1477 | { | 1487 | { |
1478 | radeon_fbdev_fini(rdev); | 1488 | radeon_fbdev_fini(rdev); |
1479 | kfree(rdev->mode_info.bios_hardcoded_edid); | 1489 | kfree(rdev->mode_info.bios_hardcoded_edid); |
1480 | radeon_pm_fini(rdev); | ||
1481 | 1490 | ||
1482 | if (rdev->mode_info.mode_config_initialized) { | 1491 | if (rdev->mode_info.mode_config_initialized) { |
1483 | radeon_afmt_fini(rdev); | 1492 | radeon_afmt_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index f6f30b9e9ff5..e91d548063ef 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -167,6 +167,7 @@ int radeon_fastfb = 0; | |||
167 | int radeon_dpm = -1; | 167 | int radeon_dpm = -1; |
168 | int radeon_aspm = -1; | 168 | int radeon_aspm = -1; |
169 | int radeon_runtime_pm = -1; | 169 | int radeon_runtime_pm = -1; |
170 | int radeon_hard_reset = 0; | ||
170 | 171 | ||
171 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); | 172 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); |
172 | module_param_named(no_wb, radeon_no_wb, int, 0444); | 173 | module_param_named(no_wb, radeon_no_wb, int, 0444); |
@@ -231,6 +232,9 @@ module_param_named(aspm, radeon_aspm, int, 0444); | |||
231 | MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)"); | 232 | MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)"); |
232 | module_param_named(runpm, radeon_runtime_pm, int, 0444); | 233 | module_param_named(runpm, radeon_runtime_pm, int, 0444); |
233 | 234 | ||
235 | MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); | ||
236 | module_param_named(hard_reset, radeon_hard_reset, int, 0444); | ||
237 | |||
234 | static struct pci_device_id pciidlist[] = { | 238 | static struct pci_device_id pciidlist[] = { |
235 | radeon_PCI_IDS | 239 | radeon_PCI_IDS |
236 | }; | 240 | }; |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index d3a86e43c012..866744e47cfa 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -841,6 +841,8 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data) | |||
841 | if (!rdev->fence_drv[i].initialized) | 841 | if (!rdev->fence_drv[i].initialized) |
842 | continue; | 842 | continue; |
843 | 843 | ||
844 | radeon_fence_process(rdev, i); | ||
845 | |||
844 | seq_printf(m, "--- ring %d ---\n", i); | 846 | seq_printf(m, "--- ring %d ---\n", i); |
845 | seq_printf(m, "Last signaled fence 0x%016llx\n", | 847 | seq_printf(m, "Last signaled fence 0x%016llx\n", |
846 | (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq)); | 848 | (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq)); |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 805c5e566b9a..b96c819024b3 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -86,7 +86,7 @@ retry: | |||
86 | return 0; | 86 | return 0; |
87 | } | 87 | } |
88 | 88 | ||
89 | int radeon_gem_set_domain(struct drm_gem_object *gobj, | 89 | static int radeon_gem_set_domain(struct drm_gem_object *gobj, |
90 | uint32_t rdomain, uint32_t wdomain) | 90 | uint32_t rdomain, uint32_t wdomain) |
91 | { | 91 | { |
92 | struct radeon_bo *robj; | 92 | struct radeon_bo *robj; |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index fc60b74ee304..e24ca6ab96de 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -1020,6 +1020,9 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) | |||
1020 | /* Add the default buses */ | 1020 | /* Add the default buses */ |
1021 | void radeon_i2c_init(struct radeon_device *rdev) | 1021 | void radeon_i2c_init(struct radeon_device *rdev) |
1022 | { | 1022 | { |
1023 | if (radeon_hw_i2c) | ||
1024 | DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n"); | ||
1025 | |||
1023 | if (rdev->is_atom_bios) | 1026 | if (rdev->is_atom_bios) |
1024 | radeon_atombios_i2c_init(rdev); | 1027 | radeon_atombios_i2c_init(rdev); |
1025 | else | 1028 | else |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index daa28b6a6832..c44574e248d1 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -191,7 +191,7 @@ static void radeon_set_filp_rights(struct drm_device *dev, | |||
191 | * etc. (all asics). | 191 | * etc. (all asics). |
192 | * Returns 0 on success, -EINVAL on failure. | 192 | * Returns 0 on success, -EINVAL on failure. |
193 | */ | 193 | */ |
194 | int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | 194 | static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
195 | { | 195 | { |
196 | struct radeon_device *rdev = dev->dev_private; | 196 | struct radeon_device *rdev = dev->dev_private; |
197 | struct drm_radeon_info *info = data; | 197 | struct drm_radeon_info *info = data; |
@@ -707,7 +707,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, | |||
707 | } | 707 | } |
708 | 708 | ||
709 | #define KMS_INVALID_IOCTL(name) \ | 709 | #define KMS_INVALID_IOCTL(name) \ |
710 | int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\ | 710 | static int name(struct drm_device *dev, void *data, struct drm_file \ |
711 | *file_priv) \ | ||
711 | { \ | 712 | { \ |
712 | DRM_ERROR("invalid ioctl with kms %s\n", __func__); \ | 713 | DRM_ERROR("invalid ioctl with kms %s\n", __func__); \ |
713 | return -EINVAL; \ | 714 | return -EINVAL; \ |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 3f0dd664af90..28bba631b80c 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -624,6 +624,30 @@ struct atom_voltage_table | |||
624 | struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; | 624 | struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; |
625 | }; | 625 | }; |
626 | 626 | ||
627 | |||
628 | extern void | ||
629 | radeon_add_atom_connector(struct drm_device *dev, | ||
630 | uint32_t connector_id, | ||
631 | uint32_t supported_device, | ||
632 | int connector_type, | ||
633 | struct radeon_i2c_bus_rec *i2c_bus, | ||
634 | uint32_t igp_lane_info, | ||
635 | uint16_t connector_object_id, | ||
636 | struct radeon_hpd *hpd, | ||
637 | struct radeon_router *router); | ||
638 | extern void | ||
639 | radeon_add_legacy_connector(struct drm_device *dev, | ||
640 | uint32_t connector_id, | ||
641 | uint32_t supported_device, | ||
642 | int connector_type, | ||
643 | struct radeon_i2c_bus_rec *i2c_bus, | ||
644 | uint16_t connector_object_id, | ||
645 | struct radeon_hpd *hpd); | ||
646 | extern uint32_t | ||
647 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, | ||
648 | uint8_t dac); | ||
649 | extern void radeon_link_encoder_connector(struct drm_device *dev); | ||
650 | |||
627 | extern enum radeon_tv_std | 651 | extern enum radeon_tv_std |
628 | radeon_combios_get_tv_info(struct radeon_device *rdev); | 652 | radeon_combios_get_tv_info(struct radeon_device *rdev); |
629 | extern enum radeon_tv_std | 653 | extern enum radeon_tv_std |
@@ -631,6 +655,15 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev); | |||
631 | extern void radeon_atombios_get_default_voltages(struct radeon_device *rdev, | 655 | extern void radeon_atombios_get_default_voltages(struct radeon_device *rdev, |
632 | u16 *vddc, u16 *vddci, u16 *mvdd); | 656 | u16 *vddc, u16 *vddci, u16 *mvdd); |
633 | 657 | ||
658 | extern void | ||
659 | radeon_combios_connected_scratch_regs(struct drm_connector *connector, | ||
660 | struct drm_encoder *encoder, | ||
661 | bool connected); | ||
662 | extern void | ||
663 | radeon_atombios_connected_scratch_regs(struct drm_connector *connector, | ||
664 | struct drm_encoder *encoder, | ||
665 | bool connected); | ||
666 | |||
634 | extern struct drm_connector * | 667 | extern struct drm_connector * |
635 | radeon_get_connector_for_encoder(struct drm_encoder *encoder); | 668 | radeon_get_connector_for_encoder(struct drm_encoder *encoder); |
636 | extern struct drm_connector * | 669 | extern struct drm_connector * |
@@ -666,6 +699,7 @@ extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder); | |||
666 | extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder); | 699 | extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder); |
667 | extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | 700 | extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, |
668 | u8 write_byte, u8 *read_byte); | 701 | u8 write_byte, u8 *read_byte); |
702 | void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); | ||
669 | 703 | ||
670 | extern void radeon_i2c_init(struct radeon_device *rdev); | 704 | extern void radeon_i2c_init(struct radeon_device *rdev); |
671 | extern void radeon_i2c_fini(struct radeon_device *rdev); | 705 | extern void radeon_i2c_fini(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index c0fa4aa9ceea..08595cf90b01 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -46,7 +46,7 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo); | |||
46 | * function are calling it. | 46 | * function are calling it. |
47 | */ | 47 | */ |
48 | 48 | ||
49 | void radeon_bo_clear_va(struct radeon_bo *bo) | 49 | static void radeon_bo_clear_va(struct radeon_bo *bo) |
50 | { | 50 | { |
51 | struct radeon_bo_va *bo_va, *tmp; | 51 | struct radeon_bo_va *bo_va, *tmp; |
52 | 52 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 984097b907ef..0b24c4c7dcf9 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -1010,8 +1010,10 @@ static void radeon_pm_resume_old(struct radeon_device *rdev) | |||
1010 | rdev->pm.current_clock_mode_index = 0; | 1010 | rdev->pm.current_clock_mode_index = 0; |
1011 | rdev->pm.current_sclk = rdev->pm.default_sclk; | 1011 | rdev->pm.current_sclk = rdev->pm.default_sclk; |
1012 | rdev->pm.current_mclk = rdev->pm.default_mclk; | 1012 | rdev->pm.current_mclk = rdev->pm.default_mclk; |
1013 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; | 1013 | if (rdev->pm.power_state) { |
1014 | rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; | 1014 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; |
1015 | rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; | ||
1016 | } | ||
1015 | if (rdev->pm.pm_method == PM_METHOD_DYNPM | 1017 | if (rdev->pm.pm_method == PM_METHOD_DYNPM |
1016 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { | 1018 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { |
1017 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; | 1019 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
@@ -1032,25 +1034,27 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev) | |||
1032 | radeon_dpm_setup_asic(rdev); | 1034 | radeon_dpm_setup_asic(rdev); |
1033 | ret = radeon_dpm_enable(rdev); | 1035 | ret = radeon_dpm_enable(rdev); |
1034 | mutex_unlock(&rdev->pm.mutex); | 1036 | mutex_unlock(&rdev->pm.mutex); |
1035 | if (ret) { | 1037 | if (ret) |
1036 | DRM_ERROR("radeon: dpm resume failed\n"); | 1038 | goto dpm_resume_fail; |
1037 | if ((rdev->family >= CHIP_BARTS) && | 1039 | rdev->pm.dpm_enabled = true; |
1038 | (rdev->family <= CHIP_CAYMAN) && | 1040 | radeon_pm_compute_clocks(rdev); |
1039 | rdev->mc_fw) { | 1041 | return; |
1040 | if (rdev->pm.default_vddc) | 1042 | |
1041 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, | 1043 | dpm_resume_fail: |
1042 | SET_VOLTAGE_TYPE_ASIC_VDDC); | 1044 | DRM_ERROR("radeon: dpm resume failed\n"); |
1043 | if (rdev->pm.default_vddci) | 1045 | if ((rdev->family >= CHIP_BARTS) && |
1044 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, | 1046 | (rdev->family <= CHIP_CAYMAN) && |
1045 | SET_VOLTAGE_TYPE_ASIC_VDDCI); | 1047 | rdev->mc_fw) { |
1046 | if (rdev->pm.default_sclk) | 1048 | if (rdev->pm.default_vddc) |
1047 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | 1049 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, |
1048 | if (rdev->pm.default_mclk) | 1050 | SET_VOLTAGE_TYPE_ASIC_VDDC); |
1049 | radeon_set_memory_clock(rdev, rdev->pm.default_mclk); | 1051 | if (rdev->pm.default_vddci) |
1050 | } | 1052 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, |
1051 | } else { | 1053 | SET_VOLTAGE_TYPE_ASIC_VDDCI); |
1052 | rdev->pm.dpm_enabled = true; | 1054 | if (rdev->pm.default_sclk) |
1053 | radeon_pm_compute_clocks(rdev); | 1055 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); |
1056 | if (rdev->pm.default_mclk) | ||
1057 | radeon_set_memory_clock(rdev, rdev->pm.default_mclk); | ||
1054 | } | 1058 | } |
1055 | } | 1059 | } |
1056 | 1060 | ||
@@ -1170,51 +1174,50 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev) | |||
1170 | radeon_dpm_setup_asic(rdev); | 1174 | radeon_dpm_setup_asic(rdev); |
1171 | ret = radeon_dpm_enable(rdev); | 1175 | ret = radeon_dpm_enable(rdev); |
1172 | mutex_unlock(&rdev->pm.mutex); | 1176 | mutex_unlock(&rdev->pm.mutex); |
1173 | if (ret) { | 1177 | if (ret) |
1174 | rdev->pm.dpm_enabled = false; | 1178 | goto dpm_failed; |
1175 | if ((rdev->family >= CHIP_BARTS) && | ||
1176 | (rdev->family <= CHIP_CAYMAN) && | ||
1177 | rdev->mc_fw) { | ||
1178 | if (rdev->pm.default_vddc) | ||
1179 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, | ||
1180 | SET_VOLTAGE_TYPE_ASIC_VDDC); | ||
1181 | if (rdev->pm.default_vddci) | ||
1182 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, | ||
1183 | SET_VOLTAGE_TYPE_ASIC_VDDCI); | ||
1184 | if (rdev->pm.default_sclk) | ||
1185 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | ||
1186 | if (rdev->pm.default_mclk) | ||
1187 | radeon_set_memory_clock(rdev, rdev->pm.default_mclk); | ||
1188 | } | ||
1189 | DRM_ERROR("radeon: dpm initialization failed\n"); | ||
1190 | return ret; | ||
1191 | } | ||
1192 | rdev->pm.dpm_enabled = true; | 1179 | rdev->pm.dpm_enabled = true; |
1193 | radeon_pm_compute_clocks(rdev); | ||
1194 | 1180 | ||
1195 | if (rdev->pm.num_power_states > 1) { | 1181 | ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); |
1196 | ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); | 1182 | if (ret) |
1197 | if (ret) | 1183 | DRM_ERROR("failed to create device file for dpm state\n"); |
1198 | DRM_ERROR("failed to create device file for dpm state\n"); | 1184 | ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); |
1199 | ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); | 1185 | if (ret) |
1200 | if (ret) | 1186 | DRM_ERROR("failed to create device file for dpm state\n"); |
1201 | DRM_ERROR("failed to create device file for dpm state\n"); | 1187 | /* XXX: these are noops for dpm but are here for backwards compat */ |
1202 | /* XXX: these are noops for dpm but are here for backwards compat */ | 1188 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); |
1203 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); | 1189 | if (ret) |
1204 | if (ret) | 1190 | DRM_ERROR("failed to create device file for power profile\n"); |
1205 | DRM_ERROR("failed to create device file for power profile\n"); | 1191 | ret = device_create_file(rdev->dev, &dev_attr_power_method); |
1206 | ret = device_create_file(rdev->dev, &dev_attr_power_method); | 1192 | if (ret) |
1207 | if (ret) | 1193 | DRM_ERROR("failed to create device file for power method\n"); |
1208 | DRM_ERROR("failed to create device file for power method\n"); | ||
1209 | |||
1210 | if (radeon_debugfs_pm_init(rdev)) { | ||
1211 | DRM_ERROR("Failed to register debugfs file for dpm!\n"); | ||
1212 | } | ||
1213 | 1194 | ||
1214 | DRM_INFO("radeon: dpm initialized\n"); | 1195 | if (radeon_debugfs_pm_init(rdev)) { |
1196 | DRM_ERROR("Failed to register debugfs file for dpm!\n"); | ||
1215 | } | 1197 | } |
1216 | 1198 | ||
1199 | DRM_INFO("radeon: dpm initialized\n"); | ||
1200 | |||
1217 | return 0; | 1201 | return 0; |
1202 | |||
1203 | dpm_failed: | ||
1204 | rdev->pm.dpm_enabled = false; | ||
1205 | if ((rdev->family >= CHIP_BARTS) && | ||
1206 | (rdev->family <= CHIP_CAYMAN) && | ||
1207 | rdev->mc_fw) { | ||
1208 | if (rdev->pm.default_vddc) | ||
1209 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, | ||
1210 | SET_VOLTAGE_TYPE_ASIC_VDDC); | ||
1211 | if (rdev->pm.default_vddci) | ||
1212 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, | ||
1213 | SET_VOLTAGE_TYPE_ASIC_VDDCI); | ||
1214 | if (rdev->pm.default_sclk) | ||
1215 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | ||
1216 | if (rdev->pm.default_mclk) | ||
1217 | radeon_set_memory_clock(rdev, rdev->pm.default_mclk); | ||
1218 | } | ||
1219 | DRM_ERROR("radeon: dpm initialization failed\n"); | ||
1220 | return ret; | ||
1218 | } | 1221 | } |
1219 | 1222 | ||
1220 | int radeon_pm_init(struct radeon_device *rdev) | 1223 | int radeon_pm_init(struct radeon_device *rdev) |
@@ -1229,10 +1232,6 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
1229 | case CHIP_RS780: | 1232 | case CHIP_RS780: |
1230 | case CHIP_RS880: | 1233 | case CHIP_RS880: |
1231 | case CHIP_CAYMAN: | 1234 | case CHIP_CAYMAN: |
1232 | case CHIP_BONAIRE: | ||
1233 | case CHIP_KABINI: | ||
1234 | case CHIP_KAVERI: | ||
1235 | case CHIP_HAWAII: | ||
1236 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ | 1235 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ |
1237 | if (!rdev->rlc_fw) | 1236 | if (!rdev->rlc_fw) |
1238 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 1237 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
@@ -1266,6 +1265,10 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
1266 | case CHIP_VERDE: | 1265 | case CHIP_VERDE: |
1267 | case CHIP_OLAND: | 1266 | case CHIP_OLAND: |
1268 | case CHIP_HAINAN: | 1267 | case CHIP_HAINAN: |
1268 | case CHIP_BONAIRE: | ||
1269 | case CHIP_KABINI: | ||
1270 | case CHIP_KAVERI: | ||
1271 | case CHIP_HAWAII: | ||
1269 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ | 1272 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ |
1270 | if (!rdev->rlc_fw) | 1273 | if (!rdev->rlc_fw) |
1271 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 1274 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
@@ -1290,6 +1293,18 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
1290 | return radeon_pm_init_old(rdev); | 1293 | return radeon_pm_init_old(rdev); |
1291 | } | 1294 | } |
1292 | 1295 | ||
1296 | int radeon_pm_late_init(struct radeon_device *rdev) | ||
1297 | { | ||
1298 | int ret = 0; | ||
1299 | |||
1300 | if (rdev->pm.pm_method == PM_METHOD_DPM) { | ||
1301 | mutex_lock(&rdev->pm.mutex); | ||
1302 | ret = radeon_dpm_late_enable(rdev); | ||
1303 | mutex_unlock(&rdev->pm.mutex); | ||
1304 | } | ||
1305 | return ret; | ||
1306 | } | ||
1307 | |||
1293 | static void radeon_pm_fini_old(struct radeon_device *rdev) | 1308 | static void radeon_pm_fini_old(struct radeon_device *rdev) |
1294 | { | 1309 | { |
1295 | if (rdev->pm.num_power_states > 1) { | 1310 | if (rdev->pm.num_power_states > 1) { |
@@ -1420,6 +1435,9 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev) | |||
1420 | struct drm_crtc *crtc; | 1435 | struct drm_crtc *crtc; |
1421 | struct radeon_crtc *radeon_crtc; | 1436 | struct radeon_crtc *radeon_crtc; |
1422 | 1437 | ||
1438 | if (!rdev->pm.dpm_enabled) | ||
1439 | return; | ||
1440 | |||
1423 | mutex_lock(&rdev->pm.mutex); | 1441 | mutex_lock(&rdev->pm.mutex); |
1424 | 1442 | ||
1425 | /* update active crtc counts */ | 1443 | /* update active crtc counts */ |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index ca2d71afeb02..1b783f0e6d3a 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -332,36 +332,6 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev, | |||
332 | } | 332 | } |
333 | } | 333 | } |
334 | 334 | ||
335 | u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev, | ||
336 | struct radeon_ring *ring) | ||
337 | { | ||
338 | u32 rptr; | ||
339 | |||
340 | if (rdev->wb.enabled) | ||
341 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); | ||
342 | else | ||
343 | rptr = RREG32(ring->rptr_reg); | ||
344 | |||
345 | return rptr; | ||
346 | } | ||
347 | |||
348 | u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev, | ||
349 | struct radeon_ring *ring) | ||
350 | { | ||
351 | u32 wptr; | ||
352 | |||
353 | wptr = RREG32(ring->wptr_reg); | ||
354 | |||
355 | return wptr; | ||
356 | } | ||
357 | |||
358 | void radeon_ring_generic_set_wptr(struct radeon_device *rdev, | ||
359 | struct radeon_ring *ring) | ||
360 | { | ||
361 | WREG32(ring->wptr_reg, ring->wptr); | ||
362 | (void)RREG32(ring->wptr_reg); | ||
363 | } | ||
364 | |||
365 | /** | 335 | /** |
366 | * radeon_ring_free_size - update the free size | 336 | * radeon_ring_free_size - update the free size |
367 | * | 337 | * |
@@ -689,22 +659,18 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, | |||
689 | * @ring: radeon_ring structure holding ring information | 659 | * @ring: radeon_ring structure holding ring information |
690 | * @ring_size: size of the ring | 660 | * @ring_size: size of the ring |
691 | * @rptr_offs: offset of the rptr writeback location in the WB buffer | 661 | * @rptr_offs: offset of the rptr writeback location in the WB buffer |
692 | * @rptr_reg: MMIO offset of the rptr register | ||
693 | * @wptr_reg: MMIO offset of the wptr register | ||
694 | * @nop: nop packet for this ring | 662 | * @nop: nop packet for this ring |
695 | * | 663 | * |
696 | * Initialize the driver information for the selected ring (all asics). | 664 | * Initialize the driver information for the selected ring (all asics). |
697 | * Returns 0 on success, error on failure. | 665 | * Returns 0 on success, error on failure. |
698 | */ | 666 | */ |
699 | int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size, | 667 | int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size, |
700 | unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop) | 668 | unsigned rptr_offs, u32 nop) |
701 | { | 669 | { |
702 | int r; | 670 | int r; |
703 | 671 | ||
704 | ring->ring_size = ring_size; | 672 | ring->ring_size = ring_size; |
705 | ring->rptr_offs = rptr_offs; | 673 | ring->rptr_offs = rptr_offs; |
706 | ring->rptr_reg = rptr_reg; | ||
707 | ring->wptr_reg = wptr_reg; | ||
708 | ring->nop = nop; | 674 | ring->nop = nop; |
709 | /* Allocate ring buffer */ | 675 | /* Allocate ring buffer */ |
710 | if (ring->ring_obj == NULL) { | 676 | if (ring->ring_obj == NULL) { |
@@ -790,34 +756,54 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) | |||
790 | struct radeon_device *rdev = dev->dev_private; | 756 | struct radeon_device *rdev = dev->dev_private; |
791 | int ridx = *(int*)node->info_ent->data; | 757 | int ridx = *(int*)node->info_ent->data; |
792 | struct radeon_ring *ring = &rdev->ring[ridx]; | 758 | struct radeon_ring *ring = &rdev->ring[ridx]; |
759 | |||
760 | uint32_t rptr, wptr, rptr_next; | ||
793 | unsigned count, i, j; | 761 | unsigned count, i, j; |
794 | u32 tmp; | ||
795 | 762 | ||
796 | radeon_ring_free_size(rdev, ring); | 763 | radeon_ring_free_size(rdev, ring); |
797 | count = (ring->ring_size / 4) - ring->ring_free_dw; | 764 | count = (ring->ring_size / 4) - ring->ring_free_dw; |
798 | tmp = radeon_ring_get_wptr(rdev, ring); | 765 | |
799 | seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp); | 766 | wptr = radeon_ring_get_wptr(rdev, ring); |
800 | tmp = radeon_ring_get_rptr(rdev, ring); | 767 | seq_printf(m, "wptr: 0x%08x [%5d]\n", |
801 | seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp); | 768 | wptr, wptr); |
769 | |||
770 | rptr = radeon_ring_get_rptr(rdev, ring); | ||
771 | seq_printf(m, "rptr: 0x%08x [%5d]\n", | ||
772 | rptr, rptr); | ||
773 | |||
802 | if (ring->rptr_save_reg) { | 774 | if (ring->rptr_save_reg) { |
803 | seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg, | 775 | rptr_next = RREG32(ring->rptr_save_reg); |
804 | RREG32(ring->rptr_save_reg)); | 776 | seq_printf(m, "rptr next(0x%04x): 0x%08x [%5d]\n", |
805 | } | 777 | ring->rptr_save_reg, rptr_next, rptr_next); |
806 | seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr); | 778 | } else |
807 | seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr); | 779 | rptr_next = ~0; |
808 | seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr); | 780 | |
809 | seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr); | 781 | seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", |
782 | ring->wptr, ring->wptr); | ||
783 | seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", | ||
784 | ring->rptr, ring->rptr); | ||
785 | seq_printf(m, "last semaphore signal addr : 0x%016llx\n", | ||
786 | ring->last_semaphore_signal_addr); | ||
787 | seq_printf(m, "last semaphore wait addr : 0x%016llx\n", | ||
788 | ring->last_semaphore_wait_addr); | ||
810 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); | 789 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); |
811 | seq_printf(m, "%u dwords in ring\n", count); | 790 | seq_printf(m, "%u dwords in ring\n", count); |
791 | |||
792 | if (!ring->ready) | ||
793 | return 0; | ||
794 | |||
812 | /* print 8 dw before current rptr as often it's the last executed | 795 | /* print 8 dw before current rptr as often it's the last executed |
813 | * packet that is the root issue | 796 | * packet that is the root issue |
814 | */ | 797 | */ |
815 | i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; | 798 | i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; |
816 | if (ring->ready) { | 799 | for (j = 0; j <= (count + 32); j++) { |
817 | for (j = 0; j <= (count + 32); j++) { | 800 | seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); |
818 | seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]); | 801 | if (rptr == i) |
819 | i = (i + 1) & ring->ptr_mask; | 802 | seq_puts(m, " *"); |
820 | } | 803 | if (rptr_next == i) |
804 | seq_puts(m, " #"); | ||
805 | seq_puts(m, "\n"); | ||
806 | i = (i + 1) & ring->ptr_mask; | ||
821 | } | 807 | } |
822 | return 0; | 808 | return 0; |
823 | } | 809 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index f0bac68254b7..c0625805cdd7 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c | |||
@@ -402,13 +402,15 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager, | |||
402 | 402 | ||
403 | spin_lock(&sa_manager->wq.lock); | 403 | spin_lock(&sa_manager->wq.lock); |
404 | list_for_each_entry(i, &sa_manager->olist, olist) { | 404 | list_for_each_entry(i, &sa_manager->olist, olist) { |
405 | uint64_t soffset = i->soffset + sa_manager->gpu_addr; | ||
406 | uint64_t eoffset = i->eoffset + sa_manager->gpu_addr; | ||
405 | if (&i->olist == sa_manager->hole) { | 407 | if (&i->olist == sa_manager->hole) { |
406 | seq_printf(m, ">"); | 408 | seq_printf(m, ">"); |
407 | } else { | 409 | } else { |
408 | seq_printf(m, " "); | 410 | seq_printf(m, " "); |
409 | } | 411 | } |
410 | seq_printf(m, "[0x%08x 0x%08x] size %8d", | 412 | seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", |
411 | i->soffset, i->eoffset, i->eoffset - i->soffset); | 413 | soffset, eoffset, eoffset - soffset); |
412 | if (i->fence) { | 414 | if (i->fence) { |
413 | seq_printf(m, " protected by 0x%016llx on ring %d", | 415 | seq_printf(m, " protected by 0x%016llx on ring %d", |
414 | i->fence->seq, i->fence->ring); | 416 | i->fence->seq, i->fence->ring); |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 051fa874065a..77f5b0c3edb8 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -39,12 +39,14 @@ | |||
39 | #include <linux/seq_file.h> | 39 | #include <linux/seq_file.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/swiotlb.h> | 41 | #include <linux/swiotlb.h> |
42 | #include <linux/debugfs.h> | ||
42 | #include "radeon_reg.h" | 43 | #include "radeon_reg.h" |
43 | #include "radeon.h" | 44 | #include "radeon.h" |
44 | 45 | ||
45 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) | 46 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) |
46 | 47 | ||
47 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev); | 48 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev); |
49 | static void radeon_ttm_debugfs_fini(struct radeon_device *rdev); | ||
48 | 50 | ||
49 | static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) | 51 | static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) |
50 | { | 52 | { |
@@ -753,6 +755,7 @@ void radeon_ttm_fini(struct radeon_device *rdev) | |||
753 | 755 | ||
754 | if (!rdev->mman.initialized) | 756 | if (!rdev->mman.initialized) |
755 | return; | 757 | return; |
758 | radeon_ttm_debugfs_fini(rdev); | ||
756 | if (rdev->stollen_vga_memory) { | 759 | if (rdev->stollen_vga_memory) { |
757 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); | 760 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
758 | if (r == 0) { | 761 | if (r == 0) { |
@@ -832,16 +835,15 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma) | |||
832 | return 0; | 835 | return 0; |
833 | } | 836 | } |
834 | 837 | ||
835 | |||
836 | #define RADEON_DEBUGFS_MEM_TYPES 2 | ||
837 | |||
838 | #if defined(CONFIG_DEBUG_FS) | 838 | #if defined(CONFIG_DEBUG_FS) |
839 | |||
839 | static int radeon_mm_dump_table(struct seq_file *m, void *data) | 840 | static int radeon_mm_dump_table(struct seq_file *m, void *data) |
840 | { | 841 | { |
841 | struct drm_info_node *node = (struct drm_info_node *)m->private; | 842 | struct drm_info_node *node = (struct drm_info_node *)m->private; |
842 | struct drm_mm *mm = (struct drm_mm *)node->info_ent->data; | 843 | unsigned ttm_pl = *(int *)node->info_ent->data; |
843 | struct drm_device *dev = node->minor->dev; | 844 | struct drm_device *dev = node->minor->dev; |
844 | struct radeon_device *rdev = dev->dev_private; | 845 | struct radeon_device *rdev = dev->dev_private; |
846 | struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv; | ||
845 | int ret; | 847 | int ret; |
846 | struct ttm_bo_global *glob = rdev->mman.bdev.glob; | 848 | struct ttm_bo_global *glob = rdev->mman.bdev.glob; |
847 | 849 | ||
@@ -850,46 +852,169 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data) | |||
850 | spin_unlock(&glob->lru_lock); | 852 | spin_unlock(&glob->lru_lock); |
851 | return ret; | 853 | return ret; |
852 | } | 854 | } |
855 | |||
856 | static int ttm_pl_vram = TTM_PL_VRAM; | ||
857 | static int ttm_pl_tt = TTM_PL_TT; | ||
858 | |||
859 | static struct drm_info_list radeon_ttm_debugfs_list[] = { | ||
860 | {"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram}, | ||
861 | {"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt}, | ||
862 | {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, | ||
863 | #ifdef CONFIG_SWIOTLB | ||
864 | {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} | ||
853 | #endif | 865 | #endif |
866 | }; | ||
854 | 867 | ||
855 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev) | 868 | static int radeon_ttm_vram_open(struct inode *inode, struct file *filep) |
856 | { | 869 | { |
857 | #if defined(CONFIG_DEBUG_FS) | 870 | struct radeon_device *rdev = inode->i_private; |
858 | static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2]; | 871 | i_size_write(inode, rdev->mc.mc_vram_size); |
859 | static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32]; | 872 | filep->private_data = inode->i_private; |
860 | unsigned i; | 873 | return 0; |
874 | } | ||
861 | 875 | ||
862 | for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { | 876 | static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf, |
863 | if (i == 0) | 877 | size_t size, loff_t *pos) |
864 | sprintf(radeon_mem_types_names[i], "radeon_vram_mm"); | 878 | { |
865 | else | 879 | struct radeon_device *rdev = f->private_data; |
866 | sprintf(radeon_mem_types_names[i], "radeon_gtt_mm"); | 880 | ssize_t result = 0; |
867 | radeon_mem_types_list[i].name = radeon_mem_types_names[i]; | 881 | int r; |
868 | radeon_mem_types_list[i].show = &radeon_mm_dump_table; | ||
869 | radeon_mem_types_list[i].driver_features = 0; | ||
870 | if (i == 0) | ||
871 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv; | ||
872 | else | ||
873 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv; | ||
874 | 882 | ||
883 | if (size & 0x3 || *pos & 0x3) | ||
884 | return -EINVAL; | ||
885 | |||
886 | while (size) { | ||
887 | unsigned long flags; | ||
888 | uint32_t value; | ||
889 | |||
890 | if (*pos >= rdev->mc.mc_vram_size) | ||
891 | return result; | ||
892 | |||
893 | spin_lock_irqsave(&rdev->mmio_idx_lock, flags); | ||
894 | WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000); | ||
895 | if (rdev->family >= CHIP_CEDAR) | ||
896 | WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31); | ||
897 | value = RREG32(RADEON_MM_DATA); | ||
898 | spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); | ||
899 | |||
900 | r = put_user(value, (uint32_t *)buf); | ||
901 | if (r) | ||
902 | return r; | ||
903 | |||
904 | result += 4; | ||
905 | buf += 4; | ||
906 | *pos += 4; | ||
907 | size -= 4; | ||
875 | } | 908 | } |
876 | /* Add ttm page pool to debugfs */ | 909 | |
877 | sprintf(radeon_mem_types_names[i], "ttm_page_pool"); | 910 | return result; |
878 | radeon_mem_types_list[i].name = radeon_mem_types_names[i]; | 911 | } |
879 | radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs; | 912 | |
880 | radeon_mem_types_list[i].driver_features = 0; | 913 | static const struct file_operations radeon_ttm_vram_fops = { |
881 | radeon_mem_types_list[i++].data = NULL; | 914 | .owner = THIS_MODULE, |
882 | #ifdef CONFIG_SWIOTLB | 915 | .open = radeon_ttm_vram_open, |
883 | if (swiotlb_nr_tbl()) { | 916 | .read = radeon_ttm_vram_read, |
884 | sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool"); | 917 | .llseek = default_llseek |
885 | radeon_mem_types_list[i].name = radeon_mem_types_names[i]; | 918 | }; |
886 | radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs; | 919 | |
887 | radeon_mem_types_list[i].driver_features = 0; | 920 | static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep) |
888 | radeon_mem_types_list[i++].data = NULL; | 921 | { |
922 | struct radeon_device *rdev = inode->i_private; | ||
923 | i_size_write(inode, rdev->mc.gtt_size); | ||
924 | filep->private_data = inode->i_private; | ||
925 | return 0; | ||
926 | } | ||
927 | |||
928 | static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf, | ||
929 | size_t size, loff_t *pos) | ||
930 | { | ||
931 | struct radeon_device *rdev = f->private_data; | ||
932 | ssize_t result = 0; | ||
933 | int r; | ||
934 | |||
935 | while (size) { | ||
936 | loff_t p = *pos / PAGE_SIZE; | ||
937 | unsigned off = *pos & ~PAGE_MASK; | ||
938 | ssize_t cur_size = min(size, PAGE_SIZE - off); | ||
939 | struct page *page; | ||
940 | void *ptr; | ||
941 | |||
942 | if (p >= rdev->gart.num_cpu_pages) | ||
943 | return result; | ||
944 | |||
945 | page = rdev->gart.pages[p]; | ||
946 | if (page) { | ||
947 | ptr = kmap(page); | ||
948 | ptr += off; | ||
949 | |||
950 | r = copy_to_user(buf, ptr, cur_size); | ||
951 | kunmap(rdev->gart.pages[p]); | ||
952 | } else | ||
953 | r = clear_user(buf, cur_size); | ||
954 | |||
955 | if (r) | ||
956 | return -EFAULT; | ||
957 | |||
958 | result += cur_size; | ||
959 | buf += cur_size; | ||
960 | *pos += cur_size; | ||
961 | size -= cur_size; | ||
889 | } | 962 | } |
963 | |||
964 | return result; | ||
965 | } | ||
966 | |||
967 | static const struct file_operations radeon_ttm_gtt_fops = { | ||
968 | .owner = THIS_MODULE, | ||
969 | .open = radeon_ttm_gtt_open, | ||
970 | .read = radeon_ttm_gtt_read, | ||
971 | .llseek = default_llseek | ||
972 | }; | ||
973 | |||
890 | #endif | 974 | #endif |
891 | return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i); | ||
892 | 975 | ||
976 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev) | ||
977 | { | ||
978 | #if defined(CONFIG_DEBUG_FS) | ||
979 | unsigned count; | ||
980 | |||
981 | struct drm_minor *minor = rdev->ddev->primary; | ||
982 | struct dentry *ent, *root = minor->debugfs_root; | ||
983 | |||
984 | ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root, | ||
985 | rdev, &radeon_ttm_vram_fops); | ||
986 | if (IS_ERR(ent)) | ||
987 | return PTR_ERR(ent); | ||
988 | rdev->mman.vram = ent; | ||
989 | |||
990 | ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root, | ||
991 | rdev, &radeon_ttm_gtt_fops); | ||
992 | if (IS_ERR(ent)) | ||
993 | return PTR_ERR(ent); | ||
994 | rdev->mman.gtt = ent; | ||
995 | |||
996 | count = ARRAY_SIZE(radeon_ttm_debugfs_list); | ||
997 | |||
998 | #ifdef CONFIG_SWIOTLB | ||
999 | if (!swiotlb_nr_tbl()) | ||
1000 | --count; | ||
893 | #endif | 1001 | #endif |
1002 | |||
1003 | return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count); | ||
1004 | #else | ||
1005 | |||
894 | return 0; | 1006 | return 0; |
1007 | #endif | ||
1008 | } | ||
1009 | |||
1010 | static void radeon_ttm_debugfs_fini(struct radeon_device *rdev) | ||
1011 | { | ||
1012 | #if defined(CONFIG_DEBUG_FS) | ||
1013 | |||
1014 | debugfs_remove(rdev->mman.vram); | ||
1015 | rdev->mman.vram = NULL; | ||
1016 | |||
1017 | debugfs_remove(rdev->mman.gtt); | ||
1018 | rdev->mman.gtt = NULL; | ||
1019 | #endif | ||
895 | } | 1020 | } |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 9566b5940a5a..b5c2369cda2f 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -474,6 +474,8 @@ int rs400_resume(struct radeon_device *rdev) | |||
474 | /* Initialize surface registers */ | 474 | /* Initialize surface registers */ |
475 | radeon_surface_init(rdev); | 475 | radeon_surface_init(rdev); |
476 | 476 | ||
477 | radeon_pm_resume(rdev); | ||
478 | |||
477 | rdev->accel_working = true; | 479 | rdev->accel_working = true; |
478 | r = rs400_startup(rdev); | 480 | r = rs400_startup(rdev); |
479 | if (r) { | 481 | if (r) { |
@@ -484,6 +486,7 @@ int rs400_resume(struct radeon_device *rdev) | |||
484 | 486 | ||
485 | int rs400_suspend(struct radeon_device *rdev) | 487 | int rs400_suspend(struct radeon_device *rdev) |
486 | { | 488 | { |
489 | radeon_pm_suspend(rdev); | ||
487 | r100_cp_disable(rdev); | 490 | r100_cp_disable(rdev); |
488 | radeon_wb_disable(rdev); | 491 | radeon_wb_disable(rdev); |
489 | r100_irq_disable(rdev); | 492 | r100_irq_disable(rdev); |
@@ -493,6 +496,7 @@ int rs400_suspend(struct radeon_device *rdev) | |||
493 | 496 | ||
494 | void rs400_fini(struct radeon_device *rdev) | 497 | void rs400_fini(struct radeon_device *rdev) |
495 | { | 498 | { |
499 | radeon_pm_fini(rdev); | ||
496 | r100_cp_fini(rdev); | 500 | r100_cp_fini(rdev); |
497 | radeon_wb_fini(rdev); | 501 | radeon_wb_fini(rdev); |
498 | radeon_ib_pool_fini(rdev); | 502 | radeon_ib_pool_fini(rdev); |
@@ -560,6 +564,9 @@ int rs400_init(struct radeon_device *rdev) | |||
560 | return r; | 564 | return r; |
561 | r300_set_reg_safe(rdev); | 565 | r300_set_reg_safe(rdev); |
562 | 566 | ||
567 | /* Initialize power management */ | ||
568 | radeon_pm_init(rdev); | ||
569 | |||
563 | rdev->accel_working = true; | 570 | rdev->accel_working = true; |
564 | r = rs400_startup(rdev); | 571 | r = rs400_startup(rdev); |
565 | if (r) { | 572 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 76cc8d3aafec..fdcde7693032 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -1048,6 +1048,8 @@ int rs600_resume(struct radeon_device *rdev) | |||
1048 | /* Initialize surface registers */ | 1048 | /* Initialize surface registers */ |
1049 | radeon_surface_init(rdev); | 1049 | radeon_surface_init(rdev); |
1050 | 1050 | ||
1051 | radeon_pm_resume(rdev); | ||
1052 | |||
1051 | rdev->accel_working = true; | 1053 | rdev->accel_working = true; |
1052 | r = rs600_startup(rdev); | 1054 | r = rs600_startup(rdev); |
1053 | if (r) { | 1055 | if (r) { |
@@ -1058,6 +1060,7 @@ int rs600_resume(struct radeon_device *rdev) | |||
1058 | 1060 | ||
1059 | int rs600_suspend(struct radeon_device *rdev) | 1061 | int rs600_suspend(struct radeon_device *rdev) |
1060 | { | 1062 | { |
1063 | radeon_pm_suspend(rdev); | ||
1061 | r600_audio_fini(rdev); | 1064 | r600_audio_fini(rdev); |
1062 | r100_cp_disable(rdev); | 1065 | r100_cp_disable(rdev); |
1063 | radeon_wb_disable(rdev); | 1066 | radeon_wb_disable(rdev); |
@@ -1068,6 +1071,7 @@ int rs600_suspend(struct radeon_device *rdev) | |||
1068 | 1071 | ||
1069 | void rs600_fini(struct radeon_device *rdev) | 1072 | void rs600_fini(struct radeon_device *rdev) |
1070 | { | 1073 | { |
1074 | radeon_pm_fini(rdev); | ||
1071 | r600_audio_fini(rdev); | 1075 | r600_audio_fini(rdev); |
1072 | r100_cp_fini(rdev); | 1076 | r100_cp_fini(rdev); |
1073 | radeon_wb_fini(rdev); | 1077 | radeon_wb_fini(rdev); |
@@ -1136,6 +1140,9 @@ int rs600_init(struct radeon_device *rdev) | |||
1136 | return r; | 1140 | return r; |
1137 | rs600_set_safe_registers(rdev); | 1141 | rs600_set_safe_registers(rdev); |
1138 | 1142 | ||
1143 | /* Initialize power management */ | ||
1144 | radeon_pm_init(rdev); | ||
1145 | |||
1139 | rdev->accel_working = true; | 1146 | rdev->accel_working = true; |
1140 | r = rs600_startup(rdev); | 1147 | r = rs600_startup(rdev); |
1141 | if (r) { | 1148 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 1c560629575a..e461b45f29a9 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -746,6 +746,8 @@ int rs690_resume(struct radeon_device *rdev) | |||
746 | /* Initialize surface registers */ | 746 | /* Initialize surface registers */ |
747 | radeon_surface_init(rdev); | 747 | radeon_surface_init(rdev); |
748 | 748 | ||
749 | radeon_pm_resume(rdev); | ||
750 | |||
749 | rdev->accel_working = true; | 751 | rdev->accel_working = true; |
750 | r = rs690_startup(rdev); | 752 | r = rs690_startup(rdev); |
751 | if (r) { | 753 | if (r) { |
@@ -756,6 +758,7 @@ int rs690_resume(struct radeon_device *rdev) | |||
756 | 758 | ||
757 | int rs690_suspend(struct radeon_device *rdev) | 759 | int rs690_suspend(struct radeon_device *rdev) |
758 | { | 760 | { |
761 | radeon_pm_suspend(rdev); | ||
759 | r600_audio_fini(rdev); | 762 | r600_audio_fini(rdev); |
760 | r100_cp_disable(rdev); | 763 | r100_cp_disable(rdev); |
761 | radeon_wb_disable(rdev); | 764 | radeon_wb_disable(rdev); |
@@ -766,6 +769,7 @@ int rs690_suspend(struct radeon_device *rdev) | |||
766 | 769 | ||
767 | void rs690_fini(struct radeon_device *rdev) | 770 | void rs690_fini(struct radeon_device *rdev) |
768 | { | 771 | { |
772 | radeon_pm_fini(rdev); | ||
769 | r600_audio_fini(rdev); | 773 | r600_audio_fini(rdev); |
770 | r100_cp_fini(rdev); | 774 | r100_cp_fini(rdev); |
771 | radeon_wb_fini(rdev); | 775 | radeon_wb_fini(rdev); |
@@ -835,6 +839,9 @@ int rs690_init(struct radeon_device *rdev) | |||
835 | return r; | 839 | return r; |
836 | rs600_set_safe_registers(rdev); | 840 | rs600_set_safe_registers(rdev); |
837 | 841 | ||
842 | /* Initialize power management */ | ||
843 | radeon_pm_init(rdev); | ||
844 | |||
838 | rdev->accel_working = true; | 845 | rdev->accel_working = true; |
839 | r = rs690_startup(rdev); | 846 | r = rs690_startup(rdev); |
840 | if (r) { | 847 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c index 6af8505cf4d2..8512085b0aef 100644 --- a/drivers/gpu/drm/radeon/rs780_dpm.c +++ b/drivers/gpu/drm/radeon/rs780_dpm.c | |||
@@ -623,14 +623,6 @@ int rs780_dpm_enable(struct radeon_device *rdev) | |||
623 | if (pi->gfx_clock_gating) | 623 | if (pi->gfx_clock_gating) |
624 | r600_gfx_clockgating_enable(rdev, true); | 624 | r600_gfx_clockgating_enable(rdev, true); |
625 | 625 | ||
626 | if (rdev->irq.installed && (rdev->pm.int_thermal_type == THERMAL_TYPE_RV6XX)) { | ||
627 | ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | ||
628 | if (ret) | ||
629 | return ret; | ||
630 | rdev->irq.dpm_thermal = true; | ||
631 | radeon_irq_set(rdev); | ||
632 | } | ||
633 | |||
634 | return 0; | 626 | return 0; |
635 | } | 627 | } |
636 | 628 | ||
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 5d1c316115ef..98e8138ff779 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -586,6 +586,8 @@ int rv515_resume(struct radeon_device *rdev) | |||
586 | /* Initialize surface registers */ | 586 | /* Initialize surface registers */ |
587 | radeon_surface_init(rdev); | 587 | radeon_surface_init(rdev); |
588 | 588 | ||
589 | radeon_pm_resume(rdev); | ||
590 | |||
589 | rdev->accel_working = true; | 591 | rdev->accel_working = true; |
590 | r = rv515_startup(rdev); | 592 | r = rv515_startup(rdev); |
591 | if (r) { | 593 | if (r) { |
@@ -596,6 +598,7 @@ int rv515_resume(struct radeon_device *rdev) | |||
596 | 598 | ||
597 | int rv515_suspend(struct radeon_device *rdev) | 599 | int rv515_suspend(struct radeon_device *rdev) |
598 | { | 600 | { |
601 | radeon_pm_suspend(rdev); | ||
599 | r100_cp_disable(rdev); | 602 | r100_cp_disable(rdev); |
600 | radeon_wb_disable(rdev); | 603 | radeon_wb_disable(rdev); |
601 | rs600_irq_disable(rdev); | 604 | rs600_irq_disable(rdev); |
@@ -612,6 +615,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev) | |||
612 | 615 | ||
613 | void rv515_fini(struct radeon_device *rdev) | 616 | void rv515_fini(struct radeon_device *rdev) |
614 | { | 617 | { |
618 | radeon_pm_fini(rdev); | ||
615 | r100_cp_fini(rdev); | 619 | r100_cp_fini(rdev); |
616 | radeon_wb_fini(rdev); | 620 | radeon_wb_fini(rdev); |
617 | radeon_ib_pool_fini(rdev); | 621 | radeon_ib_pool_fini(rdev); |
@@ -685,6 +689,9 @@ int rv515_init(struct radeon_device *rdev) | |||
685 | return r; | 689 | return r; |
686 | rv515_set_safe_registers(rdev); | 690 | rv515_set_safe_registers(rdev); |
687 | 691 | ||
692 | /* Initialize power management */ | ||
693 | radeon_pm_init(rdev); | ||
694 | |||
688 | rdev->accel_working = true; | 695 | rdev->accel_working = true; |
689 | r = rv515_startup(rdev); | 696 | r = rv515_startup(rdev); |
690 | if (r) { | 697 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index 26633a025252..bebf31c4d841 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c | |||
@@ -1546,7 +1546,6 @@ int rv6xx_dpm_enable(struct radeon_device *rdev) | |||
1546 | { | 1546 | { |
1547 | struct rv6xx_power_info *pi = rv6xx_get_pi(rdev); | 1547 | struct rv6xx_power_info *pi = rv6xx_get_pi(rdev); |
1548 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; | 1548 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; |
1549 | int ret; | ||
1550 | 1549 | ||
1551 | if (r600_dynamicpm_enabled(rdev)) | 1550 | if (r600_dynamicpm_enabled(rdev)) |
1552 | return -EINVAL; | 1551 | return -EINVAL; |
@@ -1594,15 +1593,6 @@ int rv6xx_dpm_enable(struct radeon_device *rdev) | |||
1594 | r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true); | 1593 | r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true); |
1595 | r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true); | 1594 | r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true); |
1596 | 1595 | ||
1597 | if (rdev->irq.installed && | ||
1598 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | ||
1599 | ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | ||
1600 | if (ret) | ||
1601 | return ret; | ||
1602 | rdev->irq.dpm_thermal = true; | ||
1603 | radeon_irq_set(rdev); | ||
1604 | } | ||
1605 | |||
1606 | rv6xx_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 1596 | rv6xx_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
1607 | 1597 | ||
1608 | r600_start_dpm(rdev); | 1598 | r600_start_dpm(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 9f5846743c9e..18e02889ec7d 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -1123,6 +1123,35 @@ void r700_cp_fini(struct radeon_device *rdev) | |||
1123 | radeon_scratch_free(rdev, ring->rptr_save_reg); | 1123 | radeon_scratch_free(rdev, ring->rptr_save_reg); |
1124 | } | 1124 | } |
1125 | 1125 | ||
1126 | void rv770_set_clk_bypass_mode(struct radeon_device *rdev) | ||
1127 | { | ||
1128 | u32 tmp, i; | ||
1129 | |||
1130 | if (rdev->flags & RADEON_IS_IGP) | ||
1131 | return; | ||
1132 | |||
1133 | tmp = RREG32(CG_SPLL_FUNC_CNTL_2); | ||
1134 | tmp &= SCLK_MUX_SEL_MASK; | ||
1135 | tmp |= SCLK_MUX_SEL(1) | SCLK_MUX_UPDATE; | ||
1136 | WREG32(CG_SPLL_FUNC_CNTL_2, tmp); | ||
1137 | |||
1138 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
1139 | if (RREG32(CG_SPLL_STATUS) & SPLL_CHG_STATUS) | ||
1140 | break; | ||
1141 | udelay(1); | ||
1142 | } | ||
1143 | |||
1144 | tmp &= ~SCLK_MUX_UPDATE; | ||
1145 | WREG32(CG_SPLL_FUNC_CNTL_2, tmp); | ||
1146 | |||
1147 | tmp = RREG32(MPLL_CNTL_MODE); | ||
1148 | if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730)) | ||
1149 | tmp &= ~RV730_MPLL_MCLK_SEL; | ||
1150 | else | ||
1151 | tmp &= ~MPLL_MCLK_SEL; | ||
1152 | WREG32(MPLL_CNTL_MODE, tmp); | ||
1153 | } | ||
1154 | |||
1126 | /* | 1155 | /* |
1127 | * Core functions | 1156 | * Core functions |
1128 | */ | 1157 | */ |
@@ -1665,14 +1694,6 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1665 | 1694 | ||
1666 | rv770_mc_program(rdev); | 1695 | rv770_mc_program(rdev); |
1667 | 1696 | ||
1668 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
1669 | r = r600_init_microcode(rdev); | ||
1670 | if (r) { | ||
1671 | DRM_ERROR("Failed to load firmware!\n"); | ||
1672 | return r; | ||
1673 | } | ||
1674 | } | ||
1675 | |||
1676 | if (rdev->flags & RADEON_IS_AGP) { | 1697 | if (rdev->flags & RADEON_IS_AGP) { |
1677 | rv770_agp_enable(rdev); | 1698 | rv770_agp_enable(rdev); |
1678 | } else { | 1699 | } else { |
@@ -1728,14 +1749,12 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1728 | 1749 | ||
1729 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 1750 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
1730 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 1751 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
1731 | R600_CP_RB_RPTR, R600_CP_RB_WPTR, | ||
1732 | RADEON_CP_PACKET2); | 1752 | RADEON_CP_PACKET2); |
1733 | if (r) | 1753 | if (r) |
1734 | return r; | 1754 | return r; |
1735 | 1755 | ||
1736 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; | 1756 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
1737 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, | 1757 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
1738 | DMA_RB_RPTR, DMA_RB_WPTR, | ||
1739 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); | 1758 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
1740 | if (r) | 1759 | if (r) |
1741 | return r; | 1760 | return r; |
@@ -1754,7 +1773,6 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1754 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 1773 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
1755 | if (ring->ring_size) { | 1774 | if (ring->ring_size) { |
1756 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, | 1775 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
1757 | UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, | ||
1758 | RADEON_CP_PACKET2); | 1776 | RADEON_CP_PACKET2); |
1759 | if (!r) | 1777 | if (!r) |
1760 | r = uvd_v1_0_init(rdev); | 1778 | r = uvd_v1_0_init(rdev); |
@@ -1792,6 +1810,8 @@ int rv770_resume(struct radeon_device *rdev) | |||
1792 | /* init golden registers */ | 1810 | /* init golden registers */ |
1793 | rv770_init_golden_registers(rdev); | 1811 | rv770_init_golden_registers(rdev); |
1794 | 1812 | ||
1813 | radeon_pm_resume(rdev); | ||
1814 | |||
1795 | rdev->accel_working = true; | 1815 | rdev->accel_working = true; |
1796 | r = rv770_startup(rdev); | 1816 | r = rv770_startup(rdev); |
1797 | if (r) { | 1817 | if (r) { |
@@ -1806,6 +1826,7 @@ int rv770_resume(struct radeon_device *rdev) | |||
1806 | 1826 | ||
1807 | int rv770_suspend(struct radeon_device *rdev) | 1827 | int rv770_suspend(struct radeon_device *rdev) |
1808 | { | 1828 | { |
1829 | radeon_pm_suspend(rdev); | ||
1809 | r600_audio_fini(rdev); | 1830 | r600_audio_fini(rdev); |
1810 | uvd_v1_0_fini(rdev); | 1831 | uvd_v1_0_fini(rdev); |
1811 | radeon_uvd_suspend(rdev); | 1832 | radeon_uvd_suspend(rdev); |
@@ -1876,6 +1897,17 @@ int rv770_init(struct radeon_device *rdev) | |||
1876 | if (r) | 1897 | if (r) |
1877 | return r; | 1898 | return r; |
1878 | 1899 | ||
1900 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
1901 | r = r600_init_microcode(rdev); | ||
1902 | if (r) { | ||
1903 | DRM_ERROR("Failed to load firmware!\n"); | ||
1904 | return r; | ||
1905 | } | ||
1906 | } | ||
1907 | |||
1908 | /* Initialize power management */ | ||
1909 | radeon_pm_init(rdev); | ||
1910 | |||
1879 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; | 1911 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
1880 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); | 1912 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
1881 | 1913 | ||
@@ -1915,6 +1947,7 @@ int rv770_init(struct radeon_device *rdev) | |||
1915 | 1947 | ||
1916 | void rv770_fini(struct radeon_device *rdev) | 1948 | void rv770_fini(struct radeon_device *rdev) |
1917 | { | 1949 | { |
1950 | radeon_pm_fini(rdev); | ||
1918 | r700_cp_fini(rdev); | 1951 | r700_cp_fini(rdev); |
1919 | r600_dma_fini(rdev); | 1952 | r600_dma_fini(rdev); |
1920 | r600_irq_fini(rdev); | 1953 | r600_irq_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 913b025ae9b3..4aaeb118a3ff 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
@@ -1863,8 +1863,8 @@ void rv770_enable_auto_throttle_source(struct radeon_device *rdev, | |||
1863 | } | 1863 | } |
1864 | } | 1864 | } |
1865 | 1865 | ||
1866 | int rv770_set_thermal_temperature_range(struct radeon_device *rdev, | 1866 | static int rv770_set_thermal_temperature_range(struct radeon_device *rdev, |
1867 | int min_temp, int max_temp) | 1867 | int min_temp, int max_temp) |
1868 | { | 1868 | { |
1869 | int low_temp = 0 * 1000; | 1869 | int low_temp = 0 * 1000; |
1870 | int high_temp = 255 * 1000; | 1870 | int high_temp = 255 * 1000; |
@@ -1966,6 +1966,15 @@ int rv770_dpm_enable(struct radeon_device *rdev) | |||
1966 | if (pi->mg_clock_gating) | 1966 | if (pi->mg_clock_gating) |
1967 | rv770_mg_clock_gating_enable(rdev, true); | 1967 | rv770_mg_clock_gating_enable(rdev, true); |
1968 | 1968 | ||
1969 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | ||
1970 | |||
1971 | return 0; | ||
1972 | } | ||
1973 | |||
1974 | int rv770_dpm_late_enable(struct radeon_device *rdev) | ||
1975 | { | ||
1976 | int ret; | ||
1977 | |||
1969 | if (rdev->irq.installed && | 1978 | if (rdev->irq.installed && |
1970 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 1979 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
1971 | PPSMC_Result result; | 1980 | PPSMC_Result result; |
@@ -1981,8 +1990,6 @@ int rv770_dpm_enable(struct radeon_device *rdev) | |||
1981 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); | 1990 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); |
1982 | } | 1991 | } |
1983 | 1992 | ||
1984 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | ||
1985 | |||
1986 | return 0; | 1993 | return 0; |
1987 | } | 1994 | } |
1988 | 1995 | ||
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h index 9244effc6b59..f776634840c9 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.h +++ b/drivers/gpu/drm/radeon/rv770_dpm.h | |||
@@ -283,8 +283,4 @@ int rv770_read_smc_soft_register(struct radeon_device *rdev, | |||
283 | int rv770_write_smc_soft_register(struct radeon_device *rdev, | 283 | int rv770_write_smc_soft_register(struct radeon_device *rdev, |
284 | u16 reg_offset, u32 value); | 284 | u16 reg_offset, u32 value); |
285 | 285 | ||
286 | /* thermal */ | ||
287 | int rv770_set_thermal_temperature_range(struct radeon_device *rdev, | ||
288 | int min_temp, int max_temp); | ||
289 | |||
290 | #endif | 286 | #endif |
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 1ae277152cc7..3cf1e2921545 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h | |||
@@ -100,14 +100,21 @@ | |||
100 | #define CG_SPLL_FUNC_CNTL_2 0x604 | 100 | #define CG_SPLL_FUNC_CNTL_2 0x604 |
101 | #define SCLK_MUX_SEL(x) ((x) << 0) | 101 | #define SCLK_MUX_SEL(x) ((x) << 0) |
102 | #define SCLK_MUX_SEL_MASK (0x1ff << 0) | 102 | #define SCLK_MUX_SEL_MASK (0x1ff << 0) |
103 | #define SCLK_MUX_UPDATE (1 << 26) | ||
103 | #define CG_SPLL_FUNC_CNTL_3 0x608 | 104 | #define CG_SPLL_FUNC_CNTL_3 0x608 |
104 | #define SPLL_FB_DIV(x) ((x) << 0) | 105 | #define SPLL_FB_DIV(x) ((x) << 0) |
105 | #define SPLL_FB_DIV_MASK (0x3ffffff << 0) | 106 | #define SPLL_FB_DIV_MASK (0x3ffffff << 0) |
106 | #define SPLL_DITHEN (1 << 28) | 107 | #define SPLL_DITHEN (1 << 28) |
108 | #define CG_SPLL_STATUS 0x60c | ||
109 | #define SPLL_CHG_STATUS (1 << 1) | ||
107 | 110 | ||
108 | #define SPLL_CNTL_MODE 0x610 | 111 | #define SPLL_CNTL_MODE 0x610 |
109 | #define SPLL_DIV_SYNC (1 << 5) | 112 | #define SPLL_DIV_SYNC (1 << 5) |
110 | 113 | ||
114 | #define MPLL_CNTL_MODE 0x61c | ||
115 | # define MPLL_MCLK_SEL (1 << 11) | ||
116 | # define RV730_MPLL_MCLK_SEL (1 << 25) | ||
117 | |||
111 | #define MPLL_AD_FUNC_CNTL 0x624 | 118 | #define MPLL_AD_FUNC_CNTL 0x624 |
112 | #define CLKF(x) ((x) << 0) | 119 | #define CLKF(x) ((x) << 0) |
113 | #define CLKF_MASK (0x7f << 0) | 120 | #define CLKF_MASK (0x7f << 0) |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index a36736dab5e0..626163ef483d 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -80,6 +80,8 @@ extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); | |||
80 | extern bool evergreen_is_display_hung(struct radeon_device *rdev); | 80 | extern bool evergreen_is_display_hung(struct radeon_device *rdev); |
81 | static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, | 81 | static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, |
82 | bool enable); | 82 | bool enable); |
83 | static void si_init_pg(struct radeon_device *rdev); | ||
84 | static void si_init_cg(struct radeon_device *rdev); | ||
83 | static void si_fini_pg(struct radeon_device *rdev); | 85 | static void si_fini_pg(struct radeon_device *rdev); |
84 | static void si_fini_cg(struct radeon_device *rdev); | 86 | static void si_fini_cg(struct radeon_device *rdev); |
85 | static void si_rlc_stop(struct radeon_device *rdev); | 87 | static void si_rlc_stop(struct radeon_device *rdev); |
@@ -1460,7 +1462,7 @@ static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { | |||
1460 | }; | 1462 | }; |
1461 | 1463 | ||
1462 | /* ucode loading */ | 1464 | /* ucode loading */ |
1463 | static int si_mc_load_microcode(struct radeon_device *rdev) | 1465 | int si_mc_load_microcode(struct radeon_device *rdev) |
1464 | { | 1466 | { |
1465 | const __be32 *fw_data; | 1467 | const __be32 *fw_data; |
1466 | u32 running, blackout = 0; | 1468 | u32 running, blackout = 0; |
@@ -3722,6 +3724,106 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
3722 | evergreen_print_gpu_status_regs(rdev); | 3724 | evergreen_print_gpu_status_regs(rdev); |
3723 | } | 3725 | } |
3724 | 3726 | ||
3727 | static void si_set_clk_bypass_mode(struct radeon_device *rdev) | ||
3728 | { | ||
3729 | u32 tmp, i; | ||
3730 | |||
3731 | tmp = RREG32(CG_SPLL_FUNC_CNTL); | ||
3732 | tmp |= SPLL_BYPASS_EN; | ||
3733 | WREG32(CG_SPLL_FUNC_CNTL, tmp); | ||
3734 | |||
3735 | tmp = RREG32(CG_SPLL_FUNC_CNTL_2); | ||
3736 | tmp |= SPLL_CTLREQ_CHG; | ||
3737 | WREG32(CG_SPLL_FUNC_CNTL_2, tmp); | ||
3738 | |||
3739 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
3740 | if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS) | ||
3741 | break; | ||
3742 | udelay(1); | ||
3743 | } | ||
3744 | |||
3745 | tmp = RREG32(CG_SPLL_FUNC_CNTL_2); | ||
3746 | tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE); | ||
3747 | WREG32(CG_SPLL_FUNC_CNTL_2, tmp); | ||
3748 | |||
3749 | tmp = RREG32(MPLL_CNTL_MODE); | ||
3750 | tmp &= ~MPLL_MCLK_SEL; | ||
3751 | WREG32(MPLL_CNTL_MODE, tmp); | ||
3752 | } | ||
3753 | |||
3754 | static void si_spll_powerdown(struct radeon_device *rdev) | ||
3755 | { | ||
3756 | u32 tmp; | ||
3757 | |||
3758 | tmp = RREG32(SPLL_CNTL_MODE); | ||
3759 | tmp |= SPLL_SW_DIR_CONTROL; | ||
3760 | WREG32(SPLL_CNTL_MODE, tmp); | ||
3761 | |||
3762 | tmp = RREG32(CG_SPLL_FUNC_CNTL); | ||
3763 | tmp |= SPLL_RESET; | ||
3764 | WREG32(CG_SPLL_FUNC_CNTL, tmp); | ||
3765 | |||
3766 | tmp = RREG32(CG_SPLL_FUNC_CNTL); | ||
3767 | tmp |= SPLL_SLEEP; | ||
3768 | WREG32(CG_SPLL_FUNC_CNTL, tmp); | ||
3769 | |||
3770 | tmp = RREG32(SPLL_CNTL_MODE); | ||
3771 | tmp &= ~SPLL_SW_DIR_CONTROL; | ||
3772 | WREG32(SPLL_CNTL_MODE, tmp); | ||
3773 | } | ||
3774 | |||
3775 | static void si_gpu_pci_config_reset(struct radeon_device *rdev) | ||
3776 | { | ||
3777 | struct evergreen_mc_save save; | ||
3778 | u32 tmp, i; | ||
3779 | |||
3780 | dev_info(rdev->dev, "GPU pci config reset\n"); | ||
3781 | |||
3782 | /* disable dpm? */ | ||
3783 | |||
3784 | /* disable cg/pg */ | ||
3785 | si_fini_pg(rdev); | ||
3786 | si_fini_cg(rdev); | ||
3787 | |||
3788 | /* Disable CP parsing/prefetching */ | ||
3789 | WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); | ||
3790 | /* dma0 */ | ||
3791 | tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); | ||
3792 | tmp &= ~DMA_RB_ENABLE; | ||
3793 | WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp); | ||
3794 | /* dma1 */ | ||
3795 | tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); | ||
3796 | tmp &= ~DMA_RB_ENABLE; | ||
3797 | WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp); | ||
3798 | /* XXX other engines? */ | ||
3799 | |||
3800 | /* halt the rlc, disable cp internal ints */ | ||
3801 | si_rlc_stop(rdev); | ||
3802 | |||
3803 | udelay(50); | ||
3804 | |||
3805 | /* disable mem access */ | ||
3806 | evergreen_mc_stop(rdev, &save); | ||
3807 | if (evergreen_mc_wait_for_idle(rdev)) { | ||
3808 | dev_warn(rdev->dev, "Wait for MC idle timed out !\n"); | ||
3809 | } | ||
3810 | |||
3811 | /* set mclk/sclk to bypass */ | ||
3812 | si_set_clk_bypass_mode(rdev); | ||
3813 | /* powerdown spll */ | ||
3814 | si_spll_powerdown(rdev); | ||
3815 | /* disable BM */ | ||
3816 | pci_clear_master(rdev->pdev); | ||
3817 | /* reset */ | ||
3818 | radeon_pci_config_reset(rdev); | ||
3819 | /* wait for asic to come out of reset */ | ||
3820 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
3821 | if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) | ||
3822 | break; | ||
3823 | udelay(1); | ||
3824 | } | ||
3825 | } | ||
3826 | |||
3725 | int si_asic_reset(struct radeon_device *rdev) | 3827 | int si_asic_reset(struct radeon_device *rdev) |
3726 | { | 3828 | { |
3727 | u32 reset_mask; | 3829 | u32 reset_mask; |
@@ -3731,10 +3833,17 @@ int si_asic_reset(struct radeon_device *rdev) | |||
3731 | if (reset_mask) | 3833 | if (reset_mask) |
3732 | r600_set_bios_scratch_engine_hung(rdev, true); | 3834 | r600_set_bios_scratch_engine_hung(rdev, true); |
3733 | 3835 | ||
3836 | /* try soft reset */ | ||
3734 | si_gpu_soft_reset(rdev, reset_mask); | 3837 | si_gpu_soft_reset(rdev, reset_mask); |
3735 | 3838 | ||
3736 | reset_mask = si_gpu_check_soft_reset(rdev); | 3839 | reset_mask = si_gpu_check_soft_reset(rdev); |
3737 | 3840 | ||
3841 | /* try pci config reset */ | ||
3842 | if (reset_mask && radeon_hard_reset) | ||
3843 | si_gpu_pci_config_reset(rdev); | ||
3844 | |||
3845 | reset_mask = si_gpu_check_soft_reset(rdev); | ||
3846 | |||
3738 | if (!reset_mask) | 3847 | if (!reset_mask) |
3739 | r600_set_bios_scratch_engine_hung(rdev, false); | 3848 | r600_set_bios_scratch_engine_hung(rdev, false); |
3740 | 3849 | ||
@@ -5210,8 +5319,8 @@ static void si_enable_hdp_ls(struct radeon_device *rdev, | |||
5210 | WREG32(HDP_MEM_POWER_LS, data); | 5319 | WREG32(HDP_MEM_POWER_LS, data); |
5211 | } | 5320 | } |
5212 | 5321 | ||
5213 | void si_update_cg(struct radeon_device *rdev, | 5322 | static void si_update_cg(struct radeon_device *rdev, |
5214 | u32 block, bool enable) | 5323 | u32 block, bool enable) |
5215 | { | 5324 | { |
5216 | if (block & RADEON_CG_BLOCK_GFX) { | 5325 | if (block & RADEON_CG_BLOCK_GFX) { |
5217 | si_enable_gui_idle_interrupt(rdev, false); | 5326 | si_enable_gui_idle_interrupt(rdev, false); |
@@ -6322,21 +6431,14 @@ static int si_startup(struct radeon_device *rdev) | |||
6322 | 6431 | ||
6323 | si_mc_program(rdev); | 6432 | si_mc_program(rdev); |
6324 | 6433 | ||
6325 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | 6434 | if (!rdev->pm.dpm_enabled) { |
6326 | !rdev->rlc_fw || !rdev->mc_fw) { | 6435 | r = si_mc_load_microcode(rdev); |
6327 | r = si_init_microcode(rdev); | ||
6328 | if (r) { | 6436 | if (r) { |
6329 | DRM_ERROR("Failed to load firmware!\n"); | 6437 | DRM_ERROR("Failed to load MC firmware!\n"); |
6330 | return r; | 6438 | return r; |
6331 | } | 6439 | } |
6332 | } | 6440 | } |
6333 | 6441 | ||
6334 | r = si_mc_load_microcode(rdev); | ||
6335 | if (r) { | ||
6336 | DRM_ERROR("Failed to load MC firmware!\n"); | ||
6337 | return r; | ||
6338 | } | ||
6339 | |||
6340 | r = si_pcie_gart_enable(rdev); | 6442 | r = si_pcie_gart_enable(rdev); |
6341 | if (r) | 6443 | if (r) |
6342 | return r; | 6444 | return r; |
@@ -6419,37 +6521,30 @@ static int si_startup(struct radeon_device *rdev) | |||
6419 | 6521 | ||
6420 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 6522 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
6421 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 6523 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
6422 | CP_RB0_RPTR, CP_RB0_WPTR, | ||
6423 | RADEON_CP_PACKET2); | 6524 | RADEON_CP_PACKET2); |
6424 | if (r) | 6525 | if (r) |
6425 | return r; | 6526 | return r; |
6426 | 6527 | ||
6427 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; | 6528 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; |
6428 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, | 6529 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, |
6429 | CP_RB1_RPTR, CP_RB1_WPTR, | ||
6430 | RADEON_CP_PACKET2); | 6530 | RADEON_CP_PACKET2); |
6431 | if (r) | 6531 | if (r) |
6432 | return r; | 6532 | return r; |
6433 | 6533 | ||
6434 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; | 6534 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; |
6435 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, | 6535 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, |
6436 | CP_RB2_RPTR, CP_RB2_WPTR, | ||
6437 | RADEON_CP_PACKET2); | 6536 | RADEON_CP_PACKET2); |
6438 | if (r) | 6537 | if (r) |
6439 | return r; | 6538 | return r; |
6440 | 6539 | ||
6441 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; | 6540 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
6442 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, | 6541 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
6443 | DMA_RB_RPTR + DMA0_REGISTER_OFFSET, | ||
6444 | DMA_RB_WPTR + DMA0_REGISTER_OFFSET, | ||
6445 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); | 6542 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); |
6446 | if (r) | 6543 | if (r) |
6447 | return r; | 6544 | return r; |
6448 | 6545 | ||
6449 | ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; | 6546 | ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; |
6450 | r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, | 6547 | r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, |
6451 | DMA_RB_RPTR + DMA1_REGISTER_OFFSET, | ||
6452 | DMA_RB_WPTR + DMA1_REGISTER_OFFSET, | ||
6453 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); | 6548 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); |
6454 | if (r) | 6549 | if (r) |
6455 | return r; | 6550 | return r; |
@@ -6469,7 +6564,6 @@ static int si_startup(struct radeon_device *rdev) | |||
6469 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 6564 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
6470 | if (ring->ring_size) { | 6565 | if (ring->ring_size) { |
6471 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, | 6566 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
6472 | UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, | ||
6473 | RADEON_CP_PACKET2); | 6567 | RADEON_CP_PACKET2); |
6474 | if (!r) | 6568 | if (!r) |
6475 | r = uvd_v1_0_init(rdev); | 6569 | r = uvd_v1_0_init(rdev); |
@@ -6511,6 +6605,8 @@ int si_resume(struct radeon_device *rdev) | |||
6511 | /* init golden registers */ | 6605 | /* init golden registers */ |
6512 | si_init_golden_registers(rdev); | 6606 | si_init_golden_registers(rdev); |
6513 | 6607 | ||
6608 | radeon_pm_resume(rdev); | ||
6609 | |||
6514 | rdev->accel_working = true; | 6610 | rdev->accel_working = true; |
6515 | r = si_startup(rdev); | 6611 | r = si_startup(rdev); |
6516 | if (r) { | 6612 | if (r) { |
@@ -6525,6 +6621,7 @@ int si_resume(struct radeon_device *rdev) | |||
6525 | 6621 | ||
6526 | int si_suspend(struct radeon_device *rdev) | 6622 | int si_suspend(struct radeon_device *rdev) |
6527 | { | 6623 | { |
6624 | radeon_pm_suspend(rdev); | ||
6528 | dce6_audio_fini(rdev); | 6625 | dce6_audio_fini(rdev); |
6529 | radeon_vm_manager_fini(rdev); | 6626 | radeon_vm_manager_fini(rdev); |
6530 | si_cp_enable(rdev, false); | 6627 | si_cp_enable(rdev, false); |
@@ -6598,6 +6695,18 @@ int si_init(struct radeon_device *rdev) | |||
6598 | if (r) | 6695 | if (r) |
6599 | return r; | 6696 | return r; |
6600 | 6697 | ||
6698 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | ||
6699 | !rdev->rlc_fw || !rdev->mc_fw) { | ||
6700 | r = si_init_microcode(rdev); | ||
6701 | if (r) { | ||
6702 | DRM_ERROR("Failed to load firmware!\n"); | ||
6703 | return r; | ||
6704 | } | ||
6705 | } | ||
6706 | |||
6707 | /* Initialize power management */ | ||
6708 | radeon_pm_init(rdev); | ||
6709 | |||
6601 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 6710 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
6602 | ring->ring_obj = NULL; | 6711 | ring->ring_obj = NULL; |
6603 | r600_ring_init(rdev, ring, 1024 * 1024); | 6712 | r600_ring_init(rdev, ring, 1024 * 1024); |
@@ -6664,6 +6773,7 @@ int si_init(struct radeon_device *rdev) | |||
6664 | 6773 | ||
6665 | void si_fini(struct radeon_device *rdev) | 6774 | void si_fini(struct radeon_device *rdev) |
6666 | { | 6775 | { |
6776 | radeon_pm_fini(rdev); | ||
6667 | si_cp_fini(rdev); | 6777 | si_cp_fini(rdev); |
6668 | cayman_dma_fini(rdev); | 6778 | cayman_dma_fini(rdev); |
6669 | si_fini_pg(rdev); | 6779 | si_fini_pg(rdev); |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 0b00c790fb77..512919b0156a 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
@@ -1738,6 +1738,8 @@ struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev); | |||
1738 | struct ni_power_info *ni_get_pi(struct radeon_device *rdev); | 1738 | struct ni_power_info *ni_get_pi(struct radeon_device *rdev); |
1739 | struct ni_ps *ni_get_ps(struct radeon_ps *rps); | 1739 | struct ni_ps *ni_get_ps(struct radeon_ps *rps); |
1740 | 1740 | ||
1741 | extern int si_mc_load_microcode(struct radeon_device *rdev); | ||
1742 | |||
1741 | static int si_populate_voltage_value(struct radeon_device *rdev, | 1743 | static int si_populate_voltage_value(struct radeon_device *rdev, |
1742 | const struct atom_voltage_table *table, | 1744 | const struct atom_voltage_table *table, |
1743 | u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage); | 1745 | u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage); |
@@ -1753,9 +1755,6 @@ static int si_calculate_sclk_params(struct radeon_device *rdev, | |||
1753 | u32 engine_clock, | 1755 | u32 engine_clock, |
1754 | SISLANDS_SMC_SCLK_VALUE *sclk); | 1756 | SISLANDS_SMC_SCLK_VALUE *sclk); |
1755 | 1757 | ||
1756 | extern void si_update_cg(struct radeon_device *rdev, | ||
1757 | u32 block, bool enable); | ||
1758 | |||
1759 | static struct si_power_info *si_get_pi(struct radeon_device *rdev) | 1758 | static struct si_power_info *si_get_pi(struct radeon_device *rdev) |
1760 | { | 1759 | { |
1761 | struct si_power_info *pi = rdev->pm.dpm.priv; | 1760 | struct si_power_info *pi = rdev->pm.dpm.priv; |
@@ -5754,6 +5753,11 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev, | |||
5754 | 5753 | ||
5755 | void si_dpm_setup_asic(struct radeon_device *rdev) | 5754 | void si_dpm_setup_asic(struct radeon_device *rdev) |
5756 | { | 5755 | { |
5756 | int r; | ||
5757 | |||
5758 | r = si_mc_load_microcode(rdev); | ||
5759 | if (r) | ||
5760 | DRM_ERROR("Failed to load MC firmware!\n"); | ||
5757 | rv770_get_memory_type(rdev); | 5761 | rv770_get_memory_type(rdev); |
5758 | si_read_clock_registers(rdev); | 5762 | si_read_clock_registers(rdev); |
5759 | si_enable_acpi_power_management(rdev); | 5763 | si_enable_acpi_power_management(rdev); |
@@ -5791,13 +5795,6 @@ int si_dpm_enable(struct radeon_device *rdev) | |||
5791 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; | 5795 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; |
5792 | int ret; | 5796 | int ret; |
5793 | 5797 | ||
5794 | si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
5795 | RADEON_CG_BLOCK_MC | | ||
5796 | RADEON_CG_BLOCK_SDMA | | ||
5797 | RADEON_CG_BLOCK_BIF | | ||
5798 | RADEON_CG_BLOCK_UVD | | ||
5799 | RADEON_CG_BLOCK_HDP), false); | ||
5800 | |||
5801 | if (si_is_smc_running(rdev)) | 5798 | if (si_is_smc_running(rdev)) |
5802 | return -EINVAL; | 5799 | return -EINVAL; |
5803 | if (pi->voltage_control) | 5800 | if (pi->voltage_control) |
@@ -5900,6 +5897,17 @@ int si_dpm_enable(struct radeon_device *rdev) | |||
5900 | si_enable_sclk_control(rdev, true); | 5897 | si_enable_sclk_control(rdev, true); |
5901 | si_start_dpm(rdev); | 5898 | si_start_dpm(rdev); |
5902 | 5899 | ||
5900 | si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | ||
5901 | |||
5902 | ni_update_current_ps(rdev, boot_ps); | ||
5903 | |||
5904 | return 0; | ||
5905 | } | ||
5906 | |||
5907 | int si_dpm_late_enable(struct radeon_device *rdev) | ||
5908 | { | ||
5909 | int ret; | ||
5910 | |||
5903 | if (rdev->irq.installed && | 5911 | if (rdev->irq.installed && |
5904 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 5912 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
5905 | PPSMC_Result result; | 5913 | PPSMC_Result result; |
@@ -5915,17 +5923,6 @@ int si_dpm_enable(struct radeon_device *rdev) | |||
5915 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); | 5923 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); |
5916 | } | 5924 | } |
5917 | 5925 | ||
5918 | si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | ||
5919 | |||
5920 | si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
5921 | RADEON_CG_BLOCK_MC | | ||
5922 | RADEON_CG_BLOCK_SDMA | | ||
5923 | RADEON_CG_BLOCK_BIF | | ||
5924 | RADEON_CG_BLOCK_UVD | | ||
5925 | RADEON_CG_BLOCK_HDP), true); | ||
5926 | |||
5927 | ni_update_current_ps(rdev, boot_ps); | ||
5928 | |||
5929 | return 0; | 5926 | return 0; |
5930 | } | 5927 | } |
5931 | 5928 | ||
@@ -5934,13 +5931,6 @@ void si_dpm_disable(struct radeon_device *rdev) | |||
5934 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); | 5931 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
5935 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; | 5932 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; |
5936 | 5933 | ||
5937 | si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
5938 | RADEON_CG_BLOCK_MC | | ||
5939 | RADEON_CG_BLOCK_SDMA | | ||
5940 | RADEON_CG_BLOCK_BIF | | ||
5941 | RADEON_CG_BLOCK_UVD | | ||
5942 | RADEON_CG_BLOCK_HDP), false); | ||
5943 | |||
5944 | if (!si_is_smc_running(rdev)) | 5934 | if (!si_is_smc_running(rdev)) |
5945 | return; | 5935 | return; |
5946 | si_disable_ulv(rdev); | 5936 | si_disable_ulv(rdev); |
@@ -6005,13 +5995,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev) | |||
6005 | struct radeon_ps *old_ps = &eg_pi->current_rps; | 5995 | struct radeon_ps *old_ps = &eg_pi->current_rps; |
6006 | int ret; | 5996 | int ret; |
6007 | 5997 | ||
6008 | si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
6009 | RADEON_CG_BLOCK_MC | | ||
6010 | RADEON_CG_BLOCK_SDMA | | ||
6011 | RADEON_CG_BLOCK_BIF | | ||
6012 | RADEON_CG_BLOCK_UVD | | ||
6013 | RADEON_CG_BLOCK_HDP), false); | ||
6014 | |||
6015 | ret = si_disable_ulv(rdev); | 5998 | ret = si_disable_ulv(rdev); |
6016 | if (ret) { | 5999 | if (ret) { |
6017 | DRM_ERROR("si_disable_ulv failed\n"); | 6000 | DRM_ERROR("si_disable_ulv failed\n"); |
@@ -6104,13 +6087,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev) | |||
6104 | return ret; | 6087 | return ret; |
6105 | } | 6088 | } |
6106 | 6089 | ||
6107 | si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
6108 | RADEON_CG_BLOCK_MC | | ||
6109 | RADEON_CG_BLOCK_SDMA | | ||
6110 | RADEON_CG_BLOCK_BIF | | ||
6111 | RADEON_CG_BLOCK_UVD | | ||
6112 | RADEON_CG_BLOCK_HDP), true); | ||
6113 | |||
6114 | return 0; | 6090 | return 0; |
6115 | } | 6091 | } |
6116 | 6092 | ||
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c index d422a1cbf727..e80efcf0c230 100644 --- a/drivers/gpu/drm/radeon/si_smc.c +++ b/drivers/gpu/drm/radeon/si_smc.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "sid.h" | 28 | #include "sid.h" |
29 | #include "ppsmc.h" | 29 | #include "ppsmc.h" |
30 | #include "radeon_ucode.h" | 30 | #include "radeon_ucode.h" |
31 | #include "sislands_smc.h" | ||
31 | 32 | ||
32 | static int si_set_smc_sram_address(struct radeon_device *rdev, | 33 | static int si_set_smc_sram_address(struct radeon_device *rdev, |
33 | u32 smc_address, u32 limit) | 34 | u32 smc_address, u32 limit) |
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index b322acc48097..caa3e61a38c2 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
@@ -94,6 +94,8 @@ | |||
94 | #define CG_SPLL_FUNC_CNTL_2 0x604 | 94 | #define CG_SPLL_FUNC_CNTL_2 0x604 |
95 | #define SCLK_MUX_SEL(x) ((x) << 0) | 95 | #define SCLK_MUX_SEL(x) ((x) << 0) |
96 | #define SCLK_MUX_SEL_MASK (0x1ff << 0) | 96 | #define SCLK_MUX_SEL_MASK (0x1ff << 0) |
97 | #define SPLL_CTLREQ_CHG (1 << 23) | ||
98 | #define SCLK_MUX_UPDATE (1 << 26) | ||
97 | #define CG_SPLL_FUNC_CNTL_3 0x608 | 99 | #define CG_SPLL_FUNC_CNTL_3 0x608 |
98 | #define SPLL_FB_DIV(x) ((x) << 0) | 100 | #define SPLL_FB_DIV(x) ((x) << 0) |
99 | #define SPLL_FB_DIV_MASK (0x3ffffff << 0) | 101 | #define SPLL_FB_DIV_MASK (0x3ffffff << 0) |
@@ -101,7 +103,10 @@ | |||
101 | #define SPLL_DITHEN (1 << 28) | 103 | #define SPLL_DITHEN (1 << 28) |
102 | #define CG_SPLL_FUNC_CNTL_4 0x60c | 104 | #define CG_SPLL_FUNC_CNTL_4 0x60c |
103 | 105 | ||
106 | #define SPLL_STATUS 0x614 | ||
107 | #define SPLL_CHG_STATUS (1 << 1) | ||
104 | #define SPLL_CNTL_MODE 0x618 | 108 | #define SPLL_CNTL_MODE 0x618 |
109 | #define SPLL_SW_DIR_CONTROL (1 << 0) | ||
105 | # define SPLL_REFCLK_SEL(x) ((x) << 8) | 110 | # define SPLL_REFCLK_SEL(x) ((x) << 8) |
106 | # define SPLL_REFCLK_SEL_MASK 0xFF00 | 111 | # define SPLL_REFCLK_SEL_MASK 0xFF00 |
107 | 112 | ||
@@ -559,6 +564,8 @@ | |||
559 | # define MRDCK0_BYPASS (1 << 24) | 564 | # define MRDCK0_BYPASS (1 << 24) |
560 | # define MRDCK1_BYPASS (1 << 25) | 565 | # define MRDCK1_BYPASS (1 << 25) |
561 | 566 | ||
567 | #define MPLL_CNTL_MODE 0x2bb0 | ||
568 | # define MPLL_MCLK_SEL (1 << 11) | ||
562 | #define MPLL_FUNC_CNTL 0x2bb4 | 569 | #define MPLL_FUNC_CNTL 0x2bb4 |
563 | #define BWCTRL(x) ((x) << 20) | 570 | #define BWCTRL(x) ((x) << 20) |
564 | #define BWCTRL_MASK (0xff << 20) | 571 | #define BWCTRL_MASK (0xff << 20) |
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h index 5578e9837026..10e945a49479 100644 --- a/drivers/gpu/drm/radeon/sislands_smc.h +++ b/drivers/gpu/drm/radeon/sislands_smc.h | |||
@@ -374,8 +374,6 @@ typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration; | |||
374 | 374 | ||
375 | #pragma pack(pop) | 375 | #pragma pack(pop) |
376 | 376 | ||
377 | int si_set_smc_sram_address(struct radeon_device *rdev, | ||
378 | u32 smc_address, u32 limit); | ||
379 | int si_copy_bytes_to_smc(struct radeon_device *rdev, | 377 | int si_copy_bytes_to_smc(struct radeon_device *rdev, |
380 | u32 smc_start_address, | 378 | u32 smc_start_address, |
381 | const u8 *src, u32 byte_count, u32 limit); | 379 | const u8 *src, u32 byte_count, u32 limit); |
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index 96ea6db8bf57..f121efe12dc5 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c | |||
@@ -71,7 +71,7 @@ static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] = | |||
71 | SUMO_DTC_DFLT_14, | 71 | SUMO_DTC_DFLT_14, |
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct sumo_ps *sumo_get_ps(struct radeon_ps *rps) | 74 | static struct sumo_ps *sumo_get_ps(struct radeon_ps *rps) |
75 | { | 75 | { |
76 | struct sumo_ps *ps = rps->ps_priv; | 76 | struct sumo_ps *ps = rps->ps_priv; |
77 | 77 | ||
@@ -1202,14 +1202,10 @@ static void sumo_update_requested_ps(struct radeon_device *rdev, | |||
1202 | int sumo_dpm_enable(struct radeon_device *rdev) | 1202 | int sumo_dpm_enable(struct radeon_device *rdev) |
1203 | { | 1203 | { |
1204 | struct sumo_power_info *pi = sumo_get_pi(rdev); | 1204 | struct sumo_power_info *pi = sumo_get_pi(rdev); |
1205 | int ret; | ||
1206 | 1205 | ||
1207 | if (sumo_dpm_enabled(rdev)) | 1206 | if (sumo_dpm_enabled(rdev)) |
1208 | return -EINVAL; | 1207 | return -EINVAL; |
1209 | 1208 | ||
1210 | ret = sumo_enable_clock_power_gating(rdev); | ||
1211 | if (ret) | ||
1212 | return ret; | ||
1213 | sumo_program_bootup_state(rdev); | 1209 | sumo_program_bootup_state(rdev); |
1214 | sumo_init_bsp(rdev); | 1210 | sumo_init_bsp(rdev); |
1215 | sumo_reset_am(rdev); | 1211 | sumo_reset_am(rdev); |
@@ -1233,6 +1229,19 @@ int sumo_dpm_enable(struct radeon_device *rdev) | |||
1233 | if (pi->enable_boost) | 1229 | if (pi->enable_boost) |
1234 | sumo_enable_boost_timer(rdev); | 1230 | sumo_enable_boost_timer(rdev); |
1235 | 1231 | ||
1232 | sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
1233 | |||
1234 | return 0; | ||
1235 | } | ||
1236 | |||
1237 | int sumo_dpm_late_enable(struct radeon_device *rdev) | ||
1238 | { | ||
1239 | int ret; | ||
1240 | |||
1241 | ret = sumo_enable_clock_power_gating(rdev); | ||
1242 | if (ret) | ||
1243 | return ret; | ||
1244 | |||
1236 | if (rdev->irq.installed && | 1245 | if (rdev->irq.installed && |
1237 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 1246 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
1238 | ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | 1247 | ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
@@ -1242,8 +1251,6 @@ int sumo_dpm_enable(struct radeon_device *rdev) | |||
1242 | radeon_irq_set(rdev); | 1251 | radeon_irq_set(rdev); |
1243 | } | 1252 | } |
1244 | 1253 | ||
1245 | sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
1246 | |||
1247 | return 0; | 1254 | return 0; |
1248 | } | 1255 | } |
1249 | 1256 | ||
diff --git a/drivers/gpu/drm/radeon/sumo_smc.c b/drivers/gpu/drm/radeon/sumo_smc.c index 18abba5b5810..fb081d2ae374 100644 --- a/drivers/gpu/drm/radeon/sumo_smc.c +++ b/drivers/gpu/drm/radeon/sumo_smc.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #define SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY 27 | 31 | #define SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY 27 |
32 | #define SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20 20 | 32 | #define SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20 20 |
33 | 33 | ||
34 | struct sumo_ps *sumo_get_ps(struct radeon_ps *rps); | ||
35 | struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev); | 34 | struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev); |
36 | 35 | ||
37 | static void sumo_send_msg_to_smu(struct radeon_device *rdev, u32 id) | 36 | static void sumo_send_msg_to_smu(struct radeon_device *rdev, u32 id) |
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index d700698a1f22..2d447192d6f7 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c | |||
@@ -342,14 +342,14 @@ static void trinity_apply_state_adjust_rules(struct radeon_device *rdev, | |||
342 | struct radeon_ps *new_rps, | 342 | struct radeon_ps *new_rps, |
343 | struct radeon_ps *old_rps); | 343 | struct radeon_ps *old_rps); |
344 | 344 | ||
345 | struct trinity_ps *trinity_get_ps(struct radeon_ps *rps) | 345 | static struct trinity_ps *trinity_get_ps(struct radeon_ps *rps) |
346 | { | 346 | { |
347 | struct trinity_ps *ps = rps->ps_priv; | 347 | struct trinity_ps *ps = rps->ps_priv; |
348 | 348 | ||
349 | return ps; | 349 | return ps; |
350 | } | 350 | } |
351 | 351 | ||
352 | struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev) | 352 | static struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev) |
353 | { | 353 | { |
354 | struct trinity_power_info *pi = rdev->pm.dpm.priv; | 354 | struct trinity_power_info *pi = rdev->pm.dpm.priv; |
355 | 355 | ||
@@ -1082,7 +1082,6 @@ void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable) | |||
1082 | int trinity_dpm_enable(struct radeon_device *rdev) | 1082 | int trinity_dpm_enable(struct radeon_device *rdev) |
1083 | { | 1083 | { |
1084 | struct trinity_power_info *pi = trinity_get_pi(rdev); | 1084 | struct trinity_power_info *pi = trinity_get_pi(rdev); |
1085 | int ret; | ||
1086 | 1085 | ||
1087 | trinity_acquire_mutex(rdev); | 1086 | trinity_acquire_mutex(rdev); |
1088 | 1087 | ||
@@ -1091,7 +1090,6 @@ int trinity_dpm_enable(struct radeon_device *rdev) | |||
1091 | return -EINVAL; | 1090 | return -EINVAL; |
1092 | } | 1091 | } |
1093 | 1092 | ||
1094 | trinity_enable_clock_power_gating(rdev); | ||
1095 | trinity_program_bootup_state(rdev); | 1093 | trinity_program_bootup_state(rdev); |
1096 | sumo_program_vc(rdev, 0x00C00033); | 1094 | sumo_program_vc(rdev, 0x00C00033); |
1097 | trinity_start_am(rdev); | 1095 | trinity_start_am(rdev); |
@@ -1105,6 +1103,18 @@ int trinity_dpm_enable(struct radeon_device *rdev) | |||
1105 | trinity_dpm_bapm_enable(rdev, false); | 1103 | trinity_dpm_bapm_enable(rdev, false); |
1106 | trinity_release_mutex(rdev); | 1104 | trinity_release_mutex(rdev); |
1107 | 1105 | ||
1106 | trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
1107 | |||
1108 | return 0; | ||
1109 | } | ||
1110 | |||
1111 | int trinity_dpm_late_enable(struct radeon_device *rdev) | ||
1112 | { | ||
1113 | int ret; | ||
1114 | |||
1115 | trinity_acquire_mutex(rdev); | ||
1116 | trinity_enable_clock_power_gating(rdev); | ||
1117 | |||
1108 | if (rdev->irq.installed && | 1118 | if (rdev->irq.installed && |
1109 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 1119 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
1110 | ret = trinity_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | 1120 | ret = trinity_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
@@ -1115,8 +1125,7 @@ int trinity_dpm_enable(struct radeon_device *rdev) | |||
1115 | rdev->irq.dpm_thermal = true; | 1125 | rdev->irq.dpm_thermal = true; |
1116 | radeon_irq_set(rdev); | 1126 | radeon_irq_set(rdev); |
1117 | } | 1127 | } |
1118 | 1128 | trinity_release_mutex(rdev); | |
1119 | trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
1120 | 1129 | ||
1121 | return 0; | 1130 | return 0; |
1122 | } | 1131 | } |
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c index 9672bcbc7312..99dd0455334d 100644 --- a/drivers/gpu/drm/radeon/trinity_smc.c +++ b/drivers/gpu/drm/radeon/trinity_smc.c | |||
@@ -27,9 +27,6 @@ | |||
27 | #include "trinity_dpm.h" | 27 | #include "trinity_dpm.h" |
28 | #include "ppsmc.h" | 28 | #include "ppsmc.h" |
29 | 29 | ||
30 | struct trinity_ps *trinity_get_ps(struct radeon_ps *rps); | ||
31 | struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev); | ||
32 | |||
33 | static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id) | 30 | static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id) |
34 | { | 31 | { |
35 | int i; | 32 | int i; |