author     Dave Airlie <airlied@redhat.com>   2015-03-09 05:58:30 -0400
committer  Dave Airlie <airlied@redhat.com>   2015-03-09 05:58:30 -0400
commit     a8c6ecb3be7029881f7c95e5e201a629094a4e1a (patch)
tree       eb006541f40528f51334eefc725f155c4ce386a6 /drivers/gpu/drm
parent     8dd0eb3566711d81bfbe2b4421b33f0dd723cec4 (diff)
parent     9eccca0843205f87c00404b663188b88eb248051 (diff)
Merge tag 'v4.0-rc3' into drm-next
Linux 4.0-rc3 backmerge to fix two i915 conflicts, and get
some mainline bug fixes needed for my testing box
Conflicts:
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_display.c
Diffstat (limited to 'drivers/gpu/drm')
54 files changed, 598 insertions, 386 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index b3589d0e39b9..910ff8ab9c9c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -62,12 +62,18 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
         return KFD_MQD_TYPE_CP;
 }
 
-static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
+unsigned int get_first_pipe(struct device_queue_manager *dqm)
 {
-        BUG_ON(!dqm);
+        BUG_ON(!dqm || !dqm->dev);
         return dqm->dev->shared_resources.first_compute_pipe;
 }
 
+unsigned int get_pipes_num(struct device_queue_manager *dqm)
+{
+        BUG_ON(!dqm || !dqm->dev);
+        return dqm->dev->shared_resources.compute_pipe_count;
+}
+
 static inline unsigned int get_pipes_num_cpsch(void)
 {
         return PIPE_PER_ME_CP_SCHEDULING;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index d64f86cda34f..488f51d19427 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -163,6 +163,8 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
                 struct qcm_process_device *qpd);
 int init_pipelines(struct device_queue_manager *dqm,
                 unsigned int pipes_num, unsigned int first_pipe);
+unsigned int get_first_pipe(struct device_queue_manager *dqm);
+unsigned int get_pipes_num(struct device_queue_manager *dqm);
 
 extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
 {
@@ -175,10 +177,4 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
         return (pdd->lds_base >> 60) & 0x0E;
 }
 
-extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
-{
-        BUG_ON(!dqm || !dqm->dev);
-        return dqm->dev->shared_resources.compute_pipe_count;
-}
-
 #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 6b072466e2a6..5469efe0523e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -131,5 +131,5 @@ static int register_process_cik(struct device_queue_manager *dqm,
 
 static int initialize_cpsch_cik(struct device_queue_manager *dqm)
 {
-        return init_pipelines(dqm, get_pipes_num(dqm), 0);
+        return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
 }
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index b81cea6baeea..d55c0c232e1d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -85,7 +85,7 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
                      (adj->crtc_hdisplay - 1) |
                      ((adj->crtc_vdisplay - 1) << 16));
 
-        cfg = ATMEL_HLCDC_CLKPOL;
+        cfg = 0;
 
         prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
         mode_rate = adj->crtc_clock * 1000;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 01afeafe17b1..c4bb1f9f95c6 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -313,8 +313,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 
         pm_runtime_enable(dev->dev);
 
-        pm_runtime_put_sync(dev->dev);
-
         ret = atmel_hlcdc_dc_modeset_init(dev);
         if (ret < 0) {
                 dev_err(dev->dev, "failed to initialize mode setting\n");
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
index d1dca39b76dd..377e43cea9dd 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
@@ -311,7 +311,8 @@ void atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
 
         /* Disable the layer */
         regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
-                     ATMEL_HLCDC_LAYER_RST);
+                     ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
+                     ATMEL_HLCDC_LAYER_UPDATE);
 
         /* Clear all pending interrupts */
         regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 29dbde5ecef7..927f3445ff38 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2138,7 +2138,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
         DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
 
         mutex_lock(&dev->mode_config.mutex);
-        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 
         connector = drm_connector_find(dev, out_resp->connector_id);
         if (!connector) {
@@ -2168,6 +2167,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
         out_resp->mm_height = connector->display_info.height_mm;
         out_resp->subpixel = connector->display_info.subpixel_order;
         out_resp->connection = connector->status;
+
+        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
         encoder = drm_connector_get_encoder(connector);
         if (encoder)
                 out_resp->encoder_id = encoder->base.id;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04a209e2b66d..7fc6f8bd4821 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -91,29 +91,29 @@
  */
 
 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-                                                unsigned long size,
+                                                u64 size,
                                                 unsigned alignment,
                                                 unsigned long color,
                                                 enum drm_mm_search_flags flags);
 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-                                                unsigned long size,
+                                                u64 size,
                                                 unsigned alignment,
                                                 unsigned long color,
-                                                unsigned long start,
-                                                unsigned long end,
+                                                u64 start,
+                                                u64 end,
                                                 enum drm_mm_search_flags flags);
 
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                  struct drm_mm_node *node,
-                                 unsigned long size, unsigned alignment,
+                                 u64 size, unsigned alignment,
                                  unsigned long color,
                                  enum drm_mm_allocator_flags flags)
 {
         struct drm_mm *mm = hole_node->mm;
-        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
-        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
-        unsigned long adj_start = hole_start;
-        unsigned long adj_end = hole_end;
+        u64 hole_start = drm_mm_hole_node_start(hole_node);
+        u64 hole_end = drm_mm_hole_node_end(hole_node);
+        u64 adj_start = hole_start;
+        u64 adj_end = hole_end;
 
         BUG_ON(node->allocated);
 
@@ -124,12 +124,15 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                 adj_start = adj_end - size;
 
         if (alignment) {
-                unsigned tmp = adj_start % alignment;
-                if (tmp) {
+                u64 tmp = adj_start;
+                unsigned rem;
+
+                rem = do_div(tmp, alignment);
+                if (rem) {
                         if (flags & DRM_MM_CREATE_TOP)
-                                adj_start -= tmp;
+                                adj_start -= rem;
                         else
-                                adj_start += alignment - tmp;
+                                adj_start += alignment - rem;
                 }
         }
 
@@ -176,9 +179,9 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
         struct drm_mm_node *hole;
-        unsigned long end = node->start + node->size;
-        unsigned long hole_start;
-        unsigned long hole_end;
+        u64 end = node->start + node->size;
+        u64 hole_start;
+        u64 hole_end;
 
         BUG_ON(node == NULL);
 
@@ -227,7 +230,7 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
  * 0 on success, -ENOSPC if there's no suitable hole.
  */
 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
-                               unsigned long size, unsigned alignment,
+                               u64 size, unsigned alignment,
                                unsigned long color,
                                enum drm_mm_search_flags sflags,
                                enum drm_mm_allocator_flags aflags)
@@ -246,16 +249,16 @@ EXPORT_SYMBOL(drm_mm_insert_node_generic);
 
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                        struct drm_mm_node *node,
-                                       unsigned long size, unsigned alignment,
+                                       u64 size, unsigned alignment,
                                        unsigned long color,
-                                       unsigned long start, unsigned long end,
+                                       u64 start, u64 end,
                                        enum drm_mm_allocator_flags flags)
 {
         struct drm_mm *mm = hole_node->mm;
-        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
-        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
-        unsigned long adj_start = hole_start;
-        unsigned long adj_end = hole_end;
+        u64 hole_start = drm_mm_hole_node_start(hole_node);
+        u64 hole_end = drm_mm_hole_node_end(hole_node);
+        u64 adj_start = hole_start;
+        u64 adj_end = hole_end;
 
         BUG_ON(!hole_node->hole_follows || node->allocated);
 
@@ -271,12 +274,15 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
         if (alignment) {
-                unsigned tmp = adj_start % alignment;
-                if (tmp) {
+                u64 tmp = adj_start;
+                unsigned rem;
+
+                rem = do_div(tmp, alignment);
+                if (rem) {
                         if (flags & DRM_MM_CREATE_TOP)
-                                adj_start -= tmp;
+                                adj_start -= rem;
                         else
-                                adj_start += alignment - tmp;
+                                adj_start += alignment - rem;
                 }
         }
 
@@ -324,9 +330,9 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
  * 0 on success, -ENOSPC if there's no suitable hole.
  */
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
-                                        unsigned long size, unsigned alignment,
+                                        u64 size, unsigned alignment,
                                         unsigned long color,
-                                        unsigned long start, unsigned long end,
+                                        u64 start, u64 end,
                                         enum drm_mm_search_flags sflags,
                                         enum drm_mm_allocator_flags aflags)
 {
@@ -387,32 +393,34 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_remove_node);
 
-static int check_free_hole(unsigned long start, unsigned long end,
-                           unsigned long size, unsigned alignment)
+static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
 {
         if (end - start < size)
                 return 0;
 
         if (alignment) {
-                unsigned tmp = start % alignment;
+                u64 tmp = start;
+                unsigned rem;
+
+                rem = do_div(tmp, alignment);
                 if (tmp)
-                        start += alignment - tmp;
+                        start += alignment - rem;
         }
 
         return end >= start + size;
 }
 
 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-                                                       unsigned long size,
+                                                       u64 size,
                                                        unsigned alignment,
                                                        unsigned long color,
                                                        enum drm_mm_search_flags flags)
 {
         struct drm_mm_node *entry;
         struct drm_mm_node *best;
-        unsigned long adj_start;
-        unsigned long adj_end;
-        unsigned long best_size;
+        u64 adj_start;
+        u64 adj_end;
+        u64 best_size;
 
         BUG_ON(mm->scanned_blocks);
 
@@ -421,7 +429,7 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 
         __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
                                flags & DRM_MM_SEARCH_BELOW) {
-                unsigned long hole_size = adj_end - adj_start;
+                u64 hole_size = adj_end - adj_start;
 
                 if (mm->color_adjust) {
                         mm->color_adjust(entry, color, &adj_start, &adj_end);
@@ -445,18 +453,18 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 }
 
 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-                                                        unsigned long size,
+                                                        u64 size,
                                                         unsigned alignment,
                                                         unsigned long color,
-                                                        unsigned long start,
-                                                        unsigned long end,
+                                                        u64 start,
+                                                        u64 end,
                                                         enum drm_mm_search_flags flags)
 {
         struct drm_mm_node *entry;
         struct drm_mm_node *best;
-        unsigned long adj_start;
-        unsigned long adj_end;
-        unsigned long best_size;
+        u64 adj_start;
+        u64 adj_end;
+        u64 best_size;
 
         BUG_ON(mm->scanned_blocks);
 
@@ -465,7 +473,7 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
 
         __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
                                flags & DRM_MM_SEARCH_BELOW) {
-                unsigned long hole_size = adj_end - adj_start;
+                u64 hole_size = adj_end - adj_start;
 
                 if (adj_start < start)
                         adj_start = start;
@@ -561,7 +569,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
  * adding/removing nodes to/from the scan list are allowed.
  */
 void drm_mm_init_scan(struct drm_mm *mm,
-                      unsigned long size,
+                      u64 size,
                       unsigned alignment,
                       unsigned long color)
 {
@@ -594,11 +602,11 @@ EXPORT_SYMBOL(drm_mm_init_scan);
  * adding/removing nodes to/from the scan list are allowed.
  */
 void drm_mm_init_scan_with_range(struct drm_mm *mm,
-                                 unsigned long size,
+                                 u64 size,
                                  unsigned alignment,
                                  unsigned long color,
-                                 unsigned long start,
-                                 unsigned long end)
+                                 u64 start,
+                                 u64 end)
 {
         mm->scan_color = color;
         mm->scan_alignment = alignment;
@@ -627,8 +635,8 @@ bool drm_mm_scan_add_block(struct drm_mm_node *node)
 {
         struct drm_mm *mm = node->mm;
         struct drm_mm_node *prev_node;
-        unsigned long hole_start, hole_end;
-        unsigned long adj_start, adj_end;
+        u64 hole_start, hole_end;
+        u64 adj_start, adj_end;
 
         mm->scanned_blocks++;
 
@@ -731,7 +739,7 @@ EXPORT_SYMBOL(drm_mm_clean);
  *
  * Note that @mm must be cleared to 0 before calling this function.
  */
-void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
 {
         INIT_LIST_HEAD(&mm->hole_stack);
         mm->scanned_blocks = 0;
@@ -766,18 +774,17 @@ void drm_mm_takedown(struct drm_mm * mm)
 }
 EXPORT_SYMBOL(drm_mm_takedown);
 
-static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
-                                       const char *prefix)
+static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
+                             const char *prefix)
 {
-        unsigned long hole_start, hole_end, hole_size;
+        u64 hole_start, hole_end, hole_size;
 
         if (entry->hole_follows) {
                 hole_start = drm_mm_hole_node_start(entry);
                 hole_end = drm_mm_hole_node_end(entry);
                 hole_size = hole_end - hole_start;
-                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
-                       prefix, hole_start, hole_end,
-                       hole_size);
+                pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
+                         hole_end, hole_size);
                 return hole_size;
         }
 
@@ -792,35 +799,34 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 {
         struct drm_mm_node *entry;
-        unsigned long total_used = 0, total_free = 0, total = 0;
+        u64 total_used = 0, total_free = 0, total = 0;
 
         total_free += drm_mm_debug_hole(&mm->head_node, prefix);
 
         drm_mm_for_each_node(entry, mm) {
-                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
-                       prefix, entry->start, entry->start + entry->size,
-                       entry->size);
+                pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
+                         entry->start + entry->size, entry->size);
                 total_used += entry->size;
                 total_free += drm_mm_debug_hole(entry, prefix);
         }
         total = total_free + total_used;
 
-        printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
+        pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
                 total_used, total_free);
 }
 EXPORT_SYMBOL(drm_mm_debug_table);
 
 #if defined(CONFIG_DEBUG_FS)
-static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
+static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
 {
-        unsigned long hole_start, hole_end, hole_size;
+        u64 hole_start, hole_end, hole_size;
 
         if (entry->hole_follows) {
                 hole_start = drm_mm_hole_node_start(entry);
                 hole_end = drm_mm_hole_node_end(entry);
                 hole_size = hole_end - hole_start;
-                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
-                           hole_start, hole_end, hole_size);
+                seq_printf(m, "%#llx-%#llx: %llu: free\n", hole_start,
+                           hole_end, hole_size);
                 return hole_size;
         }
 
@@ -835,20 +841,20 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
         struct drm_mm_node *entry;
-        unsigned long total_used = 0, total_free = 0, total = 0;
+        u64 total_used = 0, total_free = 0, total = 0;
 
         total_free += drm_mm_dump_hole(m, &mm->head_node);
 
         drm_mm_for_each_node(entry, mm) {
-                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
-                           entry->start, entry->start + entry->size,
-                           entry->size);
+                seq_printf(m, "%#016llx-%#016llx: %llu: used\n", entry->start,
+                           entry->start + entry->size, entry->size);
                 total_used += entry->size;
                 total_free += drm_mm_dump_hole(m, entry);
         }
         total = total_free + total_used;
 
-        seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
+        seq_printf(m, "total: %llu, used %llu free %llu\n", total,
+                   total_used, total_free);
         return 0;
 }
 EXPORT_SYMBOL(drm_mm_dump_table);
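A note on the drm_mm.c hunks above: once the sizes and offsets become u64, a plain '%' can no longer be used on 32-bit kernels, which is why the alignment checks switch to do_div(). A minimal sketch of that pattern follows; the helper name align_start() is made up for illustration, not part of the commit.

/* Illustrative sketch only: do_div() replaces a 64-bit '%'. */
#include <linux/kernel.h>
#include <asm/div64.h>

static u64 align_start(u64 start, unsigned alignment)
{
        u64 tmp = start;
        unsigned rem;

        if (!alignment)
                return start;

        /* do_div() divides 'tmp' in place and returns the remainder,
         * avoiding a 64-by-32 '%' that 32-bit kernels cannot link. */
        rem = do_div(tmp, alignment);
        if (rem)
                start += alignment - rem;

        return start;
}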
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 94b3984dbea0..e38f45374d55 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -153,12 +153,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                         seq_puts(m, " (pp");
                 else
                         seq_puts(m, " (g");
-                seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)",
+                seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
                            vma->node.start, vma->node.size,
                            vma->ggtt_view.type);
         }
         if (obj->stolen)
-                seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
+                seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
         if (obj->pin_mappable || obj->fault_mappable) {
                 char s[3], *t = s;
                 if (obj->pin_mappable)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 875b1b7964c3..0001642c38b4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -633,7 +633,7 @@ static int i915_drm_suspend(struct drm_device *dev)
         return 0;
 }
 
-static int i915_drm_suspend_late(struct drm_device *drm_dev)
+static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 {
         struct drm_i915_private *dev_priv = drm_dev->dev_private;
         int ret;
@@ -647,7 +647,17 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev)
         }
 
         pci_disable_device(drm_dev->pdev);
-        pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+        /*
+         * During hibernation on some GEN4 platforms the BIOS may try to access
+         * the device even though it's already in D3 and hang the machine. So
+         * leave the device in D0 on those platforms and hope the BIOS will
+         * power down the device properly. Platforms where this was seen:
+         * Lenovo Thinkpad X301, X61s
+         */
+        if (!(hibernation &&
+              drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
+              INTEL_INFO(dev_priv)->gen == 4))
+                pci_set_power_state(drm_dev->pdev, PCI_D3hot);
 
         return 0;
 }
@@ -673,7 +683,7 @@ int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
         if (error)
                 return error;
 
-        return i915_drm_suspend_late(dev);
+        return i915_drm_suspend_late(dev, false);
 }
 
 static int i915_drm_resume(struct drm_device *dev)
@@ -953,7 +963,17 @@ static int i915_pm_suspend_late(struct device *dev)
         if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                 return 0;
 
-        return i915_drm_suspend_late(drm_dev);
+        return i915_drm_suspend_late(drm_dev, false);
+}
+
+static int i915_pm_poweroff_late(struct device *dev)
+{
+        struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+
+        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+                return 0;
+
+        return i915_drm_suspend_late(drm_dev, true);
 }
 
 static int i915_pm_resume_early(struct device *dev)
@@ -1523,7 +1543,7 @@ static const struct dev_pm_ops i915_pm_ops = {
         .thaw_early = i915_pm_resume_early,
         .thaw = i915_pm_resume,
         .poweroff = i915_pm_suspend,
-        .poweroff_late = i915_pm_suspend_late,
+        .poweroff_late = i915_pm_poweroff_late,
         .restore_early = i915_pm_resume_early,
         .restore = i915_pm_resume,
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2903090f25e5..ee5bc43dfc0b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2004,6 +2004,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
  * number comparisons on buffer last_read|write_seqno. It also allows an
  * emission time to be associated with the request for tracking how far ahead
  * of the GPU the submission is.
+ *
+ * The requests are reference counted, so upon creation they should have an
+ * initial reference taken using kref_init
  */
 struct drm_i915_gem_request {
         struct kref ref;
@@ -2027,7 +2030,16 @@ struct drm_i915_gem_request {
         /** Position in the ringbuffer of the end of the whole request */
         u32 tail;
 
-        /** Context and ring buffer related to this request */
+        /**
+         * Context and ring buffer related to this request
+         * Contexts are refcounted, so when this request is associated with a
+         * context, we must increment the context's refcount, to guarantee that
+         * it persists while any request is linked to it. Requests themselves
+         * are also refcounted, so the request will only be freed when the last
+         * reference to it is dismissed, and the code in
+         * i915_gem_request_free() will then decrement the refcount on the
+         * context.
+         */
         struct intel_context *ctx;
         struct intel_ringbuffer *ringbuf;
 
@@ -2266,6 +2278,7 @@ struct drm_i915_cmd_table {
                                  (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev)         (IS_BROADWELL(dev) && \
                                  ((INTEL_DEVID(dev) & 0xf) == 0x6 ||    \
+                                 (INTEL_DEVID(dev) & 0xf) == 0xb ||     \
                                  (INTEL_DEVID(dev) & 0xf) == 0xe))
 #define IS_BDW_GT3(dev)         (IS_BROADWELL(dev) && \
                                  (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
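The new comments in i915_drv.h above describe the request/context refcounting rules. For reference, this is the generic kref pattern they follow; struct foo and its helpers below are made-up names for illustration, not i915 code.

/* Illustrative sketch only: the generic kref lifecycle. */
#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
        struct kref ref;
};

static void foo_release(struct kref *kref)
{
        kfree(container_of(kref, struct foo, ref));
}

static struct foo *foo_create(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (f)
                kref_init(&f->ref);     /* creation takes the first reference */
        return f;
}

static void foo_get(struct foo *f)
{
        kref_get(&f->ref);              /* each additional user takes its own */
}

static void foo_put(struct foo *f)
{
        kref_put(&f->ref, foo_release); /* the last put frees the object */
}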
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1b2a1eb3aafc..0107c2ae77d0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2664,8 +2664,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                 if (submit_req->ctx != ring->default_context)
                         intel_lr_context_unpin(ring, submit_req->ctx);
 
-                i915_gem_context_unreference(submit_req->ctx);
-                kfree(submit_req);
+                i915_gem_request_unreference(submit_req);
         }
 
         /*
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index bd95776c3144..74df3d1581dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1229,7 +1229,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 
         ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 
-        DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
+        DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
                          ppgtt->node.size >> 20,
                          ppgtt->node.start / PAGE_SIZE);
 
@@ -1797,8 +1797,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 
 static void i915_gtt_color_adjust(struct drm_mm_node *node,
                                   unsigned long color,
-                                  unsigned long *start,
-                                  unsigned long *end)
+                                  u64 *start,
+                                  u64 *end)
 {
         if (node->color != color)
                 *start += 4096;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f1de95f7432c..f8da71682c96 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -485,10 +485,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                         stolen_offset, gtt_offset, size);
 
         /* KISS and expect everything to be page-aligned */
-        BUG_ON(stolen_offset & 4095);
-        BUG_ON(size & 4095);
-
-        if (WARN_ON(size == 0))
+        if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
+            WARN_ON(stolen_offset & 4095))
                 return NULL;
 
         stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7a24bd1a51f6..6377b22269ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -335,9 +335,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                 return -EINVAL;
         }
 
+        mutex_lock(&dev->struct_mutex);
         if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
-                drm_gem_object_unreference_unlocked(&obj->base);
-                return -EBUSY;
+                ret = -EBUSY;
+                goto err;
         }
 
         if (args->tiling_mode == I915_TILING_NONE) {
@@ -369,7 +370,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                 }
         }
 
-        mutex_lock(&dev->struct_mutex);
         if (args->tiling_mode != obj->tiling_mode ||
             args->stride != obj->stride) {
                 /* We need to rebind the object if its current allocation
@@ -424,6 +424,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                 obj->bit_17 = NULL;
         }
 
+err:
         drm_gem_object_unreference(&obj->base);
         mutex_unlock(&dev->struct_mutex);
 
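The i915_gem_set_tiling() hunks above move the mutex_lock() before the pin/framebuffer check and route the early exit through a new err: label, so every path drops the object reference and the lock in one place. A minimal sketch of that single-exit shape, with made-up names:

/* Illustrative sketch only: lock early, unwind through one label. */
#include <linux/errno.h>
#include <linux/mutex.h>

static int foo_ioctl(struct mutex *lock, bool busy)
{
        int ret = 0;

        mutex_lock(lock);               /* take the lock before any checks */

        if (busy) {
                ret = -EBUSY;
                goto err;               /* all exits share one unlock path */
        }

        /* ... do the real work under the lock ... */

err:
        mutex_unlock(lock);
        return ret;
}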
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6ebea4614204..9baecb79de8c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1840,6 +1840,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
         u32 iir, gt_iir, pm_iir;
         irqreturn_t ret = IRQ_NONE;
 
+        if (!intel_irqs_enabled(dev_priv))
+                return IRQ_NONE;
+
         while (true) {
                 /* Find, clear, then process each source of interrupt */
 
@@ -1884,6 +1887,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
         u32 master_ctl, iir;
         irqreturn_t ret = IRQ_NONE;
 
+        if (!intel_irqs_enabled(dev_priv))
+                return IRQ_NONE;
+
         for (;;) {
                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
                 iir = I915_READ(VLV_IIR);
@@ -2156,6 +2162,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
         irqreturn_t ret = IRQ_NONE;
 
+        if (!intel_irqs_enabled(dev_priv))
+                return IRQ_NONE;
+
         /* We get interrupts on unclaimed registers, so check for this before we
          * do any I915_{READ,WRITE}. */
         intel_uncore_check_errors(dev);
@@ -2227,6 +2236,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
         enum pipe pipe;
         u32 aux_mask = GEN8_AUX_CHANNEL_A;
 
+        if (!intel_irqs_enabled(dev_priv))
+                return IRQ_NONE;
+
         if (IS_GEN9(dev))
                 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
                         GEN9_AUX_CHANNEL_D;
@@ -3704,6 +3716,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
+        if (!intel_irqs_enabled(dev_priv))
+                return IRQ_NONE;
+
         iir = I915_READ16(IIR);
         if (iir == 0)
                 return IRQ_NONE;
@@ -3884,6 +3899,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
         int pipe, ret = IRQ_NONE;
 
+        if (!intel_irqs_enabled(dev_priv))
+                return IRQ_NONE;
+
         iir = I915_READ(IIR);
         do {
                 bool irq_received = (iir & ~flip_mask) != 0;
@@ -4104,6 +4122,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
+        if (!intel_irqs_enabled(dev_priv))
+                return IRQ_NONE;
+
         iir = I915_READ(IIR);
 
         for (;;) {
@@ -4451,6 +4472,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 {
         dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
         dev_priv->pm.irqs_enabled = false;
+        synchronize_irq(dev_priv->dev->irq);
 }
 
 /**
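Each i915_irq.c handler above now bails out once interrupts have been marked disabled, and the disable path gains a synchronize_irq() so a handler already running on another CPU finishes before the hardware is powered down. Roughly, the ordering looks like the sketch below; the names are invented, not the driver's.

/* Illustrative sketch only: guard flag in the handler, synchronize on disable. */
#include <linux/compiler.h>
#include <linux/interrupt.h>

static bool irqs_enabled;

static irqreturn_t my_irq_handler(int irq, void *arg)
{
        if (!READ_ONCE(irqs_enabled))
                return IRQ_NONE;        /* device may already be powered down */

        /* ... normal interrupt servicing ... */
        return IRQ_HANDLED;
}

static void my_disable_irqs(int irq)
{
        WRITE_ONCE(irqs_enabled, false);
        synchronize_irq(irq);           /* wait out handlers on other CPUs */
}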
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dbd817928d0d..31f3b11589b9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2416,13 +2416,19 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
         struct drm_i915_gem_object *obj = NULL;
         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
         struct drm_framebuffer *fb = &plane_config->fb->base;
-        u32 base = plane_config->base;
+        u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
+        u32 size_aligned = round_up(plane_config->base + plane_config->size,
+                                    PAGE_SIZE);
+
+        size_aligned -= base_aligned;
 
         if (plane_config->size == 0)
                 return false;
 
-        obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
-                                                              plane_config->size);
+        obj = i915_gem_object_create_stolen_for_preallocated(dev,
+                                                              base_aligned,
+                                                              base_aligned,
+                                                              size_aligned);
         if (!obj)
                 return false;
 
@@ -2830,10 +2836,19 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
         case DRM_FORMAT_XRGB8888:
                 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
                 break;
+        case DRM_FORMAT_ARGB8888:
+                plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+                plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+                break;
         case DRM_FORMAT_XBGR8888:
                 plane_ctl |= PLANE_CTL_ORDER_RGBX;
                 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
                 break;
+        case DRM_FORMAT_ABGR8888:
+                plane_ctl |= PLANE_CTL_ORDER_RGBX;
+                plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+                plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+                break;
         case DRM_FORMAT_XRGB2101010:
                 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
                 break;
@@ -6755,7 +6770,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                                                fb->pixel_format,
                                                fb->modifier[0]);
 
-        plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
+        plane_config->size = fb->pitches[0] * aligned_height;
 
         DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                       pipe_name(pipe), plane, fb->width, fb->height,
@@ -7804,7 +7819,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
                                                fb->pixel_format,
                                                fb->modifier[0]);
 
-        plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);
+        plane_config->size = fb->pitches[0] * aligned_height;
 
         DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                       pipe_name(pipe), fb->width, fb->height,
@@ -7901,7 +7916,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
                                                fb->pixel_format,
                                                fb->modifier[0]);
 
-        plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
+        plane_config->size = fb->pitches[0] * aligned_height;
 
         DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                       pipe_name(pipe), fb->width, fb->height,
@@ -8844,6 +8859,7 @@ retry:
                         old->release_fb->funcs->destroy(old->release_fb);
                 goto fail;
         }
+        crtc->primary->crtc = crtc;
 
         /* let the connector get through one full cycle before testing */
         intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -12279,10 +12295,9 @@ intel_check_cursor_plane(struct drm_plane *plane,
                 return -ENOMEM;
         }
 
-        if (fb == crtc->cursor->fb)
-                return 0;
-
-        if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
+        /* we only need to pin inside GTT if cursor is non-phy */
+        mutex_lock(&dev->struct_mutex);
+        if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
                 DRM_DEBUG_KMS("cursor cannot be tiled\n");
                 ret = -EINVAL;
         }
@@ -13236,6 +13251,9 @@ static struct intel_quirk intel_quirks[] = {
 
         /* HP Chromebook 14 (Celeron 2955U) */
         { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+        /* Dell Chromebook 11 */
+        { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 04e248dd2259..54daa66c6970 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -282,16 +282,6 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
         return ret;
 }
 
-static bool
-__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
-                                      enum pipe pipe)
-{
-        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-        return !intel_crtc->cpu_fifo_underrun_disabled;
-}
-
 /**
  * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
  * @dev_priv: i915 device instance
@@ -352,9 +342,15 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
 void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
                                          enum pipe pipe)
 {
+        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+        /* We may be called too early in init, thanks BIOS! */
+        if (crtc == NULL)
+                return;
+
         /* GMCH can't disable fifo underruns, filter them. */
         if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
-            !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
+            to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
                 return;
 
         if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index a2dc9a11a248..fcb074bd55dc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -512,18 +512,19 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
                  * If there isn't a request associated with this submission,
                  * create one as a temporary holder.
                  */
-                WARN(1, "execlist context submission without request");
                 request = kzalloc(sizeof(*request), GFP_KERNEL);
                 if (request == NULL)
                         return -ENOMEM;
                 request->ring = ring;
                 request->ctx = to;
+                kref_init(&request->ref);
+                request->uniq = dev_priv->request_uniq++;
+                i915_gem_context_reference(request->ctx);
         } else {
+                i915_gem_request_reference(request);
                 WARN_ON(to != request->ctx);
         }
         request->tail = tail;
-        i915_gem_request_reference(request);
-        i915_gem_context_reference(request->ctx);
 
         intel_runtime_pm_get(dev_priv);
 
@@ -742,7 +743,6 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
                 if (ctx_obj && (ctx != ring->default_context))
                         intel_lr_context_unpin(ring, ctx);
                 intel_runtime_pm_put(dev_priv);
-                i915_gem_context_unreference(ctx);
                 list_del(&req->execlist_link);
                 i915_gem_request_unreference(req);
         }
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 121d30ca2d44..87fe8ed92ebe 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c | |||
@@ -70,7 +70,9 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = { | |||
70 | 118800000, { 0x091c, 0x091c, 0x06dc }, | 70 | 118800000, { 0x091c, 0x091c, 0x06dc }, |
71 | }, { | 71 | }, { |
72 | 216000000, { 0x06dc, 0x0b5c, 0x091c }, | 72 | 216000000, { 0x06dc, 0x0b5c, 0x091c }, |
73 | } | 73 | }, { |
74 | ~0UL, { 0x0000, 0x0000, 0x0000 }, | ||
75 | }, | ||
74 | }; | 76 | }; |
75 | 77 | ||
76 | static const struct dw_hdmi_sym_term imx_sym_term[] = { | 78 | static const struct dw_hdmi_sym_term imx_sym_term[] = { |
@@ -136,11 +138,34 @@ static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = { | |||
136 | .destroy = drm_encoder_cleanup, | 138 | .destroy = drm_encoder_cleanup, |
137 | }; | 139 | }; |
138 | 140 | ||
141 | static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con, | ||
142 | struct drm_display_mode *mode) | ||
143 | { | ||
144 | if (mode->clock < 13500) | ||
145 | return MODE_CLOCK_LOW; | ||
146 | if (mode->clock > 266000) | ||
147 | return MODE_CLOCK_HIGH; | ||
148 | |||
149 | return MODE_OK; | ||
150 | } | ||
151 | |||
152 | static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con, | ||
153 | struct drm_display_mode *mode) | ||
154 | { | ||
155 | if (mode->clock < 13500) | ||
156 | return MODE_CLOCK_LOW; | ||
157 | if (mode->clock > 270000) | ||
158 | return MODE_CLOCK_HIGH; | ||
159 | |||
160 | return MODE_OK; | ||
161 | } | ||
162 | |||
139 | static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = { | 163 | static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = { |
140 | .mpll_cfg = imx_mpll_cfg, | 164 | .mpll_cfg = imx_mpll_cfg, |
141 | .cur_ctr = imx_cur_ctr, | 165 | .cur_ctr = imx_cur_ctr, |
142 | .sym_term = imx_sym_term, | 166 | .sym_term = imx_sym_term, |
143 | .dev_type = IMX6Q_HDMI, | 167 | .dev_type = IMX6Q_HDMI, |
168 | .mode_valid = imx6q_hdmi_mode_valid, | ||
144 | }; | 169 | }; |
145 | 170 | ||
146 | static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { | 171 | static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { |
@@ -148,6 +173,7 @@ static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { | |||
148 | .cur_ctr = imx_cur_ctr, | 173 | .cur_ctr = imx_cur_ctr, |
149 | .sym_term = imx_sym_term, | 174 | .sym_term = imx_sym_term, |
150 | .dev_type = IMX6DL_HDMI, | 175 | .dev_type = IMX6DL_HDMI, |
176 | .mode_valid = imx6dl_hdmi_mode_valid, | ||
151 | }; | 177 | }; |
152 | 178 | ||
153 | static const struct of_device_id dw_hdmi_imx_dt_ids[] = { | 179 | static const struct of_device_id dw_hdmi_imx_dt_ids[] = { |
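The new ~0UL row in imx_cur_ctr and the added mode_valid callbacks both bound what the PHY tables are asked to handle. A hedged sketch of why a catch-all sentinel row makes a table walk safe for out-of-range pixel clocks (struct and field names here are illustrative, not dw_hdmi's real ones):

```c
struct curr_ctrl { unsigned long mpixelclock; unsigned short curr[3]; };

static const struct curr_ctrl demo_cur_ctr[] = {
	{ 118800000, { 0x091c, 0x091c, 0x06dc } },
	{ 216000000, { 0x06dc, 0x0b5c, 0x091c } },
	{ ~0UL,      { 0x0000, 0x0000, 0x0000 } },	/* catch-all sentinel */
};

static const struct curr_ctrl *demo_lookup(unsigned long pixclk)
{
	int i;

	for (i = 0; demo_cur_ctr[i].mpixelclock != ~0UL; i++)
		if (pixclk <= demo_cur_ctr[i].mpixelclock)
			break;
	return &demo_cur_ctr[i];	/* sentinel row if nothing matched */
}
```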
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 1b86aac0b341..2d6dc94e1e64 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c | |||
@@ -163,22 +163,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder) | |||
163 | { | 163 | { |
164 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); | 164 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); |
165 | struct imx_ldb *ldb = imx_ldb_ch->ldb; | 165 | struct imx_ldb *ldb = imx_ldb_ch->ldb; |
166 | struct drm_display_mode *mode = &encoder->crtc->hwmode; | ||
167 | u32 pixel_fmt; | 166 | u32 pixel_fmt; |
168 | unsigned long serial_clk; | ||
169 | unsigned long di_clk = mode->clock * 1000; | ||
170 | int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); | ||
171 | |||
172 | if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) { | ||
173 | /* dual channel LVDS mode */ | ||
174 | serial_clk = 3500UL * mode->clock; | ||
175 | imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk); | ||
176 | imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk); | ||
177 | } else { | ||
178 | serial_clk = 7000UL * mode->clock; | ||
179 | imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, | ||
180 | di_clk); | ||
181 | } | ||
182 | 167 | ||
183 | switch (imx_ldb_ch->chno) { | 168 | switch (imx_ldb_ch->chno) { |
184 | case 0: | 169 | case 0: |
@@ -247,6 +232,9 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, | |||
247 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); | 232 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); |
248 | struct imx_ldb *ldb = imx_ldb_ch->ldb; | 233 | struct imx_ldb *ldb = imx_ldb_ch->ldb; |
249 | int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; | 234 | int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; |
235 | unsigned long serial_clk; | ||
236 | unsigned long di_clk = mode->clock * 1000; | ||
237 | int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); | ||
250 | 238 | ||
251 | if (mode->clock > 170000) { | 239 | if (mode->clock > 170000) { |
252 | dev_warn(ldb->dev, | 240 | dev_warn(ldb->dev, |
@@ -257,6 +245,16 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, | |||
257 | "%s: mode exceeds 85 MHz pixel clock\n", __func__); | 245 | "%s: mode exceeds 85 MHz pixel clock\n", __func__); |
258 | } | 246 | } |
259 | 247 | ||
248 | if (dual) { | ||
249 | serial_clk = 3500UL * mode->clock; | ||
250 | imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk); | ||
251 | imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk); | ||
252 | } else { | ||
253 | serial_clk = 7000UL * mode->clock; | ||
254 | imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, | ||
255 | di_clk); | ||
256 | } | ||
257 | |||
260 | /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ | 258 | /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ |
261 | if (imx_ldb_ch == &ldb->channel[0]) { | 259 | if (imx_ldb_ch == &ldb->channel[0]) { |
262 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | 260 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) |
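For reference, the clock relationships that move into imx_ldb_encoder_mode_set(): mode->clock is in kHz, the DI clock follows the pixel clock, and the LVDS serial clock is 7x the pixel clock on a single channel (seven serialized bits per lane per pixel) or 3.5x per channel when the two channels split the line. A worked example with an assumed 65 MHz single-link panel timing:

```c
/* Assumed panel: mode->clock = 65000 (kHz)
 *
 *   di_clk               = 65000 * 1000   =  65,000,000 Hz
 *   single-channel LVDS  = 7000UL * 65000 = 455,000,000 Hz
 *   dual-channel LVDS    = 3500UL * 65000 = 227,500,000 Hz per channel
 *
 * Doing this in mode_set rather than prepare means the clocks are derived
 * from the mode actually being programmed.
 */
```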
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 5e83e007080f..900dda6a8e71 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c | |||
@@ -236,8 +236,11 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) | |||
236 | } | 236 | } |
237 | 237 | ||
238 | panel_node = of_parse_phandle(np, "fsl,panel", 0); | 238 | panel_node = of_parse_phandle(np, "fsl,panel", 0); |
239 | if (panel_node) | 239 | if (panel_node) { |
240 | imxpd->panel = of_drm_find_panel(panel_node); | 240 | imxpd->panel = of_drm_find_panel(panel_node); |
241 | if (!imxpd->panel) | ||
242 | return -EPROBE_DEFER; | ||
243 | } | ||
241 | 244 | ||
242 | imxpd->dev = dev; | 245 | imxpd->dev = dev; |
243 | 246 | ||
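The parallel-display change is the usual deferred-probe pattern: the device tree names a panel but its driver may not have bound yet, in which case of_drm_find_panel() returns NULL and the bind should be retried later. A generic sketch of that shape (demo_* names are placeholders):

```c
#include <linux/errno.h>
#include <linux/of.h>

struct demo_panel;
struct demo_priv { struct demo_panel *panel; };

/* stand-in for of_drm_find_panel(): NULL means the provider isn't bound yet */
extern struct demo_panel *demo_find_panel(struct device_node *node);

static int demo_bind(struct device_node *np, struct demo_priv *priv)
{
	struct device_node *panel_node = of_parse_phandle(np, "fsl,panel", 0);

	if (panel_node) {
		priv->panel = demo_find_panel(panel_node);
		of_node_put(panel_node);
		if (!priv->panel)
			return -EPROBE_DEFER;	/* retry once the panel driver probes */
	}
	return 0;
}
```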
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c index 8edd531cb621..7369ee7f0c55 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | |||
@@ -32,7 +32,10 @@ static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) | |||
32 | void mdp4_irq_preinstall(struct msm_kms *kms) | 32 | void mdp4_irq_preinstall(struct msm_kms *kms) |
33 | { | 33 | { |
34 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); | 34 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
35 | mdp4_enable(mdp4_kms); | ||
35 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); | 36 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); |
37 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); | ||
38 | mdp4_disable(mdp4_kms); | ||
36 | } | 39 | } |
37 | 40 | ||
38 | int mdp4_irq_postinstall(struct msm_kms *kms) | 41 | int mdp4_irq_postinstall(struct msm_kms *kms) |
@@ -53,7 +56,9 @@ int mdp4_irq_postinstall(struct msm_kms *kms) | |||
53 | void mdp4_irq_uninstall(struct msm_kms *kms) | 56 | void mdp4_irq_uninstall(struct msm_kms *kms) |
54 | { | 57 | { |
55 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); | 58 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
59 | mdp4_enable(mdp4_kms); | ||
56 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); | 60 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); |
61 | mdp4_disable(mdp4_kms); | ||
57 | } | 62 | } |
58 | 63 | ||
59 | irqreturn_t mdp4_irq(struct msm_kms *kms) | 64 | irqreturn_t mdp4_irq(struct msm_kms *kms) |
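The MDP4 interrupt hunks (and the MDP5 ones further down) share one idea: the interrupt registers are only accessible while the block is clocked, so preinstall/uninstall now bracket their register writes with enable/disable calls and explicitly mask everything. A sketch of the bracket with placeholder registers and helpers:

```c
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_REG_INTR_CLEAR	0x0100	/* placeholder offsets */
#define DEMO_REG_INTR_ENABLE	0x0104

struct demo_kms { void __iomem *mmio; };

static void demo_clk_on(struct demo_kms *kms)  { /* clk_prepare_enable(...) */ }
static void demo_clk_off(struct demo_kms *kms) { /* clk_disable_unprepare(...) */ }

static void demo_irq_preinstall(struct demo_kms *kms)
{
	demo_clk_on(kms);					/* clocks on */
	writel(0xffffffff, kms->mmio + DEMO_REG_INTR_CLEAR);	/* ack pending */
	writel(0x00000000, kms->mmio + DEMO_REG_INTR_ENABLE);	/* mask all */
	demo_clk_off(kms);					/* clocks off */
}
```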
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index 09b4a25eb553..c276624290af 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | |||
@@ -8,17 +8,9 @@ http://github.com/freedreno/envytools/ | |||
8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | ||
10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) | 11 | - /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41) |
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15) |
13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) | 13 | - /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19) |
14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) | ||
15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) | ||
16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | ||
17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | ||
18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) | ||
19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | ||
20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) | ||
21 | - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) | ||
22 | 14 | ||
23 | Copyright (C) 2013-2015 by the following authors: | 15 | Copyright (C) 2013-2015 by the following authors: |
24 | - Rob Clark <robdclark@gmail.com> (robclark) | 16 | - Rob Clark <robdclark@gmail.com> (robclark) |
@@ -910,6 +902,7 @@ static inline uint32_t __offset_LM(uint32_t idx) | |||
910 | case 2: return (mdp5_cfg->lm.base[2]); | 902 | case 2: return (mdp5_cfg->lm.base[2]); |
911 | case 3: return (mdp5_cfg->lm.base[3]); | 903 | case 3: return (mdp5_cfg->lm.base[3]); |
912 | case 4: return (mdp5_cfg->lm.base[4]); | 904 | case 4: return (mdp5_cfg->lm.base[4]); |
905 | case 5: return (mdp5_cfg->lm.base[5]); | ||
913 | default: return INVALID_IDX(idx); | 906 | default: return INVALID_IDX(idx); |
914 | } | 907 | } |
915 | } | 908 | } |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 46fac545dc2b..2f2863cf8b45 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
@@ -62,8 +62,8 @@ struct mdp5_crtc { | |||
62 | 62 | ||
63 | /* current cursor being scanned out: */ | 63 | /* current cursor being scanned out: */ |
64 | struct drm_gem_object *scanout_bo; | 64 | struct drm_gem_object *scanout_bo; |
65 | uint32_t width; | 65 | uint32_t width, height; |
66 | uint32_t height; | 66 | uint32_t x, y; |
67 | } cursor; | 67 | } cursor; |
68 | }; | 68 | }; |
69 | #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) | 69 | #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) |
@@ -103,8 +103,8 @@ static void crtc_flush_all(struct drm_crtc *crtc) | |||
103 | struct drm_plane *plane; | 103 | struct drm_plane *plane; |
104 | uint32_t flush_mask = 0; | 104 | uint32_t flush_mask = 0; |
105 | 105 | ||
106 | /* we could have already released CTL in the disable path: */ | 106 | /* this should not happen: */ |
107 | if (!mdp5_crtc->ctl) | 107 | if (WARN_ON(!mdp5_crtc->ctl)) |
108 | return; | 108 | return; |
109 | 109 | ||
110 | drm_atomic_crtc_for_each_plane(plane, crtc) { | 110 | drm_atomic_crtc_for_each_plane(plane, crtc) { |
@@ -143,6 +143,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) | |||
143 | drm_atomic_crtc_for_each_plane(plane, crtc) { | 143 | drm_atomic_crtc_for_each_plane(plane, crtc) { |
144 | mdp5_plane_complete_flip(plane); | 144 | mdp5_plane_complete_flip(plane); |
145 | } | 145 | } |
146 | |||
147 | if (mdp5_crtc->ctl && !crtc->state->enable) { | ||
148 | mdp5_ctl_release(mdp5_crtc->ctl); | ||
149 | mdp5_crtc->ctl = NULL; | ||
150 | } | ||
146 | } | 151 | } |
147 | 152 | ||
148 | static void unref_cursor_worker(struct drm_flip_work *work, void *val) | 153 | static void unref_cursor_worker(struct drm_flip_work *work, void *val) |
@@ -386,14 +391,17 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc) | |||
386 | mdp5_crtc->event = crtc->state->event; | 391 | mdp5_crtc->event = crtc->state->event; |
387 | spin_unlock_irqrestore(&dev->event_lock, flags); | 392 | spin_unlock_irqrestore(&dev->event_lock, flags); |
388 | 393 | ||
394 | /* | ||
395 | * If no CTL has been allocated in mdp5_crtc_atomic_check(), | ||
396 | * it means we are trying to flush a CRTC whose state is disabled: | ||
397 | * nothing else needs to be done. | ||
398 | */ | ||
399 | if (unlikely(!mdp5_crtc->ctl)) | ||
400 | return; | ||
401 | |||
389 | blend_setup(crtc); | 402 | blend_setup(crtc); |
390 | crtc_flush_all(crtc); | 403 | crtc_flush_all(crtc); |
391 | request_pending(crtc, PENDING_FLIP); | 404 | request_pending(crtc, PENDING_FLIP); |
392 | |||
393 | if (mdp5_crtc->ctl && !crtc->state->enable) { | ||
394 | mdp5_ctl_release(mdp5_crtc->ctl); | ||
395 | mdp5_crtc->ctl = NULL; | ||
396 | } | ||
397 | } | 405 | } |
398 | 406 | ||
399 | static int mdp5_crtc_set_property(struct drm_crtc *crtc, | 407 | static int mdp5_crtc_set_property(struct drm_crtc *crtc, |
@@ -403,6 +411,32 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc, | |||
403 | return -EINVAL; | 411 | return -EINVAL; |
404 | } | 412 | } |
405 | 413 | ||
414 | static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h) | ||
415 | { | ||
416 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
417 | uint32_t xres = crtc->mode.hdisplay; | ||
418 | uint32_t yres = crtc->mode.vdisplay; | ||
419 | |||
420 | /* | ||
421 | * Cursor Region Of Interest (ROI) is a plane read from cursor | ||
422 | * buffer to render. The ROI region is determined by the visibility of | ||
423 | * the cursor point. In the default Cursor image the cursor point will | ||
424 | * be at the top left of the cursor image, unless it is specified | ||
425 | * otherwise using hotspot feature. | ||
426 | * | ||
427 | * If the cursor point reaches the right (xres - x < cursor.width) or | ||
428 | * bottom (yres - y < cursor.height) boundary of the screen, then ROI | ||
429 | * width and ROI height need to be evaluated to crop the cursor image | ||
430 | * accordingly. | ||
431 | * (xres-x) will be new cursor width when x > (xres - cursor.width) | ||
432 | * (yres-y) will be new cursor height when y > (yres - cursor.height) | ||
433 | */ | ||
434 | *roi_w = min(mdp5_crtc->cursor.width, xres - | ||
435 | mdp5_crtc->cursor.x); | ||
436 | *roi_h = min(mdp5_crtc->cursor.height, yres - | ||
437 | mdp5_crtc->cursor.y); | ||
438 | } | ||
439 | |||
406 | static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | 440 | static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, |
407 | struct drm_file *file, uint32_t handle, | 441 | struct drm_file *file, uint32_t handle, |
408 | uint32_t width, uint32_t height) | 442 | uint32_t width, uint32_t height) |
@@ -416,6 +450,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
416 | unsigned int depth; | 450 | unsigned int depth; |
417 | enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; | 451 | enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; |
418 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); | 452 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); |
453 | uint32_t roi_w, roi_h; | ||
419 | unsigned long flags; | 454 | unsigned long flags; |
420 | 455 | ||
421 | if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { | 456 | if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { |
@@ -446,6 +481,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
446 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); | 481 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); |
447 | old_bo = mdp5_crtc->cursor.scanout_bo; | 482 | old_bo = mdp5_crtc->cursor.scanout_bo; |
448 | 483 | ||
484 | mdp5_crtc->cursor.scanout_bo = cursor_bo; | ||
485 | mdp5_crtc->cursor.width = width; | ||
486 | mdp5_crtc->cursor.height = height; | ||
487 | |||
488 | get_roi(crtc, &roi_w, &roi_h); | ||
489 | |||
449 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); | 490 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); |
450 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), | 491 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), |
451 | MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); | 492 | MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); |
@@ -453,19 +494,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
453 | MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | | 494 | MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | |
454 | MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); | 495 | MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); |
455 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), | 496 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), |
456 | MDP5_LM_CURSOR_SIZE_ROI_H(height) | | 497 | MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | |
457 | MDP5_LM_CURSOR_SIZE_ROI_W(width)); | 498 | MDP5_LM_CURSOR_SIZE_ROI_W(roi_w)); |
458 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr); | 499 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr); |
459 | 500 | ||
460 | |||
461 | blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; | 501 | blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; |
462 | blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN; | ||
463 | blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); | 502 | blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); |
464 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); | 503 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); |
465 | 504 | ||
466 | mdp5_crtc->cursor.scanout_bo = cursor_bo; | ||
467 | mdp5_crtc->cursor.width = width; | ||
468 | mdp5_crtc->cursor.height = height; | ||
469 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); | 505 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); |
470 | 506 | ||
471 | ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); | 507 | ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); |
@@ -489,31 +525,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |||
489 | struct mdp5_kms *mdp5_kms = get_kms(crtc); | 525 | struct mdp5_kms *mdp5_kms = get_kms(crtc); |
490 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | 526 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); |
491 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); | 527 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); |
492 | uint32_t xres = crtc->mode.hdisplay; | ||
493 | uint32_t yres = crtc->mode.vdisplay; | ||
494 | uint32_t roi_w; | 528 | uint32_t roi_w; |
495 | uint32_t roi_h; | 529 | uint32_t roi_h; |
496 | unsigned long flags; | 530 | unsigned long flags; |
497 | 531 | ||
498 | x = (x > 0) ? x : 0; | 532 | /* In case the CRTC is disabled, just drop the cursor update */ |
499 | y = (y > 0) ? y : 0; | 533 | if (unlikely(!crtc->state->enable)) |
534 | return 0; | ||
500 | 535 | ||
501 | /* | 536 | mdp5_crtc->cursor.x = x = max(x, 0); |
502 | * Cursor Region Of Interest (ROI) is a plane read from cursor | 537 | mdp5_crtc->cursor.y = y = max(y, 0); |
503 | * buffer to render. The ROI region is determined by the visibility of | ||
504 | * the cursor point. In the default Cursor image the cursor point will | 539 | get_roi(crtc, &roi_w, &roi_h); |
505 | * be at the top left of the cursor image, unless it is specified | ||
506 | * otherwise using hotspot feature. | ||
507 | * | ||
508 | * If the cursor point reaches the right (xres - x < cursor.width) or | ||
509 | * bottom (yres - y < cursor.height) boundary of the screen, then ROI | ||
510 | * width and ROI height need to be evaluated to crop the cursor image | ||
511 | * accordingly. | ||
512 | * (xres-x) will be new cursor width when x > (xres - cursor.width) | ||
513 | * (yres-y) will be new cursor height when y > (yres - cursor.height) | ||
514 | */ | ||
515 | roi_w = min(mdp5_crtc->cursor.width, xres - x); | ||
516 | roi_h = min(mdp5_crtc->cursor.height, yres - y); | ||
517 | 540 | ||
518 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); | 541 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); |
519 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), | 542 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), |
@@ -544,8 +567,8 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = { | |||
544 | static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { | 567 | static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { |
545 | .mode_fixup = mdp5_crtc_mode_fixup, | 568 | .mode_fixup = mdp5_crtc_mode_fixup, |
546 | .mode_set_nofb = mdp5_crtc_mode_set_nofb, | 569 | .mode_set_nofb = mdp5_crtc_mode_set_nofb, |
547 | .prepare = mdp5_crtc_disable, | 570 | .disable = mdp5_crtc_disable, |
548 | .commit = mdp5_crtc_enable, | 571 | .enable = mdp5_crtc_enable, |
549 | .atomic_check = mdp5_crtc_atomic_check, | 572 | .atomic_check = mdp5_crtc_atomic_check, |
550 | .atomic_begin = mdp5_crtc_atomic_begin, | 573 | .atomic_begin = mdp5_crtc_atomic_begin, |
551 | .atomic_flush = mdp5_crtc_atomic_flush, | 574 | .atomic_flush = mdp5_crtc_atomic_flush, |
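Two things happen in the mdp5_crtc hunk: CTL release moves from atomic_flush into complete_flip (and flush bails out early when no CTL was allocated), and the cursor ROI math is factored into get_roi() so both cursor_set and cursor_move clamp against the stored x/y. The clamp itself is just a min() against the remaining screen area; a self-contained sketch with assumed numbers:

```c
#include <linux/kernel.h>
#include <linux/types.h>

/*
 * Example: a 64x64 cursor on a 1920x1080 mode with the hotspot at
 * x = 1900, y = 500 leaves only 20 visible columns, so roi_w becomes 20
 * while roi_h stays 64.
 */
static void demo_get_roi(u32 xres, u32 yres, u32 cur_w, u32 cur_h,
			 u32 x, u32 y, u32 *roi_w, u32 *roi_h)
{
	*roi_w = min(cur_w, xres - x);	/* crop at the right edge */
	*roi_h = min(cur_h, yres - y);	/* crop at the bottom edge */
}
```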
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index d6a14bb99988..af0e02fa4f48 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | |||
@@ -267,14 +267,14 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) | |||
267 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); | 267 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); |
268 | spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); | 268 | spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); |
269 | 269 | ||
270 | mdp5_encoder->enabled = false; | 270 | mdp5_encoder->enabled = true; |
271 | } | 271 | } |
272 | 272 | ||
273 | static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { | 273 | static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { |
274 | .mode_fixup = mdp5_encoder_mode_fixup, | 274 | .mode_fixup = mdp5_encoder_mode_fixup, |
275 | .mode_set = mdp5_encoder_mode_set, | 275 | .mode_set = mdp5_encoder_mode_set, |
276 | .prepare = mdp5_encoder_disable, | 276 | .disable = mdp5_encoder_disable, |
277 | .commit = mdp5_encoder_enable, | 277 | .enable = mdp5_encoder_enable, |
278 | }; | 278 | }; |
279 | 279 | ||
280 | /* initialize encoder */ | 280 | /* initialize encoder */ |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index 70ac81edd40f..a9407105b9b7 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | |||
@@ -34,7 +34,10 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) | |||
34 | void mdp5_irq_preinstall(struct msm_kms *kms) | 34 | void mdp5_irq_preinstall(struct msm_kms *kms) |
35 | { | 35 | { |
36 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); | 36 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); |
37 | mdp5_enable(mdp5_kms); | ||
37 | mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); | 38 | mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); |
39 | mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); | ||
40 | mdp5_disable(mdp5_kms); | ||
38 | } | 41 | } |
39 | 42 | ||
40 | int mdp5_irq_postinstall(struct msm_kms *kms) | 43 | int mdp5_irq_postinstall(struct msm_kms *kms) |
@@ -57,7 +60,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms) | |||
57 | void mdp5_irq_uninstall(struct msm_kms *kms) | 60 | void mdp5_irq_uninstall(struct msm_kms *kms) |
58 | { | 61 | { |
59 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); | 62 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); |
63 | mdp5_enable(mdp5_kms); | ||
60 | mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); | 64 | mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); |
65 | mdp5_disable(mdp5_kms); | ||
61 | } | 66 | } |
62 | 67 | ||
63 | static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) | 68 | static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) |
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 7c412292a0ff..5b192128cda2 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c | |||
@@ -219,8 +219,10 @@ int msm_atomic_commit(struct drm_device *dev, | |||
219 | * mark our set of crtc's as busy: | 219 | * mark our set of crtc's as busy: |
220 | */ | 220 | */ |
221 | ret = start_atomic(dev->dev_private, c->crtc_mask); | 221 | ret = start_atomic(dev->dev_private, c->crtc_mask); |
222 | if (ret) | 222 | if (ret) { |
223 | kfree(c); | ||
223 | return ret; | 224 | return ret; |
225 | } | ||
224 | 226 | ||
225 | /* | 227 | /* |
226 | * This is the point of no return - everything below never fails except | 228 | * This is the point of no return - everything below never fails except |
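The msm_atomic change plugs a leak: if start_atomic() fails, the commit structure allocated earlier was never freed. A generic sketch of the unwind rule (names are placeholders):

```c
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_dev;
struct demo_commit { u32 crtc_mask; };

extern int demo_start(struct demo_dev *dev, struct demo_commit *c);

static int demo_commit_begin(struct demo_dev *dev)
{
	struct demo_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
	int ret;

	if (!c)
		return -ENOMEM;

	ret = demo_start(dev, c);
	if (ret) {
		kfree(c);	/* the allocation still belongs to us on this path */
		return ret;
	}

	/* past this point the commit machinery owns and eventually frees c */
	return 0;
}
```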
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 79924e4b1b49..6751553abe4a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -418,7 +418,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, | |||
418 | nouveau_fbcon_zfill(dev, fbcon); | 418 | nouveau_fbcon_zfill(dev, fbcon); |
419 | 419 | ||
420 | /* To allow resizing without swapping buffers */ | 420 | /* To allow resizing without swapping buffers */ |
421 | NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n", | 421 | NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n", |
422 | nouveau_fb->base.width, nouveau_fb->base.height, | 422 | nouveau_fb->base.width, nouveau_fb->base.height, |
423 | nvbo->bo.offset, nvbo); | 423 | nvbo->bo.offset, nvbo); |
424 | 424 | ||
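The nouveau change is a printk format fix: bo.offset is a 64-bit value, so it needs %llx rather than %lx (which truncates on 32-bit builds and triggers format warnings). A minimal illustration:

```c
#include <linux/printk.h>
#include <linux/types.h>

static void demo_log_fb(u32 width, u32 height, u64 offset)
{
	/* %llx matches the 64-bit value on both 32- and 64-bit kernels */
	printk(KERN_INFO "allocated %ux%u fb: 0x%llx\n",
	       width, height, (unsigned long long)offset);
}
```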
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index ed644a4f6f57..86807ee91bd1 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -1405,6 +1405,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
1405 | (x << 16) | y); | 1405 | (x << 16) | y); |
1406 | viewport_w = crtc->mode.hdisplay; | 1406 | viewport_w = crtc->mode.hdisplay; |
1407 | viewport_h = (crtc->mode.vdisplay + 1) & ~1; | 1407 | viewport_h = (crtc->mode.vdisplay + 1) & ~1; |
1408 | if ((rdev->family >= CHIP_BONAIRE) && | ||
1409 | (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)) | ||
1410 | viewport_h *= 2; | ||
1408 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, | 1411 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, |
1409 | (viewport_w << 16) | viewport_h); | 1412 | (viewport_w << 16) | viewport_h); |
1410 | 1413 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 5bf825dfaa09..8d74de82456e 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -178,6 +178,13 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) | |||
178 | switch (msg->request & ~DP_AUX_I2C_MOT) { | 178 | switch (msg->request & ~DP_AUX_I2C_MOT) { |
179 | case DP_AUX_NATIVE_WRITE: | 179 | case DP_AUX_NATIVE_WRITE: |
180 | case DP_AUX_I2C_WRITE: | 180 | case DP_AUX_I2C_WRITE: |
181 | /* The atom implementation only supports writes with a max payload of | ||
182 | * 12 bytes since it uses 4 bits for the total count (header + payload) | ||
183 | * in the parameter space. The atom interface supports 16 byte | ||
184 | * payloads for reads. The hw itself supports up to 16 bytes of payload. | ||
185 | */ | ||
186 | if (WARN_ON_ONCE(msg->size > 12)) | ||
187 | return -E2BIG; | ||
181 | /* tx_size needs to be 4 even for bare address packets since the atom | 188 | /* tx_size needs to be 4 even for bare address packets since the atom |
182 | * table needs the info in tx_buf[3]. | 189 | * table needs the info in tx_buf[3]. |
183 | */ | 190 | */ |
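The radeon AUX hunk documents a firmware-interface limit and enforces it: the atom table's count field covers header plus payload, so native and I2C writes are capped at 12 payload bytes and anything longer is rejected up front. A sketch of the guard:

```c
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/types.h>

/* 12 comes from the atom parameter encoding described in the hunk above */
#define DEMO_AUX_MAX_WRITE	12

static int demo_aux_write(const u8 *buf, size_t size)
{
	if (WARN_ON_ONCE(size > DEMO_AUX_MAX_WRITE))
		return -E2BIG;		/* refuse rather than silently truncate */

	/* ... build the atom transaction from buf/size and submit it ... */
	return size;
}
```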
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 7c9df1eac065..c39c1d0d9d4e 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -731,7 +731,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
731 | dig_connector = radeon_connector->con_priv; | 731 | dig_connector = radeon_connector->con_priv; |
732 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | 732 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
733 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { | 733 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { |
734 | if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) | 734 | if (radeon_audio != 0 && |
735 | drm_detect_monitor_audio(radeon_connector_edid(connector)) && | ||
736 | ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) | ||
735 | return ATOM_ENCODER_MODE_DP_AUDIO; | 737 | return ATOM_ENCODER_MODE_DP_AUDIO; |
736 | return ATOM_ENCODER_MODE_DP; | 738 | return ATOM_ENCODER_MODE_DP; |
737 | } else if (radeon_audio != 0) { | 739 | } else if (radeon_audio != 0) { |
@@ -747,7 +749,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
747 | } | 749 | } |
748 | break; | 750 | break; |
749 | case DRM_MODE_CONNECTOR_eDP: | 751 | case DRM_MODE_CONNECTOR_eDP: |
750 | if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) | 752 | if (radeon_audio != 0 && |
753 | drm_detect_monitor_audio(radeon_connector_edid(connector)) && | ||
754 | ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) | ||
751 | return ATOM_ENCODER_MODE_DP_AUDIO; | 755 | return ATOM_ENCODER_MODE_DP_AUDIO; |
752 | return ATOM_ENCODER_MODE_DP; | 756 | return ATOM_ENCODER_MODE_DP; |
753 | case DRM_MODE_CONNECTOR_DVIA: | 757 | case DRM_MODE_CONNECTOR_DVIA: |
@@ -1622,7 +1626,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
1622 | struct radeon_connector *radeon_connector = NULL; | 1626 | struct radeon_connector *radeon_connector = NULL; |
1623 | struct radeon_connector_atom_dig *radeon_dig_connector = NULL; | 1627 | struct radeon_connector_atom_dig *radeon_dig_connector = NULL; |
1624 | bool travis_quirk = false; | 1628 | bool travis_quirk = false; |
1625 | int encoder_mode; | ||
1626 | 1629 | ||
1627 | if (connector) { | 1630 | if (connector) { |
1628 | radeon_connector = to_radeon_connector(connector); | 1631 | radeon_connector = to_radeon_connector(connector); |
@@ -1718,11 +1721,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
1718 | } | 1721 | } |
1719 | break; | 1722 | break; |
1720 | } | 1723 | } |
1721 | |||
1722 | encoder_mode = atombios_get_encoder_mode(encoder); | ||
1723 | if (radeon_audio != 0 && | ||
1724 | (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode))) | ||
1725 | radeon_audio_dpms(encoder, mode); | ||
1726 | } | 1724 | } |
1727 | 1725 | ||
1728 | static void | 1726 | static void |
@@ -1731,10 +1729,19 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1731 | struct drm_device *dev = encoder->dev; | 1729 | struct drm_device *dev = encoder->dev; |
1732 | struct radeon_device *rdev = dev->dev_private; | 1730 | struct radeon_device *rdev = dev->dev_private; |
1733 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1731 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1732 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1733 | int encoder_mode = atombios_get_encoder_mode(encoder); | ||
1734 | 1734 | ||
1735 | DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", | 1735 | DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", |
1736 | radeon_encoder->encoder_id, mode, radeon_encoder->devices, | 1736 | radeon_encoder->encoder_id, mode, radeon_encoder->devices, |
1737 | radeon_encoder->active_device); | 1737 | radeon_encoder->active_device); |
1738 | |||
1739 | if (connector && (radeon_audio != 0) && | ||
1740 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | ||
1741 | (ENCODER_MODE_IS_DP(encoder_mode) && | ||
1742 | drm_detect_monitor_audio(radeon_connector_edid(connector))))) | ||
1743 | radeon_audio_dpms(encoder, mode); | ||
1744 | |||
1738 | switch (radeon_encoder->encoder_id) { | 1745 | switch (radeon_encoder->encoder_id) { |
1739 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | 1746 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: |
1740 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | 1747 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: |
@@ -2136,6 +2143,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
2136 | struct drm_device *dev = encoder->dev; | 2143 | struct drm_device *dev = encoder->dev; |
2137 | struct radeon_device *rdev = dev->dev_private; | 2144 | struct radeon_device *rdev = dev->dev_private; |
2138 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 2145 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
2146 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
2139 | int encoder_mode; | 2147 | int encoder_mode; |
2140 | 2148 | ||
2141 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 2149 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
@@ -2163,10 +2171,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
2163 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | 2171 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: |
2164 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 2172 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
2165 | /* handled in dpms */ | 2173 | /* handled in dpms */ |
2166 | encoder_mode = atombios_get_encoder_mode(encoder); | ||
2167 | if (radeon_audio != 0 && | ||
2168 | (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode))) | ||
2169 | radeon_audio_mode_set(encoder, adjusted_mode); | ||
2170 | break; | 2174 | break; |
2171 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 2175 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
2172 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 2176 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
@@ -2188,6 +2192,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
2188 | } | 2192 | } |
2189 | 2193 | ||
2190 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 2194 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
2195 | |||
2196 | encoder_mode = atombios_get_encoder_mode(encoder); | ||
2197 | if (connector && (radeon_audio != 0) && | ||
2198 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | ||
2199 | (ENCODER_MODE_IS_DP(encoder_mode) && | ||
2200 | drm_detect_monitor_audio(radeon_connector_edid(connector))))) | ||
2201 | radeon_audio_mode_set(encoder, adjusted_mode); | ||
2191 | } | 2202 | } |
2192 | 2203 | ||
2193 | static bool | 2204 | static bool |
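The atombios_encoders changes hoist the audio dpms/mode_set calls out of the DIG-specific paths into the common dpms and mode_set entry points, and gate DP audio on the sink actually advertising audio in its EDID, so audio-less eDP panels no longer get ATOM_ENCODER_MODE_DP_AUDIO. The gate reduces to a small predicate; a hedged sketch that uses the real drm_detect_monitor_audio() helper but placeholder wiring:

```c
#include <drm/drm_edid.h>
#include <linux/types.h>

/*
 * radeon_audio_param stands in for the module parameter, mode_is_dp for
 * ENCODER_MODE_IS_DP(); both names are assumptions for the sketch.
 */
static bool demo_wants_audio(struct edid *edid, int radeon_audio_param,
			     bool mode_is_hdmi, bool mode_is_dp)
{
	if (!radeon_audio_param)
		return false;
	if (mode_is_hdmi)
		return true;
	/* DP audio only when the monitor's EDID declares audio support */
	return mode_is_dp && drm_detect_monitor_audio(edid);
}
```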
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e6a4ba236c70..3e670d344a20 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -3613,6 +3613,8 @@ static void cik_gpu_init(struct radeon_device *rdev) | |||
3613 | } | 3613 | } |
3614 | 3614 | ||
3615 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | 3615 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
3616 | WREG32(SRBM_INT_CNTL, 0x1); | ||
3617 | WREG32(SRBM_INT_ACK, 0x1); | ||
3616 | 3618 | ||
3617 | WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); | 3619 | WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); |
3618 | 3620 | ||
@@ -7230,6 +7232,8 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev) | |||
7230 | WREG32(CP_ME2_PIPE3_INT_CNTL, 0); | 7232 | WREG32(CP_ME2_PIPE3_INT_CNTL, 0); |
7231 | /* grbm */ | 7233 | /* grbm */ |
7232 | WREG32(GRBM_INT_CNTL, 0); | 7234 | WREG32(GRBM_INT_CNTL, 0); |
7235 | /* SRBM */ | ||
7236 | WREG32(SRBM_INT_CNTL, 0); | ||
7233 | /* vline/vblank, etc. */ | 7237 | /* vline/vblank, etc. */ |
7234 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 7238 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
7235 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 7239 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
@@ -7551,6 +7555,9 @@ int cik_irq_set(struct radeon_device *rdev) | |||
7551 | WREG32(DC_HPD5_INT_CONTROL, hpd5); | 7555 | WREG32(DC_HPD5_INT_CONTROL, hpd5); |
7552 | WREG32(DC_HPD6_INT_CONTROL, hpd6); | 7556 | WREG32(DC_HPD6_INT_CONTROL, hpd6); |
7553 | 7557 | ||
7558 | /* posting read */ | ||
7559 | RREG32(SRBM_STATUS); | ||
7560 | |||
7554 | return 0; | 7561 | return 0; |
7555 | } | 7562 | } |
7556 | 7563 | ||
@@ -8046,6 +8053,10 @@ restart_ih: | |||
8046 | break; | 8053 | break; |
8047 | } | 8054 | } |
8048 | break; | 8055 | break; |
8056 | case 96: | ||
8057 | DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); | ||
8058 | WREG32(SRBM_INT_ACK, 0x1); | ||
8059 | break; | ||
8049 | case 124: /* UVD */ | 8060 | case 124: /* UVD */ |
8050 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); | 8061 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); |
8051 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); | 8062 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); |
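Two patterns recur in the cik.c hunk (and in the evergreen, r600 and r100 hunks below): the SRBM read-error interrupt (source id 96) is unmasked at init, logged and acked in the IH loop, and masked again in disable_interrupt_state; and irq_set gains a posting read so the interrupt-enable writes are flushed before the function returns. A generic sketch of the posting read with placeholder registers:

```c
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_INT_CNTL	0x0040	/* placeholder offsets */
#define DEMO_STATUS	0x0044

static void demo_irq_set(void __iomem *mmio, u32 mask)
{
	writel(mask, mmio + DEMO_INT_CNTL);	/* enable interrupt sources */

	/*
	 * Posting read: MMIO writes can sit in write buffers; reading any
	 * register of the same device forces them to complete before we
	 * return and possibly race with the interrupt we just enabled.
	 */
	(void)readl(mmio + DEMO_STATUS);
}
```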
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 03003f8a6de6..c648e1996dab 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h | |||
@@ -482,6 +482,10 @@ | |||
482 | #define SOFT_RESET_ORB (1 << 23) | 482 | #define SOFT_RESET_ORB (1 << 23) |
483 | #define SOFT_RESET_VCE (1 << 24) | 483 | #define SOFT_RESET_VCE (1 << 24) |
484 | 484 | ||
485 | #define SRBM_READ_ERROR 0xE98 | ||
486 | #define SRBM_INT_CNTL 0xEA0 | ||
487 | #define SRBM_INT_ACK 0xEA8 | ||
488 | |||
485 | #define VM_L2_CNTL 0x1400 | 489 | #define VM_L2_CNTL 0x1400 |
486 | #define ENABLE_L2_CACHE (1 << 0) | 490 | #define ENABLE_L2_CACHE (1 << 0) |
487 | #define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) | 491 | #define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) |
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 192c80389151..3adc2afe32aa 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c | |||
@@ -26,6 +26,9 @@ | |||
26 | #include "radeon_audio.h" | 26 | #include "radeon_audio.h" |
27 | #include "sid.h" | 27 | #include "sid.h" |
28 | 28 | ||
29 | #define DCE8_DCCG_AUDIO_DTO1_PHASE 0x05b8 | ||
30 | #define DCE8_DCCG_AUDIO_DTO1_MODULE 0x05bc | ||
31 | |||
29 | u32 dce6_endpoint_rreg(struct radeon_device *rdev, | 32 | u32 dce6_endpoint_rreg(struct radeon_device *rdev, |
30 | u32 block_offset, u32 reg) | 33 | u32 block_offset, u32 reg) |
31 | { | 34 | { |
@@ -252,72 +255,67 @@ void dce6_audio_enable(struct radeon_device *rdev, | |||
252 | void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, | 255 | void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, |
253 | struct radeon_crtc *crtc, unsigned int clock) | 256 | struct radeon_crtc *crtc, unsigned int clock) |
254 | { | 257 | { |
255 | /* Two dtos; generally use dto0 for HDMI */ | 258 | /* Two dtos; generally use dto0 for HDMI */ |
256 | u32 value = 0; | 259 | u32 value = 0; |
257 | 260 | ||
258 | if (crtc) | 261 | if (crtc) |
259 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); | 262 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
260 | 263 | ||
261 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); | 264 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
262 | 265 | ||
263 | /* Express [24MHz / target pixel clock] as an exact rational | 266 | /* Express [24MHz / target pixel clock] as an exact rational |
264 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE | 267 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
265 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 268 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
266 | */ | 269 | */ |
267 | WREG32(DCCG_AUDIO_DTO0_PHASE, 24000); | 270 | WREG32(DCCG_AUDIO_DTO0_PHASE, 24000); |
268 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock); | 271 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock); |
269 | } | 272 | } |
270 | 273 | ||
271 | void dce6_dp_audio_set_dto(struct radeon_device *rdev, | 274 | void dce6_dp_audio_set_dto(struct radeon_device *rdev, |
272 | struct radeon_crtc *crtc, unsigned int clock) | 275 | struct radeon_crtc *crtc, unsigned int clock) |
273 | { | 276 | { |
274 | /* Two dtos; generally use dto1 for DP */ | 277 | /* Two dtos; generally use dto1 for DP */ |
275 | u32 value = 0; | 278 | u32 value = 0; |
276 | value |= DCCG_AUDIO_DTO_SEL; | 279 | value |= DCCG_AUDIO_DTO_SEL; |
277 | 280 | ||
278 | if (crtc) | 281 | if (crtc) |
279 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); | 282 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
280 | 283 | ||
281 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); | 284 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
282 | 285 | ||
283 | /* Express [24MHz / target pixel clock] as an exact rational | 286 | /* Express [24MHz / target pixel clock] as an exact rational |
284 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE | 287 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
285 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 288 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
286 | */ | 289 | */ |
287 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | 290 | if (ASIC_IS_DCE8(rdev)) { |
288 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); | 291 | WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000); |
292 | WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock); | ||
293 | } else { | ||
294 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | ||
295 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); | ||
296 | } | ||
289 | } | 297 | } |
290 | 298 | ||
291 | void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | 299 | void dce6_dp_enable(struct drm_encoder *encoder, bool enable) |
292 | { | 300 | { |
293 | struct drm_device *dev = encoder->dev; | 301 | struct drm_device *dev = encoder->dev; |
294 | struct radeon_device *rdev = dev->dev_private; | 302 | struct radeon_device *rdev = dev->dev_private; |
295 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 303 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
296 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 304 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
297 | uint32_t offset; | ||
298 | 305 | ||
299 | if (!dig || !dig->afmt) | 306 | if (!dig || !dig->afmt) |
300 | return; | 307 | return; |
301 | 308 | ||
302 | offset = dig->afmt->offset; | ||
303 | |||
304 | if (enable) { | 309 | if (enable) { |
305 | if (dig->afmt->enabled) | 310 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, |
306 | return; | 311 | EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); |
307 | 312 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, | |
308 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | 313 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ |
309 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, | 314 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ |
310 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ | 315 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ |
311 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ | 316 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ |
312 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ | ||
313 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ | ||
314 | radeon_audio_enable(rdev, dig->afmt->pin, true); | ||
315 | } else { | 317 | } else { |
316 | if (!dig->afmt->enabled) | 318 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); |
317 | return; | ||
318 | |||
319 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0); | ||
320 | radeon_audio_enable(rdev, dig->afmt->pin, false); | ||
321 | } | 319 | } |
322 | 320 | ||
323 | dig->afmt->enabled = enable; | 321 | dig->afmt->enabled = enable; |
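The dce6_afmt change keeps the same DTO programming but writes the DTO1 phase/module pair at the DCE8-specific offsets (0x05b8/0x05bc) on DCE8 parts. The pair still expresses 24 MHz over the target pixel clock; assuming clock is in kHz as elsewhere in the mode code, a worked example:

```c
/*
 * Assumed 148.5 MHz DP pixel clock, i.e. clock = 148500:
 *
 *   DTO phase  = 24000   (24 MHz reference, in kHz)
 *   DTO module = 148500  (target pixel clock, in kHz)
 *
 *   ratio = 24000 / 148500 = 0.16161...
 *
 * Only the register offsets change on DCE8; the phase/module values do not.
 */
```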
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 78600f534c80..973df064c14f 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -3253,6 +3253,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
3253 | } | 3253 | } |
3254 | 3254 | ||
3255 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | 3255 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
3256 | WREG32(SRBM_INT_CNTL, 0x1); | ||
3257 | WREG32(SRBM_INT_ACK, 0x1); | ||
3256 | 3258 | ||
3257 | evergreen_fix_pci_max_read_req_size(rdev); | 3259 | evergreen_fix_pci_max_read_req_size(rdev); |
3258 | 3260 | ||
@@ -4324,6 +4326,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev) | |||
4324 | tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; | 4326 | tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; |
4325 | WREG32(DMA_CNTL, tmp); | 4327 | WREG32(DMA_CNTL, tmp); |
4326 | WREG32(GRBM_INT_CNTL, 0); | 4328 | WREG32(GRBM_INT_CNTL, 0); |
4329 | WREG32(SRBM_INT_CNTL, 0); | ||
4327 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 4330 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
4328 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 4331 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
4329 | if (rdev->num_crtc >= 4) { | 4332 | if (rdev->num_crtc >= 4) { |
@@ -4590,6 +4593,9 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
4590 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); | 4593 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); |
4591 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); | 4594 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); |
4592 | 4595 | ||
4596 | /* posting read */ | ||
4597 | RREG32(SRBM_STATUS); | ||
4598 | |||
4593 | return 0; | 4599 | return 0; |
4594 | } | 4600 | } |
4595 | 4601 | ||
@@ -5066,6 +5072,10 @@ restart_ih: | |||
5066 | DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); | 5072 | DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); |
5067 | break; | 5073 | break; |
5068 | } | 5074 | } |
5075 | case 96: | ||
5076 | DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); | ||
5077 | WREG32(SRBM_INT_ACK, 0x1); | ||
5078 | break; | ||
5069 | case 124: /* UVD */ | 5079 | case 124: /* UVD */ |
5070 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); | 5080 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); |
5071 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); | 5081 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); |
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 1d9aebc79595..c18d4ecbd95d 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
@@ -272,7 +272,7 @@ void dce4_hdmi_audio_set_dto(struct radeon_device *rdev, | |||
272 | } | 272 | } |
273 | 273 | ||
274 | void dce4_dp_audio_set_dto(struct radeon_device *rdev, | 274 | void dce4_dp_audio_set_dto(struct radeon_device *rdev, |
275 | struct radeon_crtc *crtc, unsigned int clock) | 275 | struct radeon_crtc *crtc, unsigned int clock) |
276 | { | 276 | { |
277 | u32 value; | 277 | u32 value; |
278 | 278 | ||
@@ -294,7 +294,7 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev, | |||
294 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 294 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
295 | */ | 295 | */ |
296 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | 296 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); |
297 | WREG32(DCCG_AUDIO_DTO1_MODULE, rdev->clock.max_pixel_clock * 10); | 297 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); |
298 | } | 298 | } |
299 | 299 | ||
300 | void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset) | 300 | void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset) |
@@ -350,20 +350,9 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset) | |||
350 | struct drm_device *dev = encoder->dev; | 350 | struct drm_device *dev = encoder->dev; |
351 | struct radeon_device *rdev = dev->dev_private; | 351 | struct radeon_device *rdev = dev->dev_private; |
352 | 352 | ||
353 | WREG32(HDMI_INFOFRAME_CONTROL0 + offset, | ||
354 | HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ | ||
355 | HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ | ||
356 | |||
357 | WREG32(AFMT_INFOFRAME_CONTROL0 + offset, | 353 | WREG32(AFMT_INFOFRAME_CONTROL0 + offset, |
358 | AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ | 354 | AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ |
359 | 355 | ||
360 | WREG32(HDMI_INFOFRAME_CONTROL1 + offset, | ||
361 | HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ | ||
362 | |||
363 | WREG32(HDMI_AUDIO_PACKET_CONTROL + offset, | ||
364 | HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ | ||
365 | HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ | ||
366 | |||
367 | WREG32(AFMT_60958_0 + offset, | 356 | WREG32(AFMT_60958_0 + offset, |
368 | AFMT_60958_CS_CHANNEL_NUMBER_L(1)); | 357 | AFMT_60958_CS_CHANNEL_NUMBER_L(1)); |
369 | 358 | ||
@@ -408,15 +397,19 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
408 | if (!dig || !dig->afmt) | 397 | if (!dig || !dig->afmt) |
409 | return; | 398 | return; |
410 | 399 | ||
411 | /* Silent, r600_hdmi_enable will raise WARN for us */ | 400 | if (enable) { |
412 | if (enable && dig->afmt->enabled) | 401 | WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, |
413 | return; | 402 | HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ |
414 | if (!enable && !dig->afmt->enabled) | 403 | |
415 | return; | 404 | WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, |
405 | HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ | ||
406 | HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */ | ||
416 | 407 | ||
417 | if (!enable && dig->afmt->pin) { | 408 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, |
418 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 409 | HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ |
419 | dig->afmt->pin = NULL; | 410 | HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ |
411 | } else { | ||
412 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0); | ||
420 | } | 413 | } |
421 | 414 | ||
422 | dig->afmt->enabled = enable; | 415 | dig->afmt->enabled = enable; |
@@ -425,33 +418,28 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
425 | enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id); | 418 | enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id); |
426 | } | 419 | } |
427 | 420 | ||
428 | void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | 421 | void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) |
429 | { | 422 | { |
430 | struct drm_device *dev = encoder->dev; | 423 | struct drm_device *dev = encoder->dev; |
431 | struct radeon_device *rdev = dev->dev_private; | 424 | struct radeon_device *rdev = dev->dev_private; |
432 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 425 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
433 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 426 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
434 | uint32_t offset; | ||
435 | 427 | ||
436 | if (!dig || !dig->afmt) | 428 | if (!dig || !dig->afmt) |
437 | return; | 429 | return; |
438 | 430 | ||
439 | offset = dig->afmt->offset; | ||
440 | |||
441 | if (enable) { | 431 | if (enable) { |
442 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 432 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
443 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 433 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
444 | struct radeon_connector_atom_dig *dig_connector; | 434 | struct radeon_connector_atom_dig *dig_connector; |
445 | uint32_t val; | 435 | uint32_t val; |
446 | 436 | ||
447 | if (dig->afmt->enabled) | 437 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, |
448 | return; | 438 | EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); |
449 | |||
450 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | ||
451 | 439 | ||
452 | if (radeon_connector->con_priv) { | 440 | if (radeon_connector->con_priv) { |
453 | dig_connector = radeon_connector->con_priv; | 441 | dig_connector = radeon_connector->con_priv; |
454 | val = RREG32(EVERGREEN_DP_SEC_AUD_N + offset); | 442 | val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset); |
455 | val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); | 443 | val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); |
456 | 444 | ||
457 | if (dig_connector->dp_clock == 162000) | 445 | if (dig_connector->dp_clock == 162000) |
@@ -459,21 +447,16 @@ void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | |||
459 | else | 447 | else |
460 | val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5); | 448 | val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5); |
461 | 449 | ||
462 | WREG32(EVERGREEN_DP_SEC_AUD_N + offset, val); | 450 | WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val); |
463 | } | 451 | } |
464 | 452 | ||
465 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, | 453 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, |
466 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ | 454 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ |
467 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ | 455 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ |
468 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ | 456 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ |
469 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ | 457 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ |
470 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
471 | } else { | 458 | } else { |
472 | if (!dig->afmt->enabled) | 459 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); |
473 | return; | ||
474 | |||
475 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0); | ||
476 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
477 | } | 460 | } |
478 | 461 | ||
479 | dig->afmt->enabled = enable; | 462 | dig->afmt->enabled = enable; |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index ee83d2a88750..a8d1d5240fcb 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -1191,6 +1191,10 @@ | |||
1191 | #define SOFT_RESET_REGBB (1 << 22) | 1191 | #define SOFT_RESET_REGBB (1 << 22) |
1192 | #define SOFT_RESET_ORB (1 << 23) | 1192 | #define SOFT_RESET_ORB (1 << 23) |
1193 | 1193 | ||
1194 | #define SRBM_READ_ERROR 0xE98 | ||
1195 | #define SRBM_INT_CNTL 0xEA0 | ||
1196 | #define SRBM_INT_ACK 0xEA8 | ||
1197 | |||
1194 | /* display watermarks */ | 1198 | /* display watermarks */ |
1195 | #define DC_LB_MEMORY_SPLIT 0x6b0c | 1199 | #define DC_LB_MEMORY_SPLIT 0x6b0c |
1196 | #define PRIORITY_A_CNT 0x6b18 | 1200 | #define PRIORITY_A_CNT 0x6b18 |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 24242a7f0ac3..dab00812abaa 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -962,6 +962,8 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
962 | } | 962 | } |
963 | 963 | ||
964 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | 964 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
965 | WREG32(SRBM_INT_CNTL, 0x1); | ||
966 | WREG32(SRBM_INT_ACK, 0x1); | ||
965 | 967 | ||
966 | evergreen_fix_pci_max_read_req_size(rdev); | 968 | evergreen_fix_pci_max_read_req_size(rdev); |
967 | 969 | ||
@@ -1086,12 +1088,12 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
1086 | 1088 | ||
1087 | if ((rdev->config.cayman.max_backends_per_se == 1) && | 1089 | if ((rdev->config.cayman.max_backends_per_se == 1) && |
1088 | (rdev->flags & RADEON_IS_IGP)) { | 1090 | (rdev->flags & RADEON_IS_IGP)) { |
1089 | if ((disabled_rb_mask & 3) == 1) { | 1091 | if ((disabled_rb_mask & 3) == 2) { |
1090 | /* RB0 disabled, RB1 enabled */ | ||
1091 | tmp = 0x11111111; | ||
1092 | } else { | ||
1093 | /* RB1 disabled, RB0 enabled */ | 1092 | /* RB1 disabled, RB0 enabled */ |
1094 | tmp = 0x00000000; | 1093 | tmp = 0x00000000; |
1094 | } else { | ||
1095 | /* RB0 disabled, RB1 enabled */ | ||
1096 | tmp = 0x11111111; | ||
1095 | } | 1097 | } |
1096 | } else { | 1098 | } else { |
1097 | tmp = gb_addr_config & NUM_PIPES_MASK; | 1099 | tmp = gb_addr_config & NUM_PIPES_MASK; |
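Reading the cayman_gpu_init hunk, the explicit mappings between harvest mask and backend map are unchanged; what the fix changes is which case is the fall-through default:

```c
/*
 *   mask & 3 == 1  ->  RB0 disabled, RB1 enabled  ->  tmp = 0x11111111
 *   mask & 3 == 2  ->  RB1 disabled, RB0 enabled  ->  tmp = 0x00000000
 *
 * The test is now on == 2, so any other value of the low bits falls back to
 * the RB1 map (0x11111111) instead of the RB0 map as before.
 */
```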
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index ad7125486894..6b44580440d0 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h | |||
@@ -82,6 +82,10 @@ | |||
82 | #define SOFT_RESET_REGBB (1 << 22) | 82 | #define SOFT_RESET_REGBB (1 << 22) |
83 | #define SOFT_RESET_ORB (1 << 23) | 83 | #define SOFT_RESET_ORB (1 << 23) |
84 | 84 | ||
85 | #define SRBM_READ_ERROR 0xE98 | ||
86 | #define SRBM_INT_CNTL 0xEA0 | ||
87 | #define SRBM_INT_ACK 0xEA8 | ||
88 | |||
85 | #define SRBM_STATUS2 0x0EC4 | 89 | #define SRBM_STATUS2 0x0EC4 |
86 | #define DMA_BUSY (1 << 5) | 90 | #define DMA_BUSY (1 << 5) |
87 | #define DMA1_BUSY (1 << 6) | 91 | #define DMA1_BUSY (1 << 6) |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 279801ca5110..04f2514f7564 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -728,6 +728,10 @@ int r100_irq_set(struct radeon_device *rdev) | |||
728 | tmp |= RADEON_FP2_DETECT_MASK; | 728 | tmp |= RADEON_FP2_DETECT_MASK; |
729 | } | 729 | } |
730 | WREG32(RADEON_GEN_INT_CNTL, tmp); | 730 | WREG32(RADEON_GEN_INT_CNTL, tmp); |
731 | |||
732 | /* read back to post the write */ | ||
733 | RREG32(RADEON_GEN_INT_CNTL); | ||
734 | |||
731 | return 0; | 735 | return 0; |
732 | } | 736 | } |
733 | 737 | ||
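The r100, r600, rs600 and si irq_set changes in this series all add the same thing: a read-back of a register in the block that was just written. MMIO writes can be posted (buffered) on the bus, so without the read the interrupt-enable write may not have reached the hardware by the time irq_set returns. A minimal sketch of the pattern, assuming Linux's readl()/writel() accessors and a placeholder register offset rather than a real radeon one:

    #include <linux/types.h>
    #include <linux/io.h>

    #define INT_CNTL_REG 0x0040     /* placeholder offset, not a radeon register */

    /* Write an interrupt-control register and immediately read it back so
     * the write is flushed out of any posting buffers before we return. */
    static inline void write_int_cntl_posted(void __iomem *mmio, u32 val)
    {
        writel(val, mmio + INT_CNTL_REG);
        (void)readl(mmio + INT_CNTL_REG);   /* posting read */
    }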
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 07a71a2488c9..2fcad344492f 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -3784,6 +3784,9 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3784 | WREG32(RV770_CG_THERMAL_INT, thermal_int); | 3784 | WREG32(RV770_CG_THERMAL_INT, thermal_int); |
3785 | } | 3785 | } |
3786 | 3786 | ||
3787 | /* posting read */ | ||
3788 | RREG32(R_000E50_SRBM_STATUS); | ||
3789 | |||
3787 | return 0; | 3790 | return 0; |
3788 | } | 3791 | } |
3789 | 3792 | ||
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 843b65f46ece..fa2154493cf1 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c | |||
@@ -188,7 +188,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev) | |||
188 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 188 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
189 | radeon_crtc = to_radeon_crtc(crtc); | 189 | radeon_crtc = to_radeon_crtc(crtc); |
190 | if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { | 190 | if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { |
191 | vrefresh = radeon_crtc->hw_mode.vrefresh; | 191 | vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode); |
192 | break; | 192 | break; |
193 | } | 193 | } |
194 | } | 194 | } |
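r600_dpm_get_vrefresh now derives the refresh rate from the mode timings via drm_mode_vrefresh() instead of trusting the cached vrefresh field, which can be zero for user-supplied modes. The helper is roughly the pixel clock divided by the total pixels per frame; a self-contained approximation (ignoring the interlace/doublescan/vscan adjustments the real helper applies):

    #include <stdio.h>

    /* clock is in kHz, as in struct drm_display_mode, so the result is Hz. */
    static int mode_vrefresh(int clock_khz, int htotal, int vtotal)
    {
        int total = htotal * vtotal;

        if (total <= 0)
            return 0;
        return (clock_khz * 1000 + total / 2) / total;
    }

    int main(void)
    {
        /* 1920x1080@60: 148500 kHz pixel clock, 2200x1125 total */
        printf("%d Hz\n", mode_vrefresh(148500, 2200, 1125));
        return 0;
    }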
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 62c91ed669ce..dd6606b8e23c 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -476,17 +476,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
476 | if (!dig || !dig->afmt) | 476 | if (!dig || !dig->afmt) |
477 | return; | 477 | return; |
478 | 478 | ||
479 | /* Silent, r600_hdmi_enable will raise WARN for us */ | ||
480 | if (enable && dig->afmt->enabled) | ||
481 | return; | ||
482 | if (!enable && !dig->afmt->enabled) | ||
483 | return; | ||
484 | |||
485 | if (!enable && dig->afmt->pin) { | ||
486 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
487 | dig->afmt->pin = NULL; | ||
488 | } | ||
489 | |||
490 | /* Older chipsets require setting HDMI and routing manually */ | 479 | /* Older chipsets require setting HDMI and routing manually */ |
491 | if (!ASIC_IS_DCE3(rdev)) { | 480 | if (!ASIC_IS_DCE3(rdev)) { |
492 | if (enable) | 481 | if (enable) |
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index a3ceef6d9632..b21ef69a34ac 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c | |||
@@ -101,8 +101,8 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | |||
101 | struct drm_display_mode *mode); | 101 | struct drm_display_mode *mode); |
102 | void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); | 102 | void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); |
103 | void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); | 103 | void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); |
104 | void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); | 104 | void evergreen_dp_enable(struct drm_encoder *encoder, bool enable); |
105 | void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); | 105 | void dce6_dp_enable(struct drm_encoder *encoder, bool enable); |
106 | 106 | ||
107 | static const u32 pin_offsets[7] = | 107 | static const u32 pin_offsets[7] = |
108 | { | 108 | { |
@@ -210,7 +210,7 @@ static struct radeon_audio_funcs dce4_dp_funcs = { | |||
210 | .set_avi_packet = evergreen_set_avi_packet, | 210 | .set_avi_packet = evergreen_set_avi_packet, |
211 | .set_audio_packet = dce4_set_audio_packet, | 211 | .set_audio_packet = dce4_set_audio_packet, |
212 | .mode_set = radeon_audio_dp_mode_set, | 212 | .mode_set = radeon_audio_dp_mode_set, |
213 | .dpms = evergreen_enable_dp_audio_packets, | 213 | .dpms = evergreen_dp_enable, |
214 | }; | 214 | }; |
215 | 215 | ||
216 | static struct radeon_audio_funcs dce6_hdmi_funcs = { | 216 | static struct radeon_audio_funcs dce6_hdmi_funcs = { |
@@ -240,7 +240,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = { | |||
240 | .set_avi_packet = evergreen_set_avi_packet, | 240 | .set_avi_packet = evergreen_set_avi_packet, |
241 | .set_audio_packet = dce4_set_audio_packet, | 241 | .set_audio_packet = dce4_set_audio_packet, |
242 | .mode_set = radeon_audio_dp_mode_set, | 242 | .mode_set = radeon_audio_dp_mode_set, |
243 | .dpms = dce6_enable_dp_audio_packets, | 243 | .dpms = dce6_dp_enable, |
244 | }; | 244 | }; |
245 | 245 | ||
246 | static void radeon_audio_interface_init(struct radeon_device *rdev) | 246 | static void radeon_audio_interface_init(struct radeon_device *rdev) |
@@ -452,7 +452,7 @@ void radeon_audio_enable(struct radeon_device *rdev, | |||
452 | } | 452 | } |
453 | 453 | ||
454 | void radeon_audio_detect(struct drm_connector *connector, | 454 | void radeon_audio_detect(struct drm_connector *connector, |
455 | enum drm_connector_status status) | 455 | enum drm_connector_status status) |
456 | { | 456 | { |
457 | struct radeon_device *rdev; | 457 | struct radeon_device *rdev; |
458 | struct radeon_encoder *radeon_encoder; | 458 | struct radeon_encoder *radeon_encoder; |
@@ -483,14 +483,11 @@ void radeon_audio_detect(struct drm_connector *connector, | |||
483 | else | 483 | else |
484 | radeon_encoder->audio = rdev->audio.hdmi_funcs; | 484 | radeon_encoder->audio = rdev->audio.hdmi_funcs; |
485 | 485 | ||
486 | radeon_audio_write_speaker_allocation(connector->encoder); | 486 | dig->afmt->pin = radeon_audio_get_pin(connector->encoder); |
487 | radeon_audio_write_sad_regs(connector->encoder); | ||
488 | if (connector->encoder->crtc) | ||
489 | radeon_audio_write_latency_fields(connector->encoder, | ||
490 | &connector->encoder->crtc->mode); | ||
491 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | 487 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); |
492 | } else { | 488 | } else { |
493 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 489 | radeon_audio_enable(rdev, dig->afmt->pin, 0); |
490 | dig->afmt->pin = NULL; | ||
494 | } | 491 | } |
495 | } | 492 | } |
496 | 493 | ||
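With this hunk the audio pin's lifetime follows hotplug rather than modesets: detect() claims a pin and enables it when a monitor with audio appears, and releases it when the monitor goes away, while the SAD/speaker-allocation/latency programming moves into the mode_set paths below. A condensed sketch of the resulting flow, illustrative only and reusing the function names from the diff:

    if (connected) {
        dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
        radeon_audio_enable(rdev, dig->afmt->pin, 0xf);     /* enable all channels */
    } else {
        radeon_audio_enable(rdev, dig->afmt->pin, 0);       /* silence the pin */
        dig->afmt->pin = NULL;                              /* pin is free for reuse */
    }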
@@ -694,23 +691,22 @@ static void radeon_audio_set_mute(struct drm_encoder *encoder, bool mute) | |||
694 | * update the info frames with the data from the current display mode | 691 | * update the info frames with the data from the current display mode |
695 | */ | 692 | */ |
696 | static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, | 693 | static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, |
697 | struct drm_display_mode *mode) | 694 | struct drm_display_mode *mode) |
698 | { | 695 | { |
699 | struct radeon_device *rdev = encoder->dev->dev_private; | ||
700 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 696 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
701 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 697 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
702 | 698 | ||
703 | if (!dig || !dig->afmt) | 699 | if (!dig || !dig->afmt) |
704 | return; | 700 | return; |
705 | 701 | ||
706 | /* disable audio prior to setting up hw */ | 702 | radeon_audio_set_mute(encoder, true); |
707 | dig->afmt->pin = radeon_audio_get_pin(encoder); | ||
708 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
709 | 703 | ||
704 | radeon_audio_write_speaker_allocation(encoder); | ||
705 | radeon_audio_write_sad_regs(encoder); | ||
706 | radeon_audio_write_latency_fields(encoder, mode); | ||
710 | radeon_audio_set_dto(encoder, mode->clock); | 707 | radeon_audio_set_dto(encoder, mode->clock); |
711 | radeon_audio_set_vbi_packet(encoder); | 708 | radeon_audio_set_vbi_packet(encoder); |
712 | radeon_hdmi_set_color_depth(encoder); | 709 | radeon_hdmi_set_color_depth(encoder); |
713 | radeon_audio_set_mute(encoder, false); | ||
714 | radeon_audio_update_acr(encoder, mode->clock); | 710 | radeon_audio_update_acr(encoder, mode->clock); |
715 | radeon_audio_set_audio_packet(encoder); | 711 | radeon_audio_set_audio_packet(encoder); |
716 | radeon_audio_select_pin(encoder); | 712 | radeon_audio_select_pin(encoder); |
@@ -718,8 +714,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, | |||
718 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) | 714 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) |
719 | return; | 715 | return; |
720 | 716 | ||
721 | /* enable audio after to setting up hw */ | 717 | radeon_audio_set_mute(encoder, false); |
722 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
723 | } | 718 | } |
724 | 719 | ||
725 | static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | 720 | static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, |
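The HDMI mode_set path no longer tears down and re-enables the audio pin around reprogramming; it mutes the stream, writes the speaker allocation, SADs, latency fields, DTO and packets, then unmutes once the AVI infoframe has been accepted. A sketch of the bracket, again only restating the helpers named in the diff:

    radeon_audio_set_mute(encoder, true);           /* silence while we reprogram */

    radeon_audio_write_speaker_allocation(encoder);
    radeon_audio_write_sad_regs(encoder);
    radeon_audio_write_latency_fields(encoder, mode);
    /* ... DTO, VBI packet, color depth, ACR, audio/AVI packets ... */

    radeon_audio_set_mute(encoder, false);          /* audible again */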
@@ -729,23 +724,26 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | |||
729 | struct radeon_device *rdev = dev->dev_private; | 724 | struct radeon_device *rdev = dev->dev_private; |
730 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 725 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
731 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 726 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
727 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
728 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
729 | struct radeon_connector_atom_dig *dig_connector = | ||
730 | radeon_connector->con_priv; | ||
732 | 731 | ||
733 | if (!dig || !dig->afmt) | 732 | if (!dig || !dig->afmt) |
734 | return; | 733 | return; |
735 | 734 | ||
736 | /* disable audio prior to setting up hw */ | 735 | radeon_audio_write_speaker_allocation(encoder); |
737 | dig->afmt->pin = radeon_audio_get_pin(encoder); | 736 | radeon_audio_write_sad_regs(encoder); |
738 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 737 | radeon_audio_write_latency_fields(encoder, mode); |
739 | 738 | if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev)) | |
740 | radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); | 739 | radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); |
740 | else | ||
741 | radeon_audio_set_dto(encoder, dig_connector->dp_clock); | ||
741 | radeon_audio_set_audio_packet(encoder); | 742 | radeon_audio_set_audio_packet(encoder); |
742 | radeon_audio_select_pin(encoder); | 743 | radeon_audio_select_pin(encoder); |
743 | 744 | ||
744 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) | 745 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) |
745 | return; | 746 | return; |
746 | |||
747 | /* enable audio after to setting up hw */ | ||
748 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
749 | } | 747 | } |
750 | 748 | ||
751 | void radeon_audio_mode_set(struct drm_encoder *encoder, | 749 | void radeon_audio_mode_set(struct drm_encoder *encoder, |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index c830863bc98a..4d0f96cc3da4 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -256,11 +256,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
256 | u32 ring = RADEON_CS_RING_GFX; | 256 | u32 ring = RADEON_CS_RING_GFX; |
257 | s32 priority = 0; | 257 | s32 priority = 0; |
258 | 258 | ||
259 | INIT_LIST_HEAD(&p->validated); | ||
260 | |||
259 | if (!cs->num_chunks) { | 261 | if (!cs->num_chunks) { |
260 | return 0; | 262 | return 0; |
261 | } | 263 | } |
264 | |||
262 | /* get chunks */ | 265 | /* get chunks */ |
263 | INIT_LIST_HEAD(&p->validated); | ||
264 | p->idx = 0; | 266 | p->idx = 0; |
265 | p->ib.sa_bo = NULL; | 267 | p->ib.sa_bo = NULL; |
266 | p->const_ib.sa_bo = NULL; | 268 | p->const_ib.sa_bo = NULL; |
@@ -715,6 +717,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p, | |||
715 | struct radeon_cs_chunk *ib_chunk = p->chunk_ib; | 717 | struct radeon_cs_chunk *ib_chunk = p->chunk_ib; |
716 | struct radeon_device *rdev = p->rdev; | 718 | struct radeon_device *rdev = p->rdev; |
717 | uint32_t header; | 719 | uint32_t header; |
720 | int ret = 0, i; | ||
718 | 721 | ||
719 | if (idx >= ib_chunk->length_dw) { | 722 | if (idx >= ib_chunk->length_dw) { |
720 | DRM_ERROR("Can not parse packet at %d after CS end %d !\n", | 723 | DRM_ERROR("Can not parse packet at %d after CS end %d !\n", |
@@ -743,14 +746,25 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p, | |||
743 | break; | 746 | break; |
744 | default: | 747 | default: |
745 | DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); | 748 | DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); |
746 | return -EINVAL; | 749 | ret = -EINVAL; |
750 | goto dump_ib; | ||
747 | } | 751 | } |
748 | if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { | 752 | if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { |
749 | DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", | 753 | DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", |
750 | pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); | 754 | pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); |
751 | return -EINVAL; | 755 | ret = -EINVAL; |
756 | goto dump_ib; | ||
752 | } | 757 | } |
753 | return 0; | 758 | return 0; |
759 | |||
760 | dump_ib: | ||
761 | for (i = 0; i < ib_chunk->length_dw; i++) { | ||
762 | if (i == idx) | ||
763 | printk("\t0x%08x <---\n", radeon_get_ib_value(p, i)); | ||
764 | else | ||
765 | printk("\t0x%08x\n", radeon_get_ib_value(p, i)); | ||
766 | } | ||
767 | return ret; | ||
754 | } | 768 | } |
755 | 769 | ||
756 | /** | 770 | /** |
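radeon_cs_packet_parse now dumps the whole indirect buffer when it hits a malformed packet, marking the dword where parsing stopped; the equivalent dump in si_ib_parse (further down) is reworked to trigger on any parse error instead of only Packet0. A standalone version of the dump format, using made-up IB contents:

    #include <stdio.h>
    #include <stdint.h>

    /* Print every dword of a command buffer and flag the one that failed. */
    static void dump_ib(const uint32_t *ib, unsigned int len, unsigned int bad)
    {
        unsigned int i;

        for (i = 0; i < len; i++)
            printf("\t0x%08x%s\n", ib[i], i == bad ? " <---" : "");
    }

    int main(void)
    {
        uint32_t ib[] = { 0xc0012800, 0x80000000, 0xdeadbeef };

        dump_ib(ib, 3, 2);
        return 0;
    }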
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 6b670b0bc47b..3a297037cc17 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, | |||
179 | (rdev->pdev->subsystem_vendor == 0x1734) && | 179 | (rdev->pdev->subsystem_vendor == 0x1734) && |
180 | (rdev->pdev->subsystem_device == 0x1107)) | 180 | (rdev->pdev->subsystem_device == 0x1107)) |
181 | use_bl = false; | 181 | use_bl = false; |
182 | /* Older PPC macs use on-GPU backlight controller */ | ||
183 | #ifndef CONFIG_PPC_PMAC | ||
182 | /* disable native backlight control on older asics */ | 184 | /* disable native backlight control on older asics */ |
183 | else if (rdev->family < CHIP_R600) | 185 | else if (rdev->family < CHIP_R600) |
184 | use_bl = false; | 186 | use_bl = false; |
187 | #endif | ||
185 | else | 188 | else |
186 | use_bl = true; | 189 | use_bl = true; |
187 | } | 190 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 9f758d39420d..33cf4108386d 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -852,6 +852,12 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, | |||
852 | single_display = false; | 852 | single_display = false; |
853 | } | 853 | } |
854 | 854 | ||
855 | /* 120hz tends to be problematic even if they are under the | ||
856 | * vblank limit. | ||
857 | */ | ||
858 | if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120)) | ||
859 | single_display = false; | ||
860 | |||
855 | /* certain older asics have a separare 3D performance state, | 861 | /* certain older asics have a separare 3D performance state, |
856 | * so try that first if the user selected performance | 862 | * so try that first if the user selected performance |
857 | */ | 863 | */ |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index d81182ad53ec..97a904835759 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -694,6 +694,10 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
694 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); | 694 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); |
695 | if (ASIC_IS_DCE2(rdev)) | 695 | if (ASIC_IS_DCE2(rdev)) |
696 | WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); | 696 | WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); |
697 | |||
698 | /* posting read */ | ||
699 | RREG32(R_000040_GEN_INT_CNTL); | ||
700 | |||
697 | return 0; | 701 | return 0; |
698 | } | 702 | } |
699 | 703 | ||
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 73107fe9e46f..e088e5558da0 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -3162,6 +3162,8 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
3162 | } | 3162 | } |
3163 | 3163 | ||
3164 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | 3164 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
3165 | WREG32(SRBM_INT_CNTL, 1); | ||
3166 | WREG32(SRBM_INT_ACK, 1); | ||
3165 | 3167 | ||
3166 | evergreen_fix_pci_max_read_req_size(rdev); | 3168 | evergreen_fix_pci_max_read_req_size(rdev); |
3167 | 3169 | ||
@@ -4699,12 +4701,6 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) | |||
4699 | switch (pkt.type) { | 4701 | switch (pkt.type) { |
4700 | case RADEON_PACKET_TYPE0: | 4702 | case RADEON_PACKET_TYPE0: |
4701 | dev_err(rdev->dev, "Packet0 not allowed!\n"); | 4703 | dev_err(rdev->dev, "Packet0 not allowed!\n"); |
4702 | for (i = 0; i < ib->length_dw; i++) { | ||
4703 | if (i == idx) | ||
4704 | printk("\t0x%08x <---\n", ib->ptr[i]); | ||
4705 | else | ||
4706 | printk("\t0x%08x\n", ib->ptr[i]); | ||
4707 | } | ||
4708 | ret = -EINVAL; | 4704 | ret = -EINVAL; |
4709 | break; | 4705 | break; |
4710 | case RADEON_PACKET_TYPE2: | 4706 | case RADEON_PACKET_TYPE2: |
@@ -4736,8 +4732,15 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) | |||
4736 | ret = -EINVAL; | 4732 | ret = -EINVAL; |
4737 | break; | 4733 | break; |
4738 | } | 4734 | } |
4739 | if (ret) | 4735 | if (ret) { |
4736 | for (i = 0; i < ib->length_dw; i++) { | ||
4737 | if (i == idx) | ||
4738 | printk("\t0x%08x <---\n", ib->ptr[i]); | ||
4739 | else | ||
4740 | printk("\t0x%08x\n", ib->ptr[i]); | ||
4741 | } | ||
4740 | break; | 4742 | break; |
4743 | } | ||
4741 | } while (idx < ib->length_dw); | 4744 | } while (idx < ib->length_dw); |
4742 | 4745 | ||
4743 | return ret; | 4746 | return ret; |
@@ -5910,6 +5913,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) | |||
5910 | tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; | 5913 | tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; |
5911 | WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); | 5914 | WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); |
5912 | WREG32(GRBM_INT_CNTL, 0); | 5915 | WREG32(GRBM_INT_CNTL, 0); |
5916 | WREG32(SRBM_INT_CNTL, 0); | ||
5913 | if (rdev->num_crtc >= 2) { | 5917 | if (rdev->num_crtc >= 2) { |
5914 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 5918 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
5915 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 5919 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
@@ -6199,6 +6203,9 @@ int si_irq_set(struct radeon_device *rdev) | |||
6199 | 6203 | ||
6200 | WREG32(CG_THERMAL_INT, thermal_int); | 6204 | WREG32(CG_THERMAL_INT, thermal_int); |
6201 | 6205 | ||
6206 | /* posting read */ | ||
6207 | RREG32(SRBM_STATUS); | ||
6208 | |||
6202 | return 0; | 6209 | return 0; |
6203 | } | 6210 | } |
6204 | 6211 | ||
@@ -6609,6 +6616,10 @@ restart_ih: | |||
6609 | break; | 6616 | break; |
6610 | } | 6617 | } |
6611 | break; | 6618 | break; |
6619 | case 96: | ||
6620 | DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); | ||
6621 | WREG32(SRBM_INT_ACK, 0x1); | ||
6622 | break; | ||
6612 | case 124: /* UVD */ | 6623 | case 124: /* UVD */ |
6613 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); | 6624 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); |
6614 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); | 6625 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); |
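Taken together, the SRBM changes on cayman and SI wire up a new read-error interrupt: gpu_init unmasks it and clears anything pending, disable_interrupt_state masks it again, and the interrupt handler logs the failing access and acknowledges it so the next error can be reported. The register offsets are the ones the evergreend.h/nid.h/sid.h hunks add (0xE98/0xEA0/0xEA8). A condensed view of the flow, copied from the pieces above rather than a new implementation:

    /* init: unmask SRBM read-error interrupts, ack any stale one */
    WREG32(SRBM_INT_CNTL, 0x1);
    WREG32(SRBM_INT_ACK, 0x1);

    /* interrupt handler, IH source id 96 */
    DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
    WREG32(SRBM_INT_ACK, 0x1);      /* ack so the line can fire again */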
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index cbd91d226f3c..99a9835c9f61 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
@@ -358,6 +358,10 @@ | |||
358 | #define CC_SYS_RB_BACKEND_DISABLE 0xe80 | 358 | #define CC_SYS_RB_BACKEND_DISABLE 0xe80 |
359 | #define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84 | 359 | #define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84 |
360 | 360 | ||
361 | #define SRBM_READ_ERROR 0xE98 | ||
362 | #define SRBM_INT_CNTL 0xEA0 | ||
363 | #define SRBM_INT_ACK 0xEA8 | ||
364 | |||
361 | #define SRBM_STATUS2 0x0EC4 | 365 | #define SRBM_STATUS2 0x0EC4 |
362 | #define DMA_BUSY (1 << 5) | 366 | #define DMA_BUSY (1 << 5) |
363 | #define DMA1_BUSY (1 << 6) | 367 | #define DMA1_BUSY (1 << 6) |
@@ -908,8 +912,8 @@ | |||
908 | 912 | ||
909 | #define DCCG_AUDIO_DTO0_PHASE 0x05b0 | 913 | #define DCCG_AUDIO_DTO0_PHASE 0x05b0 |
910 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 | 914 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 |
911 | #define DCCG_AUDIO_DTO1_PHASE 0x05b8 | 915 | #define DCCG_AUDIO_DTO1_PHASE 0x05c0 |
912 | #define DCCG_AUDIO_DTO1_MODULE 0x05bc | 916 | #define DCCG_AUDIO_DTO1_MODULE 0x05c4 |
913 | 917 | ||
914 | #define AFMT_AUDIO_SRC_CONTROL 0x713c | 918 | #define AFMT_AUDIO_SRC_CONTROL 0x713c |
915 | #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) | 919 | #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) |
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index bc3e85186b88..b7f781573b15 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c | |||
@@ -999,8 +999,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc) | |||
999 | crtc->state = NULL; | 999 | crtc->state = NULL; |
1000 | 1000 | ||
1001 | state = kzalloc(sizeof(*state), GFP_KERNEL); | 1001 | state = kzalloc(sizeof(*state), GFP_KERNEL); |
1002 | if (state) | 1002 | if (state) { |
1003 | crtc->state = &state->base; | 1003 | crtc->state = &state->base; |
1004 | crtc->state->crtc = crtc; | ||
1005 | } | ||
1004 | } | 1006 | } |
1005 | 1007 | ||
1006 | static struct drm_crtc_state * | 1008 | static struct drm_crtc_state * |
@@ -1014,6 +1016,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc) | |||
1014 | return NULL; | 1016 | return NULL; |
1015 | 1017 | ||
1016 | copy->base.mode_changed = false; | 1018 | copy->base.mode_changed = false; |
1019 | copy->base.active_changed = false; | ||
1017 | copy->base.planes_changed = false; | 1020 | copy->base.planes_changed = false; |
1018 | copy->base.event = NULL; | 1021 | copy->base.event = NULL; |
1019 | 1022 | ||
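Both Tegra atomic-state hunks keep the private CRTC state consistent with what the DRM core expects: ->reset() must point the freshly allocated state back at its CRTC, and ->atomic_duplicate_state() must clear the per-commit flags (including active_changed) so stale values do not leak into the next commit. A minimal sketch of the reset side, with my_crtc_state as a hypothetical subclassed state whose first member is struct drm_crtc_state base:

    static void my_crtc_reset(struct drm_crtc *crtc)
    {
        struct my_crtc_state *state;

        kfree(crtc->state);
        crtc->state = NULL;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (state) {
            crtc->state = &state->base;
            crtc->state->crtc = crtc;   /* back-pointer the atomic helpers rely on */
        }
    }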
@@ -1229,9 +1232,6 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
1229 | /* program display mode */ | 1232 | /* program display mode */ |
1230 | tegra_dc_set_timings(dc, mode); | 1233 | tegra_dc_set_timings(dc, mode); |
1231 | 1234 | ||
1232 | if (dc->soc->supports_border_color) | ||
1233 | tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); | ||
1234 | |||
1235 | /* interlacing isn't supported yet, so disable it */ | 1235 | /* interlacing isn't supported yet, so disable it */ |
1236 | if (dc->soc->supports_interlacing) { | 1236 | if (dc->soc->supports_interlacing) { |
1237 | value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL); | 1237 | value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL); |
@@ -1254,42 +1254,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
1254 | 1254 | ||
1255 | static void tegra_crtc_prepare(struct drm_crtc *crtc) | 1255 | static void tegra_crtc_prepare(struct drm_crtc *crtc) |
1256 | { | 1256 | { |
1257 | struct tegra_dc *dc = to_tegra_dc(crtc); | ||
1258 | unsigned int syncpt; | ||
1259 | unsigned long value; | ||
1260 | |||
1261 | drm_crtc_vblank_off(crtc); | 1257 | drm_crtc_vblank_off(crtc); |
1262 | |||
1263 | if (dc->pipe) | ||
1264 | syncpt = SYNCPT_VBLANK1; | ||
1265 | else | ||
1266 | syncpt = SYNCPT_VBLANK0; | ||
1267 | |||
1268 | /* initialize display controller */ | ||
1269 | tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); | ||
1270 | tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC); | ||
1271 | |||
1272 | value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT; | ||
1273 | tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); | ||
1274 | |||
1275 | value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | | ||
1276 | WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; | ||
1277 | tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY); | ||
1278 | |||
1279 | /* initialize timer */ | ||
1280 | value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) | | ||
1281 | WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20); | ||
1282 | tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY); | ||
1283 | |||
1284 | value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) | | ||
1285 | WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1); | ||
1286 | tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); | ||
1287 | |||
1288 | value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; | ||
1289 | tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); | ||
1290 | |||
1291 | value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; | ||
1292 | tegra_dc_writel(dc, value, DC_CMD_INT_MASK); | ||
1293 | } | 1258 | } |
1294 | 1259 | ||
1295 | static void tegra_crtc_commit(struct drm_crtc *crtc) | 1260 | static void tegra_crtc_commit(struct drm_crtc *crtc) |
@@ -1666,6 +1631,8 @@ static int tegra_dc_init(struct host1x_client *client) | |||
1666 | struct tegra_drm *tegra = drm->dev_private; | 1631 | struct tegra_drm *tegra = drm->dev_private; |
1667 | struct drm_plane *primary = NULL; | 1632 | struct drm_plane *primary = NULL; |
1668 | struct drm_plane *cursor = NULL; | 1633 | struct drm_plane *cursor = NULL; |
1634 | unsigned int syncpt; | ||
1635 | u32 value; | ||
1669 | int err; | 1636 | int err; |
1670 | 1637 | ||
1671 | if (tegra->domain) { | 1638 | if (tegra->domain) { |
@@ -1732,6 +1699,40 @@ static int tegra_dc_init(struct host1x_client *client) | |||
1732 | goto cleanup; | 1699 | goto cleanup; |
1733 | } | 1700 | } |
1734 | 1701 | ||
1702 | /* initialize display controller */ | ||
1703 | if (dc->pipe) | ||
1704 | syncpt = SYNCPT_VBLANK1; | ||
1705 | else | ||
1706 | syncpt = SYNCPT_VBLANK0; | ||
1707 | |||
1708 | tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); | ||
1709 | tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC); | ||
1710 | |||
1711 | value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT; | ||
1712 | tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); | ||
1713 | |||
1714 | value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | | ||
1715 | WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; | ||
1716 | tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY); | ||
1717 | |||
1718 | /* initialize timer */ | ||
1719 | value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) | | ||
1720 | WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20); | ||
1721 | tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY); | ||
1722 | |||
1723 | value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) | | ||
1724 | WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1); | ||
1725 | tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); | ||
1726 | |||
1727 | value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; | ||
1728 | tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); | ||
1729 | |||
1730 | value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; | ||
1731 | tegra_dc_writel(dc, value, DC_CMD_INT_MASK); | ||
1732 | |||
1733 | if (dc->soc->supports_border_color) | ||
1734 | tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); | ||
1735 | |||
1735 | return 0; | 1736 | return 0; |
1736 | 1737 | ||
1737 | cleanup: | 1738 | cleanup: |
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 7e06657ae58b..7eaaee74a039 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c | |||
@@ -851,6 +851,14 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder, | |||
851 | h_back_porch = mode->htotal - mode->hsync_end; | 851 | h_back_porch = mode->htotal - mode->hsync_end; |
852 | h_front_porch = mode->hsync_start - mode->hdisplay; | 852 | h_front_porch = mode->hsync_start - mode->hdisplay; |
853 | 853 | ||
854 | err = clk_set_rate(hdmi->clk, pclk); | ||
855 | if (err < 0) { | ||
856 | dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n", | ||
857 | err); | ||
858 | } | ||
859 | |||
860 | DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk)); | ||
861 | |||
854 | /* power up sequence */ | 862 | /* power up sequence */ |
855 | value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0); | 863 | value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0); |
856 | value &= ~SOR_PLL_PDBG; | 864 | value &= ~SOR_PLL_PDBG; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index d395b0bef73b..8d9b7de25613 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -74,7 +74,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) | |||
74 | pr_err(" has_type: %d\n", man->has_type); | 74 | pr_err(" has_type: %d\n", man->has_type); |
75 | pr_err(" use_type: %d\n", man->use_type); | 75 | pr_err(" use_type: %d\n", man->use_type); |
76 | pr_err(" flags: 0x%08X\n", man->flags); | 76 | pr_err(" flags: 0x%08X\n", man->flags); |
77 | pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset); | 77 | pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset); |
78 | pr_err(" size: %llu\n", man->size); | 78 | pr_err(" size: %llu\n", man->size); |
79 | pr_err(" available_caching: 0x%08X\n", man->available_caching); | 79 | pr_err(" available_caching: 0x%08X\n", man->available_caching); |
80 | pr_err(" default_caching: 0x%08X\n", man->default_caching); | 80 | pr_err(" default_caching: 0x%08X\n", man->default_caching); |