author		Dave Airlie <airlied@redhat.com>	2015-10-02 01:41:17 -0400
committer	Dave Airlie <airlied@redhat.com>	2015-10-02 01:41:17 -0400
commit		d4070ff71363a2b6598633f23558f809600ebad2
tree		61a031eae18b88c1b563c9761a478db8fcff264b
parent		2d4df13c0f9ef56452b1d9a9016cb3946e17bfe5
parent		fd1ee4cc9326c97b52154ee2ef8cdd23ac6aae1c
Merge tag 'drm-intel-next-2015-09-11' of git://anongit.freedesktop.org/drm-intel into drm-next
- initialize backlight from VBT as fallback (Jani)
- hpd A support from Ville
- various atomic polish all over (mostly from Maarten)
- first parts of virtualized gpu guest support on bdw from
  Zhiyuan Lv
- GuC fixes from Alex
- polish for the chv clocks code (Ville)
- various things all over, as usual
* tag 'drm-intel-next-2015-09-11' of git://anongit.freedesktop.org/drm-intel: (145 commits)
drm/i915: Update DRIVER_DATE to 20150911
drm/i915: Remove one very outdated comment
drm/i915: Use crtc->state for duplication.
drm/i915: Do not handle a null plane state.
drm/i915: Remove legacy plane updates for cursor and sprite planes.
drm/i915: Use atomic state when changing cursor visibility.
drm/i915: Use the atomic state in intel_update_primary_planes.
drm/i915: Use the plane state in intel_crtc_info.
drm/i915: Use atomic plane state in the primary plane update.
drm/i915: add attached connector to hdmi container
drm/i915: don't hard code vlv backlight frequency if unset
drm/i915: initialize backlight max from VBT
drm/i915: use pch backlight override on hsw too
drm/i915/bxt: Clean up bxt_init_clock_gating
drm/i915: Fix cmdparser STORE/LOAD command descriptors
drm/i915: Dump pfit state as hex
drm/i915: access the PP_ON_DELAYS/PP_OFF_DELAYS regs only pre GEN5
drm/i915: access the PP_CONTROL reg only pre GEN5
drm/i915: Refactor common ringbuffer allocation code
drm/i915: use the yesno helper for logging
...
50 files changed, 4888 insertions(+), 1308 deletions(-)
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 30401f927156..f78ca7f18bb2 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -4238,6 +4238,20 @@ int num_ioctls;</synopsis>
     </sect2>
   </sect1>
   <sect1>
+    <title>GuC-based Command Submission</title>
+    <sect2>
+      <title>GuC</title>
+!Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
+!Idrivers/gpu/drm/i915/intel_guc_loader.c
+    </sect2>
+    <sect2>
+      <title>GuC Client</title>
+!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submissison
+!Idrivers/gpu/drm/i915/i915_guc_submission.c
+    </sect2>
+  </sect1>
+
+  <sect1>
     <title> Tracing </title>
     <para>
     This sections covers all things related to the tracepoints implemented in
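A note on the DocBook directives added above: `!P<file> <title>` pulls in the `DOC:` overview section with that exact title from the named source file, while `!I<file>` pulls in the kernel-doc comments for the file's internal functions. A minimal sketch of the kind of `DOC:` block the `!P` line expects (illustrative wording only; the real comment lives in intel_guc_loader.c and its title must match the string after `!P` character for character, or the docproc pass silently emits nothing):

    /**
     * DOC: GuC-specific firmware loader
     *
     * Sketch only: a short overview paragraph describing how the GuC
     * firmware image is fetched and DMA'd into the microcontroller.
     */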
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 998b4643109f..44d290ae1999 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,6 +40,10 @@ i915-y += i915_cmd_parser.o \
 	  intel_ringbuffer.o \
 	  intel_uncore.o
 
+# general-purpose microcontroller (GuC) support
+i915-y += intel_guc_loader.o \
+	  i915_guc_submission.o
+
 # autogenerated null render state
 i915-y += intel_renderstate_gen6.o \
 	  intel_renderstate_gen7.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 237ff6884a22..09932cab1a3f 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -94,7 +94,7 @@
 #define CMD(op, opm, f, lm, fl, ...) \
 	{ \
 		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
 		.cmd = { (op), (opm) }, \
 		.length = { (lm) }, \
 		__VA_ARGS__ \
 	}
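For readers new to the command parser, the CMD() fields can be read off the macro body above: op is the canonical opcode, opm the opcode mask it is matched under, f selects fixed (F) versus variable (!F) length, lm is either the fixed length or the length-field mask, and fl carries the parser flags. A hedged reading of one descriptor from the table that follows (flag-letter meanings inferred from the surrounding parser code, not spelled out in this hunk):

    /* CMD( MI_STORE_REGISTER_MEM, SMI, F, 3, W | B, ... ) reads as:
     *   opcode MI_STORE_REGISTER_MEM, matched under the SMI opcode mask,
     *   fixed length of exactly 3 DWords (F with length 3),
     *   W: the register offset it names is checked against a whitelist,
     *   B: selected command DWords are also checked against bitmask rules
     *      (here: the MI_GLOBAL_GTT bit must be clear).
     */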
@@ -124,14 +124,14 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
 	CMD( MI_STORE_DWORD_INDEX, SMI, !F, 0xFF, R ),
 	CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
 	     .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
-	CMD( MI_STORE_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
+	CMD( MI_STORE_REGISTER_MEM, SMI, F, 3, W | B,
 	     .reg = { .offset = 1, .mask = 0x007FFFFC },
 	     .bits = {{
 			.offset = 0,
 			.mask = MI_GLOBAL_GTT,
 			.expected = 0,
 	     }}, ),
-	CMD( MI_LOAD_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
+	CMD( MI_LOAD_REGISTER_MEM, SMI, F, 3, W | B,
 	     .reg = { .offset = 1, .mask = 0x007FFFFC },
 	     .bits = {{
 			.offset = 0,
@@ -1021,7 +1021,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 	 * only MI_LOAD_REGISTER_IMM commands.
 	 */
 	if (reg_addr == OACONTROL) {
-		if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
+		if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
 			DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
 			return false;
 		}
@@ -1035,7 +1035,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 	 * allowed mask/value pair given in the whitelist entry.
 	 */
 	if (reg->mask) {
-		if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
+		if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
 			DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
 					 reg_addr);
 			return false;
@@ -1213,6 +1213,7 @@ int i915_cmd_parser_get_version(void)
 	 * 2. Allow access to the MI_PREDICATE_SRC0 and
 	 *    MI_PREDICATE_SRC1 registers.
 	 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
+	 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
 	 */
-	return 3;
+	return 4;
 }
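Userspace discovers this value through the GETPARAM ioctl, so bumping the return here is how e.g. Mesa learns that the new registers are usable. A minimal sketch of the query, assuming libdrm's drmIoctl() wrapper and the I915_PARAM_CMD_PARSER_VERSION parameter from i915_drm.h:

    #include <xf86drm.h>
    #include <i915_drm.h>

    static int cmd_parser_version(int fd)
    {
            int version = 0;
            drm_i915_getparam_t gp = {
                    .param = I915_PARAM_CMD_PARSER_VERSION,
                    .value = &version,
            };

            if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return -1;      /* old kernel: parameter not known */
            return version;         /* >= 4 implies the HSW chicken bits above */
    }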
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e3ec9049081f..72ae3472ddbe 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -46,11 +46,6 @@ enum {
 	PINNED_LIST,
 };
 
-static const char *yesno(int v)
-{
-	return v ? "yes" : "no";
-}
-
 /* As the drm_debugfs_init() routines are called before dev->dev_private is
  * allocated we need to hook into the minor for release. */
 static int
@@ -1387,17 +1382,16 @@ static int ironlake_drpc_info(struct seq_file *m)
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
-	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
-		   "yes" : "no");
+	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
 	seq_printf(m, "Boost freq: %d\n",
 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
 		   MEMMODE_BOOST_FREQ_SHIFT);
 	seq_printf(m, "HW control enabled: %s\n",
-		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
+		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
 	seq_printf(m, "SW control enabled: %s\n",
-		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
+		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
 	seq_printf(m, "Gated voltage change: %s\n",
-		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
+		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
 	seq_printf(m, "Starting frequency: P%d\n",
 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
 	seq_printf(m, "Max P-state: P%d\n",
@@ -1406,7 +1400,7 @@ static int ironlake_drpc_info(struct seq_file *m)
 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
 	seq_printf(m, "Render standby enabled: %s\n",
-		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
 	seq_puts(m, "Current RS state: ");
 	switch (rstdbyctl & RSX_STATUS_MASK) {
 	case RSX_STATUS_ON:
@@ -1995,7 +1989,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 		return;
 	}
 
-	page = i915_gem_object_get_page(ctx_obj, 1);
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 	if (!WARN_ON(page == NULL)) {
 		reg_state = kmap_atomic(page);
 
@@ -2250,7 +2244,6 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	struct drm_file *file;
 	int i;
 
 	if (INTEL_INFO(dev)->gen == 6)
@@ -2273,13 +2266,6 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 		ppgtt->debug_dump(ppgtt, m);
 	}
 
-	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
-		struct drm_i915_file_private *file_priv = file->driver_priv;
-
-		seq_printf(m, "proc: %s\n",
-			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
-		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
-	}
 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
 }
 
@@ -2288,6 +2274,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_file *file;
 
 	int ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -2299,6 +2286,15 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 	else if (INTEL_INFO(dev)->gen >= 6)
 		gen6_ppgtt_info(m, dev);
 
+	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+
+		seq_printf(m, "\nproc: %s\n",
+			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
+		idr_for_each(&file_priv->context_idr, per_file_ctx,
+			     (void *)(unsigned long)m);
+	}
+
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -2372,6 +2368,147 @@ static int i915_llc(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int i915_guc_load_status_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
+	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+	u32 tmp, i;
+
+	if (!HAS_GUC_UCODE(dev_priv->dev))
+		return 0;
+
+	seq_printf(m, "GuC firmware status:\n");
+	seq_printf(m, "\tpath: %s\n",
+		guc_fw->guc_fw_path);
+	seq_printf(m, "\tfetch: %s\n",
+		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+	seq_printf(m, "\tload: %s\n",
+		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+	seq_printf(m, "\tversion wanted: %d.%d\n",
+		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+	seq_printf(m, "\tversion found: %d.%d\n",
+		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
+
+	tmp = I915_READ(GUC_STATUS);
+
+	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
+	seq_printf(m, "\tBootrom status = 0x%x\n",
+		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+	seq_printf(m, "\tuKernel status = 0x%x\n",
+		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+	seq_printf(m, "\tMIA Core status = 0x%x\n",
+		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
+	seq_puts(m, "\nScratch registers:\n");
+	for (i = 0; i < 16; i++)
+		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
+
+	return 0;
+}
+
+static void i915_guc_client_info(struct seq_file *m,
+				 struct drm_i915_private *dev_priv,
+				 struct i915_guc_client *client)
+{
+	struct intel_engine_cs *ring;
+	uint64_t tot = 0;
+	uint32_t i;
+
+	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
+		client->priority, client->ctx_index, client->proc_desc_offset);
+	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
+		client->doorbell_id, client->doorbell_offset, client->cookie);
+	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
+		client->wq_size, client->wq_offset, client->wq_tail);
+
+	seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
+	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
+	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
+
+	for_each_ring(ring, dev_priv, i) {
+		seq_printf(m, "\tSubmissions: %llu %s\n",
+				client->submissions[i],
+				ring->name);
+		tot += client->submissions[i];
+	}
+	seq_printf(m, "\tTotal: %llu\n", tot);
+}
+
+static int i915_guc_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_guc guc;
+	struct i915_guc_client client = {};
+	struct intel_engine_cs *ring;
+	enum intel_ring_id i;
+	u64 total = 0;
+
+	if (!HAS_GUC_SCHED(dev_priv->dev))
+		return 0;
+
+	/* Take a local copy of the GuC data, so we can dump it at leisure */
+	spin_lock(&dev_priv->guc.host2guc_lock);
+	guc = dev_priv->guc;
+	if (guc.execbuf_client) {
+		spin_lock(&guc.execbuf_client->wq_lock);
+		client = *guc.execbuf_client;
+		spin_unlock(&guc.execbuf_client->wq_lock);
+	}
+	spin_unlock(&dev_priv->guc.host2guc_lock);
+
+	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
+	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
+	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
+	seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
+	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
+
+	seq_printf(m, "\nGuC submissions:\n");
+	for_each_ring(ring, dev_priv, i) {
+		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
+			ring->name, guc.submissions[i],
+			guc.last_seqno[i], guc.last_seqno[i]);
+		total += guc.submissions[i];
+	}
+	seq_printf(m, "\t%s: %llu\n", "Total", total);
+
+	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
+	i915_guc_client_info(m, dev_priv, &client);
+
+	/* Add more as required ... */
+
+	return 0;
+}
+
+static int i915_guc_log_dump(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
+	u32 *log;
+	int i = 0, pg;
+
+	if (!log_obj)
+		return 0;
+
+	for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
+		log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
+
+		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
+			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+				   *(log + i), *(log + i + 1),
+				   *(log + i + 2), *(log + i + 3));
+
+		kunmap_atomic(log);
+	}
+
+	seq_putc(m, '\n');
+
+	return 0;
+}
+
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
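The log-dump handler above shows the stock pattern for walking a GEM object's backing pages from the CPU: map one page at a time with kmap_atomic(), format its contents, and unmap before moving on. A stripped-down sketch of the same pattern (dump_gem_object is a hypothetical helper, not part of the patch):

    /* Sketch: hexdump every page of a GEM object, one atomic mapping at a
     * time. kmap_atomic() disables preemption, so no sleeping (and no
     * copy_to_user) is allowed while the mapping is held. */
    static void dump_gem_object(struct seq_file *m,
                                struct drm_i915_gem_object *obj)
    {
            int pg;

            for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
                    u32 *vaddr = kmap_atomic(i915_gem_object_get_page(obj, pg));
                    int i;

                    for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
                            seq_printf(m, "%08x%c", vaddr[i],
                                       (i % 8 == 7) ? '\n' : ' ');
                    kunmap_atomic(vaddr);
            }
    }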
@@ -2680,11 +2817,13 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct intel_encoder *intel_encoder;
+	struct drm_plane_state *plane_state = crtc->primary->state;
+	struct drm_framebuffer *fb = plane_state->fb;
 
-	if (crtc->primary->fb)
+	if (fb)
 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
-			   crtc->primary->fb->base.id, crtc->x, crtc->y,
-			   crtc->primary->fb->width, crtc->primary->fb->height);
+			   fb->base.id, plane_state->src_x >> 16,
+			   plane_state->src_y >> 16, fb->width, fb->height);
 	else
 		seq_puts(m, "\tprimary plane disabled\n");
 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -2706,8 +2845,7 @@ static void intel_dp_info(struct seq_file *m,
 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
-	seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
-		   "no");
+	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
 	if (intel_encoder->type == INTEL_OUTPUT_EDP)
 		intel_panel_info(m, &intel_connector->panel);
 }
@@ -2718,8 +2856,7 @@ static void intel_hdmi_info(struct seq_file *m,
 	struct intel_encoder *intel_encoder = intel_connector->encoder;
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
 
-	seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
-		   "no");
+	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
 }
 
 static void intel_lvds_info(struct seq_file *m,
@@ -4807,7 +4944,7 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
 					  struct sseu_dev_status *stat)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	const int ss_max = 2;
+	int ss_max = 2;
 	int ss;
 	u32 sig1[ss_max], sig2[ss_max];
 
@@ -5033,6 +5170,9 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
+	{"i915_guc_info", i915_guc_info, 0},
+	{"i915_guc_load_status", i915_guc_load_status_info, 0},
+	{"i915_guc_log_dump", i915_guc_log_dump, 0},
 	{"i915_frequency_info", i915_frequency_info, 0},
 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
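Hooking a new dump into debugfs is just a matter of adding a {name, show, driver_features} entry to i915_debugfs_list[]; drm_debugfs_create_files() turns each entry into a read-only file under <debugfs>/dri/<minor>/ whose contents are regenerated by the listed function on every read. A sketch of the minimal handler shape these entries assume (i915_example_info is hypothetical):

    /* m->private is the drm_info_node, from which the device is recovered,
     * exactly as the GuC handlers above do. */
    static int i915_example_info(struct seq_file *m, void *unused)
    {
            struct drm_info_node *node = m->private;
            struct drm_device *dev = node->minor->dev;

            seq_printf(m, "driver: %s\n", dev->driver->name);
            return 0;
    }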
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ab37d1121be8..066a0efa75d7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -364,12 +364,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		/* i915 resume handler doesn't set to D0 */
 		pci_set_power_state(dev->pdev, PCI_D0);
-		i915_resume_legacy(dev);
+		i915_resume_switcheroo(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
 		pr_err("switched off\n");
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		i915_suspend_legacy(dev, pmm);
+		i915_suspend_switcheroo(dev, pmm);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -435,6 +435,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
 
+	/* intel_guc_ucode_init() needs the mutex to allocate GEM objects */
+	mutex_lock(&dev->struct_mutex);
+	intel_guc_ucode_init(dev);
+	mutex_unlock(&dev->struct_mutex);
+
 	ret = i915_gem_init(dev);
 	if (ret)
 		goto cleanup_irq;
@@ -476,6 +481,9 @@ cleanup_gem:
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
 cleanup_irq:
+	mutex_lock(&dev->struct_mutex);
+	intel_guc_ucode_fini(dev);
+	mutex_unlock(&dev->struct_mutex);
 	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
 	i915_gem_cleanup_stolen(dev);
@@ -791,6 +799,24 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 		      info->has_eu_pg ? "y" : "n");
 }
 
+static void intel_init_dpio(struct drm_i915_private *dev_priv)
+{
+	if (!IS_VALLEYVIEW(dev_priv))
+		return;
+
+	/*
+	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
+	 * CHV x1 PHY (DP/HDMI D)
+	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
+	 */
+	if (IS_CHERRYVIEW(dev_priv)) {
+		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
+		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
+	} else {
+		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+	}
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -971,8 +997,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	intel_setup_gmbus(dev);
 	intel_opregion_setup(dev);
 
-	intel_setup_bios(dev);
-
 	i915_gem_load(dev);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -991,6 +1015,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	intel_device_info_runtime_init(dev);
 
+	intel_init_dpio(dev_priv);
+
 	if (INTEL_INFO(dev)->num_pipes) {
 		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
 		if (ret)
@@ -1128,6 +1154,7 @@ int i915_driver_unload(struct drm_device *dev)
 	flush_workqueue(dev_priv->wq);
 
 	mutex_lock(&dev->struct_mutex);
+	intel_guc_ucode_fini(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ab64d68388f2..e2bf9e2f6261 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -362,6 +362,7 @@ static const struct intel_device_info intel_skylake_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -374,6 +375,7 @@ static const struct intel_device_info intel_skylake_gt3_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -386,6 +388,7 @@ static const struct intel_device_info intel_broxton_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.num_pipes = 3,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -679,7 +682,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 	return 0;
 }
 
-int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
+int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
 {
 	int error;
 
@@ -812,7 +815,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	return ret;
 }
 
-int i915_resume_legacy(struct drm_device *dev)
+int i915_resume_switcheroo(struct drm_device *dev)
 {
 	int ret;
 
@@ -1552,6 +1555,15 @@ static int intel_runtime_resume(struct device *device)
 		gen6_update_ring_freq(dev);
 
 	intel_runtime_pm_enable_interrupts(dev_priv);
+
+	/*
+	 * On VLV/CHV display interrupts are part of the display
+	 * power well, so hpd is reinitialized from there. For
+	 * everyone else do it here.
+	 */
+	if (!IS_VALLEYVIEW(dev_priv))
+		intel_hpd_init(dev_priv);
+
 	intel_enable_gt_powersave(dev);
 
 	if (ret)
@@ -1649,7 +1661,7 @@ static struct drm_driver driver = {
 	 */
 	.driver_features =
 	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
-	    DRIVER_RENDER,
+	    DRIVER_RENDER | DRIVER_MODESET,
 	.load = i915_driver_load,
 	.unload = i915_driver_unload,
 	.open = i915_driver_open,
@@ -1658,10 +1670,6 @@ static struct drm_driver driver = {
 	.postclose = i915_driver_postclose,
 	.set_busid = drm_pci_set_busid,
 
-	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
-	.suspend = i915_suspend_legacy,
-	.resume = i915_resume_legacy,
-
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = i915_debugfs_init,
 	.debugfs_cleanup = i915_debugfs_cleanup,
@@ -1704,7 +1712,6 @@ static int __init i915_init(void)
 	 * either the i915.modeset prarameter or by the
 	 * vga_text_mode_force boot option.
 	 */
-	driver.driver_features |= DRIVER_MODESET;
 
 	if (i915.modeset == 0)
 		driver.driver_features &= ~DRIVER_MODESET;
@@ -1715,18 +1722,12 @@ static int __init i915_init(void)
 #endif
 
 	if (!(driver.driver_features & DRIVER_MODESET)) {
-		driver.get_vblank_timestamp = NULL;
 		/* Silently fail loading to not upset userspace. */
 		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
 		return 0;
 	}
 
-	/*
-	 * FIXME: Note that we're lying to the DRM core here so that we can get access
-	 * to the atomic ioctl and the atomic properties. Only plane operations on
-	 * a single CRTC will actually work.
-	 */
-	if (driver.driver_features & DRIVER_MODESET)
+	if (i915.nuclear_pageflip)
 		driver.driver_features |= DRIVER_ATOMIC;
 
 	return drm_pci_init(&driver, &i915_pci_driver);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e1db8de52851..466704674fac 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -50,13 +50,14 @@
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
 #include <linux/pm_qos.h>
+#include "intel_guc.h"
 
 /* General customization:
  */
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20150731"
+#define DRIVER_DATE		"20150911"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -67,11 +68,11 @@
 	BUILD_BUG_ON(__i915_warn_cond); \
 	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
 #else
-#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
+#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
 #endif
 
 #undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
 
 #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
 			     (long) (x), __func__);
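The switch from string pasting to an explicit "%s" matters whenever the stringified condition itself contains a '%': with the old form the condition text became part of the format string, so a stray conversion specifier could be parsed with no argument behind it. A standalone illustration (plain C, not from the patch):

    #include <stdio.h>

    #define OLD_WARN_ON(x)  printf("WARN_ON(" #x ")\n")     /* #x lands in the format string */
    #define NEW_WARN_ON(x)  printf("WARN_ON(%s)\n", #x)     /* #x is passed as string data */

    int main(void)
    {
            int count = 3;
            /* OLD_WARN_ON(count % 4) would make the format string
             * "WARN_ON(count % 4)\n", where "% 4" gets treated as a
             * conversion specification with no matching argument. */
            NEW_WARN_ON(count % 4);   /* always prints the literal text */
            return 0;
    }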
@@ -105,6 +106,11 @@
 	unlikely(__ret_warn_on); \
 })
 
+static inline const char *yesno(bool v)
+{
+	return v ? "yes" : "no";
+}
+
 enum pipe {
 	INVALID_PIPE = -1,
 	PIPE_A = 0,
@@ -549,7 +555,7 @@ struct drm_i915_error_state {
 
 	struct drm_i915_error_object {
 		int page_count;
-		u32 gtt_offset;
+		u64 gtt_offset;
 		u32 *pages[0];
 	} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
 
@@ -575,7 +581,7 @@ struct drm_i915_error_state {
 		u32 size;
 		u32 name;
 		u32 rseqno[I915_NUM_RINGS], wseqno;
-		u32 gtt_offset;
+		u64 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
 		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
@@ -665,6 +671,8 @@ struct drm_i915_display_funcs {
 			      uint32_t level);
 	void (*disable_backlight)(struct intel_connector *connector);
 	void (*enable_backlight)(struct intel_connector *connector);
+	uint32_t (*backlight_hz_to_pwm)(struct intel_connector *connector,
+					uint32_t hz);
 };
 
 enum forcewake_domain_id {
@@ -1693,7 +1701,7 @@ struct i915_execbuffer_params {
 	struct drm_file *file;
 	uint32_t dispatch_flags;
 	uint32_t args_batch_start_offset;
-	uint32_t batch_obj_vm_offset;
+	uint64_t batch_obj_vm_offset;
 	struct intel_engine_cs *ring;
 	struct drm_i915_gem_object *batch_obj;
 	struct intel_context *ctx;
@@ -1716,6 +1724,8 @@ struct drm_i915_private {
 
 	struct i915_virtual_gpu vgpu;
 
+	struct intel_guc guc;
+
 	struct intel_csr csr;
 
 	/* Display CSR-related protection */
@@ -1796,6 +1806,7 @@ struct drm_i915_private {
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 	unsigned int skl_boot_cdclk;
 	unsigned int cdclk_freq, max_cdclk_freq;
+	unsigned int max_dotclk_freq;
 	unsigned int hpll_freq;
 
 	/**
@@ -1963,6 +1974,11 @@ static inline struct drm_i915_private *dev_to_i915(struct device *dev)
 	return to_i915(dev_get_drvdata(dev));
 }
 
+static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
+{
+	return container_of(guc, struct drm_i915_private, guc);
+}
+
 /* Iterate over initialised rings */
 #define for_each_ring(ring__, dev_priv__, i__) \
 	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
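guc_to_i915() above is the usual container_of() idiom: given a pointer to the intel_guc member embedded in drm_i915_private, it recovers the enclosing structure by subtracting the member's offset. A self-contained illustration of the same pattern (the kernel's real container_of() adds type-checking on top of this):

    #include <stddef.h>

    struct inner { int x; };
    struct outer {
            int before;
            struct inner member;    /* embedded, like guc in drm_i915_private */
    };

    /* Roughly what container_of(ptr, type, member) expands to: */
    #define my_container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct outer *inner_to_outer(struct inner *p)
    {
            return my_container_of(p, struct outer, member);
    }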
@@ -2520,7 +2536,8 @@ struct drm_i915_cmd_table {
 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
 #define HAS_LOGICAL_RING_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 8)
 #define USES_PPGTT(dev)		(i915.enable_ppgtt)
-#define USES_FULL_PPGTT(dev)	(i915.enable_ppgtt == 2)
+#define USES_FULL_PPGTT(dev)	(i915.enable_ppgtt >= 2)
+#define USES_FULL_48BIT_PPGTT(dev)	(i915.enable_ppgtt == 3)
 
 #define HAS_OVERLAY(dev)	(INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
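Read together, the three macros give the i915.enable_ppgtt module parameter a small enumeration; the 0/1 split below is inferred from the rest of the driver rather than spelled out in this hunk:

    /* Inferred mapping of i915.enable_ppgtt:
     *   0 - per-process GTT disabled (GGTT only)
     *   1 - aliasing PPGTT (USES_PPGTT)
     *   2 - full per-process PPGTT, 32-bit address space (USES_FULL_PPGTT)
     *   3 - full 48-bit PPGTT (USES_FULL_48BIT_PPGTT; also satisfies >= 2,
     *       which is why USES_FULL_PPGTT was relaxed from == to >=)
     */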
@@ -2566,6 +2583,9 @@ struct drm_i915_cmd_table {
 
 #define HAS_CSR(dev)	(IS_SKYLAKE(dev))
 
+#define HAS_GUC_UCODE(dev)	(IS_GEN9(dev))
+#define HAS_GUC_SCHED(dev)	(IS_GEN9(dev))
+
 #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
 				    INTEL_INFO(dev)->gen >= 8)
 
@@ -2584,6 +2604,7 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
@@ -2603,8 +2624,8 @@ struct drm_i915_cmd_table {
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 
-extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
-extern int i915_resume_legacy(struct drm_device *dev);
+extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
+extern int i915_resume_switcheroo(struct drm_device *dev);
 
 /* i915_params.c */
 struct i915_params {
@@ -2637,6 +2658,7 @@ struct i915_params {
 	int use_mmio_flip;
 	int mmio_debug;
 	bool verbose_state_checks;
+	bool nuclear_pageflip;
 	int edp_vswing;
 };
 extern struct i915_params i915 __read_mostly;
@@ -2986,13 +3008,11 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				      struct drm_gem_object *gem_obj, int flags);
 
-unsigned long
-i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-			      const struct i915_ggtt_view *view);
-unsigned long
-i915_gem_obj_offset(struct drm_i915_gem_object *o,
-		    struct i915_address_space *vm);
-static inline unsigned long
+u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+				  const struct i915_ggtt_view *view);
+u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
+			struct i915_address_space *vm);
+static inline u64
 i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
 {
 	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
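The prototype change from unsigned long to u64 matters on 32-bit kernels, where unsigned long is 32 bits and would silently truncate any GTT offset above 4 GiB once 48-bit PPGTT is in play. The printing conventions used elsewhere in this series cope with the wider type in two ways; a sketch (print_gtt_offset is a hypothetical helper):

    /* Assumes <linux/kernel.h> for upper_32_bits()/lower_32_bits(). */
    static void print_gtt_offset(u64 offset)
    {
            /* Split form, as the pinning WARN later in this diff does: */
            pr_info("offset=%08x %08x\n",
                    upper_32_bits(offset), lower_32_bits(offset));
            /* Single-specifier form, as the fence-register WARNs do: */
            pr_info("offset=0x%08llx\n", offset);
    }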
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4d631a946481..22071cbc206f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1005,12 +1005,14 @@ out:
 		if (!needs_clflush_after &&
 		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 			if (i915_gem_clflush_object(obj, obj->pin_display))
-				i915_gem_chipset_flush(dev);
+				needs_clflush_after = true;
 		}
 	}
 
 	if (needs_clflush_after)
 		i915_gem_chipset_flush(dev);
+	else
+		obj->cache_dirty = true;
 
 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 	return ret;
@@ -3228,10 +3230,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 	ret = i915_gem_object_wait_rendering(obj, false);
 	if (ret)
 		return ret;
-	/* Continue on if we fail due to EIO, the GPU is hung so we
-	 * should be safe and we need to cleanup or else we might
-	 * cause memory corruption through use-after-free.
-	 */
 
 	if (i915_is_ggtt(vma->vm) &&
 	    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
@@ -3355,7 +3353,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 size, fence_size, fence_alignment, unfenced_alignment;
+	u32 fence_alignment, unfenced_alignment;
+	u64 size, fence_size;
 	u64 start =
 		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
 	u64 end =
@@ -3414,7 +3413,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	 * attempt to find space.
 	 */
 	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n",
+		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
			  ggtt_view ? ggtt_view->type : 0,
			  size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
@@ -3638,10 +3637,10 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	struct i915_vma *vma, *next;
-	int ret;
+	int ret = 0;
 
 	if (obj->cache_level == cache_level)
-		return 0;
+		goto out;
 
 	if (i915_gem_obj_is_pinned(obj)) {
 		DRM_DEBUG("can not change the cache level of pinned objects\n");
@@ -3686,6 +3685,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			vma->node.color = cache_level;
 	obj->cache_level = cache_level;
 
+out:
 	if (obj->cache_dirty &&
 	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
 	    cpu_write_needs_clflush(obj)) {
@@ -3738,6 +3738,15 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 		level = I915_CACHE_NONE;
 		break;
 	case I915_CACHING_CACHED:
+		/*
+		 * Due to a HW issue on BXT A stepping, GPU stores via a
+		 * snooped mapping may leave stale data in a corresponding CPU
+		 * cacheline, whereas normally such cachelines would get
+		 * invalidated.
+		 */
+		if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
+			return -ENODEV;
+
 		level = I915_CACHE_LLC;
 		break;
 	case I915_CACHING_DISPLAY:
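The new -ENODEV return surfaces to userspace through the SET_CACHING ioctl, so callers asking for snooped (CACHED) buffers on an early Broxton must be prepared to fall back. A hedged userspace sketch using libdrm (request_cached is hypothetical; the fallback policy is the caller's choice, not mandated by the patch):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    /* Ask for a snooped mapping; fall back to uncached if the kernel
     * refuses, e.g. on BXT A stepping per the check above. */
    static int request_cached(int fd, uint32_t handle)
    {
            struct drm_i915_gem_caching arg = {
                    .handle = handle,
                    .caching = I915_CACHING_CACHED,
            };

            if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0)
                    return 0;
            arg.caching = I915_CACHING_NONE;
            return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
    }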
@@ -4011,15 +4020,13 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 			return -EBUSY;
 
 		if (i915_vma_misplaced(vma, alignment, flags)) {
-			unsigned long offset;
-			offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
-					     i915_gem_obj_offset(obj, vm);
 			WARN(vma->pin_count,
 			     "bo is already pinned in %s with incorrect alignment:"
-			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
+			     " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
 			     ggtt_view ? "ggtt" : "ppgtt",
-			     offset,
+			     upper_32_bits(vma->node.start),
+			     lower_32_bits(vma->node.start),
 			     alignment,
 			     !!(flags & PIN_MAPPABLE),
 			     obj->map_and_fenceable);
@@ -4679,6 +4686,22 @@ i915_gem_init_hw(struct drm_device *dev)
 			goto out;
 	}
 
+	/* We can't enable contexts until all firmware is loaded */
+	ret = intel_guc_ucode_load(dev);
+	if (ret) {
+		/*
+		 * If we got an error and GuC submission is enabled, map
+		 * the error to -EIO so the GPU will be declared wedged.
+		 * OTOH, if we didn't intend to use the GuC anyway, just
+		 * discard the error and carry on.
+		 */
+		DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
+			  i915.enable_guc_submission ? "" : " (ignored)");
+		ret = i915.enable_guc_submission ? -EIO : 0;
+		if (ret)
+			goto out;
+	}
+
 	/* Now it is safe to go back round and do everything else: */
 	for_each_ring(ring, dev_priv, i) {
 		struct drm_i915_gem_request *req;
@@ -4974,9 +4997,8 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 }
 
 /* All the new VM stuff */
-unsigned long
-i915_gem_obj_offset(struct drm_i915_gem_object *o,
-		    struct i915_address_space *vm)
+u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
+			struct i915_address_space *vm)
 {
 	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
 	struct i915_vma *vma;
@@ -4996,9 +5018,8 @@ i915_gem_obj_offset(struct drm_i915_gem_object *o,
 	return -1;
 }
 
-unsigned long
-i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-			      const struct i915_ggtt_view *view)
+u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+				  const struct i915_ggtt_view *view)
 {
 	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
 	struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8e893b354bcc..74aa0c9929ba 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -332,6 +332,13 @@ int i915_gem_context_init(struct drm_device *dev)
 	if (WARN_ON(dev_priv->ring[RCS].default_context))
 		return 0;
 
+	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+		if (!i915.enable_execlists) {
+			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
+			return -EINVAL;
+		}
+	}
+
 	if (i915.enable_execlists) {
 		/* NB: intentionally left blank. We will allocate our own
 		 * backing objects as we need them, thank you very much */
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c index af1f8c461060..6077dffb318a 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence.c | |||
@@ -128,7 +128,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, | |||
128 | WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) || | 128 | WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) || |
129 | (size & -size) != size || | 129 | (size & -size) != size || |
130 | (i915_gem_obj_ggtt_offset(obj) & (size - 1)), | 130 | (i915_gem_obj_ggtt_offset(obj) & (size - 1)), |
131 | "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", | 131 | "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", |
132 | i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size); | 132 | i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size); |
133 | 133 | ||
134 | if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) | 134 | if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) |
@@ -171,7 +171,7 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg, | |||
171 | WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) || | 171 | WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) || |
172 | (size & -size) != size || | 172 | (size & -size) != size || |
173 | (i915_gem_obj_ggtt_offset(obj) & (size - 1)), | 173 | (i915_gem_obj_ggtt_offset(obj) & (size - 1)), |
174 | "object 0x%08lx not 512K or pot-size 0x%08x aligned\n", | 174 | "object 0x%08llx not 512K or pot-size 0x%08x aligned\n", |
175 | i915_gem_obj_ggtt_offset(obj), size); | 175 | i915_gem_obj_ggtt_offset(obj), size); |
176 | 176 | ||
177 | pitch_val = obj->stride / 128; | 177 | pitch_val = obj->stride / 128; |
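The format-string changes above follow from that type change: passing a u64 to a "%08lx" conversion is undefined on 32-bit targets, so the specifier becomes "%08llx". Where a dump wants the value split in two (as in the i915_gpu_error.c hunks further below), the kernel uses upper_32_bits()/lower_32_bits(). A standalone sketch of both forms, with local stand-ins for those helpers and a hypothetical offset:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }

int main(void)
{
	uint64_t gtt_offset = 0x1c0010000ULL;	/* hypothetical offset > 4GiB */

	printf("0x%08" PRIx64 "\n", gtt_offset);	/* one conversion */
	printf("0x%08x_%08x\n",				/* split, as in the dumps */
	       upper_32_bits(gtt_offset), lower_32_bits(gtt_offset));
	return 0;
}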
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 96054a560f4f..87862813cfde 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -204,6 +204,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr, | |||
204 | return pde; | 204 | return pde; |
205 | } | 205 | } |
206 | 206 | ||
207 | #define gen8_pdpe_encode gen8_pde_encode | ||
208 | #define gen8_pml4e_encode gen8_pde_encode | ||
209 | |||
207 | static gen6_pte_t snb_pte_encode(dma_addr_t addr, | 210 | static gen6_pte_t snb_pte_encode(dma_addr_t addr, |
208 | enum i915_cache_level level, | 211 | enum i915_cache_level level, |
209 | bool valid, u32 unused) | 212 | bool valid, u32 unused) |
@@ -522,6 +525,127 @@ static void gen8_initialize_pd(struct i915_address_space *vm, | |||
522 | fill_px(vm->dev, pd, scratch_pde); | 525 | fill_px(vm->dev, pd, scratch_pde); |
523 | } | 526 | } |
524 | 527 | ||
528 | static int __pdp_init(struct drm_device *dev, | ||
529 | struct i915_page_directory_pointer *pdp) | ||
530 | { | ||
531 | size_t pdpes = I915_PDPES_PER_PDP(dev); | ||
532 | |||
533 | pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes), | ||
534 | sizeof(unsigned long), | ||
535 | GFP_KERNEL); | ||
536 | if (!pdp->used_pdpes) | ||
537 | return -ENOMEM; | ||
538 | |||
539 | pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory), | ||
540 | GFP_KERNEL); | ||
541 | if (!pdp->page_directory) { | ||
542 | kfree(pdp->used_pdpes); | ||
543 | /* the PDP might be the statically allocated top level. Keep it | ||
544 | * as clean as possible */ | ||
545 | pdp->used_pdpes = NULL; | ||
546 | return -ENOMEM; | ||
547 | } | ||
548 | |||
549 | return 0; | ||
550 | } | ||
551 | |||
552 | static void __pdp_fini(struct i915_page_directory_pointer *pdp) | ||
553 | { | ||
554 | kfree(pdp->used_pdpes); | ||
555 | kfree(pdp->page_directory); | ||
556 | pdp->page_directory = NULL; | ||
557 | } | ||
558 | |||
559 | static struct | ||
560 | i915_page_directory_pointer *alloc_pdp(struct drm_device *dev) | ||
561 | { | ||
562 | struct i915_page_directory_pointer *pdp; | ||
563 | int ret = -ENOMEM; | ||
564 | |||
565 | WARN_ON(!USES_FULL_48BIT_PPGTT(dev)); | ||
566 | |||
567 | pdp = kzalloc(sizeof(*pdp), GFP_KERNEL); | ||
568 | if (!pdp) | ||
569 | return ERR_PTR(-ENOMEM); | ||
570 | |||
571 | ret = __pdp_init(dev, pdp); | ||
572 | if (ret) | ||
573 | goto fail_bitmap; | ||
574 | |||
575 | ret = setup_px(dev, pdp); | ||
576 | if (ret) | ||
577 | goto fail_page_m; | ||
578 | |||
579 | return pdp; | ||
580 | |||
581 | fail_page_m: | ||
582 | __pdp_fini(pdp); | ||
583 | fail_bitmap: | ||
584 | kfree(pdp); | ||
585 | |||
586 | return ERR_PTR(ret); | ||
587 | } | ||
588 | |||
589 | static void free_pdp(struct drm_device *dev, | ||
590 | struct i915_page_directory_pointer *pdp) | ||
591 | { | ||
592 | __pdp_fini(pdp); | ||
593 | if (USES_FULL_48BIT_PPGTT(dev)) { | ||
594 | cleanup_px(dev, pdp); | ||
595 | kfree(pdp); | ||
596 | } | ||
597 | } | ||
598 | |||
599 | static void gen8_initialize_pdp(struct i915_address_space *vm, | ||
600 | struct i915_page_directory_pointer *pdp) | ||
601 | { | ||
602 | gen8_ppgtt_pdpe_t scratch_pdpe; | ||
603 | |||
604 | scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC); | ||
605 | |||
606 | fill_px(vm->dev, pdp, scratch_pdpe); | ||
607 | } | ||
608 | |||
609 | static void gen8_initialize_pml4(struct i915_address_space *vm, | ||
610 | struct i915_pml4 *pml4) | ||
611 | { | ||
612 | gen8_ppgtt_pml4e_t scratch_pml4e; | ||
613 | |||
614 | scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp), | ||
615 | I915_CACHE_LLC); | ||
616 | |||
617 | fill_px(vm->dev, pml4, scratch_pml4e); | ||
618 | } | ||
619 | |||
620 | static void | ||
621 | gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt, | ||
622 | struct i915_page_directory_pointer *pdp, | ||
623 | struct i915_page_directory *pd, | ||
624 | int index) | ||
625 | { | ||
626 | gen8_ppgtt_pdpe_t *page_directorypo; | ||
627 | |||
628 | if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) | ||
629 | return; | ||
630 | |||
631 | page_directorypo = kmap_px(pdp); | ||
632 | page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC); | ||
633 | kunmap_px(ppgtt, page_directorypo); | ||
634 | } | ||
635 | |||
636 | static void | ||
637 | gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt, | ||
638 | struct i915_pml4 *pml4, | ||
639 | struct i915_page_directory_pointer *pdp, | ||
640 | int index) | ||
641 | { | ||
642 | gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4); | ||
643 | |||
644 | WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)); | ||
645 | pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC); | ||
646 | kunmap_px(ppgtt, pagemap); | ||
647 | } | ||
648 | |||
525 | /* Broadwell Page Directory Pointer Descriptors */ | 649 | /* Broadwell Page Directory Pointer Descriptors */ |
526 | static int gen8_write_pdp(struct drm_i915_gem_request *req, | 650 | static int gen8_write_pdp(struct drm_i915_gem_request *req, |
527 | unsigned entry, | 651 | unsigned entry, |
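The new helpers above split page-directory-pointer setup in two: __pdp_init()/__pdp_fini() manage only the usage bitmap and the directory array (the PDP may be the statically embedded top level of the ppgtt), while alloc_pdp()/free_pdp() additionally manage the backing page for PDPs allocated dynamically under 48-bit PPGTT. A minimal userspace sketch of the init/unwind pairing, using simplified stand-in types rather than the driver's structures:

#include <stdlib.h>

struct pdp_sketch {
	unsigned long *used_pdpes;	/* 1 bit per page directory */
	void **page_directory;		/* pdpes entries */
};

static int pdp_init_sketch(struct pdp_sketch *pdp, size_t pdpes)
{
	/* bitmap first; (bits + BITS_PER_LONG - 1) / BITS_PER_LONG words */
	pdp->used_pdpes = calloc((pdpes + 8 * sizeof(long) - 1) /
				 (8 * sizeof(long)), sizeof(unsigned long));
	if (!pdp->used_pdpes)
		return -1;

	pdp->page_directory = calloc(pdpes, sizeof(*pdp->page_directory));
	if (!pdp->page_directory) {
		free(pdp->used_pdpes);	/* unwind the first allocation */
		pdp->used_pdpes = NULL;
		return -1;
	}
	return 0;
}

static void pdp_fini_sketch(struct pdp_sketch *pdp)
{
	free(pdp->used_pdpes);
	free(pdp->page_directory);
	pdp->used_pdpes = NULL;
	pdp->page_directory = NULL;
}

int main(void)
{
	struct pdp_sketch pdp;

	if (pdp_init_sketch(&pdp, 512))	/* 512 PDPEs under 48-bit PPGTT */
		return 1;
	pdp_fini_sketch(&pdp);
	return 0;
}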
@@ -547,8 +671,8 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req, | |||
547 | return 0; | 671 | return 0; |
548 | } | 672 | } |
549 | 673 | ||
550 | static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, | 674 | static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt, |
551 | struct drm_i915_gem_request *req) | 675 | struct drm_i915_gem_request *req) |
552 | { | 676 | { |
553 | int i, ret; | 677 | int i, ret; |
554 | 678 | ||
@@ -563,31 +687,38 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, | |||
563 | return 0; | 687 | return 0; |
564 | } | 688 | } |
565 | 689 | ||
566 | static void gen8_ppgtt_clear_range(struct i915_address_space *vm, | 690 | static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt, |
567 | uint64_t start, | 691 | struct drm_i915_gem_request *req) |
568 | uint64_t length, | 692 | { |
569 | bool use_scratch) | 693 | return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4)); |
694 | } | ||
695 | |||
696 | static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm, | ||
697 | struct i915_page_directory_pointer *pdp, | ||
698 | uint64_t start, | ||
699 | uint64_t length, | ||
700 | gen8_pte_t scratch_pte) | ||
570 | { | 701 | { |
571 | struct i915_hw_ppgtt *ppgtt = | 702 | struct i915_hw_ppgtt *ppgtt = |
572 | container_of(vm, struct i915_hw_ppgtt, base); | 703 | container_of(vm, struct i915_hw_ppgtt, base); |
573 | gen8_pte_t *pt_vaddr, scratch_pte; | 704 | gen8_pte_t *pt_vaddr; |
574 | unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; | 705 | unsigned pdpe = gen8_pdpe_index(start); |
575 | unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; | 706 | unsigned pde = gen8_pde_index(start); |
576 | unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; | 707 | unsigned pte = gen8_pte_index(start); |
577 | unsigned num_entries = length >> PAGE_SHIFT; | 708 | unsigned num_entries = length >> PAGE_SHIFT; |
578 | unsigned last_pte, i; | 709 | unsigned last_pte, i; |
579 | 710 | ||
580 | scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page), | 711 | if (WARN_ON(!pdp)) |
581 | I915_CACHE_LLC, use_scratch); | 712 | return; |
582 | 713 | ||
583 | while (num_entries) { | 714 | while (num_entries) { |
584 | struct i915_page_directory *pd; | 715 | struct i915_page_directory *pd; |
585 | struct i915_page_table *pt; | 716 | struct i915_page_table *pt; |
586 | 717 | ||
587 | if (WARN_ON(!ppgtt->pdp.page_directory[pdpe])) | 718 | if (WARN_ON(!pdp->page_directory[pdpe])) |
588 | break; | 719 | break; |
589 | 720 | ||
590 | pd = ppgtt->pdp.page_directory[pdpe]; | 721 | pd = pdp->page_directory[pdpe]; |
591 | 722 | ||
592 | if (WARN_ON(!pd->page_table[pde])) | 723 | if (WARN_ON(!pd->page_table[pde])) |
593 | break; | 724 | break; |
@@ -612,45 +743,69 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, | |||
612 | 743 | ||
613 | pte = 0; | 744 | pte = 0; |
614 | if (++pde == I915_PDES) { | 745 | if (++pde == I915_PDES) { |
615 | pdpe++; | 746 | if (++pdpe == I915_PDPES_PER_PDP(vm->dev)) |
747 | break; | ||
616 | pde = 0; | 748 | pde = 0; |
617 | } | 749 | } |
618 | } | 750 | } |
619 | } | 751 | } |
620 | 752 | ||
621 | static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, | 753 | static void gen8_ppgtt_clear_range(struct i915_address_space *vm, |
622 | struct sg_table *pages, | 754 | uint64_t start, |
623 | uint64_t start, | 755 | uint64_t length, |
624 | enum i915_cache_level cache_level, u32 unused) | 756 | bool use_scratch) |
757 | { | ||
758 | struct i915_hw_ppgtt *ppgtt = | ||
759 | container_of(vm, struct i915_hw_ppgtt, base); | ||
760 | gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), | ||
761 | I915_CACHE_LLC, use_scratch); | ||
762 | |||
763 | if (!USES_FULL_48BIT_PPGTT(vm->dev)) { | ||
764 | gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length, | ||
765 | scratch_pte); | ||
766 | } else { | ||
767 | uint64_t templ4, pml4e; | ||
768 | struct i915_page_directory_pointer *pdp; | ||
769 | |||
770 | gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) { | ||
771 | gen8_ppgtt_clear_pte_range(vm, pdp, start, length, | ||
772 | scratch_pte); | ||
773 | } | ||
774 | } | ||
775 | } | ||
776 | |||
777 | static void | ||
778 | gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, | ||
779 | struct i915_page_directory_pointer *pdp, | ||
780 | struct sg_page_iter *sg_iter, | ||
781 | uint64_t start, | ||
782 | enum i915_cache_level cache_level) | ||
625 | { | 783 | { |
626 | struct i915_hw_ppgtt *ppgtt = | 784 | struct i915_hw_ppgtt *ppgtt = |
627 | container_of(vm, struct i915_hw_ppgtt, base); | 785 | container_of(vm, struct i915_hw_ppgtt, base); |
628 | gen8_pte_t *pt_vaddr; | 786 | gen8_pte_t *pt_vaddr; |
629 | unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; | 787 | unsigned pdpe = gen8_pdpe_index(start); |
630 | unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; | 788 | unsigned pde = gen8_pde_index(start); |
631 | unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; | 789 | unsigned pte = gen8_pte_index(start); |
632 | struct sg_page_iter sg_iter; | ||
633 | 790 | ||
634 | pt_vaddr = NULL; | 791 | pt_vaddr = NULL; |
635 | 792 | ||
636 | for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { | 793 | while (__sg_page_iter_next(sg_iter)) { |
637 | if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES)) | ||
638 | break; | ||
639 | |||
640 | if (pt_vaddr == NULL) { | 794 | if (pt_vaddr == NULL) { |
641 | struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe]; | 795 | struct i915_page_directory *pd = pdp->page_directory[pdpe]; |
642 | struct i915_page_table *pt = pd->page_table[pde]; | 796 | struct i915_page_table *pt = pd->page_table[pde]; |
643 | pt_vaddr = kmap_px(pt); | 797 | pt_vaddr = kmap_px(pt); |
644 | } | 798 | } |
645 | 799 | ||
646 | pt_vaddr[pte] = | 800 | pt_vaddr[pte] = |
647 | gen8_pte_encode(sg_page_iter_dma_address(&sg_iter), | 801 | gen8_pte_encode(sg_page_iter_dma_address(sg_iter), |
648 | cache_level, true); | 802 | cache_level, true); |
649 | if (++pte == GEN8_PTES) { | 803 | if (++pte == GEN8_PTES) { |
650 | kunmap_px(ppgtt, pt_vaddr); | 804 | kunmap_px(ppgtt, pt_vaddr); |
651 | pt_vaddr = NULL; | 805 | pt_vaddr = NULL; |
652 | if (++pde == I915_PDES) { | 806 | if (++pde == I915_PDES) { |
653 | pdpe++; | 807 | if (++pdpe == I915_PDPES_PER_PDP(vm->dev)) |
808 | break; | ||
654 | pde = 0; | 809 | pde = 0; |
655 | } | 810 | } |
656 | pte = 0; | 811 | pte = 0; |
@@ -661,6 +816,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, | |||
661 | kunmap_px(ppgtt, pt_vaddr); | 816 | kunmap_px(ppgtt, pt_vaddr); |
662 | } | 817 | } |
663 | 818 | ||
819 | static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, | ||
820 | struct sg_table *pages, | ||
821 | uint64_t start, | ||
822 | enum i915_cache_level cache_level, | ||
823 | u32 unused) | ||
824 | { | ||
825 | struct i915_hw_ppgtt *ppgtt = | ||
826 | container_of(vm, struct i915_hw_ppgtt, base); | ||
827 | struct sg_page_iter sg_iter; | ||
828 | |||
829 | __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0); | ||
830 | |||
831 | if (!USES_FULL_48BIT_PPGTT(vm->dev)) { | ||
832 | gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start, | ||
833 | cache_level); | ||
834 | } else { | ||
835 | struct i915_page_directory_pointer *pdp; | ||
836 | uint64_t templ4, pml4e; | ||
837 | uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT; | ||
838 | |||
839 | gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) { | ||
840 | gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, | ||
841 | start, cache_level); | ||
842 | } | ||
843 | } | ||
844 | } | ||
845 | |||
664 | static void gen8_free_page_tables(struct drm_device *dev, | 846 | static void gen8_free_page_tables(struct drm_device *dev, |
665 | struct i915_page_directory *pd) | 847 | struct i915_page_directory *pd) |
666 | { | 848 | { |
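With the hunks above, gen8_ppgtt_clear_range() and gen8_ppgtt_insert_entries() dispatch on USES_FULL_48BIT_PPGTT(): the leaf helpers operate on a single page-directory pointer, and the 4-level wrappers walk every 512GiB PML4 slot the range touches (the shared sg iterator lets insertion resume where the previous chunk stopped). A standalone sketch of the range-chunking idea, with a printf standing in for the per-PDP work:

#include <stdint.h>
#include <stdio.h>

#define PML4E_SHIFT 39	/* each PML4 entry spans 1ULL << 39 = 512GiB */

static void clear_in_pdp(unsigned pml4e, uint64_t start, uint64_t len)
{
	printf("pml4e %u: clear [0x%llx, +0x%llx)\n", pml4e,
	       (unsigned long long)start, (unsigned long long)len);
}

static void clear_range(uint64_t start, uint64_t length, int full_48bit)
{
	if (!full_48bit) {		/* legacy: one statically embedded PDP */
		clear_in_pdp(0, start, length);
		return;
	}
	while (length) {		/* 4-level: one chunk per PML4 entry */
		uint64_t boundary = (start >> PML4E_SHIFT << PML4E_SHIFT)
				    + (1ULL << PML4E_SHIFT);
		uint64_t chunk = boundary - start;

		if (chunk > length)
			chunk = length;
		clear_in_pdp(start >> PML4E_SHIFT, start, chunk);
		start += chunk;
		length -= chunk;
	}
}

int main(void)
{
	/* a range straddling the first 512GiB boundary */
	clear_range((1ULL << 39) - 4096, 3 * 4096, 1);
	return 0;
}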
@@ -699,8 +881,55 @@ static int gen8_init_scratch(struct i915_address_space *vm) | |||
699 | return PTR_ERR(vm->scratch_pd); | 881 | return PTR_ERR(vm->scratch_pd); |
700 | } | 882 | } |
701 | 883 | ||
884 | if (USES_FULL_48BIT_PPGTT(dev)) { | ||
885 | vm->scratch_pdp = alloc_pdp(dev); | ||
886 | if (IS_ERR(vm->scratch_pdp)) { | ||
887 | free_pd(dev, vm->scratch_pd); | ||
888 | free_pt(dev, vm->scratch_pt); | ||
889 | free_scratch_page(dev, vm->scratch_page); | ||
890 | return PTR_ERR(vm->scratch_pdp); | ||
891 | } | ||
892 | } | ||
893 | |||
702 | gen8_initialize_pt(vm, vm->scratch_pt); | 894 | gen8_initialize_pt(vm, vm->scratch_pt); |
703 | gen8_initialize_pd(vm, vm->scratch_pd); | 895 | gen8_initialize_pd(vm, vm->scratch_pd); |
896 | if (USES_FULL_48BIT_PPGTT(dev)) | ||
897 | gen8_initialize_pdp(vm, vm->scratch_pdp); | ||
898 | |||
899 | return 0; | ||
900 | } | ||
901 | |||
902 | static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) | ||
903 | { | ||
904 | enum vgt_g2v_type msg; | ||
905 | struct drm_device *dev = ppgtt->base.dev; | ||
906 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
907 | unsigned int offset = vgtif_reg(pdp0_lo); | ||
908 | int i; | ||
909 | |||
910 | if (USES_FULL_48BIT_PPGTT(dev)) { | ||
911 | u64 daddr = px_dma(&ppgtt->pml4); | ||
912 | |||
913 | I915_WRITE(offset, lower_32_bits(daddr)); | ||
914 | I915_WRITE(offset + 4, upper_32_bits(daddr)); | ||
915 | |||
916 | msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : | ||
917 | VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); | ||
918 | } else { | ||
919 | for (i = 0; i < GEN8_LEGACY_PDPES; i++) { | ||
920 | u64 daddr = i915_page_dir_dma_addr(ppgtt, i); | ||
921 | |||
922 | I915_WRITE(offset, lower_32_bits(daddr)); | ||
923 | I915_WRITE(offset + 4, upper_32_bits(daddr)); | ||
924 | |||
925 | offset += 8; | ||
926 | } | ||
927 | |||
928 | msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : | ||
929 | VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); | ||
930 | } | ||
931 | |||
932 | I915_WRITE(vgtif_reg(g2v_notify), msg); | ||
704 | 933 | ||
705 | return 0; | 934 | return 0; |
706 | } | 935 | } |
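gen8_ppgtt_notify_vgt() above is the guest half of the vGPU page-table handshake: the driver publishes the DMA address of its top-level table(s) through para-virtualized vgtif registers, splitting each 64-bit address into two 32-bit writes, then posts a create/destroy message via g2v_notify so the host can shadow the tables. A userspace sketch of that sequence; the register offsets and message codes here are illustrative placeholders, not the driver's values:

#include <stdint.h>
#include <stdio.h>

static uint32_t vgtif_regs[64];		/* stand-in for the vgtif MMIO page */

static void reg_write(unsigned off, uint32_t val)
{
	vgtif_regs[off / 4] = val;
}

static void notify_vgt(uint64_t pml4_daddr, int create)
{
	unsigned pdp0_lo = 0x00;	/* hypothetical register offsets */
	unsigned g2v_notify = 0x20;

	reg_write(pdp0_lo,     (uint32_t)pml4_daddr);	      /* lower 32 bits */
	reg_write(pdp0_lo + 4, (uint32_t)(pml4_daddr >> 32)); /* upper 32 bits */
	reg_write(g2v_notify, create ? 1 /* L4 create */ : 2 /* L4 destroy */);
}

int main(void)
{
	notify_vgt(0x123456789000ULL, 1);
	printf("lo=0x%08x hi=0x%08x msg=%u\n",
	       vgtif_regs[0], vgtif_regs[1], vgtif_regs[8]);
	return 0;
}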
@@ -709,35 +938,65 @@ static void gen8_free_scratch(struct i915_address_space *vm) | |||
709 | { | 938 | { |
710 | struct drm_device *dev = vm->dev; | 939 | struct drm_device *dev = vm->dev; |
711 | 940 | ||
941 | if (USES_FULL_48BIT_PPGTT(dev)) | ||
942 | free_pdp(dev, vm->scratch_pdp); | ||
712 | free_pd(dev, vm->scratch_pd); | 943 | free_pd(dev, vm->scratch_pd); |
713 | free_pt(dev, vm->scratch_pt); | 944 | free_pt(dev, vm->scratch_pt); |
714 | free_scratch_page(dev, vm->scratch_page); | 945 | free_scratch_page(dev, vm->scratch_page); |
715 | } | 946 | } |
716 | 947 | ||
717 | static void gen8_ppgtt_cleanup(struct i915_address_space *vm) | 948 | static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev, |
949 | struct i915_page_directory_pointer *pdp) | ||
950 | { | ||
951 | int i; | ||
952 | |||
953 | for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) { | ||
954 | if (WARN_ON(!pdp->page_directory[i])) | ||
955 | continue; | ||
956 | |||
957 | gen8_free_page_tables(dev, pdp->page_directory[i]); | ||
958 | free_pd(dev, pdp->page_directory[i]); | ||
959 | } | ||
960 | |||
961 | free_pdp(dev, pdp); | ||
962 | } | ||
963 | |||
964 | static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) | ||
718 | { | 965 | { |
719 | struct i915_hw_ppgtt *ppgtt = | ||
720 | container_of(vm, struct i915_hw_ppgtt, base); | ||
721 | int i; | 966 | int i; |
722 | 967 | ||
723 | for_each_set_bit(i, ppgtt->pdp.used_pdpes, GEN8_LEGACY_PDPES) { | 968 | for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) { |
724 | if (WARN_ON(!ppgtt->pdp.page_directory[i])) | 969 | if (WARN_ON(!ppgtt->pml4.pdps[i])) |
725 | continue; | 970 | continue; |
726 | 971 | ||
727 | gen8_free_page_tables(ppgtt->base.dev, | 972 | gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]); |
728 | ppgtt->pdp.page_directory[i]); | ||
729 | free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]); | ||
730 | } | 973 | } |
731 | 974 | ||
975 | cleanup_px(ppgtt->base.dev, &ppgtt->pml4); | ||
976 | } | ||
977 | |||
978 | static void gen8_ppgtt_cleanup(struct i915_address_space *vm) | ||
979 | { | ||
980 | struct i915_hw_ppgtt *ppgtt = | ||
981 | container_of(vm, struct i915_hw_ppgtt, base); | ||
982 | |||
983 | if (intel_vgpu_active(vm->dev)) | ||
984 | gen8_ppgtt_notify_vgt(ppgtt, false); | ||
985 | |||
986 | if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) | ||
987 | gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp); | ||
988 | else | ||
989 | gen8_ppgtt_cleanup_4lvl(ppgtt); | ||
990 | |||
732 | gen8_free_scratch(vm); | 991 | gen8_free_scratch(vm); |
733 | } | 992 | } |
734 | 993 | ||
735 | /** | 994 | /** |
736 | * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range. | 995 | * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range. |
737 | * @ppgtt: Master ppgtt structure. | 996 | * @vm: Master vm structure. |
738 | * @pd: Page directory for this address range. | 997 | * @pd: Page directory for this address range. |
739 | * @start: Starting virtual address to begin allocations. | 998 | * @start: Starting virtual address to begin allocations. |
740 | * @length Size of the allocations. | 999 | * @length: Size of the allocations. |
741 | * @new_pts: Bitmap set by function with new allocations. Likely used by the | 1000 | * @new_pts: Bitmap set by function with new allocations. Likely used by the |
742 | * caller to free on error. | 1001 | * caller to free on error. |
743 | * | 1002 | * |
@@ -750,22 +1009,22 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) | |||
750 | * | 1009 | * |
751 | * Return: 0 if success; negative error code otherwise. | 1010 | * Return: 0 if success; negative error code otherwise. |
752 | */ | 1011 | */ |
753 | static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt, | 1012 | static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm, |
754 | struct i915_page_directory *pd, | 1013 | struct i915_page_directory *pd, |
755 | uint64_t start, | 1014 | uint64_t start, |
756 | uint64_t length, | 1015 | uint64_t length, |
757 | unsigned long *new_pts) | 1016 | unsigned long *new_pts) |
758 | { | 1017 | { |
759 | struct drm_device *dev = ppgtt->base.dev; | 1018 | struct drm_device *dev = vm->dev; |
760 | struct i915_page_table *pt; | 1019 | struct i915_page_table *pt; |
761 | uint64_t temp; | 1020 | uint64_t temp; |
762 | uint32_t pde; | 1021 | uint32_t pde; |
763 | 1022 | ||
764 | gen8_for_each_pde(pt, pd, start, length, temp, pde) { | 1023 | gen8_for_each_pde(pt, pd, start, length, temp, pde) { |
765 | /* Don't reallocate page tables */ | 1024 | /* Don't reallocate page tables */ |
766 | if (pt) { | 1025 | if (test_bit(pde, pd->used_pdes)) { |
767 | /* Scratch is never allocated this way */ | 1026 | /* Scratch is never allocated this way */ |
768 | WARN_ON(pt == ppgtt->base.scratch_pt); | 1027 | WARN_ON(pt == vm->scratch_pt); |
769 | continue; | 1028 | continue; |
770 | } | 1029 | } |
771 | 1030 | ||
@@ -773,9 +1032,10 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt, | |||
773 | if (IS_ERR(pt)) | 1032 | if (IS_ERR(pt)) |
774 | goto unwind_out; | 1033 | goto unwind_out; |
775 | 1034 | ||
776 | gen8_initialize_pt(&ppgtt->base, pt); | 1035 | gen8_initialize_pt(vm, pt); |
777 | pd->page_table[pde] = pt; | 1036 | pd->page_table[pde] = pt; |
778 | __set_bit(pde, new_pts); | 1037 | __set_bit(pde, new_pts); |
1038 | trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT); | ||
779 | } | 1039 | } |
780 | 1040 | ||
781 | return 0; | 1041 | return 0; |
@@ -789,11 +1049,11 @@ unwind_out: | |||
789 | 1049 | ||
790 | /** | 1050 | /** |
791 | * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range. | 1051 | * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range. |
792 | * @ppgtt: Master ppgtt structure. | 1052 | * @vm: Master vm structure. |
793 | * @pdp: Page directory pointer for this address range. | 1053 | * @pdp: Page directory pointer for this address range. |
794 | * @start: Starting virtual address to begin allocations. | 1054 | * @start: Starting virtual address to begin allocations. |
795 | * @length Size of the allocations. | 1055 | * @length: Size of the allocations. |
796 | * @new_pds Bitmap set by function with new allocations. Likely used by the | 1056 | * @new_pds: Bitmap set by function with new allocations. Likely used by the |
797 | * caller to free on error. | 1057 | * caller to free on error. |
798 | * | 1058 | * |
799 | * Allocate the required number of page directories starting at the pde index of | 1059 | * Allocate the required number of page directories starting at the pde index of |
@@ -810,48 +1070,102 @@ unwind_out: | |||
810 | * | 1070 | * |
811 | * Return: 0 if success; negative error code otherwise. | 1071 | * Return: 0 if success; negative error code otherwise. |
812 | */ | 1072 | */ |
813 | static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt, | 1073 | static int |
814 | struct i915_page_directory_pointer *pdp, | 1074 | gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm, |
815 | uint64_t start, | 1075 | struct i915_page_directory_pointer *pdp, |
816 | uint64_t length, | 1076 | uint64_t start, |
817 | unsigned long *new_pds) | 1077 | uint64_t length, |
1078 | unsigned long *new_pds) | ||
818 | { | 1079 | { |
819 | struct drm_device *dev = ppgtt->base.dev; | 1080 | struct drm_device *dev = vm->dev; |
820 | struct i915_page_directory *pd; | 1081 | struct i915_page_directory *pd; |
821 | uint64_t temp; | 1082 | uint64_t temp; |
822 | uint32_t pdpe; | 1083 | uint32_t pdpe; |
1084 | uint32_t pdpes = I915_PDPES_PER_PDP(dev); | ||
823 | 1085 | ||
824 | WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES)); | 1086 | WARN_ON(!bitmap_empty(new_pds, pdpes)); |
825 | 1087 | ||
826 | gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { | 1088 | gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { |
827 | if (pd) | 1089 | if (test_bit(pdpe, pdp->used_pdpes)) |
828 | continue; | 1090 | continue; |
829 | 1091 | ||
830 | pd = alloc_pd(dev); | 1092 | pd = alloc_pd(dev); |
831 | if (IS_ERR(pd)) | 1093 | if (IS_ERR(pd)) |
832 | goto unwind_out; | 1094 | goto unwind_out; |
833 | 1095 | ||
834 | gen8_initialize_pd(&ppgtt->base, pd); | 1096 | gen8_initialize_pd(vm, pd); |
835 | pdp->page_directory[pdpe] = pd; | 1097 | pdp->page_directory[pdpe] = pd; |
836 | __set_bit(pdpe, new_pds); | 1098 | __set_bit(pdpe, new_pds); |
1099 | trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT); | ||
837 | } | 1100 | } |
838 | 1101 | ||
839 | return 0; | 1102 | return 0; |
840 | 1103 | ||
841 | unwind_out: | 1104 | unwind_out: |
842 | for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES) | 1105 | for_each_set_bit(pdpe, new_pds, pdpes) |
843 | free_pd(dev, pdp->page_directory[pdpe]); | 1106 | free_pd(dev, pdp->page_directory[pdpe]); |
844 | 1107 | ||
845 | return -ENOMEM; | 1108 | return -ENOMEM; |
846 | } | 1109 | } |
847 | 1110 | ||
848 | static void | 1111 | /** |
849 | free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long **new_pts) | 1112 | * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range. |
1113 | * @vm: Master vm structure. | ||
1114 | * @pml4: Page map level 4 for this address range. | ||
1115 | * @start: Starting virtual address to begin allocations. | ||
1116 | * @length: Size of the allocations. | ||
1117 | * @new_pdps: Bitmap set by function with new allocations. Likely used by the | ||
1118 | * caller to free on error. | ||
1119 | * | ||
1120 | * Allocate the required number of page directory pointers. Extremely similar to | ||
1121 | * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs(). | ||
1122 | * The main difference is that here we are limited by the pml4 boundary (instead of | ||

1123 | * the page directory pointer). | ||
1124 | * | ||
1125 | * Return: 0 if success; negative error code otherwise. | ||
1126 | */ | ||
1127 | static int | ||
1128 | gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm, | ||
1129 | struct i915_pml4 *pml4, | ||
1130 | uint64_t start, | ||
1131 | uint64_t length, | ||
1132 | unsigned long *new_pdps) | ||
850 | { | 1133 | { |
851 | int i; | 1134 | struct drm_device *dev = vm->dev; |
1135 | struct i915_page_directory_pointer *pdp; | ||
1136 | uint64_t temp; | ||
1137 | uint32_t pml4e; | ||
1138 | |||
1139 | WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4)); | ||
1140 | |||
1141 | gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) { | ||
1142 | if (!test_bit(pml4e, pml4->used_pml4es)) { | ||
1143 | pdp = alloc_pdp(dev); | ||
1144 | if (IS_ERR(pdp)) | ||
1145 | goto unwind_out; | ||
1146 | |||
1147 | gen8_initialize_pdp(vm, pdp); | ||
1148 | pml4->pdps[pml4e] = pdp; | ||
1149 | __set_bit(pml4e, new_pdps); | ||
1150 | trace_i915_page_directory_pointer_entry_alloc(vm, | ||
1151 | pml4e, | ||
1152 | start, | ||
1153 | GEN8_PML4E_SHIFT); | ||
1154 | } | ||
1155 | } | ||
1156 | |||
1157 | return 0; | ||
1158 | |||
1159 | unwind_out: | ||
1160 | for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4) | ||
1161 | free_pdp(dev, pml4->pdps[pml4e]); | ||
1162 | |||
1163 | return -ENOMEM; | ||
1164 | } | ||
852 | 1165 | ||
853 | for (i = 0; i < GEN8_LEGACY_PDPES; i++) | 1166 | static void |
854 | kfree(new_pts[i]); | 1167 | free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts) |
1168 | { | ||
855 | kfree(new_pts); | 1169 | kfree(new_pts); |
856 | kfree(new_pds); | 1170 | kfree(new_pds); |
857 | } | 1171 | } |
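Both allocators above share one unwind discipline: entries that already exist are skipped, entries created by this call are recorded in a caller-supplied scratch bitmap (new_pds/new_pdps), and on failure only the recorded entries are freed, leaving pre-existing state untouched. A compact standalone sketch of the pattern, with byte flags standing in for the bitmap:

#include <stdlib.h>
#include <string.h>

#define NENTRIES 8

static int alloc_range(void *table[NENTRIES], int from, int to)
{
	unsigned char is_new[NENTRIES];	/* stand-in for the new_pds bitmap */
	int i;

	memset(is_new, 0, sizeof(is_new));

	for (i = from; i < to; i++) {
		if (table[i])		/* already allocated: skip */
			continue;
		table[i] = malloc(64);
		if (!table[i])
			goto unwind;
		is_new[i] = 1;		/* like __set_bit(i, new_pds) */
	}
	return 0;

unwind:
	for (i = 0; i < NENTRIES; i++)	/* free only what this call created */
		if (is_new[i]) {
			free(table[i]);
			table[i] = NULL;
		}
	return -1;
}

int main(void)
{
	void *table[NENTRIES] = { 0 };
	int ret = alloc_range(table, 2, 6);

	for (int i = 0; i < NENTRIES; i++)
		free(table[i]);
	return ret ? 1 : 0;
}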
@@ -861,28 +1175,20 @@ free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long **new_pts) | |||
861 | */ | 1175 | */ |
862 | static | 1176 | static |
863 | int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds, | 1177 | int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds, |
864 | unsigned long ***new_pts) | 1178 | unsigned long **new_pts, |
1179 | uint32_t pdpes) | ||
865 | { | 1180 | { |
866 | int i; | ||
867 | unsigned long *pds; | 1181 | unsigned long *pds; |
868 | unsigned long **pts; | 1182 | unsigned long *pts; |
869 | 1183 | ||
870 | pds = kcalloc(BITS_TO_LONGS(GEN8_LEGACY_PDPES), sizeof(unsigned long), GFP_KERNEL); | 1184 | pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY); |
871 | if (!pds) | 1185 | if (!pds) |
872 | return -ENOMEM; | 1186 | return -ENOMEM; |
873 | 1187 | ||
874 | pts = kcalloc(GEN8_LEGACY_PDPES, sizeof(unsigned long *), GFP_KERNEL); | 1188 | pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long), |
875 | if (!pts) { | 1189 | GFP_TEMPORARY); |
876 | kfree(pds); | 1190 | if (!pts) |
877 | return -ENOMEM; | 1191 | goto err_out; |
878 | } | ||
879 | |||
880 | for (i = 0; i < GEN8_LEGACY_PDPES; i++) { | ||
881 | pts[i] = kcalloc(BITS_TO_LONGS(I915_PDES), | ||
882 | sizeof(unsigned long), GFP_KERNEL); | ||
883 | if (!pts[i]) | ||
884 | goto err_out; | ||
885 | } | ||
886 | 1192 | ||
887 | *new_pds = pds; | 1193 | *new_pds = pds; |
888 | *new_pts = pts; | 1194 | *new_pts = pts; |
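alloc_gen8_temp_bitmaps() above no longer builds an array of per-PDPE bitmaps; it makes one contiguous allocation, and callers index row pdpe as new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES), which also turns the unwind path in gen8_alloc_va_range_3lvl() into a plain pointer offset. A standalone sketch of the flattened layout:

#include <stdio.h>
#include <stdlib.h>

#define I915_PDES	512
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned pdpes = 4;	/* legacy; 512 under full 48-bit PPGTT */
	unsigned long *pts = calloc(pdpes,
			BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long));
	if (!pts)
		return 1;

	/* row for page directory 2: mark page table 7 as newly allocated */
	unsigned long *row = pts + 2 * BITS_TO_LONGS(I915_PDES);
	row[7 / BITS_PER_LONG] |= 1UL << (7 % BITS_PER_LONG);

	printf("pde 7 of pdpe 2 marked: %d\n",
	       !!(row[7 / BITS_PER_LONG] & (1UL << (7 % BITS_PER_LONG))));
	free(pts);
	return 0;
}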
@@ -904,18 +1210,21 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) | |||
904 | ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask; | 1210 | ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask; |
905 | } | 1211 | } |
906 | 1212 | ||
907 | static int gen8_alloc_va_range(struct i915_address_space *vm, | 1213 | static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, |
908 | uint64_t start, | 1214 | struct i915_page_directory_pointer *pdp, |
909 | uint64_t length) | 1215 | uint64_t start, |
1216 | uint64_t length) | ||
910 | { | 1217 | { |
911 | struct i915_hw_ppgtt *ppgtt = | 1218 | struct i915_hw_ppgtt *ppgtt = |
912 | container_of(vm, struct i915_hw_ppgtt, base); | 1219 | container_of(vm, struct i915_hw_ppgtt, base); |
913 | unsigned long *new_page_dirs, **new_page_tables; | 1220 | unsigned long *new_page_dirs, *new_page_tables; |
1221 | struct drm_device *dev = vm->dev; | ||
914 | struct i915_page_directory *pd; | 1222 | struct i915_page_directory *pd; |
915 | const uint64_t orig_start = start; | 1223 | const uint64_t orig_start = start; |
916 | const uint64_t orig_length = length; | 1224 | const uint64_t orig_length = length; |
917 | uint64_t temp; | 1225 | uint64_t temp; |
918 | uint32_t pdpe; | 1226 | uint32_t pdpe; |
1227 | uint32_t pdpes = I915_PDPES_PER_PDP(dev); | ||
919 | int ret; | 1228 | int ret; |
920 | 1229 | ||
921 | /* Wrap is never okay since we can only represent 48b, and we don't | 1230 | /* Wrap is never okay since we can only represent 48b, and we don't |
@@ -924,25 +1233,25 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, | |||
924 | if (WARN_ON(start + length < start)) | 1233 | if (WARN_ON(start + length < start)) |
925 | return -ENODEV; | 1234 | return -ENODEV; |
926 | 1235 | ||
927 | if (WARN_ON(start + length > ppgtt->base.total)) | 1236 | if (WARN_ON(start + length > vm->total)) |
928 | return -ENODEV; | 1237 | return -ENODEV; |
929 | 1238 | ||
930 | ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables); | 1239 | ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes); |
931 | if (ret) | 1240 | if (ret) |
932 | return ret; | 1241 | return ret; |
933 | 1242 | ||
934 | /* Do the allocations first so we can easily bail out */ | 1243 | /* Do the allocations first so we can easily bail out */ |
935 | ret = gen8_ppgtt_alloc_page_directories(ppgtt, &ppgtt->pdp, start, length, | 1244 | ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length, |
936 | new_page_dirs); | 1245 | new_page_dirs); |
937 | if (ret) { | 1246 | if (ret) { |
938 | free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); | 1247 | free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); |
939 | return ret; | 1248 | return ret; |
940 | } | 1249 | } |
941 | 1250 | ||
942 | /* For every page directory referenced, allocate page tables */ | 1251 | /* For every page directory referenced, allocate page tables */ |
943 | gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) { | 1252 | gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { |
944 | ret = gen8_ppgtt_alloc_pagetabs(ppgtt, pd, start, length, | 1253 | ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length, |
945 | new_page_tables[pdpe]); | 1254 | new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES)); |
946 | if (ret) | 1255 | if (ret) |
947 | goto err_out; | 1256 | goto err_out; |
948 | } | 1257 | } |
@@ -952,10 +1261,10 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, | |||
952 | 1261 | ||
953 | /* Allocations have completed successfully, so set the bitmaps, and do | 1262 | /* Allocations have completed successfully, so set the bitmaps, and do |
954 | * the mappings. */ | 1263 | * the mappings. */ |
955 | gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) { | 1264 | gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { |
956 | gen8_pde_t *const page_directory = kmap_px(pd); | 1265 | gen8_pde_t *const page_directory = kmap_px(pd); |
957 | struct i915_page_table *pt; | 1266 | struct i915_page_table *pt; |
958 | uint64_t pd_len = gen8_clamp_pd(start, length); | 1267 | uint64_t pd_len = length; |
959 | uint64_t pd_start = start; | 1268 | uint64_t pd_start = start; |
960 | uint32_t pde; | 1269 | uint32_t pde; |
961 | 1270 | ||
@@ -979,14 +1288,18 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, | |||
979 | /* Map the PDE to the page table */ | 1288 | /* Map the PDE to the page table */ |
980 | page_directory[pde] = gen8_pde_encode(px_dma(pt), | 1289 | page_directory[pde] = gen8_pde_encode(px_dma(pt), |
981 | I915_CACHE_LLC); | 1290 | I915_CACHE_LLC); |
1291 | trace_i915_page_table_entry_map(&ppgtt->base, pde, pt, | ||
1292 | gen8_pte_index(start), | ||
1293 | gen8_pte_count(start, length), | ||
1294 | GEN8_PTES); | ||
982 | 1295 | ||
983 | /* NB: We haven't yet mapped ptes to pages. At this | 1296 | /* NB: We haven't yet mapped ptes to pages. At this |
984 | * point we're still relying on insert_entries() */ | 1297 | * point we're still relying on insert_entries() */ |
985 | } | 1298 | } |
986 | 1299 | ||
987 | kunmap_px(ppgtt, page_directory); | 1300 | kunmap_px(ppgtt, page_directory); |
988 | 1301 | __set_bit(pdpe, pdp->used_pdpes); | |
989 | __set_bit(pdpe, ppgtt->pdp.used_pdpes); | 1302 | gen8_setup_page_directory(ppgtt, pdp, pd, pdpe); |
990 | } | 1303 | } |
991 | 1304 | ||
992 | free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); | 1305 | free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); |
@@ -995,18 +1308,191 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, | |||
995 | 1308 | ||
996 | err_out: | 1309 | err_out: |
997 | while (pdpe--) { | 1310 | while (pdpe--) { |
998 | for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES) | 1311 | for_each_set_bit(temp, new_page_tables + pdpe * |
999 | free_pt(vm->dev, ppgtt->pdp.page_directory[pdpe]->page_table[temp]); | 1312 | BITS_TO_LONGS(I915_PDES), I915_PDES) |
1313 | free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]); | ||
1000 | } | 1314 | } |
1001 | 1315 | ||
1002 | for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES) | 1316 | for_each_set_bit(pdpe, new_page_dirs, pdpes) |
1003 | free_pd(vm->dev, ppgtt->pdp.page_directory[pdpe]); | 1317 | free_pd(dev, pdp->page_directory[pdpe]); |
1004 | 1318 | ||
1005 | free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); | 1319 | free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); |
1006 | mark_tlbs_dirty(ppgtt); | 1320 | mark_tlbs_dirty(ppgtt); |
1007 | return ret; | 1321 | return ret; |
1008 | } | 1322 | } |
1009 | 1323 | ||
1324 | static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm, | ||
1325 | struct i915_pml4 *pml4, | ||
1326 | uint64_t start, | ||
1327 | uint64_t length) | ||
1328 | { | ||
1329 | DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4); | ||
1330 | struct i915_hw_ppgtt *ppgtt = | ||
1331 | container_of(vm, struct i915_hw_ppgtt, base); | ||
1332 | struct i915_page_directory_pointer *pdp; | ||
1333 | uint64_t temp, pml4e; | ||
1334 | int ret = 0; | ||
1335 | |||
1336 | /* Do the pml4 allocations first, so we don't need to track the newly | ||
1337 | * allocated tables below the pdp */ | ||
1338 | bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4); | ||
1339 | |||
1340 | /* The page directory and page table allocations are done in the shared 3 | ||
1341 | * and 4 level code. Just allocate the pdps. | ||
1342 | */ | ||
1343 | ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length, | ||
1344 | new_pdps); | ||
1345 | if (ret) | ||
1346 | return ret; | ||
1347 | |||
1348 | WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2, | ||
1349 | "The allocation has spanned more than 512GB. " | ||
1350 | "It is highly likely this is incorrect."); | ||
1351 | |||
1352 | gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) { | ||
1353 | WARN_ON(!pdp); | ||
1354 | |||
1355 | ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length); | ||
1356 | if (ret) | ||
1357 | goto err_out; | ||
1358 | |||
1359 | gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e); | ||
1360 | } | ||
1361 | |||
1362 | bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es, | ||
1363 | GEN8_PML4ES_PER_PML4); | ||
1364 | |||
1365 | return 0; | ||
1366 | |||
1367 | err_out: | ||
1368 | for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4) | ||
1369 | gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]); | ||
1370 | |||
1371 | return ret; | ||
1372 | } | ||
1373 | |||
1374 | static int gen8_alloc_va_range(struct i915_address_space *vm, | ||
1375 | uint64_t start, uint64_t length) | ||
1376 | { | ||
1377 | struct i915_hw_ppgtt *ppgtt = | ||
1378 | container_of(vm, struct i915_hw_ppgtt, base); | ||
1379 | |||
1380 | if (USES_FULL_48BIT_PPGTT(vm->dev)) | ||
1381 | return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length); | ||
1382 | else | ||
1383 | return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length); | ||
1384 | } | ||
1385 | |||
1386 | static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp, | ||
1387 | uint64_t start, uint64_t length, | ||
1388 | gen8_pte_t scratch_pte, | ||
1389 | struct seq_file *m) | ||
1390 | { | ||
1391 | struct i915_page_directory *pd; | ||
1392 | uint64_t temp; | ||
1393 | uint32_t pdpe; | ||
1394 | |||
1395 | gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { | ||
1396 | struct i915_page_table *pt; | ||
1397 | uint64_t pd_len = length; | ||
1398 | uint64_t pd_start = start; | ||
1399 | uint32_t pde; | ||
1400 | |||
1401 | if (!test_bit(pdpe, pdp->used_pdpes)) | ||
1402 | continue; | ||
1403 | |||
1404 | seq_printf(m, "\tPDPE #%d\n", pdpe); | ||
1405 | gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) { | ||
1406 | uint32_t pte; | ||
1407 | gen8_pte_t *pt_vaddr; | ||
1408 | |||
1409 | if (!test_bit(pde, pd->used_pdes)) | ||
1410 | continue; | ||
1411 | |||
1412 | pt_vaddr = kmap_px(pt); | ||
1413 | for (pte = 0; pte < GEN8_PTES; pte += 4) { | ||
1414 | uint64_t va = | ||
1415 | ((uint64_t)pdpe << GEN8_PDPE_SHIFT) | | ||
1416 | (pde << GEN8_PDE_SHIFT) | | ||
1417 | (pte << GEN8_PTE_SHIFT); | ||
1418 | int i; | ||
1419 | bool found = false; | ||
1420 | |||
1421 | for (i = 0; i < 4; i++) | ||
1422 | if (pt_vaddr[pte + i] != scratch_pte) | ||
1423 | found = true; | ||
1424 | if (!found) | ||
1425 | continue; | ||
1426 | |||
1427 | seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte); | ||
1428 | for (i = 0; i < 4; i++) { | ||
1429 | if (pt_vaddr[pte + i] != scratch_pte) | ||
1430 | seq_printf(m, " %llx", pt_vaddr[pte + i]); | ||
1431 | else | ||
1432 | seq_puts(m, " SCRATCH "); | ||
1433 | } | ||
1434 | seq_puts(m, "\n"); | ||
1435 | } | ||
1436 | /* don't use kunmap_px, it could trigger | ||
1437 | * an unnecessary flush. | ||
1438 | */ | ||
1439 | kunmap_atomic(pt_vaddr); | ||
1440 | } | ||
1441 | } | ||
1442 | } | ||
1443 | |||
1444 | static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) | ||
1445 | { | ||
1446 | struct i915_address_space *vm = &ppgtt->base; | ||
1447 | uint64_t start = ppgtt->base.start; | ||
1448 | uint64_t length = ppgtt->base.total; | ||
1449 | gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), | ||
1450 | I915_CACHE_LLC, true); | ||
1451 | |||
1452 | if (!USES_FULL_48BIT_PPGTT(vm->dev)) { | ||
1453 | gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m); | ||
1454 | } else { | ||
1455 | uint64_t templ4, pml4e; | ||
1456 | struct i915_pml4 *pml4 = &ppgtt->pml4; | ||
1457 | struct i915_page_directory_pointer *pdp; | ||
1458 | |||
1459 | gen8_for_each_pml4e(pdp, pml4, start, length, templ4, pml4e) { | ||
1460 | if (!test_bit(pml4e, pml4->used_pml4es)) | ||
1461 | continue; | ||
1462 | |||
1463 | seq_printf(m, " PML4E #%llu\n", pml4e); | ||
1464 | gen8_dump_pdp(pdp, start, length, scratch_pte, m); | ||
1465 | } | ||
1466 | } | ||
1467 | } | ||
1468 | |||
1469 | static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt) | ||
1470 | { | ||
1471 | unsigned long *new_page_dirs, *new_page_tables; | ||
1472 | uint32_t pdpes = I915_PDPES_PER_PDP(ppgtt->base.dev); | ||
1473 | int ret; | ||
1474 | |||
1475 | /* We allocate temp bitmap for page tables for no gain | ||
1476 | * but as this is for init only, let's keep things simple | ||
1477 | */ | ||
1478 | ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes); | ||
1479 | if (ret) | ||
1480 | return ret; | ||
1481 | |||
1482 | /* Allocate for all pdps regardless of how the ppgtt | ||
1483 | * was defined. | ||
1484 | */ | ||
1485 | ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp, | ||
1486 | 0, 1ULL << 32, | ||
1487 | new_page_dirs); | ||
1488 | if (!ret) | ||
1489 | *ppgtt->pdp.used_pdpes = *new_page_dirs; | ||
1490 | |||
1491 | free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); | ||
1492 | |||
1493 | return ret; | ||
1494 | } | ||
1495 | |||
1010 | /* | 1496 | /* |
1011 | * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers | 1497 | * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers |
1012 | * with a net effect resembling a 2-level page table in normal x86 terms. Each | 1498 | * with a net effect resembling a 2-level page table in normal x86 terms. Each |
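The comment above describes legacy (32-bit) mode; the two switch_mm flavours introduced earlier reflect the split: gen8_legacy_mm_switch() loads all four PDP descriptors (one per 1GiB of the 4GiB space), while gen8_48b_mm_switch() loads a single PML4 address at entry 0. A sketch of the two shapes, with a printf replacing the LOAD_REGISTER_IMM emission; the descending legacy order and the addresses are assumptions for illustration, not taken from this hunk:

#include <stdint.h>
#include <stdio.h>

static void write_pdp(unsigned entry, uint64_t daddr)
{
	printf("PDP%u <- 0x%012llx\n", entry, (unsigned long long)daddr);
}

static void legacy_mm_switch(const uint64_t pd_daddr[4])
{
	for (int i = 3; i >= 0; i--)	/* four descriptors, one per 1GiB */
		write_pdp(i, pd_daddr[i]);
}

static void mm_switch_48b(uint64_t pml4_daddr)
{
	write_pdp(0, pml4_daddr);	/* single top-level pointer */
}

int main(void)
{
	const uint64_t pds[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };

	legacy_mm_switch(pds);
	mm_switch_48b(0x5000);
	return 0;
}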
@@ -1023,24 +1509,49 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | |||
1023 | return ret; | 1509 | return ret; |
1024 | 1510 | ||
1025 | ppgtt->base.start = 0; | 1511 | ppgtt->base.start = 0; |
1026 | ppgtt->base.total = 1ULL << 32; | ||
1027 | if (IS_ENABLED(CONFIG_X86_32)) | ||
1028 | /* While we have a proliferation of size_t variables | ||
1029 | * we cannot represent the full ppgtt size on 32bit, | ||
1030 | * so limit it to the same size as the GGTT (currently | ||
1031 | * 2GiB). | ||
1032 | */ | ||
1033 | ppgtt->base.total = to_i915(ppgtt->base.dev)->gtt.base.total; | ||
1034 | ppgtt->base.cleanup = gen8_ppgtt_cleanup; | 1512 | ppgtt->base.cleanup = gen8_ppgtt_cleanup; |
1035 | ppgtt->base.allocate_va_range = gen8_alloc_va_range; | 1513 | ppgtt->base.allocate_va_range = gen8_alloc_va_range; |
1036 | ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; | 1514 | ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; |
1037 | ppgtt->base.clear_range = gen8_ppgtt_clear_range; | 1515 | ppgtt->base.clear_range = gen8_ppgtt_clear_range; |
1038 | ppgtt->base.unbind_vma = ppgtt_unbind_vma; | 1516 | ppgtt->base.unbind_vma = ppgtt_unbind_vma; |
1039 | ppgtt->base.bind_vma = ppgtt_bind_vma; | 1517 | ppgtt->base.bind_vma = ppgtt_bind_vma; |
1518 | ppgtt->debug_dump = gen8_dump_ppgtt; | ||
1519 | |||
1520 | if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { | ||
1521 | ret = setup_px(ppgtt->base.dev, &ppgtt->pml4); | ||
1522 | if (ret) | ||
1523 | goto free_scratch; | ||
1040 | 1524 | ||
1041 | ppgtt->switch_mm = gen8_mm_switch; | 1525 | gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4); |
1526 | |||
1527 | ppgtt->base.total = 1ULL << 48; | ||
1528 | ppgtt->switch_mm = gen8_48b_mm_switch; | ||
1529 | } else { | ||
1530 | ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp); | ||
1531 | if (ret) | ||
1532 | goto free_scratch; | ||
1533 | |||
1534 | ppgtt->base.total = 1ULL << 32; | ||
1535 | ppgtt->switch_mm = gen8_legacy_mm_switch; | ||
1536 | trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base, | ||
1537 | 0, 0, | ||
1538 | GEN8_PML4E_SHIFT); | ||
1539 | |||
1540 | if (intel_vgpu_active(ppgtt->base.dev)) { | ||
1541 | ret = gen8_preallocate_top_level_pdps(ppgtt); | ||
1542 | if (ret) | ||
1543 | goto free_scratch; | ||
1544 | } | ||
1545 | } | ||
1546 | |||
1547 | if (intel_vgpu_active(ppgtt->base.dev)) | ||
1548 | gen8_ppgtt_notify_vgt(ppgtt, true); | ||
1042 | 1549 | ||
1043 | return 0; | 1550 | return 0; |
1551 | |||
1552 | free_scratch: | ||
1553 | gen8_free_scratch(&ppgtt->base); | ||
1554 | return ret; | ||
1044 | } | 1555 | } |
1045 | 1556 | ||
1046 | static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) | 1557 | static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) |
@@ -1228,8 +1739,9 @@ static void gen8_ppgtt_enable(struct drm_device *dev) | |||
1228 | int j; | 1739 | int j; |
1229 | 1740 | ||
1230 | for_each_ring(ring, dev_priv, j) { | 1741 | for_each_ring(ring, dev_priv, j) { |
1742 | u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0; | ||
1231 | I915_WRITE(RING_MODE_GEN7(ring), | 1743 | I915_WRITE(RING_MODE_GEN7(ring), |
1232 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | 1744 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level)); |
1233 | } | 1745 | } |
1234 | } | 1746 | } |
1235 | 1747 | ||
@@ -2084,9 +2596,9 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node, | |||
2084 | } | 2596 | } |
2085 | 2597 | ||
2086 | static int i915_gem_setup_global_gtt(struct drm_device *dev, | 2598 | static int i915_gem_setup_global_gtt(struct drm_device *dev, |
2087 | unsigned long start, | 2599 | u64 start, |
2088 | unsigned long mappable_end, | 2600 | u64 mappable_end, |
2089 | unsigned long end) | 2601 | u64 end) |
2090 | { | 2602 | { |
2091 | /* Let GEM Manage all of the aperture. | 2603 | /* Let GEM Manage all of the aperture. |
2092 | * | 2604 | * |
@@ -2125,7 +2637,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, | |||
2125 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { | 2637 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
2126 | struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); | 2638 | struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); |
2127 | 2639 | ||
2128 | DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", | 2640 | DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n", |
2129 | i915_gem_obj_ggtt_offset(obj), obj->base.size); | 2641 | i915_gem_obj_ggtt_offset(obj), obj->base.size); |
2130 | 2642 | ||
2131 | WARN_ON(i915_gem_obj_ggtt_bound(obj)); | 2643 | WARN_ON(i915_gem_obj_ggtt_bound(obj)); |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index e1cfa292f9ad..82750073d5b3 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h | |||
@@ -39,6 +39,8 @@ struct drm_i915_file_private; | |||
39 | typedef uint32_t gen6_pte_t; | 39 | typedef uint32_t gen6_pte_t; |
40 | typedef uint64_t gen8_pte_t; | 40 | typedef uint64_t gen8_pte_t; |
41 | typedef uint64_t gen8_pde_t; | 41 | typedef uint64_t gen8_pde_t; |
42 | typedef uint64_t gen8_ppgtt_pdpe_t; | ||
43 | typedef uint64_t gen8_ppgtt_pml4e_t; | ||
42 | 44 | ||
43 | #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) | 45 | #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) |
44 | 46 | ||
@@ -88,9 +90,18 @@ typedef uint64_t gen8_pde_t; | |||
88 | * PDPE | PDE | PTE | offset | 90 | * PDPE | PDE | PTE | offset |
89 | * The difference as compared to normal x86 3 level page table is the PDPEs are | 91 | * The difference as compared to normal x86 3 level page table is the PDPEs are |
90 | * programmed via register. | 92 | * programmed via register. |
93 | * | ||
94 | * GEN8 48b legacy style address is defined as a 4 level page table: | ||
95 | * 47:39 | 38:30 | 29:21 | 20:12 | 11:0 | ||
96 | * PML4E | PDPE | PDE | PTE | offset | ||
91 | */ | 97 | */ |
98 | #define GEN8_PML4ES_PER_PML4 512 | ||
99 | #define GEN8_PML4E_SHIFT 39 | ||
100 | #define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1) | ||
92 | #define GEN8_PDPE_SHIFT 30 | 101 | #define GEN8_PDPE_SHIFT 30 |
93 | #define GEN8_PDPE_MASK 0x3 | 102 | /* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page |
103 | * tables */ | ||
104 | #define GEN8_PDPE_MASK 0x1ff | ||
94 | #define GEN8_PDE_SHIFT 21 | 105 | #define GEN8_PDE_SHIFT 21 |
95 | #define GEN8_PDE_MASK 0x1ff | 106 | #define GEN8_PDE_MASK 0x1ff |
96 | #define GEN8_PTE_SHIFT 12 | 107 | #define GEN8_PTE_SHIFT 12 |
@@ -98,6 +109,9 @@ typedef uint64_t gen8_pde_t; | |||
98 | #define GEN8_LEGACY_PDPES 4 | 109 | #define GEN8_LEGACY_PDPES 4 |
99 | #define GEN8_PTES I915_PTES(sizeof(gen8_pte_t)) | 110 | #define GEN8_PTES I915_PTES(sizeof(gen8_pte_t)) |
100 | 111 | ||
112 | #define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\ | ||
113 | GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES) | ||
114 | |||
101 | #define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) | 115 | #define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) |
102 | #define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ | 116 | #define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ |
103 | #define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */ | 117 | #define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */ |
@@ -135,7 +149,7 @@ struct i915_ggtt_view { | |||
135 | 149 | ||
136 | union { | 150 | union { |
137 | struct { | 151 | struct { |
138 | unsigned long offset; | 152 | u64 offset; |
139 | unsigned int size; | 153 | unsigned int size; |
140 | } partial; | 154 | } partial; |
141 | } params; | 155 | } params; |
@@ -241,9 +255,17 @@ struct i915_page_directory { | |||
241 | }; | 255 | }; |
242 | 256 | ||
243 | struct i915_page_directory_pointer { | 257 | struct i915_page_directory_pointer { |
244 | /* struct page *page; */ | 258 | struct i915_page_dma base; |
245 | DECLARE_BITMAP(used_pdpes, GEN8_LEGACY_PDPES); | 259 | |
246 | struct i915_page_directory *page_directory[GEN8_LEGACY_PDPES]; | 260 | unsigned long *used_pdpes; |
261 | struct i915_page_directory **page_directory; | ||
262 | }; | ||
263 | |||
264 | struct i915_pml4 { | ||
265 | struct i915_page_dma base; | ||
266 | |||
267 | DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4); | ||
268 | struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4]; | ||
247 | }; | 269 | }; |
248 | 270 | ||
249 | struct i915_address_space { | 271 | struct i915_address_space { |
@@ -256,6 +278,7 @@ struct i915_address_space { | |||
256 | struct i915_page_scratch *scratch_page; | 278 | struct i915_page_scratch *scratch_page; |
257 | struct i915_page_table *scratch_pt; | 279 | struct i915_page_table *scratch_pt; |
258 | struct i915_page_directory *scratch_pd; | 280 | struct i915_page_directory *scratch_pd; |
281 | struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */ | ||
259 | 282 | ||
260 | /** | 283 | /** |
261 | * List of objects currently involved in rendering. | 284 | * List of objects currently involved in rendering. |
@@ -341,8 +364,9 @@ struct i915_hw_ppgtt { | |||
341 | struct drm_mm_node node; | 364 | struct drm_mm_node node; |
342 | unsigned long pd_dirty_rings; | 365 | unsigned long pd_dirty_rings; |
343 | union { | 366 | union { |
344 | struct i915_page_directory_pointer pdp; | 367 | struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */ |
345 | struct i915_page_directory pd; | 368 | struct i915_page_directory_pointer pdp; /* GEN8+ */ |
369 | struct i915_page_directory pd; /* GEN6-7 */ | ||
346 | }; | 370 | }; |
347 | 371 | ||
348 | struct drm_i915_file_private *file_priv; | 372 | struct drm_i915_file_private *file_priv; |
@@ -436,24 +460,23 @@ static inline uint32_t gen6_pde_index(uint32_t addr) | |||
436 | temp = min(temp, length), \ | 460 | temp = min(temp, length), \ |
437 | start += temp, length -= temp) | 461 | start += temp, length -= temp) |
438 | 462 | ||
439 | #define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \ | 463 | #define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \ |
440 | for (iter = gen8_pdpe_index(start); \ | 464 | for (iter = gen8_pdpe_index(start); \ |
441 | pd = (pdp)->page_directory[iter], length > 0 && iter < GEN8_LEGACY_PDPES; \ | 465 | pd = (pdp)->page_directory[iter], \ |
466 | length > 0 && (iter < I915_PDPES_PER_PDP(dev)); \ | ||
442 | iter++, \ | 467 | iter++, \ |
443 | temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \ | 468 | temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \ |
444 | temp = min(temp, length), \ | 469 | temp = min(temp, length), \ |
445 | start += temp, length -= temp) | 470 | start += temp, length -= temp) |
446 | 471 | ||
447 | /* Clamp length to the next page_directory boundary */ | 472 | #define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter) \ |
448 | static inline uint64_t gen8_clamp_pd(uint64_t start, uint64_t length) | 473 | for (iter = gen8_pml4e_index(start); \ |
449 | { | 474 | pdp = (pml4)->pdps[iter], \ |
450 | uint64_t next_pd = ALIGN(start + 1, 1 << GEN8_PDPE_SHIFT); | 475 | length > 0 && iter < GEN8_PML4ES_PER_PML4; \ |
451 | 476 | iter++, \ | |
452 | if (next_pd > (start + length)) | 477 | temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start, \ |
453 | return length; | 478 | temp = min(temp, length), \ |
454 | 479 | start += temp, length -= temp) | |
455 | return next_pd - start; | ||
456 | } | ||
457 | 480 | ||
458 | static inline uint32_t gen8_pte_index(uint64_t address) | 481 | static inline uint32_t gen8_pte_index(uint64_t address) |
459 | { | 482 | { |
@@ -472,8 +495,7 @@ static inline uint32_t gen8_pdpe_index(uint64_t address) | |||
472 | 495 | ||
473 | static inline uint32_t gen8_pml4e_index(uint64_t address) | 496 | static inline uint32_t gen8_pml4e_index(uint64_t address) |
474 | { | 497 | { |
475 | WARN_ON(1); /* For 64B */ | 498 | return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK; |
476 | return 0; | ||
477 | } | 499 | } |
478 | 500 | ||
479 | static inline size_t gen8_pte_count(uint64_t address, uint64_t length) | 501 | static inline size_t gen8_pte_count(uint64_t address, uint64_t length) |
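The PDPE mask widening earlier in this header (0x3 to 0x1ff, flagged as "untrue for 32b platforms") is safe for the legacy layout because a legacy address never exceeds 4GiB, so the extra mask bits only ever select zero bits; the walk macros and the now-real gen8_pml4e_index() can then share one 9-bit-per-level scheme. A one-line standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t legacy_va = 0xffffffffULL;	/* top of the 4GiB space */
	unsigned old_idx = (legacy_va >> 30) & 0x3;
	unsigned new_idx = (legacy_va >> 30) & 0x1ff;

	printf("old=%u new=%u (identical for any VA < 4GiB)\n",
	       old_idx, new_idx);
	return 0;
}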
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 8fd431bcdfd3..d11901d590ac 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c | |||
@@ -813,7 +813,6 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { | |||
813 | int | 813 | int |
814 | i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | 814 | i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) |
815 | { | 815 | { |
816 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
817 | struct drm_i915_gem_userptr *args = data; | 816 | struct drm_i915_gem_userptr *args = data; |
818 | struct drm_i915_gem_object *obj; | 817 | struct drm_i915_gem_object *obj; |
819 | int ret; | 818 | int ret; |
@@ -826,9 +825,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file | |||
826 | if (offset_in_page(args->user_ptr | args->user_size)) | 825 | if (offset_in_page(args->user_ptr | args->user_size)) |
827 | return -EINVAL; | 826 | return -EINVAL; |
828 | 827 | ||
829 | if (args->user_size > dev_priv->gtt.base.total) | ||
830 | return -E2BIG; | ||
831 | |||
832 | if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE, | 828 | if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE, |
833 | (char __user *)(unsigned long)args->user_ptr, args->user_size)) | 829 | (char __user *)(unsigned long)args->user_ptr, args->user_size)) |
834 | return -EFAULT; | 830 | return -EFAULT; |
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 41d0739e6fdf..3379f9c1ef88 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -30,11 +30,6 @@ | |||
30 | #include <generated/utsrelease.h> | 30 | #include <generated/utsrelease.h> |
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | 32 | ||
33 | static const char *yesno(int v) | ||
34 | { | ||
35 | return v ? "yes" : "no"; | ||
36 | } | ||
37 | |||
38 | static const char *ring_str(int ring) | 33 | static const char *ring_str(int ring) |
39 | { | 34 | { |
40 | switch (ring) { | 35 | switch (ring) { |
@@ -197,8 +192,9 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, | |||
197 | err_printf(m, " %s [%d]:\n", name, count); | 192 | err_printf(m, " %s [%d]:\n", name, count); |
198 | 193 | ||
199 | while (count--) { | 194 | while (count--) { |
200 | err_printf(m, " %08x %8u %02x %02x [ ", | 195 | err_printf(m, " %08x_%08x %8u %02x %02x [ ", |
201 | err->gtt_offset, | 196 | upper_32_bits(err->gtt_offset), |
197 | lower_32_bits(err->gtt_offset), | ||
202 | err->size, | 198 | err->size, |
203 | err->read_domains, | 199 | err->read_domains, |
204 | err->write_domain); | 200 | err->write_domain); |
@@ -427,15 +423,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
427 | err_printf(m, " (submitted by %s [%d])", | 423 | err_printf(m, " (submitted by %s [%d])", |
428 | error->ring[i].comm, | 424 | error->ring[i].comm, |
429 | error->ring[i].pid); | 425 | error->ring[i].pid); |
430 | err_printf(m, " --- gtt_offset = 0x%08x\n", | 426 | err_printf(m, " --- gtt_offset = 0x%08x %08x\n", |
431 | obj->gtt_offset); | 427 | upper_32_bits(obj->gtt_offset), |
428 | lower_32_bits(obj->gtt_offset)); | ||
432 | print_error_obj(m, obj); | 429 | print_error_obj(m, obj); |
433 | } | 430 | } |
434 | 431 | ||
435 | obj = error->ring[i].wa_batchbuffer; | 432 | obj = error->ring[i].wa_batchbuffer; |
436 | if (obj) { | 433 | if (obj) { |
437 | err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n", | 434 | err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n", |
438 | dev_priv->ring[i].name, obj->gtt_offset); | 435 | dev_priv->ring[i].name, |
436 | lower_32_bits(obj->gtt_offset)); | ||
439 | print_error_obj(m, obj); | 437 | print_error_obj(m, obj); |
440 | } | 438 | } |
441 | 439 | ||
@@ -454,22 +452,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
454 | if ((obj = error->ring[i].ringbuffer)) { | 452 | if ((obj = error->ring[i].ringbuffer)) { |
455 | err_printf(m, "%s --- ringbuffer = 0x%08x\n", | 453 | err_printf(m, "%s --- ringbuffer = 0x%08x\n", |
456 | dev_priv->ring[i].name, | 454 | dev_priv->ring[i].name, |
457 | obj->gtt_offset); | 455 | lower_32_bits(obj->gtt_offset)); |
458 | print_error_obj(m, obj); | 456 | print_error_obj(m, obj); |
459 | } | 457 | } |
460 | 458 | ||
461 | if ((obj = error->ring[i].hws_page)) { | 459 | if ((obj = error->ring[i].hws_page)) { |
462 | err_printf(m, "%s --- HW Status = 0x%08x\n", | 460 | err_printf(m, "%s --- HW Status = 0x%08llx\n", |
463 | dev_priv->ring[i].name, | 461 | dev_priv->ring[i].name, |
464 | obj->gtt_offset); | 462 | obj->gtt_offset + LRC_PPHWSP_PN * PAGE_SIZE); |
465 | offset = 0; | 463 | offset = 0; |
466 | for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { | 464 | for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { |
467 | err_printf(m, "[%04x] %08x %08x %08x %08x\n", | 465 | err_printf(m, "[%04x] %08x %08x %08x %08x\n", |
468 | offset, | 466 | offset, |
469 | obj->pages[0][elt], | 467 | obj->pages[LRC_PPHWSP_PN][elt], |
470 | obj->pages[0][elt+1], | 468 | obj->pages[LRC_PPHWSP_PN][elt+1], |
471 | obj->pages[0][elt+2], | 469 | obj->pages[LRC_PPHWSP_PN][elt+2], |
472 | obj->pages[0][elt+3]); | 470 | obj->pages[LRC_PPHWSP_PN][elt+3]); |
473 | offset += 16; | 471 | offset += 16; |
474 | } | 472 | } |
475 | } | 473 | } |
@@ -477,13 +475,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
477 | if ((obj = error->ring[i].ctx)) { | 475 | if ((obj = error->ring[i].ctx)) { |
478 | err_printf(m, "%s --- HW Context = 0x%08x\n", | 476 | err_printf(m, "%s --- HW Context = 0x%08x\n", |
479 | dev_priv->ring[i].name, | 477 | dev_priv->ring[i].name, |
480 | obj->gtt_offset); | 478 | lower_32_bits(obj->gtt_offset)); |
481 | print_error_obj(m, obj); | 479 | print_error_obj(m, obj); |
482 | } | 480 | } |
483 | } | 481 | } |
484 | 482 | ||
485 | if ((obj = error->semaphore_obj)) { | 483 | if ((obj = error->semaphore_obj)) { |
486 | err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset); | 484 | err_printf(m, "Semaphore page = 0x%08x\n", |
485 | lower_32_bits(obj->gtt_offset)); | ||
487 | for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { | 486 | for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { |
488 | err_printf(m, "[%04x] %08x %08x %08x %08x\n", | 487 | err_printf(m, "[%04x] %08x %08x %08x %08x\n", |
489 | elt * 4, | 488 | elt * 4, |
@@ -591,7 +590,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv, | |||
591 | int num_pages; | 590 | int num_pages; |
592 | bool use_ggtt; | 591 | bool use_ggtt; |
593 | int i = 0; | 592 | int i = 0; |
594 | u32 reloc_offset; | 593 | u64 reloc_offset; |
595 | 594 | ||
596 | if (src == NULL || src->pages == NULL) | 595 | if (src == NULL || src->pages == NULL) |
597 | return NULL; | 596 | return NULL; |
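With gtt_offset widened to 64 bits, the dump code above splits it into two 32-bit words before printing. A minimal sketch of that split, with local stand-ins for the kernel's upper_32_bits()/lower_32_bits() helpers:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's upper_32_bits()/lower_32_bits(). */
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

int main(void)
{
        uint64_t gtt_offset = 0x0000000123456000ULL;

        /* Matches the "%08x_%08x" style used by the new error dump. */
        printf("%08x_%08x\n", (unsigned)upper_32(gtt_offset),
               (unsigned)lower_32(gtt_offset));
        return 0;
}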
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h index ccdc6c8ac20b..8c8e574e63ba 100644 --- a/drivers/gpu/drm/i915/i915_guc_reg.h +++ b/drivers/gpu/drm/i915/i915_guc_reg.h | |||
@@ -38,10 +38,6 @@ | |||
38 | #define GS_MIA_SHIFT 16 | 38 | #define GS_MIA_SHIFT 16 |
39 | #define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) | 39 | #define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) |
40 | 40 | ||
41 | #define GUC_WOPCM_SIZE 0xc050 | ||
42 | #define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */ | ||
43 | #define GUC_WOPCM_OFFSET 0x80000 /* 512KB */ | ||
44 | |||
45 | #define SOFT_SCRATCH(n) (0xc180 + ((n) * 4)) | 41 | #define SOFT_SCRATCH(n) (0xc180 + ((n) * 4)) |
46 | 42 | ||
47 | #define UOS_RSA_SCRATCH_0 0xc200 | 43 | #define UOS_RSA_SCRATCH_0 0xc200 |
@@ -56,10 +52,18 @@ | |||
56 | #define UOS_MOVE (1<<4) | 52 | #define UOS_MOVE (1<<4) |
57 | #define START_DMA (1<<0) | 53 | #define START_DMA (1<<0) |
58 | #define DMA_GUC_WOPCM_OFFSET 0xc340 | 54 | #define DMA_GUC_WOPCM_OFFSET 0xc340 |
55 | #define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ | ||
56 | |||
57 | #define GUC_WOPCM_SIZE 0xc050 | ||
58 | #define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */ | ||
59 | |||
60 | /* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ | ||
61 | #define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) | ||
59 | 62 | ||
60 | #define GEN8_GT_PM_CONFIG 0x138140 | 63 | #define GEN8_GT_PM_CONFIG 0x138140 |
64 | #define GEN9LP_GT_PM_CONFIG 0x138140 | ||
61 | #define GEN9_GT_PM_CONFIG 0x13816c | 65 | #define GEN9_GT_PM_CONFIG 0x13816c |
62 | #define GEN8_GT_DOORBELL_ENABLE (1<<0) | 66 | #define GT_DOORBELL_ENABLE (1<<0) |
63 | 67 | ||
64 | #define GEN8_GTCR 0x4274 | 68 | #define GEN8_GTCR 0x4274 |
65 | #define GEN8_GTCR_INVALIDATE (1<<0) | 69 | #define GEN8_GTCR_INVALIDATE (1<<0) |
@@ -80,7 +84,8 @@ | |||
80 | GUC_ENABLE_READ_CACHE_LOGIC | \ | 84 | GUC_ENABLE_READ_CACHE_LOGIC | \ |
81 | GUC_ENABLE_MIA_CACHING | \ | 85 | GUC_ENABLE_MIA_CACHING | \ |
82 | GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | \ | 86 | GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | \ |
83 | GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA) | 87 | GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \ |
88 | GUC_ENABLE_MIA_CLOCK_GATING) | ||
84 | 89 | ||
85 | #define HOST2GUC_INTERRUPT 0xc4c8 | 90 | #define HOST2GUC_INTERRUPT 0xc4c8 |
86 | #define HOST2GUC_TRIGGER (1<<0) | 91 | #define HOST2GUC_TRIGGER (1<<0) |
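GUC_WOPCM_TOP marks the boundary below which GuC-side addresses do not map through the GTT, which is why objects shared with the GuC are later pinned with an offset bias. A hedged sketch of the resulting validity check; the 512 KiB value is taken from GUC_WOPCM_SIZE_VALUE above, the helper name is a local stand-in:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WOPCM_TOP (0x80u << 12)        /* 512 KiB, per GUC_WOPCM_SIZE_VALUE */

/* A GGTT address handed to the GuC must lie at or above WOPCM_TOP. */
static bool guc_addr_valid(uint64_t ggtt_offset)
{
        return ggtt_offset >= WOPCM_TOP;
}

int main(void)
{
        printf("0x10000 ok? %d\n", guc_addr_valid(0x10000));  /* 0: reserved */
        printf("0x80000 ok? %d\n", guc_addr_valid(0x80000));  /* 1: usable */
        return 0;
}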
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c new file mode 100644 index 000000000000..792d0b958a2c --- /dev/null +++ b/drivers/gpu/drm/i915/i915_guc_submission.c | |||
@@ -0,0 +1,916 @@ | |||
1 | /* | ||
2 | * Copyright © 2014 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | #include <linux/firmware.h> | ||
25 | #include <linux/circ_buf.h> | ||
26 | #include "i915_drv.h" | ||
27 | #include "intel_guc.h" | ||
28 | |||
29 | /** | ||
30 | * DOC: GuC Client | ||
31 | * | ||
32 | * i915_guc_client: | ||
33 | * We use the term client to avoid confusion with contexts. An i915_guc_client is | ||
34 | * equivalent to the GuC object guc_context_desc. This context descriptor is | ||
35 | * allocated from a pool of 1024 entries. The kernel driver allocates a doorbell | ||
36 | * and a workqueue for it, plus the process descriptor (guc_process_desc), which | ||
37 | * is mapped to client space so that the client can write a Work Item and then | ||
38 | * ring the doorbell. | ||
39 | * | ||
40 | * To simplify the implementation, we allocate one gem object that contains all | ||
41 | * pages for doorbell, process descriptor and workqueue. | ||
42 | * | ||
43 | * The Scratch registers: | ||
44 | * There are 16 MMIO-based registers starting at 0xC180. The kernel driver writes | ||
45 | * a value to the action register (SOFT_SCRATCH_0) along with any data. It then | ||
46 | * triggers an interrupt on the GuC via another register write (0xC4C8). | ||
47 | * Firmware writes a success/fail code back to the action register after | ||
48 | * processing the request. The kernel driver polls waiting for this update and | ||
49 | * then proceeds. | ||
50 | * See host2guc_action() | ||
51 | * | ||
52 | * Doorbells: | ||
53 | * Doorbells are interrupts to the GuC uKernel. A doorbell is a single cache line (QW) | ||
54 | * mapped into process space. | ||
55 | * | ||
56 | * Work Items: | ||
57 | * There are several types of work items that the host may place into a | ||
58 | * workqueue, each with its own requirements and limitations. Currently only | ||
59 | * WQ_TYPE_INORDER is needed to support legacy submission via GuC; it | ||
60 | * represents an in-order queue. The kernel driver packs the ring tail pointer | ||
61 | * and an ELSP context descriptor dword into each Work Item. | ||
62 | * See guc_add_workqueue_item() | ||
63 | * | ||
64 | */ | ||
65 | |||
66 | /* | ||
67 | * Read GuC command/status register (SOFT_SCRATCH_0) | ||
68 | * Return true if it contains a response rather than a command | ||
69 | */ | ||
70 | static inline bool host2guc_action_response(struct drm_i915_private *dev_priv, | ||
71 | u32 *status) | ||
72 | { | ||
73 | u32 val = I915_READ(SOFT_SCRATCH(0)); | ||
74 | *status = val; | ||
75 | return GUC2HOST_IS_RESPONSE(val); | ||
76 | } | ||
77 | |||
78 | static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len) | ||
79 | { | ||
80 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
81 | u32 status; | ||
82 | int i; | ||
83 | int ret; | ||
84 | |||
85 | if (WARN_ON(len < 1 || len > 15)) | ||
86 | return -EINVAL; | ||
87 | |||
88 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | ||
89 | spin_lock(&dev_priv->guc.host2guc_lock); | ||
90 | |||
91 | dev_priv->guc.action_count += 1; | ||
92 | dev_priv->guc.action_cmd = data[0]; | ||
93 | |||
94 | for (i = 0; i < len; i++) | ||
95 | I915_WRITE(SOFT_SCRATCH(i), data[i]); | ||
96 | |||
97 | POSTING_READ(SOFT_SCRATCH(i - 1)); | ||
98 | |||
99 | I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER); | ||
100 | |||
101 | /* No HOST2GUC command should take longer than 10ms */ | ||
102 | ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10); | ||
103 | if (status != GUC2HOST_STATUS_SUCCESS) { | ||
104 | /* | ||
105 | * Either the GuC explicitly returned an error (which | ||
106 | * we convert to -EIO here) or no response at all was | ||
107 | * received within the timeout limit (-ETIMEDOUT) | ||
108 | */ | ||
109 | if (ret != -ETIMEDOUT) | ||
110 | ret = -EIO; | ||
111 | |||
112 | DRM_ERROR("GUC: host2guc action 0x%X failed. ret=%d " | ||
113 | "status=0x%08X response=0x%08X\n", | ||
114 | data[0], ret, status, | ||
115 | I915_READ(SOFT_SCRATCH(15))); | ||
116 | |||
117 | dev_priv->guc.action_fail += 1; | ||
118 | dev_priv->guc.action_err = ret; | ||
119 | } | ||
120 | dev_priv->guc.action_status = status; | ||
121 | |||
122 | spin_unlock(&dev_priv->guc.host2guc_lock); | ||
123 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
124 | |||
125 | return ret; | ||
126 | } | ||
127 | |||
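host2guc_action() is a simple mailbox protocol: write the action word plus parameters into the scratch registers, trigger an interrupt, then poll SOFT_SCRATCH_0 for a response. A user-space-style sketch of the same flow against a fake register file; every name below is a local stand-in, not an i915 symbol:

#include <stdint.h>
#include <stdio.h>

#define NUM_SCRATCH 16
#define IS_RESPONSE(v) ((v) & 0x80000000u)     /* assumed response flag */
#define STATUS_SUCCESS 0x80000000u

static uint32_t scratch[NUM_SCRATCH];          /* fake SOFT_SCRATCH file */

/* Fake firmware: consume the command and post a success response. */
static void fake_guc_irq(void)
{
        scratch[0] = STATUS_SUCCESS;
}

static int mailbox_action(const uint32_t *data, int len)
{
        int i;

        if (len < 1 || len > 15)
                return -1;
        for (i = 0; i < len; i++)
                scratch[i] = data[i];
        fake_guc_irq();                 /* stands in for the interrupt write */
        /* The real code polls with a 10 ms timeout; here the reply is instant. */
        return IS_RESPONSE(scratch[0]) ? 0 : -1;
}

int main(void)
{
        uint32_t cmd[2] = { 0x10 /* action */, 42 /* parameter */ };

        printf("action returned %d\n", mailbox_action(cmd, 2));
        return 0;
}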
128 | /* | ||
129 | * Tell the GuC to allocate or deallocate a specific doorbell | ||
130 | */ | ||
131 | |||
132 | static int host2guc_allocate_doorbell(struct intel_guc *guc, | ||
133 | struct i915_guc_client *client) | ||
134 | { | ||
135 | u32 data[2]; | ||
136 | |||
137 | data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL; | ||
138 | data[1] = client->ctx_index; | ||
139 | |||
140 | return host2guc_action(guc, data, 2); | ||
141 | } | ||
142 | |||
143 | static int host2guc_release_doorbell(struct intel_guc *guc, | ||
144 | struct i915_guc_client *client) | ||
145 | { | ||
146 | u32 data[2]; | ||
147 | |||
148 | data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL; | ||
149 | data[1] = client->ctx_index; | ||
150 | |||
151 | return host2guc_action(guc, data, 2); | ||
152 | } | ||
153 | |||
154 | static int host2guc_sample_forcewake(struct intel_guc *guc, | ||
155 | struct i915_guc_client *client) | ||
156 | { | ||
157 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
158 | u32 data[2]; | ||
159 | |||
160 | data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; | ||
161 | data[1] = (intel_enable_rc6(dev_priv->dev)) ? 1 : 0; | ||
162 | |||
163 | return host2guc_action(guc, data, 2); | ||
164 | } | ||
165 | |||
166 | /* | ||
167 | * Initialise, update, or clear doorbell data shared with the GuC | ||
168 | * | ||
169 | * These functions modify shared data and so need access to the mapped | ||
170 | * client object which contains the page being used for the doorbell | ||
171 | */ | ||
172 | |||
173 | static void guc_init_doorbell(struct intel_guc *guc, | ||
174 | struct i915_guc_client *client) | ||
175 | { | ||
176 | struct guc_doorbell_info *doorbell; | ||
177 | void *base; | ||
178 | |||
179 | base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0)); | ||
180 | doorbell = base + client->doorbell_offset; | ||
181 | |||
182 | doorbell->db_status = 1; | ||
183 | doorbell->cookie = 0; | ||
184 | |||
185 | kunmap_atomic(base); | ||
186 | } | ||
187 | |||
188 | static int guc_ring_doorbell(struct i915_guc_client *gc) | ||
189 | { | ||
190 | struct guc_process_desc *desc; | ||
191 | union guc_doorbell_qw db_cmp, db_exc, db_ret; | ||
192 | union guc_doorbell_qw *db; | ||
193 | void *base; | ||
194 | int attempt = 2, ret = -EAGAIN; | ||
195 | |||
196 | base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0)); | ||
197 | desc = base + gc->proc_desc_offset; | ||
198 | |||
199 | /* Update the tail so it is visible to GuC */ | ||
200 | desc->tail = gc->wq_tail; | ||
201 | |||
202 | /* current cookie */ | ||
203 | db_cmp.db_status = GUC_DOORBELL_ENABLED; | ||
204 | db_cmp.cookie = gc->cookie; | ||
205 | |||
206 | /* cookie to be updated */ | ||
207 | db_exc.db_status = GUC_DOORBELL_ENABLED; | ||
208 | db_exc.cookie = gc->cookie + 1; | ||
209 | if (db_exc.cookie == 0) | ||
210 | db_exc.cookie = 1; | ||
211 | |||
212 | /* pointer of current doorbell cacheline */ | ||
213 | db = base + gc->doorbell_offset; | ||
214 | |||
215 | while (attempt--) { | ||
216 | /* lets ring the doorbell */ | ||
217 | db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db, | ||
218 | db_cmp.value_qw, db_exc.value_qw); | ||
219 | |||
220 | /* if the exchange was successfully executed */ | ||
221 | if (db_ret.value_qw == db_cmp.value_qw) { | ||
222 | /* db was successfully rung */ | ||
223 | gc->cookie = db_exc.cookie; | ||
224 | ret = 0; | ||
225 | break; | ||
226 | } | ||
227 | |||
228 | /* XXX: doorbell was lost and need to acquire it again */ | ||
229 | if (db_ret.db_status == GUC_DOORBELL_DISABLED) | ||
230 | break; | ||
231 | |||
232 | DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n", | ||
233 | db_cmp.cookie, db_ret.cookie); | ||
234 | |||
235 | /* update the cookie to newly read cookie from GuC */ | ||
236 | db_cmp.cookie = db_ret.cookie; | ||
237 | db_exc.cookie = db_ret.cookie + 1; | ||
238 | if (db_exc.cookie == 0) | ||
239 | db_exc.cookie = 1; | ||
240 | } | ||
241 | |||
242 | kunmap_atomic(base); | ||
243 | return ret; | ||
244 | } | ||
245 | |||
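guc_ring_doorbell() rings the bell with a 64-bit compare-and-exchange on a {status, cookie} pair, skipping zero when the cookie wraps. A self-contained sketch using C11 atomics; the QW layout below is an assumption mirroring union guc_doorbell_qw:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DB_ENABLED 1u

/* Assumed QW layout mirroring union guc_doorbell_qw:
 * low word = status, high word = cookie. */
static uint64_t pack(uint32_t status, uint32_t cookie)
{
        return ((uint64_t)cookie << 32) | status;
}

static int ring(_Atomic uint64_t *db, uint32_t *cookie)
{
        uint64_t expect = pack(DB_ENABLED, *cookie);
        uint32_t next = *cookie + 1;

        if (next == 0)                  /* cookie zero is reserved, skip it */
                next = 1;

        if (atomic_compare_exchange_strong(db, &expect,
                                           pack(DB_ENABLED, next))) {
                *cookie = next;         /* doorbell successfully rung */
                return 0;
        }
        return -1;      /* raced with another update; the driver retries once */
}

int main(void)
{
        _Atomic uint64_t db = pack(DB_ENABLED, 7);
        uint32_t cookie = 7;

        printf("ring: %d, new cookie %u\n", ring(&db, &cookie), cookie);
        return 0;
}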
246 | static void guc_disable_doorbell(struct intel_guc *guc, | ||
247 | struct i915_guc_client *client) | ||
248 | { | ||
249 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
250 | struct guc_doorbell_info *doorbell; | ||
251 | void *base; | ||
252 | int drbreg = GEN8_DRBREGL(client->doorbell_id); | ||
253 | int value; | ||
254 | |||
255 | base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0)); | ||
256 | doorbell = base + client->doorbell_offset; | ||
257 | |||
258 | doorbell->db_status = 0; | ||
259 | |||
260 | kunmap_atomic(base); | ||
261 | |||
262 | I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID); | ||
263 | |||
264 | value = I915_READ(drbreg); | ||
265 | WARN_ON((value & GEN8_DRB_VALID) != 0); | ||
266 | |||
267 | I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0); | ||
268 | I915_WRITE(drbreg, 0); | ||
269 | |||
270 | /* XXX: wait for any interrupts */ | ||
271 | /* XXX: wait for workqueue to drain */ | ||
272 | } | ||
273 | |||
274 | /* | ||
275 | * Select, assign and release doorbell cachelines | ||
276 | * | ||
277 | * These functions track which doorbell cachelines are in use. | ||
278 | * The data they manipulate is protected by the host2guc lock. | ||
279 | */ | ||
280 | |||
281 | static uint32_t select_doorbell_cacheline(struct intel_guc *guc) | ||
282 | { | ||
283 | const uint32_t cacheline_size = cache_line_size(); | ||
284 | uint32_t offset; | ||
285 | |||
286 | spin_lock(&guc->host2guc_lock); | ||
287 | |||
288 | /* Doorbell uses a single cache line within a page */ | ||
289 | offset = offset_in_page(guc->db_cacheline); | ||
290 | |||
291 | /* Moving to next cache line to reduce contention */ | ||
292 | guc->db_cacheline += cacheline_size; | ||
293 | |||
294 | spin_unlock(&guc->host2guc_lock); | ||
295 | |||
296 | DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n", | ||
297 | offset, guc->db_cacheline, cacheline_size); | ||
298 | |||
299 | return offset; | ||
300 | } | ||
301 | |||
302 | static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority) | ||
303 | { | ||
304 | /* | ||
305 | * The bitmap is split into two halves; the first half is used for | ||
306 | * normal priority contexts, the second half for high-priority ones. | ||
307 | * Note that logically higher priorities are numerically less than | ||
308 | * normal ones, so the test below means "is it high-priority?" | ||
309 | */ | ||
310 | const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH); | ||
311 | const uint16_t half = GUC_MAX_DOORBELLS / 2; | ||
312 | const uint16_t start = hi_pri ? half : 0; | ||
313 | const uint16_t end = start + half; | ||
314 | uint16_t id; | ||
315 | |||
316 | spin_lock(&guc->host2guc_lock); | ||
317 | id = find_next_zero_bit(guc->doorbell_bitmap, end, start); | ||
318 | if (id == end) | ||
319 | id = GUC_INVALID_DOORBELL_ID; | ||
320 | else | ||
321 | bitmap_set(guc->doorbell_bitmap, id, 1); | ||
322 | spin_unlock(&guc->host2guc_lock); | ||
323 | |||
324 | DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n", | ||
325 | hi_pri ? "high" : "normal", id); | ||
326 | |||
327 | return id; | ||
328 | } | ||
329 | |||
330 | static void release_doorbell(struct intel_guc *guc, uint16_t id) | ||
331 | { | ||
332 | spin_lock(&guc->host2guc_lock); | ||
333 | bitmap_clear(guc->doorbell_bitmap, id, 1); | ||
334 | spin_unlock(&guc->host2guc_lock); | ||
335 | } | ||
336 | |||
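Doorbell IDs come from a bitmap split into halves, with normal-priority clients allocating from the lower half and high-priority clients from the upper half. A sketch of that scheme; the 256-doorbell total is an assumption standing in for GUC_MAX_DOORBELLS:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_DOORBELLS 256       /* assumed stand-in for GUC_MAX_DOORBELLS */
#define INVALID_ID 0xFFFF

static bool bitmap[MAX_DOORBELLS];

static uint16_t assign(bool high_priority)
{
        const uint16_t half = MAX_DOORBELLS / 2;
        const uint16_t start = high_priority ? half : 0;
        const uint16_t end = start + half;
        uint16_t id;

        /* Linear scan stands in for find_next_zero_bit() + bitmap_set(). */
        for (id = start; id < end; id++) {
                if (!bitmap[id]) {
                        bitmap[id] = true;
                        return id;
                }
        }
        return INVALID_ID;
}

int main(void)
{
        printf("normal -> %u\n", assign(false));        /* 0 */
        printf("high   -> %u\n", assign(true));         /* 128 */
        return 0;
}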
337 | /* | ||
338 | * Initialise the process descriptor shared with the GuC firmware. | ||
339 | */ | ||
340 | static void guc_init_proc_desc(struct intel_guc *guc, | ||
341 | struct i915_guc_client *client) | ||
342 | { | ||
343 | struct guc_process_desc *desc; | ||
344 | void *base; | ||
345 | |||
346 | base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0)); | ||
347 | desc = base + client->proc_desc_offset; | ||
348 | |||
349 | memset(desc, 0, sizeof(*desc)); | ||
350 | |||
351 | /* | ||
352 | * XXX: pDoorbell and WQVBaseAddress are pointers in process address | ||
353 | * space for ring3 clients (set them as in mmap_ioctl) or kernel | ||
354 | * space for kernel clients (map on demand instead? May make debug | ||
355 | * easier to have it mapped). | ||
356 | */ | ||
357 | desc->wq_base_addr = 0; | ||
358 | desc->db_base_addr = 0; | ||
359 | |||
360 | desc->context_id = client->ctx_index; | ||
361 | desc->wq_size_bytes = client->wq_size; | ||
362 | desc->wq_status = WQ_STATUS_ACTIVE; | ||
363 | desc->priority = client->priority; | ||
364 | |||
365 | kunmap_atomic(base); | ||
366 | } | ||
367 | |||
368 | /* | ||
369 | * Initialise/clear the context descriptor shared with the GuC firmware. | ||
370 | * | ||
371 | * This descriptor tells the GuC where (in GGTT space) to find the important | ||
372 | * data structures relating to this client (doorbell, process descriptor, | ||
373 | * write queue, etc). | ||
374 | */ | ||
375 | |||
376 | static void guc_init_ctx_desc(struct intel_guc *guc, | ||
377 | struct i915_guc_client *client) | ||
378 | { | ||
379 | struct intel_context *ctx = client->owner; | ||
380 | struct guc_context_desc desc; | ||
381 | struct sg_table *sg; | ||
382 | int i; | ||
383 | |||
384 | memset(&desc, 0, sizeof(desc)); | ||
385 | |||
386 | desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL; | ||
387 | desc.context_id = client->ctx_index; | ||
388 | desc.priority = client->priority; | ||
389 | desc.db_id = client->doorbell_id; | ||
390 | |||
391 | for (i = 0; i < I915_NUM_RINGS; i++) { | ||
392 | struct guc_execlist_context *lrc = &desc.lrc[i]; | ||
393 | struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf; | ||
394 | struct intel_engine_cs *ring; | ||
395 | struct drm_i915_gem_object *obj; | ||
396 | uint64_t ctx_desc; | ||
397 | |||
398 | /* TODO: We have a design issue to be solved here. We only know which | ||
399 | * engine the user will use once we receive the first batch, but at this | ||
400 | * point the GuC expects the lrc and ring to be pinned already. This is | ||
401 | * not an issue for the default context, which is currently the only | ||
402 | * owner of a GuC client, but any future owner of a GuC client must | ||
403 | * make sure the lrc is pinned before getting here. | ||
404 | */ | ||
405 | obj = ctx->engine[i].state; | ||
406 | if (!obj) | ||
407 | break; /* XXX: continue? */ | ||
408 | |||
409 | ring = ringbuf->ring; | ||
410 | ctx_desc = intel_lr_context_descriptor(ctx, ring); | ||
411 | lrc->context_desc = (u32)ctx_desc; | ||
412 | |||
413 | /* The state page is after PPHWSP */ | ||
414 | lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) + | ||
415 | LRC_STATE_PN * PAGE_SIZE; | ||
416 | lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | | ||
417 | (ring->id << GUC_ELC_ENGINE_OFFSET); | ||
418 | |||
419 | obj = ringbuf->obj; | ||
420 | |||
421 | lrc->ring_begin = i915_gem_obj_ggtt_offset(obj); | ||
422 | lrc->ring_end = lrc->ring_begin + obj->base.size - 1; | ||
423 | lrc->ring_next_free_location = lrc->ring_begin; | ||
424 | lrc->ring_current_tail_pointer_value = 0; | ||
425 | |||
426 | desc.engines_used |= (1 << ring->id); | ||
427 | } | ||
428 | |||
429 | WARN_ON(desc.engines_used == 0); | ||
430 | |||
431 | /* | ||
432 | * The CPU address is only needed at certain points, so kmap_atomic on | ||
433 | * demand instead of storing it in the ctx descriptor. | ||
434 | * XXX: May make debug easier to have it mapped | ||
435 | */ | ||
436 | desc.db_trigger_cpu = 0; | ||
437 | desc.db_trigger_uk = client->doorbell_offset + | ||
438 | i915_gem_obj_ggtt_offset(client->client_obj); | ||
439 | desc.db_trigger_phy = client->doorbell_offset + | ||
440 | sg_dma_address(client->client_obj->pages->sgl); | ||
441 | |||
442 | desc.process_desc = client->proc_desc_offset + | ||
443 | i915_gem_obj_ggtt_offset(client->client_obj); | ||
444 | |||
445 | desc.wq_addr = client->wq_offset + | ||
446 | i915_gem_obj_ggtt_offset(client->client_obj); | ||
447 | |||
448 | desc.wq_size = client->wq_size; | ||
449 | |||
450 | /* | ||
451 | * XXX: Take LRCs from an existing intel_context if this is not an | ||
452 | * IsKMDCreatedContext client | ||
453 | */ | ||
454 | desc.desc_private = (uintptr_t)client; | ||
455 | |||
456 | /* Pool context is pinned already */ | ||
457 | sg = guc->ctx_pool_obj->pages; | ||
458 | sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), | ||
459 | sizeof(desc) * client->ctx_index); | ||
460 | } | ||
461 | |||
462 | static void guc_fini_ctx_desc(struct intel_guc *guc, | ||
463 | struct i915_guc_client *client) | ||
464 | { | ||
465 | struct guc_context_desc desc; | ||
466 | struct sg_table *sg; | ||
467 | |||
468 | memset(&desc, 0, sizeof(desc)); | ||
469 | |||
470 | sg = guc->ctx_pool_obj->pages; | ||
471 | sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), | ||
472 | sizeof(desc) * client->ctx_index); | ||
473 | } | ||
474 | |||
475 | /* Reserve space for a workqueue item and return its offset */ | ||
476 | static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset) | ||
477 | { | ||
478 | struct guc_process_desc *desc; | ||
479 | void *base; | ||
480 | u32 size = sizeof(struct guc_wq_item); | ||
481 | int ret = 0, timeout_counter = 200; | ||
482 | |||
483 | base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0)); | ||
484 | desc = base + gc->proc_desc_offset; | ||
485 | |||
486 | while (timeout_counter-- > 0) { | ||
487 | ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head, | ||
488 | gc->wq_size) >= size, 1); | ||
489 | |||
490 | if (!ret) { | ||
491 | *offset = gc->wq_tail; | ||
492 | |||
493 | /* advance the tail for next workqueue item */ | ||
494 | gc->wq_tail += size; | ||
495 | gc->wq_tail &= gc->wq_size - 1; | ||
496 | |||
497 | /* this will break the loop */ | ||
498 | timeout_counter = 0; | ||
499 | } | ||
500 | } | ||
501 | |||
502 | kunmap_atomic(base); | ||
503 | |||
504 | return ret; | ||
505 | } | ||
506 | |||
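The space check relies on the kernel's CIRC_SPACE() semantics, which keep one slot unused so that head == tail unambiguously means empty. A sketch of the same arithmetic for a power-of-two ring:

#include <stdint.h>
#include <stdio.h>

/* CIRC_SPACE-style free-space count for a power-of-two sized ring:
 * tail is the producer index, head the consumer index. */
static uint32_t circ_space(uint32_t tail, uint32_t head, uint32_t size)
{
        return (head - tail - 1) & (size - 1);
}

int main(void)
{
        uint32_t size = 8192;   /* the workqueue buffer is 2 pages in the patch */
        uint32_t head = 0, tail = 0;

        printf("empty:   %u free\n", circ_space(tail, head, size)); /* 8191 */
        tail = (tail + 16) & (size - 1);  /* produce one 16-byte wq item */
        printf("one wqi: %u free\n", circ_space(tail, head, size)); /* 8175 */
        return 0;
}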
507 | static int guc_add_workqueue_item(struct i915_guc_client *gc, | ||
508 | struct drm_i915_gem_request *rq) | ||
509 | { | ||
510 | enum intel_ring_id ring_id = rq->ring->id; | ||
511 | struct guc_wq_item *wqi; | ||
512 | void *base; | ||
513 | u32 tail, wq_len, wq_off = 0; | ||
514 | int ret; | ||
515 | |||
516 | ret = guc_get_workqueue_space(gc, &wq_off); | ||
517 | if (ret) | ||
518 | return ret; | ||
519 | |||
520 | /* For now the workqueue item is 4 DWs and the workqueue buffer is 2 pages, | ||
521 | * so the wqi structure can neither straddle a page boundary nor wrap | ||
522 | * around to the beginning of the buffer. This simplifies the code below. | ||
523 | * | ||
524 | * XXX: if that ever changes, we would need to build the wqi in a temp | ||
525 | * buffer and copy it into the workqueue buffer dword by dword. | ||
526 | */ | ||
527 | WARN_ON(sizeof(struct guc_wq_item) != 16); | ||
528 | WARN_ON(wq_off & 3); | ||
529 | |||
530 | /* wq starts from the page after doorbell / process_desc */ | ||
531 | base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, | ||
532 | (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT)); | ||
533 | wq_off &= PAGE_SIZE - 1; | ||
534 | wqi = (struct guc_wq_item *)((char *)base + wq_off); | ||
535 | |||
536 | /* len does not include the header */ | ||
537 | wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1; | ||
538 | wqi->header = WQ_TYPE_INORDER | | ||
539 | (wq_len << WQ_LEN_SHIFT) | | ||
540 | (ring_id << WQ_TARGET_SHIFT) | | ||
541 | WQ_NO_WCFLUSH_WAIT; | ||
542 | |||
543 | /* The GuC wants only the low-order word of the context descriptor */ | ||
544 | wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring); | ||
545 | |||
546 | /* The GuC firmware wants the tail index in QWords, not bytes */ | ||
547 | tail = rq->ringbuf->tail >> 3; | ||
548 | wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT; | ||
549 | wqi->fence_id = 0; /* XXX: which fence should go here? */ | ||
550 | |||
551 | kunmap_atomic(base); | ||
552 | |||
553 | return 0; | ||
554 | } | ||
555 | |||
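Each work item is four dwords: a header encoding type, length and target engine, the low word of the context descriptor, the ring tail in qwords, and a fence id. A sketch of the packing; the shift values are placeholders, since the real WQ_*_SHIFT definitions live in the firmware interface header:

#include <stdint.h>
#include <stdio.h>

/* Placeholder field positions, not the real WQ_*_SHIFT values. */
#define TYPE_INORDER    0x0u
#define LEN_SHIFT       16
#define TARGET_SHIFT    10
#define RING_TAIL_SHIFT 0

struct wq_item {
        uint32_t header;
        uint32_t context_desc;
        uint32_t ring_tail;
        uint32_t fence_id;
};

static struct wq_item pack_wqi(uint64_t ctx_desc, uint32_t ring_id,
                               uint32_t tail_bytes)
{
        struct wq_item wqi;
        /* len excludes the header dword: 4 dwords total -> len 3 */
        uint32_t len = sizeof(wqi) / sizeof(uint32_t) - 1;

        wqi.header = TYPE_INORDER | (len << LEN_SHIFT) |
                     (ring_id << TARGET_SHIFT);
        wqi.context_desc = (uint32_t)ctx_desc;  /* GuC wants the low word */
        wqi.ring_tail = (tail_bytes >> 3) << RING_TAIL_SHIFT; /* in qwords */
        wqi.fence_id = 0;
        return wqi;
}

int main(void)
{
        struct wq_item w = pack_wqi(0xdeadbeefcafef00dULL, 1, 0x120);

        printf("header %#x desc %#x tail %u\n", (unsigned)w.header,
               (unsigned)w.context_desc, (unsigned)w.ring_tail);
        return 0;
}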
556 | #define CTX_RING_BUFFER_START 0x08 | ||
557 | |||
558 | /* Update the ringbuffer pointer in a saved context image */ | ||
559 | static void lr_context_update(struct drm_i915_gem_request *rq) | ||
560 | { | ||
561 | enum intel_ring_id ring_id = rq->ring->id; | ||
562 | struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state; | ||
563 | struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj; | ||
564 | struct page *page; | ||
565 | uint32_t *reg_state; | ||
566 | |||
567 | BUG_ON(!ctx_obj); | ||
568 | WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); | ||
569 | WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); | ||
570 | |||
571 | page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); | ||
572 | reg_state = kmap_atomic(page); | ||
573 | |||
574 | reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj); | ||
575 | |||
576 | kunmap_atomic(reg_state); | ||
577 | } | ||
578 | |||
579 | /** | ||
580 | * i915_guc_submit() - Submit commands through GuC | ||
581 | * @client: the guc client where commands will go through | ||
582 | * @rq: the request containing the commands to be submitted | ||
583 | * (the engine and LRC are taken from the request itself) | ||
584 | * | ||
585 | * Return: 0 if successful | ||
586 | */ | ||
587 | int i915_guc_submit(struct i915_guc_client *client, | ||
588 | struct drm_i915_gem_request *rq) | ||
589 | { | ||
590 | struct intel_guc *guc = client->guc; | ||
591 | enum intel_ring_id ring_id = rq->ring->id; | ||
592 | unsigned long flags; | ||
593 | int q_ret, b_ret; | ||
594 | |||
595 | /* Need this because of the deferred pin ctx and ring */ | ||
596 | /* Shall we move this right after ring is pinned? */ | ||
597 | lr_context_update(rq); | ||
598 | |||
599 | spin_lock_irqsave(&client->wq_lock, flags); | ||
600 | |||
601 | q_ret = guc_add_workqueue_item(client, rq); | ||
602 | if (q_ret == 0) | ||
603 | b_ret = guc_ring_doorbell(client); | ||
604 | |||
605 | client->submissions[ring_id] += 1; | ||
606 | if (q_ret) { | ||
607 | client->q_fail += 1; | ||
608 | client->retcode = q_ret; | ||
609 | } else if (b_ret) { | ||
610 | client->b_fail += 1; | ||
611 | client->retcode = q_ret = b_ret; | ||
612 | } else { | ||
613 | client->retcode = 0; | ||
614 | } | ||
615 | spin_unlock_irqrestore(&client->wq_lock, flags); | ||
616 | |||
617 | spin_lock(&guc->host2guc_lock); | ||
618 | guc->submissions[ring_id] += 1; | ||
619 | guc->last_seqno[ring_id] = rq->seqno; | ||
620 | spin_unlock(&guc->host2guc_lock); | ||
621 | |||
622 | return q_ret; | ||
623 | } | ||
624 | |||
625 | /* | ||
626 | * Everything below here is concerned with setup & teardown, and is | ||
627 | * therefore not part of the somewhat time-critical batch-submission | ||
628 | * path of i915_guc_submit() above. | ||
629 | */ | ||
630 | |||
631 | /** | ||
632 | * gem_allocate_guc_obj() - Allocate gem object for GuC usage | ||
633 | * @dev: drm device | ||
634 | * @size: size of object | ||
635 | * | ||
636 | * This is a wrapper to create a gem obj. In order to use it inside GuC, the | ||
637 | * object needs to stay pinned for its lifetime. Also we must pin it to GTT | ||
638 | * space above GUC_WOPCM_TOP, because [0, GUC_WOPCM_TOP) is reserved in the GuC. | ||
639 | * | ||
640 | * Return: A drm_i915_gem_object if successful, otherwise NULL. | ||
641 | */ | ||
642 | static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev, | ||
643 | u32 size) | ||
644 | { | ||
645 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
646 | struct drm_i915_gem_object *obj; | ||
647 | |||
648 | obj = i915_gem_alloc_object(dev, size); | ||
649 | if (!obj) | ||
650 | return NULL; | ||
651 | |||
652 | if (i915_gem_object_get_pages(obj)) { | ||
653 | drm_gem_object_unreference(&obj->base); | ||
654 | return NULL; | ||
655 | } | ||
656 | |||
657 | if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, | ||
658 | PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) { | ||
659 | drm_gem_object_unreference(&obj->base); | ||
660 | return NULL; | ||
661 | } | ||
662 | |||
663 | /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */ | ||
664 | I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); | ||
665 | |||
666 | return obj; | ||
667 | } | ||
668 | |||
669 | /** | ||
670 | * gem_release_guc_obj() - Release gem object allocated for GuC usage | ||
671 | * @obj: gem obj to be released | ||
672 | */ | ||
673 | static void gem_release_guc_obj(struct drm_i915_gem_object *obj) | ||
674 | { | ||
675 | if (!obj) | ||
676 | return; | ||
677 | |||
678 | if (i915_gem_obj_is_pinned(obj)) | ||
679 | i915_gem_object_ggtt_unpin(obj); | ||
680 | |||
681 | drm_gem_object_unreference(&obj->base); | ||
682 | } | ||
683 | |||
684 | static void guc_client_free(struct drm_device *dev, | ||
685 | struct i915_guc_client *client) | ||
686 | { | ||
687 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
688 | struct intel_guc *guc = &dev_priv->guc; | ||
689 | |||
690 | if (!client) | ||
691 | return; | ||
692 | |||
693 | if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) { | ||
694 | /* | ||
695 | * First disable the doorbell, then tell the GuC we've | ||
696 | * finished with it, finally deallocate it in our bitmap | ||
697 | */ | ||
698 | guc_disable_doorbell(guc, client); | ||
699 | host2guc_release_doorbell(guc, client); | ||
700 | release_doorbell(guc, client->doorbell_id); | ||
701 | } | ||
702 | |||
703 | /* | ||
704 | * XXX: wait for any outstanding submissions before freeing memory. | ||
705 | * Be sure to drop any locks | ||
706 | */ | ||
707 | |||
708 | gem_release_guc_obj(client->client_obj); | ||
709 | |||
710 | if (client->ctx_index != GUC_INVALID_CTX_ID) { | ||
711 | guc_fini_ctx_desc(guc, client); | ||
712 | ida_simple_remove(&guc->ctx_ids, client->ctx_index); | ||
713 | } | ||
714 | |||
715 | kfree(client); | ||
716 | } | ||
717 | |||
718 | /** | ||
719 | * guc_client_alloc() - Allocate an i915_guc_client | ||
720 | * @dev: drm device | ||
721 | * @priority: one of the four priority levels _CRITICAL, _HIGH, _NORMAL and _LOW. | ||
722 | * The kernel client that replaces ExecList submission is created with | ||
723 | * NORMAL priority. The priority of a scheduler client can be HIGH, | ||
724 | * while a preemption context can use CRITICAL. | ||
725 | * @ctx: the context that owns the client (we use the default render context) | ||
726 | * | ||
727 | * Return: An i915_guc_client object on success, otherwise NULL. | ||
728 | */ | ||
729 | static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, | ||
730 | uint32_t priority, | ||
731 | struct intel_context *ctx) | ||
732 | { | ||
733 | struct i915_guc_client *client; | ||
734 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
735 | struct intel_guc *guc = &dev_priv->guc; | ||
736 | struct drm_i915_gem_object *obj; | ||
737 | |||
738 | client = kzalloc(sizeof(*client), GFP_KERNEL); | ||
739 | if (!client) | ||
740 | return NULL; | ||
741 | |||
742 | client->doorbell_id = GUC_INVALID_DOORBELL_ID; | ||
743 | client->priority = priority; | ||
744 | client->owner = ctx; | ||
745 | client->guc = guc; | ||
746 | |||
747 | client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0, | ||
748 | GUC_MAX_GPU_CONTEXTS, GFP_KERNEL); | ||
749 | if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) { | ||
750 | client->ctx_index = GUC_INVALID_CTX_ID; | ||
751 | goto err; | ||
752 | } | ||
753 | |||
754 | /* The first page is doorbell/proc_desc. The two following pages are the wq. */ | ||
755 | obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE); | ||
756 | if (!obj) | ||
757 | goto err; | ||
758 | |||
759 | client->client_obj = obj; | ||
760 | client->wq_offset = GUC_DB_SIZE; | ||
761 | client->wq_size = GUC_WQ_SIZE; | ||
762 | spin_lock_init(&client->wq_lock); | ||
763 | |||
764 | client->doorbell_offset = select_doorbell_cacheline(guc); | ||
765 | |||
766 | /* | ||
767 | * Since the doorbell only requires a single cacheline, we can save | ||
768 | * space by putting the application process descriptor in the same | ||
769 | * page. Use the half of the page that doesn't include the doorbell. | ||
770 | */ | ||
771 | if (client->doorbell_offset >= (GUC_DB_SIZE / 2)) | ||
772 | client->proc_desc_offset = 0; | ||
773 | else | ||
774 | client->proc_desc_offset = (GUC_DB_SIZE / 2); | ||
775 | |||
776 | client->doorbell_id = assign_doorbell(guc, client->priority); | ||
777 | if (client->doorbell_id == GUC_INVALID_DOORBELL_ID) | ||
778 | /* XXX: evict a doorbell instead */ | ||
779 | goto err; | ||
780 | |||
781 | guc_init_proc_desc(guc, client); | ||
782 | guc_init_ctx_desc(guc, client); | ||
783 | guc_init_doorbell(guc, client); | ||
784 | |||
785 | /* XXX: Any cache flushes needed? General domain mgmt calls? */ | ||
786 | |||
787 | if (host2guc_allocate_doorbell(guc, client)) | ||
788 | goto err; | ||
789 | |||
790 | DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n", | ||
791 | priority, client, client->ctx_index, client->doorbell_id); | ||
792 | |||
793 | return client; | ||
794 | |||
795 | err: | ||
796 | DRM_ERROR("FAILED to create priority %u GuC client!\n", priority); | ||
797 | |||
798 | guc_client_free(dev, client); | ||
799 | return NULL; | ||
800 | } | ||
801 | |||
802 | static void guc_create_log(struct intel_guc *guc) | ||
803 | { | ||
804 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
805 | struct drm_i915_gem_object *obj; | ||
806 | unsigned long offset; | ||
807 | uint32_t size, flags; | ||
808 | |||
809 | if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN) | ||
810 | return; | ||
811 | |||
812 | if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX) | ||
813 | i915.guc_log_level = GUC_LOG_VERBOSITY_MAX; | ||
814 | |||
815 | /* The first page is used to save the log buffer state. Allocate one | ||
816 | * extra page per log region in case of overlap */ | ||
817 | size = (1 + GUC_LOG_DPC_PAGES + 1 + | ||
818 | GUC_LOG_ISR_PAGES + 1 + | ||
819 | GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT; | ||
820 | |||
821 | obj = guc->log_obj; | ||
822 | if (!obj) { | ||
823 | obj = gem_allocate_guc_obj(dev_priv->dev, size); | ||
824 | if (!obj) { | ||
825 | /* logging will be off */ | ||
826 | i915.guc_log_level = -1; | ||
827 | return; | ||
828 | } | ||
829 | |||
830 | guc->log_obj = obj; | ||
831 | } | ||
832 | |||
833 | /* each allocated unit is a page */ | ||
834 | flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL | | ||
835 | (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) | | ||
836 | (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) | | ||
837 | (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT); | ||
838 | |||
839 | offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */ | ||
840 | guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags; | ||
841 | } | ||
842 | |||
843 | /* | ||
844 | * Set up the memory resources to be shared with the GuC. At this point, | ||
845 | * we require just one object that can be mapped through the GGTT. | ||
846 | */ | ||
847 | int i915_guc_submission_init(struct drm_device *dev) | ||
848 | { | ||
849 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
850 | const size_t ctxsize = sizeof(struct guc_context_desc); | ||
851 | const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize; | ||
852 | const size_t gemsize = round_up(poolsize, PAGE_SIZE); | ||
853 | struct intel_guc *guc = &dev_priv->guc; | ||
854 | |||
855 | if (!i915.enable_guc_submission) | ||
856 | return 0; /* not enabled */ | ||
857 | |||
858 | if (guc->ctx_pool_obj) | ||
859 | return 0; /* already allocated */ | ||
860 | |||
861 | guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize); | ||
862 | if (!guc->ctx_pool_obj) | ||
863 | return -ENOMEM; | ||
864 | |||
865 | spin_lock_init(&dev_priv->guc.host2guc_lock); | ||
866 | |||
867 | ida_init(&guc->ctx_ids); | ||
868 | |||
869 | guc_create_log(guc); | ||
870 | |||
871 | return 0; | ||
872 | } | ||
873 | |||
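The context pool is sized for GUC_MAX_GPU_CONTEXTS descriptors and rounded up to whole pages. A sketch of that sizing arithmetic; the descriptor size below is a made-up placeholder, only the rounding mirrors the code:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SZ 4096u
#define MAX_CONTEXTS 1024u      /* pool of 1024 entries, per the DOC comment */

/* round_up for power-of-two alignment, as in the kernel macro */
static size_t round_up_pow2(size_t v, size_t a)
{
        return (v + a - 1) & ~(a - 1);
}

int main(void)
{
        size_t ctxsize = 64;    /* placeholder for sizeof(guc_context_desc) */
        size_t poolsize = MAX_CONTEXTS * ctxsize;
        size_t gemsize = round_up_pow2(poolsize, PAGE_SZ);

        printf("pool %zu bytes -> gem object %zu bytes (%zu pages)\n",
               poolsize, gemsize, gemsize / PAGE_SZ);
        return 0;
}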
874 | int i915_guc_submission_enable(struct drm_device *dev) | ||
875 | { | ||
876 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
877 | struct intel_guc *guc = &dev_priv->guc; | ||
878 | struct intel_context *ctx = dev_priv->ring[RCS].default_context; | ||
879 | struct i915_guc_client *client; | ||
880 | |||
881 | /* client for execbuf submission */ | ||
882 | client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx); | ||
883 | if (!client) { | ||
884 | DRM_ERROR("Failed to create execbuf guc_client\n"); | ||
885 | return -ENOMEM; | ||
886 | } | ||
887 | |||
888 | guc->execbuf_client = client; | ||
889 | |||
890 | host2guc_sample_forcewake(guc, client); | ||
891 | |||
892 | return 0; | ||
893 | } | ||
894 | |||
895 | void i915_guc_submission_disable(struct drm_device *dev) | ||
896 | { | ||
897 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
898 | struct intel_guc *guc = &dev_priv->guc; | ||
899 | |||
900 | guc_client_free(dev, guc->execbuf_client); | ||
901 | guc->execbuf_client = NULL; | ||
902 | } | ||
903 | |||
904 | void i915_guc_submission_fini(struct drm_device *dev) | ||
905 | { | ||
906 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
907 | struct intel_guc *guc = &dev_priv->guc; | ||
908 | |||
909 | gem_release_guc_obj(dev_priv->guc.log_obj); | ||
910 | guc->log_obj = NULL; | ||
911 | |||
912 | if (guc->ctx_pool_obj) | ||
913 | ida_destroy(&guc->ctx_ids); | ||
914 | gem_release_guc_obj(guc->ctx_pool_obj); | ||
915 | guc->ctx_pool_obj = NULL; | ||
916 | } | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 88d064e80783..77740cd0beab 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -45,6 +45,18 @@ | |||
45 | * and related files, but that will be described in separate chapters. | 45 | * and related files, but that will be described in separate chapters. |
46 | */ | 46 | */ |
47 | 47 | ||
48 | static const u32 hpd_ilk[HPD_NUM_PINS] = { | ||
49 | [HPD_PORT_A] = DE_DP_A_HOTPLUG, | ||
50 | }; | ||
51 | |||
52 | static const u32 hpd_ivb[HPD_NUM_PINS] = { | ||
53 | [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, | ||
54 | }; | ||
55 | |||
56 | static const u32 hpd_bdw[HPD_NUM_PINS] = { | ||
57 | [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, | ||
58 | }; | ||
59 | |||
48 | static const u32 hpd_ibx[HPD_NUM_PINS] = { | 60 | static const u32 hpd_ibx[HPD_NUM_PINS] = { |
49 | [HPD_CRT] = SDE_CRT_HOTPLUG, | 61 | [HPD_CRT] = SDE_CRT_HOTPLUG, |
50 | [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, | 62 | [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, |
@@ -62,6 +74,7 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = { | |||
62 | }; | 74 | }; |
63 | 75 | ||
64 | static const u32 hpd_spt[HPD_NUM_PINS] = { | 76 | static const u32 hpd_spt[HPD_NUM_PINS] = { |
77 | [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, | ||
65 | [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, | 78 | [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, |
66 | [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, | 79 | [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, |
67 | [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, | 80 | [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, |
@@ -97,6 +110,7 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = { | |||
97 | 110 | ||
98 | /* BXT hpd list */ | 111 | /* BXT hpd list */ |
99 | static const u32 hpd_bxt[HPD_NUM_PINS] = { | 112 | static const u32 hpd_bxt[HPD_NUM_PINS] = { |
113 | [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, | ||
100 | [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, | 114 | [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, |
101 | [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC | 115 | [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC |
102 | }; | 116 | }; |
@@ -153,35 +167,46 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = { | |||
153 | 167 | ||
154 | static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); | 168 | static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); |
155 | 169 | ||
156 | /* For display hotplug interrupt */ | 170 | /** |
157 | void | 171 | * ilk_update_display_irq - update DEIMR |
158 | ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) | 172 | * @dev_priv: driver private |
173 | * @interrupt_mask: mask of interrupt bits to update | ||
174 | * @enabled_irq_mask: mask of interrupt bits to enable | ||
175 | */ | ||
176 | static void ilk_update_display_irq(struct drm_i915_private *dev_priv, | ||
177 | uint32_t interrupt_mask, | ||
178 | uint32_t enabled_irq_mask) | ||
159 | { | 179 | { |
180 | uint32_t new_val; | ||
181 | |||
160 | assert_spin_locked(&dev_priv->irq_lock); | 182 | assert_spin_locked(&dev_priv->irq_lock); |
161 | 183 | ||
184 | WARN_ON(enabled_irq_mask & ~interrupt_mask); | ||
185 | |||
162 | if (WARN_ON(!intel_irqs_enabled(dev_priv))) | 186 | if (WARN_ON(!intel_irqs_enabled(dev_priv))) |
163 | return; | 187 | return; |
164 | 188 | ||
165 | if ((dev_priv->irq_mask & mask) != 0) { | 189 | new_val = dev_priv->irq_mask; |
166 | dev_priv->irq_mask &= ~mask; | 190 | new_val &= ~interrupt_mask; |
191 | new_val |= (~enabled_irq_mask & interrupt_mask); | ||
192 | |||
193 | if (new_val != dev_priv->irq_mask) { | ||
194 | dev_priv->irq_mask = new_val; | ||
167 | I915_WRITE(DEIMR, dev_priv->irq_mask); | 195 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
168 | POSTING_READ(DEIMR); | 196 | POSTING_READ(DEIMR); |
169 | } | 197 | } |
170 | } | 198 | } |
171 | 199 | ||
172 | void | 200 | void |
173 | ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) | 201 | ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) |
174 | { | 202 | { |
175 | assert_spin_locked(&dev_priv->irq_lock); | 203 | ilk_update_display_irq(dev_priv, mask, mask); |
176 | 204 | } | |
177 | if (WARN_ON(!intel_irqs_enabled(dev_priv))) | ||
178 | return; | ||
179 | 205 | ||
180 | if ((dev_priv->irq_mask & mask) != mask) { | 206 | void |
181 | dev_priv->irq_mask |= mask; | 207 | ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) |
182 | I915_WRITE(DEIMR, dev_priv->irq_mask); | 208 | { |
183 | POSTING_READ(DEIMR); | 209 | ilk_update_display_irq(dev_priv, mask, 0); |
184 | } | ||
185 | } | 210 | } |
186 | 211 | ||
187 | /** | 212 | /** |
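ilk_update_display_irq() folds enable and disable into one computation: bits named in interrupt_mask are updated, and a bit ends up masked (set in DEIMR) exactly when it is absent from enabled_irq_mask. A sketch of the mask math:

#include <stdint.h>
#include <stdio.h>

/* IMR semantics: a set bit masks (disables) the interrupt. */
static uint32_t update_imr(uint32_t imr, uint32_t interrupt_mask,
                           uint32_t enabled_irq_mask)
{
        uint32_t new_val = imr;

        new_val &= ~interrupt_mask;                     /* drop the bits we own */
        new_val |= ~enabled_irq_mask & interrupt_mask;  /* re-mask the disabled */
        return new_val;
}

int main(void)
{
        uint32_t imr = 0xff;

        /* enable bit 0, i.e. ilk_update_display_irq(dev_priv, 0x1, 0x1) */
        imr = update_imr(imr, 0x1, 0x1);
        printf("after enable:  %#x\n", (unsigned)imr);  /* 0xfe */
        /* disable bit 1, i.e. ilk_update_display_irq(dev_priv, 0x2, 0) */
        imr = update_imr(imr, 0x2, 0x0);
        printf("after disable: %#x\n", (unsigned)imr);  /* 0xfe */
        return 0;
}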
@@ -351,6 +376,38 @@ void gen6_disable_rps_interrupts(struct drm_device *dev) | |||
351 | } | 376 | } |
352 | 377 | ||
353 | /** | 378 | /** |
379 | * bdw_update_port_irq - update DE port interrupt | ||
380 | * @dev_priv: driver private | ||
381 | * @interrupt_mask: mask of interrupt bits to update | ||
382 | * @enabled_irq_mask: mask of interrupt bits to enable | ||
383 | */ | ||
384 | static void bdw_update_port_irq(struct drm_i915_private *dev_priv, | ||
385 | uint32_t interrupt_mask, | ||
386 | uint32_t enabled_irq_mask) | ||
387 | { | ||
388 | uint32_t new_val; | ||
389 | uint32_t old_val; | ||
390 | |||
391 | assert_spin_locked(&dev_priv->irq_lock); | ||
392 | |||
393 | WARN_ON(enabled_irq_mask & ~interrupt_mask); | ||
394 | |||
395 | if (WARN_ON(!intel_irqs_enabled(dev_priv))) | ||
396 | return; | ||
397 | |||
398 | old_val = I915_READ(GEN8_DE_PORT_IMR); | ||
399 | |||
400 | new_val = old_val; | ||
401 | new_val &= ~interrupt_mask; | ||
402 | new_val |= (~enabled_irq_mask & interrupt_mask); | ||
403 | |||
404 | if (new_val != old_val) { | ||
405 | I915_WRITE(GEN8_DE_PORT_IMR, new_val); | ||
406 | POSTING_READ(GEN8_DE_PORT_IMR); | ||
407 | } | ||
408 | } | ||
409 | |||
410 | /** | ||
354 | * ibx_display_interrupt_update - update SDEIMR | 411 | * ibx_display_interrupt_update - update SDEIMR |
355 | * @dev_priv: driver private | 412 | * @dev_priv: driver private |
356 | * @interrupt_mask: mask of interrupt bits to update | 413 | * @interrupt_mask: mask of interrupt bits to update |
@@ -1263,7 +1320,31 @@ static bool bxt_port_hotplug_long_detect(enum port port, u32 val) | |||
1263 | { | 1320 | { |
1264 | switch (port) { | 1321 | switch (port) { |
1265 | case PORT_A: | 1322 | case PORT_A: |
1266 | return val & BXT_PORTA_HOTPLUG_LONG_DETECT; | 1323 | return val & PORTA_HOTPLUG_LONG_DETECT; |
1324 | case PORT_B: | ||
1325 | return val & PORTB_HOTPLUG_LONG_DETECT; | ||
1326 | case PORT_C: | ||
1327 | return val & PORTC_HOTPLUG_LONG_DETECT; | ||
1328 | default: | ||
1329 | return false; | ||
1330 | } | ||
1331 | } | ||
1332 | |||
1333 | static bool spt_port_hotplug2_long_detect(enum port port, u32 val) | ||
1334 | { | ||
1335 | switch (port) { | ||
1336 | case PORT_E: | ||
1337 | return val & PORTE_HOTPLUG_LONG_DETECT; | ||
1338 | default: | ||
1339 | return false; | ||
1340 | } | ||
1341 | } | ||
1342 | |||
1343 | static bool spt_port_hotplug_long_detect(enum port port, u32 val) | ||
1344 | { | ||
1345 | switch (port) { | ||
1346 | case PORT_A: | ||
1347 | return val & PORTA_HOTPLUG_LONG_DETECT; | ||
1267 | case PORT_B: | 1348 | case PORT_B: |
1268 | return val & PORTB_HOTPLUG_LONG_DETECT; | 1349 | return val & PORTB_HOTPLUG_LONG_DETECT; |
1269 | case PORT_C: | 1350 | case PORT_C: |
@@ -1275,6 +1356,16 @@ static bool bxt_port_hotplug_long_detect(enum port port, u32 val) | |||
1275 | } | 1356 | } |
1276 | } | 1357 | } |
1277 | 1358 | ||
1359 | static bool ilk_port_hotplug_long_detect(enum port port, u32 val) | ||
1360 | { | ||
1361 | switch (port) { | ||
1362 | case PORT_A: | ||
1363 | return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; | ||
1364 | default: | ||
1365 | return false; | ||
1366 | } | ||
1367 | } | ||
1368 | |||
1278 | static bool pch_port_hotplug_long_detect(enum port port, u32 val) | 1369 | static bool pch_port_hotplug_long_detect(enum port port, u32 val) |
1279 | { | 1370 | { |
1280 | switch (port) { | 1371 | switch (port) { |
@@ -1284,8 +1375,6 @@ static bool pch_port_hotplug_long_detect(enum port port, u32 val) | |||
1284 | return val & PORTC_HOTPLUG_LONG_DETECT; | 1375 | return val & PORTC_HOTPLUG_LONG_DETECT; |
1285 | case PORT_D: | 1376 | case PORT_D: |
1286 | return val & PORTD_HOTPLUG_LONG_DETECT; | 1377 | return val & PORTD_HOTPLUG_LONG_DETECT; |
1287 | case PORT_E: | ||
1288 | return val & PORTE_HOTPLUG_LONG_DETECT; | ||
1289 | default: | 1378 | default: |
1290 | return false; | 1379 | return false; |
1291 | } | 1380 | } |
@@ -1305,7 +1394,13 @@ static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) | |||
1305 | } | 1394 | } |
1306 | } | 1395 | } |
1307 | 1396 | ||
1308 | /* Get a bit mask of pins that have triggered, and which ones may be long. */ | 1397 | /* |
1398 | * Get a bit mask of pins that have triggered, and which ones may be long. | ||
1399 | * This can be called multiple times with the same masks to accumulate | ||
1400 | * hotplug detection results from several registers. | ||
1401 | * | ||
1402 | * Note that the caller is expected to zero out the masks initially. | ||
1403 | */ | ||
1309 | static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, | 1404 | static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, |
1310 | u32 hotplug_trigger, u32 dig_hotplug_reg, | 1405 | u32 hotplug_trigger, u32 dig_hotplug_reg, |
1311 | const u32 hpd[HPD_NUM_PINS], | 1406 | const u32 hpd[HPD_NUM_PINS], |
@@ -1314,9 +1409,6 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, | |||
1314 | enum port port; | 1409 | enum port port; |
1315 | int i; | 1410 | int i; |
1316 | 1411 | ||
1317 | *pin_mask = 0; | ||
1318 | *long_mask = 0; | ||
1319 | |||
1320 | for_each_hpd_pin(i) { | 1412 | for_each_hpd_pin(i) { |
1321 | if ((hpd[i] & hotplug_trigger) == 0) | 1413 | if ((hpd[i] & hotplug_trigger) == 0) |
1322 | continue; | 1414 | continue; |
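Because intel_get_hpd_pins() now accumulates into caller-zeroed masks, a handler can merge triggers from two hotplug registers and dispatch once, as spt_irq_handler() does below. A sketch of the accumulate-then-dispatch pattern, with the pin and long-pulse detection reduced to trivial stubs:

#include <stdint.h>
#include <stdio.h>

/* Stub: OR every triggered pin into the caller's masks (no zeroing here). */
static void get_hpd_pins(uint32_t *pin_mask, uint32_t *long_mask,
                         uint32_t trigger, uint32_t long_bits)
{
        *pin_mask |= trigger;
        *long_mask |= trigger & long_bits;
}

int main(void)
{
        /* The caller zeroes the masks once, then accumulates per register. */
        uint32_t pin_mask = 0, long_mask = 0;

        get_hpd_pins(&pin_mask, &long_mask, 0x3, 0x1);  /* first register  */
        get_hpd_pins(&pin_mask, &long_mask, 0x4, 0x4);  /* second register */

        if (pin_mask)
                printf("dispatch: pins %#x, long %#x\n",
                       (unsigned)pin_mask, (unsigned)long_mask);
        return 0;
}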
@@ -1557,7 +1649,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev) | |||
1557 | { | 1649 | { |
1558 | struct drm_i915_private *dev_priv = dev->dev_private; | 1650 | struct drm_i915_private *dev_priv = dev->dev_private; |
1559 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 1651 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
1560 | u32 pin_mask, long_mask; | 1652 | u32 pin_mask = 0, long_mask = 0; |
1561 | 1653 | ||
1562 | if (!hotplug_status) | 1654 | if (!hotplug_status) |
1563 | return; | 1655 | return; |
@@ -1572,20 +1664,26 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev) | |||
1572 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { | 1664 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { |
1573 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; | 1665 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; |
1574 | 1666 | ||
1575 | intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, | 1667 | if (hotplug_trigger) { |
1576 | hotplug_trigger, hpd_status_g4x, | 1668 | intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, |
1577 | i9xx_port_hotplug_long_detect); | 1669 | hotplug_trigger, hpd_status_g4x, |
1578 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | 1670 | i9xx_port_hotplug_long_detect); |
1671 | |||
1672 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | ||
1673 | } | ||
1579 | 1674 | ||
1580 | if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) | 1675 | if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) |
1581 | dp_aux_irq_handler(dev); | 1676 | dp_aux_irq_handler(dev); |
1582 | } else { | 1677 | } else { |
1583 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; | 1678 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; |
1584 | 1679 | ||
1585 | intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, | 1680 | if (hotplug_trigger) { |
1586 | hotplug_trigger, hpd_status_i915, | 1681 | intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, |
1587 | i9xx_port_hotplug_long_detect); | 1682 | hotplug_trigger, hpd_status_i915, |
1588 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | 1683 | i9xx_port_hotplug_long_detect); |
1684 | |||
1685 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | ||
1686 | } | ||
1589 | } | 1687 | } |
1590 | } | 1688 | } |
1591 | 1689 | ||
@@ -1679,23 +1777,30 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) | |||
1679 | return ret; | 1777 | return ret; |
1680 | } | 1778 | } |
1681 | 1779 | ||
1780 | static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, | ||
1781 | const u32 hpd[HPD_NUM_PINS]) | ||
1782 | { | ||
1783 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
1784 | u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; | ||
1785 | |||
1786 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); | ||
1787 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | ||
1788 | |||
1789 | intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, | ||
1790 | dig_hotplug_reg, hpd, | ||
1791 | pch_port_hotplug_long_detect); | ||
1792 | |||
1793 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | ||
1794 | } | ||
1795 | |||
1682 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) | 1796 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) |
1683 | { | 1797 | { |
1684 | struct drm_i915_private *dev_priv = dev->dev_private; | 1798 | struct drm_i915_private *dev_priv = dev->dev_private; |
1685 | int pipe; | 1799 | int pipe; |
1686 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; | 1800 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; |
1687 | 1801 | ||
1688 | if (hotplug_trigger) { | 1802 | if (hotplug_trigger) |
1689 | u32 dig_hotplug_reg, pin_mask, long_mask; | 1803 | ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); |
1690 | |||
1691 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); | ||
1692 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | ||
1693 | |||
1694 | intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, | ||
1695 | dig_hotplug_reg, hpd_ibx, | ||
1696 | pch_port_hotplug_long_detect); | ||
1697 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | ||
1698 | } | ||
1699 | 1804 | ||
1700 | if (pch_iir & SDE_AUDIO_POWER_MASK) { | 1805 | if (pch_iir & SDE_AUDIO_POWER_MASK) { |
1701 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> | 1806 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> |
@@ -1786,38 +1891,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) | |||
1786 | { | 1891 | { |
1787 | struct drm_i915_private *dev_priv = dev->dev_private; | 1892 | struct drm_i915_private *dev_priv = dev->dev_private; |
1788 | int pipe; | 1893 | int pipe; |
1789 | u32 hotplug_trigger; | 1894 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; |
1790 | 1895 | ||
1791 | if (HAS_PCH_SPT(dev)) | 1896 | if (hotplug_trigger) |
1792 | hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT; | 1897 | ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); |
1793 | else | ||
1794 | hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; | ||
1795 | |||
1796 | if (hotplug_trigger) { | ||
1797 | u32 dig_hotplug_reg, pin_mask, long_mask; | ||
1798 | |||
1799 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); | ||
1800 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | ||
1801 | |||
1802 | if (HAS_PCH_SPT(dev)) { | ||
1803 | intel_get_hpd_pins(&pin_mask, &long_mask, | ||
1804 | hotplug_trigger, | ||
1805 | dig_hotplug_reg, hpd_spt, | ||
1806 | pch_port_hotplug_long_detect); | ||
1807 | |||
1808 | /* detect PORTE HP event */ | ||
1809 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); | ||
1810 | if (pch_port_hotplug_long_detect(PORT_E, | ||
1811 | dig_hotplug_reg)) | ||
1812 | long_mask |= 1 << HPD_PORT_E; | ||
1813 | } else | ||
1814 | intel_get_hpd_pins(&pin_mask, &long_mask, | ||
1815 | hotplug_trigger, | ||
1816 | dig_hotplug_reg, hpd_cpt, | ||
1817 | pch_port_hotplug_long_detect); | ||
1818 | |||
1819 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | ||
1820 | } | ||
1821 | 1898 | ||
1822 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { | 1899 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { |
1823 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> | 1900 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> |
@@ -1848,10 +1925,67 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) | |||
1848 | cpt_serr_int_handler(dev); | 1925 | cpt_serr_int_handler(dev); |
1849 | } | 1926 | } |
1850 | 1927 | ||
1928 | static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) | ||
1929 | { | ||
1930 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1931 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & | ||
1932 | ~SDE_PORTE_HOTPLUG_SPT; | ||
1933 | u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; | ||
1934 | u32 pin_mask = 0, long_mask = 0; | ||
1935 | |||
1936 | if (hotplug_trigger) { | ||
1937 | u32 dig_hotplug_reg; | ||
1938 | |||
1939 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); | ||
1940 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | ||
1941 | |||
1942 | intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, | ||
1943 | dig_hotplug_reg, hpd_spt, | ||
1944 | spt_port_hotplug_long_detect); | ||
1945 | } | ||
1946 | |||
1947 | if (hotplug2_trigger) { | ||
1948 | u32 dig_hotplug_reg; | ||
1949 | |||
1950 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); | ||
1951 | I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); | ||
1952 | |||
1953 | intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, | ||
1954 | dig_hotplug_reg, hpd_spt, | ||
1955 | spt_port_hotplug2_long_detect); | ||
1956 | } | ||
1957 | |||
1958 | if (pin_mask) | ||
1959 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | ||
1960 | |||
1961 | if (pch_iir & SDE_GMBUS_CPT) | ||
1962 | gmbus_irq_handler(dev); | ||
1963 | } | ||
1964 | |||
1965 | static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, | ||
1966 | const u32 hpd[HPD_NUM_PINS]) | ||
1967 | { | ||
1968 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
1969 | u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; | ||
1970 | |||
1971 | dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); | ||
1972 | I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); | ||
1973 | |||
1974 | intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, | ||
1975 | dig_hotplug_reg, hpd, | ||
1976 | ilk_port_hotplug_long_detect); | ||
1977 | |||
1978 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | ||
1979 | } | ||
1980 | |||
1851 | static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) | 1981 | static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) |
1852 | { | 1982 | { |
1853 | struct drm_i915_private *dev_priv = dev->dev_private; | 1983 | struct drm_i915_private *dev_priv = dev->dev_private; |
1854 | enum pipe pipe; | 1984 | enum pipe pipe; |
1985 | u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; | ||
1986 | |||
1987 | if (hotplug_trigger) | ||
1988 | ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk); | ||
1855 | 1989 | ||
1856 | if (de_iir & DE_AUX_CHANNEL_A) | 1990 | if (de_iir & DE_AUX_CHANNEL_A) |
1857 | dp_aux_irq_handler(dev); | 1991 | dp_aux_irq_handler(dev); |
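SunrisePoint splits hotplug status across two registers, so the spt_irq_handler() added above accumulates both decodes into a single pin_mask/long_mask pair and dispatches once. A condensed sketch of that flow, with the declarations shortened:

```c
/* Condensed shape of spt_irq_handler(): two trigger sets, two ack
 * registers, one dispatch. */
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & ~SDE_PORTE_HOTPLUG_SPT;
u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
u32 pin_mask = 0, long_mask = 0;

if (hotplug_trigger) {		/* ports A-D live in SHOTPLUG_CTL */
	u32 reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, reg);
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   reg, hpd_spt, spt_port_hotplug_long_detect);
}
if (hotplug2_trigger) {		/* port E lives in SHOTPLUG_CTL2 */
	u32 reg = I915_READ(PCH_PORT_HOTPLUG2);
	I915_WRITE(PCH_PORT_HOTPLUG2, reg);
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
			   reg, hpd_spt, spt_port_hotplug2_long_detect);
}
if (pin_mask)
	intel_hpd_irq_handler(dev, pin_mask, long_mask);
```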
@@ -1901,6 +2035,10 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) | |||
1901 | { | 2035 | { |
1902 | struct drm_i915_private *dev_priv = dev->dev_private; | 2036 | struct drm_i915_private *dev_priv = dev->dev_private; |
1903 | enum pipe pipe; | 2037 | enum pipe pipe; |
2038 | u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; | ||
2039 | |||
2040 | if (hotplug_trigger) | ||
2041 | ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb); | ||
1904 | 2042 | ||
1905 | if (de_iir & DE_ERR_INT_IVB) | 2043 | if (de_iir & DE_ERR_INT_IVB) |
1906 | ivb_err_int_handler(dev); | 2044 | ivb_err_int_handler(dev); |
@@ -2013,27 +2151,19 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) | |||
2013 | return ret; | 2151 | return ret; |
2014 | } | 2152 | } |
2015 | 2153 | ||
2016 | static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status) | 2154 | static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, |
2155 | const u32 hpd[HPD_NUM_PINS]) | ||
2017 | { | 2156 | { |
2018 | struct drm_i915_private *dev_priv = dev->dev_private; | 2157 | struct drm_i915_private *dev_priv = to_i915(dev); |
2019 | u32 hp_control, hp_trigger; | 2158 | u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; |
2020 | u32 pin_mask, long_mask; | ||
2021 | |||
2022 | /* Get the status */ | ||
2023 | hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK; | ||
2024 | hp_control = I915_READ(BXT_HOTPLUG_CTL); | ||
2025 | 2159 | ||
2026 | /* Hotplug not enabled ? */ | 2160 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); |
2027 | if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) { | 2161 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); |
2028 | DRM_ERROR("Interrupt when HPD disabled\n"); | ||
2029 | return; | ||
2030 | } | ||
2031 | 2162 | ||
2032 | /* Clear sticky bits in hpd status */ | 2163 | intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, |
2033 | I915_WRITE(BXT_HOTPLUG_CTL, hp_control); | 2164 | dig_hotplug_reg, hpd, |
2165 | bxt_port_hotplug_long_detect); | ||
2034 | 2166 | ||
2035 | intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control, | ||
2036 | hpd_bxt, bxt_port_hotplug_long_detect); | ||
2037 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | 2167 | intel_hpd_irq_handler(dev, pin_mask, long_mask); |
2038 | } | 2168 | } |
2039 | 2169 | ||
@@ -2050,7 +2180,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2050 | if (!intel_irqs_enabled(dev_priv)) | 2180 | if (!intel_irqs_enabled(dev_priv)) |
2051 | return IRQ_NONE; | 2181 | return IRQ_NONE; |
2052 | 2182 | ||
2053 | if (IS_GEN9(dev)) | 2183 | if (INTEL_INFO(dev_priv)->gen >= 9) |
2054 | aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | | 2184 | aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | |
2055 | GEN9_AUX_CHANNEL_D; | 2185 | GEN9_AUX_CHANNEL_D; |
2056 | 2186 | ||
@@ -2083,6 +2213,12 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2083 | tmp = I915_READ(GEN8_DE_PORT_IIR); | 2213 | tmp = I915_READ(GEN8_DE_PORT_IIR); |
2084 | if (tmp) { | 2214 | if (tmp) { |
2085 | bool found = false; | 2215 | bool found = false; |
2216 | u32 hotplug_trigger = 0; | ||
2217 | |||
2218 | if (IS_BROXTON(dev_priv)) | ||
2219 | hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK; | ||
2220 | else if (IS_BROADWELL(dev_priv)) | ||
2221 | hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG; | ||
2086 | 2222 | ||
2087 | I915_WRITE(GEN8_DE_PORT_IIR, tmp); | 2223 | I915_WRITE(GEN8_DE_PORT_IIR, tmp); |
2088 | ret = IRQ_HANDLED; | 2224 | ret = IRQ_HANDLED; |
@@ -2092,8 +2228,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2092 | found = true; | 2228 | found = true; |
2093 | } | 2229 | } |
2094 | 2230 | ||
2095 | if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) { | 2231 | if (hotplug_trigger) { |
2096 | bxt_hpd_handler(dev, tmp); | 2232 | if (IS_BROXTON(dev)) |
2233 | bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt); | ||
2234 | else | ||
2235 | ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw); | ||
2097 | found = true; | 2236 | found = true; |
2098 | } | 2237 | } |
2099 | 2238 | ||
@@ -2124,7 +2263,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2124 | intel_pipe_handle_vblank(dev, pipe)) | 2263 | intel_pipe_handle_vblank(dev, pipe)) |
2125 | intel_check_page_flip(dev, pipe); | 2264 | intel_check_page_flip(dev, pipe); |
2126 | 2265 | ||
2127 | if (IS_GEN9(dev)) | 2266 | if (INTEL_INFO(dev_priv)->gen >= 9) |
2128 | flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; | 2267 | flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; |
2129 | else | 2268 | else |
2130 | flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; | 2269 | flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; |
@@ -2142,7 +2281,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2142 | pipe); | 2281 | pipe); |
2143 | 2282 | ||
2144 | 2283 | ||
2145 | if (IS_GEN9(dev)) | 2284 | if (INTEL_INFO(dev_priv)->gen >= 9) |
2146 | fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; | 2285 | fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; |
2147 | else | 2286 | else |
2148 | fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; | 2287 | fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; |
@@ -2166,7 +2305,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2166 | if (pch_iir) { | 2305 | if (pch_iir) { |
2167 | I915_WRITE(SDEIIR, pch_iir); | 2306 | I915_WRITE(SDEIIR, pch_iir); |
2168 | ret = IRQ_HANDLED; | 2307 | ret = IRQ_HANDLED; |
2169 | cpt_irq_handler(dev, pch_iir); | 2308 | |
2309 | if (HAS_PCH_SPT(dev_priv)) | ||
2310 | spt_irq_handler(dev, pch_iir); | ||
2311 | else | ||
2312 | cpt_irq_handler(dev, pch_iir); | ||
2170 | } else | 2313 | } else |
2171 | DRM_ERROR("The master control interrupt lied (SDE)!\n"); | 2314 | DRM_ERROR("The master control interrupt lied (SDE)!\n"); |
2172 | 2315 | ||
@@ -3026,86 +3169,124 @@ static void cherryview_irq_preinstall(struct drm_device *dev) | |||
3026 | vlv_display_irq_reset(dev_priv); | 3169 | vlv_display_irq_reset(dev_priv); |
3027 | } | 3170 | } |
3028 | 3171 | ||
3172 | static u32 intel_hpd_enabled_irqs(struct drm_device *dev, | ||
3173 | const u32 hpd[HPD_NUM_PINS]) | ||
3174 | { | ||
3175 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
3176 | struct intel_encoder *encoder; | ||
3177 | u32 enabled_irqs = 0; | ||
3178 | |||
3179 | for_each_intel_encoder(dev, encoder) | ||
3180 | if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) | ||
3181 | enabled_irqs |= hpd[encoder->hpd_pin]; | ||
3182 | |||
3183 | return enabled_irqs; | ||
3184 | } | ||
3185 | |||
3029 | static void ibx_hpd_irq_setup(struct drm_device *dev) | 3186 | static void ibx_hpd_irq_setup(struct drm_device *dev) |
3030 | { | 3187 | { |
3031 | struct drm_i915_private *dev_priv = dev->dev_private; | 3188 | struct drm_i915_private *dev_priv = dev->dev_private; |
3032 | struct intel_encoder *intel_encoder; | 3189 | u32 hotplug_irqs, hotplug, enabled_irqs; |
3033 | u32 hotplug_irqs, hotplug, enabled_irqs = 0; | ||
3034 | 3190 | ||
3035 | if (HAS_PCH_IBX(dev)) { | 3191 | if (HAS_PCH_IBX(dev)) { |
3036 | hotplug_irqs = SDE_HOTPLUG_MASK; | 3192 | hotplug_irqs = SDE_HOTPLUG_MASK; |
3037 | for_each_intel_encoder(dev, intel_encoder) | 3193 | enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx); |
3038 | if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) | ||
3039 | enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; | ||
3040 | } else if (HAS_PCH_SPT(dev)) { | ||
3041 | hotplug_irqs = SDE_HOTPLUG_MASK_SPT; | ||
3042 | for_each_intel_encoder(dev, intel_encoder) | ||
3043 | if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) | ||
3044 | enabled_irqs |= hpd_spt[intel_encoder->hpd_pin]; | ||
3045 | } else { | 3194 | } else { |
3046 | hotplug_irqs = SDE_HOTPLUG_MASK_CPT; | 3195 | hotplug_irqs = SDE_HOTPLUG_MASK_CPT; |
3047 | for_each_intel_encoder(dev, intel_encoder) | 3196 | enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt); |
3048 | if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) | ||
3049 | enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; | ||
3050 | } | 3197 | } |
3051 | 3198 | ||
3052 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); | 3199 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); |
3053 | 3200 | ||
3054 | /* | 3201 | /* |
3055 | * Enable digital hotplug on the PCH, and configure the DP short pulse | 3202 | * Enable digital hotplug on the PCH, and configure the DP short pulse |
3056 | * duration to 2ms (which is the minimum in the Display Port spec) | 3203 | * duration to 2ms (which is the minimum in the Display Port spec). |
3057 | * | 3204 | * The pulse duration bits are reserved on LPT+. |
3058 | * This register is the same on all known PCH chips. | ||
3059 | */ | 3205 | */ |
3060 | hotplug = I915_READ(PCH_PORT_HOTPLUG); | 3206 | hotplug = I915_READ(PCH_PORT_HOTPLUG); |
3061 | hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); | 3207 | hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); |
3062 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; | 3208 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; |
3063 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; | 3209 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; |
3064 | hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; | 3210 | hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; |
3211 | /* | ||
3212 | * When CPU and PCH are on the same package, port A | ||
3213 | * HPD must be enabled in both north and south. | ||
3214 | */ | ||
3215 | if (HAS_PCH_LPT_LP(dev)) | ||
3216 | hotplug |= PORTA_HOTPLUG_ENABLE; | ||
3065 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | 3217 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); |
3218 | } | ||
3066 | 3219 | ||
3067 | /* enable SPT PORTE hot plug */ | 3220 | static void spt_hpd_irq_setup(struct drm_device *dev) |
3068 | if (HAS_PCH_SPT(dev)) { | 3221 | { |
3069 | hotplug = I915_READ(PCH_PORT_HOTPLUG2); | 3222 | struct drm_i915_private *dev_priv = dev->dev_private; |
3070 | hotplug |= PORTE_HOTPLUG_ENABLE; | 3223 | u32 hotplug_irqs, hotplug, enabled_irqs; |
3071 | I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); | 3224 | |
3072 | } | 3225 | hotplug_irqs = SDE_HOTPLUG_MASK_SPT; |
3226 | enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt); | ||
3227 | |||
3228 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); | ||
3229 | |||
3230 | /* Enable digital hotplug on the PCH */ | ||
3231 | hotplug = I915_READ(PCH_PORT_HOTPLUG); | ||
3232 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | | ||
3233 | PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE; | ||
3234 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | ||
3235 | |||
3236 | hotplug = I915_READ(PCH_PORT_HOTPLUG2); | ||
3237 | hotplug |= PORTE_HOTPLUG_ENABLE; | ||
3238 | I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); | ||
3073 | } | 3239 | } |
3074 | 3240 | ||
3075 | static void bxt_hpd_irq_setup(struct drm_device *dev) | 3241 | static void ilk_hpd_irq_setup(struct drm_device *dev) |
3076 | { | 3242 | { |
3077 | struct drm_i915_private *dev_priv = dev->dev_private; | 3243 | struct drm_i915_private *dev_priv = dev->dev_private; |
3078 | struct intel_encoder *intel_encoder; | 3244 | u32 hotplug_irqs, hotplug, enabled_irqs; |
3079 | u32 hotplug_port = 0; | 3245 | |
3080 | u32 hotplug_ctrl; | 3246 | if (INTEL_INFO(dev)->gen >= 8) { |
3081 | 3247 | hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; | |
3082 | /* Now, enable HPD */ | 3248 | enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw); |
3083 | for_each_intel_encoder(dev, intel_encoder) { | 3249 | |
3084 | if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state | 3250 | bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); |
3085 | == HPD_ENABLED) | 3251 | } else if (INTEL_INFO(dev)->gen >= 7) { |
3086 | hotplug_port |= hpd_bxt[intel_encoder->hpd_pin]; | 3252 | hotplug_irqs = DE_DP_A_HOTPLUG_IVB; |
3253 | enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb); | ||
3254 | |||
3255 | ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); | ||
3256 | } else { | ||
3257 | hotplug_irqs = DE_DP_A_HOTPLUG; | ||
3258 | enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk); | ||
3259 | |||
3260 | ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); | ||
3087 | } | 3261 | } |
3088 | 3262 | ||
3089 | /* Mask all HPD control bits */ | 3263 | /* |
3090 | hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK; | 3264 | * Enable digital hotplug on the CPU, and configure the DP short pulse |
3265 | * duration to 2ms (which is the minimum in the Display Port spec) | ||
3266 | * The pulse duration bits are reserved on HSW+. | ||
3267 | */ | ||
3268 | hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); | ||
3269 | hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; | ||
3270 | hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; | ||
3271 | I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); | ||
3272 | |||
3273 | ibx_hpd_irq_setup(dev); | ||
3274 | } | ||
3275 | |||
3276 | static void bxt_hpd_irq_setup(struct drm_device *dev) | ||
3277 | { | ||
3278 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3279 | u32 hotplug_irqs, hotplug, enabled_irqs; | ||
3091 | 3280 | ||
3092 | /* Enable requested port in hotplug control */ | 3281 | enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt); |
3093 | /* TODO: implement (short) HPD support on port A */ | 3282 | hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; |
3094 | WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA); | ||
3095 | if (hotplug_port & BXT_DE_PORT_HP_DDIB) | ||
3096 | hotplug_ctrl |= BXT_DDIB_HPD_ENABLE; | ||
3097 | if (hotplug_port & BXT_DE_PORT_HP_DDIC) | ||
3098 | hotplug_ctrl |= BXT_DDIC_HPD_ENABLE; | ||
3099 | I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl); | ||
3100 | 3283 | ||
3101 | /* Unmask DDI hotplug in IMR */ | 3284 | bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); |
3102 | hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port; | ||
3103 | I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl); | ||
3104 | 3285 | ||
3105 | /* Enable DDI hotplug in IER */ | 3286 | hotplug = I915_READ(PCH_PORT_HOTPLUG); |
3106 | hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port; | 3287 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | |
3107 | I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl); | 3288 | PORTA_HOTPLUG_ENABLE; |
3108 | POSTING_READ(GEN8_DE_PORT_IER); | 3289 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); |
3109 | } | 3290 | } |
3110 | 3291 | ||
3111 | static void ibx_irq_postinstall(struct drm_device *dev) | 3292 | static void ibx_irq_postinstall(struct drm_device *dev) |
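intel_hpd_enabled_irqs() replaces four copies of the same encoder walk: it ORs together the per-pin IRQ bits of every encoder whose pin is currently HPD_ENABLED, so each *_hpd_irq_setup() reduces to choosing a trigger mask and a pin table. The hpd[] tables live earlier in i915_irq.c and are not part of this diff; a hypothetical excerpt to illustrate the indexing:

```c
/* Hypothetical excerpt of one hpd[] table the helper indexes by
 * encoder->hpd_pin (the real tables are defined earlier in the file): */
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

/* ...so the setup side collapses to: */
enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
```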
@@ -3173,15 +3354,17 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
3173 | DE_PLANEB_FLIP_DONE_IVB | | 3354 | DE_PLANEB_FLIP_DONE_IVB | |
3174 | DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); | 3355 | DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); |
3175 | extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | | 3356 | extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | |
3176 | DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); | 3357 | DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | |
3358 | DE_DP_A_HOTPLUG_IVB); | ||
3177 | } else { | 3359 | } else { |
3178 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 3360 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
3179 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | | 3361 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | |
3180 | DE_AUX_CHANNEL_A | | 3362 | DE_AUX_CHANNEL_A | |
3181 | DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | | 3363 | DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | |
3182 | DE_POISON); | 3364 | DE_POISON); |
3183 | extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | | 3365 | extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | |
3184 | DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; | 3366 | DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | |
3367 | DE_DP_A_HOTPLUG); | ||
3185 | } | 3368 | } |
3186 | 3369 | ||
3187 | dev_priv->irq_mask = ~display_mask; | 3370 | dev_priv->irq_mask = ~display_mask; |
@@ -3377,24 +3560,31 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) | |||
3377 | { | 3560 | { |
3378 | uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; | 3561 | uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; |
3379 | uint32_t de_pipe_enables; | 3562 | uint32_t de_pipe_enables; |
3380 | int pipe; | 3563 | u32 de_port_masked = GEN8_AUX_CHANNEL_A; |
3381 | u32 de_port_en = GEN8_AUX_CHANNEL_A; | 3564 | u32 de_port_enables; |
3565 | enum pipe pipe; | ||
3382 | 3566 | ||
3383 | if (IS_GEN9(dev_priv)) { | 3567 | if (INTEL_INFO(dev_priv)->gen >= 9) { |
3384 | de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | | 3568 | de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | |
3385 | GEN9_DE_PIPE_IRQ_FAULT_ERRORS; | 3569 | GEN9_DE_PIPE_IRQ_FAULT_ERRORS; |
3386 | de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | | 3570 | de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | |
3387 | GEN9_AUX_CHANNEL_D; | 3571 | GEN9_AUX_CHANNEL_D; |
3388 | |||
3389 | if (IS_BROXTON(dev_priv)) | 3572 | if (IS_BROXTON(dev_priv)) |
3390 | de_port_en |= BXT_DE_PORT_GMBUS; | 3573 | de_port_masked |= BXT_DE_PORT_GMBUS; |
3391 | } else | 3574 | } else { |
3392 | de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | | 3575 | de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | |
3393 | GEN8_DE_PIPE_IRQ_FAULT_ERRORS; | 3576 | GEN8_DE_PIPE_IRQ_FAULT_ERRORS; |
3577 | } | ||
3394 | 3578 | ||
3395 | de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | | 3579 | de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | |
3396 | GEN8_PIPE_FIFO_UNDERRUN; | 3580 | GEN8_PIPE_FIFO_UNDERRUN; |
3397 | 3581 | ||
3582 | de_port_enables = de_port_masked; | ||
3583 | if (IS_BROXTON(dev_priv)) | ||
3584 | de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; | ||
3585 | else if (IS_BROADWELL(dev_priv)) | ||
3586 | de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; | ||
3587 | |||
3398 | dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; | 3588 | dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; |
3399 | dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; | 3589 | dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; |
3400 | dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; | 3590 | dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; |
@@ -3406,7 +3596,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) | |||
3406 | dev_priv->de_irq_mask[pipe], | 3596 | dev_priv->de_irq_mask[pipe], |
3407 | de_pipe_enables); | 3597 | de_pipe_enables); |
3408 | 3598 | ||
3409 | GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en); | 3599 | GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); |
3410 | } | 3600 | } |
3411 | 3601 | ||
3412 | static int gen8_irq_postinstall(struct drm_device *dev) | 3602 | static int gen8_irq_postinstall(struct drm_device *dev) |
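gen8_de_irq_postinstall() now distinguishes de_port_masked, the bits unmasked in the IMR from the start, from de_port_enables, which additionally carries the hotplug bits into the IER; those stay masked until the hpd_irq_setup hooks unmask them via bdw_update_port_irq(). GEN5_IRQ_INIT is defined elsewhere in i915_irq.c; its rough shape, shown here as an assumption:

```c
/* Rough shape of GEN5_IRQ_INIT (assumption, the macro is outside this
 * diff): verify the IIR is clear, then program IER and IMR. */
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
```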
@@ -3964,7 +4154,6 @@ static int i965_irq_postinstall(struct drm_device *dev) | |||
3964 | static void i915_hpd_irq_setup(struct drm_device *dev) | 4154 | static void i915_hpd_irq_setup(struct drm_device *dev) |
3965 | { | 4155 | { |
3966 | struct drm_i915_private *dev_priv = dev->dev_private; | 4156 | struct drm_i915_private *dev_priv = dev->dev_private; |
3967 | struct intel_encoder *intel_encoder; | ||
3968 | u32 hotplug_en; | 4157 | u32 hotplug_en; |
3969 | 4158 | ||
3970 | assert_spin_locked(&dev_priv->irq_lock); | 4159 | assert_spin_locked(&dev_priv->irq_lock); |
@@ -3973,9 +4162,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev) | |||
3973 | hotplug_en &= ~HOTPLUG_INT_EN_MASK; | 4162 | hotplug_en &= ~HOTPLUG_INT_EN_MASK; |
3974 | /* Note HDMI and DP share hotplug bits */ | 4163 | /* Note HDMI and DP share hotplug bits */ |
3975 | /* enable bits are the same for all generations */ | 4164 | /* enable bits are the same for all generations */ |
3976 | for_each_intel_encoder(dev, intel_encoder) | 4165 | hotplug_en |= intel_hpd_enabled_irqs(dev, hpd_mask_i915); |
3977 | if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) | ||
3978 | hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; | ||
3979 | /* Programming the CRT detection parameters tends | 4166 | /* Programming the CRT detection parameters tends |
3980 | to generate a spurious hotplug event about three | 4167 | to generate a spurious hotplug event about three |
3981 | seconds later. So just do it once. | 4168 | seconds later. So just do it once. |
@@ -4187,10 +4374,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv) | |||
4187 | dev->driver->irq_uninstall = gen8_irq_uninstall; | 4374 | dev->driver->irq_uninstall = gen8_irq_uninstall; |
4188 | dev->driver->enable_vblank = gen8_enable_vblank; | 4375 | dev->driver->enable_vblank = gen8_enable_vblank; |
4189 | dev->driver->disable_vblank = gen8_disable_vblank; | 4376 | dev->driver->disable_vblank = gen8_disable_vblank; |
4190 | if (HAS_PCH_SPLIT(dev)) | 4377 | if (IS_BROXTON(dev)) |
4191 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; | ||
4192 | else | ||
4193 | dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; | 4378 | dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; |
4379 | else if (HAS_PCH_SPT(dev)) | ||
4380 | dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; | ||
4381 | else | ||
4382 | dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; | ||
4194 | } else if (HAS_PCH_SPLIT(dev)) { | 4383 | } else if (HAS_PCH_SPLIT(dev)) { |
4195 | dev->driver->irq_handler = ironlake_irq_handler; | 4384 | dev->driver->irq_handler = ironlake_irq_handler; |
4196 | dev->driver->irq_preinstall = ironlake_irq_reset; | 4385 | dev->driver->irq_preinstall = ironlake_irq_reset; |
@@ -4198,7 +4387,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) | |||
4198 | dev->driver->irq_uninstall = ironlake_irq_uninstall; | 4387 | dev->driver->irq_uninstall = ironlake_irq_uninstall; |
4199 | dev->driver->enable_vblank = ironlake_enable_vblank; | 4388 | dev->driver->enable_vblank = ironlake_enable_vblank; |
4200 | dev->driver->disable_vblank = ironlake_disable_vblank; | 4389 | dev->driver->disable_vblank = ironlake_disable_vblank; |
4201 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; | 4390 | dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; |
4202 | } else { | 4391 | } else { |
4203 | if (INTEL_INFO(dev_priv)->gen == 2) { | 4392 | if (INTEL_INFO(dev_priv)->gen == 2) { |
4204 | dev->driver->irq_preinstall = i8xx_irq_preinstall; | 4393 | dev->driver->irq_preinstall = i8xx_irq_preinstall; |
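intel_irq_init() now selects the hotplug setup per platform: bxt_hpd_irq_setup on Broxton, spt_hpd_irq_setup on SunrisePoint PCHs, and ilk_hpd_irq_setup otherwise, the latter chaining into ibx_hpd_irq_setup for the PCH side as seen above. bdw_update_port_irq(), which the gen8+ paths rely on, is referenced but not shown in this diff; a sketch of what such a helper needs to do, not the committed body:

```c
/* Sketch (assumption): flip only the requested bits in the gen8 DE
 * port IMR, under the irq_lock, avoiding redundant register writes. */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val, old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}
```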
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 5ae4b0aba564..05053e2e9ff0 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c | |||
@@ -51,6 +51,7 @@ struct i915_params i915 __read_mostly = { | |||
51 | .use_mmio_flip = 0, | 51 | .use_mmio_flip = 0, |
52 | .mmio_debug = 0, | 52 | .mmio_debug = 0, |
53 | .verbose_state_checks = 1, | 53 | .verbose_state_checks = 1, |
54 | .nuclear_pageflip = 0, | ||
54 | .edp_vswing = 0, | 55 | .edp_vswing = 0, |
55 | .enable_guc_submission = false, | 56 | .enable_guc_submission = false, |
56 | .guc_log_level = -1, | 57 | .guc_log_level = -1, |
@@ -177,6 +178,10 @@ module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600); | |||
177 | MODULE_PARM_DESC(verbose_state_checks, | 178 | MODULE_PARM_DESC(verbose_state_checks, |
178 | "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions."); | 179 | "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions."); |
179 | 180 | ||
181 | module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600); | ||
182 | MODULE_PARM_DESC(nuclear_pageflip, | ||
183 | "Force atomic modeset functionality; asynchronous mode is not yet supported. (default: false)."); | ||
184 | |||
180 | /* WA to get away with the default setting in VBT for early platforms. Will be removed */ | 185 | /* WA to get away with the default setting in VBT for early platforms. Will be removed */
181 | module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400); | 186 | module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400); |
182 | MODULE_PARM_DESC(edp_vswing, | 187 | MODULE_PARM_DESC(edp_vswing, |
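The new i915.nuclear_pageflip parameter gates the still-experimental atomic modeset path; users opt in by booting with i915.nuclear_pageflip=1. The consumer of the flag is not part of this hunk; the natural wiring, stated as an assumption, is the driver-feature setup:

```c
/* Plausible consumer in i915_drv.c (assumption, not in this hunk):
 * only advertise atomic support when the user opted in, since
 * asynchronous flips are not supported yet. */
if (i915.nuclear_pageflip)
	driver.driver_features |= DRIVER_ATOMIC;
```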
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 83a0888756d6..65b5682b19ac 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -352,8 +352,8 @@ | |||
352 | */ | 352 | */ |
353 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1) | 353 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1) |
354 | #define MI_LRI_FORCE_POSTED (1<<12) | 354 | #define MI_LRI_FORCE_POSTED (1<<12) |
355 | #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1) | 355 | #define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1) |
356 | #define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1) | 356 | #define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2) |
357 | #define MI_SRM_LRM_GLOBAL_GTT (1<<22) | 357 | #define MI_SRM_LRM_GLOBAL_GTT (1<<22) |
358 | #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ | 358 | #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ |
359 | #define MI_FLUSH_DW_STORE_INDEX (1<<21) | 359 | #define MI_FLUSH_DW_STORE_INDEX (1<<21) |
@@ -364,8 +364,8 @@ | |||
364 | #define MI_INVALIDATE_BSD (1<<7) | 364 | #define MI_INVALIDATE_BSD (1<<7) |
365 | #define MI_FLUSH_DW_USE_GTT (1<<2) | 365 | #define MI_FLUSH_DW_USE_GTT (1<<2) |
366 | #define MI_FLUSH_DW_USE_PPGTT (0<<2) | 366 | #define MI_FLUSH_DW_USE_PPGTT (0<<2) |
367 | #define MI_LOAD_REGISTER_MEM(x) MI_INSTR(0x29, 2*(x)-1) | 367 | #define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1) |
368 | #define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1) | 368 | #define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2) |
369 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) | 369 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) |
370 | #define MI_BATCH_NON_SECURE (1) | 370 | #define MI_BATCH_NON_SECURE (1) |
371 | /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ | 371 | /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ |
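MI_STORE_REGISTER_MEM and MI_LOAD_REGISTER_MEM lose their register-count parameter: callers now emit one fixed-length instruction per register, so the DWord length field becomes a constant. Assuming the customary MI_INSTR(opcode, flags) encoding from i915_reg.h (opcode in bits 28:23; the macro itself is outside this hunk), and recalling that an MI length field encodes total DWords minus two, the values work out as:

```c
#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))

/* MI_STORE_REGISTER_MEM      = 0x24 << 23 | 1 = 0x12000001
 *   length 1 => 3 DWords: header, register, 32-bit address
 * MI_STORE_REGISTER_MEM_GEN8 = 0x24 << 23 | 2 = 0x12000002
 *   length 2 => 4 DWords: the extra DWord carries the upper 32 bits
 *   of the gen8+ address */
```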
@@ -1099,6 +1099,12 @@ enum skl_disp_power_wells { | |||
1099 | #define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */ | 1099 | #define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */ |
1100 | #define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1) | 1100 | #define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1) |
1101 | 1101 | ||
1102 | #define _CHV_CMN_DW0_CH0 0x8100 | ||
1103 | #define DPIO_ALLDL_POWERDOWN_SHIFT_CH0 19 | ||
1104 | #define DPIO_ANYDL_POWERDOWN_SHIFT_CH0 18 | ||
1105 | #define DPIO_ALLDL_POWERDOWN (1 << 1) | ||
1106 | #define DPIO_ANYDL_POWERDOWN (1 << 0) | ||
1107 | |||
1102 | #define _CHV_CMN_DW5_CH0 0x8114 | 1108 | #define _CHV_CMN_DW5_CH0 0x8114 |
1103 | #define CHV_BUFRIGHTENA1_DISABLE (0 << 20) | 1109 | #define CHV_BUFRIGHTENA1_DISABLE (0 << 20) |
1104 | #define CHV_BUFRIGHTENA1_NORMAL (1 << 20) | 1110 | #define CHV_BUFRIGHTENA1_NORMAL (1 << 20) |
@@ -1135,10 +1141,23 @@ enum skl_disp_power_wells { | |||
1135 | 1141 | ||
1136 | #define _CHV_CMN_DW19_CH0 0x814c | 1142 | #define _CHV_CMN_DW19_CH0 0x814c |
1137 | #define _CHV_CMN_DW6_CH1 0x8098 | 1143 | #define _CHV_CMN_DW6_CH1 0x8098 |
1144 | #define DPIO_ALLDL_POWERDOWN_SHIFT_CH1 30 /* CL2 DW6 only */ | ||
1145 | #define DPIO_ANYDL_POWERDOWN_SHIFT_CH1 29 /* CL2 DW6 only */ | ||
1146 | #define DPIO_DYNPWRDOWNEN_CH1 (1 << 28) /* CL2 DW6 only */ | ||
1138 | #define CHV_CMN_USEDCLKCHANNEL (1 << 13) | 1147 | #define CHV_CMN_USEDCLKCHANNEL (1 << 13) |
1148 | |||
1139 | #define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1) | 1149 | #define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1) |
1140 | 1150 | ||
1151 | #define CHV_CMN_DW28 0x8170 | ||
1152 | #define DPIO_CL1POWERDOWNEN (1 << 23) | ||
1153 | #define DPIO_DYNPWRDOWNEN_CH0 (1 << 22) | ||
1154 | #define DPIO_SUS_CLK_CONFIG_ON (0 << 0) | ||
1155 | #define DPIO_SUS_CLK_CONFIG_CLKREQ (1 << 0) | ||
1156 | #define DPIO_SUS_CLK_CONFIG_GATE (2 << 0) | ||
1157 | #define DPIO_SUS_CLK_CONFIG_GATE_CLKREQ (3 << 0) | ||
1158 | |||
1141 | #define CHV_CMN_DW30 0x8178 | 1159 | #define CHV_CMN_DW30 0x8178 |
1160 | #define DPIO_CL2_LDOFUSE_PWRENB (1 << 6) | ||
1142 | #define DPIO_LRC_BYPASS (1 << 3) | 1161 | #define DPIO_LRC_BYPASS (1 << 3) |
1143 | 1162 | ||
1144 | #define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \ | 1163 | #define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \ |
@@ -1674,11 +1693,18 @@ enum skl_disp_power_wells { | |||
1674 | #define GFX_MODE_GEN7 0x0229c | 1693 | #define GFX_MODE_GEN7 0x0229c |
1675 | #define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) | 1694 | #define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) |
1676 | #define GFX_RUN_LIST_ENABLE (1<<15) | 1695 | #define GFX_RUN_LIST_ENABLE (1<<15) |
1696 | #define GFX_INTERRUPT_STEERING (1<<14) | ||
1677 | #define GFX_TLB_INVALIDATE_EXPLICIT (1<<13) | 1697 | #define GFX_TLB_INVALIDATE_EXPLICIT (1<<13) |
1678 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) | 1698 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) |
1679 | #define GFX_REPLAY_MODE (1<<11) | 1699 | #define GFX_REPLAY_MODE (1<<11) |
1680 | #define GFX_PSMI_GRANULARITY (1<<10) | 1700 | #define GFX_PSMI_GRANULARITY (1<<10) |
1681 | #define GFX_PPGTT_ENABLE (1<<9) | 1701 | #define GFX_PPGTT_ENABLE (1<<9) |
1702 | #define GEN8_GFX_PPGTT_48B (1<<7) | ||
1703 | |||
1704 | #define GFX_FORWARD_VBLANK_MASK (3<<5) | ||
1705 | #define GFX_FORWARD_VBLANK_NEVER (0<<5) | ||
1706 | #define GFX_FORWARD_VBLANK_ALWAYS (1<<5) | ||
1707 | #define GFX_FORWARD_VBLANK_COND (2<<5) | ||
1682 | 1708 | ||
1683 | #define VLV_DISPLAY_BASE 0x180000 | 1709 | #define VLV_DISPLAY_BASE 0x180000 |
1684 | #define VLV_MIPI_BASE VLV_DISPLAY_BASE | 1710 | #define VLV_MIPI_BASE VLV_DISPLAY_BASE |
@@ -2185,16 +2211,20 @@ enum skl_disp_power_wells { | |||
2185 | #define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240) | 2211 | #define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240) |
2186 | #define DPLL_PORTD_READY_MASK (0xf) | 2212 | #define DPLL_PORTD_READY_MASK (0xf) |
2187 | #define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100) | 2213 | #define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100) |
2214 | #define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27)) | ||
2188 | #define PHY_LDO_DELAY_0NS 0x0 | 2215 | #define PHY_LDO_DELAY_0NS 0x0 |
2189 | #define PHY_LDO_DELAY_200NS 0x1 | 2216 | #define PHY_LDO_DELAY_200NS 0x1 |
2190 | #define PHY_LDO_DELAY_600NS 0x2 | 2217 | #define PHY_LDO_DELAY_600NS 0x2 |
2191 | #define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2*(phy)+23)) | 2218 | #define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2*(phy)+23)) |
2219 | #define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8*(phy)+4*(ch)+11)) | ||
2192 | #define PHY_CH_SU_PSR 0x1 | 2220 | #define PHY_CH_SU_PSR 0x1 |
2193 | #define PHY_CH_DEEP_PSR 0x7 | 2221 | #define PHY_CH_DEEP_PSR 0x7 |
2194 | #define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2)) | 2222 | #define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2)) |
2195 | #define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy)) | 2223 | #define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy)) |
2196 | #define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104) | 2224 | #define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104) |
2197 | #define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30)) | 2225 | #define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30)) |
2226 | #define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch)))) | ||
2227 | #define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline)))) | ||
2198 | 2228 | ||
2199 | /* | 2229 | /* |
2200 | * The i830 generation, in LVDS mode, defines P1 as the bit number set within | 2230 | * The i830 generation, in LVDS mode, defines P1 as the bit number set within |
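The new DISPLAY_PHY_CONTROL/DISPLAY_PHY_STATUS macros pack per-PHY, per-channel fields via arithmetic on the phy and ch indices. Assuming DPIO_PHY0 and DPIO_CH0 evaluate to 0 and DPIO_PHY1 and DPIO_CH1 to 1 (the enums are not in this hunk), a few worked positions, written as checks one could drop into any function:

```c
/* Worked bit positions (enum values are an assumption):
 * OVRD_EN(phy, ch) = 1 << (2*phy + ch + 27)
 * OVRD(mask, phy, ch) = mask << (8*phy + 4*ch + 11)
 * CMN_LDO(phy, ch) = 1 << (6 - (6*phy + 3*ch)) */
BUILD_BUG_ON(PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0) != (1 << 27));
BUILD_BUG_ON(PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH1) != (1 << 30));
BUILD_BUG_ON(PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1) != (0xf << 15));
BUILD_BUG_ON(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) != (1 << 0));
```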
@@ -4107,6 +4137,7 @@ enum skl_disp_power_wells { | |||
4107 | /* How many wires to use. I guess 3 was too hard */ | 4137 | /* How many wires to use. I guess 3 was too hard */ |
4108 | #define DP_PORT_WIDTH(width) (((width) - 1) << 19) | 4138 | #define DP_PORT_WIDTH(width) (((width) - 1) << 19) |
4109 | #define DP_PORT_WIDTH_MASK (7 << 19) | 4139 | #define DP_PORT_WIDTH_MASK (7 << 19) |
4140 | #define DP_PORT_WIDTH_SHIFT 19 | ||
4110 | 4141 | ||
4111 | /* Mystic DPCD version 1.1 special mode */ | 4142 | /* Mystic DPCD version 1.1 special mode */ |
4112 | #define DP_ENHANCED_FRAMING (1 << 18) | 4143 | #define DP_ENHANCED_FRAMING (1 << 18) |
@@ -4617,6 +4648,7 @@ enum skl_disp_power_wells { | |||
4617 | 4648 | ||
4618 | #define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400) | 4649 | #define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400) |
4619 | #define CBR_PND_DEADLINE_DISABLE (1<<31) | 4650 | #define CBR_PND_DEADLINE_DISABLE (1<<31) |
4651 | #define CBR_PWM_CLOCK_MUX_SELECT (1<<30) | ||
4620 | 4652 | ||
4621 | /* FIFO watermark sizes etc */ | 4653 | /* FIFO watermark sizes etc */ |
4622 | #define G4X_FIFO_LINE_SIZE 64 | 4654 | #define G4X_FIFO_LINE_SIZE 64 |
@@ -5363,15 +5395,17 @@ enum skl_disp_power_wells { | |||
5363 | 5395 | ||
5364 | #define CPU_VGACNTRL 0x41000 | 5396 | #define CPU_VGACNTRL 0x41000 |
5365 | 5397 | ||
5366 | #define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030 | 5398 | #define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030 |
5367 | #define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4) | 5399 | #define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4) |
5368 | #define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2) | 5400 | #define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */ |
5369 | #define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2) | 5401 | #define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */ |
5370 | #define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2) | 5402 | #define DIGITAL_PORTA_PULSE_DURATION_6ms (2 << 2) /* pre-HSW */ |
5371 | #define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2) | 5403 | #define DIGITAL_PORTA_PULSE_DURATION_100ms (3 << 2) /* pre-HSW */ |
5372 | #define DIGITAL_PORTA_NO_DETECT (0 << 0) | 5404 | #define DIGITAL_PORTA_PULSE_DURATION_MASK (3 << 2) /* pre-HSW */ |
5373 | #define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1) | 5405 | #define DIGITAL_PORTA_HOTPLUG_STATUS_MASK (3 << 0) |
5374 | #define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0) | 5406 | #define DIGITAL_PORTA_HOTPLUG_NO_DETECT (0 << 0) |
5407 | #define DIGITAL_PORTA_HOTPLUG_SHORT_DETECT (1 << 0) | ||
5408 | #define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0) | ||
5375 | 5409 | ||
5376 | /* refresh rate hardware control */ | 5410 | /* refresh rate hardware control */ |
5377 | #define RR_HW_CTL 0x45300 | 5411 | #define RR_HW_CTL 0x45300 |
@@ -5693,11 +5727,12 @@ enum skl_disp_power_wells { | |||
5693 | #define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which))) | 5727 | #define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which))) |
5694 | #define GEN8_GT_IER(which) (0x4430c + (0x10 * (which))) | 5728 | #define GEN8_GT_IER(which) (0x4430c + (0x10 * (which))) |
5695 | 5729 | ||
5696 | #define GEN8_BCS_IRQ_SHIFT 16 | ||
5697 | #define GEN8_RCS_IRQ_SHIFT 0 | 5730 | #define GEN8_RCS_IRQ_SHIFT 0 |
5698 | #define GEN8_VCS2_IRQ_SHIFT 16 | 5731 | #define GEN8_BCS_IRQ_SHIFT 16 |
5699 | #define GEN8_VCS1_IRQ_SHIFT 0 | 5732 | #define GEN8_VCS1_IRQ_SHIFT 0 |
5733 | #define GEN8_VCS2_IRQ_SHIFT 16 | ||
5700 | #define GEN8_VECS_IRQ_SHIFT 0 | 5734 | #define GEN8_VECS_IRQ_SHIFT 0 |
5735 | #define GEN8_WD_IRQ_SHIFT 16 | ||
5701 | 5736 | ||
5702 | #define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe))) | 5737 | #define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe))) |
5703 | #define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe))) | 5738 | #define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe))) |
@@ -5763,21 +5798,6 @@ enum skl_disp_power_wells { | |||
5763 | #define GEN8_PCU_IIR 0x444e8 | 5798 | #define GEN8_PCU_IIR 0x444e8 |
5764 | #define GEN8_PCU_IER 0x444ec | 5799 | #define GEN8_PCU_IER 0x444ec |
5765 | 5800 | ||
5766 | /* BXT hotplug control */ | ||
5767 | #define BXT_HOTPLUG_CTL 0xC4030 | ||
5768 | #define BXT_DDIA_HPD_ENABLE (1 << 28) | ||
5769 | #define BXT_DDIA_HPD_STATUS (3 << 24) | ||
5770 | #define BXT_DDIC_HPD_ENABLE (1 << 12) | ||
5771 | #define BXT_DDIC_HPD_STATUS (3 << 8) | ||
5772 | #define BXT_DDIB_HPD_ENABLE (1 << 4) | ||
5773 | #define BXT_DDIB_HPD_STATUS (3 << 0) | ||
5774 | #define BXT_HOTPLUG_CTL_MASK (BXT_DDIA_HPD_ENABLE | \ | ||
5775 | BXT_DDIB_HPD_ENABLE | \ | ||
5776 | BXT_DDIC_HPD_ENABLE) | ||
5777 | #define BXT_HPD_STATUS_MASK (BXT_DDIA_HPD_STATUS | \ | ||
5778 | BXT_DDIB_HPD_STATUS | \ | ||
5779 | BXT_DDIC_HPD_STATUS) | ||
5780 | |||
5781 | #define ILK_DISPLAY_CHICKEN2 0x42004 | 5801 | #define ILK_DISPLAY_CHICKEN2 0x42004 |
5782 | /* Required on all Ironlake and Sandybridge according to the B-Spec. */ | 5802 | /* Required on all Ironlake and Sandybridge according to the B-Spec. */ |
5783 | #define ILK_ELPIN_409_SELECT (1 << 25) | 5803 | #define ILK_ELPIN_409_SELECT (1 << 25) |
@@ -5950,6 +5970,7 @@ enum skl_disp_power_wells { | |||
5950 | #define SDE_AUXB_CPT (1 << 25) | 5970 | #define SDE_AUXB_CPT (1 << 25) |
5951 | #define SDE_AUX_MASK_CPT (7 << 25) | 5971 | #define SDE_AUX_MASK_CPT (7 << 25) |
5952 | #define SDE_PORTE_HOTPLUG_SPT (1 << 25) | 5972 | #define SDE_PORTE_HOTPLUG_SPT (1 << 25) |
5973 | #define SDE_PORTA_HOTPLUG_SPT (1 << 24) | ||
5953 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) | 5974 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) |
5954 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) | 5975 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) |
5955 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) | 5976 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) |
@@ -5963,7 +5984,8 @@ enum skl_disp_power_wells { | |||
5963 | #define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \ | 5984 | #define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \ |
5964 | SDE_PORTD_HOTPLUG_CPT | \ | 5985 | SDE_PORTD_HOTPLUG_CPT | \ |
5965 | SDE_PORTC_HOTPLUG_CPT | \ | 5986 | SDE_PORTC_HOTPLUG_CPT | \ |
5966 | SDE_PORTB_HOTPLUG_CPT) | 5987 | SDE_PORTB_HOTPLUG_CPT | \ |
5988 | SDE_PORTA_HOTPLUG_SPT) | ||
5967 | #define SDE_GMBUS_CPT (1 << 17) | 5989 | #define SDE_GMBUS_CPT (1 << 17) |
5968 | #define SDE_ERROR_CPT (1 << 16) | 5990 | #define SDE_ERROR_CPT (1 << 16) |
5969 | #define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) | 5991 | #define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) |
@@ -5998,46 +6020,46 @@ enum skl_disp_power_wells { | |||
5998 | #define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) | 6020 | #define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) |
5999 | 6021 | ||
6000 | /* digital port hotplug */ | 6022 | /* digital port hotplug */ |
6001 | #define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ | 6023 | #define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ |
6002 | #define BXT_PORTA_HOTPLUG_ENABLE (1 << 28) | 6024 | #define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */ |
6003 | #define BXT_PORTA_HOTPLUG_STATUS_MASK (0x3 << 24) | 6025 | #define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */ |
6004 | #define BXT_PORTA_HOTPLUG_NO_DETECT (0 << 24) | 6026 | #define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */ |
6005 | #define BXT_PORTA_HOTPLUG_SHORT_DETECT (1 << 24) | 6027 | #define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */ |
6006 | #define BXT_PORTA_HOTPLUG_LONG_DETECT (2 << 24) | 6028 | #define PORTA_HOTPLUG_LONG_DETECT (2 << 24) /* SPT+ & BXT */ |
6007 | #define PORTD_HOTPLUG_ENABLE (1 << 20) | 6029 | #define PORTD_HOTPLUG_ENABLE (1 << 20) |
6008 | #define PORTD_PULSE_DURATION_2ms (0) | 6030 | #define PORTD_PULSE_DURATION_2ms (0 << 18) /* pre-LPT */ |
6009 | #define PORTD_PULSE_DURATION_4_5ms (1 << 18) | 6031 | #define PORTD_PULSE_DURATION_4_5ms (1 << 18) /* pre-LPT */ |
6010 | #define PORTD_PULSE_DURATION_6ms (2 << 18) | 6032 | #define PORTD_PULSE_DURATION_6ms (2 << 18) /* pre-LPT */ |
6011 | #define PORTD_PULSE_DURATION_100ms (3 << 18) | 6033 | #define PORTD_PULSE_DURATION_100ms (3 << 18) /* pre-LPT */ |
6012 | #define PORTD_PULSE_DURATION_MASK (3 << 18) | 6034 | #define PORTD_PULSE_DURATION_MASK (3 << 18) /* pre-LPT */ |
6013 | #define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16) | 6035 | #define PORTD_HOTPLUG_STATUS_MASK (3 << 16) |
6014 | #define PORTD_HOTPLUG_NO_DETECT (0 << 16) | 6036 | #define PORTD_HOTPLUG_NO_DETECT (0 << 16) |
6015 | #define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) | 6037 | #define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) |
6016 | #define PORTD_HOTPLUG_LONG_DETECT (2 << 16) | 6038 | #define PORTD_HOTPLUG_LONG_DETECT (2 << 16) |
6017 | #define PORTC_HOTPLUG_ENABLE (1 << 12) | 6039 | #define PORTC_HOTPLUG_ENABLE (1 << 12) |
6018 | #define PORTC_PULSE_DURATION_2ms (0) | 6040 | #define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */ |
6019 | #define PORTC_PULSE_DURATION_4_5ms (1 << 10) | 6041 | #define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */ |
6020 | #define PORTC_PULSE_DURATION_6ms (2 << 10) | 6042 | #define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */ |
6021 | #define PORTC_PULSE_DURATION_100ms (3 << 10) | 6043 | #define PORTC_PULSE_DURATION_100ms (3 << 10) /* pre-LPT */ |
6022 | #define PORTC_PULSE_DURATION_MASK (3 << 10) | 6044 | #define PORTC_PULSE_DURATION_MASK (3 << 10) /* pre-LPT */ |
6023 | #define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8) | 6045 | #define PORTC_HOTPLUG_STATUS_MASK (3 << 8) |
6024 | #define PORTC_HOTPLUG_NO_DETECT (0 << 8) | 6046 | #define PORTC_HOTPLUG_NO_DETECT (0 << 8) |
6025 | #define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) | 6047 | #define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) |
6026 | #define PORTC_HOTPLUG_LONG_DETECT (2 << 8) | 6048 | #define PORTC_HOTPLUG_LONG_DETECT (2 << 8) |
6027 | #define PORTB_HOTPLUG_ENABLE (1 << 4) | 6049 | #define PORTB_HOTPLUG_ENABLE (1 << 4) |
6028 | #define PORTB_PULSE_DURATION_2ms (0) | 6050 | #define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */ |
6029 | #define PORTB_PULSE_DURATION_4_5ms (1 << 2) | 6051 | #define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */ |
6030 | #define PORTB_PULSE_DURATION_6ms (2 << 2) | 6052 | #define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */ |
6031 | #define PORTB_PULSE_DURATION_100ms (3 << 2) | 6053 | #define PORTB_PULSE_DURATION_100ms (3 << 2) /* pre-LPT */ |
6032 | #define PORTB_PULSE_DURATION_MASK (3 << 2) | 6054 | #define PORTB_PULSE_DURATION_MASK (3 << 2) /* pre-LPT */ |
6033 | #define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0) | 6055 | #define PORTB_HOTPLUG_STATUS_MASK (3 << 0) |
6034 | #define PORTB_HOTPLUG_NO_DETECT (0 << 0) | 6056 | #define PORTB_HOTPLUG_NO_DETECT (0 << 0) |
6035 | #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) | 6057 | #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) |
6036 | #define PORTB_HOTPLUG_LONG_DETECT (2 << 0) | 6058 | #define PORTB_HOTPLUG_LONG_DETECT (2 << 0) |
6037 | 6059 | ||
6038 | #define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 */ | 6060 | #define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 SPT+ */ |
6039 | #define PORTE_HOTPLUG_ENABLE (1 << 4) | 6061 | #define PORTE_HOTPLUG_ENABLE (1 << 4) |
6040 | #define PORTE_HOTPLUG_STATUS_MASK (0x3 << 0) | 6062 | #define PORTE_HOTPLUG_STATUS_MASK (3 << 0) |
6041 | #define PORTE_HOTPLUG_NO_DETECT (0 << 0) | 6063 | #define PORTE_HOTPLUG_NO_DETECT (0 << 0) |
6042 | #define PORTE_HOTPLUG_SHORT_DETECT (1 << 0) | 6064 | #define PORTE_HOTPLUG_SHORT_DETECT (1 << 0) |
6043 | #define PORTE_HOTPLUG_LONG_DETECT (2 << 0) | 6065 | #define PORTE_HOTPLUG_LONG_DETECT (2 << 0) |
@@ -6304,9 +6326,11 @@ enum skl_disp_power_wells { | |||
6304 | #define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) | 6326 | #define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) |
6305 | #define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) | 6327 | #define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) |
6306 | #define FDI_BC_BIFURCATION_SELECT (1 << 12) | 6328 | #define FDI_BC_BIFURCATION_SELECT (1 << 12) |
6329 | #define SPT_PWM_GRANULARITY (1<<0) | ||
6307 | #define SOUTH_CHICKEN2 0xc2004 | 6330 | #define SOUTH_CHICKEN2 0xc2004 |
6308 | #define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13) | 6331 | #define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13) |
6309 | #define FDI_MPHY_IOSFSB_RESET_CTL (1<<12) | 6332 | #define FDI_MPHY_IOSFSB_RESET_CTL (1<<12) |
6333 | #define LPT_PWM_GRANULARITY (1<<5) | ||
6310 | #define DPLS_EDP_PPS_FIX_DIS (1<<0) | 6334 | #define DPLS_EDP_PPS_FIX_DIS (1<<0) |
6311 | 6335 | ||
6312 | #define _FDI_RXA_CHICKEN 0xc200c | 6336 | #define _FDI_RXA_CHICKEN 0xc200c |
@@ -6870,7 +6894,9 @@ enum skl_disp_power_wells { | |||
6870 | #define GEN9_PGCTL_SSB_EU311_ACK (1 << 14) | 6894 | #define GEN9_PGCTL_SSB_EU311_ACK (1 << 14) |
6871 | 6895 | ||
6872 | #define GEN7_MISCCPCTL (0x9424) | 6896 | #define GEN7_MISCCPCTL (0x9424) |
6873 | #define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0) | 6897 | #define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0) |
6898 | #define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2) | ||
6899 | #define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4) | ||
6874 | 6900 | ||
6875 | #define GEN8_GARBCNTL 0xB004 | 6901 | #define GEN8_GARBCNTL 0xB004 |
6876 | #define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7) | 6902 | #define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7) |
@@ -7159,6 +7185,8 @@ enum skl_disp_power_wells { | |||
7159 | #define DDI_BUF_IS_IDLE (1<<7) | 7185 | #define DDI_BUF_IS_IDLE (1<<7) |
7160 | #define DDI_A_4_LANES (1<<4) | 7186 | #define DDI_A_4_LANES (1<<4) |
7161 | #define DDI_PORT_WIDTH(width) (((width) - 1) << 1) | 7187 | #define DDI_PORT_WIDTH(width) (((width) - 1) << 1) |
7188 | #define DDI_PORT_WIDTH_MASK (7 << 1) | ||
7189 | #define DDI_PORT_WIDTH_SHIFT 1 | ||
7162 | #define DDI_INIT_DISPLAY_DETECTED (1<<0) | 7190 | #define DDI_INIT_DISPLAY_DETECTED (1<<0) |
7163 | 7191 | ||
7164 | /* DDI Buffer Translations */ | 7192 | /* DDI Buffer Translations */ |
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 2f34c47bd4bf..e6b5c7470ba0 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -186,33 +186,49 @@ DEFINE_EVENT(i915_va, i915_va_alloc, | |||
186 | TP_ARGS(vm, start, length, name) | 186 | TP_ARGS(vm, start, length, name) |
187 | ); | 187 | ); |
188 | 188 | ||
189 | DECLARE_EVENT_CLASS(i915_page_table_entry, | 189 | DECLARE_EVENT_CLASS(i915_px_entry, |
190 | TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift), | 190 | TP_PROTO(struct i915_address_space *vm, u32 px, u64 start, u64 px_shift), |
191 | TP_ARGS(vm, pde, start, pde_shift), | 191 | TP_ARGS(vm, px, start, px_shift), |
192 | 192 | ||
193 | TP_STRUCT__entry( | 193 | TP_STRUCT__entry( |
194 | __field(struct i915_address_space *, vm) | 194 | __field(struct i915_address_space *, vm) |
195 | __field(u32, pde) | 195 | __field(u32, px) |
196 | __field(u64, start) | 196 | __field(u64, start) |
197 | __field(u64, end) | 197 | __field(u64, end) |
198 | ), | 198 | ), |
199 | 199 | ||
200 | TP_fast_assign( | 200 | TP_fast_assign( |
201 | __entry->vm = vm; | 201 | __entry->vm = vm; |
202 | __entry->pde = pde; | 202 | __entry->px = px; |
203 | __entry->start = start; | 203 | __entry->start = start; |
204 | __entry->end = ((start + (1ULL << pde_shift)) & ~((1ULL << pde_shift)-1)) - 1; | 204 | __entry->end = ((start + (1ULL << px_shift)) & ~((1ULL << px_shift)-1)) - 1; |
205 | ), | 205 | ), |
206 | 206 | ||
207 | TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)", | 207 | TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)", |
208 | __entry->vm, __entry->pde, __entry->start, __entry->end) | 208 | __entry->vm, __entry->px, __entry->start, __entry->end) |
209 | ); | 209 | ); |
210 | 210 | ||
211 | DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc, | 211 | DEFINE_EVENT(i915_px_entry, i915_page_table_entry_alloc, |
212 | TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift), | 212 | TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift), |
213 | TP_ARGS(vm, pde, start, pde_shift) | 213 | TP_ARGS(vm, pde, start, pde_shift) |
214 | ); | 214 | ); |
215 | 215 | ||
216 | DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_entry_alloc, | ||
217 | TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift), | ||
218 | TP_ARGS(vm, pdpe, start, pdpe_shift), | ||
219 | |||
220 | TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)", | ||
221 | __entry->vm, __entry->px, __entry->start, __entry->end) | ||
222 | ); | ||
223 | |||
224 | DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_pointer_entry_alloc, | ||
225 | TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift), | ||
226 | TP_ARGS(vm, pml4e, start, pml4e_shift), | ||
227 | |||
228 | TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)", | ||
229 | __entry->vm, __entry->px, __entry->start, __entry->end) | ||
230 | ); | ||
231 | |||
216 | /* Avoid extra math because we only support two sizes. The format is defined by | 232 | /* Avoid extra math because we only support two sizes. The format is defined by |
217 | * bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */ | 233 | * bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */ |
218 | #define TRACE_PT_SIZE(bits) \ | 234 | #define TRACE_PT_SIZE(bits) \ |
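Renaming the event class from i915_page_table_entry to i915_px_entry (pde becomes the generic px) lets one TP_fast_assign serve all three page-table levels, while DEFINE_EVENT_PRINT overrides only the format string per level. How the new events would be emitted from the gen8 PPGTT allocation paths (the shift constants are assumptions, they are not part of this diff):

```c
/* One shared assign path, three print formats: */
trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
trace_i915_page_directory_pointer_entry_alloc(vm, pml4e, start,
					      GEN8_PML4E_SHIFT);
```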
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index 97a88b5f6a26..21c97f44d637 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h | |||
@@ -40,6 +40,19 @@ | |||
40 | #define INTEL_VGT_IF_VERSION \ | 40 | #define INTEL_VGT_IF_VERSION \ |
41 | INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) | 41 | INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) |
42 | 42 | ||
43 | /* | ||
44 | * notifications from guest to vgpu device model | ||
45 | */ | ||
46 | enum vgt_g2v_type { | ||
47 | VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2, | ||
48 | VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY, | ||
49 | VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE, | ||
50 | VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY, | ||
51 | VGT_G2V_EXECLIST_CONTEXT_CREATE, | ||
52 | VGT_G2V_EXECLIST_CONTEXT_DESTROY, | ||
53 | VGT_G2V_MAX, | ||
54 | }; | ||
55 | |||
43 | struct vgt_if { | 56 | struct vgt_if { |
44 | uint64_t magic; /* VGT_MAGIC */ | 57 | uint64_t magic; /* VGT_MAGIC */ |
45 | uint16_t version_major; | 58 | uint16_t version_major; |
@@ -70,11 +83,28 @@ struct vgt_if { | |||
70 | uint32_t rsv3[0x200 - 24]; /* pad to half page */ | 83 | uint32_t rsv3[0x200 - 24]; /* pad to half page */ |
71 | /* | 84 | /* |
72 | * The bottom half page is for response from Gfx driver to hypervisor. | 85 | * The bottom half page is for response from Gfx driver to hypervisor. |
73 | * Set to reserved fields temporarily by now. | ||
74 | */ | 86 | */ |
75 | uint32_t rsv4; | 87 | uint32_t rsv4; |
76 | uint32_t display_ready; /* ready for display owner switch */ | 88 | uint32_t display_ready; /* ready for display owner switch */ |
77 | uint32_t rsv5[0x200 - 2]; /* pad to one page */ | 89 | |
90 | uint32_t rsv5[4]; | ||
91 | |||
92 | uint32_t g2v_notify; | ||
93 | uint32_t rsv6[7]; | ||
94 | |||
95 | uint32_t pdp0_lo; | ||
96 | uint32_t pdp0_hi; | ||
97 | uint32_t pdp1_lo; | ||
98 | uint32_t pdp1_hi; | ||
99 | uint32_t pdp2_lo; | ||
100 | uint32_t pdp2_hi; | ||
101 | uint32_t pdp3_lo; | ||
102 | uint32_t pdp3_hi; | ||
103 | |||
104 | uint32_t execlist_context_descriptor_lo; | ||
105 | uint32_t execlist_context_descriptor_hi; | ||
106 | |||
107 | uint32_t rsv7[0x200 - 24]; /* pad to one page */ | ||
78 | } __packed; | 108 | } __packed; |
79 | 109 | ||
80 | #define vgtif_reg(x) \ | 110 | #define vgtif_reg(x) \ |
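The guest-to-host half of vgt_if, previously all reserved, gains a g2v_notify doorbell plus slots for the four PDPs and the execlist context descriptor, matching the vgt_g2v_type events above. The vgtif_reg() accessor is truncated at the end of this hunk, but a guest-side notification would plausibly look like this (caller placement and helper names are assumptions):

```c
/* Hypothetical guest-side notification for a newly created L4 PPGTT
 * (field names from the struct above; the real caller lands in the
 * PPGTT code, not in this header): */
if (intel_vgpu_active(dev)) {
	I915_WRITE(vgtif_reg(pdp0_lo), lower_32_bits(pml4_addr));
	I915_WRITE(vgtif_reg(pdp0_hi), upper_32_bits(pml4_addr));
	I915_WRITE(vgtif_reg(g2v_notify),
		   VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE);
}
```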
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index e2531cf59266..2c4b1c44296c 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c | |||
@@ -85,22 +85,14 @@ intel_connector_atomic_get_property(struct drm_connector *connector, | |||
85 | struct drm_crtc_state * | 85 | struct drm_crtc_state * |
86 | intel_crtc_duplicate_state(struct drm_crtc *crtc) | 86 | intel_crtc_duplicate_state(struct drm_crtc *crtc) |
87 | { | 87 | { |
88 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
89 | struct intel_crtc_state *crtc_state; | 88 | struct intel_crtc_state *crtc_state; |
90 | 89 | ||
91 | if (WARN_ON(!intel_crtc->config)) | 90 | crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL); |
92 | crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); | ||
93 | else | ||
94 | crtc_state = kmemdup(intel_crtc->config, | ||
95 | sizeof(*intel_crtc->config), GFP_KERNEL); | ||
96 | |||
97 | if (!crtc_state) | 91 | if (!crtc_state) |
98 | return NULL; | 92 | return NULL; |
99 | 93 | ||
100 | __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); | 94 | __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); |
101 | 95 | ||
102 | crtc_state->base.crtc = crtc; | ||
103 | |||
104 | return &crtc_state->base; | 96 | return &crtc_state->base; |
105 | } | 97 | } |
106 | 98 | ||
@@ -149,9 +141,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev, | |||
149 | int i, j; | 141 | int i, j; |
150 | 142 | ||
151 | num_scalers_need = hweight32(scaler_state->scaler_users); | 143 | num_scalers_need = hweight32(scaler_state->scaler_users); |
152 | DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n", | ||
153 | crtc_state, num_scalers_need, intel_crtc->num_scalers, | ||
154 | scaler_state->scaler_users); | ||
155 | 144 | ||
156 | /* | 145 | /* |
157 | * High level flow: | 146 | * High level flow: |
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index f1ab8e4b9c11..a11980696595 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c | |||
@@ -76,11 +76,7 @@ intel_plane_duplicate_state(struct drm_plane *plane) | |||
76 | struct drm_plane_state *state; | 76 | struct drm_plane_state *state; |
77 | struct intel_plane_state *intel_state; | 77 | struct intel_plane_state *intel_state; |
78 | 78 | ||
79 | if (WARN_ON(!plane->state)) | 79 | intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL); |
80 | intel_state = intel_create_plane_state(plane); | ||
81 | else | ||
82 | intel_state = kmemdup(plane->state, sizeof(*intel_state), | ||
83 | GFP_KERNEL); | ||
84 | 80 | ||
85 | if (!intel_state) | 81 | if (!intel_state) |
86 | return NULL; | 82 | return NULL; |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index c19e669ffe50..68421c273c8c 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -1350,21 +1350,3 @@ intel_parse_bios(struct drm_device *dev) | |||
1350 | 1350 | ||
1351 | return 0; | 1351 | return 0; |
1352 | } | 1352 | } |
1353 | |||
1354 | /* Ensure that vital registers have been initialised, even if the BIOS | ||
1355 | * is absent or just failing to do its job. | ||
1356 | */ | ||
1357 | void intel_setup_bios(struct drm_device *dev) | ||
1358 | { | ||
1359 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1360 | |||
1361 | /* Set the Panel Power On/Off timings if uninitialized. */ | ||
1362 | if (!HAS_PCH_SPLIT(dev) && | ||
1363 | I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) { | ||
1364 | /* Set T2 to 40ms and T5 to 200ms */ | ||
1365 | I915_WRITE(PP_ON_DELAYS, 0x019007d0); | ||
1366 | |||
1367 | /* Set T3 to 35ms and Tx to 200ms */ | ||
1368 | I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); | ||
1369 | } | ||
1370 | } | ||
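[editor's note] The removed fallback is superseded by the panel power sequencer rework elsewhere in this merge (the PP_ON_DELAYS/PP_OFF_DELAYS patches). For reference, the magic constants decode as delays in 100 µs units split across two fields, matching the T2/T5 and T3/Tx comments; a hedged sketch of the arithmetic, assuming that field layout:

    /* Sketch only -- assumes PP_ON_DELAYS packs two delay fields in
     * 100 us units: 0x0190 = 400 -> 40 ms, 0x07d0 = 2000 -> 200 ms. */
    unsigned int t2_ms = 0x0190 * 100 / 1000;	/* 40 ms  (power up) */
    unsigned int t5_ms = 0x07d0 * 100 / 1000;	/* 200 ms (backlight on) */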
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 46cd5c7ebacd..1b7417e3131b 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -588,7 +588,6 @@ struct bdb_psr { | |||
588 | struct psr_table psr_table[16]; | 588 | struct psr_table psr_table[16]; |
589 | } __packed; | 589 | } __packed; |
590 | 590 | ||
591 | void intel_setup_bios(struct drm_device *dev); | ||
592 | int intel_parse_bios(struct drm_device *dev); | 591 | int intel_parse_bios(struct drm_device *dev); |
593 | 592 | ||
594 | /* | 593 | /* |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 61575f67a626..4823184258a0 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -707,7 +707,6 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) | |||
707 | intel_dp->DP = intel_dig_port->saved_port_bits | | 707 | intel_dp->DP = intel_dig_port->saved_port_bits | |
708 | DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0); | 708 | DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0); |
709 | intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); | 709 | intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); |
710 | |||
711 | } | 710 | } |
712 | 711 | ||
713 | static struct intel_encoder * | 712 | static struct intel_encoder * |
@@ -1242,9 +1241,10 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */, | |||
1242 | static bool | 1241 | static bool |
1243 | hsw_ddi_pll_select(struct intel_crtc *intel_crtc, | 1242 | hsw_ddi_pll_select(struct intel_crtc *intel_crtc, |
1244 | struct intel_crtc_state *crtc_state, | 1243 | struct intel_crtc_state *crtc_state, |
1245 | struct intel_encoder *intel_encoder, | 1244 | struct intel_encoder *intel_encoder) |
1246 | int clock) | ||
1247 | { | 1245 | { |
1246 | int clock = crtc_state->port_clock; | ||
1247 | |||
1248 | if (intel_encoder->type == INTEL_OUTPUT_HDMI) { | 1248 | if (intel_encoder->type == INTEL_OUTPUT_HDMI) { |
1249 | struct intel_shared_dpll *pll; | 1249 | struct intel_shared_dpll *pll; |
1250 | uint32_t val; | 1250 | uint32_t val; |
@@ -1523,11 +1523,11 @@ skip_remaining_dividers: | |||
1523 | static bool | 1523 | static bool |
1524 | skl_ddi_pll_select(struct intel_crtc *intel_crtc, | 1524 | skl_ddi_pll_select(struct intel_crtc *intel_crtc, |
1525 | struct intel_crtc_state *crtc_state, | 1525 | struct intel_crtc_state *crtc_state, |
1526 | struct intel_encoder *intel_encoder, | 1526 | struct intel_encoder *intel_encoder) |
1527 | int clock) | ||
1528 | { | 1527 | { |
1529 | struct intel_shared_dpll *pll; | 1528 | struct intel_shared_dpll *pll; |
1530 | uint32_t ctrl1, cfgcr1, cfgcr2; | 1529 | uint32_t ctrl1, cfgcr1, cfgcr2; |
1530 | int clock = crtc_state->port_clock; | ||
1531 | 1531 | ||
1532 | /* | 1532 | /* |
1533 | * See comment in intel_dpll_hw_state to understand why we always use 0 | 1533 | * See comment in intel_dpll_hw_state to understand why we always use 0 |
@@ -1615,14 +1615,14 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = { | |||
1615 | static bool | 1615 | static bool |
1616 | bxt_ddi_pll_select(struct intel_crtc *intel_crtc, | 1616 | bxt_ddi_pll_select(struct intel_crtc *intel_crtc, |
1617 | struct intel_crtc_state *crtc_state, | 1617 | struct intel_crtc_state *crtc_state, |
1618 | struct intel_encoder *intel_encoder, | 1618 | struct intel_encoder *intel_encoder) |
1619 | int clock) | ||
1620 | { | 1619 | { |
1621 | struct intel_shared_dpll *pll; | 1620 | struct intel_shared_dpll *pll; |
1622 | struct bxt_clk_div clk_div = {0}; | 1621 | struct bxt_clk_div clk_div = {0}; |
1623 | int vco = 0; | 1622 | int vco = 0; |
1624 | uint32_t prop_coef, int_coef, gain_ctl, targ_cnt; | 1623 | uint32_t prop_coef, int_coef, gain_ctl, targ_cnt; |
1625 | uint32_t lanestagger; | 1624 | uint32_t lanestagger; |
1625 | int clock = crtc_state->port_clock; | ||
1626 | 1626 | ||
1627 | if (intel_encoder->type == INTEL_OUTPUT_HDMI) { | 1627 | if (intel_encoder->type == INTEL_OUTPUT_HDMI) { |
1628 | intel_clock_t best_clock; | 1628 | intel_clock_t best_clock; |
@@ -1750,17 +1750,16 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc, | |||
1750 | struct drm_device *dev = intel_crtc->base.dev; | 1750 | struct drm_device *dev = intel_crtc->base.dev; |
1751 | struct intel_encoder *intel_encoder = | 1751 | struct intel_encoder *intel_encoder = |
1752 | intel_ddi_get_crtc_new_encoder(crtc_state); | 1752 | intel_ddi_get_crtc_new_encoder(crtc_state); |
1753 | int clock = crtc_state->port_clock; | ||
1754 | 1753 | ||
1755 | if (IS_SKYLAKE(dev)) | 1754 | if (IS_SKYLAKE(dev)) |
1756 | return skl_ddi_pll_select(intel_crtc, crtc_state, | 1755 | return skl_ddi_pll_select(intel_crtc, crtc_state, |
1757 | intel_encoder, clock); | 1756 | intel_encoder); |
1758 | else if (IS_BROXTON(dev)) | 1757 | else if (IS_BROXTON(dev)) |
1759 | return bxt_ddi_pll_select(intel_crtc, crtc_state, | 1758 | return bxt_ddi_pll_select(intel_crtc, crtc_state, |
1760 | intel_encoder, clock); | 1759 | intel_encoder); |
1761 | else | 1760 | else |
1762 | return hsw_ddi_pll_select(intel_crtc, crtc_state, | 1761 | return hsw_ddi_pll_select(intel_crtc, crtc_state, |
1763 | intel_encoder, clock); | 1762 | intel_encoder); |
1764 | } | 1763 | } |
1765 | 1764 | ||
1766 | void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) | 1765 | void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) |
@@ -1893,7 +1892,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) | |||
1893 | } else | 1892 | } else |
1894 | temp |= TRANS_DDI_MODE_SELECT_DP_SST; | 1893 | temp |= TRANS_DDI_MODE_SELECT_DP_SST; |
1895 | 1894 | ||
1896 | temp |= DDI_PORT_WIDTH(intel_dp->lane_count); | 1895 | temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count); |
1897 | } else if (type == INTEL_OUTPUT_DP_MST) { | 1896 | } else if (type == INTEL_OUTPUT_DP_MST) { |
1898 | struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp; | 1897 | struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp; |
1899 | 1898 | ||
@@ -1902,7 +1901,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) | |||
1902 | } else | 1901 | } else |
1903 | temp |= TRANS_DDI_MODE_SELECT_DP_SST; | 1902 | temp |= TRANS_DDI_MODE_SELECT_DP_SST; |
1904 | 1903 | ||
1905 | temp |= DDI_PORT_WIDTH(intel_dp->lane_count); | 1904 | temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count); |
1906 | } else { | 1905 | } else { |
1907 | WARN(1, "Invalid encoder type %d for pipe %c\n", | 1906 | WARN(1, "Invalid encoder type %d for pipe %c\n", |
1908 | intel_encoder->type, pipe_name(pipe)); | 1907 | intel_encoder->type, pipe_name(pipe)); |
@@ -2289,6 +2288,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) | |||
2289 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { | 2288 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
2290 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 2289 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
2291 | 2290 | ||
2291 | intel_dp_set_link_params(intel_dp, crtc->config); | ||
2292 | |||
2292 | intel_ddi_init_dp_buf_reg(intel_encoder); | 2293 | intel_ddi_init_dp_buf_reg(intel_encoder); |
2293 | 2294 | ||
2294 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); | 2295 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); |
@@ -3069,6 +3070,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder, | |||
3069 | case TRANS_DDI_MODE_SELECT_DP_SST: | 3070 | case TRANS_DDI_MODE_SELECT_DP_SST: |
3070 | case TRANS_DDI_MODE_SELECT_DP_MST: | 3071 | case TRANS_DDI_MODE_SELECT_DP_MST: |
3071 | pipe_config->has_dp_encoder = true; | 3072 | pipe_config->has_dp_encoder = true; |
3073 | pipe_config->lane_count = | ||
3074 | ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; | ||
3072 | intel_dp_get_m_n(intel_crtc, pipe_config); | 3075 | intel_dp_get_m_n(intel_crtc, pipe_config); |
3073 | break; | 3076 | break; |
3074 | default: | 3077 | default: |
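[editor's note] The lane-count readout added above inverts the DDI_PORT_WIDTH() encoding, which stores lanes minus one in the field. A small round-trip sketch, assuming that encoding:

    /* Assumes DDI_PORT_WIDTH(n) encodes (n - 1), i.e. the inverse of
     * the readout above. */
    u32 temp = DDI_PORT_WIDTH(4);	/* 4 lanes -> field value 3 */
    int lanes = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
    /* lanes == 4 */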
@@ -3215,7 +3218,15 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
3215 | goto err; | 3218 | goto err; |
3216 | 3219 | ||
3217 | intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; | 3220 | intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; |
3218 | dev_priv->hotplug.irq_port[port] = intel_dig_port; | 3221 | /* |
3222 | * On BXT A0/A1, sw needs to activate DDIA HPD logic and | ||
3223 | * interrupts to check the external panel connection. | ||
3224 | */ | ||
3225 | if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0) | ||
3226 | && port == PORT_B) | ||
3227 | dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port; | ||
3228 | else | ||
3229 | dev_priv->hotplug.irq_port[port] = intel_dig_port; | ||
3219 | } | 3230 | } |
3220 | 3231 | ||
3221 | /* In theory we don't need the encoder->type check, but leave it just in | 3232 | /* In theory we don't need the encoder->type check, but leave it just in |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0379f14271de..0bb6d1daecf5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -72,6 +72,10 @@ static const uint32_t skl_primary_formats[] = { | |||
72 | DRM_FORMAT_ABGR8888, | 72 | DRM_FORMAT_ABGR8888, |
73 | DRM_FORMAT_XRGB2101010, | 73 | DRM_FORMAT_XRGB2101010, |
74 | DRM_FORMAT_XBGR2101010, | 74 | DRM_FORMAT_XBGR2101010, |
75 | DRM_FORMAT_YUYV, | ||
76 | DRM_FORMAT_YVYU, | ||
77 | DRM_FORMAT_UYVY, | ||
78 | DRM_FORMAT_VYUY, | ||
75 | }; | 79 | }; |
76 | 80 | ||
77 | /* Cursor formats */ | 81 | /* Cursor formats */ |
@@ -135,6 +139,39 @@ intel_pch_rawclk(struct drm_device *dev) | |||
135 | return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; | 139 | return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; |
136 | } | 140 | } |
137 | 141 | ||
142 | /* hrawclock is 1/4 the FSB frequency */ | ||
143 | int intel_hrawclk(struct drm_device *dev) | ||
144 | { | ||
145 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
146 | uint32_t clkcfg; | ||
147 | |||
148 | /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ | ||
149 | if (IS_VALLEYVIEW(dev)) | ||
150 | return 200; | ||
151 | |||
152 | clkcfg = I915_READ(CLKCFG); | ||
153 | switch (clkcfg & CLKCFG_FSB_MASK) { | ||
154 | case CLKCFG_FSB_400: | ||
155 | return 100; | ||
156 | case CLKCFG_FSB_533: | ||
157 | return 133; | ||
158 | case CLKCFG_FSB_667: | ||
159 | return 166; | ||
160 | case CLKCFG_FSB_800: | ||
161 | return 200; | ||
162 | case CLKCFG_FSB_1067: | ||
163 | return 266; | ||
164 | case CLKCFG_FSB_1333: | ||
165 | return 333; | ||
166 | /* these two are just a guess; one of them might be right */ | ||
167 | case CLKCFG_FSB_1600: | ||
168 | case CLKCFG_FSB_1600_ALT: | ||
169 | return 400; | ||
170 | default: | ||
171 | return 133; | ||
172 | } | ||
173 | } | ||
174 | |||
138 | static inline u32 /* units of 100MHz */ | 175 | static inline u32 /* units of 100MHz */ |
139 | intel_fdi_link_freq(struct drm_device *dev) | 176 | intel_fdi_link_freq(struct drm_device *dev) |
140 | { | 177 | { |
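[editor's note] intel_hrawclk() has simply moved here from intel_dp.c (see the matching removal further down). As the comment says, hrawclk is a quarter of the FSB frequency, so for example the CLKCFG_FSB_800 strap reads back as 200 MHz. A trivial check:

    /* Illustrative: hrawclk is 1/4 the FSB frequency, in MHz. */
    int fsb_mhz = 800;		/* CLKCFG_FSB_800 strap */
    int hrawclk_mhz = fsb_mhz / 4;	/* == 200, as in the switch above */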
@@ -1061,54 +1098,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) | |||
1061 | } | 1098 | } |
1062 | } | 1099 | } |
1063 | 1100 | ||
1064 | /* | ||
1065 | * ibx_digital_port_connected - is the specified port connected? | ||
1066 | * @dev_priv: i915 private structure | ||
1067 | * @port: the port to test | ||
1068 | * | ||
1069 | * Returns true if @port is connected, false otherwise. | ||
1070 | */ | ||
1071 | bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, | ||
1072 | struct intel_digital_port *port) | ||
1073 | { | ||
1074 | u32 bit; | ||
1075 | |||
1076 | if (HAS_PCH_IBX(dev_priv->dev)) { | ||
1077 | switch (port->port) { | ||
1078 | case PORT_B: | ||
1079 | bit = SDE_PORTB_HOTPLUG; | ||
1080 | break; | ||
1081 | case PORT_C: | ||
1082 | bit = SDE_PORTC_HOTPLUG; | ||
1083 | break; | ||
1084 | case PORT_D: | ||
1085 | bit = SDE_PORTD_HOTPLUG; | ||
1086 | break; | ||
1087 | default: | ||
1088 | return true; | ||
1089 | } | ||
1090 | } else { | ||
1091 | switch (port->port) { | ||
1092 | case PORT_B: | ||
1093 | bit = SDE_PORTB_HOTPLUG_CPT; | ||
1094 | break; | ||
1095 | case PORT_C: | ||
1096 | bit = SDE_PORTC_HOTPLUG_CPT; | ||
1097 | break; | ||
1098 | case PORT_D: | ||
1099 | bit = SDE_PORTD_HOTPLUG_CPT; | ||
1100 | break; | ||
1101 | case PORT_E: | ||
1102 | bit = SDE_PORTE_HOTPLUG_SPT; | ||
1103 | break; | ||
1104 | default: | ||
1105 | return true; | ||
1106 | } | ||
1107 | } | ||
1108 | |||
1109 | return I915_READ(SDEISR) & bit; | ||
1110 | } | ||
1111 | |||
1112 | static const char *state_string(bool enabled) | 1101 | static const char *state_string(bool enabled) |
1113 | { | 1102 | { |
1114 | return enabled ? "on" : "off"; | 1103 | return enabled ? "on" : "off"; |
@@ -1585,26 +1574,6 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | |||
1585 | assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); | 1574 | assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); |
1586 | } | 1575 | } |
1587 | 1576 | ||
1588 | static void intel_init_dpio(struct drm_device *dev) | ||
1589 | { | ||
1590 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1591 | |||
1592 | if (!IS_VALLEYVIEW(dev)) | ||
1593 | return; | ||
1594 | |||
1595 | /* | ||
1596 | * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), | ||
1597 | * CHV x1 PHY (DP/HDMI D) | ||
1598 | * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) | ||
1599 | */ | ||
1600 | if (IS_CHERRYVIEW(dev)) { | ||
1601 | DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; | ||
1602 | DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; | ||
1603 | } else { | ||
1604 | DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; | ||
1605 | } | ||
1606 | } | ||
1607 | |||
1608 | static void vlv_enable_pll(struct intel_crtc *crtc, | 1577 | static void vlv_enable_pll(struct intel_crtc *crtc, |
1609 | const struct intel_crtc_state *pipe_config) | 1578 | const struct intel_crtc_state *pipe_config) |
1610 | { | 1579 | { |
@@ -1831,17 +1800,6 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
1831 | val &= ~DPIO_DCLKP_EN; | 1800 | val &= ~DPIO_DCLKP_EN; |
1832 | vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); | 1801 | vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); |
1833 | 1802 | ||
1834 | /* disable left/right clock distribution */ | ||
1835 | if (pipe != PIPE_B) { | ||
1836 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); | ||
1837 | val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); | ||
1838 | vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); | ||
1839 | } else { | ||
1840 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); | ||
1841 | val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); | ||
1842 | vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); | ||
1843 | } | ||
1844 | |||
1845 | mutex_unlock(&dev_priv->sb_lock); | 1803 | mutex_unlock(&dev_priv->sb_lock); |
1846 | } | 1804 | } |
1847 | 1805 | ||
@@ -2936,8 +2894,6 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) | |||
2936 | I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); | 2894 | I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); |
2937 | I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); | 2895 | I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); |
2938 | I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); | 2896 | I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); |
2939 | DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n", | ||
2940 | intel_crtc->base.base.id, intel_crtc->pipe, id); | ||
2941 | } | 2897 | } |
2942 | 2898 | ||
2943 | /* | 2899 | /* |
@@ -3179,24 +3135,20 @@ static void intel_complete_page_flips(struct drm_device *dev) | |||
3179 | 3135 | ||
3180 | static void intel_update_primary_planes(struct drm_device *dev) | 3136 | static void intel_update_primary_planes(struct drm_device *dev) |
3181 | { | 3137 | { |
3182 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3183 | struct drm_crtc *crtc; | 3138 | struct drm_crtc *crtc; |
3184 | 3139 | ||
3185 | for_each_crtc(dev, crtc) { | 3140 | for_each_crtc(dev, crtc) { |
3186 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3141 | struct intel_plane *plane = to_intel_plane(crtc->primary); |
3142 | struct intel_plane_state *plane_state; | ||
3187 | 3143 | ||
3188 | drm_modeset_lock(&crtc->mutex, NULL); | 3144 | drm_modeset_lock_crtc(crtc, &plane->base); |
3189 | /* | 3145 | |
3190 | * FIXME: Once we have proper support for primary planes (and | 3146 | plane_state = to_intel_plane_state(plane->base.state); |
3191 | * disabling them without disabling the entire crtc) allow again | 3147 | |
3192 | * a NULL crtc->primary->fb. | 3148 | if (plane_state->base.fb) |
3193 | */ | 3149 | plane->commit_plane(&plane->base, plane_state); |
3194 | if (intel_crtc->active && crtc->primary->fb) | 3150 | |
3195 | dev_priv->display.update_primary_plane(crtc, | 3151 | drm_modeset_unlock_crtc(crtc); |
3196 | crtc->primary->fb, | ||
3197 | crtc->x, | ||
3198 | crtc->y); | ||
3199 | drm_modeset_unlock(&crtc->mutex); | ||
3200 | } | 3152 | } |
3201 | } | 3153 | } |
3202 | 3154 | ||
@@ -3240,6 +3192,9 @@ void intel_finish_reset(struct drm_device *dev) | |||
3240 | * so update the base address of all primary | 3192 | * so update the base address of all primary |
3241 | * planes to the last fb to make sure we're | 3193 | * planes to the last fb to make sure we're |
3242 | * showing the correct fb after a reset. | 3194 | * showing the correct fb after a reset. |
3195 | * | ||
3196 | * FIXME: Atomic will make this obsolete since we won't schedule | ||
3197 | * CS-based flips (which might get lost in gpu resets) any more. | ||
3243 | */ | 3198 | */ |
3244 | intel_update_primary_planes(dev); | 3199 | intel_update_primary_planes(dev); |
3245 | return; | 3200 | return; |
@@ -4963,12 +4918,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) | |||
4963 | 4918 | ||
4964 | intel_ddi_enable_pipe_clock(intel_crtc); | 4919 | intel_ddi_enable_pipe_clock(intel_crtc); |
4965 | 4920 | ||
4966 | if (INTEL_INFO(dev)->gen == 9) | 4921 | if (INTEL_INFO(dev)->gen >= 9) |
4967 | skylake_pfit_enable(intel_crtc); | 4922 | skylake_pfit_enable(intel_crtc); |
4968 | else if (INTEL_INFO(dev)->gen < 9) | ||
4969 | ironlake_pfit_enable(intel_crtc); | ||
4970 | else | 4923 | else |
4971 | MISSING_CASE(INTEL_INFO(dev)->gen); | 4924 | ironlake_pfit_enable(intel_crtc); |
4972 | 4925 | ||
4973 | /* | 4926 | /* |
4974 | * On ILK+ LUT must be loaded before the pipe is running but with | 4927 | * On ILK+ LUT must be loaded before the pipe is running but with |
@@ -5100,12 +5053,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) | |||
5100 | 5053 | ||
5101 | intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); | 5054 | intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); |
5102 | 5055 | ||
5103 | if (INTEL_INFO(dev)->gen == 9) | 5056 | if (INTEL_INFO(dev)->gen >= 9) |
5104 | skylake_scaler_disable(intel_crtc); | 5057 | skylake_scaler_disable(intel_crtc); |
5105 | else if (INTEL_INFO(dev)->gen < 9) | ||
5106 | ironlake_pfit_disable(intel_crtc); | ||
5107 | else | 5058 | else |
5108 | MISSING_CASE(INTEL_INFO(dev)->gen); | 5059 | ironlake_pfit_disable(intel_crtc); |
5109 | 5060 | ||
5110 | intel_ddi_disable_pipe_clock(intel_crtc); | 5061 | intel_ddi_disable_pipe_clock(intel_crtc); |
5111 | 5062 | ||
@@ -5277,6 +5228,21 @@ static void modeset_update_crtc_power_domains(struct drm_atomic_state *state) | |||
5277 | modeset_put_power_domains(dev_priv, put_domains[i]); | 5228 | modeset_put_power_domains(dev_priv, put_domains[i]); |
5278 | } | 5229 | } |
5279 | 5230 | ||
5231 | static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) | ||
5232 | { | ||
5233 | int max_cdclk_freq = dev_priv->max_cdclk_freq; | ||
5234 | |||
5235 | if (INTEL_INFO(dev_priv)->gen >= 9 || | ||
5236 | IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | ||
5237 | return max_cdclk_freq; | ||
5238 | else if (IS_CHERRYVIEW(dev_priv)) | ||
5239 | return max_cdclk_freq*95/100; | ||
5240 | else if (INTEL_INFO(dev_priv)->gen < 4) | ||
5241 | return 2*max_cdclk_freq*90/100; | ||
5242 | else | ||
5243 | return max_cdclk_freq*90/100; | ||
5244 | } | ||
5245 | |||
5280 | static void intel_update_max_cdclk(struct drm_device *dev) | 5246 | static void intel_update_max_cdclk(struct drm_device *dev) |
5281 | { | 5247 | { |
5282 | struct drm_i915_private *dev_priv = dev->dev_private; | 5248 | struct drm_i915_private *dev_priv = dev->dev_private; |
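[editor's note] intel_compute_max_dotclk() derives the pixel-clock ceiling from the CD clock with a platform guardband: 100% on HSW/BDW/gen9+, 95% on CHV, 90% elsewhere, and doubled on gen3 and earlier, which can drive the pipe at twice cdclk. A worked example with an assumed cdclk:

    /* Illustrative numbers; guardbands as in intel_compute_max_dotclk(). */
    int max_cdclk = 320000;			/* kHz, assumed */
    int chv_max   = max_cdclk * 95 / 100;	/* 304000 kHz on CHV */
    int dflt_max  = max_cdclk * 90 / 100;	/* 288000 kHz elsewhere */
    int gen3_max  = 2 * max_cdclk * 90 / 100;	/* 576000 kHz, gen < 4 */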
@@ -5316,8 +5282,13 @@ static void intel_update_max_cdclk(struct drm_device *dev) | |||
5316 | dev_priv->max_cdclk_freq = dev_priv->cdclk_freq; | 5282 | dev_priv->max_cdclk_freq = dev_priv->cdclk_freq; |
5317 | } | 5283 | } |
5318 | 5284 | ||
5285 | dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv); | ||
5286 | |||
5319 | DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n", | 5287 | DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n", |
5320 | dev_priv->max_cdclk_freq); | 5288 | dev_priv->max_cdclk_freq); |
5289 | |||
5290 | DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n", | ||
5291 | dev_priv->max_dotclk_freq); | ||
5321 | } | 5292 | } |
5322 | 5293 | ||
5323 | static void intel_update_cdclk(struct drm_device *dev) | 5294 | static void intel_update_cdclk(struct drm_device *dev) |
@@ -6035,13 +6006,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) | |||
6035 | 6006 | ||
6036 | is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); | 6007 | is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); |
6037 | 6008 | ||
6038 | if (!is_dsi) { | ||
6039 | if (IS_CHERRYVIEW(dev)) | ||
6040 | chv_prepare_pll(intel_crtc, intel_crtc->config); | ||
6041 | else | ||
6042 | vlv_prepare_pll(intel_crtc, intel_crtc->config); | ||
6043 | } | ||
6044 | |||
6045 | if (intel_crtc->config->has_dp_encoder) | 6009 | if (intel_crtc->config->has_dp_encoder) |
6046 | intel_dp_set_m_n(intel_crtc, M1_N1); | 6010 | intel_dp_set_m_n(intel_crtc, M1_N1); |
6047 | 6011 | ||
@@ -6065,10 +6029,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) | |||
6065 | encoder->pre_pll_enable(encoder); | 6029 | encoder->pre_pll_enable(encoder); |
6066 | 6030 | ||
6067 | if (!is_dsi) { | 6031 | if (!is_dsi) { |
6068 | if (IS_CHERRYVIEW(dev)) | 6032 | if (IS_CHERRYVIEW(dev)) { |
6033 | chv_prepare_pll(intel_crtc, intel_crtc->config); | ||
6069 | chv_enable_pll(intel_crtc, intel_crtc->config); | 6034 | chv_enable_pll(intel_crtc, intel_crtc->config); |
6070 | else | 6035 | } else { |
6036 | vlv_prepare_pll(intel_crtc, intel_crtc->config); | ||
6071 | vlv_enable_pll(intel_crtc, intel_crtc->config); | 6037 | vlv_enable_pll(intel_crtc, intel_crtc->config); |
6038 | } | ||
6072 | } | 6039 | } |
6073 | 6040 | ||
6074 | for_each_encoder_on_crtc(dev, crtc, encoder) | 6041 | for_each_encoder_on_crtc(dev, crtc, encoder) |
@@ -6196,6 +6163,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) | |||
6196 | i9xx_disable_pll(intel_crtc); | 6163 | i9xx_disable_pll(intel_crtc); |
6197 | } | 6164 | } |
6198 | 6165 | ||
6166 | for_each_encoder_on_crtc(dev, crtc, encoder) | ||
6167 | if (encoder->post_pll_disable) | ||
6168 | encoder->post_pll_disable(encoder); | ||
6169 | |||
6199 | if (!IS_GEN2(dev)) | 6170 | if (!IS_GEN2(dev)) |
6200 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); | 6171 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); |
6201 | 6172 | ||
@@ -7377,8 +7348,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc, | |||
7377 | 1 << DPIO_CHV_N_DIV_SHIFT); | 7348 | 1 << DPIO_CHV_N_DIV_SHIFT); |
7378 | 7349 | ||
7379 | /* M2 fraction division */ | 7350 | /* M2 fraction division */ |
7380 | if (bestm2_frac) | 7351 | vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); |
7381 | vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); | ||
7382 | 7352 | ||
7383 | /* M2 fraction division enable */ | 7353 | /* M2 fraction division enable */ |
7384 | dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); | 7354 | dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); |
@@ -8119,6 +8089,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, | |||
8119 | else | 8089 | else |
8120 | i9xx_crtc_clock_get(crtc, pipe_config); | 8090 | i9xx_crtc_clock_get(crtc, pipe_config); |
8121 | 8091 | ||
8092 | /* | ||
8093 | * Normally the dotclock is filled in by the encoder .get_config() | ||
8094 | * but in case the pipe is enabled w/o any ports we need a sane | ||
8095 | * default. | ||
8096 | */ | ||
8097 | pipe_config->base.adjusted_mode.crtc_clock = | ||
8098 | pipe_config->port_clock / pipe_config->pixel_multiplier; | ||
8099 | |||
8122 | return true; | 8100 | return true; |
8123 | } | 8101 | } |
8124 | 8102 | ||
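[editor's note] The new fallback gives adjusted_mode.crtc_clock a sane default even when no encoder .get_config() fills it in. The pixel multiplier matters for SDVO-style ports that clock the port faster than the pipe; an illustration with assumed values:

    /* Illustrative: a 2x pixel multiplier halves the derived dotclock. */
    int port_clock = 200000, pixel_multiplier = 2;	/* assumed, kHz */
    int crtc_clock = port_clock / pixel_multiplier;	/* 100000 kHz */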
@@ -8380,8 +8358,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, | |||
8380 | 8358 | ||
8381 | if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) | 8359 | if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) |
8382 | with_spread = true; | 8360 | with_spread = true; |
8383 | if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE && | 8361 | if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n")) |
8384 | with_fdi, "LP PCH doesn't have FDI\n")) | ||
8385 | with_fdi = false; | 8362 | with_fdi = false; |
8386 | 8363 | ||
8387 | mutex_lock(&dev_priv->sb_lock); | 8364 | mutex_lock(&dev_priv->sb_lock); |
@@ -8404,8 +8381,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, | |||
8404 | } | 8381 | } |
8405 | } | 8382 | } |
8406 | 8383 | ||
8407 | reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? | 8384 | reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0; |
8408 | SBI_GEN0 : SBI_DBUFF0; | ||
8409 | tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); | 8385 | tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); |
8410 | tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; | 8386 | tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; |
8411 | intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); | 8387 | intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); |
@@ -8421,8 +8397,7 @@ static void lpt_disable_clkout_dp(struct drm_device *dev) | |||
8421 | 8397 | ||
8422 | mutex_lock(&dev_priv->sb_lock); | 8398 | mutex_lock(&dev_priv->sb_lock); |
8423 | 8399 | ||
8424 | reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? | 8400 | reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0; |
8425 | SBI_GEN0 : SBI_DBUFF0; | ||
8426 | tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); | 8401 | tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); |
8427 | tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; | 8402 | tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; |
8428 | intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); | 8403 | intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); |
@@ -9434,7 +9409,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv) | |||
9434 | 9409 | ||
9435 | DRM_DEBUG_KMS("Enabling package C8+\n"); | 9410 | DRM_DEBUG_KMS("Enabling package C8+\n"); |
9436 | 9411 | ||
9437 | if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { | 9412 | if (HAS_PCH_LPT_LP(dev)) { |
9438 | val = I915_READ(SOUTH_DSPCLK_GATE_D); | 9413 | val = I915_READ(SOUTH_DSPCLK_GATE_D); |
9439 | val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; | 9414 | val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; |
9440 | I915_WRITE(SOUTH_DSPCLK_GATE_D, val); | 9415 | I915_WRITE(SOUTH_DSPCLK_GATE_D, val); |
@@ -9454,7 +9429,7 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv) | |||
9454 | hsw_restore_lcpll(dev_priv); | 9429 | hsw_restore_lcpll(dev_priv); |
9455 | lpt_init_pch_refclk(dev); | 9430 | lpt_init_pch_refclk(dev); |
9456 | 9431 | ||
9457 | if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { | 9432 | if (HAS_PCH_LPT_LP(dev)) { |
9458 | val = I915_READ(SOUTH_DSPCLK_GATE_D); | 9433 | val = I915_READ(SOUTH_DSPCLK_GATE_D); |
9459 | val |= PCH_LP_PARTITION_LEVEL_DISABLE; | 9434 | val |= PCH_LP_PARTITION_LEVEL_DISABLE; |
9460 | I915_WRITE(SOUTH_DSPCLK_GATE_D, val); | 9435 | I915_WRITE(SOUTH_DSPCLK_GATE_D, val); |
@@ -9804,12 +9779,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, | |||
9804 | } | 9779 | } |
9805 | 9780 | ||
9806 | if (intel_display_power_is_enabled(dev_priv, pfit_domain)) { | 9781 | if (intel_display_power_is_enabled(dev_priv, pfit_domain)) { |
9807 | if (INTEL_INFO(dev)->gen == 9) | 9782 | if (INTEL_INFO(dev)->gen >= 9) |
9808 | skylake_get_pfit_config(crtc, pipe_config); | 9783 | skylake_get_pfit_config(crtc, pipe_config); |
9809 | else if (INTEL_INFO(dev)->gen < 9) | ||
9810 | ironlake_get_pfit_config(crtc, pipe_config); | ||
9811 | else | 9784 | else |
9812 | MISSING_CASE(INTEL_INFO(dev)->gen); | 9785 | ironlake_get_pfit_config(crtc, pipe_config); |
9813 | } | 9786 | } |
9814 | 9787 | ||
9815 | if (IS_HASWELL(dev)) | 9788 | if (IS_HASWELL(dev)) |
@@ -9943,8 +9916,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
9943 | struct drm_i915_private *dev_priv = dev->dev_private; | 9916 | struct drm_i915_private *dev_priv = dev->dev_private; |
9944 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 9917 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
9945 | int pipe = intel_crtc->pipe; | 9918 | int pipe = intel_crtc->pipe; |
9946 | int x = crtc->cursor_x; | 9919 | struct drm_plane_state *cursor_state = crtc->cursor->state; |
9947 | int y = crtc->cursor_y; | 9920 | int x = cursor_state->crtc_x; |
9921 | int y = cursor_state->crtc_y; | ||
9948 | u32 base = 0, pos = 0; | 9922 | u32 base = 0, pos = 0; |
9949 | 9923 | ||
9950 | if (on) | 9924 | if (on) |
@@ -9957,7 +9931,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
9957 | base = 0; | 9931 | base = 0; |
9958 | 9932 | ||
9959 | if (x < 0) { | 9933 | if (x < 0) { |
9960 | if (x + intel_crtc->base.cursor->state->crtc_w <= 0) | 9934 | if (x + cursor_state->crtc_w <= 0) |
9961 | base = 0; | 9935 | base = 0; |
9962 | 9936 | ||
9963 | pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; | 9937 | pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; |
@@ -9966,7 +9940,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
9966 | pos |= x << CURSOR_X_SHIFT; | 9940 | pos |= x << CURSOR_X_SHIFT; |
9967 | 9941 | ||
9968 | if (y < 0) { | 9942 | if (y < 0) { |
9969 | if (y + intel_crtc->base.cursor->state->crtc_h <= 0) | 9943 | if (y + cursor_state->crtc_h <= 0) |
9970 | base = 0; | 9944 | base = 0; |
9971 | 9945 | ||
9972 | pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; | 9946 | pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; |
@@ -9982,8 +9956,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
9982 | /* ILK+ do this automagically */ | 9956 | /* ILK+ do this automagically */ |
9983 | if (HAS_GMCH_DISPLAY(dev) && | 9957 | if (HAS_GMCH_DISPLAY(dev) && |
9984 | crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) { | 9958 | crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) { |
9985 | base += (intel_crtc->base.cursor->state->crtc_h * | 9959 | base += (cursor_state->crtc_h * |
9986 | intel_crtc->base.cursor->state->crtc_w - 1) * 4; | 9960 | cursor_state->crtc_w - 1) * 4; |
9987 | } | 9961 | } |
9988 | 9962 | ||
9989 | if (IS_845G(dev) || IS_I865G(dev)) | 9963 | if (IS_845G(dev) || IS_I865G(dev)) |
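[editor's note] The CURPOS write above packs signed X/Y into one register: a negative coordinate sets the per-axis sign bit and stores the magnitude, and the cursor is disabled (base = 0) once it is fully off screen. A compact sketch of the packing, X axis only, using the same bit names:

    /* Sketch of the CURPOS packing above; Y is analogous. */
    u32 pos = 0;
    int x = -8;			/* assumed cursor position */

    if (x < 0) {
    	pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;	/* sign bit... */
    	x = -x;						/* ...plus magnitude */
    }
    pos |= x << CURSOR_X_SHIFT;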
@@ -11034,10 +11008,10 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
11034 | DERRMR_PIPEB_PRI_FLIP_DONE | | 11008 | DERRMR_PIPEB_PRI_FLIP_DONE | |
11035 | DERRMR_PIPEC_PRI_FLIP_DONE)); | 11009 | DERRMR_PIPEC_PRI_FLIP_DONE)); |
11036 | if (IS_GEN8(dev)) | 11010 | if (IS_GEN8(dev)) |
11037 | intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) | | 11011 | intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 | |
11038 | MI_SRM_LRM_GLOBAL_GTT); | 11012 | MI_SRM_LRM_GLOBAL_GTT); |
11039 | else | 11013 | else |
11040 | intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | | 11014 | intel_ring_emit(ring, MI_STORE_REGISTER_MEM | |
11041 | MI_SRM_LRM_GLOBAL_GTT); | 11015 | MI_SRM_LRM_GLOBAL_GTT); |
11042 | intel_ring_emit(ring, DERRMR); | 11016 | intel_ring_emit(ring, DERRMR); |
11043 | intel_ring_emit(ring, ring->scratch.gtt_offset + 256); | 11017 | intel_ring_emit(ring, ring->scratch.gtt_offset + 256); |
@@ -11161,11 +11135,10 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc) | |||
11161 | static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) | 11135 | static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) |
11162 | { | 11136 | { |
11163 | struct drm_device *dev = intel_crtc->base.dev; | 11137 | struct drm_device *dev = intel_crtc->base.dev; |
11164 | u32 start_vbl_count; | ||
11165 | 11138 | ||
11166 | intel_mark_page_flip_active(intel_crtc); | 11139 | intel_mark_page_flip_active(intel_crtc); |
11167 | 11140 | ||
11168 | intel_pipe_update_start(intel_crtc, &start_vbl_count); | 11141 | intel_pipe_update_start(intel_crtc); |
11169 | 11142 | ||
11170 | if (INTEL_INFO(dev)->gen >= 9) | 11143 | if (INTEL_INFO(dev)->gen >= 9) |
11171 | skl_do_mmio_flip(intel_crtc); | 11144 | skl_do_mmio_flip(intel_crtc); |
@@ -11173,7 +11146,7 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) | |||
11173 | /* use_mmio_flip() restricts MMIO flips to ilk+ */ | 11146 | /* use_mmio_flip() restricts MMIO flips to ilk+ */ |
11174 | ilk_do_mmio_flip(intel_crtc); | 11147 | ilk_do_mmio_flip(intel_crtc); |
11175 | 11148 | ||
11176 | intel_pipe_update_end(intel_crtc, start_vbl_count); | 11149 | intel_pipe_update_end(intel_crtc); |
11177 | } | 11150 | } |
11178 | 11151 | ||
11179 | static void intel_mmio_flip_work_func(struct work_struct *work) | 11152 | static void intel_mmio_flip_work_func(struct work_struct *work) |
@@ -11237,6 +11210,9 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev, | |||
11237 | if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) | 11210 | if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) |
11238 | return true; | 11211 | return true; |
11239 | 11212 | ||
11213 | if (atomic_read(&work->pending) < INTEL_FLIP_PENDING) | ||
11214 | return false; | ||
11215 | |||
11240 | if (!work->enable_stall_check) | 11216 | if (!work->enable_stall_check) |
11241 | return false; | 11217 | return false; |
11242 | 11218 | ||
@@ -11627,7 +11603,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, | |||
11627 | intel_crtc->atomic.update_wm_pre = true; | 11603 | intel_crtc->atomic.update_wm_pre = true; |
11628 | } | 11604 | } |
11629 | 11605 | ||
11630 | if (visible) | 11606 | if (visible || was_visible) |
11631 | intel_crtc->atomic.fb_bits |= | 11607 | intel_crtc->atomic.fb_bits |= |
11632 | to_intel_plane(plane)->frontbuffer_bit; | 11608 | to_intel_plane(plane)->frontbuffer_bit; |
11633 | 11609 | ||
@@ -11900,14 +11876,16 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, | |||
11900 | pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n, | 11876 | pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n, |
11901 | pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, | 11877 | pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, |
11902 | pipe_config->fdi_m_n.tu); | 11878 | pipe_config->fdi_m_n.tu); |
11903 | DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", | 11879 | DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", |
11904 | pipe_config->has_dp_encoder, | 11880 | pipe_config->has_dp_encoder, |
11881 | pipe_config->lane_count, | ||
11905 | pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, | 11882 | pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, |
11906 | pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, | 11883 | pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, |
11907 | pipe_config->dp_m_n.tu); | 11884 | pipe_config->dp_m_n.tu); |
11908 | 11885 | ||
11909 | DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n", | 11886 | DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n", |
11910 | pipe_config->has_dp_encoder, | 11887 | pipe_config->has_dp_encoder, |
11888 | pipe_config->lane_count, | ||
11911 | pipe_config->dp_m2_n2.gmch_m, | 11889 | pipe_config->dp_m2_n2.gmch_m, |
11912 | pipe_config->dp_m2_n2.gmch_n, | 11890 | pipe_config->dp_m2_n2.gmch_n, |
11913 | pipe_config->dp_m2_n2.link_m, | 11891 | pipe_config->dp_m2_n2.link_m, |
@@ -12119,10 +12097,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, | |||
12119 | (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) | 12097 | (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) |
12120 | pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; | 12098 | pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; |
12121 | 12099 | ||
12122 | /* Compute a starting value for pipe_config->pipe_bpp taking the source | ||
12123 | * plane pixel format and any sink constraints into account. Returns the | ||
12124 | * source plane bpp so that dithering can be selected on mismatches | ||
12125 | * after encoders and crtc also have had their say. */ | ||
12126 | base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc), | 12100 | base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc), |
12127 | pipe_config); | 12101 | pipe_config); |
12128 | if (base_bpp < 0) | 12102 | if (base_bpp < 0) |
@@ -12191,7 +12165,7 @@ encoder_retry: | |||
12191 | /* Dithering seems to not pass-through bits correctly when it should, so | 12165 | /* Dithering seems to not pass-through bits correctly when it should, so |
12192 | * only enable it on 6bpc panels. */ | 12166 | * only enable it on 6bpc panels. */ |
12193 | pipe_config->dither = pipe_config->pipe_bpp == 6*3; | 12167 | pipe_config->dither = pipe_config->pipe_bpp == 6*3; |
12194 | DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n", | 12168 | DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", |
12195 | base_bpp, pipe_config->pipe_bpp, pipe_config->dither); | 12169 | base_bpp, pipe_config->pipe_bpp, pipe_config->dither); |
12196 | 12170 | ||
12197 | fail: | 12171 | fail: |
@@ -12414,6 +12388,7 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
12414 | PIPE_CONF_CHECK_M_N(fdi_m_n); | 12388 | PIPE_CONF_CHECK_M_N(fdi_m_n); |
12415 | 12389 | ||
12416 | PIPE_CONF_CHECK_I(has_dp_encoder); | 12390 | PIPE_CONF_CHECK_I(has_dp_encoder); |
12391 | PIPE_CONF_CHECK_I(lane_count); | ||
12417 | 12392 | ||
12418 | if (INTEL_INFO(dev)->gen < 8) { | 12393 | if (INTEL_INFO(dev)->gen < 8) { |
12419 | PIPE_CONF_CHECK_M_N(dp_m_n); | 12394 | PIPE_CONF_CHECK_M_N(dp_m_n); |
@@ -12464,16 +12439,16 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
12464 | PIPE_CONF_CHECK_I(pipe_src_w); | 12439 | PIPE_CONF_CHECK_I(pipe_src_w); |
12465 | PIPE_CONF_CHECK_I(pipe_src_h); | 12440 | PIPE_CONF_CHECK_I(pipe_src_h); |
12466 | 12441 | ||
12467 | PIPE_CONF_CHECK_I(gmch_pfit.control); | 12442 | PIPE_CONF_CHECK_X(gmch_pfit.control); |
12468 | /* pfit ratios are autocomputed by the hw on gen4+ */ | 12443 | /* pfit ratios are autocomputed by the hw on gen4+ */ |
12469 | if (INTEL_INFO(dev)->gen < 4) | 12444 | if (INTEL_INFO(dev)->gen < 4) |
12470 | PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); | 12445 | PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); |
12471 | PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); | 12446 | PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); |
12472 | 12447 | ||
12473 | PIPE_CONF_CHECK_I(pch_pfit.enabled); | 12448 | PIPE_CONF_CHECK_I(pch_pfit.enabled); |
12474 | if (current_config->pch_pfit.enabled) { | 12449 | if (current_config->pch_pfit.enabled) { |
12475 | PIPE_CONF_CHECK_I(pch_pfit.pos); | 12450 | PIPE_CONF_CHECK_X(pch_pfit.pos); |
12476 | PIPE_CONF_CHECK_I(pch_pfit.size); | 12451 | PIPE_CONF_CHECK_X(pch_pfit.size); |
12477 | } | 12452 | } |
12478 | 12453 | ||
12479 | PIPE_CONF_CHECK_I(scaler_state.scaler_id); | 12454 | PIPE_CONF_CHECK_I(scaler_state.scaler_id); |
@@ -13451,7 +13426,9 @@ intel_commit_primary_plane(struct drm_plane *plane, | |||
13451 | /* FIXME: kill this fastboot hack */ | 13426 | /* FIXME: kill this fastboot hack */ |
13452 | intel_update_pipe_size(intel_crtc); | 13427 | intel_update_pipe_size(intel_crtc); |
13453 | 13428 | ||
13454 | dev_priv->display.update_primary_plane(crtc, fb, crtc->x, crtc->y); | 13429 | dev_priv->display.update_primary_plane(crtc, fb, |
13430 | state->src.x1 >> 16, | ||
13431 | state->src.y1 >> 16); | ||
13455 | } | 13432 | } |
13456 | 13433 | ||
13457 | static void | 13434 | static void |
@@ -13475,7 +13452,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, | |||
13475 | 13452 | ||
13476 | /* Perform vblank evasion around commit operation */ | 13453 | /* Perform vblank evasion around commit operation */ |
13477 | if (crtc->state->active) | 13454 | if (crtc->state->active) |
13478 | intel_pipe_update_start(intel_crtc, &intel_crtc->start_vbl_count); | 13455 | intel_pipe_update_start(intel_crtc); |
13479 | 13456 | ||
13480 | if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9) | 13457 | if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9) |
13481 | skl_detach_scalers(intel_crtc); | 13458 | skl_detach_scalers(intel_crtc); |
@@ -13487,7 +13464,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc, | |||
13487 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 13464 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
13488 | 13465 | ||
13489 | if (crtc->state->active) | 13466 | if (crtc->state->active) |
13490 | intel_pipe_update_end(intel_crtc, intel_crtc->start_vbl_count); | 13467 | intel_pipe_update_end(intel_crtc); |
13491 | } | 13468 | } |
13492 | 13469 | ||
13493 | /** | 13470 | /** |
@@ -13656,10 +13633,6 @@ intel_commit_cursor_plane(struct drm_plane *plane, | |||
13656 | crtc = crtc ? crtc : plane->crtc; | 13633 | crtc = crtc ? crtc : plane->crtc; |
13657 | intel_crtc = to_intel_crtc(crtc); | 13634 | intel_crtc = to_intel_crtc(crtc); |
13658 | 13635 | ||
13659 | plane->fb = state->base.fb; | ||
13660 | crtc->cursor_x = state->base.crtc_x; | ||
13661 | crtc->cursor_y = state->base.crtc_y; | ||
13662 | |||
13663 | if (intel_crtc->cursor_bo == obj) | 13636 | if (intel_crtc->cursor_bo == obj) |
13664 | goto update; | 13637 | goto update; |
13665 | 13638 | ||
@@ -14798,8 +14771,6 @@ void intel_modeset_init(struct drm_device *dev) | |||
14798 | } | 14771 | } |
14799 | } | 14772 | } |
14800 | 14773 | ||
14801 | intel_init_dpio(dev); | ||
14802 | |||
14803 | intel_shared_dpll_init(dev); | 14774 | intel_shared_dpll_init(dev); |
14804 | 14775 | ||
14805 | /* Just disable it once at startup */ | 14776 | /* Just disable it once at startup */ |
@@ -14881,13 +14852,22 @@ intel_check_plane_mapping(struct intel_crtc *crtc) | |||
14881 | return true; | 14852 | return true; |
14882 | } | 14853 | } |
14883 | 14854 | ||
14855 | static bool intel_crtc_has_encoders(struct intel_crtc *crtc) | ||
14856 | { | ||
14857 | struct drm_device *dev = crtc->base.dev; | ||
14858 | struct intel_encoder *encoder; | ||
14859 | |||
14860 | for_each_encoder_on_crtc(dev, &crtc->base, encoder) | ||
14861 | return true; | ||
14862 | |||
14863 | return false; | ||
14864 | } | ||
14865 | |||
14884 | static void intel_sanitize_crtc(struct intel_crtc *crtc) | 14866 | static void intel_sanitize_crtc(struct intel_crtc *crtc) |
14885 | { | 14867 | { |
14886 | struct drm_device *dev = crtc->base.dev; | 14868 | struct drm_device *dev = crtc->base.dev; |
14887 | struct drm_i915_private *dev_priv = dev->dev_private; | 14869 | struct drm_i915_private *dev_priv = dev->dev_private; |
14888 | struct intel_encoder *encoder; | ||
14889 | u32 reg; | 14870 | u32 reg; |
14890 | bool enable; | ||
14891 | 14871 | ||
14892 | /* Clear any frame start delays used for debugging left by the BIOS */ | 14872 | /* Clear any frame start delays used for debugging left by the BIOS */ |
14893 | reg = PIPECONF(crtc->config->cpu_transcoder); | 14873 | reg = PIPECONF(crtc->config->cpu_transcoder); |
@@ -14931,16 +14911,11 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) | |||
14931 | 14911 | ||
14932 | /* Adjust the state of the output pipe according to whether we | 14912 | /* Adjust the state of the output pipe according to whether we |
14933 | * have active connectors/encoders. */ | 14913 | * have active connectors/encoders. */ |
14934 | enable = false; | 14914 | if (!intel_crtc_has_encoders(crtc)) |
14935 | for_each_encoder_on_crtc(dev, &crtc->base, encoder) { | ||
14936 | enable = true; | ||
14937 | break; | ||
14938 | } | ||
14939 | |||
14940 | if (!enable) | ||
14941 | intel_crtc_disable_noatomic(&crtc->base); | 14915 | intel_crtc_disable_noatomic(&crtc->base); |
14942 | 14916 | ||
14943 | if (crtc->active != crtc->base.state->active) { | 14917 | if (crtc->active != crtc->base.state->active) { |
14918 | struct intel_encoder *encoder; | ||
14944 | 14919 | ||
14945 | /* This can happen either due to bugs in the get_hw_state | 14920 | /* This can happen either due to bugs in the get_hw_state |
14946 | * functions or because of calls to intel_crtc_disable_noatomic, | 14921 | * functions or because of calls to intel_crtc_disable_noatomic, |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 7bb96d5850de..292c753abb38 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -130,6 +130,11 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp); | |||
130 | static void vlv_steal_power_sequencer(struct drm_device *dev, | 130 | static void vlv_steal_power_sequencer(struct drm_device *dev, |
131 | enum pipe pipe); | 131 | enum pipe pipe); |
132 | 132 | ||
133 | static unsigned int intel_dp_unused_lane_mask(int lane_count) | ||
134 | { | ||
135 | return ~((1 << lane_count) - 1) & 0xf; | ||
136 | } | ||
137 | |||
133 | static int | 138 | static int |
134 | intel_dp_max_link_bw(struct intel_dp *intel_dp) | 139 | intel_dp_max_link_bw(struct intel_dp *intel_dp) |
135 | { | 140 | { |
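[editor's note] intel_dp_unused_lane_mask() is plain bit math over the four PHY lanes: set bits mark the lanes a configuration leaves idle. For lane_count = 2 that is ~0b0011 & 0xf = 0b1100. A quick check:

    /* Quick check of intel_dp_unused_lane_mask(). */
    int lane_count = 2;
    unsigned int mask = ~((1 << lane_count) - 1) & 0xf;	/* 0xc: lanes 2,3 */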
@@ -253,40 +258,6 @@ static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) | |||
253 | dst[i] = src >> ((3-i) * 8); | 258 | dst[i] = src >> ((3-i) * 8); |
254 | } | 259 | } |
255 | 260 | ||
256 | /* hrawclock is 1/4 the FSB frequency */ | ||
257 | static int | ||
258 | intel_hrawclk(struct drm_device *dev) | ||
259 | { | ||
260 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
261 | uint32_t clkcfg; | ||
262 | |||
263 | /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ | ||
264 | if (IS_VALLEYVIEW(dev)) | ||
265 | return 200; | ||
266 | |||
267 | clkcfg = I915_READ(CLKCFG); | ||
268 | switch (clkcfg & CLKCFG_FSB_MASK) { | ||
269 | case CLKCFG_FSB_400: | ||
270 | return 100; | ||
271 | case CLKCFG_FSB_533: | ||
272 | return 133; | ||
273 | case CLKCFG_FSB_667: | ||
274 | return 166; | ||
275 | case CLKCFG_FSB_800: | ||
276 | return 200; | ||
277 | case CLKCFG_FSB_1067: | ||
278 | return 266; | ||
279 | case CLKCFG_FSB_1333: | ||
280 | return 333; | ||
281 | /* these two are just a guess; one of them might be right */ | ||
282 | case CLKCFG_FSB_1600: | ||
283 | case CLKCFG_FSB_1600_ALT: | ||
284 | return 400; | ||
285 | default: | ||
286 | return 133; | ||
287 | } | ||
288 | } | ||
289 | |||
290 | static void | 261 | static void |
291 | intel_dp_init_panel_power_sequencer(struct drm_device *dev, | 262 | intel_dp_init_panel_power_sequencer(struct drm_device *dev, |
292 | struct intel_dp *intel_dp); | 263 | struct intel_dp *intel_dp); |
@@ -333,7 +304,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) | |||
333 | struct drm_device *dev = intel_dig_port->base.base.dev; | 304 | struct drm_device *dev = intel_dig_port->base.base.dev; |
334 | struct drm_i915_private *dev_priv = dev->dev_private; | 305 | struct drm_i915_private *dev_priv = dev->dev_private; |
335 | enum pipe pipe = intel_dp->pps_pipe; | 306 | enum pipe pipe = intel_dp->pps_pipe; |
336 | bool pll_enabled; | 307 | bool pll_enabled, release_cl_override = false; |
308 | enum dpio_phy phy = DPIO_PHY(pipe); | ||
309 | enum dpio_channel ch = vlv_pipe_to_channel(pipe); | ||
337 | uint32_t DP; | 310 | uint32_t DP; |
338 | 311 | ||
339 | if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN, | 312 | if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN, |
@@ -363,9 +336,13 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) | |||
363 | * The DPLL for the pipe must be enabled for this to work. | 336 | * The DPLL for the pipe must be enabled for this to work. |
364 | * So temporarily enable it if it's not already enabled. | 337 | * So temporarily enable it if it's not already enabled. |
365 | */ | 338 | */ |
366 | if (!pll_enabled) | 339 | if (!pll_enabled) { |
340 | release_cl_override = IS_CHERRYVIEW(dev) && | ||
341 | !chv_phy_powergate_ch(dev_priv, phy, ch, true); | ||
342 | |||
367 | vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ? | 343 | vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ? |
368 | &chv_dpll[0].dpll : &vlv_dpll[0].dpll); | 344 | &chv_dpll[0].dpll : &vlv_dpll[0].dpll); |
345 | } | ||
369 | 346 | ||
370 | /* | 347 | /* |
371 | * Similar magic as in intel_dp_enable_port(). | 348 | * Similar magic as in intel_dp_enable_port(). |
@@ -382,8 +359,12 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) | |||
382 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); | 359 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); |
383 | POSTING_READ(intel_dp->output_reg); | 360 | POSTING_READ(intel_dp->output_reg); |
384 | 361 | ||
385 | if (!pll_enabled) | 362 | if (!pll_enabled) { |
386 | vlv_force_pll_off(dev, pipe); | 363 | vlv_force_pll_off(dev, pipe); |
364 | |||
365 | if (release_cl_override) | ||
366 | chv_phy_powergate_ch(dev_priv, phy, ch, false); | ||
367 | } | ||
387 | } | 368 | } |
388 | 369 | ||
389 | static enum pipe | 370 | static enum pipe |
@@ -1384,6 +1365,19 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) | |||
1384 | return rate_to_index(rate, intel_dp->sink_rates); | 1365 | return rate_to_index(rate, intel_dp->sink_rates); |
1385 | } | 1366 | } |
1386 | 1367 | ||
1368 | static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, | ||
1369 | uint8_t *link_bw, uint8_t *rate_select) | ||
1370 | { | ||
1371 | if (intel_dp->num_sink_rates) { | ||
1372 | *link_bw = 0; | ||
1373 | *rate_select = | ||
1374 | intel_dp_rate_select(intel_dp, port_clock); | ||
1375 | } else { | ||
1376 | *link_bw = drm_dp_link_rate_to_bw_code(port_clock); | ||
1377 | *rate_select = 0; | ||
1378 | } | ||
1379 | } | ||
1380 | |||
1387 | bool | 1381 | bool |
1388 | intel_dp_compute_config(struct intel_encoder *encoder, | 1382 | intel_dp_compute_config(struct intel_encoder *encoder, |
1389 | struct intel_crtc_state *pipe_config) | 1383 | struct intel_crtc_state *pipe_config) |
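[editor's note] intel_dp_compute_rate() captures the two DPCD link-programming schemes: newer (eDP 1.4-style) sinks that expose a supported-link-rates table are driven by a rate-select index with link_bw written as 0, while older sinks take the classic link bandwidth code. A usage sketch with an assumed 270000 kHz port clock:

    /* Illustrative: pick DPCD link programming for a 270000 kHz link. */
    uint8_t link_bw, rate_select;

    intel_dp_compute_rate(intel_dp, 270000, &link_bw, &rate_select);
    /* Legacy sink: link_bw == drm_dp_link_rate_to_bw_code(270000) == 0x0a,
     * rate_select == 0. Table-based sink: link_bw == 0 and rate_select
     * indexes the sink's supported-link-rates entry for 270000 kHz. */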
@@ -1405,6 +1399,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
1405 | int link_avail, link_clock; | 1399 | int link_avail, link_clock; |
1406 | int common_rates[DP_MAX_SUPPORTED_RATES] = {}; | 1400 | int common_rates[DP_MAX_SUPPORTED_RATES] = {}; |
1407 | int common_len; | 1401 | int common_len; |
1402 | uint8_t link_bw, rate_select; | ||
1408 | 1403 | ||
1409 | common_len = intel_dp_common_rates(intel_dp, common_rates); | 1404 | common_len = intel_dp_common_rates(intel_dp, common_rates); |
1410 | 1405 | ||
@@ -1500,32 +1495,23 @@ found: | |||
1500 | * CEA-861-E - 5.1 Default Encoding Parameters | 1495 | * CEA-861-E - 5.1 Default Encoding Parameters |
1501 | * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry | 1496 | * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry |
1502 | */ | 1497 | */ |
1503 | if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1) | 1498 | pipe_config->limited_color_range = |
1504 | intel_dp->color_range = DP_COLOR_RANGE_16_235; | 1499 | bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1; |
1505 | else | ||
1506 | intel_dp->color_range = 0; | ||
1507 | } | ||
1508 | |||
1509 | if (intel_dp->color_range) | ||
1510 | pipe_config->limited_color_range = true; | ||
1511 | |||
1512 | intel_dp->lane_count = lane_count; | ||
1513 | |||
1514 | if (intel_dp->num_sink_rates) { | ||
1515 | intel_dp->link_bw = 0; | ||
1516 | intel_dp->rate_select = | ||
1517 | intel_dp_rate_select(intel_dp, common_rates[clock]); | ||
1518 | } else { | 1500 | } else { |
1519 | intel_dp->link_bw = | 1501 | pipe_config->limited_color_range = |
1520 | drm_dp_link_rate_to_bw_code(common_rates[clock]); | 1502 | intel_dp->limited_color_range; |
1521 | intel_dp->rate_select = 0; | ||
1522 | } | 1503 | } |
1523 | 1504 | ||
1505 | pipe_config->lane_count = lane_count; | ||
1506 | |||
1524 | pipe_config->pipe_bpp = bpp; | 1507 | pipe_config->pipe_bpp = bpp; |
1525 | pipe_config->port_clock = common_rates[clock]; | 1508 | pipe_config->port_clock = common_rates[clock]; |
1526 | 1509 | ||
1527 | DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", | 1510 | intel_dp_compute_rate(intel_dp, pipe_config->port_clock, |
1528 | intel_dp->link_bw, intel_dp->lane_count, | 1511 | &link_bw, &rate_select); |
1512 | |||
1513 | DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n", | ||
1514 | link_bw, rate_select, pipe_config->lane_count, | ||
1529 | pipe_config->port_clock, bpp); | 1515 | pipe_config->port_clock, bpp); |
1530 | DRM_DEBUG_KMS("DP link bw required %i available %i\n", | 1516 | DRM_DEBUG_KMS("DP link bw required %i available %i\n", |
1531 | mode_rate, link_avail); | 1517 | mode_rate, link_avail); |
@@ -1587,6 +1573,13 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) | |||
1587 | udelay(500); | 1573 | udelay(500); |
1588 | } | 1574 | } |
1589 | 1575 | ||
1576 | void intel_dp_set_link_params(struct intel_dp *intel_dp, | ||
1577 | const struct intel_crtc_state *pipe_config) | ||
1578 | { | ||
1579 | intel_dp->link_rate = pipe_config->port_clock; | ||
1580 | intel_dp->lane_count = pipe_config->lane_count; | ||
1581 | } | ||
1582 | |||
1590 | static void intel_dp_prepare(struct intel_encoder *encoder) | 1583 | static void intel_dp_prepare(struct intel_encoder *encoder) |
1591 | { | 1584 | { |
1592 | struct drm_device *dev = encoder->base.dev; | 1585 | struct drm_device *dev = encoder->base.dev; |
@@ -1596,6 +1589,8 @@ static void intel_dp_prepare(struct intel_encoder *encoder) | |||
1596 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | 1589 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); |
1597 | struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; | 1590 | struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; |
1598 | 1591 | ||
1592 | intel_dp_set_link_params(intel_dp, crtc->config); | ||
1593 | |||
1599 | /* | 1594 | /* |
1600 | * There are four kinds of DP registers: | 1595 | * There are four kinds of DP registers: |
1601 | * | 1596 | * |
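Taken together with the previous hunk, the handoff is: compute_config fills the crtc state, intel_dp_prepare() copies it into the encoder through the new intel_dp_set_link_params(), and only then does link training read intel_dp->link_rate and lane_count. A compressed, compilable trace of that flow, with the structs reduced to the two fields involved:

#include <stdint.h>
#include <stdio.h>

struct crtc_state { int port_clock; uint8_t lane_count; };
struct dp { int link_rate; uint8_t lane_count; };

/* Mirrors intel_dp_set_link_params() from the hunk above. */
static void set_link_params(struct dp *dp, const struct crtc_state *state)
{
	dp->link_rate = state->port_clock;
	dp->lane_count = state->lane_count;
}

int main(void)
{
	struct crtc_state state = { .port_clock = 270000, .lane_count = 4 };
	struct dp dp = { 0 };

	set_link_params(&dp, &state);	/* done from intel_dp_prepare() */
	printf("train at %d kHz x%d\n", dp.link_rate, dp.lane_count);
	return 0;
}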
@@ -1620,7 +1615,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder) | |||
1620 | 1615 | ||
1621 | /* Handle DP bits in common between all three register formats */ | 1616 | /* Handle DP bits in common between all three register formats */ |
1622 | intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; | 1617 | intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; |
1623 | intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count); | 1618 | intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count); |
1624 | 1619 | ||
1625 | if (crtc->config->has_audio) | 1620 | if (crtc->config->has_audio) |
1626 | intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; | 1621 | intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; |
@@ -1650,8 +1645,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder) | |||
1650 | trans_dp &= ~TRANS_DP_ENH_FRAMING; | 1645 | trans_dp &= ~TRANS_DP_ENH_FRAMING; |
1651 | I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp); | 1646 | I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp); |
1652 | } else { | 1647 | } else { |
1653 | if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev)) | 1648 | if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) && |
1654 | intel_dp->DP |= intel_dp->color_range; | 1649 | crtc->config->limited_color_range) |
1650 | intel_dp->DP |= DP_COLOR_RANGE_16_235; | ||
1655 | 1651 | ||
1656 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 1652 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
1657 | intel_dp->DP |= DP_SYNC_HS_HIGH; | 1653 | intel_dp->DP |= DP_SYNC_HS_HIGH; |
@@ -2291,13 +2287,14 @@ static void intel_dp_get_config(struct intel_encoder *encoder, | |||
2291 | pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; | 2287 | pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; |
2292 | 2288 | ||
2293 | if (HAS_PCH_CPT(dev) && port != PORT_A) { | 2289 | if (HAS_PCH_CPT(dev) && port != PORT_A) { |
2294 | tmp = I915_READ(TRANS_DP_CTL(crtc->pipe)); | 2290 | u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe)); |
2295 | if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH) | 2291 | |
2292 | if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) | ||
2296 | flags |= DRM_MODE_FLAG_PHSYNC; | 2293 | flags |= DRM_MODE_FLAG_PHSYNC; |
2297 | else | 2294 | else |
2298 | flags |= DRM_MODE_FLAG_NHSYNC; | 2295 | flags |= DRM_MODE_FLAG_NHSYNC; |
2299 | 2296 | ||
2300 | if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH) | 2297 | if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) |
2301 | flags |= DRM_MODE_FLAG_PVSYNC; | 2298 | flags |= DRM_MODE_FLAG_PVSYNC; |
2302 | else | 2299 | else |
2303 | flags |= DRM_MODE_FLAG_NVSYNC; | 2300 | flags |= DRM_MODE_FLAG_NVSYNC; |
@@ -2321,6 +2318,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder, | |||
2321 | 2318 | ||
2322 | pipe_config->has_dp_encoder = true; | 2319 | pipe_config->has_dp_encoder = true; |
2323 | 2320 | ||
2321 | pipe_config->lane_count = | ||
2322 | ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; | ||
2323 | |||
2324 | intel_dp_get_m_n(crtc, pipe_config); | 2324 | intel_dp_get_m_n(crtc, pipe_config); |
2325 | 2325 | ||
2326 | if (port == PORT_A) { | 2326 | if (port == PORT_A) { |
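State readout now recovers the lane count straight from the port register; the field stores lanes-minus-one, so the decode is mask, shift, add one. A self-contained round-trip check (the shift value here is illustrative, not the real i915 register layout):

#include <assert.h>
#include <stdint.h>

#define PORT_WIDTH_SHIFT 19			/* hypothetical bit position */
#define PORT_WIDTH_MASK  (7u << PORT_WIDTH_SHIFT)
#define PORT_WIDTH(n)    ((((uint32_t)(n)) - 1) << PORT_WIDTH_SHIFT)

int main(void)
{
	for (uint32_t lanes = 1; lanes <= 4; lanes++) {
		uint32_t reg = PORT_WIDTH(lanes);	/* encode at enable */
		uint32_t got =
			((reg & PORT_WIDTH_MASK) >> PORT_WIDTH_SHIFT) + 1;
		assert(got == lanes);			/* decode at readout */
	}
	return 0;
}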
@@ -2400,38 +2400,62 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder) | |||
2400 | intel_dp_link_down(intel_dp); | 2400 | intel_dp_link_down(intel_dp); |
2401 | } | 2401 | } |
2402 | 2402 | ||
2403 | static void chv_post_disable_dp(struct intel_encoder *encoder) | 2403 | static void chv_data_lane_soft_reset(struct intel_encoder *encoder, |
2404 | bool reset) | ||
2404 | { | 2405 | { |
2405 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 2406 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
2406 | struct intel_digital_port *dport = dp_to_dig_port(intel_dp); | 2407 | enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); |
2407 | struct drm_device *dev = encoder->base.dev; | 2408 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); |
2408 | struct drm_i915_private *dev_priv = dev->dev_private; | 2409 | enum pipe pipe = crtc->pipe; |
2409 | struct intel_crtc *intel_crtc = | 2410 | uint32_t val; |
2410 | to_intel_crtc(encoder->base.crtc); | ||
2411 | enum dpio_channel ch = vlv_dport_to_channel(dport); | ||
2412 | enum pipe pipe = intel_crtc->pipe; | ||
2413 | u32 val; | ||
2414 | 2411 | ||
2415 | intel_dp_link_down(intel_dp); | 2412 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); |
2413 | if (reset) | ||
2414 | val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); | ||
2415 | else | ||
2416 | val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; | ||
2417 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); | ||
2416 | 2418 | ||
2417 | mutex_lock(&dev_priv->sb_lock); | 2419 | if (crtc->config->lane_count > 2) { |
2420 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); | ||
2421 | if (reset) | ||
2422 | val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); | ||
2423 | else | ||
2424 | val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; | ||
2425 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); | ||
2426 | } | ||
2418 | 2427 | ||
2419 | /* Propagate soft reset to data lane reset */ | ||
2420 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); | 2428 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); |
2421 | val |= CHV_PCS_REQ_SOFTRESET_EN; | 2429 | val |= CHV_PCS_REQ_SOFTRESET_EN; |
2430 | if (reset) | ||
2431 | val &= ~DPIO_PCS_CLK_SOFT_RESET; | ||
2432 | else | ||
2433 | val |= DPIO_PCS_CLK_SOFT_RESET; | ||
2422 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); | 2434 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); |
2423 | 2435 | ||
2424 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); | 2436 | if (crtc->config->lane_count > 2) { |
2425 | val |= CHV_PCS_REQ_SOFTRESET_EN; | 2437 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); |
2426 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); | 2438 | val |= CHV_PCS_REQ_SOFTRESET_EN; |
2439 | if (reset) | ||
2440 | val &= ~DPIO_PCS_CLK_SOFT_RESET; | ||
2441 | else | ||
2442 | val |= DPIO_PCS_CLK_SOFT_RESET; | ||
2443 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); | ||
2444 | } | ||
2445 | } | ||
2427 | 2446 | ||
2428 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); | 2447 | static void chv_post_disable_dp(struct intel_encoder *encoder) |
2429 | val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); | 2448 | { |
2430 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); | 2449 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
2450 | struct drm_device *dev = encoder->base.dev; | ||
2451 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2452 | |||
2453 | intel_dp_link_down(intel_dp); | ||
2454 | |||
2455 | mutex_lock(&dev_priv->sb_lock); | ||
2431 | 2456 | ||
2432 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); | 2457 | /* Assert data lane reset */ |
2433 | val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); | 2458 | chv_data_lane_soft_reset(encoder, true); |
2434 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); | ||
2435 | 2459 | ||
2436 | mutex_unlock(&dev_priv->sb_lock); | 2460 | mutex_unlock(&dev_priv->sb_lock); |
2437 | } | 2461 | } |
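chv_data_lane_soft_reset() folds the assert and deassert halves of the PCS soft-reset sequence into one helper parameterised by a bool, and skips the second PCS pair unless more than two lanes are in use. A condensed model of that shape, with the DPIO registers stubbed as an array and placeholder bit values:

#include <stdbool.h>
#include <stdint.h>

static uint32_t regs[4];		/* stand-in for the DPIO registers */
enum { PCS01_DW0, PCS23_DW0, PCS01_DW1, PCS23_DW1 };
#define LANE_RESET	0x3u		/* the two TX lane reset bits */
#define REQ_SOFTRST	0x8u
#define CLK_SOFT_RST	0x4u

static void data_lane_soft_reset(int lane_count, bool reset)
{
	int pairs = lane_count > 2 ? 2 : 1;

	/* Lane reset is asserted by *clearing* the bits, hence the flip. */
	for (int p = 0; p < pairs; p++) {
		if (reset)
			regs[PCS01_DW0 + p] &= ~LANE_RESET;
		else
			regs[PCS01_DW0 + p] |= LANE_RESET;
	}

	/* Then propagate it to the clock soft reset on the same pairs. */
	for (int p = 0; p < pairs; p++) {
		regs[PCS01_DW1 + p] |= REQ_SOFTRST;
		if (reset)
			regs[PCS01_DW1 + p] &= ~CLK_SOFT_RST;
		else
			regs[PCS01_DW1 + p] |= CLK_SOFT_RST;
	}
}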
@@ -2551,7 +2575,6 @@ static void intel_enable_dp(struct intel_encoder *encoder) | |||
2551 | struct drm_i915_private *dev_priv = dev->dev_private; | 2575 | struct drm_i915_private *dev_priv = dev->dev_private; |
2552 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | 2576 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); |
2553 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | 2577 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); |
2554 | unsigned int lane_mask = 0x0; | ||
2555 | 2578 | ||
2556 | if (WARN_ON(dp_reg & DP_PORT_EN)) | 2579 | if (WARN_ON(dp_reg & DP_PORT_EN)) |
2557 | return; | 2580 | return; |
@@ -2569,9 +2592,15 @@ static void intel_enable_dp(struct intel_encoder *encoder) | |||
2569 | 2592 | ||
2570 | pps_unlock(intel_dp); | 2593 | pps_unlock(intel_dp); |
2571 | 2594 | ||
2572 | if (IS_VALLEYVIEW(dev)) | 2595 | if (IS_VALLEYVIEW(dev)) { |
2596 | unsigned int lane_mask = 0x0; | ||
2597 | |||
2598 | if (IS_CHERRYVIEW(dev)) | ||
2599 | lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count); | ||
2600 | |||
2573 | vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), | 2601 | vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), |
2574 | lane_mask); | 2602 | lane_mask); |
2603 | } | ||
2575 | 2604 | ||
2576 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); | 2605 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); |
2577 | intel_dp_start_link_train(intel_dp); | 2606 | intel_dp_start_link_train(intel_dp); |
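intel_dp_unused_lane_mask() only matters on CHV, where individual PHY lanes can be power-gated; plain VLV keeps the mask at 0. Its likely shape — inferred from the call sites here, not quoted from the patch — is the complement of the active-lane bits within the 4-lane nibble:

#include <stdio.h>

/* Presumed helper: bit N set => lane N is unused and eligible for gating. */
static unsigned int unused_lane_mask(int lane_count)
{
	return ~((1u << lane_count) - 1) & 0xf;
}

int main(void)
{
	for (int lanes = 1; lanes <= 4; lanes++)
		printf("%d lanes -> 0x%x\n", lanes, unused_lane_mask(lanes));
	/* 1 -> 0xe, 2 -> 0xc, 3 -> 0x8, 4 -> 0x0 */
	return 0;
}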
@@ -2798,31 +2827,19 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder) | |||
2798 | val &= ~DPIO_LANEDESKEW_STRAP_OVRD; | 2827 | val &= ~DPIO_LANEDESKEW_STRAP_OVRD; |
2799 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); | 2828 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); |
2800 | 2829 | ||
2801 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); | 2830 | if (intel_crtc->config->lane_count > 2) { |
2802 | val &= ~DPIO_LANEDESKEW_STRAP_OVRD; | 2831 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); |
2803 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); | 2832 | val &= ~DPIO_LANEDESKEW_STRAP_OVRD; |
2804 | 2833 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); | |
2805 | /* Deassert soft data lane reset*/ | 2834 | } |
2806 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); | ||
2807 | val |= CHV_PCS_REQ_SOFTRESET_EN; | ||
2808 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); | ||
2809 | |||
2810 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); | ||
2811 | val |= CHV_PCS_REQ_SOFTRESET_EN; | ||
2812 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); | ||
2813 | |||
2814 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); | ||
2815 | val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); | ||
2816 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); | ||
2817 | |||
2818 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); | ||
2819 | val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); | ||
2820 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); | ||
2821 | 2835 | ||
2822 | /* Program Tx lane latency optimal setting */ | 2836 |
2823 | for (i = 0; i < 4; i++) { | 2837 | for (i = 0; i < intel_crtc->config->lane_count; i++) { |
2824 | /* Set the upar bit */ | 2838 | /* Set the upar bit */ |
2825 | data = (i == 1) ? 0x0 : 0x1; | 2839 | if (intel_crtc->config->lane_count == 1) |
2840 | data = 0x0; | ||
2841 | else | ||
2842 | data = (i == 1) ? 0x0 : 0x1; | ||
2826 | vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), | 2843 | vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), |
2827 | data << DPIO_UPAR_SHIFT); | 2844 | data << DPIO_UPAR_SHIFT); |
2828 | } | 2845 | } |
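The upar loop above now walks only the configured lanes, and the single-lane case forces upar to 0 where the multi-lane pattern would have produced 1 for lane 0. A tiny standalone table of the resulting per-lane values:

#include <stdio.h>

/* Reproduces the selection in the hunk: lane 1 gets 0, the rest 1,
 * except that a 1-lane config forces 0. */
static int upar(int lane_count, int i)
{
	if (lane_count == 1)
		return 0;
	return (i == 1) ? 0 : 1;
}

int main(void)
{
	static const int counts[] = { 1, 2, 4 };

	for (int c = 0; c < 3; c++) {
		printf("%d lane(s):", counts[c]);
		for (int i = 0; i < counts[c]; i++)
			printf(" %d", upar(counts[c], i));
		printf("\n");	/* 1: 0 | 2: 1 0 | 4: 1 0 1 1 */
	}
	return 0;
}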
@@ -2843,9 +2860,11 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder) | |||
2843 | val |= DPIO_TX2_STAGGER_MASK(0x1f); | 2860 | val |= DPIO_TX2_STAGGER_MASK(0x1f); |
2844 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); | 2861 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); |
2845 | 2862 | ||
2846 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); | 2863 | if (intel_crtc->config->lane_count > 2) { |
2847 | val |= DPIO_TX2_STAGGER_MASK(0x1f); | 2864 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); |
2848 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); | 2865 | val |= DPIO_TX2_STAGGER_MASK(0x1f); |
2866 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); | ||
2867 | } | ||
2849 | 2868 | ||
2850 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), | 2869 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), |
2851 | DPIO_LANESTAGGER_STRAP(stagger) | | 2870 | DPIO_LANESTAGGER_STRAP(stagger) | |
@@ -2854,16 +2873,27 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder) | |||
2854 | DPIO_TX1_STAGGER_MULT(6) | | 2873 | DPIO_TX1_STAGGER_MULT(6) | |
2855 | DPIO_TX2_STAGGER_MULT(0)); | 2874 | DPIO_TX2_STAGGER_MULT(0)); |
2856 | 2875 | ||
2857 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), | 2876 | if (intel_crtc->config->lane_count > 2) { |
2858 | DPIO_LANESTAGGER_STRAP(stagger) | | 2877 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), |
2859 | DPIO_LANESTAGGER_STRAP_OVRD | | 2878 | DPIO_LANESTAGGER_STRAP(stagger) | |
2860 | DPIO_TX1_STAGGER_MASK(0x1f) | | 2879 | DPIO_LANESTAGGER_STRAP_OVRD | |
2861 | DPIO_TX1_STAGGER_MULT(7) | | 2880 | DPIO_TX1_STAGGER_MASK(0x1f) | |
2862 | DPIO_TX2_STAGGER_MULT(5)); | 2881 | DPIO_TX1_STAGGER_MULT(7) | |
2882 | DPIO_TX2_STAGGER_MULT(5)); | ||
2883 | } | ||
2884 | |||
2885 | /* Deassert data lane reset */ | ||
2886 | chv_data_lane_soft_reset(encoder, false); | ||
2863 | 2887 | ||
2864 | mutex_unlock(&dev_priv->sb_lock); | 2888 | mutex_unlock(&dev_priv->sb_lock); |
2865 | 2889 | ||
2866 | intel_enable_dp(encoder); | 2890 | intel_enable_dp(encoder); |
2891 | |||
2892 | /* Second common lane will stay alive on its own now */ | ||
2893 | if (dport->release_cl2_override) { | ||
2894 | chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false); | ||
2895 | dport->release_cl2_override = false; | ||
2896 | } | ||
2867 | } | 2897 | } |
2868 | 2898 | ||
2869 | static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) | 2899 | static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) |
@@ -2875,12 +2905,27 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) | |||
2875 | to_intel_crtc(encoder->base.crtc); | 2905 | to_intel_crtc(encoder->base.crtc); |
2876 | enum dpio_channel ch = vlv_dport_to_channel(dport); | 2906 | enum dpio_channel ch = vlv_dport_to_channel(dport); |
2877 | enum pipe pipe = intel_crtc->pipe; | 2907 | enum pipe pipe = intel_crtc->pipe; |
2908 | unsigned int lane_mask = | ||
2909 | intel_dp_unused_lane_mask(intel_crtc->config->lane_count); | ||
2878 | u32 val; | 2910 | u32 val; |
2879 | 2911 | ||
2880 | intel_dp_prepare(encoder); | 2912 | intel_dp_prepare(encoder); |
2881 | 2913 | ||
2914 | /* | ||
2915 | * Must trick the second common lane into life. | ||
2916 | * Otherwise we can't even access the PLL. | ||
2917 | */ | ||
2918 | if (ch == DPIO_CH0 && pipe == PIPE_B) | ||
2919 | dport->release_cl2_override = | ||
2920 | !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true); | ||
2921 | |||
2922 | chv_phy_powergate_lanes(encoder, true, lane_mask); | ||
2923 | |||
2882 | mutex_lock(&dev_priv->sb_lock); | 2924 | mutex_lock(&dev_priv->sb_lock); |
2883 | 2925 | ||
2926 | /* Assert data lane reset */ | ||
2927 | chv_data_lane_soft_reset(encoder, true); | ||
2928 | |||
2884 | /* program left/right clock distribution */ | 2929 | /* program left/right clock distribution */ |
2885 | if (pipe != PIPE_B) { | 2930 | if (pipe != PIPE_B) { |
2886 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); | 2931 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); |
@@ -2909,13 +2954,15 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) | |||
2909 | val |= CHV_PCS_USEDCLKCHANNEL; | 2954 | val |= CHV_PCS_USEDCLKCHANNEL; |
2910 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); | 2955 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); |
2911 | 2956 | ||
2912 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); | 2957 | if (intel_crtc->config->lane_count > 2) { |
2913 | val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; | 2958 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); |
2914 | if (pipe != PIPE_B) | 2959 | val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; |
2915 | val &= ~CHV_PCS_USEDCLKCHANNEL; | 2960 | if (pipe != PIPE_B) |
2916 | else | 2961 | val &= ~CHV_PCS_USEDCLKCHANNEL; |
2917 | val |= CHV_PCS_USEDCLKCHANNEL; | 2962 | else |
2918 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); | 2963 | val |= CHV_PCS_USEDCLKCHANNEL; |
2964 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); | ||
2965 | } | ||
2919 | 2966 | ||
2920 | /* | 2967 | /* |
2921 | * This is a bit weird since generally CL | 2968 |
@@ -2932,6 +2979,39 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) | |||
2932 | mutex_unlock(&dev_priv->sb_lock); | 2979 | mutex_unlock(&dev_priv->sb_lock); |
2933 | } | 2980 | } |
2934 | 2981 | ||
2982 | static void chv_dp_post_pll_disable(struct intel_encoder *encoder) | ||
2983 | { | ||
2984 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
2985 | enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; | ||
2986 | u32 val; | ||
2987 | |||
2988 | mutex_lock(&dev_priv->sb_lock); | ||
2989 | |||
2990 | /* disable left/right clock distribution */ | ||
2991 | if (pipe != PIPE_B) { | ||
2992 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); | ||
2993 | val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); | ||
2994 | vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); | ||
2995 | } else { | ||
2996 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); | ||
2997 | val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); | ||
2998 | vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); | ||
2999 | } | ||
3000 | |||
3001 | mutex_unlock(&dev_priv->sb_lock); | ||
3002 | |||
3003 | /* | ||
3004 | * Leave the power down bit cleared for at least one | ||
3005 | * lane so that chv_powergate_phy_ch() will power | ||
3006 | * on something when the channel is otherwise unused. | ||
3007 | * When the port is off and the override is removed | ||
3008 | * the lanes power down anyway, so otherwise it doesn't | ||
3009 | * really matter what the state of power down bits is | ||
3010 | * after this. | ||
3011 | */ | ||
3012 | chv_phy_powergate_lanes(encoder, false, 0x0); | ||
3013 | } | ||
3014 | |||
2935 | /* | 3015 | /* |
2936 | * Native read with retry for link status and receiver capability reads for | 3016 | * Native read with retry for link status and receiver capability reads for |
2937 | * cases where the sink may still be asleep. | 3017 | * cases where the sink may still be asleep. |
@@ -3168,6 +3248,12 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) | |||
3168 | return 0; | 3248 | return 0; |
3169 | } | 3249 | } |
3170 | 3250 | ||
3251 | static bool chv_need_uniq_trans_scale(uint8_t train_set) | ||
3252 | { | ||
3253 | return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 && | ||
3254 | (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3; | ||
3255 | } | ||
3256 | |||
3171 | static uint32_t chv_signal_levels(struct intel_dp *intel_dp) | 3257 | static uint32_t chv_signal_levels(struct intel_dp *intel_dp) |
3172 | { | 3258 | { |
3173 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 3259 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
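chv_need_uniq_trans_scale() keys off the DPCD TRAINING_LANEx_SET encoding: voltage swing sits in the low two bits, pre-emphasis two bits above, and the unique transition scale is needed only in the swing-3 / pre-emphasis-0 corner. A standalone check using the spec field layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* DPCD TRAINING_LANEx_SET layout per the DisplayPort spec. */
#define VSWING_MASK	0x03u	/* bits 1:0 */
#define PREEMPH_MASK	0x18u	/* bits 4:3 */
#define VSWING_LEVEL_3	0x03u
#define PREEMPH_LEVEL_0	0x00u

static bool need_uniq_trans_scale(uint8_t train_set)
{
	return (train_set & PREEMPH_MASK) == PREEMPH_LEVEL_0 &&
	       (train_set & VSWING_MASK) == VSWING_LEVEL_3;
}

int main(void)
{
	printf("%d\n", need_uniq_trans_scale(0x03));	/* swing 3, pe 0: 1 */
	printf("%d\n", need_uniq_trans_scale(0x0b));	/* swing 3, pe 1: 0 */
	printf("%d\n", need_uniq_trans_scale(0x02));	/* swing 2, pe 0: 0 */
	return 0;
}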
@@ -3259,24 +3345,28 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp) | |||
3259 | val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; | 3345 | val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; |
3260 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); | 3346 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); |
3261 | 3347 | ||
3262 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); | 3348 | if (intel_crtc->config->lane_count > 2) { |
3263 | val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); | 3349 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); |
3264 | val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); | 3350 | val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); |
3265 | val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; | 3351 | val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); |
3266 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); | 3352 | val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; |
3353 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); | ||
3354 | } | ||
3267 | 3355 | ||
3268 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); | 3356 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); |
3269 | val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); | 3357 | val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); |
3270 | val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; | 3358 | val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; |
3271 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); | 3359 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); |
3272 | 3360 | ||
3273 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); | 3361 | if (intel_crtc->config->lane_count > 2) { |
3274 | val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); | 3362 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); |
3275 | val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; | 3363 | val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); |
3276 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); | 3364 | val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; |
3365 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); | ||
3366 | } | ||
3277 | 3367 | ||
3278 | /* Program swing deemph */ | 3368 | /* Program swing deemph */ |
3279 | for (i = 0; i < 4; i++) { | 3369 | for (i = 0; i < intel_crtc->config->lane_count; i++) { |
3280 | val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); | 3370 | val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); |
3281 | val &= ~DPIO_SWING_DEEMPH9P5_MASK; | 3371 | val &= ~DPIO_SWING_DEEMPH9P5_MASK; |
3282 | val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; | 3372 | val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; |
@@ -3284,43 +3374,36 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp) | |||
3284 | } | 3374 | } |
3285 | 3375 | ||
3286 | /* Program swing margin */ | 3376 | /* Program swing margin */ |
3287 | for (i = 0; i < 4; i++) { | 3377 | for (i = 0; i < intel_crtc->config->lane_count; i++) { |
3288 | val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); | 3378 | val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); |
3379 | |||
3289 | val &= ~DPIO_SWING_MARGIN000_MASK; | 3380 | val &= ~DPIO_SWING_MARGIN000_MASK; |
3290 | val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; | 3381 | val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; |
3382 | |||
3383 | /* | ||
3384 | * Supposedly this value shouldn't matter when unique transition | ||
3385 | * scale is disabled, but in fact it does matter. Let's just | ||
3386 | * always program the same value and hope it's OK. | ||
3387 | */ | ||
3388 | val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); | ||
3389 | val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; | ||
3390 | |||
3291 | vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); | 3391 | vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); |
3292 | } | 3392 | } |
3293 | 3393 | ||
3294 | /* Disable unique transition scale */ | 3394 | /* |
3295 | for (i = 0; i < 4; i++) { | 3395 | * The document said it needs to set bit 27 for ch0 and bit 26 |
3396 | * for ch1. Might be a typo in the doc. | ||
3397 | * For now, for this unique transition scale selection, set bit | ||
3398 | * 27 for ch0 and ch1. | ||
3399 | */ | ||
3400 | for (i = 0; i < intel_crtc->config->lane_count; i++) { | ||
3296 | val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); | 3401 | val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); |
3297 | val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; | 3402 | if (chv_need_uniq_trans_scale(train_set)) |
3298 | vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); | ||
3299 | } | ||
3300 | |||
3301 | if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK) | ||
3302 | == DP_TRAIN_PRE_EMPH_LEVEL_0) && | ||
3303 | ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK) | ||
3304 | == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) { | ||
3305 | |||
3306 | /* | ||
3307 | * The document said it needs to set bit 27 for ch0 and bit 26 | ||
3308 | * for ch1. Might be a typo in the doc. | ||
3309 | * For now, for this unique transition scale selection, set bit | ||
3310 | * 27 for ch0 and ch1. | ||
3311 | */ | ||
3312 | for (i = 0; i < 4; i++) { | ||
3313 | val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); | ||
3314 | val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; | 3403 | val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; |
3315 | vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); | 3404 | else |
3316 | } | 3405 | val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; |
3317 | 3406 | vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); | |
3318 | for (i = 0; i < 4; i++) { | ||
3319 | val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); | ||
3320 | val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); | ||
3321 | val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT); | ||
3322 | vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); | ||
3323 | } | ||
3324 | } | 3407 | } |
3325 | 3408 | ||
3326 | /* Start swing calculation */ | 3409 | /* Start swing calculation */ |
@@ -3328,9 +3411,11 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp) | |||
3328 | val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; | 3411 | val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; |
3329 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); | 3412 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); |
3330 | 3413 | ||
3331 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); | 3414 | if (intel_crtc->config->lane_count > 2) { |
3332 | val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; | 3415 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); |
3333 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); | 3416 | val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; |
3417 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); | ||
3418 | } | ||
3334 | 3419 | ||
3335 | /* LRC Bypass */ | 3420 | /* LRC Bypass */ |
3336 | val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); | 3421 | val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); |
@@ -3521,8 +3606,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
3521 | uint8_t dp_train_pat) | 3606 | uint8_t dp_train_pat) |
3522 | { | 3607 | { |
3523 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 3608 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
3524 | struct drm_device *dev = intel_dig_port->base.base.dev; | 3609 | struct drm_i915_private *dev_priv = |
3525 | struct drm_i915_private *dev_priv = dev->dev_private; | 3610 | to_i915(intel_dig_port->base.base.dev); |
3526 | uint8_t buf[sizeof(intel_dp->train_set) + 1]; | 3611 | uint8_t buf[sizeof(intel_dp->train_set) + 1]; |
3527 | int ret, len; | 3612 | int ret, len; |
3528 | 3613 | ||
@@ -3563,8 +3648,8 @@ intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP, | |||
3563 | const uint8_t link_status[DP_LINK_STATUS_SIZE]) | 3648 | const uint8_t link_status[DP_LINK_STATUS_SIZE]) |
3564 | { | 3649 | { |
3565 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 3650 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
3566 | struct drm_device *dev = intel_dig_port->base.base.dev; | 3651 | struct drm_i915_private *dev_priv = |
3567 | struct drm_i915_private *dev_priv = dev->dev_private; | 3652 | to_i915(intel_dig_port->base.base.dev); |
3568 | int ret; | 3653 | int ret; |
3569 | 3654 | ||
3570 | intel_get_adjust_train(intel_dp, link_status); | 3655 | intel_get_adjust_train(intel_dp, link_status); |
@@ -3621,19 +3706,23 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
3621 | int voltage_tries, loop_tries; | 3706 | int voltage_tries, loop_tries; |
3622 | uint32_t DP = intel_dp->DP; | 3707 | uint32_t DP = intel_dp->DP; |
3623 | uint8_t link_config[2]; | 3708 | uint8_t link_config[2]; |
3709 | uint8_t link_bw, rate_select; | ||
3624 | 3710 | ||
3625 | if (HAS_DDI(dev)) | 3711 | if (HAS_DDI(dev)) |
3626 | intel_ddi_prepare_link_retrain(encoder); | 3712 | intel_ddi_prepare_link_retrain(encoder); |
3627 | 3713 | ||
3714 | intel_dp_compute_rate(intel_dp, intel_dp->link_rate, | ||
3715 | &link_bw, &rate_select); | ||
3716 | |||
3628 | /* Write the link configuration data */ | 3717 | /* Write the link configuration data */ |
3629 | link_config[0] = intel_dp->link_bw; | 3718 | link_config[0] = link_bw; |
3630 | link_config[1] = intel_dp->lane_count; | 3719 | link_config[1] = intel_dp->lane_count; |
3631 | if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) | 3720 | if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) |
3632 | link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | 3721 | link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
3633 | drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); | 3722 | drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); |
3634 | if (intel_dp->num_sink_rates) | 3723 | if (intel_dp->num_sink_rates) |
3635 | drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, | 3724 | drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, |
3636 | &intel_dp->rate_select, 1); | 3725 | &rate_select, 1); |
3637 | 3726 | ||
3638 | link_config[0] = 0; | 3727 | link_config[0] = 0; |
3639 | link_config[1] = DP_SET_ANSI_8B10B; | 3728 | link_config[1] = DP_SET_ANSI_8B10B; |
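With link_bw/rate_select recomputed at training time, the sink-side programming stays a two-byte write at LINK_BW_SET plus, for rate-table sinks, one byte at LINK_RATE_SET. A sketch with the real DPCD offsets and a stubbed AUX transport:

#include <stdint.h>
#include <string.h>

#define DP_LINK_BW_SET		0x100	/* real DPCD addresses */
#define DP_LANE_COUNT_SET	0x101	/* covered as byte 2 of the pair */
#define DP_LINK_RATE_SET	0x115
#define DP_LANE_COUNT_ENHANCED_FRAME_EN	0x80

static uint8_t dpcd[0x200];		/* stub for the AUX channel */
static void dpcd_write(unsigned addr, const uint8_t *buf, unsigned len)
{
	memcpy(&dpcd[addr], buf, len);
}

static void write_link_config(uint8_t link_bw, uint8_t rate_select,
			      uint8_t lane_count, int enhanced_framing,
			      int has_sink_rate_table)
{
	uint8_t cfg[2];

	cfg[0] = link_bw;		/* 0 when a rate table is in use */
	cfg[1] = lane_count;
	if (enhanced_framing)
		cfg[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	dpcd_write(DP_LINK_BW_SET, cfg, 2);

	if (has_sink_rate_table)
		dpcd_write(DP_LINK_RATE_SET, &rate_select, 1);
}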
@@ -3724,14 +3813,27 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
3724 | void | 3813 | void |
3725 | intel_dp_complete_link_train(struct intel_dp *intel_dp) | 3814 | intel_dp_complete_link_train(struct intel_dp *intel_dp) |
3726 | { | 3815 | { |
3816 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
3817 | struct drm_device *dev = dig_port->base.base.dev; | ||
3727 | bool channel_eq = false; | 3818 | bool channel_eq = false; |
3728 | int tries, cr_tries; | 3819 | int tries, cr_tries; |
3729 | uint32_t DP = intel_dp->DP; | 3820 | uint32_t DP = intel_dp->DP; |
3730 | uint32_t training_pattern = DP_TRAINING_PATTERN_2; | 3821 | uint32_t training_pattern = DP_TRAINING_PATTERN_2; |
3731 | 3822 | ||
3732 | /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/ | 3823 | /* |
3733 | if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3) | 3824 | * Training Pattern 3 for HBR2 or 1.2 devices that support it. |
3825 | * | ||
3826 | * Intel platforms that support HBR2 also support TPS3. TPS3 support is | ||
3827 | * also mandatory for downstream devices that support HBR2. | ||
3828 | * | ||
3829 | * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is | ||
3830 | * supported but still not enabled. | ||
3831 | */ | ||
3832 | if (intel_dp_source_supports_hbr2(dev) && | ||
3833 | drm_dp_tps3_supported(intel_dp->dpcd)) | ||
3734 | training_pattern = DP_TRAINING_PATTERN_3; | 3834 | training_pattern = DP_TRAINING_PATTERN_3; |
3835 | else if (intel_dp->link_rate == 540000) | ||
3836 | DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n"); | ||
3735 | 3837 | ||
3736 | /* channel equalization */ | 3838 | /* channel equalization */ |
3737 | if (!intel_dp_set_link_train(intel_dp, &DP, | 3839 | if (!intel_dp_set_link_train(intel_dp, &DP, |
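The rewritten comment pins the policy down: TPS3 is chosen whenever source (HBR2-capable hardware) and sink (DPCD bit) both support it, regardless of the negotiated rate, and a 5.4 Gbps link without that pairing is flagged as an error. Distilled into a predicate, with the two capability inputs standing in for the driver helpers:

#include <stdbool.h>
#include <stdio.h>

enum { TPS2 = 2, TPS3 = 3 };

static int channel_eq_pattern(bool source_hbr2, bool sink_tps3,
			      int link_rate_khz)
{
	if (source_hbr2 && sink_tps3)
		return TPS3;
	if (link_rate_khz == 540000)	/* HBR2 without TPS3: shouldn't happen */
		fprintf(stderr, "5.4 Gbps link rate without HBR2/TPS3 support\n");
	return TPS2;
}

int main(void)
{
	printf("%d\n", channel_eq_pattern(true, true, 540000));	/* 3 */
	printf("%d\n", channel_eq_pattern(true, false, 270000));	/* 2 */
	return 0;
}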
@@ -3759,7 +3861,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
3759 | } | 3861 | } |
3760 | 3862 | ||
3761 | /* Make sure clock is still ok */ | 3863 | /* Make sure clock is still ok */ |
3762 | if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { | 3864 | if (!drm_dp_clock_recovery_ok(link_status, |
3865 | intel_dp->lane_count)) { | ||
3763 | intel_dp->train_set_valid = false; | 3866 | intel_dp->train_set_valid = false; |
3764 | intel_dp_start_link_train(intel_dp); | 3867 | intel_dp_start_link_train(intel_dp); |
3765 | intel_dp_set_link_train(intel_dp, &DP, | 3868 | intel_dp_set_link_train(intel_dp, &DP, |
@@ -3769,7 +3872,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
3769 | continue; | 3872 | continue; |
3770 | } | 3873 | } |
3771 | 3874 | ||
3772 | if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { | 3875 | if (drm_dp_channel_eq_ok(link_status, |
3876 | intel_dp->lane_count)) { | ||
3773 | channel_eq = true; | 3877 | channel_eq = true; |
3774 | break; | 3878 | break; |
3775 | } | 3879 | } |
@@ -3910,19 +4014,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
3910 | } | 4014 | } |
3911 | } | 4015 | } |
3912 | 4016 | ||
3913 | /* Training Pattern 3 support, Intel platforms that support HBR2 alone | 4017 | DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", |
3914 | * have support for TP3 hence that check is used along with dpcd check | 4018 | yesno(intel_dp_source_supports_hbr2(dev)), |
3915 | * to ensure TP3 can be enabled. | 4019 | yesno(drm_dp_tps3_supported(intel_dp->dpcd))); |
3916 | * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is | ||
3917 | * supported but still not enabled. | ||
3918 | */ | ||
3919 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 && | ||
3920 | intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED && | ||
3921 | intel_dp_source_supports_hbr2(dev)) { | ||
3922 | intel_dp->use_tps3 = true; | ||
3923 | DRM_DEBUG_KMS("Displayport TPS3 supported\n"); | ||
3924 | } else | ||
3925 | intel_dp->use_tps3 = false; | ||
3926 | 4020 | ||
3927 | /* Intermediate frequency support */ | 4021 | /* Intermediate frequency support */ |
3928 | if (is_edp(intel_dp) && | 4022 | if (is_edp(intel_dp) && |
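The open-coded DPCD test deleted above survives inside drm_dp_tps3_supported(): DPCD revision 1.2 or later plus the TPS3 bit in MAX_LANE_COUNT. Reconstructed from the removed lines (the drm helper presumably wraps exactly these two fields):

#include <stdbool.h>
#include <stdint.h>

#define DP_DPCD_REV		0x000	/* real DPCD addresses */
#define DP_MAX_LANE_COUNT	0x002
#define DP_TPS3_SUPPORTED	(1 << 6)

static bool tps3_supported(const uint8_t *dpcd)
{
	return dpcd[DP_DPCD_REV] >= 0x12 &&
	       (dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED);
}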
@@ -4008,22 +4102,30 @@ intel_dp_probe_mst(struct intel_dp *intel_dp) | |||
4008 | return intel_dp->is_mst; | 4102 | return intel_dp->is_mst; |
4009 | } | 4103 | } |
4010 | 4104 | ||
4011 | static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp) | 4105 | static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp) |
4012 | { | 4106 | { |
4013 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 4107 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
4014 | struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); | 4108 | struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); |
4015 | u8 buf; | 4109 | u8 buf; |
4110 | int ret = 0; | ||
4016 | 4111 | ||
4017 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) { | 4112 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) { |
4018 | DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); | 4113 | DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); |
4019 | return; | 4114 | ret = -EIO; |
4115 | goto out; | ||
4020 | } | 4116 | } |
4021 | 4117 | ||
4022 | if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, | 4118 | if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, |
4023 | buf & ~DP_TEST_SINK_START) < 0) | 4119 | buf & ~DP_TEST_SINK_START) < 0) { |
4024 | DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); | 4120 | DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); |
4121 | ret = -EIO; | ||
4122 | goto out; | ||
4123 | } | ||
4025 | 4124 | ||
4125 | intel_dp->sink_crc.started = false; | ||
4126 | out: | ||
4026 | hsw_enable_ips(intel_crtc); | 4127 | hsw_enable_ips(intel_crtc); |
4128 | return ret; | ||
4027 | } | 4129 | } |
4028 | 4130 | ||
4029 | static int intel_dp_sink_crc_start(struct intel_dp *intel_dp) | 4131 | static int intel_dp_sink_crc_start(struct intel_dp *intel_dp) |
@@ -4031,6 +4133,13 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp) | |||
4031 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 4133 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
4032 | struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); | 4134 | struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); |
4033 | u8 buf; | 4135 | u8 buf; |
4136 | int ret; | ||
4137 | |||
4138 | if (intel_dp->sink_crc.started) { | ||
4139 | ret = intel_dp_sink_crc_stop(intel_dp); | ||
4140 | if (ret) | ||
4141 | return ret; | ||
4142 | } | ||
4034 | 4143 | ||
4035 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) | 4144 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) |
4036 | return -EIO; | 4145 | return -EIO; |
@@ -4038,6 +4147,8 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp) | |||
4038 | if (!(buf & DP_TEST_CRC_SUPPORTED)) | 4147 | if (!(buf & DP_TEST_CRC_SUPPORTED)) |
4039 | return -ENOTTY; | 4148 | return -ENOTTY; |
4040 | 4149 | ||
4150 | intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK; | ||
4151 | |||
4041 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) | 4152 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) |
4042 | return -EIO; | 4153 | return -EIO; |
4043 | 4154 | ||
@@ -4049,6 +4160,7 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp) | |||
4049 | return -EIO; | 4160 | return -EIO; |
4050 | } | 4161 | } |
4051 | 4162 | ||
4163 | intel_dp->sink_crc.started = true; | ||
4052 | return 0; | 4164 | return 0; |
4053 | } | 4165 | } |
4054 | 4166 | ||
@@ -4058,38 +4170,55 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) | |||
4058 | struct drm_device *dev = dig_port->base.base.dev; | 4170 | struct drm_device *dev = dig_port->base.base.dev; |
4059 | struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); | 4171 | struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); |
4060 | u8 buf; | 4172 | u8 buf; |
4061 | int test_crc_count; | 4173 | int count, ret; |
4062 | int attempts = 6; | 4174 | int attempts = 6; |
4063 | int ret; | 4175 | bool old_equal_new; |
4064 | 4176 | ||
4065 | ret = intel_dp_sink_crc_start(intel_dp); | 4177 | ret = intel_dp_sink_crc_start(intel_dp); |
4066 | if (ret) | 4178 | if (ret) |
4067 | return ret; | 4179 | return ret; |
4068 | 4180 | ||
4069 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) { | ||
4070 | ret = -EIO; | ||
4071 | goto stop; | ||
4072 | } | ||
4073 | |||
4074 | test_crc_count = buf & DP_TEST_COUNT_MASK; | ||
4075 | |||
4076 | do { | 4181 | do { |
4182 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
4183 | |||
4077 | if (drm_dp_dpcd_readb(&intel_dp->aux, | 4184 | if (drm_dp_dpcd_readb(&intel_dp->aux, |
4078 | DP_TEST_SINK_MISC, &buf) < 0) { | 4185 | DP_TEST_SINK_MISC, &buf) < 0) { |
4079 | ret = -EIO; | 4186 | ret = -EIO; |
4080 | goto stop; | 4187 | goto stop; |
4081 | } | 4188 | } |
4082 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 4189 | count = buf & DP_TEST_COUNT_MASK; |
4083 | } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count); | 4190 | |
4191 | /* | ||
4192 | * Count might be reset during the loop. In this case | ||
4193 | * last known count needs to be reset as well. | ||
4194 | */ | ||
4195 | if (count == 0) | ||
4196 | intel_dp->sink_crc.last_count = 0; | ||
4197 | |||
4198 | if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) { | ||
4199 | ret = -EIO; | ||
4200 | goto stop; | ||
4201 | } | ||
4202 | |||
4203 | old_equal_new = (count == intel_dp->sink_crc.last_count && | ||
4204 | !memcmp(intel_dp->sink_crc.last_crc, crc, | ||
4205 | 6 * sizeof(u8))); | ||
4206 | |||
4207 | } while (--attempts && (count == 0 || old_equal_new)); | ||
4208 | |||
4209 | intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK; | ||
4210 | memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8)); | ||
4084 | 4211 | ||
4085 | if (attempts == 0) { | 4212 | if (attempts == 0) { |
4086 | DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n"); | 4213 | if (old_equal_new) { |
4087 | ret = -ETIMEDOUT; | 4214 | DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n"); |
4088 | goto stop; | 4215 | } else { |
4216 | DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n"); | ||
4217 | ret = -ETIMEDOUT; | ||
4218 | goto stop; | ||
4219 | } | ||
4089 | } | 4220 | } |
4090 | 4221 | ||
4091 | if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) | ||
4092 | ret = -EIO; | ||
4093 | stop: | 4222 | stop: |
4094 | intel_dp_sink_crc_stop(intel_dp); | 4223 | intel_dp_sink_crc_stop(intel_dp); |
4095 | return ret; | 4224 | return ret; |
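The rework stops trusting TEST_CRC_COUNT alone: a fresh CRC is one where either the counter advanced or the six CRC bytes changed, and a mid-loop counter reset clears the remembered count. The loop skeleton, compilable with trivial fakes for the AUX reads and the vblank wait:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct sink_crc { int last_count; uint8_t last_crc[6]; };

/* Trivial fakes so the skeleton builds standalone. */
static int fake;
static int read_test_count(void) { return ++fake & 0xf; }
static void read_crc(uint8_t crc[6]) { memset(crc, fake, 6); }
static void wait_for_vblank(void) { }

static int get_sink_crc(struct sink_crc *s, uint8_t crc[6])
{
	int attempts = 6, count;
	bool old_equal_new;

	do {
		wait_for_vblank();
		count = read_test_count();
		if (count == 0)		/* counter reset: drop stale state */
			s->last_count = 0;

		read_crc(crc);
		old_equal_new = (count == s->last_count &&
				 !memcmp(s->last_crc, crc, 6));
	} while (--attempts && (count == 0 || old_equal_new));

	s->last_count = count;
	memcpy(s->last_crc, crc, 6);

	/* As in the hunk above: a repeated CRC after 6 vblanks is reported
	 * but tolerated; getting no CRC at all is a hard timeout. */
	if (attempts == 0 && !old_equal_new)
		return -1;
	return 0;
}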
@@ -4249,7 +4378,8 @@ go_again: | |||
4249 | if (bret == true) { | 4378 | if (bret == true) { |
4250 | 4379 | ||
4251 | /* check link status - esi[10] = 0x200c */ | 4380 | /* check link status - esi[10] = 0x200c */ |
4252 | if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { | 4381 | if (intel_dp->active_mst_links && |
4382 | !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { | ||
4253 | DRM_DEBUG_KMS("channel EQ not ok, retraining\n"); | 4383 | DRM_DEBUG_KMS("channel EQ not ok, retraining\n"); |
4254 | intel_dp_start_link_train(intel_dp); | 4384 | intel_dp_start_link_train(intel_dp); |
4255 | intel_dp_complete_link_train(intel_dp); | 4385 | intel_dp_complete_link_train(intel_dp); |
@@ -4411,6 +4541,147 @@ edp_detect(struct intel_dp *intel_dp) | |||
4411 | return status; | 4541 | return status; |
4412 | } | 4542 | } |
4413 | 4543 | ||
4544 | static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, | ||
4545 | struct intel_digital_port *port) | ||
4546 | { | ||
4547 | u32 bit; | ||
4548 | |||
4549 | switch (port->port) { | ||
4550 | case PORT_A: | ||
4551 | return true; | ||
4552 | case PORT_B: | ||
4553 | bit = SDE_PORTB_HOTPLUG; | ||
4554 | break; | ||
4555 | case PORT_C: | ||
4556 | bit = SDE_PORTC_HOTPLUG; | ||
4557 | break; | ||
4558 | case PORT_D: | ||
4559 | bit = SDE_PORTD_HOTPLUG; | ||
4560 | break; | ||
4561 | default: | ||
4562 | MISSING_CASE(port->port); | ||
4563 | return false; | ||
4564 | } | ||
4565 | |||
4566 | return I915_READ(SDEISR) & bit; | ||
4567 | } | ||
4568 | |||
4569 | static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv, | ||
4570 | struct intel_digital_port *port) | ||
4571 | { | ||
4572 | u32 bit; | ||
4573 | |||
4574 | switch (port->port) { | ||
4575 | case PORT_A: | ||
4576 | return true; | ||
4577 | case PORT_B: | ||
4578 | bit = SDE_PORTB_HOTPLUG_CPT; | ||
4579 | break; | ||
4580 | case PORT_C: | ||
4581 | bit = SDE_PORTC_HOTPLUG_CPT; | ||
4582 | break; | ||
4583 | case PORT_D: | ||
4584 | bit = SDE_PORTD_HOTPLUG_CPT; | ||
4585 | break; | ||
4586 | default: | ||
4587 | MISSING_CASE(port->port); | ||
4588 | return false; | ||
4589 | } | ||
4590 | |||
4591 | return I915_READ(SDEISR) & bit; | ||
4592 | } | ||
4593 | |||
4594 | static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv, | ||
4595 | struct intel_digital_port *port) | ||
4596 | { | ||
4597 | u32 bit; | ||
4598 | |||
4599 | switch (port->port) { | ||
4600 | case PORT_B: | ||
4601 | bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; | ||
4602 | break; | ||
4603 | case PORT_C: | ||
4604 | bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; | ||
4605 | break; | ||
4606 | case PORT_D: | ||
4607 | bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; | ||
4608 | break; | ||
4609 | default: | ||
4610 | MISSING_CASE(port->port); | ||
4611 | return false; | ||
4612 | } | ||
4613 | |||
4614 | return I915_READ(PORT_HOTPLUG_STAT) & bit; | ||
4615 | } | ||
4616 | |||
4617 | static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv, | ||
4618 | struct intel_digital_port *port) | ||
4619 | { | ||
4620 | u32 bit; | ||
4621 | |||
4622 | switch (port->port) { | ||
4623 | case PORT_B: | ||
4624 | bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; | ||
4625 | break; | ||
4626 | case PORT_C: | ||
4627 | bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; | ||
4628 | break; | ||
4629 | case PORT_D: | ||
4630 | bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; | ||
4631 | break; | ||
4632 | default: | ||
4633 | MISSING_CASE(port->port); | ||
4634 | return false; | ||
4635 | } | ||
4636 | |||
4637 | return I915_READ(PORT_HOTPLUG_STAT) & bit; | ||
4638 | } | ||
4639 | |||
4640 | static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv, | ||
4641 | struct intel_digital_port *port) | ||
4642 | { | ||
4643 | u32 bit; | ||
4644 | |||
4645 | switch (port->port) { | ||
4646 | case PORT_A: | ||
4647 | bit = BXT_DE_PORT_HP_DDIA; | ||
4648 | break; | ||
4649 | case PORT_B: | ||
4650 | bit = BXT_DE_PORT_HP_DDIB; | ||
4651 | break; | ||
4652 | case PORT_C: | ||
4653 | bit = BXT_DE_PORT_HP_DDIC; | ||
4654 | break; | ||
4655 | default: | ||
4656 | MISSING_CASE(port->port); | ||
4657 | return false; | ||
4658 | } | ||
4659 | |||
4660 | return I915_READ(GEN8_DE_PORT_ISR) & bit; | ||
4661 | } | ||
4662 | |||
4663 | /* | ||
4664 | * intel_digital_port_connected - is the specified port connected? | ||
4665 | * @dev_priv: i915 private structure | ||
4666 | * @port: the port to test | ||
4667 | * | ||
4668 | * Return %true if @port is connected, %false otherwise. | ||
4669 | */ | ||
4670 | static bool intel_digital_port_connected(struct drm_i915_private *dev_priv, | ||
4671 | struct intel_digital_port *port) | ||
4672 | { | ||
4673 | if (HAS_PCH_IBX(dev_priv)) | ||
4674 | return ibx_digital_port_connected(dev_priv, port); | ||
4675 | if (HAS_PCH_SPLIT(dev_priv)) | ||
4676 | return cpt_digital_port_connected(dev_priv, port); | ||
4677 | else if (IS_BROXTON(dev_priv)) | ||
4678 | return bxt_digital_port_connected(dev_priv, port); | ||
4679 | else if (IS_VALLEYVIEW(dev_priv)) | ||
4680 | return vlv_digital_port_connected(dev_priv, port); | ||
4681 | else | ||
4682 | return g4x_digital_port_connected(dev_priv, port); | ||
4683 | } | ||
4684 | |||
4414 | static enum drm_connector_status | 4685 | static enum drm_connector_status |
4415 | ironlake_dp_detect(struct intel_dp *intel_dp) | 4686 | ironlake_dp_detect(struct intel_dp *intel_dp) |
4416 | { | 4687 | { |
@@ -4418,59 +4689,17 @@ ironlake_dp_detect(struct intel_dp *intel_dp) | |||
4418 | struct drm_i915_private *dev_priv = dev->dev_private; | 4689 | struct drm_i915_private *dev_priv = dev->dev_private; |
4419 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 4690 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
4420 | 4691 | ||
4421 | if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) | 4692 | if (!intel_digital_port_connected(dev_priv, intel_dig_port)) |
4422 | return connector_status_disconnected; | 4693 | return connector_status_disconnected; |
4423 | 4694 | ||
4424 | return intel_dp_detect_dpcd(intel_dp); | 4695 | return intel_dp_detect_dpcd(intel_dp); |
4425 | } | 4696 | } |
4426 | 4697 | ||
4427 | static int g4x_digital_port_connected(struct drm_device *dev, | ||
4428 | struct intel_digital_port *intel_dig_port) | ||
4429 | { | ||
4430 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4431 | uint32_t bit; | ||
4432 | |||
4433 | if (IS_VALLEYVIEW(dev)) { | ||
4434 | switch (intel_dig_port->port) { | ||
4435 | case PORT_B: | ||
4436 | bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; | ||
4437 | break; | ||
4438 | case PORT_C: | ||
4439 | bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; | ||
4440 | break; | ||
4441 | case PORT_D: | ||
4442 | bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; | ||
4443 | break; | ||
4444 | default: | ||
4445 | return -EINVAL; | ||
4446 | } | ||
4447 | } else { | ||
4448 | switch (intel_dig_port->port) { | ||
4449 | case PORT_B: | ||
4450 | bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; | ||
4451 | break; | ||
4452 | case PORT_C: | ||
4453 | bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; | ||
4454 | break; | ||
4455 | case PORT_D: | ||
4456 | bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; | ||
4457 | break; | ||
4458 | default: | ||
4459 | return -EINVAL; | ||
4460 | } | ||
4461 | } | ||
4462 | |||
4463 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) | ||
4464 | return 0; | ||
4465 | return 1; | ||
4466 | } | ||
4467 | |||
4468 | static enum drm_connector_status | 4698 | static enum drm_connector_status |
4469 | g4x_dp_detect(struct intel_dp *intel_dp) | 4699 | g4x_dp_detect(struct intel_dp *intel_dp) |
4470 | { | 4700 | { |
4471 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 4701 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
4472 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 4702 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
4473 | int ret; | ||
4474 | 4703 | ||
4475 | /* Can't disconnect eDP, but you can close the lid... */ | 4704 | /* Can't disconnect eDP, but you can close the lid... */ |
4476 | if (is_edp(intel_dp)) { | 4705 | if (is_edp(intel_dp)) { |
@@ -4482,10 +4711,7 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
4482 | return status; | 4711 | return status; |
4483 | } | 4712 | } |
4484 | 4713 | ||
4485 | ret = g4x_digital_port_connected(dev, intel_dig_port); | 4714 | if (!intel_digital_port_connected(dev->dev_private, intel_dig_port)) |
4486 | if (ret == -EINVAL) | ||
4487 | return connector_status_unknown; | ||
4488 | else if (ret == 0) | ||
4489 | return connector_status_disconnected; | 4715 | return connector_status_disconnected; |
4490 | 4716 | ||
4491 | return intel_dp_detect_dpcd(intel_dp); | 4717 | return intel_dp_detect_dpcd(intel_dp); |
@@ -4729,7 +4955,7 @@ intel_dp_set_property(struct drm_connector *connector, | |||
4729 | 4955 | ||
4730 | if (property == dev_priv->broadcast_rgb_property) { | 4956 | if (property == dev_priv->broadcast_rgb_property) { |
4731 | bool old_auto = intel_dp->color_range_auto; | 4957 | bool old_auto = intel_dp->color_range_auto; |
4732 | uint32_t old_range = intel_dp->color_range; | 4958 | bool old_range = intel_dp->limited_color_range; |
4733 | 4959 | ||
4734 | switch (val) { | 4960 | switch (val) { |
4735 | case INTEL_BROADCAST_RGB_AUTO: | 4961 | case INTEL_BROADCAST_RGB_AUTO: |
@@ -4737,18 +4963,18 @@ intel_dp_set_property(struct drm_connector *connector, | |||
4737 | break; | 4963 | break; |
4738 | case INTEL_BROADCAST_RGB_FULL: | 4964 | case INTEL_BROADCAST_RGB_FULL: |
4739 | intel_dp->color_range_auto = false; | 4965 | intel_dp->color_range_auto = false; |
4740 | intel_dp->color_range = 0; | 4966 | intel_dp->limited_color_range = false; |
4741 | break; | 4967 | break; |
4742 | case INTEL_BROADCAST_RGB_LIMITED: | 4968 | case INTEL_BROADCAST_RGB_LIMITED: |
4743 | intel_dp->color_range_auto = false; | 4969 | intel_dp->color_range_auto = false; |
4744 | intel_dp->color_range = DP_COLOR_RANGE_16_235; | 4970 | intel_dp->limited_color_range = true; |
4745 | break; | 4971 | break; |
4746 | default: | 4972 | default: |
4747 | return -EINVAL; | 4973 | return -EINVAL; |
4748 | } | 4974 | } |
4749 | 4975 | ||
4750 | if (old_auto == intel_dp->color_range_auto && | 4976 | if (old_auto == intel_dp->color_range_auto && |
4751 | old_range == intel_dp->color_range) | 4977 | old_range == intel_dp->limited_color_range) |
4752 | return 0; | 4978 | return 0; |
4753 | 4979 | ||
4754 | goto done; | 4980 | goto done; |
@@ -4948,13 +5174,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | |||
4948 | /* indicate that we need to restart link training */ | 5174 | /* indicate that we need to restart link training */ |
4949 | intel_dp->train_set_valid = false; | 5175 | intel_dp->train_set_valid = false; |
4950 | 5176 | ||
4951 | if (HAS_PCH_SPLIT(dev)) { | 5177 | if (!intel_digital_port_connected(dev_priv, intel_dig_port)) |
4952 | if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) | 5178 | goto mst_fail; |
4953 | goto mst_fail; | ||
4954 | } else { | ||
4955 | if (g4x_digital_port_connected(dev, intel_dig_port) != 1) | ||
4956 | goto mst_fail; | ||
4957 | } | ||
4958 | 5179 | ||
4959 | if (!intel_dp_get_dpcd(intel_dp)) { | 5180 | if (!intel_dp_get_dpcd(intel_dp)) { |
4960 | goto mst_fail; | 5181 | goto mst_fail; |
@@ -5854,6 +6075,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
5854 | break; | 6075 | break; |
5855 | case PORT_B: | 6076 | case PORT_B: |
5856 | intel_encoder->hpd_pin = HPD_PORT_B; | 6077 | intel_encoder->hpd_pin = HPD_PORT_B; |
6078 | if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)) | ||
6079 | intel_encoder->hpd_pin = HPD_PORT_A; | ||
5857 | break; | 6080 | break; |
5858 | case PORT_C: | 6081 | case PORT_C: |
5859 | intel_encoder->hpd_pin = HPD_PORT_C; | 6082 | intel_encoder->hpd_pin = HPD_PORT_C; |
@@ -5954,6 +6177,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) | |||
5954 | intel_encoder->pre_enable = chv_pre_enable_dp; | 6177 | intel_encoder->pre_enable = chv_pre_enable_dp; |
5955 | intel_encoder->enable = vlv_enable_dp; | 6178 | intel_encoder->enable = vlv_enable_dp; |
5956 | intel_encoder->post_disable = chv_post_disable_dp; | 6179 | intel_encoder->post_disable = chv_post_disable_dp; |
6180 | intel_encoder->post_pll_disable = chv_dp_post_pll_disable; | ||
5957 | } else if (IS_VALLEYVIEW(dev)) { | 6181 | } else if (IS_VALLEYVIEW(dev)) { |
5958 | intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; | 6182 | intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; |
5959 | intel_encoder->pre_enable = vlv_pre_enable_dp; | 6183 | intel_encoder->pre_enable = vlv_pre_enable_dp; |
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 3e4be5a3becd..ff8ba55853be 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
@@ -39,7 +39,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, | |||
39 | struct intel_dp *intel_dp = &intel_dig_port->dp; | 39 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
40 | struct drm_atomic_state *state; | 40 | struct drm_atomic_state *state; |
41 | int bpp, i; | 41 | int bpp, i; |
42 | int lane_count, slots, rate; | 42 | int lane_count, slots; |
43 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | 43 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; |
44 | struct drm_connector *drm_connector; | 44 | struct drm_connector *drm_connector; |
45 | struct intel_connector *connector, *found = NULL; | 45 | struct intel_connector *connector, *found = NULL; |
@@ -56,20 +56,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, | |||
56 | */ | 56 | */ |
57 | lane_count = drm_dp_max_lane_count(intel_dp->dpcd); | 57 | lane_count = drm_dp_max_lane_count(intel_dp->dpcd); |
58 | 58 | ||
59 | rate = intel_dp_max_link_rate(intel_dp); | ||
60 | 59 | ||
61 | if (intel_dp->num_sink_rates) { | 60 | pipe_config->lane_count = lane_count; |
62 | intel_dp->link_bw = 0; | ||
63 | intel_dp->rate_select = intel_dp_rate_select(intel_dp, rate); | ||
64 | } else { | ||
65 | intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate); | ||
66 | intel_dp->rate_select = 0; | ||
67 | } | ||
68 | |||
69 | intel_dp->lane_count = lane_count; | ||
70 | 61 | ||
71 | pipe_config->pipe_bpp = 24; | 62 | pipe_config->pipe_bpp = 24; |
72 | pipe_config->port_clock = rate; | 63 | pipe_config->port_clock = intel_dp_max_link_rate(intel_dp); |
73 | 64 | ||
74 | state = pipe_config->base.state; | 65 | state = pipe_config->base.state; |
75 | 66 | ||
@@ -184,6 +175,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) | |||
184 | if (intel_dp->active_mst_links == 0) { | 175 | if (intel_dp->active_mst_links == 0) { |
185 | enum port port = intel_ddi_get_encoder_port(encoder); | 176 | enum port port = intel_ddi_get_encoder_port(encoder); |
186 | 177 | ||
178 | intel_dp_set_link_params(intel_dp, intel_crtc->config); | ||
179 | |||
187 | /* FIXME: add support for SKL */ | 180 | /* FIXME: add support for SKL */ |
188 | if (INTEL_INFO(dev)->gen < 9) | 181 | if (INTEL_INFO(dev)->gen < 9) |
189 | I915_WRITE(PORT_CLK_SEL(port), | 182 | I915_WRITE(PORT_CLK_SEL(port), |
@@ -286,6 +279,10 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, | |||
286 | break; | 279 | break; |
287 | } | 280 | } |
288 | pipe_config->base.adjusted_mode.flags |= flags; | 281 | pipe_config->base.adjusted_mode.flags |= flags; |
282 | |||
283 | pipe_config->lane_count = | ||
284 | ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; | ||
285 | |||
289 | intel_dp_get_m_n(crtc, pipe_config); | 286 | intel_dp_get_m_n(crtc, pipe_config); |
290 | 287 | ||
291 | intel_ddi_clock_get(&intel_dig_port->base, pipe_config); | 288 | intel_ddi_clock_get(&intel_dig_port->base, pipe_config); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 354432fdfcf0..12972fba2661 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -142,6 +142,7 @@ struct intel_encoder { | |||
142 | void (*mode_set)(struct intel_encoder *intel_encoder); | 142 | void (*mode_set)(struct intel_encoder *intel_encoder); |
143 | void (*disable)(struct intel_encoder *); | 143 | void (*disable)(struct intel_encoder *); |
144 | void (*post_disable)(struct intel_encoder *); | 144 | void (*post_disable)(struct intel_encoder *); |
145 | void (*post_pll_disable)(struct intel_encoder *); | ||
145 | /* Read out the current hw state of this connector, returning true if | 146 | /* Read out the current hw state of this connector, returning true if |
146 | * the encoder is active. If the encoder is enabled it also sets the pipe | 147 |
147 | * it is connected to in the pipe parameter. */ | 148 | * it is connected to in the pipe parameter. */ |
@@ -423,6 +424,8 @@ struct intel_crtc_state { | |||
423 | /* Used by SDVO (and if we ever fix it, HDMI). */ | 424 | /* Used by SDVO (and if we ever fix it, HDMI). */ |
424 | unsigned pixel_multiplier; | 425 | unsigned pixel_multiplier; |
425 | 426 | ||
427 | uint8_t lane_count; | ||
428 | |||
426 | /* Panel fitter controls for gen2-gen4 + VLV */ | 429 | /* Panel fitter controls for gen2-gen4 + VLV */ |
427 | struct { | 430 | struct { |
428 | u32 control; | 431 | u32 control; |
@@ -561,6 +564,8 @@ struct intel_crtc { | |||
561 | int scanline_offset; | 564 | int scanline_offset; |
562 | 565 | ||
563 | unsigned start_vbl_count; | 566 | unsigned start_vbl_count; |
567 | ktime_t start_vbl_time; | ||
568 | |||
564 | struct intel_crtc_atomic_commit atomic; | 569 | struct intel_crtc_atomic_commit atomic; |
565 | 570 | ||
566 | /* scalers available on this crtc */ | 571 | /* scalers available on this crtc */ |
@@ -657,13 +662,14 @@ struct cxsr_latency { | |||
657 | struct intel_hdmi { | 662 | struct intel_hdmi { |
658 | u32 hdmi_reg; | 663 | u32 hdmi_reg; |
659 | int ddc_bus; | 664 | int ddc_bus; |
660 | uint32_t color_range; | 665 | bool limited_color_range; |
661 | bool color_range_auto; | 666 | bool color_range_auto; |
662 | bool has_hdmi_sink; | 667 | bool has_hdmi_sink; |
663 | bool has_audio; | 668 | bool has_audio; |
664 | enum hdmi_force_audio force_audio; | 669 | enum hdmi_force_audio force_audio; |
665 | bool rgb_quant_range_selectable; | 670 | bool rgb_quant_range_selectable; |
666 | enum hdmi_picture_aspect aspect_ratio; | 671 | enum hdmi_picture_aspect aspect_ratio; |
672 | struct intel_connector *attached_connector; | ||
667 | void (*write_infoframe)(struct drm_encoder *encoder, | 673 | void (*write_infoframe)(struct drm_encoder *encoder, |
668 | enum hdmi_infoframe_type type, | 674 | enum hdmi_infoframe_type type, |
669 | const void *frame, ssize_t len); | 675 | const void *frame, ssize_t len); |
@@ -696,23 +702,29 @@ enum link_m_n_set { | |||
696 | M2_N2 | 702 | M2_N2 |
697 | }; | 703 | }; |
698 | 704 | ||
705 | struct sink_crc { | ||
706 | bool started; | ||
707 | u8 last_crc[6]; | ||
708 | int last_count; | ||
709 | }; | ||
710 | |||
699 | struct intel_dp { | 711 | struct intel_dp { |
700 | uint32_t output_reg; | 712 | uint32_t output_reg; |
701 | uint32_t aux_ch_ctl_reg; | 713 | uint32_t aux_ch_ctl_reg; |
702 | uint32_t DP; | 714 | uint32_t DP; |
715 | int link_rate; | ||
716 | uint8_t lane_count; | ||
703 | bool has_audio; | 717 | bool has_audio; |
704 | enum hdmi_force_audio force_audio; | 718 | enum hdmi_force_audio force_audio; |
705 | uint32_t color_range; | 719 | bool limited_color_range; |
706 | bool color_range_auto; | 720 | bool color_range_auto; |
707 | uint8_t link_bw; | ||
708 | uint8_t rate_select; | ||
709 | uint8_t lane_count; | ||
710 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; | 721 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; |
711 | uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; | 722 | uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; |
712 | uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; | 723 | uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; |
713 | /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ | 724 | /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ |
714 | uint8_t num_sink_rates; | 725 | uint8_t num_sink_rates; |
715 | int sink_rates[DP_MAX_SUPPORTED_RATES]; | 726 | int sink_rates[DP_MAX_SUPPORTED_RATES]; |
727 | struct sink_crc sink_crc; | ||
716 | struct drm_dp_aux aux; | 728 | struct drm_dp_aux aux; |
717 | uint8_t train_set[4]; | 729 | uint8_t train_set[4]; |
718 | int panel_power_up_delay; | 730 | int panel_power_up_delay; |
@@ -735,7 +747,6 @@ struct intel_dp { | |||
735 | enum pipe pps_pipe; | 747 | enum pipe pps_pipe; |
736 | struct edp_power_seq pps_delays; | 748 | struct edp_power_seq pps_delays; |
737 | 749 | ||
738 | bool use_tps3; | ||
739 | bool can_mst; /* this port supports mst */ | 750 | bool can_mst; /* this port supports mst */ |
740 | bool is_mst; | 751 | bool is_mst; |
741 | int active_mst_links; | 752 | int active_mst_links; |
@@ -770,6 +781,7 @@ struct intel_digital_port { | |||
770 | struct intel_dp dp; | 781 | struct intel_dp dp; |
771 | struct intel_hdmi hdmi; | 782 | struct intel_hdmi hdmi; |
772 | enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); | 783 | enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); |
784 | bool release_cl2_override; | ||
773 | }; | 785 | }; |
774 | 786 | ||
775 | struct intel_dp_mst_encoder { | 787 | struct intel_dp_mst_encoder { |
@@ -779,7 +791,7 @@ struct intel_dp_mst_encoder { | |||
779 | void *port; /* store this opaque as it's illegal to dereference it */ | 791 |
780 | }; | 792 | }; |
781 | 793 | ||
782 | static inline int | 794 | static inline enum dpio_channel |
783 | vlv_dport_to_channel(struct intel_digital_port *dport) | 795 | vlv_dport_to_channel(struct intel_digital_port *dport) |
784 | { | 796 | { |
785 | switch (dport->port) { | 797 | switch (dport->port) { |
@@ -793,7 +805,21 @@ vlv_dport_to_channel(struct intel_digital_port *dport) | |||
793 | } | 805 | } |
794 | } | 806 | } |
795 | 807 | ||
796 | static inline int | 808 | static inline enum dpio_phy |
809 | vlv_dport_to_phy(struct intel_digital_port *dport) | ||
810 | { | ||
811 | switch (dport->port) { | ||
812 | case PORT_B: | ||
813 | case PORT_C: | ||
814 | return DPIO_PHY0; | ||
815 | case PORT_D: | ||
816 | return DPIO_PHY1; | ||
817 | default: | ||
818 | BUG(); | ||
819 | } | ||
820 | } | ||
821 | |||
822 | static inline enum dpio_channel | ||
797 | vlv_pipe_to_channel(enum pipe pipe) | 823 | vlv_pipe_to_channel(enum pipe pipe) |
798 | { | 824 | { |
799 | switch (pipe) { | 825 | switch (pipe) { |
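The new vlv_dport_to_phy() complements vlv_dport_to_channel() by mapping a digital port to its DPIO PHY, which the CHV powergating helpers declared further down need. A hedged usage sketch (the wrapper function is illustrative, not part of this patch):

	static void example_powergate_port(struct intel_encoder *encoder, bool on)
	{
		struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
		struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);

		/* Resolve the port to its PHY/channel pair, then gate it. */
		chv_phy_powergate_ch(dev_priv, vlv_dport_to_phy(dport),
				     vlv_dport_to_channel(dport), on);
	}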
@@ -987,6 +1013,7 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv); | |||
987 | extern const struct drm_plane_funcs intel_plane_funcs; | 1013 | extern const struct drm_plane_funcs intel_plane_funcs; |
988 | bool intel_has_pending_fb_unpin(struct drm_device *dev); | 1014 | bool intel_has_pending_fb_unpin(struct drm_device *dev); |
989 | int intel_pch_rawclk(struct drm_device *dev); | 1015 | int intel_pch_rawclk(struct drm_device *dev); |
1016 | int intel_hrawclk(struct drm_device *dev); | ||
990 | void intel_mark_busy(struct drm_device *dev); | 1017 | void intel_mark_busy(struct drm_device *dev); |
991 | void intel_mark_idle(struct drm_device *dev); | 1018 | void intel_mark_idle(struct drm_device *dev); |
992 | void intel_crtc_restore_mode(struct drm_crtc *crtc); | 1019 | void intel_crtc_restore_mode(struct drm_crtc *crtc); |
@@ -995,8 +1022,6 @@ void intel_encoder_destroy(struct drm_encoder *encoder); | |||
995 | int intel_connector_init(struct intel_connector *); | 1022 | int intel_connector_init(struct intel_connector *); |
996 | struct intel_connector *intel_connector_alloc(void); | 1023 | struct intel_connector *intel_connector_alloc(void); |
997 | bool intel_connector_get_hw_state(struct intel_connector *connector); | 1024 | bool intel_connector_get_hw_state(struct intel_connector *connector); |
998 | bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, | ||
999 | struct intel_digital_port *port); | ||
1000 | void intel_connector_attach_encoder(struct intel_connector *connector, | 1025 | void intel_connector_attach_encoder(struct intel_connector *connector, |
1001 | struct intel_encoder *encoder); | 1026 | struct intel_encoder *encoder); |
1002 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector); | 1027 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector); |
@@ -1153,6 +1178,8 @@ void assert_csr_loaded(struct drm_i915_private *dev_priv); | |||
1153 | void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); | 1178 | void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); |
1154 | bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | 1179 | bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, |
1155 | struct intel_connector *intel_connector); | 1180 | struct intel_connector *intel_connector); |
1181 | void intel_dp_set_link_params(struct intel_dp *intel_dp, | ||
1182 | const struct intel_crtc_state *pipe_config); | ||
1156 | void intel_dp_start_link_train(struct intel_dp *intel_dp); | 1183 | void intel_dp_start_link_train(struct intel_dp *intel_dp); |
1157 | void intel_dp_complete_link_train(struct intel_dp *intel_dp); | 1184 | void intel_dp_complete_link_train(struct intel_dp *intel_dp); |
1158 | void intel_dp_stop_link_train(struct intel_dp *intel_dp); | 1185 | void intel_dp_stop_link_train(struct intel_dp *intel_dp); |
@@ -1337,6 +1364,12 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv); | |||
1337 | 1364 | ||
1338 | void intel_display_set_init_power(struct drm_i915_private *dev, bool enable); | 1365 | void intel_display_set_init_power(struct drm_i915_private *dev, bool enable); |
1339 | 1366 | ||
1367 | void chv_phy_powergate_lanes(struct intel_encoder *encoder, | ||
1368 | bool override, unsigned int mask); | ||
1369 | bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, | ||
1370 | enum dpio_channel ch, bool override); | ||
1371 | |||
1372 | |||
1340 | /* intel_pm.c */ | 1373 | /* intel_pm.c */ |
1341 | void intel_init_clock_gating(struct drm_device *dev); | 1374 | void intel_init_clock_gating(struct drm_device *dev); |
1342 | void intel_suspend_hw(struct drm_device *dev); | 1375 | void intel_suspend_hw(struct drm_device *dev); |
@@ -1382,9 +1415,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); | |||
1382 | int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); | 1415 | int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); |
1383 | int intel_sprite_set_colorkey(struct drm_device *dev, void *data, | 1416 | int intel_sprite_set_colorkey(struct drm_device *dev, void *data, |
1384 | struct drm_file *file_priv); | 1417 | struct drm_file *file_priv); |
1385 | void intel_pipe_update_start(struct intel_crtc *crtc, | 1418 | void intel_pipe_update_start(struct intel_crtc *crtc); |
1386 | uint32_t *start_vbl_count); | 1419 | void intel_pipe_update_end(struct intel_crtc *crtc); |
1387 | void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count); | ||
1388 | 1420 | ||
1389 | /* intel_tv.c */ | 1421 | /* intel_tv.c */ |
1390 | void intel_tv_init(struct drm_device *dev); | 1422 | void intel_tv_init(struct drm_device *dev); |
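With start_vbl_count (and the new start_vbl_time) stored in struct intel_crtc, callers of the pipe-update pair no longer thread a local counter through the two calls. Schematically, at a typical plane-update site:

	/* Before: the caller owned the vblank counter. */
	u32 start_vbl_count;
	intel_pipe_update_start(crtc, &start_vbl_count);
	/* ... write plane registers ... */
	intel_pipe_update_end(crtc, start_vbl_count);

	/* After: the counter and timestamp live in the crtc itself. */
	intel_pipe_update_start(crtc);
	/* ... write plane registers ... */
	intel_pipe_update_end(crtc);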
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 32a6c7184ca4..8f9c8144b294 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
@@ -654,6 +654,7 @@ intel_dsi_mode_valid(struct drm_connector *connector, | |||
654 | { | 654 | { |
655 | struct intel_connector *intel_connector = to_intel_connector(connector); | 655 | struct intel_connector *intel_connector = to_intel_connector(connector); |
656 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; | 656 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; |
657 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; | ||
657 | 658 | ||
658 | DRM_DEBUG_KMS("\n"); | 659 | DRM_DEBUG_KMS("\n"); |
659 | 660 | ||
@@ -667,6 +668,8 @@ intel_dsi_mode_valid(struct drm_connector *connector, | |||
667 | return MODE_PANEL; | 668 | return MODE_PANEL; |
668 | if (mode->vdisplay > fixed_mode->vdisplay) | 669 | if (mode->vdisplay > fixed_mode->vdisplay) |
669 | return MODE_PANEL; | 670 | return MODE_PANEL; |
671 | if (fixed_mode->clock > max_dotclk) | ||
672 | return MODE_CLOCK_HIGH; | ||
670 | } | 673 | } |
671 | 674 | ||
672 | return MODE_OK; | 675 | return MODE_OK; |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index dc532bb61d22..c80fe1f49ede 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -201,6 +201,8 @@ intel_dvo_mode_valid(struct drm_connector *connector, | |||
201 | struct drm_display_mode *mode) | 201 | struct drm_display_mode *mode) |
202 | { | 202 | { |
203 | struct intel_dvo *intel_dvo = intel_attached_dvo(connector); | 203 | struct intel_dvo *intel_dvo = intel_attached_dvo(connector); |
204 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; | ||
205 | int target_clock = mode->clock; | ||
204 | 206 | ||
205 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 207 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
206 | return MODE_NO_DBLESCAN; | 208 | return MODE_NO_DBLESCAN; |
@@ -212,8 +214,13 @@ intel_dvo_mode_valid(struct drm_connector *connector, | |||
212 | return MODE_PANEL; | 214 | return MODE_PANEL; |
213 | if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay) | 215 | if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay) |
214 | return MODE_PANEL; | 216 | return MODE_PANEL; |
217 | |||
218 | target_clock = intel_dvo->panel_fixed_mode->clock; | ||
215 | } | 219 | } |
216 | 220 | ||
221 | if (target_clock > max_dotclk) | ||
222 | return MODE_CLOCK_HIGH; | ||
223 | |||
217 | return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode); | 224 | return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode); |
218 | } | 225 | } |
219 | 226 | ||
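The DSI and DVO hunks above add the same guard: a mode whose (panel-fixed) pixel clock exceeds the platform's max_dotclk_freq is rejected with MODE_CLOCK_HIGH. A hypothetical consolidation of that repeated pattern, not part of this series:

	static enum drm_mode_status
	intel_mode_valid_max_dotclk(struct drm_connector *connector, int clock)
	{
		int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

		/* For fixed-mode panels, pass the fixed mode's clock. */
		return clock > max_dotclk ? MODE_CLOCK_HIGH : MODE_OK;
	}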
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index ab2b856d91c3..c5de3a453180 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -263,7 +263,7 @@ static int intelfb_create(struct drm_fb_helper *helper, | |||
263 | 263 | ||
264 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ | 264 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
265 | 265 | ||
266 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n", | 266 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n", |
267 | fb->width, fb->height, | 267 | fb->width, fb->height, |
268 | i915_gem_obj_ggtt_offset(obj), obj); | 268 | i915_gem_obj_ggtt_offset(obj), obj); |
269 | 269 | ||
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h new file mode 100644 index 000000000000..4ec2d27a557e --- /dev/null +++ b/drivers/gpu/drm/i915/intel_guc.h | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * Copyright © 2014 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | #ifndef _INTEL_GUC_H_ | ||
25 | #define _INTEL_GUC_H_ | ||
26 | |||
27 | #include "intel_guc_fwif.h" | ||
28 | #include "i915_guc_reg.h" | ||
29 | |||
30 | struct i915_guc_client { | ||
31 | struct drm_i915_gem_object *client_obj; | ||
32 | struct intel_context *owner; | ||
33 | struct intel_guc *guc; | ||
34 | uint32_t priority; | ||
35 | uint32_t ctx_index; | ||
36 | |||
37 | uint32_t proc_desc_offset; | ||
38 | uint32_t doorbell_offset; | ||
39 | uint32_t cookie; | ||
40 | uint16_t doorbell_id; | ||
41 | uint16_t padding; /* Maintain alignment */ | ||
42 | |||
43 | uint32_t wq_offset; | ||
44 | uint32_t wq_size; | ||
45 | |||
46 | spinlock_t wq_lock; /* Protects all data below */ | ||
47 | uint32_t wq_tail; | ||
48 | |||
49 | /* GuC submission statistics & status */ | ||
50 | uint64_t submissions[I915_NUM_RINGS]; | ||
51 | uint32_t q_fail; | ||
52 | uint32_t b_fail; | ||
53 | int retcode; | ||
54 | }; | ||
55 | |||
56 | enum intel_guc_fw_status { | ||
57 | GUC_FIRMWARE_FAIL = -1, | ||
58 | GUC_FIRMWARE_NONE = 0, | ||
59 | GUC_FIRMWARE_PENDING, | ||
60 | GUC_FIRMWARE_SUCCESS | ||
61 | }; | ||
62 | |||
63 | /* | ||
64 | * This structure encapsulates all the data needed during the process | ||
65 | * of fetching, caching, and loading the firmware image into the GuC. | ||
66 | */ | ||
67 | struct intel_guc_fw { | ||
68 | struct drm_device *guc_dev; | ||
69 | const char *guc_fw_path; | ||
70 | size_t guc_fw_size; | ||
71 | struct drm_i915_gem_object *guc_fw_obj; | ||
72 | enum intel_guc_fw_status guc_fw_fetch_status; | ||
73 | enum intel_guc_fw_status guc_fw_load_status; | ||
74 | |||
75 | uint16_t guc_fw_major_wanted; | ||
76 | uint16_t guc_fw_minor_wanted; | ||
77 | uint16_t guc_fw_major_found; | ||
78 | uint16_t guc_fw_minor_found; | ||
79 | }; | ||
80 | |||
81 | struct intel_guc { | ||
82 | struct intel_guc_fw guc_fw; | ||
83 | |||
84 | uint32_t log_flags; | ||
85 | struct drm_i915_gem_object *log_obj; | ||
86 | |||
87 | struct drm_i915_gem_object *ctx_pool_obj; | ||
88 | struct ida ctx_ids; | ||
89 | |||
90 | struct i915_guc_client *execbuf_client; | ||
91 | |||
92 | spinlock_t host2guc_lock; /* Protects all data below */ | ||
93 | |||
94 | DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS); | ||
95 | uint32_t db_cacheline; /* Cyclic counter mod pagesize */ | ||
96 | |||
97 | /* Action status & statistics */ | ||
98 | uint64_t action_count; /* Total commands issued */ | ||
99 | uint32_t action_cmd; /* Last command word */ | ||
100 | uint32_t action_status; /* Last return status */ | ||
101 | uint32_t action_fail; /* Total number of failures */ | ||
102 | int32_t action_err; /* Last error code */ | ||
103 | |||
104 | uint64_t submissions[I915_NUM_RINGS]; | ||
105 | uint32_t last_seqno[I915_NUM_RINGS]; | ||
106 | }; | ||
107 | |||
108 | /* intel_guc_loader.c */ | ||
109 | extern void intel_guc_ucode_init(struct drm_device *dev); | ||
110 | extern int intel_guc_ucode_load(struct drm_device *dev); | ||
111 | extern void intel_guc_ucode_fini(struct drm_device *dev); | ||
112 | extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status); | ||
113 | |||
114 | /* i915_guc_submission.c */ | ||
115 | int i915_guc_submission_init(struct drm_device *dev); | ||
116 | int i915_guc_submission_enable(struct drm_device *dev); | ||
117 | int i915_guc_submit(struct i915_guc_client *client, | ||
118 | struct drm_i915_gem_request *rq); | ||
119 | void i915_guc_submission_disable(struct drm_device *dev); | ||
120 | void i915_guc_submission_fini(struct drm_device *dev); | ||
121 | |||
122 | #endif | ||
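The loader and submission entry points above are meant to be driven from the core driver load/reset paths. A hedged sketch of the intended call flow (the wrapper is illustrative; the real call sites have richer error handling):

	static int example_guc_bringup(struct drm_device *dev)
	{
		int err;

		/* Driver load, after GEM init, under struct_mutex:
		 * pick a firmware path and fetch the blob. */
		intel_guc_ucode_init(dev);

		/* From gem_init_hw(), and again after a GPU reset: DMA
		 * the image into the GuC, optionally enabling submission. */
		err = intel_guc_ucode_load(dev);
		if (err)
			return err;	/* caller falls back to execlists */

		return 0;
	}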
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 18d7f20936c8..e1f47ba2b4b0 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h | |||
@@ -32,17 +32,16 @@ | |||
32 | * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST. | 32 | * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST. |
33 | */ | 33 | */ |
34 | 34 | ||
35 | #define GFXCORE_FAMILY_GEN8 11 | ||
36 | #define GFXCORE_FAMILY_GEN9 12 | 35 | #define GFXCORE_FAMILY_GEN9 12 |
37 | #define GFXCORE_FAMILY_FORCE_ULONG 0x7fffffff | 36 | #define GFXCORE_FAMILY_UNKNOWN 0x7fffffff |
38 | 37 | ||
39 | #define GUC_CTX_PRIORITY_CRITICAL 0 | 38 | #define GUC_CTX_PRIORITY_KMD_HIGH 0 |
40 | #define GUC_CTX_PRIORITY_HIGH 1 | 39 | #define GUC_CTX_PRIORITY_HIGH 1 |
41 | #define GUC_CTX_PRIORITY_NORMAL 2 | 40 | #define GUC_CTX_PRIORITY_KMD_NORMAL 2 |
42 | #define GUC_CTX_PRIORITY_LOW 3 | 41 | #define GUC_CTX_PRIORITY_NORMAL 3 |
43 | 42 | ||
44 | #define GUC_MAX_GPU_CONTEXTS 1024 | 43 | #define GUC_MAX_GPU_CONTEXTS 1024 |
45 | #define GUC_INVALID_CTX_ID (GUC_MAX_GPU_CONTEXTS + 1) | 44 | #define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS |
46 | 45 | ||
47 | /* Work queue item header definitions */ | 46 | /* Work queue item header definitions */ |
48 | #define WQ_STATUS_ACTIVE 1 | 47 | #define WQ_STATUS_ACTIVE 1 |
@@ -76,6 +75,7 @@ | |||
76 | #define GUC_CTX_DESC_ATTR_RESET (1 << 4) | 75 | #define GUC_CTX_DESC_ATTR_RESET (1 << 4) |
77 | #define GUC_CTX_DESC_ATTR_WQLOCKED (1 << 5) | 76 | #define GUC_CTX_DESC_ATTR_WQLOCKED (1 << 5) |
78 | #define GUC_CTX_DESC_ATTR_PCH (1 << 6) | 77 | #define GUC_CTX_DESC_ATTR_PCH (1 << 6) |
78 | #define GUC_CTX_DESC_ATTR_TERMINATED (1 << 7) | ||
79 | 79 | ||
80 | /* The guc control data is 10 DWORDs */ | 80 | /* The guc control data is 10 DWORDs */ |
81 | #define GUC_CTL_CTXINFO 0 | 81 | #define GUC_CTL_CTXINFO 0 |
@@ -108,6 +108,7 @@ | |||
108 | #define GUC_CTL_DISABLE_SCHEDULER (1 << 4) | 108 | #define GUC_CTL_DISABLE_SCHEDULER (1 << 4) |
109 | #define GUC_CTL_PREEMPTION_LOG (1 << 5) | 109 | #define GUC_CTL_PREEMPTION_LOG (1 << 5) |
110 | #define GUC_CTL_ENABLE_SLPC (1 << 7) | 110 | #define GUC_CTL_ENABLE_SLPC (1 << 7) |
111 | #define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8) | ||
111 | #define GUC_CTL_DEBUG 8 | 112 | #define GUC_CTL_DEBUG 8 |
112 | #define GUC_LOG_VERBOSITY_SHIFT 0 | 113 | #define GUC_LOG_VERBOSITY_SHIFT 0 |
113 | #define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT) | 114 | #define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT) |
@@ -117,8 +118,9 @@ | |||
117 | /* Verbosity range-check limits, without the shift */ | 118 | /* Verbosity range-check limits, without the shift */ |
118 | #define GUC_LOG_VERBOSITY_MIN 0 | 119 | #define GUC_LOG_VERBOSITY_MIN 0 |
119 | #define GUC_LOG_VERBOSITY_MAX 3 | 120 | #define GUC_LOG_VERBOSITY_MAX 3 |
121 | #define GUC_CTL_RSRVD 9 | ||
120 | 122 | ||
121 | #define GUC_CTL_MAX_DWORDS (GUC_CTL_DEBUG + 1) | 123 | #define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1) |
122 | 124 | ||
123 | struct guc_doorbell_info { | 125 | struct guc_doorbell_info { |
124 | u32 db_status; | 126 | u32 db_status; |
@@ -208,7 +210,9 @@ struct guc_context_desc { | |||
208 | 210 | ||
209 | u32 engine_presence; | 211 | u32 engine_presence; |
210 | 212 | ||
211 | u32 reserved0[1]; | 213 | u8 engine_suspended; |
214 | |||
215 | u8 reserved0[3]; | ||
212 | u64 reserved1[1]; | 216 | u64 reserved1[1]; |
213 | 217 | ||
214 | u64 desc_private; | 218 | u64 desc_private; |
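With GUC_INVALID_CTX_ID redefined from GUC_MAX_GPU_CONTEXTS + 1 to GUC_MAX_GPU_CONTEXTS itself, every out-of-range value is now invalid, so a validity test collapses to a single comparison. An illustrative helper, not from this patch:

	static inline bool guc_ctx_id_is_valid(u32 ctx_id)
	{
		/* Valid IDs are 0 .. GUC_MAX_GPU_CONTEXTS - 1. */
		return ctx_id < GUC_MAX_GPU_CONTEXTS;
	}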
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c new file mode 100644 index 000000000000..5eafd31fb4a6 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_guc_loader.c | |||
@@ -0,0 +1,606 @@ | |||
1 | /* | ||
2 | * Copyright © 2014 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Vinit Azad <vinit.azad@intel.com> | ||
25 | * Ben Widawsky <ben@bwidawsk.net> | ||
26 | * Dave Gordon <david.s.gordon@intel.com> | ||
27 | * Alex Dai <yu.dai@intel.com> | ||
28 | */ | ||
29 | #include <linux/firmware.h> | ||
30 | #include "i915_drv.h" | ||
31 | #include "intel_guc.h" | ||
32 | |||
33 | /** | ||
34 | * DOC: GuC | ||
35 | * | ||
36 | * intel_guc: | ||
37 | * Top-level structure of the GuC. It handles firmware loading and manages the | ||
38 | * client pool and doorbells. intel_guc owns an i915_guc_client to replace the | ||
39 | * legacy ExecList submission. | ||
40 | * | ||
41 | * Firmware versioning: | ||
42 | * The firmware build process generates a version header file with the major | ||
43 | * and minor versions defined. The versions are built into the CSS header of | ||
44 | * the firmware. The i915 kernel driver sets the minimum firmware version | ||
45 | * required per platform. The firmware installation package will install | ||
46 | * (symbolically link) the proper version of the firmware. | ||
47 | * | ||
48 | * GuC address space: | ||
49 | * The GuC does not allow any gfx GGTT address in the range [0, WOPCM_TOP), | ||
50 | * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address | ||
51 | * is 512K. To exclude the 0-512K address space from the GGTT, all gfx objects | ||
52 | * used by the GuC are pinned with PIN_OFFSET_BIAS equal to the size of WOPCM. | ||
53 | * | ||
54 | * Firmware log: | ||
55 | * The firmware log is enabled by setting i915.guc_log_level to a non-negative | ||
56 | * level. Log data can be dumped by reading the debugfs file i915_guc_log_dump. | ||
57 | * Reading i915_guc_load_status prints the firmware loading status and scratch | ||
58 | * register values. | ||
59 | * | ||
60 | */ | ||
61 | |||
62 | #define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin" | ||
63 | MODULE_FIRMWARE(I915_SKL_GUC_UCODE); | ||
64 | |||
65 | /* User-friendly representation of an enum */ | ||
66 | const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status) | ||
67 | { | ||
68 | switch (status) { | ||
69 | case GUC_FIRMWARE_FAIL: | ||
70 | return "FAIL"; | ||
71 | case GUC_FIRMWARE_NONE: | ||
72 | return "NONE"; | ||
73 | case GUC_FIRMWARE_PENDING: | ||
74 | return "PENDING"; | ||
75 | case GUC_FIRMWARE_SUCCESS: | ||
76 | return "SUCCESS"; | ||
77 | default: | ||
78 | return "UNKNOWN!"; | ||
79 | } | ||
80 | } | ||
81 | |||
82 | static void direct_interrupts_to_host(struct drm_i915_private *dev_priv) | ||
83 | { | ||
84 | struct intel_engine_cs *ring; | ||
85 | int i, irqs; | ||
86 | |||
87 | /* tell all command streamers NOT to forward interrupts and vblank to GuC */ | ||
88 | irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); | ||
89 | irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); | ||
90 | for_each_ring(ring, dev_priv, i) | ||
91 | I915_WRITE(RING_MODE_GEN7(ring), irqs); | ||
92 | |||
93 | /* tell DE to send nothing to GuC */ | ||
94 | I915_WRITE(DE_GUCRMR, ~0); | ||
95 | |||
96 | /* route all GT interrupts to the host */ | ||
97 | I915_WRITE(GUC_BCS_RCS_IER, 0); | ||
98 | I915_WRITE(GUC_VCS2_VCS1_IER, 0); | ||
99 | I915_WRITE(GUC_WD_VECS_IER, 0); | ||
100 | } | ||
101 | |||
102 | static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv) | ||
103 | { | ||
104 | struct intel_engine_cs *ring; | ||
105 | int i, irqs; | ||
106 | |||
107 | /* tell all command streamers to forward interrupts and vblank to GuC */ | ||
108 | irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS); | ||
109 | irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); | ||
110 | for_each_ring(ring, dev_priv, i) | ||
111 | I915_WRITE(RING_MODE_GEN7(ring), irqs); | ||
112 | |||
113 | /* tell DE to send (all) flip_done to GuC */ | ||
114 | irqs = DERRMR_PIPEA_PRI_FLIP_DONE | DERRMR_PIPEA_SPR_FLIP_DONE | | ||
115 | DERRMR_PIPEB_PRI_FLIP_DONE | DERRMR_PIPEB_SPR_FLIP_DONE | | ||
116 | DERRMR_PIPEC_PRI_FLIP_DONE | DERRMR_PIPEC_SPR_FLIP_DONE; | ||
117 | /* Unmasked bits will cause GuC response message to be sent */ | ||
118 | I915_WRITE(DE_GUCRMR, ~irqs); | ||
119 | |||
120 | /* route USER_INTERRUPT to Host, all others are sent to GuC. */ | ||
121 | irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | | ||
122 | GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; | ||
123 | /* These three registers have the same bit definitions */ | ||
124 | I915_WRITE(GUC_BCS_RCS_IER, ~irqs); | ||
125 | I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs); | ||
126 | I915_WRITE(GUC_WD_VECS_IER, ~irqs); | ||
127 | } | ||
128 | |||
129 | static u32 get_gttype(struct drm_i915_private *dev_priv) | ||
130 | { | ||
131 | /* XXX: GT type based on PCI device ID? field seems unused by fw */ | ||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | static u32 get_core_family(struct drm_i915_private *dev_priv) | ||
136 | { | ||
137 | switch (INTEL_INFO(dev_priv)->gen) { | ||
138 | case 9: | ||
139 | return GFXCORE_FAMILY_GEN9; | ||
140 | |||
141 | default: | ||
142 | DRM_ERROR("GUC: unsupported core family\n"); | ||
143 | return GFXCORE_FAMILY_UNKNOWN; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | static void set_guc_init_params(struct drm_i915_private *dev_priv) | ||
148 | { | ||
149 | struct intel_guc *guc = &dev_priv->guc; | ||
150 | u32 params[GUC_CTL_MAX_DWORDS]; | ||
151 | int i; | ||
152 | |||
153 | memset(¶ms, 0, sizeof(params)); | ||
154 | |||
155 | params[GUC_CTL_DEVICE_INFO] |= | ||
156 | (get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) | | ||
157 | (get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT); | ||
158 | |||
159 | /* | ||
160 | * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one | ||
161 | * second. This ARAT is calculated by: | ||
162 | * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10 | ||
163 | */ | ||
164 | params[GUC_CTL_ARAT_HIGH] = 0; | ||
165 | params[GUC_CTL_ARAT_LOW] = 100000000; | ||
166 | |||
167 | params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER; | ||
168 | |||
169 | params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER | | ||
170 | GUC_CTL_VCS2_ENABLED; | ||
171 | |||
172 | if (i915.guc_log_level >= 0) { | ||
173 | params[GUC_CTL_LOG_PARAMS] = guc->log_flags; | ||
174 | params[GUC_CTL_DEBUG] = | ||
175 | i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT; | ||
176 | } | ||
177 | |||
178 | /* If GuC submission is enabled, set up additional parameters here */ | ||
179 | if (i915.enable_guc_submission) { | ||
180 | u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj); | ||
181 | u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16; | ||
182 | |||
183 | pgs >>= PAGE_SHIFT; | ||
184 | params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) | | ||
185 | (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT); | ||
186 | |||
187 | params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS; | ||
188 | |||
189 | /* Unmask this bit to enable the GuC's internal scheduler */ | ||
190 | params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER; | ||
191 | } | ||
192 | |||
193 | I915_WRITE(SOFT_SCRATCH(0), 0); | ||
194 | |||
195 | for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) | ||
196 | I915_WRITE(SOFT_SCRATCH(1 + i), params[i]); | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * Read the GuC status register (GUC_STATUS) and store it in the | ||
201 | * specified location; then return a boolean indicating whether | ||
202 | * the value matches either of two values representing completion | ||
203 | * of the GuC boot process. | ||
204 | * | ||
205 | * This is used for polling the GuC status in a wait_for_atomic() | ||
206 | * loop below. | ||
207 | */ | ||
208 | static inline bool guc_ucode_response(struct drm_i915_private *dev_priv, | ||
209 | u32 *status) | ||
210 | { | ||
211 | u32 val = I915_READ(GUC_STATUS); | ||
212 | *status = val; | ||
213 | return ((val & GS_UKERNEL_MASK) == GS_UKERNEL_READY || | ||
214 | (val & GS_UKERNEL_MASK) == GS_UKERNEL_LAPIC_DONE); | ||
215 | } | ||
216 | |||
217 | /* | ||
218 | * Transfer the firmware image to RAM for execution by the microcontroller. | ||
219 | * | ||
220 | * GuC Firmware layout: | ||
221 | * +-------------------------------+ ---- | ||
222 | * | CSS header | 128B | ||
223 | * | contains major/minor version | | ||
224 | * +-------------------------------+ ---- | ||
225 | * | uCode | | ||
226 | * +-------------------------------+ ---- | ||
227 | * | RSA signature | 256B | ||
228 | * +-------------------------------+ ---- | ||
229 | * | ||
230 | * Architecturally, the DMA engine is bidirectional, and can potentially even | ||
231 | * transfer between GTT locations. This functionality is left out of the API | ||
232 | * for now as there is no need for it. | ||
233 | * | ||
234 | * Note that GuC needs the CSS header plus uKernel code to be copied by the | ||
235 | * DMA engine in one operation, whereas the RSA signature is loaded via MMIO. | ||
236 | */ | ||
237 | |||
238 | #define UOS_CSS_HEADER_OFFSET 0 | ||
239 | #define UOS_VER_MINOR_OFFSET 0x44 | ||
240 | #define UOS_VER_MAJOR_OFFSET 0x46 | ||
241 | #define UOS_CSS_HEADER_SIZE 0x80 | ||
242 | #define UOS_RSA_SIG_SIZE 0x100 | ||
243 | |||
244 | static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) | ||
245 | { | ||
246 | struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; | ||
247 | struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj; | ||
248 | unsigned long offset; | ||
249 | struct sg_table *sg = fw_obj->pages; | ||
250 | u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)]; | ||
251 | int i, ret = 0; | ||
252 | |||
253 | /* uCode size, also is where RSA signature starts */ | ||
254 | offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE; | ||
255 | I915_WRITE(DMA_COPY_SIZE, ucode_size); | ||
256 | |||
257 | /* Copy RSA signature from the fw image to HW for verification */ | ||
258 | sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset); | ||
259 | for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++) | ||
260 | I915_WRITE(UOS_RSA_SCRATCH_0 + i * sizeof(u32), rsa[i]); | ||
261 | |||
262 | /* Set the source address for the new blob */ | ||
263 | offset = i915_gem_obj_ggtt_offset(fw_obj); | ||
264 | I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); | ||
265 | I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); | ||
266 | |||
267 | /* | ||
268 | * Set the DMA destination. Current uCode expects the code to be | ||
269 | * loaded at 8k; locations below this are used for the stack. | ||
270 | */ | ||
271 | I915_WRITE(DMA_ADDR_1_LOW, 0x2000); | ||
272 | I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); | ||
273 | |||
274 | /* Finally start the DMA */ | ||
275 | I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); | ||
276 | |||
277 | /* | ||
278 | * Spin-wait for the DMA to complete & the GuC to start up. | ||
279 | * NB: Docs recommend not using the interrupt for completion. | ||
280 | * Measurements indicate this should take no more than 20ms, so a | ||
281 | * timeout here indicates that the GuC has failed and is unusable. | ||
282 | * (Higher levels of the driver will attempt to fall back to | ||
283 | * execlist mode if this happens.) | ||
284 | */ | ||
285 | ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100); | ||
286 | |||
287 | DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n", | ||
288 | I915_READ(DMA_CTRL), status); | ||
289 | |||
290 | if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) { | ||
291 | DRM_ERROR("GuC firmware signature verification failed\n"); | ||
292 | ret = -ENOEXEC; | ||
293 | } | ||
294 | |||
295 | DRM_DEBUG_DRIVER("returning %d\n", ret); | ||
296 | |||
297 | return ret; | ||
298 | } | ||
299 | |||
300 | /* | ||
301 | * Load the GuC firmware blob into the MinuteIA. | ||
302 | */ | ||
303 | static int guc_ucode_xfer(struct drm_i915_private *dev_priv) | ||
304 | { | ||
305 | struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; | ||
306 | struct drm_device *dev = dev_priv->dev; | ||
307 | int ret; | ||
308 | |||
309 | ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false); | ||
310 | if (ret) { | ||
311 | DRM_DEBUG_DRIVER("set-domain failed %d\n", ret); | ||
312 | return ret; | ||
313 | } | ||
314 | |||
315 | ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0); | ||
316 | if (ret) { | ||
317 | DRM_DEBUG_DRIVER("pin failed %d\n", ret); | ||
318 | return ret; | ||
319 | } | ||
320 | |||
321 | /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */ | ||
322 | I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); | ||
323 | |||
324 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | ||
325 | |||
326 | /* init WOPCM */ | ||
327 | I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE); | ||
328 | I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE); | ||
329 | |||
330 | /* Enable MIA caching. GuC clock gating is disabled. */ | ||
331 | I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE); | ||
332 | |||
333 | /* WaC6DisallowByGfxPause */ | ||
334 | I915_WRITE(GEN6_GFXPAUSE, 0x30FFF); | ||
335 | |||
336 | if (IS_BROXTON(dev)) | ||
337 | I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE); | ||
338 | else | ||
339 | I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); | ||
340 | |||
341 | if (IS_GEN9(dev)) { | ||
342 | /* DOP Clock Gating Enable for GuC clocks */ | ||
343 | I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE | | ||
344 | I915_READ(GEN7_MISCCPCTL))); | ||
345 | |||
346 | /* allows for 5us before GT can go to RC6 */ | ||
347 | I915_WRITE(GUC_ARAT_C6DIS, 0x1FF); | ||
348 | } | ||
349 | |||
350 | set_guc_init_params(dev_priv); | ||
351 | |||
352 | ret = guc_ucode_xfer_dma(dev_priv); | ||
353 | |||
354 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
355 | |||
356 | /* | ||
357 | * We keep the object pages for reuse during resume. But we can unpin the | ||
358 | * object now that DMA has completed, so it doesn't continue to take up space. | ||
359 | */ | ||
360 | i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj); | ||
361 | |||
362 | return ret; | ||
363 | } | ||
364 | |||
365 | /** | ||
366 | * intel_guc_ucode_load() - load GuC uCode into the device | ||
367 | * @dev: drm device | ||
368 | * | ||
369 | * Called from gem_init_hw() during driver loading and also after a GPU reset. | ||
370 | * | ||
371 | * The firmware image should have already been fetched into memory by the | ||
372 | * earlier call to intel_guc_ucode_init(), so here we need only check that | ||
373 | * it succeeded, and then transfer the image to the h/w. | ||
374 | * | ||
375 | * Return: non-zero code on error | ||
376 | */ | ||
377 | int intel_guc_ucode_load(struct drm_device *dev) | ||
378 | { | ||
379 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
380 | struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; | ||
381 | int err = 0; | ||
382 | |||
383 | DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", | ||
384 | intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), | ||
385 | intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); | ||
386 | |||
387 | direct_interrupts_to_host(dev_priv); | ||
388 | i915_guc_submission_disable(dev); | ||
389 | |||
390 | if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE) | ||
391 | return 0; | ||
392 | |||
393 | if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS && | ||
394 | guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) | ||
395 | return -ENOEXEC; | ||
396 | |||
397 | guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING; | ||
398 | |||
399 | DRM_DEBUG_DRIVER("GuC fw fetch status %s\n", | ||
400 | intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status)); | ||
401 | |||
402 | switch (guc_fw->guc_fw_fetch_status) { | ||
403 | case GUC_FIRMWARE_FAIL: | ||
404 | /* something went wrong :( */ | ||
405 | err = -EIO; | ||
406 | goto fail; | ||
407 | |||
408 | case GUC_FIRMWARE_NONE: | ||
409 | case GUC_FIRMWARE_PENDING: | ||
410 | default: | ||
411 | /* "can't happen" */ | ||
412 | WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n", | ||
413 | guc_fw->guc_fw_path, | ||
414 | intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), | ||
415 | guc_fw->guc_fw_fetch_status); | ||
416 | err = -ENXIO; | ||
417 | goto fail; | ||
418 | |||
419 | case GUC_FIRMWARE_SUCCESS: | ||
420 | break; | ||
421 | } | ||
422 | |||
423 | err = i915_guc_submission_init(dev); | ||
424 | if (err) | ||
425 | goto fail; | ||
426 | |||
427 | err = guc_ucode_xfer(dev_priv); | ||
428 | if (err) | ||
429 | goto fail; | ||
430 | |||
431 | guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS; | ||
432 | |||
433 | DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", | ||
434 | intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), | ||
435 | intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); | ||
436 | |||
437 | if (i915.enable_guc_submission) { | ||
438 | err = i915_guc_submission_enable(dev); | ||
439 | if (err) | ||
440 | goto fail; | ||
441 | direct_interrupts_to_guc(dev_priv); | ||
442 | } | ||
443 | |||
444 | return 0; | ||
445 | |||
446 | fail: | ||
447 | if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING) | ||
448 | guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL; | ||
449 | |||
450 | direct_interrupts_to_host(dev_priv); | ||
451 | i915_guc_submission_disable(dev); | ||
452 | |||
453 | return err; | ||
454 | } | ||
455 | |||
456 | static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) | ||
457 | { | ||
458 | struct drm_i915_gem_object *obj; | ||
459 | const struct firmware *fw; | ||
460 | const u8 *css_header; | ||
461 | const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE; | ||
462 | const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE | ||
463 | - 0x8000; /* 32k reserved (8K stack + 24k context) */ | ||
464 | int err; | ||
465 | |||
466 | DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n", | ||
467 | intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status)); | ||
468 | |||
469 | err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev); | ||
470 | if (err) | ||
471 | goto fail; | ||
472 | if (!fw) | ||
473 | goto fail; | ||
474 | |||
475 | DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n", | ||
476 | guc_fw->guc_fw_path, fw); | ||
477 | DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n", | ||
478 | fw->size, minsize, maxsize); | ||
479 | |||
480 | /* Check the size of the blob before examining buffer contents */ | ||
481 | if (fw->size < minsize || fw->size > maxsize) | ||
482 | goto fail; | ||
483 | |||
484 | /* | ||
485 | * The GuC firmware image has the version number embedded at a well-known | ||
486 | * offset within the firmware blob; note that major / minor version are | ||
487 | * TWO bytes each (i.e. u16), although all pointers and offsets are defined | ||
488 | * in terms of bytes (u8). | ||
489 | */ | ||
490 | css_header = fw->data + UOS_CSS_HEADER_OFFSET; | ||
491 | guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET); | ||
492 | guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET); | ||
493 | |||
494 | if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted || | ||
495 | guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) { | ||
496 | DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n", | ||
497 | guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found, | ||
498 | guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); | ||
499 | err = -ENOEXEC; | ||
500 | goto fail; | ||
501 | } | ||
502 | |||
503 | DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n", | ||
504 | guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found, | ||
505 | guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); | ||
506 | |||
507 | obj = i915_gem_object_create_from_data(dev, fw->data, fw->size); | ||
508 | if (IS_ERR_OR_NULL(obj)) { | ||
509 | err = obj ? PTR_ERR(obj) : -ENOMEM; | ||
510 | goto fail; | ||
511 | } | ||
512 | |||
513 | guc_fw->guc_fw_obj = obj; | ||
514 | guc_fw->guc_fw_size = fw->size; | ||
515 | |||
516 | DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n", | ||
517 | guc_fw->guc_fw_obj); | ||
518 | |||
519 | release_firmware(fw); | ||
520 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS; | ||
521 | return; | ||
522 | |||
523 | fail: | ||
524 | DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n", | ||
525 | err, fw, guc_fw->guc_fw_obj); | ||
526 | DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n", | ||
527 | guc_fw->guc_fw_path, err); | ||
528 | |||
529 | obj = guc_fw->guc_fw_obj; | ||
530 | if (obj) | ||
531 | drm_gem_object_unreference(&obj->base); | ||
532 | guc_fw->guc_fw_obj = NULL; | ||
533 | |||
534 | release_firmware(fw); /* OK even if fw is NULL */ | ||
535 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL; | ||
536 | } | ||
537 | |||
538 | /** | ||
539 | * intel_guc_ucode_init() - define parameters and fetch firmware | ||
540 | * @dev: drm device | ||
541 | * | ||
542 | * Called early during driver load, but after GEM is initialised. | ||
543 | * The device struct_mutex must be held by the caller, as we're | ||
544 | * going to allocate a GEM object to hold the firmware image. | ||
545 | * | ||
546 | * The firmware will be transferred to the GuC's memory later, | ||
547 | * when intel_guc_ucode_load() is called. | ||
548 | */ | ||
549 | void intel_guc_ucode_init(struct drm_device *dev) | ||
550 | { | ||
551 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
552 | struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; | ||
553 | const char *fw_path; | ||
554 | |||
555 | if (!HAS_GUC_SCHED(dev)) | ||
556 | i915.enable_guc_submission = false; | ||
557 | |||
558 | if (!HAS_GUC_UCODE(dev)) { | ||
559 | fw_path = NULL; | ||
560 | } else if (IS_SKYLAKE(dev)) { | ||
561 | fw_path = I915_SKL_GUC_UCODE; | ||
562 | guc_fw->guc_fw_major_wanted = 4; | ||
563 | guc_fw->guc_fw_minor_wanted = 3; | ||
564 | } else { | ||
565 | i915.enable_guc_submission = false; | ||
566 | fw_path = ""; /* unknown device */ | ||
567 | } | ||
568 | |||
569 | guc_fw->guc_dev = dev; | ||
570 | guc_fw->guc_fw_path = fw_path; | ||
571 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; | ||
572 | guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE; | ||
573 | |||
574 | if (fw_path == NULL) | ||
575 | return; | ||
576 | |||
577 | if (*fw_path == '\0') { | ||
578 | DRM_ERROR("No GuC firmware known for this platform\n"); | ||
579 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL; | ||
580 | return; | ||
581 | } | ||
582 | |||
583 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING; | ||
584 | DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path); | ||
585 | guc_fw_fetch(dev, guc_fw); | ||
586 | /* status must now be FAIL or SUCCESS */ | ||
587 | } | ||
588 | |||
589 | /** | ||
590 | * intel_guc_ucode_fini() - clean up all allocated resources | ||
591 | * @dev: drm device | ||
592 | */ | ||
593 | void intel_guc_ucode_fini(struct drm_device *dev) | ||
594 | { | ||
595 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
596 | struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; | ||
597 | |||
598 | direct_interrupts_to_host(dev_priv); | ||
599 | i915_guc_submission_fini(dev); | ||
600 | |||
601 | if (guc_fw->guc_fw_obj) | ||
602 | drm_gem_object_unreference(&guc_fw->guc_fw_obj->base); | ||
603 | guc_fw->guc_fw_obj = NULL; | ||
604 | |||
605 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; | ||
606 | } | ||
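Objects handed to the GuC must respect the address-space rule from the DOC comment above: nothing may land below WOPCM_TOP in the GGTT. A minimal allocation sketch; using PIN_OFFSET_BIAS | GUC_WOPCM_TOP as the bias follows this series' submission code and is an assumption here:

	static struct drm_i915_gem_object *
	example_guc_alloc(struct drm_device *dev, u32 size)
	{
		struct drm_i915_gem_object *obj;

		obj = i915_gem_alloc_object(dev, size);
		if (!obj)
			return NULL;

		/* Keep the object's GGTT address out of [0, WOPCM_TOP). */
		if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
					  PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
			drm_gem_object_unreference(&obj->base);
			return NULL;
		}

		return obj;
	}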
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index dcd336bcdfe7..e978c59dc243 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -848,8 +848,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder) | |||
848 | u32 hdmi_val; | 848 | u32 hdmi_val; |
849 | 849 | ||
850 | hdmi_val = SDVO_ENCODING_HDMI; | 850 | hdmi_val = SDVO_ENCODING_HDMI; |
851 | if (!HAS_PCH_SPLIT(dev)) | 851 | if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range) |
852 | hdmi_val |= intel_hdmi->color_range; | 852 | hdmi_val |= HDMI_COLOR_RANGE_16_235; |
853 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 853 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
854 | hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH; | 854 | hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH; |
855 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 855 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
@@ -1260,11 +1260,12 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
1260 | 1260 | ||
1261 | if (intel_hdmi->color_range_auto) { | 1261 | if (intel_hdmi->color_range_auto) { |
1262 | /* See CEA-861-E - 5.1 Default Encoding Parameters */ | 1262 | /* See CEA-861-E - 5.1 Default Encoding Parameters */ |
1263 | if (pipe_config->has_hdmi_sink && | 1263 | pipe_config->limited_color_range = |
1264 | drm_match_cea_mode(adjusted_mode) > 1) | 1264 | pipe_config->has_hdmi_sink && |
1265 | intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235; | 1265 | drm_match_cea_mode(adjusted_mode) > 1; |
1266 | else | 1266 | } else { |
1267 | intel_hdmi->color_range = 0; | 1267 | pipe_config->limited_color_range = |
1268 | intel_hdmi->limited_color_range; | ||
1268 | } | 1269 | } |
1269 | 1270 | ||
1270 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) { | 1271 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) { |
@@ -1273,9 +1274,6 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
1273 | clock_12bpc *= 2; | 1274 | clock_12bpc *= 2; |
1274 | } | 1275 | } |
1275 | 1276 | ||
1276 | if (intel_hdmi->color_range) | ||
1277 | pipe_config->limited_color_range = true; | ||
1278 | |||
1279 | if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) | 1277 | if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) |
1280 | pipe_config->has_pch_encoder = true; | 1278 | pipe_config->has_pch_encoder = true; |
1281 | 1279 | ||
@@ -1470,7 +1468,7 @@ intel_hdmi_set_property(struct drm_connector *connector, | |||
1470 | 1468 | ||
1471 | if (property == dev_priv->broadcast_rgb_property) { | 1469 | if (property == dev_priv->broadcast_rgb_property) { |
1472 | bool old_auto = intel_hdmi->color_range_auto; | 1470 | bool old_auto = intel_hdmi->color_range_auto; |
1473 | uint32_t old_range = intel_hdmi->color_range; | 1471 | bool old_range = intel_hdmi->limited_color_range; |
1474 | 1472 | ||
1475 | switch (val) { | 1473 | switch (val) { |
1476 | case INTEL_BROADCAST_RGB_AUTO: | 1474 | case INTEL_BROADCAST_RGB_AUTO: |
@@ -1478,18 +1476,18 @@ intel_hdmi_set_property(struct drm_connector *connector, | |||
1478 | break; | 1476 | break; |
1479 | case INTEL_BROADCAST_RGB_FULL: | 1477 | case INTEL_BROADCAST_RGB_FULL: |
1480 | intel_hdmi->color_range_auto = false; | 1478 | intel_hdmi->color_range_auto = false; |
1481 | intel_hdmi->color_range = 0; | 1479 | intel_hdmi->limited_color_range = false; |
1482 | break; | 1480 | break; |
1483 | case INTEL_BROADCAST_RGB_LIMITED: | 1481 | case INTEL_BROADCAST_RGB_LIMITED: |
1484 | intel_hdmi->color_range_auto = false; | 1482 | intel_hdmi->color_range_auto = false; |
1485 | intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235; | 1483 | intel_hdmi->limited_color_range = true; |
1486 | break; | 1484 | break; |
1487 | default: | 1485 | default: |
1488 | return -EINVAL; | 1486 | return -EINVAL; |
1489 | } | 1487 | } |
1490 | 1488 | ||
1491 | if (old_auto == intel_hdmi->color_range_auto && | 1489 | if (old_auto == intel_hdmi->color_range_auto && |
1492 | old_range == intel_hdmi->color_range) | 1490 | old_range == intel_hdmi->limited_color_range) |
1493 | return 0; | 1491 | return 0; |
1494 | 1492 | ||
1495 | goto done; | 1493 | goto done; |
@@ -1617,6 +1615,50 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) | |||
1617 | mutex_unlock(&dev_priv->sb_lock); | 1615 | mutex_unlock(&dev_priv->sb_lock); |
1618 | } | 1616 | } |
1619 | 1617 | ||
1618 | static void chv_data_lane_soft_reset(struct intel_encoder *encoder, | ||
1619 | bool reset) | ||
1620 | { | ||
1621 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
1622 | enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); | ||
1623 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | ||
1624 | enum pipe pipe = crtc->pipe; | ||
1625 | uint32_t val; | ||
1626 | |||
1627 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); | ||
1628 | if (reset) | ||
1629 | val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); | ||
1630 | else | ||
1631 | val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; | ||
1632 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); | ||
1633 | |||
1634 | if (crtc->config->lane_count > 2) { | ||
1635 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); | ||
1636 | if (reset) | ||
1637 | val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); | ||
1638 | else | ||
1639 | val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; | ||
1640 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); | ||
1641 | } | ||
1642 | |||
1643 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); | ||
1644 | val |= CHV_PCS_REQ_SOFTRESET_EN; | ||
1645 | if (reset) | ||
1646 | val &= ~DPIO_PCS_CLK_SOFT_RESET; | ||
1647 | else | ||
1648 | val |= DPIO_PCS_CLK_SOFT_RESET; | ||
1649 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); | ||
1650 | |||
1651 | if (crtc->config->lane_count > 2) { | ||
1652 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); | ||
1653 | val |= CHV_PCS_REQ_SOFTRESET_EN; | ||
1654 | if (reset) | ||
1655 | val &= ~DPIO_PCS_CLK_SOFT_RESET; | ||
1656 | else | ||
1657 | val |= DPIO_PCS_CLK_SOFT_RESET; | ||
1658 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); | ||
1659 | } | ||
1660 | } | ||
1661 | |||
1620 | static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) | 1662 | static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) |
1621 | { | 1663 | { |
1622 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | 1664 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); |
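chv_data_lane_soft_reset() folds the previously open-coded lane-reset sequences (removed in the hunks below) into one helper keyed by a reset flag, and also covers the lane_count > 2 case. Per the call sites added in this series, usage reduces to:

	/* Assert the data lane reset before reprogramming the PHY ... */
	chv_data_lane_soft_reset(encoder, true);
	/* ... and deassert it once the lanes are set up again. */
	chv_data_lane_soft_reset(encoder, false);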
@@ -1630,8 +1672,21 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) | |||
1630 | 1672 | ||
1631 | intel_hdmi_prepare(encoder); | 1673 | intel_hdmi_prepare(encoder); |
1632 | 1674 | ||
1675 | /* | ||
1676 | * Must trick the second common lane into life. | ||
1677 | * Otherwise we can't even access the PLL. | ||
1678 | */ | ||
1679 | if (ch == DPIO_CH0 && pipe == PIPE_B) | ||
1680 | dport->release_cl2_override = | ||
1681 | !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true); | ||
1682 | |||
1683 | chv_phy_powergate_lanes(encoder, true, 0x0); | ||
1684 | |||
1633 | mutex_lock(&dev_priv->sb_lock); | 1685 | mutex_lock(&dev_priv->sb_lock); |
1634 | 1686 | ||
1687 | /* Assert data lane reset */ | ||
1688 | chv_data_lane_soft_reset(encoder, true); | ||
1689 | |||
1635 | /* program left/right clock distribution */ | 1690 | /* program left/right clock distribution */ |
1636 | if (pipe != PIPE_B) { | 1691 | if (pipe != PIPE_B) { |
1637 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); | 1692 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); |
@@ -1683,6 +1738,39 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) | |||
1683 | mutex_unlock(&dev_priv->sb_lock); | 1738 | mutex_unlock(&dev_priv->sb_lock); |
1684 | } | 1739 | } |
1685 | 1740 | ||
1741 | static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder) | ||
1742 | { | ||
1743 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
1744 | enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; | ||
1745 | u32 val; | ||
1746 | |||
1747 | mutex_lock(&dev_priv->sb_lock); | ||
1748 | |||
1749 | /* disable left/right clock distribution */ | ||
1750 | if (pipe != PIPE_B) { | ||
1751 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); | ||
1752 | val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); | ||
1753 | vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); | ||
1754 | } else { | ||
1755 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); | ||
1756 | val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); | ||
1757 | vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); | ||
1758 | } | ||
1759 | |||
1760 | mutex_unlock(&dev_priv->sb_lock); | ||
1761 | |||
1762 | /* | ||
1763 | * Leave the power down bit cleared for at least one | ||
1764 | * lane so that chv_phy_powergate_ch() will power | ||
1765 | * on something when the channel is otherwise unused. | ||
1766 | * When the port is off and the override is removed | ||
1767 | * the lanes power down anyway, so otherwise it doesn't | ||
1768 | * really matter what the state of power down bits is | ||
1769 | * after this. | ||
1770 | */ | ||
1771 | chv_phy_powergate_lanes(encoder, false, 0x0); | ||
1772 | } | ||
1773 | |||
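chv_hdmi_pre_pll_enable() and chv_hdmi_pre_enable() now bracket a temporary power override on the second common lane: the PLL sits behind CL2, so CL2 must be forced on before the PLL can even be programmed, and the override is dropped once the enabled port keeps the lane alive by itself. A sketch of the acquire/release pairing, where phy_powergate_ch() is a hypothetical stand-in for chv_phy_powergate_ch() and its return convention (previous override state) is an assumption:

#include <stdbool.h>
#include <stdio.h>

struct dig_port {
	bool release_cl2_override;
};

static bool cl2_override;	/* sketch of the PHY override state */

/* Returns the previous override state, as assumed above. */
static bool phy_powergate_ch(bool enable)
{
	bool old = cl2_override;

	cl2_override = enable;
	return old;
}

static void pre_pll_enable(struct dig_port *dport)
{
	/* Force the second common lane on so the PLL is reachable;
	 * remember whether we were the ones who turned it on. */
	dport->release_cl2_override = !phy_powergate_ch(true);
}

static void port_ready(struct dig_port *dport)
{
	/* The port keeps the lane alive on its own now. */
	if (dport->release_cl2_override) {
		phy_powergate_ch(false);
		dport->release_cl2_override = false;
	}
}

int main(void)
{
	struct dig_port port = { false };

	pre_pll_enable(&port);
	printf("override taken: %d\n", port.release_cl2_override);
	port_ready(&port);
	return 0;
}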
1686 | static void vlv_hdmi_post_disable(struct intel_encoder *encoder) | 1774 | static void vlv_hdmi_post_disable(struct intel_encoder *encoder) |
1687 | { | 1775 | { |
1688 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | 1776 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); |
@@ -1701,33 +1789,13 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder) | |||
1701 | 1789 | ||
1702 | static void chv_hdmi_post_disable(struct intel_encoder *encoder) | 1790 | static void chv_hdmi_post_disable(struct intel_encoder *encoder) |
1703 | { | 1791 | { |
1704 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | ||
1705 | struct drm_device *dev = encoder->base.dev; | 1792 | struct drm_device *dev = encoder->base.dev; |
1706 | struct drm_i915_private *dev_priv = dev->dev_private; | 1793 | struct drm_i915_private *dev_priv = dev->dev_private; |
1707 | struct intel_crtc *intel_crtc = | ||
1708 | to_intel_crtc(encoder->base.crtc); | ||
1709 | enum dpio_channel ch = vlv_dport_to_channel(dport); | ||
1710 | enum pipe pipe = intel_crtc->pipe; | ||
1711 | u32 val; | ||
1712 | 1794 | ||
1713 | mutex_lock(&dev_priv->sb_lock); | 1795 | mutex_lock(&dev_priv->sb_lock); |
1714 | 1796 | ||
1715 | /* Propagate soft reset to data lane reset */ | 1797 | /* Assert data lane reset */ |
1716 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); | 1798 | chv_data_lane_soft_reset(encoder, true); |
1717 | val |= CHV_PCS_REQ_SOFTRESET_EN; | ||
1718 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); | ||
1719 | |||
1720 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); | ||
1721 | val |= CHV_PCS_REQ_SOFTRESET_EN; | ||
1722 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); | ||
1723 | |||
1724 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); | ||
1725 | val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); | ||
1726 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); | ||
1727 | |||
1728 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); | ||
1729 | val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); | ||
1730 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); | ||
1731 | 1799 | ||
1732 | mutex_unlock(&dev_priv->sb_lock); | 1800 | mutex_unlock(&dev_priv->sb_lock); |
1733 | } | 1801 | } |
@@ -1758,23 +1826,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) | |||
1758 | val &= ~DPIO_LANEDESKEW_STRAP_OVRD; | 1826 | val &= ~DPIO_LANEDESKEW_STRAP_OVRD; |
1759 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); | 1827 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); |
1760 | 1828 | ||
1761 | /* Deassert soft data lane reset*/ | ||
1762 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); | ||
1763 | val |= CHV_PCS_REQ_SOFTRESET_EN; | ||
1764 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); | ||
1765 | |||
1766 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); | ||
1767 | val |= CHV_PCS_REQ_SOFTRESET_EN; | ||
1768 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); | ||
1769 | |||
1770 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); | ||
1771 | val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); | ||
1772 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); | ||
1773 | |||
1774 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); | ||
1775 | val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); | ||
1776 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); | ||
1777 | |||
1778 | /* Program Tx latency optimal setting */ | 1829 | /* Program Tx latency optimal setting */ |
1779 | for (i = 0; i < 4; i++) { | 1830 | for (i = 0; i < 4; i++) { |
1780 | /* Set the upar bit */ | 1831 | /* Set the upar bit */ |
@@ -1817,6 +1868,9 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) | |||
1817 | DPIO_TX1_STAGGER_MULT(7) | | 1868 | DPIO_TX1_STAGGER_MULT(7) | |
1818 | DPIO_TX2_STAGGER_MULT(5)); | 1869 | DPIO_TX2_STAGGER_MULT(5)); |
1819 | 1870 | ||
1871 | /* Deassert data lane reset */ | ||
1872 | chv_data_lane_soft_reset(encoder, false); | ||
1873 | |||
1820 | /* Clear calc init */ | 1874 | /* Clear calc init */ |
1821 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); | 1875 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); |
1822 | val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); | 1876 | val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); |
@@ -1851,31 +1905,33 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) | |||
1851 | 1905 | ||
1852 | for (i = 0; i < 4; i++) { | 1906 | for (i = 0; i < 4; i++) { |
1853 | val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); | 1907 | val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); |
1908 | |||
1854 | val &= ~DPIO_SWING_MARGIN000_MASK; | 1909 | val &= ~DPIO_SWING_MARGIN000_MASK; |
1855 | val |= 102 << DPIO_SWING_MARGIN000_SHIFT; | 1910 | val |= 102 << DPIO_SWING_MARGIN000_SHIFT; |
1911 | |||
1912 | /* | ||
1913 | * Supposedly this value shouldn't matter when unique transition | ||
1914 | * scale is disabled, but in fact it does matter. Let's just | ||
1915 | * always program the same value and hope it's OK. | ||
1916 | */ | ||
1917 | val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); | ||
1918 | val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; | ||
1919 | |||
1856 | vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); | 1920 | vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); |
1857 | } | 1921 | } |
1858 | 1922 | ||
1859 | /* Disable unique transition scale */ | 1923 | /* |
1924 | * The document said it needs to set bit 27 for ch0 and bit 26 | ||
1925 | * for ch1. Might be a typo in the doc. | ||
1926 | * For now, for this unique transition scale selection, set bit | ||
1927 | * 27 for ch0 and ch1. | ||
1928 | */ | ||
1860 | for (i = 0; i < 4; i++) { | 1929 | for (i = 0; i < 4; i++) { |
1861 | val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); | 1930 | val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); |
1862 | val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; | 1931 | val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; |
1863 | vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); | 1932 | vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); |
1864 | } | 1933 | } |
1865 | 1934 | ||
1866 | /* Additional steps for 1200mV-0dB */ | ||
1867 | #if 0 | ||
1868 | val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch)); | ||
1869 | if (ch) | ||
1870 | val |= DPIO_TX_UNIQ_TRANS_SCALE_CH1; | ||
1871 | else | ||
1872 | val |= DPIO_TX_UNIQ_TRANS_SCALE_CH0; | ||
1873 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val); | ||
1874 | |||
1875 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch), | ||
1876 | vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch)) | | ||
1877 | (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT)); | ||
1878 | #endif | ||
1879 | /* Start swing calculation */ | 1935 | /* Start swing calculation */ |
1880 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); | 1936 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); |
1881 | val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; | 1937 | val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; |
@@ -1899,6 +1955,12 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) | |||
1899 | g4x_enable_hdmi(encoder); | 1955 | g4x_enable_hdmi(encoder); |
1900 | 1956 | ||
1901 | vlv_wait_port_ready(dev_priv, dport, 0x0); | 1957 | vlv_wait_port_ready(dev_priv, dport, 0x0); |
1958 | |||
1959 | /* Second common lane will stay alive on its own now */ | ||
1960 | if (dport->release_cl2_override) { | ||
1961 | chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false); | ||
1962 | dport->release_cl2_override = false; | ||
1963 | } | ||
1902 | } | 1964 | } |
1903 | 1965 | ||
1904 | static void intel_hdmi_destroy(struct drm_connector *connector) | 1966 | static void intel_hdmi_destroy(struct drm_connector *connector) |
@@ -1974,7 +2036,14 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
1974 | intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT; | 2036 | intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT; |
1975 | else | 2037 | else |
1976 | intel_hdmi->ddc_bus = GMBUS_PIN_DPB; | 2038 | intel_hdmi->ddc_bus = GMBUS_PIN_DPB; |
1977 | intel_encoder->hpd_pin = HPD_PORT_B; | 2039 | /* |
2040 | * On BXT A0/A1, sw needs to activate DDIA HPD logic and | ||
2041 | * interrupts to check the external panel connection. | ||
2042 | */ | ||
2043 | if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)) | ||
2044 | intel_encoder->hpd_pin = HPD_PORT_A; | ||
2045 | else | ||
2046 | intel_encoder->hpd_pin = HPD_PORT_B; | ||
1978 | break; | 2047 | break; |
1979 | case PORT_C: | 2048 | case PORT_C: |
1980 | if (IS_BROXTON(dev_priv)) | 2049 | if (IS_BROXTON(dev_priv)) |
@@ -2051,6 +2120,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
2051 | 2120 | ||
2052 | intel_connector_attach_encoder(intel_connector, intel_encoder); | 2121 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
2053 | drm_connector_register(connector); | 2122 | drm_connector_register(connector); |
2123 | intel_hdmi->attached_connector = intel_connector; | ||
2054 | 2124 | ||
2055 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written | 2125 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written |
2056 | * 0xd. Failure to do so will result in spurious interrupts being | 2126 | * 0xd. Failure to do so will result in spurious interrupts being |
@@ -2097,6 +2167,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) | |||
2097 | intel_encoder->pre_enable = chv_hdmi_pre_enable; | 2167 | intel_encoder->pre_enable = chv_hdmi_pre_enable; |
2098 | intel_encoder->enable = vlv_enable_hdmi; | 2168 | intel_encoder->enable = vlv_enable_hdmi; |
2099 | intel_encoder->post_disable = chv_hdmi_post_disable; | 2169 | intel_encoder->post_disable = chv_hdmi_post_disable; |
2170 | intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable; | ||
2100 | } else if (IS_VALLEYVIEW(dev)) { | 2171 | } else if (IS_VALLEYVIEW(dev)) { |
2101 | intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable; | 2172 | intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable; |
2102 | intel_encoder->pre_enable = vlv_hdmi_pre_enable; | 2173 | intel_encoder->pre_enable = vlv_hdmi_pre_enable; |
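intel_hdmi_init() selects per-platform hooks at init time, and the CHV branch now also fills the new post_pll_disable slot. The pattern is a plain function-pointer table chosen once per platform; a reduced sketch, trimmed to the hooks this hunk touches (platform names and bodies are illustrative):

#include <stdio.h>

struct encoder_hooks {
	void (*pre_pll_enable)(void);
	void (*pre_enable)(void);
	void (*post_disable)(void);
	void (*post_pll_disable)(void);	/* new hook wired up for CHV */
};

static void chv_pre_pll_enable(void)   { puts("chv pre_pll_enable"); }
static void chv_pre_enable(void)       { puts("chv pre_enable"); }
static void chv_post_disable(void)     { puts("chv post_disable"); }
static void chv_post_pll_disable(void) { puts("chv post_pll_disable"); }

enum platform { PLATFORM_CHV, PLATFORM_VLV };

static void init_hooks(struct encoder_hooks *h, enum platform p)
{
	if (p == PLATFORM_CHV) {
		h->pre_pll_enable = chv_pre_pll_enable;
		h->pre_enable = chv_pre_enable;
		h->post_disable = chv_post_disable;
		h->post_pll_disable = chv_post_pll_disable;
	}
	/* ... other platforms fill their own hooks ... */
}

int main(void)
{
	struct encoder_hooks hooks = { 0 };

	init_hooks(&hooks, PLATFORM_CHV);
	if (hooks.post_pll_disable)
		hooks.post_pll_disable();
	return 0;
}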
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 72e0edd7bbde..28a712e7d2d0 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -196,13 +196,21 @@ | |||
196 | reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \ | 196 | reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \ |
197 | } | 197 | } |
198 | 198 | ||
199 | #define ASSIGN_CTX_PML4(ppgtt, reg_state) { \ | ||
200 | reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \ | ||
201 | reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \ | ||
202 | } | ||
203 | |||
199 | enum { | 204 | enum { |
200 | ADVANCED_CONTEXT = 0, | 205 | ADVANCED_CONTEXT = 0, |
201 | LEGACY_CONTEXT, | 206 | LEGACY_32B_CONTEXT, |
202 | ADVANCED_AD_CONTEXT, | 207 | ADVANCED_AD_CONTEXT, |
203 | LEGACY_64B_CONTEXT | 208 | LEGACY_64B_CONTEXT |
204 | }; | 209 | }; |
205 | #define GEN8_CTX_MODE_SHIFT 3 | 210 | #define GEN8_CTX_ADDRESSING_MODE_SHIFT 3 |
211 | #define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\ | ||
212 | LEGACY_64B_CONTEXT :\ | ||
213 | LEGACY_32B_CONTEXT) | ||
206 | enum { | 214 | enum { |
207 | FAULT_AND_HANG = 0, | 215 | FAULT_AND_HANG = 0, |
208 | FAULT_AND_HALT, /* Debug only */ | 216 | FAULT_AND_HALT, /* Debug only */ |
@@ -228,6 +236,12 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists | |||
228 | { | 236 | { |
229 | WARN_ON(i915.enable_ppgtt == -1); | 237 | WARN_ON(i915.enable_ppgtt == -1); |
230 | 238 | ||
239 | /* On platforms with execlist available, vGPU will only | ||
240 | * support execlist mode, no ring buffer mode. | ||
241 | */ | ||
242 | if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev)) | ||
243 | return 1; | ||
244 | |||
231 | if (INTEL_INFO(dev)->gen >= 9) | 245 | if (INTEL_INFO(dev)->gen >= 9) |
232 | return 1; | 246 | return 1; |
233 | 247 | ||
@@ -255,25 +269,27 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists | |||
255 | */ | 269 | */ |
256 | u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj) | 270 | u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj) |
257 | { | 271 | { |
258 | u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj); | 272 | u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) + |
273 | LRC_PPHWSP_PN * PAGE_SIZE; | ||
259 | 274 | ||
260 | /* LRCA is required to be 4K aligned so the more significant 20 bits | 275 | /* LRCA is required to be 4K aligned so the more significant 20 bits |
261 | * are globally unique */ | 276 | * are globally unique */ |
262 | return lrca >> 12; | 277 | return lrca >> 12; |
263 | } | 278 | } |
264 | 279 | ||
265 | static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq) | 280 | uint64_t intel_lr_context_descriptor(struct intel_context *ctx, |
281 | struct intel_engine_cs *ring) | ||
266 | { | 282 | { |
267 | struct intel_engine_cs *ring = rq->ring; | ||
268 | struct drm_device *dev = ring->dev; | 283 | struct drm_device *dev = ring->dev; |
269 | struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; | 284 | struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; |
270 | uint64_t desc; | 285 | uint64_t desc; |
271 | uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj); | 286 | uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) + |
287 | LRC_PPHWSP_PN * PAGE_SIZE; | ||
272 | 288 | ||
273 | WARN_ON(lrca & 0xFFFFFFFF00000FFFULL); | 289 | WARN_ON(lrca & 0xFFFFFFFF00000FFFULL); |
274 | 290 | ||
275 | desc = GEN8_CTX_VALID; | 291 | desc = GEN8_CTX_VALID; |
276 | desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT; | 292 | desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT; |
277 | if (IS_GEN8(ctx_obj->base.dev)) | 293 | if (IS_GEN8(ctx_obj->base.dev)) |
278 | desc |= GEN8_CTX_L3LLC_COHERENT; | 294 | desc |= GEN8_CTX_L3LLC_COHERENT; |
279 | desc |= GEN8_CTX_PRIVILEGE; | 295 | desc |= GEN8_CTX_PRIVILEGE; |
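intel_lr_context_descriptor() now skips past the GuC shared page when computing the LRCA and picks the addressing mode from the PPGTT configuration. The arithmetic is easy to check standalone; the descriptor bit values below are placeholders (the diff does not spell out GEN8_CTX_VALID and friends), while PAGE_SIZE, LRC_PPHWSP_PN and the >> 12 context ID come straight from the hunks above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define LRC_PPHWSP_PN	1		/* page index of the PPHWSP */

/* Placeholder encodings; the real GEN8 descriptor bits differ. */
#define CTX_VALID		(1ULL << 0)
#define CTX_ADDRESSING_SHIFT	3
#define LEGACY_32B_CONTEXT	1ULL
#define LEGACY_64B_CONTEXT	3ULL

static uint64_t lr_context_descriptor(uint64_t ctx_ggtt_offset, bool full_48bit)
{
	/* The context image starts after the PPHWSP page. */
	uint64_t lrca = ctx_ggtt_offset + LRC_PPHWSP_PN * PAGE_SIZE;
	uint64_t desc = CTX_VALID;

	/* LRCA is 4K aligned, so its upper 20 bits are globally unique. */
	desc |= (full_48bit ? LEGACY_64B_CONTEXT : LEGACY_32B_CONTEXT)
		<< CTX_ADDRESSING_SHIFT;
	desc |= lrca;			/* address of the context image */
	desc |= (lrca >> 12) << 32;	/* context ID in the high dword */
	return desc;
}

int main(void)
{
	printf("desc = %#llx\n",
	       (unsigned long long)lr_context_descriptor(0x00100000, false));
	return 0;
}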
@@ -304,13 +320,13 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0, | |||
304 | uint64_t desc[2]; | 320 | uint64_t desc[2]; |
305 | 321 | ||
306 | if (rq1) { | 322 | if (rq1) { |
307 | desc[1] = execlists_ctx_descriptor(rq1); | 323 | desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring); |
308 | rq1->elsp_submitted++; | 324 | rq1->elsp_submitted++; |
309 | } else { | 325 | } else { |
310 | desc[1] = 0; | 326 | desc[1] = 0; |
311 | } | 327 | } |
312 | 328 | ||
313 | desc[0] = execlists_ctx_descriptor(rq0); | 329 | desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring); |
314 | rq0->elsp_submitted++; | 330 | rq0->elsp_submitted++; |
315 | 331 | ||
316 | /* You must always write both descriptors in the order below. */ | 332 | /* You must always write both descriptors in the order below. */ |
@@ -342,16 +358,18 @@ static int execlists_update_context(struct drm_i915_gem_request *rq) | |||
342 | WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); | 358 | WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); |
343 | WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); | 359 | WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); |
344 | 360 | ||
345 | page = i915_gem_object_get_page(ctx_obj, 1); | 361 | page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); |
346 | reg_state = kmap_atomic(page); | 362 | reg_state = kmap_atomic(page); |
347 | 363 | ||
348 | reg_state[CTX_RING_TAIL+1] = rq->tail; | 364 | reg_state[CTX_RING_TAIL+1] = rq->tail; |
349 | reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj); | 365 | reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj); |
350 | 366 | ||
351 | /* True PPGTT with dynamic page allocation: update PDP registers and | 367 | if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { |
352 | * point the unallocated PDPs to the scratch page | 368 | /* True 32b PPGTT with dynamic page allocation: update PDP |
353 | */ | 369 | * registers and point the unallocated PDPs to scratch page. |
354 | if (ppgtt) { | 370 | * PML4 is allocated during ppgtt init, so this is not needed |
371 | * in 48-bit mode. | ||
372 | */ | ||
355 | ASSIGN_CTX_PDP(ppgtt, reg_state, 3); | 373 | ASSIGN_CTX_PDP(ppgtt, reg_state, 3); |
356 | ASSIGN_CTX_PDP(ppgtt, reg_state, 2); | 374 | ASSIGN_CTX_PDP(ppgtt, reg_state, 2); |
357 | ASSIGN_CTX_PDP(ppgtt, reg_state, 1); | 375 | ASSIGN_CTX_PDP(ppgtt, reg_state, 1); |
@@ -538,8 +556,6 @@ static int execlists_context_queue(struct drm_i915_gem_request *request) | |||
538 | 556 | ||
539 | i915_gem_request_reference(request); | 557 | i915_gem_request_reference(request); |
540 | 558 | ||
541 | request->tail = request->ringbuf->tail; | ||
542 | |||
543 | spin_lock_irq(&ring->execlist_lock); | 559 | spin_lock_irq(&ring->execlist_lock); |
544 | 560 | ||
545 | list_for_each_entry(cursor, &ring->execlist_queue, execlist_link) | 561 | list_for_each_entry(cursor, &ring->execlist_queue, execlist_link) |
@@ -692,13 +708,19 @@ static void | |||
692 | intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) | 708 | intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) |
693 | { | 709 | { |
694 | struct intel_engine_cs *ring = request->ring; | 710 | struct intel_engine_cs *ring = request->ring; |
711 | struct drm_i915_private *dev_priv = request->i915; | ||
695 | 712 | ||
696 | intel_logical_ring_advance(request->ringbuf); | 713 | intel_logical_ring_advance(request->ringbuf); |
697 | 714 | ||
715 | request->tail = request->ringbuf->tail; | ||
716 | |||
698 | if (intel_ring_stopped(ring)) | 717 | if (intel_ring_stopped(ring)) |
699 | return; | 718 | return; |
700 | 719 | ||
701 | execlists_context_queue(request); | 720 | if (dev_priv->guc.execbuf_client) |
721 | i915_guc_submit(dev_priv->guc.execbuf_client, request); | ||
722 | else | ||
723 | execlists_context_queue(request); | ||
702 | } | 724 | } |
703 | 725 | ||
704 | static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) | 726 | static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) |
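intel_logical_ring_advance_and_submit() now samples request->tail only after the final commands are written (it used to be sampled earlier, in execlists_context_queue()), and then routes the request to the GuC when an execbuf client exists. A sketch of that ordering and dispatch, with hypothetical stand-in types and stubs for i915_guc_submit()/execlists_context_queue():

#include <stdio.h>

struct ringbuf { unsigned int tail; };
struct request { struct ringbuf *rb; unsigned int tail; };

static void *guc_client;	/* non-NULL when GuC submission is enabled */

static void ring_advance(struct ringbuf *rb)     { rb->tail += 8; }
static void guc_submit(struct request *rq)       { printf("guc tail=%u\n", rq->tail); }
static void execlists_queue(struct request *rq)  { printf("elsp tail=%u\n", rq->tail); }

static void advance_and_submit(struct request *rq)
{
	ring_advance(rq->rb);

	/* Sample the tail only after the last command is emitted, so
	 * both submission backends see the complete request. */
	rq->tail = rq->rb->tail;

	if (guc_client)
		guc_submit(rq);
	else
		execlists_queue(rq);
}

int main(void)
{
	struct ringbuf rb = { 0 };
	struct request rq = { &rb, 0 };

	advance_and_submit(&rq);	/* no GuC client: ELSP path */
	return 0;
}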
@@ -988,6 +1010,7 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) | |||
988 | 1010 | ||
989 | static int intel_lr_context_pin(struct drm_i915_gem_request *rq) | 1011 | static int intel_lr_context_pin(struct drm_i915_gem_request *rq) |
990 | { | 1012 | { |
1013 | struct drm_i915_private *dev_priv = rq->i915; | ||
991 | struct intel_engine_cs *ring = rq->ring; | 1014 | struct intel_engine_cs *ring = rq->ring; |
992 | struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; | 1015 | struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; |
993 | struct intel_ringbuffer *ringbuf = rq->ringbuf; | 1016 | struct intel_ringbuffer *ringbuf = rq->ringbuf; |
@@ -995,8 +1018,8 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq) | |||
995 | 1018 | ||
996 | WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); | 1019 | WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); |
997 | if (rq->ctx->engine[ring->id].pin_count++ == 0) { | 1020 | if (rq->ctx->engine[ring->id].pin_count++ == 0) { |
998 | ret = i915_gem_obj_ggtt_pin(ctx_obj, | 1021 | ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, |
999 | GEN8_LR_CONTEXT_ALIGN, 0); | 1022 | PIN_OFFSET_BIAS | GUC_WOPCM_TOP); |
1000 | if (ret) | 1023 | if (ret) |
1001 | goto reset_pin_count; | 1024 | goto reset_pin_count; |
1002 | 1025 | ||
@@ -1005,6 +1028,10 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq) | |||
1005 | goto unpin_ctx_obj; | 1028 | goto unpin_ctx_obj; |
1006 | 1029 | ||
1007 | ctx_obj->dirty = true; | 1030 | ctx_obj->dirty = true; |
1031 | |||
1032 | /* Invalidate GuC TLB. */ | ||
1033 | if (i915.enable_guc_submission) | ||
1034 | I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); | ||
1008 | } | 1035 | } |
1009 | 1036 | ||
1010 | return ret; | 1037 | return ret; |
@@ -1111,7 +1138,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring, | |||
1111 | if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0) | 1138 | if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0) |
1112 | l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; | 1139 | l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; |
1113 | 1140 | ||
1114 | wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8(1) | | 1141 | wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | |
1115 | MI_SRM_LRM_GLOBAL_GTT)); | 1142 | MI_SRM_LRM_GLOBAL_GTT)); |
1116 | wa_ctx_emit(batch, index, GEN8_L3SQCREG4); | 1143 | wa_ctx_emit(batch, index, GEN8_L3SQCREG4); |
1117 | wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); | 1144 | wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); |
@@ -1129,7 +1156,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring, | |||
1129 | wa_ctx_emit(batch, index, 0); | 1156 | wa_ctx_emit(batch, index, 0); |
1130 | wa_ctx_emit(batch, index, 0); | 1157 | wa_ctx_emit(batch, index, 0); |
1131 | 1158 | ||
1132 | wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8(1) | | 1159 | wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 | |
1133 | MI_SRM_LRM_GLOBAL_GTT)); | 1160 | MI_SRM_LRM_GLOBAL_GTT)); |
1134 | wa_ctx_emit(batch, index, GEN8_L3SQCREG4); | 1161 | wa_ctx_emit(batch, index, GEN8_L3SQCREG4); |
1135 | wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); | 1162 | wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); |
@@ -1517,12 +1544,16 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, | |||
1517 | * Ideally, we should set Force PD Restore in ctx descriptor, | 1544 | * Ideally, we should set Force PD Restore in ctx descriptor, |
1518 | * but we can't. Force Restore would be a second option, but | 1545 | * but we can't. Force Restore would be a second option, but |
1519 | * it is unsafe in case of lite-restore (because the ctx is | 1546 | * it is unsafe in case of lite-restore (because the ctx is |
1520 | * not idle). */ | 1547 | * not idle). PML4 is allocated during ppgtt init so this is |
1548 | * not needed in 48-bit. */ | ||
1521 | if (req->ctx->ppgtt && | 1549 | if (req->ctx->ppgtt && |
1522 | (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) { | 1550 | (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) { |
1523 | ret = intel_logical_ring_emit_pdps(req); | 1551 | if (!USES_FULL_48BIT_PPGTT(req->i915) && |
1524 | if (ret) | 1552 | !intel_vgpu_active(req->i915->dev)) { |
1525 | return ret; | 1553 | ret = intel_logical_ring_emit_pdps(req); |
1554 | if (ret) | ||
1555 | return ret; | ||
1556 | } | ||
1526 | 1557 | ||
1527 | req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring); | 1558 | req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring); |
1528 | } | 1559 | } |
@@ -1688,6 +1719,34 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno) | |||
1688 | intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); | 1719 | intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); |
1689 | } | 1720 | } |
1690 | 1721 | ||
1722 | static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) | ||
1723 | { | ||
1724 | |||
1725 | /* | ||
1726 | * On BXT A steppings there is a HW coherency issue whereby the | ||
1727 | * MI_STORE_DATA_IMM storing the completed request's seqno | ||
1728 | * occasionally doesn't invalidate the CPU cache. Work around this by | ||
1729 | * clflushing the corresponding cacheline whenever the caller wants | ||
1730 | * the coherency to be guaranteed. Note that this cacheline is known | ||
1731 | * to be clean at this point, since we only write it in | ||
1732 | * bxt_a_set_seqno(), where we also do a clflush after the write. So | ||
1733 | * this clflush in practice becomes an invalidate operation. | ||
1734 | */ | ||
1735 | |||
1736 | if (!lazy_coherency) | ||
1737 | intel_flush_status_page(ring, I915_GEM_HWS_INDEX); | ||
1738 | |||
1739 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | ||
1740 | } | ||
1741 | |||
1742 | static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno) | ||
1743 | { | ||
1744 | intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); | ||
1745 | |||
1746 | /* See bxt_a_get_seqno() explaining the reason for the clflush. */ | ||
1747 | intel_flush_status_page(ring, I915_GEM_HWS_INDEX); | ||
1748 | } | ||
1749 | |||
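The BXT A-stepping workaround above pairs every seqno write with a clflush, so the pre-read clflush of a known-clean line acts as an invalidate. The protocol itself is hardware-independent; here is a user-space sketch with a no-op stand-in for the cache flush (the kernel uses intel_flush_status_page(), which this does not reproduce):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HWS_INDEX 48	/* illustrative slot in the status page */

/* Stand-in for the real cacheline flush; a no-op in this sketch. */
static void clflush_line(volatile void *p) { (void)p; }

static void set_seqno(volatile uint32_t *hws, uint32_t seqno)
{
	hws[HWS_INDEX] = seqno;
	clflush_line(&hws[HWS_INDEX]);	/* push the write out of the cache */
}

static uint32_t get_seqno(volatile uint32_t *hws, bool lazy_coherency)
{
	/* The line is clean here (set_seqno() flushed it), so this
	 * clflush acts as an invalidate before the coherent read. */
	if (!lazy_coherency)
		clflush_line(&hws[HWS_INDEX]);

	return hws[HWS_INDEX];
}

int main(void)
{
	static volatile uint32_t status_page[1024];

	set_seqno(status_page, 42);
	printf("seqno = %u\n", get_seqno(status_page, false));
	return 0;
}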
1691 | static int gen8_emit_request(struct drm_i915_gem_request *request) | 1750 | static int gen8_emit_request(struct drm_i915_gem_request *request) |
1692 | { | 1751 | { |
1693 | struct intel_ringbuffer *ringbuf = request->ringbuf; | 1752 | struct intel_ringbuffer *ringbuf = request->ringbuf; |
@@ -1857,8 +1916,13 @@ static int logical_render_ring_init(struct drm_device *dev) | |||
1857 | ring->init_hw = gen8_init_render_ring; | 1916 | ring->init_hw = gen8_init_render_ring; |
1858 | ring->init_context = gen8_init_rcs_context; | 1917 | ring->init_context = gen8_init_rcs_context; |
1859 | ring->cleanup = intel_fini_pipe_control; | 1918 | ring->cleanup = intel_fini_pipe_control; |
1860 | ring->get_seqno = gen8_get_seqno; | 1919 | if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { |
1861 | ring->set_seqno = gen8_set_seqno; | 1920 | ring->get_seqno = bxt_a_get_seqno; |
1921 | ring->set_seqno = bxt_a_set_seqno; | ||
1922 | } else { | ||
1923 | ring->get_seqno = gen8_get_seqno; | ||
1924 | ring->set_seqno = gen8_set_seqno; | ||
1925 | } | ||
1862 | ring->emit_request = gen8_emit_request; | 1926 | ring->emit_request = gen8_emit_request; |
1863 | ring->emit_flush = gen8_emit_flush_render; | 1927 | ring->emit_flush = gen8_emit_flush_render; |
1864 | ring->irq_get = gen8_logical_ring_get_irq; | 1928 | ring->irq_get = gen8_logical_ring_get_irq; |
@@ -1904,8 +1968,13 @@ static int logical_bsd_ring_init(struct drm_device *dev) | |||
1904 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; | 1968 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; |
1905 | 1969 | ||
1906 | ring->init_hw = gen8_init_common_ring; | 1970 | ring->init_hw = gen8_init_common_ring; |
1907 | ring->get_seqno = gen8_get_seqno; | 1971 | if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { |
1908 | ring->set_seqno = gen8_set_seqno; | 1972 | ring->get_seqno = bxt_a_get_seqno; |
1973 | ring->set_seqno = bxt_a_set_seqno; | ||
1974 | } else { | ||
1975 | ring->get_seqno = gen8_get_seqno; | ||
1976 | ring->set_seqno = gen8_set_seqno; | ||
1977 | } | ||
1909 | ring->emit_request = gen8_emit_request; | 1978 | ring->emit_request = gen8_emit_request; |
1910 | ring->emit_flush = gen8_emit_flush; | 1979 | ring->emit_flush = gen8_emit_flush; |
1911 | ring->irq_get = gen8_logical_ring_get_irq; | 1980 | ring->irq_get = gen8_logical_ring_get_irq; |
@@ -1954,8 +2023,13 @@ static int logical_blt_ring_init(struct drm_device *dev) | |||
1954 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT; | 2023 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT; |
1955 | 2024 | ||
1956 | ring->init_hw = gen8_init_common_ring; | 2025 | ring->init_hw = gen8_init_common_ring; |
1957 | ring->get_seqno = gen8_get_seqno; | 2026 | if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { |
1958 | ring->set_seqno = gen8_set_seqno; | 2027 | ring->get_seqno = bxt_a_get_seqno; |
2028 | ring->set_seqno = bxt_a_set_seqno; | ||
2029 | } else { | ||
2030 | ring->get_seqno = gen8_get_seqno; | ||
2031 | ring->set_seqno = gen8_set_seqno; | ||
2032 | } | ||
1959 | ring->emit_request = gen8_emit_request; | 2033 | ring->emit_request = gen8_emit_request; |
1960 | ring->emit_flush = gen8_emit_flush; | 2034 | ring->emit_flush = gen8_emit_flush; |
1961 | ring->irq_get = gen8_logical_ring_get_irq; | 2035 | ring->irq_get = gen8_logical_ring_get_irq; |
@@ -1979,8 +2053,13 @@ static int logical_vebox_ring_init(struct drm_device *dev) | |||
1979 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT; | 2053 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT; |
1980 | 2054 | ||
1981 | ring->init_hw = gen8_init_common_ring; | 2055 | ring->init_hw = gen8_init_common_ring; |
1982 | ring->get_seqno = gen8_get_seqno; | 2056 | if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { |
1983 | ring->set_seqno = gen8_set_seqno; | 2057 | ring->get_seqno = bxt_a_get_seqno; |
2058 | ring->set_seqno = bxt_a_set_seqno; | ||
2059 | } else { | ||
2060 | ring->get_seqno = gen8_get_seqno; | ||
2061 | ring->set_seqno = gen8_set_seqno; | ||
2062 | } | ||
1984 | ring->emit_request = gen8_emit_request; | 2063 | ring->emit_request = gen8_emit_request; |
1985 | ring->emit_flush = gen8_emit_flush; | 2064 | ring->emit_flush = gen8_emit_flush; |
1986 | ring->irq_get = gen8_logical_ring_get_irq; | 2065 | ring->irq_get = gen8_logical_ring_get_irq; |
@@ -2126,7 +2205,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o | |||
2126 | 2205 | ||
2127 | /* The second page of the context object contains some fields which must | 2206 | /* The second page of the context object contains some fields which must |
2128 | * be set up prior to the first execution. */ | 2207 | * be set up prior to the first execution. */ |
2129 | page = i915_gem_object_get_page(ctx_obj, 1); | 2208 | page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); |
2130 | reg_state = kmap_atomic(page); | 2209 | reg_state = kmap_atomic(page); |
2131 | 2210 | ||
2132 | /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM | 2211 | /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM |
@@ -2203,13 +2282,24 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o | |||
2203 | reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0); | 2282 | reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0); |
2204 | reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0); | 2283 | reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0); |
2205 | 2284 | ||
2206 | /* With dynamic page allocation, PDPs may not be allocated at this point, | 2285 | if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { |
2207 | * Point the unallocated PDPs to the scratch page | 2286 | /* 64b PPGTT (48bit canonical) |
2208 | */ | 2287 | * PDP0_DESCRIPTOR contains the base address to PML4 and |
2209 | ASSIGN_CTX_PDP(ppgtt, reg_state, 3); | 2288 | * other PDP Descriptors are ignored. |
2210 | ASSIGN_CTX_PDP(ppgtt, reg_state, 2); | 2289 | */ |
2211 | ASSIGN_CTX_PDP(ppgtt, reg_state, 1); | 2290 | ASSIGN_CTX_PML4(ppgtt, reg_state); |
2212 | ASSIGN_CTX_PDP(ppgtt, reg_state, 0); | 2291 | } else { |
2292 | /* 32b PPGTT | ||
2293 | * PDP*_DESCRIPTOR contains the base address of space supported. | ||
2294 | * With dynamic page allocation, PDPs may not be allocated at | ||
2295 | * this point. Point the unallocated PDPs to the scratch page | ||
2296 | */ | ||
2297 | ASSIGN_CTX_PDP(ppgtt, reg_state, 3); | ||
2298 | ASSIGN_CTX_PDP(ppgtt, reg_state, 2); | ||
2299 | ASSIGN_CTX_PDP(ppgtt, reg_state, 1); | ||
2300 | ASSIGN_CTX_PDP(ppgtt, reg_state, 0); | ||
2301 | } | ||
2302 | |||
2213 | if (ring->id == RCS) { | 2303 | if (ring->id == RCS) { |
2214 | reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); | 2304 | reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); |
2215 | reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE; | 2305 | reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE; |
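populate_lr_context() now branches on the PPGTT depth: in 48-bit mode only PDP0 carries the PML4 base, while in 32-bit mode all four PDP descriptor slots are written, with unallocated ones pointed at the scratch page. A standalone sketch of that split; the reg_state slot indices and addresses are illustrative, not the real context image layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative reg_state slots; the real context layout differs. */
#define CTX_PDP0_LDW	32
#define CTX_PDP0_UDW	33
#define SLOT_STRIDE	2	/* each PDP takes an LDW/UDW pair */

static void assign_pdp(uint32_t *reg_state, int n, uint64_t addr)
{
	reg_state[CTX_PDP0_LDW + n * SLOT_STRIDE] = (uint32_t)addr;
	reg_state[CTX_PDP0_UDW + n * SLOT_STRIDE] = (uint32_t)(addr >> 32);
}

static void setup_ppgtt_regs(uint32_t *reg_state, bool full_48bit,
			     uint64_t pml4_addr, const uint64_t pdp[4],
			     uint64_t scratch)
{
	if (full_48bit) {
		/* PDP0 holds the PML4 base; the rest are ignored. */
		assign_pdp(reg_state, 0, pml4_addr);
	} else {
		/* Unallocated PDPs must point at the scratch page. */
		for (int n = 3; n >= 0; n--)
			assign_pdp(reg_state, n, pdp[n] ? pdp[n] : scratch);
	}
}

int main(void)
{
	uint32_t reg_state[64] = { 0 };
	const uint64_t pdp[4] = { 0x1000, 0, 0, 0 };

	setup_ppgtt_regs(reg_state, false, 0, pdp, 0xf000);
	printf("PDP0 LDW = %#x\n", reg_state[CTX_PDP0_LDW]);
	return 0;
}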
@@ -2250,8 +2340,7 @@ void intel_lr_context_free(struct intel_context *ctx) | |||
2250 | i915_gem_object_ggtt_unpin(ctx_obj); | 2340 | i915_gem_object_ggtt_unpin(ctx_obj); |
2251 | } | 2341 | } |
2252 | WARN_ON(ctx->engine[ring->id].pin_count); | 2342 | WARN_ON(ctx->engine[ring->id].pin_count); |
2253 | intel_destroy_ringbuffer_obj(ringbuf); | 2343 | intel_ringbuffer_free(ringbuf); |
2254 | kfree(ringbuf); | ||
2255 | drm_gem_object_unreference(&ctx_obj->base); | 2344 | drm_gem_object_unreference(&ctx_obj->base); |
2256 | } | 2345 | } |
2257 | } | 2346 | } |
@@ -2285,12 +2374,13 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, | |||
2285 | struct drm_i915_gem_object *default_ctx_obj) | 2374 | struct drm_i915_gem_object *default_ctx_obj) |
2286 | { | 2375 | { |
2287 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 2376 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
2377 | struct page *page; | ||
2288 | 2378 | ||
2289 | /* The status page is offset 0 from the default context object | 2379 | /* The HWSP is part of the default context object in LRC mode. */ |
2290 | * in LRC mode. */ | 2380 | ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj) |
2291 | ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj); | 2381 | + LRC_PPHWSP_PN * PAGE_SIZE; |
2292 | ring->status_page.page_addr = | 2382 | page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN); |
2293 | kmap(sg_page(default_ctx_obj->pages->sgl)); | 2383 | ring->status_page.page_addr = kmap(page); |
2294 | ring->status_page.obj = default_ctx_obj; | 2384 | ring->status_page.obj = default_ctx_obj; |
2295 | 2385 | ||
2296 | I915_WRITE(RING_HWS_PGA(ring->mmio_base), | 2386 | I915_WRITE(RING_HWS_PGA(ring->mmio_base), |
@@ -2316,6 +2406,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, | |||
2316 | { | 2406 | { |
2317 | const bool is_global_default_ctx = (ctx == ring->default_context); | 2407 | const bool is_global_default_ctx = (ctx == ring->default_context); |
2318 | struct drm_device *dev = ring->dev; | 2408 | struct drm_device *dev = ring->dev; |
2409 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2319 | struct drm_i915_gem_object *ctx_obj; | 2410 | struct drm_i915_gem_object *ctx_obj; |
2320 | uint32_t context_size; | 2411 | uint32_t context_size; |
2321 | struct intel_ringbuffer *ringbuf; | 2412 | struct intel_ringbuffer *ringbuf; |
@@ -2326,6 +2417,9 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, | |||
2326 | 2417 | ||
2327 | context_size = round_up(get_lr_context_size(ring), 4096); | 2418 | context_size = round_up(get_lr_context_size(ring), 4096); |
2328 | 2419 | ||
2420 | /* One extra page is added for data shared between the driver and GuC */ | ||
2421 | context_size += PAGE_SIZE * LRC_PPHWSP_PN; | ||
2422 | |||
2329 | ctx_obj = i915_gem_alloc_object(dev, context_size); | 2423 | ctx_obj = i915_gem_alloc_object(dev, context_size); |
2330 | if (!ctx_obj) { | 2424 | if (!ctx_obj) { |
2331 | DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); | 2425 | DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); |
@@ -2333,51 +2427,34 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, | |||
2333 | } | 2427 | } |
2334 | 2428 | ||
2335 | if (is_global_default_ctx) { | 2429 | if (is_global_default_ctx) { |
2336 | ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0); | 2430 | ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, |
2431 | PIN_OFFSET_BIAS | GUC_WOPCM_TOP); | ||
2337 | if (ret) { | 2432 | if (ret) { |
2338 | DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", | 2433 | DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", |
2339 | ret); | 2434 | ret); |
2340 | drm_gem_object_unreference(&ctx_obj->base); | 2435 | drm_gem_object_unreference(&ctx_obj->base); |
2341 | return ret; | 2436 | return ret; |
2342 | } | 2437 | } |
2438 | |||
2439 | /* Invalidate GuC TLB. */ | ||
2440 | if (i915.enable_guc_submission) | ||
2441 | I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); | ||
2343 | } | 2442 | } |
2344 | 2443 | ||
2345 | ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); | 2444 | ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE); |
2346 | if (!ringbuf) { | 2445 | if (IS_ERR(ringbuf)) { |
2347 | DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n", | 2446 | ret = PTR_ERR(ringbuf); |
2348 | ring->name); | ||
2349 | ret = -ENOMEM; | ||
2350 | goto error_unpin_ctx; | 2447 | goto error_unpin_ctx; |
2351 | } | 2448 | } |
2352 | 2449 | ||
2353 | ringbuf->ring = ring; | 2450 | if (is_global_default_ctx) { |
2354 | 2451 | ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); | |
2355 | ringbuf->size = 32 * PAGE_SIZE; | ||
2356 | ringbuf->effective_size = ringbuf->size; | ||
2357 | ringbuf->head = 0; | ||
2358 | ringbuf->tail = 0; | ||
2359 | ringbuf->last_retired_head = -1; | ||
2360 | intel_ring_update_space(ringbuf); | ||
2361 | |||
2362 | if (ringbuf->obj == NULL) { | ||
2363 | ret = intel_alloc_ringbuffer_obj(dev, ringbuf); | ||
2364 | if (ret) { | 2452 | if (ret) { |
2365 | DRM_DEBUG_DRIVER( | 2453 | DRM_ERROR( |
2366 | "Failed to allocate ringbuffer obj %s: %d\n", | 2454 | "Failed to pin and map ringbuffer %s: %d\n", |
2367 | ring->name, ret); | 2455 | ring->name, ret); |
2368 | goto error_free_rbuf; | 2456 | goto error_ringbuf; |
2369 | } | 2457 | } |
2370 | |||
2371 | if (is_global_default_ctx) { | ||
2372 | ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); | ||
2373 | if (ret) { | ||
2374 | DRM_ERROR( | ||
2375 | "Failed to pin and map ringbuffer %s: %d\n", | ||
2376 | ring->name, ret); | ||
2377 | goto error_destroy_rbuf; | ||
2378 | } | ||
2379 | } | ||
2380 | |||
2381 | } | 2458 | } |
2382 | 2459 | ||
2383 | ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); | 2460 | ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); |
@@ -2419,10 +2496,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, | |||
2419 | error: | 2496 | error: |
2420 | if (is_global_default_ctx) | 2497 | if (is_global_default_ctx) |
2421 | intel_unpin_ringbuffer_obj(ringbuf); | 2498 | intel_unpin_ringbuffer_obj(ringbuf); |
2422 | error_destroy_rbuf: | 2499 | error_ringbuf: |
2423 | intel_destroy_ringbuffer_obj(ringbuf); | 2500 | intel_ringbuffer_free(ringbuf); |
2424 | error_free_rbuf: | ||
2425 | kfree(ringbuf); | ||
2426 | error_unpin_ctx: | 2501 | error_unpin_ctx: |
2427 | if (is_global_default_ctx) | 2502 | if (is_global_default_ctx) |
2428 | i915_gem_object_ggtt_unpin(ctx_obj); | 2503 | i915_gem_object_ggtt_unpin(ctx_obj); |
@@ -2452,7 +2527,7 @@ void intel_lr_context_reset(struct drm_device *dev, | |||
2452 | WARN(1, "Failed get_pages for context obj\n"); | 2527 | WARN(1, "Failed get_pages for context obj\n"); |
2453 | continue; | 2528 | continue; |
2454 | } | 2529 | } |
2455 | page = i915_gem_object_get_page(ctx_obj, 1); | 2530 | page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); |
2456 | reg_state = kmap_atomic(page); | 2531 | reg_state = kmap_atomic(page); |
2457 | 2532 | ||
2458 | reg_state[CTX_RING_HEAD+1] = 0; | 2533 | reg_state[CTX_RING_HEAD+1] = 0; |
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 64f89f9982a2..4cc54b371afd 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h | |||
@@ -68,12 +68,20 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf, | |||
68 | } | 68 | } |
69 | 69 | ||
70 | /* Logical Ring Contexts */ | 70 | /* Logical Ring Contexts */ |
71 | |||
72 | /* One extra page is added before LRC for GuC as shared data */ | ||
73 | #define LRC_GUCSHR_PN (0) | ||
74 | #define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) | ||
75 | #define LRC_STATE_PN (LRC_PPHWSP_PN + 1) | ||
76 | |||
71 | void intel_lr_context_free(struct intel_context *ctx); | 77 | void intel_lr_context_free(struct intel_context *ctx); |
72 | int intel_lr_context_deferred_create(struct intel_context *ctx, | 78 | int intel_lr_context_deferred_create(struct intel_context *ctx, |
73 | struct intel_engine_cs *ring); | 79 | struct intel_engine_cs *ring); |
74 | void intel_lr_context_unpin(struct drm_i915_gem_request *req); | 80 | void intel_lr_context_unpin(struct drm_i915_gem_request *req); |
75 | void intel_lr_context_reset(struct drm_device *dev, | 81 | void intel_lr_context_reset(struct drm_device *dev, |
76 | struct intel_context *ctx); | 82 | struct intel_context *ctx); |
83 | uint64_t intel_lr_context_descriptor(struct intel_context *ctx, | ||
84 | struct intel_engine_cs *ring); | ||
77 | 85 | ||
78 | /* Execlists */ | 86 | /* Execlists */ |
79 | int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); | 87 | int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); |
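The new page-number constants fix the layout of an LRC object: page 0 is shared with the GuC, page 1 is the per-process HWSP, and the register state starts at page 2. That is why deferred context creation grows the allocation by LRC_PPHWSP_PN pages and why the HWSP address gains a page offset. A standalone check of the arithmetic (round_up() is a local helper; the example context size and GGTT offset are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define LRC_GUCSHR_PN	0			/* GuC shared data page */
#define LRC_PPHWSP_PN	(LRC_GUCSHR_PN + 1)	/* per-process HWSP */
#define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)	/* register state */

static uint32_t round_up(uint32_t x, uint32_t a)
{
	return (x + a - 1) / a * a;
}

int main(void)
{
	uint32_t hw_ctx_size = 18 * PAGE_SIZE + 123;	/* arbitrary example */
	uint32_t context_size = round_up(hw_ctx_size, PAGE_SIZE);
	uint64_t ggtt_base = 0x00200000;		/* example GGTT offset */

	/* One extra page in front of the LRC for the GuC shared data. */
	context_size += PAGE_SIZE * LRC_PPHWSP_PN;

	printf("context object: %u pages\n", context_size / PAGE_SIZE);
	printf("HWSP gfx addr : %#llx\n",
	       (unsigned long long)(ggtt_base + LRC_PPHWSP_PN * PAGE_SIZE));
	printf("state page    : index %d\n", LRC_STATE_PN);
	return 0;
}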
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 881b5d13592e..2c2d1f0737c8 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -289,11 +289,14 @@ intel_lvds_mode_valid(struct drm_connector *connector, | |||
289 | { | 289 | { |
290 | struct intel_connector *intel_connector = to_intel_connector(connector); | 290 | struct intel_connector *intel_connector = to_intel_connector(connector); |
291 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; | 291 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; |
292 | int max_pixclk = to_i915(connector->dev)->max_dotclk_freq; | ||
292 | 293 | ||
293 | if (mode->hdisplay > fixed_mode->hdisplay) | 294 | if (mode->hdisplay > fixed_mode->hdisplay) |
294 | return MODE_PANEL; | 295 | return MODE_PANEL; |
295 | if (mode->vdisplay > fixed_mode->vdisplay) | 296 | if (mode->vdisplay > fixed_mode->vdisplay) |
296 | return MODE_PANEL; | 297 | return MODE_PANEL; |
298 | if (fixed_mode->clock > max_pixclk) | ||
299 | return MODE_CLOCK_HIGH; | ||
297 | 300 | ||
298 | return MODE_OK; | 301 | return MODE_OK; |
299 | } | 302 | } |
@@ -952,7 +955,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
952 | if (HAS_PCH_SPLIT(dev)) { | 955 | if (HAS_PCH_SPLIT(dev)) { |
953 | I915_WRITE(PCH_PP_CONTROL, | 956 | I915_WRITE(PCH_PP_CONTROL, |
954 | I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); | 957 | I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); |
955 | } else { | 958 | } else if (INTEL_INFO(dev_priv)->gen < 5) { |
956 | I915_WRITE(PP_CONTROL, | 959 | I915_WRITE(PP_CONTROL, |
957 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); | 960 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); |
958 | } | 961 | } |
@@ -982,6 +985,18 @@ void intel_lvds_init(struct drm_device *dev) | |||
982 | DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n"); | 985 | DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n"); |
983 | } | 986 | } |
984 | 987 | ||
988 | /* Set the Panel Power On/Off timings if uninitialized. */ | ||
989 | if (INTEL_INFO(dev_priv)->gen < 5 && | ||
990 | I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) { | ||
991 | /* Set T2 to 40ms and T5 to 200ms */ | ||
992 | I915_WRITE(PP_ON_DELAYS, 0x019007d0); | ||
993 | |||
994 | /* Set T3 to 35ms and Tx to 200ms */ | ||
995 | I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); | ||
996 | |||
997 | DRM_DEBUG_KMS("Panel power timings uninitialized, setting defaults\n"); | ||
998 | } | ||
999 | |||
985 | lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL); | 1000 | lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL); |
986 | if (!lvds_encoder) | 1001 | if (!lvds_encoder) |
987 | return; | 1002 | return; |
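The LVDS fallback programs PP_ON_DELAYS = 0x019007d0 and PP_OFF_DELAYS = 0x015e07d0, which the comments translate to T2 = 40 ms / T5 = 200 ms and T3 = 35 ms / Tx = 200 ms. The numbers work out if each half-word field counts in 100 us units; a standalone decode (the exact field widths are an assumption here, kept to the bits these values use):

#include <stdint.h>
#include <stdio.h>

/* Assumed encoding: two delay fields in units of 100 us,
 * upper field at bits 28:16, lower field at bits 12:0. */
static void decode_pp_delays(uint32_t val, const char *hi, const char *lo)
{
	uint32_t hi_units = (val >> 16) & 0x1fff;
	uint32_t lo_units = val & 0x1fff;

	printf("%s = %u.%u ms, %s = %u.%u ms\n",
	       hi, hi_units / 10, hi_units % 10,
	       lo, lo_units / 10, lo_units % 10);
}

int main(void)
{
	decode_pp_delays(0x019007d0, "T2", "T5");	/* 40.0 ms, 200.0 ms */
	decode_pp_delays(0x015e07d0, "T3", "Tx");	/* 35.0 ms, 200.0 ms */
	return 0;
}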
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index e2ab3f6ed022..2034438a0664 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -484,7 +484,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, | |||
484 | return val; | 484 | return val; |
485 | } | 485 | } |
486 | 486 | ||
487 | static u32 bdw_get_backlight(struct intel_connector *connector) | 487 | static u32 lpt_get_backlight(struct intel_connector *connector) |
488 | { | 488 | { |
489 | struct drm_device *dev = connector->base.dev; | 489 | struct drm_device *dev = connector->base.dev; |
490 | struct drm_i915_private *dev_priv = dev->dev_private; | 490 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -576,7 +576,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector) | |||
576 | return val; | 576 | return val; |
577 | } | 577 | } |
578 | 578 | ||
579 | static void bdw_set_backlight(struct intel_connector *connector, u32 level) | 579 | static void lpt_set_backlight(struct intel_connector *connector, u32 level) |
580 | { | 580 | { |
581 | struct drm_device *dev = connector->base.dev; | 581 | struct drm_device *dev = connector->base.dev; |
582 | struct drm_i915_private *dev_priv = dev->dev_private; | 582 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -729,6 +729,18 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector, | |||
729 | mutex_unlock(&dev_priv->backlight_lock); | 729 | mutex_unlock(&dev_priv->backlight_lock); |
730 | } | 730 | } |
731 | 731 | ||
732 | static void lpt_disable_backlight(struct intel_connector *connector) | ||
733 | { | ||
734 | struct drm_device *dev = connector->base.dev; | ||
735 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
736 | u32 tmp; | ||
737 | |||
738 | intel_panel_actually_set_backlight(connector, 0); | ||
739 | |||
740 | tmp = I915_READ(BLC_PWM_PCH_CTL1); | ||
741 | I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); | ||
742 | } | ||
743 | |||
732 | static void pch_disable_backlight(struct intel_connector *connector) | 744 | static void pch_disable_backlight(struct intel_connector *connector) |
733 | { | 745 | { |
734 | struct drm_device *dev = connector->base.dev; | 746 | struct drm_device *dev = connector->base.dev; |
@@ -829,7 +841,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector) | |||
829 | mutex_unlock(&dev_priv->backlight_lock); | 841 | mutex_unlock(&dev_priv->backlight_lock); |
830 | } | 842 | } |
831 | 843 | ||
832 | static void bdw_enable_backlight(struct intel_connector *connector) | 844 | static void lpt_enable_backlight(struct intel_connector *connector) |
833 | { | 845 | { |
834 | struct drm_device *dev = connector->base.dev; | 846 | struct drm_device *dev = connector->base.dev; |
835 | struct drm_i915_private *dev_priv = dev->dev_private; | 847 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -1212,10 +1224,149 @@ static void intel_backlight_device_unregister(struct intel_connector *connector) | |||
1212 | #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ | 1224 | #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ |
1213 | 1225 | ||
1214 | /* | 1226 | /* |
1215 | * Note: The setup hooks can't assume pipe is set! | 1227 | * SPT: This value represents the period of the PWM stream in clock periods |
1228 | * multiplied by 16 (default increment) or 128 (alternate increment selected in | ||
1229 | * SCHICKEN_1 bit 0). PWM clock is 24 MHz. | ||
1230 | */ | ||
1231 | static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) | ||
1232 | { | ||
1233 | struct drm_device *dev = connector->base.dev; | ||
1234 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1235 | u32 mul, clock; | ||
1236 | |||
1237 | if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY) | ||
1238 | mul = 128; | ||
1239 | else | ||
1240 | mul = 16; | ||
1241 | |||
1242 | clock = MHz(24); | ||
1243 | |||
1244 | return clock / (pwm_freq_hz * mul); | ||
1245 | } | ||
1246 | |||
1247 | /* | ||
1248 | * LPT: This value represents the period of the PWM stream in clock periods | ||
1249 | * multiplied by 128 (default increment) or 16 (alternate increment, selected in | ||
1250 | * LPT SOUTH_CHICKEN2 register bit 5). | ||
1251 | */ | ||
1252 | static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) | ||
1253 | { | ||
1254 | struct drm_device *dev = connector->base.dev; | ||
1255 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1256 | u32 mul, clock; | ||
1257 | |||
1258 | if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY) | ||
1259 | mul = 16; | ||
1260 | else | ||
1261 | mul = 128; | ||
1262 | |||
1263 | if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) | ||
1264 | clock = MHz(135); /* LPT:H */ | ||
1265 | else | ||
1266 | clock = MHz(24); /* LPT:LP */ | ||
1267 | |||
1268 | return clock / (pwm_freq_hz * mul); | ||
1269 | } | ||
1270 | |||
1271 | /* | ||
1272 | * ILK/SNB/IVB: This value represents the period of the PWM stream in PCH | ||
1273 | * display raw clocks multiplied by 128. | ||
1274 | */ | ||
1275 | static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) | ||
1276 | { | ||
1277 | struct drm_device *dev = connector->base.dev; | ||
1278 | int clock = MHz(intel_pch_rawclk(dev)); | ||
1279 | |||
1280 | return clock / (pwm_freq_hz * 128); | ||
1281 | } | ||
1282 | |||
1283 | /* | ||
1284 | * Gen2: This field determines the number of time base events (display core | ||
1285 | * clock frequency/32) in total for a complete cycle of modulated backlight | ||
1286 | * control. | ||
1216 | * | 1287 | * |
1217 | * XXX: Query mode clock or hardware clock and program PWM modulation frequency | 1288 | * Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock) |
1218 | * appropriately when it's 0. Use VBT and/or sane defaults. | 1289 | * divided by 32. |
1290 | */ | ||
1291 | static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) | ||
1292 | { | ||
1293 | struct drm_device *dev = connector->base.dev; | ||
1294 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1295 | int clock; | ||
1296 | |||
1297 | if (IS_PINEVIEW(dev)) | ||
1298 | clock = intel_hrawclk(dev); | ||
1299 | else | ||
1300 | clock = 1000 * dev_priv->display.get_display_clock_speed(dev); | ||
1301 | |||
1302 | return clock / (pwm_freq_hz * 32); | ||
1303 | } | ||
1304 | |||
1305 | /* | ||
1306 | * Gen4: This value represents the period of the PWM stream in display core | ||
1307 | * clocks multiplied by 128. | ||
1308 | */ | ||
1309 | static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) | ||
1310 | { | ||
1311 | struct drm_device *dev = connector->base.dev; | ||
1312 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1313 | int clock = 1000 * dev_priv->display.get_display_clock_speed(dev); | ||
1314 | |||
1315 | return clock / (pwm_freq_hz * 128); | ||
1316 | } | ||
1317 | |||
1318 | /* | ||
1319 | * VLV: This value represents the period of the PWM stream in display core | ||
1320 | * clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks | ||
1321 | * multiplied by 16. CHV uses a 19.2MHz S0IX clock. | ||
1322 | */ | ||
1323 | static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) | ||
1324 | { | ||
1325 | struct drm_device *dev = connector->base.dev; | ||
1326 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1327 | int clock; | ||
1328 | |||
1329 | if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) { | ||
1330 | if (IS_CHERRYVIEW(dev)) | ||
1331 | return KHz(19200) / (pwm_freq_hz * 16); | ||
1332 | else | ||
1333 | return MHz(25) / (pwm_freq_hz * 16); | ||
1334 | } else { | ||
1335 | clock = intel_hrawclk(dev); | ||
1336 | return MHz(clock) / (pwm_freq_hz * 128); | ||
1337 | } | ||
1338 | } | ||
1339 | |||
1340 | static u32 get_backlight_max_vbt(struct intel_connector *connector) | ||
1341 | { | ||
1342 | struct drm_device *dev = connector->base.dev; | ||
1343 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1344 | u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz; | ||
1345 | u32 pwm; | ||
1346 | |||
1347 | if (!pwm_freq_hz) { | ||
1348 | DRM_DEBUG_KMS("backlight frequency not specified in VBT\n"); | ||
1349 | return 0; | ||
1350 | } | ||
1351 | |||
1352 | if (!dev_priv->display.backlight_hz_to_pwm) { | ||
1353 | DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n"); | ||
1354 | return 0; | ||
1355 | } | ||
1356 | |||
1357 | pwm = dev_priv->display.backlight_hz_to_pwm(connector, pwm_freq_hz); | ||
1358 | if (!pwm) { | ||
1359 | DRM_DEBUG_KMS("backlight frequency conversion failed\n"); | ||
1360 | return 0; | ||
1361 | } | ||
1362 | |||
1363 | DRM_DEBUG_KMS("backlight frequency %u Hz from VBT\n", pwm_freq_hz); | ||
1364 | |||
1365 | return pwm; | ||
1366 | } | ||
1367 | |||
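Each backlight_hz_to_pwm() hook divides its PWM source clock by the VBT frequency times the platform's increment, which is how get_backlight_max_vbt() synthesizes a max duty-cycle count when the register was left at zero. Checking two of the conversions standalone (the 200 Hz input is just an example value):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MHz(x)	((uint32_t)(x) * 1000000u)
#define KHz(x)	((uint32_t)(x) * 1000u)

/* SPT: 24 MHz PWM clock, increment 16 (or 128 via SCHICKEN_1 bit 0). */
static uint32_t spt_hz_to_pwm(uint32_t hz, bool alt_granularity)
{
	uint32_t mul = alt_granularity ? 128 : 16;

	return MHz(24) / (hz * mul);
}

/* CHV: 19.2 MHz S0IX clock, increment 16 (S0IX mux selected). */
static uint32_t chv_s0ix_hz_to_pwm(uint32_t hz)
{
	return KHz(19200) / (hz * 16);
}

int main(void)
{
	printf("SPT @200 Hz: max = %u\n", spt_hz_to_pwm(200, false)); /* 7500 */
	printf("CHV @200 Hz: max = %u\n", chv_s0ix_hz_to_pwm(200));   /* 6000 */
	return 0;
}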
1368 | /* | ||
1369 | * Note: The setup hooks can't assume pipe is set! | ||
1219 | */ | 1370 | */ |
1220 | static u32 get_backlight_min_vbt(struct intel_connector *connector) | 1371 | static u32 get_backlight_min_vbt(struct intel_connector *connector) |
1221 | { | 1372 | { |
@@ -1243,7 +1394,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector) | |||
1243 | return scale(min, 0, 255, 0, panel->backlight.max); | 1394 | return scale(min, 0, 255, 0, panel->backlight.max); |
1244 | } | 1395 | } |
1245 | 1396 | ||
1246 | static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused) | 1397 | static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused) |
1247 | { | 1398 | { |
1248 | struct drm_device *dev = connector->base.dev; | 1399 | struct drm_device *dev = connector->base.dev; |
1249 | struct drm_i915_private *dev_priv = dev->dev_private; | 1400 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -1255,12 +1406,16 @@ static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unus | |||
1255 | 1406 | ||
1256 | pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); | 1407 | pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); |
1257 | panel->backlight.max = pch_ctl2 >> 16; | 1408 | panel->backlight.max = pch_ctl2 >> 16; |
1409 | |||
1410 | if (!panel->backlight.max) | ||
1411 | panel->backlight.max = get_backlight_max_vbt(connector); | ||
1412 | |||
1258 | if (!panel->backlight.max) | 1413 | if (!panel->backlight.max) |
1259 | return -ENODEV; | 1414 | return -ENODEV; |
1260 | 1415 | ||
1261 | panel->backlight.min = get_backlight_min_vbt(connector); | 1416 | panel->backlight.min = get_backlight_min_vbt(connector); |
1262 | 1417 | ||
1263 | val = bdw_get_backlight(connector); | 1418 | val = lpt_get_backlight(connector); |
1264 | panel->backlight.level = intel_panel_compute_brightness(connector, val); | 1419 | panel->backlight.level = intel_panel_compute_brightness(connector, val); |
1265 | 1420 | ||
1266 | panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) && | 1421 | panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) && |
@@ -1281,6 +1436,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus | |||
1281 | 1436 | ||
1282 | pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); | 1437 | pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); |
1283 | panel->backlight.max = pch_ctl2 >> 16; | 1438 | panel->backlight.max = pch_ctl2 >> 16; |
1439 | |||
1440 | if (!panel->backlight.max) | ||
1441 | panel->backlight.max = get_backlight_max_vbt(connector); | ||
1442 | |||
1284 | if (!panel->backlight.max) | 1443 | if (!panel->backlight.max) |
1285 | return -ENODEV; | 1444 | return -ENODEV; |
1286 | 1445 | ||
@@ -1312,12 +1471,18 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu | |||
1312 | panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV; | 1471 | panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV; |
1313 | 1472 | ||
1314 | panel->backlight.max = ctl >> 17; | 1473 | panel->backlight.max = ctl >> 17; |
1315 | if (panel->backlight.combination_mode) | 1474 | |
1316 | panel->backlight.max *= 0xff; | 1475 | if (!panel->backlight.max) { |
1476 | panel->backlight.max = get_backlight_max_vbt(connector); | ||
1477 | panel->backlight.max >>= 1; | ||
1478 | } | ||
1317 | 1479 | ||
1318 | if (!panel->backlight.max) | 1480 | if (!panel->backlight.max) |
1319 | return -ENODEV; | 1481 | return -ENODEV; |
1320 | 1482 | ||
1483 | if (panel->backlight.combination_mode) | ||
1484 | panel->backlight.max *= 0xff; | ||
1485 | |||
1321 | panel->backlight.min = get_backlight_min_vbt(connector); | 1486 | panel->backlight.min = get_backlight_min_vbt(connector); |
1322 | 1487 | ||
1323 | val = i9xx_get_backlight(connector); | 1488 | val = i9xx_get_backlight(connector); |
@@ -1341,12 +1506,16 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu | |||
1341 | 1506 | ||
1342 | ctl = I915_READ(BLC_PWM_CTL); | 1507 | ctl = I915_READ(BLC_PWM_CTL); |
1343 | panel->backlight.max = ctl >> 16; | 1508 | panel->backlight.max = ctl >> 16; |
1344 | if (panel->backlight.combination_mode) | 1509 | |
1345 | panel->backlight.max *= 0xff; | 1510 | if (!panel->backlight.max) |
1511 | panel->backlight.max = get_backlight_max_vbt(connector); | ||
1346 | 1512 | ||
1347 | if (!panel->backlight.max) | 1513 | if (!panel->backlight.max) |
1348 | return -ENODEV; | 1514 | return -ENODEV; |
1349 | 1515 | ||
1516 | if (panel->backlight.combination_mode) | ||
1517 | panel->backlight.max *= 0xff; | ||
1518 | |||
1350 | panel->backlight.min = get_backlight_min_vbt(connector); | 1519 | panel->backlight.min = get_backlight_min_vbt(connector); |
1351 | 1520 | ||
1352 | val = i9xx_get_backlight(connector); | 1521 | val = i9xx_get_backlight(connector); |
@@ -1363,21 +1532,8 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe | |||
1363 | struct drm_device *dev = connector->base.dev; | 1532 | struct drm_device *dev = connector->base.dev; |
1364 | struct drm_i915_private *dev_priv = dev->dev_private; | 1533 | struct drm_i915_private *dev_priv = dev->dev_private; |
1365 | struct intel_panel *panel = &connector->panel; | 1534 | struct intel_panel *panel = &connector->panel; |
1366 | enum pipe p; | ||
1367 | u32 ctl, ctl2, val; | 1535 | u32 ctl, ctl2, val; |
1368 | 1536 | ||
1369 | for_each_pipe(dev_priv, p) { | ||
1370 | u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p)); | ||
1371 | |||
1372 | /* Skip if the modulation freq is already set */ | ||
1373 | if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK) | ||
1374 | continue; | ||
1375 | |||
1376 | cur_val &= BACKLIGHT_DUTY_CYCLE_MASK; | ||
1377 | I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) | | ||
1378 | cur_val); | ||
1379 | } | ||
1380 | |||
1381 | if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) | 1537 | if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) |
1382 | return -ENODEV; | 1538 | return -ENODEV; |
1383 | 1539 | ||
@@ -1386,6 +1542,10 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe | |||
1386 | 1542 | ||
1387 | ctl = I915_READ(VLV_BLC_PWM_CTL(pipe)); | 1543 | ctl = I915_READ(VLV_BLC_PWM_CTL(pipe)); |
1388 | panel->backlight.max = ctl >> 16; | 1544 | panel->backlight.max = ctl >> 16; |
1545 | |||
1546 | if (!panel->backlight.max) | ||
1547 | panel->backlight.max = get_backlight_max_vbt(connector); | ||
1548 | |||
1389 | if (!panel->backlight.max) | 1549 | if (!panel->backlight.max) |
1390 | return -ENODEV; | 1550 | return -ENODEV; |
1391 | 1551 | ||
@@ -1412,6 +1572,10 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) | |||
1412 | panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY; | 1572 | panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY; |
1413 | 1573 | ||
1414 | panel->backlight.max = I915_READ(BXT_BLC_PWM_FREQ1); | 1574 | panel->backlight.max = I915_READ(BXT_BLC_PWM_FREQ1); |
1575 | |||
1576 | if (!panel->backlight.max) | ||
1577 | panel->backlight.max = get_backlight_max_vbt(connector); | ||
1578 | |||
1415 | if (!panel->backlight.max) | 1579 | if (!panel->backlight.max) |
1416 | return -ENODEV; | 1580 | return -ENODEV; |
1417 | 1581 | ||
@@ -1519,18 +1683,23 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev) | |||
1519 | dev_priv->display.disable_backlight = bxt_disable_backlight; | 1683 | dev_priv->display.disable_backlight = bxt_disable_backlight; |
1520 | dev_priv->display.set_backlight = bxt_set_backlight; | 1684 | dev_priv->display.set_backlight = bxt_set_backlight; |
1521 | dev_priv->display.get_backlight = bxt_get_backlight; | 1685 | dev_priv->display.get_backlight = bxt_get_backlight; |
1522 | } else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev)) { | 1686 | } else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) { |
1523 | dev_priv->display.setup_backlight = bdw_setup_backlight; | 1687 | dev_priv->display.setup_backlight = lpt_setup_backlight; |
1524 | dev_priv->display.enable_backlight = bdw_enable_backlight; | 1688 | dev_priv->display.enable_backlight = lpt_enable_backlight; |
1525 | dev_priv->display.disable_backlight = pch_disable_backlight; | 1689 | dev_priv->display.disable_backlight = lpt_disable_backlight; |
1526 | dev_priv->display.set_backlight = bdw_set_backlight; | 1690 | dev_priv->display.set_backlight = lpt_set_backlight; |
1527 | dev_priv->display.get_backlight = bdw_get_backlight; | 1691 | dev_priv->display.get_backlight = lpt_get_backlight; |
1692 | if (HAS_PCH_LPT(dev)) | ||
1693 | dev_priv->display.backlight_hz_to_pwm = lpt_hz_to_pwm; | ||
1694 | else | ||
1695 | dev_priv->display.backlight_hz_to_pwm = spt_hz_to_pwm; | ||
1528 | } else if (HAS_PCH_SPLIT(dev)) { | 1696 | } else if (HAS_PCH_SPLIT(dev)) { |
1529 | dev_priv->display.setup_backlight = pch_setup_backlight; | 1697 | dev_priv->display.setup_backlight = pch_setup_backlight; |
1530 | dev_priv->display.enable_backlight = pch_enable_backlight; | 1698 | dev_priv->display.enable_backlight = pch_enable_backlight; |
1531 | dev_priv->display.disable_backlight = pch_disable_backlight; | 1699 | dev_priv->display.disable_backlight = pch_disable_backlight; |
1532 | dev_priv->display.set_backlight = pch_set_backlight; | 1700 | dev_priv->display.set_backlight = pch_set_backlight; |
1533 | dev_priv->display.get_backlight = pch_get_backlight; | 1701 | dev_priv->display.get_backlight = pch_get_backlight; |
1702 | dev_priv->display.backlight_hz_to_pwm = pch_hz_to_pwm; | ||
1534 | } else if (IS_VALLEYVIEW(dev)) { | 1703 | } else if (IS_VALLEYVIEW(dev)) { |
1535 | if (dev_priv->vbt.has_mipi) { | 1704 | if (dev_priv->vbt.has_mipi) { |
1536 | dev_priv->display.setup_backlight = pwm_setup_backlight; | 1705 | dev_priv->display.setup_backlight = pwm_setup_backlight; |
@@ -1544,6 +1713,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev) | |||
1544 | dev_priv->display.disable_backlight = vlv_disable_backlight; | 1713 | dev_priv->display.disable_backlight = vlv_disable_backlight; |
1545 | dev_priv->display.set_backlight = vlv_set_backlight; | 1714 | dev_priv->display.set_backlight = vlv_set_backlight; |
1546 | dev_priv->display.get_backlight = vlv_get_backlight; | 1715 | dev_priv->display.get_backlight = vlv_get_backlight; |
1716 | dev_priv->display.backlight_hz_to_pwm = vlv_hz_to_pwm; | ||
1547 | } | 1717 | } |
1548 | } else if (IS_GEN4(dev)) { | 1718 | } else if (IS_GEN4(dev)) { |
1549 | dev_priv->display.setup_backlight = i965_setup_backlight; | 1719 | dev_priv->display.setup_backlight = i965_setup_backlight; |
@@ -1551,12 +1721,14 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev) | |||
1551 | dev_priv->display.disable_backlight = i965_disable_backlight; | 1721 | dev_priv->display.disable_backlight = i965_disable_backlight; |
1552 | dev_priv->display.set_backlight = i9xx_set_backlight; | 1722 | dev_priv->display.set_backlight = i9xx_set_backlight; |
1553 | dev_priv->display.get_backlight = i9xx_get_backlight; | 1723 | dev_priv->display.get_backlight = i9xx_get_backlight; |
1724 | dev_priv->display.backlight_hz_to_pwm = i965_hz_to_pwm; | ||
1554 | } else { | 1725 | } else { |
1555 | dev_priv->display.setup_backlight = i9xx_setup_backlight; | 1726 | dev_priv->display.setup_backlight = i9xx_setup_backlight; |
1556 | dev_priv->display.enable_backlight = i9xx_enable_backlight; | 1727 | dev_priv->display.enable_backlight = i9xx_enable_backlight; |
1557 | dev_priv->display.disable_backlight = i9xx_disable_backlight; | 1728 | dev_priv->display.disable_backlight = i9xx_disable_backlight; |
1558 | dev_priv->display.set_backlight = i9xx_set_backlight; | 1729 | dev_priv->display.set_backlight = i9xx_set_backlight; |
1559 | dev_priv->display.get_backlight = i9xx_get_backlight; | 1730 | dev_priv->display.get_backlight = i9xx_get_backlight; |
1731 | dev_priv->display.backlight_hz_to_pwm = i9xx_hz_to_pwm; | ||
1560 | } | 1732 | } |
1561 | } | 1733 | } |
1562 | 1734 | ||
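The intel_panel.c hunks above all add the same fallback: read the PWM maximum from hardware, use the VBT-derived value when the register reads zero, and only fail with -ENODEV if neither source has anything; on the combination-mode platforms the 0xff scaling now happens after that check, so a VBT fallback value is not rejected. A minimal userspace sketch of the control flow (the helper names here are hypothetical stand-ins, not the driver's API):

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the hardware register and VBT reads. */
    static unsigned int read_hw_pwm_max(void)  { return 0; }      /* unprogrammed */
    static unsigned int read_vbt_pwm_max(void) { return 0x1234; } /* VBT value */

    static int setup_backlight_max(unsigned int *max, int combination_mode)
    {
        *max = read_hw_pwm_max();          /* e.g. BLC_PWM_PCH_CTL2 >> 16 */

        if (!*max)
            *max = read_vbt_pwm_max();     /* get_backlight_max_vbt() */

        if (!*max)
            return -ENODEV;                /* nothing programmed anywhere */

        /* Scale only after the fallback has had its chance. */
        if (combination_mode)
            *max *= 0xff;

        return 0;
    }

    int main(void)
    {
        unsigned int max;

        if (setup_backlight_max(&max, 1) == 0)
            printf("backlight max duty cycle: 0x%x\n", max);
        return 0;
    }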
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ddbb7ed0a193..1f6b5bbc74ef 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -116,18 +116,24 @@ static void bxt_init_clock_gating(struct drm_device *dev) | |||
116 | 116 | ||
117 | gen9_init_clock_gating(dev); | 117 | gen9_init_clock_gating(dev); |
118 | 118 | ||
119 | /* WaDisableSDEUnitClockGating:bxt */ | ||
120 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | ||
121 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); | ||
122 | |||
119 | /* | 123 | /* |
120 | * FIXME: | 124 | * FIXME: |
121 | * GEN8_SDEUNIT_CLOCK_GATE_DISABLE applies on A0 only. | ||
122 | * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only. | 125 | * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only. |
123 | */ | 126 | */ |
124 | /* WaDisableSDEUnitClockGating:bxt */ | ||
125 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | 127 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | |
126 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE | | ||
127 | GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); | 128 | GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); |
128 | 129 | ||
129 | /* FIXME: apply on A0 only */ | 130 | if (INTEL_REVID(dev) == BXT_REVID_A0) { |
130 | I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); | 131 | /* |
132 | * Hardware specification requires this bit to be | ||
133 | * set to 1 for A0 | ||
134 | */ | ||
135 | I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); | ||
136 | } | ||
131 | } | 137 | } |
132 | 138 | ||
133 | static void i915_pineview_get_mem_freq(struct drm_device *dev) | 139 | static void i915_pineview_get_mem_freq(struct drm_device *dev) |
@@ -3166,7 +3172,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc, | |||
3166 | if (fb) { | 3172 | if (fb) { |
3167 | p->plane[0].enabled = true; | 3173 | p->plane[0].enabled = true; |
3168 | p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? | 3174 | p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? |
3169 | drm_format_plane_cpp(fb->pixel_format, 1) : fb->bits_per_pixel / 8; | 3175 | drm_format_plane_cpp(fb->pixel_format, 1) : |
3176 | drm_format_plane_cpp(fb->pixel_format, 0); | ||
3170 | p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? | 3177 | p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? |
3171 | drm_format_plane_cpp(fb->pixel_format, 0) : 0; | 3178 | drm_format_plane_cpp(fb->pixel_format, 0) : 0; |
3172 | p->plane[0].tiling = fb->modifier[0]; | 3179 | p->plane[0].tiling = fb->modifier[0]; |
@@ -5565,7 +5572,7 @@ static void cherryview_enable_rps(struct drm_device *dev) | |||
5565 | /* RPS code assumes GPLL is used */ | 5572 | /* RPS code assumes GPLL is used */ |
5566 | WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); | 5573 | WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); |
5567 | 5574 | ||
5568 | DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no"); | 5575 | DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); |
5569 | DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); | 5576 | DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); |
5570 | 5577 | ||
5571 | dev_priv->rps.cur_freq = (val >> 8) & 0xff; | 5578 | dev_priv->rps.cur_freq = (val >> 8) & 0xff; |
@@ -5655,7 +5662,7 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
5655 | /* RPS code assumes GPLL is used */ | 5662 | /* RPS code assumes GPLL is used */ |
5656 | WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); | 5663 | WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); |
5657 | 5664 | ||
5658 | DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no"); | 5665 | DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); |
5659 | DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); | 5666 | DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); |
5660 | 5667 | ||
5661 | dev_priv->rps.cur_freq = (val >> 8) & 0xff; | 5668 | dev_priv->rps.cur_freq = (val >> 8) & 0xff; |
@@ -6604,7 +6611,7 @@ static void lpt_init_clock_gating(struct drm_device *dev) | |||
6604 | * TODO: this bit should only be enabled when really needed, then | 6611 | * TODO: this bit should only be enabled when really needed, then |
6605 | * disabled when not needed anymore in order to save power. | 6612 | * disabled when not needed anymore in order to save power. |
6606 | */ | 6613 | */ |
6607 | if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) | 6614 | if (HAS_PCH_LPT_LP(dev)) |
6608 | I915_WRITE(SOUTH_DSPCLK_GATE_D, | 6615 | I915_WRITE(SOUTH_DSPCLK_GATE_D, |
6609 | I915_READ(SOUTH_DSPCLK_GATE_D) | | 6616 | I915_READ(SOUTH_DSPCLK_GATE_D) | |
6610 | PCH_LP_PARTITION_LEVEL_DISABLE); | 6617 | PCH_LP_PARTITION_LEVEL_DISABLE); |
@@ -6619,7 +6626,7 @@ static void lpt_suspend_hw(struct drm_device *dev) | |||
6619 | { | 6626 | { |
6620 | struct drm_i915_private *dev_priv = dev->dev_private; | 6627 | struct drm_i915_private *dev_priv = dev->dev_private; |
6621 | 6628 | ||
6622 | if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { | 6629 | if (HAS_PCH_LPT_LP(dev)) { |
6623 | uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); | 6630 | uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); |
6624 | 6631 | ||
6625 | val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; | 6632 | val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6e6b8db996ef..20a75bb516ac 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -1996,14 +1996,14 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, | |||
1996 | return 0; | 1996 | return 0; |
1997 | } | 1997 | } |
1998 | 1998 | ||
1999 | void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) | 1999 | static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) |
2000 | { | 2000 | { |
2001 | drm_gem_object_unreference(&ringbuf->obj->base); | 2001 | drm_gem_object_unreference(&ringbuf->obj->base); |
2002 | ringbuf->obj = NULL; | 2002 | ringbuf->obj = NULL; |
2003 | } | 2003 | } |
2004 | 2004 | ||
2005 | int intel_alloc_ringbuffer_obj(struct drm_device *dev, | 2005 | static int intel_alloc_ringbuffer_obj(struct drm_device *dev, |
2006 | struct intel_ringbuffer *ringbuf) | 2006 | struct intel_ringbuffer *ringbuf) |
2007 | { | 2007 | { |
2008 | struct drm_i915_gem_object *obj; | 2008 | struct drm_i915_gem_object *obj; |
2009 | 2009 | ||
@@ -2023,6 +2023,48 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev, | |||
2023 | return 0; | 2023 | return 0; |
2024 | } | 2024 | } |
2025 | 2025 | ||
2026 | struct intel_ringbuffer * | ||
2027 | intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) | ||
2028 | { | ||
2029 | struct intel_ringbuffer *ring; | ||
2030 | int ret; | ||
2031 | |||
2032 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); | ||
2033 | if (ring == NULL) | ||
2034 | return ERR_PTR(-ENOMEM); | ||
2035 | |||
2036 | ring->ring = engine; | ||
2037 | |||
2038 | ring->size = size; | ||
2039 | /* Workaround an erratum on the i830 which causes a hang if | ||
2040 | * the TAIL pointer points to within the last 2 cachelines | ||
2041 | * of the buffer. | ||
2042 | */ | ||
2043 | ring->effective_size = size; | ||
2044 | if (IS_I830(engine->dev) || IS_845G(engine->dev)) | ||
2045 | ring->effective_size -= 2 * CACHELINE_BYTES; | ||
2046 | |||
2047 | ring->last_retired_head = -1; | ||
2048 | intel_ring_update_space(ring); | ||
2049 | |||
2050 | ret = intel_alloc_ringbuffer_obj(engine->dev, ring); | ||
2051 | if (ret) { | ||
2052 | DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", | ||
2053 | engine->name, ret); | ||
2054 | kfree(ring); | ||
2055 | return ERR_PTR(ret); | ||
2056 | } | ||
2057 | |||
2058 | return ring; | ||
2059 | } | ||
2060 | |||
2061 | void | ||
2062 | intel_ringbuffer_free(struct intel_ringbuffer *ring) | ||
2063 | { | ||
2064 | intel_destroy_ringbuffer_obj(ring); | ||
2065 | kfree(ring); | ||
2066 | } | ||
2067 | |||
2026 | static int intel_init_ring_buffer(struct drm_device *dev, | 2068 | static int intel_init_ring_buffer(struct drm_device *dev, |
2027 | struct intel_engine_cs *ring) | 2069 | struct intel_engine_cs *ring) |
2028 | { | 2070 | { |
@@ -2031,22 +2073,20 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
2031 | 2073 | ||
2032 | WARN_ON(ring->buffer); | 2074 | WARN_ON(ring->buffer); |
2033 | 2075 | ||
2034 | ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); | ||
2035 | if (!ringbuf) | ||
2036 | return -ENOMEM; | ||
2037 | ring->buffer = ringbuf; | ||
2038 | |||
2039 | ring->dev = dev; | 2076 | ring->dev = dev; |
2040 | INIT_LIST_HEAD(&ring->active_list); | 2077 | INIT_LIST_HEAD(&ring->active_list); |
2041 | INIT_LIST_HEAD(&ring->request_list); | 2078 | INIT_LIST_HEAD(&ring->request_list); |
2042 | INIT_LIST_HEAD(&ring->execlist_queue); | 2079 | INIT_LIST_HEAD(&ring->execlist_queue); |
2043 | i915_gem_batch_pool_init(dev, &ring->batch_pool); | 2080 | i915_gem_batch_pool_init(dev, &ring->batch_pool); |
2044 | ringbuf->size = 32 * PAGE_SIZE; | ||
2045 | ringbuf->ring = ring; | ||
2046 | memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); | 2081 | memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); |
2047 | 2082 | ||
2048 | init_waitqueue_head(&ring->irq_queue); | 2083 | init_waitqueue_head(&ring->irq_queue); |
2049 | 2084 | ||
2085 | ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE); | ||
2086 | if (IS_ERR(ringbuf)) | ||
2087 | return PTR_ERR(ringbuf); | ||
2088 | ring->buffer = ringbuf; | ||
2089 | |||
2050 | if (I915_NEED_GFX_HWS(dev)) { | 2090 | if (I915_NEED_GFX_HWS(dev)) { |
2051 | ret = init_status_page(ring); | 2091 | ret = init_status_page(ring); |
2052 | if (ret) | 2092 | if (ret) |
@@ -2058,15 +2098,6 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
2058 | goto error; | 2098 | goto error; |
2059 | } | 2099 | } |
2060 | 2100 | ||
2061 | WARN_ON(ringbuf->obj); | ||
2062 | |||
2063 | ret = intel_alloc_ringbuffer_obj(dev, ringbuf); | ||
2064 | if (ret) { | ||
2065 | DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", | ||
2066 | ring->name, ret); | ||
2067 | goto error; | ||
2068 | } | ||
2069 | |||
2070 | ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); | 2101 | ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); |
2071 | if (ret) { | 2102 | if (ret) { |
2072 | DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", | 2103 | DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", |
@@ -2075,14 +2106,6 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
2075 | goto error; | 2106 | goto error; |
2076 | } | 2107 | } |
2077 | 2108 | ||
2078 | /* Workaround an erratum on the i830 which causes a hang if | ||
2079 | * the TAIL pointer points to within the last 2 cachelines | ||
2080 | * of the buffer. | ||
2081 | */ | ||
2082 | ringbuf->effective_size = ringbuf->size; | ||
2083 | if (IS_I830(dev) || IS_845G(dev)) | ||
2084 | ringbuf->effective_size -= 2 * CACHELINE_BYTES; | ||
2085 | |||
2086 | ret = i915_cmd_parser_init_ring(ring); | 2109 | ret = i915_cmd_parser_init_ring(ring); |
2087 | if (ret) | 2110 | if (ret) |
2088 | goto error; | 2111 | goto error; |
@@ -2090,7 +2113,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
2090 | return 0; | 2113 | return 0; |
2091 | 2114 | ||
2092 | error: | 2115 | error: |
2093 | kfree(ringbuf); | 2116 | intel_ringbuffer_free(ringbuf); |
2094 | ring->buffer = NULL; | 2117 | ring->buffer = NULL; |
2095 | return ret; | 2118 | return ret; |
2096 | } | 2119 | } |
@@ -2098,19 +2121,18 @@ error: | |||
2098 | void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) | 2121 | void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) |
2099 | { | 2122 | { |
2100 | struct drm_i915_private *dev_priv; | 2123 | struct drm_i915_private *dev_priv; |
2101 | struct intel_ringbuffer *ringbuf; | ||
2102 | 2124 | ||
2103 | if (!intel_ring_initialized(ring)) | 2125 | if (!intel_ring_initialized(ring)) |
2104 | return; | 2126 | return; |
2105 | 2127 | ||
2106 | dev_priv = to_i915(ring->dev); | 2128 | dev_priv = to_i915(ring->dev); |
2107 | ringbuf = ring->buffer; | ||
2108 | 2129 | ||
2109 | intel_stop_ring_buffer(ring); | 2130 | intel_stop_ring_buffer(ring); |
2110 | WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); | 2131 | WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); |
2111 | 2132 | ||
2112 | intel_unpin_ringbuffer_obj(ringbuf); | 2133 | intel_unpin_ringbuffer_obj(ring->buffer); |
2113 | intel_destroy_ringbuffer_obj(ringbuf); | 2134 | intel_ringbuffer_free(ring->buffer); |
2135 | ring->buffer = NULL; | ||
2114 | 2136 | ||
2115 | if (ring->cleanup) | 2137 | if (ring->cleanup) |
2116 | ring->cleanup(ring); | 2138 | ring->cleanup(ring); |
@@ -2119,9 +2141,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) | |||
2119 | 2141 | ||
2120 | i915_cmd_parser_fini_ring(ring); | 2142 | i915_cmd_parser_fini_ring(ring); |
2121 | i915_gem_batch_pool_fini(&ring->batch_pool); | 2143 | i915_gem_batch_pool_fini(&ring->batch_pool); |
2122 | |||
2123 | kfree(ringbuf); | ||
2124 | ring->buffer = NULL; | ||
2125 | } | 2144 | } |
2126 | 2145 | ||
2127 | static int ring_wait_for_space(struct intel_engine_cs *ring, int n) | 2146 | static int ring_wait_for_space(struct intel_engine_cs *ring, int n) |
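The ringbuffer refactor above collapses the kzalloc-plus-allocate sequence into a single ERR_PTR-style constructor, intel_engine_create_ringbuffer(), pairs it with intel_ringbuffer_free(), and moves the i830 TAIL-erratum sizing into the constructor. A rough userspace model of that ownership shape (sizes and helpers are illustrative, not the GEM implementation):

    #include <stdio.h>
    #include <stdlib.h>

    struct ringbuf {
        size_t size;
        size_t effective_size;
        void *backing;               /* stands in for the GEM object */
    };

    /* Constructor owns every failure path and frees what it allocated. */
    static struct ringbuf *ringbuf_create(size_t size, int is_i830)
    {
        struct ringbuf *rb = calloc(1, sizeof(*rb));

        if (!rb)
            return NULL;

        rb->size = size;
        rb->effective_size = size;
        if (is_i830)                      /* i830 TAIL erratum workaround */
            rb->effective_size -= 2 * 64; /* two cachelines */

        rb->backing = malloc(size);
        if (!rb->backing) {
            free(rb);
            return NULL;
        }
        return rb;
    }

    static void ringbuf_free(struct ringbuf *rb)
    {
        free(rb->backing);
        free(rb);
    }

    int main(void)
    {
        struct ringbuf *rb = ringbuf_create(32 * 4096, 1);

        if (!rb)
            return 1;
        printf("usable bytes: %zu of %zu\n", rb->effective_size, rb->size);
        ringbuf_free(rb);
        return 0;
    }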
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2e85fda94963..49fa41dc0eb6 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -377,6 +377,13 @@ intel_ring_sync_index(struct intel_engine_cs *ring, | |||
377 | return idx; | 377 | return idx; |
378 | } | 378 | } |
379 | 379 | ||
380 | static inline void | ||
381 | intel_flush_status_page(struct intel_engine_cs *ring, int reg) | ||
382 | { | ||
383 | drm_clflush_virt_range(&ring->status_page.page_addr[reg], | ||
384 | sizeof(uint32_t)); | ||
385 | } | ||
386 | |||
380 | static inline u32 | 387 | static inline u32 |
381 | intel_read_status_page(struct intel_engine_cs *ring, | 388 | intel_read_status_page(struct intel_engine_cs *ring, |
382 | int reg) | 389 | int reg) |
@@ -413,12 +420,12 @@ intel_write_status_page(struct intel_engine_cs *ring, | |||
413 | #define I915_GEM_HWS_SCRATCH_INDEX 0x40 | 420 | #define I915_GEM_HWS_SCRATCH_INDEX 0x40 |
414 | #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) | 421 | #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) |
415 | 422 | ||
416 | void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); | 423 | struct intel_ringbuffer * |
424 | intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size); | ||
417 | int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, | 425 | int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, |
418 | struct intel_ringbuffer *ringbuf); | 426 | struct intel_ringbuffer *ringbuf); |
419 | void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf); | 427 | void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); |
420 | int intel_alloc_ringbuffer_obj(struct drm_device *dev, | 428 | void intel_ringbuffer_free(struct intel_ringbuffer *ring); |
421 | struct intel_ringbuffer *ringbuf); | ||
422 | 429 | ||
423 | void intel_stop_ring_buffer(struct intel_engine_cs *ring); | 430 | void intel_stop_ring_buffer(struct intel_engine_cs *ring); |
424 | void intel_cleanup_ring_buffer(struct intel_engine_cs *ring); | 431 | void intel_cleanup_ring_buffer(struct intel_engine_cs *ring); |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index af7fdb3bd663..3f682a1a08ce 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -855,6 +855,25 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, | |||
855 | 855 | ||
856 | static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) | 856 | static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) |
857 | { | 857 | { |
858 | enum pipe pipe; | ||
859 | |||
860 | /* | ||
861 | * Enable the CRI clock source so we can get at the | ||
862 | * display and the reference clock for VGA | ||
863 | * hotplug / manual detection. Supposedly DSI also | ||
864 | * needs the ref clock up and running. | ||
865 | * | ||
866 | * CHV DPLL B/C have some issues if VGA mode is enabled. | ||
867 | */ | ||
868 | for_each_pipe(dev_priv->dev, pipe) { | ||
869 | u32 val = I915_READ(DPLL(pipe)); | ||
870 | |||
871 | val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; | ||
872 | if (pipe != PIPE_A) | ||
873 | val |= DPLL_INTEGRATED_CRI_CLK_VLV; | ||
874 | |||
875 | I915_WRITE(DPLL(pipe), val); | ||
876 | } | ||
858 | 877 | ||
859 | spin_lock_irq(&dev_priv->irq_lock); | 878 | spin_lock_irq(&dev_priv->irq_lock); |
860 | valleyview_enable_display_irqs(dev_priv); | 879 | valleyview_enable_display_irqs(dev_priv); |
@@ -906,13 +925,7 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, | |||
906 | { | 925 | { |
907 | WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); | 926 | WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); |
908 | 927 | ||
909 | /* | 928 | /* since ref/cri clock was enabled */ |
910 | * Enable the CRI clock source so we can get at the | ||
911 | * display and the reference clock for VGA | ||
912 | * hotplug / manual detection. | ||
913 | */ | ||
914 | I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS | | ||
915 | DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); | ||
916 | udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ | 929 | udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ |
917 | 930 | ||
918 | vlv_set_power_well(dev_priv, power_well, true); | 931 | vlv_set_power_well(dev_priv, power_well, true); |
@@ -947,30 +960,126 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, | |||
947 | vlv_set_power_well(dev_priv, power_well, false); | 960 | vlv_set_power_well(dev_priv, power_well, false); |
948 | } | 961 | } |
949 | 962 | ||
963 | #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1) | ||
964 | |||
965 | static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv, | ||
966 | int power_well_id) | ||
967 | { | ||
968 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | ||
969 | struct i915_power_well *power_well; | ||
970 | int i; | ||
971 | |||
972 | for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { | ||
973 | if (power_well->data == power_well_id) | ||
974 | return power_well; | ||
975 | } | ||
976 | |||
977 | return NULL; | ||
978 | } | ||
979 | |||
980 | #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) | ||
981 | |||
982 | static void assert_chv_phy_status(struct drm_i915_private *dev_priv) | ||
983 | { | ||
984 | struct i915_power_well *cmn_bc = | ||
985 | lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC); | ||
986 | struct i915_power_well *cmn_d = | ||
987 | lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D); | ||
988 | u32 phy_control = dev_priv->chv_phy_control; | ||
989 | u32 phy_status = 0; | ||
990 | u32 tmp; | ||
991 | |||
992 | if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) { | ||
993 | phy_status |= PHY_POWERGOOD(DPIO_PHY0); | ||
994 | |||
995 | /* this assumes override is only used to enable lanes */ | ||
996 | if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) | ||
997 | phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); | ||
998 | |||
999 | if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) | ||
1000 | phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); | ||
1001 | |||
1002 | /* CL1 is on whenever anything is on in either channel */ | ||
1003 | if (BITS_SET(phy_control, | ||
1004 | PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | | ||
1005 | PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) | ||
1006 | phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); | ||
1007 | |||
1008 | /* | ||
1009 | * The DPLLB check accounts for the pipe B + port A usage | ||
1010 | * with CL2 powered up but all the lanes in the second channel | ||
1011 | * powered down. | ||
1012 | */ | ||
1013 | if (BITS_SET(phy_control, | ||
1014 | PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && | ||
1015 | (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) | ||
1016 | phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); | ||
1017 | |||
1018 | if (BITS_SET(phy_control, | ||
1019 | PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) | ||
1020 | phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); | ||
1021 | if (BITS_SET(phy_control, | ||
1022 | PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) | ||
1023 | phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); | ||
1024 | |||
1025 | if (BITS_SET(phy_control, | ||
1026 | PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) | ||
1027 | phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); | ||
1028 | if (BITS_SET(phy_control, | ||
1029 | PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) | ||
1030 | phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); | ||
1031 | } | ||
1032 | |||
1033 | if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) { | ||
1034 | phy_status |= PHY_POWERGOOD(DPIO_PHY1); | ||
1035 | |||
1036 | /* this assumes override is only used to enable lanes */ | ||
1037 | if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) | ||
1038 | phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); | ||
1039 | |||
1040 | if (BITS_SET(phy_control, | ||
1041 | PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) | ||
1042 | phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); | ||
1043 | |||
1044 | if (BITS_SET(phy_control, | ||
1045 | PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) | ||
1046 | phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); | ||
1047 | if (BITS_SET(phy_control, | ||
1048 | PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) | ||
1049 | phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); | ||
1050 | } | ||
1051 | |||
1052 | /* | ||
1053 | * The PHY may be busy with some initial calibration and whatnot, | ||
1054 | * so the power state can take a while to actually change. | ||
1055 | */ | ||
1056 | if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS)) == phy_status, 10)) | ||
1057 | WARN(phy_status != tmp, | ||
1058 | "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", | ||
1059 | tmp, phy_status, dev_priv->chv_phy_control); | ||
1060 | } | ||
1061 | |||
1062 | #undef BITS_SET | ||
1063 | |||
950 | static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, | 1064 | static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, |
951 | struct i915_power_well *power_well) | 1065 | struct i915_power_well *power_well) |
952 | { | 1066 | { |
953 | enum dpio_phy phy; | 1067 | enum dpio_phy phy; |
1068 | enum pipe pipe; | ||
1069 | uint32_t tmp; | ||
954 | 1070 | ||
955 | WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC && | 1071 | WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC && |
956 | power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D); | 1072 | power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D); |
957 | 1073 | ||
958 | /* | ||
959 | * Enable the CRI clock source so we can get at the | ||
960 | * display and the reference clock for VGA | ||
961 | * hotplug / manual detection. | ||
962 | */ | ||
963 | if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { | 1074 | if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { |
1075 | pipe = PIPE_A; | ||
964 | phy = DPIO_PHY0; | 1076 | phy = DPIO_PHY0; |
965 | I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS | | ||
966 | DPLL_REF_CLK_ENABLE_VLV); | ||
967 | I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS | | ||
968 | DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); | ||
969 | } else { | 1077 | } else { |
1078 | pipe = PIPE_C; | ||
970 | phy = DPIO_PHY1; | 1079 | phy = DPIO_PHY1; |
971 | I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | DPLL_VGA_MODE_DIS | | ||
972 | DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); | ||
973 | } | 1080 | } |
1081 | |||
1082 | /* since ref/cri clock was enabled */ | ||
974 | udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ | 1083 | udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ |
975 | vlv_set_power_well(dev_priv, power_well, true); | 1084 | vlv_set_power_well(dev_priv, power_well, true); |
976 | 1085 | ||
@@ -978,8 +1087,38 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, | |||
978 | if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1)) | 1087 | if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1)) |
979 | DRM_ERROR("Display PHY %d did not power up\n", phy); | 1088 | DRM_ERROR("Display PHY %d did not power up\n", phy); |
980 | 1089 | ||
1090 | mutex_lock(&dev_priv->sb_lock); | ||
1091 | |||
1092 | /* Enable dynamic power down */ | ||
1093 | tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28); | ||
1094 | tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | | ||
1095 | DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; | ||
1096 | vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); | ||
1097 | |||
1098 | if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { | ||
1099 | tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); | ||
1100 | tmp |= DPIO_DYNPWRDOWNEN_CH1; | ||
1101 | vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); | ||
1102 | } else { | ||
1103 | /* | ||
1104 | * Force the non-existing CL2 off. BXT does this | ||
1105 | * too, so maybe it saves some power even though | ||
1106 | * CL2 doesn't exist? | ||
1107 | */ | ||
1108 | tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); | ||
1109 | tmp |= DPIO_CL2_LDOFUSE_PWRENB; | ||
1110 | vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); | ||
1111 | } | ||
1112 | |||
1113 | mutex_unlock(&dev_priv->sb_lock); | ||
1114 | |||
981 | dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); | 1115 | dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); |
982 | I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); | 1116 | I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); |
1117 | |||
1118 | DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", | ||
1119 | phy, dev_priv->chv_phy_control); | ||
1120 | |||
1121 | assert_chv_phy_status(dev_priv); | ||
983 | } | 1122 | } |
984 | 1123 | ||
985 | static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, | 1124 | static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, |
@@ -1003,6 +1142,124 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, | |||
1003 | I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); | 1142 | I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); |
1004 | 1143 | ||
1005 | vlv_set_power_well(dev_priv, power_well, false); | 1144 | vlv_set_power_well(dev_priv, power_well, false); |
1145 | |||
1146 | DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", | ||
1147 | phy, dev_priv->chv_phy_control); | ||
1148 | |||
1149 | assert_chv_phy_status(dev_priv); | ||
1150 | } | ||
1151 | |||
1152 | static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, | ||
1153 | enum dpio_channel ch, bool override, unsigned int mask) | ||
1154 | { | ||
1155 | enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; | ||
1156 | u32 reg, val, expected, actual; | ||
1157 | |||
1158 | if (ch == DPIO_CH0) | ||
1159 | reg = _CHV_CMN_DW0_CH0; | ||
1160 | else | ||
1161 | reg = _CHV_CMN_DW6_CH1; | ||
1162 | |||
1163 | mutex_lock(&dev_priv->sb_lock); | ||
1164 | val = vlv_dpio_read(dev_priv, pipe, reg); | ||
1165 | mutex_unlock(&dev_priv->sb_lock); | ||
1166 | |||
1167 | /* | ||
1168 | * This assumes !override is only used when the port is disabled. | ||
1169 | * All lanes should power down even without the override when | ||
1170 | * the port is disabled. | ||
1171 | */ | ||
1172 | if (!override || mask == 0xf) { | ||
1173 | expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; | ||
1174 | /* | ||
1175 | * If CH1 common lane is not active anymore | ||
1176 | * (e.g. for pipe B DPLL) the entire channel will | ||
1177 | * shut down, which causes the common lane registers | ||
1178 | * to read as 0. That means we can't actually check | ||
1179 | * the lane power down status bits, but as the entire | ||
1180 | * register reads as 0 it's a good indication that the | ||
1181 | * channel is indeed entirely powered down. | ||
1182 | */ | ||
1183 | if (ch == DPIO_CH1 && val == 0) | ||
1184 | expected = 0; | ||
1185 | } else if (mask != 0x0) { | ||
1186 | expected = DPIO_ANYDL_POWERDOWN; | ||
1187 | } else { | ||
1188 | expected = 0; | ||
1189 | } | ||
1190 | |||
1191 | if (ch == DPIO_CH0) | ||
1192 | actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; | ||
1193 | else | ||
1194 | actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; | ||
1195 | actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; | ||
1196 | |||
1197 | WARN(actual != expected, | ||
1198 | "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", | ||
1199 | !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN), | ||
1200 | !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN), | ||
1201 | reg, val); | ||
1202 | } | ||
1203 | |||
1204 | bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, | ||
1205 | enum dpio_channel ch, bool override) | ||
1206 | { | ||
1207 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | ||
1208 | bool was_override; | ||
1209 | |||
1210 | mutex_lock(&power_domains->lock); | ||
1211 | |||
1212 | was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); | ||
1213 | |||
1214 | if (override == was_override) | ||
1215 | goto out; | ||
1216 | |||
1217 | if (override) | ||
1218 | dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); | ||
1219 | else | ||
1220 | dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); | ||
1221 | |||
1222 | I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); | ||
1223 | |||
1224 | DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", | ||
1225 | phy, ch, dev_priv->chv_phy_control); | ||
1226 | |||
1227 | assert_chv_phy_status(dev_priv); | ||
1228 | |||
1229 | out: | ||
1230 | mutex_unlock(&power_domains->lock); | ||
1231 | |||
1232 | return was_override; | ||
1233 | } | ||
1234 | |||
1235 | void chv_phy_powergate_lanes(struct intel_encoder *encoder, | ||
1236 | bool override, unsigned int mask) | ||
1237 | { | ||
1238 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
1239 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | ||
1240 | enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base)); | ||
1241 | enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); | ||
1242 | |||
1243 | mutex_lock(&power_domains->lock); | ||
1244 | |||
1245 | dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); | ||
1246 | dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); | ||
1247 | |||
1248 | if (override) | ||
1249 | dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); | ||
1250 | else | ||
1251 | dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); | ||
1252 | |||
1253 | I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); | ||
1254 | |||
1255 | DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", | ||
1256 | phy, ch, mask, dev_priv->chv_phy_control); | ||
1257 | |||
1258 | assert_chv_phy_status(dev_priv); | ||
1259 | |||
1260 | assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); | ||
1261 | |||
1262 | mutex_unlock(&power_domains->lock); | ||
1006 | } | 1263 | } |
1007 | 1264 | ||
1008 | static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, | 1265 | static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, |
@@ -1166,8 +1423,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, | |||
1166 | intel_runtime_pm_put(dev_priv); | 1423 | intel_runtime_pm_put(dev_priv); |
1167 | } | 1424 | } |
1168 | 1425 | ||
1169 | #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1) | ||
1170 | |||
1171 | #define HSW_ALWAYS_ON_POWER_DOMAINS ( \ | 1426 | #define HSW_ALWAYS_ON_POWER_DOMAINS ( \ |
1172 | BIT(POWER_DOMAIN_PIPE_A) | \ | 1427 | BIT(POWER_DOMAIN_PIPE_A) | \ |
1173 | BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ | 1428 | BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ |
@@ -1429,21 +1684,6 @@ static struct i915_power_well chv_power_wells[] = { | |||
1429 | }, | 1684 | }, |
1430 | }; | 1685 | }; |
1431 | 1686 | ||
1432 | static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv, | ||
1433 | int power_well_id) | ||
1434 | { | ||
1435 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | ||
1436 | struct i915_power_well *power_well; | ||
1437 | int i; | ||
1438 | |||
1439 | for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { | ||
1440 | if (power_well->data == power_well_id) | ||
1441 | return power_well; | ||
1442 | } | ||
1443 | |||
1444 | return NULL; | ||
1445 | } | ||
1446 | |||
1447 | bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, | 1687 | bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, |
1448 | int power_well_id) | 1688 | int power_well_id) |
1449 | { | 1689 | { |
@@ -1629,19 +1869,72 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) | |||
1629 | * DISPLAY_PHY_CONTROL can get corrupted if read. As a | 1869 | * DISPLAY_PHY_CONTROL can get corrupted if read. As a |
1630 | * workaround never ever read DISPLAY_PHY_CONTROL, and | 1870 | * workaround never ever read DISPLAY_PHY_CONTROL, and |
1631 | * instead maintain a shadow copy ourselves. Use the actual | 1871 | * instead maintain a shadow copy ourselves. Use the actual |
1632 | * power well state to reconstruct the expected initial | 1872 | * power well state and lane status to reconstruct the |
1633 | * value. | 1873 | * expected initial value. |
1634 | */ | 1874 | */ |
1635 | dev_priv->chv_phy_control = | 1875 | dev_priv->chv_phy_control = |
1636 | PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | | 1876 | PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | |
1637 | PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | | 1877 | PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | |
1638 | PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH0) | | 1878 | PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | |
1639 | PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH1) | | 1879 | PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) | |
1640 | PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY1, DPIO_CH0); | 1880 | PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0); |
1641 | if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) | 1881 | |
1882 | /* | ||
1883 | * If all lanes are disabled we leave the override disabled | ||
1884 | * with all power down bits cleared to match the state we | ||
1885 | * would use after disabling the port. Otherwise enable the | ||
1886 | * override and set the lane powerdown bits according to the | ||
1887 | * current lane status. | ||
1888 | */ | ||
1889 | if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) { | ||
1890 | uint32_t status = I915_READ(DPLL(PIPE_A)); | ||
1891 | unsigned int mask; | ||
1892 | |||
1893 | mask = status & DPLL_PORTB_READY_MASK; | ||
1894 | if (mask == 0xf) | ||
1895 | mask = 0x0; | ||
1896 | else | ||
1897 | dev_priv->chv_phy_control |= | ||
1898 | PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); | ||
1899 | |||
1900 | dev_priv->chv_phy_control |= | ||
1901 | PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); | ||
1902 | |||
1903 | mask = (status & DPLL_PORTC_READY_MASK) >> 4; | ||
1904 | if (mask == 0xf) | ||
1905 | mask = 0x0; | ||
1906 | else | ||
1907 | dev_priv->chv_phy_control |= | ||
1908 | PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); | ||
1909 | |||
1910 | dev_priv->chv_phy_control |= | ||
1911 | PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); | ||
1912 | |||
1642 | dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); | 1913 | dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); |
1643 | if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) | 1914 | } |
1915 | |||
1916 | if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) { | ||
1917 | uint32_t status = I915_READ(DPIO_PHY_STATUS); | ||
1918 | unsigned int mask; | ||
1919 | |||
1920 | mask = status & DPLL_PORTD_READY_MASK; | ||
1921 | |||
1922 | if (mask == 0xf) | ||
1923 | mask = 0x0; | ||
1924 | else | ||
1925 | dev_priv->chv_phy_control |= | ||
1926 | PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); | ||
1927 | |||
1928 | dev_priv->chv_phy_control |= | ||
1929 | PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); | ||
1930 | |||
1644 | dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); | 1931 | dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); |
1932 | } | ||
1933 | |||
1934 | I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); | ||
1935 | |||
1936 | DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n", | ||
1937 | dev_priv->chv_phy_control); | ||
1645 | } | 1938 | } |
1646 | 1939 | ||
1647 | static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) | 1940 | static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) |
@@ -1687,7 +1980,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) | |||
1687 | power_domains->initializing = true; | 1980 | power_domains->initializing = true; |
1688 | 1981 | ||
1689 | if (IS_CHERRYVIEW(dev)) { | 1982 | if (IS_CHERRYVIEW(dev)) { |
1983 | mutex_lock(&power_domains->lock); | ||
1690 | chv_phy_control_init(dev_priv); | 1984 | chv_phy_control_init(dev_priv); |
1985 | mutex_unlock(&power_domains->lock); | ||
1691 | } else if (IS_VALLEYVIEW(dev)) { | 1986 | } else if (IS_VALLEYVIEW(dev)) { |
1692 | mutex_lock(&power_domains->lock); | 1987 | mutex_lock(&power_domains->lock); |
1693 | vlv_cmnlane_wa(dev_priv); | 1988 | vlv_cmnlane_wa(dev_priv); |
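assert_chv_phy_status() above reconstructs the expected PHY_STATUS value purely from the shadowed PHY_CONTROL word, and the BITS_SET() helper it introduces tests that every bit of a mask is set, not merely some. The helper in isolation (the masks and values below are made up):

    #include <stdio.h>

    #define BITS_SET(val, bits) (((val) & (bits)) == (bits))

    int main(void)
    {
        unsigned int ctl = 0x0000000f;   /* pretend both lane-pair overrides set */

        /* True only when every bit of the mask is present, not just some. */
        printf("all four lanes: %d\n", BITS_SET(ctl, 0xf));
        printf("upper pair:     %d\n", BITS_SET(ctl, 0xc));
        printf("bit 4 as well:  %d\n", BITS_SET(ctl, 0x1f)); /* 0: bit 4 missing */
        return 0;
    }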
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index c98098e884cc..ca3dd7c682bd 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -53,7 +53,7 @@ | |||
53 | #define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK)) | 53 | #define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK)) |
54 | 54 | ||
55 | 55 | ||
56 | static const char *tv_format_names[] = { | 56 | static const char * const tv_format_names[] = { |
57 | "NTSC_M" , "NTSC_J" , "NTSC_443", | 57 | "NTSC_M" , "NTSC_J" , "NTSC_443", |
58 | "PAL_B" , "PAL_D" , "PAL_G" , | 58 | "PAL_B" , "PAL_D" , "PAL_G" , |
59 | "PAL_H" , "PAL_I" , "PAL_M" , | 59 | "PAL_H" , "PAL_I" , "PAL_M" , |
@@ -63,7 +63,7 @@ static const char *tv_format_names[] = { | |||
63 | "SECAM_60" | 63 | "SECAM_60" |
64 | }; | 64 | }; |
65 | 65 | ||
66 | #define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) | 66 | #define TV_FORMAT_NUM ARRAY_SIZE(tv_format_names) |
67 | 67 | ||
68 | struct intel_sdvo { | 68 | struct intel_sdvo { |
69 | struct intel_encoder base; | 69 | struct intel_encoder base; |
@@ -452,7 +452,7 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, | |||
452 | DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer); | 452 | DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer); |
453 | } | 453 | } |
454 | 454 | ||
455 | static const char *cmd_status_names[] = { | 455 | static const char * const cmd_status_names[] = { |
456 | "Power on", | 456 | "Power on", |
457 | "Success", | 457 | "Success", |
458 | "Not supported", | 458 | "Not supported", |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 9d8af2f8a875..9553859ca151 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -76,7 +76,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs) | |||
76 | * avoid random delays. The value written to @start_vbl_count should be | 76 | * avoid random delays. The value written to @start_vbl_count should be |
77 | * supplied to intel_pipe_update_end() for error checking. | 77 | * supplied to intel_pipe_update_end() for error checking. |
78 | */ | 78 | */ |
79 | void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count) | 79 | void intel_pipe_update_start(struct intel_crtc *crtc) |
80 | { | 80 | { |
81 | struct drm_device *dev = crtc->base.dev; | 81 | struct drm_device *dev = crtc->base.dev; |
82 | const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode; | 82 | const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode; |
@@ -95,7 +95,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count) | |||
95 | max = vblank_start - 1; | 95 | max = vblank_start - 1; |
96 | 96 | ||
97 | local_irq_disable(); | 97 | local_irq_disable(); |
98 | *start_vbl_count = 0; | 98 | crtc->start_vbl_count = 0; |
99 | 99 | ||
100 | if (min <= 0 || max <= 0) | 100 | if (min <= 0 || max <= 0) |
101 | return; | 101 | return; |
@@ -134,9 +134,11 @@ void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count) | |||
134 | 134 | ||
135 | drm_crtc_vblank_put(&crtc->base); | 135 | drm_crtc_vblank_put(&crtc->base); |
136 | 136 | ||
137 | *start_vbl_count = dev->driver->get_vblank_counter(dev, pipe); | 137 | crtc->start_vbl_time = ktime_get(); |
138 | crtc->start_vbl_count = dev->driver->get_vblank_counter(dev, pipe); | ||
138 | 139 | ||
139 | trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count); | 140 | trace_i915_pipe_update_vblank_evaded(crtc, min, max, |
141 | crtc->start_vbl_count); | ||
140 | } | 142 | } |
141 | 143 | ||
142 | /** | 144 | /** |
@@ -148,19 +150,21 @@ void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count) | |||
148 | * re-enables interrupts and verifies the update was actually completed | 150 | * re-enables interrupts and verifies the update was actually completed |
149 | * before a vblank using the value of @start_vbl_count. | 151 | * before a vblank using the value of @start_vbl_count. |
150 | */ | 152 | */ |
151 | void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count) | 153 | void intel_pipe_update_end(struct intel_crtc *crtc) |
152 | { | 154 | { |
153 | struct drm_device *dev = crtc->base.dev; | 155 | struct drm_device *dev = crtc->base.dev; |
154 | enum pipe pipe = crtc->pipe; | 156 | enum pipe pipe = crtc->pipe; |
155 | u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe); | 157 | u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe); |
158 | ktime_t end_vbl_time = ktime_get(); | ||
156 | 159 | ||
157 | trace_i915_pipe_update_end(crtc, end_vbl_count); | 160 | trace_i915_pipe_update_end(crtc, end_vbl_count); |
158 | 161 | ||
159 | local_irq_enable(); | 162 | local_irq_enable(); |
160 | 163 | ||
161 | if (start_vbl_count && start_vbl_count != end_vbl_count) | 164 | if (crtc->start_vbl_count && crtc->start_vbl_count != end_vbl_count) |
162 | DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n", | 165 | DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us\n", |
163 | pipe_name(pipe), start_vbl_count, end_vbl_count); | 166 | pipe_name(pipe), crtc->start_vbl_count, end_vbl_count, |
167 | ktime_us_delta(end_vbl_time, crtc->start_vbl_time)); | ||
164 | } | 168 | } |
165 | 169 | ||
166 | static void | 170 | static void |
@@ -923,8 +927,6 @@ intel_commit_sprite_plane(struct drm_plane *plane, | |||
923 | 927 | ||
924 | crtc = crtc ? crtc : plane->crtc; | 928 | crtc = crtc ? crtc : plane->crtc; |
925 | 929 | ||
926 | plane->fb = fb; | ||
927 | |||
928 | if (!crtc->state->active) | 930 | if (!crtc->state->active) |
929 | return; | 931 | return; |
930 | 932 | ||
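The intel_sprite.c change stores the vblank-evasion bookkeeping in the crtc (start_vbl_count plus a new start_vbl_time stamp) so a failed atomic update can also report how long the critical section took. A userspace sketch of that measure-and-compare pattern (the clock source and counter values are stand-ins):

    #include <stdio.h>
    #include <time.h>

    static long long now_us(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
    }

    int main(void)
    {
        /* stand-ins for get_vblank_counter() before and after the update */
        unsigned int start_vbl_count = 100, end_vbl_count = 101;
        long long start_vbl_time = now_us();

        /* ... the critical pipe update would run here ... */

        long long end_vbl_time = now_us();

        if (start_vbl_count && start_vbl_count != end_vbl_count)
            fprintf(stderr,
                    "Atomic update failure (start=%u end=%u) time %lld us\n",
                    start_vbl_count, end_vbl_count,
                    end_vbl_time - start_vbl_time);
        return 0;
    }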
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 590ceabe2d5e..29983cba2e3e 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1291,7 +1291,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector) | |||
1291 | return; | 1291 | return; |
1292 | 1292 | ||
1293 | 1293 | ||
1294 | for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) { | 1294 | for (i = 0; i < ARRAY_SIZE(tv_modes); i++) { |
1295 | tv_mode = tv_modes + i; | 1295 | tv_mode = tv_modes + i; |
1296 | 1296 | ||
1297 | if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) == | 1297 | if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) == |
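The sdvo and tv hunks (and the uncore one below) are the same cleanup: replace open-coded sizeof(a)/sizeof(*a) with ARRAY_SIZE(). Stripped of the kernel version's extra pointer-rejecting type check, the idiom is:

    #include <stdio.h>

    /* The kernel's version also rejects pointers at compile time. */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    static const char * const tv_format_names[] = {
        "NTSC_M", "NTSC_J", "NTSC_443",
    };

    int main(void)
    {
        for (size_t i = 0; i < ARRAY_SIZE(tv_format_names); i++)
            printf("%zu: %s\n", i, tv_format_names[i]);
        return 0;
    }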
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 9d3c2e420d2b..440e2a5658af 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -52,8 +52,7 @@ static const char * const forcewake_domain_names[] = { | |||
52 | const char * | 52 | const char * |
53 | intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id) | 53 | intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id) |
54 | { | 54 | { |
55 | BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) != | 55 | BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT); |
56 | FW_DOMAIN_ID_COUNT); | ||
57 | 56 | ||
58 | if (id >= 0 && id < FW_DOMAIN_ID_COUNT) | 57 | if (id >= 0 && id < FW_DOMAIN_ID_COUNT) |
59 | return forcewake_domain_names[id]; | 58 | return forcewake_domain_names[id]; |
@@ -770,6 +769,7 @@ static u##x \ | |||
770 | gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ | 769 | gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ |
771 | enum forcewake_domains fw_engine; \ | 770 | enum forcewake_domains fw_engine; \ |
772 | GEN6_READ_HEADER(x); \ | 771 | GEN6_READ_HEADER(x); \ |
772 | hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ | ||
773 | if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \ | 773 | if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \ |
774 | fw_engine = 0; \ | 774 | fw_engine = 0; \ |
775 | else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \ | 775 | else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \ |
@@ -783,6 +783,7 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ | |||
783 | if (fw_engine) \ | 783 | if (fw_engine) \ |
784 | __force_wake_get(dev_priv, fw_engine); \ | 784 | __force_wake_get(dev_priv, fw_engine); \ |
785 | val = __raw_i915_read##x(dev_priv, reg); \ | 785 | val = __raw_i915_read##x(dev_priv, reg); \ |
786 | hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \ | ||
786 | GEN6_READ_FOOTER; \ | 787 | GEN6_READ_FOOTER; \ |
787 | } | 788 | } |
788 | 789 | ||
@@ -983,6 +984,7 @@ gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \ | |||
983 | bool trace) { \ | 984 | bool trace) { \ |
984 | enum forcewake_domains fw_engine; \ | 985 | enum forcewake_domains fw_engine; \ |
985 | GEN6_WRITE_HEADER; \ | 986 | GEN6_WRITE_HEADER; \ |
987 | hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ | ||
986 | if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \ | 988 | if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \ |
987 | is_gen9_shadowed(dev_priv, reg)) \ | 989 | is_gen9_shadowed(dev_priv, reg)) \ |
988 | fw_engine = 0; \ | 990 | fw_engine = 0; \ |
@@ -997,6 +999,8 @@ gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \ | |||
997 | if (fw_engine) \ | 999 | if (fw_engine) \ |
998 | __force_wake_get(dev_priv, fw_engine); \ | 1000 | __force_wake_get(dev_priv, fw_engine); \ |
999 | __raw_i915_write##x(dev_priv, reg, val); \ | 1001 | __raw_i915_write##x(dev_priv, reg, val); \ |
1002 | hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \ | ||
1003 | hsw_unclaimed_reg_detect(dev_priv); \ | ||
1000 | GEN6_WRITE_FOOTER; \ | 1004 | GEN6_WRITE_FOOTER; \ |
1001 | } | 1005 | } |
1002 | 1006 | ||
@@ -1198,8 +1202,6 @@ void intel_uncore_init(struct drm_device *dev) | |||
1198 | 1202 | ||
1199 | switch (INTEL_INFO(dev)->gen) { | 1203 | switch (INTEL_INFO(dev)->gen) { |
1200 | default: | 1204 | default: |
1201 | MISSING_CASE(INTEL_INFO(dev)->gen); | ||
1202 | return; | ||
1203 | case 9: | 1205 | case 9: |
1204 | ASSIGN_WRITE_MMIO_VFUNCS(gen9); | 1206 | ASSIGN_WRITE_MMIO_VFUNCS(gen9); |
1205 | ASSIGN_READ_MMIO_VFUNCS(gen9); | 1207 | ASSIGN_READ_MMIO_VFUNCS(gen9); |
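The gen9 read/write macros above gain the hsw_unclaimed_reg_debug() bracketing around the raw MMIO access that earlier gens already had. The shape, pulled out of the macro machinery (the hook and register file below are simulated, not the driver's implementation):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int regs[256];

    /* A real driver checks and clears a sticky hardware error bit here;
     * we simulate one for registers outside a pretend "claimed" range. */
    static void unclaimed_reg_debug(unsigned int reg, bool read, bool before)
    {
        if (!before && reg >= 0x80)
            fprintf(stderr, "Unclaimed register %s to 0x%x\n",
                    read ? "read" : "write", reg);
    }

    static unsigned int mmio_read(unsigned int reg)
    {
        unsigned int val;

        unclaimed_reg_debug(reg, true, true);   /* before the access */
        val = regs[reg];                        /* the raw MMIO read */
        unclaimed_reg_debug(reg, true, false);  /* after, report errors */
        return val;
    }

    int main(void)
    {
        printf("reg 0x10 = 0x%x\n", mmio_read(0x10));
        printf("reg 0x90 = 0x%x\n", mmio_read(0x90)); /* flags a complaint */
        return 0;
    }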
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index d0c88107996a..9ec4716df7b5 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h | |||
@@ -634,6 +634,13 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) | |||
634 | (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); | 634 | (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); |
635 | } | 635 | } |
636 | 636 | ||
637 | static inline bool | ||
638 | drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) | ||
639 | { | ||
640 | return dpcd[DP_DPCD_REV] >= 0x12 && | ||
641 | dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED; | ||
642 | } | ||
643 | |||
637 | /* | 644 | /* |
638 | * DisplayPort AUX channel | 645 | * DisplayPort AUX channel |
639 | */ | 646 | */ |
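drm_dp_tps3_supported() gates link training pattern 3 on both a DPCD revision of 1.2 or later and the sink's TPS3 capability bit. A standalone check against a raw DPCD buffer (the offsets and bit values mirror the header above; the buffer contents are made up):

    #include <stdbool.h>
    #include <stdio.h>

    #define DP_RECEIVER_CAP_SIZE 0xf
    #define DP_DPCD_REV          0x000
    #define DP_MAX_LANE_COUNT    0x002
    #define DP_TPS3_SUPPORTED    (1 << 6)

    static bool dp_tps3_supported(const unsigned char dpcd[DP_RECEIVER_CAP_SIZE])
    {
        return dpcd[DP_DPCD_REV] >= 0x12 &&
               (dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED);
    }

    int main(void)
    {
        unsigned char dpcd[DP_RECEIVER_CAP_SIZE] = { 0 };

        dpcd[DP_DPCD_REV] = 0x12;                          /* DP 1.2 sink */
        dpcd[DP_MAX_LANE_COUNT] = 0x4 | DP_TPS3_SUPPORTED; /* 4 lanes + TPS3 */

        printf("TPS3: %s\n", dp_tps3_supported(dpcd) ? "yes" : "no");
        return 0;
    }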