author     Dave Airlie <airlied@redhat.com>    2009-12-07 23:03:47 -0500
committer  Dave Airlie <airlied@redhat.com>    2009-12-07 23:03:47 -0500
commit     3ff99164f67aae78a2bd2313f65ad55bddb1ffea (patch)
tree       d6bba03616d1be6ab9e6d9e92641a6f4047e1e15
parent     1bd049fa895f9c6743f38b52ce14775f5a31ea63 (diff)
parent     f2b115e69d46344ae7afcaad5823496d2a0d8650 (diff)
Merge remote branch 'anholt/drm-intel-next' into drm-linus
This merges the upstream Intel tree and fixes up numerous conflicts
due to patches merged into Linus' tree later in the -rc cycle.
Conflicts:
drivers/char/agp/intel-agp.c
drivers/gpu/drm/drm_dp_i2c_helper.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_suspend.c
38 files changed, 3405 insertions(+), 829 deletions(-)
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 3cb56a049e24..37678550b3eb 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -36,10 +36,10 @@ | |||
36 | #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 | 36 | #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 |
37 | #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC | 37 | #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC |
38 | #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE | 38 | #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE |
39 | #define PCI_DEVICE_ID_INTEL_IGDGM_HB 0xA010 | 39 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010 |
40 | #define PCI_DEVICE_ID_INTEL_IGDGM_IG 0xA011 | 40 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011 |
41 | #define PCI_DEVICE_ID_INTEL_IGDG_HB 0xA000 | 41 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000 |
42 | #define PCI_DEVICE_ID_INTEL_IGDG_IG 0xA001 | 42 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001 |
43 | #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 | 43 | #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 |
44 | #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 | 44 | #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 |
45 | #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 | 45 | #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 |
@@ -50,20 +50,20 @@ | |||
50 | #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 | 50 | #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 |
51 | #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 | 51 | #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 |
52 | #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 | 52 | #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 |
53 | #define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00 | 53 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 |
54 | #define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02 | 54 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02 |
55 | #define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 | 55 | #define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 |
56 | #define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 | 56 | #define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 |
57 | #define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 | 57 | #define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 |
58 | #define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 | 58 | #define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 |
59 | #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 | 59 | #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 |
60 | #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 | 60 | #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 |
61 | #define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040 | 61 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 |
62 | #define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042 | 62 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 |
63 | #define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044 | 63 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 |
64 | #define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB 0x0062 | 64 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 |
65 | #define PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB 0x006a | 65 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a |
66 | #define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046 | 66 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 |
67 | 67 | ||
68 | /* cover 915 and 945 variants */ | 68 | /* cover 915 and 945 variants */ |
69 | #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ | 69 | #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ |
@@ -83,22 +83,22 @@ | |||
83 | #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ | 83 | #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ |
84 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ | 84 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ |
85 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ | 85 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ |
86 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \ | 86 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ |
87 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB) | 87 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) |
88 | 88 | ||
89 | #define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \ | 89 | #define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ |
90 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB) | 90 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) |
91 | 91 | ||
92 | #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \ | 92 | #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ |
93 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ | 93 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ |
94 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ | 94 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ |
95 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ | 95 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ |
96 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ | 96 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ |
97 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ | 97 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ |
98 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \ | 98 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ |
99 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \ | 99 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ |
100 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB || \ | 100 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ |
101 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB) | 101 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB) |
102 | 102 | ||
103 | extern int agp_memory_reserved; | 103 | extern int agp_memory_reserved; |
104 | 104 | ||
@@ -653,7 +653,7 @@ static void intel_i830_init_gtt_entries(void) | |||
653 | size = 512; | 653 | size = 512; |
654 | } | 654 | } |
655 | size += 4; /* add in BIOS popup space */ | 655 | size += 4; /* add in BIOS popup space */ |
656 | } else if (IS_G33 && !IS_IGD) { | 656 | } else if (IS_G33 && !IS_PINEVIEW) { |
657 | /* G33's GTT size defined in gmch_ctrl */ | 657 | /* G33's GTT size defined in gmch_ctrl */ |
658 | switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { | 658 | switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { |
659 | case G33_PGETBL_SIZE_1M: | 659 | case G33_PGETBL_SIZE_1M: |
@@ -669,7 +669,7 @@ static void intel_i830_init_gtt_entries(void) | |||
669 | size = 512; | 669 | size = 512; |
670 | } | 670 | } |
671 | size += 4; | 671 | size += 4; |
672 | } else if (IS_G4X || IS_IGD) { | 672 | } else if (IS_G4X || IS_PINEVIEW) { |
673 | /* On 4 series hardware, GTT stolen is separate from graphics | 673 | /* On 4 series hardware, GTT stolen is separate from graphics |
674 | * stolen, ignore it in stolen gtt entries counting. However, | 674 | * stolen, ignore it in stolen gtt entries counting. However, |
675 | * 4KB of the stolen memory doesn't get mapped to the GTT. | 675 | * 4KB of the stolen memory doesn't get mapped to the GTT. |
@@ -1352,15 +1352,15 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) | |||
1352 | { | 1352 | { |
1353 | switch (agp_bridge->dev->device) { | 1353 | switch (agp_bridge->dev->device) { |
1354 | case PCI_DEVICE_ID_INTEL_GM45_HB: | 1354 | case PCI_DEVICE_ID_INTEL_GM45_HB: |
1355 | case PCI_DEVICE_ID_INTEL_IGD_E_HB: | 1355 | case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: |
1356 | case PCI_DEVICE_ID_INTEL_Q45_HB: | 1356 | case PCI_DEVICE_ID_INTEL_Q45_HB: |
1357 | case PCI_DEVICE_ID_INTEL_G45_HB: | 1357 | case PCI_DEVICE_ID_INTEL_G45_HB: |
1358 | case PCI_DEVICE_ID_INTEL_G41_HB: | 1358 | case PCI_DEVICE_ID_INTEL_G41_HB: |
1359 | case PCI_DEVICE_ID_INTEL_B43_HB: | 1359 | case PCI_DEVICE_ID_INTEL_B43_HB: |
1360 | case PCI_DEVICE_ID_INTEL_IGDNG_D_HB: | 1360 | case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB: |
1361 | case PCI_DEVICE_ID_INTEL_IGDNG_M_HB: | 1361 | case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: |
1362 | case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB: | 1362 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: |
1363 | case PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB: | 1363 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: |
1364 | *gtt_offset = *gtt_size = MB(2); | 1364 | *gtt_offset = *gtt_size = MB(2); |
1365 | break; | 1365 | break; |
1366 | default: | 1366 | default: |
@@ -2340,14 +2340,14 @@ static const struct intel_driver_description { | |||
2340 | NULL, &intel_g33_driver }, | 2340 | NULL, &intel_g33_driver }, |
2341 | { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", | 2341 | { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", |
2342 | NULL, &intel_g33_driver }, | 2342 | NULL, &intel_g33_driver }, |
2343 | { PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD", | 2343 | { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview", |
2344 | NULL, &intel_g33_driver }, | 2344 | NULL, &intel_g33_driver }, |
2345 | { PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD", | 2345 | { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview", |
2346 | NULL, &intel_g33_driver }, | 2346 | NULL, &intel_g33_driver }, |
2347 | { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, | 2347 | { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, |
2348 | "Mobile Intel® GM45 Express", NULL, &intel_i965_driver }, | 2348 | "GM45", NULL, &intel_i965_driver }, |
2349 | { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0, | 2349 | { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0, |
2350 | "Intel Integrated Graphics Device", NULL, &intel_i965_driver }, | 2350 | "Eaglelake", NULL, &intel_i965_driver }, |
2351 | { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, | 2351 | { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, |
2352 | "Q45/Q43", NULL, &intel_i965_driver }, | 2352 | "Q45/Q43", NULL, &intel_i965_driver }, |
2353 | { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, | 2353 | { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, |
@@ -2356,14 +2356,14 @@ static const struct intel_driver_description { | |||
2356 | "B43", NULL, &intel_i965_driver }, | 2356 | "B43", NULL, &intel_i965_driver }, |
2357 | { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, | 2357 | { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, |
2358 | "G41", NULL, &intel_i965_driver }, | 2358 | "G41", NULL, &intel_i965_driver }, |
2359 | { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0, | 2359 | { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0, |
2360 | "IGDNG/D", NULL, &intel_i965_driver }, | 2360 | "Ironlake/D", NULL, &intel_i965_driver }, |
2361 | { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, | 2361 | { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, |
2362 | "IGDNG/M", NULL, &intel_i965_driver }, | 2362 | "Ironlake/M", NULL, &intel_i965_driver }, |
2363 | { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, | 2363 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, |
2364 | "IGDNG/MA", NULL, &intel_i965_driver }, | 2364 | "Ironlake/MA", NULL, &intel_i965_driver }, |
2365 | { PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, | 2365 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, |
2366 | "IGDNG/MC2", NULL, &intel_i965_driver }, | 2366 | "Ironlake/MC2", NULL, &intel_i965_driver }, |
2367 | { 0, 0, 0, NULL, NULL, NULL } | 2367 | { 0, 0, 0, NULL, NULL, NULL } |
2368 | }; | 2368 | }; |
2369 | 2369 | ||
@@ -2545,8 +2545,8 @@ static struct pci_device_id agp_intel_pci_table[] = { | |||
2545 | ID(PCI_DEVICE_ID_INTEL_82945G_HB), | 2545 | ID(PCI_DEVICE_ID_INTEL_82945G_HB), |
2546 | ID(PCI_DEVICE_ID_INTEL_82945GM_HB), | 2546 | ID(PCI_DEVICE_ID_INTEL_82945GM_HB), |
2547 | ID(PCI_DEVICE_ID_INTEL_82945GME_HB), | 2547 | ID(PCI_DEVICE_ID_INTEL_82945GME_HB), |
2548 | ID(PCI_DEVICE_ID_INTEL_IGDGM_HB), | 2548 | ID(PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB), |
2549 | ID(PCI_DEVICE_ID_INTEL_IGDG_HB), | 2549 | ID(PCI_DEVICE_ID_INTEL_PINEVIEW_HB), |
2550 | ID(PCI_DEVICE_ID_INTEL_82946GZ_HB), | 2550 | ID(PCI_DEVICE_ID_INTEL_82946GZ_HB), |
2551 | ID(PCI_DEVICE_ID_INTEL_82G35_HB), | 2551 | ID(PCI_DEVICE_ID_INTEL_82G35_HB), |
2552 | ID(PCI_DEVICE_ID_INTEL_82965Q_HB), | 2552 | ID(PCI_DEVICE_ID_INTEL_82965Q_HB), |
@@ -2557,15 +2557,15 @@ static struct pci_device_id agp_intel_pci_table[] = { | |||
2557 | ID(PCI_DEVICE_ID_INTEL_Q35_HB), | 2557 | ID(PCI_DEVICE_ID_INTEL_Q35_HB), |
2558 | ID(PCI_DEVICE_ID_INTEL_Q33_HB), | 2558 | ID(PCI_DEVICE_ID_INTEL_Q33_HB), |
2559 | ID(PCI_DEVICE_ID_INTEL_GM45_HB), | 2559 | ID(PCI_DEVICE_ID_INTEL_GM45_HB), |
2560 | ID(PCI_DEVICE_ID_INTEL_IGD_E_HB), | 2560 | ID(PCI_DEVICE_ID_INTEL_EAGLELAKE_HB), |
2561 | ID(PCI_DEVICE_ID_INTEL_Q45_HB), | 2561 | ID(PCI_DEVICE_ID_INTEL_Q45_HB), |
2562 | ID(PCI_DEVICE_ID_INTEL_G45_HB), | 2562 | ID(PCI_DEVICE_ID_INTEL_G45_HB), |
2563 | ID(PCI_DEVICE_ID_INTEL_G41_HB), | 2563 | ID(PCI_DEVICE_ID_INTEL_G41_HB), |
2564 | ID(PCI_DEVICE_ID_INTEL_B43_HB), | 2564 | ID(PCI_DEVICE_ID_INTEL_B43_HB), |
2565 | ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB), | 2565 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), |
2566 | ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB), | 2566 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), |
2567 | ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB), | 2567 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), |
2568 | ID(PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB), | 2568 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), |
2569 | { } | 2569 | { } |
2570 | }; | 2570 | }; |
2571 | 2571 | ||
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 4fe321dc900c..4a7bbdbedfc2 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -256,7 +256,8 @@ static void drm_mode_object_put(struct drm_device *dev, | |||
256 | mutex_unlock(&dev->mode_config.idr_mutex); | 256 | mutex_unlock(&dev->mode_config.idr_mutex); |
257 | } | 257 | } |
258 | 258 | ||
259 | void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) | 259 | struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, |
260 | uint32_t id, uint32_t type) | ||
260 | { | 261 | { |
261 | struct drm_mode_object *obj = NULL; | 262 | struct drm_mode_object *obj = NULL; |
262 | 263 | ||
@@ -2630,7 +2631,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, | |||
2630 | goto out; | 2631 | goto out; |
2631 | } | 2632 | } |
2632 | 2633 | ||
2633 | e->event.base.type = DRM_EVENT_VBLANK; | 2634 | e->event.base.type = DRM_EVENT_FLIP_COMPLETE; |
2634 | e->event.base.length = sizeof e->event; | 2635 | e->event.base.length = sizeof e->event; |
2635 | e->event.user_data = page_flip->user_data; | 2636 | e->event.user_data = page_flip->user_data; |
2636 | e->base.event = &e->event.base; | 2637 | e->base.event = &e->event.base; |
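Two user-visible changes in drm_crtc.c above: drm_mode_object_find() now returns struct drm_mode_object * instead of void *, and a completed page flip is reported to user space as DRM_EVENT_FLIP_COMPLETE rather than DRM_EVENT_VBLANK. A minimal sketch of a caller adapted to the new return type (hypothetical caller, not part of this patch):

```c
/* Hypothetical caller -- not from this patch.  With the new signature the
 * lookup returns struct drm_mode_object *, so conversion to the containing
 * structure is done explicitly with obj_to_crtc() from drm_crtc.h. */
#include "drmP.h"
#include "drm_crtc.h"

static struct drm_crtc *example_lookup_crtc(struct drm_device *dev,
					    uint32_t crtc_id)
{
	struct drm_mode_object *obj;

	obj = drm_mode_object_find(dev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj)
		return NULL;		/* no object with that id/type */
	return obj_to_crtc(obj);
}
```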
diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_i2c_helper.c
index f1c7c856e9db..548887c8506f 100644
--- a/drivers/gpu/drm/drm_dp_i2c_helper.c
+++ b/drivers/gpu/drm/drm_dp_i2c_helper.c
@@ -160,7 +160,7 @@ i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter, | |||
160 | if (ret >= 0) | 160 | if (ret >= 0) |
161 | ret = num; | 161 | ret = num; |
162 | i2c_algo_dp_aux_stop(adapter, reading); | 162 | i2c_algo_dp_aux_stop(adapter, reading); |
163 | DRM_DEBUG("dp_aux_xfer return %d\n", ret); | 163 | DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret); |
164 | return ret; | 164 | return ret; |
165 | } | 165 | } |
166 | 166 | ||
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 6b3ce6d38848..7998ee66b317 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -429,15 +429,21 @@ int drm_vblank_get(struct drm_device *dev, int crtc) | |||
429 | 429 | ||
430 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 430 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
431 | /* Going from 0->1 means we have to enable interrupts again */ | 431 | /* Going from 0->1 means we have to enable interrupts again */ |
432 | if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 && | 432 | if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { |
433 | !dev->vblank_enabled[crtc]) { | 433 | if (!dev->vblank_enabled[crtc]) { |
434 | ret = dev->driver->enable_vblank(dev, crtc); | 434 | ret = dev->driver->enable_vblank(dev, crtc); |
435 | DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); | 435 | DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); |
436 | if (ret) | 436 | if (ret) |
437 | atomic_dec(&dev->vblank_refcount[crtc]); | ||
438 | else { | ||
439 | dev->vblank_enabled[crtc] = 1; | ||
440 | drm_update_vblank_count(dev, crtc); | ||
441 | } | ||
442 | } | ||
443 | } else { | ||
444 | if (!dev->vblank_enabled[crtc]) { | ||
437 | atomic_dec(&dev->vblank_refcount[crtc]); | 445 | atomic_dec(&dev->vblank_refcount[crtc]); |
438 | else { | 446 | ret = -EINVAL; |
439 | dev->vblank_enabled[crtc] = 1; | ||
440 | drm_update_vblank_count(dev, crtc); | ||
441 | } | 447 | } |
442 | } | 448 | } |
443 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | 449 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); |
@@ -464,6 +470,18 @@ void drm_vblank_put(struct drm_device *dev, int crtc) | |||
464 | } | 470 | } |
465 | EXPORT_SYMBOL(drm_vblank_put); | 471 | EXPORT_SYMBOL(drm_vblank_put); |
466 | 472 | ||
473 | void drm_vblank_off(struct drm_device *dev, int crtc) | ||
474 | { | ||
475 | unsigned long irqflags; | ||
476 | |||
477 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
478 | DRM_WAKEUP(&dev->vbl_queue[crtc]); | ||
479 | dev->vblank_enabled[crtc] = 0; | ||
480 | dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); | ||
481 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
482 | } | ||
483 | EXPORT_SYMBOL(drm_vblank_off); | ||
484 | |||
467 | /** | 485 | /** |
468 | * drm_vblank_pre_modeset - account for vblanks across mode sets | 486 | * drm_vblank_pre_modeset - account for vblanks across mode sets |
469 | * @dev: DRM device | 487 | * @dev: DRM device |
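The reworked drm_vblank_get() above refuses to take a reference (returning -EINVAL) when the refcount is already non-zero but vblank interrupts never got enabled, and the new drm_vblank_off() lets drivers wake vblank waiters and latch the hardware counter before turning a pipe off. A sketch of how a driver's disable path might use it (assumed driver code, not part of this patch; the helpers are hypothetical):

```c
/* Assumed example -- not from this patch.  A KMS driver turning a CRTC
 * off calls drm_vblank_off() first so that any task sleeping in a vblank
 * wait is woken and the last hardware count is recorded before the pipe
 * stops generating interrupts. */
#include "drmP.h"

static void example_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	int pipe = example_crtc_to_pipe(crtc);	/* hypothetical helper */

	drm_vblank_off(dev, pipe);		/* wake waiters, latch count */
	example_hw_disable_pipe(dev, pipe);	/* hypothetical hw poke */
}
```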
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e3d049229cdd..9929f84ec3e1 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ | |||
22 | intel_fb.o \ | 22 | intel_fb.o \ |
23 | intel_tv.o \ | 23 | intel_tv.o \ |
24 | intel_dvo.o \ | 24 | intel_dvo.o \ |
25 | intel_overlay.o \ | ||
25 | dvo_ch7xxx.o \ | 26 | dvo_ch7xxx.o \ |
26 | dvo_ch7017.o \ | 27 | dvo_ch7017.o \ |
27 | dvo_ivch.o \ | 28 | dvo_ivch.o \ |
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 621815b531db..1184c14ba87d 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -249,7 +249,8 @@ static bool ch7017_init(struct intel_dvo_device *dvo, | |||
249 | if (val != CH7017_DEVICE_ID_VALUE && | 249 | if (val != CH7017_DEVICE_ID_VALUE && |
250 | val != CH7018_DEVICE_ID_VALUE && | 250 | val != CH7018_DEVICE_ID_VALUE && |
251 | val != CH7019_DEVICE_ID_VALUE) { | 251 | val != CH7019_DEVICE_ID_VALUE) { |
252 | DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n", | 252 | DRM_DEBUG_KMS("ch701x not detected, got %d: from %s " |
253 | "Slave %d.\n", | ||
253 | val, i2cbus->adapter.name,dvo->slave_addr); | 254 | val, i2cbus->adapter.name,dvo->slave_addr); |
254 | goto fail; | 255 | goto fail; |
255 | } | 256 | } |
@@ -284,7 +285,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo, | |||
284 | uint8_t horizontal_active_pixel_output, vertical_active_line_output; | 285 | uint8_t horizontal_active_pixel_output, vertical_active_line_output; |
285 | uint8_t active_input_line_output; | 286 | uint8_t active_input_line_output; |
286 | 287 | ||
287 | DRM_DEBUG("Registers before mode setting\n"); | 288 | DRM_DEBUG_KMS("Registers before mode setting\n"); |
288 | ch7017_dump_regs(dvo); | 289 | ch7017_dump_regs(dvo); |
289 | 290 | ||
290 | /* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/ | 291 | /* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/ |
@@ -346,7 +347,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo, | |||
346 | /* Turn the LVDS back on with new settings. */ | 347 | /* Turn the LVDS back on with new settings. */ |
347 | ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down); | 348 | ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down); |
348 | 349 | ||
349 | DRM_DEBUG("Registers after mode setting\n"); | 350 | DRM_DEBUG_KMS("Registers after mode setting\n"); |
350 | ch7017_dump_regs(dvo); | 351 | ch7017_dump_regs(dvo); |
351 | } | 352 | } |
352 | 353 | ||
@@ -386,7 +387,7 @@ static void ch7017_dump_regs(struct intel_dvo_device *dvo) | |||
386 | #define DUMP(reg) \ | 387 | #define DUMP(reg) \ |
387 | do { \ | 388 | do { \ |
388 | ch7017_read(dvo, reg, &val); \ | 389 | ch7017_read(dvo, reg, &val); \ |
389 | DRM_DEBUG(#reg ": %02x\n", val); \ | 390 | DRM_DEBUG_KMS(#reg ": %02x\n", val); \ |
390 | } while (0) | 391 | } while (0) |
391 | 392 | ||
392 | DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT); | 393 | DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT); |
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index a9b896289680..d56ff5cc22b2 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -152,7 +152,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | |||
152 | }; | 152 | }; |
153 | 153 | ||
154 | if (!ch7xxx->quiet) { | 154 | if (!ch7xxx->quiet) { |
155 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", | 155 | DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", |
156 | addr, i2cbus->adapter.name, dvo->slave_addr); | 156 | addr, i2cbus->adapter.name, dvo->slave_addr); |
157 | } | 157 | } |
158 | return false; | 158 | return false; |
@@ -179,7 +179,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | |||
179 | return true; | 179 | return true; |
180 | 180 | ||
181 | if (!ch7xxx->quiet) { | 181 | if (!ch7xxx->quiet) { |
182 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", | 182 | DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", |
183 | addr, i2cbus->adapter.name, dvo->slave_addr); | 183 | addr, i2cbus->adapter.name, dvo->slave_addr); |
184 | } | 184 | } |
185 | 185 | ||
@@ -207,7 +207,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, | |||
207 | 207 | ||
208 | name = ch7xxx_get_id(vendor); | 208 | name = ch7xxx_get_id(vendor); |
209 | if (!name) { | 209 | if (!name) { |
210 | DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", | 210 | DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s " |
211 | "slave %d.\n", | ||
211 | vendor, adapter->name, dvo->slave_addr); | 212 | vendor, adapter->name, dvo->slave_addr); |
212 | goto out; | 213 | goto out; |
213 | } | 214 | } |
@@ -217,13 +218,14 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, | |||
217 | goto out; | 218 | goto out; |
218 | 219 | ||
219 | if (device != CH7xxx_DID) { | 220 | if (device != CH7xxx_DID) { |
220 | DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", | 221 | DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s " |
222 | "slave %d.\n", | ||
221 | vendor, adapter->name, dvo->slave_addr); | 223 | vendor, adapter->name, dvo->slave_addr); |
222 | goto out; | 224 | goto out; |
223 | } | 225 | } |
224 | 226 | ||
225 | ch7xxx->quiet = false; | 227 | ch7xxx->quiet = false; |
226 | DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n", | 228 | DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n", |
227 | name, vendor, device); | 229 | name, vendor, device); |
228 | return true; | 230 | return true; |
229 | out: | 231 | out: |
@@ -315,8 +317,8 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) | |||
315 | 317 | ||
316 | for (i = 0; i < CH7xxx_NUM_REGS; i++) { | 318 | for (i = 0; i < CH7xxx_NUM_REGS; i++) { |
317 | if ((i % 8) == 0 ) | 319 | if ((i % 8) == 0 ) |
318 | DRM_DEBUG("\n %02X: ", i); | 320 | DRM_LOG_KMS("\n %02X: ", i); |
319 | DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]); | 321 | DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]); |
320 | } | 322 | } |
321 | } | 323 | } |
322 | 324 | ||
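The DVO chip drivers above (and ivch/sil164/tfp410 below) are converted from DRM_DEBUG to DRM_DEBUG_KMS for messages and DRM_LOG_KMS for raw register dumps. A small sketch of the intended split (assumed to match this era's drmP.h; the read helper is hypothetical):

```c
/* Assumed illustration: DRM_DEBUG_KMS output is gated by the KMS bit of
 * the drm.debug module parameter, while DRM_LOG_KMS prints without the
 * function-name prefix, which keeps multi-part register dumps readable. */
#include "dvo.h"

static void example_dump_reg(struct intel_dvo_device *dvo, int reg)
{
	uint8_t val;

	if (!example_readb(dvo, reg, &val)) {	/* hypothetical read helper */
		DRM_DEBUG_KMS("read of register 0x%02x failed\n", reg);
		return;
	}
	DRM_LOG_KMS("0x%02x: 0x%02x\n", reg, val);
}
```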
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index aa176f9921fe..24169e528f0f 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -202,7 +202,8 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) | |||
202 | }; | 202 | }; |
203 | 203 | ||
204 | if (!priv->quiet) { | 204 | if (!priv->quiet) { |
205 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", | 205 | DRM_DEBUG_KMS("Unable to read register 0x%02x from " |
206 | "%s:%02x.\n", | ||
206 | addr, i2cbus->adapter.name, dvo->slave_addr); | 207 | addr, i2cbus->adapter.name, dvo->slave_addr); |
207 | } | 208 | } |
208 | return false; | 209 | return false; |
@@ -230,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) | |||
230 | return true; | 231 | return true; |
231 | 232 | ||
232 | if (!priv->quiet) { | 233 | if (!priv->quiet) { |
233 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", | 234 | DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", |
234 | addr, i2cbus->adapter.name, dvo->slave_addr); | 235 | addr, i2cbus->adapter.name, dvo->slave_addr); |
235 | } | 236 | } |
236 | 237 | ||
@@ -261,7 +262,7 @@ static bool ivch_init(struct intel_dvo_device *dvo, | |||
261 | * the address it's responding on. | 262 | * the address it's responding on. |
262 | */ | 263 | */ |
263 | if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) { | 264 | if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) { |
264 | DRM_DEBUG("ivch detect failed due to address mismatch " | 265 | DRM_DEBUG_KMS("ivch detect failed due to address mismatch " |
265 | "(%d vs %d)\n", | 266 | "(%d vs %d)\n", |
266 | (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr); | 267 | (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr); |
267 | goto out; | 268 | goto out; |
@@ -367,41 +368,41 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo) | |||
367 | uint16_t val; | 368 | uint16_t val; |
368 | 369 | ||
369 | ivch_read(dvo, VR00, &val); | 370 | ivch_read(dvo, VR00, &val); |
370 | DRM_DEBUG("VR00: 0x%04x\n", val); | 371 | DRM_LOG_KMS("VR00: 0x%04x\n", val); |
371 | ivch_read(dvo, VR01, &val); | 372 | ivch_read(dvo, VR01, &val); |
372 | DRM_DEBUG("VR01: 0x%04x\n", val); | 373 | DRM_LOG_KMS("VR01: 0x%04x\n", val); |
373 | ivch_read(dvo, VR30, &val); | 374 | ivch_read(dvo, VR30, &val); |
374 | DRM_DEBUG("VR30: 0x%04x\n", val); | 375 | DRM_LOG_KMS("VR30: 0x%04x\n", val); |
375 | ivch_read(dvo, VR40, &val); | 376 | ivch_read(dvo, VR40, &val); |
376 | DRM_DEBUG("VR40: 0x%04x\n", val); | 377 | DRM_LOG_KMS("VR40: 0x%04x\n", val); |
377 | 378 | ||
378 | /* GPIO registers */ | 379 | /* GPIO registers */ |
379 | ivch_read(dvo, VR80, &val); | 380 | ivch_read(dvo, VR80, &val); |
380 | DRM_DEBUG("VR80: 0x%04x\n", val); | 381 | DRM_LOG_KMS("VR80: 0x%04x\n", val); |
381 | ivch_read(dvo, VR81, &val); | 382 | ivch_read(dvo, VR81, &val); |
382 | DRM_DEBUG("VR81: 0x%04x\n", val); | 383 | DRM_LOG_KMS("VR81: 0x%04x\n", val); |
383 | ivch_read(dvo, VR82, &val); | 384 | ivch_read(dvo, VR82, &val); |
384 | DRM_DEBUG("VR82: 0x%04x\n", val); | 385 | DRM_LOG_KMS("VR82: 0x%04x\n", val); |
385 | ivch_read(dvo, VR83, &val); | 386 | ivch_read(dvo, VR83, &val); |
386 | DRM_DEBUG("VR83: 0x%04x\n", val); | 387 | DRM_LOG_KMS("VR83: 0x%04x\n", val); |
387 | ivch_read(dvo, VR84, &val); | 388 | ivch_read(dvo, VR84, &val); |
388 | DRM_DEBUG("VR84: 0x%04x\n", val); | 389 | DRM_LOG_KMS("VR84: 0x%04x\n", val); |
389 | ivch_read(dvo, VR85, &val); | 390 | ivch_read(dvo, VR85, &val); |
390 | DRM_DEBUG("VR85: 0x%04x\n", val); | 391 | DRM_LOG_KMS("VR85: 0x%04x\n", val); |
391 | ivch_read(dvo, VR86, &val); | 392 | ivch_read(dvo, VR86, &val); |
392 | DRM_DEBUG("VR86: 0x%04x\n", val); | 393 | DRM_LOG_KMS("VR86: 0x%04x\n", val); |
393 | ivch_read(dvo, VR87, &val); | 394 | ivch_read(dvo, VR87, &val); |
394 | DRM_DEBUG("VR87: 0x%04x\n", val); | 395 | DRM_LOG_KMS("VR87: 0x%04x\n", val); |
395 | ivch_read(dvo, VR88, &val); | 396 | ivch_read(dvo, VR88, &val); |
396 | DRM_DEBUG("VR88: 0x%04x\n", val); | 397 | DRM_LOG_KMS("VR88: 0x%04x\n", val); |
397 | 398 | ||
398 | /* Scratch register 0 - AIM Panel type */ | 399 | /* Scratch register 0 - AIM Panel type */ |
399 | ivch_read(dvo, VR8E, &val); | 400 | ivch_read(dvo, VR8E, &val); |
400 | DRM_DEBUG("VR8E: 0x%04x\n", val); | 401 | DRM_LOG_KMS("VR8E: 0x%04x\n", val); |
401 | 402 | ||
402 | /* Scratch register 1 - Status register */ | 403 | /* Scratch register 1 - Status register */ |
403 | ivch_read(dvo, VR8F, &val); | 404 | ivch_read(dvo, VR8F, &val); |
404 | DRM_DEBUG("VR8F: 0x%04x\n", val); | 405 | DRM_LOG_KMS("VR8F: 0x%04x\n", val); |
405 | } | 406 | } |
406 | 407 | ||
407 | static void ivch_save(struct intel_dvo_device *dvo) | 408 | static void ivch_save(struct intel_dvo_device *dvo) |
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index e1c1f7341e5c..0001c13f0a80 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -105,7 +105,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | |||
105 | }; | 105 | }; |
106 | 106 | ||
107 | if (!sil->quiet) { | 107 | if (!sil->quiet) { |
108 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", | 108 | DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", |
109 | addr, i2cbus->adapter.name, dvo->slave_addr); | 109 | addr, i2cbus->adapter.name, dvo->slave_addr); |
110 | } | 110 | } |
111 | return false; | 111 | return false; |
@@ -131,7 +131,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | |||
131 | return true; | 131 | return true; |
132 | 132 | ||
133 | if (!sil->quiet) { | 133 | if (!sil->quiet) { |
134 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", | 134 | DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", |
135 | addr, i2cbus->adapter.name, dvo->slave_addr); | 135 | addr, i2cbus->adapter.name, dvo->slave_addr); |
136 | } | 136 | } |
137 | 137 | ||
@@ -158,7 +158,7 @@ static bool sil164_init(struct intel_dvo_device *dvo, | |||
158 | goto out; | 158 | goto out; |
159 | 159 | ||
160 | if (ch != (SIL164_VID & 0xff)) { | 160 | if (ch != (SIL164_VID & 0xff)) { |
161 | DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", | 161 | DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n", |
162 | ch, adapter->name, dvo->slave_addr); | 162 | ch, adapter->name, dvo->slave_addr); |
163 | goto out; | 163 | goto out; |
164 | } | 164 | } |
@@ -167,13 +167,13 @@ static bool sil164_init(struct intel_dvo_device *dvo, | |||
167 | goto out; | 167 | goto out; |
168 | 168 | ||
169 | if (ch != (SIL164_DID & 0xff)) { | 169 | if (ch != (SIL164_DID & 0xff)) { |
170 | DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", | 170 | DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n", |
171 | ch, adapter->name, dvo->slave_addr); | 171 | ch, adapter->name, dvo->slave_addr); |
172 | goto out; | 172 | goto out; |
173 | } | 173 | } |
174 | sil->quiet = false; | 174 | sil->quiet = false; |
175 | 175 | ||
176 | DRM_DEBUG("init sil164 dvo controller successfully!\n"); | 176 | DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n"); |
177 | return true; | 177 | return true; |
178 | 178 | ||
179 | out: | 179 | out: |
@@ -241,15 +241,15 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo) | |||
241 | uint8_t val; | 241 | uint8_t val; |
242 | 242 | ||
243 | sil164_readb(dvo, SIL164_FREQ_LO, &val); | 243 | sil164_readb(dvo, SIL164_FREQ_LO, &val); |
244 | DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val); | 244 | DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val); |
245 | sil164_readb(dvo, SIL164_FREQ_HI, &val); | 245 | sil164_readb(dvo, SIL164_FREQ_HI, &val); |
246 | DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val); | 246 | DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val); |
247 | sil164_readb(dvo, SIL164_REG8, &val); | 247 | sil164_readb(dvo, SIL164_REG8, &val); |
248 | DRM_DEBUG("SIL164_REG8: 0x%02x\n", val); | 248 | DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val); |
249 | sil164_readb(dvo, SIL164_REG9, &val); | 249 | sil164_readb(dvo, SIL164_REG9, &val); |
250 | DRM_DEBUG("SIL164_REG9: 0x%02x\n", val); | 250 | DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val); |
251 | sil164_readb(dvo, SIL164_REGC, &val); | 251 | sil164_readb(dvo, SIL164_REGC, &val); |
252 | DRM_DEBUG("SIL164_REGC: 0x%02x\n", val); | 252 | DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val); |
253 | } | 253 | } |
254 | 254 | ||
255 | static void sil164_save(struct intel_dvo_device *dvo) | 255 | static void sil164_save(struct intel_dvo_device *dvo) |
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 9ecc907384ec..c7c391bc116a 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -130,7 +130,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | |||
130 | }; | 130 | }; |
131 | 131 | ||
132 | if (!tfp->quiet) { | 132 | if (!tfp->quiet) { |
133 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", | 133 | DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", |
134 | addr, i2cbus->adapter.name, dvo->slave_addr); | 134 | addr, i2cbus->adapter.name, dvo->slave_addr); |
135 | } | 135 | } |
136 | return false; | 136 | return false; |
@@ -156,7 +156,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | |||
156 | return true; | 156 | return true; |
157 | 157 | ||
158 | if (!tfp->quiet) { | 158 | if (!tfp->quiet) { |
159 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", | 159 | DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", |
160 | addr, i2cbus->adapter.name, dvo->slave_addr); | 160 | addr, i2cbus->adapter.name, dvo->slave_addr); |
161 | } | 161 | } |
162 | 162 | ||
@@ -191,13 +191,15 @@ static bool tfp410_init(struct intel_dvo_device *dvo, | |||
191 | tfp->quiet = true; | 191 | tfp->quiet = true; |
192 | 192 | ||
193 | if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { | 193 | if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { |
194 | DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n", | 194 | DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s " |
195 | "Slave %d.\n", | ||
195 | id, adapter->name, dvo->slave_addr); | 196 | id, adapter->name, dvo->slave_addr); |
196 | goto out; | 197 | goto out; |
197 | } | 198 | } |
198 | 199 | ||
199 | if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { | 200 | if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { |
200 | DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n", | 201 | DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s " |
202 | "Slave %d.\n", | ||
201 | id, adapter->name, dvo->slave_addr); | 203 | id, adapter->name, dvo->slave_addr); |
202 | goto out; | 204 | goto out; |
203 | } | 205 | } |
@@ -262,33 +264,33 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo) | |||
262 | uint8_t val, val2; | 264 | uint8_t val, val2; |
263 | 265 | ||
264 | tfp410_readb(dvo, TFP410_REV, &val); | 266 | tfp410_readb(dvo, TFP410_REV, &val); |
265 | DRM_DEBUG("TFP410_REV: 0x%02X\n", val); | 267 | DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val); |
266 | tfp410_readb(dvo, TFP410_CTL_1, &val); | 268 | tfp410_readb(dvo, TFP410_CTL_1, &val); |
267 | DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val); | 269 | DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val); |
268 | tfp410_readb(dvo, TFP410_CTL_2, &val); | 270 | tfp410_readb(dvo, TFP410_CTL_2, &val); |
269 | DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val); | 271 | DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val); |
270 | tfp410_readb(dvo, TFP410_CTL_3, &val); | 272 | tfp410_readb(dvo, TFP410_CTL_3, &val); |
271 | DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val); | 273 | DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val); |
272 | tfp410_readb(dvo, TFP410_USERCFG, &val); | 274 | tfp410_readb(dvo, TFP410_USERCFG, &val); |
273 | DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val); | 275 | DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val); |
274 | tfp410_readb(dvo, TFP410_DE_DLY, &val); | 276 | tfp410_readb(dvo, TFP410_DE_DLY, &val); |
275 | DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val); | 277 | DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val); |
276 | tfp410_readb(dvo, TFP410_DE_CTL, &val); | 278 | tfp410_readb(dvo, TFP410_DE_CTL, &val); |
277 | DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val); | 279 | DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val); |
278 | tfp410_readb(dvo, TFP410_DE_TOP, &val); | 280 | tfp410_readb(dvo, TFP410_DE_TOP, &val); |
279 | DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val); | 281 | DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val); |
280 | tfp410_readb(dvo, TFP410_DE_CNT_LO, &val); | 282 | tfp410_readb(dvo, TFP410_DE_CNT_LO, &val); |
281 | tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2); | 283 | tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2); |
282 | DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val); | 284 | DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val); |
283 | tfp410_readb(dvo, TFP410_DE_LIN_LO, &val); | 285 | tfp410_readb(dvo, TFP410_DE_LIN_LO, &val); |
284 | tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2); | 286 | tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2); |
285 | DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val); | 287 | DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val); |
286 | tfp410_readb(dvo, TFP410_H_RES_LO, &val); | 288 | tfp410_readb(dvo, TFP410_H_RES_LO, &val); |
287 | tfp410_readb(dvo, TFP410_H_RES_HI, &val2); | 289 | tfp410_readb(dvo, TFP410_H_RES_HI, &val2); |
288 | DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val); | 290 | DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val); |
289 | tfp410_readb(dvo, TFP410_V_RES_LO, &val); | 291 | tfp410_readb(dvo, TFP410_V_RES_LO, &val); |
290 | tfp410_readb(dvo, TFP410_V_RES_HI, &val2); | 292 | tfp410_readb(dvo, TFP410_V_RES_HI, &val2); |
291 | DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val); | 293 | DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); |
292 | } | 294 | } |
293 | 295 | ||
294 | static void tfp410_save(struct intel_dvo_device *dvo) | 296 | static void tfp410_save(struct intel_dvo_device *dvo) |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 26bf0552b3cb..eeed4e34c757 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,7 @@ | |||
27 | */ | 27 | */ |
28 | 28 | ||
29 | #include <linux/seq_file.h> | 29 | #include <linux/seq_file.h> |
30 | #include <linux/debugfs.h> | ||
30 | #include "drmP.h" | 31 | #include "drmP.h" |
31 | #include "drm.h" | 32 | #include "drm.h" |
32 | #include "i915_drm.h" | 33 | #include "i915_drm.h" |
@@ -160,7 +161,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) | |||
160 | struct drm_device *dev = node->minor->dev; | 161 | struct drm_device *dev = node->minor->dev; |
161 | drm_i915_private_t *dev_priv = dev->dev_private; | 162 | drm_i915_private_t *dev_priv = dev->dev_private; |
162 | 163 | ||
163 | if (!IS_IGDNG(dev)) { | 164 | if (!IS_IRONLAKE(dev)) { |
164 | seq_printf(m, "Interrupt enable: %08x\n", | 165 | seq_printf(m, "Interrupt enable: %08x\n", |
165 | I915_READ(IER)); | 166 | I915_READ(IER)); |
166 | seq_printf(m, "Interrupt identity: %08x\n", | 167 | seq_printf(m, "Interrupt identity: %08x\n", |
@@ -412,6 +413,109 @@ static int i915_registers_info(struct seq_file *m, void *data) { | |||
412 | return 0; | 413 | return 0; |
413 | } | 414 | } |
414 | 415 | ||
416 | static int | ||
417 | i915_wedged_open(struct inode *inode, | ||
418 | struct file *filp) | ||
419 | { | ||
420 | filp->private_data = inode->i_private; | ||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | static ssize_t | ||
425 | i915_wedged_read(struct file *filp, | ||
426 | char __user *ubuf, | ||
427 | size_t max, | ||
428 | loff_t *ppos) | ||
429 | { | ||
430 | struct drm_device *dev = filp->private_data; | ||
431 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
432 | char buf[80]; | ||
433 | int len; | ||
434 | |||
435 | len = snprintf(buf, sizeof (buf), | ||
436 | "wedged : %d\n", | ||
437 | atomic_read(&dev_priv->mm.wedged)); | ||
438 | |||
439 | return simple_read_from_buffer(ubuf, max, ppos, buf, len); | ||
440 | } | ||
441 | |||
442 | static ssize_t | ||
443 | i915_wedged_write(struct file *filp, | ||
444 | const char __user *ubuf, | ||
445 | size_t cnt, | ||
446 | loff_t *ppos) | ||
447 | { | ||
448 | struct drm_device *dev = filp->private_data; | ||
449 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
450 | char buf[20]; | ||
451 | int val = 1; | ||
452 | |||
453 | if (cnt > 0) { | ||
454 | if (cnt > sizeof (buf) - 1) | ||
455 | return -EINVAL; | ||
456 | |||
457 | if (copy_from_user(buf, ubuf, cnt)) | ||
458 | return -EFAULT; | ||
459 | buf[cnt] = 0; | ||
460 | |||
461 | val = simple_strtoul(buf, NULL, 0); | ||
462 | } | ||
463 | |||
464 | DRM_INFO("Manually setting wedged to %d\n", val); | ||
465 | |||
466 | atomic_set(&dev_priv->mm.wedged, val); | ||
467 | if (val) { | ||
468 | DRM_WAKEUP(&dev_priv->irq_queue); | ||
469 | queue_work(dev_priv->wq, &dev_priv->error_work); | ||
470 | } | ||
471 | |||
472 | return cnt; | ||
473 | } | ||
474 | |||
475 | static const struct file_operations i915_wedged_fops = { | ||
476 | .owner = THIS_MODULE, | ||
477 | .open = i915_wedged_open, | ||
478 | .read = i915_wedged_read, | ||
479 | .write = i915_wedged_write, | ||
480 | }; | ||
481 | |||
482 | /* As the drm_debugfs_init() routines are called before dev->dev_private is | ||
483 | * allocated we need to hook into the minor for release. */ | ||
484 | static int | ||
485 | drm_add_fake_info_node(struct drm_minor *minor, | ||
486 | struct dentry *ent, | ||
487 | const void *key) | ||
488 | { | ||
489 | struct drm_info_node *node; | ||
490 | |||
491 | node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); | ||
492 | if (node == NULL) { | ||
493 | debugfs_remove(ent); | ||
494 | return -ENOMEM; | ||
495 | } | ||
496 | |||
497 | node->minor = minor; | ||
498 | node->dent = ent; | ||
499 | node->info_ent = (void *) key; | ||
500 | list_add(&node->list, &minor->debugfs_nodes.list); | ||
501 | |||
502 | return 0; | ||
503 | } | ||
504 | |||
505 | static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) | ||
506 | { | ||
507 | struct drm_device *dev = minor->dev; | ||
508 | struct dentry *ent; | ||
509 | |||
510 | ent = debugfs_create_file("i915_wedged", | ||
511 | S_IRUGO | S_IWUSR, | ||
512 | root, dev, | ||
513 | &i915_wedged_fops); | ||
514 | if (IS_ERR(ent)) | ||
515 | return PTR_ERR(ent); | ||
516 | |||
517 | return drm_add_fake_info_node(minor, ent, &i915_wedged_fops); | ||
518 | } | ||
415 | 519 | ||
416 | static struct drm_info_list i915_debugfs_list[] = { | 520 | static struct drm_info_list i915_debugfs_list[] = { |
417 | {"i915_regs", i915_registers_info, 0}, | 521 | {"i915_regs", i915_registers_info, 0}, |
@@ -432,6 +536,12 @@ static struct drm_info_list i915_debugfs_list[] = { | |||
432 | 536 | ||
433 | int i915_debugfs_init(struct drm_minor *minor) | 537 | int i915_debugfs_init(struct drm_minor *minor) |
434 | { | 538 | { |
539 | int ret; | ||
540 | |||
541 | ret = i915_wedged_create(minor->debugfs_root, minor); | ||
542 | if (ret) | ||
543 | return ret; | ||
544 | |||
435 | return drm_debugfs_create_files(i915_debugfs_list, | 545 | return drm_debugfs_create_files(i915_debugfs_list, |
436 | I915_DEBUGFS_ENTRIES, | 546 | I915_DEBUGFS_ENTRIES, |
437 | minor->debugfs_root, minor); | 547 | minor->debugfs_root, minor); |
@@ -441,7 +551,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor) | |||
441 | { | 551 | { |
442 | drm_debugfs_remove_files(i915_debugfs_list, | 552 | drm_debugfs_remove_files(i915_debugfs_list, |
443 | I915_DEBUGFS_ENTRIES, minor); | 553 | I915_DEBUGFS_ENTRIES, minor); |
554 | drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, | ||
555 | 1, minor); | ||
444 | } | 556 | } |
445 | 557 | ||
446 | #endif /* CONFIG_DEBUG_FS */ | 558 | #endif /* CONFIG_DEBUG_FS */ |
447 | |||
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e5b138be45fa..701bfeac7f57 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -807,6 +807,12 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
807 | case I915_PARAM_NUM_FENCES_AVAIL: | 807 | case I915_PARAM_NUM_FENCES_AVAIL: |
808 | value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; | 808 | value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; |
809 | break; | 809 | break; |
810 | case I915_PARAM_HAS_OVERLAY: | ||
811 | value = dev_priv->overlay ? 1 : 0; | ||
812 | break; | ||
813 | case I915_PARAM_HAS_PAGEFLIPPING: | ||
814 | value = 1; | ||
815 | break; | ||
810 | default: | 816 | default: |
811 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | 817 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
812 | param->param); | 818 | param->param); |
@@ -962,7 +968,7 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, | |||
962 | * Some of the preallocated space is taken by the GTT | 968 | * Some of the preallocated space is taken by the GTT |
963 | * and popup. GTT is 1K per MB of aperture size, and popup is 4K. | 969 | * and popup. GTT is 1K per MB of aperture size, and popup is 4K. |
964 | */ | 970 | */ |
965 | if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev)) | 971 | if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev)) |
966 | overhead = 4096; | 972 | overhead = 4096; |
967 | else | 973 | else |
968 | overhead = (*aperture_size / 1024) + 4096; | 974 | overhead = (*aperture_size / 1024) + 4096; |
@@ -1048,7 +1054,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev, | |||
1048 | int gtt_offset, gtt_size; | 1054 | int gtt_offset, gtt_size; |
1049 | 1055 | ||
1050 | if (IS_I965G(dev)) { | 1056 | if (IS_I965G(dev)) { |
1051 | if (IS_G4X(dev) || IS_IGDNG(dev)) { | 1057 | if (IS_G4X(dev) || IS_IRONLAKE(dev)) { |
1052 | gtt_offset = 2*1024*1024; | 1058 | gtt_offset = 2*1024*1024; |
1053 | gtt_size = 2*1024*1024; | 1059 | gtt_size = 2*1024*1024; |
1054 | } else { | 1060 | } else { |
@@ -1070,7 +1076,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev, | |||
1070 | 1076 | ||
1071 | entry = *(volatile u32 *)(gtt + (gtt_addr / 1024)); | 1077 | entry = *(volatile u32 *)(gtt + (gtt_addr / 1024)); |
1072 | 1078 | ||
1073 | DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); | 1079 | DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); |
1074 | 1080 | ||
1075 | /* Mask out these reserved bits on this hardware. */ | 1081 | /* Mask out these reserved bits on this hardware. */ |
1076 | if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) || | 1082 | if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) || |
@@ -1096,7 +1102,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev, | |||
1096 | phys =(entry & PTE_ADDRESS_MASK) | | 1102 | phys =(entry & PTE_ADDRESS_MASK) | |
1097 | ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4)); | 1103 | ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4)); |
1098 | 1104 | ||
1099 | DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys); | 1105 | DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys); |
1100 | 1106 | ||
1101 | return phys; | 1107 | return phys; |
1102 | } | 1108 | } |
@@ -1306,7 +1312,7 @@ static void i915_get_mem_freq(struct drm_device *dev) | |||
1306 | drm_i915_private_t *dev_priv = dev->dev_private; | 1312 | drm_i915_private_t *dev_priv = dev->dev_private; |
1307 | u32 tmp; | 1313 | u32 tmp; |
1308 | 1314 | ||
1309 | if (!IS_IGD(dev)) | 1315 | if (!IS_PINEVIEW(dev)) |
1310 | return; | 1316 | return; |
1311 | 1317 | ||
1312 | tmp = I915_READ(CLKCFG); | 1318 | tmp = I915_READ(CLKCFG); |
@@ -1413,7 +1419,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1413 | if (ret) | 1419 | if (ret) |
1414 | goto out_iomapfree; | 1420 | goto out_iomapfree; |
1415 | 1421 | ||
1416 | dev_priv->wq = create_workqueue("i915"); | 1422 | dev_priv->wq = create_singlethread_workqueue("i915"); |
1417 | if (dev_priv->wq == NULL) { | 1423 | if (dev_priv->wq == NULL) { |
1418 | DRM_ERROR("Failed to create our workqueue.\n"); | 1424 | DRM_ERROR("Failed to create our workqueue.\n"); |
1419 | ret = -ENOMEM; | 1425 | ret = -ENOMEM; |
@@ -1434,7 +1440,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1434 | 1440 | ||
1435 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | 1441 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
1436 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | 1442 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
1437 | if (IS_G4X(dev) || IS_IGDNG(dev)) { | 1443 | if (IS_G4X(dev) || IS_IRONLAKE(dev)) { |
1438 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ | 1444 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ |
1439 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | 1445 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; |
1440 | } | 1446 | } |
@@ -1489,9 +1495,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1489 | } | 1495 | } |
1490 | 1496 | ||
1491 | /* Must be done after probing outputs */ | 1497 | /* Must be done after probing outputs */ |
1492 | /* FIXME: verify on IGDNG */ | 1498 | intel_opregion_init(dev, 0); |
1493 | if (!IS_IGDNG(dev)) | ||
1494 | intel_opregion_init(dev, 0); | ||
1495 | 1499 | ||
1496 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, | 1500 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, |
1497 | (unsigned long) dev); | 1501 | (unsigned long) dev); |
@@ -1525,6 +1529,15 @@ int i915_driver_unload(struct drm_device *dev) | |||
1525 | } | 1529 | } |
1526 | 1530 | ||
1527 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 1531 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1532 | /* | ||
1533 | * free the memory space allocated for the child device | ||
1534 | * config parsed from VBT | ||
1535 | */ | ||
1536 | if (dev_priv->child_dev && dev_priv->child_dev_num) { | ||
1537 | kfree(dev_priv->child_dev); | ||
1538 | dev_priv->child_dev = NULL; | ||
1539 | dev_priv->child_dev_num = 0; | ||
1540 | } | ||
1528 | drm_irq_uninstall(dev); | 1541 | drm_irq_uninstall(dev); |
1529 | vga_client_register(dev->pdev, NULL, NULL, NULL); | 1542 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
1530 | } | 1543 | } |
@@ -1535,8 +1548,7 @@ int i915_driver_unload(struct drm_device *dev) | |||
1535 | if (dev_priv->regs != NULL) | 1548 | if (dev_priv->regs != NULL) |
1536 | iounmap(dev_priv->regs); | 1549 | iounmap(dev_priv->regs); |
1537 | 1550 | ||
1538 | if (!IS_IGDNG(dev)) | 1551 | intel_opregion_free(dev, 0); |
1539 | intel_opregion_free(dev, 0); | ||
1540 | 1552 | ||
1541 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 1553 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1542 | intel_modeset_cleanup(dev); | 1554 | intel_modeset_cleanup(dev); |
@@ -1548,6 +1560,8 @@ int i915_driver_unload(struct drm_device *dev) | |||
1548 | mutex_unlock(&dev->struct_mutex); | 1560 | mutex_unlock(&dev->struct_mutex); |
1549 | drm_mm_takedown(&dev_priv->vram); | 1561 | drm_mm_takedown(&dev_priv->vram); |
1550 | i915_gem_lastclose(dev); | 1562 | i915_gem_lastclose(dev); |
1563 | |||
1564 | intel_cleanup_overlay(dev); | ||
1551 | } | 1565 | } |
1552 | 1566 | ||
1553 | pci_dev_put(dev_priv->bridge_dev); | 1567 | pci_dev_put(dev_priv->bridge_dev); |
@@ -1656,6 +1670,8 @@ struct drm_ioctl_desc i915_ioctls[] = { | |||
1656 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), | 1670 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), |
1657 | DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), | 1671 | DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), |
1658 | DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0), | 1672 | DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0), |
1673 | DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
1674 | DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
1659 | }; | 1675 | }; |
1660 | 1676 | ||
1661 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | 1677 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); |
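i915_getparam() above gains I915_PARAM_HAS_OVERLAY and I915_PARAM_HAS_PAGEFLIPPING, so user space can probe for the new overlay ioctls and for page flipping before relying on them. A sketch of such a probe (assumes libdrm's drmCommandWriteRead() wrapper; error handling trimmed):

```c
/* Assumed user-space sketch -- relies on libdrm's drmCommandWriteRead()
 * and the i915_drm.h definitions added by this series. */
#include <xf86drm.h>
#include "i915_drm.h"

static int i915_has_param(int fd, int param)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = param;
	gp.value = &value;
	if (drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp)))
		return 0;		/* older kernel: treat as absent */
	return value;
}

/* e.g. check i915_has_param(fd, I915_PARAM_HAS_OVERLAY) before issuing
 * the DRM_I915_OVERLAY_PUT_IMAGE ioctl. */
```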
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a725f6591192..df5b943fccda 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -170,6 +170,8 @@ struct drm_i915_display_funcs { | |||
170 | /* clock gating init */ | 170 | /* clock gating init */ |
171 | }; | 171 | }; |
172 | 172 | ||
173 | struct intel_overlay; | ||
174 | |||
173 | typedef struct drm_i915_private { | 175 | typedef struct drm_i915_private { |
174 | struct drm_device *dev; | 176 | struct drm_device *dev; |
175 | 177 | ||
@@ -187,6 +189,7 @@ typedef struct drm_i915_private { | |||
187 | unsigned int status_gfx_addr; | 189 | unsigned int status_gfx_addr; |
188 | drm_local_map_t hws_map; | 190 | drm_local_map_t hws_map; |
189 | struct drm_gem_object *hws_obj; | 191 | struct drm_gem_object *hws_obj; |
192 | struct drm_gem_object *pwrctx; | ||
190 | 193 | ||
191 | struct resource mch_res; | 194 | struct resource mch_res; |
192 | 195 | ||
@@ -206,11 +209,13 @@ typedef struct drm_i915_private { | |||
206 | /** Cached value of IMR to avoid reads in updating the bitfield */ | 209 | /** Cached value of IMR to avoid reads in updating the bitfield */ |
207 | u32 irq_mask_reg; | 210 | u32 irq_mask_reg; |
208 | u32 pipestat[2]; | 211 | u32 pipestat[2]; |
209 | /** splitted irq regs for graphics and display engine on IGDNG, | 212 | /** splitted irq regs for graphics and display engine on Ironlake, |
210 | irq_mask_reg is still used for display irq. */ | 213 | irq_mask_reg is still used for display irq. */ |
211 | u32 gt_irq_mask_reg; | 214 | u32 gt_irq_mask_reg; |
212 | u32 gt_irq_enable_reg; | 215 | u32 gt_irq_enable_reg; |
213 | u32 de_irq_enable_reg; | 216 | u32 de_irq_enable_reg; |
217 | u32 pch_irq_mask_reg; | ||
218 | u32 pch_irq_enable_reg; | ||
214 | 219 | ||
215 | u32 hotplug_supported_mask; | 220 | u32 hotplug_supported_mask; |
216 | struct work_struct hotplug_work; | 221 | struct work_struct hotplug_work; |
@@ -240,6 +245,9 @@ typedef struct drm_i915_private { | |||
240 | 245 | ||
241 | struct intel_opregion opregion; | 246 | struct intel_opregion opregion; |
242 | 247 | ||
248 | /* overlay */ | ||
249 | struct intel_overlay *overlay; | ||
250 | |||
243 | /* LVDS info */ | 251 | /* LVDS info */ |
244 | int backlight_duty_cycle; /* restore backlight to this value */ | 252 | int backlight_duty_cycle; /* restore backlight to this value */ |
245 | bool panel_wants_dither; | 253 | bool panel_wants_dither; |
@@ -258,7 +266,7 @@ typedef struct drm_i915_private { | |||
258 | 266 | ||
259 | struct notifier_block lid_notifier; | 267 | struct notifier_block lid_notifier; |
260 | 268 | ||
261 | int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */ | 269 | int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */ |
262 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ | 270 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ |
263 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ | 271 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
264 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ | 272 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
@@ -280,6 +288,7 @@ typedef struct drm_i915_private { | |||
280 | u32 saveDSPBCNTR; | 288 | u32 saveDSPBCNTR; |
281 | u32 saveDSPARB; | 289 | u32 saveDSPARB; |
282 | u32 saveRENDERSTANDBY; | 290 | u32 saveRENDERSTANDBY; |
291 | u32 savePWRCTXA; | ||
283 | u32 saveHWS; | 292 | u32 saveHWS; |
284 | u32 savePIPEACONF; | 293 | u32 savePIPEACONF; |
285 | u32 savePIPEBCONF; | 294 | u32 savePIPEBCONF; |
@@ -539,13 +548,21 @@ typedef struct drm_i915_private { | |||
539 | /* indicate whether the LVDS_BORDER should be enabled or not */ | 548 | /* indicate whether the LVDS_BORDER should be enabled or not */ |
540 | unsigned int lvds_border_bits; | 549 | unsigned int lvds_border_bits; |
541 | 550 | ||
551 | struct drm_crtc *plane_to_crtc_mapping[2]; | ||
552 | struct drm_crtc *pipe_to_crtc_mapping[2]; | ||
553 | wait_queue_head_t pending_flip_queue; | ||
554 | |||
542 | /* Reclocking support */ | 555 | /* Reclocking support */ |
543 | bool render_reclock_avail; | 556 | bool render_reclock_avail; |
544 | bool lvds_downclock_avail; | 557 | bool lvds_downclock_avail; |
558 | /* indicates the reduced downclock for LVDS*/ | ||
559 | int lvds_downclock; | ||
545 | struct work_struct idle_work; | 560 | struct work_struct idle_work; |
546 | struct timer_list idle_timer; | 561 | struct timer_list idle_timer; |
547 | bool busy; | 562 | bool busy; |
548 | u16 orig_clock; | 563 | u16 orig_clock; |
564 | int child_dev_num; | ||
565 | struct child_device_config *child_dev; | ||
549 | } drm_i915_private_t; | 566 | } drm_i915_private_t; |
550 | 567 | ||
551 | /** driver private structure attached to each drm_gem_object */ | 568 | /** driver private structure attached to each drm_gem_object */ |
@@ -638,6 +655,13 @@ struct drm_i915_gem_object { | |||
638 | * Advice: are the backing pages purgeable? | 655 | * Advice: are the backing pages purgeable? |
639 | */ | 656 | */ |
640 | int madv; | 657 | int madv; |
658 | |||
659 | /** | ||
660 | * Number of crtcs where this object is currently the fb, but | ||
661 | * will be page flipped away on the next vblank. When it | ||
662 | * reaches 0, dev_priv->pending_flip_queue will be woken up. | ||
663 | */ | ||
664 | atomic_t pending_flip; | ||
641 | }; | 665 | }; |
642 | 666 | ||
643 | /** | 667 | /** |
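The comment above is the whole contract for pending_flip; the increment and decrement themselves live in the page-flip paths of intel_display.c, which this hunk does not show. A minimal sketch of the two sides, assuming the usual driver_private layout:

struct drm_i915_gem_object *obj_priv = obj->driver_private;

/* queueing a flip that will scan out from this object: */
atomic_inc(&obj_priv->pending_flip);

/* on the vblank that completes the flip; waiters recheck the count,
 * so an unconditional wake-up is fine: */
atomic_dec(&obj_priv->pending_flip);
wake_up(&dev_priv->pending_flip_queue);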
@@ -738,6 +762,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); | |||
738 | void | 762 | void |
739 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); | 763 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
740 | 764 | ||
765 | void intel_enable_asle (struct drm_device *dev); | ||
766 | |||
741 | 767 | ||
742 | /* i915_mem.c */ | 768 | /* i915_mem.c */ |
743 | extern int i915_mem_alloc(struct drm_device *dev, void *data, | 769 | extern int i915_mem_alloc(struct drm_device *dev, void *data, |
@@ -813,6 +839,9 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | |||
813 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | 839 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, |
814 | unsigned long end); | 840 | unsigned long end); |
815 | int i915_gem_idle(struct drm_device *dev); | 841 | int i915_gem_idle(struct drm_device *dev); |
842 | uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | ||
843 | uint32_t flush_domains); | ||
844 | int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible); | ||
816 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 845 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
817 | int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, | 846 | int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, |
818 | int write); | 847 | int write); |
@@ -824,6 +853,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev); | |||
824 | int i915_gem_object_get_pages(struct drm_gem_object *obj); | 853 | int i915_gem_object_get_pages(struct drm_gem_object *obj); |
825 | void i915_gem_object_put_pages(struct drm_gem_object *obj); | 854 | void i915_gem_object_put_pages(struct drm_gem_object *obj); |
826 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); | 855 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); |
856 | void i915_gem_object_flush_write_domain(struct drm_gem_object *obj); | ||
827 | 857 | ||
828 | void i915_gem_shrinker_init(void); | 858 | void i915_gem_shrinker_init(void); |
829 | void i915_gem_shrinker_exit(void); | 859 | void i915_gem_shrinker_exit(void); |
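i915_add_request() and i915_do_wait_request() are exported here so code outside i915_gem.c (the new overlay support, for one) can emit ring commands, tag them with a request and block until the GPU has retired them; interruptible=0 is for paths that cannot back out on a signal. A hedged sketch of the pattern:

uint32_t seqno;
int ret;

BEGIN_LP_RING(2);
OUT_RING(MI_FLUSH);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();

seqno = i915_add_request(dev, NULL, 0);
ret = i915_do_wait_request(dev, seqno, 1);      /* 1 = interruptible */
if (ret)
        return ret;     /* -ERESTARTSYS on a signal, -EIO if the GPU is wedged */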
@@ -863,11 +893,13 @@ extern int i915_restore_state(struct drm_device *dev); | |||
863 | extern int intel_opregion_init(struct drm_device *dev, int resume); | 893 | extern int intel_opregion_init(struct drm_device *dev, int resume); |
864 | extern void intel_opregion_free(struct drm_device *dev, int suspend); | 894 | extern void intel_opregion_free(struct drm_device *dev, int suspend); |
865 | extern void opregion_asle_intr(struct drm_device *dev); | 895 | extern void opregion_asle_intr(struct drm_device *dev); |
896 | extern void ironlake_opregion_gse_intr(struct drm_device *dev); | ||
866 | extern void opregion_enable_asle(struct drm_device *dev); | 897 | extern void opregion_enable_asle(struct drm_device *dev); |
867 | #else | 898 | #else |
868 | static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; } | 899 | static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; } |
869 | static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; } | 900 | static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; } |
870 | static inline void opregion_asle_intr(struct drm_device *dev) { return; } | 901 | static inline void opregion_asle_intr(struct drm_device *dev) { return; } |
902 | static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; } | ||
871 | static inline void opregion_enable_asle(struct drm_device *dev) { return; } | 903 | static inline void opregion_enable_asle(struct drm_device *dev) { return; } |
872 | #endif | 904 | #endif |
873 | 905 | ||
@@ -955,8 +987,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
955 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) | 987 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) |
956 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) | 988 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) |
957 | #define IS_I85X(dev) ((dev)->pci_device == 0x3582) | 989 | #define IS_I85X(dev) ((dev)->pci_device == 0x3582) |
958 | #define IS_I855(dev) ((dev)->pci_device == 0x3582) | ||
959 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) | 990 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) |
991 | #define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev)) | ||
960 | 992 | ||
961 | #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) | 993 | #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) |
962 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) | 994 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) |
@@ -990,47 +1022,51 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
990 | (dev)->pci_device == 0x2E42 || \ | 1022 | (dev)->pci_device == 0x2E42 || \ |
991 | IS_GM45(dev)) | 1023 | IS_GM45(dev)) |
992 | 1024 | ||
993 | #define IS_IGDG(dev) ((dev)->pci_device == 0xa001) | 1025 | #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) |
994 | #define IS_IGDGM(dev) ((dev)->pci_device == 0xa011) | 1026 | #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) |
995 | #define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev)) | 1027 | #define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev)) |
996 | 1028 | ||
997 | #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ | 1029 | #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ |
998 | (dev)->pci_device == 0x29B2 || \ | 1030 | (dev)->pci_device == 0x29B2 || \ |
999 | (dev)->pci_device == 0x29D2 || \ | 1031 | (dev)->pci_device == 0x29D2 || \ |
1000 | (IS_IGD(dev))) | 1032 | (IS_PINEVIEW(dev))) |
1001 | 1033 | ||
1002 | #define IS_IGDNG_D(dev) ((dev)->pci_device == 0x0042) | 1034 | #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) |
1003 | #define IS_IGDNG_M(dev) ((dev)->pci_device == 0x0046) | 1035 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) |
1004 | #define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev)) | 1036 | #define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev)) |
1005 | 1037 | ||
1006 | #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ | 1038 | #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ |
1007 | IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \ | 1039 | IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \ |
1008 | IS_IGDNG(dev)) | 1040 | IS_IRONLAKE(dev)) |
1009 | 1041 | ||
1010 | #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ | 1042 | #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ |
1011 | IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \ | 1043 | IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \ |
1012 | IS_IGD(dev) || IS_IGDNG_M(dev)) | 1044 | IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev)) |
1013 | 1045 | ||
1014 | #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \ | 1046 | #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \ |
1015 | IS_IGDNG(dev)) | 1047 | IS_IRONLAKE(dev)) |
1016 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | 1048 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
1017 | * rows, which changed the alignment requirements and fence programming. | 1049 | * rows, which changed the alignment requirements and fence programming. |
1018 | */ | 1050 | */ |
1019 | #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ | 1051 | #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ |
1020 | IS_I915GM(dev))) | 1052 | IS_I915GM(dev))) |
1021 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 1053 | #define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev)) |
1022 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 1054 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) |
1023 | #define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev)) | 1055 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) |
1056 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) | ||
1057 | #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ | ||
1058 | !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) | ||
1024 | #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) | 1059 | #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) |
1025 | /* dsparb controlled by hw only */ | 1060 | /* dsparb controlled by hw only */ |
1026 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 1061 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) |
1027 | 1062 | ||
1028 | #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev)) | 1063 | #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev)) |
1029 | #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 1064 | #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) |
1030 | #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \ | 1065 | #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \ |
1031 | (IS_I9XX(dev) || IS_GM45(dev)) && \ | 1066 | (IS_I9XX(dev) || IS_GM45(dev)) && \ |
1032 | !IS_IGD(dev) && \ | 1067 | !IS_PINEVIEW(dev) && \ |
1033 | !IS_IGDNG(dev)) | 1068 | !IS_IRONLAKE(dev)) |
1069 | #define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev)) | ||
1034 | 1070 | ||
1035 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 1071 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
1036 | 1072 | ||
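The renamed platform macros compose into the feature checks the rest of the driver uses; an illustrative pairing of the new names (not a hunk from this patch):

if (SUPPORTS_TV(dev))
        intel_tv_init(dev);     /* mobile i9xx, but not Pineview or Ironlake */

if (I915_HAS_RC6(dev))          /* 965GM, GM45, Ironlake-M */
        dev_priv->pwrctx = drm_gem_object_alloc(dev, 4096);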
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index abfc27b0c2ea..5b46623d62d4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1583,7 +1583,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
1583 | * | 1583 | * |
1584 | * Returned sequence numbers are nonzero on success. | 1584 | * Returned sequence numbers are nonzero on success. |
1585 | */ | 1585 | */ |
1586 | static uint32_t | 1586 | uint32_t |
1587 | i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | 1587 | i915_add_request(struct drm_device *dev, struct drm_file *file_priv, |
1588 | uint32_t flush_domains) | 1588 | uint32_t flush_domains) |
1589 | { | 1589 | { |
@@ -1617,7 +1617,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1617 | OUT_RING(MI_USER_INTERRUPT); | 1617 | OUT_RING(MI_USER_INTERRUPT); |
1618 | ADVANCE_LP_RING(); | 1618 | ADVANCE_LP_RING(); |
1619 | 1619 | ||
1620 | DRM_DEBUG("%d\n", seqno); | 1620 | DRM_DEBUG_DRIVER("%d\n", seqno); |
1621 | 1621 | ||
1622 | request->seqno = seqno; | 1622 | request->seqno = seqno; |
1623 | request->emitted_jiffies = jiffies; | 1623 | request->emitted_jiffies = jiffies; |
@@ -1820,12 +1820,8 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
1820 | mutex_unlock(&dev->struct_mutex); | 1820 | mutex_unlock(&dev->struct_mutex); |
1821 | } | 1821 | } |
1822 | 1822 | ||
1823 | /** | 1823 | int |
1824 | * Waits for a sequence number to be signaled, and cleans up the | 1824 | i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) |
1825 | * request and object lists appropriately for that event. | ||
1826 | */ | ||
1827 | static int | ||
1828 | i915_wait_request(struct drm_device *dev, uint32_t seqno) | ||
1829 | { | 1825 | { |
1830 | drm_i915_private_t *dev_priv = dev->dev_private; | 1826 | drm_i915_private_t *dev_priv = dev->dev_private; |
1831 | u32 ier; | 1827 | u32 ier; |
@@ -1837,7 +1833,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) | |||
1837 | return -EIO; | 1833 | return -EIO; |
1838 | 1834 | ||
1839 | if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { | 1835 | if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { |
1840 | if (IS_IGDNG(dev)) | 1836 | if (IS_IRONLAKE(dev)) |
1841 | ier = I915_READ(DEIER) | I915_READ(GTIER); | 1837 | ier = I915_READ(DEIER) | I915_READ(GTIER); |
1842 | else | 1838 | else |
1843 | ier = I915_READ(IER); | 1839 | ier = I915_READ(IER); |
@@ -1852,10 +1848,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) | |||
1852 | 1848 | ||
1853 | dev_priv->mm.waiting_gem_seqno = seqno; | 1849 | dev_priv->mm.waiting_gem_seqno = seqno; |
1854 | i915_user_irq_get(dev); | 1850 | i915_user_irq_get(dev); |
1855 | ret = wait_event_interruptible(dev_priv->irq_queue, | 1851 | if (interruptible) |
1856 | i915_seqno_passed(i915_get_gem_seqno(dev), | 1852 | ret = wait_event_interruptible(dev_priv->irq_queue, |
1857 | seqno) || | 1853 | i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || |
1858 | atomic_read(&dev_priv->mm.wedged)); | 1854 | atomic_read(&dev_priv->mm.wedged)); |
1855 | else | ||
1856 | wait_event(dev_priv->irq_queue, | ||
1857 | i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || | ||
1858 | atomic_read(&dev_priv->mm.wedged)); | ||
1859 | |||
1859 | i915_user_irq_put(dev); | 1860 | i915_user_irq_put(dev); |
1860 | dev_priv->mm.waiting_gem_seqno = 0; | 1861 | dev_priv->mm.waiting_gem_seqno = 0; |
1861 | 1862 | ||
@@ -1879,6 +1880,16 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) | |||
1879 | return ret; | 1880 | return ret; |
1880 | } | 1881 | } |
1881 | 1882 | ||
1883 | /** | ||
1884 | * Waits for a sequence number to be signaled, and cleans up the | ||
1885 | * request and object lists appropriately for that event. | ||
1886 | */ | ||
1887 | static int | ||
1888 | i915_wait_request(struct drm_device *dev, uint32_t seqno) | ||
1889 | { | ||
1890 | return i915_do_wait_request(dev, seqno, 1); | ||
1891 | } | ||
1892 | |||
1882 | static void | 1893 | static void |
1883 | i915_gem_flush(struct drm_device *dev, | 1894 | i915_gem_flush(struct drm_device *dev, |
1884 | uint32_t invalidate_domains, | 1895 | uint32_t invalidate_domains, |
@@ -1947,7 +1958,7 @@ i915_gem_flush(struct drm_device *dev, | |||
1947 | #endif | 1958 | #endif |
1948 | BEGIN_LP_RING(2); | 1959 | BEGIN_LP_RING(2); |
1949 | OUT_RING(cmd); | 1960 | OUT_RING(cmd); |
1950 | OUT_RING(0); /* noop */ | 1961 | OUT_RING(MI_NOOP); |
1951 | ADVANCE_LP_RING(); | 1962 | ADVANCE_LP_RING(); |
1952 | } | 1963 | } |
1953 | } | 1964 | } |
@@ -2760,6 +2771,22 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | |||
2760 | old_write_domain); | 2771 | old_write_domain); |
2761 | } | 2772 | } |
2762 | 2773 | ||
2774 | void | ||
2775 | i915_gem_object_flush_write_domain(struct drm_gem_object *obj) | ||
2776 | { | ||
2777 | switch (obj->write_domain) { | ||
2778 | case I915_GEM_DOMAIN_GTT: | ||
2779 | i915_gem_object_flush_gtt_write_domain(obj); | ||
2780 | break; | ||
2781 | case I915_GEM_DOMAIN_CPU: | ||
2782 | i915_gem_object_flush_cpu_write_domain(obj); | ||
2783 | break; | ||
2784 | default: | ||
2785 | i915_gem_object_flush_gpu_write_domain(obj); | ||
2786 | break; | ||
2787 | } | ||
2788 | } | ||
2789 | |||
2763 | /** | 2790 | /** |
2764 | * Moves a single object to the GTT read, and possibly write domain. | 2791 | * Moves a single object to the GTT read, and possibly write domain. |
2765 | * | 2792 | * |
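The new i915_gem_object_flush_write_domain() dispatches on whichever domain currently owns the object's dirty data, so callers outside i915_gem.c (overlay, display) do not have to know which flush applies. A sketch of the intended call site, using the existing pin helper:

ret = i915_gem_object_pin(obj, PAGE_SIZE);
if (ret)
        return ret;
/* make any pending CPU, GTT or GPU writes visible before scanout */
i915_gem_object_flush_write_domain(obj);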
@@ -3525,6 +3552,41 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec, | |||
3525 | return 0; | 3552 | return 0; |
3526 | } | 3553 | } |
3527 | 3554 | ||
3555 | static int | ||
3556 | i915_gem_wait_for_pending_flip(struct drm_device *dev, | ||
3557 | struct drm_gem_object **object_list, | ||
3558 | int count) | ||
3559 | { | ||
3560 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3561 | struct drm_i915_gem_object *obj_priv; | ||
3562 | DEFINE_WAIT(wait); | ||
3563 | int i, ret = 0; | ||
3564 | |||
3565 | for (;;) { | ||
3566 | prepare_to_wait(&dev_priv->pending_flip_queue, | ||
3567 | &wait, TASK_INTERRUPTIBLE); | ||
3568 | for (i = 0; i < count; i++) { | ||
3569 | obj_priv = object_list[i]->driver_private; | ||
3570 | if (atomic_read(&obj_priv->pending_flip) > 0) | ||
3571 | break; | ||
3572 | } | ||
3573 | if (i == count) | ||
3574 | break; | ||
3575 | |||
3576 | if (!signal_pending(current)) { | ||
3577 | mutex_unlock(&dev->struct_mutex); | ||
3578 | schedule(); | ||
3579 | mutex_lock(&dev->struct_mutex); | ||
3580 | continue; | ||
3581 | } | ||
3582 | ret = -ERESTARTSYS; | ||
3583 | break; | ||
3584 | } | ||
3585 | finish_wait(&dev_priv->pending_flip_queue, &wait); | ||
3586 | |||
3587 | return ret; | ||
3588 | } | ||
3589 | |||
3528 | int | 3590 | int |
3529 | i915_gem_execbuffer(struct drm_device *dev, void *data, | 3591 | i915_gem_execbuffer(struct drm_device *dev, void *data, |
3530 | struct drm_file *file_priv) | 3592 | struct drm_file *file_priv) |
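i915_gem_wait_for_pending_flip() above is the classic open-coded wait_event(): struct_mutex has to be released across schedule(), presumably because whatever completes the flip may itself need that mutex, and in any case sleeping with it held would stall every other GEM ioctl. The bare idiom, for reference:

DEFINE_WAIT(wait);

for (;;) {
        prepare_to_wait(&waitq, &wait, TASK_INTERRUPTIBLE);
        if (condition)
                break;
        if (signal_pending(current)) {
                ret = -ERESTARTSYS;
                break;
        }
        mutex_unlock(&lock);
        schedule();
        mutex_lock(&lock);
}
finish_wait(&waitq, &wait);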
@@ -3540,7 +3602,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3540 | int ret, ret2, i, pinned = 0; | 3602 | int ret, ret2, i, pinned = 0; |
3541 | uint64_t exec_offset; | 3603 | uint64_t exec_offset; |
3542 | uint32_t seqno, flush_domains, reloc_index; | 3604 | uint32_t seqno, flush_domains, reloc_index; |
3543 | int pin_tries; | 3605 | int pin_tries, flips; |
3544 | 3606 | ||
3545 | #if WATCH_EXEC | 3607 | #if WATCH_EXEC |
3546 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | 3608 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", |
@@ -3552,8 +3614,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3552 | return -EINVAL; | 3614 | return -EINVAL; |
3553 | } | 3615 | } |
3554 | /* Copy in the exec list from userland */ | 3616 | /* Copy in the exec list from userland */ |
3555 | exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count); | 3617 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); |
3556 | object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count); | 3618 | object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); |
3557 | if (exec_list == NULL || object_list == NULL) { | 3619 | if (exec_list == NULL || object_list == NULL) { |
3558 | DRM_ERROR("Failed to allocate exec or object list " | 3620 | DRM_ERROR("Failed to allocate exec or object list " |
3559 | "for %d buffers\n", | 3621 | "for %d buffers\n", |
@@ -3598,20 +3660,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3598 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3660 | i915_verify_inactive(dev, __FILE__, __LINE__); |
3599 | 3661 | ||
3600 | if (atomic_read(&dev_priv->mm.wedged)) { | 3662 | if (atomic_read(&dev_priv->mm.wedged)) { |
3601 | DRM_ERROR("Execbuf while wedged\n"); | ||
3602 | mutex_unlock(&dev->struct_mutex); | 3663 | mutex_unlock(&dev->struct_mutex); |
3603 | ret = -EIO; | 3664 | ret = -EIO; |
3604 | goto pre_mutex_err; | 3665 | goto pre_mutex_err; |
3605 | } | 3666 | } |
3606 | 3667 | ||
3607 | if (dev_priv->mm.suspended) { | 3668 | if (dev_priv->mm.suspended) { |
3608 | DRM_ERROR("Execbuf while VT-switched.\n"); | ||
3609 | mutex_unlock(&dev->struct_mutex); | 3669 | mutex_unlock(&dev->struct_mutex); |
3610 | ret = -EBUSY; | 3670 | ret = -EBUSY; |
3611 | goto pre_mutex_err; | 3671 | goto pre_mutex_err; |
3612 | } | 3672 | } |
3613 | 3673 | ||
3614 | /* Look up object handles */ | 3674 | /* Look up object handles */ |
3675 | flips = 0; | ||
3615 | for (i = 0; i < args->buffer_count; i++) { | 3676 | for (i = 0; i < args->buffer_count; i++) { |
3616 | object_list[i] = drm_gem_object_lookup(dev, file_priv, | 3677 | object_list[i] = drm_gem_object_lookup(dev, file_priv, |
3617 | exec_list[i].handle); | 3678 | exec_list[i].handle); |
@@ -3630,6 +3691,14 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3630 | goto err; | 3691 | goto err; |
3631 | } | 3692 | } |
3632 | obj_priv->in_execbuffer = true; | 3693 | obj_priv->in_execbuffer = true; |
3694 | flips += atomic_read(&obj_priv->pending_flip); | ||
3695 | } | ||
3696 | |||
3697 | if (flips > 0) { | ||
3698 | ret = i915_gem_wait_for_pending_flip(dev, object_list, | ||
3699 | args->buffer_count); | ||
3700 | if (ret) | ||
3701 | goto err; | ||
3633 | } | 3702 | } |
3634 | 3703 | ||
3635 | /* Pin and relocate */ | 3704 | /* Pin and relocate */ |
@@ -4356,7 +4425,7 @@ i915_gem_init_hws(struct drm_device *dev) | |||
4356 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | 4425 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); |
4357 | I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); | 4426 | I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); |
4358 | I915_READ(HWS_PGA); /* posting read */ | 4427 | I915_READ(HWS_PGA); /* posting read */ |
4359 | DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); | 4428 | DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); |
4360 | 4429 | ||
4361 | return 0; | 4430 | return 0; |
4362 | } | 4431 | } |
@@ -4614,8 +4683,8 @@ i915_gem_load(struct drm_device *dev) | |||
4614 | for (i = 0; i < 8; i++) | 4683 | for (i = 0; i < 8; i++) |
4615 | I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); | 4684 | I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); |
4616 | } | 4685 | } |
4617 | |||
4618 | i915_gem_detect_bit_6_swizzle(dev); | 4686 | i915_gem_detect_bit_6_swizzle(dev); |
4687 | init_waitqueue_head(&dev_priv->pending_flip_queue); | ||
4619 | } | 4688 | } |
4620 | 4689 | ||
4621 | /* | 4690 | /* |
@@ -4790,7 +4859,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
4790 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 4859 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
4791 | obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; | 4860 | obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; |
4792 | 4861 | ||
4793 | DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size); | 4862 | DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size); |
4794 | ret = copy_from_user(obj_addr, user_data, args->size); | 4863 | ret = copy_from_user(obj_addr, user_data, args->size); |
4795 | if (ret) | 4864 | if (ret) |
4796 | return -EFAULT; | 4865 | return -EFAULT; |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 200e398453ca..30d6af6c09bb 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -121,7 +121,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev) | |||
121 | 0, pcibios_align_resource, | 121 | 0, pcibios_align_resource, |
122 | dev_priv->bridge_dev); | 122 | dev_priv->bridge_dev); |
123 | if (ret) { | 123 | if (ret) { |
124 | DRM_DEBUG("failed bus alloc: %d\n", ret); | 124 | DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); |
125 | dev_priv->mch_res.start = 0; | 125 | dev_priv->mch_res.start = 0; |
126 | goto out; | 126 | goto out; |
127 | } | 127 | } |
@@ -209,8 +209,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
209 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | 209 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
210 | bool need_disable; | 210 | bool need_disable; |
211 | 211 | ||
212 | if (IS_IGDNG(dev)) { | 212 | if (IS_IRONLAKE(dev)) { |
213 | /* On IGDNG whatever DRAM config, GPU always do | 213 | /* On Ironlake whatever DRAM config, GPU always do |
214 | * same swizzling setup. | 214 | * same swizzling setup. |
215 | */ | 215 | */ |
216 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; | 216 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index aa7fd82aa6eb..ae17c4b45b31 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -43,10 +43,13 @@ | |||
43 | * we leave them always unmasked in IMR and then control enabling them through | 43 | * we leave them always unmasked in IMR and then control enabling them through |
44 | * PIPESTAT alone. | 44 | * PIPESTAT alone. |
45 | */ | 45 | */ |
46 | #define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \ | 46 | #define I915_INTERRUPT_ENABLE_FIX \ |
47 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ | 47 | (I915_ASLE_INTERRUPT | \ |
48 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \ | 48 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ |
49 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 49 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \ |
50 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \ | ||
51 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \ | ||
52 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | ||
50 | 53 | ||
51 | /** Interrupts that we mask and unmask at runtime. */ | 54 | /** Interrupts that we mask and unmask at runtime. */ |
52 | #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) | 55 | #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) |
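With the two plane-flip bits folded into the always-on set, the way these masks reach the hardware is unchanged; roughly, in i915_driver_irq_postinstall() (abridged from the existing code, for orientation):

u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;

dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;    /* FIX bits never masked */
I915_WRITE(IMR, dev_priv->irq_mask_reg);
I915_WRITE(IER, enable_mask);
(void) I915_READ(IER);          /* posting read */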
@@ -61,7 +64,7 @@ | |||
61 | DRM_I915_VBLANK_PIPE_B) | 64 | DRM_I915_VBLANK_PIPE_B) |
62 | 65 | ||
63 | void | 66 | void |
64 | igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | 67 | ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) |
65 | { | 68 | { |
66 | if ((dev_priv->gt_irq_mask_reg & mask) != 0) { | 69 | if ((dev_priv->gt_irq_mask_reg & mask) != 0) { |
67 | dev_priv->gt_irq_mask_reg &= ~mask; | 70 | dev_priv->gt_irq_mask_reg &= ~mask; |
@@ -71,7 +74,7 @@ igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | |||
71 | } | 74 | } |
72 | 75 | ||
73 | static inline void | 76 | static inline void |
74 | igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | 77 | ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) |
75 | { | 78 | { |
76 | if ((dev_priv->gt_irq_mask_reg & mask) != mask) { | 79 | if ((dev_priv->gt_irq_mask_reg & mask) != mask) { |
77 | dev_priv->gt_irq_mask_reg |= mask; | 80 | dev_priv->gt_irq_mask_reg |= mask; |
@@ -82,7 +85,7 @@ igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | |||
82 | 85 | ||
83 | /* For display hotplug interrupt */ | 86 | /* For display hotplug interrupt */ |
84 | void | 87 | void |
85 | igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | 88 | ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
86 | { | 89 | { |
87 | if ((dev_priv->irq_mask_reg & mask) != 0) { | 90 | if ((dev_priv->irq_mask_reg & mask) != 0) { |
88 | dev_priv->irq_mask_reg &= ~mask; | 91 | dev_priv->irq_mask_reg &= ~mask; |
@@ -92,7 +95,7 @@ igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | |||
92 | } | 95 | } |
93 | 96 | ||
94 | static inline void | 97 | static inline void |
95 | igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | 98 | ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
96 | { | 99 | { |
97 | if ((dev_priv->irq_mask_reg & mask) != mask) { | 100 | if ((dev_priv->irq_mask_reg & mask) != mask) { |
98 | dev_priv->irq_mask_reg |= mask; | 101 | dev_priv->irq_mask_reg |= mask; |
@@ -157,6 +160,20 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | |||
157 | } | 160 | } |
158 | 161 | ||
159 | /** | 162 | /** |
163 | * intel_enable_asle - enable ASLE interrupt for OpRegion | ||
164 | */ | ||
165 | void intel_enable_asle (struct drm_device *dev) | ||
166 | { | ||
167 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
168 | |||
169 | if (IS_IRONLAKE(dev)) | ||
170 | ironlake_enable_display_irq(dev_priv, DE_GSE); | ||
171 | else | ||
172 | i915_enable_pipestat(dev_priv, 1, | ||
173 | I915_LEGACY_BLC_EVENT_ENABLE); | ||
174 | } | ||
175 | |||
176 | /** | ||
160 | * i915_pipe_enabled - check if a pipe is enabled | 177 | * i915_pipe_enabled - check if a pipe is enabled |
161 | * @dev: DRM device | 178 | * @dev: DRM device |
162 | * @pipe: pipe to check | 179 | * @pipe: pipe to check |
@@ -191,7 +208,8 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |||
191 | low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; | 208 | low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; |
192 | 209 | ||
193 | if (!i915_pipe_enabled(dev, pipe)) { | 210 | if (!i915_pipe_enabled(dev, pipe)) { |
194 | DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); | 211 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
212 | "pipe %d\n", pipe); | ||
195 | return 0; | 213 | return 0; |
196 | } | 214 | } |
197 | 215 | ||
@@ -220,7 +238,8 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | |||
220 | int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; | 238 | int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; |
221 | 239 | ||
222 | if (!i915_pipe_enabled(dev, pipe)) { | 240 | if (!i915_pipe_enabled(dev, pipe)) { |
223 | DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); | 241 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
242 | "pipe %d\n", pipe); | ||
224 | return 0; | 243 | return 0; |
225 | } | 244 | } |
226 | 245 | ||
@@ -250,12 +269,12 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
250 | drm_sysfs_hotplug_event(dev); | 269 | drm_sysfs_hotplug_event(dev); |
251 | } | 270 | } |
252 | 271 | ||
253 | irqreturn_t igdng_irq_handler(struct drm_device *dev) | 272 | irqreturn_t ironlake_irq_handler(struct drm_device *dev) |
254 | { | 273 | { |
255 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 274 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
256 | int ret = IRQ_NONE; | 275 | int ret = IRQ_NONE; |
257 | u32 de_iir, gt_iir, de_ier; | 276 | u32 de_iir, gt_iir, de_ier, pch_iir; |
258 | u32 new_de_iir, new_gt_iir; | 277 | u32 new_de_iir, new_gt_iir, new_pch_iir; |
259 | struct drm_i915_master_private *master_priv; | 278 | struct drm_i915_master_private *master_priv; |
260 | 279 | ||
261 | /* disable master interrupt before clearing iir */ | 280 | /* disable master interrupt before clearing iir */ |
@@ -265,13 +284,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev) | |||
265 | 284 | ||
266 | de_iir = I915_READ(DEIIR); | 285 | de_iir = I915_READ(DEIIR); |
267 | gt_iir = I915_READ(GTIIR); | 286 | gt_iir = I915_READ(GTIIR); |
287 | pch_iir = I915_READ(SDEIIR); | ||
268 | 288 | ||
269 | for (;;) { | 289 | for (;;) { |
270 | if (de_iir == 0 && gt_iir == 0) | 290 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) |
271 | break; | 291 | break; |
272 | 292 | ||
273 | ret = IRQ_HANDLED; | 293 | ret = IRQ_HANDLED; |
274 | 294 | ||
295 | /* should clear PCH hotplug event before clearing CPU irq */ | ||
296 | I915_WRITE(SDEIIR, pch_iir); | ||
297 | new_pch_iir = I915_READ(SDEIIR); | ||
298 | |||
275 | I915_WRITE(DEIIR, de_iir); | 299 | I915_WRITE(DEIIR, de_iir); |
276 | new_de_iir = I915_READ(DEIIR); | 300 | new_de_iir = I915_READ(DEIIR); |
277 | I915_WRITE(GTIIR, gt_iir); | 301 | I915_WRITE(GTIIR, gt_iir); |
@@ -291,8 +315,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev) | |||
291 | DRM_WAKEUP(&dev_priv->irq_queue); | 315 | DRM_WAKEUP(&dev_priv->irq_queue); |
292 | } | 316 | } |
293 | 317 | ||
318 | if (de_iir & DE_GSE) | ||
319 | ironlake_opregion_gse_intr(dev); | ||
320 | |||
321 | /* check event from PCH */ | ||
322 | if ((de_iir & DE_PCH_EVENT) && | ||
323 | (pch_iir & SDE_HOTPLUG_MASK)) { | ||
324 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | ||
325 | } | ||
326 | |||
294 | de_iir = new_de_iir; | 327 | de_iir = new_de_iir; |
295 | gt_iir = new_gt_iir; | 328 | gt_iir = new_gt_iir; |
329 | pch_iir = new_pch_iir; | ||
296 | } | 330 | } |
297 | 331 | ||
298 | I915_WRITE(DEIER, de_ier); | 332 | I915_WRITE(DEIER, de_ier); |
@@ -317,19 +351,19 @@ static void i915_error_work_func(struct work_struct *work) | |||
317 | char *reset_event[] = { "RESET=1", NULL }; | 351 | char *reset_event[] = { "RESET=1", NULL }; |
318 | char *reset_done_event[] = { "ERROR=0", NULL }; | 352 | char *reset_done_event[] = { "ERROR=0", NULL }; |
319 | 353 | ||
320 | DRM_DEBUG("generating error event\n"); | 354 | DRM_DEBUG_DRIVER("generating error event\n"); |
321 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); | 355 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); |
322 | 356 | ||
323 | if (atomic_read(&dev_priv->mm.wedged)) { | 357 | if (atomic_read(&dev_priv->mm.wedged)) { |
324 | if (IS_I965G(dev)) { | 358 | if (IS_I965G(dev)) { |
325 | DRM_DEBUG("resetting chip\n"); | 359 | DRM_DEBUG_DRIVER("resetting chip\n"); |
326 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); | 360 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); |
327 | if (!i965_reset(dev, GDRST_RENDER)) { | 361 | if (!i965_reset(dev, GDRST_RENDER)) { |
328 | atomic_set(&dev_priv->mm.wedged, 0); | 362 | atomic_set(&dev_priv->mm.wedged, 0); |
329 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); | 363 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); |
330 | } | 364 | } |
331 | } else { | 365 | } else { |
332 | printk("reboot required\n"); | 366 | DRM_DEBUG_DRIVER("reboot required\n"); |
333 | } | 367 | } |
334 | } | 368 | } |
335 | } | 369 | } |
@@ -355,7 +389,7 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
355 | 389 | ||
356 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | 390 | error = kmalloc(sizeof(*error), GFP_ATOMIC); |
357 | if (!error) { | 391 | if (!error) { |
358 | DRM_DEBUG("out of memory, not capturing error state\n"); | 392 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); |
359 | goto out; | 393 | goto out; |
360 | } | 394 | } |
361 | 395 | ||
@@ -535,8 +569,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
535 | 569 | ||
536 | atomic_inc(&dev_priv->irq_received); | 570 | atomic_inc(&dev_priv->irq_received); |
537 | 571 | ||
538 | if (IS_IGDNG(dev)) | 572 | if (IS_IRONLAKE(dev)) |
539 | return igdng_irq_handler(dev); | 573 | return ironlake_irq_handler(dev); |
540 | 574 | ||
541 | iir = I915_READ(IIR); | 575 | iir = I915_READ(IIR); |
542 | 576 | ||
@@ -568,14 +602,14 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
568 | */ | 602 | */ |
569 | if (pipea_stats & 0x8000ffff) { | 603 | if (pipea_stats & 0x8000ffff) { |
570 | if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS) | 604 | if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS) |
571 | DRM_DEBUG("pipe a underrun\n"); | 605 | DRM_DEBUG_DRIVER("pipe a underrun\n"); |
572 | I915_WRITE(PIPEASTAT, pipea_stats); | 606 | I915_WRITE(PIPEASTAT, pipea_stats); |
573 | irq_received = 1; | 607 | irq_received = 1; |
574 | } | 608 | } |
575 | 609 | ||
576 | if (pipeb_stats & 0x8000ffff) { | 610 | if (pipeb_stats & 0x8000ffff) { |
577 | if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS) | 611 | if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS) |
578 | DRM_DEBUG("pipe b underrun\n"); | 612 | DRM_DEBUG_DRIVER("pipe b underrun\n"); |
579 | I915_WRITE(PIPEBSTAT, pipeb_stats); | 613 | I915_WRITE(PIPEBSTAT, pipeb_stats); |
580 | irq_received = 1; | 614 | irq_received = 1; |
581 | } | 615 | } |
@@ -591,7 +625,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
591 | (iir & I915_DISPLAY_PORT_INTERRUPT)) { | 625 | (iir & I915_DISPLAY_PORT_INTERRUPT)) { |
592 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 626 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
593 | 627 | ||
594 | DRM_DEBUG("hotplug event received, stat 0x%08x\n", | 628 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
595 | hotplug_status); | 629 | hotplug_status); |
596 | if (hotplug_status & dev_priv->hotplug_supported_mask) | 630 | if (hotplug_status & dev_priv->hotplug_supported_mask) |
597 | queue_work(dev_priv->wq, | 631 | queue_work(dev_priv->wq, |
@@ -599,27 +633,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
599 | 633 | ||
600 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 634 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
601 | I915_READ(PORT_HOTPLUG_STAT); | 635 | I915_READ(PORT_HOTPLUG_STAT); |
602 | |||
603 | /* EOS interrupts occurs */ | ||
604 | if (IS_IGD(dev) && | ||
605 | (hotplug_status & CRT_EOS_INT_STATUS)) { | ||
606 | u32 temp; | ||
607 | |||
608 | DRM_DEBUG("EOS interrupt occurs\n"); | ||
609 | /* status is already cleared */ | ||
610 | temp = I915_READ(ADPA); | ||
611 | temp &= ~ADPA_DAC_ENABLE; | ||
612 | I915_WRITE(ADPA, temp); | ||
613 | |||
614 | temp = I915_READ(PORT_HOTPLUG_EN); | ||
615 | temp &= ~CRT_EOS_INT_EN; | ||
616 | I915_WRITE(PORT_HOTPLUG_EN, temp); | ||
617 | |||
618 | temp = I915_READ(PORT_HOTPLUG_STAT); | ||
619 | if (temp & CRT_EOS_INT_STATUS) | ||
620 | I915_WRITE(PORT_HOTPLUG_STAT, | ||
621 | CRT_EOS_INT_STATUS); | ||
622 | } | ||
623 | } | 636 | } |
624 | 637 | ||
625 | I915_WRITE(IIR, iir); | 638 | I915_WRITE(IIR, iir); |
@@ -641,14 +654,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
641 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | 654 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); |
642 | } | 655 | } |
643 | 656 | ||
657 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) | ||
658 | intel_prepare_page_flip(dev, 0); | ||
659 | |||
660 | if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) | ||
661 | intel_prepare_page_flip(dev, 1); | ||
662 | |||
644 | if (pipea_stats & vblank_status) { | 663 | if (pipea_stats & vblank_status) { |
645 | vblank++; | 664 | vblank++; |
646 | drm_handle_vblank(dev, 0); | 665 | drm_handle_vblank(dev, 0); |
666 | intel_finish_page_flip(dev, 0); | ||
647 | } | 667 | } |
648 | 668 | ||
649 | if (pipeb_stats & vblank_status) { | 669 | if (pipeb_stats & vblank_status) { |
650 | vblank++; | 670 | vblank++; |
651 | drm_handle_vblank(dev, 1); | 671 | drm_handle_vblank(dev, 1); |
672 | intel_finish_page_flip(dev, 1); | ||
652 | } | 673 | } |
653 | 674 | ||
654 | if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || | 675 | if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || |
@@ -684,7 +705,7 @@ static int i915_emit_irq(struct drm_device * dev) | |||
684 | 705 | ||
685 | i915_kernel_lost_context(dev); | 706 | i915_kernel_lost_context(dev); |
686 | 707 | ||
687 | DRM_DEBUG("\n"); | 708 | DRM_DEBUG_DRIVER("\n"); |
688 | 709 | ||
689 | dev_priv->counter++; | 710 | dev_priv->counter++; |
690 | if (dev_priv->counter > 0x7FFFFFFFUL) | 711 | if (dev_priv->counter > 0x7FFFFFFFUL) |
@@ -709,8 +730,8 @@ void i915_user_irq_get(struct drm_device *dev) | |||
709 | 730 | ||
710 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 731 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
711 | if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { | 732 | if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { |
712 | if (IS_IGDNG(dev)) | 733 | if (IS_IRONLAKE(dev)) |
713 | igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); | 734 | ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); |
714 | else | 735 | else |
715 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); | 736 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); |
716 | } | 737 | } |
@@ -725,8 +746,8 @@ void i915_user_irq_put(struct drm_device *dev) | |||
725 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 746 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
726 | BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); | 747 | BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); |
727 | if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { | 748 | if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { |
728 | if (IS_IGDNG(dev)) | 749 | if (IS_IRONLAKE(dev)) |
729 | igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); | 750 | ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); |
730 | else | 751 | else |
731 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); | 752 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); |
732 | } | 753 | } |
@@ -749,7 +770,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
749 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 770 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
750 | int ret = 0; | 771 | int ret = 0; |
751 | 772 | ||
752 | DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, | 773 | DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, |
753 | READ_BREADCRUMB(dev_priv)); | 774 | READ_BREADCRUMB(dev_priv)); |
754 | 775 | ||
755 | if (READ_BREADCRUMB(dev_priv) >= irq_nr) { | 776 | if (READ_BREADCRUMB(dev_priv) >= irq_nr) { |
@@ -832,7 +853,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) | |||
832 | if (!(pipeconf & PIPEACONF_ENABLE)) | 853 | if (!(pipeconf & PIPEACONF_ENABLE)) |
833 | return -EINVAL; | 854 | return -EINVAL; |
834 | 855 | ||
835 | if (IS_IGDNG(dev)) | 856 | if (IS_IRONLAKE(dev)) |
836 | return 0; | 857 | return 0; |
837 | 858 | ||
838 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 859 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
@@ -854,7 +875,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) | |||
854 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 875 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
855 | unsigned long irqflags; | 876 | unsigned long irqflags; |
856 | 877 | ||
857 | if (IS_IGDNG(dev)) | 878 | if (IS_IRONLAKE(dev)) |
858 | return; | 879 | return; |
859 | 880 | ||
860 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 881 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
@@ -868,7 +889,7 @@ void i915_enable_interrupt (struct drm_device *dev) | |||
868 | { | 889 | { |
869 | struct drm_i915_private *dev_priv = dev->dev_private; | 890 | struct drm_i915_private *dev_priv = dev->dev_private; |
870 | 891 | ||
871 | if (!IS_IGDNG(dev)) | 892 | if (!IS_IRONLAKE(dev)) |
872 | opregion_enable_asle(dev); | 893 | opregion_enable_asle(dev); |
873 | dev_priv->irq_enabled = 1; | 894 | dev_priv->irq_enabled = 1; |
874 | } | 895 | } |
@@ -976,7 +997,7 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
976 | 997 | ||
977 | /* drm_dma.h hooks | 998 | /* drm_dma.h hooks |
978 | */ | 999 | */ |
979 | static void igdng_irq_preinstall(struct drm_device *dev) | 1000 | static void ironlake_irq_preinstall(struct drm_device *dev) |
980 | { | 1001 | { |
981 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1002 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
982 | 1003 | ||
@@ -992,14 +1013,21 @@ static void igdng_irq_preinstall(struct drm_device *dev) | |||
992 | I915_WRITE(GTIMR, 0xffffffff); | 1013 | I915_WRITE(GTIMR, 0xffffffff); |
993 | I915_WRITE(GTIER, 0x0); | 1014 | I915_WRITE(GTIER, 0x0); |
994 | (void) I915_READ(GTIER); | 1015 | (void) I915_READ(GTIER); |
1016 | |||
1017 | /* south display irq */ | ||
1018 | I915_WRITE(SDEIMR, 0xffffffff); | ||
1019 | I915_WRITE(SDEIER, 0x0); | ||
1020 | (void) I915_READ(SDEIER); | ||
995 | } | 1021 | } |
996 | 1022 | ||
997 | static int igdng_irq_postinstall(struct drm_device *dev) | 1023 | static int ironlake_irq_postinstall(struct drm_device *dev) |
998 | { | 1024 | { |
999 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1025 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1000 | /* enable kind of interrupts always enabled */ | 1026 | /* enable kind of interrupts always enabled */ |
1001 | u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */; | 1027 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; |
1002 | u32 render_mask = GT_USER_INTERRUPT; | 1028 | u32 render_mask = GT_USER_INTERRUPT; |
1029 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | ||
1030 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | ||
1003 | 1031 | ||
1004 | dev_priv->irq_mask_reg = ~display_mask; | 1032 | dev_priv->irq_mask_reg = ~display_mask; |
1005 | dev_priv->de_irq_enable_reg = display_mask; | 1033 | dev_priv->de_irq_enable_reg = display_mask; |
@@ -1019,6 +1047,14 @@ static int igdng_irq_postinstall(struct drm_device *dev) | |||
1019 | I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); | 1047 | I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); |
1020 | (void) I915_READ(GTIER); | 1048 | (void) I915_READ(GTIER); |
1021 | 1049 | ||
1050 | dev_priv->pch_irq_mask_reg = ~hotplug_mask; | ||
1051 | dev_priv->pch_irq_enable_reg = hotplug_mask; | ||
1052 | |||
1053 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | ||
1054 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg); | ||
1055 | I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); | ||
1056 | (void) I915_READ(SDEIER); | ||
1057 | |||
1022 | return 0; | 1058 | return 0; |
1023 | } | 1059 | } |
1024 | 1060 | ||
@@ -1031,8 +1067,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev) | |||
1031 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | 1067 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); |
1032 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); | 1068 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); |
1033 | 1069 | ||
1034 | if (IS_IGDNG(dev)) { | 1070 | if (IS_IRONLAKE(dev)) { |
1035 | igdng_irq_preinstall(dev); | 1071 | ironlake_irq_preinstall(dev); |
1036 | return; | 1072 | return; |
1037 | } | 1073 | } |
1038 | 1074 | ||
@@ -1059,8 +1095,8 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1059 | 1095 | ||
1060 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | 1096 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; |
1061 | 1097 | ||
1062 | if (IS_IGDNG(dev)) | 1098 | if (IS_IRONLAKE(dev)) |
1063 | return igdng_irq_postinstall(dev); | 1099 | return ironlake_irq_postinstall(dev); |
1064 | 1100 | ||
1065 | /* Unmask the interrupts that we always want on. */ | 1101 | /* Unmask the interrupts that we always want on. */ |
1066 | dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; | 1102 | dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; |
@@ -1120,7 +1156,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1120 | return 0; | 1156 | return 0; |
1121 | } | 1157 | } |
1122 | 1158 | ||
1123 | static void igdng_irq_uninstall(struct drm_device *dev) | 1159 | static void ironlake_irq_uninstall(struct drm_device *dev) |
1124 | { | 1160 | { |
1125 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1161 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1126 | I915_WRITE(HWSTAM, 0xffffffff); | 1162 | I915_WRITE(HWSTAM, 0xffffffff); |
@@ -1143,8 +1179,8 @@ void i915_driver_irq_uninstall(struct drm_device * dev) | |||
1143 | 1179 | ||
1144 | dev_priv->vblank_pipe = 0; | 1180 | dev_priv->vblank_pipe = 0; |
1145 | 1181 | ||
1146 | if (IS_IGDNG(dev)) { | 1182 | if (IS_IRONLAKE(dev)) { |
1147 | igdng_irq_uninstall(dev); | 1183 | ironlake_irq_uninstall(dev); |
1148 | return; | 1184 | return; |
1149 | } | 1185 | } |
1150 | 1186 | ||
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index 2d5193556d3f..7cc8410239cb 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c | |||
@@ -118,6 +118,10 @@ struct opregion_asle { | |||
118 | #define ASLE_BACKLIGHT_FAIL (2<<12) | 118 | #define ASLE_BACKLIGHT_FAIL (2<<12) |
119 | #define ASLE_PFIT_FAIL (2<<14) | 119 | #define ASLE_PFIT_FAIL (2<<14) |
120 | #define ASLE_PWM_FREQ_FAIL (2<<16) | 120 | #define ASLE_PWM_FREQ_FAIL (2<<16) |
121 | #define ASLE_ALS_ILLUM_FAILED (1<<10) | ||
122 | #define ASLE_BACKLIGHT_FAILED (1<<12) | ||
123 | #define ASLE_PFIT_FAILED (1<<14) | ||
124 | #define ASLE_PWM_FREQ_FAILED (1<<16) | ||
121 | 125 | ||
122 | /* ASLE backlight brightness to set */ | 126 | /* ASLE backlight brightness to set */ |
123 | #define ASLE_BCLP_VALID (1<<31) | 127 | #define ASLE_BCLP_VALID (1<<31) |
@@ -163,7 +167,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | |||
163 | if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE)) | 167 | if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE)) |
164 | pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); | 168 | pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); |
165 | else { | 169 | else { |
166 | if (IS_IGD(dev)) { | 170 | if (IS_PINEVIEW(dev)) { |
167 | blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); | 171 | blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); |
168 | max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> | 172 | max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> |
169 | BACKLIGHT_MODULATION_FREQ_SHIFT; | 173 | BACKLIGHT_MODULATION_FREQ_SHIFT; |
@@ -224,7 +228,7 @@ void opregion_asle_intr(struct drm_device *dev) | |||
224 | asle_req = asle->aslc & ASLE_REQ_MSK; | 228 | asle_req = asle->aslc & ASLE_REQ_MSK; |
225 | 229 | ||
226 | if (!asle_req) { | 230 | if (!asle_req) { |
227 | DRM_DEBUG("non asle set request??\n"); | 231 | DRM_DEBUG_DRIVER("non asle set request??\n"); |
228 | return; | 232 | return; |
229 | } | 233 | } |
230 | 234 | ||
@@ -243,6 +247,73 @@ void opregion_asle_intr(struct drm_device *dev) | |||
243 | asle->aslc = asle_stat; | 247 | asle->aslc = asle_stat; |
244 | } | 248 | } |
245 | 249 | ||
250 | static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp) | ||
251 | { | ||
252 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
253 | struct opregion_asle *asle = dev_priv->opregion.asle; | ||
254 | u32 cpu_pwm_ctl, pch_pwm_ctl2; | ||
255 | u32 max_backlight, level; | ||
256 | |||
257 | if (!(bclp & ASLE_BCLP_VALID)) | ||
258 | return ASLE_BACKLIGHT_FAILED; | ||
259 | |||
260 | bclp &= ASLE_BCLP_MSK; | ||
261 | if (bclp < 0 || bclp > 255) | ||
262 | return ASLE_BACKLIGHT_FAILED; | ||
263 | |||
264 | cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL); | ||
265 | pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); | ||
266 | /* get the max PWM frequency */ | ||
267 | max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK; | ||
268 | /* calculate the expected PWM frequency */ | ||
269 | level = (bclp * max_backlight) / 255; | ||
270 | /* reserve the high 16 bits */ | ||
271 | cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK); | ||
272 | /* write the updated PWM frequency */ | ||
273 | I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level); | ||
274 | |||
275 | asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; | ||
276 | |||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | void ironlake_opregion_gse_intr(struct drm_device *dev) | ||
281 | { | ||
282 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
283 | struct opregion_asle *asle = dev_priv->opregion.asle; | ||
284 | u32 asle_stat = 0; | ||
285 | u32 asle_req; | ||
286 | |||
287 | if (!asle) | ||
288 | return; | ||
289 | |||
290 | asle_req = asle->aslc & ASLE_REQ_MSK; | ||
291 | |||
292 | if (!asle_req) { | ||
293 | DRM_DEBUG_DRIVER("non asle set request??\n"); | ||
294 | return; | ||
295 | } | ||
296 | |||
297 | if (asle_req & ASLE_SET_ALS_ILLUM) { | ||
298 | DRM_DEBUG_DRIVER("Illum is not supported\n"); | ||
299 | asle_stat |= ASLE_ALS_ILLUM_FAILED; | ||
300 | } | ||
301 | |||
302 | if (asle_req & ASLE_SET_BACKLIGHT) | ||
303 | asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp); | ||
304 | |||
305 | if (asle_req & ASLE_SET_PFIT) { | ||
306 | DRM_DEBUG_DRIVER("Pfit is not supported\n"); | ||
307 | asle_stat |= ASLE_PFIT_FAILED; | ||
308 | } | ||
309 | |||
310 | if (asle_req & ASLE_SET_PWM_FREQ) { | ||
311 | DRM_DEBUG_DRIVER("PWM freq is not supported\n"); | ||
312 | asle_stat |= ASLE_PWM_FREQ_FAILED; | ||
313 | } | ||
314 | |||
315 | asle->aslc = asle_stat; | ||
316 | } | ||
246 | #define ASLE_ALS_EN (1<<0) | 317 | #define ASLE_ALS_EN (1<<0) |
247 | #define ASLE_BLC_EN (1<<1) | 318 | #define ASLE_BLC_EN (1<<1) |
248 | #define ASLE_PFIT_EN (1<<2) | 319 | #define ASLE_PFIT_EN (1<<2) |
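A worked example of the two scalings in asle_set_backlight_ironlake() above, assuming the PCH reports a maximum duty cycle of 0x1000 and the firmware asks for bclp = 128:

level      = (128 * 0x1000) / 255;                    /* = 2056 (0x808), programmed into BLC_PWM_CPU_CTL */
asle->cblv = (128 * 0x64) / 0xff | ASLE_CBLV_VALID;   /* = 50, i.e. 50% reported back to the firmware */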
@@ -258,8 +329,7 @@ void opregion_enable_asle(struct drm_device *dev) | |||
258 | unsigned long irqflags; | 329 | unsigned long irqflags; |
259 | 330 | ||
260 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 331 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
261 | i915_enable_pipestat(dev_priv, 1, | 332 | intel_enable_asle(dev); |
262 | I915_LEGACY_BLC_EVENT_ENABLE); | ||
263 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, | 333 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, |
264 | irqflags); | 334 | irqflags); |
265 | } | 335 | } |
@@ -361,9 +431,9 @@ int intel_opregion_init(struct drm_device *dev, int resume) | |||
361 | int err = 0; | 431 | int err = 0; |
362 | 432 | ||
363 | pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); | 433 | pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); |
364 | DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls); | 434 | DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); |
365 | if (asls == 0) { | 435 | if (asls == 0) { |
366 | DRM_DEBUG("ACPI OpRegion not supported!\n"); | 436 | DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); |
367 | return -ENOTSUPP; | 437 | return -ENOTSUPP; |
368 | } | 438 | } |
369 | 439 | ||
@@ -373,30 +443,30 @@ int intel_opregion_init(struct drm_device *dev, int resume) | |||
373 | 443 | ||
374 | opregion->header = base; | 444 | opregion->header = base; |
375 | if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) { | 445 | if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) { |
376 | DRM_DEBUG("opregion signature mismatch\n"); | 446 | DRM_DEBUG_DRIVER("opregion signature mismatch\n"); |
377 | err = -EINVAL; | 447 | err = -EINVAL; |
378 | goto err_out; | 448 | goto err_out; |
379 | } | 449 | } |
380 | 450 | ||
381 | mboxes = opregion->header->mboxes; | 451 | mboxes = opregion->header->mboxes; |
382 | if (mboxes & MBOX_ACPI) { | 452 | if (mboxes & MBOX_ACPI) { |
383 | DRM_DEBUG("Public ACPI methods supported\n"); | 453 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); |
384 | opregion->acpi = base + OPREGION_ACPI_OFFSET; | 454 | opregion->acpi = base + OPREGION_ACPI_OFFSET; |
385 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 455 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
386 | intel_didl_outputs(dev); | 456 | intel_didl_outputs(dev); |
387 | } else { | 457 | } else { |
388 | DRM_DEBUG("Public ACPI methods not supported\n"); | 458 | DRM_DEBUG_DRIVER("Public ACPI methods not supported\n"); |
389 | err = -ENOTSUPP; | 459 | err = -ENOTSUPP; |
390 | goto err_out; | 460 | goto err_out; |
391 | } | 461 | } |
392 | opregion->enabled = 1; | 462 | opregion->enabled = 1; |
393 | 463 | ||
394 | if (mboxes & MBOX_SWSCI) { | 464 | if (mboxes & MBOX_SWSCI) { |
395 | DRM_DEBUG("SWSCI supported\n"); | 465 | DRM_DEBUG_DRIVER("SWSCI supported\n"); |
396 | opregion->swsci = base + OPREGION_SWSCI_OFFSET; | 466 | opregion->swsci = base + OPREGION_SWSCI_OFFSET; |
397 | } | 467 | } |
398 | if (mboxes & MBOX_ASLE) { | 468 | if (mboxes & MBOX_ASLE) { |
399 | DRM_DEBUG("ASLE supported\n"); | 469 | DRM_DEBUG_DRIVER("ASLE supported\n"); |
400 | opregion->asle = base + OPREGION_ASLE_OFFSET; | 470 | opregion->asle = base + OPREGION_ASLE_OFFSET; |
401 | opregion_enable_asle(dev); | 471 | opregion_enable_asle(dev); |
402 | } | 472 | } |
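
The OpRegion probe above keys everything off the mailbox-presence word in the header: each supported mailbox sets a bit, and only the sub-structures whose bits are set get mapped and used. A minimal standalone sketch of that test follows; the bit positions are chosen for illustration and are not copied from the driver's opregion definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only -- assumptions of this sketch, not the
 * driver's actual mailbox defines. */
#define MBOX_ACPI  (1u << 0)   /* public ACPI methods mailbox */
#define MBOX_SWSCI (1u << 1)   /* SWSCI mailbox */
#define MBOX_ASLE  (1u << 2)   /* ASLE (backlight/ALS) mailbox */

static void report_mailboxes(uint32_t mboxes)
{
	if (mboxes & MBOX_ACPI)
		printf("Public ACPI methods supported\n");
	else
		printf("Public ACPI methods not supported\n");
	if (mboxes & MBOX_SWSCI)
		printf("SWSCI supported\n");
	if (mboxes & MBOX_ASLE)
		printf("ASLE supported\n");
}

int main(void)
{
	report_mailboxes(MBOX_ACPI | MBOX_ASLE);   /* sample header value */
	return 0;
}
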
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1687edf68795..974b3cf70618 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -140,6 +140,7 @@ | |||
140 | #define MI_NOOP MI_INSTR(0, 0) | 140 | #define MI_NOOP MI_INSTR(0, 0) |
141 | #define MI_USER_INTERRUPT MI_INSTR(0x02, 0) | 141 | #define MI_USER_INTERRUPT MI_INSTR(0x02, 0) |
142 | #define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) | 142 | #define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) |
143 | #define MI_WAIT_FOR_OVERLAY_FLIP (1<<16) | ||
143 | #define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) | 144 | #define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) |
144 | #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) | 145 | #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) |
145 | #define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) | 146 | #define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) |
@@ -151,7 +152,13 @@ | |||
151 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ | 152 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ |
152 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) | 153 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) |
153 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) | 154 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) |
155 | #define MI_OVERLAY_FLIP MI_INSTR(0x11,0) | ||
156 | #define MI_OVERLAY_CONTINUE (0x0<<21) | ||
157 | #define MI_OVERLAY_ON (0x1<<21) | ||
158 | #define MI_OVERLAY_OFF (0x2<<21) | ||
154 | #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) | 159 | #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) |
160 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) | ||
161 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) | ||
155 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) | 162 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) |
156 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ | 163 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ |
157 | #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) | 164 | #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) |
@@ -260,6 +267,8 @@ | |||
260 | #define HWS_PGA 0x02080 | 267 | #define HWS_PGA 0x02080 |
261 | #define HWS_ADDRESS_MASK 0xfffff000 | 268 | #define HWS_ADDRESS_MASK 0xfffff000 |
262 | #define HWS_START_ADDRESS_SHIFT 4 | 269 | #define HWS_START_ADDRESS_SHIFT 4 |
270 | #define PWRCTXA 0x2088 /* 965GM+ only */ | ||
271 | #define PWRCTX_EN (1<<0) | ||
263 | #define IPEIR 0x02088 | 272 | #define IPEIR 0x02088 |
264 | #define IPEHR 0x0208c | 273 | #define IPEHR 0x0208c |
265 | #define INSTDONE 0x02090 | 274 | #define INSTDONE 0x02090 |
@@ -405,6 +414,13 @@ | |||
405 | # define GPIO_DATA_VAL_IN (1 << 12) | 414 | # define GPIO_DATA_VAL_IN (1 << 12) |
406 | # define GPIO_DATA_PULLUP_DISABLE (1 << 13) | 415 | # define GPIO_DATA_PULLUP_DISABLE (1 << 13) |
407 | 416 | ||
417 | #define GMBUS0 0x5100 | ||
418 | #define GMBUS1 0x5104 | ||
419 | #define GMBUS2 0x5108 | ||
420 | #define GMBUS3 0x510c | ||
421 | #define GMBUS4 0x5110 | ||
422 | #define GMBUS5 0x5120 | ||
423 | |||
408 | /* | 424 | /* |
409 | * Clock control & power management | 425 | * Clock control & power management |
410 | */ | 426 | */ |
@@ -435,7 +451,7 @@ | |||
435 | #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ | 451 | #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ |
436 | #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ | 452 | #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ |
437 | #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ | 453 | #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ |
438 | #define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */ | 454 | #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ |
439 | 455 | ||
440 | #define I915_FIFO_UNDERRUN_STATUS (1UL<<31) | 456 | #define I915_FIFO_UNDERRUN_STATUS (1UL<<31) |
441 | #define I915_CRC_ERROR_ENABLE (1UL<<29) | 457 | #define I915_CRC_ERROR_ENABLE (1UL<<29) |
@@ -512,7 +528,7 @@ | |||
512 | */ | 528 | */ |
513 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 | 529 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 |
514 | #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 | 530 | #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 |
515 | #define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15 | 531 | #define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15 |
516 | /* i830, required in DVO non-gang */ | 532 | /* i830, required in DVO non-gang */ |
517 | #define PLL_P2_DIVIDE_BY_4 (1 << 23) | 533 | #define PLL_P2_DIVIDE_BY_4 (1 << 23) |
518 | #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ | 534 | #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ |
@@ -522,7 +538,7 @@ | |||
522 | #define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) | 538 | #define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) |
523 | #define PLL_REF_INPUT_MASK (3 << 13) | 539 | #define PLL_REF_INPUT_MASK (3 << 13) |
524 | #define PLL_LOAD_PULSE_PHASE_SHIFT 9 | 540 | #define PLL_LOAD_PULSE_PHASE_SHIFT 9 |
525 | /* IGDNG */ | 541 | /* Ironlake */ |
526 | # define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9 | 542 | # define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9 |
527 | # define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9) | 543 | # define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9) |
528 | # define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9) | 544 | # define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9) |
@@ -586,12 +602,12 @@ | |||
586 | #define FPB0 0x06048 | 602 | #define FPB0 0x06048 |
587 | #define FPB1 0x0604c | 603 | #define FPB1 0x0604c |
588 | #define FP_N_DIV_MASK 0x003f0000 | 604 | #define FP_N_DIV_MASK 0x003f0000 |
589 | #define FP_N_IGD_DIV_MASK 0x00ff0000 | 605 | #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 |
590 | #define FP_N_DIV_SHIFT 16 | 606 | #define FP_N_DIV_SHIFT 16 |
591 | #define FP_M1_DIV_MASK 0x00003f00 | 607 | #define FP_M1_DIV_MASK 0x00003f00 |
592 | #define FP_M1_DIV_SHIFT 8 | 608 | #define FP_M1_DIV_SHIFT 8 |
593 | #define FP_M2_DIV_MASK 0x0000003f | 609 | #define FP_M2_DIV_MASK 0x0000003f |
594 | #define FP_M2_IGD_DIV_MASK 0x000000ff | 610 | #define FP_M2_PINEVIEW_DIV_MASK 0x000000ff |
595 | #define FP_M2_DIV_SHIFT 0 | 611 | #define FP_M2_DIV_SHIFT 0 |
596 | #define DPLL_TEST 0x606c | 612 | #define DPLL_TEST 0x606c |
597 | #define DPLLB_TEST_SDVO_DIV_1 (0 << 22) | 613 | #define DPLLB_TEST_SDVO_DIV_1 (0 << 22) |
@@ -769,7 +785,8 @@ | |||
769 | 785 | ||
770 | /** GM965 GM45 render standby register */ | 786 | /** GM965 GM45 render standby register */ |
771 | #define MCHBAR_RENDER_STANDBY 0x111B8 | 787 | #define MCHBAR_RENDER_STANDBY 0x111B8 |
772 | 788 | #define RCX_SW_EXIT (1<<23) | |
789 | #define RSX_STATUS_MASK 0x00700000 | ||
773 | #define PEG_BAND_GAP_DATA 0x14d68 | 790 | #define PEG_BAND_GAP_DATA 0x14d68 |
774 | 791 | ||
775 | /* | 792 | /* |
@@ -844,7 +861,6 @@ | |||
844 | #define SDVOB_HOTPLUG_INT_EN (1 << 26) | 861 | #define SDVOB_HOTPLUG_INT_EN (1 << 26) |
845 | #define SDVOC_HOTPLUG_INT_EN (1 << 25) | 862 | #define SDVOC_HOTPLUG_INT_EN (1 << 25) |
846 | #define TV_HOTPLUG_INT_EN (1 << 18) | 863 | #define TV_HOTPLUG_INT_EN (1 << 18) |
847 | #define CRT_EOS_INT_EN (1 << 10) | ||
848 | #define CRT_HOTPLUG_INT_EN (1 << 9) | 864 | #define CRT_HOTPLUG_INT_EN (1 << 9) |
849 | #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) | 865 | #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) |
850 | #define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) | 866 | #define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) |
@@ -868,7 +884,6 @@ | |||
868 | HDMID_HOTPLUG_INT_EN | \ | 884 | HDMID_HOTPLUG_INT_EN | \ |
869 | SDVOB_HOTPLUG_INT_EN | \ | 885 | SDVOB_HOTPLUG_INT_EN | \ |
870 | SDVOC_HOTPLUG_INT_EN | \ | 886 | SDVOC_HOTPLUG_INT_EN | \ |
871 | TV_HOTPLUG_INT_EN | \ | ||
872 | CRT_HOTPLUG_INT_EN) | 887 | CRT_HOTPLUG_INT_EN) |
873 | 888 | ||
874 | 889 | ||
@@ -879,7 +894,6 @@ | |||
879 | #define DPC_HOTPLUG_INT_STATUS (1 << 28) | 894 | #define DPC_HOTPLUG_INT_STATUS (1 << 28) |
880 | #define HDMID_HOTPLUG_INT_STATUS (1 << 27) | 895 | #define HDMID_HOTPLUG_INT_STATUS (1 << 27) |
881 | #define DPD_HOTPLUG_INT_STATUS (1 << 27) | 896 | #define DPD_HOTPLUG_INT_STATUS (1 << 27) |
882 | #define CRT_EOS_INT_STATUS (1 << 12) | ||
883 | #define CRT_HOTPLUG_INT_STATUS (1 << 11) | 897 | #define CRT_HOTPLUG_INT_STATUS (1 << 11) |
884 | #define TV_HOTPLUG_INT_STATUS (1 << 10) | 898 | #define TV_HOTPLUG_INT_STATUS (1 << 10) |
885 | #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) | 899 | #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) |
@@ -1620,7 +1634,7 @@ | |||
1620 | #define DP_CLOCK_OUTPUT_ENABLE (1 << 13) | 1634 | #define DP_CLOCK_OUTPUT_ENABLE (1 << 13) |
1621 | 1635 | ||
1622 | #define DP_SCRAMBLING_DISABLE (1 << 12) | 1636 | #define DP_SCRAMBLING_DISABLE (1 << 12) |
1623 | #define DP_SCRAMBLING_DISABLE_IGDNG (1 << 7) | 1637 | #define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7) |
1624 | 1638 | ||
1625 | /** limit RGB values to avoid confusing TVs */ | 1639 | /** limit RGB values to avoid confusing TVs */ |
1626 | #define DP_COLOR_RANGE_16_235 (1 << 8) | 1640 | #define DP_COLOR_RANGE_16_235 (1 << 8) |
@@ -1808,7 +1822,7 @@ | |||
1808 | #define DSPFW3 0x7003c | 1822 | #define DSPFW3 0x7003c |
1809 | #define DSPFW_HPLL_SR_EN (1<<31) | 1823 | #define DSPFW_HPLL_SR_EN (1<<31) |
1810 | #define DSPFW_CURSOR_SR_SHIFT 24 | 1824 | #define DSPFW_CURSOR_SR_SHIFT 24 |
1811 | #define IGD_SELF_REFRESH_EN (1<<30) | 1825 | #define PINEVIEW_SELF_REFRESH_EN (1<<30) |
1812 | 1826 | ||
1813 | /* FIFO watermark sizes etc */ | 1827 | /* FIFO watermark sizes etc */ |
1814 | #define G4X_FIFO_LINE_SIZE 64 | 1828 | #define G4X_FIFO_LINE_SIZE 64 |
@@ -1824,16 +1838,16 @@ | |||
1824 | #define G4X_MAX_WM 0x3f | 1838 | #define G4X_MAX_WM 0x3f |
1825 | #define I915_MAX_WM 0x3f | 1839 | #define I915_MAX_WM 0x3f |
1826 | 1840 | ||
1827 | #define IGD_DISPLAY_FIFO 512 /* in 64byte unit */ | 1841 | #define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */ |
1828 | #define IGD_FIFO_LINE_SIZE 64 | 1842 | #define PINEVIEW_FIFO_LINE_SIZE 64 |
1829 | #define IGD_MAX_WM 0x1ff | 1843 | #define PINEVIEW_MAX_WM 0x1ff |
1830 | #define IGD_DFT_WM 0x3f | 1844 | #define PINEVIEW_DFT_WM 0x3f |
1831 | #define IGD_DFT_HPLLOFF_WM 0 | 1845 | #define PINEVIEW_DFT_HPLLOFF_WM 0 |
1832 | #define IGD_GUARD_WM 10 | 1846 | #define PINEVIEW_GUARD_WM 10 |
1833 | #define IGD_CURSOR_FIFO 64 | 1847 | #define PINEVIEW_CURSOR_FIFO 64 |
1834 | #define IGD_CURSOR_MAX_WM 0x3f | 1848 | #define PINEVIEW_CURSOR_MAX_WM 0x3f |
1835 | #define IGD_CURSOR_DFT_WM 0 | 1849 | #define PINEVIEW_CURSOR_DFT_WM 0 |
1836 | #define IGD_CURSOR_GUARD_WM 5 | 1850 | #define PINEVIEW_CURSOR_GUARD_WM 5 |
1837 | 1851 | ||
1838 | /* | 1852 | /* |
1839 | * The two pipe frame counter registers are not synchronized, so | 1853 | * The two pipe frame counter registers are not synchronized, so |
@@ -1907,6 +1921,7 @@ | |||
1907 | #define DISPPLANE_16BPP (0x5<<26) | 1921 | #define DISPPLANE_16BPP (0x5<<26) |
1908 | #define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) | 1922 | #define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) |
1909 | #define DISPPLANE_32BPP (0x7<<26) | 1923 | #define DISPPLANE_32BPP (0x7<<26) |
1924 | #define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) | ||
1910 | #define DISPPLANE_STEREO_ENABLE (1<<25) | 1925 | #define DISPPLANE_STEREO_ENABLE (1<<25) |
1911 | #define DISPPLANE_STEREO_DISABLE 0 | 1926 | #define DISPPLANE_STEREO_DISABLE 0 |
1912 | #define DISPPLANE_SEL_PIPE_MASK (1<<24) | 1927 | #define DISPPLANE_SEL_PIPE_MASK (1<<24) |
@@ -1918,7 +1933,7 @@ | |||
1918 | #define DISPPLANE_NO_LINE_DOUBLE 0 | 1933 | #define DISPPLANE_NO_LINE_DOUBLE 0 |
1919 | #define DISPPLANE_STEREO_POLARITY_FIRST 0 | 1934 | #define DISPPLANE_STEREO_POLARITY_FIRST 0 |
1920 | #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) | 1935 | #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) |
1921 | #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* IGDNG */ | 1936 | #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ |
1922 | #define DISPPLANE_TILED (1<<10) | 1937 | #define DISPPLANE_TILED (1<<10) |
1923 | #define DSPAADDR 0x70184 | 1938 | #define DSPAADDR 0x70184 |
1924 | #define DSPASTRIDE 0x70188 | 1939 | #define DSPASTRIDE 0x70188 |
@@ -1971,7 +1986,7 @@ | |||
1971 | # define VGA_2X_MODE (1 << 30) | 1986 | # define VGA_2X_MODE (1 << 30) |
1972 | # define VGA_PIPE_B_SELECT (1 << 29) | 1987 | # define VGA_PIPE_B_SELECT (1 << 29) |
1973 | 1988 | ||
1974 | /* IGDNG */ | 1989 | /* Ironlake */ |
1975 | 1990 | ||
1976 | #define CPU_VGACNTRL 0x41000 | 1991 | #define CPU_VGACNTRL 0x41000 |
1977 | 1992 | ||
@@ -2117,6 +2132,7 @@ | |||
2117 | #define SDE_PORTC_HOTPLUG (1 << 9) | 2132 | #define SDE_PORTC_HOTPLUG (1 << 9) |
2118 | #define SDE_PORTB_HOTPLUG (1 << 8) | 2133 | #define SDE_PORTB_HOTPLUG (1 << 8) |
2119 | #define SDE_SDVOB_HOTPLUG (1 << 6) | 2134 | #define SDE_SDVOB_HOTPLUG (1 << 6) |
2135 | #define SDE_HOTPLUG_MASK (0xf << 8) | ||
2120 | 2136 | ||
2121 | #define SDEISR 0xc4000 | 2137 | #define SDEISR 0xc4000 |
2122 | #define SDEIMR 0xc4004 | 2138 | #define SDEIMR 0xc4004 |
@@ -2157,6 +2173,13 @@ | |||
2157 | #define PCH_GPIOE 0xc5020 | 2173 | #define PCH_GPIOE 0xc5020 |
2158 | #define PCH_GPIOF 0xc5024 | 2174 | #define PCH_GPIOF 0xc5024 |
2159 | 2175 | ||
2176 | #define PCH_GMBUS0 0xc5100 | ||
2177 | #define PCH_GMBUS1 0xc5104 | ||
2178 | #define PCH_GMBUS2 0xc5108 | ||
2179 | #define PCH_GMBUS3 0xc510c | ||
2180 | #define PCH_GMBUS4 0xc5110 | ||
2181 | #define PCH_GMBUS5 0xc5120 | ||
2182 | |||
2160 | #define PCH_DPLL_A 0xc6014 | 2183 | #define PCH_DPLL_A 0xc6014 |
2161 | #define PCH_DPLL_B 0xc6018 | 2184 | #define PCH_DPLL_B 0xc6018 |
2162 | 2185 | ||
@@ -2292,7 +2315,7 @@ | |||
2292 | #define FDI_DP_PORT_WIDTH_X3 (2<<19) | 2315 | #define FDI_DP_PORT_WIDTH_X3 (2<<19) |
2293 | #define FDI_DP_PORT_WIDTH_X4 (3<<19) | 2316 | #define FDI_DP_PORT_WIDTH_X4 (3<<19) |
2294 | #define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18) | 2317 | #define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18) |
2295 | /* IGDNG: hardwired to 1 */ | 2318 | /* Ironlake: hardwired to 1 */ |
2296 | #define FDI_TX_PLL_ENABLE (1<<14) | 2319 | #define FDI_TX_PLL_ENABLE (1<<14) |
2297 | /* both Tx and Rx */ | 2320 | /* both Tx and Rx */ |
2298 | #define FDI_SCRAMBLING_ENABLE (0<<7) | 2321 | #define FDI_SCRAMBLING_ENABLE (0<<7) |
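
The FP0/FP1 hunk above widens the N and M2 divider fields for Pineview while keeping the generic masks for other chips. A standalone sketch of decoding a raw FP register word with exactly those mask/shift values; the sample register words passed in main() are made up.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Mask/shift values as defined in the i915_reg.h hunk above. */
#define FP_N_DIV_MASK            0x003f0000
#define FP_N_PINEVIEW_DIV_MASK   0x00ff0000
#define FP_N_DIV_SHIFT           16
#define FP_M1_DIV_MASK           0x00003f00
#define FP_M1_DIV_SHIFT          8
#define FP_M2_DIV_MASK           0x0000003f
#define FP_M2_PINEVIEW_DIV_MASK  0x000000ff
#define FP_M2_DIV_SHIFT          0

static void decode_fp(uint32_t fp, bool is_pineview)
{
	uint32_t n_mask  = is_pineview ? FP_N_PINEVIEW_DIV_MASK  : FP_N_DIV_MASK;
	uint32_t m2_mask = is_pineview ? FP_M2_PINEVIEW_DIV_MASK : FP_M2_DIV_MASK;

	unsigned n  = (fp & n_mask)         >> FP_N_DIV_SHIFT;
	unsigned m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	unsigned m2 = (fp & m2_mask)        >> FP_M2_DIV_SHIFT;

	printf("N=%u M1=%u M2=%u\n", n, m1, m2);
}

int main(void)
{
	decode_fp(0x00020C07, false);   /* made-up register value, generic layout */
	decode_fp(0x00020007, true);    /* Pineview layout: wider N/M2, M1 reserved */
	return 0;
}
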
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 6eec8171a44e..ab35e81b7cbf 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -27,14 +27,14 @@ | |||
27 | #include "drmP.h" | 27 | #include "drmP.h" |
28 | #include "drm.h" | 28 | #include "drm.h" |
29 | #include "i915_drm.h" | 29 | #include "i915_drm.h" |
30 | #include "i915_drv.h" | 30 | #include "intel_drv.h" |
31 | 31 | ||
32 | static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) | 32 | static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) |
33 | { | 33 | { |
34 | struct drm_i915_private *dev_priv = dev->dev_private; | 34 | struct drm_i915_private *dev_priv = dev->dev_private; |
35 | u32 dpll_reg; | 35 | u32 dpll_reg; |
36 | 36 | ||
37 | if (IS_IGDNG(dev)) { | 37 | if (IS_IRONLAKE(dev)) { |
38 | dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B; | 38 | dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B; |
39 | } else { | 39 | } else { |
40 | dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B; | 40 | dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B; |
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe) | |||
53 | if (!i915_pipe_enabled(dev, pipe)) | 53 | if (!i915_pipe_enabled(dev, pipe)) |
54 | return; | 54 | return; |
55 | 55 | ||
56 | if (IS_IGDNG(dev)) | 56 | if (IS_IRONLAKE(dev)) |
57 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; | 57 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; |
58 | 58 | ||
59 | if (pipe == PIPE_A) | 59 | if (pipe == PIPE_A) |
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) | |||
75 | if (!i915_pipe_enabled(dev, pipe)) | 75 | if (!i915_pipe_enabled(dev, pipe)) |
76 | return; | 76 | return; |
77 | 77 | ||
78 | if (IS_IGDNG(dev)) | 78 | if (IS_IRONLAKE(dev)) |
79 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; | 79 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; |
80 | 80 | ||
81 | if (pipe == PIPE_A) | 81 | if (pipe == PIPE_A) |
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
239 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 239 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
240 | return; | 240 | return; |
241 | 241 | ||
242 | if (IS_IGDNG(dev)) { | 242 | if (IS_IRONLAKE(dev)) { |
243 | dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); | 243 | dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); |
244 | dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); | 244 | dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); |
245 | } | 245 | } |
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
247 | /* Pipe & plane A info */ | 247 | /* Pipe & plane A info */ |
248 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); | 248 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); |
249 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); | 249 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); |
250 | if (IS_IGDNG(dev)) { | 250 | if (IS_IRONLAKE(dev)) { |
251 | dev_priv->saveFPA0 = I915_READ(PCH_FPA0); | 251 | dev_priv->saveFPA0 = I915_READ(PCH_FPA0); |
252 | dev_priv->saveFPA1 = I915_READ(PCH_FPA1); | 252 | dev_priv->saveFPA1 = I915_READ(PCH_FPA1); |
253 | dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A); | 253 | dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A); |
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
256 | dev_priv->saveFPA1 = I915_READ(FPA1); | 256 | dev_priv->saveFPA1 = I915_READ(FPA1); |
257 | dev_priv->saveDPLL_A = I915_READ(DPLL_A); | 257 | dev_priv->saveDPLL_A = I915_READ(DPLL_A); |
258 | } | 258 | } |
259 | if (IS_I965G(dev) && !IS_IGDNG(dev)) | 259 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) |
260 | dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); | 260 | dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); |
261 | dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); | 261 | dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); |
262 | dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); | 262 | dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); |
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
264 | dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); | 264 | dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); |
265 | dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); | 265 | dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); |
266 | dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); | 266 | dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); |
267 | if (!IS_IGDNG(dev)) | 267 | if (!IS_IRONLAKE(dev)) |
268 | dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); | 268 | dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); |
269 | 269 | ||
270 | if (IS_IGDNG(dev)) { | 270 | if (IS_IRONLAKE(dev)) { |
271 | dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1); | 271 | dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1); |
272 | dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1); | 272 | dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1); |
273 | dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1); | 273 | dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1); |
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
304 | /* Pipe & plane B info */ | 304 | /* Pipe & plane B info */ |
305 | dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); | 305 | dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); |
306 | dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); | 306 | dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); |
307 | if (IS_IGDNG(dev)) { | 307 | if (IS_IRONLAKE(dev)) { |
308 | dev_priv->saveFPB0 = I915_READ(PCH_FPB0); | 308 | dev_priv->saveFPB0 = I915_READ(PCH_FPB0); |
309 | dev_priv->saveFPB1 = I915_READ(PCH_FPB1); | 309 | dev_priv->saveFPB1 = I915_READ(PCH_FPB1); |
310 | dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B); | 310 | dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B); |
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
313 | dev_priv->saveFPB1 = I915_READ(FPB1); | 313 | dev_priv->saveFPB1 = I915_READ(FPB1); |
314 | dev_priv->saveDPLL_B = I915_READ(DPLL_B); | 314 | dev_priv->saveDPLL_B = I915_READ(DPLL_B); |
315 | } | 315 | } |
316 | if (IS_I965G(dev) && !IS_IGDNG(dev)) | 316 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) |
317 | dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); | 317 | dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); |
318 | dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); | 318 | dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); |
319 | dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); | 319 | dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); |
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
321 | dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); | 321 | dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); |
322 | dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); | 322 | dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); |
323 | dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); | 323 | dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); |
324 | if (!IS_IGDNG(dev)) | 324 | if (!IS_IRONLAKE(dev)) |
325 | dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); | 325 | dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); |
326 | 326 | ||
327 | if (IS_IGDNG(dev)) { | 327 | if (IS_IRONLAKE(dev)) { |
328 | dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1); | 328 | dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1); |
329 | dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1); | 329 | dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1); |
330 | dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1); | 330 | dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1); |
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
369 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 369 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
370 | return; | 370 | return; |
371 | 371 | ||
372 | if (IS_IGDNG(dev)) { | 372 | if (IS_IRONLAKE(dev)) { |
373 | dpll_a_reg = PCH_DPLL_A; | 373 | dpll_a_reg = PCH_DPLL_A; |
374 | dpll_b_reg = PCH_DPLL_B; | 374 | dpll_b_reg = PCH_DPLL_B; |
375 | fpa0_reg = PCH_FPA0; | 375 | fpa0_reg = PCH_FPA0; |
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
385 | fpb1_reg = FPB1; | 385 | fpb1_reg = FPB1; |
386 | } | 386 | } |
387 | 387 | ||
388 | if (IS_IGDNG(dev)) { | 388 | if (IS_IRONLAKE(dev)) { |
389 | I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); | 389 | I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); |
390 | I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); | 390 | I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); |
391 | } | 391 | } |
@@ -402,7 +402,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
402 | /* Actually enable it */ | 402 | /* Actually enable it */ |
403 | I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); | 403 | I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); |
404 | DRM_UDELAY(150); | 404 | DRM_UDELAY(150); |
405 | if (IS_I965G(dev) && !IS_IGDNG(dev)) | 405 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) |
406 | I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); | 406 | I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); |
407 | DRM_UDELAY(150); | 407 | DRM_UDELAY(150); |
408 | 408 | ||
@@ -413,10 +413,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
413 | I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); | 413 | I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); |
414 | I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); | 414 | I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); |
415 | I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); | 415 | I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); |
416 | if (!IS_IGDNG(dev)) | 416 | if (!IS_IRONLAKE(dev)) |
417 | I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); | 417 | I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); |
418 | 418 | ||
419 | if (IS_IGDNG(dev)) { | 419 | if (IS_IRONLAKE(dev)) { |
420 | I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); | 420 | I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); |
421 | I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); | 421 | I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); |
422 | I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); | 422 | I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); |
@@ -467,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
467 | /* Actually enable it */ | 467 | /* Actually enable it */ |
468 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); | 468 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); |
469 | DRM_UDELAY(150); | 469 | DRM_UDELAY(150); |
470 | if (IS_I965G(dev) && !IS_IGDNG(dev)) | 470 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) |
471 | I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); | 471 | I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); |
472 | DRM_UDELAY(150); | 472 | DRM_UDELAY(150); |
473 | 473 | ||
@@ -478,10 +478,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
478 | I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); | 478 | I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); |
479 | I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); | 479 | I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); |
480 | I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); | 480 | I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); |
481 | if (!IS_IGDNG(dev)) | 481 | if (!IS_IRONLAKE(dev)) |
482 | I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); | 482 | I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); |
483 | 483 | ||
484 | if (IS_IGDNG(dev)) { | 484 | if (IS_IRONLAKE(dev)) { |
485 | I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); | 485 | I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); |
486 | I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); | 486 | I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); |
487 | I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); | 487 | I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); |
@@ -546,14 +546,14 @@ void i915_save_display(struct drm_device *dev) | |||
546 | dev_priv->saveCURSIZE = I915_READ(CURSIZE); | 546 | dev_priv->saveCURSIZE = I915_READ(CURSIZE); |
547 | 547 | ||
548 | /* CRT state */ | 548 | /* CRT state */ |
549 | if (IS_IGDNG(dev)) { | 549 | if (IS_IRONLAKE(dev)) { |
550 | dev_priv->saveADPA = I915_READ(PCH_ADPA); | 550 | dev_priv->saveADPA = I915_READ(PCH_ADPA); |
551 | } else { | 551 | } else { |
552 | dev_priv->saveADPA = I915_READ(ADPA); | 552 | dev_priv->saveADPA = I915_READ(ADPA); |
553 | } | 553 | } |
554 | 554 | ||
555 | /* LVDS state */ | 555 | /* LVDS state */ |
556 | if (IS_IGDNG(dev)) { | 556 | if (IS_IRONLAKE(dev)) { |
557 | dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); | 557 | dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); |
558 | dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); | 558 | dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); |
559 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); | 559 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); |
@@ -571,10 +571,10 @@ void i915_save_display(struct drm_device *dev) | |||
571 | dev_priv->saveLVDS = I915_READ(LVDS); | 571 | dev_priv->saveLVDS = I915_READ(LVDS); |
572 | } | 572 | } |
573 | 573 | ||
574 | if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev)) | 574 | if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev)) |
575 | dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); | 575 | dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); |
576 | 576 | ||
577 | if (IS_IGDNG(dev)) { | 577 | if (IS_IRONLAKE(dev)) { |
578 | dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); | 578 | dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); |
579 | dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); | 579 | dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); |
580 | dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); | 580 | dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); |
@@ -614,7 +614,7 @@ void i915_save_display(struct drm_device *dev) | |||
614 | dev_priv->saveVGA0 = I915_READ(VGA0); | 614 | dev_priv->saveVGA0 = I915_READ(VGA0); |
615 | dev_priv->saveVGA1 = I915_READ(VGA1); | 615 | dev_priv->saveVGA1 = I915_READ(VGA1); |
616 | dev_priv->saveVGA_PD = I915_READ(VGA_PD); | 616 | dev_priv->saveVGA_PD = I915_READ(VGA_PD); |
617 | if (IS_IGDNG(dev)) | 617 | if (IS_IRONLAKE(dev)) |
618 | dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); | 618 | dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); |
619 | else | 619 | else |
620 | dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); | 620 | dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); |
@@ -656,24 +656,24 @@ void i915_restore_display(struct drm_device *dev) | |||
656 | I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); | 656 | I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); |
657 | 657 | ||
658 | /* CRT state */ | 658 | /* CRT state */ |
659 | if (IS_IGDNG(dev)) | 659 | if (IS_IRONLAKE(dev)) |
660 | I915_WRITE(PCH_ADPA, dev_priv->saveADPA); | 660 | I915_WRITE(PCH_ADPA, dev_priv->saveADPA); |
661 | else | 661 | else |
662 | I915_WRITE(ADPA, dev_priv->saveADPA); | 662 | I915_WRITE(ADPA, dev_priv->saveADPA); |
663 | 663 | ||
664 | /* LVDS state */ | 664 | /* LVDS state */ |
665 | if (IS_I965G(dev) && !IS_IGDNG(dev)) | 665 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) |
666 | I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); | 666 | I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); |
667 | 667 | ||
668 | if (IS_IGDNG(dev)) { | 668 | if (IS_IRONLAKE(dev)) { |
669 | I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); | 669 | I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); |
670 | } else if (IS_MOBILE(dev) && !IS_I830(dev)) | 670 | } else if (IS_MOBILE(dev) && !IS_I830(dev)) |
671 | I915_WRITE(LVDS, dev_priv->saveLVDS); | 671 | I915_WRITE(LVDS, dev_priv->saveLVDS); |
672 | 672 | ||
673 | if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev)) | 673 | if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev)) |
674 | I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); | 674 | I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); |
675 | 675 | ||
676 | if (IS_IGDNG(dev)) { | 676 | if (IS_IRONLAKE(dev)) { |
677 | I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); | 677 | I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); |
678 | I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); | 678 | I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); |
679 | I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); | 679 | I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); |
@@ -713,7 +713,7 @@ void i915_restore_display(struct drm_device *dev) | |||
713 | } | 713 | } |
714 | 714 | ||
715 | /* VGA state */ | 715 | /* VGA state */ |
716 | if (IS_IGDNG(dev)) | 716 | if (IS_IRONLAKE(dev)) |
717 | I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); | 717 | I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); |
718 | else | 718 | else |
719 | I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); | 719 | I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); |
@@ -733,8 +733,10 @@ int i915_save_state(struct drm_device *dev) | |||
733 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | 733 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); |
734 | 734 | ||
735 | /* Render Standby */ | 735 | /* Render Standby */ |
736 | if (IS_I965G(dev) && IS_MOBILE(dev)) | 736 | if (I915_HAS_RC6(dev)) { |
737 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); | 737 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); |
738 | dev_priv->savePWRCTXA = I915_READ(PWRCTXA); | ||
739 | } | ||
738 | 740 | ||
739 | /* Hardware status page */ | 741 | /* Hardware status page */ |
740 | dev_priv->saveHWS = I915_READ(HWS_PGA); | 742 | dev_priv->saveHWS = I915_READ(HWS_PGA); |
@@ -742,7 +744,7 @@ int i915_save_state(struct drm_device *dev) | |||
742 | i915_save_display(dev); | 744 | i915_save_display(dev); |
743 | 745 | ||
744 | /* Interrupt state */ | 746 | /* Interrupt state */ |
745 | if (IS_IGDNG(dev)) { | 747 | if (IS_IRONLAKE(dev)) { |
746 | dev_priv->saveDEIER = I915_READ(DEIER); | 748 | dev_priv->saveDEIER = I915_READ(DEIER); |
747 | dev_priv->saveDEIMR = I915_READ(DEIMR); | 749 | dev_priv->saveDEIMR = I915_READ(DEIMR); |
748 | dev_priv->saveGTIER = I915_READ(GTIER); | 750 | dev_priv->saveGTIER = I915_READ(GTIER); |
@@ -796,8 +798,10 @@ int i915_restore_state(struct drm_device *dev) | |||
796 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); | 798 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); |
797 | 799 | ||
798 | /* Render Standby */ | 800 | /* Render Standby */ |
799 | if (IS_I965G(dev) && IS_MOBILE(dev)) | 801 | if (I915_HAS_RC6(dev)) { |
800 | I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); | 802 | I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); |
803 | I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA); | ||
804 | } | ||
801 | 805 | ||
802 | /* Hardware status page */ | 806 | /* Hardware status page */ |
803 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | 807 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); |
@@ -817,7 +821,7 @@ int i915_restore_state(struct drm_device *dev) | |||
817 | i915_restore_display(dev); | 821 | i915_restore_display(dev); |
818 | 822 | ||
819 | /* Interrupt state */ | 823 | /* Interrupt state */ |
820 | if (IS_IGDNG(dev)) { | 824 | if (IS_IRONLAKE(dev)) { |
821 | I915_WRITE(DEIER, dev_priv->saveDEIER); | 825 | I915_WRITE(DEIER, dev_priv->saveDEIER); |
822 | I915_WRITE(DEIMR, dev_priv->saveDEIMR); | 826 | I915_WRITE(DEIMR, dev_priv->saveDEIMR); |
823 | I915_WRITE(GTIER, dev_priv->saveGTIER); | 827 | I915_WRITE(GTIER, dev_priv->saveGTIER); |
@@ -846,6 +850,9 @@ int i915_restore_state(struct drm_device *dev) | |||
846 | for (i = 0; i < 3; i++) | 850 | for (i = 0; i < 3; i++) |
847 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); | 851 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); |
848 | 852 | ||
853 | /* I2C state */ | ||
854 | intel_i2c_reset_gmbus(dev); | ||
855 | |||
849 | return 0; | 856 | return 0; |
850 | } | 857 | } |
851 | 858 | ||
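
The suspend-path change above extends the render-standby branch: when the device has RC6, both MCHBAR_RENDER_STANDBY and the new PWRCTXA register are saved on suspend and written back on resume. A runnable sketch of that save/restore pairing, using a simulated MMIO array in place of I915_READ()/I915_WRITE():

#include <stdint.h>
#include <stdio.h>

/* Simulated MMIO space so the sketch runs standalone; the real driver
 * reads and writes the mapped register BAR via I915_READ()/I915_WRITE(). */
static uint32_t mmio[0x10000];

static uint32_t reg_read(uint32_t reg)              { return mmio[reg >> 2]; }
static void     reg_write(uint32_t reg, uint32_t v) { mmio[reg >> 2] = v; }

/* Register offsets taken from the i915_reg.h hunks above. */
#define MCHBAR_RENDER_STANDBY 0x111B8
#define PWRCTXA               0x2088

struct saved_regs {
	uint32_t render_standby;
	uint32_t pwrctxa;
};

/* Save/restore pair mirroring the I915_HAS_RC6() branch added above. */
static void save_rc6_state(struct saved_regs *s)
{
	s->render_standby = reg_read(MCHBAR_RENDER_STANDBY);
	s->pwrctxa        = reg_read(PWRCTXA);
}

static void restore_rc6_state(const struct saved_regs *s)
{
	reg_write(MCHBAR_RENDER_STANDBY, s->render_standby);
	reg_write(PWRCTXA, s->pwrctxa);
}

int main(void)
{
	struct saved_regs s;

	reg_write(PWRCTXA, 0x12345000u | 1);  /* pretend a power context is enabled */
	save_rc6_state(&s);
	reg_write(PWRCTXA, 0);                /* lost across suspend */
	restore_rc6_state(&s);
	printf("PWRCTXA restored to 0x%x\n", (unsigned)reg_read(PWRCTXA));
	return 0;
}
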
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 96cd256e60e6..f27567747580 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -114,6 +114,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
114 | struct lvds_dvo_timing *dvo_timing; | 114 | struct lvds_dvo_timing *dvo_timing; |
115 | struct drm_display_mode *panel_fixed_mode; | 115 | struct drm_display_mode *panel_fixed_mode; |
116 | int lfp_data_size, dvo_timing_offset; | 116 | int lfp_data_size, dvo_timing_offset; |
117 | int i, temp_downclock; | ||
118 | struct drm_display_mode *temp_mode; | ||
117 | 119 | ||
118 | /* Defaults if we can't find VBT info */ | 120 | /* Defaults if we can't find VBT info */ |
119 | dev_priv->lvds_dither = 0; | 121 | dev_priv->lvds_dither = 0; |
@@ -159,9 +161,49 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
159 | 161 | ||
160 | dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; | 162 | dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; |
161 | 163 | ||
162 | DRM_DEBUG("Found panel mode in BIOS VBT tables:\n"); | 164 | DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); |
163 | drm_mode_debug_printmodeline(panel_fixed_mode); | 165 | drm_mode_debug_printmodeline(panel_fixed_mode); |
164 | 166 | ||
167 | temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL); | ||
168 | temp_downclock = panel_fixed_mode->clock; | ||
169 | /* | ||
170 | * enumerate the LVDS panel timing info entry in VBT to check whether | ||
171 | * the LVDS downclock is found. | ||
172 | */ | ||
173 | for (i = 0; i < 16; i++) { | ||
174 | entry = (struct bdb_lvds_lfp_data_entry *) | ||
175 | ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i)); | ||
176 | dvo_timing = (struct lvds_dvo_timing *) | ||
177 | ((unsigned char *)entry + dvo_timing_offset); | ||
178 | |||
179 | fill_detail_timing_data(temp_mode, dvo_timing); | ||
180 | |||
181 | if (temp_mode->hdisplay == panel_fixed_mode->hdisplay && | ||
182 | temp_mode->hsync_start == panel_fixed_mode->hsync_start && | ||
183 | temp_mode->hsync_end == panel_fixed_mode->hsync_end && | ||
184 | temp_mode->htotal == panel_fixed_mode->htotal && | ||
185 | temp_mode->vdisplay == panel_fixed_mode->vdisplay && | ||
186 | temp_mode->vsync_start == panel_fixed_mode->vsync_start && | ||
187 | temp_mode->vsync_end == panel_fixed_mode->vsync_end && | ||
188 | temp_mode->vtotal == panel_fixed_mode->vtotal && | ||
189 | temp_mode->clock < temp_downclock) { | ||
190 | /* | ||
191 | * a downclock was already found, but keep ||
192 | * looking for the lowest one available. ||
193 | */ | ||
194 | temp_downclock = temp_mode->clock; | ||
195 | } | ||
196 | /* clear it to zero */ | ||
197 | memset(temp_mode, 0, sizeof(*temp_mode)); | ||
198 | } | ||
199 | kfree(temp_mode); | ||
200 | if (temp_downclock < panel_fixed_mode->clock) { | ||
201 | dev_priv->lvds_downclock_avail = 1; | ||
202 | dev_priv->lvds_downclock = temp_downclock; | ||
203 | DRM_DEBUG_KMS("LVDS downclock is found in VBT. " ||
204 | "Normal Clock %dKHz, downclock %dKHz\n", ||
205 | panel_fixed_mode->clock, temp_downclock); ||
206 | } | ||
165 | return; | 207 | return; |
166 | } | 208 | } |
167 | 209 | ||
@@ -217,7 +259,7 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
217 | if (IS_I85X(dev_priv->dev)) | 259 | if (IS_I85X(dev_priv->dev)) |
218 | dev_priv->lvds_ssc_freq = | 260 | dev_priv->lvds_ssc_freq = |
219 | general->ssc_freq ? 66 : 48; | 261 | general->ssc_freq ? 66 : 48; |
220 | else if (IS_IGDNG(dev_priv->dev)) | 262 | else if (IS_IRONLAKE(dev_priv->dev)) |
221 | dev_priv->lvds_ssc_freq = | 263 | dev_priv->lvds_ssc_freq = |
222 | general->ssc_freq ? 100 : 120; | 264 | general->ssc_freq ? 100 : 120; |
223 | else | 265 | else |
@@ -241,22 +283,18 @@ parse_general_definitions(struct drm_i915_private *dev_priv, | |||
241 | GPIOF, | 283 | GPIOF, |
242 | }; | 284 | }; |
243 | 285 | ||
244 | /* Set sensible defaults in case we can't find the general block | ||
245 | or it is the wrong chipset */ | ||
246 | dev_priv->crt_ddc_bus = -1; | ||
247 | |||
248 | general = find_section(bdb, BDB_GENERAL_DEFINITIONS); | 286 | general = find_section(bdb, BDB_GENERAL_DEFINITIONS); |
249 | if (general) { | 287 | if (general) { |
250 | u16 block_size = get_blocksize(general); | 288 | u16 block_size = get_blocksize(general); |
251 | if (block_size >= sizeof(*general)) { | 289 | if (block_size >= sizeof(*general)) { |
252 | int bus_pin = general->crt_ddc_gmbus_pin; | 290 | int bus_pin = general->crt_ddc_gmbus_pin; |
253 | DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin); | 291 | DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); |
254 | if ((bus_pin >= 1) && (bus_pin <= 6)) { | 292 | if ((bus_pin >= 1) && (bus_pin <= 6)) { |
255 | dev_priv->crt_ddc_bus = | 293 | dev_priv->crt_ddc_bus = |
256 | crt_bus_map_table[bus_pin-1]; | 294 | crt_bus_map_table[bus_pin-1]; |
257 | } | 295 | } |
258 | } else { | 296 | } else { |
259 | DRM_DEBUG("BDB_GD too small (%d). Invalid.\n", | 297 | DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", |
260 | block_size); | 298 | block_size); |
261 | } | 299 | } |
262 | } | 300 | } |
@@ -274,7 +312,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | |||
274 | 312 | ||
275 | p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); | 313 | p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); |
276 | if (!p_defs) { | 314 | if (!p_defs) { |
277 | DRM_DEBUG("No general definition block is found\n"); | 315 | DRM_DEBUG_KMS("No general definition block is found\n"); |
278 | return; | 316 | return; |
279 | } | 317 | } |
280 | /* judge whether the size of child device meets the requirements. | 318 | /* judge whether the size of child device meets the requirements. |
@@ -284,7 +322,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | |||
284 | */ | 322 | */ |
285 | if (p_defs->child_dev_size != sizeof(*p_child)) { | 323 | if (p_defs->child_dev_size != sizeof(*p_child)) { |
286 | /* different child dev size . Ignore it */ | 324 | /* different child dev size . Ignore it */ |
287 | DRM_DEBUG("different child size is found. Invalid.\n"); | 325 | DRM_DEBUG_KMS("different child size is found. Invalid.\n"); |
288 | return; | 326 | return; |
289 | } | 327 | } |
290 | /* get the block size of general definitions */ | 328 | /* get the block size of general definitions */ |
@@ -310,11 +348,11 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | |||
310 | if (p_child->dvo_port != DEVICE_PORT_DVOB && | 348 | if (p_child->dvo_port != DEVICE_PORT_DVOB && |
311 | p_child->dvo_port != DEVICE_PORT_DVOC) { | 349 | p_child->dvo_port != DEVICE_PORT_DVOC) { |
312 | /* skip the incorrect SDVO port */ | 350 | /* skip the incorrect SDVO port */ |
313 | DRM_DEBUG("Incorrect SDVO port. Skip it \n"); | 351 | DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n"); |
314 | continue; | 352 | continue; |
315 | } | 353 | } |
316 | DRM_DEBUG("the SDVO device with slave addr %2x is found on " | 354 | DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" |
317 | "%s port\n", | 355 | " %s port\n", |
318 | p_child->slave_addr, | 356 | p_child->slave_addr, |
319 | (p_child->dvo_port == DEVICE_PORT_DVOB) ? | 357 | (p_child->dvo_port == DEVICE_PORT_DVOB) ? |
320 | "SDVOB" : "SDVOC"); | 358 | "SDVOB" : "SDVOC"); |
@@ -325,21 +363,21 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | |||
325 | p_mapping->dvo_wiring = p_child->dvo_wiring; | 363 | p_mapping->dvo_wiring = p_child->dvo_wiring; |
326 | p_mapping->initialized = 1; | 364 | p_mapping->initialized = 1; |
327 | } else { | 365 | } else { |
328 | DRM_DEBUG("Maybe one SDVO port is shared by " | 366 | DRM_DEBUG_KMS("Maybe one SDVO port is shared by " |
329 | "two SDVO device.\n"); | 367 | "two SDVO device.\n"); |
330 | } | 368 | } |
331 | if (p_child->slave2_addr) { | 369 | if (p_child->slave2_addr) { |
332 | /* Maybe this is a SDVO device with multiple inputs */ | 370 | /* Maybe this is a SDVO device with multiple inputs */ |
333 | /* And the mapping info is not added */ | 371 | /* And the mapping info is not added */ |
334 | DRM_DEBUG("there exists the slave2_addr. Maybe this " | 372 | DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this" |
335 | "is a SDVO device with multiple inputs.\n"); | 373 | " is a SDVO device with multiple inputs.\n"); |
336 | } | 374 | } |
337 | count++; | 375 | count++; |
338 | } | 376 | } |
339 | 377 | ||
340 | if (!count) { | 378 | if (!count) { |
341 | /* No SDVO device info is found */ | 379 | /* No SDVO device info is found */ |
342 | DRM_DEBUG("No SDVO device info is found in VBT\n"); | 380 | DRM_DEBUG_KMS("No SDVO device info is found in VBT\n"); |
343 | } | 381 | } |
344 | return; | 382 | return; |
345 | } | 383 | } |
@@ -366,6 +404,70 @@ parse_driver_features(struct drm_i915_private *dev_priv, | |||
366 | dev_priv->render_reclock_avail = true; | 404 | dev_priv->render_reclock_avail = true; |
367 | } | 405 | } |
368 | 406 | ||
407 | static void | ||
408 | parse_device_mapping(struct drm_i915_private *dev_priv, | ||
409 | struct bdb_header *bdb) | ||
410 | { | ||
411 | struct bdb_general_definitions *p_defs; | ||
412 | struct child_device_config *p_child, *child_dev_ptr; | ||
413 | int i, child_device_num, count; | ||
414 | u16 block_size; | ||
415 | |||
416 | p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); | ||
417 | if (!p_defs) { | ||
418 | DRM_DEBUG_KMS("No general definition block is found\n"); | ||
419 | return; | ||
420 | } | ||
421 | /* judge whether the size of child device meets the requirements. | ||
422 | * If the child device size obtained from general definition block | ||
423 | * is different from sizeof(struct child_device_config), skip the | ||
424 | * parsing of the child device info | ||
425 | */ | ||
426 | if (p_defs->child_dev_size != sizeof(*p_child)) { | ||
427 | /* different child dev size . Ignore it */ | ||
428 | DRM_DEBUG_KMS("different child size is found. Invalid.\n"); | ||
429 | return; | ||
430 | } | ||
431 | /* get the block size of general definitions */ | ||
432 | block_size = get_blocksize(p_defs); | ||
433 | /* get the number of child device */ | ||
434 | child_device_num = (block_size - sizeof(*p_defs)) / | ||
435 | sizeof(*p_child); | ||
436 | count = 0; | ||
437 | /* get the number of child device that is present */ | ||
438 | for (i = 0; i < child_device_num; i++) { | ||
439 | p_child = &(p_defs->devices[i]); | ||
440 | if (!p_child->device_type) { | ||
441 | /* skip the device block if device type is invalid */ | ||
442 | continue; | ||
443 | } | ||
444 | count++; | ||
445 | } | ||
446 | if (!count) { | ||
447 | DRM_DEBUG_KMS("no child dev is parsed from VBT \n"); | ||
448 | return; | ||
449 | } | ||
450 | dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL); | ||
451 | if (!dev_priv->child_dev) { | ||
452 | DRM_DEBUG_KMS("No memory space for child device\n"); | ||
453 | return; | ||
454 | } | ||
455 | |||
456 | dev_priv->child_dev_num = count; | ||
457 | count = 0; | ||
458 | for (i = 0; i < child_device_num; i++) { | ||
459 | p_child = &(p_defs->devices[i]); | ||
460 | if (!p_child->device_type) { | ||
461 | /* skip the device block if device type is invalid */ | ||
462 | continue; | ||
463 | } | ||
464 | child_dev_ptr = dev_priv->child_dev + count; | ||
465 | count++; | ||
466 | memcpy((void *)child_dev_ptr, (void *)p_child, | ||
467 | sizeof(*p_child)); | ||
468 | } | ||
469 | return; | ||
470 | } | ||
369 | /** | 471 | /** |
370 | * intel_init_bios - initialize VBIOS settings & find VBT | 472 | * intel_init_bios - initialize VBIOS settings & find VBT |
371 | * @dev: DRM device | 473 | * @dev: DRM device |
@@ -417,6 +519,7 @@ intel_init_bios(struct drm_device *dev) | |||
417 | parse_lfp_panel_data(dev_priv, bdb); | 519 | parse_lfp_panel_data(dev_priv, bdb); |
418 | parse_sdvo_panel_data(dev_priv, bdb); | 520 | parse_sdvo_panel_data(dev_priv, bdb); |
419 | parse_sdvo_device_mapping(dev_priv, bdb); | 521 | parse_sdvo_device_mapping(dev_priv, bdb); |
522 | parse_device_mapping(dev_priv, bdb); | ||
420 | parse_driver_features(dev_priv, bdb); | 523 | parse_driver_features(dev_priv, bdb); |
421 | 524 | ||
422 | pci_unmap_rom(pdev, bios); | 525 | pci_unmap_rom(pdev, bios); |
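
parse_device_mapping() added above uses a two-pass scan of the VBT general-definitions block: count the child entries whose device_type is non-zero, allocate exactly that many, then copy them out. A standalone sketch of the same pattern over a simplified entry struct (the real child_device_config has many more fields):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the VBT child device entry. */
struct child_entry {
	uint16_t device_type;   /* 0 means the slot is unused */
	uint16_t dvo_port;
};

/* Two-pass pattern: count valid entries, allocate, then copy. */
static struct child_entry *copy_present_children(const struct child_entry *tbl,
						 int n, int *out_count)
{
	struct child_entry *copy;
	int i, count = 0;

	for (i = 0; i < n; i++)
		if (tbl[i].device_type)
			count++;
	if (!count)
		return NULL;

	copy = calloc(count, sizeof(*copy));
	if (!copy)
		return NULL;

	*out_count = 0;
	for (i = 0; i < n; i++)
		if (tbl[i].device_type)
			copy[(*out_count)++] = tbl[i];
	return copy;
}

int main(void)
{
	struct child_entry vbt[4] = {
		{ 0x1022, 1 }, { 0, 0 }, { 0x68C6, 7 }, { 0, 0 },
	};
	int count = 0;
	struct child_entry *devs = copy_present_children(vbt, 4, &count);

	printf("%d child devices parsed\n", count);
	free(devs);
	return 0;
}
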
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 0f8e5f69ac7a..425ac9d7f724 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -549,4 +549,21 @@ bool intel_init_bios(struct drm_device *dev); | |||
549 | #define SWF14_APM_STANDBY 0x1 | 549 | #define SWF14_APM_STANDBY 0x1 |
550 | #define SWF14_APM_RESTORE 0x0 | 550 | #define SWF14_APM_RESTORE 0x0 |
551 | 551 | ||
552 | /* Add the device class for LFP, TV, HDMI */ | ||
553 | #define DEVICE_TYPE_INT_LFP 0x1022 | ||
554 | #define DEVICE_TYPE_INT_TV 0x1009 | ||
555 | #define DEVICE_TYPE_HDMI 0x60D2 | ||
556 | #define DEVICE_TYPE_DP 0x68C6 | ||
557 | #define DEVICE_TYPE_eDP 0x78C6 | ||
558 | |||
559 | /* define the DVO port for HDMI output type */ | ||
560 | #define DVO_B 1 | ||
561 | #define DVO_C 2 | ||
562 | #define DVO_D 3 | ||
563 | |||
564 | /* define the PORT for DP output type */ | ||
565 | #define PORT_IDPB 7 | ||
566 | #define PORT_IDPC 8 | ||
567 | #define PORT_IDPD 9 | ||
568 | |||
552 | #endif /* _I830_BIOS_H_ */ | 569 | #endif /* _I830_BIOS_H_ */ |
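
The new DEVICE_TYPE_* values give later code a way to classify each parsed child device. A small sketch of such a lookup, using the values from the header above; the mapping to human-readable names is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Device class values as added to intel_bios.h above. */
#define DEVICE_TYPE_INT_LFP 0x1022
#define DEVICE_TYPE_INT_TV  0x1009
#define DEVICE_TYPE_HDMI    0x60D2
#define DEVICE_TYPE_DP      0x68C6
#define DEVICE_TYPE_eDP     0x78C6

static const char *device_type_name(uint16_t type)
{
	switch (type) {
	case DEVICE_TYPE_INT_LFP: return "internal LVDS panel";
	case DEVICE_TYPE_INT_TV:  return "integrated TV";
	case DEVICE_TYPE_HDMI:    return "HDMI";
	case DEVICE_TYPE_DP:      return "DisplayPort";
	case DEVICE_TYPE_eDP:     return "embedded DisplayPort";
	default:                  return "unknown";
	}
}

int main(void)
{
	printf("0x68C6 -> %s\n", device_type_name(0x68C6));
	return 0;
}
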
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index e5051446c48e..9f3d3e563414 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) | |||
39 | struct drm_i915_private *dev_priv = dev->dev_private; | 39 | struct drm_i915_private *dev_priv = dev->dev_private; |
40 | u32 temp, reg; | 40 | u32 temp, reg; |
41 | 41 | ||
42 | if (IS_IGDNG(dev)) | 42 | if (IS_IRONLAKE(dev)) |
43 | reg = PCH_ADPA; | 43 | reg = PCH_ADPA; |
44 | else | 44 | else |
45 | reg = ADPA; | 45 | reg = ADPA; |
@@ -64,34 +64,6 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) | |||
64 | } | 64 | } |
65 | 65 | ||
66 | I915_WRITE(reg, temp); | 66 | I915_WRITE(reg, temp); |
67 | |||
68 | if (IS_IGD(dev)) { | ||
69 | if (mode == DRM_MODE_DPMS_OFF) { | ||
70 | /* turn off DAC */ | ||
71 | temp = I915_READ(PORT_HOTPLUG_EN); | ||
72 | temp &= ~CRT_EOS_INT_EN; | ||
73 | I915_WRITE(PORT_HOTPLUG_EN, temp); | ||
74 | |||
75 | temp = I915_READ(PORT_HOTPLUG_STAT); | ||
76 | if (temp & CRT_EOS_INT_STATUS) | ||
77 | I915_WRITE(PORT_HOTPLUG_STAT, | ||
78 | CRT_EOS_INT_STATUS); | ||
79 | } else { | ||
80 | /* turn on DAC. EOS interrupt must be enabled after DAC | ||
81 | * is enabled, so it sounds not good to enable it in | ||
82 | * i915_driver_irq_postinstall() | ||
83 | * wait 12.5ms after DAC is enabled | ||
84 | */ | ||
85 | msleep(13); | ||
86 | temp = I915_READ(PORT_HOTPLUG_STAT); | ||
87 | if (temp & CRT_EOS_INT_STATUS) | ||
88 | I915_WRITE(PORT_HOTPLUG_STAT, | ||
89 | CRT_EOS_INT_STATUS); | ||
90 | temp = I915_READ(PORT_HOTPLUG_EN); | ||
91 | temp |= CRT_EOS_INT_EN; | ||
92 | I915_WRITE(PORT_HOTPLUG_EN, temp); | ||
93 | } | ||
94 | } | ||
95 | } | 67 | } |
96 | 68 | ||
97 | static int intel_crt_mode_valid(struct drm_connector *connector, | 69 | static int intel_crt_mode_valid(struct drm_connector *connector, |
@@ -141,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
141 | else | 113 | else |
142 | dpll_md_reg = DPLL_B_MD; | 114 | dpll_md_reg = DPLL_B_MD; |
143 | 115 | ||
144 | if (IS_IGDNG(dev)) | 116 | if (IS_IRONLAKE(dev)) |
145 | adpa_reg = PCH_ADPA; | 117 | adpa_reg = PCH_ADPA; |
146 | else | 118 | else |
147 | adpa_reg = ADPA; | 119 | adpa_reg = ADPA; |
@@ -150,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
150 | * Disable separate mode multiplier used when cloning SDVO to CRT | 122 | * Disable separate mode multiplier used when cloning SDVO to CRT |
151 | * XXX this needs to be adjusted when we really are cloning | 123 | * XXX this needs to be adjusted when we really are cloning |
152 | */ | 124 | */ |
153 | if (IS_I965G(dev) && !IS_IGDNG(dev)) { | 125 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { |
154 | dpll_md = I915_READ(dpll_md_reg); | 126 | dpll_md = I915_READ(dpll_md_reg); |
155 | I915_WRITE(dpll_md_reg, | 127 | I915_WRITE(dpll_md_reg, |
156 | dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); | 128 | dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); |
@@ -164,18 +136,18 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
164 | 136 | ||
165 | if (intel_crtc->pipe == 0) { | 137 | if (intel_crtc->pipe == 0) { |
166 | adpa |= ADPA_PIPE_A_SELECT; | 138 | adpa |= ADPA_PIPE_A_SELECT; |
167 | if (!IS_IGDNG(dev)) | 139 | if (!IS_IRONLAKE(dev)) |
168 | I915_WRITE(BCLRPAT_A, 0); | 140 | I915_WRITE(BCLRPAT_A, 0); |
169 | } else { | 141 | } else { |
170 | adpa |= ADPA_PIPE_B_SELECT; | 142 | adpa |= ADPA_PIPE_B_SELECT; |
171 | if (!IS_IGDNG(dev)) | 143 | if (!IS_IRONLAKE(dev)) |
172 | I915_WRITE(BCLRPAT_B, 0); | 144 | I915_WRITE(BCLRPAT_B, 0); |
173 | } | 145 | } |
174 | 146 | ||
175 | I915_WRITE(adpa_reg, adpa); | 147 | I915_WRITE(adpa_reg, adpa); |
176 | } | 148 | } |
177 | 149 | ||
178 | static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) | 150 | static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) |
179 | { | 151 | { |
180 | struct drm_device *dev = connector->dev; | 152 | struct drm_device *dev = connector->dev; |
181 | struct drm_i915_private *dev_priv = dev->dev_private; | 153 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -194,7 +166,7 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) | |||
194 | ADPA_CRT_HOTPLUG_ENABLE | | 166 | ADPA_CRT_HOTPLUG_ENABLE | |
195 | ADPA_CRT_HOTPLUG_FORCE_TRIGGER); | 167 | ADPA_CRT_HOTPLUG_FORCE_TRIGGER); |
196 | 168 | ||
197 | DRM_DEBUG("pch crt adpa 0x%x", adpa); | 169 | DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa); |
198 | I915_WRITE(PCH_ADPA, adpa); | 170 | I915_WRITE(PCH_ADPA, adpa); |
199 | 171 | ||
200 | while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) | 172 | while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) |
@@ -227,8 +199,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
227 | u32 hotplug_en; | 199 | u32 hotplug_en; |
228 | int i, tries = 0; | 200 | int i, tries = 0; |
229 | 201 | ||
230 | if (IS_IGDNG(dev)) | 202 | if (IS_IRONLAKE(dev)) |
231 | return intel_igdng_crt_detect_hotplug(connector); | 203 | return intel_ironlake_crt_detect_hotplug(connector); |
232 | 204 | ||
233 | /* | 205 | /* |
234 | * On 4 series desktop, CRT detect sequence need to be done twice | 206 | * On 4 series desktop, CRT detect sequence need to be done twice |
@@ -549,12 +521,12 @@ void intel_crt_init(struct drm_device *dev) | |||
549 | &intel_output->enc); | 521 | &intel_output->enc); |
550 | 522 | ||
551 | /* Set up the DDC bus. */ | 523 | /* Set up the DDC bus. */ |
552 | if (IS_IGDNG(dev)) | 524 | if (IS_IRONLAKE(dev)) |
553 | i2c_reg = PCH_GPIOA; | 525 | i2c_reg = PCH_GPIOA; |
554 | else { | 526 | else { |
555 | i2c_reg = GPIOA; | 527 | i2c_reg = GPIOA; |
556 | /* Use VBT information for CRT DDC if available */ | 528 | /* Use VBT information for CRT DDC if available */ |
557 | if (dev_priv->crt_ddc_bus != -1) | 529 | if (dev_priv->crt_ddc_bus != 0) |
558 | i2c_reg = dev_priv->crt_ddc_bus; | 530 | i2c_reg = dev_priv->crt_ddc_bus; |
559 | } | 531 | } |
560 | intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); | 532 | intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); |
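
intel_ironlake_crt_detect_hotplug() above relies on a force-trigger handshake: set ADPA_CRT_HOTPLUG_FORCE_TRIGGER, busy-wait until the hardware clears it, then read the monitor-present bits. A self-contained sketch of that handshake against a faked register; the bit positions are assumptions of the sketch, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed bit layout; only the handshake itself mirrors the driver. */
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1u << 16)
#define ADPA_CRT_HOTPLUG_MONITOR_MASK  (3u << 24)

static uint32_t pch_adpa;
static int polls_left = 3;

static uint32_t adpa_read(void)
{
	/* Fake hardware: after a few polls, drop the trigger bit and
	 * report a monitor as present. */
	if (polls_left && --polls_left == 0)
		pch_adpa = (pch_adpa & ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER) |
			   (1u << 24);
	return pch_adpa;
}

static int crt_detect_hotplug(void)
{
	pch_adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;

	/* busy-wait until hardware clears the trigger bit */
	while (adpa_read() & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
		;

	return (adpa_read() & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0;
}

int main(void)
{
	printf("CRT %s\n", crt_detect_hotplug() ? "connected" : "disconnected");
	return 0;
}
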
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 897230832c8c..5146b8094ae0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -102,32 +102,32 @@ struct intel_limit { | |||
102 | #define I9XX_DOT_MAX 400000 | 102 | #define I9XX_DOT_MAX 400000 |
103 | #define I9XX_VCO_MIN 1400000 | 103 | #define I9XX_VCO_MIN 1400000 |
104 | #define I9XX_VCO_MAX 2800000 | 104 | #define I9XX_VCO_MAX 2800000 |
105 | #define IGD_VCO_MIN 1700000 | 105 | #define PINEVIEW_VCO_MIN 1700000 |
106 | #define IGD_VCO_MAX 3500000 | 106 | #define PINEVIEW_VCO_MAX 3500000 |
107 | #define I9XX_N_MIN 1 | 107 | #define I9XX_N_MIN 1 |
108 | #define I9XX_N_MAX 6 | 108 | #define I9XX_N_MAX 6 |
109 | /* IGD's Ncounter is a ring counter */ | 109 | /* Pineview's Ncounter is a ring counter */ |
110 | #define IGD_N_MIN 3 | 110 | #define PINEVIEW_N_MIN 3 |
111 | #define IGD_N_MAX 6 | 111 | #define PINEVIEW_N_MAX 6 |
112 | #define I9XX_M_MIN 70 | 112 | #define I9XX_M_MIN 70 |
113 | #define I9XX_M_MAX 120 | 113 | #define I9XX_M_MAX 120 |
114 | #define IGD_M_MIN 2 | 114 | #define PINEVIEW_M_MIN 2 |
115 | #define IGD_M_MAX 256 | 115 | #define PINEVIEW_M_MAX 256 |
116 | #define I9XX_M1_MIN 10 | 116 | #define I9XX_M1_MIN 10 |
117 | #define I9XX_M1_MAX 22 | 117 | #define I9XX_M1_MAX 22 |
118 | #define I9XX_M2_MIN 5 | 118 | #define I9XX_M2_MIN 5 |
119 | #define I9XX_M2_MAX 9 | 119 | #define I9XX_M2_MAX 9 |
120 | /* IGD M1 is reserved, and must be 0 */ | 120 | /* Pineview M1 is reserved, and must be 0 */ |
121 | #define IGD_M1_MIN 0 | 121 | #define PINEVIEW_M1_MIN 0 |
122 | #define IGD_M1_MAX 0 | 122 | #define PINEVIEW_M1_MAX 0 |
123 | #define IGD_M2_MIN 0 | 123 | #define PINEVIEW_M2_MIN 0 |
124 | #define IGD_M2_MAX 254 | 124 | #define PINEVIEW_M2_MAX 254 |
125 | #define I9XX_P_SDVO_DAC_MIN 5 | 125 | #define I9XX_P_SDVO_DAC_MIN 5 |
126 | #define I9XX_P_SDVO_DAC_MAX 80 | 126 | #define I9XX_P_SDVO_DAC_MAX 80 |
127 | #define I9XX_P_LVDS_MIN 7 | 127 | #define I9XX_P_LVDS_MIN 7 |
128 | #define I9XX_P_LVDS_MAX 98 | 128 | #define I9XX_P_LVDS_MAX 98 |
129 | #define IGD_P_LVDS_MIN 7 | 129 | #define PINEVIEW_P_LVDS_MIN 7 |
130 | #define IGD_P_LVDS_MAX 112 | 130 | #define PINEVIEW_P_LVDS_MAX 112 |
131 | #define I9XX_P1_MIN 1 | 131 | #define I9XX_P1_MIN 1 |
132 | #define I9XX_P1_MAX 8 | 132 | #define I9XX_P1_MAX 8 |
133 | #define I9XX_P2_SDVO_DAC_SLOW 10 | 133 | #define I9XX_P2_SDVO_DAC_SLOW 10 |
@@ -234,33 +234,33 @@ struct intel_limit { | |||
234 | #define G4X_P2_DISPLAY_PORT_FAST 10 | 234 | #define G4X_P2_DISPLAY_PORT_FAST 10 |
235 | #define G4X_P2_DISPLAY_PORT_LIMIT 0 | 235 | #define G4X_P2_DISPLAY_PORT_LIMIT 0 |
236 | 236 | ||
237 | /* IGDNG */ | 237 | /* Ironlake */ |
238 | /* as we calculate clock using (register_value + 2) for | 238 | /* as we calculate clock using (register_value + 2) for |
239 | N/M1/M2, so here the range value for them is (actual_value-2). | 239 | N/M1/M2, so here the range value for them is (actual_value-2). |
240 | */ | 240 | */ |
241 | #define IGDNG_DOT_MIN 25000 | 241 | #define IRONLAKE_DOT_MIN 25000 |
242 | #define IGDNG_DOT_MAX 350000 | 242 | #define IRONLAKE_DOT_MAX 350000 |
243 | #define IGDNG_VCO_MIN 1760000 | 243 | #define IRONLAKE_VCO_MIN 1760000 |
244 | #define IGDNG_VCO_MAX 3510000 | 244 | #define IRONLAKE_VCO_MAX 3510000 |
245 | #define IGDNG_N_MIN 1 | 245 | #define IRONLAKE_N_MIN 1 |
246 | #define IGDNG_N_MAX 5 | 246 | #define IRONLAKE_N_MAX 5 |
247 | #define IGDNG_M_MIN 79 | 247 | #define IRONLAKE_M_MIN 79 |
248 | #define IGDNG_M_MAX 118 | 248 | #define IRONLAKE_M_MAX 118 |
249 | #define IGDNG_M1_MIN 12 | 249 | #define IRONLAKE_M1_MIN 12 |
250 | #define IGDNG_M1_MAX 23 | 250 | #define IRONLAKE_M1_MAX 23 |
251 | #define IGDNG_M2_MIN 5 | 251 | #define IRONLAKE_M2_MIN 5 |
252 | #define IGDNG_M2_MAX 9 | 252 | #define IRONLAKE_M2_MAX 9 |
253 | #define IGDNG_P_SDVO_DAC_MIN 5 | 253 | #define IRONLAKE_P_SDVO_DAC_MIN 5 |
254 | #define IGDNG_P_SDVO_DAC_MAX 80 | 254 | #define IRONLAKE_P_SDVO_DAC_MAX 80 |
255 | #define IGDNG_P_LVDS_MIN 28 | 255 | #define IRONLAKE_P_LVDS_MIN 28 |
256 | #define IGDNG_P_LVDS_MAX 112 | 256 | #define IRONLAKE_P_LVDS_MAX 112 |
257 | #define IGDNG_P1_MIN 1 | 257 | #define IRONLAKE_P1_MIN 1 |
258 | #define IGDNG_P1_MAX 8 | 258 | #define IRONLAKE_P1_MAX 8 |
259 | #define IGDNG_P2_SDVO_DAC_SLOW 10 | 259 | #define IRONLAKE_P2_SDVO_DAC_SLOW 10 |
260 | #define IGDNG_P2_SDVO_DAC_FAST 5 | 260 | #define IRONLAKE_P2_SDVO_DAC_FAST 5 |
261 | #define IGDNG_P2_LVDS_SLOW 14 /* single channel */ | 261 | #define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */ |
262 | #define IGDNG_P2_LVDS_FAST 7 /* double channel */ | 262 | #define IRONLAKE_P2_LVDS_FAST 7 /* double channel */ |
263 | #define IGDNG_P2_DOT_LIMIT 225000 /* 225Mhz */ | 263 | #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ |
264 | 264 | ||
265 | static bool | 265 | static bool |
266 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 266 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
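A reading aid for the limit tables above, not part of the kernel change: per the comment at the top of this block, the Ironlake N/M1/M2 limits are stored as (actual_value - 2), because the clock code later adds 2 back. A minimal sketch of that mapping, assuming only the convention stated in the comment (the helper name here is hypothetical):

/* actual divider implied by a register-style limit value, e.g.
 * IRONLAKE_N_MIN of 1 corresponds to an actual N divider of 3 */
static inline int ironlake_actual_divider(int register_value)
{
        return register_value + 2;
}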
@@ -272,15 +272,15 @@ static bool | |||
272 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 272 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
273 | int target, int refclk, intel_clock_t *best_clock); | 273 | int target, int refclk, intel_clock_t *best_clock); |
274 | static bool | 274 | static bool |
275 | intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 275 | intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
276 | int target, int refclk, intel_clock_t *best_clock); | 276 | int target, int refclk, intel_clock_t *best_clock); |
277 | 277 | ||
278 | static bool | 278 | static bool |
279 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, | 279 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, |
280 | int target, int refclk, intel_clock_t *best_clock); | 280 | int target, int refclk, intel_clock_t *best_clock); |
281 | static bool | 281 | static bool |
282 | intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc, | 282 | intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, |
283 | int target, int refclk, intel_clock_t *best_clock); | 283 | int target, int refclk, intel_clock_t *best_clock); |
284 | 284 | ||
285 | static const intel_limit_t intel_limits_i8xx_dvo = { | 285 | static const intel_limit_t intel_limits_i8xx_dvo = { |
286 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | 286 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, |
@@ -453,13 +453,13 @@ static const intel_limit_t intel_limits_g4x_display_port = { | |||
453 | .find_pll = intel_find_pll_g4x_dp, | 453 | .find_pll = intel_find_pll_g4x_dp, |
454 | }; | 454 | }; |
455 | 455 | ||
456 | static const intel_limit_t intel_limits_igd_sdvo = { | 456 | static const intel_limit_t intel_limits_pineview_sdvo = { |
457 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, | 457 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, |
458 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, | 458 | .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX }, |
459 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, | 459 | .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX }, |
460 | .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, | 460 | .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX }, |
461 | .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, | 461 | .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX }, |
462 | .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, | 462 | .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX }, |
463 | .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, | 463 | .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, |
464 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | 464 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, |
465 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 465 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, |
@@ -468,59 +468,59 @@ static const intel_limit_t intel_limits_igd_sdvo = { | |||
468 | .find_reduced_pll = intel_find_best_reduced_PLL, | 468 | .find_reduced_pll = intel_find_best_reduced_PLL, |
469 | }; | 469 | }; |
470 | 470 | ||
471 | static const intel_limit_t intel_limits_igd_lvds = { | 471 | static const intel_limit_t intel_limits_pineview_lvds = { |
472 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 472 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, |
473 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, | 473 | .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX }, |
474 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, | 474 | .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX }, |
475 | .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, | 475 | .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX }, |
476 | .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, | 476 | .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX }, |
477 | .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, | 477 | .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX }, |
478 | .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX }, | 478 | .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX }, |
479 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | 479 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, |
480 | /* IGD only supports single-channel mode. */ | 480 | /* Pineview only supports single-channel mode. */ |
481 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 481 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, |
482 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, | 482 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, |
483 | .find_pll = intel_find_best_PLL, | 483 | .find_pll = intel_find_best_PLL, |
484 | .find_reduced_pll = intel_find_best_reduced_PLL, | 484 | .find_reduced_pll = intel_find_best_reduced_PLL, |
485 | }; | 485 | }; |
486 | 486 | ||
487 | static const intel_limit_t intel_limits_igdng_sdvo = { | 487 | static const intel_limit_t intel_limits_ironlake_sdvo = { |
488 | .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, | 488 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, |
489 | .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, | 489 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, |
490 | .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, | 490 | .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, |
491 | .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX }, | 491 | .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, |
492 | .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX }, | 492 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, |
493 | .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX }, | 493 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, |
494 | .p = { .min = IGDNG_P_SDVO_DAC_MIN, .max = IGDNG_P_SDVO_DAC_MAX }, | 494 | .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX }, |
495 | .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX }, | 495 | .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, |
496 | .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT, | 496 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, |
497 | .p2_slow = IGDNG_P2_SDVO_DAC_SLOW, | 497 | .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, |
498 | .p2_fast = IGDNG_P2_SDVO_DAC_FAST }, | 498 | .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, |
499 | .find_pll = intel_igdng_find_best_PLL, | 499 | .find_pll = intel_ironlake_find_best_PLL, |
500 | }; | 500 | }; |
501 | 501 | ||
502 | static const intel_limit_t intel_limits_igdng_lvds = { | 502 | static const intel_limit_t intel_limits_ironlake_lvds = { |
503 | .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, | 503 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, |
504 | .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, | 504 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, |
505 | .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, | 505 | .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, |
506 | .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX }, | 506 | .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, |
507 | .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX }, | 507 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, |
508 | .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX }, | 508 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, |
509 | .p = { .min = IGDNG_P_LVDS_MIN, .max = IGDNG_P_LVDS_MAX }, | 509 | .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX }, |
510 | .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX }, | 510 | .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, |
511 | .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT, | 511 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, |
512 | .p2_slow = IGDNG_P2_LVDS_SLOW, | 512 | .p2_slow = IRONLAKE_P2_LVDS_SLOW, |
513 | .p2_fast = IGDNG_P2_LVDS_FAST }, | 513 | .p2_fast = IRONLAKE_P2_LVDS_FAST }, |
514 | .find_pll = intel_igdng_find_best_PLL, | 514 | .find_pll = intel_ironlake_find_best_PLL, |
515 | }; | 515 | }; |
516 | 516 | ||
517 | static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc) | 517 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) |
518 | { | 518 | { |
519 | const intel_limit_t *limit; | 519 | const intel_limit_t *limit; |
520 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 520 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
521 | limit = &intel_limits_igdng_lvds; | 521 | limit = &intel_limits_ironlake_lvds; |
522 | else | 522 | else |
523 | limit = &intel_limits_igdng_sdvo; | 523 | limit = &intel_limits_ironlake_sdvo; |
524 | 524 | ||
525 | return limit; | 525 | return limit; |
526 | } | 526 | } |
@@ -557,20 +557,20 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | |||
557 | struct drm_device *dev = crtc->dev; | 557 | struct drm_device *dev = crtc->dev; |
558 | const intel_limit_t *limit; | 558 | const intel_limit_t *limit; |
559 | 559 | ||
560 | if (IS_IGDNG(dev)) | 560 | if (IS_IRONLAKE(dev)) |
561 | limit = intel_igdng_limit(crtc); | 561 | limit = intel_ironlake_limit(crtc); |
562 | else if (IS_G4X(dev)) { | 562 | else if (IS_G4X(dev)) { |
563 | limit = intel_g4x_limit(crtc); | 563 | limit = intel_g4x_limit(crtc); |
564 | } else if (IS_I9XX(dev) && !IS_IGD(dev)) { | 564 | } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) { |
565 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 565 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
566 | limit = &intel_limits_i9xx_lvds; | 566 | limit = &intel_limits_i9xx_lvds; |
567 | else | 567 | else |
568 | limit = &intel_limits_i9xx_sdvo; | 568 | limit = &intel_limits_i9xx_sdvo; |
569 | } else if (IS_IGD(dev)) { | 569 | } else if (IS_PINEVIEW(dev)) { |
570 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 570 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
571 | limit = &intel_limits_igd_lvds; | 571 | limit = &intel_limits_pineview_lvds; |
572 | else | 572 | else |
573 | limit = &intel_limits_igd_sdvo; | 573 | limit = &intel_limits_pineview_sdvo; |
574 | } else { | 574 | } else { |
575 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 575 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
576 | limit = &intel_limits_i8xx_lvds; | 576 | limit = &intel_limits_i8xx_lvds; |
@@ -580,8 +580,8 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | |||
580 | return limit; | 580 | return limit; |
581 | } | 581 | } |
582 | 582 | ||
583 | /* m1 is reserved as 0 in IGD, n is a ring counter */ | 583 | /* m1 is reserved as 0 in Pineview, n is a ring counter */ |
584 | static void igd_clock(int refclk, intel_clock_t *clock) | 584 | static void pineview_clock(int refclk, intel_clock_t *clock) |
585 | { | 585 | { |
586 | clock->m = clock->m2 + 2; | 586 | clock->m = clock->m2 + 2; |
587 | clock->p = clock->p1 * clock->p2; | 587 | clock->p = clock->p1 * clock->p2; |
@@ -591,8 +591,8 @@ static void igd_clock(int refclk, intel_clock_t *clock) | |||
591 | 591 | ||
592 | static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) | 592 | static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) |
593 | { | 593 | { |
594 | if (IS_IGD(dev)) { | 594 | if (IS_PINEVIEW(dev)) { |
595 | igd_clock(refclk, clock); | 595 | pineview_clock(refclk, clock); |
596 | return; | 596 | return; |
597 | } | 597 | } |
598 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); | 598 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); |
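The two hunks above show only the opening lines of the renamed clock helpers, so the following self-contained sketch fills in where that arithmetic presumably goes; the struct, the function names, and the vco/dot lines are assumptions based on the usual m/n/p PLL formula, not text taken from this diff:

struct pll_sketch { int n, m1, m2, p1, p2, m, p, vco, dot; };

/* Non-Pineview path: m is assembled from m1/m2 as shown in the hunk,
 * then the vco and dot clock follow (assumed). */
static void i9xx_clock_sketch(int refclk, struct pll_sketch *clock)
{
        clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
        clock->p = clock->p1 * clock->p2;
        clock->vco = refclk * clock->m / (clock->n + 2);   /* assumed */
        clock->dot = clock->vco / clock->p;                /* assumed */
}

/* Pineview path: m comes straight from m2 and n is a ring counter,
 * so no +2 is applied to n (assumed). */
static void pineview_clock_sketch(int refclk, struct pll_sketch *clock)
{
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
        clock->vco = refclk * clock->m / clock->n;         /* assumed */
        clock->dot = clock->vco / clock->p;                /* assumed */
}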
@@ -657,7 +657,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) | |||
657 | INTELPllInvalid ("m2 out of range\n"); | 657 | INTELPllInvalid ("m2 out of range\n"); |
658 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) | 658 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) |
659 | INTELPllInvalid ("m1 out of range\n"); | 659 | INTELPllInvalid ("m1 out of range\n"); |
660 | if (clock->m1 <= clock->m2 && !IS_IGD(dev)) | 660 | if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) |
661 | INTELPllInvalid ("m1 <= m2\n"); | 661 | INTELPllInvalid ("m1 <= m2\n"); |
662 | if (clock->m < limit->m.min || limit->m.max < clock->m) | 662 | if (clock->m < limit->m.min || limit->m.max < clock->m) |
663 | INTELPllInvalid ("m out of range\n"); | 663 | INTELPllInvalid ("m out of range\n"); |
@@ -706,16 +706,17 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
706 | 706 | ||
707 | memset (best_clock, 0, sizeof (*best_clock)); | 707 | memset (best_clock, 0, sizeof (*best_clock)); |
708 | 708 | ||
709 | for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { | 709 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; |
710 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; | 710 | clock.m1++) { |
711 | clock.m1++) { | 711 | for (clock.m2 = limit->m2.min; |
712 | for (clock.m2 = limit->m2.min; | 712 | clock.m2 <= limit->m2.max; clock.m2++) { |
713 | clock.m2 <= limit->m2.max; clock.m2++) { | 713 | /* m1 is always 0 in Pineview */ |
714 | /* m1 is always 0 in IGD */ | 714 | if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) |
715 | if (clock.m2 >= clock.m1 && !IS_IGD(dev)) | 715 | break; |
716 | break; | 716 | for (clock.n = limit->n.min; |
717 | for (clock.n = limit->n.min; | 717 | clock.n <= limit->n.max; clock.n++) { |
718 | clock.n <= limit->n.max; clock.n++) { | 718 | for (clock.p1 = limit->p1.min; |
719 | clock.p1 <= limit->p1.max; clock.p1++) { | ||
719 | int this_err; | 720 | int this_err; |
720 | 721 | ||
721 | intel_clock(dev, refclk, &clock); | 722 | intel_clock(dev, refclk, &clock); |
@@ -751,8 +752,8 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
751 | 752 | ||
752 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { | 753 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { |
753 | for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { | 754 | for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { |
754 | /* m1 is always 0 in IGD */ | 755 | /* m1 is always 0 in Pineview */ |
755 | if (clock.m2 >= clock.m1 && !IS_IGD(dev)) | 756 | if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) |
756 | break; | 757 | break; |
757 | for (clock.n = limit->n.min; clock.n <= limit->n.max; | 758 | for (clock.n = limit->n.min; clock.n <= limit->n.max; |
758 | clock.n++) { | 759 | clock.n++) { |
@@ -833,8 +834,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
833 | } | 834 | } |
834 | 835 | ||
835 | static bool | 836 | static bool |
836 | intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | 837 | intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
837 | int target, int refclk, intel_clock_t *best_clock) | 838 | int target, int refclk, intel_clock_t *best_clock) |
838 | { | 839 | { |
839 | struct drm_device *dev = crtc->dev; | 840 | struct drm_device *dev = crtc->dev; |
840 | intel_clock_t clock; | 841 | intel_clock_t clock; |
@@ -857,8 +858,8 @@ intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
857 | } | 858 | } |
858 | 859 | ||
859 | static bool | 860 | static bool |
860 | intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 861 | intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
861 | int target, int refclk, intel_clock_t *best_clock) | 862 | int target, int refclk, intel_clock_t *best_clock) |
862 | { | 863 | { |
863 | struct drm_device *dev = crtc->dev; | 864 | struct drm_device *dev = crtc->dev; |
864 | struct drm_i915_private *dev_priv = dev->dev_private; | 865 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -871,7 +872,7 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
871 | return true; | 872 | return true; |
872 | 873 | ||
873 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) | 874 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) |
874 | return intel_find_pll_igdng_dp(limit, crtc, target, | 875 | return intel_find_pll_ironlake_dp(limit, crtc, target, |
875 | refclk, best_clock); | 876 | refclk, best_clock); |
876 | 877 | ||
877 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 878 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
@@ -949,7 +950,7 @@ void | |||
949 | intel_wait_for_vblank(struct drm_device *dev) | 950 | intel_wait_for_vblank(struct drm_device *dev) |
950 | { | 951 | { |
951 | /* Wait for 20ms, i.e. one cycle at 50hz. */ | 952 | /* Wait for 20ms, i.e. one cycle at 50hz. */ |
952 | mdelay(20); | 953 | msleep(20); |
953 | } | 954 | } |
954 | 955 | ||
955 | /* Parameters have changed, update FBC info */ | 956 | /* Parameters have changed, update FBC info */ |
@@ -994,7 +995,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
994 | fbc_ctl |= dev_priv->cfb_fence; | 995 | fbc_ctl |= dev_priv->cfb_fence; |
995 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 996 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
996 | 997 | ||
997 | DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ", | 998 | DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ", |
998 | dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); | 999 | dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); |
999 | } | 1000 | } |
1000 | 1001 | ||
@@ -1017,7 +1018,7 @@ void i8xx_disable_fbc(struct drm_device *dev) | |||
1017 | 1018 | ||
1018 | intel_wait_for_vblank(dev); | 1019 | intel_wait_for_vblank(dev); |
1019 | 1020 | ||
1020 | DRM_DEBUG("disabled FBC\n"); | 1021 | DRM_DEBUG_KMS("disabled FBC\n"); |
1021 | } | 1022 | } |
1022 | 1023 | ||
1023 | static bool i8xx_fbc_enabled(struct drm_crtc *crtc) | 1024 | static bool i8xx_fbc_enabled(struct drm_crtc *crtc) |
@@ -1062,7 +1063,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1062 | /* enable it... */ | 1063 | /* enable it... */ |
1063 | I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); | 1064 | I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); |
1064 | 1065 | ||
1065 | DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane); | 1066 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); |
1066 | } | 1067 | } |
1067 | 1068 | ||
1068 | void g4x_disable_fbc(struct drm_device *dev) | 1069 | void g4x_disable_fbc(struct drm_device *dev) |
@@ -1076,7 +1077,7 @@ void g4x_disable_fbc(struct drm_device *dev) | |||
1076 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | 1077 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); |
1077 | intel_wait_for_vblank(dev); | 1078 | intel_wait_for_vblank(dev); |
1078 | 1079 | ||
1079 | DRM_DEBUG("disabled FBC\n"); | 1080 | DRM_DEBUG_KMS("disabled FBC\n"); |
1080 | } | 1081 | } |
1081 | 1082 | ||
1082 | static bool g4x_fbc_enabled(struct drm_crtc *crtc) | 1083 | static bool g4x_fbc_enabled(struct drm_crtc *crtc) |
@@ -1141,25 +1142,27 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1141 | * - going to an unsupported config (interlace, pixel multiply, etc.) | 1142 | * - going to an unsupported config (interlace, pixel multiply, etc.) |
1142 | */ | 1143 | */ |
1143 | if (intel_fb->obj->size > dev_priv->cfb_size) { | 1144 | if (intel_fb->obj->size > dev_priv->cfb_size) { |
1144 | DRM_DEBUG("framebuffer too large, disabling compression\n"); | 1145 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
1146 | "compression\n"); | ||
1145 | goto out_disable; | 1147 | goto out_disable; |
1146 | } | 1148 | } |
1147 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || | 1149 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || |
1148 | (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { | 1150 | (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { |
1149 | DRM_DEBUG("mode incompatible with compression, disabling\n"); | 1151 | DRM_DEBUG_KMS("mode incompatible with compression, " |
1152 | "disabling\n"); | ||
1150 | goto out_disable; | 1153 | goto out_disable; |
1151 | } | 1154 | } |
1152 | if ((mode->hdisplay > 2048) || | 1155 | if ((mode->hdisplay > 2048) || |
1153 | (mode->vdisplay > 1536)) { | 1156 | (mode->vdisplay > 1536)) { |
1154 | DRM_DEBUG("mode too large for compression, disabling\n"); | 1157 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); |
1155 | goto out_disable; | 1158 | goto out_disable; |
1156 | } | 1159 | } |
1157 | if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { | 1160 | if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { |
1158 | DRM_DEBUG("plane not 0, disabling compression\n"); | 1161 | DRM_DEBUG_KMS("plane not 0, disabling compression\n"); |
1159 | goto out_disable; | 1162 | goto out_disable; |
1160 | } | 1163 | } |
1161 | if (obj_priv->tiling_mode != I915_TILING_X) { | 1164 | if (obj_priv->tiling_mode != I915_TILING_X) { |
1162 | DRM_DEBUG("framebuffer not tiled, disabling compression\n"); | 1165 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); |
1163 | goto out_disable; | 1166 | goto out_disable; |
1164 | } | 1167 | } |
1165 | 1168 | ||
@@ -1181,13 +1184,57 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1181 | return; | 1184 | return; |
1182 | 1185 | ||
1183 | out_disable: | 1186 | out_disable: |
1184 | DRM_DEBUG("unsupported config, disabling FBC\n"); | 1187 | DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); |
1185 | /* Multiple disables should be harmless */ | 1188 | /* Multiple disables should be harmless */ |
1186 | if (dev_priv->display.fbc_enabled(crtc)) | 1189 | if (dev_priv->display.fbc_enabled(crtc)) |
1187 | dev_priv->display.disable_fbc(dev); | 1190 | dev_priv->display.disable_fbc(dev); |
1188 | } | 1191 | } |
1189 | 1192 | ||
1190 | static int | 1193 | static int |
1194 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | ||
1195 | { | ||
1196 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1197 | u32 alignment; | ||
1198 | int ret; | ||
1199 | |||
1200 | switch (obj_priv->tiling_mode) { | ||
1201 | case I915_TILING_NONE: | ||
1202 | alignment = 64 * 1024; | ||
1203 | break; | ||
1204 | case I915_TILING_X: | ||
1205 | /* pin() will align the object as required by fence */ | ||
1206 | alignment = 0; | ||
1207 | break; | ||
1208 | case I915_TILING_Y: | ||
1209 | /* FIXME: Is this true? */ | ||
1210 | DRM_ERROR("Y tiled not allowed for scan out buffers\n"); | ||
1211 | return -EINVAL; | ||
1212 | default: | ||
1213 | BUG(); | ||
1214 | } | ||
1215 | |||
1216 | ret = i915_gem_object_pin(obj, alignment); | ||
1217 | if (ret != 0) | ||
1218 | return ret; | ||
1219 | |||
1220 | /* Install a fence for tiled scan-out. Pre-i965 always needs a | ||
1221 | * fence, whereas 965+ only requires a fence if using | ||
1222 | * framebuffer compression. For simplicity, we always install | ||
1223 | * a fence as the cost is not that onerous. | ||
1224 | */ | ||
1225 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE && | ||
1226 | obj_priv->tiling_mode != I915_TILING_NONE) { | ||
1227 | ret = i915_gem_object_get_fence_reg(obj); | ||
1228 | if (ret != 0) { | ||
1229 | i915_gem_object_unpin(obj); | ||
1230 | return ret; | ||
1231 | } | ||
1232 | } | ||
1233 | |||
1234 | return 0; | ||
1235 | } | ||
1236 | |||
1237 | static int | ||
1191 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | 1238 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, |
1192 | struct drm_framebuffer *old_fb) | 1239 | struct drm_framebuffer *old_fb) |
1193 | { | 1240 | { |
@@ -1206,12 +1253,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1206 | int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; | 1253 | int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; |
1207 | int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); | 1254 | int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); |
1208 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; | 1255 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; |
1209 | u32 dspcntr, alignment; | 1256 | u32 dspcntr; |
1210 | int ret; | 1257 | int ret; |
1211 | 1258 | ||
1212 | /* no fb bound */ | 1259 | /* no fb bound */ |
1213 | if (!crtc->fb) { | 1260 | if (!crtc->fb) { |
1214 | DRM_DEBUG("No FB bound\n"); | 1261 | DRM_DEBUG_KMS("No FB bound\n"); |
1215 | return 0; | 1262 | return 0; |
1216 | } | 1263 | } |
1217 | 1264 | ||
@@ -1228,24 +1275,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1228 | obj = intel_fb->obj; | 1275 | obj = intel_fb->obj; |
1229 | obj_priv = obj->driver_private; | 1276 | obj_priv = obj->driver_private; |
1230 | 1277 | ||
1231 | switch (obj_priv->tiling_mode) { | ||
1232 | case I915_TILING_NONE: | ||
1233 | alignment = 64 * 1024; | ||
1234 | break; | ||
1235 | case I915_TILING_X: | ||
1236 | /* pin() will align the object as required by fence */ | ||
1237 | alignment = 0; | ||
1238 | break; | ||
1239 | case I915_TILING_Y: | ||
1240 | /* FIXME: Is this true? */ | ||
1241 | DRM_ERROR("Y tiled not allowed for scan out buffers\n"); | ||
1242 | return -EINVAL; | ||
1243 | default: | ||
1244 | BUG(); | ||
1245 | } | ||
1246 | |||
1247 | mutex_lock(&dev->struct_mutex); | 1278 | mutex_lock(&dev->struct_mutex); |
1248 | ret = i915_gem_object_pin(obj, alignment); | 1279 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
1249 | if (ret != 0) { | 1280 | if (ret != 0) { |
1250 | mutex_unlock(&dev->struct_mutex); | 1281 | mutex_unlock(&dev->struct_mutex); |
1251 | return ret; | 1282 | return ret; |
@@ -1258,20 +1289,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1258 | return ret; | 1289 | return ret; |
1259 | } | 1290 | } |
1260 | 1291 | ||
1261 | /* Install a fence for tiled scan-out. Pre-i965 always needs a fence, | ||
1262 | * whereas 965+ only requires a fence if using framebuffer compression. | ||
1263 | * For simplicity, we always install a fence as the cost is not that onerous. | ||
1264 | */ | ||
1265 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE && | ||
1266 | obj_priv->tiling_mode != I915_TILING_NONE) { | ||
1267 | ret = i915_gem_object_get_fence_reg(obj); | ||
1268 | if (ret != 0) { | ||
1269 | i915_gem_object_unpin(obj); | ||
1270 | mutex_unlock(&dev->struct_mutex); | ||
1271 | return ret; | ||
1272 | } | ||
1273 | } | ||
1274 | |||
1275 | dspcntr = I915_READ(dspcntr_reg); | 1292 | dspcntr = I915_READ(dspcntr_reg); |
1276 | /* Mask out pixel format bits in case we change it */ | 1293 | /* Mask out pixel format bits in case we change it */ |
1277 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; | 1294 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; |
@@ -1287,7 +1304,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1287 | break; | 1304 | break; |
1288 | case 24: | 1305 | case 24: |
1289 | case 32: | 1306 | case 32: |
1290 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; | 1307 | if (crtc->fb->depth == 30) |
1308 | dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; | ||
1309 | else | ||
1310 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; | ||
1291 | break; | 1311 | break; |
1292 | default: | 1312 | default: |
1293 | DRM_ERROR("Unknown color depth\n"); | 1313 | DRM_ERROR("Unknown color depth\n"); |
@@ -1302,7 +1322,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1302 | dspcntr &= ~DISPPLANE_TILED; | 1322 | dspcntr &= ~DISPPLANE_TILED; |
1303 | } | 1323 | } |
1304 | 1324 | ||
1305 | if (IS_IGDNG(dev)) | 1325 | if (IS_IRONLAKE(dev)) |
1306 | /* must disable */ | 1326 | /* must disable */ |
1307 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; | 1327 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; |
1308 | 1328 | ||
@@ -1311,7 +1331,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1311 | Start = obj_priv->gtt_offset; | 1331 | Start = obj_priv->gtt_offset; |
1312 | Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); | 1332 | Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); |
1313 | 1333 | ||
1314 | DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); | 1334 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); |
1315 | I915_WRITE(dspstride, crtc->fb->pitch); | 1335 | I915_WRITE(dspstride, crtc->fb->pitch); |
1316 | if (IS_I965G(dev)) { | 1336 | if (IS_I965G(dev)) { |
1317 | I915_WRITE(dspbase, Offset); | 1337 | I915_WRITE(dspbase, Offset); |
@@ -1363,7 +1383,7 @@ static void i915_disable_vga (struct drm_device *dev) | |||
1363 | u8 sr1; | 1383 | u8 sr1; |
1364 | u32 vga_reg; | 1384 | u32 vga_reg; |
1365 | 1385 | ||
1366 | if (IS_IGDNG(dev)) | 1386 | if (IS_IRONLAKE(dev)) |
1367 | vga_reg = CPU_VGACNTRL; | 1387 | vga_reg = CPU_VGACNTRL; |
1368 | else | 1388 | else |
1369 | vga_reg = VGACNTRL; | 1389 | vga_reg = VGACNTRL; |
@@ -1379,19 +1399,19 @@ static void i915_disable_vga (struct drm_device *dev) | |||
1379 | I915_WRITE(vga_reg, VGA_DISP_DISABLE); | 1399 | I915_WRITE(vga_reg, VGA_DISP_DISABLE); |
1380 | } | 1400 | } |
1381 | 1401 | ||
1382 | static void igdng_disable_pll_edp (struct drm_crtc *crtc) | 1402 | static void ironlake_disable_pll_edp (struct drm_crtc *crtc) |
1383 | { | 1403 | { |
1384 | struct drm_device *dev = crtc->dev; | 1404 | struct drm_device *dev = crtc->dev; |
1385 | struct drm_i915_private *dev_priv = dev->dev_private; | 1405 | struct drm_i915_private *dev_priv = dev->dev_private; |
1386 | u32 dpa_ctl; | 1406 | u32 dpa_ctl; |
1387 | 1407 | ||
1388 | DRM_DEBUG("\n"); | 1408 | DRM_DEBUG_KMS("\n"); |
1389 | dpa_ctl = I915_READ(DP_A); | 1409 | dpa_ctl = I915_READ(DP_A); |
1390 | dpa_ctl &= ~DP_PLL_ENABLE; | 1410 | dpa_ctl &= ~DP_PLL_ENABLE; |
1391 | I915_WRITE(DP_A, dpa_ctl); | 1411 | I915_WRITE(DP_A, dpa_ctl); |
1392 | } | 1412 | } |
1393 | 1413 | ||
1394 | static void igdng_enable_pll_edp (struct drm_crtc *crtc) | 1414 | static void ironlake_enable_pll_edp (struct drm_crtc *crtc) |
1395 | { | 1415 | { |
1396 | struct drm_device *dev = crtc->dev; | 1416 | struct drm_device *dev = crtc->dev; |
1397 | struct drm_i915_private *dev_priv = dev->dev_private; | 1417 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -1404,13 +1424,13 @@ static void igdng_enable_pll_edp (struct drm_crtc *crtc) | |||
1404 | } | 1424 | } |
1405 | 1425 | ||
1406 | 1426 | ||
1407 | static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock) | 1427 | static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) |
1408 | { | 1428 | { |
1409 | struct drm_device *dev = crtc->dev; | 1429 | struct drm_device *dev = crtc->dev; |
1410 | struct drm_i915_private *dev_priv = dev->dev_private; | 1430 | struct drm_i915_private *dev_priv = dev->dev_private; |
1411 | u32 dpa_ctl; | 1431 | u32 dpa_ctl; |
1412 | 1432 | ||
1413 | DRM_DEBUG("eDP PLL enable for clock %d\n", clock); | 1433 | DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); |
1414 | dpa_ctl = I915_READ(DP_A); | 1434 | dpa_ctl = I915_READ(DP_A); |
1415 | dpa_ctl &= ~DP_PLL_FREQ_MASK; | 1435 | dpa_ctl &= ~DP_PLL_FREQ_MASK; |
1416 | 1436 | ||
@@ -1440,7 +1460,7 @@ static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock) | |||
1440 | udelay(500); | 1460 | udelay(500); |
1441 | } | 1461 | } |
1442 | 1462 | ||
1443 | static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | 1463 | static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) |
1444 | { | 1464 | { |
1445 | struct drm_device *dev = crtc->dev; | 1465 | struct drm_device *dev = crtc->dev; |
1446 | struct drm_i915_private *dev_priv = dev->dev_private; | 1466 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -1481,10 +1501,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1481 | case DRM_MODE_DPMS_ON: | 1501 | case DRM_MODE_DPMS_ON: |
1482 | case DRM_MODE_DPMS_STANDBY: | 1502 | case DRM_MODE_DPMS_STANDBY: |
1483 | case DRM_MODE_DPMS_SUSPEND: | 1503 | case DRM_MODE_DPMS_SUSPEND: |
1484 | DRM_DEBUG("crtc %d dpms on\n", pipe); | 1504 | DRM_DEBUG_KMS("crtc %d dpms on\n", pipe); |
1505 | |||
1506 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
1507 | temp = I915_READ(PCH_LVDS); | ||
1508 | if ((temp & LVDS_PORT_EN) == 0) { | ||
1509 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); | ||
1510 | POSTING_READ(PCH_LVDS); | ||
1511 | } | ||
1512 | } | ||
1513 | |||
1485 | if (HAS_eDP) { | 1514 | if (HAS_eDP) { |
1486 | /* enable eDP PLL */ | 1515 | /* enable eDP PLL */ |
1487 | igdng_enable_pll_edp(crtc); | 1516 | ironlake_enable_pll_edp(crtc); |
1488 | } else { | 1517 | } else { |
1489 | /* enable PCH DPLL */ | 1518 | /* enable PCH DPLL */ |
1490 | temp = I915_READ(pch_dpll_reg); | 1519 | temp = I915_READ(pch_dpll_reg); |
@@ -1501,7 +1530,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1501 | I915_READ(fdi_rx_reg); | 1530 | I915_READ(fdi_rx_reg); |
1502 | udelay(200); | 1531 | udelay(200); |
1503 | 1532 | ||
1504 | /* Enable CPU FDI TX PLL, always on for IGDNG */ | 1533 | /* Enable CPU FDI TX PLL, always on for Ironlake */ |
1505 | temp = I915_READ(fdi_tx_reg); | 1534 | temp = I915_READ(fdi_tx_reg); |
1506 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { | 1535 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { |
1507 | I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); | 1536 | I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); |
@@ -1568,12 +1597,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1568 | udelay(150); | 1597 | udelay(150); |
1569 | 1598 | ||
1570 | temp = I915_READ(fdi_rx_iir_reg); | 1599 | temp = I915_READ(fdi_rx_iir_reg); |
1571 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); | 1600 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1572 | 1601 | ||
1573 | if ((temp & FDI_RX_BIT_LOCK) == 0) { | 1602 | if ((temp & FDI_RX_BIT_LOCK) == 0) { |
1574 | for (j = 0; j < tries; j++) { | 1603 | for (j = 0; j < tries; j++) { |
1575 | temp = I915_READ(fdi_rx_iir_reg); | 1604 | temp = I915_READ(fdi_rx_iir_reg); |
1576 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); | 1605 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", |
1606 | temp); | ||
1577 | if (temp & FDI_RX_BIT_LOCK) | 1607 | if (temp & FDI_RX_BIT_LOCK) |
1578 | break; | 1608 | break; |
1579 | udelay(200); | 1609 | udelay(200); |
@@ -1582,11 +1612,11 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1582 | I915_WRITE(fdi_rx_iir_reg, | 1612 | I915_WRITE(fdi_rx_iir_reg, |
1583 | temp | FDI_RX_BIT_LOCK); | 1613 | temp | FDI_RX_BIT_LOCK); |
1584 | else | 1614 | else |
1585 | DRM_DEBUG("train 1 fail\n"); | 1615 | DRM_DEBUG_KMS("train 1 fail\n"); |
1586 | } else { | 1616 | } else { |
1587 | I915_WRITE(fdi_rx_iir_reg, | 1617 | I915_WRITE(fdi_rx_iir_reg, |
1588 | temp | FDI_RX_BIT_LOCK); | 1618 | temp | FDI_RX_BIT_LOCK); |
1589 | DRM_DEBUG("train 1 ok 2!\n"); | 1619 | DRM_DEBUG_KMS("train 1 ok 2!\n"); |
1590 | } | 1620 | } |
1591 | temp = I915_READ(fdi_tx_reg); | 1621 | temp = I915_READ(fdi_tx_reg); |
1592 | temp &= ~FDI_LINK_TRAIN_NONE; | 1622 | temp &= ~FDI_LINK_TRAIN_NONE; |
@@ -1601,12 +1631,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1601 | udelay(150); | 1631 | udelay(150); |
1602 | 1632 | ||
1603 | temp = I915_READ(fdi_rx_iir_reg); | 1633 | temp = I915_READ(fdi_rx_iir_reg); |
1604 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); | 1634 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1605 | 1635 | ||
1606 | if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { | 1636 | if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { |
1607 | for (j = 0; j < tries; j++) { | 1637 | for (j = 0; j < tries; j++) { |
1608 | temp = I915_READ(fdi_rx_iir_reg); | 1638 | temp = I915_READ(fdi_rx_iir_reg); |
1609 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); | 1639 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", |
1640 | temp); | ||
1610 | if (temp & FDI_RX_SYMBOL_LOCK) | 1641 | if (temp & FDI_RX_SYMBOL_LOCK) |
1611 | break; | 1642 | break; |
1612 | udelay(200); | 1643 | udelay(200); |
@@ -1614,15 +1645,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1614 | if (j != tries) { | 1645 | if (j != tries) { |
1615 | I915_WRITE(fdi_rx_iir_reg, | 1646 | I915_WRITE(fdi_rx_iir_reg, |
1616 | temp | FDI_RX_SYMBOL_LOCK); | 1647 | temp | FDI_RX_SYMBOL_LOCK); |
1617 | DRM_DEBUG("train 2 ok 1!\n"); | 1648 | DRM_DEBUG_KMS("train 2 ok 1!\n"); |
1618 | } else | 1649 | } else |
1619 | DRM_DEBUG("train 2 fail\n"); | 1650 | DRM_DEBUG_KMS("train 2 fail\n"); |
1620 | } else { | 1651 | } else { |
1621 | I915_WRITE(fdi_rx_iir_reg, | 1652 | I915_WRITE(fdi_rx_iir_reg, |
1622 | temp | FDI_RX_SYMBOL_LOCK); | 1653 | temp | FDI_RX_SYMBOL_LOCK); |
1623 | DRM_DEBUG("train 2 ok 2!\n"); | 1654 | DRM_DEBUG_KMS("train 2 ok 2!\n"); |
1624 | } | 1655 | } |
1625 | DRM_DEBUG("train done\n"); | 1656 | DRM_DEBUG_KMS("train done\n"); |
1626 | 1657 | ||
1627 | /* set transcoder timing */ | 1658 | /* set transcoder timing */ |
1628 | I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); | 1659 | I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); |
@@ -1664,9 +1695,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1664 | 1695 | ||
1665 | break; | 1696 | break; |
1666 | case DRM_MODE_DPMS_OFF: | 1697 | case DRM_MODE_DPMS_OFF: |
1667 | DRM_DEBUG("crtc %d dpms off\n", pipe); | 1698 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); |
1668 | |||
1669 | i915_disable_vga(dev); | ||
1670 | 1699 | ||
1671 | /* Disable display plane */ | 1700 | /* Disable display plane */ |
1672 | temp = I915_READ(dspcntr_reg); | 1701 | temp = I915_READ(dspcntr_reg); |
@@ -1677,6 +1706,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1677 | I915_READ(dspbase_reg); | 1706 | I915_READ(dspbase_reg); |
1678 | } | 1707 | } |
1679 | 1708 | ||
1709 | i915_disable_vga(dev); | ||
1710 | |||
1680 | /* disable cpu pipe, disable after all planes disabled */ | 1711 | /* disable cpu pipe, disable after all planes disabled */ |
1681 | temp = I915_READ(pipeconf_reg); | 1712 | temp = I915_READ(pipeconf_reg); |
1682 | if ((temp & PIPEACONF_ENABLE) != 0) { | 1713 | if ((temp & PIPEACONF_ENABLE) != 0) { |
@@ -1690,16 +1721,23 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1690 | udelay(500); | 1721 | udelay(500); |
1691 | continue; | 1722 | continue; |
1692 | } else { | 1723 | } else { |
1693 | DRM_DEBUG("pipe %d off delay\n", pipe); | 1724 | DRM_DEBUG_KMS("pipe %d off delay\n", |
1725 | pipe); | ||
1694 | break; | 1726 | break; |
1695 | } | 1727 | } |
1696 | } | 1728 | } |
1697 | } else | 1729 | } else |
1698 | DRM_DEBUG("crtc %d is disabled\n", pipe); | 1730 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); |
1699 | 1731 | ||
1700 | if (HAS_eDP) { | 1732 | udelay(100); |
1701 | igdng_disable_pll_edp(crtc); | 1733 | |
1734 | /* Disable PF */ | ||
1735 | temp = I915_READ(pf_ctl_reg); | ||
1736 | if ((temp & PF_ENABLE) != 0) { | ||
1737 | I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); | ||
1738 | I915_READ(pf_ctl_reg); | ||
1702 | } | 1739 | } |
1740 | I915_WRITE(pf_win_size, 0); | ||
1703 | 1741 | ||
1704 | /* disable CPU FDI tx and PCH FDI rx */ | 1742 | /* disable CPU FDI tx and PCH FDI rx */ |
1705 | temp = I915_READ(fdi_tx_reg); | 1743 | temp = I915_READ(fdi_tx_reg); |
@@ -1725,6 +1763,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1725 | 1763 | ||
1726 | udelay(100); | 1764 | udelay(100); |
1727 | 1765 | ||
1766 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
1767 | temp = I915_READ(PCH_LVDS); | ||
1768 | I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); | ||
1769 | I915_READ(PCH_LVDS); | ||
1770 | udelay(100); | ||
1771 | } | ||
1772 | |||
1728 | /* disable PCH transcoder */ | 1773 | /* disable PCH transcoder */ |
1729 | temp = I915_READ(transconf_reg); | 1774 | temp = I915_READ(transconf_reg); |
1730 | if ((temp & TRANS_ENABLE) != 0) { | 1775 | if ((temp & TRANS_ENABLE) != 0) { |
@@ -1738,12 +1783,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1738 | udelay(500); | 1783 | udelay(500); |
1739 | continue; | 1784 | continue; |
1740 | } else { | 1785 | } else { |
1741 | DRM_DEBUG("transcoder %d off delay\n", pipe); | 1786 | DRM_DEBUG_KMS("transcoder %d off " |
1787 | "delay\n", pipe); | ||
1742 | break; | 1788 | break; |
1743 | } | 1789 | } |
1744 | } | 1790 | } |
1745 | } | 1791 | } |
1746 | 1792 | ||
1793 | udelay(100); | ||
1794 | |||
1747 | /* disable PCH DPLL */ | 1795 | /* disable PCH DPLL */ |
1748 | temp = I915_READ(pch_dpll_reg); | 1796 | temp = I915_READ(pch_dpll_reg); |
1749 | if ((temp & DPLL_VCO_ENABLE) != 0) { | 1797 | if ((temp & DPLL_VCO_ENABLE) != 0) { |
@@ -1751,14 +1799,20 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1751 | I915_READ(pch_dpll_reg); | 1799 | I915_READ(pch_dpll_reg); |
1752 | } | 1800 | } |
1753 | 1801 | ||
1754 | temp = I915_READ(fdi_rx_reg); | 1802 | if (HAS_eDP) { |
1755 | if ((temp & FDI_RX_PLL_ENABLE) != 0) { | 1803 | ironlake_disable_pll_edp(crtc); |
1756 | temp &= ~FDI_SEL_PCDCLK; | ||
1757 | temp &= ~FDI_RX_PLL_ENABLE; | ||
1758 | I915_WRITE(fdi_rx_reg, temp); | ||
1759 | I915_READ(fdi_rx_reg); | ||
1760 | } | 1804 | } |
1761 | 1805 | ||
1806 | temp = I915_READ(fdi_rx_reg); | ||
1807 | temp &= ~FDI_SEL_PCDCLK; | ||
1808 | I915_WRITE(fdi_rx_reg, temp); | ||
1809 | I915_READ(fdi_rx_reg); | ||
1810 | |||
1811 | temp = I915_READ(fdi_rx_reg); | ||
1812 | temp &= ~FDI_RX_PLL_ENABLE; | ||
1813 | I915_WRITE(fdi_rx_reg, temp); | ||
1814 | I915_READ(fdi_rx_reg); | ||
1815 | |||
1762 | /* Disable CPU FDI TX PLL */ | 1816 | /* Disable CPU FDI TX PLL */ |
1763 | temp = I915_READ(fdi_tx_reg); | 1817 | temp = I915_READ(fdi_tx_reg); |
1764 | if ((temp & FDI_TX_PLL_ENABLE) != 0) { | 1818 | if ((temp & FDI_TX_PLL_ENABLE) != 0) { |
@@ -1767,20 +1821,43 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1767 | udelay(100); | 1821 | udelay(100); |
1768 | } | 1822 | } |
1769 | 1823 | ||
1770 | /* Disable PF */ | ||
1771 | temp = I915_READ(pf_ctl_reg); | ||
1772 | if ((temp & PF_ENABLE) != 0) { | ||
1773 | I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); | ||
1774 | I915_READ(pf_ctl_reg); | ||
1775 | } | ||
1776 | I915_WRITE(pf_win_size, 0); | ||
1777 | |||
1778 | /* Wait for the clocks to turn off. */ | 1824 | /* Wait for the clocks to turn off. */ |
1779 | udelay(150); | 1825 | udelay(100); |
1780 | break; | 1826 | break; |
1781 | } | 1827 | } |
1782 | } | 1828 | } |
1783 | 1829 | ||
1830 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) | ||
1831 | { | ||
1832 | struct intel_overlay *overlay; | ||
1833 | int ret; | ||
1834 | |||
1835 | if (!enable && intel_crtc->overlay) { | ||
1836 | overlay = intel_crtc->overlay; | ||
1837 | mutex_lock(&overlay->dev->struct_mutex); | ||
1838 | for (;;) { | ||
1839 | ret = intel_overlay_switch_off(overlay); | ||
1840 | if (ret == 0) | ||
1841 | break; | ||
1842 | |||
1843 | ret = intel_overlay_recover_from_interrupt(overlay, 0); | ||
1844 | if (ret != 0) { | ||
1845 | /* overlay doesn't react anymore. Usually | ||
1846 | * results in a black screen and an unkillable | ||
1847 | * X server. */ | ||
1848 | BUG(); | ||
1849 | overlay->hw_wedged = HW_WEDGED; | ||
1850 | break; | ||
1851 | } | ||
1852 | } | ||
1853 | mutex_unlock(&overlay->dev->struct_mutex); | ||
1854 | } | ||
1855 | /* Let userspace switch the overlay on again. In most cases userspace | ||
1856 | * has to recompute where to put it anyway. */ | ||
1857 | |||
1858 | return; | ||
1859 | } | ||
1860 | |||
1784 | static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | 1861 | static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) |
1785 | { | 1862 | { |
1786 | struct drm_device *dev = crtc->dev; | 1863 | struct drm_device *dev = crtc->dev; |
@@ -1839,12 +1916,14 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1839 | intel_update_fbc(crtc, &crtc->mode); | 1916 | intel_update_fbc(crtc, &crtc->mode); |
1840 | 1917 | ||
1841 | /* Give the overlay scaler a chance to enable if it's on this pipe */ | 1918 | /* Give the overlay scaler a chance to enable if it's on this pipe */ |
1842 | //intel_crtc_dpms_video(crtc, true); TODO | 1919 | intel_crtc_dpms_overlay(intel_crtc, true); |
1843 | break; | 1920 | break; |
1844 | case DRM_MODE_DPMS_OFF: | 1921 | case DRM_MODE_DPMS_OFF: |
1845 | intel_update_watermarks(dev); | 1922 | intel_update_watermarks(dev); |
1923 | |||
1846 | /* Give the overlay scaler a chance to disable if it's on this pipe */ | 1924 | /* Give the overlay scaler a chance to disable if it's on this pipe */ |
1847 | //intel_crtc_dpms_video(crtc, FALSE); TODO | 1925 | intel_crtc_dpms_overlay(intel_crtc, false); |
1926 | drm_vblank_off(dev, pipe); | ||
1848 | 1927 | ||
1849 | if (dev_priv->cfb_plane == plane && | 1928 | if (dev_priv->cfb_plane == plane && |
1850 | dev_priv->display.disable_fbc) | 1929 | dev_priv->display.disable_fbc) |
@@ -1963,7 +2042,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | |||
1963 | struct drm_display_mode *adjusted_mode) | 2042 | struct drm_display_mode *adjusted_mode) |
1964 | { | 2043 | { |
1965 | struct drm_device *dev = crtc->dev; | 2044 | struct drm_device *dev = crtc->dev; |
1966 | if (IS_IGDNG(dev)) { | 2045 | if (IS_IRONLAKE(dev)) { |
1967 | /* FDI link clock is fixed at 2.7G */ | 2046 | /* FDI link clock is fixed at 2.7G */ |
1968 | if (mode->clock * 3 > 27000 * 4) | 2047 | if (mode->clock * 3 > 27000 * 4) |
1969 | return MODE_CLOCK_HIGH; | 2048 | return MODE_CLOCK_HIGH; |
@@ -2039,7 +2118,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev) | |||
2039 | * Return the pipe currently connected to the panel fitter, | 2118 | * Return the pipe currently connected to the panel fitter, |
2040 | * or -1 if the panel fitter is not present or not in use | 2119 | * or -1 if the panel fitter is not present or not in use |
2041 | */ | 2120 | */ |
2042 | static int intel_panel_fitter_pipe (struct drm_device *dev) | 2121 | int intel_panel_fitter_pipe (struct drm_device *dev) |
2043 | { | 2122 | { |
2044 | struct drm_i915_private *dev_priv = dev->dev_private; | 2123 | struct drm_i915_private *dev_priv = dev->dev_private; |
2045 | u32 pfit_control; | 2124 | u32 pfit_control; |
@@ -2083,9 +2162,8 @@ fdi_reduce_ratio(u32 *num, u32 *den) | |||
2083 | #define LINK_N 0x80000 | 2162 | #define LINK_N 0x80000 |
2084 | 2163 | ||
2085 | static void | 2164 | static void |
2086 | igdng_compute_m_n(int bits_per_pixel, int nlanes, | 2165 | ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, |
2087 | int pixel_clock, int link_clock, | 2166 | int link_clock, struct fdi_m_n *m_n) |
2088 | struct fdi_m_n *m_n) | ||
2089 | { | 2167 | { |
2090 | u64 temp; | 2168 | u64 temp; |
2091 | 2169 | ||
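Only the signature of ironlake_compute_m_n() is touched in this hunk, so below is a conceptual, self-contained sketch of the data and link M:N ratios such a helper derives for the FDI link; the names are hypothetical, and the real function presumably also scales by fixed constants such as LINK_N above and reduces the ratios via fdi_reduce_ratio(), which this sketch omits:

struct m_n_sketch { unsigned int data_m, data_n, link_m, link_n; };

static void compute_m_n_sketch(int bits_per_pixel, int nlanes,
                               int pixel_clock, int link_clock,
                               struct m_n_sketch *m_n)
{
        /* data ratio: pixel payload in bytes versus total lane bandwidth */
        m_n->data_m = bits_per_pixel * pixel_clock / 8;
        m_n->data_n = nlanes * link_clock;

        /* link ratio: pixel clock versus link symbol clock */
        m_n->link_m = pixel_clock;
        m_n->link_n = link_clock;
}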
@@ -2113,34 +2191,34 @@ struct intel_watermark_params { | |||
2113 | unsigned long cacheline_size; | 2191 | unsigned long cacheline_size; |
2114 | }; | 2192 | }; |
2115 | 2193 | ||
2116 | /* IGD has different values for various configs */ | 2194 | /* Pineview has different values for various configs */ |
2117 | static struct intel_watermark_params igd_display_wm = { | 2195 | static struct intel_watermark_params pineview_display_wm = { |
2118 | IGD_DISPLAY_FIFO, | 2196 | PINEVIEW_DISPLAY_FIFO, |
2119 | IGD_MAX_WM, | 2197 | PINEVIEW_MAX_WM, |
2120 | IGD_DFT_WM, | 2198 | PINEVIEW_DFT_WM, |
2121 | IGD_GUARD_WM, | 2199 | PINEVIEW_GUARD_WM, |
2122 | IGD_FIFO_LINE_SIZE | 2200 | PINEVIEW_FIFO_LINE_SIZE |
2123 | }; | 2201 | }; |
2124 | static struct intel_watermark_params igd_display_hplloff_wm = { | 2202 | static struct intel_watermark_params pineview_display_hplloff_wm = { |
2125 | IGD_DISPLAY_FIFO, | 2203 | PINEVIEW_DISPLAY_FIFO, |
2126 | IGD_MAX_WM, | 2204 | PINEVIEW_MAX_WM, |
2127 | IGD_DFT_HPLLOFF_WM, | 2205 | PINEVIEW_DFT_HPLLOFF_WM, |
2128 | IGD_GUARD_WM, | 2206 | PINEVIEW_GUARD_WM, |
2129 | IGD_FIFO_LINE_SIZE | 2207 | PINEVIEW_FIFO_LINE_SIZE |
2130 | }; | 2208 | }; |
2131 | static struct intel_watermark_params igd_cursor_wm = { | 2209 | static struct intel_watermark_params pineview_cursor_wm = { |
2132 | IGD_CURSOR_FIFO, | 2210 | PINEVIEW_CURSOR_FIFO, |
2133 | IGD_CURSOR_MAX_WM, | 2211 | PINEVIEW_CURSOR_MAX_WM, |
2134 | IGD_CURSOR_DFT_WM, | 2212 | PINEVIEW_CURSOR_DFT_WM, |
2135 | IGD_CURSOR_GUARD_WM, | 2213 | PINEVIEW_CURSOR_GUARD_WM, |
2136 | IGD_FIFO_LINE_SIZE, | 2214 | PINEVIEW_FIFO_LINE_SIZE, |
2137 | }; | 2215 | }; |
2138 | static struct intel_watermark_params igd_cursor_hplloff_wm = { | 2216 | static struct intel_watermark_params pineview_cursor_hplloff_wm = { |
2139 | IGD_CURSOR_FIFO, | 2217 | PINEVIEW_CURSOR_FIFO, |
2140 | IGD_CURSOR_MAX_WM, | 2218 | PINEVIEW_CURSOR_MAX_WM, |
2141 | IGD_CURSOR_DFT_WM, | 2219 | PINEVIEW_CURSOR_DFT_WM, |
2142 | IGD_CURSOR_GUARD_WM, | 2220 | PINEVIEW_CURSOR_GUARD_WM, |
2143 | IGD_FIFO_LINE_SIZE | 2221 | PINEVIEW_FIFO_LINE_SIZE |
2144 | }; | 2222 | }; |
2145 | static struct intel_watermark_params g4x_wm_info = { | 2223 | static struct intel_watermark_params g4x_wm_info = { |
2146 | G4X_FIFO_SIZE, | 2224 | G4X_FIFO_SIZE, |
@@ -2213,11 +2291,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |||
2213 | 1000; | 2291 | 1000; |
2214 | entries_required /= wm->cacheline_size; | 2292 | entries_required /= wm->cacheline_size; |
2215 | 2293 | ||
2216 | DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); | 2294 | DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); |
2217 | 2295 | ||
2218 | wm_size = wm->fifo_size - (entries_required + wm->guard_size); | 2296 | wm_size = wm->fifo_size - (entries_required + wm->guard_size); |
2219 | 2297 | ||
2220 | DRM_DEBUG("FIFO watermark level: %d\n", wm_size); | 2298 | DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); |
2221 | 2299 | ||
2222 | /* Don't promote wm_size to unsigned... */ | 2300 | /* Don't promote wm_size to unsigned... */ |
2223 | if (wm_size > (long)wm->max_wm) | 2301 | if (wm_size > (long)wm->max_wm) |
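Only the tail of intel_calculate_wm()'s arithmetic is visible in this hunk, so here is a self-contained sketch of the calculation those lines belong to; the first expression (how entries_required is formed from the pixel clock, pixel size and latency) and the final clamp to max_wm are assumptions rather than lines quoted from the patch:

struct wm_sketch_params {
        unsigned long fifo_size, max_wm, guard_size, cacheline_size;
};

static long calculate_wm_sketch(unsigned long clock_in_khz, int pixel_size,
                                unsigned long latency_ns,
                                const struct wm_sketch_params *wm)
{
        /* assumed: bytes fetched during the latency window, then converted
         * to FIFO cachelines */
        unsigned long entries_required =
                (clock_in_khz / 1000) * pixel_size * latency_ns / 1000;
        long wm_size;

        entries_required /= wm->cacheline_size;
        wm_size = wm->fifo_size - (entries_required + wm->guard_size);

        if (wm_size > (long)wm->max_wm)
                wm_size = wm->max_wm;   /* assumed clamp */

        return wm_size;
}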
@@ -2279,50 +2357,50 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, | |||
2279 | return latency; | 2357 | return latency; |
2280 | } | 2358 | } |
2281 | 2359 | ||
2282 | DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); | 2360 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); |
2283 | 2361 | ||
2284 | return NULL; | 2362 | return NULL; |
2285 | } | 2363 | } |
2286 | 2364 | ||
2287 | static void igd_disable_cxsr(struct drm_device *dev) | 2365 | static void pineview_disable_cxsr(struct drm_device *dev) |
2288 | { | 2366 | { |
2289 | struct drm_i915_private *dev_priv = dev->dev_private; | 2367 | struct drm_i915_private *dev_priv = dev->dev_private; |
2290 | u32 reg; | 2368 | u32 reg; |
2291 | 2369 | ||
2292 | /* deactivate cxsr */ | 2370 | /* deactivate cxsr */ |
2293 | reg = I915_READ(DSPFW3); | 2371 | reg = I915_READ(DSPFW3); |
2294 | reg &= ~(IGD_SELF_REFRESH_EN); | 2372 | reg &= ~(PINEVIEW_SELF_REFRESH_EN); |
2295 | I915_WRITE(DSPFW3, reg); | 2373 | I915_WRITE(DSPFW3, reg); |
2296 | DRM_INFO("Big FIFO is disabled\n"); | 2374 | DRM_INFO("Big FIFO is disabled\n"); |
2297 | } | 2375 | } |
2298 | 2376 | ||
2299 | static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, | 2377 | static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock, |
2300 | int pixel_size) | 2378 | int pixel_size) |
2301 | { | 2379 | { |
2302 | struct drm_i915_private *dev_priv = dev->dev_private; | 2380 | struct drm_i915_private *dev_priv = dev->dev_private; |
2303 | u32 reg; | 2381 | u32 reg; |
2304 | unsigned long wm; | 2382 | unsigned long wm; |
2305 | struct cxsr_latency *latency; | 2383 | struct cxsr_latency *latency; |
2306 | 2384 | ||
2307 | latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq, | 2385 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq, |
2308 | dev_priv->mem_freq); | 2386 | dev_priv->mem_freq); |
2309 | if (!latency) { | 2387 | if (!latency) { |
2310 | DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); | 2388 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); |
2311 | igd_disable_cxsr(dev); | 2389 | pineview_disable_cxsr(dev); |
2312 | return; | 2390 | return; |
2313 | } | 2391 | } |
2314 | 2392 | ||
2315 | /* Display SR */ | 2393 | /* Display SR */ |
2316 | wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size, | 2394 | wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size, |
2317 | latency->display_sr); | 2395 | latency->display_sr); |
2318 | reg = I915_READ(DSPFW1); | 2396 | reg = I915_READ(DSPFW1); |
2319 | reg &= 0x7fffff; | 2397 | reg &= 0x7fffff; |
2320 | reg |= wm << 23; | 2398 | reg |= wm << 23; |
2321 | I915_WRITE(DSPFW1, reg); | 2399 | I915_WRITE(DSPFW1, reg); |
2322 | DRM_DEBUG("DSPFW1 register is %x\n", reg); | 2400 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); |
2323 | 2401 | ||
2324 | /* cursor SR */ | 2402 | /* cursor SR */ |
2325 | wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size, | 2403 | wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size, |
2326 | latency->cursor_sr); | 2404 | latency->cursor_sr); |
2327 | reg = I915_READ(DSPFW3); | 2405 | reg = I915_READ(DSPFW3); |
2328 | reg &= ~(0x3f << 24); | 2406 | reg &= ~(0x3f << 24); |
@@ -2330,7 +2408,7 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, | |||
2330 | I915_WRITE(DSPFW3, reg); | 2408 | I915_WRITE(DSPFW3, reg); |
2331 | 2409 | ||
2332 | /* Display HPLL off SR */ | 2410 | /* Display HPLL off SR */ |
2333 | wm = intel_calculate_wm(clock, &igd_display_hplloff_wm, | 2411 | wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, |
2334 | latency->display_hpll_disable, I915_FIFO_LINE_SIZE); | 2412 | latency->display_hpll_disable, I915_FIFO_LINE_SIZE); |
2335 | reg = I915_READ(DSPFW3); | 2413 | reg = I915_READ(DSPFW3); |
2336 | reg &= 0xfffffe00; | 2414 | reg &= 0xfffffe00; |
@@ -2338,17 +2416,17 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, | |||
2338 | I915_WRITE(DSPFW3, reg); | 2416 | I915_WRITE(DSPFW3, reg); |
2339 | 2417 | ||
2340 | /* cursor HPLL off SR */ | 2418 | /* cursor HPLL off SR */ |
2341 | wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size, | 2419 | wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size, |
2342 | latency->cursor_hpll_disable); | 2420 | latency->cursor_hpll_disable); |
2343 | reg = I915_READ(DSPFW3); | 2421 | reg = I915_READ(DSPFW3); |
2344 | reg &= ~(0x3f << 16); | 2422 | reg &= ~(0x3f << 16); |
2345 | reg |= (wm & 0x3f) << 16; | 2423 | reg |= (wm & 0x3f) << 16; |
2346 | I915_WRITE(DSPFW3, reg); | 2424 | I915_WRITE(DSPFW3, reg); |
2347 | DRM_DEBUG("DSPFW3 register is %x\n", reg); | 2425 | DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); |
2348 | 2426 | ||
2349 | /* activate cxsr */ | 2427 | /* activate cxsr */ |
2350 | reg = I915_READ(DSPFW3); | 2428 | reg = I915_READ(DSPFW3); |
2351 | reg |= IGD_SELF_REFRESH_EN; | 2429 | reg |= PINEVIEW_SELF_REFRESH_EN; |
2352 | I915_WRITE(DSPFW3, reg); | 2430 | I915_WRITE(DSPFW3, reg); |
2353 | 2431 | ||
2354 | DRM_INFO("Big FIFO is enabled\n"); | 2432 | DRM_INFO("Big FIFO is enabled\n"); |
@@ -2384,8 +2462,8 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane) | |||
2384 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - | 2462 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - |
2385 | (dsparb & 0x7f); | 2463 | (dsparb & 0x7f); |
2386 | 2464 | ||
2387 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", | 2465 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2388 | size); | 2466 | plane ? "B" : "A", size); |
2389 | 2467 | ||
2390 | return size; | 2468 | return size; |
2391 | } | 2469 | } |
@@ -2403,8 +2481,8 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane) | |||
2403 | (dsparb & 0x1ff); | 2481 | (dsparb & 0x1ff); |
2404 | size >>= 1; /* Convert to cachelines */ | 2482 | size >>= 1; /* Convert to cachelines */ |
2405 | 2483 | ||
2406 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", | 2484 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2407 | size); | 2485 | plane ? "B" : "A", size); |
2408 | 2486 | ||
2409 | return size; | 2487 | return size; |
2410 | } | 2488 | } |
@@ -2418,7 +2496,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane) | |||
2418 | size = dsparb & 0x7f; | 2496 | size = dsparb & 0x7f; |
2419 | size >>= 2; /* Convert to cachelines */ | 2497 | size >>= 2; /* Convert to cachelines */ |
2420 | 2498 | ||
2421 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", | 2499 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2500 | plane ? "B" : "A", | ||
2422 | size); | 2501 | size); |
2423 | 2502 | ||
2424 | return size; | 2503 | return size; |
@@ -2433,8 +2512,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) | |||
2433 | size = dsparb & 0x7f; | 2512 | size = dsparb & 0x7f; |
2434 | size >>= 1; /* Convert to cachelines */ | 2513 | size >>= 1; /* Convert to cachelines */ |
2435 | 2514 | ||
2436 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", | 2515 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2437 | size); | 2516 | plane ? "B" : "A", size); |
2438 | 2517 | ||
2439 | return size; | 2518 | return size; |
2440 | } | 2519 | } |
@@ -2509,15 +2588,39 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2509 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | 2588 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); |
2510 | } | 2589 | } |
2511 | 2590 | ||
2512 | static void i965_update_wm(struct drm_device *dev, int unused, int unused2, | 2591 | static void i965_update_wm(struct drm_device *dev, int planea_clock, |
2513 | int unused3, int unused4) | 2592 | int planeb_clock, int sr_hdisplay, int pixel_size) |
2514 | { | 2593 | { |
2515 | struct drm_i915_private *dev_priv = dev->dev_private; | 2594 | struct drm_i915_private *dev_priv = dev->dev_private; |
2595 | unsigned long line_time_us; | ||
2596 | int sr_clock, sr_entries, srwm = 1; | ||
2597 | |||
2598 | /* Calc sr entries for one plane configs */ | ||
2599 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | ||
2600 | /* self-refresh has much higher latency */ | ||
2601 | const static int sr_latency_ns = 12000; | ||
2602 | |||
2603 | sr_clock = planea_clock ? planea_clock : planeb_clock; | ||
2604 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | ||
2605 | |||
2606 | /* Use ns/us then divide to preserve precision */ | ||
2607 | sr_entries = (((sr_latency_ns / line_time_us) + 1) * | ||
2608 | pixel_size * sr_hdisplay) / 1000; | ||
2609 | sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1); | ||
2610 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | ||
2611 | srwm = I945_FIFO_SIZE - sr_entries; | ||
2612 | if (srwm < 0) | ||
2613 | srwm = 1; | ||
2614 | srwm &= 0x3f; | ||
2615 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | ||
2616 | } | ||
2516 | 2617 | ||
2517 | DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n"); | 2618 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", |
2619 | srwm); | ||
2518 | 2620 | ||
2519 | /* 965 has limitations... */ | 2621 | /* 965 has limitations... */ |
2520 | I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0)); | 2622 | I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | |
2623 | (8 << 0)); | ||
2521 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); | 2624 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); |
2522 | } | 2625 | } |
2523 | 2626 | ||
@@ -2553,7 +2656,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
2553 | pixel_size, latency_ns); | 2656 | pixel_size, latency_ns); |
2554 | planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params, | 2657 | planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params, |
2555 | pixel_size, latency_ns); | 2658 | pixel_size, latency_ns); |
2556 | DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); | 2659 | DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); |
2557 | 2660 | ||
2558 | /* | 2661 | /* |
2559 | * Overlay gets an aggressive default since video jitter is bad. | 2662 | * Overlay gets an aggressive default since video jitter is bad. |
@@ -2573,14 +2676,14 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
2573 | sr_entries = (((sr_latency_ns / line_time_us) + 1) * | 2676 | sr_entries = (((sr_latency_ns / line_time_us) + 1) * |
2574 | pixel_size * sr_hdisplay) / 1000; | 2677 | pixel_size * sr_hdisplay) / 1000; |
2575 | sr_entries = roundup(sr_entries / cacheline_size, 1); | 2678 | sr_entries = roundup(sr_entries / cacheline_size, 1); |
2576 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | 2679 | DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); |
2577 | srwm = total_size - sr_entries; | 2680 | srwm = total_size - sr_entries; |
2578 | if (srwm < 0) | 2681 | if (srwm < 0) |
2579 | srwm = 1; | 2682 | srwm = 1; |
2580 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); | 2683 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); |
2581 | } | 2684 | } |
2582 | 2685 | ||
2583 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | 2686 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", |
2584 | planea_wm, planeb_wm, cwm, srwm); | 2687 | planea_wm, planeb_wm, cwm, srwm); |
2585 | 2688 | ||
2586 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); | 2689 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); |
@@ -2607,7 +2710,7 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | |||
2607 | pixel_size, latency_ns); | 2710 | pixel_size, latency_ns); |
2608 | fwater_lo |= (3<<8) | planea_wm; | 2711 | fwater_lo |= (3<<8) | planea_wm; |
2609 | 2712 | ||
2610 | DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm); | 2713 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); |
2611 | 2714 | ||
2612 | I915_WRITE(FW_BLC, fwater_lo); | 2715 | I915_WRITE(FW_BLC, fwater_lo); |
2613 | } | 2716 | } |
@@ -2661,11 +2764,11 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
2661 | if (crtc->enabled) { | 2764 | if (crtc->enabled) { |
2662 | enabled++; | 2765 | enabled++; |
2663 | if (intel_crtc->plane == 0) { | 2766 | if (intel_crtc->plane == 0) { |
2664 | DRM_DEBUG("plane A (pipe %d) clock: %d\n", | 2767 | DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n", |
2665 | intel_crtc->pipe, crtc->mode.clock); | 2768 | intel_crtc->pipe, crtc->mode.clock); |
2666 | planea_clock = crtc->mode.clock; | 2769 | planea_clock = crtc->mode.clock; |
2667 | } else { | 2770 | } else { |
2668 | DRM_DEBUG("plane B (pipe %d) clock: %d\n", | 2771 | DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n", |
2669 | intel_crtc->pipe, crtc->mode.clock); | 2772 | intel_crtc->pipe, crtc->mode.clock); |
2670 | planeb_clock = crtc->mode.clock; | 2773 | planeb_clock = crtc->mode.clock; |
2671 | } | 2774 | } |
@@ -2682,10 +2785,10 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
2682 | return; | 2785 | return; |
2683 | 2786 | ||
2684 | /* Single plane configs can enable self refresh */ | 2787 | /* Single plane configs can enable self refresh */ |
2685 | if (enabled == 1 && IS_IGD(dev)) | 2788 | if (enabled == 1 && IS_PINEVIEW(dev)) |
2686 | igd_enable_cxsr(dev, sr_clock, pixel_size); | 2789 | pineview_enable_cxsr(dev, sr_clock, pixel_size); |
2687 | else if (IS_IGD(dev)) | 2790 | else if (IS_PINEVIEW(dev)) |
2688 | igd_disable_cxsr(dev); | 2791 | pineview_disable_cxsr(dev); |
2689 | 2792 | ||
2690 | dev_priv->display.update_wm(dev, planea_clock, planeb_clock, | 2793 | dev_priv->display.update_wm(dev, planea_clock, planeb_clock, |
2691 | sr_hdisplay, pixel_size); | 2794 | sr_hdisplay, pixel_size); |
@@ -2779,10 +2882,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2779 | 2882 | ||
2780 | if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { | 2883 | if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { |
2781 | refclk = dev_priv->lvds_ssc_freq * 1000; | 2884 | refclk = dev_priv->lvds_ssc_freq * 1000; |
2782 | DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000); | 2885 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
2886 | refclk / 1000); | ||
2783 | } else if (IS_I9XX(dev)) { | 2887 | } else if (IS_I9XX(dev)) { |
2784 | refclk = 96000; | 2888 | refclk = 96000; |
2785 | if (IS_IGDNG(dev)) | 2889 | if (IS_IRONLAKE(dev)) |
2786 | refclk = 120000; /* 120Mhz refclk */ | 2890 | refclk = 120000; /* 120Mhz refclk */ |
2787 | } else { | 2891 | } else { |
2788 | refclk = 48000; | 2892 | refclk = 48000; |
@@ -2802,14 +2906,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2802 | return -EINVAL; | 2906 | return -EINVAL; |
2803 | } | 2907 | } |
2804 | 2908 | ||
2805 | if (limit->find_reduced_pll && dev_priv->lvds_downclock_avail) { | 2909 | if (is_lvds && limit->find_reduced_pll && |
2910 | dev_priv->lvds_downclock_avail) { | ||
2806 | memcpy(&reduced_clock, &clock, sizeof(intel_clock_t)); | 2911 | memcpy(&reduced_clock, &clock, sizeof(intel_clock_t)); |
2807 | has_reduced_clock = limit->find_reduced_pll(limit, crtc, | 2912 | has_reduced_clock = limit->find_reduced_pll(limit, crtc, |
2808 | (adjusted_mode->clock*3/4), | 2913 | dev_priv->lvds_downclock, |
2809 | refclk, | 2914 | refclk, |
2810 | &reduced_clock); | 2915 | &reduced_clock); |
2916 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { | ||
2917 | /* | ||
2918 | * If the different P is found, it means that we can't | ||
2919 | * switch the display clock by using the FP0/FP1. | ||
2920 | * In such case we will disable the LVDS downclock | ||
2921 | * feature. | ||
2922 | */ | ||
2923 | DRM_DEBUG_KMS("Different P is found for " | ||
2924 | "LVDS clock/downclock\n"); | ||
2925 | has_reduced_clock = 0; | ||
2926 | } | ||
2811 | } | 2927 | } |
2812 | |||
2813 | /* SDVO TV has fixed PLL values depend on its clock range, | 2928 | /* SDVO TV has fixed PLL values depend on its clock range, |
2814 | this mirrors vbios setting. */ | 2929 | this mirrors vbios setting. */ |
2815 | if (is_sdvo && is_tv) { | 2930 | if (is_sdvo && is_tv) { |
@@ -2831,7 +2946,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2831 | } | 2946 | } |
2832 | 2947 | ||
2833 | /* FDI link */ | 2948 | /* FDI link */ |
2834 | if (IS_IGDNG(dev)) { | 2949 | if (IS_IRONLAKE(dev)) { |
2835 | int lane, link_bw, bpp; | 2950 | int lane, link_bw, bpp; |
2836 | /* eDP doesn't require FDI link, so just set DP M/N | 2951 | /* eDP doesn't require FDI link, so just set DP M/N |
2837 | according to current link config */ | 2952 | according to current link config */ |
@@ -2873,8 +2988,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2873 | bpp = 24; | 2988 | bpp = 24; |
2874 | } | 2989 | } |
2875 | 2990 | ||
2876 | igdng_compute_m_n(bpp, lane, target_clock, | 2991 | ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); |
2877 | link_bw, &m_n); | ||
2878 | } | 2992 | } |
2879 | 2993 | ||
2880 | /* Ironlake: try to setup display ref clock before DPLL | 2994 | /* Ironlake: try to setup display ref clock before DPLL |
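On the FDI link setup above: ironlake_compute_m_n() (its body is not part of this hunk) produces the data M/N pair the pipe uses to pace pixel data onto the fixed-rate FDI link. The ratio is, roughly, payload bandwidth over link bandwidth, that is bits per pixel times pixel clock against link clock times lane count times 8 bits. The following is a simplified standalone sketch of that idea with example numbers, not the driver's implementation:

    #include <stdint.h>
    #include <stdio.h>

    /* Reduce the payload/link bandwidth ratio to a small M/N pair. */
    static uint64_t gcd64(uint64_t a, uint64_t b)
    {
            while (b) {
                    uint64_t t = a % b;
                    a = b;
                    b = t;
            }
            return a;
    }

    int main(void)
    {
            /* Example numbers only. */
            int bpp = 24;
            int pixel_clock = 148500;    /* kHz */
            int link_clock = 270000;     /* kHz */
            int lanes = 4;

            uint64_t m = (uint64_t)bpp * pixel_clock;
            uint64_t n = (uint64_t)link_clock * lanes * 8;
            uint64_t g = gcd64(m, n);

            printf("data M/N = %llu/%llu\n",
                   (unsigned long long)(m / g), (unsigned long long)(n / g));
            return 0;                    /* prints 33/80 for these inputs */
    }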
@@ -2882,7 +2996,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2882 | * PCH B stepping, previous chipset stepping should be | 2996 | * PCH B stepping, previous chipset stepping should be |
2883 | * ignoring this setting. | 2997 | * ignoring this setting. |
2884 | */ | 2998 | */ |
2885 | if (IS_IGDNG(dev)) { | 2999 | if (IS_IRONLAKE(dev)) { |
2886 | temp = I915_READ(PCH_DREF_CONTROL); | 3000 | temp = I915_READ(PCH_DREF_CONTROL); |
2887 | /* Always enable nonspread source */ | 3001 | /* Always enable nonspread source */ |
2888 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | 3002 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; |
@@ -2917,7 +3031,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2917 | } | 3031 | } |
2918 | } | 3032 | } |
2919 | 3033 | ||
2920 | if (IS_IGD(dev)) { | 3034 | if (IS_PINEVIEW(dev)) { |
2921 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; | 3035 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; |
2922 | if (has_reduced_clock) | 3036 | if (has_reduced_clock) |
2923 | fp2 = (1 << reduced_clock.n) << 16 | | 3037 | fp2 = (1 << reduced_clock.n) << 16 | |
@@ -2929,7 +3043,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2929 | reduced_clock.m2; | 3043 | reduced_clock.m2; |
2930 | } | 3044 | } |
2931 | 3045 | ||
2932 | if (!IS_IGDNG(dev)) | 3046 | if (!IS_IRONLAKE(dev)) |
2933 | dpll = DPLL_VGA_MODE_DIS; | 3047 | dpll = DPLL_VGA_MODE_DIS; |
2934 | 3048 | ||
2935 | if (IS_I9XX(dev)) { | 3049 | if (IS_I9XX(dev)) { |
@@ -2942,19 +3056,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2942 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | 3056 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; |
2943 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 3057 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
2944 | dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; | 3058 | dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; |
2945 | else if (IS_IGDNG(dev)) | 3059 | else if (IS_IRONLAKE(dev)) |
2946 | dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; | 3060 | dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; |
2947 | } | 3061 | } |
2948 | if (is_dp) | 3062 | if (is_dp) |
2949 | dpll |= DPLL_DVO_HIGH_SPEED; | 3063 | dpll |= DPLL_DVO_HIGH_SPEED; |
2950 | 3064 | ||
2951 | /* compute bitmask from p1 value */ | 3065 | /* compute bitmask from p1 value */ |
2952 | if (IS_IGD(dev)) | 3066 | if (IS_PINEVIEW(dev)) |
2953 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD; | 3067 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; |
2954 | else { | 3068 | else { |
2955 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | 3069 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
2956 | /* also FPA1 */ | 3070 | /* also FPA1 */ |
2957 | if (IS_IGDNG(dev)) | 3071 | if (IS_IRONLAKE(dev)) |
2958 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | 3072 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; |
2959 | if (IS_G4X(dev) && has_reduced_clock) | 3073 | if (IS_G4X(dev) && has_reduced_clock) |
2960 | dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | 3074 | dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; |
@@ -2973,7 +3087,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2973 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; | 3087 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; |
2974 | break; | 3088 | break; |
2975 | } | 3089 | } |
2976 | if (IS_I965G(dev) && !IS_IGDNG(dev)) | 3090 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) |
2977 | dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); | 3091 | dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); |
2978 | } else { | 3092 | } else { |
2979 | if (is_lvds) { | 3093 | if (is_lvds) { |
@@ -3005,9 +3119,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3005 | /* Set up the display plane register */ | 3119 | /* Set up the display plane register */ |
3006 | dspcntr = DISPPLANE_GAMMA_ENABLE; | 3120 | dspcntr = DISPPLANE_GAMMA_ENABLE; |
3007 | 3121 | ||
3008 | /* IGDNG's plane is forced to pipe, bit 24 is to | 3122 | /* Ironlake's plane is forced to pipe, bit 24 is to |
3009 | enable color space conversion */ | 3123 | enable color space conversion */ |
3010 | if (!IS_IGDNG(dev)) { | 3124 | if (!IS_IRONLAKE(dev)) { |
3011 | if (pipe == 0) | 3125 | if (pipe == 0) |
3012 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; | 3126 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; |
3013 | else | 3127 | else |
@@ -3034,20 +3148,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3034 | 3148 | ||
3035 | 3149 | ||
3036 | /* Disable the panel fitter if it was on our pipe */ | 3150 | /* Disable the panel fitter if it was on our pipe */ |
3037 | if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe) | 3151 | if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe) |
3038 | I915_WRITE(PFIT_CONTROL, 0); | 3152 | I915_WRITE(PFIT_CONTROL, 0); |
3039 | 3153 | ||
3040 | DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); | 3154 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); |
3041 | drm_mode_debug_printmodeline(mode); | 3155 | drm_mode_debug_printmodeline(mode); |
3042 | 3156 | ||
3043 | /* assign to IGDNG registers */ | 3157 | /* assign to Ironlake registers */ |
3044 | if (IS_IGDNG(dev)) { | 3158 | if (IS_IRONLAKE(dev)) { |
3045 | fp_reg = pch_fp_reg; | 3159 | fp_reg = pch_fp_reg; |
3046 | dpll_reg = pch_dpll_reg; | 3160 | dpll_reg = pch_dpll_reg; |
3047 | } | 3161 | } |
3048 | 3162 | ||
3049 | if (is_edp) { | 3163 | if (is_edp) { |
3050 | igdng_disable_pll_edp(crtc); | 3164 | ironlake_disable_pll_edp(crtc); |
3051 | } else if ((dpll & DPLL_VCO_ENABLE)) { | 3165 | } else if ((dpll & DPLL_VCO_ENABLE)) { |
3052 | I915_WRITE(fp_reg, fp); | 3166 | I915_WRITE(fp_reg, fp); |
3053 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); | 3167 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); |
@@ -3062,7 +3176,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3062 | if (is_lvds) { | 3176 | if (is_lvds) { |
3063 | u32 lvds; | 3177 | u32 lvds; |
3064 | 3178 | ||
3065 | if (IS_IGDNG(dev)) | 3179 | if (IS_IRONLAKE(dev)) |
3066 | lvds_reg = PCH_LVDS; | 3180 | lvds_reg = PCH_LVDS; |
3067 | 3181 | ||
3068 | lvds = I915_READ(lvds_reg); | 3182 | lvds = I915_READ(lvds_reg); |
@@ -3095,7 +3209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3095 | /* Wait for the clocks to stabilize. */ | 3209 | /* Wait for the clocks to stabilize. */ |
3096 | udelay(150); | 3210 | udelay(150); |
3097 | 3211 | ||
3098 | if (IS_I965G(dev) && !IS_IGDNG(dev)) { | 3212 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { |
3099 | if (is_sdvo) { | 3213 | if (is_sdvo) { |
3100 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | 3214 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; |
3101 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | | 3215 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | |
@@ -3115,14 +3229,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3115 | I915_WRITE(fp_reg + 4, fp2); | 3229 | I915_WRITE(fp_reg + 4, fp2); |
3116 | intel_crtc->lowfreq_avail = true; | 3230 | intel_crtc->lowfreq_avail = true; |
3117 | if (HAS_PIPE_CXSR(dev)) { | 3231 | if (HAS_PIPE_CXSR(dev)) { |
3118 | DRM_DEBUG("enabling CxSR downclocking\n"); | 3232 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
3119 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; | 3233 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
3120 | } | 3234 | } |
3121 | } else { | 3235 | } else { |
3122 | I915_WRITE(fp_reg + 4, fp); | 3236 | I915_WRITE(fp_reg + 4, fp); |
3123 | intel_crtc->lowfreq_avail = false; | 3237 | intel_crtc->lowfreq_avail = false; |
3124 | if (HAS_PIPE_CXSR(dev)) { | 3238 | if (HAS_PIPE_CXSR(dev)) { |
3125 | DRM_DEBUG("disabling CxSR downclocking\n"); | 3239 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
3126 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; | 3240 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; |
3127 | } | 3241 | } |
3128 | } | 3242 | } |
@@ -3142,21 +3256,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3142 | /* pipesrc and dspsize control the size that is scaled from, which should | 3256 | /* pipesrc and dspsize control the size that is scaled from, which should |
3143 | * always be the user's requested size. | 3257 | * always be the user's requested size. |
3144 | */ | 3258 | */ |
3145 | if (!IS_IGDNG(dev)) { | 3259 | if (!IS_IRONLAKE(dev)) { |
3146 | I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | | 3260 | I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | |
3147 | (mode->hdisplay - 1)); | 3261 | (mode->hdisplay - 1)); |
3148 | I915_WRITE(dsppos_reg, 0); | 3262 | I915_WRITE(dsppos_reg, 0); |
3149 | } | 3263 | } |
3150 | I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); | 3264 | I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); |
3151 | 3265 | ||
3152 | if (IS_IGDNG(dev)) { | 3266 | if (IS_IRONLAKE(dev)) { |
3153 | I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); | 3267 | I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); |
3154 | I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); | 3268 | I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); |
3155 | I915_WRITE(link_m1_reg, m_n.link_m); | 3269 | I915_WRITE(link_m1_reg, m_n.link_m); |
3156 | I915_WRITE(link_n1_reg, m_n.link_n); | 3270 | I915_WRITE(link_n1_reg, m_n.link_n); |
3157 | 3271 | ||
3158 | if (is_edp) { | 3272 | if (is_edp) { |
3159 | igdng_set_pll_edp(crtc, adjusted_mode->clock); | 3273 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
3160 | } else { | 3274 | } else { |
3161 | /* enable FDI RX PLL too */ | 3275 | /* enable FDI RX PLL too */ |
3162 | temp = I915_READ(fdi_rx_reg); | 3276 | temp = I915_READ(fdi_rx_reg); |
@@ -3170,7 +3284,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3170 | 3284 | ||
3171 | intel_wait_for_vblank(dev); | 3285 | intel_wait_for_vblank(dev); |
3172 | 3286 | ||
3173 | if (IS_IGDNG(dev)) { | 3287 | if (IS_IRONLAKE(dev)) { |
3174 | /* enable address swizzle for tiling buffer */ | 3288 | /* enable address swizzle for tiling buffer */ |
3175 | temp = I915_READ(DISP_ARB_CTL); | 3289 | temp = I915_READ(DISP_ARB_CTL); |
3176 | I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); | 3290 | I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); |
@@ -3204,8 +3318,8 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) | |||
3204 | if (!crtc->enabled) | 3318 | if (!crtc->enabled) |
3205 | return; | 3319 | return; |
3206 | 3320 | ||
3207 | /* use legacy palette for IGDNG */ | 3321 | /* use legacy palette for Ironlake */ |
3208 | if (IS_IGDNG(dev)) | 3322 | if (IS_IRONLAKE(dev)) |
3209 | palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : | 3323 | palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : |
3210 | LGC_PALETTE_B; | 3324 | LGC_PALETTE_B; |
3211 | 3325 | ||
@@ -3234,11 +3348,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3234 | size_t addr; | 3348 | size_t addr; |
3235 | int ret; | 3349 | int ret; |
3236 | 3350 | ||
3237 | DRM_DEBUG("\n"); | 3351 | DRM_DEBUG_KMS("\n"); |
3238 | 3352 | ||
3239 | /* if we want to turn off the cursor ignore width and height */ | 3353 | /* if we want to turn off the cursor ignore width and height */ |
3240 | if (!handle) { | 3354 | if (!handle) { |
3241 | DRM_DEBUG("cursor off\n"); | 3355 | DRM_DEBUG_KMS("cursor off\n"); |
3242 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { | 3356 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { |
3243 | temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | 3357 | temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); |
3244 | temp |= CURSOR_MODE_DISABLE; | 3358 | temp |= CURSOR_MODE_DISABLE; |
@@ -3546,18 +3660,18 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
3546 | fp = I915_READ((pipe == 0) ? FPA1 : FPB1); | 3660 | fp = I915_READ((pipe == 0) ? FPA1 : FPB1); |
3547 | 3661 | ||
3548 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; | 3662 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; |
3549 | if (IS_IGD(dev)) { | 3663 | if (IS_PINEVIEW(dev)) { |
3550 | clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; | 3664 | clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; |
3551 | clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT; | 3665 | clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; |
3552 | } else { | 3666 | } else { |
3553 | clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; | 3667 | clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; |
3554 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; | 3668 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; |
3555 | } | 3669 | } |
3556 | 3670 | ||
3557 | if (IS_I9XX(dev)) { | 3671 | if (IS_I9XX(dev)) { |
3558 | if (IS_IGD(dev)) | 3672 | if (IS_PINEVIEW(dev)) |
3559 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >> | 3673 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> |
3560 | DPLL_FPA01_P1_POST_DIV_SHIFT_IGD); | 3674 | DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); |
3561 | else | 3675 | else |
3562 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> | 3676 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> |
3563 | DPLL_FPA01_P1_POST_DIV_SHIFT); | 3677 | DPLL_FPA01_P1_POST_DIV_SHIFT); |
@@ -3572,7 +3686,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
3572 | 7 : 14; | 3686 | 7 : 14; |
3573 | break; | 3687 | break; |
3574 | default: | 3688 | default: |
3575 | DRM_DEBUG("Unknown DPLL mode %08x in programmed " | 3689 | DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " |
3576 | "mode\n", (int)(dpll & DPLL_MODE_MASK)); | 3690 | "mode\n", (int)(dpll & DPLL_MODE_MASK)); |
3577 | return 0; | 3691 | return 0; |
3578 | } | 3692 | } |
@@ -3658,7 +3772,7 @@ static void intel_gpu_idle_timer(unsigned long arg) | |||
3658 | struct drm_device *dev = (struct drm_device *)arg; | 3772 | struct drm_device *dev = (struct drm_device *)arg; |
3659 | drm_i915_private_t *dev_priv = dev->dev_private; | 3773 | drm_i915_private_t *dev_priv = dev->dev_private; |
3660 | 3774 | ||
3661 | DRM_DEBUG("idle timer fired, downclocking\n"); | 3775 | DRM_DEBUG_DRIVER("idle timer fired, downclocking\n"); |
3662 | 3776 | ||
3663 | dev_priv->busy = false; | 3777 | dev_priv->busy = false; |
3664 | 3778 | ||
@@ -3669,11 +3783,11 @@ void intel_increase_renderclock(struct drm_device *dev, bool schedule) | |||
3669 | { | 3783 | { |
3670 | drm_i915_private_t *dev_priv = dev->dev_private; | 3784 | drm_i915_private_t *dev_priv = dev->dev_private; |
3671 | 3785 | ||
3672 | if (IS_IGDNG(dev)) | 3786 | if (IS_IRONLAKE(dev)) |
3673 | return; | 3787 | return; |
3674 | 3788 | ||
3675 | if (!dev_priv->render_reclock_avail) { | 3789 | if (!dev_priv->render_reclock_avail) { |
3676 | DRM_DEBUG("not reclocking render clock\n"); | 3790 | DRM_DEBUG_DRIVER("not reclocking render clock\n"); |
3677 | return; | 3791 | return; |
3678 | } | 3792 | } |
3679 | 3793 | ||
@@ -3682,7 +3796,7 @@ void intel_increase_renderclock(struct drm_device *dev, bool schedule) | |||
3682 | pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock); | 3796 | pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock); |
3683 | else if (IS_I85X(dev)) | 3797 | else if (IS_I85X(dev)) |
3684 | pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock); | 3798 | pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock); |
3685 | DRM_DEBUG("increasing render clock frequency\n"); | 3799 | DRM_DEBUG_DRIVER("increasing render clock frequency\n"); |
3686 | 3800 | ||
3687 | /* Schedule downclock */ | 3801 | /* Schedule downclock */ |
3688 | if (schedule) | 3802 | if (schedule) |
@@ -3694,11 +3808,11 @@ void intel_decrease_renderclock(struct drm_device *dev) | |||
3694 | { | 3808 | { |
3695 | drm_i915_private_t *dev_priv = dev->dev_private; | 3809 | drm_i915_private_t *dev_priv = dev->dev_private; |
3696 | 3810 | ||
3697 | if (IS_IGDNG(dev)) | 3811 | if (IS_IRONLAKE(dev)) |
3698 | return; | 3812 | return; |
3699 | 3813 | ||
3700 | if (!dev_priv->render_reclock_avail) { | 3814 | if (!dev_priv->render_reclock_avail) { |
3701 | DRM_DEBUG("not reclocking render clock\n"); | 3815 | DRM_DEBUG_DRIVER("not reclocking render clock\n"); |
3702 | return; | 3816 | return; |
3703 | } | 3817 | } |
3704 | 3818 | ||
@@ -3758,7 +3872,7 @@ void intel_decrease_renderclock(struct drm_device *dev) | |||
3758 | 3872 | ||
3759 | pci_write_config_word(dev->pdev, HPLLCC, hpllcc); | 3873 | pci_write_config_word(dev->pdev, HPLLCC, hpllcc); |
3760 | } | 3874 | } |
3761 | DRM_DEBUG("decreasing render clock frequency\n"); | 3875 | DRM_DEBUG_DRIVER("decreasing render clock frequency\n"); |
3762 | } | 3876 | } |
3763 | 3877 | ||
3764 | /* Note that no increase function is needed for this - increase_renderclock() | 3878 | /* Note that no increase function is needed for this - increase_renderclock() |
@@ -3766,7 +3880,7 @@ void intel_decrease_renderclock(struct drm_device *dev) | |||
3766 | */ | 3880 | */ |
3767 | void intel_decrease_displayclock(struct drm_device *dev) | 3881 | void intel_decrease_displayclock(struct drm_device *dev) |
3768 | { | 3882 | { |
3769 | if (IS_IGDNG(dev)) | 3883 | if (IS_IRONLAKE(dev)) |
3770 | return; | 3884 | return; |
3771 | 3885 | ||
3772 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) || | 3886 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) || |
@@ -3792,7 +3906,7 @@ static void intel_crtc_idle_timer(unsigned long arg) | |||
3792 | struct drm_crtc *crtc = &intel_crtc->base; | 3906 | struct drm_crtc *crtc = &intel_crtc->base; |
3793 | drm_i915_private_t *dev_priv = crtc->dev->dev_private; | 3907 | drm_i915_private_t *dev_priv = crtc->dev->dev_private; |
3794 | 3908 | ||
3795 | DRM_DEBUG("idle timer fired, downclocking\n"); | 3909 | DRM_DEBUG_DRIVER("idle timer fired, downclocking\n"); |
3796 | 3910 | ||
3797 | intel_crtc->busy = false; | 3911 | intel_crtc->busy = false; |
3798 | 3912 | ||
@@ -3808,14 +3922,14 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) | |||
3808 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 3922 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; |
3809 | int dpll = I915_READ(dpll_reg); | 3923 | int dpll = I915_READ(dpll_reg); |
3810 | 3924 | ||
3811 | if (IS_IGDNG(dev)) | 3925 | if (IS_IRONLAKE(dev)) |
3812 | return; | 3926 | return; |
3813 | 3927 | ||
3814 | if (!dev_priv->lvds_downclock_avail) | 3928 | if (!dev_priv->lvds_downclock_avail) |
3815 | return; | 3929 | return; |
3816 | 3930 | ||
3817 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { | 3931 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { |
3818 | DRM_DEBUG("upclocking LVDS\n"); | 3932 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
3819 | 3933 | ||
3820 | /* Unlock panel regs */ | 3934 | /* Unlock panel regs */ |
3821 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); | 3935 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); |
@@ -3826,7 +3940,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) | |||
3826 | intel_wait_for_vblank(dev); | 3940 | intel_wait_for_vblank(dev); |
3827 | dpll = I915_READ(dpll_reg); | 3941 | dpll = I915_READ(dpll_reg); |
3828 | if (dpll & DISPLAY_RATE_SELECT_FPA1) | 3942 | if (dpll & DISPLAY_RATE_SELECT_FPA1) |
3829 | DRM_DEBUG("failed to upclock LVDS!\n"); | 3943 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
3830 | 3944 | ||
3831 | /* ...and lock them again */ | 3945 | /* ...and lock them again */ |
3832 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); | 3946 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); |
@@ -3847,7 +3961,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
3847 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 3961 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; |
3848 | int dpll = I915_READ(dpll_reg); | 3962 | int dpll = I915_READ(dpll_reg); |
3849 | 3963 | ||
3850 | if (IS_IGDNG(dev)) | 3964 | if (IS_IRONLAKE(dev)) |
3851 | return; | 3965 | return; |
3852 | 3966 | ||
3853 | if (!dev_priv->lvds_downclock_avail) | 3967 | if (!dev_priv->lvds_downclock_avail) |
@@ -3858,7 +3972,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
3858 | * the manual case. | 3972 | * the manual case. |
3859 | */ | 3973 | */ |
3860 | if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { | 3974 | if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { |
3861 | DRM_DEBUG("downclocking LVDS\n"); | 3975 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); |
3862 | 3976 | ||
3863 | /* Unlock panel regs */ | 3977 | /* Unlock panel regs */ |
3864 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); | 3978 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); |
@@ -3869,7 +3983,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
3869 | intel_wait_for_vblank(dev); | 3983 | intel_wait_for_vblank(dev); |
3870 | dpll = I915_READ(dpll_reg); | 3984 | dpll = I915_READ(dpll_reg); |
3871 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) | 3985 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) |
3872 | DRM_DEBUG("failed to downclock LVDS!\n"); | 3986 | DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); |
3873 | 3987 | ||
3874 | /* ...and lock them again */ | 3988 | /* ...and lock them again */ |
3875 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); | 3989 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); |
@@ -3936,8 +4050,13 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | |||
3936 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 4050 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
3937 | return; | 4051 | return; |
3938 | 4052 | ||
3939 | dev_priv->busy = true; | 4053 | if (!dev_priv->busy) { |
3940 | intel_increase_renderclock(dev, true); | 4054 | dev_priv->busy = true; |
4055 | intel_increase_renderclock(dev, true); | ||
4056 | } else { | ||
4057 | mod_timer(&dev_priv->idle_timer, jiffies + | ||
4058 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | ||
4059 | } | ||
3941 | 4060 | ||
3942 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 4061 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
3943 | if (!crtc->fb) | 4062 | if (!crtc->fb) |
@@ -3967,6 +4086,158 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) | |||
3967 | kfree(intel_crtc); | 4086 | kfree(intel_crtc); |
3968 | } | 4087 | } |
3969 | 4088 | ||
4089 | struct intel_unpin_work { | ||
4090 | struct work_struct work; | ||
4091 | struct drm_device *dev; | ||
4092 | struct drm_gem_object *obj; | ||
4093 | struct drm_pending_vblank_event *event; | ||
4094 | int pending; | ||
4095 | }; | ||
4096 | |||
4097 | static void intel_unpin_work_fn(struct work_struct *__work) | ||
4098 | { | ||
4099 | struct intel_unpin_work *work = | ||
4100 | container_of(__work, struct intel_unpin_work, work); | ||
4101 | |||
4102 | mutex_lock(&work->dev->struct_mutex); | ||
4103 | i915_gem_object_unpin(work->obj); | ||
4104 | drm_gem_object_unreference(work->obj); | ||
4105 | mutex_unlock(&work->dev->struct_mutex); | ||
4106 | kfree(work); | ||
4107 | } | ||
4108 | |||
4109 | void intel_finish_page_flip(struct drm_device *dev, int pipe) | ||
4110 | { | ||
4111 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4112 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
4113 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
4114 | struct intel_unpin_work *work; | ||
4115 | struct drm_i915_gem_object *obj_priv; | ||
4116 | struct drm_pending_vblank_event *e; | ||
4117 | struct timeval now; | ||
4118 | unsigned long flags; | ||
4119 | |||
4120 | /* Ignore early vblank irqs */ | ||
4121 | if (intel_crtc == NULL) | ||
4122 | return; | ||
4123 | |||
4124 | spin_lock_irqsave(&dev->event_lock, flags); | ||
4125 | work = intel_crtc->unpin_work; | ||
4126 | if (work == NULL || !work->pending) { | ||
4127 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
4128 | return; | ||
4129 | } | ||
4130 | |||
4131 | intel_crtc->unpin_work = NULL; | ||
4132 | drm_vblank_put(dev, intel_crtc->pipe); | ||
4133 | |||
4134 | if (work->event) { | ||
4135 | e = work->event; | ||
4136 | do_gettimeofday(&now); | ||
4137 | e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe); | ||
4138 | e->event.tv_sec = now.tv_sec; | ||
4139 | e->event.tv_usec = now.tv_usec; | ||
4140 | list_add_tail(&e->base.link, | ||
4141 | &e->base.file_priv->event_list); | ||
4142 | wake_up_interruptible(&e->base.file_priv->event_wait); | ||
4143 | } | ||
4144 | |||
4145 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
4146 | |||
4147 | obj_priv = work->obj->driver_private; | ||
4148 | if (atomic_dec_and_test(&obj_priv->pending_flip)) | ||
4149 | DRM_WAKEUP(&dev_priv->pending_flip_queue); | ||
4150 | schedule_work(&work->work); | ||
4151 | } | ||
4152 | |||
4153 | void intel_prepare_page_flip(struct drm_device *dev, int plane) | ||
4154 | { | ||
4155 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4156 | struct intel_crtc *intel_crtc = | ||
4157 | to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); | ||
4158 | unsigned long flags; | ||
4159 | |||
4160 | spin_lock_irqsave(&dev->event_lock, flags); | ||
4161 | if (intel_crtc->unpin_work) | ||
4162 | intel_crtc->unpin_work->pending = 1; | ||
4163 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
4164 | } | ||
4165 | |||
4166 | static int intel_crtc_page_flip(struct drm_crtc *crtc, | ||
4167 | struct drm_framebuffer *fb, | ||
4168 | struct drm_pending_vblank_event *event) | ||
4169 | { | ||
4170 | struct drm_device *dev = crtc->dev; | ||
4171 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4172 | struct intel_framebuffer *intel_fb; | ||
4173 | struct drm_i915_gem_object *obj_priv; | ||
4174 | struct drm_gem_object *obj; | ||
4175 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
4176 | struct intel_unpin_work *work; | ||
4177 | unsigned long flags; | ||
4178 | int ret; | ||
4179 | RING_LOCALS; | ||
4180 | |||
4181 | work = kzalloc(sizeof *work, GFP_KERNEL); | ||
4182 | if (work == NULL) | ||
4183 | return -ENOMEM; | ||
4184 | |||
4185 | mutex_lock(&dev->struct_mutex); | ||
4186 | |||
4187 | work->event = event; | ||
4188 | work->dev = crtc->dev; | ||
4189 | intel_fb = to_intel_framebuffer(crtc->fb); | ||
4190 | work->obj = intel_fb->obj; | ||
4191 | INIT_WORK(&work->work, intel_unpin_work_fn); | ||
4192 | |||
4193 | /* We borrow the event spin lock for protecting unpin_work */ | ||
4194 | spin_lock_irqsave(&dev->event_lock, flags); | ||
4195 | if (intel_crtc->unpin_work) { | ||
4196 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
4197 | kfree(work); | ||
4198 | mutex_unlock(&dev->struct_mutex); | ||
4199 | return -EBUSY; | ||
4200 | } | ||
4201 | intel_crtc->unpin_work = work; | ||
4202 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
4203 | |||
4204 | intel_fb = to_intel_framebuffer(fb); | ||
4205 | obj = intel_fb->obj; | ||
4206 | |||
4207 | ret = intel_pin_and_fence_fb_obj(dev, obj); | ||
4208 | if (ret != 0) { | ||
4209 | kfree(work); | ||
4210 | mutex_unlock(&dev->struct_mutex); | ||
4211 | return ret; | ||
4212 | } | ||
4213 | |||
4214 | /* Reference the old fb object for the scheduled work. */ | ||
4215 | drm_gem_object_reference(work->obj); | ||
4216 | |||
4217 | crtc->fb = fb; | ||
4218 | i915_gem_object_flush_write_domain(obj); | ||
4219 | drm_vblank_get(dev, intel_crtc->pipe); | ||
4220 | obj_priv = obj->driver_private; | ||
4221 | atomic_inc(&obj_priv->pending_flip); | ||
4222 | |||
4223 | BEGIN_LP_RING(4); | ||
4224 | OUT_RING(MI_DISPLAY_FLIP | | ||
4225 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
4226 | OUT_RING(fb->pitch); | ||
4227 | if (IS_I965G(dev)) { | ||
4228 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | ||
4229 | OUT_RING((fb->width << 16) | fb->height); | ||
4230 | } else { | ||
4231 | OUT_RING(obj_priv->gtt_offset); | ||
4232 | OUT_RING(MI_NOOP); | ||
4233 | } | ||
4234 | ADVANCE_LP_RING(); | ||
4235 | |||
4236 | mutex_unlock(&dev->struct_mutex); | ||
4237 | |||
4238 | return 0; | ||
4239 | } | ||
4240 | |||
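The page flip support added above queues an MI_DISPLAY_FLIP on the ring, tracks the old framebuffer in an intel_unpin_work item, and completes the flip from the vblank path via intel_prepare_page_flip()/intel_finish_page_flip(), optionally posting a vblank event back to the caller. From userspace the new .page_flip hook is typically exercised through libdrm; a minimal sketch, assuming fd, crtc_id and fb_id already come from an earlier open()/modeset and omitting error handling:

    #include <stdint.h>
    #include <stdio.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    static void page_flip_handler(int fd, unsigned int sequence,
                                  unsigned int tv_sec, unsigned int tv_usec,
                                  void *user_data)
    {
            /* Called from drmHandleEvent() once the flip completed on vblank. */
            printf("flip done at %u.%06u (seq %u)\n", tv_sec, tv_usec, sequence);
    }

    void queue_one_flip(int fd, uint32_t crtc_id, uint32_t fb_id)
    {
            drmEventContext evctx = {
                    .version = DRM_EVENT_CONTEXT_VERSION,
                    .page_flip_handler = page_flip_handler,
            };

            /* Ask the kernel to flip crtc_id to fb_id and send a vblank event. */
            drmModePageFlip(fd, crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT, NULL);

            /* Block until the event arrives and dispatch it to the handler. */
            drmHandleEvent(fd, &evctx);
    }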
3970 | static const struct drm_crtc_helper_funcs intel_helper_funcs = { | 4241 | static const struct drm_crtc_helper_funcs intel_helper_funcs = { |
3971 | .dpms = intel_crtc_dpms, | 4242 | .dpms = intel_crtc_dpms, |
3972 | .mode_fixup = intel_crtc_mode_fixup, | 4243 | .mode_fixup = intel_crtc_mode_fixup, |
@@ -3983,11 +4254,13 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { | |||
3983 | .gamma_set = intel_crtc_gamma_set, | 4254 | .gamma_set = intel_crtc_gamma_set, |
3984 | .set_config = drm_crtc_helper_set_config, | 4255 | .set_config = drm_crtc_helper_set_config, |
3985 | .destroy = intel_crtc_destroy, | 4256 | .destroy = intel_crtc_destroy, |
4257 | .page_flip = intel_crtc_page_flip, | ||
3986 | }; | 4258 | }; |
3987 | 4259 | ||
3988 | 4260 | ||
3989 | static void intel_crtc_init(struct drm_device *dev, int pipe) | 4261 | static void intel_crtc_init(struct drm_device *dev, int pipe) |
3990 | { | 4262 | { |
4263 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3991 | struct intel_crtc *intel_crtc; | 4264 | struct intel_crtc *intel_crtc; |
3992 | int i; | 4265 | int i; |
3993 | 4266 | ||
@@ -4010,10 +4283,15 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
4010 | intel_crtc->pipe = pipe; | 4283 | intel_crtc->pipe = pipe; |
4011 | intel_crtc->plane = pipe; | 4284 | intel_crtc->plane = pipe; |
4012 | if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { | 4285 | if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { |
4013 | DRM_DEBUG("swapping pipes & planes for FBC\n"); | 4286 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); |
4014 | intel_crtc->plane = ((pipe == 0) ? 1 : 0); | 4287 | intel_crtc->plane = ((pipe == 0) ? 1 : 0); |
4015 | } | 4288 | } |
4016 | 4289 | ||
4290 | BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || | ||
4291 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); | ||
4292 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; | ||
4293 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; | ||
4294 | |||
4017 | intel_crtc->cursor_addr = 0; | 4295 | intel_crtc->cursor_addr = 0; |
4018 | intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; | 4296 | intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; |
4019 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); | 4297 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); |
@@ -4090,7 +4368,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4090 | if (IS_MOBILE(dev) && !IS_I830(dev)) | 4368 | if (IS_MOBILE(dev) && !IS_I830(dev)) |
4091 | intel_lvds_init(dev); | 4369 | intel_lvds_init(dev); |
4092 | 4370 | ||
4093 | if (IS_IGDNG(dev)) { | 4371 | if (IS_IRONLAKE(dev)) { |
4094 | int found; | 4372 | int found; |
4095 | 4373 | ||
4096 | if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) | 4374 | if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) |
@@ -4118,7 +4396,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4118 | if (I915_READ(PCH_DP_D) & DP_DETECTED) | 4396 | if (I915_READ(PCH_DP_D) & DP_DETECTED) |
4119 | intel_dp_init(dev, PCH_DP_D); | 4397 | intel_dp_init(dev, PCH_DP_D); |
4120 | 4398 | ||
4121 | } else if (IS_I9XX(dev)) { | 4399 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
4122 | bool found = false; | 4400 | bool found = false; |
4123 | 4401 | ||
4124 | if (I915_READ(SDVOB) & SDVO_DETECTED) { | 4402 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
@@ -4145,10 +4423,10 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4145 | 4423 | ||
4146 | if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) | 4424 | if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) |
4147 | intel_dp_init(dev, DP_D); | 4425 | intel_dp_init(dev, DP_D); |
4148 | } else | 4426 | } else if (IS_I8XX(dev)) |
4149 | intel_dvo_init(dev); | 4427 | intel_dvo_init(dev); |
4150 | 4428 | ||
4151 | if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev)) | 4429 | if (SUPPORTS_TV(dev)) |
4152 | intel_tv_init(dev); | 4430 | intel_tv_init(dev); |
4153 | 4431 | ||
4154 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 4432 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
@@ -4257,7 +4535,7 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
4257 | * Disable clock gating reported to work incorrectly according to the | 4535 | * Disable clock gating reported to work incorrectly according to the |
4258 | * specs, but enable as much else as we can. | 4536 | * specs, but enable as much else as we can. |
4259 | */ | 4537 | */ |
4260 | if (IS_IGDNG(dev)) { | 4538 | if (IS_IRONLAKE(dev)) { |
4261 | return; | 4539 | return; |
4262 | } else if (IS_G4X(dev)) { | 4540 | } else if (IS_G4X(dev)) { |
4263 | uint32_t dspclk_gate; | 4541 | uint32_t dspclk_gate; |
@@ -4291,11 +4569,47 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
4291 | dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | | 4569 | dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | |
4292 | DSTATE_DOT_CLOCK_GATING; | 4570 | DSTATE_DOT_CLOCK_GATING; |
4293 | I915_WRITE(D_STATE, dstate); | 4571 | I915_WRITE(D_STATE, dstate); |
4294 | } else if (IS_I855(dev) || IS_I865G(dev)) { | 4572 | } else if (IS_I85X(dev) || IS_I865G(dev)) { |
4295 | I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); | 4573 | I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); |
4296 | } else if (IS_I830(dev)) { | 4574 | } else if (IS_I830(dev)) { |
4297 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | 4575 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); |
4298 | } | 4576 | } |
4577 | |||
4578 | /* | ||
4579 | * GPU can automatically power down the render unit if given a page | ||
4580 | * to save state. | ||
4581 | */ | ||
4582 | if (I915_HAS_RC6(dev)) { | ||
4583 | struct drm_gem_object *pwrctx; | ||
4584 | struct drm_i915_gem_object *obj_priv; | ||
4585 | int ret; | ||
4586 | |||
4587 | pwrctx = drm_gem_object_alloc(dev, 4096); | ||
4588 | if (!pwrctx) { | ||
4589 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); | ||
4590 | goto out; | ||
4591 | } | ||
4592 | |||
4593 | ret = i915_gem_object_pin(pwrctx, 4096); | ||
4594 | if (ret) { | ||
4595 | DRM_ERROR("failed to pin power context: %d\n", ret); | ||
4596 | drm_gem_object_unreference(pwrctx); | ||
4597 | goto out; | ||
4598 | } | ||
4599 | |||
4600 | i915_gem_object_set_to_gtt_domain(pwrctx, 1); | ||
4601 | |||
4602 | obj_priv = pwrctx->driver_private; | ||
4603 | |||
4604 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); | ||
4605 | I915_WRITE(MCHBAR_RENDER_STANDBY, | ||
4606 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | ||
4607 | |||
4608 | dev_priv->pwrctx = pwrctx; | ||
4609 | } | ||
4610 | |||
4611 | out: | ||
4612 | return; | ||
4299 | } | 4613 | } |
4300 | 4614 | ||
4301 | /* Set up chip specific display functions */ | 4615 | /* Set up chip specific display functions */ |
@@ -4304,8 +4618,8 @@ static void intel_init_display(struct drm_device *dev) | |||
4304 | struct drm_i915_private *dev_priv = dev->dev_private; | 4618 | struct drm_i915_private *dev_priv = dev->dev_private; |
4305 | 4619 | ||
4306 | /* We always want a DPMS function */ | 4620 | /* We always want a DPMS function */ |
4307 | if (IS_IGDNG(dev)) | 4621 | if (IS_IRONLAKE(dev)) |
4308 | dev_priv->display.dpms = igdng_crtc_dpms; | 4622 | dev_priv->display.dpms = ironlake_crtc_dpms; |
4309 | else | 4623 | else |
4310 | dev_priv->display.dpms = i9xx_crtc_dpms; | 4624 | dev_priv->display.dpms = i9xx_crtc_dpms; |
4311 | 4625 | ||
@@ -4324,13 +4638,13 @@ static void intel_init_display(struct drm_device *dev) | |||
4324 | } | 4638 | } |
4325 | 4639 | ||
4326 | /* Returns the core display clock speed */ | 4640 | /* Returns the core display clock speed */ |
4327 | if (IS_I945G(dev)) | 4641 | if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev))) |
4328 | dev_priv->display.get_display_clock_speed = | 4642 | dev_priv->display.get_display_clock_speed = |
4329 | i945_get_display_clock_speed; | 4643 | i945_get_display_clock_speed; |
4330 | else if (IS_I915G(dev)) | 4644 | else if (IS_I915G(dev)) |
4331 | dev_priv->display.get_display_clock_speed = | 4645 | dev_priv->display.get_display_clock_speed = |
4332 | i915_get_display_clock_speed; | 4646 | i915_get_display_clock_speed; |
4333 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) | 4647 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) |
4334 | dev_priv->display.get_display_clock_speed = | 4648 | dev_priv->display.get_display_clock_speed = |
4335 | i9xx_misc_get_display_clock_speed; | 4649 | i9xx_misc_get_display_clock_speed; |
4336 | else if (IS_I915GM(dev)) | 4650 | else if (IS_I915GM(dev)) |
@@ -4339,7 +4653,7 @@ static void intel_init_display(struct drm_device *dev) | |||
4339 | else if (IS_I865G(dev)) | 4653 | else if (IS_I865G(dev)) |
4340 | dev_priv->display.get_display_clock_speed = | 4654 | dev_priv->display.get_display_clock_speed = |
4341 | i865_get_display_clock_speed; | 4655 | i865_get_display_clock_speed; |
4342 | else if (IS_I855(dev)) | 4656 | else if (IS_I85X(dev)) |
4343 | dev_priv->display.get_display_clock_speed = | 4657 | dev_priv->display.get_display_clock_speed = |
4344 | i855_get_display_clock_speed; | 4658 | i855_get_display_clock_speed; |
4345 | else /* 852, 830 */ | 4659 | else /* 852, 830 */ |
@@ -4347,7 +4661,7 @@ static void intel_init_display(struct drm_device *dev) | |||
4347 | i830_get_display_clock_speed; | 4661 | i830_get_display_clock_speed; |
4348 | 4662 | ||
4349 | /* For FIFO watermark updates */ | 4663 | /* For FIFO watermark updates */ |
4350 | if (IS_IGDNG(dev)) | 4664 | if (IS_IRONLAKE(dev)) |
4351 | dev_priv->display.update_wm = NULL; | 4665 | dev_priv->display.update_wm = NULL; |
4352 | else if (IS_G4X(dev)) | 4666 | else if (IS_G4X(dev)) |
4353 | dev_priv->display.update_wm = g4x_update_wm; | 4667 | dev_priv->display.update_wm = g4x_update_wm; |
@@ -4403,7 +4717,7 @@ void intel_modeset_init(struct drm_device *dev) | |||
4403 | num_pipe = 2; | 4717 | num_pipe = 2; |
4404 | else | 4718 | else |
4405 | num_pipe = 1; | 4719 | num_pipe = 1; |
4406 | DRM_DEBUG("%d display pipe%s available.\n", | 4720 | DRM_DEBUG_KMS("%d display pipe%s available.\n", |
4407 | num_pipe, num_pipe > 1 ? "s" : ""); | 4721 | num_pipe, num_pipe > 1 ? "s" : ""); |
4408 | 4722 | ||
4409 | if (IS_I85X(dev)) | 4723 | if (IS_I85X(dev)) |
@@ -4422,6 +4736,15 @@ void intel_modeset_init(struct drm_device *dev) | |||
4422 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); | 4736 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
4423 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | 4737 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, |
4424 | (unsigned long)dev); | 4738 | (unsigned long)dev); |
4739 | |||
4740 | intel_setup_overlay(dev); | ||
4741 | |||
4742 | if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev), | ||
4743 | dev_priv->fsb_freq, | ||
4744 | dev_priv->mem_freq)) | ||
4745 | DRM_INFO("failed to find known CxSR latency " | ||
4746 | "(found fsb freq %d, mem freq %d), disabling CxSR\n", | ||
4747 | dev_priv->fsb_freq, dev_priv->mem_freq); | ||
4425 | } | 4748 | } |
4426 | 4749 | ||
4427 | void intel_modeset_cleanup(struct drm_device *dev) | 4750 | void intel_modeset_cleanup(struct drm_device *dev) |
@@ -4445,11 +4768,21 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
4445 | intel_increase_renderclock(dev, false); | 4768 | intel_increase_renderclock(dev, false); |
4446 | del_timer_sync(&dev_priv->idle_timer); | 4769 | del_timer_sync(&dev_priv->idle_timer); |
4447 | 4770 | ||
4448 | mutex_unlock(&dev->struct_mutex); | ||
4449 | |||
4450 | if (dev_priv->display.disable_fbc) | 4771 | if (dev_priv->display.disable_fbc) |
4451 | dev_priv->display.disable_fbc(dev); | 4772 | dev_priv->display.disable_fbc(dev); |
4452 | 4773 | ||
4774 | if (dev_priv->pwrctx) { | ||
4775 | struct drm_i915_gem_object *obj_priv; | ||
4776 | |||
4777 | obj_priv = dev_priv->pwrctx->driver_private; | ||
4778 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); | ||
4779 | I915_READ(PWRCTXA); | ||
4780 | i915_gem_object_unpin(dev_priv->pwrctx); | ||
4781 | drm_gem_object_unreference(dev_priv->pwrctx); | ||
4782 | } | ||
4783 | |||
4784 | mutex_unlock(&dev->struct_mutex); | ||
4785 | |||
4453 | drm_mode_config_cleanup(dev); | 4786 | drm_mode_config_cleanup(dev); |
4454 | } | 4787 | } |
4455 | 4788 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 63424d5db9c6..4e7aa8b7b938 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include "i915_drv.h" | 35 | #include "i915_drv.h" |
36 | #include "drm_dp_helper.h" | 36 | #include "drm_dp_helper.h" |
37 | 37 | ||
38 | |||
38 | #define DP_LINK_STATUS_SIZE 6 | 39 | #define DP_LINK_STATUS_SIZE 6 |
39 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) | 40 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) |
40 | 41 | ||
@@ -223,8 +224,8 @@ intel_dp_aux_ch(struct intel_output *intel_output, | |||
223 | */ | 224 | */ |
224 | if (IS_eDP(intel_output)) | 225 | if (IS_eDP(intel_output)) |
225 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ | 226 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ |
226 | else if (IS_IGDNG(dev)) | 227 | else if (IS_IRONLAKE(dev)) |
227 | aux_clock_divider = 62; /* IGDNG: input clock fixed at 125Mhz */ | 228 | aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ |
228 | else | 229 | else |
229 | aux_clock_divider = intel_hrawclk(dev) / 2; | 230 | aux_clock_divider = intel_hrawclk(dev) / 2; |
230 | 231 | ||
@@ -282,7 +283,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, | |||
282 | /* Timeouts occur when the device isn't connected, so they're | 283 | /* Timeouts occur when the device isn't connected, so they're |
283 | * "normal" -- don't fill the kernel log with these */ | 284 | * "normal" -- don't fill the kernel log with these */ |
284 | if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { | 285 | if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { |
285 | DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status); | 286 | DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); |
286 | return -ETIMEDOUT; | 287 | return -ETIMEDOUT; |
287 | } | 288 | } |
288 | 289 | ||
@@ -432,7 +433,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
432 | msg, msg_bytes, | 433 | msg, msg_bytes, |
433 | reply, reply_bytes); | 434 | reply, reply_bytes); |
434 | if (ret < 0) { | 435 | if (ret < 0) { |
435 | DRM_DEBUG("aux_ch failed %d\n", ret); | 436 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); |
436 | return ret; | 437 | return ret; |
437 | } | 438 | } |
438 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | 439 | switch (reply[0] & AUX_I2C_REPLY_MASK) { |
@@ -442,10 +443,10 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
442 | } | 443 | } |
443 | return reply_bytes - 1; | 444 | return reply_bytes - 1; |
444 | case AUX_I2C_REPLY_NACK: | 445 | case AUX_I2C_REPLY_NACK: |
445 | DRM_DEBUG("aux_ch nack\n"); | 446 | DRM_DEBUG_KMS("aux_ch nack\n"); |
446 | return -EREMOTEIO; | 447 | return -EREMOTEIO; |
447 | case AUX_I2C_REPLY_DEFER: | 448 | case AUX_I2C_REPLY_DEFER: |
448 | DRM_DEBUG("aux_ch defer\n"); | 449 | DRM_DEBUG_KMS("aux_ch defer\n"); |
449 | udelay(100); | 450 | udelay(100); |
450 | break; | 451 | break; |
451 | default: | 452 | default: |
@@ -495,7 +496,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
495 | dp_priv->link_bw = bws[clock]; | 496 | dp_priv->link_bw = bws[clock]; |
496 | dp_priv->lane_count = lane_count; | 497 | dp_priv->lane_count = lane_count; |
497 | adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); | 498 | adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); |
498 | DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n", | 499 | DRM_DEBUG_KMS("Display port link bw %02x lane " |
500 | "count %d clock %d\n", | ||
499 | dp_priv->link_bw, dp_priv->lane_count, | 501 | dp_priv->link_bw, dp_priv->lane_count, |
500 | adjusted_mode->clock); | 502 | adjusted_mode->clock); |
501 | return true; | 503 | return true; |
@@ -574,7 +576,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
574 | intel_dp_compute_m_n(3, lane_count, | 576 | intel_dp_compute_m_n(3, lane_count, |
575 | mode->clock, adjusted_mode->clock, &m_n); | 577 | mode->clock, adjusted_mode->clock, &m_n); |
576 | 578 | ||
577 | if (IS_IGDNG(dev)) { | 579 | if (IS_IRONLAKE(dev)) { |
578 | if (intel_crtc->pipe == 0) { | 580 | if (intel_crtc->pipe == 0) { |
579 | I915_WRITE(TRANSA_DATA_M1, | 581 | I915_WRITE(TRANSA_DATA_M1, |
580 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | 582 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | |
@@ -666,23 +668,23 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
666 | } | 668 | } |
667 | } | 669 | } |
668 | 670 | ||
669 | static void igdng_edp_backlight_on (struct drm_device *dev) | 671 | static void ironlake_edp_backlight_on (struct drm_device *dev) |
670 | { | 672 | { |
671 | struct drm_i915_private *dev_priv = dev->dev_private; | 673 | struct drm_i915_private *dev_priv = dev->dev_private; |
672 | u32 pp; | 674 | u32 pp; |
673 | 675 | ||
674 | DRM_DEBUG("\n"); | 676 | DRM_DEBUG_KMS("\n"); |
675 | pp = I915_READ(PCH_PP_CONTROL); | 677 | pp = I915_READ(PCH_PP_CONTROL); |
676 | pp |= EDP_BLC_ENABLE; | 678 | pp |= EDP_BLC_ENABLE; |
677 | I915_WRITE(PCH_PP_CONTROL, pp); | 679 | I915_WRITE(PCH_PP_CONTROL, pp); |
678 | } | 680 | } |
679 | 681 | ||
680 | static void igdng_edp_backlight_off (struct drm_device *dev) | 682 | static void ironlake_edp_backlight_off (struct drm_device *dev) |
681 | { | 683 | { |
682 | struct drm_i915_private *dev_priv = dev->dev_private; | 684 | struct drm_i915_private *dev_priv = dev->dev_private; |
683 | u32 pp; | 685 | u32 pp; |
684 | 686 | ||
685 | DRM_DEBUG("\n"); | 687 | DRM_DEBUG_KMS("\n"); |
686 | pp = I915_READ(PCH_PP_CONTROL); | 688 | pp = I915_READ(PCH_PP_CONTROL); |
687 | pp &= ~EDP_BLC_ENABLE; | 689 | pp &= ~EDP_BLC_ENABLE; |
688 | I915_WRITE(PCH_PP_CONTROL, pp); | 690 | I915_WRITE(PCH_PP_CONTROL, pp); |
@@ -701,13 +703,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
701 | if (dp_reg & DP_PORT_EN) { | 703 | if (dp_reg & DP_PORT_EN) { |
702 | intel_dp_link_down(intel_output, dp_priv->DP); | 704 | intel_dp_link_down(intel_output, dp_priv->DP); |
703 | if (IS_eDP(intel_output)) | 705 | if (IS_eDP(intel_output)) |
704 | igdng_edp_backlight_off(dev); | 706 | ironlake_edp_backlight_off(dev); |
705 | } | 707 | } |
706 | } else { | 708 | } else { |
707 | if (!(dp_reg & DP_PORT_EN)) { | 709 | if (!(dp_reg & DP_PORT_EN)) { |
708 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); | 710 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); |
709 | if (IS_eDP(intel_output)) | 711 | if (IS_eDP(intel_output)) |
710 | igdng_edp_backlight_on(dev); | 712 | ironlake_edp_backlight_on(dev); |
711 | } | 713 | } |
712 | } | 714 | } |
713 | dp_priv->dpms_mode = mode; | 715 | dp_priv->dpms_mode = mode; |
@@ -1070,7 +1072,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) | |||
1070 | struct drm_i915_private *dev_priv = dev->dev_private; | 1072 | struct drm_i915_private *dev_priv = dev->dev_private; |
1071 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1073 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; |
1072 | 1074 | ||
1073 | DRM_DEBUG("\n"); | 1075 | DRM_DEBUG_KMS("\n"); |
1074 | 1076 | ||
1075 | if (IS_eDP(intel_output)) { | 1077 | if (IS_eDP(intel_output)) { |
1076 | DP &= ~DP_PLL_ENABLE; | 1078 | DP &= ~DP_PLL_ENABLE; |
@@ -1131,7 +1133,7 @@ intel_dp_check_link_status(struct intel_output *intel_output) | |||
1131 | } | 1133 | } |
1132 | 1134 | ||
1133 | static enum drm_connector_status | 1135 | static enum drm_connector_status |
1134 | igdng_dp_detect(struct drm_connector *connector) | 1136 | ironlake_dp_detect(struct drm_connector *connector) |
1135 | { | 1137 | { |
1136 | struct intel_output *intel_output = to_intel_output(connector); | 1138 | struct intel_output *intel_output = to_intel_output(connector); |
1137 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1139 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; |
@@ -1166,8 +1168,8 @@ intel_dp_detect(struct drm_connector *connector) | |||
1166 | 1168 | ||
1167 | dp_priv->has_audio = false; | 1169 | dp_priv->has_audio = false; |
1168 | 1170 | ||
1169 | if (IS_IGDNG(dev)) | 1171 | if (IS_IRONLAKE(dev)) |
1170 | return igdng_dp_detect(connector); | 1172 | return ironlake_dp_detect(connector); |
1171 | 1173 | ||
1172 | temp = I915_READ(PORT_HOTPLUG_EN); | 1174 | temp = I915_READ(PORT_HOTPLUG_EN); |
1173 | 1175 | ||
@@ -1287,7 +1289,53 @@ intel_dp_hot_plug(struct intel_output *intel_output) | |||
1287 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) | 1289 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) |
1288 | intel_dp_check_link_status(intel_output); | 1290 | intel_dp_check_link_status(intel_output); |
1289 | } | 1291 | } |
1290 | 1292 | /* | |
1293 | * Enumerate the child dev array parsed from VBT to check whether | ||
1294 | * the given DP is present. | ||
1295 | * If it is present, return 1. | ||
1296 | * If it is not present, return 0. | ||
1297 | * If no child dev is parsed from VBT, it is assumed that the given | ||
1298 | * DP is present. | ||
1299 | */ | ||
1300 | static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg) | ||
1301 | { | ||
1302 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1303 | struct child_device_config *p_child; | ||
1304 | int i, dp_port, ret; | ||
1305 | |||
1306 | if (!dev_priv->child_dev_num) | ||
1307 | return 1; | ||
1308 | |||
1309 | dp_port = 0; | ||
1310 | if (dp_reg == DP_B || dp_reg == PCH_DP_B) | ||
1311 | dp_port = PORT_IDPB; | ||
1312 | else if (dp_reg == DP_C || dp_reg == PCH_DP_C) | ||
1313 | dp_port = PORT_IDPC; | ||
1314 | else if (dp_reg == DP_D || dp_reg == PCH_DP_D) | ||
1315 | dp_port = PORT_IDPD; | ||
1316 | |||
1317 | ret = 0; | ||
1318 | for (i = 0; i < dev_priv->child_dev_num; i++) { | ||
1319 | p_child = dev_priv->child_dev + i; | ||
1320 | /* | ||
1321 | * If the device type is not DP, continue. | ||
1322 | */ | ||
1323 | if (p_child->device_type != DEVICE_TYPE_DP && | ||
1324 | p_child->device_type != DEVICE_TYPE_eDP) | ||
1325 | continue; | ||
1326 | /* Find the eDP port */ | ||
1327 | if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) { | ||
1328 | ret = 1; | ||
1329 | break; | ||
1330 | } | ||
1331 | /* Find the DP port */ | ||
1332 | if (p_child->dvo_port == dp_port) { | ||
1333 | ret = 1; | ||
1334 | break; | ||
1335 | } | ||
1336 | } | ||
1337 | return ret; | ||
1338 | } | ||
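The same VBT walk is repeated almost verbatim for HDMI and LVDS later in this series; a condensed sketch of the shared pattern, using a hypothetical predicate callback that is not part of this patch:

	/* Sketch of the common *_is_present_in_vbt shape: with no VBT child
	 * device table the output is assumed present, otherwise it is present
	 * only if some child entry satisfies the caller's match predicate. */
	static int child_dev_present(struct drm_i915_private *dev_priv,
				     int (*match)(struct child_device_config *))
	{
		int i;

		if (!dev_priv->child_dev_num)
			return 1;

		for (i = 0; i < dev_priv->child_dev_num; i++)
			if (match(dev_priv->child_dev + i))
				return 1;
		return 0;
	}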
1291 | void | 1339 | void |
1292 | intel_dp_init(struct drm_device *dev, int output_reg) | 1340 | intel_dp_init(struct drm_device *dev, int output_reg) |
1293 | { | 1341 | { |
@@ -1297,6 +1345,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1297 | struct intel_dp_priv *dp_priv; | 1345 | struct intel_dp_priv *dp_priv; |
1298 | const char *name = NULL; | 1346 | const char *name = NULL; |
1299 | 1347 | ||
1348 | if (!dp_is_present_in_vbt(dev, output_reg)) { | ||
1349 | DRM_DEBUG_KMS("DP is not present. Ignore it\n"); | ||
1350 | return; | ||
1351 | } | ||
1300 | intel_output = kcalloc(sizeof(struct intel_output) + | 1352 | intel_output = kcalloc(sizeof(struct intel_output) + |
1301 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); | 1353 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); |
1302 | if (!intel_output) | 1354 | if (!intel_output) |
@@ -1314,11 +1366,11 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1314 | else | 1366 | else |
1315 | intel_output->type = INTEL_OUTPUT_DISPLAYPORT; | 1367 | intel_output->type = INTEL_OUTPUT_DISPLAYPORT; |
1316 | 1368 | ||
1317 | if (output_reg == DP_B) | 1369 | if (output_reg == DP_B || output_reg == PCH_DP_B) |
1318 | intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); | 1370 | intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); |
1319 | else if (output_reg == DP_C) | 1371 | else if (output_reg == DP_C || output_reg == PCH_DP_C) |
1320 | intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); | 1372 | intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); |
1321 | else if (output_reg == DP_D) | 1373 | else if (output_reg == DP_D || output_reg == PCH_DP_D) |
1322 | intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | 1374 | intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); |
1323 | 1375 | ||
1324 | if (IS_eDP(intel_output)) { | 1376 | if (IS_eDP(intel_output)) { |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ef61fe9507e2..9ffa31e13eb3 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -110,6 +110,32 @@ struct intel_output { | |||
110 | int clone_mask; | 110 | int clone_mask; |
111 | }; | 111 | }; |
112 | 112 | ||
113 | struct intel_crtc; | ||
114 | struct intel_overlay { | ||
115 | struct drm_device *dev; | ||
116 | struct intel_crtc *crtc; | ||
117 | struct drm_i915_gem_object *vid_bo; | ||
118 | struct drm_i915_gem_object *old_vid_bo; | ||
119 | int active; | ||
120 | int pfit_active; | ||
121 | u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ | ||
122 | u32 color_key; | ||
123 | u32 brightness, contrast, saturation; | ||
124 | u32 old_xscale, old_yscale; | ||
125 | /* register access */ | ||
126 | u32 flip_addr; | ||
127 | struct drm_i915_gem_object *reg_bo; | ||
128 | void *virt_addr; | ||
129 | /* flip handling */ | ||
130 | uint32_t last_flip_req; | ||
131 | int hw_wedged; | ||
132 | #define HW_WEDGED 1 | ||
133 | #define NEEDS_WAIT_FOR_FLIP 2 | ||
134 | #define RELEASE_OLD_VID 3 | ||
135 | #define SWITCH_OFF_STAGE_1 4 | ||
136 | #define SWITCH_OFF_STAGE_2 5 | ||
137 | }; | ||
138 | |||
113 | struct intel_crtc { | 139 | struct intel_crtc { |
114 | struct drm_crtc base; | 140 | struct drm_crtc base; |
115 | enum pipe pipe; | 141 | enum pipe pipe; |
@@ -121,6 +147,8 @@ struct intel_crtc { | |||
121 | bool busy; /* is scanout buffer being updated frequently? */ | 147 | bool busy; /* is scanout buffer being updated frequently? */ |
122 | struct timer_list idle_timer; | 148 | struct timer_list idle_timer; |
123 | bool lowfreq_avail; | 149 | bool lowfreq_avail; |
150 | struct intel_overlay *overlay; | ||
151 | struct intel_unpin_work *unpin_work; | ||
124 | }; | 152 | }; |
125 | 153 | ||
126 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 154 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
@@ -134,6 +162,8 @@ void intel_i2c_destroy(struct i2c_adapter *adapter); | |||
134 | int intel_ddc_get_modes(struct intel_output *intel_output); | 162 | int intel_ddc_get_modes(struct intel_output *intel_output); |
135 | extern bool intel_ddc_probe(struct intel_output *intel_output); | 163 | extern bool intel_ddc_probe(struct intel_output *intel_output); |
136 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); | 164 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); |
165 | void intel_i2c_reset_gmbus(struct drm_device *dev); | ||
166 | |||
137 | extern void intel_crt_init(struct drm_device *dev); | 167 | extern void intel_crt_init(struct drm_device *dev); |
138 | extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); | 168 | extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); |
139 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); | 169 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); |
@@ -148,6 +178,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
148 | extern void intel_edp_link_config (struct intel_output *, int *, int *); | 178 | extern void intel_edp_link_config (struct intel_output *, int *, int *); |
149 | 179 | ||
150 | 180 | ||
181 | extern int intel_panel_fitter_pipe (struct drm_device *dev); | ||
151 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 182 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
152 | extern void intel_encoder_prepare (struct drm_encoder *encoder); | 183 | extern void intel_encoder_prepare (struct drm_encoder *encoder); |
153 | extern void intel_encoder_commit (struct drm_encoder *encoder); | 184 | extern void intel_encoder_commit (struct drm_encoder *encoder); |
@@ -183,4 +214,16 @@ extern int intel_framebuffer_create(struct drm_device *dev, | |||
183 | struct drm_framebuffer **fb, | 214 | struct drm_framebuffer **fb, |
184 | struct drm_gem_object *obj); | 215 | struct drm_gem_object *obj); |
185 | 216 | ||
217 | extern void intel_prepare_page_flip(struct drm_device *dev, int plane); | ||
218 | extern void intel_finish_page_flip(struct drm_device *dev, int pipe); | ||
219 | |||
220 | extern void intel_setup_overlay(struct drm_device *dev); | ||
221 | extern void intel_cleanup_overlay(struct drm_device *dev); | ||
222 | extern int intel_overlay_switch_off(struct intel_overlay *overlay); | ||
223 | extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, | ||
224 | int interruptible); | ||
225 | extern int intel_overlay_put_image(struct drm_device *dev, void *data, | ||
226 | struct drm_file *file_priv); | ||
227 | extern int intel_overlay_attrs(struct drm_device *dev, void *data, | ||
228 | struct drm_file *file_priv); | ||
186 | #endif /* __INTEL_DRV_H__ */ | 229 | #endif /* __INTEL_DRV_H__ */ |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 2b0fe54cd92c..d4823cc87895 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -230,8 +230,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
230 | par->intel_fb = intel_fb; | 230 | par->intel_fb = intel_fb; |
231 | 231 | ||
232 | /* To allow resizing without swapping buffers */ | 232 | /* To allow resizing without swapping buffers */ |
233 | DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, | 233 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", |
234 | intel_fb->base.height, obj_priv->gtt_offset, fbo); | 234 | intel_fb->base.width, intel_fb->base.height, |
235 | obj_priv->gtt_offset, fbo); | ||
235 | 236 | ||
236 | mutex_unlock(&dev->struct_mutex); | 237 | mutex_unlock(&dev->struct_mutex); |
237 | return 0; | 238 | return 0; |
@@ -249,7 +250,7 @@ int intelfb_probe(struct drm_device *dev) | |||
249 | { | 250 | { |
250 | int ret; | 251 | int ret; |
251 | 252 | ||
252 | DRM_DEBUG("\n"); | 253 | DRM_DEBUG_KMS("\n"); |
253 | ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); | 254 | ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); |
254 | return ret; | 255 | return ret; |
255 | } | 256 | } |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index c33451aec1bd..f04dbbe7d400 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | |||
82 | /* HW workaround, need to toggle enable bit off and on for 12bpc, but | 82 | /* HW workaround, need to toggle enable bit off and on for 12bpc, but |
83 | * we do this anyway which shows more stable in testing. | 83 | * we do this anyway which shows more stable in testing. |
84 | */ | 84 | */ |
85 | if (IS_IGDNG(dev)) { | 85 | if (IS_IRONLAKE(dev)) { |
86 | I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); | 86 | I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); |
87 | POSTING_READ(hdmi_priv->sdvox_reg); | 87 | POSTING_READ(hdmi_priv->sdvox_reg); |
88 | } | 88 | } |
@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | |||
99 | /* HW workaround, need to write this twice for issue that may result | 99 | /* HW workaround, need to write this twice for issue that may result |
100 | * in first write getting masked. | 100 | * in first write getting masked. |
101 | */ | 101 | */ |
102 | if (IS_IGDNG(dev)) { | 102 | if (IS_IRONLAKE(dev)) { |
103 | I915_WRITE(hdmi_priv->sdvox_reg, temp); | 103 | I915_WRITE(hdmi_priv->sdvox_reg, temp); |
104 | POSTING_READ(hdmi_priv->sdvox_reg); | 104 | POSTING_READ(hdmi_priv->sdvox_reg); |
105 | } | 105 | } |
@@ -225,7 +225,52 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { | |||
225 | .destroy = intel_hdmi_enc_destroy, | 225 | .destroy = intel_hdmi_enc_destroy, |
226 | }; | 226 | }; |
227 | 227 | ||
228 | 228 | /* | |
229 | * Enumerate the child dev array parsed from VBT to check whether | ||
230 | * the given HDMI is present. | ||
231 | * If it is present, return 1. | ||
232 | * If it is not present, return 0. | ||
233 | * If no child dev is parsed from VBT, it is assumed that the given | ||
234 | * HDMI is present. | ||
235 | */ | ||
236 | static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg) | ||
237 | { | ||
238 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
239 | struct child_device_config *p_child; | ||
240 | int i, hdmi_port, ret; | ||
241 | |||
242 | if (!dev_priv->child_dev_num) | ||
243 | return 1; | ||
244 | |||
245 | if (hdmi_reg == SDVOB) | ||
246 | hdmi_port = DVO_B; | ||
247 | else if (hdmi_reg == SDVOC) | ||
248 | hdmi_port = DVO_C; | ||
249 | else if (hdmi_reg == HDMIB) | ||
250 | hdmi_port = DVO_B; | ||
251 | else if (hdmi_reg == HDMIC) | ||
252 | hdmi_port = DVO_C; | ||
253 | else if (hdmi_reg == HDMID) | ||
254 | hdmi_port = DVO_D; | ||
255 | else | ||
256 | return 0; | ||
257 | |||
258 | ret = 0; | ||
259 | for (i = 0; i < dev_priv->child_dev_num; i++) { | ||
260 | p_child = dev_priv->child_dev + i; | ||
261 | /* | ||
262 | * If the device type is not HDMI, continue. | ||
263 | */ | ||
264 | if (p_child->device_type != DEVICE_TYPE_HDMI) | ||
265 | continue; | ||
266 | /* Find the HDMI port */ | ||
267 | if (p_child->dvo_port == hdmi_port) { | ||
268 | ret = 1; | ||
269 | break; | ||
270 | } | ||
271 | } | ||
272 | return ret; | ||
273 | } | ||
229 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | 274 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) |
230 | { | 275 | { |
231 | struct drm_i915_private *dev_priv = dev->dev_private; | 276 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -233,6 +278,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
233 | struct intel_output *intel_output; | 278 | struct intel_output *intel_output; |
234 | struct intel_hdmi_priv *hdmi_priv; | 279 | struct intel_hdmi_priv *hdmi_priv; |
235 | 280 | ||
281 | if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) { | ||
282 | DRM_DEBUG_KMS("HDMI is not present. Ignore it\n"); | ||
283 | return; | ||
284 | } | ||
236 | intel_output = kcalloc(sizeof(struct intel_output) + | 285 | intel_output = kcalloc(sizeof(struct intel_output) + |
237 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); | 286 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); |
238 | if (!intel_output) | 287 | if (!intel_output) |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index c7eab724c418..8673c735b8ab 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -39,7 +39,7 @@ void intel_i2c_quirk_set(struct drm_device *dev, bool enable) | |||
39 | struct drm_i915_private *dev_priv = dev->dev_private; | 39 | struct drm_i915_private *dev_priv = dev->dev_private; |
40 | 40 | ||
41 | /* When using bit bashing for I2C, this bit needs to be set to 1 */ | 41 | /* When using bit bashing for I2C, this bit needs to be set to 1 */ |
42 | if (!IS_IGD(dev)) | 42 | if (!IS_PINEVIEW(dev)) |
43 | return; | 43 | return; |
44 | if (enable) | 44 | if (enable) |
45 | I915_WRITE(DSPCLK_GATE_D, | 45 | I915_WRITE(DSPCLK_GATE_D, |
@@ -118,6 +118,23 @@ static void set_data(void *data, int state_high) | |||
118 | udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ | 118 | udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ |
119 | } | 119 | } |
120 | 120 | ||
121 | /* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C | ||
122 | * engine, but if the BIOS leaves it enabled, then that can break our use | ||
123 | * of the bit-banging I2C interfaces. This is notably the case with the | ||
124 | * Mac Mini in EFI mode. | ||
125 | */ | ||
126 | void | ||
127 | intel_i2c_reset_gmbus(struct drm_device *dev) | ||
128 | { | ||
129 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
130 | |||
131 | if (IS_IRONLAKE(dev)) { | ||
132 | I915_WRITE(PCH_GMBUS0, 0); | ||
133 | } else { | ||
134 | I915_WRITE(GMBUS0, 0); | ||
135 | } | ||
136 | } | ||
137 | |||
121 | /** | 138 | /** |
122 | * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg | 139 | * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg |
123 | * @dev: DRM device | 140 | * @dev: DRM device |
@@ -168,6 +185,8 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, | |||
168 | if(i2c_bit_add_bus(&chan->adapter)) | 185 | if(i2c_bit_add_bus(&chan->adapter)) |
169 | goto out_free; | 186 | goto out_free; |
170 | 187 | ||
188 | intel_i2c_reset_gmbus(dev); | ||
189 | |||
171 | /* JJJ: raise SCL and SDA? */ | 190 | /* JJJ: raise SCL and SDA? */ |
172 | intel_i2c_quirk_set(dev, true); | 191 | intel_i2c_quirk_set(dev, true); |
173 | set_data(chan, 1); | 192 | set_data(chan, 1); |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 05598ae10c4b..b04d1e63d439 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level) | |||
56 | struct drm_i915_private *dev_priv = dev->dev_private; | 56 | struct drm_i915_private *dev_priv = dev->dev_private; |
57 | u32 blc_pwm_ctl, reg; | 57 | u32 blc_pwm_ctl, reg; |
58 | 58 | ||
59 | if (IS_IGDNG(dev)) | 59 | if (IS_IRONLAKE(dev)) |
60 | reg = BLC_PWM_CPU_CTL; | 60 | reg = BLC_PWM_CPU_CTL; |
61 | else | 61 | else |
62 | reg = BLC_PWM_CTL; | 62 | reg = BLC_PWM_CTL; |
@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev) | |||
74 | struct drm_i915_private *dev_priv = dev->dev_private; | 74 | struct drm_i915_private *dev_priv = dev->dev_private; |
75 | u32 reg; | 75 | u32 reg; |
76 | 76 | ||
77 | if (IS_IGDNG(dev)) | 77 | if (IS_IRONLAKE(dev)) |
78 | reg = BLC_PWM_PCH_CTL2; | 78 | reg = BLC_PWM_PCH_CTL2; |
79 | else | 79 | else |
80 | reg = BLC_PWM_CTL; | 80 | reg = BLC_PWM_CTL; |
@@ -91,7 +91,7 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) | |||
91 | struct drm_i915_private *dev_priv = dev->dev_private; | 91 | struct drm_i915_private *dev_priv = dev->dev_private; |
92 | u32 pp_status, ctl_reg, status_reg; | 92 | u32 pp_status, ctl_reg, status_reg; |
93 | 93 | ||
94 | if (IS_IGDNG(dev)) { | 94 | if (IS_IRONLAKE(dev)) { |
95 | ctl_reg = PCH_PP_CONTROL; | 95 | ctl_reg = PCH_PP_CONTROL; |
96 | status_reg = PCH_PP_STATUS; | 96 | status_reg = PCH_PP_STATUS; |
97 | } else { | 97 | } else { |
@@ -137,7 +137,7 @@ static void intel_lvds_save(struct drm_connector *connector) | |||
137 | u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; | 137 | u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; |
138 | u32 pwm_ctl_reg; | 138 | u32 pwm_ctl_reg; |
139 | 139 | ||
140 | if (IS_IGDNG(dev)) { | 140 | if (IS_IRONLAKE(dev)) { |
141 | pp_on_reg = PCH_PP_ON_DELAYS; | 141 | pp_on_reg = PCH_PP_ON_DELAYS; |
142 | pp_off_reg = PCH_PP_OFF_DELAYS; | 142 | pp_off_reg = PCH_PP_OFF_DELAYS; |
143 | pp_ctl_reg = PCH_PP_CONTROL; | 143 | pp_ctl_reg = PCH_PP_CONTROL; |
@@ -174,7 +174,7 @@ static void intel_lvds_restore(struct drm_connector *connector) | |||
174 | u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; | 174 | u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; |
175 | u32 pwm_ctl_reg; | 175 | u32 pwm_ctl_reg; |
176 | 176 | ||
177 | if (IS_IGDNG(dev)) { | 177 | if (IS_IRONLAKE(dev)) { |
178 | pp_on_reg = PCH_PP_ON_DELAYS; | 178 | pp_on_reg = PCH_PP_ON_DELAYS; |
179 | pp_off_reg = PCH_PP_OFF_DELAYS; | 179 | pp_off_reg = PCH_PP_OFF_DELAYS; |
180 | pp_ctl_reg = PCH_PP_CONTROL; | 180 | pp_ctl_reg = PCH_PP_CONTROL; |
@@ -297,7 +297,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
297 | } | 297 | } |
298 | 298 | ||
299 | /* full screen scale for now */ | 299 | /* full screen scale for now */ |
300 | if (IS_IGDNG(dev)) | 300 | if (IS_IRONLAKE(dev)) |
301 | goto out; | 301 | goto out; |
302 | 302 | ||
303 | /* 965+ wants fuzzy fitting */ | 303 | /* 965+ wants fuzzy fitting */ |
@@ -327,7 +327,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
327 | * to register description and PRM. | 327 | * to register description and PRM. |
328 | * Change the value here to see the borders for debugging | 328 | * Change the value here to see the borders for debugging |
329 | */ | 329 | */ |
330 | if (!IS_IGDNG(dev)) { | 330 | if (!IS_IRONLAKE(dev)) { |
331 | I915_WRITE(BCLRPAT_A, 0); | 331 | I915_WRITE(BCLRPAT_A, 0); |
332 | I915_WRITE(BCLRPAT_B, 0); | 332 | I915_WRITE(BCLRPAT_B, 0); |
333 | } | 333 | } |
@@ -548,7 +548,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) | |||
548 | struct drm_i915_private *dev_priv = dev->dev_private; | 548 | struct drm_i915_private *dev_priv = dev->dev_private; |
549 | u32 reg; | 549 | u32 reg; |
550 | 550 | ||
551 | if (IS_IGDNG(dev)) | 551 | if (IS_IRONLAKE(dev)) |
552 | reg = BLC_PWM_CPU_CTL; | 552 | reg = BLC_PWM_CPU_CTL; |
553 | else | 553 | else |
554 | reg = BLC_PWM_CTL; | 554 | reg = BLC_PWM_CTL; |
@@ -587,7 +587,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
587 | * settings. | 587 | * settings. |
588 | */ | 588 | */ |
589 | 589 | ||
590 | if (IS_IGDNG(dev)) | 590 | if (IS_IRONLAKE(dev)) |
591 | return; | 591 | return; |
592 | 592 | ||
593 | /* | 593 | /* |
@@ -914,6 +914,101 @@ static int intel_lid_present(void) | |||
914 | #endif | 914 | #endif |
915 | 915 | ||
916 | /** | 916 | /** |
917 | * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID | ||
918 | * @dev: drm device | ||
919 | * @connector: LVDS connector | ||
920 | * | ||
921 | * Find the reduced downclock for LVDS in EDID. | ||
922 | */ | ||
923 | static void intel_find_lvds_downclock(struct drm_device *dev, | ||
924 | struct drm_connector *connector) | ||
925 | { | ||
926 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
927 | struct drm_display_mode *scan, *panel_fixed_mode; | ||
928 | int temp_downclock; | ||
929 | |||
930 | panel_fixed_mode = dev_priv->panel_fixed_mode; | ||
931 | temp_downclock = panel_fixed_mode->clock; | ||
932 | |||
933 | mutex_lock(&dev->mode_config.mutex); | ||
934 | list_for_each_entry(scan, &connector->probed_modes, head) { | ||
935 | /* | ||
936 | * If one mode has the same resolution with the fixed_panel | ||
937 | * mode while they have the different refresh rate, it means | ||
938 | * that the reduced downclock is found for the LVDS. In such | ||
939 | * case we can set the different FPx0/1 to dynamically select | ||
940 | * between low and high frequency. | ||
941 | */ | ||
942 | if (scan->hdisplay == panel_fixed_mode->hdisplay && | ||
943 | scan->hsync_start == panel_fixed_mode->hsync_start && | ||
944 | scan->hsync_end == panel_fixed_mode->hsync_end && | ||
945 | scan->htotal == panel_fixed_mode->htotal && | ||
946 | scan->vdisplay == panel_fixed_mode->vdisplay && | ||
947 | scan->vsync_start == panel_fixed_mode->vsync_start && | ||
948 | scan->vsync_end == panel_fixed_mode->vsync_end && | ||
949 | scan->vtotal == panel_fixed_mode->vtotal) { | ||
950 | if (scan->clock < temp_downclock) { | ||
951 | /* | ||
952 | * The downclock is already found. But we | ||
953 | * expect to find the lower downclock. | ||
954 | */ | ||
955 | temp_downclock = scan->clock; | ||
956 | } | ||
957 | } | ||
958 | } | ||
959 | mutex_unlock(&dev->mode_config.mutex); | ||
960 | if (temp_downclock < panel_fixed_mode->clock) { | ||
961 | /* We found the downclock for LVDS. */ | ||
962 | dev_priv->lvds_downclock_avail = 1; | ||
963 | dev_priv->lvds_downclock = temp_downclock; | ||
964 | DRM_DEBUG_KMS("LVDS downclock is found in EDID. " | ||
965 | "Normal clock %dKhz, downclock %dKhz\n", | ||
966 | panel_fixed_mode->clock, temp_downclock); | ||
967 | } | ||
968 | return; | ||
969 | } | ||
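A worked trace of the search above, with purely illustrative numbers that are not taken from this patch:

	/* panel_fixed_mode->clock = 71000 kHz, and the EDID also lists a mode
	 * with identical timings but clock = 45000 kHz:
	 *   temp_downclock ends at 45000 < 71000, so
	 *   lvds_downclock_avail = 1 and lvds_downclock = 45000. */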
970 | |||
971 | /* | ||
972 | * Enumerate the child dev array parsed from VBT to check whether | ||
973 | * the LVDS is present. | ||
974 | * If it is present, return 1. | ||
975 | * If it is not present, return 0. | ||
976 | * If no child dev is parsed from VBT, it is assumed that the LVDS is present. | ||
977 | * Note: The addin_offset should also be checked for LVDS panel. | ||
978 | * Only when it is non-zero, it is assumed that it is present. | ||
979 | */ | ||
980 | static int lvds_is_present_in_vbt(struct drm_device *dev) | ||
981 | { | ||
982 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
983 | struct child_device_config *p_child; | ||
984 | int i, ret; | ||
985 | |||
986 | if (!dev_priv->child_dev_num) | ||
987 | return 1; | ||
988 | |||
989 | ret = 0; | ||
990 | for (i = 0; i < dev_priv->child_dev_num; i++) { | ||
991 | p_child = dev_priv->child_dev + i; | ||
992 | /* | ||
993 | * If the device type is not LFP, continue. | ||
994 | * If the device type is 0x22, it is also regarded as LFP. | ||
995 | */ | ||
996 | if (p_child->device_type != DEVICE_TYPE_INT_LFP && | ||
997 | p_child->device_type != DEVICE_TYPE_LFP) | ||
998 | continue; | ||
999 | |||
1000 | /* The addin_offset should be checked. Only when it is | ||
1001 | * non-zero, it is regarded as present. | ||
1002 | */ | ||
1003 | if (p_child->addin_offset) { | ||
1004 | ret = 1; | ||
1005 | break; | ||
1006 | } | ||
1007 | } | ||
1008 | return ret; | ||
1009 | } | ||
1010 | |||
1011 | /** | ||
917 | * intel_lvds_init - setup LVDS connectors on this device | 1012 | * intel_lvds_init - setup LVDS connectors on this device |
918 | * @dev: drm device | 1013 | * @dev: drm device |
919 | * | 1014 | * |
@@ -936,21 +1031,20 @@ void intel_lvds_init(struct drm_device *dev) | |||
936 | if (dmi_check_system(intel_no_lvds)) | 1031 | if (dmi_check_system(intel_no_lvds)) |
937 | return; | 1032 | return; |
938 | 1033 | ||
939 | /* Assume that any device without an ACPI LID device also doesn't | 1034 | /* |
940 | * have an integrated LVDS. We would be better off parsing the BIOS | 1035 | * Assume LVDS is present if there's an ACPI lid device or if the |
941 | * to get a reliable indicator, but that code isn't written yet. | 1036 | * device is present in the VBT. |
942 | * | ||
943 | * In the case of all-in-one desktops using LVDS that we've seen, | ||
944 | * they're using SDVO LVDS. | ||
945 | */ | 1037 | */ |
946 | if (!intel_lid_present()) | 1038 | if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) { |
1039 | DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n"); | ||
947 | return; | 1040 | return; |
1041 | } | ||
948 | 1042 | ||
949 | if (IS_IGDNG(dev)) { | 1043 | if (IS_IRONLAKE(dev)) { |
950 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) | 1044 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) |
951 | return; | 1045 | return; |
952 | if (dev_priv->edp_support) { | 1046 | if (dev_priv->edp_support) { |
953 | DRM_DEBUG("disable LVDS for eDP support\n"); | 1047 | DRM_DEBUG_KMS("disable LVDS for eDP support\n"); |
954 | return; | 1048 | return; |
955 | } | 1049 | } |
956 | gpio = PCH_GPIOC; | 1050 | gpio = PCH_GPIOC; |
@@ -1023,6 +1117,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
1023 | dev_priv->panel_fixed_mode = | 1117 | dev_priv->panel_fixed_mode = |
1024 | drm_mode_duplicate(dev, scan); | 1118 | drm_mode_duplicate(dev, scan); |
1025 | mutex_unlock(&dev->mode_config.mutex); | 1119 | mutex_unlock(&dev->mode_config.mutex); |
1120 | intel_find_lvds_downclock(dev, connector); | ||
1026 | goto out; | 1121 | goto out; |
1027 | } | 1122 | } |
1028 | mutex_unlock(&dev->mode_config.mutex); | 1123 | mutex_unlock(&dev->mode_config.mutex); |
@@ -1047,8 +1142,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
1047 | * correct mode. | 1142 | * correct mode. |
1048 | */ | 1143 | */ |
1049 | 1144 | ||
1050 | /* IGDNG: FIXME if still fail, not try pipe mode now */ | 1145 | /* Ironlake: FIXME if still fail, not try pipe mode now */ |
1051 | if (IS_IGDNG(dev)) | 1146 | if (IS_IRONLAKE(dev)) |
1052 | goto failed; | 1147 | goto failed; |
1053 | 1148 | ||
1054 | lvds = I915_READ(LVDS); | 1149 | lvds = I915_READ(LVDS); |
@@ -1069,7 +1164,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
1069 | goto failed; | 1164 | goto failed; |
1070 | 1165 | ||
1071 | out: | 1166 | out: |
1072 | if (IS_IGDNG(dev)) { | 1167 | if (IS_IRONLAKE(dev)) { |
1073 | u32 pwm; | 1168 | u32 pwm; |
1074 | /* make sure PWM is enabled */ | 1169 | /* make sure PWM is enabled */ |
1075 | pwm = I915_READ(BLC_PWM_CPU_CTL2); | 1170 | pwm = I915_READ(BLC_PWM_CPU_CTL2); |
@@ -1082,7 +1177,7 @@ out: | |||
1082 | } | 1177 | } |
1083 | dev_priv->lid_notifier.notifier_call = intel_lid_notify; | 1178 | dev_priv->lid_notifier.notifier_call = intel_lid_notify; |
1084 | if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { | 1179 | if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { |
1085 | DRM_DEBUG("lid notifier registration failed\n"); | 1180 | DRM_DEBUG_KMS("lid notifier registration failed\n"); |
1086 | dev_priv->lid_notifier.notifier_call = NULL; | 1181 | dev_priv->lid_notifier.notifier_call = NULL; |
1087 | } | 1182 | } |
1088 | drm_sysfs_connector_add(connector); | 1183 | drm_sysfs_connector_add(connector); |
@@ -1093,5 +1188,6 @@ failed: | |||
1093 | if (intel_output->ddc_bus) | 1188 | if (intel_output->ddc_bus) |
1094 | intel_i2c_destroy(intel_output->ddc_bus); | 1189 | intel_i2c_destroy(intel_output->ddc_bus); |
1095 | drm_connector_cleanup(connector); | 1190 | drm_connector_cleanup(connector); |
1191 | drm_encoder_cleanup(encoder); | ||
1096 | kfree(intel_output); | 1192 | kfree(intel_output); |
1097 | } | 1193 | } |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c new file mode 100644 index 000000000000..2639591c72e9 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -0,0 +1,1416 @@ | |||
1 | /* | ||
2 | * Copyright © 2009 | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
21 | * SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Daniel Vetter <daniel@ffwll.ch> | ||
25 | * | ||
26 | * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c | ||
27 | */ | ||
28 | #include "drmP.h" | ||
29 | #include "drm.h" | ||
30 | #include "i915_drm.h" | ||
31 | #include "i915_drv.h" | ||
32 | #include "i915_reg.h" | ||
33 | #include "intel_drv.h" | ||
34 | |||
35 | /* Limits for overlay size. According to intel doc, the real limits are: | ||
36 | * Y width: 4095, UV width (planar): 2047, Y height: 2047, | ||
37 | * UV height (planar): 1023. But Xorg assumes 2048 for both height and width; use | ||
38 | * the minimum of both. */ | ||
39 | #define IMAGE_MAX_WIDTH 2048 | ||
40 | #define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */ | ||
41 | /* on 830 and 845 these large limits result in the card hanging */ | ||
42 | #define IMAGE_MAX_WIDTH_LEGACY 1024 | ||
43 | #define IMAGE_MAX_HEIGHT_LEGACY 1088 | ||
44 | |||
45 | /* overlay register definitions */ | ||
46 | /* OCMD register */ | ||
47 | #define OCMD_TILED_SURFACE (0x1<<19) | ||
48 | #define OCMD_MIRROR_MASK (0x3<<17) | ||
49 | #define OCMD_MIRROR_MODE (0x3<<17) | ||
50 | #define OCMD_MIRROR_HORIZONTAL (0x1<<17) | ||
51 | #define OCMD_MIRROR_VERTICAL (0x2<<17) | ||
52 | #define OCMD_MIRROR_BOTH (0x3<<17) | ||
53 | #define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */ | ||
54 | #define OCMD_UV_SWAP (0x1<<14) /* YVYU */ | ||
55 | #define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */ | ||
56 | #define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */ | ||
57 | #define OCMD_SOURCE_FORMAT_MASK (0xf<<10) | ||
58 | #define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */ | ||
59 | #define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */ | ||
60 | #define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */ | ||
61 | #define OCMD_YUV_422_PACKED (0x8<<10) | ||
62 | #define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */ | ||
63 | #define OCMD_YUV_420_PLANAR (0xc<<10) | ||
64 | #define OCMD_YUV_422_PLANAR (0xd<<10) | ||
65 | #define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */ | ||
66 | #define OCMD_TVSYNCFLIP_PARITY (0x1<<9) | ||
67 | #define OCMD_TVSYNCFLIP_ENABLE (0x1<<7) | ||
68 | #define OCMD_BUF_TYPE_MASK (0x1<<5) | ||
69 | #define OCMD_BUF_TYPE_FRAME (0x0<<5) | ||
70 | #define OCMD_BUF_TYPE_FIELD (0x1<<5) | ||
71 | #define OCMD_TEST_MODE (0x1<<4) | ||
72 | #define OCMD_BUFFER_SELECT (0x3<<2) | ||
73 | #define OCMD_BUFFER0 (0x0<<2) | ||
74 | #define OCMD_BUFFER1 (0x1<<2) | ||
75 | #define OCMD_FIELD_SELECT (0x1<<2) | ||
76 | #define OCMD_FIELD0 (0x0<<1) | ||
77 | #define OCMD_FIELD1 (0x1<<1) | ||
78 | #define OCMD_ENABLE (0x1<<0) | ||
79 | |||
80 | /* OCONFIG register */ | ||
81 | #define OCONF_PIPE_MASK (0x1<<18) | ||
82 | #define OCONF_PIPE_A (0x0<<18) | ||
83 | #define OCONF_PIPE_B (0x1<<18) | ||
84 | #define OCONF_GAMMA2_ENABLE (0x1<<16) | ||
85 | #define OCONF_CSC_MODE_BT601 (0x0<<5) | ||
86 | #define OCONF_CSC_MODE_BT709 (0x1<<5) | ||
87 | #define OCONF_CSC_BYPASS (0x1<<4) | ||
88 | #define OCONF_CC_OUT_8BIT (0x1<<3) | ||
89 | #define OCONF_TEST_MODE (0x1<<2) | ||
90 | #define OCONF_THREE_LINE_BUFFER (0x1<<0) | ||
91 | #define OCONF_TWO_LINE_BUFFER (0x0<<0) | ||
92 | |||
93 | /* DCLRKM (dst-key) register */ | ||
94 | #define DST_KEY_ENABLE (0x1<<31) | ||
95 | #define CLK_RGB24_MASK 0x0 | ||
96 | #define CLK_RGB16_MASK 0x070307 | ||
97 | #define CLK_RGB15_MASK 0x070707 | ||
98 | #define CLK_RGB8I_MASK 0xffffff | ||
99 | |||
100 | #define RGB16_TO_COLORKEY(c) \ | ||
101 | (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3)) | ||
102 | #define RGB15_TO_COLORKEY(c) \ | ||
103 | (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3)) | ||
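As a quick sanity check on the two macros above, each packed channel is shifted up into the high bits of its 8-bit lane in the 8:8:8 destination colorkey; for example, pure red in RGB565:

	u32 key = RGB16_TO_COLORKEY(0xF800);	/* (0xF800 & 0xF800) << 8 == 0x00F80000 */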
104 | |||
105 | /* overlay flip addr flag */ | ||
106 | #define OFC_UPDATE 0x1 | ||
107 | |||
108 | /* polyphase filter coefficients */ | ||
109 | #define N_HORIZ_Y_TAPS 5 | ||
110 | #define N_VERT_Y_TAPS 3 | ||
111 | #define N_HORIZ_UV_TAPS 3 | ||
112 | #define N_VERT_UV_TAPS 3 | ||
113 | #define N_PHASES 17 | ||
114 | #define MAX_TAPS 5 | ||
115 | |||
116 | /* memory-buffered overlay registers */ | ||
117 | struct overlay_registers { | ||
118 | u32 OBUF_0Y; | ||
119 | u32 OBUF_1Y; | ||
120 | u32 OBUF_0U; | ||
121 | u32 OBUF_0V; | ||
122 | u32 OBUF_1U; | ||
123 | u32 OBUF_1V; | ||
124 | u32 OSTRIDE; | ||
125 | u32 YRGB_VPH; | ||
126 | u32 UV_VPH; | ||
127 | u32 HORZ_PH; | ||
128 | u32 INIT_PHS; | ||
129 | u32 DWINPOS; | ||
130 | u32 DWINSZ; | ||
131 | u32 SWIDTH; | ||
132 | u32 SWIDTHSW; | ||
133 | u32 SHEIGHT; | ||
134 | u32 YRGBSCALE; | ||
135 | u32 UVSCALE; | ||
136 | u32 OCLRC0; | ||
137 | u32 OCLRC1; | ||
138 | u32 DCLRKV; | ||
139 | u32 DCLRKM; | ||
140 | u32 SCLRKVH; | ||
141 | u32 SCLRKVL; | ||
142 | u32 SCLRKEN; | ||
143 | u32 OCONFIG; | ||
144 | u32 OCMD; | ||
145 | u32 RESERVED1; /* 0x6C */ | ||
146 | u32 OSTART_0Y; | ||
147 | u32 OSTART_1Y; | ||
148 | u32 OSTART_0U; | ||
149 | u32 OSTART_0V; | ||
150 | u32 OSTART_1U; | ||
151 | u32 OSTART_1V; | ||
152 | u32 OTILEOFF_0Y; | ||
153 | u32 OTILEOFF_1Y; | ||
154 | u32 OTILEOFF_0U; | ||
155 | u32 OTILEOFF_0V; | ||
156 | u32 OTILEOFF_1U; | ||
157 | u32 OTILEOFF_1V; | ||
158 | u32 FASTHSCALE; /* 0xA0 */ | ||
159 | u32 UVSCALEV; /* 0xA4 */ | ||
160 | u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */ | ||
161 | u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */ | ||
162 | u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES]; | ||
163 | u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */ | ||
164 | u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES]; | ||
165 | u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */ | ||
166 | u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES]; | ||
167 | u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */ | ||
168 | u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; | ||
169 | }; | ||
170 | |||
171 | /* overlay flip addr flag */ | ||
172 | #define OFC_UPDATE 0x1 | ||
173 | |||
174 | #define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) | ||
175 | #define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev)) | ||
176 | |||
177 | |||
178 | static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) | ||
179 | { | ||
180 | drm_i915_private_t *dev_priv = overlay->dev->dev_private; | ||
181 | struct overlay_registers *regs; | ||
182 | |||
183 | /* no recursive mappings */ | ||
184 | BUG_ON(overlay->virt_addr); | ||
185 | |||
186 | if (OVERLAY_NONPHYSICAL(overlay->dev)) { | ||
187 | regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | ||
188 | overlay->reg_bo->gtt_offset); | ||
189 | |||
190 | if (!regs) { | ||
191 | DRM_ERROR("failed to map overlay regs in GTT\n"); | ||
192 | return NULL; | ||
193 | } | ||
194 | } else | ||
195 | regs = overlay->reg_bo->phys_obj->handle->vaddr; | ||
196 | |||
197 | return overlay->virt_addr = regs; | ||
198 | } | ||
199 | |||
200 | static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) | ||
201 | { | ||
202 | struct drm_device *dev = overlay->dev; | ||
203 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
204 | |||
205 | if (OVERLAY_NONPHYSICAL(overlay->dev)) | ||
206 | io_mapping_unmap_atomic(overlay->virt_addr); | ||
207 | |||
208 | overlay->virt_addr = NULL; | ||
209 | |||
210 | I915_READ(OVADD); /* flush wc caches */ | ||
211 | |||
212 | return; | ||
213 | } | ||
214 | |||
215 | /* overlay needs to be disabled in OCMD reg */ | ||
216 | static int intel_overlay_on(struct intel_overlay *overlay) | ||
217 | { | ||
218 | struct drm_device *dev = overlay->dev; | ||
219 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
220 | int ret; | ||
221 | RING_LOCALS; | ||
222 | |||
223 | BUG_ON(overlay->active); | ||
224 | |||
225 | overlay->active = 1; | ||
226 | overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; | ||
227 | |||
228 | BEGIN_LP_RING(6); | ||
229 | OUT_RING(MI_FLUSH); | ||
230 | OUT_RING(MI_NOOP); | ||
231 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); | ||
232 | OUT_RING(overlay->flip_addr | OFC_UPDATE); | ||
233 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | ||
234 | OUT_RING(MI_NOOP); | ||
235 | ADVANCE_LP_RING(); | ||
236 | |||
237 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | ||
238 | if (overlay->last_flip_req == 0) | ||
239 | return -ENOMEM; | ||
240 | |||
241 | ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); | ||
242 | if (ret != 0) | ||
243 | return ret; | ||
244 | |||
245 | overlay->hw_wedged = 0; | ||
246 | overlay->last_flip_req = 0; | ||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | /* overlay needs to be enabled in OCMD reg */ | ||
251 | static void intel_overlay_continue(struct intel_overlay *overlay, | ||
252 | bool load_polyphase_filter) | ||
253 | { | ||
254 | struct drm_device *dev = overlay->dev; | ||
255 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
256 | u32 flip_addr = overlay->flip_addr; | ||
257 | u32 tmp; | ||
258 | RING_LOCALS; | ||
259 | |||
260 | BUG_ON(!overlay->active); | ||
261 | |||
262 | if (load_polyphase_filter) | ||
263 | flip_addr |= OFC_UPDATE; | ||
264 | |||
265 | /* check for underruns */ | ||
266 | tmp = I915_READ(DOVSTA); | ||
267 | if (tmp & (1 << 17)) | ||
268 | DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); | ||
269 | |||
270 | BEGIN_LP_RING(4); | ||
271 | OUT_RING(MI_FLUSH); | ||
272 | OUT_RING(MI_NOOP); | ||
273 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); | ||
274 | OUT_RING(flip_addr); | ||
275 | ADVANCE_LP_RING(); | ||
276 | |||
277 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | ||
278 | } | ||
279 | |||
280 | static int intel_overlay_wait_flip(struct intel_overlay *overlay) | ||
281 | { | ||
282 | struct drm_device *dev = overlay->dev; | ||
283 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
284 | int ret; | ||
285 | u32 tmp; | ||
286 | RING_LOCALS; | ||
287 | |||
288 | if (overlay->last_flip_req != 0) { | ||
289 | ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); | ||
290 | if (ret == 0) { | ||
291 | overlay->last_flip_req = 0; | ||
292 | |||
293 | tmp = I915_READ(ISR); | ||
294 | |||
295 | if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) | ||
296 | return 0; | ||
297 | } | ||
298 | } | ||
299 | |||
300 | /* synchronous slowpath */ | ||
301 | overlay->hw_wedged = RELEASE_OLD_VID; | ||
302 | |||
303 | BEGIN_LP_RING(2); | ||
304 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | ||
305 | OUT_RING(MI_NOOP); | ||
306 | ADVANCE_LP_RING(); | ||
307 | |||
308 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | ||
309 | if (overlay->last_flip_req == 0) | ||
310 | return -ENOMEM; | ||
311 | |||
312 | ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); | ||
313 | if (ret != 0) | ||
314 | return ret; | ||
315 | |||
316 | overlay->hw_wedged = 0; | ||
317 | overlay->last_flip_req = 0; | ||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | /* overlay needs to be disabled in OCMD reg */ | ||
322 | static int intel_overlay_off(struct intel_overlay *overlay) | ||
323 | { | ||
324 | u32 flip_addr = overlay->flip_addr; | ||
325 | struct drm_device *dev = overlay->dev; | ||
326 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
327 | int ret; | ||
328 | RING_LOCALS; | ||
329 | |||
330 | BUG_ON(!overlay->active); | ||
331 | |||
332 | /* According to intel docs the overlay hw may hang (when switching | ||
333 | * off) without loading the filter coeffs. It is however unclear whether | ||
334 | * this applies to the disabling of the overlay or to the switching off | ||
335 | * of the hw. Do it in both cases */ | ||
336 | flip_addr |= OFC_UPDATE; | ||
337 | |||
338 | /* wait for overlay to go idle */ | ||
339 | overlay->hw_wedged = SWITCH_OFF_STAGE_1; | ||
340 | |||
341 | BEGIN_LP_RING(6); | ||
342 | OUT_RING(MI_FLUSH); | ||
343 | OUT_RING(MI_NOOP); | ||
344 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); | ||
345 | OUT_RING(flip_addr); | ||
346 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | ||
347 | OUT_RING(MI_NOOP); | ||
348 | ADVANCE_LP_RING(); | ||
349 | |||
350 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | ||
351 | if (overlay->last_flip_req == 0) | ||
352 | return -ENOMEM; | ||
353 | |||
354 | ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); | ||
355 | if (ret != 0) | ||
356 | return ret; | ||
357 | |||
358 | /* turn overlay off */ | ||
359 | overlay->hw_wedged = SWITCH_OFF_STAGE_2; | ||
360 | |||
361 | BEGIN_LP_RING(6); | ||
362 | OUT_RING(MI_FLUSH); | ||
363 | OUT_RING(MI_NOOP); | ||
364 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); | ||
365 | OUT_RING(flip_addr); | ||
366 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | ||
367 | OUT_RING(MI_NOOP); | ||
368 | ADVANCE_LP_RING(); | ||
369 | |||
370 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | ||
371 | if (overlay->last_flip_req == 0) | ||
372 | return -ENOMEM; | ||
373 | |||
374 | ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); | ||
375 | if (ret != 0) | ||
376 | return ret; | ||
377 | |||
378 | overlay->hw_wedged = 0; | ||
379 | overlay->last_flip_req = 0; | ||
380 | return ret; | ||
381 | } | ||
382 | |||
383 | static void intel_overlay_off_tail(struct intel_overlay *overlay) | ||
384 | { | ||
385 | struct drm_gem_object *obj; | ||
386 | |||
387 | /* never have the overlay hw on without showing a frame */ | ||
388 | BUG_ON(!overlay->vid_bo); | ||
389 | obj = overlay->vid_bo->obj; | ||
390 | |||
391 | i915_gem_object_unpin(obj); | ||
392 | drm_gem_object_unreference(obj); | ||
393 | overlay->vid_bo = NULL; | ||
394 | |||
395 | overlay->crtc->overlay = NULL; | ||
396 | overlay->crtc = NULL; | ||
397 | overlay->active = 0; | ||
398 | } | ||
399 | |||
400 | /* recover from an interruption due to a signal | ||
401 | * We have to be careful not to repeat work forever and to make forward progress. */ | ||
402 | int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, | ||
403 | int interruptible) | ||
404 | { | ||
405 | struct drm_device *dev = overlay->dev; | ||
406 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
407 | struct drm_gem_object *obj; | ||
408 | u32 flip_addr; | ||
409 | int ret; | ||
410 | RING_LOCALS; | ||
411 | |||
412 | if (overlay->hw_wedged == HW_WEDGED) | ||
413 | return -EIO; | ||
414 | |||
415 | if (overlay->last_flip_req == 0) { | ||
416 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | ||
417 | if (overlay->last_flip_req == 0) | ||
418 | return -ENOMEM; | ||
419 | } | ||
420 | |||
421 | ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible); | ||
422 | if (ret != 0) | ||
423 | return ret; | ||
424 | |||
425 | switch (overlay->hw_wedged) { | ||
426 | case RELEASE_OLD_VID: | ||
427 | obj = overlay->old_vid_bo->obj; | ||
428 | i915_gem_object_unpin(obj); | ||
429 | drm_gem_object_unreference(obj); | ||
430 | overlay->old_vid_bo = NULL; | ||
431 | break; | ||
432 | case SWITCH_OFF_STAGE_1: | ||
433 | flip_addr = overlay->flip_addr; | ||
434 | flip_addr |= OFC_UPDATE; | ||
435 | |||
436 | overlay->hw_wedged = SWITCH_OFF_STAGE_2; | ||
437 | |||
438 | BEGIN_LP_RING(6); | ||
439 | OUT_RING(MI_FLUSH); | ||
440 | OUT_RING(MI_NOOP); | ||
441 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); | ||
442 | OUT_RING(flip_addr); | ||
443 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | ||
444 | OUT_RING(MI_NOOP); | ||
445 | ADVANCE_LP_RING(); | ||
446 | |||
447 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | ||
448 | if (overlay->last_flip_req == 0) | ||
449 | return -ENOMEM; | ||
450 | |||
451 | ret = i915_do_wait_request(dev, overlay->last_flip_req, | ||
452 | interruptible); | ||
453 | if (ret != 0) | ||
454 | return ret; | ||
455 | |||
456 | case SWITCH_OFF_STAGE_2: | ||
457 | intel_overlay_off_tail(overlay); | ||
458 | break; | ||
459 | default: | ||
460 | BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP); | ||
461 | } | ||
462 | |||
463 | overlay->hw_wedged = 0; | ||
464 | overlay->last_flip_req = 0; | ||
465 | return 0; | ||
466 | } | ||
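For readers following the error paths, hw_wedged (declared in intel_drv.h above) acts as a small resume-point state machine for work interrupted by a signal; summarising the states as they are used in this file:

	/* NEEDS_WAIT_FOR_FLIP  - overlay-on flip submitted, not yet waited on
	 * RELEASE_OLD_VID      - flip finished, old frame still pinned
	 * SWITCH_OFF_STAGE_1/2 - the two ring submissions of the off sequence
	 * HW_WEDGED            - recovery gives up and returns -EIO */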
467 | |||
468 | /* Wait for pending overlay flip and release old frame. | ||
469 | * Needs to be called before the overlay registers are changed | ||
470 | * via intel_overlay_(un)map_regs_atomic */ | ||
471 | static int intel_overlay_release_old_vid(struct intel_overlay *overlay) | ||
472 | { | ||
473 | int ret; | ||
474 | struct drm_gem_object *obj; | ||
475 | |||
476 | /* only wait if there is actually an old frame to release to | ||
477 | * guarantee forward progress */ | ||
478 | if (!overlay->old_vid_bo) | ||
479 | return 0; | ||
480 | |||
481 | ret = intel_overlay_wait_flip(overlay); | ||
482 | if (ret != 0) | ||
483 | return ret; | ||
484 | |||
485 | obj = overlay->old_vid_bo->obj; | ||
486 | i915_gem_object_unpin(obj); | ||
487 | drm_gem_object_unreference(obj); | ||
488 | overlay->old_vid_bo = NULL; | ||
489 | |||
490 | return 0; | ||
491 | } | ||
492 | |||
493 | struct put_image_params { | ||
494 | int format; | ||
495 | short dst_x; | ||
496 | short dst_y; | ||
497 | short dst_w; | ||
498 | short dst_h; | ||
499 | short src_w; | ||
500 | short src_scan_h; | ||
501 | short src_scan_w; | ||
502 | short src_h; | ||
503 | short stride_Y; | ||
504 | short stride_UV; | ||
505 | int offset_Y; | ||
506 | int offset_U; | ||
507 | int offset_V; | ||
508 | }; | ||
509 | |||
510 | static int packed_depth_bytes(u32 format) | ||
511 | { | ||
512 | switch (format & I915_OVERLAY_DEPTH_MASK) { | ||
513 | case I915_OVERLAY_YUV422: | ||
514 | return 4; | ||
515 | case I915_OVERLAY_YUV411: | ||
516 | /* return 6; not implemented */ | ||
517 | default: | ||
518 | return -EINVAL; | ||
519 | } | ||
520 | } | ||
521 | |||
522 | static int packed_width_bytes(u32 format, short width) | ||
523 | { | ||
524 | switch (format & I915_OVERLAY_DEPTH_MASK) { | ||
525 | case I915_OVERLAY_YUV422: | ||
526 | return width << 1; | ||
527 | default: | ||
528 | return -EINVAL; | ||
529 | } | ||
530 | } | ||
531 | |||
532 | static int uv_hsubsampling(u32 format) | ||
533 | { | ||
534 | switch (format & I915_OVERLAY_DEPTH_MASK) { | ||
535 | case I915_OVERLAY_YUV422: | ||
536 | case I915_OVERLAY_YUV420: | ||
537 | return 2; | ||
538 | case I915_OVERLAY_YUV411: | ||
539 | case I915_OVERLAY_YUV410: | ||
540 | return 4; | ||
541 | default: | ||
542 | return -EINVAL; | ||
543 | } | ||
544 | } | ||
545 | |||
546 | static int uv_vsubsampling(u32 format) | ||
547 | { | ||
548 | switch (format & I915_OVERLAY_DEPTH_MASK) { | ||
549 | case I915_OVERLAY_YUV420: | ||
550 | case I915_OVERLAY_YUV410: | ||
551 | return 2; | ||
552 | case I915_OVERLAY_YUV422: | ||
553 | case I915_OVERLAY_YUV411: | ||
554 | return 1; | ||
555 | default: | ||
556 | return -EINVAL; | ||
557 | } | ||
558 | } | ||
559 | |||
560 | static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) | ||
561 | { | ||
562 | u32 mask, shift, ret; | ||
563 | if (IS_I9XX(dev)) { | ||
564 | mask = 0x3f; | ||
565 | shift = 6; | ||
566 | } else { | ||
567 | mask = 0x1f; | ||
568 | shift = 5; | ||
569 | } | ||
570 | ret = ((offset + width + mask) >> shift) - (offset >> shift); | ||
571 | if (IS_I9XX(dev)) | ||
572 | ret <<= 1; | ||
573 | ret -= 1; | ||
574 | return ret << 2; | ||
575 | } | ||
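To make the bit arithmetic above concrete, a trace of the helper under the assumption of an i9xx part (mask 0x3f, shift 6) with offset 0 and a Y width of 1920 bytes:

	/* ((0 + 1920 + 0x3f) >> 6) - (0 >> 6) = 30
	 * 30 << 1 = 60;  60 - 1 = 59;  59 << 2 = 236  -> value written to SWIDTHSW */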
576 | |||
577 | static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = { | ||
578 | 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, | ||
579 | 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440, | ||
580 | 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, | ||
581 | 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, | ||
582 | 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, | ||
583 | 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, | ||
584 | 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, | ||
585 | 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, | ||
586 | 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, | ||
587 | 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, | ||
588 | 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, | ||
589 | 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, | ||
590 | 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, | ||
591 | 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, | ||
592 | 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, | ||
593 | 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, | ||
594 | 0xb000, 0x3000, 0x0800, 0x3000, 0xb000}; | ||
595 | static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { | ||
596 | 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60, | ||
597 | 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40, | ||
598 | 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880, | ||
599 | 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00, | ||
600 | 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0, | ||
601 | 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0, | ||
602 | 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240, | ||
603 | 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0, | ||
604 | 0x3000, 0x0800, 0x3000}; | ||
605 | |||
606 | static void update_polyphase_filter(struct overlay_registers *regs) | ||
607 | { | ||
608 | memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs)); | ||
609 | memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs)); | ||
610 | } | ||
611 | |||
612 | static bool update_scaling_factors(struct intel_overlay *overlay, | ||
613 | struct overlay_registers *regs, | ||
614 | struct put_image_params *params) | ||
615 | { | ||
616 | /* fixed point with a 12 bit shift */ | ||
617 | u32 xscale, yscale, xscale_UV, yscale_UV; | ||
618 | #define FP_SHIFT 12 | ||
619 | #define FRACT_MASK 0xfff | ||
620 | bool scale_changed = false; | ||
621 | int uv_hscale = uv_hsubsampling(params->format); | ||
622 | int uv_vscale = uv_vsubsampling(params->format); | ||
623 | |||
624 | if (params->dst_w > 1) | ||
625 | xscale = ((params->src_scan_w - 1) << FP_SHIFT) | ||
626 | /(params->dst_w); | ||
627 | else | ||
628 | xscale = 1 << FP_SHIFT; | ||
629 | |||
630 | if (params->dst_h > 1) | ||
631 | yscale = ((params->src_scan_h - 1) << FP_SHIFT) | ||
632 | /(params->dst_h); | ||
633 | else | ||
634 | yscale = 1 << FP_SHIFT; | ||
635 | |||
636 | /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/ | ||
637 | xscale_UV = xscale/uv_hscale; | ||
638 | yscale_UV = yscale/uv_vscale; | ||
639 | /* make the Y scale to UV scale ratio an exact multiple */ | ||
640 | xscale = xscale_UV * uv_hscale; | ||
641 | yscale = yscale_UV * uv_vscale; | ||
642 | /*} else { | ||
643 | xscale_UV = 0; | ||
644 | yscale_UV = 0; | ||
645 | }*/ | ||
646 | |||
647 | if (xscale != overlay->old_xscale || yscale != overlay->old_yscale) | ||
648 | scale_changed = true; | ||
649 | overlay->old_xscale = xscale; | ||
650 | overlay->old_yscale = yscale; | ||
651 | |||
652 | regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20) | ||
653 | | ((xscale >> FP_SHIFT) << 16) | ||
654 | | ((xscale & FRACT_MASK) << 3); | ||
655 | regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20) | ||
656 | | ((xscale_UV >> FP_SHIFT) << 16) | ||
657 | | ((xscale_UV & FRACT_MASK) << 3); | ||
658 | regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16) | ||
659 | | ((yscale_UV >> FP_SHIFT) << 0); | ||
660 | |||
661 | if (scale_changed) | ||
662 | update_polyphase_filter(regs); | ||
663 | |||
664 | return scale_changed; | ||
665 | } | ||
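A minimal worked example of the 4.12 fixed-point math in update_scaling_factors() above, assuming a 720-pixel scan width scaled onto a 1440-pixel destination with 4:2:0 chroma (uv_hscale == 2); all numbers are made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define FP_SHIFT   12
#define FRACT_MASK 0xfff

int main(void)
{
	uint32_t src_scan_w = 720, dst_w = 1440, uv_hscale = 2;
	uint32_t xscale, xscale_uv, yrgbscale_x;

	xscale = ((src_scan_w - 1) << FP_SHIFT) / dst_w;	/* ~0.5 in 4.12 */
	xscale_uv = xscale / uv_hscale;				/* ~0.25 in 4.12 */
	xscale = xscale_uv * uv_hscale;	/* keep Y an exact multiple of UV */

	/* pack the way the driver does: integer part at bit 16, fraction at bit 3 */
	yrgbscale_x = ((xscale >> FP_SHIFT) << 16) | ((xscale & FRACT_MASK) << 3);

	printf("xscale=%u xscale_uv=%u YRGBSCALE(x part)=0x%08x\n",
	       xscale, xscale_uv, yrgbscale_x);
	return 0;
}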
666 | |||
667 | static void update_colorkey(struct intel_overlay *overlay, | ||
668 | struct overlay_registers *regs) | ||
669 | { | ||
670 | u32 key = overlay->color_key; | ||
671 | switch (overlay->crtc->base.fb->bits_per_pixel) { | ||
672 | case 8: | ||
673 | regs->DCLRKV = 0; | ||
674 | regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; | ||
| break; | ||
675 | case 16: | ||
676 | if (overlay->crtc->base.fb->depth == 15) { | ||
677 | regs->DCLRKV = RGB15_TO_COLORKEY(key); | ||
678 | regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE; | ||
679 | } else { | ||
680 | regs->DCLRKV = RGB16_TO_COLORKEY(key); | ||
681 | regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; | ||
682 | } | ||
| break; | ||
683 | case 24: | ||
684 | case 32: | ||
685 | regs->DCLRKV = key; | ||
686 | regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE; | ||
687 | } | ||
688 | } | ||
689 | |||
690 | static u32 overlay_cmd_reg(struct put_image_params *params) | ||
691 | { | ||
692 | u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0; | ||
693 | |||
694 | if (params->format & I915_OVERLAY_YUV_PLANAR) { | ||
695 | switch (params->format & I915_OVERLAY_DEPTH_MASK) { | ||
696 | case I915_OVERLAY_YUV422: | ||
697 | cmd |= OCMD_YUV_422_PLANAR; | ||
698 | break; | ||
699 | case I915_OVERLAY_YUV420: | ||
700 | cmd |= OCMD_YUV_420_PLANAR; | ||
701 | break; | ||
702 | case I915_OVERLAY_YUV411: | ||
703 | case I915_OVERLAY_YUV410: | ||
704 | cmd |= OCMD_YUV_410_PLANAR; | ||
705 | break; | ||
706 | } | ||
707 | } else { /* YUV packed */ | ||
708 | switch (params->format & I915_OVERLAY_DEPTH_MASK) { | ||
709 | case I915_OVERLAY_YUV422: | ||
710 | cmd |= OCMD_YUV_422_PACKED; | ||
711 | break; | ||
712 | case I915_OVERLAY_YUV411: | ||
713 | cmd |= OCMD_YUV_411_PACKED; | ||
714 | break; | ||
715 | } | ||
716 | |||
717 | switch (params->format & I915_OVERLAY_SWAP_MASK) { | ||
718 | case I915_OVERLAY_NO_SWAP: | ||
719 | break; | ||
720 | case I915_OVERLAY_UV_SWAP: | ||
721 | cmd |= OCMD_UV_SWAP; | ||
722 | break; | ||
723 | case I915_OVERLAY_Y_SWAP: | ||
724 | cmd |= OCMD_Y_SWAP; | ||
725 | break; | ||
726 | case I915_OVERLAY_Y_AND_UV_SWAP: | ||
727 | cmd |= OCMD_Y_AND_UV_SWAP; | ||
728 | break; | ||
729 | } | ||
730 | } | ||
731 | |||
732 | return cmd; | ||
733 | } | ||
734 | |||
735 | int intel_overlay_do_put_image(struct intel_overlay *overlay, | ||
736 | struct drm_gem_object *new_bo, | ||
737 | struct put_image_params *params) | ||
738 | { | ||
739 | int ret, tmp_width; | ||
740 | struct overlay_registers *regs; | ||
741 | bool scale_changed = false; | ||
742 | struct drm_i915_gem_object *bo_priv = new_bo->driver_private; | ||
743 | struct drm_device *dev = overlay->dev; | ||
744 | |||
745 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
746 | BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); | ||
747 | BUG_ON(!overlay); | ||
748 | |||
749 | ret = intel_overlay_release_old_vid(overlay); | ||
750 | if (ret != 0) | ||
751 | return ret; | ||
752 | |||
753 | ret = i915_gem_object_pin(new_bo, PAGE_SIZE); | ||
754 | if (ret != 0) | ||
755 | return ret; | ||
756 | |||
757 | ret = i915_gem_object_set_to_gtt_domain(new_bo, 0); | ||
758 | if (ret != 0) | ||
759 | goto out_unpin; | ||
760 | |||
761 | if (!overlay->active) { | ||
762 | regs = intel_overlay_map_regs_atomic(overlay); | ||
763 | if (!regs) { | ||
764 | ret = -ENOMEM; | ||
765 | goto out_unpin; | ||
766 | } | ||
767 | regs->OCONFIG = OCONF_CC_OUT_8BIT; | ||
768 | if (IS_I965GM(overlay->dev)) | ||
769 | regs->OCONFIG |= OCONF_CSC_MODE_BT709; | ||
770 | regs->OCONFIG |= overlay->crtc->pipe == 0 ? | ||
771 | OCONF_PIPE_A : OCONF_PIPE_B; | ||
772 | intel_overlay_unmap_regs_atomic(overlay); | ||
773 | |||
774 | ret = intel_overlay_on(overlay); | ||
775 | if (ret != 0) | ||
776 | goto out_unpin; | ||
777 | } | ||
778 | |||
779 | regs = intel_overlay_map_regs_atomic(overlay); | ||
780 | if (!regs) { | ||
781 | ret = -ENOMEM; | ||
782 | goto out_unpin; | ||
783 | } | ||
784 | |||
785 | regs->DWINPOS = (params->dst_y << 16) | params->dst_x; | ||
786 | regs->DWINSZ = (params->dst_h << 16) | params->dst_w; | ||
787 | |||
788 | if (params->format & I915_OVERLAY_YUV_PACKED) | ||
789 | tmp_width = packed_width_bytes(params->format, params->src_w); | ||
790 | else | ||
791 | tmp_width = params->src_w; | ||
792 | |||
793 | regs->SWIDTH = params->src_w; | ||
794 | regs->SWIDTHSW = calc_swidthsw(overlay->dev, | ||
795 | params->offset_Y, tmp_width); | ||
796 | regs->SHEIGHT = params->src_h; | ||
797 | regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y; | ||
798 | regs->OSTRIDE = params->stride_Y; | ||
799 | |||
800 | if (params->format & I915_OVERLAY_YUV_PLANAR) { | ||
801 | int uv_hscale = uv_hsubsampling(params->format); | ||
802 | int uv_vscale = uv_vsubsampling(params->format); | ||
803 | u32 tmp_U, tmp_V; | ||
804 | regs->SWIDTH |= (params->src_w/uv_hscale) << 16; | ||
805 | tmp_U = calc_swidthsw(overlay->dev, params->offset_U, | ||
806 | params->src_w/uv_hscale); | ||
807 | tmp_V = calc_swidthsw(overlay->dev, params->offset_V, | ||
808 | params->src_w/uv_hscale); | ||
809 | regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; | ||
810 | regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; | ||
811 | regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U; | ||
812 | regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V; | ||
813 | regs->OSTRIDE |= params->stride_UV << 16; | ||
814 | } | ||
815 | |||
816 | scale_changed = update_scaling_factors(overlay, regs, params); | ||
817 | |||
818 | update_colorkey(overlay, regs); | ||
819 | |||
820 | regs->OCMD = overlay_cmd_reg(params); | ||
821 | |||
822 | intel_overlay_unmap_regs_atomic(overlay); | ||
823 | |||
824 | intel_overlay_continue(overlay, scale_changed); | ||
825 | |||
826 | overlay->old_vid_bo = overlay->vid_bo; | ||
827 | overlay->vid_bo = new_bo->driver_private; | ||
828 | |||
829 | return 0; | ||
830 | |||
831 | out_unpin: | ||
832 | i915_gem_object_unpin(new_bo); | ||
833 | return ret; | ||
834 | } | ||
835 | |||
836 | int intel_overlay_switch_off(struct intel_overlay *overlay) | ||
837 | { | ||
838 | int ret; | ||
839 | struct overlay_registers *regs; | ||
840 | struct drm_device *dev = overlay->dev; | ||
841 | |||
842 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
843 | BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); | ||
844 | |||
845 | if (overlay->hw_wedged) { | ||
846 | ret = intel_overlay_recover_from_interrupt(overlay, 1); | ||
847 | if (ret != 0) | ||
848 | return ret; | ||
849 | } | ||
850 | |||
851 | if (!overlay->active) | ||
852 | return 0; | ||
853 | |||
854 | ret = intel_overlay_release_old_vid(overlay); | ||
855 | if (ret != 0) | ||
856 | return ret; | ||
857 | |||
858 | regs = intel_overlay_map_regs_atomic(overlay); | ||
859 | regs->OCMD = 0; | ||
860 | intel_overlay_unmap_regs_atomic(overlay); | ||
861 | |||
862 | ret = intel_overlay_off(overlay); | ||
863 | if (ret != 0) | ||
864 | return ret; | ||
865 | |||
866 | intel_overlay_off_tail(overlay); | ||
867 | |||
868 | return 0; | ||
869 | } | ||
870 | |||
871 | static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, | ||
872 | struct intel_crtc *crtc) | ||
873 | { | ||
874 | drm_i915_private_t *dev_priv = overlay->dev->dev_private; | ||
875 | u32 pipeconf; | ||
876 | int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
877 | |||
878 | if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON) | ||
879 | return -EINVAL; | ||
880 | |||
881 | pipeconf = I915_READ(pipeconf_reg); | ||
882 | |||
883 | /* can't use the overlay with double wide pipe */ | ||
884 | if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE) | ||
885 | return -EINVAL; | ||
886 | |||
887 | return 0; | ||
888 | } | ||
889 | |||
890 | static void update_pfit_vscale_ratio(struct intel_overlay *overlay) | ||
891 | { | ||
892 | struct drm_device *dev = overlay->dev; | ||
893 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
894 | u32 ratio; | ||
895 | u32 pfit_control = I915_READ(PFIT_CONTROL); | ||
896 | |||
897 | /* XXX: This is not the same logic as in the xorg driver, but more in | ||
898 | * line with the intel documentation for the i965 */ | ||
899 | if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) { | ||
900 | ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT; | ||
901 | } else { /* on i965 use the PGM reg to read out the autoscaler values */ | ||
902 | ratio = I915_READ(PFIT_PGM_RATIOS); | ||
903 | if (IS_I965G(dev)) | ||
904 | ratio >>= PFIT_VERT_SCALE_SHIFT_965; | ||
905 | else | ||
906 | ratio >>= PFIT_VERT_SCALE_SHIFT; | ||
907 | } | ||
908 | |||
909 | overlay->pfit_vscale_ratio = ratio; | ||
910 | } | ||
911 | |||
912 | static int check_overlay_dst(struct intel_overlay *overlay, | ||
913 | struct drm_intel_overlay_put_image *rec) | ||
914 | { | ||
915 | struct drm_display_mode *mode = &overlay->crtc->base.mode; | ||
916 | |||
917 | if ((rec->dst_x < mode->crtc_hdisplay) | ||
918 | && (rec->dst_x + rec->dst_width | ||
919 | <= mode->crtc_hdisplay) | ||
920 | && (rec->dst_y < mode->crtc_vdisplay) | ||
921 | && (rec->dst_y + rec->dst_height | ||
922 | <= mode->crtc_vdisplay)) | ||
923 | return 0; | ||
924 | else | ||
925 | return -EINVAL; | ||
926 | } | ||
927 | |||
928 | static int check_overlay_scaling(struct put_image_params *rec) | ||
929 | { | ||
930 | u32 tmp; | ||
931 | |||
932 | /* downscaling limit is 8.0 */ | ||
933 | tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16; | ||
934 | if (tmp > 7) | ||
935 | return -EINVAL; | ||
936 | tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16; | ||
937 | if (tmp > 7) | ||
938 | return -EINVAL; | ||
939 | |||
940 | return 0; | ||
941 | } | ||
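The check above allows an integer downscale ratio of at most 7, i.e. anything strictly below 8.0; a quick standalone illustration with made-up source and destination heights:

#include <stdio.h>
#include <stdint.h>

/* same integer arithmetic as check_overlay_scaling() */
static int downscale_ok(uint32_t src, uint32_t dst)
{
	return (((src << 16) / dst) >> 16) <= 7;
}

int main(void)
{
	printf("1599 -> 200: %s\n", downscale_ok(1599, 200) ? "ok" : "rejected");
	printf("1600 -> 200: %s\n", downscale_ok(1600, 200) ? "ok" : "rejected");
	return 0;
}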
942 | |||
943 | static int check_overlay_src(struct drm_device *dev, | ||
944 | struct drm_intel_overlay_put_image *rec, | ||
945 | struct drm_gem_object *new_bo) | ||
946 | { | ||
947 | u32 stride_mask; | ||
948 | int depth; | ||
949 | int uv_hscale = uv_hsubsampling(rec->flags); | ||
950 | int uv_vscale = uv_vsubsampling(rec->flags); | ||
951 | size_t tmp; | ||
952 | |||
953 | /* check src dimensions */ | ||
954 | if (IS_845G(dev) || IS_I830(dev)) { | ||
955 | if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY | ||
956 | || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) | ||
957 | return -EINVAL; | ||
958 | } else { | ||
959 | if (rec->src_height > IMAGE_MAX_HEIGHT | ||
960 | || rec->src_width > IMAGE_MAX_WIDTH) | ||
961 | return -EINVAL; | ||
962 | } | ||
963 | /* better safe than sorry, use 4 as the maximal subsampling ratio */ | ||
964 | if (rec->src_height < N_VERT_Y_TAPS*4 | ||
965 | || rec->src_width < N_HORIZ_Y_TAPS*4) | ||
966 | return -EINVAL; | ||
967 | |||
968 | /* check alignment constraints */ | ||
969 | switch (rec->flags & I915_OVERLAY_TYPE_MASK) { | ||
970 | case I915_OVERLAY_RGB: | ||
971 | /* not implemented */ | ||
972 | return -EINVAL; | ||
973 | case I915_OVERLAY_YUV_PACKED: | ||
974 | depth = packed_depth_bytes(rec->flags); | ||
975 | if (uv_vscale != 1) | ||
976 | return -EINVAL; | ||
977 | if (depth < 0) | ||
978 | return depth; | ||
979 | /* ignore UV planes */ | ||
980 | rec->stride_UV = 0; | ||
981 | rec->offset_U = 0; | ||
982 | rec->offset_V = 0; | ||
983 | /* check pixel alignment */ | ||
984 | if (rec->offset_Y % depth) | ||
985 | return -EINVAL; | ||
986 | break; | ||
987 | case I915_OVERLAY_YUV_PLANAR: | ||
988 | if (uv_vscale < 0 || uv_hscale < 0) | ||
989 | return -EINVAL; | ||
990 | /* no offset restrictions for planar formats */ | ||
991 | break; | ||
992 | default: | ||
993 | return -EINVAL; | ||
994 | } | ||
995 | |||
996 | if (rec->src_width % uv_hscale) | ||
997 | return -EINVAL; | ||
998 | |||
999 | /* stride checking */ | ||
1000 | stride_mask = 63; | ||
1001 | |||
1002 | if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) | ||
1003 | return -EINVAL; | ||
1004 | if (IS_I965G(dev) && rec->stride_Y < 512) | ||
1005 | return -EINVAL; | ||
1006 | |||
1007 | tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? | ||
1008 | 4 : 8; | ||
1009 | if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024) | ||
1010 | return -EINVAL; | ||
1011 | |||
1012 | /* check buffer dimensions */ | ||
1013 | switch (rec->flags & I915_OVERLAY_TYPE_MASK) { | ||
1014 | case I915_OVERLAY_RGB: | ||
1015 | case I915_OVERLAY_YUV_PACKED: | ||
1016 | /* always 4 Y values per depth pixels */ | ||
1017 | if (packed_width_bytes(rec->flags, rec->src_width) | ||
1018 | > rec->stride_Y) | ||
1019 | return -EINVAL; | ||
1020 | |||
1021 | tmp = rec->stride_Y*rec->src_height; | ||
1022 | if (rec->offset_Y + tmp > new_bo->size) | ||
1023 | return -EINVAL; | ||
1024 | break; | ||
1025 | case I915_OVERLAY_YUV_PLANAR: | ||
1026 | if (rec->src_width > rec->stride_Y) | ||
1027 | return -EINVAL; | ||
1028 | if (rec->src_width/uv_hscale > rec->stride_UV) | ||
1029 | return -EINVAL; | ||
1030 | |||
1031 | tmp = rec->stride_Y*rec->src_height; | ||
1032 | if (rec->offset_Y + tmp > new_bo->size) | ||
1033 | return -EINVAL; | ||
1034 | tmp = rec->stride_UV*rec->src_height; | ||
1035 | tmp /= uv_vscale; | ||
1036 | if (rec->offset_U + tmp > new_bo->size | ||
1037 | || rec->offset_V + tmp > new_bo->size) | ||
1038 | return -EINVAL; | ||
1039 | break; | ||
1040 | } | ||
1041 | |||
1042 | return 0; | ||
1043 | } | ||
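A back-of-the-envelope version of the planar buffer-size validation above, using invented numbers (640x480 YUV 4:2:0, 640-byte Y stride, 320-byte UV stride) and assuming the three planes are packed back to back:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t stride_y = 640, stride_uv = 320;
	uint32_t src_h = 480, uv_vscale = 2;

	uint32_t y_bytes  = stride_y * src_h;			/* 307200 */
	uint32_t uv_bytes = stride_uv * src_h / uv_vscale;	/*  76800 per plane */

	/* each offset_* plus its plane size must stay inside new_bo->size */
	printf("minimum bo size: %u bytes\n", y_bytes + 2 * uv_bytes);
	return 0;
}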
1044 | |||
1045 | int intel_overlay_put_image(struct drm_device *dev, void *data, | ||
1046 | struct drm_file *file_priv) | ||
1047 | { | ||
1048 | struct drm_intel_overlay_put_image *put_image_rec = data; | ||
1049 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1050 | struct intel_overlay *overlay; | ||
1051 | struct drm_mode_object *drmmode_obj; | ||
1052 | struct intel_crtc *crtc; | ||
1053 | struct drm_gem_object *new_bo; | ||
1054 | struct put_image_params *params; | ||
1055 | int ret; | ||
1056 | |||
1057 | if (!dev_priv) { | ||
1058 | DRM_ERROR("called with no initialization\n"); | ||
1059 | return -EINVAL; | ||
1060 | } | ||
1061 | |||
1062 | overlay = dev_priv->overlay; | ||
1063 | if (!overlay) { | ||
1064 | DRM_DEBUG("userspace bug: no overlay\n"); | ||
1065 | return -ENODEV; | ||
1066 | } | ||
1067 | |||
1068 | if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) { | ||
1069 | mutex_lock(&dev->mode_config.mutex); | ||
1070 | mutex_lock(&dev->struct_mutex); | ||
1071 | |||
1072 | ret = intel_overlay_switch_off(overlay); | ||
1073 | |||
1074 | mutex_unlock(&dev->struct_mutex); | ||
1075 | mutex_unlock(&dev->mode_config.mutex); | ||
1076 | |||
1077 | return ret; | ||
1078 | } | ||
1079 | |||
1080 | params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL); | ||
1081 | if (!params) | ||
1082 | return -ENOMEM; | ||
1083 | |||
1084 | drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, | ||
1085 | DRM_MODE_OBJECT_CRTC); | ||
1086 | if (!drmmode_obj) { | ||
| kfree(params); | ||
1087 | return -ENOENT; | ||
| } | ||
1088 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); | ||
1089 | |||
1090 | new_bo = drm_gem_object_lookup(dev, file_priv, | ||
1091 | put_image_rec->bo_handle); | ||
1092 | if (!new_bo) { | ||
| kfree(params); | ||
1093 | return -ENOENT; | ||
| } | ||
1094 | |||
1095 | mutex_lock(&dev->mode_config.mutex); | ||
1096 | mutex_lock(&dev->struct_mutex); | ||
1097 | |||
1098 | if (overlay->hw_wedged) { | ||
1099 | ret = intel_overlay_recover_from_interrupt(overlay, 1); | ||
1100 | if (ret != 0) | ||
1101 | goto out_unlock; | ||
1102 | } | ||
1103 | |||
1104 | if (overlay->crtc != crtc) { | ||
1105 | struct drm_display_mode *mode = &crtc->base.mode; | ||
1106 | ret = intel_overlay_switch_off(overlay); | ||
1107 | if (ret != 0) | ||
1108 | goto out_unlock; | ||
1109 | |||
1110 | ret = check_overlay_possible_on_crtc(overlay, crtc); | ||
1111 | if (ret != 0) | ||
1112 | goto out_unlock; | ||
1113 | |||
1114 | overlay->crtc = crtc; | ||
1115 | crtc->overlay = overlay; | ||
1116 | |||
1117 | if (intel_panel_fitter_pipe(dev) == crtc->pipe | ||
1118 | /* and line too wide, i.e. one-line-mode */ | ||
1119 | && mode->hdisplay > 1024) { | ||
1120 | overlay->pfit_active = 1; | ||
1121 | update_pfit_vscale_ratio(overlay); | ||
1122 | } else | ||
1123 | overlay->pfit_active = 0; | ||
1124 | } | ||
1125 | |||
1126 | ret = check_overlay_dst(overlay, put_image_rec); | ||
1127 | if (ret != 0) | ||
1128 | goto out_unlock; | ||
1129 | |||
1130 | if (overlay->pfit_active) { | ||
1131 | params->dst_y = ((((u32)put_image_rec->dst_y) << 12) / | ||
1132 | overlay->pfit_vscale_ratio); | ||
1133 | /* shifting right rounds downwards, so add 1 */ | ||
1134 | params->dst_h = ((((u32)put_image_rec->dst_height) << 12) / | ||
1135 | overlay->pfit_vscale_ratio) + 1; | ||
1136 | } else { | ||
1137 | params->dst_y = put_image_rec->dst_y; | ||
1138 | params->dst_h = put_image_rec->dst_height; | ||
1139 | } | ||
1140 | params->dst_x = put_image_rec->dst_x; | ||
1141 | params->dst_w = put_image_rec->dst_width; | ||
1142 | |||
1143 | params->src_w = put_image_rec->src_width; | ||
1144 | params->src_h = put_image_rec->src_height; | ||
1145 | params->src_scan_w = put_image_rec->src_scan_width; | ||
1146 | params->src_scan_h = put_image_rec->src_scan_height; | ||
1147 | if (params->src_scan_h > params->src_h | ||
1148 | || params->src_scan_w > params->src_w) { | ||
1149 | ret = -EINVAL; | ||
1150 | goto out_unlock; | ||
1151 | } | ||
1152 | |||
1153 | ret = check_overlay_src(dev, put_image_rec, new_bo); | ||
1154 | if (ret != 0) | ||
1155 | goto out_unlock; | ||
1156 | params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK; | ||
1157 | params->stride_Y = put_image_rec->stride_Y; | ||
1158 | params->stride_UV = put_image_rec->stride_UV; | ||
1159 | params->offset_Y = put_image_rec->offset_Y; | ||
1160 | params->offset_U = put_image_rec->offset_U; | ||
1161 | params->offset_V = put_image_rec->offset_V; | ||
1162 | |||
1163 | /* Check scaling after src size to prevent a divide-by-zero. */ | ||
1164 | ret = check_overlay_scaling(params); | ||
1165 | if (ret != 0) | ||
1166 | goto out_unlock; | ||
1167 | |||
1168 | ret = intel_overlay_do_put_image(overlay, new_bo, params); | ||
1169 | if (ret != 0) | ||
1170 | goto out_unlock; | ||
1171 | |||
1172 | mutex_unlock(&dev->struct_mutex); | ||
1173 | mutex_unlock(&dev->mode_config.mutex); | ||
1174 | |||
1175 | kfree(params); | ||
1176 | |||
1177 | return 0; | ||
1178 | |||
1179 | out_unlock: | ||
1180 | mutex_unlock(&dev->struct_mutex); | ||
1181 | mutex_unlock(&dev->mode_config.mutex); | ||
1182 | drm_gem_object_unreference(new_bo); | ||
1183 | kfree(params); | ||
1184 | |||
1185 | return ret; | ||
1186 | } | ||
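A rough illustration of the pfit_active adjustment inside intel_overlay_put_image() above; the ratio is treated here as a 4.12 fixed-point value, and both it and the destination rectangle are invented sample numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ratio = 0x0ccc;		/* hypothetical pfit_vscale_ratio (~0.8) */
	uint32_t dst_y = 100, dst_h = 300;	/* requested placement */

	uint32_t adj_y = (dst_y << 12) / ratio;
	uint32_t adj_h = (dst_h << 12) / ratio + 1;	/* +1 because the division truncates */

	printf("dst_y %u -> %u, dst_h %u -> %u\n", dst_y, adj_y, dst_h, adj_h);
	return 0;
}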
1187 | |||
1188 | static void update_reg_attrs(struct intel_overlay *overlay, | ||
1189 | struct overlay_registers *regs) | ||
1190 | { | ||
1191 | regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff); | ||
1192 | regs->OCLRC1 = overlay->saturation; | ||
1193 | } | ||
1194 | |||
1195 | static bool check_gamma_bounds(u32 gamma1, u32 gamma2) | ||
1196 | { | ||
1197 | int i; | ||
1198 | |||
1199 | if (gamma1 & 0xff000000 || gamma2 & 0xff000000) | ||
1200 | return false; | ||
1201 | |||
1202 | for (i = 0; i < 3; i++) { | ||
1203 | if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff)) | ||
1204 | return false; | ||
1205 | } | ||
1206 | |||
1207 | return true; | ||
1208 | } | ||
1209 | |||
1210 | static bool check_gamma5_errata(u32 gamma5) | ||
1211 | { | ||
1212 | int i; | ||
1213 | |||
1214 | for (i = 0; i < 3; i++) { | ||
1215 | if (((gamma5 >> i*8) & 0xff) == 0x80) | ||
1216 | return false; | ||
1217 | } | ||
1218 | |||
1219 | return true; | ||
1220 | } | ||
1221 | |||
1222 | static int check_gamma(struct drm_intel_overlay_attrs *attrs) | ||
1223 | { | ||
1224 | if (!check_gamma_bounds(0, attrs->gamma0) | ||
1225 | || !check_gamma_bounds(attrs->gamma0, attrs->gamma1) | ||
1226 | || !check_gamma_bounds(attrs->gamma1, attrs->gamma2) | ||
1227 | || !check_gamma_bounds(attrs->gamma2, attrs->gamma3) | ||
1228 | || !check_gamma_bounds(attrs->gamma3, attrs->gamma4) | ||
1229 | || !check_gamma_bounds(attrs->gamma4, attrs->gamma5) | ||
1230 | || !check_gamma_bounds(attrs->gamma5, 0x00ffffff)) | ||
1231 | return -EINVAL; | ||
1232 | if (!check_gamma5_errata(attrs->gamma5)) | ||
1233 | return -EINVAL; | ||
1234 | return 0; | ||
1235 | } | ||
1236 | |||
1237 | int intel_overlay_attrs(struct drm_device *dev, void *data, | ||
1238 | struct drm_file *file_priv) | ||
1239 | { | ||
1240 | struct drm_intel_overlay_attrs *attrs = data; | ||
1241 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1242 | struct intel_overlay *overlay; | ||
1243 | struct overlay_registers *regs; | ||
1244 | int ret; | ||
1245 | |||
1246 | if (!dev_priv) { | ||
1247 | DRM_ERROR("called with no initialization\n"); | ||
1248 | return -EINVAL; | ||
1249 | } | ||
1250 | |||
1251 | overlay = dev_priv->overlay; | ||
1252 | if (!overlay) { | ||
1253 | DRM_DEBUG("userspace bug: no overlay\n"); | ||
1254 | return -ENODEV; | ||
1255 | } | ||
1256 | |||
1257 | mutex_lock(&dev->mode_config.mutex); | ||
1258 | mutex_lock(&dev->struct_mutex); | ||
1259 | |||
1260 | if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) { | ||
1261 | attrs->color_key = overlay->color_key; | ||
1262 | attrs->brightness = overlay->brightness; | ||
1263 | attrs->contrast = overlay->contrast; | ||
1264 | attrs->saturation = overlay->saturation; | ||
1265 | |||
1266 | if (IS_I9XX(dev)) { | ||
1267 | attrs->gamma0 = I915_READ(OGAMC0); | ||
1268 | attrs->gamma1 = I915_READ(OGAMC1); | ||
1269 | attrs->gamma2 = I915_READ(OGAMC2); | ||
1270 | attrs->gamma3 = I915_READ(OGAMC3); | ||
1271 | attrs->gamma4 = I915_READ(OGAMC4); | ||
1272 | attrs->gamma5 = I915_READ(OGAMC5); | ||
1273 | } | ||
1274 | ret = 0; | ||
1275 | } else { | ||
1276 | overlay->color_key = attrs->color_key; | ||
1277 | if (attrs->brightness >= -128 && attrs->brightness <= 127) { | ||
1278 | overlay->brightness = attrs->brightness; | ||
1279 | } else { | ||
1280 | ret = -EINVAL; | ||
1281 | goto out_unlock; | ||
1282 | } | ||
1283 | if (attrs->contrast <= 255) { | ||
1284 | overlay->contrast = attrs->contrast; | ||
1285 | } else { | ||
1286 | ret = -EINVAL; | ||
1287 | goto out_unlock; | ||
1288 | } | ||
1289 | if (attrs->saturation <= 1023) { | ||
1290 | overlay->saturation = attrs->saturation; | ||
1291 | } else { | ||
1292 | ret = -EINVAL; | ||
1293 | goto out_unlock; | ||
1294 | } | ||
1295 | |||
1296 | regs = intel_overlay_map_regs_atomic(overlay); | ||
1297 | if (!regs) { | ||
1298 | ret = -ENOMEM; | ||
1299 | goto out_unlock; | ||
1300 | } | ||
1301 | |||
1302 | update_reg_attrs(overlay, regs); | ||
1303 | |||
1304 | intel_overlay_unmap_regs_atomic(overlay); | ||
1305 | |||
1306 | if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { | ||
1307 | if (!IS_I9XX(dev)) { | ||
1308 | ret = -EINVAL; | ||
1309 | goto out_unlock; | ||
1310 | } | ||
1311 | |||
1312 | if (overlay->active) { | ||
1313 | ret = -EBUSY; | ||
1314 | goto out_unlock; | ||
1315 | } | ||
1316 | |||
1317 | ret = check_gamma(attrs); | ||
1318 | if (ret != 0) | ||
1319 | goto out_unlock; | ||
1320 | |||
1321 | I915_WRITE(OGAMC0, attrs->gamma0); | ||
1322 | I915_WRITE(OGAMC1, attrs->gamma1); | ||
1323 | I915_WRITE(OGAMC2, attrs->gamma2); | ||
1324 | I915_WRITE(OGAMC3, attrs->gamma3); | ||
1325 | I915_WRITE(OGAMC4, attrs->gamma4); | ||
1326 | I915_WRITE(OGAMC5, attrs->gamma5); | ||
1327 | } | ||
1328 | ret = 0; | ||
1329 | } | ||
1330 | |||
1331 | out_unlock: | ||
1332 | mutex_unlock(&dev->struct_mutex); | ||
1333 | mutex_unlock(&dev->mode_config.mutex); | ||
1334 | |||
1335 | return ret; | ||
1336 | } | ||
1337 | |||
1338 | void intel_setup_overlay(struct drm_device *dev) | ||
1339 | { | ||
1340 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1341 | struct intel_overlay *overlay; | ||
1342 | struct drm_gem_object *reg_bo; | ||
1343 | struct overlay_registers *regs; | ||
1344 | int ret; | ||
1345 | |||
1346 | if (!OVERLAY_EXISTS(dev)) | ||
1347 | return; | ||
1348 | |||
1349 | overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL); | ||
1350 | if (!overlay) | ||
1351 | return; | ||
1352 | overlay->dev = dev; | ||
1353 | |||
1354 | reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); | ||
1355 | if (!reg_bo) | ||
1356 | goto out_free; | ||
1357 | overlay->reg_bo = reg_bo->driver_private; | ||
1358 | |||
1359 | if (OVERLAY_NONPHYSICAL(dev)) { | ||
1360 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); | ||
1361 | if (ret) { | ||
1362 | DRM_ERROR("failed to pin overlay register bo\n"); | ||
1363 | goto out_free_bo; | ||
1364 | } | ||
1365 | overlay->flip_addr = overlay->reg_bo->gtt_offset; | ||
1366 | } else { | ||
1367 | ret = i915_gem_attach_phys_object(dev, reg_bo, | ||
1368 | I915_GEM_PHYS_OVERLAY_REGS); | ||
1369 | if (ret) { | ||
1370 | DRM_ERROR("failed to attach phys overlay regs\n"); | ||
1371 | goto out_free_bo; | ||
1372 | } | ||
1373 | overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; | ||
1374 | } | ||
1375 | |||
1376 | /* init all values */ | ||
1377 | overlay->color_key = 0x0101fe; | ||
1378 | overlay->brightness = -19; | ||
1379 | overlay->contrast = 75; | ||
1380 | overlay->saturation = 146; | ||
1381 | |||
1382 | regs = intel_overlay_map_regs_atomic(overlay); | ||
1383 | if (!regs) | ||
1384 | goto out_free_bo; | ||
1385 | |||
1386 | memset(regs, 0, sizeof(struct overlay_registers)); | ||
1387 | update_polyphase_filter(regs); | ||
1388 | |||
1389 | update_reg_attrs(overlay, regs); | ||
1390 | |||
1391 | intel_overlay_unmap_regs_atomic(overlay); | ||
1392 | |||
1393 | dev_priv->overlay = overlay; | ||
1394 | DRM_INFO("initialized overlay support\n"); | ||
1395 | return; | ||
1396 | |||
1397 | out_free_bo: | ||
1398 | drm_gem_object_unreference(reg_bo); | ||
1399 | out_free: | ||
1400 | kfree(overlay); | ||
1401 | return; | ||
1402 | } | ||
1403 | |||
1404 | void intel_cleanup_overlay(struct drm_device *dev) | ||
1405 | { | ||
1406 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1407 | |||
1408 | if (dev_priv->overlay) { | ||
1409 | /* The BOs should already be freed by the generic code. | ||
1410 | * Furthermore, modesetting teardown happens beforehand, so the | ||
1411 | * hardware should already be off. */ | ||
1412 | BUG_ON(dev_priv->overlay->active); | ||
1413 | |||
1414 | kfree(dev_priv->overlay); | ||
1415 | } | ||
1416 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 083bec2e50f9..dba5147f4064 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -36,8 +36,6 @@ | |||
36 | #include "i915_drv.h" | 36 | #include "i915_drv.h" |
37 | #include "intel_sdvo_regs.h" | 37 | #include "intel_sdvo_regs.h" |
38 | 38 | ||
39 | #undef SDVO_DEBUG | ||
40 | |||
41 | static char *tv_format_names[] = { | 39 | static char *tv_format_names[] = { |
42 | "NTSC_M" , "NTSC_J" , "NTSC_443", | 40 | "NTSC_M" , "NTSC_J" , "NTSC_443", |
43 | "PAL_B" , "PAL_D" , "PAL_G" , | 41 | "PAL_B" , "PAL_D" , "PAL_G" , |
@@ -356,7 +354,6 @@ static const struct _sdvo_cmd_name { | |||
356 | #define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") | 354 | #define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") |
357 | #define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) | 355 | #define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) |
358 | 356 | ||
359 | #ifdef SDVO_DEBUG | ||
360 | static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, | 357 | static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, |
361 | void *args, int args_len) | 358 | void *args, int args_len) |
362 | { | 359 | { |
@@ -379,9 +376,6 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, | |||
379 | DRM_LOG_KMS("(%02X)", cmd); | 376 | DRM_LOG_KMS("(%02X)", cmd); |
380 | DRM_LOG_KMS("\n"); | 377 | DRM_LOG_KMS("\n"); |
381 | } | 378 | } |
382 | #else | ||
383 | #define intel_sdvo_debug_write(o, c, a, l) | ||
384 | #endif | ||
385 | 379 | ||
386 | static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, | 380 | static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, |
387 | void *args, int args_len) | 381 | void *args, int args_len) |
@@ -398,7 +392,6 @@ static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, | |||
398 | intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); | 392 | intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); |
399 | } | 393 | } |
400 | 394 | ||
401 | #ifdef SDVO_DEBUG | ||
402 | static const char *cmd_status_names[] = { | 395 | static const char *cmd_status_names[] = { |
403 | "Power on", | 396 | "Power on", |
404 | "Success", | 397 | "Success", |
@@ -427,9 +420,6 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output, | |||
427 | DRM_LOG_KMS("(??? %d)", status); | 420 | DRM_LOG_KMS("(??? %d)", status); |
428 | DRM_LOG_KMS("\n"); | 421 | DRM_LOG_KMS("\n"); |
429 | } | 422 | } |
430 | #else | ||
431 | #define intel_sdvo_debug_response(o, r, l, s) | ||
432 | #endif | ||
433 | 423 | ||
434 | static u8 intel_sdvo_read_response(struct intel_output *intel_output, | 424 | static u8 intel_sdvo_read_response(struct intel_output *intel_output, |
435 | void *response, int response_len) | 425 | void *response, int response_len) |
@@ -1627,6 +1617,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
1627 | 1617 | ||
1628 | intel_sdvo_write_cmd(intel_output, | 1618 | intel_sdvo_write_cmd(intel_output, |
1629 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); | 1619 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); |
1620 | if (sdvo_priv->is_tv) { | ||
1621 | /* add 30ms delay when the output type is SDVO-TV */ | ||
1622 | mdelay(30); | ||
1623 | } | ||
1630 | status = intel_sdvo_read_response(intel_output, &response, 2); | 1624 | status = intel_sdvo_read_response(intel_output, &response, 2); |
1631 | 1625 | ||
1632 | DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); | 1626 | DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 9ca917931afb..552ec110b741 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1213,20 +1213,17 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1213 | tv_ctl |= TV_TRILEVEL_SYNC; | 1213 | tv_ctl |= TV_TRILEVEL_SYNC; |
1214 | if (tv_mode->pal_burst) | 1214 | if (tv_mode->pal_burst) |
1215 | tv_ctl |= TV_PAL_BURST; | 1215 | tv_ctl |= TV_PAL_BURST; |
1216 | |||
1216 | scctl1 = 0; | 1217 | scctl1 = 0; |
1217 | /* dda1 implies valid video levels */ | 1218 | if (tv_mode->dda1_inc) |
1218 | if (tv_mode->dda1_inc) { | ||
1219 | scctl1 |= TV_SC_DDA1_EN; | 1219 | scctl1 |= TV_SC_DDA1_EN; |
1220 | } | ||
1221 | |||
1222 | if (tv_mode->dda2_inc) | 1220 | if (tv_mode->dda2_inc) |
1223 | scctl1 |= TV_SC_DDA2_EN; | 1221 | scctl1 |= TV_SC_DDA2_EN; |
1224 | |||
1225 | if (tv_mode->dda3_inc) | 1222 | if (tv_mode->dda3_inc) |
1226 | scctl1 |= TV_SC_DDA3_EN; | 1223 | scctl1 |= TV_SC_DDA3_EN; |
1227 | |||
1228 | scctl1 |= tv_mode->sc_reset; | 1224 | scctl1 |= tv_mode->sc_reset; |
1229 | scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; | 1225 | if (video_levels) |
1226 | scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; | ||
1230 | scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; | 1227 | scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; |
1231 | 1228 | ||
1232 | scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | | 1229 | scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | |
@@ -1416,16 +1413,16 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) | |||
1416 | * 0 0 0 Component | 1413 | * 0 0 0 Component |
1417 | */ | 1414 | */ |
1418 | if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { | 1415 | if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { |
1419 | DRM_DEBUG("Detected Composite TV connection\n"); | 1416 | DRM_DEBUG_KMS("Detected Composite TV connection\n"); |
1420 | type = DRM_MODE_CONNECTOR_Composite; | 1417 | type = DRM_MODE_CONNECTOR_Composite; |
1421 | } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { | 1418 | } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { |
1422 | DRM_DEBUG("Detected S-Video TV connection\n"); | 1419 | DRM_DEBUG_KMS("Detected S-Video TV connection\n"); |
1423 | type = DRM_MODE_CONNECTOR_SVIDEO; | 1420 | type = DRM_MODE_CONNECTOR_SVIDEO; |
1424 | } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { | 1421 | } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { |
1425 | DRM_DEBUG("Detected Component TV connection\n"); | 1422 | DRM_DEBUG_KMS("Detected Component TV connection\n"); |
1426 | type = DRM_MODE_CONNECTOR_Component; | 1423 | type = DRM_MODE_CONNECTOR_Component; |
1427 | } else { | 1424 | } else { |
1428 | DRM_DEBUG("No TV connection detected\n"); | 1425 | DRM_DEBUG_KMS("No TV connection detected\n"); |
1429 | type = -1; | 1426 | type = -1; |
1430 | } | 1427 | } |
1431 | 1428 | ||
@@ -1702,6 +1699,41 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = { | |||
1702 | .destroy = intel_tv_enc_destroy, | 1699 | .destroy = intel_tv_enc_destroy, |
1703 | }; | 1700 | }; |
1704 | 1701 | ||
1702 | /* | ||
1703 | * Enumerate the child dev array parsed from VBT to check whether | ||
1704 | * the integrated TV is present. | ||
1705 | * If it is present, return 1. | ||
1706 | * If it is not present, return 0. | ||
1707 | * If no child dev is parsed from VBT, it assumes that the TV is present. | ||
1708 | */ | ||
1709 | static int tv_is_present_in_vbt(struct drm_device *dev) | ||
1710 | { | ||
1711 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1712 | struct child_device_config *p_child; | ||
1713 | int i, ret; | ||
1714 | |||
1715 | if (!dev_priv->child_dev_num) | ||
1716 | return 1; | ||
1717 | |||
1718 | ret = 0; | ||
1719 | for (i = 0; i < dev_priv->child_dev_num; i++) { | ||
1720 | p_child = dev_priv->child_dev + i; | ||
1721 | /* | ||
1722 | * If the device type is not TV, continue. | ||
1723 | */ | ||
1724 | if (p_child->device_type != DEVICE_TYPE_INT_TV && | ||
1725 | p_child->device_type != DEVICE_TYPE_TV) | ||
1726 | continue; | ||
1727 | /* The device is regarded as present only when its | ||
1728 | * addin_offset is non-zero. | ||
1729 | */ | ||
1730 | if (p_child->addin_offset) { | ||
1731 | ret = 1; | ||
1732 | break; | ||
1733 | } | ||
1734 | } | ||
1735 | return ret; | ||
1736 | } | ||
1705 | 1737 | ||
1706 | void | 1738 | void |
1707 | intel_tv_init(struct drm_device *dev) | 1739 | intel_tv_init(struct drm_device *dev) |
@@ -1717,6 +1749,10 @@ intel_tv_init(struct drm_device *dev) | |||
1717 | if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) | 1749 | if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) |
1718 | return; | 1750 | return; |
1719 | 1751 | ||
1752 | if (!tv_is_present_in_vbt(dev)) { | ||
1753 | DRM_DEBUG_KMS("Integrated TV is not present.\n"); | ||
1754 | return; | ||
1755 | } | ||
1720 | /* Even if we have an encoder we may not have a connector */ | 1756 | /* Even if we have an encoder we may not have a connector */ |
1721 | if (!dev_priv->int_tv_support) | 1757 | if (!dev_priv->int_tv_support) |
1722 | return; | 1758 | return; |
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 4f7afc79dd82..0b2f9c2ad2c1 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
@@ -1941,8 +1941,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev) | |||
1941 | for (t = 0; t < dev_priv->usec_timeout; t++) { | 1941 | for (t = 0; t < dev_priv->usec_timeout; t++) { |
1942 | u32 done_age = GET_SCRATCH(dev_priv, 1); | 1942 | u32 done_age = GET_SCRATCH(dev_priv, 1); |
1943 | DRM_DEBUG("done_age = %d\n", done_age); | 1943 | DRM_DEBUG("done_age = %d\n", done_age); |
1944 | for (i = start; i < dma->buf_count; i++) { | 1944 | for (i = 0; i < dma->buf_count; i++) { |
1945 | buf = dma->buflist[i]; | 1945 | buf = dma->buflist[start]; |
1946 | buf_priv = buf->dev_private; | 1946 | buf_priv = buf->dev_private; |
1947 | if (buf->file_priv == NULL || (buf->pending && | 1947 | if (buf->file_priv == NULL || (buf->pending && |
1948 | buf_priv->age <= | 1948 | buf_priv->age <= |
@@ -1951,7 +1951,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev) | |||
1951 | buf->pending = 0; | 1951 | buf->pending = 0; |
1952 | return buf; | 1952 | return buf; |
1953 | } | 1953 | } |
1954 | start = 0; | 1954 | if (++start >= dma->buf_count) |
1955 | start = 0; | ||
1955 | } | 1956 | } |
1956 | 1957 | ||
1957 | if (t) { | 1958 | if (t) { |
@@ -1960,47 +1961,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev) | |||
1960 | } | 1961 | } |
1961 | } | 1962 | } |
1962 | 1963 | ||
1963 | DRM_DEBUG("returning NULL!\n"); | ||
1964 | return NULL; | 1964 | return NULL; |
1965 | } | 1965 | } |
1966 | 1966 | ||
1967 | #if 0 | ||
1968 | struct drm_buf *radeon_freelist_get(struct drm_device * dev) | ||
1969 | { | ||
1970 | struct drm_device_dma *dma = dev->dma; | ||
1971 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1972 | drm_radeon_buf_priv_t *buf_priv; | ||
1973 | struct drm_buf *buf; | ||
1974 | int i, t; | ||
1975 | int start; | ||
1976 | u32 done_age; | ||
1977 | |||
1978 | done_age = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1)); | ||
1979 | if (++dev_priv->last_buf >= dma->buf_count) | ||
1980 | dev_priv->last_buf = 0; | ||
1981 | |||
1982 | start = dev_priv->last_buf; | ||
1983 | dev_priv->stats.freelist_loops++; | ||
1984 | |||
1985 | for (t = 0; t < 2; t++) { | ||
1986 | for (i = start; i < dma->buf_count; i++) { | ||
1987 | buf = dma->buflist[i]; | ||
1988 | buf_priv = buf->dev_private; | ||
1989 | if (buf->file_priv == 0 || (buf->pending && | ||
1990 | buf_priv->age <= | ||
1991 | done_age)) { | ||
1992 | dev_priv->stats.requested_bufs++; | ||
1993 | buf->pending = 0; | ||
1994 | return buf; | ||
1995 | } | ||
1996 | } | ||
1997 | start = 0; | ||
1998 | } | ||
1999 | |||
2000 | return NULL; | ||
2001 | } | ||
2002 | #endif | ||
2003 | |||
2004 | void radeon_freelist_reset(struct drm_device * dev) | 1967 | void radeon_freelist_reset(struct drm_device * dev) |
2005 | { | 1968 | { |
2006 | struct drm_device_dma *dma = dev->dma; | 1969 | struct drm_device_dma *dma = dev->dma; |
diff --git a/include/drm/drm.h b/include/drm/drm.h index 43a35b092f04..e3f46e0cb7dc 100644 --- a/include/drm/drm.h +++ b/include/drm/drm.h | |||
@@ -728,6 +728,7 @@ struct drm_event { | |||
728 | }; | 728 | }; |
729 | 729 | ||
730 | #define DRM_EVENT_VBLANK 0x01 | 730 | #define DRM_EVENT_VBLANK 0x01 |
731 | #define DRM_EVENT_FLIP_COMPLETE 0x02 | ||
731 | 732 | ||
732 | struct drm_event_vblank { | 733 | struct drm_event_vblank { |
733 | struct drm_event base; | 734 | struct drm_event base; |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index db56a6add5de..19ef8ebdc662 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -1300,6 +1300,7 @@ extern u32 drm_vblank_count(struct drm_device *dev, int crtc); | |||
1300 | extern void drm_handle_vblank(struct drm_device *dev, int crtc); | 1300 | extern void drm_handle_vblank(struct drm_device *dev, int crtc); |
1301 | extern int drm_vblank_get(struct drm_device *dev, int crtc); | 1301 | extern int drm_vblank_get(struct drm_device *dev, int crtc); |
1302 | extern void drm_vblank_put(struct drm_device *dev, int crtc); | 1302 | extern void drm_vblank_put(struct drm_device *dev, int crtc); |
1303 | extern void drm_vblank_off(struct drm_device *dev, int crtc); | ||
1303 | extern void drm_vblank_cleanup(struct drm_device *dev); | 1304 | extern void drm_vblank_cleanup(struct drm_device *dev); |
1304 | /* Modesetting support */ | 1305 | /* Modesetting support */ |
1305 | extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); | 1306 | extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); |
@@ -1524,14 +1525,27 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map) | |||
1524 | 1525 | ||
1525 | static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) | 1526 | static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) |
1526 | { | 1527 | { |
1528 | if (size != 0 && nmemb > ULONG_MAX / size) | ||
1529 | return NULL; | ||
1530 | |||
1527 | if (size * nmemb <= PAGE_SIZE) | 1531 | if (size * nmemb <= PAGE_SIZE) |
1528 | return kcalloc(nmemb, size, GFP_KERNEL); | 1532 | return kcalloc(nmemb, size, GFP_KERNEL); |
1529 | 1533 | ||
1534 | return __vmalloc(size * nmemb, | ||
1535 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); | ||
1536 | } | ||
1537 | |||
1538 | /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */ | ||
1539 | static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) | ||
1540 | { | ||
1530 | if (size != 0 && nmemb > ULONG_MAX / size) | 1541 | if (size != 0 && nmemb > ULONG_MAX / size) |
1531 | return NULL; | 1542 | return NULL; |
1532 | 1543 | ||
1544 | if (size * nmemb <= PAGE_SIZE) | ||
1545 | return kmalloc(nmemb * size, GFP_KERNEL); | ||
1546 | |||
1533 | return __vmalloc(size * nmemb, | 1547 | return __vmalloc(size * nmemb, |
1534 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); | 1548 | GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); |
1535 | } | 1549 | } |
1536 | 1550 | ||
1537 | static __inline void drm_free_large(void *ptr) | 1551 | static __inline void drm_free_large(void *ptr) |
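The new nmemb > ULONG_MAX / size guard in the hunk above exists because the multiplication can silently wrap; a standalone userspace sketch (SIZE_MAX standing in for the kernel's ULONG_MAX, sizes invented):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
	size_t size = 4096, nmemb = SIZE_MAX / 4096 + 2;
	size_t wrapped = size * nmemb;	/* unsigned overflow wraps around */

	printf("naive product: %zu bytes\n", wrapped);	/* prints 4096 here */
	if (size != 0 && nmemb > SIZE_MAX / size)
		printf("guard catches it: refuse the allocation\n");
	return 0;
}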
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 219f075d2733..fdf43abc36db 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -743,7 +743,8 @@ extern void drm_mode_connector_detach_encoder(struct drm_connector *connector, | |||
743 | struct drm_encoder *encoder); | 743 | struct drm_encoder *encoder); |
744 | extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, | 744 | extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, |
745 | int gamma_size); | 745 | int gamma_size); |
746 | extern void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type); | 746 | extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, |
747 | uint32_t id, uint32_t type); | ||
747 | /* IOCTLs */ | 748 | /* IOCTLs */ |
748 | extern int drm_mode_getresources(struct drm_device *dev, | 749 | extern int drm_mode_getresources(struct drm_device *dev, |
749 | void *data, struct drm_file *file_priv); | 750 | void *data, struct drm_file *file_priv); |
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h index 26641e95e0a4..393369147a2d 100644 --- a/include/drm/drm_os_linux.h +++ b/include/drm/drm_os_linux.h | |||
@@ -123,5 +123,5 @@ do { \ | |||
123 | remove_wait_queue(&(queue), &entry); \ | 123 | remove_wait_queue(&(queue), &entry); \ |
124 | } while (0) | 124 | } while (0) |
125 | 125 | ||
126 | #define DRM_WAKEUP( queue ) wake_up_interruptible( queue ) | 126 | #define DRM_WAKEUP( queue ) wake_up( queue ) |
127 | #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue ) | 127 | #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue ) |
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index a04c3ab1d726..ec3f5e80a5df 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h | |||
@@ -186,6 +186,8 @@ typedef struct _drm_i915_sarea { | |||
186 | #define DRM_I915_GEM_MMAP_GTT 0x24 | 186 | #define DRM_I915_GEM_MMAP_GTT 0x24 |
187 | #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 | 187 | #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 |
188 | #define DRM_I915_GEM_MADVISE 0x26 | 188 | #define DRM_I915_GEM_MADVISE 0x26 |
189 | #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 | ||
190 | #define DRM_I915_OVERLAY_ATTRS 0x28 | ||
189 | 191 | ||
190 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) | 192 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) |
191 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) | 193 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) |
@@ -221,8 +223,10 @@ typedef struct _drm_i915_sarea { | |||
221 | #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) | 223 | #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) |
222 | #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) | 224 | #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) |
223 | #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) | 225 | #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) |
224 | #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id) | 226 | #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) |
225 | #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) | 227 | #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) |
228 | #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) | ||
229 | #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) | ||
226 | 230 | ||
227 | /* Allow drivers to submit batchbuffers directly to hardware, relying | 231 | /* Allow drivers to submit batchbuffers directly to hardware, relying |
228 | * on the security mechanisms provided by hardware. | 232 | * on the security mechanisms provided by hardware. |
@@ -266,6 +270,8 @@ typedef struct drm_i915_irq_wait { | |||
266 | #define I915_PARAM_CHIPSET_ID 4 | 270 | #define I915_PARAM_CHIPSET_ID 4 |
267 | #define I915_PARAM_HAS_GEM 5 | 271 | #define I915_PARAM_HAS_GEM 5 |
268 | #define I915_PARAM_NUM_FENCES_AVAIL 6 | 272 | #define I915_PARAM_NUM_FENCES_AVAIL 6 |
273 | #define I915_PARAM_HAS_OVERLAY 7 | ||
274 | #define I915_PARAM_HAS_PAGEFLIPPING 8 | ||
269 | 275 | ||
270 | typedef struct drm_i915_getparam { | 276 | typedef struct drm_i915_getparam { |
271 | int param; | 277 | int param; |
@@ -686,4 +692,70 @@ struct drm_i915_gem_madvise { | |||
686 | __u32 retained; | 692 | __u32 retained; |
687 | }; | 693 | }; |
688 | 694 | ||
695 | /* flags */ | ||
696 | #define I915_OVERLAY_TYPE_MASK 0xff | ||
697 | #define I915_OVERLAY_YUV_PLANAR 0x01 | ||
698 | #define I915_OVERLAY_YUV_PACKED 0x02 | ||
699 | #define I915_OVERLAY_RGB 0x03 | ||
700 | |||
701 | #define I915_OVERLAY_DEPTH_MASK 0xff00 | ||
702 | #define I915_OVERLAY_RGB24 0x1000 | ||
703 | #define I915_OVERLAY_RGB16 0x2000 | ||
704 | #define I915_OVERLAY_RGB15 0x3000 | ||
705 | #define I915_OVERLAY_YUV422 0x0100 | ||
706 | #define I915_OVERLAY_YUV411 0x0200 | ||
707 | #define I915_OVERLAY_YUV420 0x0300 | ||
708 | #define I915_OVERLAY_YUV410 0x0400 | ||
709 | |||
710 | #define I915_OVERLAY_SWAP_MASK 0xff0000 | ||
711 | #define I915_OVERLAY_NO_SWAP 0x000000 | ||
712 | #define I915_OVERLAY_UV_SWAP 0x010000 | ||
713 | #define I915_OVERLAY_Y_SWAP 0x020000 | ||
714 | #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 | ||
715 | |||
716 | #define I915_OVERLAY_FLAGS_MASK 0xff000000 | ||
717 | #define I915_OVERLAY_ENABLE 0x01000000 | ||
718 | |||
719 | struct drm_intel_overlay_put_image { | ||
720 | /* various flags and src format description */ | ||
721 | __u32 flags; | ||
722 | /* source picture description */ | ||
723 | __u32 bo_handle; | ||
724 | /* stride values and offsets are in bytes, buffer relative */ | ||
725 | __u16 stride_Y; /* stride for packed formats */ | ||
726 | __u16 stride_UV; | ||
727 | __u32 offset_Y; /* offset for packed formats */ | ||
728 | __u32 offset_U; | ||
729 | __u32 offset_V; | ||
730 | /* in pixels */ | ||
731 | __u16 src_width; | ||
732 | __u16 src_height; | ||
733 | /* to compensate the scaling factors for partially covered surfaces */ | ||
734 | __u16 src_scan_width; | ||
735 | __u16 src_scan_height; | ||
736 | /* output crtc description */ | ||
737 | __u32 crtc_id; | ||
738 | __u16 dst_x; | ||
739 | __u16 dst_y; | ||
740 | __u16 dst_width; | ||
741 | __u16 dst_height; | ||
742 | }; | ||
743 | |||
744 | /* flags */ | ||
745 | #define I915_OVERLAY_UPDATE_ATTRS (1<<0) | ||
746 | #define I915_OVERLAY_UPDATE_GAMMA (1<<1) | ||
747 | struct drm_intel_overlay_attrs { | ||
748 | __u32 flags; | ||
749 | __u32 color_key; | ||
750 | __s32 brightness; | ||
751 | __u32 contrast; | ||
752 | __u32 saturation; | ||
753 | __u32 gamma0; | ||
754 | __u32 gamma1; | ||
755 | __u32 gamma2; | ||
756 | __u32 gamma3; | ||
757 | __u32 gamma4; | ||
758 | __u32 gamma5; | ||
759 | }; | ||
760 | |||
689 | #endif /* _I915_DRM_H_ */ | 761 | #endif /* _I915_DRM_H_ */ |